text stringlengths 0 1.05M | meta dict |
|---|---|
"""Arithmetics for dense recursive polynomials in ``K[x]`` or ``K[X]``. """
from __future__ import print_function, division
from sympy.core.compatibility import range
from sympy.polys.densebasic import (
dup_slice,
dup_LC, dmp_LC,
dup_degree, dmp_degree,
dup_strip, dmp_strip,
dmp_zero_p, dmp_zero,
dmp_one_p, dmp_one,
dmp_ground, dmp_zeros)
from sympy.polys.polyerrors import (ExactQuotientFailed, PolynomialDivisionFailed)
def dup_add_term(f, c, i, K):
    """
    Add ``c*x**i`` to ``f`` in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_add_term(x**2 - 1, ZZ(2), 4)
    2*x**4 + x**2 - 1
    """
    # Adding a zero term is a no-op.
    if not c:
        return f
    n = len(f)
    # Coefficients are stored highest degree first, so the x**i
    # coefficient (when present) lives at index m.
    m = n - i - 1
    if i == n - 1:
        # Adding into the leading coefficient may cancel it, so strip
        # any leading zeros that result.
        return dup_strip([f[0] + c] + f[1:])
    else:
        if i >= n:
            # New leading term: prepend c and zero-pad the degree gap.
            return [c] + [K.zero]*(i - n) + f
        else:
            return f[:m] + [f[m] + c] + f[m + 1:]
def dmp_add_term(f, c, i, u, K):
    """
    Add ``c(x_2..x_u)*x_0**i`` to ``f`` in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_add_term(x*y + 1, 2, 2)
    2*x**2 + x*y + 1
    """
    # u == 0 means univariate; fall back to the dup_* routine.
    if not u:
        return dup_add_term(f, c, i, K)
    # c is itself a polynomial one level down.
    v = u - 1
    if dmp_zero_p(c, v):
        return f
    n = len(f)
    # Coefficients are stored highest degree (in x_0) first.
    m = n - i - 1
    if i == n - 1:
        # The leading coefficient may become zero; re-strip at level u.
        return dmp_strip([dmp_add(f[0], c, v, K)] + f[1:], u)
    else:
        if i >= n:
            # New leading term: prepend c and pad with zero polynomials.
            return [c] + dmp_zeros(i - n, v, K) + f
        else:
            return f[:m] + [dmp_add(f[m], c, v, K)] + f[m + 1:]
def dup_sub_term(f, c, i, K):
    """
    Subtract ``c*x**i`` from ``f`` in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_sub_term(2*x**4 + x**2 - 1, ZZ(2), 4)
    x**2 - 1
    """
    # Subtracting a zero term is a no-op.
    if not c:
        return f
    n = len(f)
    # Index of the x**i coefficient in the dense (degree-descending) list.
    m = n - i - 1
    if i == n - 1:
        # The leading coefficient may cancel, so strip leading zeros.
        return dup_strip([f[0] - c] + f[1:])
    else:
        if i >= n:
            # New leading term: -c, followed by zero padding.
            return [-c] + [K.zero]*(i - n) + f
        else:
            return f[:m] + [f[m] - c] + f[m + 1:]


def dmp_sub_term(f, c, i, u, K):
    """
    Subtract ``c(x_2..x_u)*x_0**i`` from ``f`` in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_sub_term(2*x**2 + x*y + 1, 2, 2)
    x*y + 1
    """
    if not u:
        # Univariate: subtraction is addition of the negated coefficient.
        return dup_add_term(f, -c, i, K)
    v = u - 1
    if dmp_zero_p(c, v):
        return f
    n = len(f)
    m = n - i - 1
    if i == n - 1:
        # The leading coefficient may cancel; re-strip at level u.
        return dmp_strip([dmp_sub(f[0], c, v, K)] + f[1:], u)
    else:
        if i >= n:
            # New leading term: negate c and pad with zero polynomials.
            return [dmp_neg(c, v, K)] + dmp_zeros(i - n, v, K) + f
        else:
            return f[:m] + [dmp_sub(f[m], c, v, K)] + f[m + 1:]
def dup_mul_term(f, c, i, K):
    """
    Multiply ``f`` by ``c*x**i`` in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_mul_term(x**2 - 1, ZZ(3), 2)
    3*x**4 - 3*x**2
    """
    # Multiplying by zero, or multiplying the zero polynomial, yields
    # the zero polynomial (the empty list).
    if not c or not f:
        return []
    # Scale every coefficient by c, then append i zeros, which shifts
    # every term up by x**i.
    scaled = [coeff * c for coeff in f]
    return scaled + [K.zero] * i
def dmp_mul_term(f, c, i, u, K):
    """
    Multiply ``f`` by ``c(x_2..x_u)*x_0**i`` in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_mul_term(x**2*y + x, 3*y, 2)
    3*x**4*y**2 + 3*x**3*y
    """
    if not u:
        return dup_mul_term(f, c, i, K)
    v = u - 1
    # Zero polynomial times anything stays zero.
    if dmp_zero_p(f, u):
        return f
    # Multiplying by a zero coefficient gives the zero polynomial.
    if dmp_zero_p(c, v):
        return dmp_zero(u)
    else:
        # Multiply each coefficient by c, then shift by x_0**i via
        # trailing zero polynomials.
        return [ dmp_mul(cf, c, v, K) for cf in f ] + dmp_zeros(i, v, K)
def dup_add_ground(f, c, K):
    """
    Add an element of the ground domain to ``f``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_add_ground(x**3 + 2*x**2 + 3*x + 4, ZZ(4))
    x**3 + 2*x**2 + 3*x + 8
    """
    # A ground element is just the x**0 term.
    return dup_add_term(f, c, 0, K)


def dmp_add_ground(f, c, u, K):
    """
    Add an element of the ground domain to ``f``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_add_ground(x**3 + 2*x**2 + 3*x + 4, ZZ(4))
    x**3 + 2*x**2 + 3*x + 8
    """
    # Lift c to a constant polynomial at level u - 1, then add it as
    # the x_0**0 term.
    return dmp_add_term(f, dmp_ground(c, u - 1), 0, u, K)


def dup_sub_ground(f, c, K):
    """
    Subtract an element of the ground domain from ``f``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_sub_ground(x**3 + 2*x**2 + 3*x + 4, ZZ(4))
    x**3 + 2*x**2 + 3*x
    """
    # A ground element is just the x**0 term.
    return dup_sub_term(f, c, 0, K)


def dmp_sub_ground(f, c, u, K):
    """
    Subtract an element of the ground domain from ``f``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_sub_ground(x**3 + 2*x**2 + 3*x + 4, ZZ(4))
    x**3 + 2*x**2 + 3*x
    """
    # Lift c to a constant polynomial at level u - 1 before subtracting.
    return dmp_sub_term(f, dmp_ground(c, u - 1), 0, u, K)
def dup_mul_ground(f, c, K):
    """
    Multiply ``f`` by a constant value in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_mul_ground(x**2 + 2*x - 1, ZZ(3))
    3*x**2 + 6*x - 3
    """
    # c == 0, or f already zero, collapses the product to the zero
    # polynomial (the empty list).
    if not c or not f:
        return []
    return [coeff * c for coeff in f]
def dmp_mul_ground(f, c, u, K):
    """
    Multiply ``f`` by a constant value in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_mul_ground(2*x + 2*y, ZZ(3))
    6*x + 6*y
    """
    if not u:
        return dup_mul_ground(f, c, K)
    # Recurse into each coefficient one level down.
    v = u - 1
    return [ dmp_mul_ground(cf, c, v, K) for cf in f ]
def dup_quo_ground(f, c, K):
    """
    Quotient by a constant in ``K[x]``.

    Over a field this is exact division; over a ring each coefficient
    is floor-divided, which may discard remainders.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ, QQ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_quo_ground(3*x**2 + 2, ZZ(2))
    x**2 + 1
    >>> R, x = ring("x", QQ)
    >>> R.dup_quo_ground(3*x**2 + 2, QQ(2))
    3/2*x**2 + 1
    """
    if not c:
        raise ZeroDivisionError('polynomial division')
    if not f:
        return f
    if K.is_Field:
        return [ K.quo(cf, c) for cf in f ]
    else:
        # Ring case: floor division coefficient-wise (not exact).
        return [ cf // c for cf in f ]


def dmp_quo_ground(f, c, u, K):
    """
    Quotient by a constant in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ, QQ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_quo_ground(2*x**2*y + 3*x, ZZ(2))
    x**2*y + x
    >>> R, x,y = ring("x,y", QQ)
    >>> R.dmp_quo_ground(2*x**2*y + 3*x, QQ(2))
    x**2*y + 3/2*x
    """
    if not u:
        return dup_quo_ground(f, c, K)
    # Recurse into each coefficient one level down.
    v = u - 1
    return [ dmp_quo_ground(cf, c, v, K) for cf in f ]


def dup_exquo_ground(f, c, K):
    """
    Exact quotient by a constant in ``K[x]``.

    Raises an exception (via ``K.exquo``) if any coefficient is not
    exactly divisible by ``c``.

    Examples
    ========

    >>> from sympy.polys import ring, QQ
    >>> R, x = ring("x", QQ)
    >>> R.dup_exquo_ground(x**2 + 2, QQ(2))
    1/2*x**2 + 1
    """
    if not c:
        raise ZeroDivisionError('polynomial division')
    if not f:
        return f
    return [ K.exquo(cf, c) for cf in f ]


def dmp_exquo_ground(f, c, u, K):
    """
    Exact quotient by a constant in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, QQ
    >>> R, x,y = ring("x,y", QQ)
    >>> R.dmp_exquo_ground(x**2*y + 2*x, QQ(2))
    1/2*x**2*y + x
    """
    if not u:
        return dup_exquo_ground(f, c, K)
    # Recurse into each coefficient one level down.
    v = u - 1
    return [ dmp_exquo_ground(cf, c, v, K) for cf in f ]
def dup_lshift(f, n, K):
    """
    Efficiently multiply ``f`` by ``x**n`` in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_lshift(x**2 + 1, 2)
    x**4 + x**2
    """
    # The zero polynomial stays zero; otherwise appending n zero
    # coefficients shifts every term up by x**n.
    return f + [K.zero]*n if f else f
def dup_rshift(f, n, K):
    """
    Efficiently divide ``f`` by ``x**n`` in ``K[x]``.

    The last ``n`` coefficients (the terms of degree below ``n``) are
    discarded, so this is an exact division only when they are zero.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_rshift(x**4 + x**2, 2)
    x**2 + 1
    >>> R.dup_rshift(x**4 + x**2 + 2, 2)
    x**2 + 1
    """
    # NOTE: ``f[:-n]`` would wrongly return [] (the zero polynomial)
    # for n == 0, since f[:-0] is f[:0].  Use an explicit end index so
    # shifting by zero is the identity.
    return f[:len(f) - n]
def dup_abs(f, K):
    """
    Make all coefficients positive in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_abs(x**2 - 1)
    x**2 + 1
    """
    # Coefficient-wise absolute value; the degree cannot change.
    return [ K.abs(coeff) for coeff in f ]


def dmp_abs(f, u, K):
    """
    Make all coefficients positive in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_abs(x**2*y - x)
    x**2*y + x
    """
    if not u:
        return dup_abs(f, K)
    # Recurse into each coefficient one level down.
    v = u - 1
    return [ dmp_abs(cf, v, K) for cf in f ]
def dup_neg(f, K):
    """
    Negate a polynomial in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_neg(x**2 - 1)
    -x**2 + 1
    """
    # Coefficient-wise negation; the degree cannot change.
    return [ -coeff for coeff in f ]


def dmp_neg(f, u, K):
    """
    Negate a polynomial in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_neg(x**2*y - x)
    -x**2*y + x
    """
    if not u:
        return dup_neg(f, K)
    # Recurse into each coefficient one level down.
    v = u - 1
    return [ dmp_neg(cf, v, K) for cf in f ]
def dup_add(f, g, K):
    """
    Add dense polynomials in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_add(x**2 - 1, x - 2)
    x**2 + x - 3
    """
    if not f:
        return g
    if not g:
        return f
    df = dup_degree(f)
    dg = dup_degree(g)
    if df == dg:
        # Same degree: add pointwise; the leading terms may cancel,
        # hence the strip.
        return dup_strip([ a + b for a, b in zip(f, g) ])
    else:
        k = abs(df - dg)
        # Split the longer polynomial into a high part h (kept as is)
        # and a low part of the same length as the shorter one.
        if df > dg:
            h, f = f[:k], f[k:]
        else:
            h, g = g[:k], g[k:]
        # No strip needed: h's leading coefficient is untouched.
        return h + [ a + b for a, b in zip(f, g) ]


def dmp_add(f, g, u, K):
    """
    Add dense polynomials in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_add(x**2 + y, x**2*y + x)
    x**2*y + x**2 + x + y
    """
    if not u:
        return dup_add(f, g, K)
    df = dmp_degree(f, u)
    # Degree < 0 means the zero polynomial.
    if df < 0:
        return g
    dg = dmp_degree(g, u)
    if dg < 0:
        return f
    v = u - 1
    if df == dg:
        # Same degree: add coefficient-wise and strip cancellation.
        return dmp_strip([ dmp_add(a, b, v, K) for a, b in zip(f, g) ], u)
    else:
        k = abs(df - dg)
        # Keep the high part of the longer operand unchanged.
        if df > dg:
            h, f = f[:k], f[k:]
        else:
            h, g = g[:k], g[k:]
        return h + [ dmp_add(a, b, v, K) for a, b in zip(f, g) ]
def dup_sub(f, g, K):
    """
    Subtract dense polynomials in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_sub(x**2 - 1, x - 2)
    x**2 - x + 1
    """
    if not f:
        return dup_neg(g, K)
    if not g:
        return f
    df = dup_degree(f)
    dg = dup_degree(g)
    if df == dg:
        # Same degree: subtract pointwise; leading terms may cancel.
        return dup_strip([ a - b for a, b in zip(f, g) ])
    else:
        k = abs(df - dg)
        if df > dg:
            # High part of f passes through unchanged.
            h, f = f[:k], f[k:]
        else:
            # High part comes from g, so it must be negated.
            h, g = dup_neg(g[:k], K), g[k:]
        return h + [ a - b for a, b in zip(f, g) ]


def dmp_sub(f, g, u, K):
    """
    Subtract dense polynomials in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_sub(x**2 + y, x**2*y + x)
    -x**2*y + x**2 - x + y
    """
    if not u:
        return dup_sub(f, g, K)
    df = dmp_degree(f, u)
    # Degree < 0 means the zero polynomial.
    if df < 0:
        return dmp_neg(g, u, K)
    dg = dmp_degree(g, u)
    if dg < 0:
        return f
    v = u - 1
    if df == dg:
        return dmp_strip([ dmp_sub(a, b, v, K) for a, b in zip(f, g) ], u)
    else:
        k = abs(df - dg)
        if df > dg:
            h, f = f[:k], f[k:]
        else:
            # The high part of g must be negated before being kept.
            h, g = dmp_neg(g[:k], u, K), g[k:]
        return h + [ dmp_sub(a, b, v, K) for a, b in zip(f, g) ]
def dup_add_mul(f, g, h, K):
    """
    Returns ``f + g*h`` where ``f, g, h`` are in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_add_mul(x**2 - 1, x - 2, x + 2)
    2*x**2 - 5
    """
    # Fused multiply-add convenience wrapper.
    return dup_add(f, dup_mul(g, h, K), K)


def dmp_add_mul(f, g, h, u, K):
    """
    Returns ``f + g*h`` where ``f, g, h`` are in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_add_mul(x**2 + y, x, x + 2)
    2*x**2 + 2*x + y
    """
    return dmp_add(f, dmp_mul(g, h, u, K), u, K)


def dup_sub_mul(f, g, h, K):
    """
    Returns ``f - g*h`` where ``f, g, h`` are in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_sub_mul(x**2 - 1, x - 2, x + 2)
    3
    """
    # Fused multiply-subtract convenience wrapper.
    return dup_sub(f, dup_mul(g, h, K), K)


def dmp_sub_mul(f, g, h, u, K):
    """
    Returns ``f - g*h`` where ``f, g, h`` are in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_sub_mul(x**2 + y, x, x + 2)
    -2*x + y
    """
    return dmp_sub(f, dmp_mul(g, h, u, K), u, K)
def dup_mul(f, g, K):
    """
    Multiply dense polynomials in ``K[x]``.

    Uses schoolbook multiplication for small degrees and Karatsuba's
    divide-and-conquer algorithm for large ones.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_mul(x - 2, x + 2)
    x**2 - 4
    """
    # Squaring has a dedicated, faster routine.
    if f == g:
        return dup_sqr(f, K)
    if not (f and g):
        return []
    df = dup_degree(f)
    dg = dup_degree(g)
    n = max(df, dg) + 1
    if n < 100:
        # Schoolbook: coefficient of x**(df+dg-i) is a convolution sum.
        h = []
        for i in range(0, df + dg + 1):
            coeff = K.zero
            for j in range(max(0, i - dg), min(df, i) + 1):
                coeff += f[j]*g[i - j]
            h.append(coeff)
        return dup_strip(h)
    else:
        # Use Karatsuba's algorithm (divide and conquer), see e.g.:
        # Joris van der Hoeven, Relax But Don't Be Too Lazy,
        # J. Symbolic Computation, 11 (2002), section 3.1.1.
        n2 = n//2
        # Split each operand into low (degree < n2) and high parts.
        fl, gl = dup_slice(f, 0, n2, K), dup_slice(g, 0, n2, K)
        fh = dup_rshift(dup_slice(f, n2, n, K), n2, K)
        gh = dup_rshift(dup_slice(g, n2, n, K), n2, K)
        lo, hi = dup_mul(fl, gl, K), dup_mul(fh, gh, K)
        # One multiplication yields the middle: (fl+fh)(gl+gh) - lo - hi.
        mid = dup_mul(dup_add(fl, fh, K), dup_add(gl, gh, K), K)
        mid = dup_sub(mid, dup_add(lo, hi, K), K)
        # Recombine: lo + mid*x**n2 + hi*x**(2*n2).
        return dup_add(dup_add(lo, dup_lshift(mid, n2, K), K),
                       dup_lshift(hi, 2*n2, K), K)
def dmp_mul(f, g, u, K):
    """
    Multiply dense polynomials in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_mul(x*y + 1, x)
    x**2*y + x
    """
    if not u:
        return dup_mul(f, g, K)
    # Squaring has a dedicated, faster routine.
    if f == g:
        return dmp_sqr(f, u, K)
    df = dmp_degree(f, u)
    # Degree < 0 means the zero polynomial.
    if df < 0:
        return f
    dg = dmp_degree(g, u)
    if dg < 0:
        return g
    h, v = [], u - 1
    # Schoolbook convolution; coefficients are polynomials at level v.
    for i in range(0, df + dg + 1):
        coeff = dmp_zero(v)
        for j in range(max(0, i - dg), min(df, i) + 1):
            coeff = dmp_add(coeff, dmp_mul(f[j], g[i - j], v, K), v, K)
        h.append(coeff)
    return dmp_strip(h, u)
def dup_sqr(f, K):
    """
    Square dense polynomials in ``K[x]``.

    Exploits the symmetry ``f[j]*f[i-j] == f[i-j]*f[j]`` so only about
    half of the products of the plain convolution are computed.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_sqr(x**2 + 1)
    x**4 + 2*x**2 + 1
    """
    df, h = len(f) - 1, []
    for i in range(0, 2*df + 1):
        c = K.zero
        # The full convolution sum would run j over [jmin, jmax].
        jmin = max(0, i - df)
        jmax = min(i, df)
        n = jmax - jmin + 1
        # Restrict to the first half of the symmetric pairs.
        jmax = jmin + n // 2 - 1
        for j in range(jmin, jmax + 1):
            c += f[j]*f[i - j]
        # Each distinct pair occurs twice in the full sum.
        c += c
        if n & 1:
            # Odd pair count: the middle term f[k]**2 appears only once.
            elem = f[jmax + 1]
            c += elem**2
        h.append(c)
    return dup_strip(h)
def dmp_sqr(f, u, K):
    """
    Square dense polynomials in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_sqr(x**2 + x*y + y**2)
    x**4 + 2*x**3*y + 3*x**2*y**2 + 2*x*y**3 + y**4
    """
    if not u:
        return dup_sqr(f, K)
    df = dmp_degree(f, u)
    # Degree < 0 means the zero polynomial.
    if df < 0:
        return f
    h, v = [], u - 1
    for i in range(0, 2*df + 1):
        c = dmp_zero(v)
        # Symmetric-pair trick as in dup_sqr: only half the products
        # are computed, then doubled.
        jmin = max(0, i - df)
        jmax = min(i, df)
        n = jmax - jmin + 1
        jmax = jmin + n // 2 - 1
        for j in range(jmin, jmax + 1):
            c = dmp_add(c, dmp_mul(f[j], f[i - j], v, K), v, K)
        c = dmp_mul_ground(c, K(2), v, K)
        if n & 1:
            # Odd pair count: add the unpaired middle square once.
            elem = dmp_sqr(f[jmax + 1], v, K)
            c = dmp_add(c, elem, v, K)
        h.append(c)
    return dmp_strip(h, u)
def dup_pow(f, n, K):
    """
    Raise ``f`` to the ``n``-th power in ``K[x]``.

    Uses binary (square-and-multiply) exponentiation.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_pow(x - 2, 3)
    x**3 - 6*x**2 + 12*x - 8
    """
    if not n:
        return [K.one]
    if n < 0:
        raise ValueError("can't raise polynomial to a negative power")
    # f**1, 0**n and 1**n are all trivially f itself.
    if n == 1 or not f or f == [K.one]:
        return f
    g = [K.one]
    while True:
        # Consume bits of n from the least significant end.
        n, m = n//2, n
        if m % 2:
            g = dup_mul(g, f, K)
        if not n:
            break
        f = dup_sqr(f, K)
    return g
def dmp_pow(f, n, u, K):
    """
    Raise ``f`` to the ``n``-th power in ``K[X]``.

    Uses binary (square-and-multiply) exponentiation.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_pow(x*y + 1, 3)
    x**3*y**3 + 3*x**2*y**2 + 3*x*y + 1
    """
    if not u:
        return dup_pow(f, n, K)
    if not n:
        return dmp_one(u, K)
    if n < 0:
        raise ValueError("can't raise polynomial to a negative power")
    # f**1, 0**n and 1**n are all trivially f itself.
    if n == 1 or dmp_zero_p(f, u) or dmp_one_p(f, u, K):
        return f
    g = dmp_one(u, K)
    while True:
        # Consume bits of n from the least significant end.
        n, m = n//2, n
        if m & 1:
            g = dmp_mul(g, f, u, K)
        if not n:
            break
        f = dmp_sqr(f, u, K)
    return g
def dup_pdiv(f, g, K):
    """
    Polynomial pseudo-division in ``K[x]``.

    Computes ``q`` and ``r`` such that ``lc(g)**N * f == q*g + r`` with
    ``deg(r) < deg(g)``, where ``N = deg(f) - deg(g) + 1``.  Scaling by
    ``lc(g)`` avoids fractions when ``K`` is not a field.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_pdiv(x**2 + 1, 2*x - 4)
    (2*x + 4, 20)
    """
    df = dup_degree(f)
    dg = dup_degree(g)
    q, r, dr = [], f, df
    if not g:
        raise ZeroDivisionError("polynomial division")
    elif df < dg:
        return q, r
    # N counts the lc(g) factors still to be applied.
    N = df - dg + 1
    lc_g = dup_LC(g, K)
    while True:
        lc_r = dup_LC(r, K)
        j, N = dr - dg, N - 1
        # Scale the partial quotient by lc(g) instead of dividing,
        # then absorb r's current leading coefficient at degree j.
        Q = dup_mul_ground(q, lc_g, K)
        q = dup_add_term(Q, lc_r, j, K)
        # Eliminate r's leading term: lc(g)*r - lc(r)*x**j*g.
        R = dup_mul_ground(r, lc_g, K)
        G = dup_mul_term(g, lc_r, j, K)
        r = dup_sub(R, G, K)
        _dr, dr = dr, dup_degree(r)
        if dr < dg:
            break
        elif not (dr < _dr):
            # Degree must strictly decrease each step, otherwise the
            # ground domain's arithmetic is inconsistent.
            raise PolynomialDivisionFailed(f, g, K)
    # Apply the N factors of lc(g) that were never multiplied in.
    c = lc_g**N
    q = dup_mul_ground(q, c, K)
    r = dup_mul_ground(r, c, K)
    return q, r
def dup_prem(f, g, K):
    """
    Polynomial pseudo-remainder in ``K[x]``.

    Same as ``dup_pdiv`` but only the remainder is computed, so the
    quotient bookkeeping is skipped.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_prem(x**2 + 1, 2*x - 4)
    20
    """
    df = dup_degree(f)
    dg = dup_degree(g)
    r, dr = f, df
    if not g:
        raise ZeroDivisionError("polynomial division")
    elif df < dg:
        return r
    # N counts the lc(g) factors still to be applied.
    N = df - dg + 1
    lc_g = dup_LC(g, K)
    while True:
        lc_r = dup_LC(r, K)
        j, N = dr - dg, N - 1
        # Eliminate r's leading term: lc(g)*r - lc(r)*x**j*g.
        R = dup_mul_ground(r, lc_g, K)
        G = dup_mul_term(g, lc_r, j, K)
        r = dup_sub(R, G, K)
        _dr, dr = dr, dup_degree(r)
        if dr < dg:
            break
        elif not (dr < _dr):
            # Degree must strictly decrease each step.
            raise PolynomialDivisionFailed(f, g, K)
    # Apply the remaining lc(g) factors in one go.
    return dup_mul_ground(r, lc_g**N, K)
def dup_pquo(f, g, K):
    """
    Polynomial exact pseudo-quotient in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_pquo(x**2 - 1, 2*x - 2)
    2*x + 2
    >>> R.dup_pquo(x**2 + 1, 2*x - 4)
    2*x + 4
    """
    # Quotient part of pseudo-division; any remainder is discarded.
    return dup_pdiv(f, g, K)[0]


def dup_pexquo(f, g, K):
    """
    Polynomial pseudo-quotient in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_pexquo(x**2 - 1, 2*x - 2)
    2*x + 2
    >>> R.dup_pexquo(x**2 + 1, 2*x - 4)
    Traceback (most recent call last):
    ...
    ExactQuotientFailed: [2, -4] does not divide [1, 0, 1]
    """
    q, r = dup_pdiv(f, g, K)
    # Exact variant: a non-zero pseudo-remainder is an error.
    if not r:
        return q
    else:
        raise ExactQuotientFailed(f, g)
def dmp_pdiv(f, g, u, K):
    """
    Polynomial pseudo-division in ``K[X]``.

    Multivariate analogue of ``dup_pdiv``: division proceeds in the
    main variable ``x_0`` and scales by the (polynomial) leading
    coefficient of ``g`` to avoid coefficient division.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_pdiv(x**2 + x*y, 2*x + 2)
    (2*x + 2*y - 2, -4*y + 4)
    """
    if not u:
        return dup_pdiv(f, g, K)
    df = dmp_degree(f, u)
    dg = dmp_degree(g, u)
    # Degree < 0 means g is the zero polynomial.
    if dg < 0:
        raise ZeroDivisionError("polynomial division")
    q, r, dr = dmp_zero(u), f, df
    if df < dg:
        return q, r
    # N counts the lc(g) factors still to be applied.
    N = df - dg + 1
    lc_g = dmp_LC(g, K)
    while True:
        lc_r = dmp_LC(r, K)
        j, N = dr - dg, N - 1
        # Scale the partial quotient by lc(g) (a mul_term with i == 0),
        # then absorb r's leading coefficient at degree j.
        Q = dmp_mul_term(q, lc_g, 0, u, K)
        q = dmp_add_term(Q, lc_r, j, u, K)
        # Eliminate r's leading term: lc(g)*r - lc(r)*x_0**j*g.
        R = dmp_mul_term(r, lc_g, 0, u, K)
        G = dmp_mul_term(g, lc_r, j, u, K)
        r = dmp_sub(R, G, u, K)
        _dr, dr = dr, dmp_degree(r, u)
        if dr < dg:
            break
        elif not (dr < _dr):
            # Degree in x_0 must strictly decrease each step.
            raise PolynomialDivisionFailed(f, g, K)
    # Apply the remaining lc(g) factors (lc_g lives one level down).
    c = dmp_pow(lc_g, N, u - 1, K)
    q = dmp_mul_term(q, c, 0, u, K)
    r = dmp_mul_term(r, c, 0, u, K)
    return q, r
def dmp_prem(f, g, u, K):
    """
    Polynomial pseudo-remainder in ``K[X]``.

    Same as ``dmp_pdiv`` but only the remainder is computed.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_prem(x**2 + x*y, 2*x + 2)
    -4*y + 4
    """
    if not u:
        return dup_prem(f, g, K)
    df = dmp_degree(f, u)
    dg = dmp_degree(g, u)
    # Degree < 0 means g is the zero polynomial.
    if dg < 0:
        raise ZeroDivisionError("polynomial division")
    r, dr = f, df
    if df < dg:
        return r
    # N counts the lc(g) factors still to be applied.
    N = df - dg + 1
    lc_g = dmp_LC(g, K)
    while True:
        lc_r = dmp_LC(r, K)
        j, N = dr - dg, N - 1
        # Eliminate r's leading term: lc(g)*r - lc(r)*x_0**j*g.
        R = dmp_mul_term(r, lc_g, 0, u, K)
        G = dmp_mul_term(g, lc_r, j, u, K)
        r = dmp_sub(R, G, u, K)
        _dr, dr = dr, dmp_degree(r, u)
        if dr < dg:
            break
        elif not (dr < _dr):
            # Degree in x_0 must strictly decrease each step.
            raise PolynomialDivisionFailed(f, g, K)
    # Apply the remaining lc(g) factors in one go.
    c = dmp_pow(lc_g, N, u - 1, K)
    return dmp_mul_term(r, c, 0, u, K)
def dmp_pquo(f, g, u, K):
    """
    Polynomial exact pseudo-quotient in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> f = x**2 + x*y
    >>> g = 2*x + 2*y
    >>> h = 2*x + 2
    >>> R.dmp_pquo(f, g)
    2*x
    >>> R.dmp_pquo(f, h)
    2*x + 2*y - 2
    """
    # Quotient part of pseudo-division; any remainder is discarded.
    return dmp_pdiv(f, g, u, K)[0]


def dmp_pexquo(f, g, u, K):
    """
    Polynomial pseudo-quotient in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> f = x**2 + x*y
    >>> g = 2*x + 2*y
    >>> h = 2*x + 2
    >>> R.dmp_pexquo(f, g)
    2*x
    >>> R.dmp_pexquo(f, h)
    Traceback (most recent call last):
    ...
    ExactQuotientFailed: [[2], [2]] does not divide [[1], [1, 0], []]
    """
    q, r = dmp_pdiv(f, g, u, K)
    # Exact variant: a non-zero pseudo-remainder is an error.
    if dmp_zero_p(r, u):
        return q
    else:
        raise ExactQuotientFailed(f, g)
def dup_rr_div(f, g, K):
    """
    Univariate division with remainder over a ring.

    Division stops as soon as the leading coefficient of the partial
    remainder is not exactly divisible by ``lc(g)``, so the quotient
    may be zero even when ``deg(f) >= deg(g)``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_rr_div(x**2 + 1, 2*x - 4)
    (0, x**2 + 1)
    """
    df = dup_degree(f)
    dg = dup_degree(g)
    q, r, dr = [], f, df
    if not g:
        raise ZeroDivisionError("polynomial division")
    elif df < dg:
        return q, r
    lc_g = dup_LC(g, K)
    while True:
        lc_r = dup_LC(r, K)
        # Over a ring, stop when exact coefficient division fails.
        if lc_r % lc_g:
            break
        c = K.exquo(lc_r, lc_g)
        j = dr - dg
        # Add c*x**j to the quotient and cancel r's leading term.
        q = dup_add_term(q, c, j, K)
        h = dup_mul_term(g, c, j, K)
        r = dup_sub(r, h, K)
        _dr, dr = dr, dup_degree(r)
        if dr < dg:
            break
        elif not (dr < _dr):
            # Degree must strictly decrease each step.
            raise PolynomialDivisionFailed(f, g, K)
    return q, r
def dmp_rr_div(f, g, u, K):
    """
    Multivariate division with remainder over a ring.

    Division stops as soon as the leading coefficient of the partial
    remainder is not exactly divisible (as a polynomial) by ``lc(g)``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_rr_div(x**2 + x*y, 2*x + 2)
    (0, x**2 + x*y)
    """
    if not u:
        return dup_rr_div(f, g, K)
    df = dmp_degree(f, u)
    dg = dmp_degree(g, u)
    # Degree < 0 means g is the zero polynomial.
    if dg < 0:
        raise ZeroDivisionError("polynomial division")
    q, r, dr = dmp_zero(u), f, df
    if df < dg:
        return q, r
    lc_g, v = dmp_LC(g, K), u - 1
    while True:
        lc_r = dmp_LC(r, K)
        # Recursively divide the leading coefficients (level v).
        c, R = dmp_rr_div(lc_r, lc_g, v, K)
        # A non-zero remainder means lc_g does not divide lc_r: stop.
        if not dmp_zero_p(R, v):
            break
        j = dr - dg
        # Add c*x_0**j to the quotient and cancel r's leading term.
        q = dmp_add_term(q, c, j, u, K)
        h = dmp_mul_term(g, c, j, u, K)
        r = dmp_sub(r, h, u, K)
        _dr, dr = dr, dmp_degree(r, u)
        if dr < dg:
            break
        elif not (dr < _dr):
            # Degree in x_0 must strictly decrease each step.
            raise PolynomialDivisionFailed(f, g, K)
    return q, r
def dup_ff_div(f, g, K):
    """
    Polynomial division with remainder over a field.

    Examples
    ========

    >>> from sympy.polys import ring, QQ
    >>> R, x = ring("x", QQ)
    >>> R.dup_ff_div(x**2 + 1, 2*x - 4)
    (1/2*x + 1, 5)
    """
    df = dup_degree(f)
    dg = dup_degree(g)
    q, r, dr = [], f, df
    if not g:
        raise ZeroDivisionError("polynomial division")
    elif df < dg:
        return q, r
    lc_g = dup_LC(g, K)
    while True:
        lc_r = dup_LC(r, K)
        # Over a field, coefficient division always succeeds.
        c = K.exquo(lc_r, lc_g)
        j = dr - dg
        # Add c*x**j to the quotient and cancel r's leading term.
        q = dup_add_term(q, c, j, K)
        h = dup_mul_term(g, c, j, K)
        r = dup_sub(r, h, K)
        _dr, dr = dr, dup_degree(r)
        if dr < dg:
            break
        elif dr == _dr and not K.is_Exact:
            # remove leading term created by rounding error
            r = dup_strip(r[1:])
            dr = dup_degree(r)
            if dr < dg:
                break
        elif not (dr < _dr):
            # Degree must strictly decrease in an exact domain.
            raise PolynomialDivisionFailed(f, g, K)
    return q, r
def dmp_ff_div(f, g, u, K):
    """
    Polynomial division with remainder over a field.

    Examples
    ========

    >>> from sympy.polys import ring, QQ
    >>> R, x,y = ring("x,y", QQ)
    >>> R.dmp_ff_div(x**2 + x*y, 2*x + 2)
    (1/2*x + 1/2*y - 1/2, -y + 1)
    """
    if not u:
        return dup_ff_div(f, g, K)
    df = dmp_degree(f, u)
    dg = dmp_degree(g, u)
    # Degree < 0 means g is the zero polynomial.
    if dg < 0:
        raise ZeroDivisionError("polynomial division")
    q, r, dr = dmp_zero(u), f, df
    if df < dg:
        return q, r
    lc_g, v = dmp_LC(g, K), u - 1
    while True:
        lc_r = dmp_LC(r, K)
        # Recursively divide the leading coefficients (level v).
        c, R = dmp_ff_div(lc_r, lc_g, v, K)
        # Even over a field, lc_g (a polynomial) may not divide lc_r.
        if not dmp_zero_p(R, v):
            break
        j = dr - dg
        # Add c*x_0**j to the quotient and cancel r's leading term.
        q = dmp_add_term(q, c, j, u, K)
        h = dmp_mul_term(g, c, j, u, K)
        r = dmp_sub(r, h, u, K)
        _dr, dr = dr, dmp_degree(r, u)
        if dr < dg:
            break
        elif not (dr < _dr):
            # Degree in x_0 must strictly decrease each step.
            raise PolynomialDivisionFailed(f, g, K)
    return q, r
def dup_div(f, g, K):
    """
    Polynomial division with remainder in ``K[x]``.

    Dispatches to field or ring division depending on ``K``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ, QQ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_div(x**2 + 1, 2*x - 4)
    (0, x**2 + 1)
    >>> R, x = ring("x", QQ)
    >>> R.dup_div(x**2 + 1, 2*x - 4)
    (1/2*x + 1, 5)
    """
    if K.is_Field:
        return dup_ff_div(f, g, K)
    else:
        return dup_rr_div(f, g, K)


def dup_rem(f, g, K):
    """
    Returns polynomial remainder in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ, QQ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_rem(x**2 + 1, 2*x - 4)
    x**2 + 1
    >>> R, x = ring("x", QQ)
    >>> R.dup_rem(x**2 + 1, 2*x - 4)
    5
    """
    # Remainder component of full division.
    return dup_div(f, g, K)[1]


def dup_quo(f, g, K):
    """
    Returns exact polynomial quotient in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ, QQ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_quo(x**2 + 1, 2*x - 4)
    0
    >>> R, x = ring("x", QQ)
    >>> R.dup_quo(x**2 + 1, 2*x - 4)
    1/2*x + 1
    """
    # Quotient component of full division; remainder is discarded.
    return dup_div(f, g, K)[0]


def dup_exquo(f, g, K):
    """
    Returns polynomial quotient in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_exquo(x**2 - 1, x - 1)
    x + 1
    >>> R.dup_exquo(x**2 + 1, 2*x - 4)
    Traceback (most recent call last):
    ...
    ExactQuotientFailed: [2, -4] does not divide [1, 0, 1]
    """
    q, r = dup_div(f, g, K)
    # Exact variant: a non-zero remainder is an error.
    if not r:
        return q
    else:
        raise ExactQuotientFailed(f, g)
def dmp_div(f, g, u, K):
    """
    Polynomial division with remainder in ``K[X]``.

    Dispatches to field or ring division depending on ``K``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ, QQ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_div(x**2 + x*y, 2*x + 2)
    (0, x**2 + x*y)
    >>> R, x,y = ring("x,y", QQ)
    >>> R.dmp_div(x**2 + x*y, 2*x + 2)
    (1/2*x + 1/2*y - 1/2, -y + 1)
    """
    if K.is_Field:
        return dmp_ff_div(f, g, u, K)
    else:
        return dmp_rr_div(f, g, u, K)


def dmp_rem(f, g, u, K):
    """
    Returns polynomial remainder in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ, QQ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_rem(x**2 + x*y, 2*x + 2)
    x**2 + x*y
    >>> R, x,y = ring("x,y", QQ)
    >>> R.dmp_rem(x**2 + x*y, 2*x + 2)
    -y + 1
    """
    # Remainder component of full division.
    return dmp_div(f, g, u, K)[1]


def dmp_quo(f, g, u, K):
    """
    Returns exact polynomial quotient in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ, QQ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_quo(x**2 + x*y, 2*x + 2)
    0
    >>> R, x,y = ring("x,y", QQ)
    >>> R.dmp_quo(x**2 + x*y, 2*x + 2)
    1/2*x + 1/2*y - 1/2
    """
    # Quotient component of full division; remainder is discarded.
    return dmp_div(f, g, u, K)[0]


def dmp_exquo(f, g, u, K):
    """
    Returns polynomial quotient in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> f = x**2 + x*y
    >>> g = x + y
    >>> h = 2*x + 2
    >>> R.dmp_exquo(f, g)
    x
    >>> R.dmp_exquo(f, h)
    Traceback (most recent call last):
    ...
    ExactQuotientFailed: [[2], [2]] does not divide [[1], [1, 0], []]
    """
    q, r = dmp_div(f, g, u, K)
    # Exact variant: a non-zero remainder is an error.
    if dmp_zero_p(r, u):
        return q
    else:
        raise ExactQuotientFailed(f, g)
def dup_max_norm(f, K):
    """
    Returns maximum norm of a polynomial in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_max_norm(-x**2 + 2*x - 3)
    3
    """
    # The zero polynomial has norm zero; otherwise take the largest
    # absolute coefficient.
    if not f:
        return K.zero
    else:
        return max(dup_abs(f, K))


def dmp_max_norm(f, u, K):
    """
    Returns maximum norm of a polynomial in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_max_norm(2*x*y - x - 3)
    3
    """
    if not u:
        return dup_max_norm(f, K)
    # Maximum over the norms of the coefficients one level down.
    v = u - 1
    return max([ dmp_max_norm(c, v, K) for c in f ])


def dup_l1_norm(f, K):
    """
    Returns l1 norm of a polynomial in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_l1_norm(2*x**3 - 3*x**2 + 1)
    6
    """
    # Sum of absolute values of the coefficients.
    if not f:
        return K.zero
    else:
        return sum(dup_abs(f, K))


def dmp_l1_norm(f, u, K):
    """
    Returns l1 norm of a polynomial in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_l1_norm(2*x*y - x - 3)
    6
    """
    if not u:
        return dup_l1_norm(f, K)
    # Sum over the l1 norms of the coefficients one level down.
    v = u - 1
    return sum([ dmp_l1_norm(c, v, K) for c in f ])
def dup_expand(polys, K):
    """
    Multiply together several polynomials in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_expand([x**2 - 1, x, 2])
    2*x**3 - 2*x
    """
    # The empty product is the constant 1.
    if not polys:
        return [K.one]
    # Left fold: multiply the factors in order.
    f = polys[0]
    for g in polys[1:]:
        f = dup_mul(f, g, K)
    return f


def dmp_expand(polys, u, K):
    """
    Multiply together several polynomials in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_expand([x**2 + y**2, x + 1])
    x**3 + x**2 + x*y**2 + y**2
    """
    # The empty product is the constant 1.
    if not polys:
        return dmp_one(u, K)
    # Left fold: multiply the factors in order.
    f = polys[0]
    for g in polys[1:]:
        f = dmp_mul(f, g, u, K)
    return f
| {
"repo_name": "kaushik94/sympy",
"path": "sympy/polys/densearith.py",
"copies": "3",
"size": "33487",
"license": "bsd-3-clause",
"hash": 5696039334643162000,
"line_mean": 17.2192600653,
"line_max": 82,
"alpha_frac": 0.4332427509,
"autogenerated": false,
"ratio": 2.6938299412758426,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.96245027320806,
"avg_score": 0.0005139920190487959,
"num_lines": 1838
} |
"""Arithmetics for dense recursive polynomials in ``K[x]`` or ``K[X]``. """
from __future__ import print_function, division
from sympy.polys.densebasic import (
dup_slice,
dup_LC, dmp_LC,
dup_degree, dmp_degree,
dup_normal,
dup_strip, dmp_strip,
dmp_zero_p, dmp_zero,
dmp_one_p, dmp_one,
dmp_ground, dmp_zeros)
from sympy.polys.polyerrors import (ExactQuotientFailed, PolynomialDivisionFailed)
from sympy.core.compatibility import xrange
def dup_add_term(f, c, i, K):
    """
    Add ``c*x**i`` to ``f`` in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_add_term(x**2 - 1, ZZ(2), 4)
    2*x**4 + x**2 - 1
    """
    # Adding a zero term is a no-op.
    if not c:
        return f
    n = len(f)
    # Coefficients are stored highest degree first; x**i lives at index m.
    m = n - i - 1
    if i == n - 1:
        # The leading coefficient may cancel, so strip leading zeros.
        return dup_strip([f[0] + c] + f[1:])
    else:
        if i >= n:
            # New leading term: prepend c and zero-pad the degree gap.
            return [c] + [K.zero]*(i - n) + f
        else:
            return f[:m] + [f[m] + c] + f[m + 1:]


def dmp_add_term(f, c, i, u, K):
    """
    Add ``c(x_2..x_u)*x_0**i`` to ``f`` in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_add_term(x*y + 1, 2, 2)
    2*x**2 + x*y + 1
    """
    # u == 0 means univariate; fall back to the dup_* routine.
    if not u:
        return dup_add_term(f, c, i, K)
    # c is itself a polynomial one level down.
    v = u - 1
    if dmp_zero_p(c, v):
        return f
    n = len(f)
    m = n - i - 1
    if i == n - 1:
        # The leading coefficient may become zero; re-strip at level u.
        return dmp_strip([dmp_add(f[0], c, v, K)] + f[1:], u)
    else:
        if i >= n:
            # New leading term: prepend c and pad with zero polynomials.
            return [c] + dmp_zeros(i - n, v, K) + f
        else:
            return f[:m] + [dmp_add(f[m], c, v, K)] + f[m + 1:]


def dup_sub_term(f, c, i, K):
    """
    Subtract ``c*x**i`` from ``f`` in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_sub_term(2*x**4 + x**2 - 1, ZZ(2), 4)
    x**2 - 1
    """
    # Subtracting a zero term is a no-op.
    if not c:
        return f
    n = len(f)
    m = n - i - 1
    if i == n - 1:
        # The leading coefficient may cancel, so strip leading zeros.
        return dup_strip([f[0] - c] + f[1:])
    else:
        if i >= n:
            # New leading term: -c, followed by zero padding.
            return [-c] + [K.zero]*(i - n) + f
        else:
            return f[:m] + [f[m] - c] + f[m + 1:]


def dmp_sub_term(f, c, i, u, K):
    """
    Subtract ``c(x_2..x_u)*x_0**i`` from ``f`` in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_sub_term(2*x**2 + x*y + 1, 2, 2)
    x*y + 1
    """
    if not u:
        # Univariate: subtraction is addition of the negated coefficient.
        return dup_add_term(f, -c, i, K)
    v = u - 1
    if dmp_zero_p(c, v):
        return f
    n = len(f)
    m = n - i - 1
    if i == n - 1:
        # The leading coefficient may cancel; re-strip at level u.
        return dmp_strip([dmp_sub(f[0], c, v, K)] + f[1:], u)
    else:
        if i >= n:
            # New leading term: negate c and pad with zero polynomials.
            return [dmp_neg(c, v, K)] + dmp_zeros(i - n, v, K) + f
        else:
            return f[:m] + [dmp_sub(f[m], c, v, K)] + f[m + 1:]
def dup_mul_term(f, c, i, K):
    """
    Multiply ``f`` by ``c*x**i`` in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_mul_term(x**2 - 1, ZZ(3), 2)
    3*x**4 - 3*x**2
    """
    # Multiplying by zero, or multiplying the zero polynomial, is zero.
    if not c or not f:
        return []
    else:
        # Scale each coefficient, then shift by appending i zeros.
        return [ cf * c for cf in f ] + [K.zero]*i


def dmp_mul_term(f, c, i, u, K):
    """
    Multiply ``f`` by ``c(x_2..x_u)*x_0**i`` in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_mul_term(x**2*y + x, 3*y, 2)
    3*x**4*y**2 + 3*x**3*y
    """
    if not u:
        return dup_mul_term(f, c, i, K)
    v = u - 1
    # Zero polynomial times anything stays zero.
    if dmp_zero_p(f, u):
        return f
    # A zero coefficient collapses the product to zero.
    if dmp_zero_p(c, v):
        return dmp_zero(u)
    else:
        # Multiply each coefficient by c, shift with zero polynomials.
        return [ dmp_mul(cf, c, v, K) for cf in f ] + dmp_zeros(i, v, K)
def dup_add_ground(f, c, K):
    """
    Add an element of the ground domain to ``f``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_add_ground(x**3 + 2*x**2 + 3*x + 4, ZZ(4))
    x**3 + 2*x**2 + 3*x + 8
    """
    # A ground element is just the x**0 term.
    return dup_add_term(f, c, 0, K)


def dmp_add_ground(f, c, u, K):
    """
    Add an element of the ground domain to ``f``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_add_ground(x**3 + 2*x**2 + 3*x + 4, ZZ(4))
    x**3 + 2*x**2 + 3*x + 8
    """
    # Lift c to a constant polynomial at level u - 1 before adding.
    return dmp_add_term(f, dmp_ground(c, u - 1), 0, u, K)


def dup_sub_ground(f, c, K):
    """
    Subtract an element of the ground domain from ``f``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_sub_ground(x**3 + 2*x**2 + 3*x + 4, ZZ(4))
    x**3 + 2*x**2 + 3*x
    """
    # A ground element is just the x**0 term.
    return dup_sub_term(f, c, 0, K)


def dmp_sub_ground(f, c, u, K):
    """
    Subtract an element of the ground domain from ``f``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_sub_ground(x**3 + 2*x**2 + 3*x + 4, ZZ(4))
    x**3 + 2*x**2 + 3*x
    """
    # Lift c to a constant polynomial at level u - 1 before subtracting.
    return dmp_sub_term(f, dmp_ground(c, u - 1), 0, u, K)


def dup_mul_ground(f, c, K):
    """
    Multiply ``f`` by a constant value in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_mul_ground(x**2 + 2*x - 1, ZZ(3))
    3*x**2 + 6*x - 3
    """
    # c == 0 or f == 0 collapses the product to the zero polynomial.
    if not c or not f:
        return []
    else:
        return [ cf * c for cf in f ]


def dmp_mul_ground(f, c, u, K):
    """
    Multiply ``f`` by a constant value in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_mul_ground(2*x + 2*y, ZZ(3))
    6*x + 6*y
    """
    if not u:
        return dup_mul_ground(f, c, K)
    # Recurse into each coefficient one level down.
    v = u - 1
    return [ dmp_mul_ground(cf, c, v, K) for cf in f ]
def dup_quo_ground(f, c, K):
    """
    Quotient by a constant in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ, QQ

    >>> R, x = ring("x", ZZ)
    >>> R.dup_quo_ground(3*x**2 + 2, ZZ(2))
    x**2 + 1

    >>> R, x = ring("x", QQ)
    >>> R.dup_quo_ground(3*x**2 + 2, QQ(2))
    3/2*x**2 + 1

    """
    if not c:
        raise ZeroDivisionError('polynomial division')
    if not f:
        return f

    # Over a field the quotient is exact; over a ring fall back to
    # floor division of each coefficient.
    if K.has_Field:
        return [ K.quo(coeff, c) for coeff in f ]
    return [ coeff // c for coeff in f ]
def dmp_quo_ground(f, c, u, K):
    """
    Quotient by a constant in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ, QQ

    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_quo_ground(2*x**2*y + 3*x, ZZ(2))
    x**2*y + x

    >>> R, x,y = ring("x,y", QQ)
    >>> R.dmp_quo_ground(2*x**2*y + 3*x, QQ(2))
    x**2*y + 3/2*x

    """
    if not u:
        return dup_quo_ground(f, c, K)

    # Recurse into each coefficient, one level lower.
    v = u - 1
    return [ dmp_quo_ground(coeff, c, v, K) for coeff in f ]
def dup_exquo_ground(f, c, K):
    """
    Exact quotient by a constant in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, QQ
    >>> R, x = ring("x", QQ)

    >>> R.dup_exquo_ground(x**2 + 2, QQ(2))
    1/2*x**2 + 1

    """
    if not c:
        raise ZeroDivisionError('polynomial division')
    if not f:
        return f

    # K.exquo raises if any coefficient is not exactly divisible.
    return [ K.exquo(coeff, c) for coeff in f ]
def dmp_exquo_ground(f, c, u, K):
    """
    Exact quotient by a constant in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, QQ
    >>> R, x,y = ring("x,y", QQ)

    >>> R.dmp_exquo_ground(x**2*y + 2*x, QQ(2))
    1/2*x**2*y + x

    """
    if not u:
        return dup_exquo_ground(f, c, K)

    # Recurse into each coefficient, one level lower.
    v = u - 1
    return [ dmp_exquo_ground(coeff, c, v, K) for coeff in f ]
def dup_lshift(f, n, K):
    """
    Efficiently multiply ``f`` by ``x**n`` in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_lshift(x**2 + 1, 2)
    x**4 + x**2

    """
    # Appending n zero coefficients shifts every term up by x**n;
    # the zero polynomial stays the empty list.
    if not f:
        return f
    return f + [K.zero]*n
def dup_rshift(f, n, K):
    """
    Efficiently divide ``f`` by ``x**n`` in ``K[x]``.

    The trailing ``n`` coefficients are discarded, i.e. any remainder
    modulo ``x**n`` is dropped.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_rshift(x**4 + x**2, 2)
    x**2 + 1
    >>> R.dup_rshift(x**4 + x**2 + 2, 2)
    x**2 + 1

    """
    # Guard n == 0 explicitly: f[:-0] would evaluate to f[:0] == [],
    # silently turning f into the zero polynomial.
    if not n:
        return f
    return f[:-n]
def dup_abs(f, K):
    """
    Make all coefficients positive in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_abs(x**2 - 1)
    x**2 + 1

    """
    # Apply the ground domain's absolute value coefficient-wise.
    return [ K.abs(c) for c in f ]
def dmp_abs(f, u, K):
    """
    Make all coefficients positive in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)

    >>> R.dmp_abs(x**2*y - x)
    x**2*y + x

    """
    if not u:
        return dup_abs(f, K)

    # Recurse into each coefficient, one level lower.
    v = u - 1
    return [ dmp_abs(c, v, K) for c in f ]
def dup_neg(f, K):
    """
    Negate a polynomial in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_neg(x**2 - 1)
    -x**2 + 1

    """
    # Negate coefficient-wise; the degree cannot change.
    return [ -c for c in f ]
def dmp_neg(f, u, K):
    """
    Negate a polynomial in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)

    >>> R.dmp_neg(x**2*y - x)
    -x**2*y + x

    """
    if not u:
        return dup_neg(f, K)

    # Recurse into each coefficient, one level lower.
    v = u - 1
    return [ dmp_neg(c, v, K) for c in f ]
def dup_add(f, g, K):
    """
    Add dense polynomials in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_add(x**2 - 1, x - 2)
    x**2 + x - 3

    """
    # Adding the zero polynomial is the identity.
    if not f:
        return g
    if not g:
        return f

    df = dup_degree(f)
    dg = dup_degree(g)

    if df == dg:
        # Leading terms may cancel, so the result must be stripped.
        return dup_strip([ a + b for a, b in zip(f, g) ])

    # The higher-degree operand contributes its leading coefficients
    # unchanged; only the overlapping tails are summed.
    k = abs(df - dg)

    if df > dg:
        head, tail = f[:k], f[k:]
        return head + [ a + b for a, b in zip(tail, g) ]
    else:
        head, tail = g[:k], g[k:]
        return head + [ a + b for a, b in zip(f, tail) ]
def dmp_add(f, g, u, K):
    """
    Add dense polynomials in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)

    >>> R.dmp_add(x**2 + y, x**2*y + x)
    x**2*y + x**2 + x + y

    """
    if not u:
        return dup_add(f, g, K)

    # A negative degree marks the zero polynomial at this level.
    df = dmp_degree(f, u)
    if df < 0:
        return g

    dg = dmp_degree(g, u)
    if dg < 0:
        return f

    v = u - 1

    if df == dg:
        # Leading coefficients may cancel, so strip the result.
        return dmp_strip([ dmp_add(a, b, v, K) for a, b in zip(f, g) ], u)

    # Keep the excess leading coefficients of the larger operand and
    # add the overlapping tails element-wise.
    k = abs(df - dg)

    if df > dg:
        head, tail = f[:k], f[k:]
        return head + [ dmp_add(a, b, v, K) for a, b in zip(tail, g) ]
    else:
        head, tail = g[:k], g[k:]
        return head + [ dmp_add(a, b, v, K) for a, b in zip(f, tail) ]
def dup_sub(f, g, K):
    """
    Subtract dense polynomials in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_sub(x**2 - 1, x - 2)
    x**2 - x + 1

    """
    if not f:
        return dup_neg(g, K)
    if not g:
        return f

    df = dup_degree(f)
    dg = dup_degree(g)

    if df == dg:
        # Leading terms may cancel, so strip the result.
        return dup_strip([ a - b for a, b in zip(f, g) ])

    k = abs(df - dg)

    if df > dg:
        # f's excess leading coefficients pass through unchanged.
        head, tail = f[:k], f[k:]
        return head + [ a - b for a, b in zip(tail, g) ]
    else:
        # g's excess leading coefficients appear negated in the result.
        head, tail = dup_neg(g[:k], K), g[k:]
        return head + [ a - b for a, b in zip(f, tail) ]
def dmp_sub(f, g, u, K):
    """
    Subtract dense polynomials in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)

    >>> R.dmp_sub(x**2 + y, x**2*y + x)
    -x**2*y + x**2 - x + y

    """
    if not u:
        return dup_sub(f, g, K)

    # A negative degree marks the zero polynomial at this level.
    df = dmp_degree(f, u)
    if df < 0:
        return dmp_neg(g, u, K)

    dg = dmp_degree(g, u)
    if dg < 0:
        return f

    v = u - 1

    if df == dg:
        # Leading coefficients may cancel, so strip the result.
        return dmp_strip([ dmp_sub(a, b, v, K) for a, b in zip(f, g) ], u)

    k = abs(df - dg)

    if df > dg:
        # f's excess leading coefficients pass through unchanged.
        head, tail = f[:k], f[k:]
        return head + [ dmp_sub(a, b, v, K) for a, b in zip(tail, g) ]
    else:
        # g's excess leading coefficients appear negated in the result.
        head, tail = dmp_neg(g[:k], u, K), g[k:]
        return head + [ dmp_sub(a, b, v, K) for a, b in zip(f, tail) ]
def dup_add_mul(f, g, h, K):
    """
    Returns ``f + g*h`` where ``f, g, h`` are in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_add_mul(x**2 - 1, x - 2, x + 2)
    2*x**2 - 5

    """
    # Multiply first, then add; no fused loop is attempted.
    product = dup_mul(g, h, K)
    return dup_add(f, product, K)
def dmp_add_mul(f, g, h, u, K):
    """
    Returns ``f + g*h`` where ``f, g, h`` are in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)

    >>> R.dmp_add_mul(x**2 + y, x, x + 2)
    2*x**2 + 2*x + y

    """
    # Multiply first, then add; no fused loop is attempted.
    product = dmp_mul(g, h, u, K)
    return dmp_add(f, product, u, K)
def dup_sub_mul(f, g, h, K):
    """
    Returns ``f - g*h`` where ``f, g, h`` are in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_sub_mul(x**2 - 1, x - 2, x + 2)
    3

    """
    # Multiply first, then subtract; no fused loop is attempted.
    product = dup_mul(g, h, K)
    return dup_sub(f, product, K)
def dmp_sub_mul(f, g, h, u, K):
    """
    Returns ``f - g*h`` where ``f, g, h`` are in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)

    >>> R.dmp_sub_mul(x**2 + y, x, x + 2)
    -2*x + y

    """
    # Multiply first, then subtract; no fused loop is attempted.
    product = dmp_mul(g, h, u, K)
    return dmp_sub(f, product, u, K)
def dup_mul(f, g, K):
    """
    Multiply dense polynomials in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_mul(x - 2, x + 2)
    x**2 - 4

    """
    if f == g:
        return dup_sqr(f, K)

    if not (f and g):
        return []

    df = dup_degree(f)
    dg = dup_degree(g)

    n = max(df, dg) + 1

    if n < 100:
        # Schoolbook multiplication: convolve the coefficient lists.
        # NOTE: ``range`` (not Python 2's ``xrange``) — this module
        # imports the ``range`` compatibility shim, so ``xrange`` would
        # raise NameError on Python 3.
        h = []

        for i in range(0, df + dg + 1):
            coeff = K.zero

            for j in range(max(0, i - dg), min(df, i) + 1):
                coeff += f[j]*g[i - j]

            h.append(coeff)

        return dup_strip(h)
    else:
        # Use Karatsuba's algorithm (divide and conquer), see e.g.:
        # Joris van der Hoeven, Relax But Don't Be Too Lazy,
        # J. Symbolic Computation, 11 (2002), section 3.1.1.
        n2 = n//2

        fl, gl = dup_slice(f, 0, n2, K), dup_slice(g, 0, n2, K)

        fh = dup_rshift(dup_slice(f, n2, n, K), n2, K)
        gh = dup_rshift(dup_slice(g, n2, n, K), n2, K)

        lo, hi = dup_mul(fl, gl, K), dup_mul(fh, gh, K)

        mid = dup_mul(dup_add(fl, fh, K), dup_add(gl, gh, K), K)
        mid = dup_sub(mid, dup_add(lo, hi, K), K)

        return dup_add(dup_add(lo, dup_lshift(mid, n2, K), K),
                       dup_lshift(hi, 2*n2, K), K)
def dmp_mul(f, g, u, K):
    """
    Multiply dense polynomials in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)

    >>> R.dmp_mul(x*y + 1, x)
    x**2*y + x

    """
    if not u:
        return dup_mul(f, g, K)

    if f == g:
        return dmp_sqr(f, u, K)

    # A negative degree marks the zero polynomial at this level.
    df = dmp_degree(f, u)
    if df < 0:
        return f

    dg = dmp_degree(g, u)
    if dg < 0:
        return g

    h, v = [], u - 1

    # Schoolbook convolution.  NOTE: ``range`` (not Python 2's
    # ``xrange``) — the module imports the ``range`` compatibility shim,
    # so ``xrange`` would raise NameError on Python 3.
    for i in range(0, df + dg + 1):
        coeff = dmp_zero(v)

        for j in range(max(0, i - dg), min(df, i) + 1):
            coeff = dmp_add(coeff, dmp_mul(f[j], g[i - j], v, K), v, K)

        h.append(coeff)

    return dmp_strip(h, u)
def dup_sqr(f, K):
    """
    Square dense polynomials in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_sqr(x**2 + 1)
    x**4 + 2*x**2 + 1

    """
    df, h = dup_degree(f), []

    # NOTE: ``range`` (not Python 2's ``xrange``) — the module imports
    # the ``range`` compatibility shim, so ``xrange`` would raise
    # NameError on Python 3.
    for i in range(0, 2*df + 1):
        c = K.zero

        jmin = max(0, i - df)
        jmax = min(i, df)

        n = jmax - jmin + 1

        # Exploit symmetry f[j]*f[i-j] == f[i-j]*f[j]: sum only the
        # first half of the diagonal and double it, adding the middle
        # square when the diagonal has odd length.
        jmax = jmin + n // 2 - 1

        for j in range(jmin, jmax + 1):
            c += f[j]*f[i - j]

        c += c

        if n & 1:
            elem = f[jmax + 1]

            c += elem**2

        h.append(c)

    return dup_strip(h)
def dmp_sqr(f, u, K):
    """
    Square dense polynomials in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)

    >>> R.dmp_sqr(x**2 + x*y + y**2)
    x**4 + 2*x**3*y + 3*x**2*y**2 + 2*x*y**3 + y**4

    """
    if not u:
        return dup_sqr(f, K)

    df = dmp_degree(f, u)
    if df < 0:
        return f

    h, v = [], u - 1

    # NOTE: ``range`` (not Python 2's ``xrange``) — the module imports
    # the ``range`` compatibility shim, so ``xrange`` would raise
    # NameError on Python 3.
    for i in range(0, 2*df + 1):
        c = dmp_zero(v)

        jmin = max(0, i - df)
        jmax = min(i, df)

        n = jmax - jmin + 1

        # Symmetry trick: sum half the diagonal, double with K(2), and
        # add the middle square for odd-length diagonals.
        jmax = jmin + n // 2 - 1

        for j in range(jmin, jmax + 1):
            c = dmp_add(c, dmp_mul(f[j], f[i - j], v, K), v, K)

        c = dmp_mul_ground(c, K(2), v, K)

        if n & 1:
            elem = dmp_sqr(f[jmax + 1], v, K)

            c = dmp_add(c, elem, v, K)

        h.append(c)

    return dmp_strip(h, u)
def dup_pow(f, n, K):
    """
    Raise ``f`` to the ``n``-th power in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_pow(x - 2, 3)
    x**3 - 6*x**2 + 12*x - 8

    """
    if not n:
        return [K.one]
    if n < 0:
        raise ValueError("can't raise polynomial to a negative power")
    if n == 1 or not f or f == [K.one]:
        return f

    # Binary exponentiation (square-and-multiply).
    result, base = [K.one], f

    while True:
        if n & 1:
            result = dup_mul(result, base, K)

        n >>= 1

        if not n:
            return result

        base = dup_sqr(base, K)
def dmp_pow(f, n, u, K):
    """
    Raise ``f`` to the ``n``-th power in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)

    >>> R.dmp_pow(x*y + 1, 3)
    x**3*y**3 + 3*x**2*y**2 + 3*x*y + 1

    """
    if not u:
        return dup_pow(f, n, K)
    if not n:
        return dmp_one(u, K)
    if n < 0:
        raise ValueError("can't raise polynomial to a negative power")
    if n == 1 or dmp_zero_p(f, u) or dmp_one_p(f, u, K):
        return f

    # Binary exponentiation (square-and-multiply).
    result, base = dmp_one(u, K), f

    while True:
        if n & 1:
            result = dmp_mul(result, base, u, K)

        n >>= 1

        if not n:
            return result

        base = dmp_sqr(base, u, K)
def dup_pdiv(f, g, K):
    """
    Polynomial pseudo-division in ``K[x]``.

    Returns ``(q, r)`` such that ``lc(g)**N * f = q*g + r`` with
    ``N = deg(f) - deg(g) + 1``, so no fractions are introduced when
    ``K`` is only a ring.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_pdiv(x**2 + 1, 2*x - 4)
    (2*x + 4, 20)

    """
    df = dup_degree(f)
    dg = dup_degree(g)
    # q accumulates the pseudo-quotient; r is the running remainder.
    q, r, dr = [], f, df
    if not g:
        raise ZeroDivisionError("polynomial division")
    elif df < dg:
        return q, r
    # N counts the remaining elimination steps; unused factors of lc(g)
    # are applied to q and r after the loop.
    N = df - dg + 1
    lc_g = dup_LC(g, K)
    while True:
        lc_r = dup_LC(r, K)
        j, N = dr - dg, N - 1
        # Rescale the partial quotient by lc(g) before adding the new
        # term, so every term carries the same total power of lc(g).
        Q = dup_mul_ground(q, lc_g, K)
        q = dup_add_term(Q, lc_r, j, K)
        # r <- lc(g)*r - lc(r)*x**j*g eliminates r's leading term.
        R = dup_mul_ground(r, lc_g, K)
        G = dup_mul_term(g, lc_r, j, K)
        r = dup_sub(R, G, K)
        _dr, dr = dr, dup_degree(r)
        if dr < dg:
            break
        elif not (dr < _dr):
            # Degree must strictly decrease each step; otherwise the
            # ground domain misbehaved (e.g. zero divisors).
            raise PolynomialDivisionFailed(f, g, K)
    # Apply the unused factors of lc(g) so the identity above holds.
    c = lc_g**N
    q = dup_mul_ground(q, c, K)
    r = dup_mul_ground(r, c, K)
    return q, r
def dup_prem(f, g, K):
    """
    Polynomial pseudo-remainder in ``K[x]``.

    Returns ``r`` such that ``lc(g)**N * f = q*g + r`` for some ``q``,
    with ``N = deg(f) - deg(g) + 1``; only the remainder is tracked.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_prem(x**2 + 1, 2*x - 4)
    20

    """
    df = dup_degree(f)
    dg = dup_degree(g)
    r, dr = f, df
    if not g:
        raise ZeroDivisionError("polynomial division")
    elif df < dg:
        return r
    # N counts the remaining elimination steps; leftover factors of
    # lc(g) are applied after the loop.
    N = df - dg + 1
    lc_g = dup_LC(g, K)
    while True:
        lc_r = dup_LC(r, K)
        j, N = dr - dg, N - 1
        # r <- lc(g)*r - lc(r)*x**j*g eliminates r's leading term.
        R = dup_mul_ground(r, lc_g, K)
        G = dup_mul_term(g, lc_r, j, K)
        r = dup_sub(R, G, K)
        _dr, dr = dr, dup_degree(r)
        if dr < dg:
            break
        elif not (dr < _dr):
            # Degree must strictly decrease each step.
            raise PolynomialDivisionFailed(f, g, K)
    return dup_mul_ground(r, lc_g**N, K)
def dup_pquo(f, g, K):
    """
    Polynomial pseudo-quotient in ``K[x]``.

    The pseudo-remainder is discarded without checking that it is zero
    (compare ``dup_pexquo``, which raises when it is not).

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_pquo(x**2 - 1, 2*x - 2)
    2*x + 2

    >>> R.dup_pquo(x**2 + 1, 2*x - 4)
    2*x + 4

    """
    q, _ = dup_pdiv(f, g, K)
    return q
def dup_pexquo(f, g, K):
    """
    Polynomial exact pseudo-quotient in ``K[x]``.

    Raises :exc:`ExactQuotientFailed` if the pseudo-remainder is
    nonzero.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_pexquo(x**2 - 1, 2*x - 2)
    2*x + 2

    >>> R.dup_pexquo(x**2 + 1, 2*x - 4)
    Traceback (most recent call last):
    ...
    ExactQuotientFailed: [2, -4] does not divide [1, 0, 1]

    """
    q, r = dup_pdiv(f, g, K)

    if r:
        raise ExactQuotientFailed(f, g)

    return q
def dmp_pdiv(f, g, u, K):
    """
    Polynomial pseudo-division in ``K[X]``.

    Returns ``(q, r)`` such that ``lc(g)**N * f = q*g + r`` with
    ``N = deg(f) - deg(g) + 1`` in the main variable ``x_0``; leading
    coefficients are polynomials in the remaining variables.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)

    >>> R.dmp_pdiv(x**2 + x*y, 2*x + 2)
    (2*x + 2*y - 2, -4*y + 4)

    """
    if not u:
        return dup_pdiv(f, g, K)
    df = dmp_degree(f, u)
    dg = dmp_degree(g, u)
    # dg < 0 means g is the zero polynomial.
    if dg < 0:
        raise ZeroDivisionError("polynomial division")
    q, r, dr = dmp_zero(u), f, df
    if df < dg:
        return q, r
    # N counts remaining elimination steps; leftover factors of lc(g)
    # are applied after the loop.
    N = df - dg + 1
    lc_g = dmp_LC(g, K)
    while True:
        lc_r = dmp_LC(r, K)
        j, N = dr - dg, N - 1
        # Rescale the partial quotient by lc(g) (a polynomial here, so
        # dmp_mul_term with exponent 0 is used instead of mul_ground).
        Q = dmp_mul_term(q, lc_g, 0, u, K)
        q = dmp_add_term(Q, lc_r, j, u, K)
        # r <- lc(g)*r - lc(r)*x_0**j*g eliminates r's leading term.
        R = dmp_mul_term(r, lc_g, 0, u, K)
        G = dmp_mul_term(g, lc_r, j, u, K)
        r = dmp_sub(R, G, u, K)
        _dr, dr = dr, dmp_degree(r, u)
        if dr < dg:
            break
        elif not (dr < _dr):
            # Degree must strictly decrease each step.
            raise PolynomialDivisionFailed(f, g, K)
    # Apply the unused factors of lc(g) so the identity above holds.
    c = dmp_pow(lc_g, N, u - 1, K)
    q = dmp_mul_term(q, c, 0, u, K)
    r = dmp_mul_term(r, c, 0, u, K)
    return q, r
def dmp_prem(f, g, u, K):
    """
    Polynomial pseudo-remainder in ``K[X]``.

    Returns ``r`` such that ``lc(g)**N * f = q*g + r`` for some ``q``,
    with ``N = deg(f) - deg(g) + 1`` in the main variable.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)

    >>> R.dmp_prem(x**2 + x*y, 2*x + 2)
    -4*y + 4

    """
    if not u:
        return dup_prem(f, g, K)
    df = dmp_degree(f, u)
    dg = dmp_degree(g, u)
    # dg < 0 means g is the zero polynomial.
    if dg < 0:
        raise ZeroDivisionError("polynomial division")
    r, dr = f, df
    if df < dg:
        return r
    # N counts remaining elimination steps; leftover factors of lc(g)
    # are applied after the loop.
    N = df - dg + 1
    lc_g = dmp_LC(g, K)
    while True:
        lc_r = dmp_LC(r, K)
        j, N = dr - dg, N - 1
        # r <- lc(g)*r - lc(r)*x_0**j*g eliminates r's leading term.
        R = dmp_mul_term(r, lc_g, 0, u, K)
        G = dmp_mul_term(g, lc_r, j, u, K)
        r = dmp_sub(R, G, u, K)
        _dr, dr = dr, dmp_degree(r, u)
        if dr < dg:
            break
        elif not (dr < _dr):
            # Degree must strictly decrease each step.
            raise PolynomialDivisionFailed(f, g, K)
    c = dmp_pow(lc_g, N, u - 1, K)
    return dmp_mul_term(r, c, 0, u, K)
def dmp_pquo(f, g, u, K):
    """
    Polynomial pseudo-quotient in ``K[X]``.

    The pseudo-remainder is discarded without checking that it is zero
    (compare ``dmp_pexquo``, which raises when it is not).

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)

    >>> f = x**2 + x*y
    >>> g = 2*x + 2*y
    >>> h = 2*x + 2

    >>> R.dmp_pquo(f, g)
    2*x

    >>> R.dmp_pquo(f, h)
    2*x + 2*y - 2

    """
    q, _ = dmp_pdiv(f, g, u, K)
    return q
def dmp_pexquo(f, g, u, K):
    """
    Polynomial exact pseudo-quotient in ``K[X]``.

    Raises :exc:`ExactQuotientFailed` if the pseudo-remainder is
    nonzero.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)

    >>> f = x**2 + x*y
    >>> g = 2*x + 2*y
    >>> h = 2*x + 2

    >>> R.dmp_pexquo(f, g)
    2*x

    >>> R.dmp_pexquo(f, h)
    Traceback (most recent call last):
    ...
    ExactQuotientFailed: [[2], [2]] does not divide [[1], [1, 0], []]

    """
    q, r = dmp_pdiv(f, g, u, K)

    if not dmp_zero_p(r, u):
        raise ExactQuotientFailed(f, g)

    return q
def dup_rr_div(f, g, K):
    """
    Univariate division with remainder over a ring.

    Each quotient term is produced only while ``lc(r)`` is exactly
    divisible by ``lc(g)``; otherwise the loop stops and the current
    remainder is returned (so ``f == q*g + r`` always holds).

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_rr_div(x**2 + 1, 2*x - 4)
    (0, x**2 + 1)

    """
    df = dup_degree(f)
    dg = dup_degree(g)
    q, r, dr = [], f, df
    if not g:
        raise ZeroDivisionError("polynomial division")
    elif df < dg:
        return q, r
    lc_g = dup_LC(g, K)
    while True:
        lc_r = dup_LC(r, K)
        # Stop as soon as the leading coefficient is not divisible in K.
        if lc_r % lc_g:
            break
        c = K.exquo(lc_r, lc_g)
        j = dr - dg
        q = dup_add_term(q, c, j, K)
        # r <- r - c*x**j*g cancels r's leading term.
        h = dup_mul_term(g, c, j, K)
        r = dup_sub(r, h, K)
        _dr, dr = dr, dup_degree(r)
        if dr < dg:
            break
        elif not (dr < _dr):
            # Degree must strictly decrease each step.
            raise PolynomialDivisionFailed(f, g, K)
    return q, r
def dmp_rr_div(f, g, u, K):
    """
    Multivariate division with remainder over a ring.

    Leading coefficients are themselves polynomials, so divisibility is
    checked by recursive division one level down: a quotient term is
    emitted only when that division leaves no remainder.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)

    >>> R.dmp_rr_div(x**2 + x*y, 2*x + 2)
    (0, x**2 + x*y)

    """
    if not u:
        return dup_rr_div(f, g, K)
    df = dmp_degree(f, u)
    dg = dmp_degree(g, u)
    # dg < 0 means g is the zero polynomial.
    if dg < 0:
        raise ZeroDivisionError("polynomial division")
    q, r, dr = dmp_zero(u), f, df
    if df < dg:
        return q, r
    lc_g, v = dmp_LC(g, K), u - 1
    while True:
        lc_r = dmp_LC(r, K)
        c, R = dmp_rr_div(lc_r, lc_g, v, K)
        # Stop as soon as lc(r) is not exactly divisible by lc(g).
        if not dmp_zero_p(R, v):
            break
        j = dr - dg
        q = dmp_add_term(q, c, j, u, K)
        # r <- r - c*x_0**j*g cancels r's leading term.
        h = dmp_mul_term(g, c, j, u, K)
        r = dmp_sub(r, h, u, K)
        _dr, dr = dr, dmp_degree(r, u)
        if dr < dg:
            break
        elif not (dr < _dr):
            # Degree must strictly decrease each step.
            raise PolynomialDivisionFailed(f, g, K)
    return q, r
def dup_ff_div(f, g, K):
    """
    Polynomial division with remainder over a field.

    Since ``K`` is a field, every leading-coefficient division is
    exact and the loop terminates with ``deg(r) < deg(g)``.

    Examples
    ========

    >>> from sympy.polys import ring, QQ
    >>> R, x = ring("x", QQ)

    >>> R.dup_ff_div(x**2 + 1, 2*x - 4)
    (1/2*x + 1, 5)

    """
    df = dup_degree(f)
    dg = dup_degree(g)
    q, r, dr = [], f, df
    if not g:
        raise ZeroDivisionError("polynomial division")
    elif df < dg:
        return q, r
    lc_g = dup_LC(g, K)
    while True:
        lc_r = dup_LC(r, K)
        # Always exact in a field.
        c = K.exquo(lc_r, lc_g)
        j = dr - dg
        q = dup_add_term(q, c, j, K)
        # r <- r - c*x**j*g cancels r's leading term.
        h = dup_mul_term(g, c, j, K)
        r = dup_sub(r, h, K)
        _dr, dr = dr, dup_degree(r)
        if dr < dg:
            break
        elif not (dr < _dr):
            # Degree must strictly decrease each step (can fail with
            # inexact ground domains, e.g. floats).
            raise PolynomialDivisionFailed(f, g, K)
    return q, r
def dmp_ff_div(f, g, u, K):
    """
    Polynomial division with remainder over a field.

    Leading coefficients are polynomials one level down; a quotient
    term is emitted only when the recursive division of the leading
    coefficients is exact.

    Examples
    ========

    >>> from sympy.polys import ring, QQ
    >>> R, x,y = ring("x,y", QQ)

    >>> R.dmp_ff_div(x**2 + x*y, 2*x + 2)
    (1/2*x + 1/2*y - 1/2, -y + 1)

    """
    if not u:
        return dup_ff_div(f, g, K)
    df = dmp_degree(f, u)
    dg = dmp_degree(g, u)
    # dg < 0 means g is the zero polynomial.
    if dg < 0:
        raise ZeroDivisionError("polynomial division")
    q, r, dr = dmp_zero(u), f, df
    if df < dg:
        return q, r
    lc_g, v = dmp_LC(g, K), u - 1
    while True:
        lc_r = dmp_LC(r, K)
        c, R = dmp_ff_div(lc_r, lc_g, v, K)
        # Stop if lc(r) is not exactly divisible by lc(g).
        if not dmp_zero_p(R, v):
            break
        j = dr - dg
        q = dmp_add_term(q, c, j, u, K)
        # r <- r - c*x_0**j*g cancels r's leading term.
        h = dmp_mul_term(g, c, j, u, K)
        r = dmp_sub(r, h, u, K)
        _dr, dr = dr, dmp_degree(r, u)
        if dr < dg:
            break
        elif not (dr < _dr):
            # Degree must strictly decrease each step.
            raise PolynomialDivisionFailed(f, g, K)
    return q, r
def dup_div(f, g, K):
    """
    Polynomial division with remainder in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ, QQ

    >>> R, x = ring("x", ZZ)
    >>> R.dup_div(x**2 + 1, 2*x - 4)
    (0, x**2 + 1)

    >>> R, x = ring("x", QQ)
    >>> R.dup_div(x**2 + 1, 2*x - 4)
    (1/2*x + 1, 5)

    """
    # Fields admit true division; rings use the divisibility-checked
    # variant that may stop early.
    if K.has_Field:
        return dup_ff_div(f, g, K)

    return dup_rr_div(f, g, K)
def dup_rem(f, g, K):
    """
    Returns polynomial remainder in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ, QQ

    >>> R, x = ring("x", ZZ)
    >>> R.dup_rem(x**2 + 1, 2*x - 4)
    x**2 + 1

    >>> R, x = ring("x", QQ)
    >>> R.dup_rem(x**2 + 1, 2*x - 4)
    5

    """
    # Full division, discarding the quotient.
    _, r = dup_div(f, g, K)
    return r
def dup_quo(f, g, K):
    """
    Returns exact polynomial quotient in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ, QQ

    >>> R, x = ring("x", ZZ)
    >>> R.dup_quo(x**2 + 1, 2*x - 4)
    0

    >>> R, x = ring("x", QQ)
    >>> R.dup_quo(x**2 + 1, 2*x - 4)
    1/2*x + 1

    """
    # Full division, discarding the remainder.
    q, _ = dup_div(f, g, K)
    return q
def dup_exquo(f, g, K):
    """
    Returns polynomial quotient in ``K[x]``.

    Raises :exc:`ExactQuotientFailed` if the remainder is nonzero.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_exquo(x**2 - 1, x - 1)
    x + 1

    >>> R.dup_exquo(x**2 + 1, 2*x - 4)
    Traceback (most recent call last):
    ...
    ExactQuotientFailed: [2, -4] does not divide [1, 0, 1]

    """
    q, r = dup_div(f, g, K)

    if r:
        raise ExactQuotientFailed(f, g)

    return q
def dmp_div(f, g, u, K):
    """
    Polynomial division with remainder in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ, QQ

    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_div(x**2 + x*y, 2*x + 2)
    (0, x**2 + x*y)

    >>> R, x,y = ring("x,y", QQ)
    >>> R.dmp_div(x**2 + x*y, 2*x + 2)
    (1/2*x + 1/2*y - 1/2, -y + 1)

    """
    # Fields admit true division; rings use the divisibility-checked
    # variant that may stop early.
    if K.has_Field:
        return dmp_ff_div(f, g, u, K)

    return dmp_rr_div(f, g, u, K)
def dmp_rem(f, g, u, K):
    """
    Returns polynomial remainder in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ, QQ

    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_rem(x**2 + x*y, 2*x + 2)
    x**2 + x*y

    >>> R, x,y = ring("x,y", QQ)
    >>> R.dmp_rem(x**2 + x*y, 2*x + 2)
    -y + 1

    """
    # Full division, discarding the quotient.
    _, r = dmp_div(f, g, u, K)
    return r
def dmp_quo(f, g, u, K):
    """
    Returns exact polynomial quotient in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ, QQ

    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dmp_quo(x**2 + x*y, 2*x + 2)
    0

    >>> R, x,y = ring("x,y", QQ)
    >>> R.dmp_quo(x**2 + x*y, 2*x + 2)
    1/2*x + 1/2*y - 1/2

    """
    # Full division, discarding the remainder.
    q, _ = dmp_div(f, g, u, K)
    return q
def dmp_exquo(f, g, u, K):
    """
    Returns polynomial quotient in ``K[X]``.

    Raises :exc:`ExactQuotientFailed` if the remainder is nonzero.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)

    >>> f = x**2 + x*y
    >>> g = x + y
    >>> h = 2*x + 2

    >>> R.dmp_exquo(f, g)
    x

    >>> R.dmp_exquo(f, h)
    Traceback (most recent call last):
    ...
    ExactQuotientFailed: [[2], [2]] does not divide [[1], [1, 0], []]

    """
    q, r = dmp_div(f, g, u, K)

    if not dmp_zero_p(r, u):
        raise ExactQuotientFailed(f, g)

    return q
def dup_max_norm(f, K):
    """
    Returns maximum norm of a polynomial in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_max_norm(-x**2 + 2*x - 3)
    3

    """
    # The zero polynomial has norm zero; otherwise take the largest
    # absolute coefficient.
    return max(dup_abs(f, K)) if f else K.zero
def dmp_max_norm(f, u, K):
    """
    Returns maximum norm of a polynomial in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)

    >>> R.dmp_max_norm(2*x*y - x - 3)
    3

    """
    if not u:
        return dup_max_norm(f, K)

    # Maximum over the norms of the coefficients, one level lower.
    v = u - 1
    return max(dmp_max_norm(c, v, K) for c in f)
def dup_l1_norm(f, K):
    """
    Returns l1 norm of a polynomial in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_l1_norm(2*x**3 - 3*x**2 + 1)
    6

    """
    # The zero polynomial has norm zero; otherwise sum the absolute
    # values of all coefficients.
    return sum(dup_abs(f, K)) if f else K.zero
def dmp_l1_norm(f, u, K):
    """
    Returns l1 norm of a polynomial in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)

    >>> R.dmp_l1_norm(2*x*y - x - 3)
    6

    """
    if not u:
        return dup_l1_norm(f, K)

    # Sum the norms of the coefficients, one level lower.
    v = u - 1
    return sum(dmp_l1_norm(c, v, K) for c in f)
def dup_expand(polys, K):
    """
    Multiply together several polynomials in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_expand([x**2 - 1, x, 2])
    2*x**3 - 2*x

    """
    # Empty product is the multiplicative identity.
    if not polys:
        return [K.one]

    result = polys[0]

    for poly in polys[1:]:
        result = dup_mul(result, poly, K)

    return result
def dmp_expand(polys, u, K):
    """
    Multiply together several polynomials in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)

    >>> R.dmp_expand([x**2 + y**2, x + 1])
    x**3 + x**2 + x*y**2 + y**2

    """
    # Empty product is the multiplicative identity.
    if not polys:
        return dmp_one(u, K)

    result = polys[0]

    for poly in polys[1:]:
        result = dmp_mul(result, poly, u, K)

    return result
| {
"repo_name": "kmacinnis/sympy",
"path": "sympy/polys/densearith.py",
"copies": "2",
"size": "33306",
"license": "bsd-3-clause",
"hash": 2594390899822181000,
"line_mean": 17.1603053435,
"line_max": 82,
"alpha_frac": 0.4335254909,
"autogenerated": false,
"ratio": 2.689437984496124,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9120387910153454,
"avg_score": 0.0005151130485341805,
"num_lines": 1834
} |
"""Arithmetics for dense recursive polynomials in `K[x]` or `K[X]`. """
from sympy.polys.densebasic import (
dup_LC, dmp_LC,
dup_degree, dmp_degree,
dup_normal, dmp_normal,
dup_strip, dmp_strip,
dmp_zero_p, dmp_zero,
dmp_one_p, dmp_one,
dmp_zeros
)
from sympy.polys.polyerrors import (
ExactQuotientFailed
)
from sympy.utilities import cythonized
@cythonized("i,n,m")
def dup_add_term(f, c, i, K):
"""Add `c*x**i` to `f` in `K[x]`. """
if not c:
return f
n = len(f)
m = n-i-1
if i == n-1:
return dup_strip([f[0]+c] + f[1:])
else:
if i >= n:
return [c] + [K.zero]*(i-n) + f
else:
return f[:m] + [f[m]+c] + f[m+1:]
@cythonized("i,u,v,n,m")
def dmp_add_term(f, c, i, u, K):
"""Add `c(x_2..x_u)*x_0**i` to `f` in `K[X]`. """
if not u:
return dup_add_term(f, c, i, K)
v = u-1
if dmp_zero_p(c, v):
return f
n = len(f)
m = n-i-1
if i == n-1:
return dmp_strip([dmp_add(f[0], c, v, K)] + f[1:], u)
else:
if i >= n:
return [c] + dmp_zeros(i-n, v, K) + f
else:
return f[:m] + [dmp_add(f[m], c, v, K)] + f[m+1:]
@cythonized("i,n,m")
def dup_sub_term(f, c, i, K):
"""Subtract `c*x**i` from `f` in `K[x]`. """
if not c:
return f
n = len(f)
m = n-i-1
if i == n-1:
return dup_strip([f[0]-c] + f[1:])
else:
if i >= n:
return [-c] + [K.zero]*(i-n) + f
else:
return f[:m] + [f[m]-c] + f[m+1:]
@cythonized("i,u,v,n,m")
def dmp_sub_term(f, c, i, u, K):
"""Subtract `c(x_2..x_u)*x_0**i` from `f` in `K[X]`. """
if not u:
return dup_add_term(f, -c, i, K)
v = u-1
if dmp_zero_p(c, v):
return f
n = len(f)
m = n-i-1
if i == n-1:
return dmp_strip([dmp_sub(f[0], c, v, K)] + f[1:], u)
else:
if i >= n:
return [dmp_neg(c, v, K)] + dmp_zeros(i-n, v, K) + f
else:
return f[:m] + [dmp_sub(f[m], c, v, K)] + f[m+1:]
@cythonized("i")
def dup_mul_term(f, c, i, K):
"""Multiply `f` by `c*x**i` in `K[x]`. """
if not c or not f:
return []
else:
return [ cf * c for cf in f ] + [K.zero]*i
@cythonized("i,u,v")
def dmp_mul_term(f, c, i, u, K):
"""Multiply `f` by `c(x_2..x_u)*x_0**i` in `K[X]`. """
if not u:
return dup_mul_term(f, c, i, K)
v = u-1
if dmp_zero_p(f, u):
return f
if dmp_zero_p(c, v):
return dmp_zero(u)
else:
return [ dmp_mul(cf, c, v, K) for cf in f ] + dmp_zeros(i, v, K)
def dup_mul_ground(f, c, K):
"""Multiply `f` by a constant value in `K[x]`. """
if not c or not f:
return []
else:
return [ cf * c for cf in f ]
@cythonized("u,v")
def dmp_mul_ground(f, c, u, K):
"""Multiply `f` by a constant value in `K[X]`. """
if not u:
return dup_mul_ground(f, c, K)
v = u-1
return [ dmp_mul_ground(cf, c, v, K) for cf in f ]
def dup_quo_ground(f, c, K):
"""Quotient by a constant in `K[x]`. """
if not c:
raise ZeroDivisionError('polynomial division')
if not f:
return f
return [ K.quo(cf, c) for cf in f ]
@cythonized("u,v")
def dmp_quo_ground(f, c, u, K):
"""Quotient by a constant in `K[X]`. """
if not u:
return dup_quo_ground(f, c, K)
v = u-1
return [ dmp_quo_ground(cf, c, v, K) for cf in f ]
def dup_exquo_ground(f, c, K):
"""Exact quotient by a constant in `K[x]`. """
if not c:
raise ZeroDivisionError('polynomial division')
if not f:
return f
if K.has_Field or not K.is_Exact:
return [ K.quo(cf, c) for cf in f ]
else:
return [ cf // c for cf in f ]
@cythonized("u,v")
def dmp_exquo_ground(f, c, u, K):
"""Exact quotient by a constant in `K[X]`. """
if not u:
return dup_exquo_ground(f, c, K)
v = u-1
return [ dmp_exquo_ground(cf, c, v, K) for cf in f ]
@cythonized("n")
def dup_lshift(f, n, K):
"""Efficiently multiply `f` by `x**n` in `K[x]`. """
if not f:
return f
else:
return f + [K.zero]*n
@cythonized("n")
def dup_rshift(f, n, K):
"""Efficiently divide `f` by `x**n` in `K[x]`. """
return f[:-n]
def dup_abs(f, K):
"""Make all coefficients positive in `K[x]`. """
return [ K.abs(coeff) for coeff in f ]
@cythonized("u,v")
def dmp_abs(f, u, K):
"""Make all coefficients positive in `K[X]`. """
if not u:
return dup_abs(f, K)
v = u-1
return [ dmp_abs(cf, v, K) for cf in f ]
def dup_neg(f, K):
"""Negate a polynomial in `K[x]`. """
return [ -coeff for coeff in f ]
@cythonized("u,v")
def dmp_neg(f, u, K):
    """Negate a polynomial in `K[X]`. """
    if not u:
        return dup_neg(f, K)
    # Use the precomputed level of the coefficients instead of
    # recomputing u-1 inside the comprehension.
    v = u-1
    return [ dmp_neg(cf, v, K) for cf in f ]
@cythonized("df,dg,k")
def dup_add(f, g, K):
"""Add dense polynomials in `K[x]`. """
if not f:
return g
if not g:
return f
df = dup_degree(f)
dg = dup_degree(g)
if df == dg:
return dup_strip([ a + b for a, b in zip(f, g) ])
else:
k = abs(df - dg)
if df > dg:
h, f = f[:k], f[k:]
else:
h, g = g[:k], g[k:]
return h + [ a + b for a, b in zip(f, g) ]
@cythonized("u,v,df,dg,k")
def dmp_add(f, g, u, K):
"""Add dense polynomials in `K[X]`. """
if not u:
return dup_add(f, g, K)
df = dmp_degree(f, u)
if df < 0:
return g
dg = dmp_degree(g, u)
if dg < 0:
return f
v = u-1
if df == dg:
return dmp_strip([ dmp_add(a, b, v, K) for a, b in zip(f, g) ], u)
else:
k = abs(df - dg)
if df > dg:
h, f = f[:k], f[k:]
else:
h, g = g[:k], g[k:]
return h + [ dmp_add(a, b, v, K) for a, b in zip(f, g) ]
@cythonized("df,dg,k")
def dup_sub(f, g, K):
"""Subtract dense polynomials in `K[x]`. """
if not f:
return dup_neg(g, K)
if not g:
return f
df = dup_degree(f)
dg = dup_degree(g)
if df == dg:
return dup_strip([ a - b for a, b in zip(f, g) ])
else:
k = abs(df - dg)
if df > dg:
h, f = f[:k], f[k:]
else:
h, g = dup_neg(g[:k], K), g[k:]
return h + [ a - b for a, b in zip(f, g) ]
@cythonized("u,v,df,dg,k")
def dmp_sub(f, g, u, K):
"""Subtract dense polynomials in `K[X]`. """
if not u:
return dup_sub(f, g, K)
df = dmp_degree(f, u)
if df < 0:
return dmp_neg(g, u, K)
dg = dmp_degree(g, u)
if dg < 0:
return f
v = u-1
if df == dg:
return dmp_strip([ dmp_sub(a, b, v, K) for a, b in zip(f, g) ], u)
else:
k = abs(df - dg)
if df > dg:
h, f = f[:k], f[k:]
else:
h, g = dmp_neg(g[:k], u, K), g[k:]
return h + [ dmp_sub(a, b, v, K) for a, b in zip(f, g) ]
def dup_add_mul(f, g, h, K):
"""Returns `f + g*h` where `f, g, h` are in `K[x]`. """
return dup_add(f, dup_mul(g, h, K), K)
@cythonized("u")
def dmp_add_mul(f, g, h, u, K):
"""Returns `f + g*h` where `f, g, h` are in `K[X]`. """
return dmp_add(f, dmp_mul(g, h, u, K), u, K)
def dup_sub_mul(f, g, h, K):
"""Returns `f - g*h` where `f, g, h` are in `K[x]`. """
return dup_sub(f, dup_mul(g, h, K), K)
@cythonized("u")
def dmp_sub_mul(f, g, h, u, K):
"""Returns `f - g*h` where `f, g, h` are in `K[X]`. """
return dmp_sub(f, dmp_mul(g, h, u, K), u, K)
@cythonized("df,dg,i,j")
def dup_mul(f, g, K):
"""Multiply dense polynomials in `K[x]`. """
if f == g:
return dup_sqr(f, K)
if not (f and g):
return []
df = dup_degree(f)
dg = dup_degree(g)
h = []
for i in xrange(0, df+dg+1):
coeff = K.zero
for j in xrange(max(0, i-dg), min(df, i)+1):
coeff += f[j]*g[i-j]
h.append(coeff)
return h
@cythonized("u,v,df,dg,i,j")
def dmp_mul(f, g, u, K):
"""Multiply dense polynomials in `K[X]`. """
if not u:
return dup_mul(f, g, K)
if f == g:
return dmp_sqr(f, u, K)
df = dmp_degree(f, u)
if df < 0:
return f
dg = dmp_degree(g, u)
if dg < 0:
return g
h, v = [], u-1
for i in xrange(0, df+dg+1):
coeff = dmp_zero(v)
for j in xrange(max(0, i-dg), min(df, i)+1):
coeff = dmp_add(coeff, dmp_mul(f[j], g[i-j], v, K), v, K)
h.append(coeff)
return h
@cythonized("df,jmin,jmax,n,i,j")
def dup_sqr(f, K):
"""Square dense polynomials in `K[x]`. """
df, h = dup_degree(f), []
for i in xrange(0, 2*df+1):
c = K.zero
jmin = max(0, i-df)
jmax = min(i, df)
n = jmax - jmin + 1
jmax = jmin + n // 2 - 1
for j in xrange(jmin, jmax+1):
c += f[j]*f[i-j]
c += c
if n & 1:
elem = f[jmax+1]
c += elem**2
h.append(c)
return h
@cythonized("u,v,df,jmin,jmax,n,i,j")
def dmp_sqr(f, u, K):
    """Square dense polynomials in `K[X]`. """
    if not u:
        return dup_sqr(f, K)
    df = dmp_degree(f, u)
    if df < 0:
        return f
    h, v = [], u-1
    for i in xrange(0, 2*df+1):
        c = dmp_zero(v)
        jmin = max(0, i-df)
        jmax = min(i, df)
        n = jmax - jmin + 1
        # Symmetry trick: sum half of the diagonal, double it, and add
        # the middle square when the diagonal has odd length.
        jmax = jmin + n // 2 - 1
        for j in xrange(jmin, jmax+1):
            c = dmp_add(c, dmp_mul(f[j], f[i-j], v, K), v, K)
        # Double via K(2), not the Python int 2, so the coefficient is
        # a proper ground domain element (mirrors the later fix in
        # upstream SymPy).
        c = dmp_mul_ground(c, K(2), v, K)
        if n & 1:
            elem = dmp_sqr(f[jmax+1], v, K)
            c = dmp_add(c, elem, v, K)
        h.append(c)
    return h
@cythonized("n,m")
def dup_pow(f, n, K):
"""Raise f to the n-th power in `K[x]`. """
if not n:
return [K.one]
if n < 0:
raise ValueError("can't raise polynomial to a negative power")
if n == 1 or not f or f == [K.one]:
return f
g = [K.one]
while True:
n, m = n//2, n
if m & 1:
g = dup_mul(g, f, K)
if not n:
break
f = dup_sqr(f, K)
return g
@cythonized("u,n,m")
def dmp_pow(f, n, u, K):
"""Raise f to the n-th power in `K[X]`. """
if not u:
return dup_pow(f, n, K)
if not n:
return dmp_one(u, K)
if n < 0:
raise ValueError("can't raise polynomial to a negative power")
if n == 1 or dmp_zero_p(f, u) or dmp_one_p(f, u, K):
return f
g = dmp_one(u, K)
while True:
n, m = n//2, n
if m & 1:
g = dmp_mul(g, f, u, K)
if not n:
break
f = dmp_sqr(f, u, K)
return g
@cythonized("df,dg,dr,N,j")
def dup_pdiv(f, g, K):
"""Polynomial pseudo-division in `K[x]`. """
df = dup_degree(f)
dg = dup_degree(g)
q, r = [], f
if not g:
raise ZeroDivisionError("polynomial division")
elif df < dg:
return q, r
N = df - dg + 1
lc_g = dup_LC(g, K)
while True:
dr = dup_degree(r)
if dr < dg:
break
lc_r = dup_LC(r, K)
j, N = dr-dg, N-1
Q = dup_mul_ground(q, lc_g, K)
q = dup_add_term(Q, lc_r, j, K)
R = dup_mul_ground(r, lc_g, K)
G = dup_mul_term(g, lc_r, j, K)
r = dup_sub(R, G, K)
c = lc_g**N
q = dup_mul_ground(q, c, K)
r = dup_mul_ground(r, c, K)
return q, r
@cythonized("df,dg,dr,N,j")
def dup_prem(f, g, K):
"""Polynomial pseudo-remainder in `K[x]`. """
df = dup_degree(f)
dg = dup_degree(g)
r = f
if not g:
raise ZeroDivisionError("polynomial division")
elif df < dg:
return r
N = df - dg + 1
lc_g = dup_LC(g, K)
while True:
dr = dup_degree(r)
if dr < dg:
break
lc_r = dup_LC(r, K)
j, N = dr-dg, N-1
R = dup_mul_ground(r, lc_g, K)
G = dup_mul_term(g, lc_r, j, K)
r = dup_sub(R, G, K)
return dup_mul_ground(r, lc_g**N, K)
def dup_pquo(f, g, K):
    """Polynomial pseudo-quotient in `K[x]`.

    Raises ``ExactQuotientFailed`` when the pseudo-remainder is nonzero.
    """
    q, r = dup_pdiv(f, g, K)
    if r:
        raise ExactQuotientFailed('%s does not divide %s' % (g, f))
    return q
def dup_pexquo(f, g, K):
    """Polynomial exact pseudo-quotient in `K[x]`. """
    # The pseudo-remainder is discarded; the caller asserts exactness.
    return dup_pdiv(f, g, K)[0]
@cythonized("u,df,dg,dr,N,j")
def dmp_pdiv(f, g, u, K):
    """Polynomial pseudo-division in `K[X]`.

    Multivariate analogue of :func:`dup_pdiv`: returns ``(q, r)`` with
    ``lc(g)**(df - dg + 1)*f = q*g + r`` in the main variable, where the
    leading coefficients are themselves polynomials at level ``u - 1``.
    """
    if not u:
        return dup_pdiv(f, g, K)
    df = dmp_degree(f, u)
    dg = dmp_degree(g, u)
    if dg < 0:
        # Degree -oo means g is the zero polynomial.
        raise ZeroDivisionError("polynomial division")
    q, r = dmp_zero(u), f
    if df < dg:
        return q, r
    # N counts the scalings still owed to reach lc(g)**(df-dg+1).
    N = df - dg + 1
    lc_g = dmp_LC(g, K)
    while True:
        dr = dmp_degree(r, u)
        if dr < dg:
            break
        lc_r = dmp_LC(r, K)
        j, N = dr-dg, N-1
        # Multiplying by lc_g with exponent 0 scales every coefficient,
        # keeping the elimination fraction-free.
        Q = dmp_mul_term(q, lc_g, 0, u, K)
        q = dmp_add_term(Q, lc_r, j, u, K)
        R = dmp_mul_term(r, lc_g, 0, u, K)
        G = dmp_mul_term(g, lc_r, j, u, K)
        r = dmp_sub(R, G, u, K)
    # Apply the scalings skipped by multi-step degree drops.
    c = dmp_pow(lc_g, N, u-1, K)
    q = dmp_mul_term(q, c, 0, u, K)
    r = dmp_mul_term(r, c, 0, u, K)
    return q, r
@cythonized("u,df,dg,dr,N,j")
def dmp_prem(f, g, u, K):
    """Polynomial pseudo-remainder in `K[X]`.

    Same elimination as :func:`dmp_pdiv`, accumulating only the
    remainder ``r`` with ``lc(g)**(df - dg + 1)*f = q*g + r``.
    """
    if not u:
        return dup_prem(f, g, K)
    df = dmp_degree(f, u)
    dg = dmp_degree(g, u)
    if dg < 0:
        # Degree -oo means g is the zero polynomial.
        raise ZeroDivisionError("polynomial division")
    r = f
    if df < dg:
        return r
    # N tracks the scalings still owed to reach lc(g)**(df-dg+1).
    N = df - dg + 1
    lc_g = dmp_LC(g, K)
    while True:
        dr = dmp_degree(r, u)
        if dr < dg:
            break
        lc_r = dmp_LC(r, K)
        j, N = dr-dg, N-1
        # Scale r by lc(g) (exponent-0 term multiply), then cancel its
        # leading term against g shifted by x**j.
        R = dmp_mul_term(r, lc_g, 0, u, K)
        G = dmp_mul_term(g, lc_r, j, u, K)
        r = dmp_sub(R, G, u, K)
    # Apply the scalings skipped by multi-step degree drops.
    c = dmp_pow(lc_g, N, u-1, K)
    return dmp_mul_term(r, c, 0, u, K)
def dmp_pquo(f, g, u, K):
    """Polynomial pseudo-quotient in `K[X]`.

    Raises ``ExactQuotientFailed`` when the pseudo-remainder is nonzero.
    """
    q, r = dmp_pdiv(f, g, u, K)
    if not dmp_zero_p(r, u):
        raise ExactQuotientFailed('%s does not divide %s' % (g, f))
    return q
def dmp_pexquo(f, g, u, K):
    """Polynomial exact pseudo-quotient in `K[X]`. """
    # The pseudo-remainder is discarded; the caller asserts exactness.
    return dmp_pdiv(f, g, u, K)[0]
@cythonized("df,dg,dr,j")
def dup_rr_div(f, g, K):
    """Univariate division with remainder over a ring.

    Performs classical long division, but stops as soon as the leading
    coefficient of the remainder is not exactly divisible by ``lc(g)``;
    whatever is left at that point is returned as the remainder.
    """
    df = dup_degree(f)
    dg = dup_degree(g)
    q, r = [], f
    if not g:
        raise ZeroDivisionError("polynomial division")
    elif df < dg:
        return q, r
    lc_g = dup_LC(g, K)
    while True:
        dr = dup_degree(r)
        if dr < dg:
            break
        lc_r = dup_LC(r, K)
        # Over a ring, stop when lc(g) does not divide lc(r) exactly.
        if lc_r % lc_g:
            break
        c = K.exquo(lc_r, lc_g)
        j = dr - dg
        q = dup_add_term(q, c, j, K)
        h = dup_mul_term(g, c, j, K)
        r = dup_sub(r, h, K)
    return q, r
@cythonized("u,df,dg,dr,j")
def dmp_rr_div(f, g, u, K):
    """Multivariate division with remainder over a ring.

    Divides in the main variable, recursively dividing the leading
    coefficients (which live one level lower). Division stops as soon as
    a leading-coefficient division leaves a nonzero remainder.
    """
    if not u:
        return dup_rr_div(f, g, K)
    df = dmp_degree(f, u)
    dg = dmp_degree(g, u)
    if dg < 0:
        # Degree -oo means g is the zero polynomial.
        raise ZeroDivisionError("polynomial division")
    q, r = dmp_zero(u), f
    if df < dg:
        return q, r
    lc_g, v = dmp_LC(g, K), u-1
    while True:
        dr = dmp_degree(r, u)
        if dr < dg:
            break
        lc_r = dmp_LC(r, K)
        # Recursive division one level down; inexact => stop dividing.
        c, R = dmp_rr_div(lc_r, lc_g, v, K)
        if not dmp_zero_p(R, v):
            break
        j = dr - dg
        q = dmp_add_term(q, c, j, u, K)
        h = dmp_mul_term(g, c, j, u, K)
        r = dmp_sub(r, h, u, K)
    return q, r
@cythonized("df,dg,dr,j")
def dup_ff_div(f, g, K):
    """Polynomial division with remainder over a field.

    Classical long division; every leading-coefficient division is exact
    because ``K`` is a field.
    """
    df = dup_degree(f)
    dg = dup_degree(g)
    q, r = [], f
    if not g:
        raise ZeroDivisionError("polynomial division")
    elif df < dg:
        return q, r
    lc_g = dup_LC(g, K)
    while True:
        dr = dup_degree(r)
        if dr < dg:
            break
        lc_r = dup_LC(r, K)
        c = K.exquo(lc_r, lc_g)
        j = dr - dg
        q = dup_add_term(q, c, j, K)
        h = dup_mul_term(g, c, j, K)
        r = dup_sub(r, h, K)
        # Inexact domains (e.g. floats) may leave near-zero leading
        # coefficients after subtraction; renormalize to keep degrees sane.
        # NOTE(review): dup_normal is not in this file's import list above,
        # so this branch would raise NameError — confirm the import.
        if not K.is_Exact:
            r = dup_normal(r, K)
    return q, r
@cythonized("u,df,dg,dr,j")
def dmp_ff_div(f, g, u, K):
    """Polynomial division with remainder over a field.

    Multivariate analogue of :func:`dup_ff_div`: leading coefficients
    are divided recursively one level lower, and division stops as soon
    as such a division is inexact.
    """
    if not u:
        return dup_ff_div(f, g, K)
    df = dmp_degree(f, u)
    dg = dmp_degree(g, u)
    if dg < 0:
        # Degree -oo means g is the zero polynomial.
        raise ZeroDivisionError("polynomial division")
    q, r = dmp_zero(u), f
    if df < dg:
        return q, r
    lc_g, v = dmp_LC(g, K), u-1
    while True:
        dr = dmp_degree(r, u)
        if dr < dg:
            break
        lc_r = dmp_LC(r, K)
        # Recursive division one level down; inexact => stop dividing.
        c, R = dmp_ff_div(lc_r, lc_g, v, K)
        if not dmp_zero_p(R, v):
            break
        j = dr - dg
        q = dmp_add_term(q, c, j, u, K)
        h = dmp_mul_term(g, c, j, u, K)
        r = dmp_sub(r, h, u, K)
    return q, r
def dup_div(f, g, K):
    """Polynomial division with remainder in `K[x]`.

    Dispatches to field division when coefficient division is always
    possible, and to ring division otherwise.
    """
    if K.has_Field or not K.is_Exact:
        return dup_ff_div(f, g, K)
    return dup_rr_div(f, g, K)
def dup_rem(f, g, K):
    """Returns polynomial remainder in `K[x]`. """
    _, r = dup_div(f, g, K)
    return r
def dup_quo(f, g, K):
    """Returns polynomial quotient in `K[x]`.

    Raises ``ExactQuotientFailed`` when the remainder is nonzero.
    """
    q, r = dup_div(f, g, K)
    if r:
        raise ExactQuotientFailed('%s does not divide %s' % (g, f))
    return q
def dup_exquo(f, g, K):
    """Returns exact polynomial quotient in `K[x]`. """
    q, _ = dup_div(f, g, K)
    return q
@cythonized("u")
def dmp_div(f, g, u, K):
    """Polynomial division with remainder in `K[X]`.

    Dispatches to field division when coefficient division is always
    possible, and to ring division otherwise.
    """
    if K.has_Field or not K.is_Exact:
        return dmp_ff_div(f, g, u, K)
    return dmp_rr_div(f, g, u, K)
@cythonized("u")
def dmp_rem(f, g, u, K):
    """Returns polynomial remainder in `K[X]`. """
    _, r = dmp_div(f, g, u, K)
    return r
@cythonized("u")
def dmp_quo(f, g, u, K):
    """Returns polynomial quotient in `K[X]`.

    Raises ``ExactQuotientFailed`` when the remainder is nonzero.
    """
    q, r = dmp_div(f, g, u, K)
    if not dmp_zero_p(r, u):
        raise ExactQuotientFailed('%s does not divide %s' % (g, f))
    return q
@cythonized("u")
def dmp_exquo(f, g, u, K):
    """Returns exact polynomial quotient in `K[X]`. """
    q, _ = dmp_div(f, g, u, K)
    return q
def dup_max_norm(f, K):
    """Returns maximum norm of a polynomial in `K[x]`.

    The maximum norm is the largest absolute value of any coefficient;
    the zero polynomial has norm ``K.zero``.
    """
    if f:
        return max(dup_abs(f, K))
    return K.zero
@cythonized("u,v")
def dmp_max_norm(f, u, K):
    """Returns maximum norm of a polynomial in `K[X]`.

    Recursively takes the largest maximum norm over all coefficients.
    """
    if not u:
        return dup_max_norm(f, K)
    v = u-1
    return max(dmp_max_norm(c, v, K) for c in f)
def dup_l1_norm(f, K):
    """Returns l1 norm of a polynomial in `K[x]`.

    The l1 norm is the sum of absolute values of the coefficients;
    the zero polynomial has norm ``K.zero``.
    """
    if f:
        return sum(dup_abs(f, K))
    return K.zero
@cythonized("u,v")
def dmp_l1_norm(f, u, K):
    """Returns l1 norm of a polynomial in `K[X]`.

    Recursively sums the l1 norms of all coefficients.
    """
    if not u:
        return dup_l1_norm(f, K)
    v = u-1
    return sum(dmp_l1_norm(c, v, K) for c in f)
def dup_expand(polys, K):
    """Multiply together several polynomials in `K[x]`.

    An empty sequence yields the constant polynomial one.
    """
    if not polys:
        return [K.one]
    result = polys[0]
    for i in range(1, len(polys)):
        result = dup_mul(result, polys[i], K)
    return result
@cythonized("u")
def dmp_expand(polys, u, K):
    """Multiply together several polynomials in `K[X]`.

    An empty sequence yields the constant polynomial one at level ``u``.
    """
    if not polys:
        return dmp_one(u, K)
    result = polys[0]
    for i in range(1, len(polys)):
        result = dmp_mul(result, polys[i], u, K)
    return result
| {
"repo_name": "tarballs-are-good/sympy",
"path": "sympy/polys/densearith.py",
"copies": "3",
"size": "19747",
"license": "bsd-3-clause",
"hash": -2414432543870292500,
"line_mean": 19.677486911,
"line_max": 74,
"alpha_frac": 0.471312098,
"autogenerated": false,
"ratio": 2.605145118733509,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9552938784657419,
"avg_score": 0.004703686415218176,
"num_lines": 955
} |
"""Arithmetics for dense recursive polynomials in ``K[x]`` or ``K[X]``. """
from sympy.polys.densebasic import (
dup_LC, dmp_LC,
dup_degree, dmp_degree,
dup_normal,
dup_strip, dmp_strip,
dmp_zero_p, dmp_zero,
dmp_one_p, dmp_one,
dmp_ground, dmp_zeros)
from sympy.polys.polyerrors import (
ExactQuotientFailed)
from sympy.utilities import cythonized
@cythonized("i,n,m")
def dup_add_term(f, c, i, K):
    """
    Add ``c*x**i`` to ``f`` in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dup_add_term

    >>> f = ZZ.map([1, 0, -1])

    >>> dup_add_term(f, ZZ(2), 4, ZZ)
    [2, 0, 1, 0, -1]

    """
    if not c:
        return f
    n = len(f)
    # Coefficients are stored densely, highest degree first, so the x**i
    # coefficient sits at list index n-i-1.
    m = n-i-1
    if i == n-1:
        # Adding to the leading coefficient may cancel it: re-strip.
        return dup_strip([f[0]+c] + f[1:])
    else:
        if i >= n:
            # New leading term: prepend c and pad the gap with zeros.
            return [c] + [K.zero]*(i-n) + f
        else:
            return f[:m] + [f[m]+c] + f[m+1:]
@cythonized("i,u,v,n,m")
def dmp_add_term(f, c, i, u, K):
"""
Add ``c(x_2..x_u)*x_0**i`` to ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_add_term
>>> f = ZZ.map([[1, 0], [1]])
>>> c = ZZ.map([2])
>>> dmp_add_term(f, c, 2, 1, ZZ)
[[2], [1, 0], [1]]
"""
if not u:
return dup_add_term(f, c, i, K)
v = u-1
if dmp_zero_p(c, v):
return f
n = len(f)
m = n-i-1
if i == n-1:
return dmp_strip([dmp_add(f[0], c, v, K)] + f[1:], u)
else:
if i >= n:
return [c] + dmp_zeros(i-n, v, K) + f
else:
return f[:m] + [dmp_add(f[m], c, v, K)] + f[m+1:]
@cythonized("i,n,m")
def dup_sub_term(f, c, i, K):
"""
Subtract ``c*x**i`` from ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_sub_term
>>> f = ZZ.map([2, 0, 1, 0, -1])
>>> dup_sub_term(f, ZZ(2), 4, ZZ)
[1, 0, -1]
"""
if not c:
return f
n = len(f)
m = n-i-1
if i == n-1:
return dup_strip([f[0]-c] + f[1:])
else:
if i >= n:
return [-c] + [K.zero]*(i-n) + f
else:
return f[:m] + [f[m]-c] + f[m+1:]
@cythonized("i,u,v,n,m")
def dmp_sub_term(f, c, i, u, K):
"""
Subtract ``c(x_2..x_u)*x_0**i`` from ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_sub_term
>>> f = ZZ.map([[2], [1, 0], [1]])
>>> c = ZZ.map([2])
>>> dmp_sub_term(f, c, 2, 1, ZZ)
[[1, 0], [1]]
"""
if not u:
return dup_add_term(f, -c, i, K)
v = u-1
if dmp_zero_p(c, v):
return f
n = len(f)
m = n-i-1
if i == n-1:
return dmp_strip([dmp_sub(f[0], c, v, K)] + f[1:], u)
else:
if i >= n:
return [dmp_neg(c, v, K)] + dmp_zeros(i-n, v, K) + f
else:
return f[:m] + [dmp_sub(f[m], c, v, K)] + f[m+1:]
@cythonized("i")
def dup_mul_term(f, c, i, K):
"""
Multiply ``f`` by ``c*x**i`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_mul_term
>>> f = ZZ.map([1, 0, -1])
>>> dup_mul_term(f, ZZ(3), 2, ZZ)
[3, 0, -3, 0, 0]
"""
if not c or not f:
return []
else:
return [ cf * c for cf in f ] + [K.zero]*i
@cythonized("i,u,v")
def dmp_mul_term(f, c, i, u, K):
"""
Multiply ``f`` by ``c(x_2..x_u)*x_0**i`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_mul_term
>>> f = ZZ.map([[1, 0], [1], []])
>>> c = ZZ.map([3, 0])
>>> dmp_mul_term(f, c, 2, 1, ZZ)
[[3, 0, 0], [3, 0], [], [], []]
"""
if not u:
return dup_mul_term(f, c, i, K)
v = u-1
if dmp_zero_p(f, u):
return f
if dmp_zero_p(c, v):
return dmp_zero(u)
else:
return [ dmp_mul(cf, c, v, K) for cf in f ] + dmp_zeros(i, v, K)
def dup_add_ground(f, c, K):
"""
Add an element of the ground domain to ``f``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_add_ground
>>> f = ZZ.map([1, 2, 3, 4])
>>> dup_add_ground(f, ZZ(4), ZZ)
[1, 2, 3, 8]
"""
return dup_add_term(f, c, 0, K)
def dmp_add_ground(f, c, u, K):
"""
Add an element of the ground domain to ``f``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_add_ground
>>> f = ZZ.map([[1], [2], [3], [4]])
>>> dmp_add_ground(f, ZZ(4), 1, ZZ)
[[1], [2], [3], [8]]
"""
return dmp_add_term(f, dmp_ground(c, u-1), 0, u, K)
def dup_sub_ground(f, c, K):
"""
Subtract an element of the ground domain from ``f``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_sub_ground
>>> f = ZZ.map([1, 2, 3, 4])
>>> dup_sub_ground(f, ZZ(4), ZZ)
[1, 2, 3, 0]
"""
return dup_sub_term(f, c, 0, K)
def dmp_sub_ground(f, c, u, K):
"""
Subtract an element of the ground domain from ``f``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_sub_ground
>>> f = ZZ.map([[1], [2], [3], [4]])
>>> dmp_sub_ground(f, ZZ(4), 1, ZZ)
[[1], [2], [3], []]
"""
return dmp_sub_term(f, dmp_ground(c, u-1), 0, u, K)
def dup_mul_ground(f, c, K):
    """
    Multiply ``f`` by a constant value in ``K[x]``.

    Multiplying by zero, or multiplying the zero polynomial, gives the
    canonical zero polynomial ``[]``.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dup_mul_ground

    >>> f = ZZ.map([1, 2, -1])

    >>> dup_mul_ground(f, ZZ(3), ZZ)
    [3, 6, -3]

    """
    if c and f:
        return [ coeff * c for coeff in f ]
    return []
@cythonized("u,v")
def dmp_mul_ground(f, c, u, K):
"""
Multiply ``f`` by a constant value in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_mul_ground
>>> f = ZZ.map([[2], [2, 0]])
>>> dmp_mul_ground(f, ZZ(3), 1, ZZ)
[[6], [6, 0]]
"""
if not u:
return dup_mul_ground(f, c, K)
v = u-1
return [ dmp_mul_ground(cf, c, v, K) for cf in f ]
def dup_quo_ground(f, c, K):
    """
    Quotient by a constant in ``K[x]``.

    Over a field (or an inexact domain) each coefficient is divided with
    ``K.quo``; over an exact ring floor division is used instead.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ, QQ
    >>> from sympy.polys.densearith import dup_quo_ground

    >>> f = ZZ.map([3, 0, 2])
    >>> g = QQ.map([3, 0, 2])

    >>> dup_quo_ground(f, ZZ(2), ZZ)
    [1, 0, 1]

    >>> dup_quo_ground(g, QQ(2), QQ)
    [3/2, 0/1, 1/1]

    """
    if not c:
        raise ZeroDivisionError('polynomial division')
    if not f:
        return f
    if not K.has_Field and K.is_Exact:
        return [ coeff // c for coeff in f ]
    return [ K.quo(coeff, c) for coeff in f ]
@cythonized("u,v")
def dmp_quo_ground(f, c, u, K):
"""
Quotient by a constant in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy.polys.densearith import dmp_quo_ground
>>> f = ZZ.map([[2, 0], [3], []])
>>> g = QQ.map([[2, 0], [3], []])
>>> dmp_quo_ground(f, ZZ(2), 1, ZZ)
[[1, 0], [1], []]
>>> dmp_quo_ground(g, QQ(2), 1, QQ)
[[1/1, 0/1], [3/2], []]
"""
if not u:
return dup_quo_ground(f, c, K)
v = u-1
return [ dmp_quo_ground(cf, c, v, K) for cf in f ]
def dup_exquo_ground(f, c, K):
    """
    Exact quotient by a constant in ``K[x]``.

    Every coefficient is divided with ``K.exquo``, which is expected to
    raise if any division is inexact.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ, QQ
    >>> from sympy.polys.densearith import dup_exquo_ground

    >>> f = QQ.map([1, 0, 2])

    >>> dup_exquo_ground(f, QQ(2), QQ)
    [1/2, 0/1, 1/1]

    """
    if not c:
        raise ZeroDivisionError('polynomial division')
    if not f:
        return f
    return [ K.exquo(coeff, c) for coeff in f ]
@cythonized("u,v")
def dmp_exquo_ground(f, c, u, K):
"""
Exact quotient by a constant in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy.polys.densearith import dmp_exquo_ground
>>> f = QQ.map([[1, 0], [2], []])
>>> dmp_exquo_ground(f, QQ(2), 1, QQ)
[[1/2, 0/1], [1/1], []]
"""
if not u:
return dup_exquo_ground(f, c, K)
v = u-1
return [ dmp_exquo_ground(cf, c, v, K) for cf in f ]
@cythonized("n")
def dup_lshift(f, n, K):
    """
    Efficiently multiply ``f`` by ``x**n`` in ``K[x]``.

    Appends ``n`` zero coefficients; the zero polynomial is unchanged.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dup_lshift

    >>> f = ZZ.map([1, 0, 1])

    >>> dup_lshift(f, 2, ZZ)
    [1, 0, 1, 0, 0]

    """
    if f:
        return f + [K.zero]*n
    return f
@cythonized("n")
def dup_rshift(f, n, K):
    """
    Efficiently divide ``f`` by ``x**n`` in ``K[x]``.

    The lowest ``n`` coefficients are simply dropped, so any nonzero
    remainder is silently discarded (see the second example).

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dup_rshift

    >>> f = ZZ.map([1, 0, 1, 0, 0])
    >>> g = ZZ.map([1, 0, 1, 0, 2])

    >>> dup_rshift(f, 2, ZZ)
    [1, 0, 1]

    >>> dup_rshift(g, 2, ZZ)
    [1, 0, 1]

    """
    # NOTE(review): for n == 0 this returns [] because f[:-0] is f[:0];
    # callers are presumably expected to pass n >= 1 — confirm.
    return f[:-n]
def dup_abs(f, K):
    """
    Make all coefficients positive in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dup_abs

    >>> f = ZZ.map([1, 0, -1])

    >>> dup_abs(f, ZZ)
    [1, 0, 1]

    """
    return list(map(K.abs, f))
@cythonized("u,v")
def dmp_abs(f, u, K):
"""
Make all coefficients positive in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_abs
>>> f = ZZ.map([[1, 0], [-1], []])
>>> dmp_abs(f, 1, ZZ)
[[1, 0], [1], []]
"""
if not u:
return dup_abs(f, K)
v = u-1
return [ dmp_abs(cf, v, K) for cf in f ]
def dup_neg(f, K):
    """
    Negate a polynomial in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dup_neg

    >>> f = ZZ.map([1, 0, -1])

    >>> dup_neg(f, ZZ)
    [-1, 0, 1]

    """
    return [ -c for c in f ]
@cythonized("u,v")
def dmp_neg(f, u, K):
"""
Negate a polynomial in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_neg
>>> f = ZZ.map([[1, 0], [-1], []])
>>> dmp_neg(f, 1, ZZ)
[[-1, 0], [1], []]
"""
if not u:
return dup_neg(f, K)
v = u-1
return [ dmp_neg(cf, v, K) for cf in f ]
@cythonized("df,dg,k")
def dup_add(f, g, K):
"""
Add dense polynomials in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_add
>>> f = ZZ.map([1, 0, -1])
>>> g = ZZ.map([1, -2])
>>> dup_add(f, g, ZZ)
[1, 1, -3]
"""
if not f:
return g
if not g:
return f
df = dup_degree(f)
dg = dup_degree(g)
if df == dg:
return dup_strip([ a + b for a, b in zip(f, g) ])
else:
k = abs(df - dg)
if df > dg:
h, f = f[:k], f[k:]
else:
h, g = g[:k], g[k:]
return h + [ a + b for a, b in zip(f, g) ]
@cythonized("u,v,df,dg,k")
def dmp_add(f, g, u, K):
"""
Add dense polynomials in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_add
>>> f = ZZ.map([[1], [], [1, 0]])
>>> g = ZZ.map([[1, 0], [1], []])
>>> dmp_add(f, g, 1, ZZ)
[[1, 1], [1], [1, 0]]
"""
if not u:
return dup_add(f, g, K)
df = dmp_degree(f, u)
if df < 0:
return g
dg = dmp_degree(g, u)
if dg < 0:
return f
v = u-1
if df == dg:
return dmp_strip([ dmp_add(a, b, v, K) for a, b in zip(f, g) ], u)
else:
k = abs(df - dg)
if df > dg:
h, f = f[:k], f[k:]
else:
h, g = g[:k], g[k:]
return h + [ dmp_add(a, b, v, K) for a, b in zip(f, g) ]
@cythonized("df,dg,k")
def dup_sub(f, g, K):
"""
Subtract dense polynomials in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_sub
>>> f = ZZ.map([1, 0, -1])
>>> g = ZZ.map([1, -2])
>>> dup_sub(f, g, ZZ)
[1, -1, 1]
"""
if not f:
return dup_neg(g, K)
if not g:
return f
df = dup_degree(f)
dg = dup_degree(g)
if df == dg:
return dup_strip([ a - b for a, b in zip(f, g) ])
else:
k = abs(df - dg)
if df > dg:
h, f = f[:k], f[k:]
else:
h, g = dup_neg(g[:k], K), g[k:]
return h + [ a - b for a, b in zip(f, g) ]
@cythonized("u,v,df,dg,k")
def dmp_sub(f, g, u, K):
"""
Subtract dense polynomials in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_sub
>>> f = ZZ.map([[1], [], [1, 0]])
>>> g = ZZ.map([[1, 0], [1], []])
>>> dmp_sub(f, g, 1, ZZ)
[[-1, 1], [-1], [1, 0]]
"""
if not u:
return dup_sub(f, g, K)
df = dmp_degree(f, u)
if df < 0:
return dmp_neg(g, u, K)
dg = dmp_degree(g, u)
if dg < 0:
return f
v = u-1
if df == dg:
return dmp_strip([ dmp_sub(a, b, v, K) for a, b in zip(f, g) ], u)
else:
k = abs(df - dg)
if df > dg:
h, f = f[:k], f[k:]
else:
h, g = dmp_neg(g[:k], u, K), g[k:]
return h + [ dmp_sub(a, b, v, K) for a, b in zip(f, g) ]
def dup_add_mul(f, g, h, K):
"""
Returns ``f + g*h`` where ``f, g, h`` are in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_add_mul
>>> f = ZZ.map([1, 0, -1])
>>> g = ZZ.map([1, -2])
>>> h = ZZ.map([1, 2])
>>> dup_add_mul(f, g, h, ZZ)
[2, 0, -5]
"""
return dup_add(f, dup_mul(g, h, K), K)
@cythonized("u")
def dmp_add_mul(f, g, h, u, K):
"""
Returns ``f + g*h`` where ``f, g, h`` are in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_add_mul
>>> f = ZZ.map([[1], [], [1, 0]])
>>> g = ZZ.map([[1], []])
>>> h = ZZ.map([[1], [2]])
>>> dmp_add_mul(f, g, h, 1, ZZ)
[[2], [2], [1, 0]]
"""
return dmp_add(f, dmp_mul(g, h, u, K), u, K)
def dup_sub_mul(f, g, h, K):
"""
Returns ``f - g*h`` where ``f, g, h`` are in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_sub_mul
>>> f = ZZ.map([1, 0, -1])
>>> g = ZZ.map([1, -2])
>>> h = ZZ.map([1, 2])
>>> dup_sub_mul(f, g, h, ZZ)
[3]
"""
return dup_sub(f, dup_mul(g, h, K), K)
@cythonized("u")
def dmp_sub_mul(f, g, h, u, K):
"""
Returns ``f - g*h`` where ``f, g, h`` are in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_sub_mul
>>> f = ZZ.map([[1], [], [1, 0]])
>>> g = ZZ.map([[1], []])
>>> h = ZZ.map([[1], [2]])
>>> dmp_sub_mul(f, g, h, 1, ZZ)
[[-2], [1, 0]]
"""
return dmp_sub(f, dmp_mul(g, h, u, K), u, K)
@cythonized("df,dg,i,j")
def dup_mul(f, g, K):
"""
Multiply dense polynomials in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_mul
>>> f = ZZ.map([1, -2])
>>> g = ZZ.map([1, 2])
>>> dup_mul(f, g, ZZ)
[1, 0, -4]
"""
if f == g:
return dup_sqr(f, K)
if not (f and g):
return []
df = dup_degree(f)
dg = dup_degree(g)
h = []
for i in xrange(0, df+dg+1):
coeff = K.zero
for j in xrange(max(0, i-dg), min(df, i)+1):
coeff += f[j]*g[i-j]
h.append(coeff)
return dup_strip(h)
@cythonized("u,v,df,dg,i,j")
def dmp_mul(f, g, u, K):
"""
Multiply dense polynomials in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_mul
>>> f = ZZ.map([[1, 0], [1]])
>>> g = ZZ.map([[1], []])
>>> dmp_mul(f, g, 1, ZZ)
[[1, 0], [1], []]
"""
if not u:
return dup_mul(f, g, K)
if f == g:
return dmp_sqr(f, u, K)
df = dmp_degree(f, u)
if df < 0:
return f
dg = dmp_degree(g, u)
if dg < 0:
return g
h, v = [], u-1
for i in xrange(0, df+dg+1):
coeff = dmp_zero(v)
for j in xrange(max(0, i-dg), min(df, i)+1):
coeff = dmp_add(coeff, dmp_mul(f[j], g[i-j], v, K), v, K)
h.append(coeff)
return dmp_strip(h, u)
@cythonized("df,jmin,jmax,n,i,j")
def dup_sqr(f, K):
"""
Square dense polynomials in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_sqr
>>> f = ZZ.map([1, 0, 1])
>>> dup_sqr(f, ZZ)
[1, 0, 2, 0, 1]
"""
df, h = dup_degree(f), []
for i in xrange(0, 2*df+1):
c = K.zero
jmin = max(0, i-df)
jmax = min(i, df)
n = jmax - jmin + 1
jmax = jmin + n // 2 - 1
for j in xrange(jmin, jmax+1):
c += f[j]*f[i-j]
c += c
if n & 1:
elem = f[jmax+1]
c += elem**2
h.append(c)
return dup_strip(h)
@cythonized("u,v,df,jmin,jmax,n,i,j")
def dmp_sqr(f, u, K):
"""
Square dense polynomials in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_sqr
>>> f = ZZ.map([[1], [1, 0], [1, 0, 0]])
>>> dmp_sqr(f, 1, ZZ)
[[1], [2, 0], [3, 0, 0], [2, 0, 0, 0], [1, 0, 0, 0, 0]]
"""
if not u:
return dup_sqr(f, K)
df = dmp_degree(f, u)
if df < 0:
return f
h, v = [], u-1
for i in xrange(0, 2*df+1):
c = dmp_zero(v)
jmin = max(0, i-df)
jmax = min(i, df)
n = jmax - jmin + 1
jmax = jmin + n // 2 - 1
for j in xrange(jmin, jmax+1):
c = dmp_add(c, dmp_mul(f[j], f[i-j], v, K), v, K)
c = dmp_mul_ground(c, K(2), v, K)
if n & 1:
elem = dmp_sqr(f[jmax+1], v, K)
c = dmp_add(c, elem, v, K)
h.append(c)
return dmp_strip(h, u)
@cythonized("n,m")
def dup_pow(f, n, K):
"""
Raise ``f`` to the ``n``-th power in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_pow
>>> dup_pow([ZZ(1), -ZZ(2)], 3, ZZ)
[1, -6, 12, -8]
"""
if not n:
return [K.one]
if n < 0:
raise ValueError("can't raise polynomial to a negative power")
if n == 1 or not f or f == [K.one]:
return f
g = [K.one]
while True:
n, m = n//2, n
if m % 2:
g = dup_mul(g, f, K)
if not n:
break
f = dup_sqr(f, K)
return g
@cythonized("u,n,m")
def dmp_pow(f, n, u, K):
"""
Raise ``f`` to the ``n``-th power in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_pow
>>> f = ZZ.map([[1, 0], [1]])
>>> dmp_pow(f, 3, 1, ZZ)
[[1, 0, 0, 0], [3, 0, 0], [3, 0], [1]]
"""
if not u:
return dup_pow(f, n, K)
if not n:
return dmp_one(u, K)
if n < 0:
raise ValueError("can't raise polynomial to a negative power")
if n == 1 or dmp_zero_p(f, u) or dmp_one_p(f, u, K):
return f
g = dmp_one(u, K)
while True:
n, m = n//2, n
if m & 1:
g = dmp_mul(g, f, u, K)
if not n:
break
f = dmp_sqr(f, u, K)
return g
@cythonized("df,dg,dr,N,j")
def dup_pdiv(f, g, K):
"""
Polynomial pseudo-division in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_pdiv
>>> f = ZZ.map([1, 0, 1])
>>> g = ZZ.map([2, -4])
>>> dup_pdiv(f, g, ZZ)
([2, 4], [20])
"""
df = dup_degree(f)
dg = dup_degree(g)
q, r = [], f
if not g:
raise ZeroDivisionError("polynomial division")
elif df < dg:
return q, r
N = df - dg + 1
lc_g = dup_LC(g, K)
while True:
dr = dup_degree(r)
if dr < dg:
break
lc_r = dup_LC(r, K)
j, N = dr-dg, N-1
Q = dup_mul_ground(q, lc_g, K)
q = dup_add_term(Q, lc_r, j, K)
R = dup_mul_ground(r, lc_g, K)
G = dup_mul_term(g, lc_r, j, K)
r = dup_sub(R, G, K)
c = lc_g**N
q = dup_mul_ground(q, c, K)
r = dup_mul_ground(r, c, K)
return q, r
@cythonized("df,dg,dr,N,j")
def dup_prem(f, g, K):
"""
Polynomial pseudo-remainder in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_prem
>>> f = ZZ.map([1, 0, 1])
>>> g = ZZ.map([2, -4])
>>> dup_prem(f, g, ZZ)
[20]
"""
df = dup_degree(f)
dg = dup_degree(g)
r = f
if not g:
raise ZeroDivisionError("polynomial division")
elif df < dg:
return r
N = df - dg + 1
lc_g = dup_LC(g, K)
while True:
dr = dup_degree(r)
if dr < dg:
break
lc_r = dup_LC(r, K)
j, N = dr-dg, N-1
R = dup_mul_ground(r, lc_g, K)
G = dup_mul_term(g, lc_r, j, K)
r = dup_sub(R, G, K)
return dup_mul_ground(r, lc_g**N, K)
def dup_pquo(f, g, K):
    """
    Polynomial pseudo-quotient in ``K[x]``.

    The pseudo-remainder is silently discarded; use :func:`dup_pexquo`
    to get an error when the pseudo-division is not exact.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dup_pquo

    >>> f = ZZ.map([1, 0, -1])
    >>> g = ZZ.map([2, -2])

    >>> dup_pquo(f, g, ZZ)
    [2, 2]

    >>> f = ZZ.map([1, 0, 1])
    >>> g = ZZ.map([2, -4])

    >>> dup_pquo(f, g, ZZ)
    [2, 4]

    """
    return dup_pdiv(f, g, K)[0]
def dup_pexquo(f, g, K):
    """
    Polynomial exact pseudo-quotient in ``K[x]``.

    Raises ``ExactQuotientFailed`` when the pseudo-remainder is nonzero.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dup_pexquo

    >>> f = ZZ.map([1, 0, -1])
    >>> g = ZZ.map([2, -2])

    >>> dup_pexquo(f, g, ZZ)
    [2, 2]

    >>> f = ZZ.map([1, 0, 1])
    >>> g = ZZ.map([2, -4])

    >>> dup_pexquo(f, g, ZZ)
    Traceback (most recent call last):
    ...
    ExactQuotientFailed: [2, -4] does not divide [1, 0, 1]

    """
    q, r = dup_pdiv(f, g, K)
    if not r:
        return q
    else:
        raise ExactQuotientFailed(f, g)
@cythonized("u,df,dg,dr,N,j")
def dmp_pdiv(f, g, u, K):
"""
Polynomial pseudo-division in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_pdiv
>>> f = ZZ.map([[1], [1, 0], []])
>>> g = ZZ.map([[2], [2]])
>>> dmp_pdiv(f, g, 1, ZZ)
([[2], [2, -2]], [[-4, 4]])
"""
if not u:
return dup_pdiv(f, g, K)
df = dmp_degree(f, u)
dg = dmp_degree(g, u)
if dg < 0:
raise ZeroDivisionError("polynomial division")
q, r = dmp_zero(u), f
if df < dg:
return q, r
N = df - dg + 1
lc_g = dmp_LC(g, K)
while True:
dr = dmp_degree(r, u)
if dr < dg:
break
lc_r = dmp_LC(r, K)
j, N = dr-dg, N-1
Q = dmp_mul_term(q, lc_g, 0, u, K)
q = dmp_add_term(Q, lc_r, j, u, K)
R = dmp_mul_term(r, lc_g, 0, u, K)
G = dmp_mul_term(g, lc_r, j, u, K)
r = dmp_sub(R, G, u, K)
c = dmp_pow(lc_g, N, u-1, K)
q = dmp_mul_term(q, c, 0, u, K)
r = dmp_mul_term(r, c, 0, u, K)
return q, r
@cythonized("u,df,dg,dr,N,j")
def dmp_prem(f, g, u, K):
"""
Polynomial pseudo-remainder in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_prem
>>> f = ZZ.map([[1], [1, 0], []])
>>> g = ZZ.map([[2], [2]])
>>> dmp_prem(f, g, 1, ZZ)
[[-4, 4]]
"""
if not u:
return dup_prem(f, g, K)
df = dmp_degree(f, u)
dg = dmp_degree(g, u)
if dg < 0:
raise ZeroDivisionError("polynomial division")
r = f
if df < dg:
return r
N = df - dg + 1
lc_g = dmp_LC(g, K)
while True:
dr = dmp_degree(r, u)
if dr < dg:
break
lc_r = dmp_LC(r, K)
j, N = dr-dg, N-1
R = dmp_mul_term(r, lc_g, 0, u, K)
G = dmp_mul_term(g, lc_r, j, u, K)
r = dmp_sub(R, G, u, K)
c = dmp_pow(lc_g, N, u-1, K)
return dmp_mul_term(r, c, 0, u, K)
def dmp_pquo(f, g, u, K):
    """
    Polynomial pseudo-quotient in ``K[X]``.

    The pseudo-remainder is silently discarded; use :func:`dmp_pexquo`
    to get an error when the pseudo-division is not exact.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dmp_pquo

    >>> f = ZZ.map([[1], [1, 0], []])
    >>> g = ZZ.map([[2], [2, 0]])
    >>> h = ZZ.map([[2], [2]])

    >>> dmp_pquo(f, g, 1, ZZ)
    [[2], []]

    >>> dmp_pquo(f, h, 1, ZZ)
    [[2], [2, -2]]

    """
    return dmp_pdiv(f, g, u, K)[0]
def dmp_pexquo(f, g, u, K):
    """
    Polynomial exact pseudo-quotient in ``K[X]``.

    Raises ``ExactQuotientFailed`` when the pseudo-remainder is nonzero.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dmp_pexquo

    >>> f = ZZ.map([[1], [1, 0], []])
    >>> g = ZZ.map([[2], [2, 0]])
    >>> h = ZZ.map([[2], [2]])

    >>> dmp_pexquo(f, g, 1, ZZ)
    [[2], []]

    >>> dmp_pexquo(f, h, 1, ZZ)
    Traceback (most recent call last):
    ...
    ExactQuotientFailed: [[2], [2]] does not divide [[1], [1, 0], []]

    """
    q, r = dmp_pdiv(f, g, u, K)
    if dmp_zero_p(r, u):
        return q
    else:
        raise ExactQuotientFailed(f, g)
@cythonized("df,dg,dr,j")
def dup_rr_div(f, g, K):
"""
Univariate division with remainder over a ring.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_rr_div
>>> f = ZZ.map([1, 0, 1])
>>> g = ZZ.map([2, -4])
>>> dup_rr_div(f, g, ZZ)
([], [1, 0, 1])
"""
df = dup_degree(f)
dg = dup_degree(g)
q, r = [], f
if not g:
raise ZeroDivisionError("polynomial division")
elif df < dg:
return q, r
lc_g = dup_LC(g, K)
while True:
dr = dup_degree(r)
if dr < dg:
break
lc_r = dup_LC(r, K)
if lc_r % lc_g:
break
c = K.exquo(lc_r, lc_g)
j = dr - dg
q = dup_add_term(q, c, j, K)
h = dup_mul_term(g, c, j, K)
r = dup_sub(r, h, K)
return q, r
@cythonized("u,df,dg,dr,j")
def dmp_rr_div(f, g, u, K):
"""
Multivariate division with remainder over a ring.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_rr_div
>>> f = ZZ.map([[1], [1, 0], []])
>>> g = ZZ.map([[2], [2]])
>>> dmp_rr_div(f, g, 1, ZZ)
([[]], [[1], [1, 0], []])
"""
if not u:
return dup_rr_div(f, g, K)
df = dmp_degree(f, u)
dg = dmp_degree(g, u)
if dg < 0:
raise ZeroDivisionError("polynomial division")
q, r = dmp_zero(u), f
if df < dg:
return q, r
lc_g, v = dmp_LC(g, K), u-1
while True:
dr = dmp_degree(r, u)
if dr < dg:
break
lc_r = dmp_LC(r, K)
c, R = dmp_rr_div(lc_r, lc_g, v, K)
if not dmp_zero_p(R, v):
break
j = dr - dg
q = dmp_add_term(q, c, j, u, K)
h = dmp_mul_term(g, c, j, u, K)
r = dmp_sub(r, h, u, K)
return q, r
@cythonized("df,dg,dr,j")
def dup_ff_div(f, g, K):
"""
Polynomial division with remainder over a field.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.densearith import dup_ff_div
>>> f = QQ.map([1, 0, 1])
>>> g = QQ.map([2, -4])
>>> dup_ff_div(f, g, QQ)
([1/2, 1/1], [5/1])
"""
df = dup_degree(f)
dg = dup_degree(g)
q, r = [], f
if not g:
raise ZeroDivisionError("polynomial division")
elif df < dg:
return q, r
lc_g = dup_LC(g, K)
while True:
dr = dup_degree(r)
if dr < dg:
break
lc_r = dup_LC(r, K)
c = K.exquo(lc_r, lc_g)
j = dr - dg
q = dup_add_term(q, c, j, K)
h = dup_mul_term(g, c, j, K)
r = dup_sub(r, h, K)
if not K.is_Exact:
r = dup_normal(r, K)
return q, r
@cythonized("u,df,dg,dr,j")
def dmp_ff_div(f, g, u, K):
"""
Polynomial division with remainder over a field.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.densearith import dmp_ff_div
>>> f = QQ.map([[1], [1, 0], []])
>>> g = QQ.map([[2], [2]])
>>> dmp_ff_div(f, g, 1, QQ)
([[1/2], [1/2, -1/2]], [[-1/1, 1/1]])
"""
if not u:
return dup_ff_div(f, g, K)
df = dmp_degree(f, u)
dg = dmp_degree(g, u)
if dg < 0:
raise ZeroDivisionError("polynomial division")
q, r = dmp_zero(u), f
if df < dg:
return q, r
lc_g, v = dmp_LC(g, K), u-1
while True:
dr = dmp_degree(r, u)
if dr < dg:
break
lc_r = dmp_LC(r, K)
c, R = dmp_ff_div(lc_r, lc_g, v, K)
if not dmp_zero_p(R, v):
break
j = dr - dg
q = dmp_add_term(q, c, j, u, K)
h = dmp_mul_term(g, c, j, u, K)
r = dmp_sub(r, h, u, K)
return q, r
def dup_div(f, g, K):
"""
Polynomial division with remainder in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy.polys.densearith import dup_div
>>> f = ZZ.map([1, 0, 1])
>>> g = ZZ.map([2, -4])
>>> dup_div(f, g, ZZ)
([], [1, 0, 1])
>>> f = QQ.map([1, 0, 1])
>>> g = QQ.map([2, -4])
>>> dup_div(f, g, QQ)
([1/2, 1/1], [5/1])
"""
if K.has_Field or not K.is_Exact:
return dup_ff_div(f, g, K)
else:
return dup_rr_div(f, g, K)
def dup_rem(f, g, K):
"""
Returns polynomial remainder in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy.polys.densearith import dup_rem
>>> f = ZZ.map([1, 0, 1])
>>> g = ZZ.map([2, -4])
>>> dup_rem(f, g, ZZ)
[1, 0, 1]
>>> f = QQ.map([1, 0, 1])
>>> g = QQ.map([2, -4])
>>> dup_rem(f, g, QQ)
[5/1]
"""
return dup_div(f, g, K)[1]
def dup_quo(f, g, K):
    """
    Returns polynomial quotient in ``K[x]``.

    The remainder, if any, is silently discarded; use ``dup_exquo``
    when division must be exact.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ, QQ
    >>> from sympy.polys.densearith import dup_quo
    >>> f = ZZ.map([1, 0, 1])
    >>> g = ZZ.map([2, -4])
    >>> dup_quo(f, g, ZZ)
    []
    >>> f = QQ.map([1, 0, 1])
    >>> g = QQ.map([2, -4])
    >>> dup_quo(f, g, QQ)
    [1/2, 1/1]

    """
    return dup_div(f, g, K)[0]
def dup_exquo(f, g, K):
    """
    Returns exact polynomial quotient in ``K[x]``.

    Raises ``ExactQuotientFailed`` when ``g`` does not divide ``f``
    exactly (i.e. when the remainder is non-zero).

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dup_exquo
    >>> f = ZZ.map([1, 0, -1])
    >>> g = ZZ.map([1, -1])
    >>> dup_exquo(f, g, ZZ)
    [1, 1]
    >>> f = ZZ.map([1, 0, 1])
    >>> g = ZZ.map([2, -4])
    >>> dup_exquo(f, g, ZZ)
    Traceback (most recent call last):
    ...
    ExactQuotientFailed: [2, -4] does not divide [1, 0, 1]

    """
    q, r = dup_div(f, g, K)
    if not r:
        return q
    else:
        raise ExactQuotientFailed(f, g)
@cythonized("u")
def dmp_div(f, g, u, K):
    """
    Polynomial division with remainder in ``K[X]``.

    Dispatches to field division when ``K`` is a field (or an inexact
    domain) and to ring division otherwise.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ, QQ
    >>> from sympy.polys.densearith import dmp_div
    >>> f = ZZ.map([[1], [1, 0], []])
    >>> g = ZZ.map([[2], [2]])
    >>> dmp_div(f, g, 1, ZZ)
    ([[]], [[1], [1, 0], []])
    >>> f = QQ.map([[1], [1, 0], []])
    >>> g = QQ.map([[2], [2]])
    >>> dmp_div(f, g, 1, QQ)
    ([[1/2], [1/2, -1/2]], [[-1/1, 1/1]])

    """
    if K.has_Field or not K.is_Exact:
        return dmp_ff_div(f, g, u, K)
    else:
        return dmp_rr_div(f, g, u, K)
@cythonized("u")
def dmp_rem(f, g, u, K):
    """
    Returns polynomial remainder in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ, QQ
    >>> from sympy.polys.densearith import dmp_rem
    >>> f = ZZ.map([[1], [1, 0], []])
    >>> g = ZZ.map([[2], [2]])
    >>> dmp_rem(f, g, 1, ZZ)
    [[1], [1, 0], []]
    >>> f = QQ.map([[1], [1, 0], []])
    >>> g = QQ.map([[2], [2]])
    >>> dmp_rem(f, g, 1, QQ)
    [[-1/1, 1/1]]

    """
    # Thin wrapper: keep only the remainder from dmp_div.
    return dmp_div(f, g, u, K)[1]
@cythonized("u")
def dmp_quo(f, g, u, K):
    """
    Returns polynomial quotient in ``K[X]``.

    The remainder, if any, is silently discarded; use ``dmp_exquo``
    when division must be exact.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ, QQ
    >>> from sympy.polys.densearith import dmp_quo
    >>> f = ZZ.map([[1], [1, 0], []])
    >>> g = ZZ.map([[2], [2]])
    >>> dmp_quo(f, g, 1, ZZ)
    [[]]
    >>> f = QQ.map([[1], [1, 0], []])
    >>> g = QQ.map([[2], [2]])
    >>> dmp_quo(f, g, 1, QQ)
    [[1/2], [1/2, -1/2]]

    """
    return dmp_div(f, g, u, K)[0]
@cythonized("u")
def dmp_exquo(f, g, u, K):
    """
    Returns exact polynomial quotient in ``K[X]``.

    Raises ``ExactQuotientFailed`` when ``g`` does not divide ``f``
    exactly (i.e. when the remainder is non-zero).

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dmp_exquo
    >>> f = ZZ.map([[1], [1, 0], []])
    >>> g = ZZ.map([[1], [1, 0]])
    >>> h = ZZ.map([[2], [2]])
    >>> dmp_exquo(f, g, 1, ZZ)
    [[1], []]
    >>> dmp_exquo(f, h, 1, ZZ)
    Traceback (most recent call last):
    ...
    ExactQuotientFailed: [[2], [2]] does not divide [[1], [1, 0], []]

    """
    q, r = dmp_div(f, g, u, K)
    if dmp_zero_p(r, u):
        return q
    else:
        raise ExactQuotientFailed(f, g)
def dup_max_norm(f, K):
    """
    Returns maximum norm of a polynomial in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dup_max_norm
    >>> f = ZZ.map([-1, 2, -3])
    >>> dup_max_norm(f, ZZ)
    3

    """
    # The zero polynomial (empty list) has norm zero; otherwise take
    # the largest absolute value among the coefficients.
    if f:
        return max(dup_abs(f, K))
    return K.zero
@cythonized("u,v")
def dmp_max_norm(f, u, K):
    """
    Returns maximum norm of a polynomial in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dmp_max_norm
    >>> f = ZZ.map([[2, -1], [-3]])
    >>> dmp_max_norm(f, 1, ZZ)
    3

    """
    if not u:
        return dup_max_norm(f, K)
    # Recurse one variable down and keep the largest coefficient norm.
    return max(dmp_max_norm(coeff, u - 1, K) for coeff in f)
def dup_l1_norm(f, K):
    """
    Returns l1 norm of a polynomial in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dup_l1_norm
    >>> f = ZZ.map([2, -3, 0, 1])
    >>> dup_l1_norm(f, ZZ)
    6

    """
    # The zero polynomial (empty list) has norm zero; otherwise sum
    # the absolute values of the coefficients.
    if f:
        return sum(dup_abs(f, K))
    return K.zero
@cythonized("u,v")
def dmp_l1_norm(f, u, K):
    """
    Returns l1 norm of a polynomial in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dmp_l1_norm
    >>> f = ZZ.map([[2, -1], [-3]])
    >>> dmp_l1_norm(f, 1, ZZ)
    6

    """
    if not u:
        return dup_l1_norm(f, K)
    # Recurse one variable down and add up the coefficient norms.
    return sum(dmp_l1_norm(coeff, u - 1, K) for coeff in f)
def dup_expand(polys, K):
    """
    Multiply together several polynomials in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dup_expand
    >>> f = ZZ.map([1, 0, -1])
    >>> g = ZZ.map([1, 0])
    >>> h = ZZ.map([2])
    >>> dup_expand([f, g, h], ZZ)
    [2, 0, -2, 0]

    """
    # An empty product is the multiplicative identity.
    if not polys:
        return [K.one]
    result = polys[0]
    for poly in polys[1:]:
        result = dup_mul(result, poly, K)
    return result
@cythonized("u")
def dmp_expand(polys, u, K):
    """
    Multiply together several polynomials in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dmp_expand
    >>> f = ZZ.map([[1], [], [1, 0, 0]])
    >>> g = ZZ.map([[1], [1]])
    >>> dmp_expand([f, g], 1, ZZ)
    [[1], [1], [1, 0, 0], [1, 0, 0]]

    """
    # An empty product is the multiplicative identity.
    if not polys:
        return dmp_one(u, K)
    result = polys[0]
    for poly in polys[1:]:
        result = dmp_mul(result, poly, u, K)
    return result
| {
"repo_name": "srjoglekar246/sympy",
"path": "sympy/polys/densearith.py",
"copies": "2",
"size": "37290",
"license": "bsd-3-clause",
"hash": 7229165384753978000,
"line_mean": 18.0255102041,
"line_max": 75,
"alpha_frac": 0.4542236525,
"autogenerated": false,
"ratio": 2.748581115943097,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4202804768443097,
"avg_score": null,
"num_lines": null
} |
"""Arithmetics for dense recursive polynomials in ``K[x]`` or ``K[X]``. """
from sympy.polys.densebasic import (
dup_LC, dup_TC, dmp_LC,
dup_degree, dmp_degree,
dup_normal, dmp_normal,
dup_strip, dmp_strip,
dmp_zero_p, dmp_zero,
dmp_one_p, dmp_one,
dmp_ground, dmp_zeros)
from sympy.polys.polyerrors import (
ExactQuotientFailed)
from sympy.utilities import cythonized
@cythonized("i,n,m")
def dup_add_term(f, c, i, K):
    """
    Add ``c*x**i`` to ``f`` in ``K[x]``.

    **Examples**

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dup_add_term
    >>> f = ZZ.map([1, 0, -1])
    >>> dup_add_term(f, ZZ(2), 4, ZZ)
    [2, 0, 1, 0, -1]

    """
    if not c:
        # Adding a zero term leaves f unchanged.
        return f
    n = len(f)
    # Coefficients are stored highest-degree first, so the x**i
    # coefficient (when present) lives at index n - i - 1.
    m = n-i-1
    if i == n-1:
        # The new term hits the leading coefficient, which may cancel,
        # so the result must be stripped of leading zeros.
        return dup_strip([f[0]+c] + f[1:])
    else:
        if i >= n:
            # Degree grows: prepend c and pad the gap with zeros.
            return [c] + [K.zero]*(i-n) + f
        else:
            # Interior term: no leading-coefficient cancellation possible.
            return f[:m] + [f[m]+c] + f[m+1:]
@cythonized("i,u,v,n,m")
def dmp_add_term(f, c, i, u, K):
    """
    Add ``c(x_2..x_u)*x_0**i`` to ``f`` in ``K[X]``.

    ``c`` is a polynomial in one variable less than ``f`` (level ``u-1``).

    **Examples**

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dmp_add_term
    >>> f = ZZ.map([[1, 0], [1]])
    >>> c = ZZ.map([2])
    >>> dmp_add_term(f, c, 2, 1, ZZ)
    [[2], [1, 0], [1]]

    """
    if not u:
        # Ground case: univariate addition of a term.
        return dup_add_term(f, c, i, K)
    v = u-1
    if dmp_zero_p(c, v):
        # Adding a zero coefficient leaves f unchanged.
        return f
    n = len(f)
    # Coefficient of x_0**i (when present) lives at index n - i - 1.
    m = n-i-1
    if i == n-1:
        # Leading coefficient may cancel, so strip the result.
        return dmp_strip([dmp_add(f[0], c, v, K)] + f[1:], u)
    else:
        if i >= n:
            # Degree grows: prepend c and pad the gap with zeros.
            return [c] + dmp_zeros(i-n, v, K) + f
        else:
            # Interior term: no leading-coefficient cancellation possible.
            return f[:m] + [dmp_add(f[m], c, v, K)] + f[m+1:]
@cythonized("i,n,m")
def dup_sub_term(f, c, i, K):
    """
    Subtract ``c*x**i`` from ``f`` in ``K[x]``.

    **Examples**

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dup_sub_term
    >>> f = ZZ.map([2, 0, 1, 0, -1])
    >>> dup_sub_term(f, ZZ(2), 4, ZZ)
    [1, 0, -1]

    """
    if not c:
        # Subtracting a zero term leaves f unchanged.
        return f
    n = len(f)
    # Coefficient of x**i (when present) lives at index n - i - 1.
    m = n-i-1
    if i == n-1:
        # Leading coefficient may cancel, so strip the result.
        return dup_strip([f[0]-c] + f[1:])
    else:
        if i >= n:
            # Degree grows: prepend -c and pad the gap with zeros.
            return [-c] + [K.zero]*(i-n) + f
        else:
            # Interior term: no leading-coefficient cancellation possible.
            return f[:m] + [f[m]-c] + f[m+1:]
@cythonized("i,u,v,n,m")
def dmp_sub_term(f, c, i, u, K):
    """
    Subtract ``c(x_2..x_u)*x_0**i`` from ``f`` in ``K[X]``.

    ``c`` is a polynomial in one variable less than ``f`` (level ``u-1``).

    **Examples**

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dmp_sub_term
    >>> f = ZZ.map([[2], [1, 0], [1]])
    >>> c = ZZ.map([2])
    >>> dmp_sub_term(f, c, 2, 1, ZZ)
    [[1, 0], [1]]

    """
    if not u:
        # Ground case: subtraction is addition of the negated term.
        return dup_add_term(f, -c, i, K)
    v = u-1
    if dmp_zero_p(c, v):
        # Subtracting a zero coefficient leaves f unchanged.
        return f
    n = len(f)
    # Coefficient of x_0**i (when present) lives at index n - i - 1.
    m = n-i-1
    if i == n-1:
        # Leading coefficient may cancel, so strip the result.
        return dmp_strip([dmp_sub(f[0], c, v, K)] + f[1:], u)
    else:
        if i >= n:
            # Degree grows: prepend the negated c and pad with zeros.
            return [dmp_neg(c, v, K)] + dmp_zeros(i-n, v, K) + f
        else:
            # Interior term: no leading-coefficient cancellation possible.
            return f[:m] + [dmp_sub(f[m], c, v, K)] + f[m+1:]
@cythonized("i")
def dup_mul_term(f, c, i, K):
"""
Multiply ``f`` by ``c*x**i`` in ``K[x]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_mul_term
>>> f = ZZ.map([1, 0, -1])
>>> dup_mul_term(f, ZZ(3), 2, ZZ)
[3, 0, -3, 0, 0]
"""
if not c or not f:
return []
else:
return [ cf * c for cf in f ] + [K.zero]*i
@cythonized("i,u,v")
def dmp_mul_term(f, c, i, u, K):
    """
    Multiply ``f`` by ``c(x_2..x_u)*x_0**i`` in ``K[X]``.

    ``c`` is a polynomial in one variable less than ``f`` (level ``u-1``).

    **Examples**

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dmp_mul_term
    >>> f = ZZ.map([[1, 0], [1], []])
    >>> c = ZZ.map([3, 0])
    >>> dmp_mul_term(f, c, 2, 1, ZZ)
    [[3, 0, 0], [3, 0], [], [], []]

    """
    if not u:
        # Ground case: univariate term multiplication.
        return dup_mul_term(f, c, i, K)
    v = u-1
    if dmp_zero_p(f, u):
        # 0 * anything == 0; return f itself (already a zero of level u).
        return f
    if dmp_zero_p(c, v):
        # anything * 0 == 0 at level u.
        return dmp_zero(u)
    else:
        # Scale every coefficient by c, then shift degrees up by i.
        return [ dmp_mul(cf, c, v, K) for cf in f ] + dmp_zeros(i, v, K)
def dup_add_ground(f, c, K):
"""
Add an element of the ground domain to ``f``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_add_ground
>>> f = ZZ.map([1, 2, 3, 4])
>>> dup_add_ground(f, ZZ(4), ZZ)
[1, 2, 3, 8]
"""
return dup_add_term(f, c, 0, K)
def dmp_add_ground(f, c, u, K):
"""
Add an element of the ground domain to ``f``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_add_ground
>>> f = ZZ.map([[1], [2], [3], [4]])
>>> dmp_add_ground(f, ZZ(4), 1, ZZ)
[[1], [2], [3], [8]]
"""
return dmp_add_term(f, dmp_ground(c, u-1), 0, u, K)
def dup_sub_ground(f, c, K):
"""
Subtract an element of the ground domain from ``f``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_sub_ground
>>> f = ZZ.map([1, 2, 3, 4])
>>> dup_sub_ground(f, ZZ(4), ZZ)
[1, 2, 3, 0]
"""
return dup_sub_term(f, c, 0, K)
def dmp_sub_ground(f, c, u, K):
"""
Subtract an element of the ground domain from ``f``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_sub_ground
>>> f = ZZ.map([[1], [2], [3], [4]])
>>> dmp_sub_ground(f, ZZ(4), 1, ZZ)
[[1], [2], [3], []]
"""
return dmp_sub_term(f, dmp_ground(c, u-1), 0, u, K)
def dup_mul_ground(f, c, K):
    """
    Multiply ``f`` by a constant value in ``K[x]``.

    **Examples**

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dup_mul_ground
    >>> f = ZZ.map([1, 2, -1])
    >>> dup_mul_ground(f, ZZ(3), ZZ)
    [3, 6, -3]

    """
    # Zero constant or zero polynomial gives the zero polynomial.
    if f and c:
        return [ coeff * c for coeff in f ]
    return []
@cythonized("u,v")
def dmp_mul_ground(f, c, u, K):
"""
Multiply ``f`` by a constant value in ``K[X]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_mul_ground
>>> f = ZZ.map([[2], [2, 0]])
>>> dmp_mul_ground(f, ZZ(3), 1, ZZ)
[[6], [6, 0]]
"""
if not u:
return dup_mul_ground(f, c, K)
v = u-1
return [ dmp_mul_ground(cf, c, v, K) for cf in f ]
def dup_quo_ground(f, c, K):
    """
    Quotient by a constant in ``K[x]``.

    Raises ``ZeroDivisionError`` when ``c`` is zero.

    **Examples**

    >>> from sympy.polys.domains import QQ
    >>> from sympy.polys.densearith import dup_quo_ground
    >>> f = QQ.map([1, 0, 2])
    >>> dup_quo_ground(f, QQ(2), QQ)
    [1/2, 0/1, 1/1]

    """
    if not c:
        raise ZeroDivisionError('polynomial division')
    if f:
        quo = K.quo
        return [ quo(coeff, c) for coeff in f ]
    # Zero polynomial divided by anything non-zero stays zero.
    return f
@cythonized("u,v")
def dmp_quo_ground(f, c, u, K):
"""
Quotient by a constant in ``K[X]``.
**Examples**
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.densearith import dmp_quo_ground
>>> f = QQ.map([[1, 0], [2], []])
>>> dmp_quo_ground(f, QQ(2), 1, QQ)
[[1/2, 0/1], [1/1], []]
"""
if not u:
return dup_quo_ground(f, c, K)
v = u-1
return [ dmp_quo_ground(cf, c, v, K) for cf in f ]
def dup_exquo_ground(f, c, K):
"""
Exact quotient by a constant in ``K[x]``.
**Examples**
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy.polys.densearith import dup_exquo_ground
>>> f = ZZ.map([3, 0, 2])
>>> g = QQ.map([3, 0, 2])
>>> dup_exquo_ground(f, ZZ(2), ZZ)
[1, 0, 1]
>>> dup_exquo_ground(g, QQ(2), QQ)
[3/2, 0/1, 1/1]
"""
if not c:
raise ZeroDivisionError('polynomial division')
if not f:
return f
if K.has_Field or not K.is_Exact:
return [ K.quo(cf, c) for cf in f ]
else:
return [ cf // c for cf in f ]
@cythonized("u,v")
def dmp_exquo_ground(f, c, u, K):
"""
Exact quotient by a constant in ``K[X]``.
**Examples**
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy.polys.densearith import dmp_exquo_ground
>>> f = ZZ.map([[2, 0], [3], []])
>>> g = QQ.map([[2, 0], [3], []])
>>> dmp_exquo_ground(f, ZZ(2), 1, ZZ)
[[1, 0], [1], []]
>>> dmp_exquo_ground(g, QQ(2), 1, QQ)
[[1/1, 0/1], [3/2], []]
"""
if not u:
return dup_exquo_ground(f, c, K)
v = u-1
return [ dmp_exquo_ground(cf, c, v, K) for cf in f ]
@cythonized("n")
def dup_lshift(f, n, K):
"""
Efficiently multiply ``f`` by ``x**n`` in ``K[x]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_lshift
>>> f = ZZ.map([1, 0, 1])
>>> dup_lshift(f, 2, ZZ)
[1, 0, 1, 0, 0]
"""
if not f:
return f
else:
return f + [K.zero]*n
@cythonized("n")
def dup_rshift(f, n, K):
"""
Efficiently divide ``f`` by ``x**n`` in ``K[x]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_rshift
>>> f = ZZ.map([1, 0, 1, 0, 0])
>>> g = ZZ.map([1, 0, 1, 0, 2])
>>> dup_rshift(f, 2, ZZ)
[1, 0, 1]
>>> dup_rshift(g, 2, ZZ)
[1, 0, 1]
"""
return f[:-n]
def dup_abs(f, K):
    """
    Make all coefficients positive in ``K[x]``.

    **Examples**

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dup_abs
    >>> f = ZZ.map([1, 0, -1])
    >>> dup_abs(f, ZZ)
    [1, 0, 1]

    """
    # Apply the domain's absolute value to every coefficient.
    return list(map(K.abs, f))
@cythonized("u,v")
def dmp_abs(f, u, K):
"""
Make all coefficients positive in ``K[X]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_abs
>>> f = ZZ.map([[1, 0], [-1], []])
>>> dmp_abs(f, 1, ZZ)
[[1, 0], [1], []]
"""
if not u:
return dup_abs(f, K)
v = u-1
return [ dmp_abs(cf, v, K) for cf in f ]
def dup_neg(f, K):
    """
    Negate a polynomial in ``K[x]``.

    **Examples**

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dup_neg
    >>> f = ZZ.map([1, 0, -1])
    >>> dup_neg(f, ZZ)
    [-1, 0, 1]

    """
    # Negate each coefficient in turn.
    result = []
    for coeff in f:
        result.append(-coeff)
    return result
@cythonized("u,v")
def dmp_neg(f, u, K):
    """
    Negate a polynomial in ``K[X]``.

    **Examples**

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dmp_neg
    >>> f = ZZ.map([[1, 0], [-1], []])
    >>> dmp_neg(f, 1, ZZ)
    [[-1, 0], [1], []]

    """
    if not u:
        # Ground case: univariate negation.
        return dup_neg(f, K)
    v = u-1
    # Use the precomputed level v instead of re-evaluating u-1 for
    # every coefficient (the local was previously computed but unused).
    return [ dmp_neg(cf, v, K) for cf in f ]
@cythonized("df,dg,k")
def dup_add(f, g, K):
"""
Add dense polynomials in ``K[x]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_add
>>> f = ZZ.map([1, 0, -1])
>>> g = ZZ.map([1, -2])
>>> dup_add(f, g, ZZ)
[1, 1, -3]
"""
if not f:
return g
if not g:
return f
df = dup_degree(f)
dg = dup_degree(g)
if df == dg:
return dup_strip([ a + b for a, b in zip(f, g) ])
else:
k = abs(df - dg)
if df > dg:
h, f = f[:k], f[k:]
else:
h, g = g[:k], g[k:]
return h + [ a + b for a, b in zip(f, g) ]
@cythonized("u,v,df,dg,k")
def dmp_add(f, g, u, K):
"""
Add dense polynomials in ``K[X]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_add
>>> f = ZZ.map([[1], [], [1, 0]])
>>> g = ZZ.map([[1, 0], [1], []])
>>> dmp_add(f, g, 1, ZZ)
[[1, 1], [1], [1, 0]]
"""
if not u:
return dup_add(f, g, K)
df = dmp_degree(f, u)
if df < 0:
return g
dg = dmp_degree(g, u)
if dg < 0:
return f
v = u-1
if df == dg:
return dmp_strip([ dmp_add(a, b, v, K) for a, b in zip(f, g) ], u)
else:
k = abs(df - dg)
if df > dg:
h, f = f[:k], f[k:]
else:
h, g = g[:k], g[k:]
return h + [ dmp_add(a, b, v, K) for a, b in zip(f, g) ]
@cythonized("df,dg,k")
def dup_sub(f, g, K):
"""
Subtract dense polynomials in ``K[x]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_sub
>>> f = ZZ.map([1, 0, -1])
>>> g = ZZ.map([1, -2])
>>> dup_sub(f, g, ZZ)
[1, -1, 1]
"""
if not f:
return dup_neg(g, K)
if not g:
return f
df = dup_degree(f)
dg = dup_degree(g)
if df == dg:
return dup_strip([ a - b for a, b in zip(f, g) ])
else:
k = abs(df - dg)
if df > dg:
h, f = f[:k], f[k:]
else:
h, g = dup_neg(g[:k], K), g[k:]
return h + [ a - b for a, b in zip(f, g) ]
@cythonized("u,v,df,dg,k")
def dmp_sub(f, g, u, K):
"""
Subtract dense polynomials in ``K[X]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_sub
>>> f = ZZ.map([[1], [], [1, 0]])
>>> g = ZZ.map([[1, 0], [1], []])
>>> dmp_sub(f, g, 1, ZZ)
[[-1, 1], [-1], [1, 0]]
"""
if not u:
return dup_sub(f, g, K)
df = dmp_degree(f, u)
if df < 0:
return dmp_neg(g, u, K)
dg = dmp_degree(g, u)
if dg < 0:
return f
v = u-1
if df == dg:
return dmp_strip([ dmp_sub(a, b, v, K) for a, b in zip(f, g) ], u)
else:
k = abs(df - dg)
if df > dg:
h, f = f[:k], f[k:]
else:
h, g = dmp_neg(g[:k], u, K), g[k:]
return h + [ dmp_sub(a, b, v, K) for a, b in zip(f, g) ]
def dup_add_mul(f, g, h, K):
"""
Returns ``f + g*h`` where ``f, g, h`` are in ``K[x]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_add_mul
>>> f = ZZ.map([1, 0, -1])
>>> g = ZZ.map([1, -2])
>>> h = ZZ.map([1, 2])
>>> dup_add_mul(f, g, h, ZZ)
[2, 0, -5]
"""
return dup_add(f, dup_mul(g, h, K), K)
@cythonized("u")
def dmp_add_mul(f, g, h, u, K):
"""
Returns ``f + g*h`` where ``f, g, h`` are in ``K[X]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_add_mul
>>> f = ZZ.map([[1], [], [1, 0]])
>>> g = ZZ.map([[1], []])
>>> h = ZZ.map([[1], [2]])
>>> dmp_add_mul(f, g, h, 1, ZZ)
[[2], [2], [1, 0]]
"""
return dmp_add(f, dmp_mul(g, h, u, K), u, K)
def dup_sub_mul(f, g, h, K):
"""
Returns ``f - g*h`` where ``f, g, h`` are in ``K[x]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_sub_mul
>>> f = ZZ.map([1, 0, -1])
>>> g = ZZ.map([1, -2])
>>> h = ZZ.map([1, 2])
>>> dup_sub_mul(f, g, h, ZZ)
[3]
"""
return dup_sub(f, dup_mul(g, h, K), K)
@cythonized("u")
def dmp_sub_mul(f, g, h, u, K):
"""
Returns ``f - g*h`` where ``f, g, h`` are in ``K[X]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_sub_mul
>>> f = ZZ.map([[1], [], [1, 0]])
>>> g = ZZ.map([[1], []])
>>> h = ZZ.map([[1], [2]])
>>> dmp_sub_mul(f, g, h, 1, ZZ)
[[-2], [1, 0]]
"""
return dmp_sub(f, dmp_mul(g, h, u, K), u, K)
@cythonized("df,dg,i,j")
def dup_mul(f, g, K):
"""
Multiply dense polynomials in ``K[x]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_mul
>>> f = ZZ.map([1, -2])
>>> g = ZZ.map([1, 2])
>>> dup_mul(f, g, ZZ)
[1, 0, -4]
"""
if f == g:
return dup_sqr(f, K)
if not (f and g):
return []
df = dup_degree(f)
dg = dup_degree(g)
h = []
for i in xrange(0, df+dg+1):
coeff = K.zero
for j in xrange(max(0, i-dg), min(df, i)+1):
coeff += f[j]*g[i-j]
h.append(coeff)
return dup_strip(h)
@cythonized("u,v,df,dg,i,j")
def dmp_mul(f, g, u, K):
"""
Multiply dense polynomials in ``K[X]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_mul
>>> f = ZZ.map([[1, 0], [1]])
>>> g = ZZ.map([[1], []])
>>> dmp_mul(f, g, 1, ZZ)
[[1, 0], [1], []]
"""
if not u:
return dup_mul(f, g, K)
if f == g:
return dmp_sqr(f, u, K)
df = dmp_degree(f, u)
if df < 0:
return f
dg = dmp_degree(g, u)
if dg < 0:
return g
h, v = [], u-1
for i in xrange(0, df+dg+1):
coeff = dmp_zero(v)
for j in xrange(max(0, i-dg), min(df, i)+1):
coeff = dmp_add(coeff, dmp_mul(f[j], g[i-j], v, K), v, K)
h.append(coeff)
return dmp_strip(h, u)
@cythonized("df,jmin,jmax,n,i,j")
def dup_sqr(f, K):
    """
    Square dense polynomials in ``K[x]``.

    Exploits the symmetry ``f[j]*f[i-j] == f[i-j]*f[j]`` to roughly halve
    the number of coefficient multiplications versus ``dup_mul(f, f, K)``.

    **Examples**

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dup_sqr
    >>> f = ZZ.map([1, 0, 1])
    >>> dup_sqr(f, ZZ)
    [1, 0, 2, 0, 1]

    """
    df, h = dup_degree(f), []

    for i in xrange(0, 2*df+1):
        c = K.zero

        # Indices j whose products f[j]*f[i-j] contribute to degree i.
        jmin = max(0, i-df)
        jmax = min(i, df)

        n = jmax - jmin + 1

        # Sum only the lower half of the symmetric range ...
        jmax = jmin + n // 2 - 1

        for j in xrange(jmin, jmax+1):
            c += f[j]*f[i-j]

        # ... then double it to account for the mirrored products.
        c += c

        if n & 1:
            # Odd count: the middle product f[m]*f[m] appears only once.
            elem = f[jmax+1]

            c += elem**2

        h.append(c)

    return dup_strip(h)
@cythonized("u,v,df,jmin,jmax,n,i,j")
def dmp_sqr(f, u, K):
"""
Square dense polynomials in ``K[X]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_sqr
>>> f = ZZ.map([[1], [1, 0], [1, 0, 0]])
>>> dmp_sqr(f, 1, ZZ)
[[1], [2, 0], [3, 0, 0], [2, 0, 0, 0], [1, 0, 0, 0, 0]]
"""
if not u:
return dup_sqr(f, K)
df = dmp_degree(f, u)
if df < 0:
return f
h, v = [], u-1
for i in xrange(0, 2*df+1):
c = dmp_zero(v)
jmin = max(0, i-df)
jmax = min(i, df)
n = jmax - jmin + 1
jmax = jmin + n // 2 - 1
for j in xrange(jmin, jmax+1):
c = dmp_add(c, dmp_mul(f[j], f[i-j], v, K), v, K)
c = dmp_mul_ground(c, K(2), v, K)
if n & 1:
elem = dmp_sqr(f[jmax+1], v, K)
c = dmp_add(c, elem, v, K)
h.append(c)
return dmp_strip(h, u)
@cythonized("n,m")
def dup_pow(f, n, K):
    """
    Raise ``f`` to the ``n``-th power in ``K[x]``.

    Uses binary (square-and-multiply) exponentiation, so the number of
    polynomial multiplications is O(log n).  Raises ``ValueError`` for
    negative exponents.

    **Examples**

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dup_pow
    >>> dup_pow([ZZ(1), -ZZ(2)], 3, ZZ)
    [1, -6, 12, -8]

    """
    if not n:
        # f**0 == 1 by convention (even for the zero polynomial).
        return [K.one]
    if n < 0:
        raise ValueError("can't raise polynomial to a negative power")

    # Trivial bases: f**1, 0**n (n > 0) and 1**n are all f itself.
    if n == 1 or not f or f == [K.one]:
        return f

    g = [K.one]

    while True:
        # Consume the lowest bit of the exponent.
        n, m = n//2, n

        if m % 2:
            g = dup_mul(g, f, K)

            # The most significant bit is always set, so the loop
            # terminates here after that bit is consumed.
            if not n:
                break

        f = dup_sqr(f, K)

    return g
@cythonized("u,n,m")
def dmp_pow(f, n, u, K):
"""
Raise ``f`` to the ``n``-th power in ``K[X]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_pow
>>> f = ZZ.map([[1, 0], [1]])
>>> dmp_pow(f, 3, 1, ZZ)
[[1, 0, 0, 0], [3, 0, 0], [3, 0], [1]]
"""
if not u:
return dup_pow(f, n, K)
if not n:
return dmp_one(u, K)
if n < 0:
raise ValueError("can't raise polynomial to a negative power")
if n == 1 or dmp_zero_p(f, u) or dmp_one_p(f, u, K):
return f
g = dmp_one(u, K)
while True:
n, m = n//2, n
if m & 1:
g = dmp_mul(g, f, u, K)
if not n:
break
f = dmp_sqr(f, u, K)
return g
@cythonized("df,dg,dr,N,j")
def dup_pdiv(f, g, K):
    """
    Polynomial pseudo-division in ``K[x]``.

    Computes ``q`` and ``r`` such that ``lc(g)**(df - dg + 1)*f = q*g + r``,
    which avoids coefficient division and so works over any ring.
    Raises ``ZeroDivisionError`` when ``g`` is zero.

    **Examples**

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dup_pdiv
    >>> f = ZZ.map([1, 0, 1])
    >>> g = ZZ.map([2, -4])
    >>> dup_pdiv(f, g, ZZ)
    ([2, 4], [20])

    """
    df = dup_degree(f)
    dg = dup_degree(g)

    q, r = [], f

    if not g:
        raise ZeroDivisionError("polynomial division")
    elif df < dg:
        return q, r

    # N counts the remaining powers of lc(g) still to be multiplied in.
    N = df - dg + 1
    lc_g = dup_LC(g, K)

    while True:
        dr = dup_degree(r)

        if dr < dg:
            break

        lc_r = dup_LC(r, K)
        j, N = dr-dg, N-1

        # Scale the partial quotient by lc(g) before adding the new term,
        # keeping the pseudo-division invariant.
        Q = dup_mul_ground(q, lc_g, K)
        q = dup_add_term(Q, lc_r, j, K)

        # Eliminate the leading term of r without dividing coefficients.
        R = dup_mul_ground(r, lc_g, K)
        G = dup_mul_term(g, lc_r, j, K)
        r = dup_sub(R, G, K)

    # Apply the left-over powers of lc(g) so the invariant holds exactly.
    c = lc_g**N

    q = dup_mul_ground(q, c, K)
    r = dup_mul_ground(r, c, K)

    return q, r
@cythonized("df,dg,dr,N,j")
def dup_prem(f, g, K):
"""
Polynomial pseudo-remainder in ``K[x]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_prem
>>> f = ZZ.map([1, 0, 1])
>>> g = ZZ.map([2, -4])
>>> dup_prem(f, g, ZZ)
[20]
"""
df = dup_degree(f)
dg = dup_degree(g)
r = f
if not g:
raise ZeroDivisionError("polynomial division")
elif df < dg:
return r
N = df - dg + 1
lc_g = dup_LC(g, K)
while True:
dr = dup_degree(r)
if dr < dg:
break
lc_r = dup_LC(r, K)
j, N = dr-dg, N-1
R = dup_mul_ground(r, lc_g, K)
G = dup_mul_term(g, lc_r, j, K)
r = dup_sub(R, G, K)
return dup_mul_ground(r, lc_g**N, K)
def dup_pquo(f, g, K):
"""
Polynomial pseudo-quotient in ``K[x]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_pquo
>>> f = ZZ.map([1, 0, -1])
>>> g = ZZ.map([2, -2])
>>> dup_pquo(f, g, ZZ)
[2, 2]
>>> f = ZZ.map([1, 0, 1])
>>> g = ZZ.map([2, -4])
>>> dup_pquo(f, g, ZZ)
Traceback (most recent call last):
...
ExactQuotientFailed: [2, -4] does not divide [1, 0, 1]
"""
q, r = dup_pdiv(f, g, K)
if not r:
return q
else:
raise ExactQuotientFailed(f, g)
def dup_pexquo(f, g, K):
"""
Polynomial exact pseudo-quotient in ``K[X]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_pexquo
>>> f = ZZ.map([1, 0, -1])
>>> g = ZZ.map([2, -2])
>>> dup_pexquo(f, g, ZZ)
[2, 2]
>>> f = ZZ.map([1, 0, 1])
>>> g = ZZ.map([2, -4])
>>> dup_pexquo(f, g, ZZ)
[2, 4]
"""
return dup_pdiv(f, g, K)[0]
@cythonized("u,df,dg,dr,N,j")
def dmp_pdiv(f, g, u, K):
"""
Polynomial pseudo-division in ``K[X]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_pdiv
>>> f = ZZ.map([[1], [1, 0], []])
>>> g = ZZ.map([[2], [2]])
>>> dmp_pdiv(f, g, 1, ZZ)
([[2], [2, -2]], [[-4, 4]])
"""
if not u:
return dup_pdiv(f, g, K)
df = dmp_degree(f, u)
dg = dmp_degree(g, u)
if dg < 0:
raise ZeroDivisionError("polynomial division")
q, r = dmp_zero(u), f
if df < dg:
return q, r
N = df - dg + 1
lc_g = dmp_LC(g, K)
while True:
dr = dmp_degree(r, u)
if dr < dg:
break
lc_r = dmp_LC(r, K)
j, N = dr-dg, N-1
Q = dmp_mul_term(q, lc_g, 0, u, K)
q = dmp_add_term(Q, lc_r, j, u, K)
R = dmp_mul_term(r, lc_g, 0, u, K)
G = dmp_mul_term(g, lc_r, j, u, K)
r = dmp_sub(R, G, u, K)
c = dmp_pow(lc_g, N, u-1, K)
q = dmp_mul_term(q, c, 0, u, K)
r = dmp_mul_term(r, c, 0, u, K)
return q, r
@cythonized("u,df,dg,dr,N,j")
def dmp_prem(f, g, u, K):
"""
Polynomial pseudo-remainder in ``K[X]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_prem
>>> f = ZZ.map([[1], [1, 0], []])
>>> g = ZZ.map([[2], [2]])
>>> dmp_prem(f, g, 1, ZZ)
[[-4, 4]]
"""
if not u:
return dup_prem(f, g, K)
df = dmp_degree(f, u)
dg = dmp_degree(g, u)
if dg < 0:
raise ZeroDivisionError("polynomial division")
r = f
if df < dg:
return r
N = df - dg + 1
lc_g = dmp_LC(g, K)
while True:
dr = dmp_degree(r, u)
if dr < dg:
break
lc_r = dmp_LC(r, K)
j, N = dr-dg, N-1
R = dmp_mul_term(r, lc_g, 0, u, K)
G = dmp_mul_term(g, lc_r, j, u, K)
r = dmp_sub(R, G, u, K)
c = dmp_pow(lc_g, N, u-1, K)
return dmp_mul_term(r, c, 0, u, K)
def dmp_pquo(f, g, u, K):
"""
Polynomial pseudo-quotient in ``K[X]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_pquo
>>> f = ZZ.map([[1], [1, 0], []])
>>> g = ZZ.map([[2], [2, 0]])
>>> h = ZZ.map([[2], [2]])
>>> dmp_pquo(f, g, 1, ZZ)
[[2], []]
>>> dmp_pquo(f, h, 1, ZZ)
Traceback (most recent call last):
...
ExactQuotientFailed: [[2], [2]] does not divide [[1], [1, 0], []]
"""
q, r = dmp_pdiv(f, g, u, K)
if dmp_zero_p(r, u):
return q
else:
raise ExactQuotientFailed(f, g)
def dmp_pexquo(f, g, u, K):
"""
Polynomial exact pseudo-quotient in ``K[X]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_pexquo
>>> f = ZZ.map([[1], [1, 0], []])
>>> g = ZZ.map([[2], [2, 0]])
>>> h = ZZ.map([[2], [2]])
>>> dmp_pexquo(f, g, 1, ZZ)
[[2], []]
>>> dmp_pexquo(f, h, 1, ZZ)
[[2], [2, -2]]
"""
return dmp_pdiv(f, g, u, K)[0]
@cythonized("df,dg,dr,j")
def dup_rr_div(f, g, K):
    """
    Univariate division with remainder over a ring.

    Division stops as soon as a leading coefficient of the remainder is
    not exactly divisible by ``lc(g)``, so the remainder may have degree
    >= deg(g).  Raises ``ZeroDivisionError`` when ``g`` is zero.

    **Examples**

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dup_rr_div
    >>> f = ZZ.map([1, 0, 1])
    >>> g = ZZ.map([2, -4])
    >>> dup_rr_div(f, g, ZZ)
    ([], [1, 0, 1])

    """
    df = dup_degree(f)
    dg = dup_degree(g)

    q, r = [], f

    if not g:
        raise ZeroDivisionError("polynomial division")
    elif df < dg:
        return q, r

    lc_g = dup_LC(g, K)

    while True:
        dr = dup_degree(r)

        if dr < dg:
            break

        lc_r = dup_LC(r, K)

        # Over a ring, stop as soon as the leading coefficient is not
        # exactly divisible -- K.exquo would fail below otherwise.
        if lc_r % lc_g:
            break

        c = K.exquo(lc_r, lc_g)
        j = dr - dg

        q = dup_add_term(q, c, j, K)
        h = dup_mul_term(g, c, j, K)
        r = dup_sub(r, h, K)

    return q, r
@cythonized("u,df,dg,dr,j")
def dmp_rr_div(f, g, u, K):
"""
Multivariate division with remainder over a ring.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dmp_rr_div
>>> f = ZZ.map([[1], [1, 0], []])
>>> g = ZZ.map([[2], [2]])
>>> dmp_rr_div(f, g, 1, ZZ)
([[]], [[1], [1, 0], []])
"""
if not u:
return dup_rr_div(f, g, K)
df = dmp_degree(f, u)
dg = dmp_degree(g, u)
if dg < 0:
raise ZeroDivisionError("polynomial division")
q, r = dmp_zero(u), f
if df < dg:
return q, r
lc_g, v = dmp_LC(g, K), u-1
while True:
dr = dmp_degree(r, u)
if dr < dg:
break
lc_r = dmp_LC(r, K)
c, R = dmp_rr_div(lc_r, lc_g, v, K)
if not dmp_zero_p(R, v):
break
j = dr - dg
q = dmp_add_term(q, c, j, u, K)
h = dmp_mul_term(g, c, j, u, K)
r = dmp_sub(r, h, u, K)
return q, r
@cythonized("df,dg,dr,j")
def dup_ff_div(f, g, K):
    """
    Polynomial division with remainder over a field.

    Raises ``ZeroDivisionError`` when ``g`` is zero.

    **Examples**

    >>> from sympy.polys.domains import QQ
    >>> from sympy.polys.densearith import dup_ff_div
    >>> f = QQ.map([1, 0, 1])
    >>> g = QQ.map([2, -4])
    >>> dup_ff_div(f, g, QQ)
    ([1/2, 1/1], [5/1])

    """
    df = dup_degree(f)
    dg = dup_degree(g)

    q, r = [], f

    if not g:
        raise ZeroDivisionError("polynomial division")
    elif df < dg:
        return q, r

    lc_g = dup_LC(g, K)

    while True:
        dr = dup_degree(r)

        if dr < dg:
            break

        lc_r = dup_LC(r, K)

        # Over a field the leading coefficients always divide exactly.
        c = K.exquo(lc_r, lc_g)
        j = dr - dg

        q = dup_add_term(q, c, j, K)
        h = dup_mul_term(g, c, j, K)
        r = dup_sub(r, h, K)

        if not K.is_Exact:
            # Inexact domains (e.g. floating point): renormalize so that
            # near-zero leading coefficients are stripped and the degree
            # of r actually drops, guaranteeing loop termination.
            r = dup_normal(r, K)

    return q, r
@cythonized("u,df,dg,dr,j")
def dmp_ff_div(f, g, u, K):
"""
Polynomial division with remainder over a field.
**Examples**
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.densearith import dmp_ff_div
>>> f = QQ.map([[1], [1, 0], []])
>>> g = QQ.map([[2], [2]])
>>> dmp_ff_div(f, g, 1, QQ)
([[1/2], [1/2, -1/2]], [[-1/1, 1/1]])
"""
if not u:
return dup_ff_div(f, g, K)
df = dmp_degree(f, u)
dg = dmp_degree(g, u)
if dg < 0:
raise ZeroDivisionError("polynomial division")
q, r = dmp_zero(u), f
if df < dg:
return q, r
lc_g, v = dmp_LC(g, K), u-1
while True:
dr = dmp_degree(r, u)
if dr < dg:
break
lc_r = dmp_LC(r, K)
c, R = dmp_ff_div(lc_r, lc_g, v, K)
if not dmp_zero_p(R, v):
break
j = dr - dg
q = dmp_add_term(q, c, j, u, K)
h = dmp_mul_term(g, c, j, u, K)
r = dmp_sub(r, h, u, K)
return q, r
def dup_div(f, g, K):
"""
Polynomial division with remainder in ``K[x]``.
**Examples**
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy.polys.densearith import dup_div
>>> f = ZZ.map([1, 0, 1])
>>> g = ZZ.map([2, -4])
>>> dup_div(f, g, ZZ)
([], [1, 0, 1])
>>> f = QQ.map([1, 0, 1])
>>> g = QQ.map([2, -4])
>>> dup_div(f, g, QQ)
([1/2, 1/1], [5/1])
"""
if K.has_Field or not K.is_Exact:
return dup_ff_div(f, g, K)
else:
return dup_rr_div(f, g, K)
def dup_rem(f, g, K):
"""
Returns polynomial remainder in ``K[x]``.
**Examples**
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy.polys.densearith import dup_rem
>>> f = ZZ.map([1, 0, 1])
>>> g = ZZ.map([2, -4])
>>> dup_rem(f, g, ZZ)
[1, 0, 1]
>>> f = QQ.map([1, 0, 1])
>>> g = QQ.map([2, -4])
>>> dup_rem(f, g, QQ)
[5/1]
"""
return dup_div(f, g, K)[1]
def dup_quo(f, g, K):
"""
Returns polynomial quotient in ``K[x]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_quo
>>> f = ZZ.map([1, 0, -1])
>>> g = ZZ.map([1, -1])
>>> dup_quo(f, g, ZZ)
[1, 1]
>>> f = ZZ.map([1, 0, 1])
>>> g = ZZ.map([2, -4])
>>> dup_quo(f, g, ZZ)
Traceback (most recent call last):
...
ExactQuotientFailed: [2, -4] does not divide [1, 0, 1]
"""
q, r = dup_div(f, g, K)
if not r:
return q
else:
raise ExactQuotientFailed(f, g)
def dup_exquo(f, g, K):
"""
Returns exact polynomial quotient in ``K[x]``.
**Examples**
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy.polys.densearith import dup_exquo
>>> f = ZZ.map([1, 0, 1])
>>> g = ZZ.map([2, -4])
>>> dup_exquo(f, g, ZZ)
[]
>>> f = QQ.map([1, 0, 1])
>>> g = QQ.map([2, -4])
>>> dup_exquo(f, g, QQ)
[1/2, 1/1]
"""
return dup_div(f, g, K)[0]
@cythonized("u")
def dmp_div(f, g, u, K):
"""
Polynomial division with remainder in ``K[X]``.
**Examples**
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy.polys.densearith import dmp_div
>>> f = ZZ.map([[1], [1, 0], []])
>>> g = ZZ.map([[2], [2]])
>>> dmp_div(f, g, 1, ZZ)
([[]], [[1], [1, 0], []])
>>> f = QQ.map([[1], [1, 0], []])
>>> g = QQ.map([[2], [2]])
>>> dmp_div(f, g, 1, QQ)
([[1/2], [1/2, -1/2]], [[-1/1, 1/1]])
"""
if K.has_Field or not K.is_Exact:
return dmp_ff_div(f, g, u, K)
else:
return dmp_rr_div(f, g, u, K)
@cythonized("u")
def dmp_rem(f, g, u, K):
    """
    Returns polynomial remainder in ``K[X]``.

    **Examples**

    >>> from sympy.polys.domains import ZZ, QQ
    >>> from sympy.polys.densearith import dmp_rem

    >>> f = ZZ.map([[1], [1, 0], []])
    >>> g = ZZ.map([[2], [2]])

    >>> dmp_rem(f, g, 1, ZZ)
    [[1], [1, 0], []]

    >>> f = QQ.map([[1], [1, 0], []])
    >>> g = QQ.map([[2], [2]])

    >>> dmp_rem(f, g, 1, QQ)
    [[-1/1, 1/1]]

    """
    # Perform full division and keep only the remainder part.
    _, r = dmp_div(f, g, u, K)
    return r
@cythonized("u")
def dmp_quo(f, g, u, K):
    """
    Returns polynomial quotient in ``K[X]``.

    Raises :class:`ExactQuotientFailed` when ``g`` does not divide ``f``
    exactly.

    **Examples**

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dmp_quo

    >>> f = ZZ.map([[1], [1, 0], []])
    >>> g = ZZ.map([[1], [1, 0]])
    >>> h = ZZ.map([[2], [2]])

    >>> dmp_quo(f, g, 1, ZZ)
    [[1], []]

    >>> dmp_quo(f, h, 1, ZZ)
    Traceback (most recent call last):
    ...
    ExactQuotientFailed: [[2], [2]] does not divide [[1], [1, 0], []]

    """
    q, r = dmp_div(f, g, u, K)

    # A non-zero remainder means the division is not exact.
    if not dmp_zero_p(r, u):
        raise ExactQuotientFailed(f, g)
    return q
@cythonized("u")
def dmp_exquo(f, g, u, K):
    """
    Returns exact polynomial quotient in ``K[X]``.

    Any remainder is silently discarded (see the ``ZZ`` example below).

    **Examples**

    >>> from sympy.polys.domains import ZZ, QQ
    >>> from sympy.polys.densearith import dmp_exquo

    >>> f = ZZ.map([[1], [1, 0], []])
    >>> g = ZZ.map([[2], [2]])

    >>> dmp_exquo(f, g, 1, ZZ)
    [[]]

    >>> f = QQ.map([[1], [1, 0], []])
    >>> g = QQ.map([[2], [2]])

    >>> dmp_exquo(f, g, 1, QQ)
    [[1/2], [1/2, -1/2]]

    """
    # Keep only the quotient part of the full division.
    q, _ = dmp_div(f, g, u, K)
    return q
def dup_max_norm(f, K):
    """
    Returns maximum norm of a polynomial in ``K[x]``.

    **Examples**

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dup_max_norm

    >>> f = ZZ.map([-1, 2, -3])

    >>> dup_max_norm(f, ZZ)
    3

    """
    # The zero polynomial has an empty coefficient list.
    if not f:
        return K.zero
    # Largest absolute value among the coefficients.
    return max(dup_abs(f, K))
@cythonized("u,v")
def dmp_max_norm(f, u, K):
    """
    Returns maximum norm of a polynomial in ``K[X]``.

    **Examples**

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dmp_max_norm

    >>> f = ZZ.map([[2, -1], [-3]])

    >>> dmp_max_norm(f, 1, ZZ)
    3

    """
    # Univariate case is handled by the dense ``dup_*`` routine.
    if not u:
        return dup_max_norm(f, K)

    # Recurse into each coefficient and keep the largest norm.
    return max(dmp_max_norm(coeff, u - 1, K) for coeff in f)
def dup_l1_norm(f, K):
    """
    Returns l1 norm of a polynomial in ``K[x]``.

    **Examples**

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dup_l1_norm

    >>> f = ZZ.map([2, -3, 0, 1])

    >>> dup_l1_norm(f, ZZ)
    6

    """
    # The zero polynomial has an empty coefficient list.
    if not f:
        return K.zero
    # Sum of the absolute values of the coefficients.
    return sum(dup_abs(f, K))
@cythonized("u,v")
def dmp_l1_norm(f, u, K):
    """
    Returns l1 norm of a polynomial in ``K[X]``.

    **Examples**

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dmp_l1_norm

    >>> f = ZZ.map([[2, -1], [-3]])

    >>> dmp_l1_norm(f, 1, ZZ)
    6

    """
    # Univariate case is handled by the dense ``dup_*`` routine.
    if not u:
        return dup_l1_norm(f, K)

    # Recurse into each coefficient and accumulate the norms.
    return sum(dmp_l1_norm(coeff, u - 1, K) for coeff in f)
def dup_expand(polys, K):
    """
    Multiply together several polynomials in ``K[x]``.

    **Examples**

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dup_expand

    >>> f = ZZ.map([1, 0, -1])
    >>> g = ZZ.map([1, 0])
    >>> h = ZZ.map([2])

    >>> dup_expand([f, g, h], ZZ)
    [2, 0, -2, 0]

    """
    # The empty product is the constant polynomial one.
    if not polys:
        return [K.one]

    product = polys[0]

    for factor in polys[1:]:
        product = dup_mul(product, factor, K)

    return product
@cythonized("u")
def dmp_expand(polys, u, K):
    """
    Multiply together several polynomials in ``K[X]``.

    **Examples**

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dmp_expand

    >>> f = ZZ.map([[1], [], [1, 0, 0]])
    >>> g = ZZ.map([[1], [1]])

    >>> dmp_expand([f, g], 1, ZZ)
    [[1], [1], [1, 0, 0], [1, 0, 0]]

    """
    # The empty product is the multivariate constant polynomial one.
    if not polys:
        return dmp_one(u, K)

    product = polys[0]

    for factor in polys[1:]:
        product = dmp_mul(product, factor, u, K)

    return product
| {
"repo_name": "pernici/sympy",
"path": "sympy/polys/densearith.py",
"copies": "1",
"size": "36749",
"license": "bsd-3-clause",
"hash": 4553368240760850400,
"line_mean": 18.3517640864,
"line_max": 75,
"alpha_frac": 0.4612642521,
"autogenerated": false,
"ratio": 2.7073080889936643,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8656398865464978,
"avg_score": 0.002434695125737092,
"num_lines": 1899
} |
"""Arithmetic simplification module using sympy.
This module simplifies symbolic expressions using only arithmetic operators.
"""
# pylint: disable=unused-import,exec-used
import ast
import sympy
from copy import deepcopy
from sspam.tools import asttools
def run(expr_ast, nbits):
'Apply sympy arithmetic simplifications to expression ast'
# variables for sympy symbols
getid = asttools.GetIdentifiers()
getid.visit(expr_ast)
variables = getid.variables
functions = getid.functions
original_type = type(expr_ast)
# copying to avoid wierd pointer behaviour
expr_ast = deepcopy(expr_ast)
# converting expr_ast into an ast.Expression
if not isinstance(expr_ast, ast.Expression):
if isinstance(expr_ast, ast.Module):
expr_ast = ast.Expression(expr_ast.body[0].value)
elif isinstance(expr_ast, ast.Expr):
expr_ast = ast.Expression(expr_ast.value)
else:
expr_ast = ast.Expression(expr_ast)
for var in variables:
exec("%s = sympy.Symbol('%s')" % (var, var))
for fun in {"mxor", "mor", "mand", "mnot", "mrshift", "mlshift"}:
exec("%s = sympy.Function('%s')" % (fun, fun))
for fun in functions:
exec("%s = sympy.Function('%s')" % (fun, fun))
expr_ast = asttools.ReplaceBitwiseOp().visit(expr_ast)
ast.fix_missing_locations(expr_ast)
code = compile(expr_ast, '<test>', mode='eval')
eval_expr = eval(code)
try:
expr_ast = ast.parse(str(eval_expr))
except SyntaxError as ex:
print ex
exit(1)
expr_ast = asttools.ReplaceBitwiseFunctions().visit(expr_ast)
# sympy does not consider the number of bits
expr_ast = asttools.GetConstMod(nbits).visit(expr_ast)
# return original type
if original_type == ast.Expression:
expr_ast = ast.Expression(expr_ast.body[0].value)
elif original_type == ast.Expr:
expr_ast = expr_ast.body[0]
elif original_type == ast.Module:
return expr_ast
else:
expr_ast = expr_ast.body[0].value
return expr_ast
| {
"repo_name": "quarkslab/sspam",
"path": "sspam/arithm_simpl.py",
"copies": "1",
"size": "2082",
"license": "bsd-3-clause",
"hash": -2522995492833082000,
"line_mean": 32.0476190476,
"line_max": 76,
"alpha_frac": 0.6503362152,
"autogenerated": false,
"ratio": 3.602076124567474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47524123397674733,
"avg_score": null,
"num_lines": null
} |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
NaT,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
timedelta_range,
)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
    """
    Given two box-types, find the one that takes priority
    """
    # Priority order: DataFrame beats Series beats Index; a candidate wins
    # if either the requested box or the other operand is of that type.
    for candidate in (DataFrame, Series, pd.Index):
        if box is candidate or isinstance(vector, candidate):
            return candidate
    return box
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
    # Comparison tests for timedelta64[ns] vectors fully parametrized over
    # DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
    # tests will eventually end up here.

    def test_compare_timedelta64_zerodim(self, box_with_array):
        # GH#26689 should unbox when comparing with zerodim array
        box = box_with_array
        # comparisons against a boxed Index come back as a plain ndarray
        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray

        tdi = pd.timedelta_range("2H", periods=4)
        # zero-dimensional ndarray wrapping the first element of tdi
        other = np.array(tdi.to_numpy()[0])

        tdi = tm.box_expected(tdi, box)
        res = tdi <= other
        expected = np.array([True, False, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(res, expected)

        with pytest.raises(TypeError):
            # zero-dim of wrong dtype should still raise
            tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
    # TODO: All of these need to be parametrized over box

    def test_compare_timedelta_series(self):
        # regression test for GH#5963
        s = pd.Series([timedelta(days=1), timedelta(days=2)])
        actual = s > timedelta(days=1)
        expected = pd.Series([False, True])
        tm.assert_series_equal(actual, expected)

    def test_tdi_cmp_str_invalid(self, box_with_array):
        # GH#13624
        # ordering comparisons vs str raise; ==/!= are always False/True
        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
        tdi = TimedeltaIndex(["1 day", "2 days"])
        tdarr = tm.box_expected(tdi, box_with_array)

        # check both operand orders
        for left, right in [(tdarr, "a"), ("a", tdarr)]:
            with pytest.raises(TypeError):
                left > right
            with pytest.raises(TypeError):
                left >= right
            with pytest.raises(TypeError):
                left < right
            with pytest.raises(TypeError):
                left <= right

            result = left == right
            expected = np.array([False, False], dtype=bool)
            expected = tm.box_expected(expected, xbox)
            tm.assert_equal(result, expected)

            result = left != right
            expected = np.array([True, True], dtype=bool)
            expected = tm.box_expected(expected, xbox)
            tm.assert_equal(result, expected)

    @pytest.mark.parametrize("dtype", [None, object])
    def test_comp_nat(self, dtype):
        left = pd.TimedeltaIndex(
            [pd.Timedelta("1 days"), pd.NaT, pd.Timedelta("3 days")]
        )
        right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta("3 days")])

        lhs, rhs = left, right
        if dtype is object:
            lhs, rhs = left.astype(object), right.astype(object)

        result = rhs == lhs
        expected = np.array([False, False, True])
        tm.assert_numpy_array_equal(result, expected)

        result = rhs != lhs
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)

        # NaT never compares equal, even to itself
        expected = np.array([False, False, False])
        tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
        tm.assert_numpy_array_equal(pd.NaT == rhs, expected)

        expected = np.array([True, True, True])
        tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
        tm.assert_numpy_array_equal(pd.NaT != lhs, expected)

        # ordering comparisons against NaT are always False
        expected = np.array([False, False, False])
        tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
        tm.assert_numpy_array_equal(pd.NaT > lhs, expected)

    def test_comparisons_nat(self):
        tdidx1 = pd.TimedeltaIndex(
            [
                "1 day",
                pd.NaT,
                "1 day 00:00:01",
                pd.NaT,
                "1 day 00:00:01",
                "5 day 00:00:03",
            ]
        )
        tdidx2 = pd.TimedeltaIndex(
            ["2 day", "2 day", pd.NaT, pd.NaT, "1 day 00:00:02", "5 days 00:00:03"]
        )
        tdarr = np.array(
            [
                np.timedelta64(2, "D"),
                np.timedelta64(2, "D"),
                np.timedelta64("nat"),
                np.timedelta64("nat"),
                np.timedelta64(1, "D") + np.timedelta64(2, "s"),
                np.timedelta64(5, "D") + np.timedelta64(3, "s"),
            ]
        )

        cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]

        # Check pd.NaT is handles as the same as np.nan
        for idx1, idx2 in cases:

            result = idx1 < idx2
            expected = np.array([True, False, False, False, True, False])
            tm.assert_numpy_array_equal(result, expected)

            result = idx2 > idx1
            expected = np.array([True, False, False, False, True, False])
            tm.assert_numpy_array_equal(result, expected)

            result = idx1 <= idx2
            expected = np.array([True, False, False, False, True, True])
            tm.assert_numpy_array_equal(result, expected)

            result = idx2 >= idx1
            expected = np.array([True, False, False, False, True, True])
            tm.assert_numpy_array_equal(result, expected)

            result = idx1 == idx2
            expected = np.array([False, False, False, False, False, True])
            tm.assert_numpy_array_equal(result, expected)

            result = idx1 != idx2
            expected = np.array([True, True, True, True, True, False])
            tm.assert_numpy_array_equal(result, expected)

    # TODO: better name
    def test_comparisons_coverage(self):
        rng = timedelta_range("1 days", periods=10)

        result = rng < rng[3]
        expected = np.array([True, True, True] + [False] * 7)
        tm.assert_numpy_array_equal(result, expected)

        # comparing against a raw integer (the .value) raise TypeError for now
        with pytest.raises(TypeError):
            rng < rng[3].value

        # comparing against a list should match comparing against the index
        result = rng == list(rng)
        exp = rng == rng
        tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
    # Tests moved from type-specific test files but not
    # yet sorted/parametrized/de-duplicated

    def test_ufunc_coercions(self):
        # normal ops are also tested in tseries/test_timedeltas.py
        idx = TimedeltaIndex(["2H", "4H", "6H", "8H", "10H"], freq="2H", name="x")

        # multiplying by a scalar scales the freq accordingly
        for result in [idx * 2, np.multiply(idx, 2)]:
            assert isinstance(result, TimedeltaIndex)
            exp = TimedeltaIndex(["4H", "8H", "12H", "16H", "20H"], freq="4H", name="x")
            tm.assert_index_equal(result, exp)
            assert result.freq == "4H"

        # dividing by a scalar scales the freq accordingly
        for result in [idx / 2, np.divide(idx, 2)]:
            assert isinstance(result, TimedeltaIndex)
            exp = TimedeltaIndex(["1H", "2H", "3H", "4H", "5H"], freq="H", name="x")
            tm.assert_index_equal(result, exp)
            assert result.freq == "H"

        idx = TimedeltaIndex(["2H", "4H", "6H", "8H", "10H"], freq="2H", name="x")
        # negation flips the sign of the freq
        for result in [-idx, np.negative(idx)]:
            assert isinstance(result, TimedeltaIndex)
            exp = TimedeltaIndex(
                ["-2H", "-4H", "-6H", "-8H", "-10H"], freq="-2H", name="x"
            )
            tm.assert_index_equal(result, exp)
            assert result.freq == "-2H"

        idx = TimedeltaIndex(["-2H", "-1H", "0H", "1H", "2H"], freq="H", name="x")
        # abs() breaks the regular spacing, so no freq is retained
        for result in [abs(idx), np.absolute(idx)]:
            assert isinstance(result, TimedeltaIndex)
            exp = TimedeltaIndex(["2H", "1H", "0H", "1H", "2H"], freq=None, name="x")
            tm.assert_index_equal(result, exp)
            assert result.freq is None

    def test_subtraction_ops(self):
        # with datetimes/timedelta and tdi/dti
        tdi = TimedeltaIndex(["1 days", pd.NaT, "2 days"], name="foo")
        dti = pd.date_range("20130101", periods=3, name="bar")
        td = Timedelta("1 days")
        dt = Timestamp("20130101")

        # td64 - datelike is undefined
        msg = "cannot subtract a datelike from a TimedeltaArray"
        with pytest.raises(TypeError, match=msg):
            tdi - dt
        with pytest.raises(TypeError, match=msg):
            tdi - dti

        msg = (
            r"descriptor '__sub__' requires a 'datetime\.datetime' object"
            " but received a 'Timedelta'"
        )
        with pytest.raises(TypeError, match=msg):
            td - dt

        msg = "bad operand type for unary -: 'DatetimeArray'"
        with pytest.raises(TypeError, match=msg):
            td - dti

        # datetime - datetime-index yields timedeltas
        result = dt - dti
        expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"], name="bar")
        tm.assert_index_equal(result, expected)

        result = dti - dt
        expected = TimedeltaIndex(["0 days", "1 days", "2 days"], name="bar")
        tm.assert_index_equal(result, expected)

        # NaT propagates through td64 subtraction
        result = tdi - td
        expected = TimedeltaIndex(["0 days", pd.NaT, "1 days"], name="foo")
        tm.assert_index_equal(result, expected, check_names=False)

        result = td - tdi
        expected = TimedeltaIndex(["0 days", pd.NaT, "-1 days"], name="foo")
        tm.assert_index_equal(result, expected, check_names=False)

        # datetime-index - timedelta yields datetimes
        result = dti - td
        expected = DatetimeIndex(["20121231", "20130101", "20130102"], name="bar")
        tm.assert_index_equal(result, expected, check_names=False)

        result = dt - tdi
        expected = DatetimeIndex(["20121231", pd.NaT, "20121230"], name="foo")
        tm.assert_index_equal(result, expected)

    def test_subtraction_ops_with_tz(self):

        # check that dt/dti subtraction ops with tz are validated
        dti = pd.date_range("20130101", periods=3)
        ts = Timestamp("20130101")
        dt = ts.to_pydatetime()
        dti_tz = pd.date_range("20130101", periods=3).tz_localize("US/Eastern")
        ts_tz = Timestamp("20130101").tz_localize("US/Eastern")
        ts_tz2 = Timestamp("20130101").tz_localize("CET")
        dt_tz = ts_tz.to_pydatetime()
        td = Timedelta("1 days")

        def _check(result, expected):
            # subtraction of two datelikes must give a Timedelta scalar
            assert result == expected
            assert isinstance(result, Timedelta)

        # scalars
        result = ts - ts
        expected = Timedelta("0 days")
        _check(result, expected)

        result = dt_tz - ts_tz
        expected = Timedelta("0 days")
        _check(result, expected)

        result = ts_tz - dt_tz
        expected = Timedelta("0 days")
        _check(result, expected)

        # tz mismatches
        msg = "Timestamp subtraction must have the same timezones or no timezones"
        with pytest.raises(TypeError, match=msg):
            dt_tz - ts
        msg = "can't subtract offset-naive and offset-aware datetimes"
        with pytest.raises(TypeError, match=msg):
            dt_tz - dt
        msg = "Timestamp subtraction must have the same timezones or no timezones"
        with pytest.raises(TypeError, match=msg):
            dt_tz - ts_tz2
        msg = "can't subtract offset-naive and offset-aware datetimes"
        with pytest.raises(TypeError, match=msg):
            dt - dt_tz
        msg = "Timestamp subtraction must have the same timezones or no timezones"
        with pytest.raises(TypeError, match=msg):
            ts - dt_tz
        with pytest.raises(TypeError, match=msg):
            ts_tz2 - ts
        with pytest.raises(TypeError, match=msg):
            ts_tz2 - dt
        with pytest.raises(TypeError, match=msg):
            ts_tz - ts_tz2

        # with dti
        with pytest.raises(TypeError, match=msg):
            dti - ts_tz
        with pytest.raises(TypeError, match=msg):
            dti_tz - ts
        with pytest.raises(TypeError, match=msg):
            dti_tz - ts_tz2

        # matching tz on both sides works fine
        result = dti_tz - dt_tz
        expected = TimedeltaIndex(["0 days", "1 days", "2 days"])
        tm.assert_index_equal(result, expected)

        result = dt_tz - dti_tz
        expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"])
        tm.assert_index_equal(result, expected)

        result = dti_tz - ts_tz
        expected = TimedeltaIndex(["0 days", "1 days", "2 days"])
        tm.assert_index_equal(result, expected)

        result = ts_tz - dti_tz
        expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"])
        tm.assert_index_equal(result, expected)

        result = td - td
        expected = Timedelta("0 days")
        _check(result, expected)

        # tz-aware index minus timedelta keeps the tz
        result = dti_tz - td
        expected = DatetimeIndex(["20121231", "20130101", "20130102"], tz="US/Eastern")
        tm.assert_index_equal(result, expected)

    def test_dti_tdi_numeric_ops(self):
        # These are normally union/diff set-like ops
        tdi = TimedeltaIndex(["1 days", pd.NaT, "2 days"], name="foo")
        dti = pd.date_range("20130101", periods=3, name="bar")

        # TODO(wesm): unused?
        # td = Timedelta('1 days')
        # dt = Timestamp('20130101')

        result = tdi - tdi
        expected = TimedeltaIndex(["0 days", pd.NaT, "0 days"], name="foo")
        tm.assert_index_equal(result, expected)

        result = tdi + tdi
        expected = TimedeltaIndex(["2 days", pd.NaT, "4 days"], name="foo")
        tm.assert_index_equal(result, expected)

        result = dti - tdi  # name will be reset
        expected = DatetimeIndex(["20121231", pd.NaT, "20130101"])
        tm.assert_index_equal(result, expected)

    def test_addition_ops(self):
        # with datetimes/timedelta and tdi/dti
        tdi = TimedeltaIndex(["1 days", pd.NaT, "2 days"], name="foo")
        dti = pd.date_range("20130101", periods=3, name="bar")
        td = Timedelta("1 days")
        dt = Timestamp("20130101")

        # tdi + datetime scalar is commutative
        result = tdi + dt
        expected = DatetimeIndex(["20130102", pd.NaT, "20130103"], name="foo")
        tm.assert_index_equal(result, expected)

        result = dt + tdi
        expected = DatetimeIndex(["20130102", pd.NaT, "20130103"], name="foo")
        tm.assert_index_equal(result, expected)

        # tdi + timedelta scalar is commutative
        result = td + tdi
        expected = TimedeltaIndex(["2 days", pd.NaT, "3 days"], name="foo")
        tm.assert_index_equal(result, expected)

        result = tdi + td
        expected = TimedeltaIndex(["2 days", pd.NaT, "3 days"], name="foo")
        tm.assert_index_equal(result, expected)

        # unequal length
        msg = "cannot add indices of unequal length"
        with pytest.raises(ValueError, match=msg):
            tdi + dti[0:1]
        with pytest.raises(ValueError, match=msg):
            tdi[0:1] + dti

        # random indexes
        with pytest.raises(NullFrequencyError):
            tdi + pd.Int64Index([1, 2, 3])

        # this is a union!
        # pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)

        result = tdi + dti  # name will be reset
        expected = DatetimeIndex(["20130102", pd.NaT, "20130105"])
        tm.assert_index_equal(result, expected)

        result = dti + tdi  # name will be reset
        expected = DatetimeIndex(["20130102", pd.NaT, "20130105"])
        tm.assert_index_equal(result, expected)

        result = dt + td
        expected = Timestamp("20130102")
        assert result == expected

        result = td + dt
        expected = Timestamp("20130102")
        assert result == expected

    # TODO: Needs more informative name, probably split up into
    # more targeted tests
    @pytest.mark.parametrize("freq", ["D", "B"])
    def test_timedelta(self, freq):
        index = pd.date_range("1/1/2000", periods=50, freq=freq)

        # shifting by +1/-1 day round-trips
        shifted = index + timedelta(1)
        back = shifted + timedelta(-1)
        tm.assert_index_equal(index, back)

        if freq == "D":
            expected = pd.tseries.offsets.Day(1)
            assert index.freq == expected
            assert shifted.freq == expected
            assert back.freq == expected
        else:  # freq == 'B'
            # shifting a business-day index by a calendar day loses the freq
            assert index.freq == pd.tseries.offsets.BusinessDay(1)
            assert shifted.freq is None
            assert back.freq == pd.tseries.offsets.BusinessDay(1)

        result = index - timedelta(1)
        expected = index + timedelta(-1)
        tm.assert_index_equal(result, expected)

        # GH#4134, buggy with timedeltas
        rng = pd.date_range("2013", "2014")
        s = Series(rng)
        result1 = rng - pd.offsets.Hour(1)
        result2 = DatetimeIndex(s - np.timedelta64(100000000))
        result3 = rng - np.timedelta64(100000000)
        result4 = DatetimeIndex(s - pd.offsets.Hour(1))
        tm.assert_index_equal(result1, result4)
        tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
    # TODO: parametrize over boxes

    def test_tdi_add_timestamp_nat_masking(self):
        # GH#17991 checking for overflow-masking with NaT
        tdinat = pd.to_timedelta(["24658 days 11:15:00", "NaT"])

        tsneg = Timestamp("1950-01-01")
        ts_neg_variants = [
            tsneg,
            tsneg.to_pydatetime(),
            tsneg.to_datetime64().astype("datetime64[ns]"),
            tsneg.to_datetime64().astype("datetime64[D]"),
        ]

        tspos = Timestamp("1980-01-01")
        ts_pos_variants = [
            tspos,
            tspos.to_pydatetime(),
            tspos.to_datetime64().astype("datetime64[ns]"),
            tspos.to_datetime64().astype("datetime64[D]"),
        ]

        # the NaT slot must stay NaT rather than overflow, for every
        # flavor of datetime scalar
        for variant in ts_neg_variants + ts_pos_variants:
            res = tdinat + variant
            assert res[1] is pd.NaT

    def test_tdi_add_overflow(self):
        # See GH#14068
        # preliminary test scalar analogue of vectorized tests below
        with pytest.raises(OutOfBoundsDatetime):
            pd.to_timedelta(106580, "D") + Timestamp("2000")
        with pytest.raises(OutOfBoundsDatetime):
            Timestamp("2000") + pd.to_timedelta(106580, "D")

        # smallest int64 value that is not the NaT sentinel
        _NaT = int(pd.NaT) + 1
        msg = "Overflow in int64 addition"
        with pytest.raises(OverflowError, match=msg):
            pd.to_timedelta([106580], "D") + Timestamp("2000")
        with pytest.raises(OverflowError, match=msg):
            Timestamp("2000") + pd.to_timedelta([106580], "D")
        with pytest.raises(OverflowError, match=msg):
            pd.to_timedelta([_NaT]) - Timedelta("1 days")
        with pytest.raises(OverflowError, match=msg):
            pd.to_timedelta(["5 days", _NaT]) - Timedelta("1 days")
        with pytest.raises(OverflowError, match=msg):
            (
                pd.to_timedelta([_NaT, "5 days", "1 hours"])
                - pd.to_timedelta(["7 seconds", _NaT, "4 hours"])
            )

        # These should not overflow!
        exp = TimedeltaIndex([pd.NaT])
        result = pd.to_timedelta([pd.NaT]) - Timedelta("1 days")
        tm.assert_index_equal(result, exp)

        exp = TimedeltaIndex(["4 days", pd.NaT])
        result = pd.to_timedelta(["5 days", pd.NaT]) - Timedelta("1 days")
        tm.assert_index_equal(result, exp)

        exp = TimedeltaIndex([pd.NaT, pd.NaT, "5 hours"])
        result = pd.to_timedelta([pd.NaT, "5 days", "1 hours"]) + pd.to_timedelta(
            ["7 seconds", pd.NaT, "4 hours"]
        )
        tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range("1", periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
    def test_timedelta_ops_with_missing_values(self):
        # setup
        s1 = pd.to_timedelta(Series(["00:00:01"]))
        s2 = pd.to_timedelta(Series(["00:00:02"]))

        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            # Passing datetime64-dtype data to TimedeltaIndex is deprecated
            sn = pd.to_timedelta(Series([pd.NaT]))

        df1 = pd.DataFrame(["00:00:01"]).apply(pd.to_timedelta)
        df2 = pd.DataFrame(["00:00:02"]).apply(pd.to_timedelta)
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            # Passing datetime64-dtype data to TimedeltaIndex is deprecated
            dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)

        scalar1 = pd.to_timedelta("00:00:01")
        scalar2 = pd.to_timedelta("00:00:02")
        timedelta_NaT = pd.to_timedelta("NaT")

        # scalar +/- scalar
        actual = scalar1 + scalar1
        assert actual == scalar2
        actual = scalar2 - scalar1
        assert actual == scalar1

        # Series +/- Series
        actual = s1 + s1
        tm.assert_series_equal(actual, s2)
        actual = s2 - s1
        tm.assert_series_equal(actual, s1)

        # Series +/- scalar, both operand orders
        actual = s1 + scalar1
        tm.assert_series_equal(actual, s2)
        actual = scalar1 + s1
        tm.assert_series_equal(actual, s2)
        actual = s2 - scalar1
        tm.assert_series_equal(actual, s1)
        actual = -scalar1 + s2
        tm.assert_series_equal(actual, s1)

        # a NaT timedelta propagates to an all-NaT result
        actual = s1 + timedelta_NaT
        tm.assert_series_equal(actual, sn)
        actual = timedelta_NaT + s1
        tm.assert_series_equal(actual, sn)
        actual = s1 - timedelta_NaT
        tm.assert_series_equal(actual, sn)
        actual = -timedelta_NaT + s1
        tm.assert_series_equal(actual, sn)

        # np.nan is not a valid timedelta operand -> TypeError
        with pytest.raises(TypeError):
            s1 + np.nan
        with pytest.raises(TypeError):
            np.nan + s1
        with pytest.raises(TypeError):
            s1 - np.nan
        with pytest.raises(TypeError):
            -np.nan + s1

        # pd.NaT is accepted where np.nan is not
        actual = s1 + pd.NaT
        tm.assert_series_equal(actual, sn)
        actual = s2 - pd.NaT
        tm.assert_series_equal(actual, sn)

        # Series +/- DataFrame and DataFrame +/- Series
        actual = s1 + df1
        tm.assert_frame_equal(actual, df2)
        actual = s2 - df1
        tm.assert_frame_equal(actual, df1)
        actual = df1 + s1
        tm.assert_frame_equal(actual, df2)
        actual = df2 - s1
        tm.assert_frame_equal(actual, df1)

        # DataFrame +/- DataFrame
        actual = df1 + df1
        tm.assert_frame_equal(actual, df2)
        actual = df2 - df1
        tm.assert_frame_equal(actual, df1)

        # DataFrame +/- scalar
        actual = df1 + scalar1
        tm.assert_frame_equal(actual, df2)
        actual = df2 - scalar1
        tm.assert_frame_equal(actual, df1)
        actual = df1 + timedelta_NaT
        tm.assert_frame_equal(actual, dfn)
        actual = df1 - timedelta_NaT
        tm.assert_frame_equal(actual, dfn)

        # np.nan is rejected for frames as well
        with pytest.raises(TypeError):
            df1 + np.nan
        with pytest.raises(TypeError):
            df1 - np.nan

        actual = df1 + pd.NaT  # NaT is datetime, not timedelta
        tm.assert_frame_equal(actual, dfn)
        actual = df1 - pd.NaT
        tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
    def test_operators_timedelta64(self):
        # series ops
        v1 = pd.date_range("2012-1-1", periods=3, freq="D")
        v2 = pd.date_range("2012-1-2", periods=3, freq="D")
        rs = Series(v2) - Series(v1)
        # one day, expressed in nanoseconds
        xp = Series(1e9 * 3600 * 24, rs.index).astype("int64").astype("timedelta64[ns]")
        tm.assert_series_equal(rs, xp)
        assert rs.dtype == "timedelta64[ns]"

        df = DataFrame(dict(A=v1))
        td = Series([timedelta(days=i) for i in range(3)])
        assert td.dtype == "timedelta64[ns]"

        # series on the rhs
        result = df["A"] - df["A"].shift()
        assert result.dtype == "timedelta64[ns]"

        result = df["A"] + td
        assert result.dtype == "M8[ns]"

        # scalar Timestamp on rhs
        maxa = df["A"].max()
        assert isinstance(maxa, Timestamp)

        resultb = df["A"] - df["A"].max()
        assert resultb.dtype == "timedelta64[ns]"

        # timestamp on lhs
        result = resultb + df["A"]
        values = [Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
        expected = Series(values, name="A")
        tm.assert_series_equal(result, expected)

        # datetimes on rhs
        result = df["A"] - datetime(2001, 1, 1)
        expected = Series([timedelta(days=4017 + i) for i in range(3)], name="A")
        tm.assert_series_equal(result, expected)
        assert result.dtype == "m8[ns]"

        d = datetime(2001, 1, 1, 3, 4)
        resulta = df["A"] - d
        assert resulta.dtype == "m8[ns]"

        # roundtrip
        resultb = resulta + d
        tm.assert_series_equal(df["A"], resultb)

        # timedeltas on rhs
        td = timedelta(days=1)
        resulta = df["A"] + td
        resultb = resulta - td
        tm.assert_series_equal(resultb, df["A"])
        assert resultb.dtype == "M8[ns]"

        # roundtrip
        td = timedelta(minutes=5, seconds=3)
        resulta = df["A"] + td
        resultb = resulta - td
        tm.assert_series_equal(df["A"], resultb)
        assert resultb.dtype == "M8[ns]"

        # inplace: += on a single element must match the computed value
        value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
        rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
        assert rs[2] == value
    def test_timedelta64_ops_nat(self):
        # GH 11349
        timedelta_series = Series([NaT, Timedelta("1s")])
        nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]")
        single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]")

        # subtraction: NaT propagates elementwise
        tm.assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
        tm.assert_series_equal(-NaT + timedelta_series, nat_series_dtype_timedelta)

        tm.assert_series_equal(
            timedelta_series - single_nat_dtype_timedelta, nat_series_dtype_timedelta
        )
        tm.assert_series_equal(
            -single_nat_dtype_timedelta + timedelta_series, nat_series_dtype_timedelta
        )

        # addition: NaT propagates elementwise
        tm.assert_series_equal(
            nat_series_dtype_timedelta + NaT, nat_series_dtype_timedelta
        )
        tm.assert_series_equal(
            NaT + nat_series_dtype_timedelta, nat_series_dtype_timedelta
        )

        tm.assert_series_equal(
            nat_series_dtype_timedelta + single_nat_dtype_timedelta,
            nat_series_dtype_timedelta,
        )
        tm.assert_series_equal(
            single_nat_dtype_timedelta + nat_series_dtype_timedelta,
            nat_series_dtype_timedelta,
        )

        tm.assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
        tm.assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)

        tm.assert_series_equal(
            timedelta_series + single_nat_dtype_timedelta, nat_series_dtype_timedelta
        )
        tm.assert_series_equal(
            single_nat_dtype_timedelta + timedelta_series, nat_series_dtype_timedelta
        )

        tm.assert_series_equal(
            nat_series_dtype_timedelta + NaT, nat_series_dtype_timedelta
        )
        tm.assert_series_equal(
            NaT + nat_series_dtype_timedelta, nat_series_dtype_timedelta
        )

        tm.assert_series_equal(
            nat_series_dtype_timedelta + single_nat_dtype_timedelta,
            nat_series_dtype_timedelta,
        )
        tm.assert_series_equal(
            single_nat_dtype_timedelta + nat_series_dtype_timedelta,
            nat_series_dtype_timedelta,
        )

        # multiplication: numeric scalars scale, NaN yields NaT
        tm.assert_series_equal(
            nat_series_dtype_timedelta * 1.0, nat_series_dtype_timedelta
        )
        tm.assert_series_equal(
            1.0 * nat_series_dtype_timedelta, nat_series_dtype_timedelta
        )

        tm.assert_series_equal(timedelta_series * 1, timedelta_series)
        tm.assert_series_equal(1 * timedelta_series, timedelta_series)

        tm.assert_series_equal(timedelta_series * 1.5, Series([NaT, Timedelta("1.5s")]))
        tm.assert_series_equal(1.5 * timedelta_series, Series([NaT, Timedelta("1.5s")]))

        tm.assert_series_equal(timedelta_series * np.nan, nat_series_dtype_timedelta)
        tm.assert_series_equal(np.nan * timedelta_series, nat_series_dtype_timedelta)

        # division: numeric scalars divide, NaN yields NaT
        tm.assert_series_equal(timedelta_series / 2, Series([NaT, Timedelta("0.5s")]))
        tm.assert_series_equal(timedelta_series / 2.0, Series([NaT, Timedelta("0.5s")]))
        tm.assert_series_equal(timedelta_series / np.nan, nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box_with_array):
# GH#13624
tdi = TimedeltaIndex(["1 day", "2 days"])
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi + "a"
with pytest.raises(TypeError):
"a" + tdi
@pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])])
def test_td64arr_add_sub_float(self, box_with_array, other):
tdi = TimedeltaIndex(["-1 days", "-1 days"])
tdarr = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdarr + other
with pytest.raises(TypeError):
other + tdarr
with pytest.raises(TypeError):
tdarr - other
with pytest.raises(TypeError):
other - tdarr
@pytest.mark.parametrize("freq", [None, "H"])
def test_td64arr_sub_period(self, box_with_array, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period("2011-01-01", freq="D")
idx = TimedeltaIndex(["1 hours", "2 hours"], freq=freq)
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("tdi_freq", [None, "H"])
def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(["1 hours", "2 hours"], freq=tdi_freq)
dti = Timestamp("2018-03-07 17:16:40") + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(["1 day", "2 day"])
idx = tm.box_expected(idx, box_with_array)
msg = (
"cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation"
)
with pytest.raises(TypeError, match=msg):
idx - Timestamp("2011-01-01")
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp("2011-01-01", tz=tz)
idx = TimedeltaIndex(["1 day", "2 day"])
expected = DatetimeIndex(["2011-01-02", "2011-01-03"], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
def test_td64arr_add_sub_timestamp(self, box_with_array):
# GH#11925
ts = Timestamp("2012-01-01")
# TODO: parametrize over types of datetime scalar?
tdi = timedelta_range("1 day", periods=3)
expected = pd.date_range("2012-01-02", periods=3)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range("2011-12-31", periods=3, freq="-1D")
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
with pytest.raises(TypeError):
tdarr - ts
def test_tdi_sub_dt64_array(self, box_with_array):
dti = pd.date_range("2016-01-01", periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range("2016-01-01", periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64("NaT")
tdi = timedelta_range("1 day", periods=3)
expected = pd.DatetimeIndex(["NaT", "NaT", "NaT"])
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
# Operations with int-like others
def test_td64arr_add_int_series_invalid(self, box):
tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
int_ser = Series([2, 3, 4])
with pytest.raises(err):
tdser + int_ser
with pytest.raises(err):
int_ser + tdser
with pytest.raises(err):
tdser - int_ser
with pytest.raises(err):
int_ser - tdser
def test_td64arr_add_intlike(self, box_with_array):
# GH#19123
tdi = TimedeltaIndex(["59 days", "59 days", "NaT"])
ser = tm.box_expected(tdi, box_with_array)
err = TypeError
if box_with_array in [pd.Index, tm.to_array]:
err = NullFrequencyError
other = Series([20, 30, 40], dtype="uint8")
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize("scalar", [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array, scalar):
box = box_with_array
tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box in [pd.Index, tm.to_array] and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize(
"dtype",
[
"int64",
"int32",
"int16",
"uint64",
"uint32",
"uint16",
"uint8",
"float64",
"float32",
"float16",
],
)
@pytest.mark.parametrize(
"vec",
[
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
],
ids=lambda x: type(x).__name__,
)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype):
tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith("float"):
err = NullFrequencyError
vector = vec.astype(dtype)
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
# TODO: this was taken from tests.series.test_ops; de-duplicate
@pytest.mark.parametrize(
"scalar_td",
[
timedelta(minutes=5, seconds=4),
Timedelta(minutes=5, seconds=4),
Timedelta("5m4s").to_timedelta64(),
],
)
def test_operators_timedelta64_with_timedelta(self, scalar_td):
# smoke tests
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 + scalar_td
scalar_td + td1
td1 - scalar_td
scalar_td - td1
td1 / scalar_td
scalar_td / td1
# TODO: this was taken from tests.series.test_ops; de-duplicate
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series(
[timedelta(seconds=1)] * 3
)
assert result.dtype == "m8[ns]"
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = Series([timedelta(seconds=1)] * 3) - Series(
[timedelta(seconds=0)] * 3
)
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(["00:05:03"] * 3))
td2 = pd.to_timedelta("00:05:04")
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series(
[timedelta(seconds=1)] * 3
)
assert result.dtype == "m8[ns]"
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = Series([timedelta(seconds=1)] * 3) - Series(
[timedelta(seconds=0)] * 3
)
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
def test_td64arr_add_td64_array(self, box):
dti = pd.date_range("2016-01-01", periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box):
dti = pd.date_range("2016-01-01", periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize(
"names",
[
(None, None, None),
("Egon", "Venkman", None),
("NCC1701D", "NCC1701D", "NCC1701D"),
],
)
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
if box is pd.DataFrame and names[1] == "Venkman":
pytest.skip(
"Name propagation for DataFrame does not behave like "
"it does for Index/Series"
)
tdi = TimedeltaIndex(["0 days", "1 day"], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series(
[Timedelta(hours=3), Timedelta(days=1, hours=4)], name=names[2]
)
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == "timedelta64[ns]"
else:
assert result.dtypes[0] == "timedelta64[ns]"
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == "timedelta64[ns]"
else:
assert result.dtypes[0] == "timedelta64[ns]"
expected = Series(
[Timedelta(hours=-3), Timedelta(days=1, hours=-4)], name=names[2]
)
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == "timedelta64[ns]"
else:
assert result.dtypes[0] == "timedelta64[ns]"
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == "timedelta64[ns]"
else:
assert result.dtypes[0] == "timedelta64[ns]"
def test_td64arr_add_sub_td64_nat(self, box):
# GH#23320 special handling for timedelta64("NaT")
tdi = pd.TimedeltaIndex([NaT, Timedelta("1s")])
other = np.timedelta64("NaT")
expected = pd.TimedeltaIndex(["NaT"] * 2)
obj = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
result = other - obj
tm.assert_equal(result, expected)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta("1s")])
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range("1 days", "10 days")
expected = timedelta_range("1 days 02:00:00", "10 days 02:00:00", freq="D")
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + two_hours
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range("1 days", "10 days")
expected = timedelta_range("0 days 22:00:00", "9 days 22:00:00")
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - two_hours
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
# TODO: this was taken from tests.series.test_operators; de-duplicate
    def test_timedelta64_operations_with_DateOffset(self):
        # GH#10699
        # td64 Series +/- DateOffset scalars, an object Series of offsets
        # (which warns), chained offsets, and a smoke loop over tick offsets.
        td = Series([timedelta(minutes=5, seconds=3)] * 3)
        # scalar offset addition/subtraction is exact
        result = td + pd.offsets.Minute(1)
        expected = Series([timedelta(minutes=6, seconds=3)] * 3)
        tm.assert_series_equal(result, expected)
        result = td - pd.offsets.Minute(1)
        expected = Series([timedelta(minutes=4, seconds=3)] * 3)
        tm.assert_series_equal(result, expected)
        # adding a Series of offsets falls back to elementwise object ops,
        # which issues a PerformanceWarning
        with tm.assert_produces_warning(PerformanceWarning):
            result = td + Series(
                [pd.offsets.Minute(1), pd.offsets.Second(3), pd.offsets.Hour(2)]
            )
        expected = Series(
            [
                timedelta(minutes=6, seconds=3),
                timedelta(minutes=5, seconds=6),
                timedelta(hours=2, minutes=5, seconds=3),
            ]
        )
        tm.assert_series_equal(result, expected)
        # chained scalar offsets accumulate
        result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
        expected = Series([timedelta(minutes=6, seconds=15)] * 3)
        tm.assert_series_equal(result, expected)
        # valid DateOffsets
        # smoke test: each tick offset works in all four add/sub orderings
        for do in ["Hour", "Minute", "Second", "Day", "Micro", "Milli", "Nano"]:
            op = getattr(pd.offsets, do)
            td + op(5)
            op(5) + td
            td - op(5)
            op(5) - td
@pytest.mark.parametrize(
"names", [(None, None, None), ("foo", "bar", None), ("foo", "foo", "foo")]
)
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
if box is pd.DataFrame and names[1] == "bar":
pytest.skip(
"Name propagation for DataFrame does not behave like "
"it does for Index/Series"
)
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], name=names[1])
expected = TimedeltaIndex(
[tdi[n] + other[n] for n in range(len(tdi))], freq="infer", name=names[2]
)
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box):
# GH#18849
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex(
[tdi[n] + other[n] for n in range(len(tdi))], freq="infer"
)
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize(
"names", [(None, None, None), ("foo", "bar", None), ("foo", "foo", "foo")]
)
def test_td64arr_sub_offset_index(self, names, box):
# GH#18824, GH#19744
if box is pd.DataFrame and names[1] == "bar":
pytest.skip(
"Name propagation for DataFrame does not behave like "
"it does for Index/Series"
)
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], name=names[1])
expected = TimedeltaIndex(
[tdi[n] - other[n] for n in range(len(tdi))], freq="infer", name=names[2]
)
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_with_array):
# GH#18824
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex(
[tdi[n] - other[n] for n in range(len(tdi))], freq="infer"
)
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = None if box_with_array is pd.DataFrame else PerformanceWarning
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
    @pytest.mark.parametrize(
        "names", [(None, None, None), ("foo", "bar", None), ("foo", "foo", "foo")]
    )
    def test_td64arr_with_offset_series(self, names, box_df_fail):
        # GH#18849
        # td64 +/- Series-of-offsets: elementwise object-dtype fallback,
        # which issues a PerformanceWarning; result box is Series for
        # Index/array inputs, otherwise the input box.
        box = box_df_fail
        box2 = Series if box in [pd.Index, tm.to_array] else box
        tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0])
        other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], name=names[1])
        # expected_add is built from the *unboxed* tdi
        expected_add = Series(
            [tdi[n] + other[n] for n in range(len(tdi))], name=names[2]
        )
        tdi = tm.box_expected(tdi, box)
        expected_add = tm.box_expected(expected_add, box2)
        with tm.assert_produces_warning(PerformanceWarning):
            res = tdi + other
        tm.assert_equal(res, expected_add)
        with tm.assert_produces_warning(PerformanceWarning):
            res2 = other + tdi
        tm.assert_equal(res2, expected_add)
        # TODO: separate/parametrize add/sub test?
        # NOTE: expected_sub is built from the already-boxed tdi (scalar
        # indexing still yields Timedeltas for all boxes used here)
        expected_sub = Series(
            [tdi[n] - other[n] for n in range(len(tdi))], name=names[2]
        )
        expected_sub = tm.box_expected(expected_sub, box2)
        with tm.assert_produces_warning(PerformanceWarning):
            res3 = tdi - other
        tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize("obox", [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_with_array):
# GH#18824
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"])
tdi = tm.box_expected(tdi, box_with_array)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
class TestTimedeltaArraylikeMulDivOps:
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# TODO: Moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize("m", [1, 3, 10])
@pytest.mark.parametrize("unit", ["D", "h", "m", "s", "ms", "us", "ns"])
def test_timedelta64_conversions(self, m, unit):
startdate = Series(pd.date_range("2013-01-01", "2013-01-03"))
enddate = Series(pd.date_range("2013-03-01", "2013-03-03"))
ser = enddate - startdate
ser[2] = np.nan
# op
expected = Series([x / np.timedelta64(m, unit) for x in ser])
result = ser / np.timedelta64(m, unit)
tm.assert_series_equal(result, expected)
# reverse op
expected = Series([Timedelta(np.timedelta64(m, unit)) / x for x in ser])
result = np.timedelta64(m, unit) / ser
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, two_hours, box_with_array):
rng = timedelta_range("1 days", "10 days", name="foo")
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng * two_hours
def test_tdi_mul_int_array_zerodim(self, box_with_array):
rng5 = np.arange(5, dtype="int64")
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * np.array(5, dtype="int64")
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_with_array):
rng5 = np.arange(5, dtype="int64")
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array] else box
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
expected = TimedeltaIndex(np.arange(5, dtype="int64") ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, xbox)
result = idx * pd.Series(np.arange(5, dtype="int64"))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array] else box
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype="float64")
expected = TimedeltaIndex(rng5f * (rng5f + 1.0))
expected = tm.box_expected(expected, xbox)
result = idx * Series(rng5f + 1.0)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
@pytest.mark.parametrize(
"other",
[
np.arange(1, 11),
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11),
],
ids=lambda x: type(x).__name__,
)
def test_tdi_rmul_arraylike(self, other, box_with_array):
box = box_with_array
xbox = get_upcast_box(box, other)
tdi = TimedeltaIndex(["1 Day"] * 10)
expected = timedelta_range("1 days", "10 days")
expected._data.freq = None
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, xbox)
result = other * tdi
tm.assert_equal(result, expected)
commute = tdi * other
tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__, __rdiv__
def test_td64arr_div_nat_invalid(self, box_with_array):
# don't allow division by NaT (maybe could in the future)
rng = timedelta_range("1 days", "10 days", name="foo")
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError, match="'?true_divide'? cannot use operands"):
rng / pd.NaT
with pytest.raises(TypeError, match="Cannot divide NaTType by"):
pd.NaT / rng
def test_td64arr_div_td64nat(self, box_with_array):
# GH#23829
rng = timedelta_range("1 days", "10 days")
rng = tm.box_expected(rng, box_with_array)
other = np.timedelta64("NaT")
expected = np.array([np.nan] * 10)
expected = tm.box_expected(expected, box_with_array)
result = rng / other
tm.assert_equal(result, expected)
result = other / rng
tm.assert_equal(result, expected)
def test_td64arr_div_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
result = idx / 1
tm.assert_equal(result, idx)
with pytest.raises(TypeError, match="Cannot divide"):
# GH#23829
1 / idx
def test_td64arr_div_tdlike_scalar(self, two_hours, box_with_array):
# GH#20088, GH#22163 ensure DataFrame returns correct dtype
rng = timedelta_range("1 days", "10 days", name="foo")
expected = pd.Float64Index((np.arange(10) + 1) * 12, name="foo")
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
def test_td64arr_div_tdlike_scalar_with_nat(self, two_hours, box_with_array):
rng = TimedeltaIndex(["1 days", pd.NaT, "2 days"], name="foo")
expected = pd.Float64Index([12, np.nan, 24], name="foo")
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
    def test_td64arr_div_td64_ndarray(self, box_with_array):
        # GH#22631
        # td64 array divided by a td64 ndarray, in every accepted form:
        # raw ndarray, boxed, object-dtype, and plain list — then reversed.
        rng = TimedeltaIndex(["1 days", pd.NaT, "2 days"])
        expected = pd.Float64Index([12, np.nan, 24])
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        # hour-resolution divisor: 1 day / 2h == 12, etc.; NaT -> NaN
        other = np.array([2, 4, 2], dtype="m8[h]")
        result = rng / other
        tm.assert_equal(result, expected)
        result = rng / tm.box_expected(other, box_with_array)
        tm.assert_equal(result, expected)
        result = rng / other.astype(object)
        tm.assert_equal(result, expected)
        result = rng / list(other)
        tm.assert_equal(result, expected)
        # reversed op
        # NOTE: expected is rebound here — reversed results are reciprocals
        expected = 1 / expected
        result = other / rng
        tm.assert_equal(result, expected)
        result = tm.box_expected(other, box_with_array) / rng
        tm.assert_equal(result, expected)
        result = other.astype(object) / rng
        tm.assert_equal(result, expected)
        result = list(other) / rng
        tm.assert_equal(result, expected)
def test_tdarr_div_length_mismatch(self, box_with_array):
rng = TimedeltaIndex(["1 days", pd.NaT, "2 days"])
mismatched = [1, 2, 3, 4]
rng = tm.box_expected(rng, box_with_array)
for obj in [mismatched, mismatched[:2]]:
# one shorter, one longer
for other in [obj, np.array(obj), pd.Index(obj)]:
with pytest.raises(ValueError):
rng / other
with pytest.raises(ValueError):
other / rng
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
def test_td64arr_floordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = td1 // scalar_td
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = scalar_td // td1
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar_explicit(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
def test_td64arr_floordiv_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
result = idx // 1
tm.assert_equal(result, idx)
pattern = "floor_divide cannot use operands|Cannot divide int by Timedelta*"
with pytest.raises(TypeError, match=pattern):
1 // idx
def test_td64arr_floordiv_tdlike_scalar(self, two_hours, box_with_array):
tdi = timedelta_range("1 days", "10 days", name="foo")
expected = pd.Int64Index((np.arange(10) + 1) * 12, name="foo")
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi // two_hours
tm.assert_equal(result, expected)
# TODO: Is this redundant with test_td64arr_floordiv_tdlike_scalar?
@pytest.mark.parametrize(
"scalar_td",
[
timedelta(minutes=10, seconds=7),
Timedelta("10m7s"),
Timedelta("10m7s").to_timedelta64(),
],
ids=lambda x: type(x).__name__,
)
def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_with_array):
# GH#19125
tdi = TimedeltaIndex(["00:05:03", "00:05:03", pd.NaT], freq=None)
expected = pd.Index([2.0, 2.0, np.nan])
tdi = tm.box_expected(tdi, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
res = tdi.__rfloordiv__(scalar_td)
tm.assert_equal(res, expected)
expected = pd.Index([0.0, 0.0, np.nan])
expected = tm.box_expected(expected, box_with_array, transpose=False)
res = tdi // (scalar_td)
tm.assert_equal(res, expected)
# ------------------------------------------------------------------
# mod, divmod
# TODO: operations with timedelta-like arrays, numeric arrays,
# reversed ops
def test_td64arr_mod_tdscalar(self, box_with_array, three_days):
tdi = timedelta_range("1 Day", "9 days")
tdarr = tm.box_expected(tdi, box_with_array)
expected = TimedeltaIndex(["1 Day", "2 Days", "0 Days"] * 3)
expected = tm.box_expected(expected, box_with_array)
result = tdarr % three_days
tm.assert_equal(result, expected)
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(tdarr, three_days)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // three_days)
def test_td64arr_mod_int(self, box_with_array):
tdi = timedelta_range("1 ns", "10 ns", periods=10)
tdarr = tm.box_expected(tdi, box_with_array)
expected = TimedeltaIndex(["1 ns", "0 ns"] * 5)
expected = tm.box_expected(expected, box_with_array)
result = tdarr % 2
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
2 % tdarr
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(tdarr, 2)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // 2)
def test_td64arr_rmod_tdscalar(self, box_with_array, three_days):
tdi = timedelta_range("1 Day", "9 days")
tdarr = tm.box_expected(tdi, box_with_array)
expected = ["0 Days", "1 Day", "0 Days"] + ["3 Days"] * 6
expected = TimedeltaIndex(expected)
expected = tm.box_expected(expected, box_with_array)
result = three_days % tdarr
tm.assert_equal(result, expected)
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(three_days, tdarr)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], three_days // tdarr)
# ------------------------------------------------------------------
# Operations with invalid others
def test_td64arr_mul_tdscalar_invalid(self, box_with_array, scalar_td):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box_with_array)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = "operate|unsupported|cannot|not supported"
with pytest.raises(TypeError, match=pattern):
td1 * scalar_td
with pytest.raises(TypeError, match=pattern):
scalar_td * td1
def test_td64arr_mul_too_short_raises(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx * idx[:3]
with pytest.raises(ValueError):
idx * np.array([1, 2])
def test_td64arr_mul_td64arr_raises(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx * idx
# ------------------------------------------------------------------
# Operations with numeric others
@pytest.mark.parametrize("one", [1, np.array(1), 1.0, np.array(1.0)])
def test_td64arr_mul_numeric_scalar(self, box_with_array, one):
# GH#4521
# divide/multiply by integers
tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
expected = Series(["-59 Days", "-59 Days", "NaT"], dtype="timedelta64[ns]")
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdser * (-one)
tm.assert_equal(result, expected)
result = (-one) * tdser
tm.assert_equal(result, expected)
expected = Series(["118 Days", "118 Days", "NaT"], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
result = tdser * (2 * one)
tm.assert_equal(result, expected)
result = (2 * one) * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize("two", [2, 2.0, np.array(2), np.array(2.0)])
def test_td64arr_div_numeric_scalar(self, box_with_array, two):
# GH#4521
# divide/multiply by integers
tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
expected = Series(["29.5D", "29.5D", "NaT"], dtype="timedelta64[ns]")
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdser / two
tm.assert_equal(result, expected)
with pytest.raises(TypeError, match="Cannot divide"):
two / tdser
@pytest.mark.parametrize(
"dtype",
[
"int64",
"int32",
"int16",
"uint64",
"uint32",
"uint16",
"uint8",
"float64",
"float32",
"float16",
],
)
@pytest.mark.parametrize(
"vector",
[np.array([20, 30, 40]), pd.Index([20, 30, 40]), Series([20, 30, 40])],
ids=lambda x: type(x).__name__,
)
def test_td64arr_rmul_numeric_array(self, box_with_array, vector, dtype):
# GH#4521
# divide/multiply by integers
xbox = get_upcast_box(box_with_array, vector)
tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
vector = vector.astype(dtype)
expected = Series(["1180 Days", "1770 Days", "NaT"], dtype="timedelta64[ns]")
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, xbox)
result = tdser * vector
tm.assert_equal(result, expected)
result = vector * tdser
tm.assert_equal(result, expected)
    @pytest.mark.parametrize(
        "dtype",
        [
            "int64",
            "int32",
            "int16",
            "uint64",
            "uint32",
            "uint16",
            "uint8",
            "float64",
            "float32",
            "float16",
        ],
    )
    @pytest.mark.parametrize(
        "vector",
        [np.array([20, 30, 40]), pd.Index([20, 30, 40]), Series([20, 30, 40])],
        ids=lambda x: type(x).__name__,
    )
    def test_td64arr_div_numeric_array(self, box_with_array, vector, dtype):
        # GH#4521
        # divide/multiply by integers
        # td64 / numeric array works; numeric / td64 raises; the object-dtype
        # path is checked separately (except for Index, which just raises).
        xbox = get_upcast_box(box_with_array, vector)
        tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
        vector = vector.astype(dtype)
        expected = Series(["2.95D", "1D 23H 12m", "NaT"], dtype="timedelta64[ns]")
        tdser = tm.box_expected(tdser, box_with_array)
        expected = tm.box_expected(expected, xbox)
        result = tdser / vector
        tm.assert_equal(result, expected)
        # acceptable error messages vary by box / numpy path
        pattern = (
            "true_divide cannot use operands|"
            "cannot perform __div__|"
            "cannot perform __truediv__|"
            "unsupported operand|"
            "Cannot divide"
        )
        with pytest.raises(TypeError, match=pattern):
            vector / tdser
        if not isinstance(vector, pd.Index):
            # Index.__rdiv__ won't try to operate elementwise, just raises
            result = tdser / vector.astype(object)
            # expected is rebuilt elementwise; DataFrame results are transposed
            if box_with_array is pd.DataFrame:
                expected = [tdser.iloc[0, n] / vector[n] for n in range(len(vector))]
            else:
                expected = [tdser[n] / vector[n] for n in range(len(tdser))]
            expected = tm.box_expected(expected, xbox)
            tm.assert_equal(result, expected)
            with pytest.raises(TypeError, match=pattern):
                vector.astype(object) / tdser
    @pytest.mark.parametrize(
        "names",
        [
            (None, None, None),
            ("Egon", "Venkman", None),
            ("NCC1701D", "NCC1701D", "NCC1701D"),
        ],
    )
    def test_td64arr_mul_int_series(self, box_df_fail, names):
        """Series * td64 box multiplies elementwise and attaches the name
        resulting from the two operands' names (None when they differ)."""
        # GH#19042 test for correct name attachment
        box = box_df_fail  # broadcasts along wrong axis, but doesn't raise
        tdi = TimedeltaIndex(
            ["0days", "1day", "2days", "3days", "4days"], name=names[0]
        )
        # TODO: Should we be parametrizing over types for `ser` too?
        ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1])

        expected = Series(
            ["0days", "1day", "4days", "9days", "16days"],
            dtype="timedelta64[ns]",
            name=names[2],
        )

        tdi = tm.box_expected(tdi, box)
        # Index * Series upcasts the result container to Series
        box = Series if (box is pd.Index and type(ser) is Series) else box
        expected = tm.box_expected(expected, box)

        result = ser * tdi
        tm.assert_equal(result, expected)

        # The direct operation tdi * ser still needs to be fixed.
        result = ser.__rmul__(tdi)
        tm.assert_equal(result, expected)
    # TODO: Should we be parametrizing over types for `ser` too?
    @pytest.mark.parametrize(
        "names",
        [
            (None, None, None),
            ("Egon", "Venkman", None),
            ("NCC1701D", "NCC1701D", "NCC1701D"),
        ],
    )
    def test_float_series_rdiv_td64arr(self, box_with_array, names):
        """td64 box / float Series (via Series.__rdiv__) divides elementwise
        and attaches the expected result name."""
        # GH#19042 test for correct name attachment
        # TODO: the direct operation TimedeltaIndex / Series still
        # needs to be fixed.
        box = box_with_array
        tdi = TimedeltaIndex(
            ["0days", "1day", "2days", "3days", "4days"], name=names[0]
        )
        ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1])

        # to_array drops names, so the Series operand's name wins there
        xname = names[2] if box is not tm.to_array else names[1]
        expected = Series(
            [tdi[n] / ser[n] for n in range(len(ser))],
            dtype="timedelta64[ns]",
            name=xname,
        )

        xbox = box
        if box in [pd.Index, tm.to_array] and type(ser) is Series:
            xbox = Series

        tdi = tm.box_expected(tdi, box)
        expected = tm.box_expected(expected, xbox)

        result = ser.__rdiv__(tdi)
        if box is pd.DataFrame:
            # TODO: Should we skip this case sooner or test something else?
            assert result is NotImplemented
        else:
            tm.assert_equal(result, expected)
class TestTimedeltaArraylikeInvalidArithmeticOps:
    """Operations that are undefined for timedelta64 arrays must raise."""

    def test_td64arr_pow_invalid(self, scalar_td, box_with_array):
        """Exponentiation with a timedelta (either side) raises TypeError."""
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan

        td1 = tm.box_expected(td1, box_with_array)

        # check that we are getting a TypeError
        # with 'operate' (from core/ops.py) for the ops that are not
        # defined
        pattern = "operate|unsupported|cannot|not supported"
        with pytest.raises(TypeError, match=pattern):
            scalar_td ** td1
        with pytest.raises(TypeError, match=pattern):
            td1 ** scalar_td
| {
"repo_name": "kushalbhola/MyStuff",
"path": "Practice/PythonApplication/env/Lib/site-packages/pandas/tests/arithmetic/test_timedelta64.py",
"copies": "2",
"size": "76159",
"license": "apache-2.0",
"hash": 5948132105776181000,
"line_mean": 34.6716627635,
"line_max": 88,
"alpha_frac": 0.5701361625,
"autogenerated": false,
"ratio": 3.589188934445544,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5159325096945544,
"avg_score": null,
"num_lines": null
} |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import OutOfBoundsDatetime, PerformanceWarning
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
NaT,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
offsets,
timedelta_range,
)
import pandas._testing as tm
from pandas.tests.arithmetic.common import (
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
def assert_dtype(obj, expected_dtype):
    """Assert that *obj* has dtype ``expected_dtype``.

    Works for anything exposing ``.dtype`` (Series, Index, arrays) as well
    as single-column DataFrames, whose dtype lives in ``.dtypes``.
    """
    found = obj.dtypes.iat[0] if isinstance(obj, DataFrame) else obj.dtype
    assert found == expected_dtype
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
    # Comparison tests for timedelta64[ns] vectors fully parametrized over
    # DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
    # tests will eventually end up here.

    def test_compare_timedelta64_zerodim(self, box_with_array):
        """Comparing with a zero-dim td64 ndarray should unbox the scalar."""
        # GH#26689 should unbox when comparing with zerodim array
        box = box_with_array
        # Index/array boxes return plain ndarrays from comparison ops
        xbox = (
            box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
        )

        tdi = pd.timedelta_range("2H", periods=4)
        other = np.array(tdi.to_numpy()[0])

        tdi = tm.box_expected(tdi, box)
        res = tdi <= other
        expected = np.array([True, False, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(res, expected)

        msg = "Invalid comparison between dtype"
        with pytest.raises(TypeError, match=msg):
            # zero-dim of wrong dtype should still raise
            tdi >= np.array(4)

    @pytest.mark.parametrize(
        "td_scalar",
        [
            timedelta(days=1),
            Timedelta(days=1),
            Timedelta(days=1).to_timedelta64(),
            offsets.Hour(24),
        ],
    )
    def test_compare_timedeltalike_scalar(self, box_with_array, td_scalar):
        """All timedelta-like scalar flavors compare consistently."""
        # regression test for GH#5963
        box = box_with_array
        xbox = box if box not in [pd.Index, pd.array] else np.ndarray

        ser = Series([timedelta(days=1), timedelta(days=2)])
        ser = tm.box_expected(ser, box)
        actual = ser > td_scalar
        expected = Series([False, True])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(actual, expected)

    @pytest.mark.parametrize(
        "invalid",
        [
            345600000000000,
            "a",
            Timestamp.now(),
            Timestamp.now("UTC"),
            Timestamp.now().to_datetime64(),
            Timestamp.now().to_pydatetime(),
            Timestamp.now().date(),
        ],
    )
    def test_td64_comparisons_invalid(self, box_with_array, invalid):
        """Comparison with non-timedelta scalars raises/returns all-False
        per assert_invalid_comparison's contract."""
        # GH#13624 for str
        box = box_with_array

        rng = timedelta_range("1 days", periods=10)
        obj = tm.box_expected(rng, box)

        assert_invalid_comparison(obj, invalid, box)

    @pytest.mark.parametrize(
        "other",
        [
            list(range(10)),
            np.arange(10),
            np.arange(10).astype(np.float32),
            np.arange(10).astype(object),
            pd.date_range("1970-01-01", periods=10, tz="UTC").array,
            np.array(pd.date_range("1970-01-01", periods=10)),
            list(pd.date_range("1970-01-01", periods=10)),
            pd.date_range("1970-01-01", periods=10).astype(object),
            pd.period_range("1971-01-01", freq="D", periods=10).array,
            pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
        ],
    )
    def test_td64arr_cmp_arraylike_invalid(self, other):
        """Comparison with non-timedelta array-likes is invalid."""
        # We don't parametrize this over box_with_array because listlike
        # other plays poorly with assert_invalid_comparison reversed checks
        rng = timedelta_range("1 days", periods=10)._data
        assert_invalid_comparison(rng, other, tm.to_array)

    def test_td64arr_cmp_mixed_invalid(self):
        """Against a mixed object array: ==/!= compare elementwise, while
        ordering comparisons raise TypeError."""
        rng = timedelta_range("1 days", periods=5)._data
        other = np.array([0, 1, 2, rng[3], Timestamp.now()])

        result = rng == other
        expected = np.array([False, False, False, True, False])
        tm.assert_numpy_array_equal(result, expected)

        result = rng != other
        tm.assert_numpy_array_equal(result, ~expected)

        msg = "Invalid comparison between|Cannot compare type|not supported between"
        with pytest.raises(TypeError, match=msg):
            rng < other
        with pytest.raises(TypeError, match=msg):
            rng > other
        with pytest.raises(TypeError, match=msg):
            rng <= other
        with pytest.raises(TypeError, match=msg):
            rng >= other
class TestTimedelta64ArrayComparisons:
    # TODO: All of these need to be parametrized over box

    @pytest.mark.parametrize("dtype", [None, object])
    def test_comp_nat(self, dtype):
        """NaT behaves like NaN in comparisons: unequal to everything and
        never ordered, in both td64 and object dtype."""
        left = TimedeltaIndex([Timedelta("1 days"), pd.NaT, Timedelta("3 days")])
        right = TimedeltaIndex([pd.NaT, pd.NaT, Timedelta("3 days")])

        lhs, rhs = left, right
        if dtype is object:
            lhs, rhs = left.astype(object), right.astype(object)

        result = rhs == lhs
        expected = np.array([False, False, True])
        tm.assert_numpy_array_equal(result, expected)

        result = rhs != lhs
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)

        expected = np.array([False, False, False])
        tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
        tm.assert_numpy_array_equal(pd.NaT == rhs, expected)

        expected = np.array([True, True, True])
        tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
        tm.assert_numpy_array_equal(pd.NaT != lhs, expected)

        expected = np.array([False, False, False])
        tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
        tm.assert_numpy_array_equal(pd.NaT > lhs, expected)

    def test_comparisons_nat(self):
        """Positions holding NaT compare False under every operator, for
        both index-vs-index and index-vs-ndarray comparisons."""
        tdidx1 = TimedeltaIndex(
            [
                "1 day",
                pd.NaT,
                "1 day 00:00:01",
                pd.NaT,
                "1 day 00:00:01",
                "5 day 00:00:03",
            ]
        )
        tdidx2 = TimedeltaIndex(
            ["2 day", "2 day", pd.NaT, pd.NaT, "1 day 00:00:02", "5 days 00:00:03"]
        )
        tdarr = np.array(
            [
                np.timedelta64(2, "D"),
                np.timedelta64(2, "D"),
                np.timedelta64("nat"),
                np.timedelta64("nat"),
                np.timedelta64(1, "D") + np.timedelta64(2, "s"),
                np.timedelta64(5, "D") + np.timedelta64(3, "s"),
            ]
        )

        cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]

        # Check pd.NaT is handled the same as np.nan
        for idx1, idx2 in cases:
            result = idx1 < idx2
            expected = np.array([True, False, False, False, True, False])
            tm.assert_numpy_array_equal(result, expected)

            result = idx2 > idx1
            expected = np.array([True, False, False, False, True, False])
            tm.assert_numpy_array_equal(result, expected)

            result = idx1 <= idx2
            expected = np.array([True, False, False, False, True, True])
            tm.assert_numpy_array_equal(result, expected)

            result = idx2 >= idx1
            expected = np.array([True, False, False, False, True, True])
            tm.assert_numpy_array_equal(result, expected)

            result = idx1 == idx2
            expected = np.array([False, False, False, False, False, True])
            tm.assert_numpy_array_equal(result, expected)

            result = idx1 != idx2
            expected = np.array([True, True, True, True, True, False])
            tm.assert_numpy_array_equal(result, expected)

    # TODO: better name
    def test_comparisons_coverage(self):
        """Scalar and listlike comparisons agree with index-vs-index."""
        rng = timedelta_range("1 days", periods=10)

        result = rng < rng[3]
        expected = np.array([True, True, True] + [False] * 7)
        tm.assert_numpy_array_equal(result, expected)

        result = rng == list(rng)
        exp = rng == rng
        tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
    # Tests moved from type-specific test files but not
    # yet sorted/parametrized/de-duplicated

    def test_ufunc_coercions(self):
        """Operators and their numpy ufunc equivalents agree, and the
        resulting TimedeltaIndex carries the scaled/negated freq."""
        # normal ops are also tested in tseries/test_timedeltas.py
        idx = TimedeltaIndex(["2H", "4H", "6H", "8H", "10H"], freq="2H", name="x")

        for result in [idx * 2, np.multiply(idx, 2)]:
            assert isinstance(result, TimedeltaIndex)
            exp = TimedeltaIndex(["4H", "8H", "12H", "16H", "20H"], freq="4H", name="x")
            tm.assert_index_equal(result, exp)
            assert result.freq == "4H"

        for result in [idx / 2, np.divide(idx, 2)]:
            assert isinstance(result, TimedeltaIndex)
            exp = TimedeltaIndex(["1H", "2H", "3H", "4H", "5H"], freq="H", name="x")
            tm.assert_index_equal(result, exp)
            assert result.freq == "H"

        for result in [-idx, np.negative(idx)]:
            assert isinstance(result, TimedeltaIndex)
            exp = TimedeltaIndex(
                ["-2H", "-4H", "-6H", "-8H", "-10H"], freq="-2H", name="x"
            )
            tm.assert_index_equal(result, exp)
            assert result.freq == "-2H"

        idx = TimedeltaIndex(["-2H", "-1H", "0H", "1H", "2H"], freq="H", name="x")
        for result in [abs(idx), np.absolute(idx)]:
            assert isinstance(result, TimedeltaIndex)
            # abs() is not monotonic, so freq cannot be preserved
            exp = TimedeltaIndex(["2H", "1H", "0H", "1H", "2H"], freq=None, name="x")
            tm.assert_index_equal(result, exp)
            assert result.freq is None

    def test_subtraction_ops(self):
        """Valid/invalid subtraction combinations of Timestamp, Timedelta,
        TimedeltaIndex, and DatetimeIndex."""
        # with datetimes/timedelta and tdi/dti
        tdi = TimedeltaIndex(["1 days", pd.NaT, "2 days"], name="foo")
        dti = pd.date_range("20130101", periods=3, name="bar")
        td = Timedelta("1 days")
        dt = Timestamp("20130101")

        msg = "cannot subtract a datelike from a TimedeltaArray"
        with pytest.raises(TypeError, match=msg):
            tdi - dt
        with pytest.raises(TypeError, match=msg):
            tdi - dti

        msg = r"unsupported operand type\(s\) for -"
        with pytest.raises(TypeError, match=msg):
            td - dt

        msg = "(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            td - dti

        result = dt - dti
        expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"], name="bar")
        tm.assert_index_equal(result, expected)

        result = dti - dt
        expected = TimedeltaIndex(["0 days", "1 days", "2 days"], name="bar")
        tm.assert_index_equal(result, expected)

        result = tdi - td
        expected = TimedeltaIndex(["0 days", pd.NaT, "1 days"], name="foo")
        tm.assert_index_equal(result, expected, check_names=False)

        result = td - tdi
        expected = TimedeltaIndex(["0 days", pd.NaT, "-1 days"], name="foo")
        tm.assert_index_equal(result, expected, check_names=False)

        result = dti - td
        expected = DatetimeIndex(
            ["20121231", "20130101", "20130102"], freq="D", name="bar"
        )
        tm.assert_index_equal(result, expected, check_names=False)

        result = dt - tdi
        expected = DatetimeIndex(["20121231", pd.NaT, "20121230"], name="foo")
        tm.assert_index_equal(result, expected)

    def test_subtraction_ops_with_tz(self):
        """tz-aware/naive mixing in datetime subtraction must be validated;
        matching timezones subtract to Timedelta results."""
        # check that dt/dti subtraction ops with tz are validated
        dti = pd.date_range("20130101", periods=3)
        ts = Timestamp("20130101")
        dt = ts.to_pydatetime()
        dti_tz = pd.date_range("20130101", periods=3).tz_localize("US/Eastern")
        ts_tz = Timestamp("20130101").tz_localize("US/Eastern")
        ts_tz2 = Timestamp("20130101").tz_localize("CET")
        dt_tz = ts_tz.to_pydatetime()
        td = Timedelta("1 days")

        def _check(result, expected):
            # datetime - datetime must produce a Timedelta scalar
            assert result == expected
            assert isinstance(result, Timedelta)

        # scalars
        result = ts - ts
        expected = Timedelta("0 days")
        _check(result, expected)

        result = dt_tz - ts_tz
        expected = Timedelta("0 days")
        _check(result, expected)

        result = ts_tz - dt_tz
        expected = Timedelta("0 days")
        _check(result, expected)

        # tz mismatches
        msg = "Timestamp subtraction must have the same timezones or no timezones"
        with pytest.raises(TypeError, match=msg):
            dt_tz - ts
        msg = "can't subtract offset-naive and offset-aware datetimes"
        with pytest.raises(TypeError, match=msg):
            dt_tz - dt
        msg = "Timestamp subtraction must have the same timezones or no timezones"
        with pytest.raises(TypeError, match=msg):
            dt_tz - ts_tz2
        msg = "can't subtract offset-naive and offset-aware datetimes"
        with pytest.raises(TypeError, match=msg):
            dt - dt_tz
        msg = "Timestamp subtraction must have the same timezones or no timezones"
        with pytest.raises(TypeError, match=msg):
            ts - dt_tz
        with pytest.raises(TypeError, match=msg):
            ts_tz2 - ts
        with pytest.raises(TypeError, match=msg):
            ts_tz2 - dt
        with pytest.raises(TypeError, match=msg):
            ts_tz - ts_tz2

        # with dti
        with pytest.raises(TypeError, match=msg):
            dti - ts_tz
        with pytest.raises(TypeError, match=msg):
            dti_tz - ts
        with pytest.raises(TypeError, match=msg):
            dti_tz - ts_tz2

        result = dti_tz - dt_tz
        expected = TimedeltaIndex(["0 days", "1 days", "2 days"])
        tm.assert_index_equal(result, expected)

        result = dt_tz - dti_tz
        expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"])
        tm.assert_index_equal(result, expected)

        result = dti_tz - ts_tz
        expected = TimedeltaIndex(["0 days", "1 days", "2 days"])
        tm.assert_index_equal(result, expected)

        result = ts_tz - dti_tz
        expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"])
        tm.assert_index_equal(result, expected)

        result = td - td
        expected = Timedelta("0 days")
        _check(result, expected)

        result = dti_tz - td
        expected = DatetimeIndex(["20121231", "20130101", "20130102"], tz="US/Eastern")
        tm.assert_index_equal(result, expected)

    def test_dti_tdi_numeric_ops(self):
        """tdi +/- tdi and dti - tdi are elementwise arithmetic, not the
        set-like union/difference that +/- once meant for Index."""
        # These are normally union/diff set-like ops
        tdi = TimedeltaIndex(["1 days", pd.NaT, "2 days"], name="foo")
        dti = pd.date_range("20130101", periods=3, name="bar")

        result = tdi - tdi
        expected = TimedeltaIndex(["0 days", pd.NaT, "0 days"], name="foo")
        tm.assert_index_equal(result, expected)

        result = tdi + tdi
        expected = TimedeltaIndex(["2 days", pd.NaT, "4 days"], name="foo")
        tm.assert_index_equal(result, expected)

        result = dti - tdi  # name will be reset
        expected = DatetimeIndex(["20121231", pd.NaT, "20130101"])
        tm.assert_index_equal(result, expected)

    def test_addition_ops(self):
        """Valid/invalid addition combinations of Timestamp, Timedelta,
        TimedeltaIndex, and DatetimeIndex, including length mismatches."""
        # with datetimes/timedelta and tdi/dti
        tdi = TimedeltaIndex(["1 days", pd.NaT, "2 days"], name="foo")
        dti = pd.date_range("20130101", periods=3, name="bar")
        td = Timedelta("1 days")
        dt = Timestamp("20130101")

        result = tdi + dt
        expected = DatetimeIndex(["20130102", pd.NaT, "20130103"], name="foo")
        tm.assert_index_equal(result, expected)

        result = dt + tdi
        expected = DatetimeIndex(["20130102", pd.NaT, "20130103"], name="foo")
        tm.assert_index_equal(result, expected)

        result = td + tdi
        expected = TimedeltaIndex(["2 days", pd.NaT, "3 days"], name="foo")
        tm.assert_index_equal(result, expected)

        result = tdi + td
        expected = TimedeltaIndex(["2 days", pd.NaT, "3 days"], name="foo")
        tm.assert_index_equal(result, expected)

        # unequal length
        msg = "cannot add indices of unequal length"
        with pytest.raises(ValueError, match=msg):
            tdi + dti[0:1]
        with pytest.raises(ValueError, match=msg):
            tdi[0:1] + dti

        # random indexes
        msg = "Addition/subtraction of integers and integer-arrays"
        with pytest.raises(TypeError, match=msg):
            tdi + pd.Int64Index([1, 2, 3])

        # this is a union!
        # pytest.raises(TypeError, lambda : pd.Int64Index([1,2,3]) + tdi)

        result = tdi + dti  # name will be reset
        expected = DatetimeIndex(["20130102", pd.NaT, "20130105"])
        tm.assert_index_equal(result, expected)

        result = dti + tdi  # name will be reset
        expected = DatetimeIndex(["20130102", pd.NaT, "20130105"])
        tm.assert_index_equal(result, expected)

        result = dt + td
        expected = Timestamp("20130102")
        assert result == expected

        result = td + dt
        expected = Timestamp("20130102")
        assert result == expected

    # TODO: Needs more informative name, probably split up into
    # more targeted tests
    @pytest.mark.parametrize("freq", ["D", "B"])
    def test_timedelta(self, freq):
        """Shifting a date_range by timedelta round-trips; freq inference
        differs between calendar-day and business-day frequencies."""
        index = pd.date_range("1/1/2000", periods=50, freq=freq)

        shifted = index + timedelta(1)
        back = shifted + timedelta(-1)
        back = back._with_freq("infer")
        tm.assert_index_equal(index, back)

        if freq == "D":
            expected = pd.tseries.offsets.Day(1)
            assert index.freq == expected
            assert shifted.freq == expected
            assert back.freq == expected
        else:  # freq == 'B'
            # shifting off business days cannot preserve the 'B' freq
            assert index.freq == pd.tseries.offsets.BusinessDay(1)
            assert shifted.freq is None
            assert back.freq == pd.tseries.offsets.BusinessDay(1)

        result = index - timedelta(1)
        expected = index + timedelta(-1)
        tm.assert_index_equal(result, expected)

        # GH#4134, buggy with timedeltas
        rng = pd.date_range("2013", "2014")
        s = Series(rng)
        result1 = rng - pd.offsets.Hour(1)
        result2 = DatetimeIndex(s - np.timedelta64(100000000))
        result3 = rng - np.timedelta64(100000000)
        result4 = DatetimeIndex(s - pd.offsets.Hour(1))

        assert result1.freq == rng.freq
        result1 = result1._with_freq(None)
        tm.assert_index_equal(result1, result4)

        assert result3.freq == rng.freq
        result3 = result3._with_freq(None)
        tm.assert_index_equal(result2, result3)

    def test_tda_add_sub_index(self):
        """TimedeltaArray arithmetic with an Index returns an Index (the
        array defers to the index wrapper)."""
        # Check that TimedeltaArray defers to Index on arithmetic ops
        tdi = TimedeltaIndex(["1 days", pd.NaT, "2 days"])
        tda = tdi.array

        dti = pd.date_range("1999-12-31", periods=3, freq="D")

        result = tda + dti
        expected = tdi + dti
        tm.assert_index_equal(result, expected)

        result = tda + tdi
        expected = tdi + tdi
        tm.assert_index_equal(result, expected)

        result = tda - tdi
        expected = tdi - tdi
        tm.assert_index_equal(result, expected)

    def test_tda_add_dt64_object_array(self, box_with_array, tz_naive_fixture):
        """Adding an object-dtype array of datetimes casts the result back
        to datetime dtype (with a PerformanceWarning for the object path)."""
        # Result should be cast back to DatetimeArray
        box = box_with_array

        dti = pd.date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
        dti = dti._with_freq(None)
        tdi = dti - dti

        obj = tm.box_expected(tdi, box)
        other = tm.box_expected(dti, box)

        with tm.assert_produces_warning(PerformanceWarning):
            result = obj + other.astype(object)
        tm.assert_equal(result, other)

    # -------------------------------------------------------------
    # Binary operations TimedeltaIndex and timedelta-like

    def test_tdi_iadd_timedeltalike(self, two_hours, box_with_array):
        """In-place += with a timedelta-like shifts every element."""
        # only test adding/sub offsets as + is now numeric
        rng = timedelta_range("1 days", "10 days")
        expected = timedelta_range("1 days 02:00:00", "10 days 02:00:00", freq="D")

        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        orig_rng = rng
        rng += two_hours
        tm.assert_equal(rng, expected)
        if box_with_array is not pd.Index:
            # Check that operation is actually inplace
            tm.assert_equal(orig_rng, expected)

    def test_tdi_isub_timedeltalike(self, two_hours, box_with_array):
        """In-place -= with a timedelta-like shifts every element."""
        # only test adding/sub offsets as - is now numeric
        rng = timedelta_range("1 days", "10 days")
        expected = timedelta_range("0 days 22:00:00", "9 days 22:00:00")

        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        orig_rng = rng
        rng -= two_hours
        tm.assert_equal(rng, expected)
        if box_with_array is not pd.Index:
            # Check that operation is actually inplace
            tm.assert_equal(orig_rng, expected)

    # -------------------------------------------------------------

    def test_tdi_ops_attributes(self):
        """Arithmetic on a timedelta_range preserves/scales/negates its
        freq attribute appropriately; abs() drops it."""
        rng = timedelta_range("2 days", periods=5, freq="2D", name="x")

        result = rng + 1 * rng.freq
        exp = timedelta_range("4 days", periods=5, freq="2D", name="x")
        tm.assert_index_equal(result, exp)
        assert result.freq == "2D"

        result = rng - 2 * rng.freq
        exp = timedelta_range("-2 days", periods=5, freq="2D", name="x")
        tm.assert_index_equal(result, exp)
        assert result.freq == "2D"

        result = rng * 2
        exp = timedelta_range("4 days", periods=5, freq="4D", name="x")
        tm.assert_index_equal(result, exp)
        assert result.freq == "4D"

        result = rng / 2
        exp = timedelta_range("1 days", periods=5, freq="D", name="x")
        tm.assert_index_equal(result, exp)
        assert result.freq == "D"

        result = -rng
        exp = timedelta_range("-2 days", periods=5, freq="-2D", name="x")
        tm.assert_index_equal(result, exp)
        assert result.freq == "-2D"

        rng = pd.timedelta_range("-2 days", periods=5, freq="D", name="x")
        result = abs(rng)
        exp = TimedeltaIndex(
            ["2 days", "1 days", "0 days", "1 days", "2 days"], name="x"
        )
        tm.assert_index_equal(result, exp)
        assert result.freq is None
class TestAddSubNaTMasking:
    # TODO: parametrize over boxes

    def test_tdi_add_timestamp_nat_masking(self):
        """NaT positions stay NaT even when the non-NaT arithmetic at those
        positions would overflow."""
        # GH#17991 checking for overflow-masking with NaT
        tdinat = pd.to_timedelta(["24658 days 11:15:00", "NaT"])

        tsneg = Timestamp("1950-01-01")
        ts_neg_variants = [
            tsneg,
            tsneg.to_pydatetime(),
            tsneg.to_datetime64().astype("datetime64[ns]"),
            tsneg.to_datetime64().astype("datetime64[D]"),
        ]

        tspos = Timestamp("1980-01-01")
        ts_pos_variants = [
            tspos,
            tspos.to_pydatetime(),
            tspos.to_datetime64().astype("datetime64[ns]"),
            tspos.to_datetime64().astype("datetime64[D]"),
        ]

        for variant in ts_neg_variants + ts_pos_variants:
            res = tdinat + variant
            assert res[1] is pd.NaT

    def test_tdi_add_overflow(self):
        """int64 overflow in td64 arithmetic raises, but NaT entries must
        not trigger spurious overflow errors."""
        # See GH#14068
        # preliminary test scalar analogue of vectorized tests below
        # TODO: Make raised error message more informative and test
        with pytest.raises(OutOfBoundsDatetime, match="10155196800000000000"):
            pd.to_timedelta(106580, "D") + Timestamp("2000")
        with pytest.raises(OutOfBoundsDatetime, match="10155196800000000000"):
            Timestamp("2000") + pd.to_timedelta(106580, "D")

        # smallest int64 value that is not the NaT sentinel
        _NaT = int(pd.NaT) + 1
        msg = "Overflow in int64 addition"
        with pytest.raises(OverflowError, match=msg):
            pd.to_timedelta([106580], "D") + Timestamp("2000")
        with pytest.raises(OverflowError, match=msg):
            Timestamp("2000") + pd.to_timedelta([106580], "D")
        with pytest.raises(OverflowError, match=msg):
            pd.to_timedelta([_NaT]) - Timedelta("1 days")
        with pytest.raises(OverflowError, match=msg):
            pd.to_timedelta(["5 days", _NaT]) - Timedelta("1 days")
        with pytest.raises(OverflowError, match=msg):
            (
                pd.to_timedelta([_NaT, "5 days", "1 hours"])
                - pd.to_timedelta(["7 seconds", _NaT, "4 hours"])
            )

        # These should not overflow!
        exp = TimedeltaIndex([pd.NaT])
        result = pd.to_timedelta([pd.NaT]) - Timedelta("1 days")
        tm.assert_index_equal(result, exp)

        exp = TimedeltaIndex(["4 days", pd.NaT])
        result = pd.to_timedelta(["5 days", pd.NaT]) - Timedelta("1 days")
        tm.assert_index_equal(result, exp)

        exp = TimedeltaIndex([pd.NaT, pd.NaT, "5 hours"])
        result = pd.to_timedelta([pd.NaT, "5 days", "1 hours"]) + pd.to_timedelta(
            ["7 seconds", pd.NaT, "4 hours"]
        )
        tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
    def test_timedelta_ops_with_missing_values(self):
        """Timedelta arithmetic (scalar/Series/DataFrame, all pairings)
        propagates NaT, and plain NaN operands raise TypeError."""
        # setup
        s1 = pd.to_timedelta(Series(["00:00:01"]))
        s2 = pd.to_timedelta(Series(["00:00:02"]))

        msg = r"dtype datetime64\[ns\] cannot be converted to timedelta64\[ns\]"
        with pytest.raises(TypeError, match=msg):
            # Passing datetime64-dtype data to TimedeltaIndex is no longer
            # supported GH#29794
            pd.to_timedelta(Series([pd.NaT]))

        sn = pd.to_timedelta(Series([pd.NaT], dtype="m8[ns]"))

        df1 = DataFrame(["00:00:01"]).apply(pd.to_timedelta)
        df2 = DataFrame(["00:00:02"]).apply(pd.to_timedelta)
        with pytest.raises(TypeError, match=msg):
            # Passing datetime64-dtype data to TimedeltaIndex is no longer
            # supported GH#29794
            DataFrame([pd.NaT]).apply(pd.to_timedelta)

        dfn = DataFrame([pd.NaT.value]).apply(pd.to_timedelta)

        scalar1 = pd.to_timedelta("00:00:01")
        scalar2 = pd.to_timedelta("00:00:02")
        timedelta_NaT = pd.to_timedelta("NaT")

        # scalar-scalar
        actual = scalar1 + scalar1
        assert actual == scalar2
        actual = scalar2 - scalar1
        assert actual == scalar1

        # Series-Series and Series-scalar
        actual = s1 + s1
        tm.assert_series_equal(actual, s2)
        actual = s2 - s1
        tm.assert_series_equal(actual, s1)

        actual = s1 + scalar1
        tm.assert_series_equal(actual, s2)
        actual = scalar1 + s1
        tm.assert_series_equal(actual, s2)
        actual = s2 - scalar1
        tm.assert_series_equal(actual, s1)
        actual = -scalar1 + s2
        tm.assert_series_equal(actual, s1)

        # NaT timedelta propagates through Series ops
        actual = s1 + timedelta_NaT
        tm.assert_series_equal(actual, sn)
        actual = timedelta_NaT + s1
        tm.assert_series_equal(actual, sn)
        actual = s1 - timedelta_NaT
        tm.assert_series_equal(actual, sn)
        actual = -timedelta_NaT + s1
        tm.assert_series_equal(actual, sn)

        # plain NaN is not a valid timedelta operand
        msg = "unsupported operand type"
        with pytest.raises(TypeError, match=msg):
            s1 + np.nan
        with pytest.raises(TypeError, match=msg):
            np.nan + s1
        with pytest.raises(TypeError, match=msg):
            s1 - np.nan
        with pytest.raises(TypeError, match=msg):
            -np.nan + s1

        actual = s1 + pd.NaT
        tm.assert_series_equal(actual, sn)
        actual = s2 - pd.NaT
        tm.assert_series_equal(actual, sn)

        # DataFrame pairings
        actual = s1 + df1
        tm.assert_frame_equal(actual, df2)
        actual = s2 - df1
        tm.assert_frame_equal(actual, df1)
        actual = df1 + s1
        tm.assert_frame_equal(actual, df2)
        actual = df2 - s1
        tm.assert_frame_equal(actual, df1)

        actual = df1 + df1
        tm.assert_frame_equal(actual, df2)
        actual = df2 - df1
        tm.assert_frame_equal(actual, df1)

        actual = df1 + scalar1
        tm.assert_frame_equal(actual, df2)
        actual = df2 - scalar1
        tm.assert_frame_equal(actual, df1)

        actual = df1 + timedelta_NaT
        tm.assert_frame_equal(actual, dfn)
        actual = df1 - timedelta_NaT
        tm.assert_frame_equal(actual, dfn)

        msg = "cannot subtract a datelike from|unsupported operand type"
        with pytest.raises(TypeError, match=msg):
            df1 + np.nan
        with pytest.raises(TypeError, match=msg):
            df1 - np.nan

        actual = df1 + pd.NaT  # NaT is datetime, not timedelta
        tm.assert_frame_equal(actual, dfn)
        actual = df1 - pd.NaT
        tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
    def test_operators_timedelta64(self):
        """Mixed datetime/timedelta Series arithmetic: dtype bookkeeping and
        round-trips for datetime +/- timedelta."""
        # series ops
        v1 = pd.date_range("2012-1-1", periods=3, freq="D")
        v2 = pd.date_range("2012-1-2", periods=3, freq="D")
        rs = Series(v2) - Series(v1)
        # one day, expressed in nanoseconds
        xp = Series(1e9 * 3600 * 24, rs.index).astype("int64").astype("timedelta64[ns]")
        tm.assert_series_equal(rs, xp)
        assert rs.dtype == "timedelta64[ns]"

        df = DataFrame({"A": v1})
        td = Series([timedelta(days=i) for i in range(3)])
        assert td.dtype == "timedelta64[ns]"

        # series on the rhs
        result = df["A"] - df["A"].shift()
        assert result.dtype == "timedelta64[ns]"

        result = df["A"] + td
        assert result.dtype == "M8[ns]"

        # scalar Timestamp on rhs
        maxa = df["A"].max()
        assert isinstance(maxa, Timestamp)

        resultb = df["A"] - df["A"].max()
        assert resultb.dtype == "timedelta64[ns]"

        # timestamp on lhs
        result = resultb + df["A"]
        values = [Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
        expected = Series(values, name="A")
        tm.assert_series_equal(result, expected)

        # datetimes on rhs
        result = df["A"] - datetime(2001, 1, 1)
        expected = Series([timedelta(days=4017 + i) for i in range(3)], name="A")
        tm.assert_series_equal(result, expected)
        assert result.dtype == "m8[ns]"

        d = datetime(2001, 1, 1, 3, 4)
        resulta = df["A"] - d
        assert resulta.dtype == "m8[ns]"

        # roundtrip
        resultb = resulta + d
        tm.assert_series_equal(df["A"], resultb)

        # timedeltas on rhs
        td = timedelta(days=1)
        resulta = df["A"] + td
        resultb = resulta - td
        tm.assert_series_equal(resultb, df["A"])
        assert resultb.dtype == "M8[ns]"

        # roundtrip
        td = timedelta(minutes=5, seconds=3)
        resulta = df["A"] + td
        resultb = resulta - td
        tm.assert_series_equal(df["A"], resultb)
        assert resultb.dtype == "M8[ns]"

        # inplace
        value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
        rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
        assert rs[2] == value
    def test_timedelta64_ops_nat(self):
        """NaT propagates through +, -, *, / on timedelta Series; numeric
        scalars scale the non-NaT entries."""
        # GH 11349
        timedelta_series = Series([NaT, Timedelta("1s")])
        nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]")
        single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]")

        # subtraction
        tm.assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
        tm.assert_series_equal(-NaT + timedelta_series, nat_series_dtype_timedelta)

        tm.assert_series_equal(
            timedelta_series - single_nat_dtype_timedelta, nat_series_dtype_timedelta
        )
        tm.assert_series_equal(
            -single_nat_dtype_timedelta + timedelta_series, nat_series_dtype_timedelta
        )

        # addition
        tm.assert_series_equal(
            nat_series_dtype_timedelta + NaT, nat_series_dtype_timedelta
        )
        tm.assert_series_equal(
            NaT + nat_series_dtype_timedelta, nat_series_dtype_timedelta
        )

        tm.assert_series_equal(
            nat_series_dtype_timedelta + single_nat_dtype_timedelta,
            nat_series_dtype_timedelta,
        )
        tm.assert_series_equal(
            single_nat_dtype_timedelta + nat_series_dtype_timedelta,
            nat_series_dtype_timedelta,
        )

        tm.assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
        tm.assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)

        tm.assert_series_equal(
            timedelta_series + single_nat_dtype_timedelta, nat_series_dtype_timedelta
        )
        tm.assert_series_equal(
            single_nat_dtype_timedelta + timedelta_series, nat_series_dtype_timedelta
        )

        tm.assert_series_equal(
            nat_series_dtype_timedelta + NaT, nat_series_dtype_timedelta
        )
        tm.assert_series_equal(
            NaT + nat_series_dtype_timedelta, nat_series_dtype_timedelta
        )

        tm.assert_series_equal(
            nat_series_dtype_timedelta + single_nat_dtype_timedelta,
            nat_series_dtype_timedelta,
        )
        tm.assert_series_equal(
            single_nat_dtype_timedelta + nat_series_dtype_timedelta,
            nat_series_dtype_timedelta,
        )

        # multiplication
        tm.assert_series_equal(
            nat_series_dtype_timedelta * 1.0, nat_series_dtype_timedelta
        )
        tm.assert_series_equal(
            1.0 * nat_series_dtype_timedelta, nat_series_dtype_timedelta
        )

        tm.assert_series_equal(timedelta_series * 1, timedelta_series)
        tm.assert_series_equal(1 * timedelta_series, timedelta_series)

        tm.assert_series_equal(timedelta_series * 1.5, Series([NaT, Timedelta("1.5s")]))
        tm.assert_series_equal(1.5 * timedelta_series, Series([NaT, Timedelta("1.5s")]))

        tm.assert_series_equal(timedelta_series * np.nan, nat_series_dtype_timedelta)
        tm.assert_series_equal(np.nan * timedelta_series, nat_series_dtype_timedelta)

        # division
        tm.assert_series_equal(timedelta_series / 2, Series([NaT, Timedelta("0.5s")]))
        tm.assert_series_equal(timedelta_series / 2.0, Series([NaT, Timedelta("0.5s")]))
        tm.assert_series_equal(timedelta_series / np.nan, nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
    def test_td64arr_sub_timestamp_raises(self, box_with_array):
        """td64 array - Timestamp is undefined and must raise TypeError."""
        idx = TimedeltaIndex(["1 day", "2 day"])
        idx = tm.box_expected(idx, box_with_array)
        # message varies across box types, so match any of the known variants
        msg = (
            "cannot subtract a datelike from|"
            "Could not operate|"
            "cannot perform operation"
        )
        with pytest.raises(TypeError, match=msg):
            idx - Timestamp("2011-01-01")
    def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
        """td64 array + Timestamp (naive or tz-aware) gives datetime64 values."""
        # GH#23215
        # TODO: parametrize over scalar datetime types?
        tz = tz_naive_fixture
        other = Timestamp("2011-01-01", tz=tz)
        idx = TimedeltaIndex(["1 day", "2 day"])
        expected = DatetimeIndex(["2011-01-02", "2011-01-03"], tz=tz)
        idx = tm.box_expected(idx, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        # addition is commutative with a Timestamp scalar
        result = idx + other
        tm.assert_equal(result, expected)
        result = other + idx
        tm.assert_equal(result, expected)
    @pytest.mark.parametrize(
        "ts",
        [
            Timestamp("2012-01-01"),
            Timestamp("2012-01-01").to_pydatetime(),
            Timestamp("2012-01-01").to_datetime64(),
        ],
    )
    def test_td64arr_add_sub_datetimelike_scalar(self, ts, box_with_array):
        """Add/subtract a datetime-like scalar: + works both ways, ts - tdarr
        works, but tdarr - ts must raise."""
        # GH#11925, GH#29558
        tdi = timedelta_range("1 day", periods=3)
        expected = pd.date_range("2012-01-02", periods=3)
        tdarr = tm.box_expected(tdi, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(ts + tdarr, expected)
        tm.assert_equal(tdarr + ts, expected)
        # ts - tdarr equals ts + (-tdarr)
        expected2 = pd.date_range("2011-12-31", periods=3, freq="-1D")
        expected2 = tm.box_expected(expected2, box_with_array)
        tm.assert_equal(ts - tdarr, expected2)
        tm.assert_equal(ts + (-tdarr), expected2)
        msg = "cannot subtract a datelike"
        with pytest.raises(TypeError, match=msg):
            tdarr - ts
    def test_tdi_sub_dt64_array(self, box_with_array):
        """td64 - dt64 ndarray raises; dt64 ndarray - td64 works via __rsub__."""
        dti = pd.date_range("2016-01-01", periods=3)
        tdi = dti - dti.shift(1)
        dtarr = dti.values
        expected = DatetimeIndex(dtarr) - tdi
        tdi = tm.box_expected(tdi, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        msg = "cannot subtract a datelike from"
        with pytest.raises(TypeError, match=msg):
            tdi - dtarr
        # TimedeltaIndex.__rsub__
        result = dtarr - tdi
        tm.assert_equal(result, expected)
    def test_tdi_add_dt64_array(self, box_with_array):
        """td64 + dt64 ndarray is commutative and yields datetime64 values."""
        dti = pd.date_range("2016-01-01", periods=3)
        tdi = dti - dti.shift(1)
        dtarr = dti.values
        expected = DatetimeIndex(dtarr) + tdi
        tdi = tm.box_expected(tdi, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = tdi + dtarr
        tm.assert_equal(result, expected)
        result = dtarr + tdi
        tm.assert_equal(result, expected)
    def test_td64arr_add_datetime64_nat(self, box_with_array):
        """Adding np.datetime64("NaT") to a td64 array gives an all-NaT
        datetime64 result, in both operand orders."""
        # GH#23215
        other = np.datetime64("NaT")
        tdi = timedelta_range("1 day", periods=3)
        expected = DatetimeIndex(["NaT", "NaT", "NaT"])
        tdser = tm.box_expected(tdi, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(tdser + other, expected)
        tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
# Invalid __add__/__sub__ operations
    @pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
    @pytest.mark.parametrize("tdi_freq", [None, "H"])
    def test_td64arr_sub_periodlike(self, box_with_array, tdi_freq, pi_freq):
        """Subtracting a PeriodIndex or a Period scalar from td64 raises."""
        # GH#20049 subtracting PeriodIndex should raise TypeError
        tdi = TimedeltaIndex(["1 hours", "2 hours"], freq=tdi_freq)
        dti = Timestamp("2018-03-07 17:16:40") + tdi
        pi = dti.to_period(pi_freq)
        # TODO: parametrize over box for pi?
        tdi = tm.box_expected(tdi, box_with_array)
        msg = "cannot subtract|unsupported operand type"
        with pytest.raises(TypeError, match=msg):
            tdi - pi
        # GH#13078 subtraction of Period scalar not supported
        with pytest.raises(TypeError, match=msg):
            tdi - pi[0]
    @pytest.mark.parametrize(
        "other",
        [
            # GH#12624 for str case
            "a",
            # GH#19123
            1,
            1.5,
            np.array(2),
        ],
    )
    def test_td64arr_addsub_numeric_scalar_invalid(self, box_with_array, other):
        """Adding/subtracting numeric or string scalars to a td64 array raises."""
        # vector-like others are tested in test_td64arr_add_sub_numeric_arr_invalid
        tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
        tdarr = tm.box_expected(tdser, box_with_array)
        assert_invalid_addsub_type(tdarr, other)
    @pytest.mark.parametrize(
        "vec",
        [
            np.array([1, 2, 3]),
            pd.Index([1, 2, 3]),
            Series([1, 2, 3]),
            DataFrame([[1, 2, 3]]),
        ],
        ids=lambda x: type(x).__name__,
    )
    def test_td64arr_addsub_numeric_arr_invalid(
        self, box_with_array, vec, any_real_dtype
    ):
        """Adding/subtracting real-dtype numeric array-likes to td64 raises."""
        tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
        tdarr = tm.box_expected(tdser, box_with_array)
        # exercise every real dtype for the numeric operand
        vector = vec.astype(any_real_dtype)
        assert_invalid_addsub_type(tdarr, vector)
    def test_td64arr_add_sub_int(self, box_with_array, one):
        """Integer add/sub with td64 raises, including in-place += and -=."""
        # Variants of `one` for #19012, deprecated GH#22535
        rng = timedelta_range("1 days 09:00:00", freq="H", periods=10)
        tdarr = tm.box_expected(rng, box_with_array)
        msg = "Addition/subtraction of integers"
        assert_invalid_addsub_type(tdarr, one, msg)
        # TODO: get inplace ops into assert_invalid_addsub_type
        with pytest.raises(TypeError, match=msg):
            tdarr += one
        with pytest.raises(TypeError, match=msg):
            tdarr -= one
    def test_td64arr_add_sub_integer_array(self, box_with_array):
        """Integer-array add/sub with td64 raises (not treated as m8[ns])."""
        # GH#19959, deprecated GH#22535
        # GH#22696 for DataFrame case, check that we don't dispatch to numpy
        # implementation, which treats int64 as m8[ns]
        box = box_with_array
        xbox = np.ndarray if box is pd.array else box
        rng = timedelta_range("1 days 09:00:00", freq="H", periods=3)
        tdarr = tm.box_expected(rng, box)
        other = tm.box_expected([4, 3, 2], xbox)
        msg = "Addition/subtraction of integers and integer-arrays"
        assert_invalid_addsub_type(tdarr, other, msg)
    def test_td64arr_addsub_integer_array_no_freq(self, box_with_array):
        """Integer-array add/sub raises even when the td64 index has no freq."""
        # GH#19959
        box = box_with_array
        xbox = np.ndarray if box is pd.array else box
        tdi = TimedeltaIndex(["1 Day", "NaT", "3 Hours"])
        tdarr = tm.box_expected(tdi, box)
        other = tm.box_expected([14, -1, 16], xbox)
        msg = "Addition/subtraction of integers"
        assert_invalid_addsub_type(tdarr, other, msg)
# ------------------------------------------------------------------
# Operations with timedelta-like others
    def test_td64arr_add_td64_array(self, box_with_array):
        """td64 box + td64 ndarray is commutative; x + x == 2 * x."""
        box = box_with_array
        dti = pd.date_range("2016-01-01", periods=3)
        tdi = dti - dti.shift(1)
        tdarr = tdi.values
        expected = 2 * tdi
        tdi = tm.box_expected(tdi, box)
        expected = tm.box_expected(expected, box)
        result = tdi + tdarr
        tm.assert_equal(result, expected)
        result = tdarr + tdi
        tm.assert_equal(result, expected)
    def test_td64arr_sub_td64_array(self, box_with_array):
        """td64 box - td64 ndarray works both ways; x - x == 0 * x."""
        box = box_with_array
        dti = pd.date_range("2016-01-01", periods=3)
        tdi = dti - dti.shift(1)
        tdarr = tdi.values
        expected = 0 * tdi
        tdi = tm.box_expected(tdi, box)
        expected = tm.box_expected(expected, box)
        result = tdi - tdarr
        tm.assert_equal(result, expected)
        result = tdarr - tdi
        tm.assert_equal(result, expected)
    def test_td64arr_add_sub_tdi(self, box_with_array, names):
        """TimedeltaIndex +/- td64 Series: dtype stays timedelta64[ns] and the
        result name follows the `names` fixture convention."""
        # GH#17250 make sure result dtype is correct
        # GH#19043 make sure names are propagated correctly
        box = box_with_array
        if box is pd.DataFrame and names[1] != names[0]:
            pytest.skip(
                "Name propagation for DataFrame does not behave like "
                "it does for Index/Series"
            )
        tdi = TimedeltaIndex(["0 days", "1 day"], name=names[0])
        # unwrap to ndarray for array-like boxes, which have no name attribute
        tdi = np.array(tdi) if box in [tm.to_array, pd.array] else tdi
        ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
        expected = Series(
            [Timedelta(hours=3), Timedelta(days=1, hours=4)], name=names[2]
        )
        ser = tm.box_expected(ser, box)
        expected = tm.box_expected(expected, box)
        result = tdi + ser
        tm.assert_equal(result, expected)
        assert_dtype(result, "timedelta64[ns]")
        result = ser + tdi
        tm.assert_equal(result, expected)
        assert_dtype(result, "timedelta64[ns]")
        # subtraction: ser - tdi is the negation of tdi - ser
        expected = Series(
            [Timedelta(hours=-3), Timedelta(days=1, hours=-4)], name=names[2]
        )
        expected = tm.box_expected(expected, box)
        result = tdi - ser
        tm.assert_equal(result, expected)
        assert_dtype(result, "timedelta64[ns]")
        result = ser - tdi
        tm.assert_equal(result, -expected)
        assert_dtype(result, "timedelta64[ns]")
    def test_td64arr_add_sub_td64_nat(self, box_with_array):
        """+/- np.timedelta64("NaT") propagates NaT across the whole array,
        in both operand orders."""
        # GH#23320 special handling for timedelta64("NaT")
        box = box_with_array
        tdi = TimedeltaIndex([NaT, Timedelta("1s")])
        other = np.timedelta64("NaT")
        expected = TimedeltaIndex(["NaT"] * 2)
        obj = tm.box_expected(tdi, box)
        expected = tm.box_expected(expected, box)
        result = obj + other
        tm.assert_equal(result, expected)
        result = other + obj
        tm.assert_equal(result, expected)
        result = obj - other
        tm.assert_equal(result, expected)
        result = other - obj
        tm.assert_equal(result, expected)
    def test_td64arr_sub_NaT(self, box_with_array):
        """td64 array - pd.NaT yields an all-NaT timedelta64 result."""
        # GH#18808
        box = box_with_array
        ser = Series([NaT, Timedelta("1s")])
        expected = Series([NaT, NaT], dtype="timedelta64[ns]")
        ser = tm.box_expected(ser, box)
        expected = tm.box_expected(expected, box)
        res = ser - pd.NaT
        tm.assert_equal(res, expected)
    def test_td64arr_add_timedeltalike(self, two_hours, box_with_array):
        """Adding a timedelta-like scalar (two_hours fixture) shifts every
        element; addition is commutative."""
        # only test adding/sub offsets as + is now numeric
        # GH#10699 for Tick cases
        box = box_with_array
        rng = timedelta_range("1 days", "10 days")
        expected = timedelta_range("1 days 02:00:00", "10 days 02:00:00", freq="D")
        rng = tm.box_expected(rng, box)
        expected = tm.box_expected(expected, box)
        result = rng + two_hours
        tm.assert_equal(result, expected)
        result = two_hours + rng
        tm.assert_equal(result, expected)
    def test_td64arr_sub_timedeltalike(self, two_hours, box_with_array):
        """Subtracting a timedelta-like scalar shifts every element; the
        reflected subtraction is the negation."""
        # only test adding/sub offsets as - is now numeric
        # GH#10699 for Tick cases
        box = box_with_array
        rng = timedelta_range("1 days", "10 days")
        expected = timedelta_range("0 days 22:00:00", "9 days 22:00:00")
        rng = tm.box_expected(rng, box)
        expected = tm.box_expected(expected, box)
        result = rng - two_hours
        tm.assert_equal(result, expected)
        result = two_hours - rng
        tm.assert_equal(result, -expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
    def test_td64arr_add_offset_index(self, names, box_with_array):
        """Adding an Index of DateOffsets applies each offset elementwise,
        emitting a PerformanceWarning for the object-dtype loop."""
        # GH#18849, GH#19744
        box = box_with_array
        if box is pd.DataFrame and names[1] != names[0]:
            pytest.skip(
                "Name propagation for DataFrame does not behave like "
                "it does for Index/Series"
            )
        tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0])
        other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], name=names[1])
        # array-like boxes have no name, so drop it from the other operand
        other = np.array(other) if box in [tm.to_array, pd.array] else other
        expected = TimedeltaIndex(
            [tdi[n] + other[n] for n in range(len(tdi))], freq="infer", name=names[2]
        )
        tdi = tm.box_expected(tdi, box)
        expected = tm.box_expected(expected, box)
        with tm.assert_produces_warning(PerformanceWarning):
            res = tdi + other
        tm.assert_equal(res, expected)
        with tm.assert_produces_warning(PerformanceWarning):
            res2 = other + tdi
        tm.assert_equal(res2, expected)
    # TODO: combine with test_td64arr_add_offset_index by parametrizing
    # over second box?
    def test_td64arr_add_offset_array(self, box_with_array):
        """Adding an ndarray of DateOffsets applies each offset elementwise,
        with a PerformanceWarning; addition is commutative."""
        # GH#18849
        box = box_with_array
        tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"])
        other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
        expected = TimedeltaIndex(
            [tdi[n] + other[n] for n in range(len(tdi))], freq="infer"
        )
        tdi = tm.box_expected(tdi, box)
        expected = tm.box_expected(expected, box)
        with tm.assert_produces_warning(PerformanceWarning):
            res = tdi + other
        tm.assert_equal(res, expected)
        with tm.assert_produces_warning(PerformanceWarning):
            res2 = other + tdi
        tm.assert_equal(res2, expected)
    def test_td64arr_sub_offset_index(self, names, box_with_array):
        """Subtracting an Index of DateOffsets applies each offset elementwise,
        with a PerformanceWarning; result box/name depend on the input box."""
        # GH#18824, GH#19744
        box = box_with_array
        # array-like boxes produce an Index result and drop the td64 name
        xbox = box if box not in [tm.to_array, pd.array] else pd.Index
        exname = names[2] if box not in [tm.to_array, pd.array] else names[1]
        if box is pd.DataFrame and names[1] != names[0]:
            pytest.skip(
                "Name propagation for DataFrame does not behave like "
                "it does for Index/Series"
            )
        tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0])
        other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], name=names[1])
        expected = TimedeltaIndex(
            [tdi[n] - other[n] for n in range(len(tdi))], freq="infer", name=exname
        )
        tdi = tm.box_expected(tdi, box)
        expected = tm.box_expected(expected, xbox)
        with tm.assert_produces_warning(PerformanceWarning):
            res = tdi - other
        tm.assert_equal(res, expected)
    def test_td64arr_sub_offset_array(self, box_with_array):
        """Subtracting an ndarray of DateOffsets applies each offset
        elementwise and warns about the object-dtype loop."""
        # GH#18824
        tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"])
        other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
        expected = TimedeltaIndex(
            [tdi[n] - other[n] for n in range(len(tdi))], freq="infer"
        )
        tdi = tm.box_expected(tdi, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        with tm.assert_produces_warning(PerformanceWarning):
            res = tdi - other
        tm.assert_equal(res, expected)
    def test_td64arr_with_offset_series(self, names, box_with_array):
        """td64 box +/- Series of DateOffsets: elementwise result with a
        PerformanceWarning; the expected result name depends on the box."""
        # GH#18849
        box = box_with_array
        # non-DataFrame boxes produce a Series result
        box2 = Series if box in [pd.Index, tm.to_array, pd.array] else box
        if box is pd.DataFrame:
            # Since we are operating with a DataFrame and a non-DataFrame,
            # the non-DataFrame is cast to Series and its name ignored.
            exname = names[0]
        elif box in [tm.to_array, pd.array]:
            exname = names[1]
        else:
            exname = names[2]
        tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0])
        other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], name=names[1])
        expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))], name=exname)
        obj = tm.box_expected(tdi, box)
        expected_add = tm.box_expected(expected_add, box2)
        with tm.assert_produces_warning(PerformanceWarning):
            res = obj + other
        tm.assert_equal(res, expected_add)
        with tm.assert_produces_warning(PerformanceWarning):
            res2 = other + obj
        tm.assert_equal(res2, expected_add)
        expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))], name=exname)
        expected_sub = tm.box_expected(expected_sub, box2)
        with tm.assert_produces_warning(PerformanceWarning):
            res3 = obj - other
        tm.assert_equal(res3, expected_sub)
    @pytest.mark.parametrize("obox", [np.array, pd.Index, pd.Series])
    def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_with_array):
        """Anchored offsets (e.g. MonthEnd) cannot be added to td64: all four
        operand orders warn with PerformanceWarning, then raise TypeError."""
        # GH#18824
        tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"])
        tdi = tm.box_expected(tdi, box_with_array)
        anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
        # addition/subtraction ops with anchored offsets should issue
        # a PerformanceWarning and _then_ raise a TypeError.
        msg = "has incorrect type|cannot add the type MonthEnd"
        with pytest.raises(TypeError, match=msg):
            with tm.assert_produces_warning(PerformanceWarning):
                tdi + anchored
        with pytest.raises(TypeError, match=msg):
            with tm.assert_produces_warning(PerformanceWarning):
                anchored + tdi
        with pytest.raises(TypeError, match=msg):
            with tm.assert_produces_warning(PerformanceWarning):
                tdi - anchored
        with pytest.raises(TypeError, match=msg):
            with tm.assert_produces_warning(PerformanceWarning):
                anchored - tdi
# ------------------------------------------------------------------
# Unsorted
    def test_td64arr_add_sub_object_array(self, box_with_array):
        """Mixed object array (Timedelta, offset, Timestamp): addition and
        reflected subtraction work elementwise (with PerformanceWarning);
        direct subtraction raises because of the Timestamp entry."""
        box = box_with_array
        xbox = np.ndarray if box is pd.array else box
        tdi = pd.timedelta_range("1 day", periods=3, freq="D")
        tdarr = tm.box_expected(tdi, box)
        other = np.array(
            [Timedelta(days=1), pd.offsets.Day(2), Timestamp("2000-01-04")]
        )
        with tm.assert_produces_warning(PerformanceWarning):
            result = tdarr + other
        expected = pd.Index(
            [Timedelta(days=2), Timedelta(days=4), Timestamp("2000-01-07")]
        )
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)
        # tdarr - Timestamp is invalid, so the forward subtraction raises
        msg = "unsupported operand type|cannot subtract a datelike"
        with pytest.raises(TypeError, match=msg):
            with tm.assert_produces_warning(PerformanceWarning):
                tdarr - other
        with tm.assert_produces_warning(PerformanceWarning):
            result = other - tdarr
        expected = pd.Index([Timedelta(0), Timedelta(0), Timestamp("2000-01-01")])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)
class TestTimedeltaArraylikeMulDivOps:
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
    def test_td64arr_mul_tdlike_scalar_raises(self, two_hours, box_with_array):
        """td64 * timedelta-like scalar is undefined and must raise TypeError."""
        rng = timedelta_range("1 days", "10 days", name="foo")
        rng = tm.box_expected(rng, box_with_array)
        msg = "argument must be an integer|cannot use operands with types dtype"
        with pytest.raises(TypeError, match=msg):
            rng * two_hours
    def test_tdi_mul_int_array_zerodim(self, box_with_array):
        """Multiplying by a 0-dim integer ndarray behaves like a scalar."""
        rng5 = np.arange(5, dtype="int64")
        idx = TimedeltaIndex(rng5)
        expected = TimedeltaIndex(rng5 * 5)
        idx = tm.box_expected(idx, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = idx * np.array(5, dtype="int64")
        tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_with_array):
rng5 = np.arange(5, dtype="int64")
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * rng5
tm.assert_equal(result, expected)
    def test_tdi_mul_int_series(self, box_with_array):
        """td64 box * int Series multiplies elementwise; non-frame boxes
        upcast the result to Series."""
        box = box_with_array
        xbox = pd.Series if box in [pd.Index, tm.to_array, pd.array] else box
        idx = TimedeltaIndex(np.arange(5, dtype="int64"))
        expected = TimedeltaIndex(np.arange(5, dtype="int64") ** 2)
        idx = tm.box_expected(idx, box)
        expected = tm.box_expected(expected, xbox)
        result = idx * Series(np.arange(5, dtype="int64"))
        tm.assert_equal(result, expected)
    def test_tdi_mul_float_series(self, box_with_array):
        """td64 box * float Series multiplies elementwise; non-frame boxes
        upcast the result to Series."""
        box = box_with_array
        xbox = pd.Series if box in [pd.Index, tm.to_array, pd.array] else box
        idx = TimedeltaIndex(np.arange(5, dtype="int64"))
        idx = tm.box_expected(idx, box)
        rng5f = np.arange(5, dtype="float64")
        expected = TimedeltaIndex(rng5f * (rng5f + 1.0))
        expected = tm.box_expected(expected, xbox)
        result = idx * Series(rng5f + 1.0)
        tm.assert_equal(result, expected)
    # TODO: Put Series/DataFrame in others?
    @pytest.mark.parametrize(
        "other",
        [
            np.arange(1, 11),
            pd.Int64Index(range(1, 11)),
            pd.UInt64Index(range(1, 11)),
            pd.Float64Index(range(1, 11)),
            pd.RangeIndex(1, 11),
        ],
        ids=lambda x: type(x).__name__,
    )
    def test_tdi_rmul_arraylike(self, other, box_with_array):
        """Reflected multiplication by various numeric array-likes is
        elementwise and commutative; result box comes from get_upcast_box."""
        box = box_with_array
        xbox = get_upcast_box(box, other)
        tdi = TimedeltaIndex(["1 Day"] * 10)
        expected = timedelta_range("1 days", "10 days")
        # multiplication result has no inferable freq
        expected._data.freq = None
        tdi = tm.box_expected(tdi, box)
        expected = tm.box_expected(expected, xbox)
        result = other * tdi
        tm.assert_equal(result, expected)
        commute = tdi * other
        tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__, __rdiv__
    def test_td64arr_div_nat_invalid(self, box_with_array):
        """Division involving pd.NaT raises TypeError in either direction."""
        # don't allow division by NaT (maybe could in the future)
        rng = timedelta_range("1 days", "10 days", name="foo")
        rng = tm.box_expected(rng, box_with_array)
        with pytest.raises(TypeError, match="unsupported operand type"):
            rng / pd.NaT
        with pytest.raises(TypeError, match="Cannot divide NaTType by"):
            pd.NaT / rng
    def test_td64arr_div_td64nat(self, box_with_array):
        """Dividing by np.timedelta64("NaT") yields all-NaN floats, both ways."""
        # GH#23829
        box = box_with_array
        xbox = np.ndarray if box is pd.array else box
        rng = timedelta_range("1 days", "10 days")
        rng = tm.box_expected(rng, box)
        other = np.timedelta64("NaT")
        expected = np.array([np.nan] * 10)
        expected = tm.box_expected(expected, xbox)
        result = rng / other
        tm.assert_equal(result, expected)
        result = other / rng
        tm.assert_equal(result, expected)
def test_td64arr_div_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
result = idx / 1
tm.assert_equal(result, idx)
with pytest.raises(TypeError, match="Cannot divide"):
# GH#23829
1 / idx
    def test_td64arr_div_tdlike_scalar(self, two_hours, box_with_array):
        """td64 / timedelta-like scalar yields float64; the reflected
        division is the elementwise reciprocal."""
        # GH#20088, GH#22163 ensure DataFrame returns correct dtype
        box = box_with_array
        xbox = np.ndarray if box is pd.array else box
        rng = timedelta_range("1 days", "10 days", name="foo")
        expected = pd.Float64Index((np.arange(10) + 1) * 12, name="foo")
        rng = tm.box_expected(rng, box)
        expected = tm.box_expected(expected, xbox)
        result = rng / two_hours
        tm.assert_equal(result, expected)
        result = two_hours / rng
        expected = 1 / expected
        tm.assert_equal(result, expected)
    @pytest.mark.parametrize("m", [1, 3, 10])
    @pytest.mark.parametrize("unit", ["D", "h", "m", "s", "ms", "us", "ns"])
    def test_td64arr_div_td64_scalar(self, m, unit, box_with_array):
        """Division by an np.timedelta64 scalar (and its reflection) matches
        elementwise scalar division, across units and magnitudes."""
        box = box_with_array
        xbox = np.ndarray if box is pd.array else box
        startdate = Series(pd.date_range("2013-01-01", "2013-01-03"))
        enddate = Series(pd.date_range("2013-03-01", "2013-03-03"))
        ser = enddate - startdate
        ser[2] = np.nan
        # keep an unboxed copy to compute elementwise expectations
        flat = ser
        ser = tm.box_expected(ser, box)
        # op
        expected = Series([x / np.timedelta64(m, unit) for x in flat])
        expected = tm.box_expected(expected, xbox)
        result = ser / np.timedelta64(m, unit)
        tm.assert_equal(result, expected)
        # reverse op
        expected = Series([Timedelta(np.timedelta64(m, unit)) / x for x in flat])
        expected = tm.box_expected(expected, xbox)
        result = np.timedelta64(m, unit) / ser
        tm.assert_equal(result, expected)
    def test_td64arr_div_tdlike_scalar_with_nat(self, two_hours, box_with_array):
        """td64 with NaT / timedelta-like scalar: NaT becomes NaN in the
        float64 result, in both operand orders."""
        box = box_with_array
        xbox = np.ndarray if box is pd.array else box
        rng = TimedeltaIndex(["1 days", pd.NaT, "2 days"], name="foo")
        expected = pd.Float64Index([12, np.nan, 24], name="foo")
        rng = tm.box_expected(rng, box)
        expected = tm.box_expected(expected, xbox)
        result = rng / two_hours
        tm.assert_equal(result, expected)
        result = two_hours / rng
        expected = 1 / expected
        tm.assert_equal(result, expected)
    def test_td64arr_div_td64_ndarray(self, box_with_array):
        """Division by a td64 ndarray — and its boxed, object-dtype, and list
        forms — yields float64; reflected division gives the reciprocal."""
        # GH#22631
        box = box_with_array
        xbox = np.ndarray if box is pd.array else box
        rng = TimedeltaIndex(["1 days", pd.NaT, "2 days"])
        expected = pd.Float64Index([12, np.nan, 24])
        rng = tm.box_expected(rng, box)
        expected = tm.box_expected(expected, xbox)
        other = np.array([2, 4, 2], dtype="m8[h]")
        result = rng / other
        tm.assert_equal(result, expected)
        result = rng / tm.box_expected(other, box)
        tm.assert_equal(result, expected)
        result = rng / other.astype(object)
        tm.assert_equal(result, expected)
        result = rng / list(other)
        tm.assert_equal(result, expected)
        # reversed op
        expected = 1 / expected
        result = other / rng
        tm.assert_equal(result, expected)
        result = tm.box_expected(other, box) / rng
        tm.assert_equal(result, expected)
        result = other.astype(object) / rng
        tm.assert_equal(result, expected)
        result = list(other) / rng
        tm.assert_equal(result, expected)
    def test_tdarr_div_length_mismatch(self, box_with_array):
        """Dividing by a sequence of the wrong length raises ValueError,
        for list/ndarray/Index operands, both shorter and longer."""
        rng = TimedeltaIndex(["1 days", pd.NaT, "2 days"])
        mismatched = [1, 2, 3, 4]
        rng = tm.box_expected(rng, box_with_array)
        msg = "Cannot divide vectors|Unable to coerce to Series"
        for obj in [mismatched, mismatched[:2]]:
            # one shorter, one longer
            for other in [obj, np.array(obj), pd.Index(obj)]:
                with pytest.raises(ValueError, match=msg):
                    rng / other
                with pytest.raises(ValueError, match=msg):
                    other / rng
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
    def test_td64arr_floordiv_td64arr_with_nat(self, box_with_array):
        """td64 // td64 with NaT on the right yields NaN in a float result,
        including the __rfloordiv__ path with an ndarray left operand."""
        # GH#35529
        box = box_with_array
        xbox = np.ndarray if box is pd.array else box
        left = Series([1000, 222330, 30], dtype="timedelta64[ns]")
        right = Series([1000, 222330, None], dtype="timedelta64[ns]")
        left = tm.box_expected(left, box)
        right = tm.box_expected(right, box)
        expected = np.array([1.0, 1.0, np.nan], dtype=np.float64)
        expected = tm.box_expected(expected, xbox)
        result = left // right
        tm.assert_equal(result, expected)
        # case that goes through __rfloordiv__ with arraylike
        result = np.asarray(left) // right
        tm.assert_equal(result, expected)
    def test_td64arr_floordiv_tdscalar(self, box_with_array, scalar_td):
        """td64 // timedelta-like scalar: integer quotient with NaN for NaT."""
        # GH#18831
        box = box_with_array
        xbox = np.ndarray if box is pd.array else box
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan
        expected = Series([0, 0, np.nan])
        td1 = tm.box_expected(td1, box, transpose=False)
        expected = tm.box_expected(expected, xbox, transpose=False)
        result = td1 // scalar_td
        tm.assert_equal(result, expected)
    def test_td64arr_rfloordiv_tdscalar(self, box_with_array, scalar_td):
        """timedelta-like scalar // td64 array (reflected floordiv operator)."""
        # GH#18831
        box = box_with_array
        xbox = np.ndarray if box is pd.array else box
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan
        expected = Series([1, 1, np.nan])
        td1 = tm.box_expected(td1, box, transpose=False)
        expected = tm.box_expected(expected, xbox, transpose=False)
        result = scalar_td // td1
        tm.assert_equal(result, expected)
    def test_td64arr_rfloordiv_tdscalar_explicit(self, box_with_array, scalar_td):
        """Calling __rfloordiv__ directly matches the reflected operator."""
        # GH#18831
        box = box_with_array
        xbox = np.ndarray if box is pd.array else box
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan
        expected = Series([1, 1, np.nan])
        td1 = tm.box_expected(td1, box, transpose=False)
        expected = tm.box_expected(expected, xbox, transpose=False)
        # We can test __rfloordiv__ using this syntax,
        # see `test_timedelta_rfloordiv`
        result = td1.__rfloordiv__(scalar_td)
        tm.assert_equal(result, expected)
def test_td64arr_floordiv_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
result = idx // 1
tm.assert_equal(result, idx)
pattern = "floor_divide cannot use operands|Cannot divide int by Timedelta*"
with pytest.raises(TypeError, match=pattern):
1 // idx
    def test_td64arr_floordiv_tdlike_scalar(self, two_hours, box_with_array):
        """td64 // timedelta-like scalar yields an integer (Int64) result."""
        box = box_with_array
        xbox = np.ndarray if box is pd.array else box
        tdi = timedelta_range("1 days", "10 days", name="foo")
        expected = pd.Int64Index((np.arange(10) + 1) * 12, name="foo")
        tdi = tm.box_expected(tdi, box)
        expected = tm.box_expected(expected, xbox)
        result = tdi // two_hours
        tm.assert_equal(result, expected)
    # TODO: Is this redundant with test_td64arr_floordiv_tdlike_scalar?
    @pytest.mark.parametrize(
        "scalar_td",
        [
            timedelta(minutes=10, seconds=7),
            Timedelta("10m7s"),
            Timedelta("10m7s").to_timedelta64(),
        ],
        ids=lambda x: type(x).__name__,
    )
    def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_with_array):
        """scalar // td64 (via __rfloordiv__) and td64 // scalar: float
        quotients with NaN for NaT, across timedelta scalar types."""
        # GH#19125
        box = box_with_array
        xbox = np.ndarray if box_with_array is pd.array else box_with_array
        tdi = TimedeltaIndex(["00:05:03", "00:05:03", pd.NaT], freq=None)
        expected = pd.Index([2.0, 2.0, np.nan])
        tdi = tm.box_expected(tdi, box, transpose=False)
        expected = tm.box_expected(expected, xbox, transpose=False)
        res = tdi.__rfloordiv__(scalar_td)
        tm.assert_equal(res, expected)
        # forward floordiv for the same operands
        expected = pd.Index([0.0, 0.0, np.nan])
        expected = tm.box_expected(expected, xbox, transpose=False)
        res = tdi // (scalar_td)
        tm.assert_equal(res, expected)
# ------------------------------------------------------------------
# mod, divmod
# TODO: operations with timedelta-like arrays, numeric arrays,
# reversed ops
    def test_td64arr_mod_tdscalar(self, box_with_array, three_days):
        """td64 % timedelta-like scalar; divmod returns (floordiv, mod)."""
        tdi = timedelta_range("1 Day", "9 days")
        tdarr = tm.box_expected(tdi, box_with_array)
        expected = TimedeltaIndex(["1 Day", "2 Days", "0 Days"] * 3)
        expected = tm.box_expected(expected, box_with_array)
        result = tdarr % three_days
        tm.assert_equal(result, expected)
        # DataFrame % DateOffset goes through an object-dtype path that warns
        warn = None
        if box_with_array is pd.DataFrame and isinstance(three_days, pd.DateOffset):
            warn = PerformanceWarning
        with tm.assert_produces_warning(warn):
            result = divmod(tdarr, three_days)
        tm.assert_equal(result[1], expected)
        tm.assert_equal(result[0], tdarr // three_days)
    def test_td64arr_mod_int(self, box_with_array):
        """td64 % int works (and divmod agrees); int % td64 raises."""
        tdi = timedelta_range("1 ns", "10 ns", periods=10)
        tdarr = tm.box_expected(tdi, box_with_array)
        expected = TimedeltaIndex(["1 ns", "0 ns"] * 5)
        expected = tm.box_expected(expected, box_with_array)
        result = tdarr % 2
        tm.assert_equal(result, expected)
        msg = "Cannot divide int by"
        with pytest.raises(TypeError, match=msg):
            2 % tdarr
        result = divmod(tdarr, 2)
        tm.assert_equal(result[1], expected)
        tm.assert_equal(result[0], tdarr // 2)
    def test_td64arr_rmod_tdscalar(self, box_with_array, three_days):
        """timedelta-like scalar % td64 array; divmod agrees with // and %."""
        tdi = timedelta_range("1 Day", "9 days")
        tdarr = tm.box_expected(tdi, box_with_array)
        expected = ["0 Days", "1 Day", "0 Days"] + ["3 Days"] * 6
        expected = TimedeltaIndex(expected)
        expected = tm.box_expected(expected, box_with_array)
        result = three_days % tdarr
        tm.assert_equal(result, expected)
        result = divmod(three_days, tdarr)
        tm.assert_equal(result[1], expected)
        tm.assert_equal(result[0], three_days // tdarr)
# ------------------------------------------------------------------
# Operations with invalid others
    def test_td64arr_mul_tdscalar_invalid(self, box_with_array, scalar_td):
        """td64 * timedelta-like scalar raises TypeError, both operand orders."""
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan
        td1 = tm.box_expected(td1, box_with_array)
        # check that we are getting a TypeError
        # with 'operate' (from core/ops.py) for the ops that are not
        # defined
        pattern = "operate|unsupported|cannot|not supported"
        with pytest.raises(TypeError, match=pattern):
            td1 * scalar_td
        with pytest.raises(TypeError, match=pattern):
            scalar_td * td1
    def test_td64arr_mul_too_short_raises(self, box_with_array):
        """Multiplying by a length-mismatched operand raises: TypeError for a
        td64 slice (dtype rejected after length check), ValueError for ints."""
        idx = TimedeltaIndex(np.arange(5, dtype="int64"))
        idx = tm.box_expected(idx, box_with_array)
        msg = (
            "cannot use operands with types dtype|"
            "Cannot multiply with unequal lengths|"
            "Unable to coerce to Series"
        )
        with pytest.raises(TypeError, match=msg):
            # length check before dtype check
            idx * idx[:3]
        with pytest.raises(ValueError, match=msg):
            idx * np.array([1, 2])
    def test_td64arr_mul_td64arr_raises(self, box_with_array):
        """td64 * td64 is undefined and must raise TypeError."""
        idx = TimedeltaIndex(np.arange(5, dtype="int64"))
        idx = tm.box_expected(idx, box_with_array)
        msg = "cannot use operands with types dtype"
        with pytest.raises(TypeError, match=msg):
            idx * idx
# ------------------------------------------------------------------
# Operations with numeric others
    def test_td64arr_mul_numeric_scalar(self, box_with_array, one):
        """td64 * numeric scalar (variants of 1 via the `one` fixture):
        negation and doubling, commutatively, with NaT preserved."""
        # GH#4521
        # divide/multiply by integers
        tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
        expected = Series(["-59 Days", "-59 Days", "NaT"], dtype="timedelta64[ns]")
        tdser = tm.box_expected(tdser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = tdser * (-one)
        tm.assert_equal(result, expected)
        result = (-one) * tdser
        tm.assert_equal(result, expected)
        expected = Series(["118 Days", "118 Days", "NaT"], dtype="timedelta64[ns]")
        expected = tm.box_expected(expected, box_with_array)
        result = tdser * (2 * one)
        tm.assert_equal(result, expected)
        result = (2 * one) * tdser
        tm.assert_equal(result, expected)
    @pytest.mark.parametrize("two", [2, 2.0, np.array(2), np.array(2.0)])
    def test_td64arr_div_numeric_scalar(self, box_with_array, two):
        """td64 / numeric scalar halves each element; numeric / td64 raises."""
        # GH#4521
        # divide/multiply by integers
        tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
        expected = Series(["29.5D", "29.5D", "NaT"], dtype="timedelta64[ns]")
        tdser = tm.box_expected(tdser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = tdser / two
        tm.assert_equal(result, expected)
        with pytest.raises(TypeError, match="Cannot divide"):
            two / tdser
    @pytest.mark.parametrize(
        "vector",
        [np.array([20, 30, 40]), pd.Index([20, 30, 40]), Series([20, 30, 40])],
        ids=lambda x: type(x).__name__,
    )
    def test_td64arr_rmul_numeric_array(self, box_with_array, vector, any_real_dtype):
        """td64 * numeric array-like (any real dtype) is elementwise and
        commutative; NaT is preserved."""
        # GH#4521
        # divide/multiply by integers
        xbox = get_upcast_box(box_with_array, vector)
        tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
        vector = vector.astype(any_real_dtype)
        expected = Series(["1180 Days", "1770 Days", "NaT"], dtype="timedelta64[ns]")
        tdser = tm.box_expected(tdser, box_with_array)
        expected = tm.box_expected(expected, xbox)
        result = tdser * vector
        tm.assert_equal(result, expected)
        result = vector * tdser
        tm.assert_equal(result, expected)
    @pytest.mark.parametrize(
        "vector",
        [np.array([20, 30, 40]), pd.Index([20, 30, 40]), Series([20, 30, 40])],
        ids=lambda x: type(x).__name__,
    )
    def test_td64arr_div_numeric_array(self, box_with_array, vector, any_real_dtype):
        """td64 / numeric array-like works elementwise; numeric / td64 raises;
        object-dtype numeric operands behave like their numeric counterparts."""
        # GH#4521
        # divide/multiply by integers
        xbox = get_upcast_box(box_with_array, vector)
        tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
        vector = vector.astype(any_real_dtype)
        expected = Series(["2.95D", "1D 23H 12m", "NaT"], dtype="timedelta64[ns]")
        tdser = tm.box_expected(tdser, box_with_array)
        expected = tm.box_expected(expected, xbox)
        result = tdser / vector
        tm.assert_equal(result, expected)
        pattern = (
            "true_divide'? cannot use operands|"
            "cannot perform __div__|"
            "cannot perform __truediv__|"
            "unsupported operand|"
            "Cannot divide"
        )
        with pytest.raises(TypeError, match=pattern):
            vector / tdser
        if not isinstance(vector, pd.Index):
            # Index.__rdiv__ won't try to operate elementwise, just raises
            result = tdser / vector.astype(object)
            if box_with_array is pd.DataFrame:
                expected = [tdser.iloc[0, n] / vector[n] for n in range(len(vector))]
            else:
                expected = [tdser[n] / vector[n] for n in range(len(tdser))]
            expected = pd.Index(expected)  # do dtype inference
            expected = tm.box_expected(expected, xbox)
            tm.assert_equal(result, expected)
        with pytest.raises(TypeError, match=pattern):
            vector.astype(object) / tdser
    def test_td64arr_mul_int_series(self, box_with_array, names, request):
        """Multiplying a TimedeltaIndex by an int Series attaches the correct
        result name (GH#19042)."""
        # GH#19042 test for correct name attachment
        box = box_with_array
        if box_with_array is pd.DataFrame and names[2] is None:
            reason = "broadcasts along wrong axis, but doesn't raise"
            request.node.add_marker(pytest.mark.xfail(reason=reason))
        exname = names[2] if box not in [tm.to_array, pd.array] else names[1]
        tdi = TimedeltaIndex(
            ["0days", "1day", "2days", "3days", "4days"], name=names[0]
        )
        # TODO: Should we be parametrizing over types for `ser` too?
        ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1])
        expected = Series(
            ["0days", "1day", "4days", "9days", "16days"],
            dtype="timedelta64[ns]",
            name=exname,
        )
        tdi = tm.box_expected(tdi, box)
        # array-like boxes produce a Series when multiplied by a Series
        xbox = (
            Series
            if (box is pd.Index or box is tm.to_array or box is pd.array)
            else box
        )
        expected = tm.box_expected(expected, xbox)
        result = ser * tdi
        tm.assert_equal(result, expected)
        # The direct operation tdi * ser still needs to be fixed.
        result = ser.__rmul__(tdi)
        if box is pd.DataFrame:
            assert result is NotImplemented
        else:
            tm.assert_equal(result, expected)
    # TODO: Should we be parametrizing over types for `ser` too?
    def test_float_series_rdiv_td64arr(self, box_with_array, names):
        """Series.__rtruediv__ with a TimedeltaIndex divides elementwise and
        attaches the correct result name (GH#19042)."""
        # GH#19042 test for correct name attachment
        # TODO: the direct operation TimedeltaIndex / Series still
        # needs to be fixed.
        box = box_with_array
        tdi = TimedeltaIndex(
            ["0days", "1day", "2days", "3days", "4days"], name=names[0]
        )
        ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1])
        xname = names[2] if box not in [tm.to_array, pd.array] else names[1]
        expected = Series(
            [tdi[n] / ser[n] for n in range(len(ser))],
            dtype="timedelta64[ns]",
            name=xname,
        )
        xbox = box
        if box in [pd.Index, tm.to_array, pd.array] and type(ser) is Series:
            xbox = Series
        tdi = tm.box_expected(tdi, box)
        expected = tm.box_expected(expected, xbox)
        result = ser.__rtruediv__(tdi)
        if box is pd.DataFrame:
            # TODO: Should we skip this case sooner or test something else?
            assert result is NotImplemented
        else:
            tm.assert_equal(result, expected)
class TestTimedelta64ArrayLikeArithmetic:
    """Arithmetic tests for timedelta64[ns] vectors fully parametrized over
    DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all arithmetic
    tests will eventually end up here."""

    def test_td64arr_pow_invalid(self, scalar_td, box_with_array):
        """Exponentiation involving td64 data raises TypeError both ways."""
        values = Series([timedelta(minutes=5, seconds=3)] * 3)
        values.iloc[2] = np.nan
        boxed = tm.box_expected(values, box_with_array)
        # check that we get a TypeError from 'operate' (core/ops.py)
        # for the ops that are not defined
        pattern = "operate|unsupported|cannot|not supported"
        with pytest.raises(TypeError, match=pattern):
            scalar_td ** boxed
        with pytest.raises(TypeError, match=pattern):
            boxed ** scalar_td
def test_add_timestamp_to_timedelta():
    """Timestamp + timedelta_range produces the expected DatetimeIndex of
    shifted points (GH 35897)."""
    start = Timestamp.now()
    result = start + pd.timedelta_range("0s", "1s", periods=31)
    # reconstruct each step of the range by hand; every third step picks up
    # an extra nanosecond of rounding
    step = pd.to_timedelta("0.033333333s")
    nano = pd.to_timedelta("0.000000001s")
    expected = DatetimeIndex(
        [start + step * i + nano * (i // 3) for i in range(31)]
    )
    tm.assert_index_equal(result, expected)
| {
"repo_name": "gfyoung/pandas",
"path": "pandas/tests/arithmetic/test_timedelta64.py",
"copies": "2",
"size": "79397",
"license": "bsd-3-clause",
"hash": 534541911961175800,
"line_mean": 34.9587862319,
"line_max": 88,
"alpha_frac": 0.5778178017,
"autogenerated": false,
"ratio": 3.570490623735216,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003360064956510807,
"num_lines": 2208
} |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import datetime, timedelta
from itertools import product, starmap
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.compat.numpy import np_datetime64_compat
from pandas.errors import NullFrequencyError, PerformanceWarning
import pandas as pd
from pandas import (
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
from pandas.core.indexes.datetimes import _to_M8
import pandas.util.testing as tm
def assert_all(obj):
    """
    Test helper: assert that every element of *obj* is truthy, calling
    ``.all()`` the appropriate number of times for a Series or DataFrame.
    """
    flattened = obj.all().all() if isinstance(obj, pd.DataFrame) else obj.all()
    assert flattened
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
    """Comparison tests for datetime64 vectors fully parametrized over
    DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
    tests will eventually end up here."""

    def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
        """A zero-dimensional ndarray operand is unboxed before comparison."""
        tz = tz_naive_fixture
        box = box_with_array
        xbox = np.ndarray if box is pd.Index else box
        dti = date_range("20130101", periods=3, tz=tz)
        zerodim = np.array(dti.to_numpy()[0])
        # FIXME: ValueError with transpose on tzaware
        dtarr = tm.box_expected(dti, box, transpose=False)
        expected = tm.box_expected(
            np.array([True, False, False]), xbox, transpose=False
        )
        tm.assert_equal(dtarr <= zerodim, expected)
class TestDatetime64DataFrameComparison:
    """DataFrame-specific datetime64 comparison tests."""

    @pytest.mark.parametrize(
        "timestamps",
        [
            [pd.Timestamp("2012-01-01 13:00:00+00:00")] * 2,
            [pd.Timestamp("2012-01-01 13:00:00")] * 2,
        ],
    )
    def test_tz_aware_scalar_comparison(self, timestamps):
        """Comparing a datetime column to an int is elementwise False, not an
        error (GH#15966)."""
        frame = pd.DataFrame({"test": timestamps})
        all_false = pd.DataFrame({"test": [False, False]})
        tm.assert_frame_equal(frame == -1, all_false)

    def test_dt64_nat_comparison(self):
        """DataFrame must not consider NaT == ts (GH#22242, GH#22163)."""
        now = pd.Timestamp.now()
        frame = pd.DataFrame([now, pd.NaT])
        tm.assert_frame_equal(frame == now, pd.DataFrame([True, False]))
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
    @pytest.mark.parametrize(
        "pair",
        [
            (
                [pd.Timestamp("2011-01-01"), NaT, pd.Timestamp("2011-01-03")],
                [NaT, NaT, pd.Timestamp("2011-01-03")],
            ),
            (
                [pd.Timedelta("1 days"), NaT, pd.Timedelta("3 days")],
                [NaT, NaT, pd.Timedelta("3 days")],
            ),
            (
                [pd.Period("2011-01", freq="M"), NaT, pd.Period("2011-03", freq="M")],
                [NaT, NaT, pd.Period("2011-03", freq="M")],
            ),
        ],
    )
    @pytest.mark.parametrize("reverse", [True, False])
    @pytest.mark.parametrize("box", [Series, pd.Index])
    @pytest.mark.parametrize("dtype", [None, object])
    def test_nat_comparisons(self, dtype, box, reverse, pair):
        """NaT never compares equal or ordered against any entry, for
        datetime-, timedelta- and period-dtype data alike."""
        l, r = pair
        if reverse:
            # add lhs / rhs switched data
            l, r = r, l
        left = Series(l, dtype=dtype)
        right = box(r, dtype=dtype)
        # Series, Index
        # only the position where both sides are non-NaT and equal is True
        expected = Series([False, False, True])
        tm.assert_series_equal(left == right, expected)
        expected = Series([True, True, False])
        tm.assert_series_equal(left != right, expected)
        expected = Series([False, False, False])
        tm.assert_series_equal(left < right, expected)
        expected = Series([False, False, False])
        tm.assert_series_equal(left > right, expected)
        expected = Series([False, False, True])
        tm.assert_series_equal(left >= right, expected)
        expected = Series([False, False, True])
        tm.assert_series_equal(left <= right, expected)
    def test_comparison_invalid(self, box_with_array):
        """Invalid date/int comparisons: ==/!= are elementwise False/True,
        ordered comparisons raise TypeError (GH#4968)."""
        # GH#4968
        # invalid date/int comparisons
        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
        ser = Series(range(5))
        ser2 = Series(pd.date_range("20010101", periods=5))
        ser = tm.box_expected(ser, box_with_array)
        ser2 = tm.box_expected(ser2, box_with_array)
        # check both operand orders
        for (x, y) in [(ser, ser2), (ser2, ser)]:
            result = x == y
            expected = tm.box_expected([False] * 5, xbox)
            tm.assert_equal(result, expected)
            result = x != y
            expected = tm.box_expected([True] * 5, xbox)
            tm.assert_equal(result, expected)
            msg = "Invalid comparison between"
            with pytest.raises(TypeError, match=msg):
                x >= y
            with pytest.raises(TypeError, match=msg):
                x > y
            with pytest.raises(TypeError, match=msg):
                x < y
            with pytest.raises(TypeError, match=msg):
                x <= y
    @pytest.mark.parametrize(
        "data",
        [
            [Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
            [Timedelta("1 days"), NaT, Timedelta("3 days")],
            [Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
        ],
    )
    @pytest.mark.parametrize("dtype", [None, object])
    def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
        """Comparison against the NaT scalar: == is all-False, != all-True,
        every ordered comparison is all-False, in both operand orders."""
        if box_with_array is tm.to_array and dtype is object:
            # dont bother testing ndarray comparison methods as this fails
            # on older numpys (since they check object identity)
            return
        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
        left = Series(data, dtype=dtype)
        left = tm.box_expected(left, box_with_array)
        expected = [False, False, False]
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(left == NaT, expected)
        tm.assert_equal(NaT == left, expected)
        expected = [True, True, True]
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(left != NaT, expected)
        tm.assert_equal(NaT != left, expected)
        expected = [False, False, False]
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(left < NaT, expected)
        tm.assert_equal(NaT > left, expected)
        tm.assert_equal(left <= NaT, expected)
        tm.assert_equal(NaT >= left, expected)
        tm.assert_equal(left > NaT, expected)
        tm.assert_equal(NaT < left, expected)
        tm.assert_equal(left >= NaT, expected)
        tm.assert_equal(NaT <= left, expected)
def test_series_comparison_scalars(self):
series = Series(date_range("1/1/2000", periods=10))
val = datetime(2000, 1, 4)
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
val = series[5]
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
    def test_dt64_ser_cmp_date_warning(self):
        """Comparing a datetime64 Series to a ``datetime.date`` warns
        FutureWarning but still compares (GH#21359)."""
        # https://github.com/pandas-dev/pandas/issues/21359
        # Remove this test and enable the invalid test below
        ser = pd.Series(pd.date_range("20010101", periods=10), name="dates")
        date = ser.iloc[0].to_pydatetime().date()
        with tm.assert_produces_warning(FutureWarning) as m:
            result = ser == date
        expected = pd.Series([True] + [False] * 9, name="dates")
        tm.assert_series_equal(result, expected)
        assert "Comparing Series of datetimes " in str(m[0].message)
        assert "will not compare equal" in str(m[0].message)
        with tm.assert_produces_warning(FutureWarning) as m:
            result = ser != date
        tm.assert_series_equal(result, ~expected)
        assert "will not compare equal" in str(m[0].message)
        with tm.assert_produces_warning(FutureWarning) as m:
            result = ser <= date
        tm.assert_series_equal(result, expected)
        assert "a TypeError will be raised" in str(m[0].message)
        with tm.assert_produces_warning(FutureWarning) as m:
            result = ser < date
        tm.assert_series_equal(result, pd.Series([False] * 10, name="dates"))
        assert "a TypeError will be raised" in str(m[0].message)
        with tm.assert_produces_warning(FutureWarning) as m:
            result = ser >= date
        tm.assert_series_equal(result, pd.Series([True] * 10, name="dates"))
        assert "a TypeError will be raised" in str(m[0].message)
        with tm.assert_produces_warning(FutureWarning) as m:
            result = ser > date
        tm.assert_series_equal(result, pd.Series([False] + [True] * 9, name="dates"))
        assert "a TypeError will be raised" in str(m[0].message)
@pytest.mark.skip(reason="GH#21359")
def test_dt64ser_cmp_date_invalid(self, box_with_array):
# GH#19800 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
ser = pd.date_range("20010101", periods=10)
date = ser.iloc[0].to_pydatetime().date()
ser = tm.box_expected(ser, box_with_array)
assert not (ser == date).any()
assert (ser != date).all()
with pytest.raises(TypeError):
ser > date
with pytest.raises(TypeError):
ser < date
with pytest.raises(TypeError):
ser >= date
with pytest.raises(TypeError):
ser <= date
    @pytest.mark.parametrize(
        "left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
    )
    def test_timestamp_compare_series(self, left, right):
        """Timestamp vs Series comparison is symmetric: op(ser, ts) matches
        the mirrored op(ts, ser), with and without NaT entries (gh-4982)."""
        # see gh-4982
        # Make sure we can compare Timestamps on the right AND left hand side.
        ser = pd.Series(pd.date_range("20010101", periods=10), name="dates")
        s_nat = ser.copy(deep=True)
        ser[0] = pd.Timestamp("nat")
        ser[3] = pd.Timestamp("nat")
        left_f = getattr(operator, left)
        right_f = getattr(operator, right)
        # No NaT
        expected = left_f(ser, pd.Timestamp("20010109"))
        result = right_f(pd.Timestamp("20010109"), ser)
        tm.assert_series_equal(result, expected)
        # NaT
        expected = left_f(ser, pd.Timestamp("nat"))
        result = right_f(pd.Timestamp("nat"), ser)
        tm.assert_series_equal(result, expected)
        # Compare to Timestamp with series containing NaT
        expected = left_f(s_nat, pd.Timestamp("20010109"))
        result = right_f(pd.Timestamp("20010109"), s_nat)
        tm.assert_series_equal(result, expected)
        # Compare to NaT with series containing NaT
        expected = left_f(s_nat, pd.Timestamp("nat"))
        result = right_f(pd.Timestamp("nat"), s_nat)
        tm.assert_series_equal(result, expected)
    def test_dt64arr_timestamp_equality(self, box_with_array):
        """Equality ops on dt64 data containing NaT: NaT is never equal and
        always unequal, even to itself (GH#11034)."""
        # GH#11034
        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
        ser = pd.Series([pd.Timestamp("2000-01-29 01:59:00"), "NaT"])
        ser = tm.box_expected(ser, box_with_array)
        result = ser != ser
        expected = tm.box_expected([False, True], xbox)
        tm.assert_equal(result, expected)
        result = ser != ser[0]
        expected = tm.box_expected([False, True], xbox)
        tm.assert_equal(result, expected)
        result = ser != ser[1]
        expected = tm.box_expected([True, True], xbox)
        tm.assert_equal(result, expected)
        result = ser == ser
        expected = tm.box_expected([True, False], xbox)
        tm.assert_equal(result, expected)
        result = ser == ser[0]
        expected = tm.box_expected([True, False], xbox)
        tm.assert_equal(result, expected)
        result = ser == ser[1]
        expected = tm.box_expected([False, False], xbox)
        tm.assert_equal(result, expected)
    @pytest.mark.parametrize(
        "op",
        [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
    )
    def test_comparison_tzawareness_compat(self, op):
        """Comparing tz-aware and tz-naive datetime data raises TypeError
        (GH#18162)."""
        # GH#18162
        dr = pd.date_range("2016-01-01", periods=6)
        dz = dr.tz_localize("US/Pacific")
        # Check that there isn't a problem aware-aware and naive-naive do not
        # raise
        naive_series = Series(dr)
        aware_series = Series(dz)
        msg = "Cannot compare tz-naive and tz-aware"
        with pytest.raises(TypeError, match=msg):
            op(dz, naive_series)
        with pytest.raises(TypeError, match=msg):
            op(dr, aware_series)
        # TODO: implement _assert_tzawareness_compat for the reverse
        # comparison with the Series on the left-hand side
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.gt, operator.lt, operator.ge, operator.le],
)
def test_comparators(self, op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = _to_M8(element)
arr = np.array(index)
arr_result = op(arr, element)
index_result = op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
    @pytest.mark.parametrize(
        "other",
        [datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
    )
    def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
        """DatetimeIndex compares elementwise against scalar datetime-likes."""
        tz = tz_naive_fixture
        dti = pd.date_range("2016-01-01", periods=2, tz=tz)
        if tz is not None:
            if isinstance(other, np.datetime64):
                # no tzaware version available
                return
            other = localize_pydatetime(other, dti.tzinfo)
        result = dti == other
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = dti > other
        expected = np.array([False, True])
        tm.assert_numpy_array_equal(result, expected)
        result = dti >= other
        expected = np.array([True, True])
        tm.assert_numpy_array_equal(result, expected)
        result = dti < other
        expected = np.array([False, False])
        tm.assert_numpy_array_equal(result, expected)
        result = dti <= other
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
def dt64arr_cmp_non_datetime(self, tz_naive_fixture, box_with_array):
# GH#19301 by convention datetime.date is not considered comparable
# to Timestamp or DatetimeIndex. This may change in the future.
tz = tz_naive_fixture
dti = pd.date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = datetime(2016, 1, 1).date()
assert not (dtarr == other).any()
assert (dtarr != other).all()
with pytest.raises(TypeError):
dtarr < other
with pytest.raises(TypeError):
dtarr <= other
with pytest.raises(TypeError):
dtarr > other
with pytest.raises(TypeError):
dtarr >= other
@pytest.mark.parametrize("other", [None, np.nan, pd.NaT])
def test_dti_eq_null_scalar(self, other, tz_naive_fixture):
# GH#19301
tz = tz_naive_fixture
dti = pd.date_range("2016-01-01", periods=2, tz=tz)
assert not (dti == other).any()
@pytest.mark.parametrize("other", [None, np.nan, pd.NaT])
def test_dti_ne_null_scalar(self, other, tz_naive_fixture):
# GH#19301
tz = tz_naive_fixture
dti = pd.date_range("2016-01-01", periods=2, tz=tz)
assert (dti != other).all()
    @pytest.mark.parametrize("other", [None, np.nan])
    def test_dti_cmp_null_scalar_inequality(
        self, tz_naive_fixture, other, box_with_array
    ):
        """Ordered comparison against a null scalar raises TypeError
        (GH#19301)."""
        # GH#19301
        tz = tz_naive_fixture
        dti = pd.date_range("2016-01-01", periods=2, tz=tz)
        dtarr = tm.box_expected(dti, box_with_array)
        msg = "Invalid comparison between"
        with pytest.raises(TypeError, match=msg):
            dtarr < other
        with pytest.raises(TypeError, match=msg):
            dtarr <= other
        with pytest.raises(TypeError, match=msg):
            dtarr > other
        with pytest.raises(TypeError, match=msg):
            dtarr >= other
    @pytest.mark.parametrize("dtype", [None, object])
    def test_dti_cmp_nat(self, dtype, box_with_array):
        """NaT entries compare unequal to everything, including other NaT."""
        if box_with_array is tm.to_array and dtype is object:
            # dont bother testing ndarray comparison methods as this fails
            # on older numpys (since they check object identity)
            return
        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
        left = pd.DatetimeIndex(
            [pd.Timestamp("2011-01-01"), pd.NaT, pd.Timestamp("2011-01-03")]
        )
        right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp("2011-01-03")])
        left = tm.box_expected(left, box_with_array)
        right = tm.box_expected(right, box_with_array)
        lhs, rhs = left, right
        if dtype is object:
            lhs, rhs = left.astype(object), right.astype(object)
        result = rhs == lhs
        expected = np.array([False, False, True])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)
        result = lhs != rhs
        expected = np.array([True, True, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)
        expected = np.array([False, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(lhs == pd.NaT, expected)
        tm.assert_equal(pd.NaT == rhs, expected)
        expected = np.array([True, True, True])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(lhs != pd.NaT, expected)
        tm.assert_equal(pd.NaT != lhs, expected)
        expected = np.array([False, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(lhs < pd.NaT, expected)
        tm.assert_equal(pd.NaT > lhs, expected)
    def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
        """NaT in datetime comparisons behaves exactly like NaN in float
        comparisons: never equal, never ordered."""
        fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
        fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
        didx1 = pd.DatetimeIndex(
            ["2014-01-01", pd.NaT, "2014-03-01", pd.NaT, "2014-05-01", "2014-07-01"]
        )
        didx2 = pd.DatetimeIndex(
            ["2014-02-01", "2014-03-01", pd.NaT, pd.NaT, "2014-06-01", "2014-07-01"]
        )
        darr = np.array(
            [
                np_datetime64_compat("2014-02-01 00:00Z"),
                np_datetime64_compat("2014-03-01 00:00Z"),
                np_datetime64_compat("nat"),
                np.datetime64("nat"),
                np_datetime64_compat("2014-06-01 00:00Z"),
                np_datetime64_compat("2014-07-01 00:00Z"),
            ]
        )
        cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
        with tm.assert_produces_warning(None):
            for idx1, idx2 in cases:
                result = idx1 < idx2
                expected = np.array([True, False, False, False, True, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx2 > idx1
                expected = np.array([True, False, False, False, True, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 <= idx2
                expected = np.array([True, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)
                result = idx2 >= idx1
                expected = np.array([True, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 == idx2
                expected = np.array([False, False, False, False, False, True])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 != idx2
                expected = np.array([True, True, True, True, True, False])
                tm.assert_numpy_array_equal(result, expected)
        # comparisons against the null scalar itself: all-False except !=
        with tm.assert_produces_warning(None):
            for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]:
                result = idx1 < val
                expected = np.array([False, False, False, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 > val
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 <= val
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 >= val
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 == val
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 != val
                expected = np.array([True, True, True, True, True, True])
                tm.assert_numpy_array_equal(result, expected)
        # comparisons against a real (non-null) scalar: null positions are
        # False for everything except !=
        with tm.assert_produces_warning(None):
            for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
                result = idx1 < val
                expected = np.array([True, False, False, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 > val
                expected = np.array([False, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 <= val
                expected = np.array([True, False, True, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 >= val
                expected = np.array([False, False, True, False, True, True])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 == val
                expected = np.array([False, False, True, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 != val
                expected = np.array([True, True, False, True, True, True])
                tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize(
        "op",
        [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
    )
    def test_comparison_tzawareness_compat(self, op, box_df_fail):
        """Comparing tz-aware with tz-naive dt64 data raises TypeError, also
        against lists and object ndarrays (GH#18162)."""
        # GH#18162
        box = box_df_fail
        dr = pd.date_range("2016-01-01", periods=6)
        dz = dr.tz_localize("US/Pacific")
        dr = tm.box_expected(dr, box)
        dz = tm.box_expected(dz, box)
        msg = "Cannot compare tz-naive and tz-aware"
        with pytest.raises(TypeError, match=msg):
            op(dr, dz)
        # FIXME: DataFrame case fails to raise for == and !=, wrong
        # message for inequalities
        with pytest.raises(TypeError, match=msg):
            op(dr, list(dz))
        with pytest.raises(TypeError, match=msg):
            op(dr, np.array(list(dz), dtype=object))
        with pytest.raises(TypeError, match=msg):
            op(dz, dr)
        # FIXME: DataFrame case fails to raise for == and !=, wrong
        # message for inequalities
        with pytest.raises(TypeError, match=msg):
            op(dz, list(dr))
        with pytest.raises(TypeError, match=msg):
            op(dz, np.array(list(dr), dtype=object))
        # Check that there isn't a problem aware-aware and naive-naive do not
        # raise
        assert_all(dr == dr)
        assert_all(dz == dz)
        # FIXME: DataFrame case fails to raise for == and !=, wrong
        # message for inequalities
        assert (dr == list(dr)).all()
        assert (dz == list(dz)).all()
    @pytest.mark.parametrize(
        "op",
        [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
    )
    def test_comparison_tzawareness_compat_scalars(self, op, box_with_array):
        """Scalar Timestamp comparisons also enforce tz-awareness
        compatibility (GH#18162, GH#12601)."""
        # GH#18162
        dr = pd.date_range("2016-01-01", periods=6)
        dz = dr.tz_localize("US/Pacific")
        dr = tm.box_expected(dr, box_with_array)
        dz = tm.box_expected(dz, box_with_array)
        # Check comparisons against scalar Timestamps
        ts = pd.Timestamp("2000-03-14 01:59")
        ts_tz = pd.Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
        assert_all(dr > ts)
        msg = "Cannot compare tz-naive and tz-aware"
        with pytest.raises(TypeError, match=msg):
            op(dr, ts_tz)
        assert_all(dz > ts_tz)
        with pytest.raises(TypeError, match=msg):
            op(dz, ts)
        # GH#12601: Check comparison against Timestamps and DatetimeIndex
        with pytest.raises(TypeError, match=msg):
            op(ts, dz)
    @pytest.mark.parametrize(
        "op",
        [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
    )
    @pytest.mark.parametrize(
        "other",
        [datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
    )
    # Bug in NumPy? https://github.com/numpy/numpy/issues/13841
    # Raising in __eq__ will fallback to NumPy, which warns, fails,
    # then re-raises the original exception. So we just need to ignore.
    @pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
    def test_scalar_comparison_tzawareness(
        self, op, other, tz_aware_fixture, box_with_array
    ):
        """tz-aware array vs tz-naive scalar raises TypeError, in both
        operand orders."""
        tz = tz_aware_fixture
        dti = pd.date_range("2016-01-01", periods=2, tz=tz)
        dtarr = tm.box_expected(dti, box_with_array)
        msg = "Cannot compare tz-naive and tz-aware"
        with pytest.raises(TypeError, match=msg):
            op(dtarr, other)
        with pytest.raises(TypeError, match=msg):
            op(other, dtarr)
    @pytest.mark.parametrize(
        "op",
        [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
    )
    def test_nat_comparison_tzawareness(self, op):
        """Comparing a tz-aware DatetimeIndex to NaT must not raise
        (GH#19276)."""
        # GH#19276
        # tzaware DatetimeIndex should not raise when compared to NaT
        dti = pd.DatetimeIndex(
            ["2014-01-01", pd.NaT, "2014-03-01", pd.NaT, "2014-05-01", "2014-07-01"]
        )
        # only != is elementwise True against NaT; every other op is False
        expected = np.array([op == operator.ne] * len(dti))
        result = op(dti, pd.NaT)
        tm.assert_numpy_array_equal(result, expected)
        result = op(dti.tz_localize("US/Pacific"), pd.NaT)
        tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize("other", ["foo", 99, 4.0, object(), timedelta(days=2)])
    def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
        """Invalid scalar comparisons: ==/!= are elementwise False/True,
        ordered comparisons raise TypeError (GH#22074)."""
        # GH#22074
        tz = tz_naive_fixture
        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
        rng = date_range("1/1/2000", periods=10, tz=tz)
        rng = tm.box_expected(rng, box_with_array)
        result = rng == other
        expected = np.array([False] * 10)
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)
        result = rng != other
        expected = np.array([True] * 10)
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)
        msg = "Invalid comparison between"
        with pytest.raises(TypeError, match=msg):
            rng < other
        with pytest.raises(TypeError, match=msg):
            rng <= other
        with pytest.raises(TypeError, match=msg):
            rng > other
        with pytest.raises(TypeError, match=msg):
            rng >= other
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize(
        "other",
        [
            pd.timedelta_range("1D", periods=10),
            pd.timedelta_range("1D", periods=10).to_series(),
            pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
        ],
        ids=lambda x: type(x).__name__,
    )
    def test_dti_cmp_tdi_tzawareness(self, other):
        """Comparing dt64 with td64 must not hit tz-awareness checks
        (GH#22074)."""
        # GH#22074
        # regression test that we _don't_ call _assert_tzawareness_compat
        # when comparing against TimedeltaIndex
        dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
        result = dti == other
        expected = np.array([False] * 10)
        tm.assert_numpy_array_equal(result, expected)
        result = dti != other
        expected = np.array([True] * 10)
        tm.assert_numpy_array_equal(result, expected)
        msg = "Invalid comparison between"
        with pytest.raises(TypeError, match=msg):
            dti < other
        with pytest.raises(TypeError, match=msg):
            dti <= other
        with pytest.raises(TypeError, match=msg):
            dti > other
        with pytest.raises(TypeError, match=msg):
            dti >= other
    def test_dti_cmp_object_dtype(self):
        """Comparisons against object-dtype counterparts, including mixed
        Timestamp/Timedelta contents (GH#22074)."""
        # GH#22074
        dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
        other = dti.astype("O")
        result = dti == other
        expected = np.array([True] * 10)
        tm.assert_numpy_array_equal(result, expected)
        other = dti.tz_localize(None)
        msg = "Cannot compare tz-naive and tz-aware"
        with pytest.raises(TypeError, match=msg):
            # tzawareness failure
            dti != other
        other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
        result = dti == other
        expected = np.array([True] * 5 + [False] * 5)
        tm.assert_numpy_array_equal(result, expected)
        msg = "Cannot compare type"
        with pytest.raises(TypeError, match=msg):
            dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
    def test_dt64arr_add_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        """Adding a timedelta-like scalar shifts every element."""
        # GH#22005, GH#22163 check DataFrame doesn't raise TypeError
        tz = tz_naive_fixture
        rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = pd.date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = rng + two_hours
        tm.assert_equal(result, expected)
    def test_dt64arr_iadd_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        """In-place addition of a timedelta-like scalar shifts every element."""
        tz = tz_naive_fixture
        rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = pd.date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        rng += two_hours
        tm.assert_equal(rng, expected)
    def test_dt64arr_sub_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        """Subtracting a timedelta-like scalar shifts every element back."""
        tz = tz_naive_fixture
        rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = pd.date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = rng - two_hours
        tm.assert_equal(result, expected)
def test_dt64arr_isub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
expected = pd.date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
rng -= two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = pd.date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = pd.DatetimeIndex(["NaT"] * 9, tz=tz)
# FIXME: fails with transpose=True due to tz-aware DataFrame
# transpose bug
obj = tm.box_expected(dti, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = pd.date_range("2016-01-01", periods=3, tz=tz)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = pd.date_range("2015-12-31", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = pd.date_range("2016-01-02", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|bad operand type for unary -"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
pd.Timestamp("2013-01-01"),
pd.Timestamp("2013-01-01").to_pydatetime(),
pd.Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = pd.date_range("2013-01-01", periods=3)
idx = tm.box_expected(idx, box_with_array)
expected = pd.TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = pd.date_range("20130101", periods=3)
dtarr = tm.box_expected(dti, box_with_array)
expected = pd.TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = pd.date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = pd.Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = pd.DatetimeIndex([pd.NaT, pd.Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - pd.NaT
expected = pd.Series([pd.NaT, pd.NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - pd.NaT
expected = pd.Series([pd.NaT, pd.NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = pd.date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = pd.date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = pd.date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "cannot add"
with pytest.raises(TypeError, match=msg):
dtarr + dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals + dtarr
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
idx = tm.box_expected(idx, box_with_array)
msg = "cannot add"
with pytest.raises(TypeError, match=msg):
idx + Timestamp("2011-01-01")
with pytest.raises(TypeError, match=msg):
Timestamp("2011-01-01") + idx
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])])
def test_dt64arr_add_sub_float(self, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(["unsupported operand type", "cannot (add|subtract)"])
with pytest.raises(TypeError, match=msg):
dtarr + other
with pytest.raises(TypeError, match=msg):
other + dtarr
with pytest.raises(TypeError, match=msg):
dtarr - other
with pytest.raises(TypeError, match=msg):
other - dtarr
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
with pytest.raises(TypeError, match=msg):
dtarr + parr
with pytest.raises(TypeError, match=msg):
parr + dtarr
with pytest.raises(TypeError, match=msg):
dtarr - parr
with pytest.raises(TypeError, match=msg):
parr - dtarr
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_period_scalar(self, dti_freq, box_with_array):
# GH#13078
# not supported, check TypeError
per = pd.Period("2011-01-01", freq="D")
idx = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(idx, box_with_array)
msg = "|".join(["unsupported operand type", "cannot (add|subtract)"])
with pytest.raises(TypeError, match=msg):
dtarr + per
with pytest.raises(TypeError, match=msg):
per + dtarr
with pytest.raises(TypeError, match=msg):
dtarr - per
with pytest.raises(TypeError, match=msg):
per - dtarr
class TestDatetime64DateOffsetArithmetic:
    """Tests for datetime64 array arithmetic with DateOffset objects."""

    # -------------------------------------------------------------
    # Tick DateOffsets

    # TODO: parametrize over timezone?
    def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
        """Adding a Tick offset (Second) shifts values; radd matches."""
        # GH#4532
        # operate with pd.offsets
        ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        expected = Series(
            [Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
        )

        ser = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = ser + pd.offsets.Second(5)
        tm.assert_equal(result, expected)

        result2 = pd.offsets.Second(5) + ser
        tm.assert_equal(result2, expected)

    def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
        """Subtracting a Tick offset; offset - array raises TypeError."""
        # GH#4532
        # operate with pd.offsets
        ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        expected = Series(
            [Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
        )

        ser = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = ser - pd.offsets.Second(5)
        tm.assert_equal(result, expected)

        result2 = -pd.offsets.Second(5) + ser
        tm.assert_equal(result2, expected)
        msg = "bad operand type for unary"
        with pytest.raises(TypeError, match=msg):
            pd.offsets.Second(5) - ser

    @pytest.mark.parametrize(
        "cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
    )
    def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
        """Smoke test: every Tick subclass supports +, radd, and -."""
        # GH#4532
        # smoke tests for valid DateOffsets
        ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        ser = tm.box_expected(ser, box_with_array)

        offset_cls = getattr(pd.offsets, cls_name)
        # results intentionally discarded; we only check nothing raises
        ser + offset_cls(5)
        offset_cls(5) + ser
        ser - offset_cls(5)

    def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
        """Tick arithmetic on tz-aware data keeps dtype (no object fallback)."""
        # GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
        tz = tz_aware_fixture
        if tz == "US/Pacific":
            # extra scalar-vs-element consistency check around DST
            dates = date_range("2012-11-01", periods=3, tz=tz)
            offset = dates + pd.offsets.Hour(5)
            assert dates[0] + pd.offsets.Hour(5) == offset[0]

        dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
        expected = DatetimeIndex(
            ["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
            freq="H",
            tz=tz,
        )

        dates = tm.box_expected(dates, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        # TODO: parametrize over the scalar being added? radd? sub?
        offset = dates + pd.offsets.Hour(5)
        tm.assert_equal(offset, expected)
        offset = dates + np.timedelta64(5, "h")
        tm.assert_equal(offset, expected)
        offset = dates + timedelta(hours=5)
        tm.assert_equal(offset, expected)

    # -------------------------------------------------------------
    # RelativeDelta DateOffsets

    def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
        """Vectorized relativedelta offsets agree with pointwise results."""
        # GH#10699
        vec = DatetimeIndex(
            [
                Timestamp("2000-01-05 00:15:00"),
                Timestamp("2000-01-31 00:23:00"),
                Timestamp("2000-01-01"),
                Timestamp("2000-03-31"),
                Timestamp("2000-02-29"),
                Timestamp("2000-12-31"),
                Timestamp("2000-05-15"),
                Timestamp("2001-06-15"),
            ]
        )
        vec = tm.box_expected(vec, box_with_array)
        # DataFrame needs squeezing to iterate scalar Timestamps
        vec_items = vec.squeeze() if box_with_array is pd.DataFrame else vec

        # DateOffset relativedelta fastpath
        relative_kwargs = [
            ("years", 2),
            ("months", 5),
            ("days", 3),
            ("hours", 5),
            ("minutes", 10),
            ("seconds", 2),
            ("microseconds", 5),
        ]
        for i, kwd in enumerate(relative_kwargs):
            # single-field offset
            off = pd.DateOffset(**dict([kwd]))

            expected = DatetimeIndex([x + off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec + off)

            expected = DatetimeIndex([x - off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec - off)

            # cumulative offset combining every field seen so far
            off = pd.DateOffset(**dict(relative_kwargs[: i + 1]))

            expected = DatetimeIndex([x + off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec + off)

            expected = DatetimeIndex([x - off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec - off)
            msg = "bad operand type for unary"
            with pytest.raises(TypeError, match=msg):
                off - vec

    # -------------------------------------------------------------
    # Non-Tick, Non-RelativeDelta DateOffsets

    # TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
    # tz-aware cases which this does not
    @pytest.mark.parametrize(
        "cls_and_kwargs",
        [
            "YearBegin",
            ("YearBegin", {"month": 5}),
            "YearEnd",
            ("YearEnd", {"month": 5}),
            "MonthBegin",
            "MonthEnd",
            "SemiMonthEnd",
            "SemiMonthBegin",
            "Week",
            ("Week", {"weekday": 3}),
            "Week",
            ("Week", {"weekday": 6}),
            "BusinessDay",
            "BDay",
            "QuarterEnd",
            "QuarterBegin",
            "CustomBusinessDay",
            "CDay",
            "CBMonthEnd",
            "CBMonthBegin",
            "BMonthBegin",
            "BMonthEnd",
            "BusinessHour",
            "BYearBegin",
            "BYearEnd",
            "BQuarterBegin",
            ("LastWeekOfMonth", {"weekday": 2}),
            (
                "FY5253Quarter",
                {
                    "qtr_with_extra_week": 1,
                    "startingMonth": 1,
                    "weekday": 2,
                    "variation": "nearest",
                },
            ),
            ("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
            ("WeekOfMonth", {"weekday": 2, "week": 2}),
            "Easter",
            ("DateOffset", {"day": 4}),
            ("DateOffset", {"month": 5}),
        ],
    )
    @pytest.mark.parametrize("normalize", [True, False])
    @pytest.mark.parametrize("n", [0, 5])
    def test_dt64arr_add_sub_DateOffsets(
        self, box_with_array, n, normalize, cls_and_kwargs
    ):
        """Vectorized DateOffset add/sub matches pointwise application."""
        # GH#10699
        # assert vectorized operation matches pointwise operations
        if isinstance(cls_and_kwargs, tuple):
            # If cls_name param is a tuple, then 2nd entry is kwargs for
            # the offset constructor
            cls_name, kwargs = cls_and_kwargs
        else:
            cls_name = cls_and_kwargs
            kwargs = {}

        if n == 0 and cls_name in [
            "WeekOfMonth",
            "LastWeekOfMonth",
            "FY5253Quarter",
            "FY5253",
        ]:
            # passing n = 0 is invalid for these offset classes
            return

        vec = DatetimeIndex(
            [
                Timestamp("2000-01-05 00:15:00"),
                Timestamp("2000-01-31 00:23:00"),
                Timestamp("2000-01-01"),
                Timestamp("2000-03-31"),
                Timestamp("2000-02-29"),
                Timestamp("2000-12-31"),
                Timestamp("2000-05-15"),
                Timestamp("2001-06-15"),
            ]
        )
        vec = tm.box_expected(vec, box_with_array)
        vec_items = vec.squeeze() if box_with_array is pd.DataFrame else vec

        offset_cls = getattr(pd.offsets, cls_name)

        with warnings.catch_warnings(record=True):
            # pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
            # applied to Series or DatetimeIndex
            # we aren't testing that here, so ignore.
            warnings.simplefilter("ignore", PerformanceWarning)

            offset = offset_cls(n, normalize=normalize, **kwargs)

            expected = DatetimeIndex([x + offset for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec + offset)

            expected = DatetimeIndex([x - offset for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec - offset)

            expected = DatetimeIndex([offset + x for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, offset + vec)
            msg = "bad operand type for unary"
            with pytest.raises(TypeError, match=msg):
                offset - vec

    def test_dt64arr_add_sub_DateOffset(self, box_with_array):
        """DateOffset/Day/MonthEnd arithmetic, naive and tz-aware."""
        # GH#10699
        s = date_range("2000-01-01", "2000-01-31", name="a")
        s = tm.box_expected(s, box_with_array)
        result = s + pd.DateOffset(years=1)
        result2 = pd.DateOffset(years=1) + s
        exp = date_range("2001-01-01", "2001-01-31", name="a")
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)

        result = s - pd.DateOffset(years=1)
        exp = date_range("1999-01-01", "1999-01-31", name="a")
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)

        s = DatetimeIndex(
            [
                Timestamp("2000-01-15 00:15:00", tz="US/Central"),
                Timestamp("2000-02-15", tz="US/Central"),
            ],
            name="a",
        )
        s = tm.box_expected(s, box_with_array)
        result = s + pd.offsets.Day()
        result2 = pd.offsets.Day() + s
        exp = DatetimeIndex(
            [
                Timestamp("2000-01-16 00:15:00", tz="US/Central"),
                Timestamp("2000-02-16", tz="US/Central"),
            ],
            name="a",
        )
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)

        s = DatetimeIndex(
            [
                Timestamp("2000-01-15 00:15:00", tz="US/Central"),
                Timestamp("2000-02-15", tz="US/Central"),
            ],
            name="a",
        )
        s = tm.box_expected(s, box_with_array)
        result = s + pd.offsets.MonthEnd()
        result2 = pd.offsets.MonthEnd() + s
        exp = DatetimeIndex(
            [
                Timestamp("2000-01-31 00:15:00", tz="US/Central"),
                Timestamp("2000-02-29", tz="US/Central"),
            ],
            name="a",
        )
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)

    # TODO: __sub__, __rsub__
    def test_dt64arr_add_mixed_offset_array(self, box_with_array):
        """Adding an Index of (mixed) offsets warns but computes pointwise."""
        # GH#10699
        # array of offsets
        s = DatetimeIndex([Timestamp("2000-1-1"), Timestamp("2000-2-1")])
        s = tm.box_expected(s, box_with_array)

        # DataFrame aligns the offset Index columnwise, so no warning there
        warn = None if box_with_array is pd.DataFrame else PerformanceWarning
        with tm.assert_produces_warning(warn, clear=[pd.core.arrays.datetimelike]):
            other = pd.Index([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()])
            other = tm.box_expected(other, box_with_array)
            result = s + other
            exp = DatetimeIndex([Timestamp("2001-1-1"), Timestamp("2000-2-29")])
            exp = tm.box_expected(exp, box_with_array)
            tm.assert_equal(result, exp)

            # same offset
            other = pd.Index(
                [pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
            )
            other = tm.box_expected(other, box_with_array)
            result = s + other
            exp = DatetimeIndex([Timestamp("2001-1-1"), Timestamp("2001-2-1")])
            exp = tm.box_expected(exp, box_with_array)
            tm.assert_equal(result, exp)

    # TODO: overlap with test_dt64arr_add_mixed_offset_array?
    def test_dt64arr_add_sub_offset_ndarray(self, tz_naive_fixture, box_with_array):
        """Add/sub with an ndarray of offsets matches pointwise results."""
        # GH#18849
        tz = tz_naive_fixture
        dti = pd.date_range("2017-01-01", periods=2, tz=tz)
        dtarr = tm.box_expected(dti, box_with_array)

        other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])

        warn = None if box_with_array is pd.DataFrame else PerformanceWarning
        with tm.assert_produces_warning(warn, clear=[pd.core.arrays.datetimelike]):
            res = dtarr + other
        expected = DatetimeIndex(
            [dti[n] + other[n] for n in range(len(dti))], name=dti.name, freq="infer"
        )
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(res, expected)

        with tm.assert_produces_warning(warn, clear=[pd.core.arrays.datetimelike]):
            res2 = other + dtarr
        tm.assert_equal(res2, expected)

        with tm.assert_produces_warning(warn, clear=[pd.core.arrays.datetimelike]):
            res = dtarr - other
        expected = DatetimeIndex(
            [dti[n] - other[n] for n in range(len(dti))], name=dti.name, freq="infer"
        )
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(res, expected)

    @pytest.mark.parametrize(
        "op, offset, exp, exp_freq",
        [
            (
                "__add__",
                pd.DateOffset(months=3, days=10),
                [
                    Timestamp("2014-04-11"),
                    Timestamp("2015-04-11"),
                    Timestamp("2016-04-11"),
                    Timestamp("2017-04-11"),
                ],
                None,
            ),
            (
                "__add__",
                pd.DateOffset(months=3),
                [
                    Timestamp("2014-04-01"),
                    Timestamp("2015-04-01"),
                    Timestamp("2016-04-01"),
                    Timestamp("2017-04-01"),
                ],
                "AS-APR",
            ),
            (
                "__sub__",
                pd.DateOffset(months=3, days=10),
                [
                    Timestamp("2013-09-21"),
                    Timestamp("2014-09-21"),
                    Timestamp("2015-09-21"),
                    Timestamp("2016-09-21"),
                ],
                None,
            ),
            (
                "__sub__",
                pd.DateOffset(months=3),
                [
                    Timestamp("2013-10-01"),
                    Timestamp("2014-10-01"),
                    Timestamp("2015-10-01"),
                    Timestamp("2016-10-01"),
                ],
                "AS-OCT",
            ),
        ],
    )
    def test_dti_add_sub_nonzero_mth_offset(
        self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
    ):
        """Month-bearing DateOffsets applied via dunder name on tz-aware data."""
        # GH 26258
        tz = tz_aware_fixture
        date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
        date = tm.box_expected(date, box_with_array, False)
        mth = getattr(date, op)
        result = mth(offset)

        expected = pd.DatetimeIndex(exp, tz=tz, freq=exp_freq)
        expected = tm.box_expected(expected, box_with_array, False)
        tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
    """Tests that datetime64 arithmetic raises (or masks NaT) on int64 overflow."""

    # TODO: box + de-duplicate

    def test_dt64_overflow_masking(self, box_with_array):
        """NaT positions are masked out before the overflow check fires."""
        # GH#25317
        left = Series([Timestamp("1969-12-31")])
        right = Series([NaT])

        left = tm.box_expected(left, box_with_array)
        right = tm.box_expected(right, box_with_array)

        expected = TimedeltaIndex([NaT])
        expected = tm.box_expected(expected, box_with_array)

        result = left - right
        tm.assert_equal(result, expected)

    def test_dt64_series_arith_overflow(self):
        """Out-of-bounds results raise OverflowError; NaT entries pass through."""
        # GH#12534, fixed by GH#19024
        dt = pd.Timestamp("1700-01-31")
        td = pd.Timedelta("20000 Days")
        dti = pd.date_range("1949-09-30", freq="100Y", periods=4)
        ser = pd.Series(dti)
        msg = "Overflow in int64 addition"
        with pytest.raises(OverflowError, match=msg):
            ser - dt
        with pytest.raises(OverflowError, match=msg):
            dt - ser
        with pytest.raises(OverflowError, match=msg):
            ser + td
        with pytest.raises(OverflowError, match=msg):
            td + ser

        # NaT-ing the overflowing entry makes the op succeed elementwise
        ser.iloc[-1] = pd.NaT
        expected = pd.Series(
            ["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
        )
        res = ser + td
        tm.assert_series_equal(res, expected)
        res = td + ser
        tm.assert_series_equal(res, expected)

        ser.iloc[1:] = pd.NaT
        expected = pd.Series(
            ["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]"
        )
        res = ser - dt
        tm.assert_series_equal(res, expected)
        res = dt - ser
        tm.assert_series_equal(res, -expected)

    def test_datetimeindex_sub_timestamp_overflow(self):
        """Subtraction near Timestamp.min/max overflows only when out of range."""
        dtimax = pd.to_datetime(["now", pd.Timestamp.max])
        dtimin = pd.to_datetime(["now", pd.Timestamp.min])

        tsneg = Timestamp("1950-01-01")
        # same instant in several scalar representations
        ts_neg_variants = [
            tsneg,
            tsneg.to_pydatetime(),
            tsneg.to_datetime64().astype("datetime64[ns]"),
            tsneg.to_datetime64().astype("datetime64[D]"),
        ]

        tspos = Timestamp("1980-01-01")
        ts_pos_variants = [
            tspos,
            tspos.to_pydatetime(),
            tspos.to_datetime64().astype("datetime64[ns]"),
            tspos.to_datetime64().astype("datetime64[D]"),
        ]
        msg = "Overflow in int64 addition"
        for variant in ts_neg_variants:
            with pytest.raises(OverflowError, match=msg):
                # max - (very early ts) exceeds the representable range
                dtimax - variant

        expected = pd.Timestamp.max.value - tspos.value
        for variant in ts_pos_variants:
            res = dtimax - variant
            assert res[1].value == expected

        expected = pd.Timestamp.min.value - tsneg.value
        for variant in ts_neg_variants:
            res = dtimin - variant
            assert res[1].value == expected

        for variant in ts_pos_variants:
            with pytest.raises(OverflowError, match=msg):
                # min - (later ts) underflows
                dtimin - variant

    def test_datetimeindex_sub_datetimeindex_overflow(self):
        """DatetimeIndex - DatetimeIndex overflow checks, including edge cases."""
        # GH#22492, GH#22508
        dtimax = pd.to_datetime(["now", pd.Timestamp.max])
        dtimin = pd.to_datetime(["now", pd.Timestamp.min])

        ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
        ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])

        # General tests
        expected = pd.Timestamp.max.value - ts_pos[1].value
        result = dtimax - ts_pos
        assert result[1].value == expected

        expected = pd.Timestamp.min.value - ts_neg[1].value
        result = dtimin - ts_neg
        assert result[1].value == expected
        msg = "Overflow in int64 addition"
        with pytest.raises(OverflowError, match=msg):
            dtimax - ts_neg
        with pytest.raises(OverflowError, match=msg):
            dtimin - ts_pos

        # Edge cases
        tmin = pd.to_datetime([pd.Timestamp.min])
        t1 = tmin + pd.Timedelta.max + pd.Timedelta("1us")
        with pytest.raises(OverflowError, match=msg):
            t1 - tmin

        tmax = pd.to_datetime([pd.Timestamp.max])
        t2 = tmax + pd.Timedelta.min - pd.Timedelta("1us")
        with pytest.raises(OverflowError, match=msg):
            tmax - t2
class TestTimestampSeriesArithmetic:
    """Arithmetic tests for Series of Timestamps (mostly legacy, un-boxed)."""

    def test_empty_series_add_sub(self):
        """Empty dt64/td64 Series combine to empty; td64 - dt64 raises."""
        # GH#13844
        a = Series(dtype="M8[ns]")
        b = Series(dtype="m8[ns]")
        tm.assert_series_equal(a, a + b)
        tm.assert_series_equal(a, a - b)
        tm.assert_series_equal(a, b + a)
        msg = "cannot subtract"
        with pytest.raises(TypeError, match=msg):
            b - a

    def test_operators_datetimelike(self):
        """Smoke test: dt64/td64 Series support the expected +/- combinations."""
        # ## timedelta64 ###
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan

        # ## datetime64 ###
        dt1 = Series(
            [
                pd.Timestamp("20111230"),
                pd.Timestamp("20120101"),
                pd.Timestamp("20120103"),
            ]
        )
        dt1.iloc[2] = np.nan
        dt2 = Series(
            [
                pd.Timestamp("20111231"),
                pd.Timestamp("20120102"),
                pd.Timestamp("20120104"),
            ]
        )
        # results discarded; only checking that these don't raise
        dt1 - dt2
        dt2 - dt1

        # ## datetime64 with timedelta ###
        dt1 + td1
        td1 + dt1
        dt1 - td1
        # TODO: Decide if this ought to work.
        # td1 - dt1

        # ## timedelta with datetime64 ###
        td1 + dt1
        dt1 + td1

    def test_dt64ser_sub_datetime_dtype(self):
        """Series[dt64] minus a stdlib datetime yields timedelta64[ns]."""
        ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
        dt = datetime(1993, 6, 22, 13, 30)
        ser = Series([ts])
        result = pd.to_timedelta(np.abs(ser - dt))
        assert result.dtype == "timedelta64[ns]"

    # -------------------------------------------------------------
    # TODO: This next block of tests came from tests.series.test_operators,
    # needs to be de-duplicated and parametrized over `box` classes

    def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
        """Undefined datetime-like operator combinations raise TypeError."""
        # these are all TypeError ops
        op_str = all_arithmetic_operators

        def check(get_ser, test_ser):
            # check that we are getting a TypeError
            # with 'operate' (from core/ops.py) for the ops that are not
            # defined
            op = getattr(get_ser, op_str, None)
            # Previously, _validate_for_numeric_binop in core/indexes/base.py
            # did this for us.
            with pytest.raises(
                TypeError, match="operate|[cC]annot|unsupported operand"
            ):
                op(test_ser)

        # ## timedelta64 ###
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan

        # ## datetime64 ###
        dt1 = Series(
            [Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
        )
        dt1.iloc[2] = np.nan
        dt2 = Series(
            [Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
        )
        if op_str not in ["__sub__", "__rsub__"]:
            check(dt1, dt2)

        # ## datetime64 with timedelta ###
        # TODO(jreback) __rsub__ should raise?
        if op_str not in ["__add__", "__radd__", "__sub__"]:
            check(dt1, td1)

        # 8260, 10763
        # datetime64 with tz
        tz = "US/Eastern"
        dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
        dt2 = dt1.copy()
        dt2.iloc[2] = np.nan
        td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
        td2 = td1.copy()
        td2.iloc[1] = np.nan

        if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
            check(dt2, td2)

    def test_sub_single_tz(self):
        """Subtracting same-tz single-element Series yields a Timedelta."""
        # GH#12290
        s1 = Series([pd.Timestamp("2016-02-10", tz="America/Sao_Paulo")])
        s2 = Series([pd.Timestamp("2016-02-08", tz="America/Sao_Paulo")])
        result = s1 - s2
        expected = Series([Timedelta("2days")])
        tm.assert_series_equal(result, expected)
        result = s2 - s1
        expected = Series([Timedelta("-2days")])
        tm.assert_series_equal(result, expected)

    def test_dt64tz_series_sub_dtitz(self):
        """Subtracting a tz-aware DatetimeIndex from a same-tz Series works."""
        # GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
        # (with same tz) raises, fixed by #19024
        dti = pd.date_range("1999-09-30", periods=10, tz="US/Pacific")
        ser = pd.Series(dti)
        expected = pd.Series(pd.TimedeltaIndex(["0days"] * 10))

        res = dti - ser
        tm.assert_series_equal(res, expected)
        res = ser - dti
        tm.assert_series_equal(res, expected)

    def test_sub_datetime_compat(self):
        """Subtracting a stdlib datetime matches subtracting a Timestamp."""
        # see GH#14088
        s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), pd.NaT])
        dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
        exp = Series([Timedelta("1 days"), pd.NaT])
        tm.assert_series_equal(s - dt, exp)
        tm.assert_series_equal(s - Timestamp(dt), exp)

    def test_dt64_series_add_mixed_tick_DateOffset(self):
        """Adding single and chained Tick offsets to a Series of Timestamps."""
        # GH#4532
        # operate with pd.offsets
        s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])

        result = s + pd.offsets.Milli(5)
        result2 = pd.offsets.Milli(5) + s
        expected = Series(
            [Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
        )
        tm.assert_series_equal(result, expected)
        tm.assert_series_equal(result2, expected)

        result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
        expected = Series(
            [Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
        )
        tm.assert_series_equal(result, expected)

    def test_datetime64_ops_nat(self):
        """NaT scalar add/sub with dt64 Series; unary minus on dt64 raises."""
        # GH#11349
        datetime_series = Series([NaT, Timestamp("19900315")])
        nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
        single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")

        # subtraction
        tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
        msg = "Unary negative expects"
        with pytest.raises(TypeError, match=msg):
            # negating a datetime64 Series is undefined
            -single_nat_dtype_datetime + datetime_series

        tm.assert_series_equal(
            -NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
        )
        with pytest.raises(TypeError, match=msg):
            -single_nat_dtype_datetime + nat_series_dtype_timestamp

        # addition
        tm.assert_series_equal(
            nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
        )
        tm.assert_series_equal(
            NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
        )

        tm.assert_series_equal(
            nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
        )
        tm.assert_series_equal(
            NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
        )

    # -------------------------------------------------------------
    # Invalid Operations
    # TODO: this block also needs to be de-duplicated and parametrized

    @pytest.mark.parametrize(
        "dt64_series",
        [
            Series([Timestamp("19900315"), Timestamp("19900315")]),
            Series([pd.NaT, Timestamp("19900315")]),
            Series([pd.NaT, pd.NaT], dtype="datetime64[ns]"),
        ],
    )
    @pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
    def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
        """Multiplying/dividing a dt64 Series by numerics raises TypeError."""
        # multiplication
        msg = "cannot perform .* with this index type"
        with pytest.raises(TypeError, match=msg):
            dt64_series * one
        with pytest.raises(TypeError, match=msg):
            one * dt64_series

        # division
        with pytest.raises(TypeError, match=msg):
            dt64_series / one
        with pytest.raises(TypeError, match=msg):
            one / dt64_series

    @pytest.mark.parametrize("op", ["__add__", "__radd__", "__sub__", "__rsub__"])
    @pytest.mark.parametrize("tz", [None, "Asia/Tokyo"])
    def test_dt64_series_add_intlike(self, tz, op):
        """Adding/subtracting integer-likes to a dt64 Series raises TypeError."""
        # GH#19123
        dti = pd.DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
        ser = Series(dti)

        other = Series([20, 30, 40], dtype="uint8")

        method = getattr(ser, op)
        msg = "|".join(
            [
                "incompatible type for a .* operation",
                "cannot evaluate a numeric op",
                "ufunc .* cannot use operands",
                "cannot (add|subtract)",
            ]
        )
        with pytest.raises(TypeError, match=msg):
            method(1)
        with pytest.raises(TypeError, match=msg):
            method(other)
        with pytest.raises(TypeError, match=msg):
            method(other.values)
        with pytest.raises(TypeError, match=msg):
            method(pd.Index(other))

    # -------------------------------------------------------------
    # Timezone-Centric Tests

    def test_operators_datetimelike_with_timezones(self):
        """tz-aware Series +/- timedeltas equals the tz-naive computation."""
        tz = "US/Eastern"
        dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
        dt2 = dt1.copy()
        dt2.iloc[2] = np.nan

        td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
        td2 = td1.copy()
        td2.iloc[1] = np.nan

        result = dt1 + td1[0]
        exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = dt2 + td2[0]
        exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        # odd numpy behavior with scalar timedeltas
        result = td1[0] + dt1
        exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = td2[0] + dt2
        exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = dt1 - td1[0]
        exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        msg = "bad operand type for unary"
        with pytest.raises(TypeError, match=msg):
            td1[0] - dt1

        result = dt2 - td2[0]
        exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        with pytest.raises(TypeError, match=msg):
            td2[0] - dt2

        result = dt1 + td1
        exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = dt2 + td2
        exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = dt1 - td1
        exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = dt2 - td2
        exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        msg = "cannot (add|subtract)"
        with pytest.raises(TypeError, match=msg):
            td1 - dt1
        with pytest.raises(TypeError, match=msg):
            td2 - dt2
class TestDatetimeIndexArithmetic:
    """Addition/subtraction tests for DatetimeIndex against integers,
    integer arrays, timedelta-likes, offsets, and other datetime-likes."""

    # -------------------------------------------------------------
    # Binary operations DatetimeIndex and int

    def test_dti_add_int(self, tz_naive_fixture, one):
        # Variants of `one` for #19012
        tz = tz_naive_fixture
        rng = pd.date_range("2000-01-01 09:00", freq="H", periods=10, tz=tz)
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            # integer + DatetimeIndex is deprecated (shifts by freq)
            result = rng + one
        expected = pd.date_range("2000-01-01 10:00", freq="H", periods=10, tz=tz)
        tm.assert_index_equal(result, expected)

    def test_dti_iadd_int(self, tz_naive_fixture, one):
        """In-place ``+=`` of an integer behaves like ``+``."""
        tz = tz_naive_fixture
        rng = pd.date_range("2000-01-01 09:00", freq="H", periods=10, tz=tz)
        expected = pd.date_range("2000-01-01 10:00", freq="H", periods=10, tz=tz)
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            rng += one
        tm.assert_index_equal(rng, expected)

    def test_dti_sub_int(self, tz_naive_fixture, one):
        """Subtracting an integer shifts backwards by one freq step."""
        tz = tz_naive_fixture
        rng = pd.date_range("2000-01-01 09:00", freq="H", periods=10, tz=tz)
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            result = rng - one
        expected = pd.date_range("2000-01-01 08:00", freq="H", periods=10, tz=tz)
        tm.assert_index_equal(result, expected)

    def test_dti_isub_int(self, tz_naive_fixture, one):
        """In-place ``-=`` of an integer behaves like ``-``."""
        tz = tz_naive_fixture
        rng = pd.date_range("2000-01-01 09:00", freq="H", periods=10, tz=tz)
        expected = pd.date_range("2000-01-01 08:00", freq="H", periods=10, tz=tz)
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            rng -= one
        tm.assert_index_equal(rng, expected)

    # -------------------------------------------------------------
    # __add__/__sub__ with integer arrays

    @pytest.mark.parametrize("freq", ["H", "D"])
    @pytest.mark.parametrize("int_holder", [np.array, pd.Index])
    def test_dti_add_intarray_tick(self, int_holder, freq):
        # GH#19959
        dti = pd.date_range("2016-01-01", periods=2, freq=freq)
        other = int_holder([4, -1])
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            # elementwise Timestamp + int also warns, hence inside the context
            expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))])
            result = dti + other
        tm.assert_index_equal(result, expected)
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            result = other + dti
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize("freq", ["W", "M", "MS", "Q"])
    @pytest.mark.parametrize("int_holder", [np.array, pd.Index])
    def test_dti_add_intarray_non_tick(self, int_holder, freq):
        # GH#19959
        dti = pd.date_range("2016-01-01", periods=2, freq=freq)
        other = int_holder([4, -1])
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))])
        # tm.assert_produces_warning does not handle cases where we expect
        # two warnings, in this case PerformanceWarning and FutureWarning.
        # Until that is fixed, we don't catch either
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            result = dti + other
        tm.assert_index_equal(result, expected)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            result = other + dti
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize("int_holder", [np.array, pd.Index])
    def test_dti_add_intarray_no_freq(self, int_holder):
        # GH#19959
        # without a freq there is nothing to shift by, so integer ops raise
        dti = pd.DatetimeIndex(["2016-01-01", "NaT", "2017-04-05 06:07:08"])
        other = int_holder([9, 4, -1])
        nfmsg = "Cannot shift with no freq"
        tmsg = "cannot subtract DatetimeArray from"
        with pytest.raises(NullFrequencyError, match=nfmsg):
            dti + other
        with pytest.raises(NullFrequencyError, match=nfmsg):
            other + dti
        with pytest.raises(NullFrequencyError, match=nfmsg):
            dti - other
        with pytest.raises(TypeError, match=tmsg):
            other - dti

    # -------------------------------------------------------------
    # Binary operations DatetimeIndex and TimedeltaIndex/array

    def test_dti_add_tdi(self, tz_naive_fixture):
        # GH#17558
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        tdi = pd.timedelta_range("0 days", periods=10)
        expected = pd.date_range("2017-01-01", periods=10, tz=tz)
        # add with TimedeltaIndex
        result = dti + tdi
        tm.assert_index_equal(result, expected)
        result = tdi + dti
        tm.assert_index_equal(result, expected)
        # add with timedelta64 array
        result = dti + tdi.values
        tm.assert_index_equal(result, expected)
        result = tdi.values + dti
        tm.assert_index_equal(result, expected)

    def test_dti_iadd_tdi(self, tz_naive_fixture):
        # GH#17558
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        tdi = pd.timedelta_range("0 days", periods=10)
        expected = pd.date_range("2017-01-01", periods=10, tz=tz)
        # iadd with TimedeltaIndex
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result += tdi
        tm.assert_index_equal(result, expected)
        result = pd.timedelta_range("0 days", periods=10)
        result += dti
        tm.assert_index_equal(result, expected)
        # iadd with timedelta64 array
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result += tdi.values
        tm.assert_index_equal(result, expected)
        # NOTE(review): the following repeats the TimedeltaIndex += dti case
        # from above rather than exercising a timedelta64-array variant;
        # possibly unintentional -- confirm against upstream history.
        result = pd.timedelta_range("0 days", periods=10)
        result += dti
        tm.assert_index_equal(result, expected)

    def test_dti_sub_tdi(self, tz_naive_fixture):
        # GH#17558
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        tdi = pd.timedelta_range("0 days", periods=10)
        expected = pd.date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
        # sub with TimedeltaIndex
        result = dti - tdi
        tm.assert_index_equal(result, expected)
        msg = "cannot subtract .*TimedeltaArray"
        with pytest.raises(TypeError, match=msg):
            tdi - dti
        # sub with timedelta64 array
        result = dti - tdi.values
        tm.assert_index_equal(result, expected)
        msg = "cannot subtract DatetimeArray from"
        with pytest.raises(TypeError, match=msg):
            tdi.values - dti

    def test_dti_isub_tdi(self, tz_naive_fixture):
        # GH#17558
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        tdi = pd.timedelta_range("0 days", periods=10)
        expected = pd.date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
        # isub with TimedeltaIndex
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result -= tdi
        tm.assert_index_equal(result, expected)
        msg = "cannot subtract .* from a TimedeltaArray"
        with pytest.raises(TypeError, match=msg):
            tdi -= dti
        # isub with timedelta64 array
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result -= tdi.values
        tm.assert_index_equal(result, expected)
        # the exact error message differs across numpy/pandas versions
        msg = "|".join(
            [
                "cannot perform __neg__ with this index type:",
                "ufunc subtract cannot use operands with types",
                "cannot subtract DatetimeArray from",
            ]
        )
        with pytest.raises(TypeError, match=msg):
            tdi.values -= dti

    # -------------------------------------------------------------
    # Binary Operations DatetimeIndex and datetime-like
    # TODO: A couple other tests belong in this section. Move them in
    # a PR where there isn't already a giant diff.

    @pytest.mark.parametrize(
        "addend",
        [
            datetime(2011, 1, 1),
            DatetimeIndex(["2011-01-01", "2011-01-02"]),
            DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize("US/Eastern"),
            np.datetime64("2011-01-01"),
            Timestamp("2011-01-01"),
        ],
        ids=lambda x: type(x).__name__,
    )
    @pytest.mark.parametrize("tz", [None, "US/Eastern"])
    def test_add_datetimelike_and_dti(self, addend, tz):
        # GH#9631
        # adding datetime-like to DatetimeIndex is never valid
        dti = DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize(tz)
        msg = (
            "cannot add DatetimeArray and {0}".format(type(addend).__name__)
        ).replace("DatetimeIndex", "DatetimeArray")
        with pytest.raises(TypeError, match=msg):
            dti + addend
        with pytest.raises(TypeError, match=msg):
            addend + dti

    # -------------------------------------------------------------

    def test_sub_dti_dti(self):
        # previously performed setop (deprecated in 0.16.0), now changed to
        # return subtraction -> TimeDeltaIndex (GH ...)
        dti = date_range("20130101", periods=3)
        dti_tz = date_range("20130101", periods=3).tz_localize("US/Eastern")
        dti_tz2 = date_range("20130101", periods=3).tz_localize("UTC")
        expected = TimedeltaIndex([0, 0, 0])
        result = dti - dti
        tm.assert_index_equal(result, expected)
        result = dti_tz - dti_tz
        tm.assert_index_equal(result, expected)
        # mixed/mismatched timezones must raise
        msg = "DatetimeArray subtraction must have the same timezones or"
        with pytest.raises(TypeError, match=msg):
            dti_tz - dti
        with pytest.raises(TypeError, match=msg):
            dti - dti_tz
        with pytest.raises(TypeError, match=msg):
            dti_tz - dti_tz2
        # isub
        dti -= dti
        tm.assert_index_equal(dti, expected)
        # different length raises ValueError
        dti1 = date_range("20130101", periods=3)
        dti2 = date_range("20130101", periods=4)
        msg = "cannot add indices of unequal length"
        with pytest.raises(ValueError, match=msg):
            dti1 - dti2
        # NaN propagation
        dti1 = DatetimeIndex(["2012-01-01", np.nan, "2012-01-03"])
        dti2 = DatetimeIndex(["2012-01-02", "2012-01-03", np.nan])
        expected = TimedeltaIndex(["1 days", np.nan, np.nan])
        result = dti2 - dti1
        tm.assert_index_equal(result, expected)

    # -------------------------------------------------------------------
    # TODO: Most of this block is moved from series or frame tests, needs
    # cleanup, box-parametrization, and de-duplication

    @pytest.mark.parametrize("op", [operator.add, operator.sub])
    def test_timedelta64_equal_timedelta_supported_ops(self, op):
        """np.timedelta64 and datetime.timedelta operands representing the
        same duration must give identical results."""
        ser = Series(
            [
                Timestamp("20130301"),
                Timestamp("20130228 23:00:00"),
                Timestamp("20130228 22:00:00"),
                Timestamp("20130228 21:00:00"),
            ]
        )
        intervals = ["D", "h", "m", "s", "us"]
        # TODO: unused
        # npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
        #                   'h': 60 * 60 * 1000000,
        #                   'm': 60 * 1000000,
        #                   's': 1000000,
        #                   'us': 1}

        def timedelta64(*args):
            # see casting notes in NumPy gh-12927
            return np.sum(list(starmap(np.timedelta64, zip(args, intervals))))

        for d, h, m, s, us in product(*([range(2)] * 5)):
            nptd = timedelta64(d, h, m, s, us)
            pytd = timedelta(days=d, hours=h, minutes=m, seconds=s, microseconds=us)
            lhs = op(ser, nptd)
            rhs = op(ser, pytd)
            tm.assert_series_equal(lhs, rhs)

    def test_ops_nat_mixed_datetime64_timedelta64(self):
        # GH#11349
        timedelta_series = Series([NaT, Timedelta("1s")])
        datetime_series = Series([NaT, Timestamp("19900315")])
        nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]")
        nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
        single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
        single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]")
        # subtraction
        tm.assert_series_equal(
            datetime_series - single_nat_dtype_datetime, nat_series_dtype_timedelta
        )
        tm.assert_series_equal(
            datetime_series - single_nat_dtype_timedelta, nat_series_dtype_timestamp
        )
        tm.assert_series_equal(
            -single_nat_dtype_timedelta + datetime_series, nat_series_dtype_timestamp
        )
        # without a Series wrapping the NaT, it is ambiguous
        # whether it is a datetime64 or timedelta64
        # defaults to interpreting it as timedelta64
        tm.assert_series_equal(
            nat_series_dtype_timestamp - single_nat_dtype_datetime,
            nat_series_dtype_timedelta,
        )
        tm.assert_series_equal(
            nat_series_dtype_timestamp - single_nat_dtype_timedelta,
            nat_series_dtype_timestamp,
        )
        tm.assert_series_equal(
            -single_nat_dtype_timedelta + nat_series_dtype_timestamp,
            nat_series_dtype_timestamp,
        )
        msg = "cannot subtract a datelike"
        with pytest.raises(TypeError, match=msg):
            timedelta_series - single_nat_dtype_datetime
        # addition
        tm.assert_series_equal(
            nat_series_dtype_timestamp + single_nat_dtype_timedelta,
            nat_series_dtype_timestamp,
        )
        tm.assert_series_equal(
            single_nat_dtype_timedelta + nat_series_dtype_timestamp,
            nat_series_dtype_timestamp,
        )
        tm.assert_series_equal(
            nat_series_dtype_timestamp + single_nat_dtype_timedelta,
            nat_series_dtype_timestamp,
        )
        tm.assert_series_equal(
            single_nat_dtype_timedelta + nat_series_dtype_timestamp,
            nat_series_dtype_timestamp,
        )
        tm.assert_series_equal(
            nat_series_dtype_timedelta + single_nat_dtype_datetime,
            nat_series_dtype_timestamp,
        )
        tm.assert_series_equal(
            single_nat_dtype_datetime + nat_series_dtype_timedelta,
            nat_series_dtype_timestamp,
        )

    def test_ufunc_coercions(self):
        """np.add/np.subtract with timedelta64 operands should behave like
        the Python operators and preserve/infer freq."""
        idx = date_range("2011-01-01", periods=3, freq="2D", name="x")
        delta = np.timedelta64(1, "D")
        for result in [idx + delta, np.add(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            exp = date_range("2011-01-02", periods=3, freq="2D", name="x")
            tm.assert_index_equal(result, exp)
            assert result.freq == "2D"
        for result in [idx - delta, np.subtract(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            exp = date_range("2010-12-31", periods=3, freq="2D", name="x")
            tm.assert_index_equal(result, exp)
            assert result.freq == "2D"
        # array of deltas changes the spacing, so freq is re-inferred
        delta = np.array(
            [np.timedelta64(1, "D"), np.timedelta64(2, "D"), np.timedelta64(3, "D")]
        )
        for result in [idx + delta, np.add(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            exp = DatetimeIndex(
                ["2011-01-02", "2011-01-05", "2011-01-08"], freq="3D", name="x"
            )
            tm.assert_index_equal(result, exp)
            assert result.freq == "3D"
        for result in [idx - delta, np.subtract(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            exp = DatetimeIndex(
                ["2010-12-31", "2011-01-01", "2011-01-02"], freq="D", name="x"
            )
            tm.assert_index_equal(result, exp)
            assert result.freq == "D"

    @pytest.mark.parametrize(
        "names", [("foo", None, None), ("baz", "bar", None), ("bar", "bar", "bar")]
    )
    @pytest.mark.parametrize("tz", [None, "America/Chicago"])
    def test_dti_add_series(self, tz, names):
        # GH#13905
        # name propagation when adding a Series of timedeltas to an index
        index = DatetimeIndex(
            ["2016-06-28 05:30", "2016-06-28 05:31"], tz=tz, name=names[0]
        )
        ser = Series([Timedelta(seconds=5)] * 2, index=index, name=names[1])
        expected = Series(index + Timedelta(seconds=5), index=index, name=names[2])
        # passing name arg isn't enough when names[2] is None
        expected.name = names[2]
        assert expected.dtype == index.dtype
        result = ser + index
        tm.assert_series_equal(result, expected)
        result2 = index + ser
        tm.assert_series_equal(result2, expected)
        expected = index + Timedelta(seconds=5)
        result3 = ser.values + index
        tm.assert_index_equal(result3, expected)
        result4 = index + ser.values
        tm.assert_index_equal(result4, expected)

    @pytest.mark.parametrize(
        "names", [(None, None, None), ("foo", "bar", None), ("foo", "foo", "foo")]
    )
    def test_dti_add_offset_index(self, tz_naive_fixture, names):
        # GH#18849, GH#19744
        # object-dtype offset Index triggers elementwise (slow) path
        tz = tz_naive_fixture
        dti = pd.date_range("2017-01-01", periods=2, tz=tz, name=names[0])
        other = pd.Index([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], name=names[1])
        with tm.assert_produces_warning(
            PerformanceWarning, clear=[pd.core.arrays.datetimelike]
        ):
            res = dti + other
        expected = DatetimeIndex(
            [dti[n] + other[n] for n in range(len(dti))], name=names[2], freq="infer"
        )
        tm.assert_index_equal(res, expected)
        with tm.assert_produces_warning(
            PerformanceWarning, clear=[pd.core.arrays.datetimelike]
        ):
            res2 = other + dti
        tm.assert_index_equal(res2, expected)

    @pytest.mark.parametrize(
        "names", [(None, None, None), ("foo", "bar", None), ("foo", "foo", "foo")]
    )
    def test_dti_sub_offset_index(self, tz_naive_fixture, names):
        # GH#18824, GH#19744
        tz = tz_naive_fixture
        dti = pd.date_range("2017-01-01", periods=2, tz=tz, name=names[0])
        other = pd.Index([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], name=names[1])
        with tm.assert_produces_warning(
            PerformanceWarning, clear=[pd.core.arrays.datetimelike]
        ):
            res = dti - other
        expected = DatetimeIndex(
            [dti[n] - other[n] for n in range(len(dti))], name=names[2], freq="infer"
        )
        tm.assert_index_equal(res, expected)

    @pytest.mark.parametrize(
        "names", [(None, None, None), ("foo", "bar", None), ("foo", "foo", "foo")]
    )
    def test_dti_with_offset_series(self, tz_naive_fixture, names):
        # GH#18849
        # Series of offsets: result is a Series, names propagate as with Index
        tz = tz_naive_fixture
        dti = pd.date_range("2017-01-01", periods=2, tz=tz, name=names[0])
        other = Series([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], name=names[1])
        expected_add = Series(
            [dti[n] + other[n] for n in range(len(dti))], name=names[2]
        )
        with tm.assert_produces_warning(
            PerformanceWarning, clear=[pd.core.arrays.datetimelike]
        ):
            res = dti + other
        tm.assert_series_equal(res, expected_add)
        with tm.assert_produces_warning(
            PerformanceWarning, clear=[pd.core.arrays.datetimelike]
        ):
            res2 = other + dti
        tm.assert_series_equal(res2, expected_add)
        expected_sub = Series(
            [dti[n] - other[n] for n in range(len(dti))], name=names[2]
        )
        with tm.assert_produces_warning(
            PerformanceWarning, clear=[pd.core.arrays.datetimelike]
        ):
            res3 = dti - other
        tm.assert_series_equal(res3, expected_sub)
@pytest.mark.parametrize("years", [-1, 0, 1])
@pytest.mark.parametrize("months", [-2, 0, 2])
def test_shift_months(years, months):
    """shift_months on the raw i8 values must match adding the equivalent
    DateOffset to each Timestamp (including month-end edge cases)."""
    stamps = [
        Timestamp("2000-01-05 00:15:00"),
        Timestamp("2000-01-31 00:23:00"),
        Timestamp("2000-01-01"),
        Timestamp("2000-02-29"),
        Timestamp("2000-12-31"),
    ]
    dti = DatetimeIndex(stamps)

    total_months = years * 12 + months
    actual = DatetimeIndex(shift_months(dti.asi8, total_months))

    offset = pd.offsets.DateOffset(years=years, months=months)
    expected = DatetimeIndex([ts + offset for ts in dti])
    tm.assert_index_equal(actual, expected)
class SubDatetime(datetime):
    """Trivial datetime subclass used to verify that arithmetic keeps
    working on user-defined datetime subclasses (see GH 25851)."""
@pytest.mark.parametrize(
    "lh,rh",
    [
        (SubDatetime(2000, 1, 1), Timedelta(hours=1)),
        (Timedelta(hours=1), SubDatetime(2000, 1, 1)),
    ],
)
def test_dt_subclass_add_timedelta(lh, rh):
    # GH 25851
    # a datetime subclass must survive Timedelta addition in either order
    total = lh + rh
    assert total == SubDatetime(2000, 1, 1, 1)
| {
"repo_name": "toobaz/pandas",
"path": "pandas/tests/arithmetic/test_datetime64.py",
"copies": "1",
"size": "96004",
"license": "bsd-3-clause",
"hash": -8919672283907945000,
"line_mean": 35.7409108305,
"line_max": 87,
"alpha_frac": 0.5623828174,
"autogenerated": false,
"ratio": 3.6321125907990313,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9693022309337787,
"avg_score": 0.00029461977224906204,
"num_lines": 2613
} |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import datetime, time, timedelta
from itertools import product, starmap
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.compat.numpy import np_datetime64_compat
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray, TimedeltaArray
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
    # Comparison tests for datetime64 vectors fully parametrized over
    # DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
    # tests will eventually end up here.

    def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
        # Test comparison with zero-dimensional array is unboxed
        tz = tz_naive_fixture
        box = box_with_array
        # Index/array comparisons return a plain ndarray
        xbox = (
            box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
        )
        dti = date_range("20130101", periods=3, tz=tz)
        other = np.array(dti.to_numpy()[0])
        dtarr = tm.box_expected(dti, box)
        result = dtarr <= other
        expected = np.array([True, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)

    @pytest.mark.parametrize(
        "other",
        [
            "foo",
            -1,
            99,
            4.0,
            object(),
            timedelta(days=2),
            # GH#19800, GH#19301 datetime.date comparison raises to
            # match DatetimeIndex/Timestamp. This also matches the behavior
            # of stdlib datetime.datetime
            datetime(2001, 1, 1).date(),
            # GH#19301 None and NaN are *not* cast to NaT for comparisons
            None,
            np.nan,
        ],
    )
    def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
        # GH#22074, GH#15966
        tz = tz_naive_fixture
        rng = date_range("1/1/2000", periods=10, tz=tz)
        dtarr = tm.box_expected(rng, box_with_array)
        assert_invalid_comparison(dtarr, other, box_with_array)

    @pytest.mark.parametrize(
        "other",
        [
            list(range(10)),
            np.arange(10),
            np.arange(10).astype(np.float32),
            np.arange(10).astype(object),
            pd.timedelta_range("1ns", periods=10).array,
            np.array(pd.timedelta_range("1ns", periods=10)),
            list(pd.timedelta_range("1ns", periods=10)),
            pd.timedelta_range("1 Day", periods=10).astype(object),
            pd.period_range("1971-01-01", freq="D", periods=10).array,
            pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
        ],
    )
    def test_dt64arr_cmp_arraylike_invalid(self, other, tz_naive_fixture):
        # We don't parametrize this over box_with_array because listlike
        # other plays poorly with assert_invalid_comparison reversed checks
        tz = tz_naive_fixture
        dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
        assert_invalid_comparison(dta, other, tm.to_array)

    def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
        """Object array mixing ints, a Timestamp, and a Timedelta: eq/ne
        compare elementwise, ordered comparisons raise."""
        tz = tz_naive_fixture
        dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
        other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
        result = dta == other
        expected = np.array([False, False, False, True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = dta != other
        tm.assert_numpy_array_equal(result, ~expected)
        msg = "Invalid comparison between|Cannot compare type|not supported between"
        with pytest.raises(TypeError, match=msg):
            dta < other
        with pytest.raises(TypeError, match=msg):
            dta > other
        with pytest.raises(TypeError, match=msg):
            dta <= other
        with pytest.raises(TypeError, match=msg):
            dta >= other

    def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
        # GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
        tz = tz_naive_fixture
        box = box_with_array
        xbox = box if box not in [pd.Index, pd.array] else np.ndarray
        ts = Timestamp.now(tz)
        ser = Series([ts, pd.NaT])
        obj = tm.box_expected(ser, box)
        expected = Series([True, False], dtype=np.bool_)
        expected = tm.box_expected(expected, xbox)
        result = obj == ts
        tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
    # TODO: moved from tests.series.test_operators; needs cleanup

    @pytest.mark.parametrize(
        "pair",
        [
            (
                [Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
                [NaT, NaT, Timestamp("2011-01-03")],
            ),
            (
                [Timedelta("1 days"), NaT, Timedelta("3 days")],
                [NaT, NaT, Timedelta("3 days")],
            ),
            (
                [Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
                [NaT, NaT, Period("2011-03", freq="M")],
            ),
        ],
    )
    @pytest.mark.parametrize("reverse", [True, False])
    @pytest.mark.parametrize("dtype", [None, object])
    @pytest.mark.parametrize(
        "op, expected",
        [
            (operator.eq, Series([False, False, True])),
            (operator.ne, Series([True, True, False])),
            (operator.lt, Series([False, False, False])),
            (operator.gt, Series([False, False, False])),
            (operator.ge, Series([False, False, True])),
            (operator.le, Series([False, False, True])),
        ],
    )
    def test_nat_comparisons(
        self,
        dtype,
        index_or_series,
        reverse,
        pair,
        op,
        expected,
    ):
        """NaT entries compare False for everything except ``!=``; the
        expected vector is symmetric so reversing lhs/rhs changes nothing."""
        box = index_or_series
        l, r = pair
        if reverse:
            # add lhs / rhs switched data
            l, r = r, l
        left = Series(l, dtype=dtype)
        right = box(r, dtype=dtype)
        result = op(left, right)
        tm.assert_series_equal(result, expected)

    def test_comparison_invalid(self, tz_naive_fixture, box_with_array):
        # GH#4968
        # invalid date/int comparisons
        tz = tz_naive_fixture
        ser = Series(range(5))
        ser2 = Series(pd.date_range("20010101", periods=5, tz=tz))
        ser = tm.box_expected(ser, box_with_array)
        ser2 = tm.box_expected(ser2, box_with_array)
        assert_invalid_comparison(ser, ser2, box_with_array)

    @pytest.mark.parametrize(
        "data",
        [
            [Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
            [Timedelta("1 days"), NaT, Timedelta("3 days")],
            [Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
        ],
    )
    @pytest.mark.parametrize("dtype", [None, object])
    def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
        """Comparing against the NaT scalar: ``!=`` is all-True, every other
        comparison all-False, in both operand orders."""
        box = box_with_array
        if box_with_array is tm.to_array and dtype is object:
            # dont bother testing ndarray comparison methods as this fails
            # on older numpys (since they check object identity)
            return
        xbox = box if box not in [pd.Index, pd.array] else np.ndarray
        left = Series(data, dtype=dtype)
        left = tm.box_expected(left, box)
        expected = [False, False, False]
        expected = tm.box_expected(expected, xbox)
        if box is pd.array and dtype is object:
            expected = pd.array(expected, dtype="bool")
        tm.assert_equal(left == NaT, expected)
        tm.assert_equal(NaT == left, expected)
        expected = [True, True, True]
        expected = tm.box_expected(expected, xbox)
        if box is pd.array and dtype is object:
            expected = pd.array(expected, dtype="bool")
        tm.assert_equal(left != NaT, expected)
        tm.assert_equal(NaT != left, expected)
        expected = [False, False, False]
        expected = tm.box_expected(expected, xbox)
        if box is pd.array and dtype is object:
            expected = pd.array(expected, dtype="bool")
        tm.assert_equal(left < NaT, expected)
        tm.assert_equal(NaT > left, expected)
        tm.assert_equal(left <= NaT, expected)
        tm.assert_equal(NaT >= left, expected)
        tm.assert_equal(left > NaT, expected)
        tm.assert_equal(NaT < left, expected)
        tm.assert_equal(left >= NaT, expected)
        tm.assert_equal(NaT <= left, expected)

    @pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
    def test_series_comparison_scalars(self, val):
        """Vectorized comparison against a scalar matches the elementwise
        Python-level comparison."""
        series = Series(date_range("1/1/2000", periods=10))
        result = series > val
        expected = Series([x > val for x in series])
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize(
        "left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
    )
    def test_timestamp_compare_series(self, left, right):
        # see gh-4982
        # Make sure we can compare Timestamps on the right AND left hand side.
        ser = Series(pd.date_range("20010101", periods=10), name="dates")
        s_nat = ser.copy(deep=True)
        ser[0] = Timestamp("nat")
        ser[3] = Timestamp("nat")
        left_f = getattr(operator, left)
        right_f = getattr(operator, right)
        # No NaT
        expected = left_f(ser, Timestamp("20010109"))
        result = right_f(Timestamp("20010109"), ser)
        tm.assert_series_equal(result, expected)
        # NaT
        expected = left_f(ser, Timestamp("nat"))
        result = right_f(Timestamp("nat"), ser)
        tm.assert_series_equal(result, expected)
        # Compare to Timestamp with series containing NaT
        expected = left_f(s_nat, Timestamp("20010109"))
        result = right_f(Timestamp("20010109"), s_nat)
        tm.assert_series_equal(result, expected)
        # Compare to NaT with series containing NaT
        expected = left_f(s_nat, Timestamp("nat"))
        result = right_f(Timestamp("nat"), s_nat)
        tm.assert_series_equal(result, expected)

    def test_dt64arr_timestamp_equality(self, box_with_array):
        # GH#11034
        xbox = (
            box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
        )
        ser = Series([Timestamp("2000-01-29 01:59:00"), "NaT"])
        ser = tm.box_expected(ser, box_with_array)
        result = ser != ser
        expected = tm.box_expected([False, True], xbox)
        tm.assert_equal(result, expected)
        # only DataFrame-vs-scalar emits the alignment deprecation warning
        warn = FutureWarning if box_with_array is pd.DataFrame else None
        with tm.assert_produces_warning(warn):
            # alignment for frame vs series comparisons deprecated
            result = ser != ser[0]
        expected = tm.box_expected([False, True], xbox)
        tm.assert_equal(result, expected)
        with tm.assert_produces_warning(warn):
            # alignment for frame vs series comparisons deprecated
            result = ser != ser[1]
        expected = tm.box_expected([True, True], xbox)
        tm.assert_equal(result, expected)
        result = ser == ser
        expected = tm.box_expected([True, False], xbox)
        tm.assert_equal(result, expected)
        with tm.assert_produces_warning(warn):
            # alignment for frame vs series comparisons deprecated
            result = ser == ser[0]
        expected = tm.box_expected([True, False], xbox)
        tm.assert_equal(result, expected)
        with tm.assert_produces_warning(warn):
            # alignment for frame vs series comparisons deprecated
            result = ser == ser[1]
        expected = tm.box_expected([False, False], xbox)
        tm.assert_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.gt, operator.lt, operator.ge, operator.le],
)
def test_comparators(self, op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = op(arr, element)
index_result = op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
    @pytest.mark.parametrize(
        "other",
        [datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
    )
    def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
        """All six comparisons between a 2-element DatetimeIndex and a
        scalar datetime-like equal to its first element."""
        tz = tz_naive_fixture
        dti = pd.date_range("2016-01-01", periods=2, tz=tz)
        if tz is not None:
            if isinstance(other, np.datetime64):
                # no tzaware version available
                return
            other = localize_pydatetime(other, dti.tzinfo)
        result = dti == other
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = dti > other
        expected = np.array([False, True])
        tm.assert_numpy_array_equal(result, expected)
        result = dti >= other
        expected = np.array([True, True])
        tm.assert_numpy_array_equal(result, expected)
        result = dti < other
        expected = np.array([False, False])
        tm.assert_numpy_array_equal(result, expected)
        result = dti <= other
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize("dtype", [None, object])
    def test_dti_cmp_nat(self, dtype, box_with_array):
        """NaT-containing datetime boxes: elementwise comparisons against
        each other and against the NaT scalar."""
        if box_with_array is tm.to_array and dtype is object:
            # dont bother testing ndarray comparison methods as this fails
            # on older numpys (since they check object identity)
            return
        xbox = (
            box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
        )
        left = DatetimeIndex([Timestamp("2011-01-01"), pd.NaT, Timestamp("2011-01-03")])
        right = DatetimeIndex([pd.NaT, pd.NaT, Timestamp("2011-01-03")])
        left = tm.box_expected(left, box_with_array)
        right = tm.box_expected(right, box_with_array)
        lhs, rhs = left, right
        if dtype is object:
            lhs, rhs = left.astype(object), right.astype(object)
        result = rhs == lhs
        expected = np.array([False, False, True])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)
        result = lhs != rhs
        expected = np.array([True, True, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)
        # against the NaT scalar: == always False, != always True
        expected = np.array([False, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(lhs == pd.NaT, expected)
        tm.assert_equal(pd.NaT == rhs, expected)
        expected = np.array([True, True, True])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(lhs != pd.NaT, expected)
        tm.assert_equal(pd.NaT != lhs, expected)
        # ordered comparisons with NaT are always False
        expected = np.array([False, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(lhs < pd.NaT, expected)
        tm.assert_equal(pd.NaT > lhs, expected)
    def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
        """NaT in datetime comparisons must behave exactly like NaN in float
        comparisons: every ordered comparison and ``==`` is False at NA
        positions, ``!=`` is True."""
        fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
        fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
        didx1 = DatetimeIndex(
            ["2014-01-01", pd.NaT, "2014-03-01", pd.NaT, "2014-05-01", "2014-07-01"]
        )
        didx2 = DatetimeIndex(
            ["2014-02-01", "2014-03-01", pd.NaT, pd.NaT, "2014-06-01", "2014-07-01"]
        )
        darr = np.array(
            [
                np_datetime64_compat("2014-02-01 00:00Z"),
                np_datetime64_compat("2014-03-01 00:00Z"),
                np_datetime64_compat("nat"),
                np.datetime64("nat"),
                np_datetime64_compat("2014-06-01 00:00Z"),
                np_datetime64_compat("2014-07-01 00:00Z"),
            ]
        )
        cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
        with tm.assert_produces_warning(None):
            for idx1, idx2 in cases:
                result = idx1 < idx2
                expected = np.array([True, False, False, False, True, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx2 > idx1
                expected = np.array([True, False, False, False, True, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 <= idx2
                expected = np.array([True, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)
                result = idx2 >= idx1
                expected = np.array([True, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 == idx2
                expected = np.array([False, False, False, False, False, True])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 != idx2
                expected = np.array([True, True, True, True, True, False])
                tm.assert_numpy_array_equal(result, expected)
        # against the NA scalar itself: only != is ever True
        with tm.assert_produces_warning(None):
            for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]:
                result = idx1 < val
                expected = np.array([False, False, False, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 > val
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 <= val
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 >= val
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 == val
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 != val
                expected = np.array([True, True, True, True, True, True])
                tm.assert_numpy_array_equal(result, expected)
        # against a present (non-NA) scalar: NA positions stay False for
        # every comparison except !=
        with tm.assert_produces_warning(None):
            for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
                result = idx1 < val
                expected = np.array([True, False, False, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 > val
                expected = np.array([False, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 <= val
                expected = np.array([True, False, True, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 >= val
                expected = np.array([False, False, True, False, True, True])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 == val
                expected = np.array([False, False, True, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 != val
                expected = np.array([True, True, False, True, True, True])
                tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
    "op",
    [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
)
def test_comparison_tzawareness_compat(self, op, box_with_array):
    # GH#18162
    # Ordering comparisons between tz-naive and tz-aware datetime arrays
    # must raise TypeError; eq/ne (exercised at the end) must not raise.
    box = box_with_array
    dr = pd.date_range("2016-01-01", periods=6)
    dz = dr.tz_localize("US/Pacific")
    dr = tm.box_expected(dr, box)
    dz = tm.box_expected(dz, box)
    if box is pd.DataFrame:
        # DataFrame holds the values as one row; flatten back to a list
        # of Timestamps for the list/ndarray comparison variants.
        tolist = lambda x: x.astype(object).values.tolist()[0]
    else:
        tolist = list
    if op not in [operator.eq, operator.ne]:
        msg = (
            r"Invalid comparison between dtype=datetime64\[ns.*\] "
            "and (Timestamp|DatetimeArray|list|ndarray)"
        )
        with pytest.raises(TypeError, match=msg):
            op(dr, dz)
        with pytest.raises(TypeError, match=msg):
            op(dr, tolist(dz))
        with pytest.raises(TypeError, match=msg):
            op(dr, np.array(tolist(dz), dtype=object))
        with pytest.raises(TypeError, match=msg):
            op(dz, dr)
        with pytest.raises(TypeError, match=msg):
            op(dz, tolist(dr))
        with pytest.raises(TypeError, match=msg):
            op(dz, np.array(tolist(dr), dtype=object))
    # The aware==aware and naive==naive comparisons should *not* raise
    assert np.all(dr == dr)
    assert np.all(dr == tolist(dr))
    assert np.all(tolist(dr) == dr)
    assert np.all(np.array(tolist(dr), dtype=object) == dr)
    assert np.all(dr == np.array(tolist(dr), dtype=object))
    assert np.all(dz == dz)
    assert np.all(dz == tolist(dz))
    assert np.all(tolist(dz) == dz)
    assert np.all(np.array(tolist(dz), dtype=object) == dz)
    assert np.all(dz == np.array(tolist(dz), dtype=object))
@pytest.mark.parametrize(
    "op",
    [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
)
def test_comparison_tzawareness_compat_scalars(self, op, box_with_array):
    # GH#18162
    # Mixed-awareness comparisons against *scalar* Timestamps: ordering
    # ops raise TypeError, same-awareness comparisons succeed.
    dr = pd.date_range("2016-01-01", periods=6)
    dz = dr.tz_localize("US/Pacific")
    dr = tm.box_expected(dr, box_with_array)
    dz = tm.box_expected(dz, box_with_array)
    # Check comparisons against scalar Timestamps
    ts = Timestamp("2000-03-14 01:59")
    ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
    assert np.all(dr > ts)
    msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
    if op not in [operator.eq, operator.ne]:
        with pytest.raises(TypeError, match=msg):
            op(dr, ts_tz)
    assert np.all(dz > ts_tz)
    if op not in [operator.eq, operator.ne]:
        with pytest.raises(TypeError, match=msg):
            op(dz, ts)
    if op not in [operator.eq, operator.ne]:
        # GH#12601: Check comparison against Timestamps and DatetimeIndex
        with pytest.raises(TypeError, match=msg):
            op(ts, dz)
@pytest.mark.parametrize(
    "op",
    [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
)
@pytest.mark.parametrize(
    "other",
    [datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
    self, op, other, tz_aware_fixture, box_with_array
):
    # tz-aware array vs tz-naive scalar: eq gives all-False, ne all-True,
    # and every ordering comparison raises TypeError (both operand orders).
    box = box_with_array
    tz = tz_aware_fixture
    dti = pd.date_range("2016-01-01", periods=2, tz=tz)
    # Comparison results come back as a plain ndarray for Index/array boxes.
    xbox = box if box not in [pd.Index, pd.array] else np.ndarray
    dtarr = tm.box_expected(dti, box_with_array)
    if op in [operator.eq, operator.ne]:
        exbool = op is operator.ne
        expected = np.array([exbool, exbool], dtype=bool)
        expected = tm.box_expected(expected, xbox)
        result = op(dtarr, other)
        tm.assert_equal(result, expected)
        result = op(other, dtarr)
        tm.assert_equal(result, expected)
    else:
        msg = (
            r"Invalid comparison between dtype=datetime64\[ns, .*\] "
            f"and {type(other).__name__}"
        )
        with pytest.raises(TypeError, match=msg):
            op(dtarr, other)
        with pytest.raises(TypeError, match=msg):
            op(other, dtarr)
@pytest.mark.parametrize(
    "op",
    [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
)
def test_nat_comparison_tzawareness(self, op):
    # GH#19276
    # tzaware DatetimeIndex should not raise when compared to NaT
    dti = DatetimeIndex(
        ["2014-01-01", pd.NaT, "2014-03-01", pd.NaT, "2014-05-01", "2014-07-01"]
    )
    # NaT compares False under every op except !=, which is all-True.
    expected = np.array([op == operator.ne] * len(dti))
    result = op(dti, pd.NaT)
    tm.assert_numpy_array_equal(result, expected)
    result = op(dti.tz_localize("US/Pacific"), pd.NaT)
    tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
    # GH#22074
    # regardless of tz, we expect these comparisons are valid
    tz = tz_naive_fixture
    rng = date_range("1/1/2000", periods=10, tz=tz)
    other = "1/1/2000"
    # The string is parsed as a Timestamp equal to the first element.
    result = rng == other
    expected = np.array([True] + [False] * 9)
    tm.assert_numpy_array_equal(result, expected)
    result = rng != other
    expected = np.array([False] + [True] * 9)
    tm.assert_numpy_array_equal(result, expected)
    result = rng < other
    expected = np.array([False] * 10)
    tm.assert_numpy_array_equal(result, expected)
    result = rng <= other
    expected = np.array([True] + [False] * 9)
    tm.assert_numpy_array_equal(result, expected)
    result = rng > other
    expected = np.array([False] + [True] * 9)
    tm.assert_numpy_array_equal(result, expected)
    result = rng >= other
    expected = np.array([True] * 10)
    tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
    "other",
    [
        pd.timedelta_range("1D", periods=10),
        pd.timedelta_range("1D", periods=10).to_series(),
        pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
    ],
    ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
    # GH#22074
    # reversion test that we _don't_ call _assert_tzawareness_compat
    # when comparing against TimedeltaIndex
    dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
    # Mismatched types: == is all-False, != all-True, ordering raises.
    result = dti == other
    expected = np.array([False] * 10)
    tm.assert_numpy_array_equal(result, expected)
    result = dti != other
    expected = np.array([True] * 10)
    tm.assert_numpy_array_equal(result, expected)
    msg = "Invalid comparison between"
    with pytest.raises(TypeError, match=msg):
        dti < other
    with pytest.raises(TypeError, match=msg):
        dti <= other
    with pytest.raises(TypeError, match=msg):
        dti > other
    with pytest.raises(TypeError, match=msg):
        dti >= other
def test_dti_cmp_object_dtype(self):
    # GH#22074
    dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
    # Object-dtype copy of the same values compares equal elementwise.
    other = dti.astype("O")
    result = dti == other
    expected = np.array([True] * 10)
    tm.assert_numpy_array_equal(result, expected)
    # tz-aware vs tz-naive: every element is unequal.
    other = dti.tz_localize(None)
    result = dti != other
    tm.assert_numpy_array_equal(result, expected)
    # Mixed object array: Timestamps match, Timedeltas do not, and an
    # ordering comparison against a Timedelta raises TypeError.
    other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
    result = dti == other
    expected = np.array([True] * 5 + [False] * 5)
    tm.assert_numpy_array_equal(result, expected)
    msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
    with pytest.raises(TypeError, match=msg):
        dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
    self, tz_naive_fixture, two_hours, box_with_array
):
    # GH#22005, GH#22163 check DataFrame doesn't raise TypeError
    # Adding a timedelta-like scalar shifts every element by two hours.
    tz = tz_naive_fixture
    rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
    expected = pd.date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
    rng = tm.box_expected(rng, box_with_array)
    expected = tm.box_expected(expected, box_with_array)
    result = rng + two_hours
    tm.assert_equal(result, expected)
def test_dt64arr_iadd_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
expected = pd.date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
    self, tz_naive_fixture, two_hours, box_with_array
):
    # Subtracting a timedelta-like scalar shifts every element back.
    tz = tz_naive_fixture
    rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
    expected = pd.date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
    rng = tm.box_expected(rng, box_with_array)
    expected = tm.box_expected(expected, box_with_array)
    result = rng - two_hours
    tm.assert_equal(result, expected)
def test_dt64arr_isub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
expected = pd.date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
    # scalar timedeltas/np.timedelta64 objects
    # operate with np.timedelta64 correctly
    ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
    expected = Series(
        [Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
    )
    dtarr = tm.box_expected(ser, box_with_array)
    expected = tm.box_expected(expected, box_with_array)
    # Addition commutes: both arr + td64 and td64 + arr are checked,
    # at second and then millisecond resolution.
    result = dtarr + np.timedelta64(1, "s")
    tm.assert_equal(result, expected)
    result = np.timedelta64(1, "s") + dtarr
    tm.assert_equal(result, expected)
    expected = Series(
        [Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
    )
    expected = tm.box_expected(expected, box_with_array)
    result = dtarr + np.timedelta64(5, "ms")
    tm.assert_equal(result, expected)
    result = np.timedelta64(5, "ms") + dtarr
    tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
    # GH#23320 special handling for timedelta64("NaT")
    tz = tz_naive_fixture
    dti = pd.date_range("1994-04-01", periods=9, tz=tz, freq="QS")
    other = np.timedelta64("NaT")
    # Any add/sub with td64-NaT propagates NaT elementwise.
    expected = DatetimeIndex(["NaT"] * 9, tz=tz)
    obj = tm.box_expected(dti, box_with_array)
    expected = tm.box_expected(expected, box_with_array)
    result = obj + other
    tm.assert_equal(result, expected)
    result = other + obj
    tm.assert_equal(result, expected)
    result = obj - other
    tm.assert_equal(result, expected)
    # td64 - datetime64 is undefined, even for NaT.
    msg = "cannot subtract"
    with pytest.raises(TypeError, match=msg):
        other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
    # Adding/subtracting a timedelta64 ndarray shifts elementwise in
    # either operand order; tdarr - dtarr is invalid and must raise.
    tz = tz_naive_fixture
    dti = pd.date_range("2016-01-01", periods=3, tz=tz)
    tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
    tdarr = tdi.values
    expected = pd.date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
    dtarr = tm.box_expected(dti, box_with_array)
    expected = tm.box_expected(expected, box_with_array)
    result = dtarr + tdarr
    tm.assert_equal(result, expected)
    result = tdarr + dtarr
    tm.assert_equal(result, expected)
    expected = pd.date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
    expected = tm.box_expected(expected, box_with_array)
    result = dtarr - tdarr
    tm.assert_equal(result, expected)
    msg = "cannot subtract|(bad|unsupported) operand type for unary"
    with pytest.raises(TypeError, match=msg):
        tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
    "ts",
    [
        Timestamp("2013-01-01"),
        Timestamp("2013-01-01").to_pydatetime(),
        Timestamp("2013-01-01").to_datetime64(),
    ],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
    # GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
    # Subtracting a datetime scalar yields elementwise timedeltas.
    idx = pd.date_range("2013-01-01", periods=3)._with_freq(None)
    idx = tm.box_expected(idx, box_with_array)
    expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
    expected = tm.box_expected(expected, box_with_array)
    result = idx - ts
    tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
    # GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
    # for DataFrame operation
    dt64 = np.datetime64("2013-01-01")
    assert dt64.dtype == "datetime64[D]"
    dti = pd.date_range("20130101", periods=3)._with_freq(None)
    dtarr = tm.box_expected(dti, box_with_array)
    expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
    expected = tm.box_expected(expected, box_with_array)
    result = dtarr - dt64
    tm.assert_equal(result, expected)
    # Reversed subtraction negates the result.
    result = dt64 - dtarr
    tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = pd.date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
    # GH#18808
    # Subtracting NaT gives all-NaT timedelta64, naive and tz-aware alike.
    dti = DatetimeIndex([pd.NaT, Timestamp("19900315")])
    ser = tm.box_expected(dti, box_with_array)
    result = ser - pd.NaT
    expected = Series([pd.NaT, pd.NaT], dtype="timedelta64[ns]")
    expected = tm.box_expected(expected, box_with_array)
    tm.assert_equal(result, expected)
    dti_tz = dti.tz_localize("Asia/Tokyo")
    ser_tz = tm.box_expected(dti_tz, box_with_array)
    result = ser_tz - pd.NaT
    expected = Series([pd.NaT, pd.NaT], dtype="timedelta64[ns]")
    expected = tm.box_expected(expected, box_with_array)
    tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
    # Subtracting an object-dtype copy matches dt64 self-subtraction,
    # emitting a PerformanceWarning on the object-dtype fallback path.
    dti = pd.date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
    expected = dti - dti
    obj = tm.box_expected(dti, box_with_array)
    expected = tm.box_expected(expected, box_with_array)
    warn = None
    if box_with_array is not pd.DataFrame or tz_naive_fixture is None:
        warn = PerformanceWarning
    with tm.assert_produces_warning(warn):
        result = obj - obj.astype(object)
    tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = pd.date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
    self, tz_aware_fixture, box_with_array
):
    # tz-aware minus tz-naive ndarray is ambiguous -> TypeError both ways.
    tz = tz_aware_fixture
    dti = pd.date_range("2016-01-01", periods=3, tz=tz)
    dt64vals = dti.values
    dtarr = tm.box_expected(dti, box_with_array)
    msg = "subtraction must have the same timezones or"
    with pytest.raises(TypeError, match=msg):
        dtarr - dt64vals
    with pytest.raises(TypeError, match=msg):
        dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
    # datetime + datetime is undefined -> TypeError in both orders.
    tz = tz_naive_fixture
    dti = pd.date_range("2016-01-01", periods=3, tz=tz)
    dt64vals = dti.values
    dtarr = tm.box_expected(dti, box_with_array)
    msg = "cannot add"
    with pytest.raises(TypeError, match=msg):
        dtarr + dt64vals
    with pytest.raises(TypeError, match=msg):
        dt64vals + dtarr
def test_dt64arr_add_timestamp_raises(self, box_with_array):
    # GH#22163 ensure DataFrame doesn't cast Timestamp to i8
    # Adding a Timestamp scalar to a datetime array must raise.
    idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
    idx = tm.box_expected(idx, box_with_array)
    msg = "cannot add"
    with pytest.raises(TypeError, match=msg):
        idx + Timestamp("2011-01-01")
    with pytest.raises(TypeError, match=msg):
        Timestamp("2011-01-01") + idx
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
    "other",
    [
        3.14,
        np.array([2.0, 3.0]),
        # GH#13078 datetime +/- Period is invalid
        Period("2011-01-01", freq="D"),
        # https://github.com/pandas-dev/pandas/issues/10329
        time(1, 2, 3),
    ],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
    # add/sub with floats, float arrays, Periods, and times all raise.
    dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
    dtarr = tm.box_expected(dti, box_with_array)
    # The message varies with operand type and whether numpy or pandas
    # is the one raising, hence the alternation.
    msg = "|".join(
        [
            "unsupported operand type",
            "cannot (add|subtract)",
            "cannot use operands with types",
            "ufunc '?(add|subtract)'? cannot use operands with types",
            "Concatenation operation is not implemented for NumPy arrays",
        ]
    )
    assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
    self, dti_freq, pi_freq, box_with_array, box_with_array2
):
    # GH#20049 subtracting PeriodIndex should raise TypeError
    dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
    pi = dti.to_period(pi_freq)
    dtarr = tm.box_expected(dti, box_with_array)
    parr = tm.box_expected(pi, box_with_array2)
    msg = "|".join(
        [
            "cannot (add|subtract)",
            "unsupported operand",
            "descriptor.*requires",
            "ufunc.*cannot use operands",
        ]
    )
    assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
    # https://github.com/pandas-dev/pandas/issues/10329
    # datetime array +/- datetime.time objects must raise in both orders.
    tz = tz_naive_fixture
    obj1 = pd.date_range("2012-01-01", periods=3, tz=tz)
    obj2 = [time(i, i, i) for i in range(3)]
    obj1 = tm.box_expected(obj1, box_with_array)
    obj2 = tm.box_expected(obj2, box_with_array)
    with warnings.catch_warnings(record=True):
        # pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
        # applied to Series or DatetimeIndex
        # we aren't testing that here, so ignore.
        warnings.simplefilter("ignore", PerformanceWarning)
        # If `x + y` raises, then `y + x` should raise here as well
        msg = (
            r"unsupported operand type\(s\) for -: "
            "'(Timestamp|DatetimeArray)' and 'datetime.time'"
        )
        with pytest.raises(TypeError, match=msg):
            obj1 - obj2
        msg = "|".join(
            [
                "cannot subtract DatetimeArray from ndarray",
                "ufunc (subtract|'subtract') cannot use operands with types "
                r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
            ]
        )
        with pytest.raises(TypeError, match=msg):
            obj2 - obj1
        msg = (
            r"unsupported operand type\(s\) for \+: "
            "'(Timestamp|DatetimeArray)' and 'datetime.time'"
        )
        with pytest.raises(TypeError, match=msg):
            obj1 + obj2
        msg = "|".join(
            [
                r"unsupported operand type\(s\) for \+: "
                "'(Timestamp|DatetimeArray)' and 'datetime.time'",
                "ufunc (add|'add') cannot use operands with types "
                r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
            ]
        )
        with pytest.raises(TypeError, match=msg):
            obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
    # GH#4532
    # operate with pd.offsets
    ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
    expected = Series(
        [Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
    )
    ser = tm.box_expected(ser, box_with_array)
    expected = tm.box_expected(expected, box_with_array)
    # Tick offsets add commutatively.
    result = ser + pd.offsets.Second(5)
    tm.assert_equal(result, expected)
    result2 = pd.offsets.Second(5) + ser
    tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
    # GH#4532
    # operate with pd.offsets
    ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
    expected = Series(
        [Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
    )
    ser = tm.box_expected(ser, box_with_array)
    expected = tm.box_expected(expected, box_with_array)
    result = ser - pd.offsets.Second(5)
    tm.assert_equal(result, expected)
    # Adding the negated offset is equivalent to subtraction...
    result2 = -pd.offsets.Second(5) + ser
    tm.assert_equal(result2, expected)
    # ...but offset - array is not defined.
    msg = "(bad|unsupported) operand type for unary"
    with pytest.raises(TypeError, match=msg):
        pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
    "cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
    # GH#4532
    # smoke tests for valid DateOffsets
    ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
    ser = tm.box_expected(ser, box_with_array)
    offset_cls = getattr(pd.offsets, cls_name)
    # No result checks: just verify that none of these ops raise.
    ser + offset_cls(5)
    offset_cls(5) + ser
    ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
    # GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
    tz = tz_aware_fixture
    if tz == "US/Pacific":
        # Extra scalar/vector consistency check around DST-capable tz.
        dates = date_range("2012-11-01", periods=3, tz=tz)
        offset = dates + pd.offsets.Hour(5)
        assert dates[0] + pd.offsets.Hour(5) == offset[0]
    dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
    expected = DatetimeIndex(
        ["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
        freq="H",
        tz=tz,
    )
    dates = tm.box_expected(dates, box_with_array)
    expected = tm.box_expected(expected, box_with_array)
    # TODO: parametrize over the scalar being added? radd? sub?
    offset = dates + pd.offsets.Hour(5)
    tm.assert_equal(offset, expected)
    offset = dates + np.timedelta64(5, "h")
    tm.assert_equal(offset, expected)
    offset = dates + timedelta(hours=5)
    tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
    # GH#10699
    # Vectorized DateOffset(+kwargs) add/sub must match pointwise
    # Timestamp arithmetic, for single and accumulated kwargs.
    vec = DatetimeIndex(
        [
            Timestamp("2000-01-05 00:15:00"),
            Timestamp("2000-01-31 00:23:00"),
            Timestamp("2000-01-01"),
            Timestamp("2000-03-31"),
            Timestamp("2000-02-29"),
            Timestamp("2000-12-31"),
            Timestamp("2000-05-15"),
            Timestamp("2001-06-15"),
        ]
    )
    vec = tm.box_expected(vec, box_with_array)
    vec_items = vec.squeeze() if box_with_array is pd.DataFrame else vec
    # DateOffset relativedelta fastpath
    relative_kwargs = [
        ("years", 2),
        ("months", 5),
        ("days", 3),
        ("hours", 5),
        ("minutes", 10),
        ("seconds", 2),
        ("microseconds", 5),
    ]
    for i, (unit, value) in enumerate(relative_kwargs):
        off = DateOffset(**{unit: value})
        expected = DatetimeIndex([x + off for x in vec_items])
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(expected, vec + off)
        expected = DatetimeIndex([x - off for x in vec_items])
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(expected, vec - off)
        # Also combine every kwarg seen so far into a single offset.
        off = DateOffset(**dict(relative_kwargs[: i + 1]))
        expected = DatetimeIndex([x + off for x in vec_items])
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(expected, vec + off)
        expected = DatetimeIndex([x - off for x in vec_items])
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(expected, vec - off)
        # offset - array is not defined.
        msg = "(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
    "cls_and_kwargs",
    [
        "YearBegin",
        ("YearBegin", {"month": 5}),
        "YearEnd",
        ("YearEnd", {"month": 5}),
        "MonthBegin",
        "MonthEnd",
        "SemiMonthEnd",
        "SemiMonthBegin",
        "Week",
        ("Week", {"weekday": 3}),
        "Week",
        ("Week", {"weekday": 6}),
        "BusinessDay",
        "BDay",
        "QuarterEnd",
        "QuarterBegin",
        "CustomBusinessDay",
        "CDay",
        "CBMonthEnd",
        "CBMonthBegin",
        "BMonthBegin",
        "BMonthEnd",
        "BusinessHour",
        "BYearBegin",
        "BYearEnd",
        "BQuarterBegin",
        ("LastWeekOfMonth", {"weekday": 2}),
        (
            "FY5253Quarter",
            {
                "qtr_with_extra_week": 1,
                "startingMonth": 1,
                "weekday": 2,
                "variation": "nearest",
            },
        ),
        ("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
        ("WeekOfMonth", {"weekday": 2, "week": 2}),
        "Easter",
        ("DateOffset", {"day": 4}),
        ("DateOffset", {"month": 5}),
    ],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
    self, box_with_array, n, normalize, cls_and_kwargs
):
    # GH#10699
    # assert vectorized operation matches pointwise operations
    if isinstance(cls_and_kwargs, tuple):
        # If cls_name param is a tuple, then 2nd entry is kwargs for
        # the offset constructor
        cls_name, kwargs = cls_and_kwargs
    else:
        cls_name = cls_and_kwargs
        kwargs = {}
    if n == 0 and cls_name in [
        "WeekOfMonth",
        "LastWeekOfMonth",
        "FY5253Quarter",
        "FY5253",
    ]:
        # passing n = 0 is invalid for these offset classes
        return
    vec = DatetimeIndex(
        [
            Timestamp("2000-01-05 00:15:00"),
            Timestamp("2000-01-31 00:23:00"),
            Timestamp("2000-01-01"),
            Timestamp("2000-03-31"),
            Timestamp("2000-02-29"),
            Timestamp("2000-12-31"),
            Timestamp("2000-05-15"),
            Timestamp("2001-06-15"),
        ]
    )
    vec = tm.box_expected(vec, box_with_array)
    vec_items = vec.squeeze() if box_with_array is pd.DataFrame else vec
    offset_cls = getattr(pd.offsets, cls_name)
    with warnings.catch_warnings(record=True):
        # pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
        # applied to Series or DatetimeIndex
        # we aren't testing that here, so ignore.
        warnings.simplefilter("ignore", PerformanceWarning)
        offset = offset_cls(n, normalize=normalize, **kwargs)
        expected = DatetimeIndex([x + offset for x in vec_items])
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(expected, vec + offset)
        expected = DatetimeIndex([x - offset for x in vec_items])
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(expected, vec - offset)
        expected = DatetimeIndex([offset + x for x in vec_items])
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(expected, offset + vec)
        # offset - array is not defined.
        msg = "(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
    # GH#10699
    # Relativedelta-style DateOffset(years=1) add/sub on a naive range.
    s = date_range("2000-01-01", "2000-01-31", name="a")
    s = tm.box_expected(s, box_with_array)
    result = s + DateOffset(years=1)
    result2 = DateOffset(years=1) + s
    exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
    exp = tm.box_expected(exp, box_with_array)
    tm.assert_equal(result, exp)
    tm.assert_equal(result2, exp)
    result = s - DateOffset(years=1)
    exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
    exp = tm.box_expected(exp, box_with_array)
    tm.assert_equal(result, exp)
    # Tick offset (Day) on tz-aware values.
    s = DatetimeIndex(
        [
            Timestamp("2000-01-15 00:15:00", tz="US/Central"),
            Timestamp("2000-02-15", tz="US/Central"),
        ],
        name="a",
    )
    s = tm.box_expected(s, box_with_array)
    result = s + pd.offsets.Day()
    result2 = pd.offsets.Day() + s
    exp = DatetimeIndex(
        [
            Timestamp("2000-01-16 00:15:00", tz="US/Central"),
            Timestamp("2000-02-16", tz="US/Central"),
        ],
        name="a",
    )
    exp = tm.box_expected(exp, box_with_array)
    tm.assert_equal(result, exp)
    tm.assert_equal(result2, exp)
    # Anchored offset (MonthEnd) on tz-aware values.
    s = DatetimeIndex(
        [
            Timestamp("2000-01-15 00:15:00", tz="US/Central"),
            Timestamp("2000-02-15", tz="US/Central"),
        ],
        name="a",
    )
    s = tm.box_expected(s, box_with_array)
    result = s + pd.offsets.MonthEnd()
    result2 = pd.offsets.MonthEnd() + s
    exp = DatetimeIndex(
        [
            Timestamp("2000-01-31 00:15:00", tz="US/Central"),
            Timestamp("2000-02-29", tz="US/Central"),
        ],
        name="a",
    )
    exp = tm.box_expected(exp, box_with_array)
    tm.assert_equal(result, exp)
    tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = pd.date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
expected = tm.box_expected(expected, box_with_array)
if box_other:
other = tm.box_expected(other, box_with_array)
warn = PerformanceWarning
if box_with_array is pd.DataFrame and tz is not None:
warn = None
with tm.assert_produces_warning(warn):
res = op(dtarr, other)
tm.assert_equal(res, expected)
@pytest.mark.parametrize(
    "op, offset, exp, exp_freq",
    [
        (
            "__add__",
            DateOffset(months=3, days=10),
            [
                Timestamp("2014-04-11"),
                Timestamp("2015-04-11"),
                Timestamp("2016-04-11"),
                Timestamp("2017-04-11"),
            ],
            None,
        ),
        (
            "__add__",
            DateOffset(months=3),
            [
                Timestamp("2014-04-01"),
                Timestamp("2015-04-01"),
                Timestamp("2016-04-01"),
                Timestamp("2017-04-01"),
            ],
            "AS-APR",
        ),
        (
            "__sub__",
            DateOffset(months=3, days=10),
            [
                Timestamp("2013-09-21"),
                Timestamp("2014-09-21"),
                Timestamp("2015-09-21"),
                Timestamp("2016-09-21"),
            ],
            None,
        ),
        (
            "__sub__",
            DateOffset(months=3),
            [
                Timestamp("2013-10-01"),
                Timestamp("2014-10-01"),
                Timestamp("2015-10-01"),
                Timestamp("2016-10-01"),
            ],
            "AS-OCT",
        ),
    ],
)
def test_dti_add_sub_nonzero_mth_offset(
    self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
):
    # GH 26258
    # Month-bearing DateOffsets applied to a tz-aware annual range.
    tz = tz_aware_fixture
    date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
    date = tm.box_expected(date, box_with_array, False)
    mth = getattr(date, op)
    result = mth(offset)
    expected = DatetimeIndex(exp, tz=tz)
    expected = tm.box_expected(expected, box_with_array, False)
    tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
    # GH#25317
    # NaT entries must be masked out of the overflow check, so the
    # subtraction succeeds and yields NaT rather than raising.
    left = Series([Timestamp("1969-12-31")])
    right = Series([NaT])
    left = tm.box_expected(left, box_with_array)
    right = tm.box_expected(right, box_with_array)
    expected = TimedeltaIndex([NaT])
    expected = tm.box_expected(expected, box_with_array)
    result = left - right
    tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
    # GH#12534, fixed by GH#19024
    # Ops that overflow int64 nanoseconds raise OverflowError; once the
    # overflowing entries are set to NaT, the same ops succeed.
    dt = Timestamp("1700-01-31")
    td = Timedelta("20000 Days")
    dti = pd.date_range("1949-09-30", freq="100Y", periods=4)
    ser = Series(dti)
    msg = "Overflow in int64 addition"
    with pytest.raises(OverflowError, match=msg):
        ser - dt
    with pytest.raises(OverflowError, match=msg):
        dt - ser
    with pytest.raises(OverflowError, match=msg):
        ser + td
    with pytest.raises(OverflowError, match=msg):
        td + ser
    ser.iloc[-1] = pd.NaT
    expected = Series(
        ["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
    )
    res = ser + td
    tm.assert_series_equal(res, expected)
    res = td + ser
    tm.assert_series_equal(res, expected)
    ser.iloc[1:] = pd.NaT
    expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
    res = ser - dt
    tm.assert_series_equal(res, expected)
    res = dt - ser
    tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
    # Subtraction near Timestamp.min/max: out-of-range results raise
    # OverflowError, in-range results are checked exactly, across the
    # Timestamp / pydatetime / datetime64[ns] / datetime64[D] variants.
    dtimax = pd.to_datetime(["now", Timestamp.max])
    dtimin = pd.to_datetime(["now", Timestamp.min])
    tsneg = Timestamp("1950-01-01")
    ts_neg_variants = [
        tsneg,
        tsneg.to_pydatetime(),
        tsneg.to_datetime64().astype("datetime64[ns]"),
        tsneg.to_datetime64().astype("datetime64[D]"),
    ]
    tspos = Timestamp("1980-01-01")
    ts_pos_variants = [
        tspos,
        tspos.to_pydatetime(),
        tspos.to_datetime64().astype("datetime64[ns]"),
        tspos.to_datetime64().astype("datetime64[D]"),
    ]
    msg = "Overflow in int64 addition"
    for variant in ts_neg_variants:
        with pytest.raises(OverflowError, match=msg):
            dtimax - variant
    expected = Timestamp.max.value - tspos.value
    for variant in ts_pos_variants:
        res = dtimax - variant
        assert res[1].value == expected
    expected = Timestamp.min.value - tsneg.value
    for variant in ts_neg_variants:
        res = dtimin - variant
        assert res[1].value == expected
    for variant in ts_pos_variants:
        with pytest.raises(OverflowError, match=msg):
            dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
    # GH#22492, GH#22508
    # Index-minus-index near the int64 bounds: in-range differences are
    # exact; out-of-range differences raise OverflowError.
    dtimax = pd.to_datetime(["now", Timestamp.max])
    dtimin = pd.to_datetime(["now", Timestamp.min])
    ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
    ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
    # General tests
    expected = Timestamp.max.value - ts_pos[1].value
    result = dtimax - ts_pos
    assert result[1].value == expected
    expected = Timestamp.min.value - ts_neg[1].value
    result = dtimin - ts_neg
    assert result[1].value == expected
    msg = "Overflow in int64 addition"
    with pytest.raises(OverflowError, match=msg):
        dtimax - ts_neg
    with pytest.raises(OverflowError, match=msg):
        dtimin - ts_pos
    # Edge cases
    tmin = pd.to_datetime([Timestamp.min])
    t1 = tmin + Timedelta.max + Timedelta("1us")
    with pytest.raises(OverflowError, match=msg):
        t1 - tmin
    tmax = pd.to_datetime([Timestamp.max])
    t2 = tmax + Timedelta.min - Timedelta("1us")
    with pytest.raises(OverflowError, match=msg):
        tmax - t2
class TestTimestampSeriesArithmetic:
"""Arithmetic (+/-) between datetime64 Series and datetime/timedelta-like objects."""
def test_empty_series_add_sub(self):
"""Add/sub of empty dt64 and td64 Series returns empty dt64; td64 - dt64 raises."""
# GH#13844
a = Series(dtype="M8[ns]")
b = Series(dtype="m8[ns]")
tm.assert_series_equal(a, a + b)
tm.assert_series_equal(a, a - b)
tm.assert_series_equal(a, b + a)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
b - a
def test_operators_datetimelike(self):
"""Smoke test: dt64/td64 Series combinations do not raise (results unchecked)."""
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[
Timestamp("20111230"),
Timestamp("20120101"),
Timestamp("20120103"),
]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[
Timestamp("20111231"),
Timestamp("20120102"),
Timestamp("20120104"),
]
)
dt1 - dt2
dt2 - dt1
# datetime64 with timedelta
dt1 + td1
td1 + dt1
dt1 - td1
# timedelta with datetime64
td1 + dt1
dt1 + td1
def test_dt64ser_sub_datetime_dtype(self):
"""dt64 Series minus datetime scalar yields timedelta64[ns] dtype."""
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == "timedelta64[ns]"
# -------------------------------------------------------------
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
"""Undefined arithmetic ops between datetimelike Series raise TypeError."""
# these are all TypeError ops
op_str = all_arithmetic_operators
def check(get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
op = getattr(get_ser, op_str, None)
# Previously, _validate_for_numeric_binop in core/indexes/base.py
# did this for us.
with pytest.raises(
TypeError, match="operate|[cC]annot|unsupported operand"
):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
)
if op_str not in ["__sub__", "__rsub__"]:
check(dt1, dt2)
# ## datetime64 with timedelta ###
# TODO(jreback) __rsub__ should raise?
if op_str not in ["__add__", "__radd__", "__sub__"]:
check(dt1, td1)
# 8260, 10763
# datetime64 with tz
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
check(dt2, td2)
def test_sub_single_tz(self):
"""Subtraction of two single-element tz-aware Series yields Timedelta Series."""
# GH#12290
s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")])
s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")])
result = s1 - s2
expected = Series([Timedelta("2days")])
tm.assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta("-2days")])
tm.assert_series_equal(result, expected)
def test_dt64tz_series_sub_dtitz(self):
"""tz-aware Series minus same-tz DatetimeIndex (either order) returns td64 Series."""
# GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
# (with same tz) raises, fixed by #19024
dti = pd.date_range("1999-09-30", periods=10, tz="US/Pacific")
ser = Series(dti)
expected = Series(TimedeltaIndex(["0days"] * 10))
res = dti - ser
tm.assert_series_equal(res, expected)
res = ser - dti
tm.assert_series_equal(res, expected)
def test_sub_datetime_compat(self):
"""tz-aware Series minus stdlib datetime matches minus Timestamp; NaT propagates."""
# see GH#14088
s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), pd.NaT])
dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
exp = Series([Timedelta("1 days"), pd.NaT])
tm.assert_series_equal(s - dt, exp)
tm.assert_series_equal(s - Timestamp(dt), exp)
def test_dt64_series_add_mixed_tick_DateOffset(self):
"""Adding tick-based DateOffsets (and chains of them) shifts each element."""
# GH#4532
# operate with pd.offsets
s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
)
tm.assert_series_equal(result, expected)
def test_datetime64_ops_nat(self):
"""NaT add/sub with dt64 Series propagates NaT; unary minus on dt64 Series raises."""
# GH#11349
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
# subtraction
tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
msg = "Unary negative expects"
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + datetime_series
tm.assert_series_equal(
-NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
# -------------------------------------------------------------
# Invalid Operations
# TODO: this block also needs to be de-duplicated and parametrized
@pytest.mark.parametrize(
"dt64_series",
[
Series([Timestamp("19900315"), Timestamp("19900315")]),
Series([pd.NaT, Timestamp("19900315")]),
Series([pd.NaT, pd.NaT], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
"""Multiplying or dividing a dt64 Series by a numeric scalar raises TypeError."""
# multiplication
msg = "cannot perform .* with this index type"
with pytest.raises(TypeError, match=msg):
dt64_series * one
with pytest.raises(TypeError, match=msg):
one * dt64_series
# division
with pytest.raises(TypeError, match=msg):
dt64_series / one
with pytest.raises(TypeError, match=msg):
one / dt64_series
# TODO: parametrize over box
@pytest.mark.parametrize("op", ["__add__", "__radd__", "__sub__", "__rsub__"])
def test_dt64_series_add_intlike(self, tz_naive_fixture, op):
"""Adding/subtracting ints or integer arrays to a dt64 Series raises TypeError."""
# GH#19123
tz = tz_naive_fixture
dti = DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
ser = Series(dti)
other = Series([20, 30, 40], dtype="uint8")
method = getattr(ser, op)
msg = "|".join(
[
"Addition/subtraction of integers and integer-arrays",
"cannot subtract .* from ndarray",
]
)
with pytest.raises(TypeError, match=msg):
method(1)
with pytest.raises(TypeError, match=msg):
method(other)
with pytest.raises(TypeError, match=msg):
method(np.array(other))
with pytest.raises(TypeError, match=msg):
method(pd.Index(other))
# -------------------------------------------------------------
# Timezone-Centric Tests
def test_operators_datetimelike_with_timezones(self):
"""tz-aware dt64 +/- timedelta (scalar and Series) matches tz-naive op re-localized."""
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
td1[0] - dt1
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
with pytest.raises(TypeError, match=msg):
td2[0] - dt2
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "cannot (add|subtract)"
with pytest.raises(TypeError, match=msg):
td1 - dt1
with pytest.raises(TypeError, match=msg):
td2 - dt2
class TestDatetimeIndexArithmetic:
"""Arithmetic between DatetimeIndex and ints, integer arrays, TimedeltaIndex, and datetimelikes."""
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_addsub_int(self, tz_naive_fixture, one):
"""DTI +/- integer (and in-place variants) raises TypeError."""
# Variants of `one` for #19012
tz = tz_naive_fixture
rng = pd.date_range("2000-01-01 09:00", freq="H", periods=10, tz=tz)
msg = "Addition/subtraction of integers"
with pytest.raises(TypeError, match=msg):
rng + one
with pytest.raises(TypeError, match=msg):
rng += one
with pytest.raises(TypeError, match=msg):
rng - one
with pytest.raises(TypeError, match=msg):
rng -= one
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("freq", ["H", "D"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_tick(self, int_holder, freq):
"""Tick-frequency DTI +/- integer array raises TypeError."""
# GH#19959
dti = pd.date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "Addition/subtraction of integers|cannot subtract DatetimeArray from"
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("freq", ["W", "M", "MS", "Q"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_non_tick(self, int_holder, freq):
"""Non-tick-frequency DTI +/- integer array raises TypeError."""
# GH#19959
dti = pd.date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "Addition/subtraction of integers|cannot subtract DatetimeArray from"
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_no_freq(self, int_holder):
"""Freq-less DTI +/- integer array raises TypeError."""
# GH#19959
dti = DatetimeIndex(["2016-01-01", "NaT", "2017-04-05 06:07:08"])
other = int_holder([9, 4, -1])
msg = "|".join(
["cannot subtract DatetimeArray from", "Addition/subtraction of integers"]
)
assert_invalid_addsub_type(dti, other, msg)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz_naive_fixture):
"""DTI + TimedeltaIndex (either order, or raw td64 array) shifts each element."""
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = pd.date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
# add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz_naive_fixture):
"""In-place += between DTI and TimedeltaIndex/td64 array matches out-of-place add."""
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = pd.date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
# iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz_naive_fixture):
"""DTI - TimedeltaIndex subtracts elementwise; reversed order raises TypeError."""
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = pd.date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
expected = expected._with_freq(None)
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = "cannot subtract .*TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = "cannot subtract DatetimeArray from"
with pytest.raises(TypeError, match=msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz_naive_fixture):
"""In-place -= of TimedeltaIndex/td64 array from DTI works; reversed -= raises."""
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = pd.date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
expected = expected._with_freq(None)
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
msg = "cannot subtract .* from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = "|".join(
[
"cannot perform __neg__ with this index type:",
"ufunc subtract cannot use operands with types",
"cannot subtract DatetimeArray from",
]
)
with pytest.raises(TypeError, match=msg):
tdi.values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
@pytest.mark.parametrize(
"addend",
[
datetime(2011, 1, 1),
DatetimeIndex(["2011-01-01", "2011-01-02"]),
DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize("US/Eastern"),
np.datetime64("2011-01-01"),
Timestamp("2011-01-01"),
],
ids=lambda x: type(x).__name__,
)
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_add_datetimelike_and_dtarr(self, box_with_array, addend, tz):
"""Adding any datetime-like to a datetime array raises TypeError (both orders)."""
# GH#9631
dti = DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize(tz)
dtarr = tm.box_expected(dti, box_with_array)
msg = "cannot add DatetimeArray and"
with pytest.raises(TypeError, match=msg):
dtarr + addend
with pytest.raises(TypeError, match=msg):
addend + dtarr
# -------------------------------------------------------------
def test_dta_add_sub_index(self, tz_naive_fixture):
"""DatetimeArray +/- Index defers to the Index implementation (same results)."""
# Check that DatetimeArray defers to Index classes
dti = date_range("20130101", periods=3, tz=tz_naive_fixture)
dta = dti.array
result = dta - dti
expected = dti - dti
tm.assert_index_equal(result, expected)
tdi = result
result = dta + tdi
expected = dti + tdi
tm.assert_index_equal(result, expected)
result = dta - tdi
expected = dti - tdi
tm.assert_index_equal(result, expected)
def test_sub_dti_dti(self):
"""DTI - DTI returns TimedeltaIndex; mismatched tz or length raises; NaT propagates."""
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range("20130101", periods=3)
dti_tz = date_range("20130101", periods=3).tz_localize("US/Eastern")
dti_tz2 = date_range("20130101", periods=3).tz_localize("UTC")
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
msg = "DatetimeArray subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dti_tz - dti
with pytest.raises(TypeError, match=msg):
dti - dti_tz
with pytest.raises(TypeError, match=msg):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range("20130101", periods=3)
dti2 = date_range("20130101", periods=4)
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(["2012-01-01", np.nan, "2012-01-03"])
dti2 = DatetimeIndex(["2012-01-02", "2012-01-03", np.nan])
expected = TimedeltaIndex(["1 days", np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------------
# TODO: Most of this block is moved from series or frame tests, needs
# cleanup, box-parametrization, and de-duplication
@pytest.mark.parametrize("op", [operator.add, operator.sub])
def test_timedelta64_equal_timedelta_supported_ops(self, op):
"""np.timedelta64 and datetime.timedelta operands give identical Series results."""
ser = Series(
[
Timestamp("20130301"),
Timestamp("20130228 23:00:00"),
Timestamp("20130228 22:00:00"),
Timestamp("20130228 21:00:00"),
]
)
intervals = ["D", "h", "m", "s", "us"]
def timedelta64(*args):
# see casting notes in NumPy gh-12927
return np.sum(list(starmap(np.timedelta64, zip(args, intervals))))
# exhaustively compare all 0/1 combinations of each time component
for d, h, m, s, us in product(*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s, microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
tm.assert_series_equal(lhs, rhs)
def test_ops_nat_mixed_datetime64_timedelta64(self):
"""NaT-valued dt64/td64 Series combinations propagate NaT with the correct dtype."""
# GH#11349
timedelta_series = Series([NaT, Timedelta("1s")])
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]")
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]")
# subtraction
tm.assert_series_equal(
datetime_series - single_nat_dtype_datetime, nat_series_dtype_timedelta
)
tm.assert_series_equal(
datetime_series - single_nat_dtype_timedelta, nat_series_dtype_timestamp
)
tm.assert_series_equal(
-single_nat_dtype_timedelta + datetime_series, nat_series_dtype_timestamp
)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
tm.assert_series_equal(
nat_series_dtype_timestamp - single_nat_dtype_datetime,
nat_series_dtype_timedelta,
)
tm.assert_series_equal(
nat_series_dtype_timestamp - single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
-single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
msg = "cannot subtract a datelike"
with pytest.raises(TypeError, match=msg):
timedelta_series - single_nat_dtype_datetime
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
nat_series_dtype_timestamp + single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
nat_series_dtype_timedelta + single_nat_dtype_datetime,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_datetime + nat_series_dtype_timedelta,
nat_series_dtype_timestamp,
)
def test_ufunc_coercions(self):
"""np.add/np.subtract on DTI match the +/- operators and preserve/infer freq."""
idx = date_range("2011-01-01", periods=3, freq="2D", name="x")
delta = np.timedelta64(1, "D")
exp = date_range("2011-01-02", periods=3, freq="2D", name="x")
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
exp = date_range("2010-12-31", periods=3, freq="2D", name="x")
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
# When adding/subtracting an ndarray (which has no .freq), the result
# does not infer freq
idx = idx._with_freq(None)
delta = np.array(
[np.timedelta64(1, "D"), np.timedelta64(2, "D"), np.timedelta64(3, "D")]
)
exp = DatetimeIndex(["2011-01-02", "2011-01-05", "2011-01-08"], name="x")
for result in [idx + delta, np.add(idx, delta)]:
tm.assert_index_equal(result, exp)
assert result.freq == exp.freq
exp = DatetimeIndex(["2010-12-31", "2011-01-01", "2011-01-02"], name="x")
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == exp.freq
def test_dti_add_series(self, tz_naive_fixture, names):
"""DTI + td64 Series returns a Series with correctly propagated name."""
# GH#13905
tz = tz_naive_fixture
index = DatetimeIndex(
["2016-06-28 05:30", "2016-06-28 05:31"], tz=tz, name=names[0]
)
ser = Series([Timedelta(seconds=5)] * 2, index=index, name=names[1])
expected = Series(index + Timedelta(seconds=5), index=index, name=names[2])
# passing name arg isn't enough when names[2] is None
expected.name = names[2]
assert expected.dtype == index.dtype
result = ser + index
tm.assert_series_equal(result, expected)
result2 = index + ser
tm.assert_series_equal(result2, expected)
expected = index + Timedelta(seconds=5)
result3 = ser.values + index
tm.assert_index_equal(result3, expected)
result4 = index + ser.values
tm.assert_index_equal(result4, expected)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
def test_dti_addsub_offset_arraylike(
self, tz_naive_fixture, names, op, index_or_series
):
"""DTI +/- array of DateOffsets applies elementwise (with PerformanceWarning)."""
# GH#18849, GH#19744
box = pd.Index
other_box = index_or_series
tz = tz_naive_fixture
dti = pd.date_range("2017-01-01", periods=2, tz=tz, name=names[0])
other = other_box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], name=names[1])
xbox = get_upcast_box(box, other)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dti, other)
expected = DatetimeIndex(
[op(dti[n], other[n]) for n in range(len(dti))], name=names[2], freq="infer"
)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
@pytest.mark.parametrize("other_box", [pd.Index, np.array])
def test_dti_addsub_object_arraylike(
self, tz_naive_fixture, box_with_array, other_box
):
"""DTI +/- object array mixing offsets and Timedeltas applies elementwise."""
tz = tz_naive_fixture
dti = pd.date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = other_box([pd.offsets.MonthEnd(), Timedelta(days=4)])
xbox = get_upcast_box(box_with_array, other)
expected = DatetimeIndex(["2017-01-31", "2017-01-06"], tz=tz_naive_fixture)
expected = tm.box_expected(expected, xbox)
warn = PerformanceWarning
if box_with_array is pd.DataFrame and tz is not None:
warn = None
with tm.assert_produces_warning(warn):
result = dtarr + other
tm.assert_equal(result, expected)
expected = DatetimeIndex(["2016-12-31", "2016-12-29"], tz=tz_naive_fixture)
expected = tm.box_expected(expected, xbox)
with tm.assert_produces_warning(warn):
result = dtarr - other
tm.assert_equal(result, expected)
@pytest.mark.parametrize("years", [-1, 0, 1])
@pytest.mark.parametrize("months", [-2, 0, 2])
def test_shift_months(years, months):
dti = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
]
)
actual = DatetimeIndex(shift_months(dti.asi8, years * 12 + months))
raw = [x + pd.offsets.DateOffset(years=years, months=months) for x in dti]
expected = DatetimeIndex(raw)
tm.assert_index_equal(actual, expected)
def test_dt64arr_addsub_object_dtype_2d():
"""2D DatetimeArray +/- 2D object array of offsets operates block-wise correctly."""
# block-wise DataFrame operations will require operating on 2D
# DatetimeArray/TimedeltaArray, so check that specifically.
dti = pd.date_range("1994-02-13", freq="2W", periods=4)
dta = dti._data.reshape((4, 1))
other = np.array([[pd.offsets.Day(n)] for n in range(4)])
assert other.shape == dta.shape
with tm.assert_produces_warning(PerformanceWarning):
result = dta + other
with tm.assert_produces_warning(PerformanceWarning):
# expected computed column-wise on the 1D slice, then reshaped back
expected = (dta[:, 0] + other[:, 0]).reshape(-1, 1)
assert isinstance(result, DatetimeArray)
assert result.freq is None
tm.assert_numpy_array_equal(result._data, expected._data)
with tm.assert_produces_warning(PerformanceWarning):
# Case where we expect to get a TimedeltaArray back
result2 = dta - dta.astype(object)
assert isinstance(result2, TimedeltaArray)
assert result2.shape == (4, 1)
assert result2.freq is None
assert (result2.asi8 == 0).all()
| {
"repo_name": "jreback/pandas",
"path": "pandas/tests/arithmetic/test_datetime64.py",
"copies": "1",
"size": "91671",
"license": "bsd-3-clause",
"hash": 5792500266418371000,
"line_mean": 35.3485329104,
"line_max": 88,
"alpha_frac": 0.559773538,
"autogenerated": false,
"ratio": 3.662006151879519,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4721779689879519,
"avg_score": null,
"num_lines": null
} |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.compat import np_datetime64_compat
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
"""Comparison with a zero-dimensional np.datetime64 array unboxes the scalar."""
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
xbox = (
box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
)
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
"""Ordered comparison against a non-datetime scalar is invalid."""
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(self, other, tz_naive_fixture):
"""Ordered comparison against non-datetime array-likes is invalid."""
# We don't parametrize this over box_with_array because listlike
# other plays poorly with assert_invalid_comparison reversed checks
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
assert_invalid_comparison(dta, other, tm.to_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
"""Against mixed object array: ==/!= work elementwise, ordered comparisons raise."""
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
"""NaT == ts must be False elementwise, including inside a DataFrame."""
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
xbox = box if box not in [pd.Index, pd.array] else np.ndarray
ts = Timestamp.now(tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
"""Comparisons involving NaT elements yield False except where both sides are equal non-NaT."""
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
def test_comparison_invalid(self, tz_naive_fixture, box_with_array):
"""Comparing an int Series against a dt64 Series is invalid."""
# GH#4968
# invalid date/int comparisons
tz = tz_naive_fixture
ser = Series(range(5))
ser2 = Series(date_range("20010101", periods=5, tz=tz))
ser = tm.box_expected(ser, box_with_array)
ser2 = tm.box_expected(ser2, box_with_array)
assert_invalid_comparison(ser, ser2, box_with_array)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
"""Against a NaT scalar: == is all-False, != is all-True, ordered comparisons all-False."""
box = box_with_array
if box_with_array is tm.to_array and dtype is object:
# dont bother testing ndarray comparison methods as this fails
# on older numpys (since they check object identity)
return
xbox = box if box not in [pd.Index, pd.array] else np.ndarray
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
    "left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
    # see gh-4982
    # Make sure we can compare Timestamps on the right AND left hand side.
    ser = Series(date_range("20010101", periods=10), name="dates")
    s_nat = ser.copy(deep=True)
    # inject NaT entries into `ser`; `s_nat` keeps the clean copy
    ser[0] = Timestamp("nat")
    ser[3] = Timestamp("nat")
    op_left = getattr(operator, left)
    op_right = getattr(operator, right)
    # series-on-left must agree with the mirrored scalar-on-left comparison,
    # with and without NaT on either side
    for obj, scalar in [
        (ser, Timestamp("20010109")),
        (ser, Timestamp("nat")),
        (s_nat, Timestamp("20010109")),
        (s_nat, Timestamp("nat")),
    ]:
        expected = op_left(obj, scalar)
        result = op_right(scalar, obj)
        tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
    # GH#11034
    # Elementwise ==/!= against a Timestamp scalar; NaT entries compare
    # unequal to everything, including themselves.
    xbox = (
        box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
    )
    ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), "NaT"])
    ser = tm.box_expected(ser, box_with_array)
    result = ser != ser
    expected = tm.box_expected([False, False, True], xbox)
    tm.assert_equal(result, expected)
    warn = FutureWarning if box_with_array is pd.DataFrame else None
    with tm.assert_produces_warning(warn):
        # alignment for frame vs series comparisons deprecated
        result = ser != ser[0]
    expected = tm.box_expected([False, True, True], xbox)
    tm.assert_equal(result, expected)
    with tm.assert_produces_warning(warn):
        # alignment for frame vs series comparisons deprecated
        result = ser != ser[2]
    # ser[2] is NaT, so != is True everywhere
    expected = tm.box_expected([True, True, True], xbox)
    tm.assert_equal(result, expected)
    result = ser == ser
    expected = tm.box_expected([True, True, False], xbox)
    tm.assert_equal(result, expected)
    with tm.assert_produces_warning(warn):
        # alignment for frame vs series comparisons deprecated
        result = ser == ser[0]
    expected = tm.box_expected([True, False, False], xbox)
    tm.assert_equal(result, expected)
    with tm.assert_produces_warning(warn):
        # alignment for frame vs series comparisons deprecated
        result = ser == ser[2]
    # NaT == anything (even NaT) is False
    expected = tm.box_expected([False, False, False], xbox)
    tm.assert_equal(result, expected)
class TestDatetimeIndexComparisons:
    """Comparison-operator tests for DatetimeIndex and its boxed variants."""

    # TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
    @pytest.mark.parametrize(
        "op",
        [operator.eq, operator.ne, operator.gt, operator.lt, operator.ge, operator.le],
    )
    def test_comparators(self, op):
        # DatetimeIndex vs datetime64 scalar should match the same
        # comparison performed on the equivalent ndarray.
        index = tm.makeDateIndex(100)
        element = index[len(index) // 2]
        element = Timestamp(element).to_datetime64()
        arr = np.array(index)
        arr_result = op(arr, element)
        index_result = op(index, element)
        assert isinstance(index_result, np.ndarray)
        tm.assert_numpy_array_equal(arr_result, index_result)

    @pytest.mark.parametrize(
        "other",
        [datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
    )
    def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
        # All six comparisons against a datetime-like scalar.
        tz = tz_naive_fixture
        dti = date_range("2016-01-01", periods=2, tz=tz)
        if tz is not None:
            if isinstance(other, np.datetime64):
                # no tzaware version available
                return
            other = localize_pydatetime(other, dti.tzinfo)

        result = dti == other
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)

        result = dti > other
        expected = np.array([False, True])
        tm.assert_numpy_array_equal(result, expected)

        result = dti >= other
        expected = np.array([True, True])
        tm.assert_numpy_array_equal(result, expected)

        result = dti < other
        expected = np.array([False, False])
        tm.assert_numpy_array_equal(result, expected)

        result = dti <= other
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)

    @pytest.mark.parametrize("dtype", [None, object])
    def test_dti_cmp_nat(self, dtype, box_with_array):
        # NaT entries compare False under ==/ordering and True under !=.
        if box_with_array is tm.to_array and dtype is object:
            # don't bother testing ndarray comparison methods as this fails
            # on older numpys (since they check object identity)
            return

        xbox = (
            box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
        )

        left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
        right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
        left = tm.box_expected(left, box_with_array)
        right = tm.box_expected(right, box_with_array)

        lhs, rhs = left, right
        if dtype is object:
            lhs, rhs = left.astype(object), right.astype(object)

        result = rhs == lhs
        expected = np.array([False, False, True])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)

        result = lhs != rhs
        expected = np.array([True, True, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)

        # comparisons against the NaT scalar
        expected = np.array([False, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(lhs == NaT, expected)
        tm.assert_equal(NaT == rhs, expected)

        expected = np.array([True, True, True])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(lhs != NaT, expected)
        tm.assert_equal(NaT != lhs, expected)

        expected = np.array([False, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(lhs < NaT, expected)
        tm.assert_equal(NaT > lhs, expected)

    def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
        # NaT in datetime comparisons behaves exactly like NaN in float
        # comparisons: never equal, never ordered.
        fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
        fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
        didx1 = DatetimeIndex(
            ["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
        )
        didx2 = DatetimeIndex(
            ["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
        )
        darr = np.array(
            [
                np_datetime64_compat("2014-02-01 00:00Z"),
                np_datetime64_compat("2014-03-01 00:00Z"),
                np_datetime64_compat("nat"),
                np.datetime64("nat"),
                np_datetime64_compat("2014-06-01 00:00Z"),
                np_datetime64_compat("2014-07-01 00:00Z"),
            ]
        )

        cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]

        # Check pd.NaT is handled the same as np.nan
        with tm.assert_produces_warning(None):
            for idx1, idx2 in cases:
                result = idx1 < idx2
                expected = np.array([True, False, False, False, True, False])
                tm.assert_numpy_array_equal(result, expected)

                result = idx2 > idx1
                expected = np.array([True, False, False, False, True, False])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 <= idx2
                expected = np.array([True, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)

                result = idx2 >= idx1
                expected = np.array([True, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 == idx2
                expected = np.array([False, False, False, False, False, True])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 != idx2
                expected = np.array([True, True, True, True, True, False])
                tm.assert_numpy_array_equal(result, expected)

        # comparisons against the NaN/NaT scalar itself
        with tm.assert_produces_warning(None):
            for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
                result = idx1 < val
                expected = np.array([False, False, False, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 > val
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 <= val
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 >= val
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 == val
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 != val
                expected = np.array([True, True, True, True, True, True])
                tm.assert_numpy_array_equal(result, expected)

        # Check pd.NaT is handled the same as np.nan
        with tm.assert_produces_warning(None):
            for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
                result = idx1 < val
                expected = np.array([True, False, False, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 > val
                expected = np.array([False, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 <= val
                expected = np.array([True, False, True, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 >= val
                expected = np.array([False, False, True, False, True, True])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 == val
                expected = np.array([False, False, True, False, False, False])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 != val
                expected = np.array([True, True, False, True, True, True])
                tm.assert_numpy_array_equal(result, expected)

    @pytest.mark.parametrize(
        "op",
        [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
    )
    def test_comparison_tzawareness_compat(self, op, box_with_array):
        # GH#18162
        # Mixing tz-naive and tz-aware raises TypeError for ordering
        # comparisons; ==/!= are allowed (and return all-False/all-True).
        box = box_with_array
        dr = date_range("2016-01-01", periods=6)
        dz = dr.tz_localize("US/Pacific")
        dr = tm.box_expected(dr, box)
        dz = tm.box_expected(dz, box)

        if box is pd.DataFrame:
            # extract the single row as a list of Timestamps
            tolist = lambda x: x.astype(object).values.tolist()[0]
        else:
            tolist = list

        if op not in [operator.eq, operator.ne]:
            msg = (
                r"Invalid comparison between dtype=datetime64\[ns.*\] "
                "and (Timestamp|DatetimeArray|list|ndarray)"
            )
            with pytest.raises(TypeError, match=msg):
                op(dr, dz)
            with pytest.raises(TypeError, match=msg):
                op(dr, tolist(dz))
            with pytest.raises(TypeError, match=msg):
                op(dr, np.array(tolist(dz), dtype=object))
            with pytest.raises(TypeError, match=msg):
                op(dz, dr)
            with pytest.raises(TypeError, match=msg):
                op(dz, tolist(dr))
            with pytest.raises(TypeError, match=msg):
                op(dz, np.array(tolist(dr), dtype=object))

        # The aware==aware and naive==naive comparisons should *not* raise
        assert np.all(dr == dr)
        assert np.all(dr == tolist(dr))
        assert np.all(tolist(dr) == dr)
        assert np.all(np.array(tolist(dr), dtype=object) == dr)
        assert np.all(dr == np.array(tolist(dr), dtype=object))
        assert np.all(dz == dz)
        assert np.all(dz == tolist(dz))
        assert np.all(tolist(dz) == dz)
        assert np.all(np.array(tolist(dz), dtype=object) == dz)
        assert np.all(dz == np.array(tolist(dz), dtype=object))

    @pytest.mark.parametrize(
        "op",
        [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
    )
    def test_comparison_tzawareness_compat_scalars(self, op, box_with_array):
        # GH#18162
        # Same tz-compat rules when the other operand is a Timestamp scalar.
        dr = date_range("2016-01-01", periods=6)
        dz = dr.tz_localize("US/Pacific")
        dr = tm.box_expected(dr, box_with_array)
        dz = tm.box_expected(dz, box_with_array)

        # Check comparisons against scalar Timestamps
        ts = Timestamp("2000-03-14 01:59")
        ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")

        assert np.all(dr > ts)
        msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
        if op not in [operator.eq, operator.ne]:
            with pytest.raises(TypeError, match=msg):
                op(dr, ts_tz)
        assert np.all(dz > ts_tz)
        if op not in [operator.eq, operator.ne]:
            with pytest.raises(TypeError, match=msg):
                op(dz, ts)
        if op not in [operator.eq, operator.ne]:
            # GH#12601: Check comparison against Timestamps and DatetimeIndex
            with pytest.raises(TypeError, match=msg):
                op(ts, dz)

    @pytest.mark.parametrize(
        "op",
        [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
    )
    @pytest.mark.parametrize(
        "other",
        [datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
    )
    # Bug in NumPy? https://github.com/numpy/numpy/issues/13841
    # Raising in __eq__ will fallback to NumPy, which warns, fails,
    # then re-raises the original exception. So we just need to ignore.
    @pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
    @pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
    def test_scalar_comparison_tzawareness(
        self, op, other, tz_aware_fixture, box_with_array
    ):
        # tz-aware array vs tz-naive scalar: ==/!= give all-False/all-True,
        # ordering comparisons raise TypeError.
        box = box_with_array
        tz = tz_aware_fixture
        dti = date_range("2016-01-01", periods=2, tz=tz)
        xbox = box if box not in [pd.Index, pd.array] else np.ndarray
        dtarr = tm.box_expected(dti, box_with_array)
        if op in [operator.eq, operator.ne]:
            exbool = op is operator.ne
            expected = np.array([exbool, exbool], dtype=bool)
            expected = tm.box_expected(expected, xbox)
            result = op(dtarr, other)
            tm.assert_equal(result, expected)
            result = op(other, dtarr)
            tm.assert_equal(result, expected)
        else:
            msg = (
                r"Invalid comparison between dtype=datetime64\[ns, .*\] "
                f"and {type(other).__name__}"
            )
            with pytest.raises(TypeError, match=msg):
                op(dtarr, other)
            with pytest.raises(TypeError, match=msg):
                op(other, dtarr)

    @pytest.mark.parametrize(
        "op",
        [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
    )
    def test_nat_comparison_tzawareness(self, op):
        # GH#19276
        # tzaware DatetimeIndex should not raise when compared to NaT
        dti = DatetimeIndex(
            ["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
        )
        # only != is True against NaT; everything else is False
        expected = np.array([op == operator.ne] * len(dti))
        result = op(dti, NaT)
        tm.assert_numpy_array_equal(result, expected)

        result = op(dti.tz_localize("US/Pacific"), NaT)
        tm.assert_numpy_array_equal(result, expected)

    def test_dti_cmp_str(self, tz_naive_fixture):
        # GH#22074
        # regardless of tz, we expect these comparisons are valid
        tz = tz_naive_fixture
        rng = date_range("1/1/2000", periods=10, tz=tz)
        other = "1/1/2000"

        result = rng == other
        expected = np.array([True] + [False] * 9)
        tm.assert_numpy_array_equal(result, expected)

        result = rng != other
        expected = np.array([False] + [True] * 9)
        tm.assert_numpy_array_equal(result, expected)

        result = rng < other
        expected = np.array([False] * 10)
        tm.assert_numpy_array_equal(result, expected)

        result = rng <= other
        expected = np.array([True] + [False] * 9)
        tm.assert_numpy_array_equal(result, expected)

        result = rng > other
        expected = np.array([False] + [True] * 9)
        tm.assert_numpy_array_equal(result, expected)

        result = rng >= other
        expected = np.array([True] * 10)
        tm.assert_numpy_array_equal(result, expected)

    def test_dti_cmp_list(self):
        # comparing against a plain list matches comparing index-to-index
        rng = date_range("1/1/2000", periods=10)
        result = rng == list(rng)
        expected = rng == rng
        tm.assert_numpy_array_equal(result, expected)

    @pytest.mark.parametrize(
        "other",
        [
            pd.timedelta_range("1D", periods=10),
            pd.timedelta_range("1D", periods=10).to_series(),
            pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
        ],
        ids=lambda x: type(x).__name__,
    )
    def test_dti_cmp_tdi_tzawareness(self, other):
        # GH#22074
        # regression test that we _don't_ call _assert_tzawareness_compat
        # when comparing against TimedeltaIndex
        dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")

        result = dti == other
        expected = np.array([False] * 10)
        tm.assert_numpy_array_equal(result, expected)

        result = dti != other
        expected = np.array([True] * 10)
        tm.assert_numpy_array_equal(result, expected)

        # ordering comparisons against timedeltas are meaningless
        msg = "Invalid comparison between"
        with pytest.raises(TypeError, match=msg):
            dti < other
        with pytest.raises(TypeError, match=msg):
            dti <= other
        with pytest.raises(TypeError, match=msg):
            dti > other
        with pytest.raises(TypeError, match=msg):
            dti >= other

    def test_dti_cmp_object_dtype(self):
        # GH#22074
        dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")

        other = dti.astype("O")
        result = dti == other
        expected = np.array([True] * 10)
        tm.assert_numpy_array_equal(result, expected)

        # tz-aware vs tz-naive object entries: all unequal
        other = dti.tz_localize(None)
        result = dti != other
        tm.assert_numpy_array_equal(result, expected)

        # mixed object array: Timestamps match, Timedeltas don't
        other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
        result = dti == other
        expected = np.array([True] * 5 + [False] * 5)
        tm.assert_numpy_array_equal(result, expected)
        msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
        with pytest.raises(TypeError, match=msg):
            dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
    """Addition/subtraction tests for datetime64 data across all boxes."""

    # This class is intended for "finished" tests that are fully parametrized
    # over DataFrame/Series/Index/DatetimeArray

    # -------------------------------------------------------------
    # Addition/Subtraction of timedelta-like

    @pytest.mark.arm_slow
    def test_dt64arr_add_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        # GH#22005, GH#22163 check DataFrame doesn't raise TypeError
        tz = tz_naive_fixture
        rng = date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = rng + two_hours
        tm.assert_equal(result, expected)

    def test_dt64arr_iadd_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        # in-place addition of a timedelta-like scalar
        tz = tz_naive_fixture
        rng = date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        rng += two_hours
        tm.assert_equal(rng, expected)

    def test_dt64arr_sub_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        tz = tz_naive_fixture
        rng = date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = rng - two_hours
        tm.assert_equal(result, expected)

    def test_dt64arr_isub_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        # in-place subtraction of a timedelta-like scalar
        tz = tz_naive_fixture
        rng = date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        rng -= two_hours
        tm.assert_equal(rng, expected)

    # TODO: redundant with test_dt64arr_add_timedeltalike_scalar
    def test_dt64arr_add_td64_scalar(self, box_with_array):
        # scalar timedeltas/np.timedelta64 objects
        # operate with np.timedelta64 correctly
        ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        expected = Series(
            [Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
        )
        dtarr = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = dtarr + np.timedelta64(1, "s")
        tm.assert_equal(result, expected)
        result = np.timedelta64(1, "s") + dtarr
        tm.assert_equal(result, expected)

        # sub-second resolution as well
        expected = Series(
            [Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
        )
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr + np.timedelta64(5, "ms")
        tm.assert_equal(result, expected)
        result = np.timedelta64(5, "ms") + dtarr
        tm.assert_equal(result, expected)

    def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
        # GH#23320 special handling for timedelta64("NaT")
        tz = tz_naive_fixture
        dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
        other = np.timedelta64("NaT")
        # NaT timedelta propagates: every entry becomes NaT
        expected = DatetimeIndex(["NaT"] * 9, tz=tz)
        obj = tm.box_expected(dti, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = obj + other
        tm.assert_equal(result, expected)
        result = other + obj
        tm.assert_equal(result, expected)
        result = obj - other
        tm.assert_equal(result, expected)
        # timedelta - datetime is undefined
        msg = "cannot subtract"
        with pytest.raises(TypeError, match=msg):
            other - obj

    def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
        tz = tz_naive_fixture
        dti = date_range("2016-01-01", periods=3, tz=tz)
        tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
        tdarr = tdi.values

        expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
        dtarr = tm.box_expected(dti, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr + tdarr
        tm.assert_equal(result, expected)
        result = tdarr + dtarr
        tm.assert_equal(result, expected)

        expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr - tdarr
        tm.assert_equal(result, expected)
        # timedelta-array minus datetime-array is invalid
        msg = "cannot subtract|(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            tdarr - dtarr

    # -----------------------------------------------------------------
    # Subtraction of datetime-like scalars

    @pytest.mark.parametrize(
        "ts",
        [
            Timestamp("2013-01-01"),
            Timestamp("2013-01-01").to_pydatetime(),
            Timestamp("2013-01-01").to_datetime64(),
        ],
    )
    def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
        # GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
        idx = date_range("2013-01-01", periods=3)._with_freq(None)
        idx = tm.box_expected(idx, box_with_array)
        expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
        expected = tm.box_expected(expected, box_with_array)
        result = idx - ts
        tm.assert_equal(result, expected)

    def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
        # GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
        # for DataFrame operation
        dt64 = np.datetime64("2013-01-01")
        assert dt64.dtype == "datetime64[D]"
        dti = date_range("20130101", periods=3)._with_freq(None)
        dtarr = tm.box_expected(dti, box_with_array)
        expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr - dt64
        tm.assert_equal(result, expected)
        result = dt64 - dtarr
        tm.assert_equal(result, -expected)

    def test_dt64arr_sub_timestamp(self, box_with_array):
        # tz-aware array minus a Timestamp from the same array
        ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
        ser = ser._with_freq(None)
        ts = ser[0]
        ser = tm.box_expected(ser, box_with_array)
        delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
        expected = tm.box_expected(delta_series, box_with_array)
        tm.assert_equal(ser - ts, expected)
        tm.assert_equal(ts - ser, -expected)

    def test_dt64arr_sub_NaT(self, box_with_array):
        # GH#18808
        dti = DatetimeIndex([NaT, Timestamp("19900315")])
        ser = tm.box_expected(dti, box_with_array)
        result = ser - NaT
        expected = Series([NaT, NaT], dtype="timedelta64[ns]")
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(result, expected)

        # same behavior with a tz-aware array
        dti_tz = dti.tz_localize("Asia/Tokyo")
        ser_tz = tm.box_expected(dti_tz, box_with_array)
        result = ser_tz - NaT
        expected = Series([NaT, NaT], dtype="timedelta64[ns]")
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(result, expected)

    # -------------------------------------------------------------
    # Subtraction of datetime-like array-like

    def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
        # subtracting an object-dtype datetime array is supported (slow path)
        dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
        expected = dti - dti
        obj = tm.box_expected(dti, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        with tm.assert_produces_warning(PerformanceWarning):
            result = obj - obj.astype(object)
        tm.assert_equal(result, expected)

    def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
        dti = date_range("2016-01-01", periods=3, tz=None)
        dt64vals = dti.values
        dtarr = tm.box_expected(dti, box_with_array)
        expected = dtarr - dtarr
        result = dtarr - dt64vals
        tm.assert_equal(result, expected)
        result = dt64vals - dtarr
        tm.assert_equal(result, expected)

    def test_dt64arr_aware_sub_dt64ndarray_raises(
        self, tz_aware_fixture, box_with_array
    ):
        # tz-aware minus tz-naive ndarray is ambiguous -> TypeError
        tz = tz_aware_fixture
        dti = date_range("2016-01-01", periods=3, tz=tz)
        dt64vals = dti.values
        dtarr = tm.box_expected(dti, box_with_array)
        msg = "subtraction must have the same timezones or"
        with pytest.raises(TypeError, match=msg):
            dtarr - dt64vals
        with pytest.raises(TypeError, match=msg):
            dt64vals - dtarr

    # -------------------------------------------------------------
    # Addition of datetime-like others (invalid)

    def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
        # datetime + datetime is always invalid
        tz = tz_naive_fixture
        dti = date_range("2016-01-01", periods=3, tz=tz)
        dt64vals = dti.values
        dtarr = tm.box_expected(dti, box_with_array)
        msg = "cannot add"
        with pytest.raises(TypeError, match=msg):
            dtarr + dt64vals
        with pytest.raises(TypeError, match=msg):
            dt64vals + dtarr

    def test_dt64arr_add_timestamp_raises(self, box_with_array):
        # GH#22163 ensure DataFrame doesn't cast Timestamp to i8
        idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
        idx = tm.box_expected(idx, box_with_array)
        msg = "cannot add"
        with pytest.raises(TypeError, match=msg):
            idx + Timestamp("2011-01-01")
        with pytest.raises(TypeError, match=msg):
            Timestamp("2011-01-01") + idx

    # -------------------------------------------------------------
    # Other Invalid Addition/Subtraction

    @pytest.mark.parametrize(
        "other",
        [
            3.14,
            np.array([2.0, 3.0]),
            # GH#13078 datetime +/- Period is invalid
            Period("2011-01-01", freq="D"),
            # https://github.com/pandas-dev/pandas/issues/10329
            time(1, 2, 3),
        ],
    )
    @pytest.mark.parametrize("dti_freq", [None, "D"])
    def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
        dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
        dtarr = tm.box_expected(dti, box_with_array)
        # accept any of the error messages the various boxes produce
        msg = "|".join(
            [
                "unsupported operand type",
                "cannot (add|subtract)",
                "cannot use operands with types",
                "ufunc '?(add|subtract)'? cannot use operands with types",
                "Concatenation operation is not implemented for NumPy arrays",
            ]
        )
        assert_invalid_addsub_type(dtarr, other, msg)

    @pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
    @pytest.mark.parametrize("dti_freq", [None, "D"])
    def test_dt64arr_add_sub_parr(
        self, dti_freq, pi_freq, box_with_array, box_with_array2
    ):
        # GH#20049 subtracting PeriodIndex should raise TypeError
        dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
        pi = dti.to_period(pi_freq)
        dtarr = tm.box_expected(dti, box_with_array)
        parr = tm.box_expected(pi, box_with_array2)
        msg = "|".join(
            [
                "cannot (add|subtract)",
                "unsupported operand",
                "descriptor.*requires",
                "ufunc.*cannot use operands",
            ]
        )
        assert_invalid_addsub_type(dtarr, parr, msg)

    def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
        # https://github.com/pandas-dev/pandas/issues/10329
        # datetime.time objects cannot be added to or subtracted from
        # datetime64 data
        tz = tz_naive_fixture
        obj1 = date_range("2012-01-01", periods=3, tz=tz)
        obj2 = [time(i, i, i) for i in range(3)]
        obj1 = tm.box_expected(obj1, box_with_array)
        obj2 = tm.box_expected(obj2, box_with_array)

        with warnings.catch_warnings(record=True):
            # pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
            # applied to Series or DatetimeIndex
            # we aren't testing that here, so ignore.
            warnings.simplefilter("ignore", PerformanceWarning)

            # If `x + y` raises, then `y + x` should raise here as well
            msg = (
                r"unsupported operand type\(s\) for -: "
                "'(Timestamp|DatetimeArray)' and 'datetime.time'"
            )
            with pytest.raises(TypeError, match=msg):
                obj1 - obj2

            msg = "|".join(
                [
                    "cannot subtract DatetimeArray from ndarray",
                    "ufunc (subtract|'subtract') cannot use operands with types "
                    r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
                ]
            )
            with pytest.raises(TypeError, match=msg):
                obj2 - obj1

            msg = (
                r"unsupported operand type\(s\) for \+: "
                "'(Timestamp|DatetimeArray)' and 'datetime.time'"
            )
            with pytest.raises(TypeError, match=msg):
                obj1 + obj2

            msg = "|".join(
                [
                    r"unsupported operand type\(s\) for \+: "
                    "'(Timestamp|DatetimeArray)' and 'datetime.time'",
                    "ufunc (add|'add') cannot use operands with types "
                    r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
                ]
            )
            with pytest.raises(TypeError, match=msg):
                obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
    # GH#4532
    # Adding a Tick offset (Second) must work with the offset on
    # either side of the `+`.
    stamps = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
    shifted = Series(
        [Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
    )
    stamps = tm.box_expected(stamps, box_with_array)
    shifted = tm.box_expected(shifted, box_with_array)
    offset = pd.offsets.Second(5)
    tm.assert_equal(stamps + offset, shifted)
    tm.assert_equal(offset + stamps, shifted)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
    # GH#4532
    # Subtracting a Tick offset works; adding the negated offset agrees;
    # offset - array is invalid.
    stamps = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
    shifted = Series(
        [Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
    )
    stamps = tm.box_expected(stamps, box_with_array)
    shifted = tm.box_expected(shifted, box_with_array)
    offset = pd.offsets.Second(5)
    tm.assert_equal(stamps - offset, shifted)
    tm.assert_equal(-offset + stamps, shifted)
    msg = "(bad|unsupported) operand type for unary"
    with pytest.raises(TypeError, match=msg):
        offset - stamps
@pytest.mark.parametrize(
    "cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
    # GH#4532
    # Smoke test: add/radd/sub with every Tick offset class must not raise.
    dtarr = tm.box_expected(
        Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")]),
        box_with_array,
    )
    off = getattr(pd.offsets, cls_name)(5)
    dtarr + off
    off + dtarr
    dtarr - off
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
    # GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
    # Adding Hour/timedelta across a tz boundary keeps wall-clock math.
    tz = tz_aware_fixture
    if tz == "US/Pacific":
        # crosses the DST transition on 2012-11-04
        dates = date_range("2012-11-01", periods=3, tz=tz)
        offset = dates + pd.offsets.Hour(5)
        assert dates[0] + pd.offsets.Hour(5) == offset[0]
    dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
    expected = DatetimeIndex(
        ["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
        freq="H",
        tz=tz,
    )
    dates = tm.box_expected(dates, box_with_array)
    expected = tm.box_expected(expected, box_with_array)
    # TODO: parametrize over the scalar being added?  radd?  sub?
    # all three timedelta-like scalars should give the same result
    offset = dates + pd.offsets.Hour(5)
    tm.assert_equal(offset, expected)
    offset = dates + np.timedelta64(5, "h")
    tm.assert_equal(offset, expected)
    offset = dates + timedelta(hours=5)
    tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
    # GH#10699
    # DateOffsets built from relativedelta kwargs (years/months/.../us)
    # must match pointwise Timestamp arithmetic.
    vec = DatetimeIndex(
        [
            Timestamp("2000-01-05 00:15:00"),
            Timestamp("2000-01-31 00:23:00"),
            Timestamp("2000-01-01"),
            Timestamp("2000-03-31"),
            Timestamp("2000-02-29"),
            Timestamp("2000-12-31"),
            Timestamp("2000-05-15"),
            Timestamp("2001-06-15"),
        ]
    )
    vec = tm.box_expected(vec, box_with_array)
    # for DataFrame, iterate the single row's Timestamps
    vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
    # DateOffset relativedelta fastpath
    relative_kwargs = [
        ("years", 2),
        ("months", 5),
        ("days", 3),
        ("hours", 5),
        ("minutes", 10),
        ("seconds", 2),
        ("microseconds", 5),
    ]
    for i, (unit, value) in enumerate(relative_kwargs):
        # single-kwarg offset
        off = DateOffset(**{unit: value})
        expected = DatetimeIndex([x + off for x in vec_items])
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(expected, vec + off)
        expected = DatetimeIndex([x - off for x in vec_items])
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(expected, vec - off)
        # cumulative offset combining all kwargs seen so far
        off = DateOffset(**dict(relative_kwargs[: i + 1]))
        expected = DatetimeIndex([x + off for x in vec_items])
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(expected, vec + off)
        expected = DatetimeIndex([x - off for x in vec_items])
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(expected, vec - off)
        # offset - datetimearray is invalid
        msg = "(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
    "cls_and_kwargs",
    [
        "YearBegin",
        ("YearBegin", {"month": 5}),
        "YearEnd",
        ("YearEnd", {"month": 5}),
        "MonthBegin",
        "MonthEnd",
        "SemiMonthEnd",
        "SemiMonthBegin",
        "Week",
        ("Week", {"weekday": 3}),
        "Week",
        ("Week", {"weekday": 6}),
        "BusinessDay",
        "BDay",
        "QuarterEnd",
        "QuarterBegin",
        "CustomBusinessDay",
        "CDay",
        "CBMonthEnd",
        "CBMonthBegin",
        "BMonthBegin",
        "BMonthEnd",
        "BusinessHour",
        "BYearBegin",
        "BYearEnd",
        "BQuarterBegin",
        ("LastWeekOfMonth", {"weekday": 2}),
        (
            "FY5253Quarter",
            {
                "qtr_with_extra_week": 1,
                "startingMonth": 1,
                "weekday": 2,
                "variation": "nearest",
            },
        ),
        ("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
        ("WeekOfMonth", {"weekday": 2, "week": 2}),
        "Easter",
        ("DateOffset", {"day": 4}),
        ("DateOffset", {"month": 5}),
    ],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
    self, box_with_array, n, normalize, cls_and_kwargs
):
    # GH#10699
    # assert vectorized operation matches pointwise operations
    if isinstance(cls_and_kwargs, tuple):
        # If cls_name param is a tuple, then 2nd entry is kwargs for
        # the offset constructor
        cls_name, kwargs = cls_and_kwargs
    else:
        cls_name = cls_and_kwargs
        kwargs = {}
    if n == 0 and cls_name in [
        "WeekOfMonth",
        "LastWeekOfMonth",
        "FY5253Quarter",
        "FY5253",
    ]:
        # passing n = 0 is invalid for these offset classes
        return
    vec = DatetimeIndex(
        [
            Timestamp("2000-01-05 00:15:00"),
            Timestamp("2000-01-31 00:23:00"),
            Timestamp("2000-01-01"),
            Timestamp("2000-03-31"),
            Timestamp("2000-02-29"),
            Timestamp("2000-12-31"),
            Timestamp("2000-05-15"),
            Timestamp("2001-06-15"),
        ]
    )
    vec = tm.box_expected(vec, box_with_array)
    # for DataFrame, iterate the single row's Timestamps
    vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
    offset_cls = getattr(pd.offsets, cls_name)
    with warnings.catch_warnings(record=True):
        # pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
        # applied to Series or DatetimeIndex
        # we aren't testing that here, so ignore.
        warnings.simplefilter("ignore", PerformanceWarning)
        offset = offset_cls(n, normalize=normalize, **kwargs)
        expected = DatetimeIndex([x + offset for x in vec_items])
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(expected, vec + offset)
        expected = DatetimeIndex([x - offset for x in vec_items])
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(expected, vec - offset)
        # radd should match add
        expected = DatetimeIndex([offset + x for x in vec_items])
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(expected, offset + vec)
        # offset - datetimearray is invalid
        msg = "(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            offset - vec
    def test_dt64arr_add_sub_DateOffset(self, box_with_array):
        """
        Add/radd/sub of single offsets (DateOffset, Day, MonthEnd) against
        tz-naive and tz-aware datetime64 data gives the pointwise result.
        """
        # GH#10699
        s = date_range("2000-01-01", "2000-01-31", name="a")
        s = tm.box_expected(s, box_with_array)
        result = s + DateOffset(years=1)
        result2 = DateOffset(years=1) + s
        # adding a non-Tick DateOffset does not preserve freq on the result
        exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)
        result = s - DateOffset(years=1)
        exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        # tz-aware data with a Tick offset (Day)
        s = DatetimeIndex(
            [
                Timestamp("2000-01-15 00:15:00", tz="US/Central"),
                Timestamp("2000-02-15", tz="US/Central"),
            ],
            name="a",
        )
        s = tm.box_expected(s, box_with_array)
        result = s + pd.offsets.Day()
        result2 = pd.offsets.Day() + s
        exp = DatetimeIndex(
            [
                Timestamp("2000-01-16 00:15:00", tz="US/Central"),
                Timestamp("2000-02-16", tz="US/Central"),
            ],
            name="a",
        )
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)
        # tz-aware data with an anchored offset (MonthEnd)
        s = DatetimeIndex(
            [
                Timestamp("2000-01-15 00:15:00", tz="US/Central"),
                Timestamp("2000-02-15", tz="US/Central"),
            ],
            name="a",
        )
        s = tm.box_expected(s, box_with_array)
        result = s + pd.offsets.MonthEnd()
        result2 = pd.offsets.MonthEnd() + s
        exp = DatetimeIndex(
            [
                Timestamp("2000-01-31 00:15:00", tz="US/Central"),
                Timestamp("2000-02-29", tz="US/Central"),
            ],
            name="a",
        )
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
expected = tm.box_expected(expected, box_with_array)
if box_other:
other = tm.box_expected(other, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dtarr, other)
tm.assert_equal(res, expected)
    @pytest.mark.parametrize(
        "op, offset, exp, exp_freq",
        [
            (
                "__add__",
                DateOffset(months=3, days=10),
                [
                    Timestamp("2014-04-11"),
                    Timestamp("2015-04-11"),
                    Timestamp("2016-04-11"),
                    Timestamp("2017-04-11"),
                ],
                None,
            ),
            (
                "__add__",
                DateOffset(months=3),
                [
                    Timestamp("2014-04-01"),
                    Timestamp("2015-04-01"),
                    Timestamp("2016-04-01"),
                    Timestamp("2017-04-01"),
                ],
                "AS-APR",
            ),
            (
                "__sub__",
                DateOffset(months=3, days=10),
                [
                    Timestamp("2013-09-21"),
                    Timestamp("2014-09-21"),
                    Timestamp("2015-09-21"),
                    Timestamp("2016-09-21"),
                ],
                None,
            ),
            (
                "__sub__",
                DateOffset(months=3),
                [
                    Timestamp("2013-10-01"),
                    Timestamp("2014-10-01"),
                    Timestamp("2015-10-01"),
                    Timestamp("2016-10-01"),
                ],
                "AS-OCT",
            ),
        ],
    )
    def test_dti_add_sub_nonzero_mth_offset(
        self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
    ):
        """
        Adding/subtracting a month-bearing DateOffset to a tz-aware annual
        range yields the expected pointwise timestamps (GH#26258).
        """
        # NOTE(review): `exp_freq` is parametrized but never used in the body;
        # either assert the result's freq against it or drop the parameter.
        # GH 26258
        tz = tz_aware_fixture
        date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
        # third positional arg False presumably disables the DataFrame
        # transposition in box_expected — TODO confirm
        date = tm.box_expected(date, box_with_array, False)
        mth = getattr(date, op)
        result = mth(offset)
        expected = DatetimeIndex(exp, tz=tz)
        expected = tm.box_expected(expected, box_with_array, False)
        tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
    """Overflow and NaT-masking behavior of datetime64 arithmetic."""
    # TODO: box + de-duplicate
    def test_dt64_overflow_masking(self, box_with_array):
        # GH#25317
        # NaT positions must be masked out of the int64 subtraction rather
        # than overflowing against NaT's sentinel value.
        left = Series([Timestamp("1969-12-31")])
        right = Series([NaT])
        left = tm.box_expected(left, box_with_array)
        right = tm.box_expected(right, box_with_array)
        expected = TimedeltaIndex([NaT])
        expected = tm.box_expected(expected, box_with_array)
        result = left - right
        tm.assert_equal(result, expected)
    def test_dt64_series_arith_overflow(self):
        # GH#12534, fixed by GH#19024
        dt = Timestamp("1700-01-31")
        td = Timedelta("20000 Days")
        dti = date_range("1949-09-30", freq="100Y", periods=4)
        ser = Series(dti)
        msg = "Overflow in int64 addition"
        # with no NaT present every op overflows and must raise
        with pytest.raises(OverflowError, match=msg):
            ser - dt
        with pytest.raises(OverflowError, match=msg):
            dt - ser
        with pytest.raises(OverflowError, match=msg):
            ser + td
        with pytest.raises(OverflowError, match=msg):
            td + ser
        # once the overflowing entry is NaT, the op succeeds with NaT output
        ser.iloc[-1] = NaT
        expected = Series(
            ["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
        )
        res = ser + td
        tm.assert_series_equal(res, expected)
        res = td + ser
        tm.assert_series_equal(res, expected)
        ser.iloc[1:] = NaT
        expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
        res = ser - dt
        tm.assert_series_equal(res, expected)
        res = dt - ser
        tm.assert_series_equal(res, -expected)
    def test_datetimeindex_sub_timestamp_overflow(self):
        # DatetimeIndex - scalar near Timestamp.max/min: overflow must raise,
        # in-range results must be exact, for every scalar representation.
        dtimax = pd.to_datetime(["now", Timestamp.max])
        dtimin = pd.to_datetime(["now", Timestamp.min])
        tsneg = Timestamp("1950-01-01")
        ts_neg_variants = [
            tsneg,
            tsneg.to_pydatetime(),
            tsneg.to_datetime64().astype("datetime64[ns]"),
            tsneg.to_datetime64().astype("datetime64[D]"),
        ]
        tspos = Timestamp("1980-01-01")
        ts_pos_variants = [
            tspos,
            tspos.to_pydatetime(),
            tspos.to_datetime64().astype("datetime64[ns]"),
            tspos.to_datetime64().astype("datetime64[D]"),
        ]
        msg = "Overflow in int64 addition"
        for variant in ts_neg_variants:
            with pytest.raises(OverflowError, match=msg):
                dtimax - variant
        expected = Timestamp.max.value - tspos.value
        for variant in ts_pos_variants:
            res = dtimax - variant
            assert res[1].value == expected
        expected = Timestamp.min.value - tsneg.value
        for variant in ts_neg_variants:
            res = dtimin - variant
            assert res[1].value == expected
        for variant in ts_pos_variants:
            with pytest.raises(OverflowError, match=msg):
                dtimin - variant
    def test_datetimeindex_sub_datetimeindex_overflow(self):
        # GH#22492, GH#22508
        dtimax = pd.to_datetime(["now", Timestamp.max])
        dtimin = pd.to_datetime(["now", Timestamp.min])
        ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
        ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
        # General tests
        expected = Timestamp.max.value - ts_pos[1].value
        result = dtimax - ts_pos
        assert result[1].value == expected
        expected = Timestamp.min.value - ts_neg[1].value
        result = dtimin - ts_neg
        assert result[1].value == expected
        msg = "Overflow in int64 addition"
        with pytest.raises(OverflowError, match=msg):
            dtimax - ts_neg
        with pytest.raises(OverflowError, match=msg):
            dtimin - ts_pos
        # Edge cases: values straddling the representable range by 1us
        tmin = pd.to_datetime([Timestamp.min])
        t1 = tmin + Timedelta.max + Timedelta("1us")
        with pytest.raises(OverflowError, match=msg):
            t1 - tmin
        tmax = pd.to_datetime([Timestamp.max])
        t2 = tmax + Timedelta.min - Timedelta("1us")
        with pytest.raises(OverflowError, match=msg):
            tmax - t2
class TestTimestampSeriesArithmetic:
    """Arithmetic between datetime64 Series and scalars/offsets/NaT."""
    def test_empty_series_add_sub(self):
        # GH#13844
        # empty dt64 +/- empty td64 stays an empty dt64 Series
        a = Series(dtype="M8[ns]")
        b = Series(dtype="m8[ns]")
        tm.assert_series_equal(a, a + b)
        tm.assert_series_equal(a, a - b)
        tm.assert_series_equal(a, b + a)
        # td64 - dt64 is undefined even when empty
        msg = "cannot subtract"
        with pytest.raises(TypeError, match=msg):
            b - a
    def test_operators_datetimelike(self):
        # smoke test: these ops must not raise (results not asserted)
        # ## timedelta64 ###
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan
        # ## datetime64 ###
        dt1 = Series(
            [
                Timestamp("20111230"),
                Timestamp("20120101"),
                Timestamp("20120103"),
            ]
        )
        dt1.iloc[2] = np.nan
        dt2 = Series(
            [
                Timestamp("20111231"),
                Timestamp("20120102"),
                Timestamp("20120104"),
            ]
        )
        dt1 - dt2
        dt2 - dt1
        # datetime64 with timedelta
        dt1 + td1
        td1 + dt1
        dt1 - td1
        # timedelta with datetime64
        td1 + dt1
        dt1 + td1
    def test_dt64ser_sub_datetime_dtype(self):
        # subtracting a stdlib datetime yields timedelta64[ns]
        ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
        dt = datetime(1993, 6, 22, 13, 30)
        ser = Series([ts])
        result = pd.to_timedelta(np.abs(ser - dt))
        assert result.dtype == "timedelta64[ns]"
    # -------------------------------------------------------------
    # TODO: This next block of tests came from tests.series.test_operators,
    # needs to be de-duplicated and parametrized over `box` classes
    def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
        # these are all TypeError ops
        op_str = all_arithmetic_operators
        def check(get_ser, test_ser):
            # check that we are getting a TypeError
            # with 'operate' (from core/ops.py) for the ops that are not
            # defined
            op = getattr(get_ser, op_str, None)
            # Previously, _validate_for_numeric_binop in core/indexes/base.py
            # did this for us.
            with pytest.raises(
                TypeError, match="operate|[cC]annot|unsupported operand"
            ):
                op(test_ser)
        # ## timedelta64 ###
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan
        # ## datetime64 ###
        dt1 = Series(
            [Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
        )
        dt1.iloc[2] = np.nan
        dt2 = Series(
            [Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
        )
        if op_str not in ["__sub__", "__rsub__"]:
            check(dt1, dt2)
        # ## datetime64 with timedelta ###
        # TODO(jreback) __rsub__ should raise?
        if op_str not in ["__add__", "__radd__", "__sub__"]:
            check(dt1, td1)
        # 8260, 10763
        # datetime64 with tz
        tz = "US/Eastern"
        dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
        dt2 = dt1.copy()
        dt2.iloc[2] = np.nan
        td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
        td2 = td1.copy()
        td2.iloc[1] = np.nan
        if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
            check(dt2, td2)
    def test_sub_single_tz(self):
        # GH#12290
        # same-tz subtraction returns tz-free Timedeltas
        s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")])
        s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")])
        result = s1 - s2
        expected = Series([Timedelta("2days")])
        tm.assert_series_equal(result, expected)
        result = s2 - s1
        expected = Series([Timedelta("-2days")])
        tm.assert_series_equal(result, expected)
    def test_dt64tz_series_sub_dtitz(self):
        # GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
        # (with same tz) raises, fixed by #19024
        dti = date_range("1999-09-30", periods=10, tz="US/Pacific")
        ser = Series(dti)
        expected = Series(TimedeltaIndex(["0days"] * 10))
        res = dti - ser
        tm.assert_series_equal(res, expected)
        res = ser - dti
        tm.assert_series_equal(res, expected)
    def test_sub_datetime_compat(self):
        # see GH#14088
        # stdlib tz-aware datetime behaves like the equivalent Timestamp
        s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])
        dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
        exp = Series([Timedelta("1 days"), NaT])
        tm.assert_series_equal(s - dt, exp)
        tm.assert_series_equal(s - Timestamp(dt), exp)
    def test_dt64_series_add_mixed_tick_DateOffset(self):
        # GH#4532
        # operate with pd.offsets
        s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        result = s + pd.offsets.Milli(5)
        result2 = pd.offsets.Milli(5) + s
        expected = Series(
            [Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
        )
        tm.assert_series_equal(result, expected)
        tm.assert_series_equal(result2, expected)
        # chained Tick offsets of different resolutions compose
        result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
        expected = Series(
            [Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
        )
        tm.assert_series_equal(result, expected)
    def test_datetime64_ops_nat(self):
        # GH#11349
        datetime_series = Series([NaT, Timestamp("19900315")])
        nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
        single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
        # subtraction
        # scalar -NaT is allowed (treated as timedelta-like NaT) ...
        tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
        # ... but negating a datetime64 Series is not
        msg = "Unary negative expects"
        with pytest.raises(TypeError, match=msg):
            -single_nat_dtype_datetime + datetime_series
        tm.assert_series_equal(
            -NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
        )
        with pytest.raises(TypeError, match=msg):
            -single_nat_dtype_datetime + nat_series_dtype_timestamp
        # addition
        tm.assert_series_equal(
            nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
        )
        tm.assert_series_equal(
            NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
        )
        tm.assert_series_equal(
            nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
        )
        tm.assert_series_equal(
            NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
        )
    # -------------------------------------------------------------
    # Invalid Operations
    # TODO: this block also needs to be de-duplicated and parametrized
    @pytest.mark.parametrize(
        "dt64_series",
        [
            Series([Timestamp("19900315"), Timestamp("19900315")]),
            Series([NaT, Timestamp("19900315")]),
            Series([NaT, NaT], dtype="datetime64[ns]"),
        ],
    )
    @pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
    def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
        # mul/div between datetime64 and numeric is never defined
        # multiplication
        msg = "cannot perform .* with this index type"
        with pytest.raises(TypeError, match=msg):
            dt64_series * one
        with pytest.raises(TypeError, match=msg):
            one * dt64_series
        # division
        with pytest.raises(TypeError, match=msg):
            dt64_series / one
        with pytest.raises(TypeError, match=msg):
            one / dt64_series
    # TODO: parametrize over box
    @pytest.mark.parametrize("op", ["__add__", "__radd__", "__sub__", "__rsub__"])
    def test_dt64_series_add_intlike(self, tz_naive_fixture, op):
        # GH#19123
        # integers and integer arrays may not be added to/subtracted from
        # datetime64 data
        tz = tz_naive_fixture
        dti = DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
        ser = Series(dti)
        other = Series([20, 30, 40], dtype="uint8")
        method = getattr(ser, op)
        msg = "|".join(
            [
                "Addition/subtraction of integers and integer-arrays",
                "cannot subtract .* from ndarray",
            ]
        )
        with pytest.raises(TypeError, match=msg):
            method(1)
        with pytest.raises(TypeError, match=msg):
            method(other)
        with pytest.raises(TypeError, match=msg):
            method(np.array(other))
        with pytest.raises(TypeError, match=msg):
            method(pd.Index(other))
    # -------------------------------------------------------------
    # Timezone-Centric Tests
    def test_operators_datetimelike_with_timezones(self):
        # tz-aware +/- timedelta: result matches stripping the tz, operating
        # naively, then re-localizing
        tz = "US/Eastern"
        dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
        dt2 = dt1.copy()
        dt2.iloc[2] = np.nan
        td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
        td2 = td1.copy()
        td2.iloc[1] = np.nan
        # copying + setting NaN should have dropped the inferred freq
        assert td2._values.freq is None
        result = dt1 + td1[0]
        exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        result = dt2 + td2[0]
        exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        # odd numpy behavior with scalar timedeltas
        result = td1[0] + dt1
        exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        result = td2[0] + dt2
        exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        result = dt1 - td1[0]
        exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        # scalar timedelta - datetime64 Series is undefined
        msg = "(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            td1[0] - dt1
        result = dt2 - td2[0]
        exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        with pytest.raises(TypeError, match=msg):
            td2[0] - dt2
        # Series-with-Series variants of the same identities
        result = dt1 + td1
        exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        result = dt2 + td2
        exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        result = dt1 - td1
        exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        result = dt2 - td2
        exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        msg = "cannot (add|subtract)"
        with pytest.raises(TypeError, match=msg):
            td1 - dt1
        with pytest.raises(TypeError, match=msg):
            td2 - dt2
class TestDatetimeIndexArithmetic:
    """Arithmetic on DatetimeIndex with ints, td64 arrays, and datelikes."""
    # -------------------------------------------------------------
    # Binary operations DatetimeIndex and int
    def test_dti_addsub_int(self, tz_naive_fixture, one):
        # Variants of `one` for #19012
        # plain-integer add/sub (and in-place forms) must raise
        tz = tz_naive_fixture
        rng = date_range("2000-01-01 09:00", freq="H", periods=10, tz=tz)
        msg = "Addition/subtraction of integers"
        with pytest.raises(TypeError, match=msg):
            rng + one
        with pytest.raises(TypeError, match=msg):
            rng += one
        with pytest.raises(TypeError, match=msg):
            rng - one
        with pytest.raises(TypeError, match=msg):
            rng -= one
    # -------------------------------------------------------------
    # __add__/__sub__ with integer arrays
    @pytest.mark.parametrize("freq", ["H", "D"])
    @pytest.mark.parametrize("int_holder", [np.array, pd.Index])
    def test_dti_add_intarray_tick(self, int_holder, freq):
        # GH#19959
        # integer arrays are rejected even when the DTI has a Tick freq
        dti = date_range("2016-01-01", periods=2, freq=freq)
        other = int_holder([4, -1])
        msg = "Addition/subtraction of integers|cannot subtract DatetimeArray from"
        assert_invalid_addsub_type(dti, other, msg)
    @pytest.mark.parametrize("freq", ["W", "M", "MS", "Q"])
    @pytest.mark.parametrize("int_holder", [np.array, pd.Index])
    def test_dti_add_intarray_non_tick(self, int_holder, freq):
        # GH#19959
        # same rejection for non-Tick frequencies
        dti = date_range("2016-01-01", periods=2, freq=freq)
        other = int_holder([4, -1])
        msg = "Addition/subtraction of integers|cannot subtract DatetimeArray from"
        assert_invalid_addsub_type(dti, other, msg)
    @pytest.mark.parametrize("int_holder", [np.array, pd.Index])
    def test_dti_add_intarray_no_freq(self, int_holder):
        # GH#19959
        # and for a freq-less DTI containing NaT
        dti = DatetimeIndex(["2016-01-01", "NaT", "2017-04-05 06:07:08"])
        other = int_holder([9, 4, -1])
        msg = "|".join(
            ["cannot subtract DatetimeArray from", "Addition/subtraction of integers"]
        )
        assert_invalid_addsub_type(dti, other, msg)
    # -------------------------------------------------------------
    # Binary operations DatetimeIndex and TimedeltaIndex/array
    def test_dti_add_tdi(self, tz_naive_fixture):
        # GH#17558
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        tdi = pd.timedelta_range("0 days", periods=10)
        expected = date_range("2017-01-01", periods=10, tz=tz)
        expected = expected._with_freq(None)
        # add with TimedeltaIndex
        result = dti + tdi
        tm.assert_index_equal(result, expected)
        result = tdi + dti
        tm.assert_index_equal(result, expected)
        # add with timedelta64 array
        result = dti + tdi.values
        tm.assert_index_equal(result, expected)
        result = tdi.values + dti
        tm.assert_index_equal(result, expected)
    def test_dti_iadd_tdi(self, tz_naive_fixture):
        # GH#17558
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        tdi = pd.timedelta_range("0 days", periods=10)
        expected = date_range("2017-01-01", periods=10, tz=tz)
        expected = expected._with_freq(None)
        # iadd with TimedeltaIndex
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result += tdi
        tm.assert_index_equal(result, expected)
        result = pd.timedelta_range("0 days", periods=10)
        result += dti
        tm.assert_index_equal(result, expected)
        # iadd with timedelta64 array
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result += tdi.values
        tm.assert_index_equal(result, expected)
        # NOTE(review): this repeats the TimedeltaIndex += dti case above
        # rather than exercising a .values analog — confirm intent.
        result = pd.timedelta_range("0 days", periods=10)
        result += dti
        tm.assert_index_equal(result, expected)
    def test_dti_sub_tdi(self, tz_naive_fixture):
        # GH#17558
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        tdi = pd.timedelta_range("0 days", periods=10)
        expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
        expected = expected._with_freq(None)
        # sub with TimedeltaIndex
        result = dti - tdi
        tm.assert_index_equal(result, expected)
        # the reverse (td64 - dt64) is undefined
        msg = "cannot subtract .*TimedeltaArray"
        with pytest.raises(TypeError, match=msg):
            tdi - dti
        # sub with timedelta64 array
        result = dti - tdi.values
        tm.assert_index_equal(result, expected)
        msg = "cannot subtract DatetimeArray from"
        with pytest.raises(TypeError, match=msg):
            tdi.values - dti
    def test_dti_isub_tdi(self, tz_naive_fixture):
        # GH#17558
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        tdi = pd.timedelta_range("0 days", periods=10)
        expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
        expected = expected._with_freq(None)
        # isub with TimedeltaIndex
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result -= tdi
        tm.assert_index_equal(result, expected)
        msg = "cannot subtract .* from a TimedeltaArray"
        with pytest.raises(TypeError, match=msg):
            tdi -= dti
        # isub with timedelta64 array
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result -= tdi.values
        tm.assert_index_equal(result, expected)
        msg = "|".join(
            [
                "cannot perform __neg__ with this index type:",
                "ufunc subtract cannot use operands with types",
                "cannot subtract DatetimeArray from",
            ]
        )
        with pytest.raises(TypeError, match=msg):
            tdi.values -= dti
    # -------------------------------------------------------------
    # Binary Operations DatetimeIndex and datetime-like
    # TODO: A couple other tests belong in this section. Move them in
    # A PR where there isn't already a giant diff.
    @pytest.mark.parametrize(
        "addend",
        [
            datetime(2011, 1, 1),
            DatetimeIndex(["2011-01-01", "2011-01-02"]),
            DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize("US/Eastern"),
            np.datetime64("2011-01-01"),
            Timestamp("2011-01-01"),
        ],
        ids=lambda x: type(x).__name__,
    )
    @pytest.mark.parametrize("tz", [None, "US/Eastern"])
    def test_add_datetimelike_and_dtarr(self, box_with_array, addend, tz):
        # GH#9631
        # datetime + datetime is undefined regardless of tz-awareness
        dti = DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize(tz)
        dtarr = tm.box_expected(dti, box_with_array)
        msg = "cannot add DatetimeArray and"
        with pytest.raises(TypeError, match=msg):
            dtarr + addend
        with pytest.raises(TypeError, match=msg):
            addend + dtarr
    # -------------------------------------------------------------
    def test_dta_add_sub_index(self, tz_naive_fixture):
        # Check that DatetimeArray defers to Index classes
        dti = date_range("20130101", periods=3, tz=tz_naive_fixture)
        dta = dti.array
        result = dta - dti
        expected = dti - dti
        tm.assert_index_equal(result, expected)
        tdi = result
        result = dta + tdi
        expected = dti + tdi
        tm.assert_index_equal(result, expected)
        result = dta - tdi
        expected = dti - tdi
        tm.assert_index_equal(result, expected)
    def test_sub_dti_dti(self):
        # previously performed setop (deprecated in 0.16.0), now changed to
        # return subtraction -> TimedeltaIndex (GH ...)
        dti = date_range("20130101", periods=3)
        dti_tz = date_range("20130101", periods=3).tz_localize("US/Eastern")
        dti_tz2 = date_range("20130101", periods=3).tz_localize("UTC")
        expected = TimedeltaIndex([0, 0, 0])
        result = dti - dti
        tm.assert_index_equal(result, expected)
        result = dti_tz - dti_tz
        tm.assert_index_equal(result, expected)
        # mixed tz-awareness (or mismatched tz) raises
        msg = "DatetimeArray subtraction must have the same timezones or"
        with pytest.raises(TypeError, match=msg):
            dti_tz - dti
        with pytest.raises(TypeError, match=msg):
            dti - dti_tz
        with pytest.raises(TypeError, match=msg):
            dti_tz - dti_tz2
        # isub
        dti -= dti
        tm.assert_index_equal(dti, expected)
        # different length raises ValueError
        dti1 = date_range("20130101", periods=3)
        dti2 = date_range("20130101", periods=4)
        msg = "cannot add indices of unequal length"
        with pytest.raises(ValueError, match=msg):
            dti1 - dti2
        # NaN propagation
        dti1 = DatetimeIndex(["2012-01-01", np.nan, "2012-01-03"])
        dti2 = DatetimeIndex(["2012-01-02", "2012-01-03", np.nan])
        expected = TimedeltaIndex(["1 days", np.nan, np.nan])
        result = dti2 - dti1
        tm.assert_index_equal(result, expected)
    # -------------------------------------------------------------------
    # TODO: Most of this block is moved from series or frame tests, needs
    # cleanup, box-parametrization, and de-duplication
    @pytest.mark.parametrize("op", [operator.add, operator.sub])
    def test_timedelta64_equal_timedelta_supported_ops(self, op):
        # np.timedelta64 and datetime.timedelta scalars of equal duration
        # must produce identical results
        ser = Series(
            [
                Timestamp("20130301"),
                Timestamp("20130228 23:00:00"),
                Timestamp("20130228 22:00:00"),
                Timestamp("20130228 21:00:00"),
            ]
        )
        intervals = ["D", "h", "m", "s", "us"]
        def timedelta64(*args):
            # see casting notes in NumPy gh-12927
            return np.sum(list(starmap(np.timedelta64, zip(args, intervals))))
        # exhaustively check all 0/1 combinations of the five components
        for d, h, m, s, us in product(*([range(2)] * 5)):
            nptd = timedelta64(d, h, m, s, us)
            pytd = timedelta(days=d, hours=h, minutes=m, seconds=s, microseconds=us)
            lhs = op(ser, nptd)
            rhs = op(ser, pytd)
            tm.assert_series_equal(lhs, rhs)
    def test_ops_nat_mixed_datetime64_timedelta64(self):
        # GH#11349
        timedelta_series = Series([NaT, Timedelta("1s")])
        datetime_series = Series([NaT, Timestamp("19900315")])
        nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]")
        nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
        single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
        single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]")
        # subtraction
        tm.assert_series_equal(
            datetime_series - single_nat_dtype_datetime, nat_series_dtype_timedelta
        )
        tm.assert_series_equal(
            datetime_series - single_nat_dtype_timedelta, nat_series_dtype_timestamp
        )
        tm.assert_series_equal(
            -single_nat_dtype_timedelta + datetime_series, nat_series_dtype_timestamp
        )
        # without a Series wrapping the NaT, it is ambiguous
        # whether it is a datetime64 or timedelta64
        # defaults to interpreting it as timedelta64
        tm.assert_series_equal(
            nat_series_dtype_timestamp - single_nat_dtype_datetime,
            nat_series_dtype_timedelta,
        )
        tm.assert_series_equal(
            nat_series_dtype_timestamp - single_nat_dtype_timedelta,
            nat_series_dtype_timestamp,
        )
        tm.assert_series_equal(
            -single_nat_dtype_timedelta + nat_series_dtype_timestamp,
            nat_series_dtype_timestamp,
        )
        # td64 - dt64 is undefined even when everything is NaT
        msg = "cannot subtract a datelike"
        with pytest.raises(TypeError, match=msg):
            timedelta_series - single_nat_dtype_datetime
        # addition
        tm.assert_series_equal(
            nat_series_dtype_timestamp + single_nat_dtype_timedelta,
            nat_series_dtype_timestamp,
        )
        tm.assert_series_equal(
            single_nat_dtype_timedelta + nat_series_dtype_timestamp,
            nat_series_dtype_timestamp,
        )
        tm.assert_series_equal(
            nat_series_dtype_timestamp + single_nat_dtype_timedelta,
            nat_series_dtype_timestamp,
        )
        tm.assert_series_equal(
            single_nat_dtype_timedelta + nat_series_dtype_timestamp,
            nat_series_dtype_timestamp,
        )
        tm.assert_series_equal(
            nat_series_dtype_timedelta + single_nat_dtype_datetime,
            nat_series_dtype_timestamp,
        )
        tm.assert_series_equal(
            single_nat_dtype_datetime + nat_series_dtype_timedelta,
            nat_series_dtype_timestamp,
        )
    def test_ufunc_coercions(self):
        # np.add/np.subtract ufuncs behave like the operators and preserve
        # freq for scalar timedeltas
        idx = date_range("2011-01-01", periods=3, freq="2D", name="x")
        delta = np.timedelta64(1, "D")
        exp = date_range("2011-01-02", periods=3, freq="2D", name="x")
        for result in [idx + delta, np.add(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            tm.assert_index_equal(result, exp)
            assert result.freq == "2D"
        exp = date_range("2010-12-31", periods=3, freq="2D", name="x")
        for result in [idx - delta, np.subtract(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            tm.assert_index_equal(result, exp)
            assert result.freq == "2D"
        # When adding/subtracting an ndarray (which has no .freq), the result
        # does not infer freq
        idx = idx._with_freq(None)
        delta = np.array(
            [np.timedelta64(1, "D"), np.timedelta64(2, "D"), np.timedelta64(3, "D")]
        )
        exp = DatetimeIndex(["2011-01-02", "2011-01-05", "2011-01-08"], name="x")
        for result in [idx + delta, np.add(idx, delta)]:
            tm.assert_index_equal(result, exp)
            assert result.freq == exp.freq
        exp = DatetimeIndex(["2010-12-31", "2011-01-01", "2011-01-02"], name="x")
        for result in [idx - delta, np.subtract(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            tm.assert_index_equal(result, exp)
            assert result.freq == exp.freq
    def test_dti_add_series(self, tz_naive_fixture, names):
        # GH#13905
        # DatetimeIndex + Series[timedelta] aligns and propagates names
        tz = tz_naive_fixture
        index = DatetimeIndex(
            ["2016-06-28 05:30", "2016-06-28 05:31"], tz=tz, name=names[0]
        )
        ser = Series([Timedelta(seconds=5)] * 2, index=index, name=names[1])
        expected = Series(index + Timedelta(seconds=5), index=index, name=names[2])
        # passing name arg isn't enough when names[2] is None
        expected.name = names[2]
        assert expected.dtype == index.dtype
        result = ser + index
        tm.assert_series_equal(result, expected)
        result2 = index + ser
        tm.assert_series_equal(result2, expected)
        # with a raw ndarray the result stays an Index
        expected = index + Timedelta(seconds=5)
        result3 = ser.values + index
        tm.assert_index_equal(result3, expected)
        result4 = index + ser.values
        tm.assert_index_equal(result4, expected)
    @pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
    def test_dti_addsub_offset_arraylike(
        self, tz_naive_fixture, names, op, index_or_series
    ):
        # GH#18849, GH#19744
        # object array of offsets: pointwise application + PerformanceWarning
        box = pd.Index
        other_box = index_or_series
        tz = tz_naive_fixture
        dti = date_range("2017-01-01", periods=2, tz=tz, name=names[0])
        other = other_box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], name=names[1])
        xbox = get_upcast_box(box, other)
        with tm.assert_produces_warning(PerformanceWarning):
            res = op(dti, other)
        expected = DatetimeIndex(
            [op(dti[n], other[n]) for n in range(len(dti))], name=names[2], freq="infer"
        )
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(res, expected)
    @pytest.mark.parametrize("other_box", [pd.Index, np.array])
    def test_dti_addsub_object_arraylike(
        self, tz_naive_fixture, box_with_array, other_box
    ):
        # mixed offset/Timedelta object array applied pointwise
        tz = tz_naive_fixture
        dti = date_range("2017-01-01", periods=2, tz=tz)
        dtarr = tm.box_expected(dti, box_with_array)
        other = other_box([pd.offsets.MonthEnd(), Timedelta(days=4)])
        xbox = get_upcast_box(box_with_array, other)
        expected = DatetimeIndex(["2017-01-31", "2017-01-06"], tz=tz_naive_fixture)
        expected = tm.box_expected(expected, xbox)
        with tm.assert_produces_warning(PerformanceWarning):
            result = dtarr + other
        tm.assert_equal(result, expected)
        expected = DatetimeIndex(["2016-12-31", "2016-12-29"], tz=tz_naive_fixture)
        expected = tm.box_expected(expected, xbox)
        with tm.assert_produces_warning(PerformanceWarning):
            result = dtarr - other
        tm.assert_equal(result, expected)
@pytest.mark.parametrize("years", [-1, 0, 1])
@pytest.mark.parametrize("months", [-2, 0, 2])
def test_shift_months(years, months):
    """shift_months on i8 values matches applying DateOffset pointwise."""
    stamps = [
        Timestamp("2000-01-05 00:15:00"),
        Timestamp("2000-01-31 00:23:00"),
        Timestamp("2000-01-01"),
        Timestamp("2000-02-29"),
        Timestamp("2000-12-31"),
    ]
    base = DatetimeIndex(stamps)
    total_months = years * 12 + months
    actual = DatetimeIndex(shift_months(base.asi8, total_months))
    offset = pd.offsets.DateOffset(years=years, months=months)
    expected = DatetimeIndex([ts + offset for ts in base])
    tm.assert_index_equal(actual, expected)
def test_dt64arr_addsub_object_dtype_2d():
    # Block-wise DataFrame operations will require operating on 2D
    # DatetimeArray/TimedeltaArray, so exercise those paths specifically.
    dti = date_range("1994-02-13", freq="2W", periods=4)
    dta2d = dti._data.reshape((4, 1))

    offsets_col = np.array([[pd.offsets.Day(n)] for n in range(4)])
    assert offsets_col.shape == dta2d.shape

    with tm.assert_produces_warning(PerformanceWarning):
        summed = dta2d + offsets_col
    with tm.assert_produces_warning(PerformanceWarning):
        expected = (dta2d[:, 0] + offsets_col[:, 0]).reshape(-1, 1)

    assert isinstance(summed, DatetimeArray)
    assert summed.freq is None
    tm.assert_numpy_array_equal(summed._data, expected._data)

    with tm.assert_produces_warning(PerformanceWarning):
        # Case where we expect to get a TimedeltaArray back
        diff = dta2d - dta2d.astype(object)

    assert isinstance(diff, TimedeltaArray)
    assert diff.shape == (4, 1)
    assert diff.freq is None
    assert (diff.asi8 == 0).all()
| {
"repo_name": "datapythonista/pandas",
"path": "pandas/tests/arithmetic/test_datetime64.py",
"copies": "2",
"size": "91274",
"license": "bsd-3-clause",
"hash": -4406093311762539000,
"line_mean": 35.1767736821,
"line_max": 88,
"alpha_frac": 0.5594473782,
"autogenerated": false,
"ratio": 3.667684641967371,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5227132020167371,
"avg_score": null,
"num_lines": null
} |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for numeric dtypes
from collections import abc
from decimal import Decimal
from itertools import combinations
import operator
import numpy as np
import pytest
import pandas as pd
from pandas import Index, Series, Timedelta, TimedeltaIndex
from pandas.core import ops
import pandas.util.testing as tm
# ------------------------------------------------------------------
# Comparisons
class TestNumericComparisons:
    def test_operator_series_comparison_zerorank(self):
        # GH#13006: comparisons with zero-rank numpy scalars should match
        # comparisons with the equivalent python scalar.
        result = np.float64(0) > pd.Series([1, 2, 3])
        expected = 0.0 > pd.Series([1, 2, 3])
        tm.assert_series_equal(result, expected)

        result = pd.Series([1, 2, 3]) < np.float64(0)
        expected = pd.Series([1, 2, 3]) < 0.0
        tm.assert_series_equal(result, expected)

        result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
        # compare against the same data used on the left-hand side
        # (previously Series([1, 2, 3]), which passed only because both
        # comparisons happen to be all-False)
        expected = 0.0 > pd.Series([0, 1, 2])
        tm.assert_series_equal(result, expected)

    def test_df_numeric_cmp_dt64_raises(self):
        # GH#8932, GH#22163: ordering comparisons between a numeric frame
        # and a datetime scalar raise; ==/!= are all-False/all-True.
        ts = pd.Timestamp.now()
        df = pd.DataFrame({'x': range(5)})
        with pytest.raises(TypeError):
            df > ts
        with pytest.raises(TypeError):
            df < ts
        with pytest.raises(TypeError):
            ts < df
        with pytest.raises(TypeError):
            ts > df

        assert not (df == ts).any().any()
        assert (df != ts).all().all()

    def test_compare_invalid(self):
        # GH#8058
        # a Timestamp name on a Series must not interfere with arithmetic
        a = pd.Series(np.random.randn(5), name=0)
        b = pd.Series(np.random.randn(5))
        b.name = pd.Timestamp('2000-01-01')
        tm.assert_series_equal(a / b, 1 / (b / a))
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaLike:
    """Numeric array-likes combined with timedelta64 data: mul/div produce
    timedelta64 results; add/sub are invalid."""

    # TODO: also check name retention
    @pytest.mark.parametrize('box_cls', [np.array, pd.Index, pd.Series])
    @pytest.mark.parametrize('left', [
        pd.RangeIndex(10, 40, 10)] + [cls([10, 20, 30], dtype=dtype)
        for dtype in ['i1', 'i2', 'i4', 'i8',
                      'u1', 'u2', 'u4', 'u8',
                      'f2', 'f4', 'f8']
        for cls in [pd.Series, pd.Index]],
        ids=lambda x: type(x).__name__ + str(x.dtype))
    def test_mul_td64arr(self, left, box_cls):
        # GH#22390: numeric * timedelta64 array is commutative
        right = np.array([1, 2, 3], dtype='m8[s]')
        right = box_cls(right)

        expected = pd.TimedeltaIndex(['10s', '40s', '90s'])
        if isinstance(left, pd.Series) or box_cls is pd.Series:
            expected = pd.Series(expected)

        result = left * right
        tm.assert_equal(result, expected)

        result = right * left
        tm.assert_equal(result, expected)

    # TODO: also check name retention
    @pytest.mark.parametrize('box_cls', [np.array, pd.Index, pd.Series])
    @pytest.mark.parametrize('left', [
        pd.RangeIndex(10, 40, 10)] + [cls([10, 20, 30], dtype=dtype)
        for dtype in ['i1', 'i2', 'i4', 'i8',
                      'u1', 'u2', 'u4', 'u8',
                      'f2', 'f4', 'f8']
        for cls in [pd.Series, pd.Index]],
        ids=lambda x: type(x).__name__ + str(x.dtype))
    def test_div_td64arr(self, left, box_cls):
        # GH#22390: timedelta64 / numeric works; numeric / timedelta64 raises
        right = np.array([10, 40, 90], dtype='m8[s]')
        right = box_cls(right)

        expected = pd.TimedeltaIndex(['1s', '2s', '3s'])
        if isinstance(left, pd.Series) or box_cls is pd.Series:
            expected = pd.Series(expected)

        result = right / left
        tm.assert_equal(result, expected)

        result = right // left
        tm.assert_equal(result, expected)

        # dividing a numeric by a timedelta is undefined
        with pytest.raises(TypeError):
            left / right

        with pytest.raises(TypeError):
            left // right

    # TODO: de-duplicate with test_numeric_arr_mul_tdscalar
    def test_ops_series(self):
        # regression test for GH#8813
        td = Timedelta('1 day')
        other = pd.Series([1, 2])
        expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
        tm.assert_series_equal(expected, td * other)
        tm.assert_series_equal(expected, other * td)

    # TODO: also test non-nanosecond timedelta64 and Tick objects;
    # see test_numeric_arr_rdiv_tdscalar for note on these failing
    @pytest.mark.parametrize('scalar_td', [
        Timedelta(days=1),
        Timedelta(days=1).to_timedelta64(),
        Timedelta(days=1).to_pytimedelta()],
        ids=lambda x: type(x).__name__)
    def test_numeric_arr_mul_tdscalar(self, scalar_td, numeric_idx, box):
        # GH#19333: numeric * timedelta scalar is commutative
        index = numeric_idx

        expected = pd.timedelta_range('0 days', '4 days')

        index = tm.box_expected(index, box)
        expected = tm.box_expected(expected, box)

        result = index * scalar_td
        tm.assert_equal(result, expected)

        commute = scalar_td * index
        tm.assert_equal(commute, expected)

    def test_numeric_arr_rdiv_tdscalar(self, three_days, numeric_idx, box):
        # timedelta scalar / numeric works; the reverse raises
        index = numeric_idx[1:3]

        expected = TimedeltaIndex(['3 Days', '36 Hours'])

        index = tm.box_expected(index, box)
        expected = tm.box_expected(expected, box)

        result = three_days / index
        tm.assert_equal(result, expected)

        with pytest.raises(TypeError):
            index / three_days

    @pytest.mark.parametrize('other', [
        pd.Timedelta(hours=31),
        pd.Timedelta(hours=31).to_pytimedelta(),
        pd.Timedelta(hours=31).to_timedelta64(),
        pd.Timedelta(hours=31).to_timedelta64().astype('m8[h]'),
        np.timedelta64('NaT'),
        np.timedelta64('NaT', 'D'),
        pd.offsets.Minute(3),
        pd.offsets.Second(0)])
    def test_add_sub_timedeltalike_invalid(self, numeric_idx, other, box):
        # adding/subtracting any timedelta-like to a numeric box raises,
        # in both operand orders
        left = tm.box_expected(numeric_idx, box)
        with pytest.raises(TypeError):
            left + other
        with pytest.raises(TypeError):
            other + left
        with pytest.raises(TypeError):
            left - other
        with pytest.raises(TypeError):
            other - left
# ------------------------------------------------------------------
# Arithmetic
class TestDivisionByZero:
    """Division, floordiv, mod, and divmod by zero follow float semantics
    (inf/nan) instead of raising, even for integer dtypes."""

    def test_div_zero(self, zero, numeric_idx):
        idx = numeric_idx
        # 0/0 -> nan; positive/0 -> inf
        expected = pd.Index([np.nan, np.inf, np.inf, np.inf, np.inf],
                            dtype=np.float64)
        result = idx / zero
        tm.assert_index_equal(result, expected)
        # the int64 Series path must agree with the Index result
        ser_compat = Series(idx).astype('i8') / np.array(zero).astype('i8')
        tm.assert_series_equal(ser_compat, Series(result))

    def test_floordiv_zero(self, zero, numeric_idx):
        idx = numeric_idx
        # floordiv by zero follows the same inf/nan semantics as truediv
        expected = pd.Index([np.nan, np.inf, np.inf, np.inf, np.inf],
                            dtype=np.float64)

        result = idx // zero
        tm.assert_index_equal(result, expected)
        ser_compat = Series(idx).astype('i8') // np.array(zero).astype('i8')
        tm.assert_series_equal(ser_compat, Series(result))

    def test_mod_zero(self, zero, numeric_idx):
        idx = numeric_idx
        # modulo by zero is nan everywhere
        expected = pd.Index([np.nan, np.nan, np.nan, np.nan, np.nan],
                            dtype=np.float64)
        result = idx % zero
        tm.assert_index_equal(result, expected)
        ser_compat = Series(idx).astype('i8') % np.array(zero).astype('i8')
        tm.assert_series_equal(ser_compat, Series(result))

    def test_divmod_zero(self, zero, numeric_idx):
        idx = numeric_idx

        # quotient follows division semantics; remainder is all-nan
        exleft = pd.Index([np.nan, np.inf, np.inf, np.inf, np.inf],
                          dtype=np.float64)
        exright = pd.Index([np.nan, np.nan, np.nan, np.nan, np.nan],
                           dtype=np.float64)

        result = divmod(idx, zero)
        tm.assert_index_equal(result[0], exleft)
        tm.assert_index_equal(result[1], exright)

    # ------------------------------------------------------------------

    @pytest.mark.parametrize('dtype2', [
        np.int64, np.int32, np.int16, np.int8,
        np.float64, np.float32, np.float16,
        np.uint64, np.uint32, np.uint16, np.uint8])
    @pytest.mark.parametrize('dtype1', [np.int64, np.float64, np.uint64])
    def test_ser_div_ser(self, dtype1, dtype2):
        # no longer do integer div for any ops, but deal with the 0's
        first = Series([3, 4, 5, 8], name='first').astype(dtype1)
        second = Series([0, 0, 0, 3], name='second').astype(dtype2)

        with np.errstate(all='ignore'):
            expected = Series(first.values.astype(np.float64) / second.values,
                              dtype='float64', name=None)
        # the first three entries divide by zero -> inf
        expected.iloc[0:3] = np.inf

        result = first / second
        tm.assert_series_equal(result, expected)
        # division is not commutative
        assert not result.equals(second / first)

    def test_rdiv_zero_compat(self):
        # GH#8674: 0 / x == 0.0 regardless of which operand is boxed
        zero_array = np.array([0] * 5)
        data = np.random.randn(5)
        expected = Series([0.] * 5)

        result = zero_array / Series(data)
        tm.assert_series_equal(result, expected)

        result = Series(zero_array) / data
        tm.assert_series_equal(result, expected)

        result = Series(zero_array) / Series(data)
        tm.assert_series_equal(result, expected)

    def test_div_zero_inf_signs(self):
        # GH#9144, inf signing: the inf takes the numerator's sign
        ser = Series([-1, 0, 1], name='first')
        expected = Series([-np.inf, np.nan, np.inf], name='first')

        result = ser / 0
        tm.assert_series_equal(result, expected)

    def test_rdiv_zero(self):
        # GH#9144: 0 / x is 0.0 except 0 / 0, which is nan
        ser = Series([-1, 0, 1], name='first')
        expected = Series([0.0, np.nan, 0.0], name='first')

        result = 0 / ser
        tm.assert_series_equal(result, expected)

    def test_floordiv_div(self):
        # GH#9144: floordiv by zero matches truediv-by-zero semantics
        ser = Series([-1, 0, 1], name='first')

        result = ser // 0
        expected = Series([-np.inf, np.nan, np.inf], name='first')
        tm.assert_series_equal(result, expected)

    def test_df_div_zero_df(self):
        # integer div, but deal with the 0's (GH#9144)
        df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
        result = df / df

        first = pd.Series([1.0, 1.0, 1.0, 1.0])
        # 0 / 0 -> nan
        second = pd.Series([np.nan, np.nan, np.nan, 1])
        expected = pd.DataFrame({'first': first, 'second': second})
        tm.assert_frame_equal(result, expected)

    def test_df_div_zero_array(self):
        # integer div, but deal with the 0's (GH#9144)
        df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})

        first = pd.Series([1.0, 1.0, 1.0, 1.0])
        second = pd.Series([np.nan, np.nan, np.nan, 1])
        expected = pd.DataFrame({'first': first, 'second': second})

        with np.errstate(all='ignore'):
            arr = df.values.astype('float') / df.values
        result = pd.DataFrame(arr, index=df.index,
                              columns=df.columns)
        tm.assert_frame_equal(result, expected)

    def test_df_div_zero_int(self):
        # integer div, but deal with the 0's (GH#9144)
        df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})

        result = df / 0
        expected = pd.DataFrame(np.inf, index=df.index, columns=df.columns)
        expected.iloc[0:3, 1] = np.nan
        tm.assert_frame_equal(result, expected)

        # numpy has a slightly different (wrong) treatment
        with np.errstate(all='ignore'):
            arr = df.values.astype('float64') / 0
        result2 = pd.DataFrame(arr, index=df.index,
                               columns=df.columns)
        tm.assert_frame_equal(result2, expected)

    def test_df_div_zero_series_does_not_commute(self):
        # integer div, but deal with the 0's (GH#9144)
        df = pd.DataFrame(np.random.randn(10, 5))
        ser = df[0]
        res = ser / df
        res2 = df / ser
        assert not res.fillna(0).equals(res2.fillna(0))

    # ------------------------------------------------------------------
    # Mod By Zero

    def test_df_mod_zero_df(self):
        # GH#3590, modulo as ints
        df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})

        # this is technically wrong, as the integer portion is coerced to float
        # ###
        first = pd.Series([0, 0, 0, 0], dtype='float64')
        second = pd.Series([np.nan, np.nan, np.nan, 0])
        expected = pd.DataFrame({'first': first, 'second': second})
        result = df % df
        tm.assert_frame_equal(result, expected)

    def test_df_mod_zero_array(self):
        # GH#3590, modulo as ints
        df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})

        # this is technically wrong, as the integer portion is coerced to float
        # ###
        first = pd.Series([0, 0, 0, 0], dtype='float64')
        second = pd.Series([np.nan, np.nan, np.nan, 0])
        expected = pd.DataFrame({'first': first, 'second': second})

        # numpy has a slightly different (wrong) treatment
        with np.errstate(all='ignore'):
            arr = df.values % df.values
        result2 = pd.DataFrame(arr, index=df.index,
                               columns=df.columns, dtype='float64')
        result2.iloc[0:3, 1] = np.nan
        tm.assert_frame_equal(result2, expected)

    def test_df_mod_zero_int(self):
        # GH#3590, modulo as ints
        df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})

        result = df % 0
        expected = pd.DataFrame(np.nan, index=df.index, columns=df.columns)
        tm.assert_frame_equal(result, expected)

        # numpy has a slightly different (wrong) treatment
        with np.errstate(all='ignore'):
            arr = df.values.astype('float64') % 0
        result2 = pd.DataFrame(arr, index=df.index, columns=df.columns)
        tm.assert_frame_equal(result2, expected)

    def test_df_mod_zero_series_does_not_commute(self):
        # GH#3590, modulo as ints
        # not commutative with series
        df = pd.DataFrame(np.random.randn(10, 5))
        ser = df[0]
        res = ser % df
        res2 = df % ser
        assert not res.fillna(0).equals(res2.fillna(0))
class TestMultiplicationDivision:
    # __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
    # for non-timestamp/timedelta/period dtypes

    @pytest.mark.parametrize('box', [
        pytest.param(pd.Index,
                     marks=pytest.mark.xfail(reason="Index.__div__ always "
                                                    "raises",
                                             raises=TypeError)),
        pd.Series,
        pd.DataFrame
    ], ids=lambda x: x.__name__)
    def test_divide_decimal(self, box):
        # resolves issue GH#9787
        ser = Series([Decimal(10)])
        expected = Series([Decimal(5)])

        ser = tm.box_expected(ser, box)
        expected = tm.box_expected(expected, box)

        result = ser / Decimal(2)
        tm.assert_equal(result, expected)

        result = ser // Decimal(2)
        tm.assert_equal(result, expected)

    def test_div_equiv_binop(self):
        # Test Series.div as well as Series.__div__
        # float/integer issue
        # GH#7785
        first = Series([1, 0], name='first')
        second = Series([-0.01, -0.02], name='second')
        expected = Series([-0.01, -np.inf])

        result = second.div(first)
        tm.assert_series_equal(result, expected, check_names=False)

        result = second / first
        tm.assert_series_equal(result, expected)

    def test_div_int(self, numeric_idx):
        # truediv always produces float64, even when dividing by 1
        idx = numeric_idx
        result = idx / 1
        expected = idx.astype('float64')
        tm.assert_index_equal(result, expected)

        result = idx / 2
        expected = Index(idx.values / 2)
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize('op', [operator.mul, ops.rmul, operator.floordiv])
    def test_mul_int_identity(self, op, numeric_idx, box):
        # mul/rmul/floordiv by 1 is the identity
        idx = numeric_idx
        idx = tm.box_expected(idx, box)

        result = op(idx, 1)
        tm.assert_equal(result, idx)

    def test_mul_int_array(self, numeric_idx):
        idx = numeric_idx
        didx = idx * idx

        result = idx * np.array(5, dtype='int64')
        tm.assert_index_equal(result, idx * 5)

        # match the index's unsignedness to avoid dtype upcasting
        arr_dtype = 'uint64' if isinstance(idx, pd.UInt64Index) else 'int64'
        result = idx * np.arange(5, dtype=arr_dtype)
        tm.assert_index_equal(result, didx)

    def test_mul_int_series(self, numeric_idx):
        idx = numeric_idx
        didx = idx * idx

        arr_dtype = 'uint64' if isinstance(idx, pd.UInt64Index) else 'int64'
        result = idx * Series(np.arange(5, dtype=arr_dtype))
        tm.assert_series_equal(result, Series(didx))

    def test_mul_float_series(self, numeric_idx):
        idx = numeric_idx
        rng5 = np.arange(5, dtype='float64')

        result = idx * Series(rng5 + 0.1)
        expected = Series(rng5 * (rng5 + 0.1))
        tm.assert_series_equal(result, expected)

    def test_mul_index(self, numeric_idx):
        # in general not true for RangeIndex
        idx = numeric_idx
        if not isinstance(idx, pd.RangeIndex):
            result = idx * idx
            tm.assert_index_equal(result, idx ** 2)

    def test_mul_datelike_raises(self, numeric_idx):
        idx = numeric_idx
        with pytest.raises(TypeError):
            idx * pd.date_range('20130101', periods=5)

    def test_mul_size_mismatch_raises(self, numeric_idx):
        idx = numeric_idx
        with pytest.raises(ValueError):
            idx * idx[0:3]
        with pytest.raises(ValueError):
            idx * np.array([1, 2])

    @pytest.mark.parametrize('op', [operator.pow, ops.rpow])
    def test_pow_float(self, op, numeric_idx, box):
        # test power calculations both ways, GH#14973
        idx = numeric_idx
        expected = pd.Float64Index(op(idx.values, 2.0))

        idx = tm.box_expected(idx, box)
        expected = tm.box_expected(expected, box)

        result = op(idx, 2.0)
        tm.assert_equal(result, expected)

    def test_modulo(self, numeric_idx, box):
        # GH#9244
        idx = numeric_idx
        expected = Index(idx.values % 2)

        idx = tm.box_expected(idx, box)
        expected = tm.box_expected(expected, box)

        result = idx % 2
        tm.assert_equal(result, expected)

    def test_divmod_scalar(self, numeric_idx):
        idx = numeric_idx

        result = divmod(idx, 2)
        with np.errstate(all='ignore'):
            div, mod = divmod(idx.values, 2)

        expected = Index(div), Index(mod)
        for r, e in zip(result, expected):
            tm.assert_index_equal(r, e)

    def test_divmod_ndarray(self, numeric_idx):
        idx = numeric_idx
        other = np.ones(idx.values.shape, dtype=idx.values.dtype) * 2

        result = divmod(idx, other)
        with np.errstate(all='ignore'):
            div, mod = divmod(idx.values, other)

        expected = Index(div), Index(mod)
        for r, e in zip(result, expected):
            tm.assert_index_equal(r, e)

    def test_divmod_series(self, numeric_idx):
        idx = numeric_idx
        other = np.ones(idx.values.shape, dtype=idx.values.dtype) * 2

        result = divmod(idx, Series(other))
        with np.errstate(all='ignore'):
            div, mod = divmod(idx.values, other)

        expected = Series(div), Series(mod)
        for r, e in zip(result, expected):
            tm.assert_series_equal(r, e)

    @pytest.mark.parametrize('other', [np.nan, 7, -23, 2.718, -3.14, np.inf])
    def test_ops_np_scalar(self, other):
        # DataFrame ops with zero-dim numpy arrays match plain scalar ops
        vals = np.random.randn(5, 3)
        f = lambda x: pd.DataFrame(x, index=list('ABCDE'),
                                   columns=['jim', 'joe', 'jolie'])

        df = f(vals)

        tm.assert_frame_equal(df / np.array(other), f(vals / other))
        tm.assert_frame_equal(np.array(other) * df, f(vals * other))
        tm.assert_frame_equal(df + np.array(other), f(vals + other))
        tm.assert_frame_equal(np.array(other) - df, f(other - vals))

    # TODO: This came from series.test.test_operators, needs cleanup
    def test_operators_frame(self):
        # rpow does not work with DataFrame
        ts = tm.makeTimeSeries()
        ts.name = 'ts'

        df = pd.DataFrame({'A': ts})

        tm.assert_series_equal(ts + ts, ts + df['A'],
                               check_names=False)
        tm.assert_series_equal(ts ** ts, ts ** df['A'],
                               check_names=False)
        tm.assert_series_equal(ts < ts, ts < df['A'],
                               check_names=False)
        tm.assert_series_equal(ts / ts, ts / df['A'],
                               check_names=False)

    # TODO: this came from tests.series.test_analytics, needs cleanup and
    # de-duplication with test_modulo above
    def test_modulo2(self):
        with np.errstate(all='ignore'):

            # GH#3590, modulo as ints
            p = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
            result = p['first'] % p['second']
            expected = Series(p['first'].values % p['second'].values,
                              dtype='float64')
            expected.iloc[0:3] = np.nan
            tm.assert_series_equal(result, expected)

            result = p['first'] % 0
            expected = Series(np.nan, index=p.index, name='first')
            tm.assert_series_equal(result, expected)

            p = p.astype('float64')
            result = p['first'] % p['second']
            expected = Series(p['first'].values % p['second'].values)
            tm.assert_series_equal(result, expected)

            # NOTE(review): this second astype is redundant — p is already
            # float64 after the cast above
            p = p.astype('float64')
            result = p['first'] % p['second']
            result2 = p['second'] % p['first']
            assert not result.equals(result2)

            # GH#9144
            s = Series([0, 1])

            result = s % 0
            expected = Series([np.nan, np.nan])
            tm.assert_series_equal(result, expected)

            result = 0 % s
            expected = Series([np.nan, 0.0])
            tm.assert_series_equal(result, expected)
class TestAdditionSubtraction:
    # __add__, __sub__, __radd__, __rsub__, __iadd__, __isub__
    # for non-timestamp/timedelta/period dtypes

    # TODO: This came from series.test.test_operators, needs cleanup
    def test_arith_ops_df_compat(self):
        # GH#1134: Series and DataFrame addition align identically
        s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
        s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')

        exp = pd.Series([3.0, 4.0, np.nan, np.nan],
                        index=list('ABCD'), name='x')
        tm.assert_series_equal(s1 + s2, exp)
        tm.assert_series_equal(s2 + s1, exp)

        exp = pd.DataFrame({'x': [3.0, 4.0, np.nan, np.nan]},
                           index=list('ABCD'))
        tm.assert_frame_equal(s1.to_frame() + s2.to_frame(), exp)
        tm.assert_frame_equal(s2.to_frame() + s1.to_frame(), exp)

        # different length
        s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
        s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')

        exp = pd.Series([3, 4, 5, np.nan],
                        index=list('ABCD'), name='x')
        tm.assert_series_equal(s3 + s4, exp)
        tm.assert_series_equal(s4 + s3, exp)

        exp = pd.DataFrame({'x': [3, 4, 5, np.nan]},
                           index=list('ABCD'))
        tm.assert_frame_equal(s3.to_frame() + s4.to_frame(), exp)
        tm.assert_frame_equal(s4.to_frame() + s3.to_frame(), exp)

    # TODO: This came from series.test.test_operators, needs cleanup
    def test_series_frame_radd_bug(self):
        # GH#353: string radd is applied elementwise
        vals = pd.Series(tm.rands_array(5, 10))
        result = 'foo_' + vals
        expected = vals.map(lambda x: 'foo_' + x)
        tm.assert_series_equal(result, expected)

        frame = pd.DataFrame({'vals': vals})
        result = 'foo_' + frame
        expected = pd.DataFrame({'vals': vals.map(lambda x: 'foo_' + x)})
        tm.assert_frame_equal(result, expected)

        ts = tm.makeTimeSeries()
        ts.name = 'ts'

        # really raise this time
        now = pd.Timestamp.now().to_pydatetime()
        with pytest.raises(TypeError):
            now + ts

        with pytest.raises(TypeError):
            ts + now

    # TODO: This came from series.test.test_operators, needs cleanup
    def test_datetime64_with_index(self):
        # arithmetic integer ops with an index
        ser = pd.Series(np.random.randn(5))
        expected = ser - ser.index.to_series()
        result = ser - ser.index
        tm.assert_series_equal(result, expected)

        # GH#4629
        # arithmetic datetime64 ops with an index
        ser = pd.Series(pd.date_range('20130101', periods=5),
                        index=pd.date_range('20130101', periods=5))
        expected = ser - ser.index.to_series()
        result = ser - ser.index
        tm.assert_series_equal(result, expected)

        with pytest.raises(TypeError):
            # GH#18850: datetime64 minus PeriodIndex is not defined
            result = ser - ser.index.to_period()

        df = pd.DataFrame(np.random.randn(5, 2),
                          index=pd.date_range('20130101', periods=5))
        df['date'] = pd.Timestamp('20130102')
        df['expected'] = df['date'] - df.index.to_series()
        df['result'] = df['date'] - df.index
        tm.assert_series_equal(df['result'], df['expected'], check_names=False)

    # TODO: taken from tests.frame.test_operators, needs cleanup
    def test_frame_operators(self, float_frame):
        frame = float_frame
        frame2 = pd.DataFrame(float_frame, columns=['D', 'C', 'B', 'A'])

        garbage = np.random.random(4)
        colSeries = pd.Series(garbage, index=np.array(frame.columns))

        idSum = frame + frame
        seriesSum = frame + colSeries

        # frame + frame doubles each non-nan value and preserves nans
        for col, series in idSum.items():
            for idx, val in series.items():
                origVal = frame[col][idx] * 2
                if not np.isnan(val):
                    assert val == origVal
                else:
                    assert np.isnan(origVal)

        # frame + Series broadcasts the Series across columns
        for col, series in seriesSum.items():
            for idx, val in series.items():
                origVal = frame[col][idx] + colSeries[col]
                if not np.isnan(val):
                    assert val == origVal
                else:
                    assert np.isnan(origVal)

        added = frame2 + frame2
        expected = frame2 * 2
        tm.assert_frame_equal(added, expected)

        # object/string columns concatenate elementwise; None stays nan
        df = pd.DataFrame({'a': ['a', None, 'b']})
        tm.assert_frame_equal(df + df,
                              pd.DataFrame({'a': ['aa', np.nan, 'bb']}))

        # Test for issue #10181: empty frames should add without error
        for dtype in ('float', 'int64'):
            frames = [
                pd.DataFrame(dtype=dtype),
                pd.DataFrame(columns=['A'], dtype=dtype),
                pd.DataFrame(index=[0], dtype=dtype),
            ]
            for df in frames:
                assert (df + df).equals(df)
                tm.assert_frame_equal(df + df, df)

    # TODO: taken from tests.series.test_operators; needs cleanup
    def test_series_operators(self):
        def _check_op(series, other, op, pos_only=False, check_dtype=True):
            # the cython/numpy fast path must agree with the pure-python
            # elementwise `combine`
            left = np.abs(series) if pos_only else series
            right = np.abs(other) if pos_only else other

            cython_or_numpy = op(left, right)
            python = left.combine(right, op)
            tm.assert_series_equal(cython_or_numpy, python,
                                   check_dtype=check_dtype)

        def check(series, other):
            simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']

            for opname in simple_ops:
                _check_op(series, other, getattr(operator, opname))

            _check_op(series, other, operator.pow, pos_only=True)

            # reversed-operand variants
            _check_op(series, other, lambda x, y: operator.add(y, x))
            _check_op(series, other, lambda x, y: operator.sub(y, x))
            _check_op(series, other, lambda x, y: operator.truediv(y, x))
            _check_op(series, other, lambda x, y: operator.floordiv(y, x))
            _check_op(series, other, lambda x, y: operator.mul(y, x))
            _check_op(series, other, lambda x, y: operator.pow(y, x),
                      pos_only=True)
            _check_op(series, other, lambda x, y: operator.mod(y, x))

        tser = tm.makeTimeSeries().rename('ts')
        check(tser, tser * 2)
        check(tser, tser * 0)
        check(tser, tser[::2])
        check(tser, 5)

        def check_comparators(series, other, check_dtype=True):
            _check_op(series, other, operator.gt, check_dtype=check_dtype)
            _check_op(series, other, operator.ge, check_dtype=check_dtype)
            _check_op(series, other, operator.eq, check_dtype=check_dtype)
            _check_op(series, other, operator.lt, check_dtype=check_dtype)
            _check_op(series, other, operator.le, check_dtype=check_dtype)

        check_comparators(tser, 5)
        check_comparators(tser, tser + 1, check_dtype=False)

    # TODO: taken from tests.series.test_operators; needs cleanup
    def test_divmod(self):
        def check(series, other):
            results = divmod(series, other)
            if isinstance(other, abc.Iterable) and len(series) != len(other):
                # if the lengths don't match, this is the test where we use
                # `tser[::2]`. Pad every other value in `other_np` with nan.
                other_np = []
                for n in other:
                    other_np.append(n)
                    other_np.append(np.nan)
            else:
                other_np = other
            other_np = np.asarray(other_np)
            with np.errstate(all='ignore'):
                expecteds = divmod(series.values, np.asarray(other_np))

            for result, expected in zip(results, expecteds):
                # check the values, name, and index separately
                tm.assert_almost_equal(np.asarray(result), expected)

                assert result.name == series.name
                tm.assert_index_equal(result.index, series.index)

        tser = tm.makeTimeSeries().rename('ts')
        check(tser, tser * 2)
        check(tser, tser * 0)
        check(tser, tser[::2])
        check(tser, 5)
class TestUFuncCompat:
    """numpy ufuncs applied to pandas containers should return the same
    kind of container with the ufunc applied to the values."""

    @pytest.mark.parametrize('holder', [pd.Int64Index, pd.UInt64Index,
                                        pd.Float64Index, pd.RangeIndex,
                                        pd.Series])
    def test_ufunc_compat(self, holder):
        box = pd.Series if holder is pd.Series else pd.Index

        if holder is pd.RangeIndex:
            # RangeIndex is constructed from start/stop, not from values
            idx = pd.RangeIndex(0, 5)
        else:
            idx = holder(np.arange(5, dtype='int64'))
        result = np.sin(idx)
        expected = box(np.sin(np.arange(5, dtype='int64')))
        tm.assert_equal(result, expected)

    @pytest.mark.parametrize('holder', [pd.Int64Index, pd.UInt64Index,
                                        pd.Float64Index, pd.Series])
    def test_ufunc_coercions(self, holder):
        # ufuncs and arithmetic with a float scalar coerce to float64
        # while retaining the container type and name
        idx = holder([1, 2, 3, 4, 5], name='x')
        box = pd.Series if holder is pd.Series else pd.Index

        result = np.sqrt(idx)
        assert result.dtype == 'f8' and isinstance(result, box)
        exp = pd.Float64Index(np.sqrt(np.array([1, 2, 3, 4, 5])), name='x')
        exp = tm.box_expected(exp, box)
        tm.assert_equal(result, exp)

        result = np.divide(idx, 2.)
        assert result.dtype == 'f8' and isinstance(result, box)
        exp = pd.Float64Index([0.5, 1., 1.5, 2., 2.5], name='x')
        exp = tm.box_expected(exp, box)
        tm.assert_equal(result, exp)

        # _evaluate_numeric_binop
        result = idx + 2.
        assert result.dtype == 'f8' and isinstance(result, box)
        exp = pd.Float64Index([3., 4., 5., 6., 7.], name='x')
        exp = tm.box_expected(exp, box)
        tm.assert_equal(result, exp)

        result = idx - 2.
        assert result.dtype == 'f8' and isinstance(result, box)
        exp = pd.Float64Index([-1., 0., 1., 2., 3.], name='x')
        exp = tm.box_expected(exp, box)
        tm.assert_equal(result, exp)

        result = idx * 1.
        assert result.dtype == 'f8' and isinstance(result, box)
        exp = pd.Float64Index([1., 2., 3., 4., 5.], name='x')
        exp = tm.box_expected(exp, box)
        tm.assert_equal(result, exp)

        result = idx / 2.
        assert result.dtype == 'f8' and isinstance(result, box)
        exp = pd.Float64Index([0.5, 1., 1.5, 2., 2.5], name='x')
        exp = tm.box_expected(exp, box)
        tm.assert_equal(result, exp)
class TestObjectDtypeEquivalence:
    # Tests that arithmetic operations match operations executed elementwise

    @pytest.mark.parametrize('dtype', [None, object])
    def test_numarr_with_dtype_add_nan(self, dtype, box):
        # nan + anything is nan, in either operand order
        values = pd.Series([1, 2, 3], dtype=dtype)
        all_nan = pd.Series([np.nan, np.nan, np.nan], dtype=dtype)
        values = tm.box_expected(values, box)
        all_nan = tm.box_expected(all_nan, box)

        tm.assert_equal(np.nan + values, all_nan)
        tm.assert_equal(values + np.nan, all_nan)

    @pytest.mark.parametrize('dtype', [None, object])
    def test_numarr_with_dtype_add_int(self, dtype, box):
        # integer addition commutes and keeps the requested dtype
        values = pd.Series([1, 2, 3], dtype=dtype)
        bumped = pd.Series([2, 3, 4], dtype=dtype)
        values = tm.box_expected(values, box)
        bumped = tm.box_expected(bumped, box)

        tm.assert_equal(1 + values, bumped)
        tm.assert_equal(values + 1, bumped)

    # TODO: moved from tests.series.test_operators; needs cleanup
    @pytest.mark.parametrize('op', [operator.add, operator.sub, operator.mul,
                                    operator.truediv, operator.floordiv])
    def test_operators_reverse_object(self, op):
        # GH#56: scalar-op-object-Series matches scalar-op-float-Series
        obj_ser = pd.Series(np.random.randn(10), index=np.arange(10),
                            dtype=object)

        via_object = op(1., obj_ser)
        via_float = op(1., obj_ser.astype(float))
        tm.assert_series_equal(via_object.astype(float), via_float)
class TestNumericArithmeticUnsorted:
# Tests in this class have been moved from type-specific test modules
# but not yet sorted, parametrized, and de-duplicated
def check_binop(self, ops, scalars, idxs):
for op in ops:
for a, b in combinations(idxs, 2):
result = op(a, b)
expected = op(pd.Int64Index(a), pd.Int64Index(b))
tm.assert_index_equal(result, expected)
for idx in idxs:
for scalar in scalars:
result = op(idx, scalar)
expected = op(pd.Int64Index(idx), scalar)
tm.assert_index_equal(result, expected)
def test_binops(self):
ops = [operator.add, operator.sub, operator.mul, operator.floordiv,
operator.truediv]
scalars = [-1, 1, 2]
idxs = [pd.RangeIndex(0, 10, 1), pd.RangeIndex(0, 20, 2),
pd.RangeIndex(-10, 10, 2), pd.RangeIndex(5, -5, -1)]
self.check_binop(ops, scalars, idxs)
def test_binops_pow(self):
# numpy does not allow powers of negative integers so test separately
# https://github.com/numpy/numpy/pull/8127
ops = [pow]
scalars = [1, 2]
idxs = [pd.RangeIndex(0, 10, 1), pd.RangeIndex(0, 20, 2)]
self.check_binop(ops, scalars, idxs)
# TODO: mod, divmod?
@pytest.mark.parametrize('op', [operator.add, operator.sub,
operator.mul, operator.floordiv,
operator.truediv, operator.pow])
def test_arithmetic_with_frame_or_series(self, op):
# check that we return NotImplemented when operating with Series
# or DataFrame
index = pd.RangeIndex(5)
other = pd.Series(np.random.randn(5))
expected = op(pd.Series(index), other)
result = op(index, other)
tm.assert_series_equal(result, expected)
other = pd.DataFrame(np.random.randn(2, 5))
expected = op(pd.DataFrame([index, index]), other)
result = op(index, other)
tm.assert_frame_equal(result, expected)
def test_numeric_compat2(self):
# validate that we are handling the RangeIndex overrides to numeric ops
# and returning RangeIndex where possible
idx = pd.RangeIndex(0, 10, 2)
result = idx * 2
expected = pd.RangeIndex(0, 20, 4)
tm.assert_index_equal(result, expected, exact=True)
result = idx + 2
expected = pd.RangeIndex(2, 12, 2)
tm.assert_index_equal(result, expected, exact=True)
result = idx - 2
expected = pd.RangeIndex(-2, 8, 2)
tm.assert_index_equal(result, expected, exact=True)
result = idx / 2
expected = pd.RangeIndex(0, 5, 1).astype('float64')
tm.assert_index_equal(result, expected, exact=True)
result = idx / 4
expected = pd.RangeIndex(0, 10, 2) / 4
tm.assert_index_equal(result, expected, exact=True)
result = idx // 1
expected = idx
tm.assert_index_equal(result, expected, exact=True)
# __mul__
result = idx * idx
expected = Index(idx.values * idx.values)
tm.assert_index_equal(result, expected, exact=True)
# __pow__
idx = pd.RangeIndex(0, 1000, 2)
result = idx ** 2
expected = idx._int64index ** 2
tm.assert_index_equal(Index(result.values), expected, exact=True)
# __floordiv__
cases_exact = [
(pd.RangeIndex(0, 1000, 2), 2, pd.RangeIndex(0, 500, 1)),
(pd.RangeIndex(-99, -201, -3), -3, pd.RangeIndex(33, 67, 1)),
(pd.RangeIndex(0, 1000, 1), 2,
pd.RangeIndex(0, 1000, 1)._int64index // 2),
(pd.RangeIndex(0, 100, 1), 2.0,
pd.RangeIndex(0, 100, 1)._int64index // 2.0),
(pd.RangeIndex(0), 50, pd.RangeIndex(0)),
(pd.RangeIndex(2, 4, 2), 3, pd.RangeIndex(0, 1, 1)),
(pd.RangeIndex(-5, -10, -6), 4, pd.RangeIndex(-2, -1, 1)),
(pd.RangeIndex(-100, -200, 3), 2, pd.RangeIndex(0))]
for idx, div, expected in cases_exact:
tm.assert_index_equal(idx // div, expected, exact=True)
@pytest.mark.parametrize('dtype', [np.int64, np.float64])
@pytest.mark.parametrize('delta', [1, 0, -1])
def test_addsub_arithmetic(self, dtype, delta):
    """Scalar add/sub on a numeric Index matches element-wise ndarray math."""
    # GH#8142
    delta = dtype(delta)
    index = pd.Index([10, 11, 12], dtype=dtype)

    result = index + delta
    expected = pd.Index(index.values + delta, dtype=dtype)
    tm.assert_index_equal(result, expected)

    # this subtraction used to fail
    result = index - delta
    expected = pd.Index(index.values - delta, dtype=dtype)
    tm.assert_index_equal(result, expected)

    # index-with-index arithmetic is consistent with scalar scaling
    tm.assert_index_equal(index + index, 2 * index)
    tm.assert_index_equal(index - index, 0 * index)
    # index minus itself is an index of zeros, not an empty index
    assert not (index - index).empty
| {
"repo_name": "cbertinato/pandas",
"path": "pandas/tests/arithmetic/test_numeric.py",
"copies": "1",
"size": "39542",
"license": "bsd-3-clause",
"hash": -4483040166672664000,
"line_mean": 36.2335216573,
"line_max": 79,
"alpha_frac": 0.5519700572,
"autogenerated": false,
"ratio": 3.547322149457253,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45992922066572534,
"avg_score": null,
"num_lines": null
} |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for object dtype
from decimal import Decimal
import operator
import numpy as np
import pytest
import pandas as pd
from pandas import Series, Timestamp
from pandas.core import ops
import pandas.util.testing as tm
# ------------------------------------------------------------------
# Comparisons
class TestObjectComparisons:
    """Comparison-operator behavior for object-dtype Series."""

    def test_comparison_object_numeric_nas(self):
        # numeric data stored as object dtype should compare the same as
        # the float-cast equivalents (NaN positions compare False)
        ser = Series(np.random.randn(10), dtype=object)
        shifted = ser.shift(2)

        ops = ["lt", "le", "gt", "ge", "eq", "ne"]
        for op in ops:
            func = getattr(operator, op)

            result = func(ser, shifted)
            expected = func(ser.astype(float), shifted.astype(float))
            tm.assert_series_equal(result, expected)

    def test_object_comparisons(self):
        # string comparisons; np.nan entries compare False for ==/<
        ser = Series(["a", "b", np.nan, "c", "a"])

        result = ser == "a"
        expected = Series([True, False, False, False, True])
        tm.assert_series_equal(result, expected)

        result = ser < "a"
        expected = Series([False, False, False, False, False])
        tm.assert_series_equal(result, expected)

        # != should be the element-wise negation of ==
        result = ser != "a"
        expected = -(ser == "a")
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("dtype", [None, object])
    def test_more_na_comparisons(self, dtype):
        # NaN never compares equal; != against NaN is always True
        left = Series(["a", np.nan, "c"], dtype=dtype)
        right = Series(["a", np.nan, "d"], dtype=dtype)

        result = left == right
        expected = Series([True, False, False])
        tm.assert_series_equal(result, expected)

        result = left != right
        expected = Series([False, True, True])
        tm.assert_series_equal(result, expected)

        result = left == np.nan
        expected = Series([False, False, False])
        tm.assert_series_equal(result, expected)

        result = left != np.nan
        expected = Series([True, True, True])
        tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Arithmetic
class TestArithmetic:
    """Arithmetic behavior for object-dtype data across Series/Index/DataFrame boxes."""

    # TODO: parametrize
    def test_pow_ops_object(self):
        # GH#22922
        # pow is weird with masking & 1, so testing here
        a = Series([1, np.nan, 1, np.nan], dtype=object)
        b = Series([1, np.nan, np.nan, 1], dtype=object)
        result = a ** b
        expected = Series(a.values ** b.values, dtype=object)
        tm.assert_series_equal(result, expected)

        result = b ** a
        expected = Series(b.values ** a.values, dtype=object)
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("op", [operator.add, ops.radd])
    @pytest.mark.parametrize("other", ["category", "Int64"])
    def test_add_extension_scalar(self, other, box, op):
        # GH#22378
        # Check that scalars satisfying is_extension_array_dtype(obj)
        # do not incorrectly try to dispatch to an ExtensionArray operation
        arr = pd.Series(["a", "b", "c"])
        expected = pd.Series([op(x, other) for x in arr])

        arr = tm.box_expected(arr, box)
        expected = tm.box_expected(expected, box)

        result = op(arr, other)
        tm.assert_equal(result, expected)

    def test_objarr_add_str(self, box):
        # appending a string to each element; NaN propagates
        ser = pd.Series(["x", np.nan, "x"])
        expected = pd.Series(["xa", np.nan, "xa"])

        ser = tm.box_expected(ser, box)
        expected = tm.box_expected(expected, box)

        result = ser + "a"
        tm.assert_equal(result, expected)

    def test_objarr_radd_str(self, box):
        # reflected add: string prefix applied element-wise
        ser = pd.Series(["x", np.nan, "x"])
        expected = pd.Series(["ax", np.nan, "ax"])

        ser = tm.box_expected(ser, box)
        expected = tm.box_expected(expected, box)

        result = "a" + ser
        tm.assert_equal(result, expected)

    @pytest.mark.parametrize(
        "data",
        [
            [1, 2, 3],
            [1.1, 2.2, 3.3],
            [Timestamp("2011-01-01"), Timestamp("2011-01-02"), pd.NaT],
            ["x", "y", 1],
        ],
    )
    @pytest.mark.parametrize("dtype", [None, object])
    def test_objarr_radd_str_invalid(self, dtype, data, box):
        # prepending a string to non-string data should raise
        ser = Series(data, dtype=dtype)
        ser = tm.box_expected(ser, box)
        with pytest.raises(TypeError):
            "foo_" + ser

    @pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
    def test_objarr_add_invalid(self, op, box):
        # invalid ops
        obj_ser = tm.makeObjectSeries()
        obj_ser.name = "objects"

        obj_ser = tm.box_expected(obj_ser, box)
        with pytest.raises(Exception):
            op(obj_ser, 1)
        with pytest.raises(Exception):
            op(obj_ser, np.array(1, dtype=np.int64))

    # TODO: Moved from tests.series.test_operators; needs cleanup
    def test_operators_na_handling(self):
        # string concat with NaN on either side keeps NaN
        ser = Series(["foo", "bar", "baz", np.nan])
        result = "prefix_" + ser
        expected = pd.Series(["prefix_foo", "prefix_bar", "prefix_baz", np.nan])
        tm.assert_series_equal(result, expected)

        result = ser + "_suffix"
        expected = pd.Series(["foo_suffix", "bar_suffix", "baz_suffix", np.nan])
        tm.assert_series_equal(result, expected)

    # TODO: parametrize over box
    @pytest.mark.parametrize("dtype", [None, object])
    def test_series_with_dtype_radd_timedelta(self, dtype):
        # note this test is _not_ aimed at timedelta64-dtyped Series
        ser = pd.Series(
            [pd.Timedelta("1 days"), pd.Timedelta("2 days"), pd.Timedelta("3 days")],
            dtype=dtype,
        )
        expected = pd.Series(
            [pd.Timedelta("4 days"), pd.Timedelta("5 days"), pd.Timedelta("6 days")]
        )

        result = pd.Timedelta("3 days") + ser
        tm.assert_series_equal(result, expected)

        result = ser + pd.Timedelta("3 days")
        tm.assert_series_equal(result, expected)

    # TODO: cleanup & parametrize over box
    def test_mixed_timezone_series_ops_object(self):
        # GH#13043
        ser = pd.Series(
            [
                pd.Timestamp("2015-01-01", tz="US/Eastern"),
                pd.Timestamp("2015-01-01", tz="Asia/Tokyo"),
            ],
            name="xxx",
        )
        # mixed timezones force object dtype rather than datetime64
        assert ser.dtype == object

        exp = pd.Series(
            [
                pd.Timestamp("2015-01-02", tz="US/Eastern"),
                pd.Timestamp("2015-01-02", tz="Asia/Tokyo"),
            ],
            name="xxx",
        )
        tm.assert_series_equal(ser + pd.Timedelta("1 days"), exp)
        tm.assert_series_equal(pd.Timedelta("1 days") + ser, exp)

        # object series & object series
        ser2 = pd.Series(
            [
                pd.Timestamp("2015-01-03", tz="US/Eastern"),
                pd.Timestamp("2015-01-05", tz="Asia/Tokyo"),
            ],
            name="xxx",
        )
        assert ser2.dtype == object
        exp = pd.Series([pd.Timedelta("2 days"), pd.Timedelta("4 days")], name="xxx")
        tm.assert_series_equal(ser2 - ser, exp)
        tm.assert_series_equal(ser - ser2, -exp)

        # Timedelta objects held in an object-dtype Series
        ser = pd.Series(
            [pd.Timedelta("01:00:00"), pd.Timedelta("02:00:00")],
            name="xxx",
            dtype=object,
        )
        assert ser.dtype == object

        exp = pd.Series(
            [pd.Timedelta("01:30:00"), pd.Timedelta("02:30:00")], name="xxx"
        )
        tm.assert_series_equal(ser + pd.Timedelta("00:30:00"), exp)
        tm.assert_series_equal(pd.Timedelta("00:30:00") + ser, exp)

    # TODO: cleanup & parametrize over box
    def test_iadd_preserves_name(self):
        # GH#17067, GH#19723 __iadd__ and __isub__ should preserve index name
        ser = pd.Series([1, 2, 3])
        ser.index.name = "foo"

        ser.index += 1
        assert ser.index.name == "foo"

        ser.index -= 1
        assert ser.index.name == "foo"

    def test_add_string(self):
        # from bug report
        index = pd.Index(["a", "b", "c"])
        index2 = index + "foo"

        assert "a" not in index2
        assert "afoo" in index2

    def test_iadd_string(self):
        index = pd.Index(["a", "b", "c"])
        # doesn't fail test unless there is a check before `+=`
        assert "a" in index

        index += "_x"
        assert "a_x" in index

    def test_add(self):
        # index + index concatenates element-wise (string doubling)
        index = tm.makeStringIndex(100)
        expected = pd.Index(index.values * 2)
        tm.assert_index_equal(index + index, expected)
        tm.assert_index_equal(index + index.tolist(), expected)
        tm.assert_index_equal(index.tolist() + index, expected)

        # test add and radd
        index = pd.Index(list("abc"))
        expected = pd.Index(["a1", "b1", "c1"])
        tm.assert_index_equal(index + "1", expected)
        expected = pd.Index(["1a", "1b", "1c"])
        tm.assert_index_equal("1" + index, expected)

    def test_sub_fail(self):
        # string indexes do not support subtraction in any combination
        index = tm.makeStringIndex(100)
        with pytest.raises(TypeError):
            index - "a"
        with pytest.raises(TypeError):
            index - index
        with pytest.raises(TypeError):
            index - index.tolist()
        with pytest.raises(TypeError):
            index.tolist() - index

    def test_sub_object(self):
        # GH#19369
        # Decimal objects support subtraction; mixing in str/other raises
        index = pd.Index([Decimal(1), Decimal(2)])
        expected = pd.Index([Decimal(0), Decimal(1)])

        result = index - Decimal(1)
        tm.assert_index_equal(result, expected)

        result = index - pd.Index([Decimal(1), Decimal(1)])
        tm.assert_index_equal(result, expected)

        with pytest.raises(TypeError):
            index - "foo"
        with pytest.raises(TypeError):
            index - np.array([2, "foo"])

    def test_rsub_object(self):
        # GH#19369
        # reflected subtraction with Decimal scalars/arrays
        index = pd.Index([Decimal(1), Decimal(2)])
        expected = pd.Index([Decimal(1), Decimal(0)])

        result = Decimal(2) - index
        tm.assert_index_equal(result, expected)

        result = np.array([Decimal(2), Decimal(2)]) - index
        tm.assert_index_equal(result, expected)

        with pytest.raises(TypeError):
            "foo" - index
        with pytest.raises(TypeError):
            np.array([True, pd.Timestamp.now()]) - index
| {
"repo_name": "kushalbhola/MyStuff",
"path": "Practice/PythonApplication/env/Lib/site-packages/pandas/tests/arithmetic/test_object.py",
"copies": "2",
"size": "10294",
"license": "apache-2.0",
"hash": 1729039999091011600,
"line_mean": 31.3710691824,
"line_max": 85,
"alpha_frac": 0.55362347,
"autogenerated": false,
"ratio": 3.622097114707952,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002247691312669359,
"num_lines": 318
} |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for object dtype
import datetime
from decimal import Decimal
import operator
import numpy as np
import pytest
import pandas as pd
from pandas import Series, Timestamp
import pandas._testing as tm
from pandas.core import ops
# ------------------------------------------------------------------
# Comparisons
class TestObjectComparisons:
    """Comparison-operator behavior for object-dtype Series."""

    def test_comparison_object_numeric_nas(self):
        # numeric data stored as object dtype should compare the same as
        # the float-cast equivalents (NaN positions compare False)
        ser = Series(np.random.randn(10), dtype=object)
        shifted = ser.shift(2)

        ops = ["lt", "le", "gt", "ge", "eq", "ne"]
        for op in ops:
            func = getattr(operator, op)

            result = func(ser, shifted)
            expected = func(ser.astype(float), shifted.astype(float))
            tm.assert_series_equal(result, expected)

    def test_object_comparisons(self):
        # string comparisons; np.nan entries compare False for ==/<
        ser = Series(["a", "b", np.nan, "c", "a"])

        result = ser == "a"
        expected = Series([True, False, False, False, True])
        tm.assert_series_equal(result, expected)

        result = ser < "a"
        expected = Series([False, False, False, False, False])
        tm.assert_series_equal(result, expected)

        # != should be the element-wise negation of ==
        result = ser != "a"
        expected = -(ser == "a")
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("dtype", [None, object])
    def test_more_na_comparisons(self, dtype):
        # NaN never compares equal; != against NaN is always True
        left = Series(["a", np.nan, "c"], dtype=dtype)
        right = Series(["a", np.nan, "d"], dtype=dtype)

        result = left == right
        expected = Series([True, False, False])
        tm.assert_series_equal(result, expected)

        result = left != right
        expected = Series([False, True, True])
        tm.assert_series_equal(result, expected)

        result = left == np.nan
        expected = Series([False, False, False])
        tm.assert_series_equal(result, expected)

        result = left != np.nan
        expected = Series([True, True, True])
        tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Arithmetic
class TestArithmetic:
    """Arithmetic behavior for object-dtype data across Series/Index/DataFrame boxes."""

    # TODO: parametrize
    def test_pow_ops_object(self):
        # GH#22922
        # pow is weird with masking & 1, so testing here
        a = Series([1, np.nan, 1, np.nan], dtype=object)
        b = Series([1, np.nan, np.nan, 1], dtype=object)
        result = a ** b
        expected = Series(a.values ** b.values, dtype=object)
        tm.assert_series_equal(result, expected)

        result = b ** a
        expected = Series(b.values ** a.values, dtype=object)
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("op", [operator.add, ops.radd])
    @pytest.mark.parametrize("other", ["category", "Int64"])
    def test_add_extension_scalar(self, other, box_with_array, op):
        # GH#22378
        # Check that scalars satisfying is_extension_array_dtype(obj)
        # do not incorrectly try to dispatch to an ExtensionArray operation
        arr = Series(["a", "b", "c"])
        expected = Series([op(x, other) for x in arr])

        arr = tm.box_expected(arr, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = op(arr, other)
        tm.assert_equal(result, expected)

    def test_objarr_add_str(self, box_with_array):
        # appending a string to each element; NaN propagates
        ser = Series(["x", np.nan, "x"])
        expected = Series(["xa", np.nan, "xa"])

        ser = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = ser + "a"
        tm.assert_equal(result, expected)

    def test_objarr_radd_str(self, box_with_array):
        # reflected add: string prefix applied element-wise
        ser = Series(["x", np.nan, "x"])
        expected = Series(["ax", np.nan, "ax"])

        ser = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = "a" + ser
        tm.assert_equal(result, expected)

    @pytest.mark.parametrize(
        "data",
        [
            [1, 2, 3],
            [1.1, 2.2, 3.3],
            [Timestamp("2011-01-01"), Timestamp("2011-01-02"), pd.NaT],
            ["x", "y", 1],
        ],
    )
    @pytest.mark.parametrize("dtype", [None, object])
    def test_objarr_radd_str_invalid(self, dtype, data, box_with_array):
        # prepending a string to non-string data should raise; the match
        # pattern covers the message variants across numpy/pandas versions
        ser = Series(data, dtype=dtype)
        ser = tm.box_expected(ser, box_with_array)
        msg = (
            "can only concatenate str|"
            "did not contain a loop with signature matching types|"
            "unsupported operand type|"
            "must be str"
        )
        with pytest.raises(TypeError, match=msg):
            "foo_" + ser

    @pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
    def test_objarr_add_invalid(self, op, box_with_array):
        # invalid ops
        box = box_with_array

        obj_ser = tm.makeObjectSeries()
        obj_ser.name = "objects"

        obj_ser = tm.box_expected(obj_ser, box)
        msg = "can only concatenate str|unsupported operand type|must be str"
        with pytest.raises(Exception, match=msg):
            op(obj_ser, 1)
        with pytest.raises(Exception, match=msg):
            op(obj_ser, np.array(1, dtype=np.int64))

    # TODO: Moved from tests.series.test_operators; needs cleanup
    def test_operators_na_handling(self):
        # string concat with NaN on either side keeps NaN
        ser = Series(["foo", "bar", "baz", np.nan])
        result = "prefix_" + ser
        expected = Series(["prefix_foo", "prefix_bar", "prefix_baz", np.nan])
        tm.assert_series_equal(result, expected)

        result = ser + "_suffix"
        expected = Series(["foo_suffix", "bar_suffix", "baz_suffix", np.nan])
        tm.assert_series_equal(result, expected)

    # TODO: parametrize over box
    @pytest.mark.parametrize("dtype", [None, object])
    def test_series_with_dtype_radd_timedelta(self, dtype):
        # note this test is _not_ aimed at timedelta64-dtyped Series
        ser = Series(
            [pd.Timedelta("1 days"), pd.Timedelta("2 days"), pd.Timedelta("3 days")],
            dtype=dtype,
        )
        expected = Series(
            [pd.Timedelta("4 days"), pd.Timedelta("5 days"), pd.Timedelta("6 days")]
        )

        result = pd.Timedelta("3 days") + ser
        tm.assert_series_equal(result, expected)

        result = ser + pd.Timedelta("3 days")
        tm.assert_series_equal(result, expected)

    # TODO: cleanup & parametrize over box
    def test_mixed_timezone_series_ops_object(self):
        # GH#13043
        ser = Series(
            [
                Timestamp("2015-01-01", tz="US/Eastern"),
                Timestamp("2015-01-01", tz="Asia/Tokyo"),
            ],
            name="xxx",
        )
        # mixed timezones force object dtype rather than datetime64
        assert ser.dtype == object

        exp = Series(
            [
                Timestamp("2015-01-02", tz="US/Eastern"),
                Timestamp("2015-01-02", tz="Asia/Tokyo"),
            ],
            name="xxx",
        )
        tm.assert_series_equal(ser + pd.Timedelta("1 days"), exp)
        tm.assert_series_equal(pd.Timedelta("1 days") + ser, exp)

        # object series & object series
        ser2 = Series(
            [
                Timestamp("2015-01-03", tz="US/Eastern"),
                Timestamp("2015-01-05", tz="Asia/Tokyo"),
            ],
            name="xxx",
        )
        assert ser2.dtype == object
        exp = Series([pd.Timedelta("2 days"), pd.Timedelta("4 days")], name="xxx")
        tm.assert_series_equal(ser2 - ser, exp)
        tm.assert_series_equal(ser - ser2, -exp)

        # Timedelta objects held in an object-dtype Series
        ser = Series(
            [pd.Timedelta("01:00:00"), pd.Timedelta("02:00:00")],
            name="xxx",
            dtype=object,
        )
        assert ser.dtype == object

        exp = Series([pd.Timedelta("01:30:00"), pd.Timedelta("02:30:00")], name="xxx")
        tm.assert_series_equal(ser + pd.Timedelta("00:30:00"), exp)
        tm.assert_series_equal(pd.Timedelta("00:30:00") + ser, exp)

    # TODO: cleanup & parametrize over box
    def test_iadd_preserves_name(self):
        # GH#17067, GH#19723 __iadd__ and __isub__ should preserve index name
        ser = Series([1, 2, 3])
        ser.index.name = "foo"

        ser.index += 1
        assert ser.index.name == "foo"

        ser.index -= 1
        assert ser.index.name == "foo"

    def test_add_string(self):
        # from bug report
        index = pd.Index(["a", "b", "c"])
        index2 = index + "foo"

        assert "a" not in index2
        assert "afoo" in index2

    def test_iadd_string(self):
        index = pd.Index(["a", "b", "c"])
        # doesn't fail test unless there is a check before `+=`
        assert "a" in index

        index += "_x"
        assert "a_x" in index

    def test_add(self):
        # index + index concatenates element-wise (string doubling)
        index = tm.makeStringIndex(100)
        expected = pd.Index(index.values * 2)
        tm.assert_index_equal(index + index, expected)
        tm.assert_index_equal(index + index.tolist(), expected)
        tm.assert_index_equal(index.tolist() + index, expected)

        # test add and radd
        index = pd.Index(list("abc"))
        expected = pd.Index(["a1", "b1", "c1"])
        tm.assert_index_equal(index + "1", expected)
        expected = pd.Index(["1a", "1b", "1c"])
        tm.assert_index_equal("1" + index, expected)

    def test_sub_fail(self):
        # string indexes do not support subtraction in any combination
        index = tm.makeStringIndex(100)

        msg = "unsupported operand type|Cannot broadcast"
        with pytest.raises(TypeError, match=msg):
            index - "a"
        with pytest.raises(TypeError, match=msg):
            index - index
        with pytest.raises(TypeError, match=msg):
            index - index.tolist()
        with pytest.raises(TypeError, match=msg):
            index.tolist() - index

    def test_sub_object(self):
        # GH#19369
        # Decimal objects support subtraction; mixing in str/other raises
        index = pd.Index([Decimal(1), Decimal(2)])
        expected = pd.Index([Decimal(0), Decimal(1)])

        result = index - Decimal(1)
        tm.assert_index_equal(result, expected)

        result = index - pd.Index([Decimal(1), Decimal(1)])
        tm.assert_index_equal(result, expected)

        msg = "unsupported operand type"
        with pytest.raises(TypeError, match=msg):
            index - "foo"
        with pytest.raises(TypeError, match=msg):
            index - np.array([2, "foo"])

    def test_rsub_object(self):
        # GH#19369
        # reflected subtraction with Decimal scalars/arrays
        index = pd.Index([Decimal(1), Decimal(2)])
        expected = pd.Index([Decimal(1), Decimal(0)])

        result = Decimal(2) - index
        tm.assert_index_equal(result, expected)

        result = np.array([Decimal(2), Decimal(2)]) - index
        tm.assert_index_equal(result, expected)

        msg = "unsupported operand type"
        with pytest.raises(TypeError, match=msg):
            "foo" - index
        with pytest.raises(TypeError, match=msg):
            np.array([True, Timestamp.now()]) - index
class MyIndex(pd.Index):
    # Simple index subclass that tracks ops calls.

    # number of times __add__ has been invoked on this instance
    _calls: int

    @classmethod
    def _simple_new(cls, values, name=None, dtype=None):
        # Bypass pd.Index.__new__ entirely and wire up internal state by
        # hand, mirroring the base-class fastpath constructor.
        # NOTE(review): relies on pandas-internal attributes
        # (_data/_index_data/_reset_identity) of this pandas version.
        result = object.__new__(cls)
        result._data = values
        result._index_data = values
        result._name = name
        result._calls = 0
        result._reset_identity()

        return result

    def __add__(self, other):
        # count the call and return a fresh instance over the same data;
        # the operand is deliberately ignored — only dispatch is under test
        self._calls += 1
        return self._simple_new(self._index_data)

    def __radd__(self, other):
        # reflected add funnels into __add__ so _calls counts both sides
        return self.__add__(other)
@pytest.mark.parametrize(
    "other",
    [
        [datetime.timedelta(1), datetime.timedelta(2)],
        [datetime.datetime(2000, 1, 1), datetime.datetime(2000, 1, 2)],
        [pd.Period("2000"), pd.Period("2001")],
        ["a", "b"],
    ],
    ids=["timedelta", "datetime", "period", "object"],
)
def test_index_ops_defer_to_unknown_subclasses(other):
    # https://github.com/pandas-dev/pandas/issues/31109
    # A plain pd.Index on the left should defer to an unknown Index
    # subclass on the right, so MyIndex.__radd__ produces the result.
    data = np.array(
        [datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)], dtype=object
    )
    subclassed = MyIndex._simple_new(data)

    result = pd.Index(other) + subclassed

    assert isinstance(result, MyIndex)
    assert subclassed._calls == 1
| {
"repo_name": "jreback/pandas",
"path": "pandas/tests/arithmetic/test_object.py",
"copies": "2",
"size": "12144",
"license": "bsd-3-clause",
"hash": 4664936065137660000,
"line_mean": 31.384,
"line_max": 86,
"alpha_frac": 0.5627470356,
"autogenerated": false,
"ratio": 3.6655599154844554,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00015653270502959365,
"num_lines": 375
} |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs import IncompatibleFrequency, Period, Timestamp, to_offset
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import PeriodIndex, Series, TimedeltaIndex, period_range
import pandas._testing as tm
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
from .common import assert_invalid_comparison
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
    # Comparison tests for PeriodDtype vectors fully parametrized over
    # DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
    # tests will eventually end up here.

    def test_compare_zerodim(self, box_with_array):
        # GH#26689 make sure we unbox zero-dimensional arrays
        # comparisons against an Index box come back as ndarray, not Index
        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray

        pi = pd.period_range("2000", periods=4)
        # 0-dim object ndarray wrapping the first Period
        other = np.array(pi.to_numpy()[0])

        pi = tm.box_expected(pi, box_with_array)
        result = pi <= other
        expected = np.array([True, False, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)

    @pytest.mark.parametrize(
        "scalar", ["foo", pd.Timestamp.now(), pd.Timedelta(days=4)]
    )
    def test_compare_invalid_scalar(self, box_with_array, scalar):
        # comparison with scalar that cannot be interpreted as a Period
        pi = pd.period_range("2000", periods=4)
        parr = tm.box_expected(pi, box_with_array)
        assert_invalid_comparison(parr, scalar, box_with_array)

    @pytest.mark.parametrize(
        "other",
        [
            pd.date_range("2000", periods=4).array,
            pd.timedelta_range("1D", periods=4).array,
            np.arange(4),
            np.arange(4).astype(np.float64),
            list(range(4)),
        ],
    )
    def test_compare_invalid_listlike(self, box_with_array, other):
        # listlikes of non-Period values are invalid comparison operands
        pi = pd.period_range("2000", periods=4)
        parr = tm.box_expected(pi, box_with_array)
        assert_invalid_comparison(parr, other, box_with_array)

    @pytest.mark.parametrize("other_box", [list, np.array, lambda x: x.astype(object)])
    def test_compare_object_dtype(self, box_with_array, other_box):
        # comparisons against object-dtype wrappers of the same Periods
        pi = pd.period_range("2000", periods=5)
        parr = tm.box_expected(pi, box_with_array)

        xbox = np.ndarray if box_with_array is pd.Index else box_with_array

        # identical contents: ==/<=/>= all True, !=/</> all False
        other = other_box(pi)

        expected = np.array([True, True, True, True, True])
        expected = tm.box_expected(expected, xbox)

        result = parr == other
        tm.assert_equal(result, expected)
        result = parr <= other
        tm.assert_equal(result, expected)
        result = parr >= other
        tm.assert_equal(result, expected)

        result = parr != other
        tm.assert_equal(result, ~expected)
        result = parr < other
        tm.assert_equal(result, ~expected)
        result = parr > other
        tm.assert_equal(result, ~expected)

        # reversed contents: only the middle element matches
        other = other_box(pi[::-1])

        expected = np.array([False, False, True, False, False])
        expected = tm.box_expected(expected, xbox)
        result = parr == other
        tm.assert_equal(result, expected)

        expected = np.array([True, True, True, False, False])
        expected = tm.box_expected(expected, xbox)
        result = parr <= other
        tm.assert_equal(result, expected)

        expected = np.array([False, False, True, True, True])
        expected = tm.box_expected(expected, xbox)
        result = parr >= other
        tm.assert_equal(result, expected)

        expected = np.array([True, True, False, True, True])
        expected = tm.box_expected(expected, xbox)
        result = parr != other
        tm.assert_equal(result, expected)

        expected = np.array([True, True, False, False, False])
        expected = tm.box_expected(expected, xbox)
        result = parr < other
        tm.assert_equal(result, expected)

        expected = np.array([False, False, False, True, True])
        expected = tm.box_expected(expected, xbox)
        result = parr > other
        tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
    """Comparison semantics for PeriodIndex against scalars, integers and NaT."""

    # TODO: parameterize over boxes
    @pytest.mark.parametrize("other", ["2017", pd.Period("2017", freq="D")])
    def test_eq(self, other):
        # string is parsed to a Period for equality comparison
        idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
        expected = np.array([True, True, False])
        result = idx == other

        tm.assert_numpy_array_equal(result, expected)

    @pytest.mark.parametrize(
        "other",
        [
            2017,
            [2017, 2017, 2017],
            np.array([2017, 2017, 2017]),
            np.array([2017, 2017, 2017], dtype=object),
            pd.Index([2017, 2017, 2017]),
        ],
    )
    def test_eq_integer_disallowed(self, other):
        # match Period semantics by not treating integers as Periods
        idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
        expected = np.array([False, False, False])
        result = idx == other

        tm.assert_numpy_array_equal(result, expected)
        # ordered comparisons with integers must raise, not return False
        msg = "|".join(
            [
                "not supported between instances of 'Period' and 'int'",
                r"Invalid comparison between dtype=period\[D\] and ",
            ]
        )
        with pytest.raises(TypeError, match=msg):
            idx < other
        with pytest.raises(TypeError, match=msg):
            idx > other
        with pytest.raises(TypeError, match=msg):
            idx <= other
        with pytest.raises(TypeError, match=msg):
            idx >= other

    def test_pi_cmp_period(self):
        # comparison with a member Period matches the ndarray comparison
        idx = period_range("2007-01", periods=20, freq="M")

        result = idx < idx[10]
        exp = idx.values < idx.values[10]
        tm.assert_numpy_array_equal(result, exp)

    # TODO: moved from test_datetime64; de-duplicate with version below
    def test_parr_cmp_period_scalar2(self, box_with_array):
        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray

        pi = pd.period_range("2000-01-01", periods=10, freq="D")

        val = Period("2000-01-04", freq="D")
        # expected computed element-wise from scalar Period comparisons
        expected = [x > val for x in pi]

        ser = tm.box_expected(pi, box_with_array)
        expected = tm.box_expected(expected, xbox)
        result = ser > val
        tm.assert_equal(result, expected)

        val = pi[5]
        result = ser > val
        expected = [x > val for x in pi]
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)

    @pytest.mark.parametrize("freq", ["M", "2M", "3M"])
    def test_parr_cmp_period_scalar(self, freq, box_with_array):
        # GH#13200
        # every comparison op against a scalar Period, both orientations
        xbox = np.ndarray if box_with_array is pd.Index else box_with_array

        base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
        base = tm.box_expected(base, box_with_array)
        per = Period("2011-02", freq=freq)

        exp = np.array([False, True, False, False])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base == per, exp)
        tm.assert_equal(per == base, exp)

        exp = np.array([True, False, True, True])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base != per, exp)
        tm.assert_equal(per != base, exp)

        exp = np.array([False, False, True, True])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base > per, exp)
        tm.assert_equal(per < base, exp)

        exp = np.array([True, False, False, False])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base < per, exp)
        tm.assert_equal(per > base, exp)

        exp = np.array([False, True, True, True])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base >= per, exp)
        tm.assert_equal(per <= base, exp)

        exp = np.array([True, True, False, False])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base <= per, exp)
        tm.assert_equal(per >= base, exp)

    @pytest.mark.parametrize("freq", ["M", "2M", "3M"])
    def test_parr_cmp_pi(self, freq, box_with_array):
        # GH#13200
        # element-wise comparisons against another PeriodIndex, same freq
        xbox = np.ndarray if box_with_array is pd.Index else box_with_array

        base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
        base = tm.box_expected(base, box_with_array)

        # TODO: could also box idx?
        idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)

        exp = np.array([False, False, True, False])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base == idx, exp)

        exp = np.array([True, True, False, True])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base != idx, exp)

        exp = np.array([False, True, False, False])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base > idx, exp)

        exp = np.array([True, False, False, True])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base < idx, exp)

        exp = np.array([False, True, True, False])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base >= idx, exp)

        exp = np.array([True, False, True, True])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base <= idx, exp)

    @pytest.mark.parametrize("freq", ["M", "2M", "3M"])
    def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
        # GH#13200
        # different base freq
        base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
        base = tm.box_expected(base, box_with_array)

        msg = "Input has different freq=A-DEC from "
        with pytest.raises(IncompatibleFrequency, match=msg):
            base <= Period("2011", freq="A")

        with pytest.raises(IncompatibleFrequency, match=msg):
            Period("2011", freq="A") >= base

        # TODO: Could parametrize over boxes for idx?
        idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
        # the error message names whichever operand's freq is "different",
        # which flips for the raw-array box
        rev_msg = r"Input has different freq=(M|2M|3M) from PeriodArray\(freq=A-DEC\)"
        idx_msg = rev_msg if box_with_array is tm.to_array else msg
        with pytest.raises(IncompatibleFrequency, match=idx_msg):
            base <= idx

        # Different frequency
        msg = "Input has different freq=4M from "
        with pytest.raises(IncompatibleFrequency, match=msg):
            base <= Period("2011", freq="4M")

        with pytest.raises(IncompatibleFrequency, match=msg):
            Period("2011", freq="4M") >= base

        idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
        rev_msg = r"Input has different freq=(M|2M|3M) from PeriodArray\(freq=4M\)"
        idx_msg = rev_msg if box_with_array is tm.to_array else msg
        with pytest.raises(IncompatibleFrequency, match=idx_msg):
            base <= idx

    @pytest.mark.parametrize("freq", ["M", "2M", "3M"])
    def test_pi_cmp_nat(self, freq):
        # NaT entries compare False for everything except !=
        idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)

        result = idx1 > Period("2011-02", freq=freq)
        exp = np.array([False, False, False, True])
        tm.assert_numpy_array_equal(result, exp)
        result = Period("2011-02", freq=freq) < idx1
        tm.assert_numpy_array_equal(result, exp)

        result = idx1 == Period("NaT", freq=freq)
        exp = np.array([False, False, False, False])
        tm.assert_numpy_array_equal(result, exp)
        result = Period("NaT", freq=freq) == idx1
        tm.assert_numpy_array_equal(result, exp)

        result = idx1 != Period("NaT", freq=freq)
        exp = np.array([True, True, True, True])
        tm.assert_numpy_array_equal(result, exp)
        result = Period("NaT", freq=freq) != idx1
        tm.assert_numpy_array_equal(result, exp)

        idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
        result = idx1 < idx2
        exp = np.array([True, False, False, False])
        tm.assert_numpy_array_equal(result, exp)

        result = idx1 == idx2
        exp = np.array([False, False, False, False])
        tm.assert_numpy_array_equal(result, exp)

        result = idx1 != idx2
        exp = np.array([True, True, True, True])
        tm.assert_numpy_array_equal(result, exp)

        # even self-comparison is False at the NaT slot
        result = idx1 == idx1
        exp = np.array([True, True, False, True])
        tm.assert_numpy_array_equal(result, exp)

        result = idx1 != idx1
        exp = np.array([False, False, True, False])
        tm.assert_numpy_array_equal(result, exp)

    @pytest.mark.parametrize("freq", ["M", "2M", "3M"])
    def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
        # mismatched freq raises even when NaT is present
        idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)

        diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
        msg = "Input has different freq=4M from Period(Array|Index)"
        with pytest.raises(IncompatibleFrequency, match=msg):
            idx1 > diff

        with pytest.raises(IncompatibleFrequency, match=msg):
            idx1 == diff

    # TODO: De-duplicate with test_pi_cmp_nat
    @pytest.mark.parametrize("dtype", [object, None])
    def test_comp_nat(self, dtype):
        left = pd.PeriodIndex(
            [pd.Period("2011-01-01"), pd.NaT, pd.Period("2011-01-03")]
        )
        right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period("2011-01-03")])
        if dtype is not None:
            left = left.astype(dtype)
            right = right.astype(dtype)

        result = left == right
        expected = np.array([False, False, True])
        tm.assert_numpy_array_equal(result, expected)

        result = left != right
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)

        # comparisons against the NaT scalar: == and < are all-False,
        # != is all-True, in both orientations
        expected = np.array([False, False, False])
        tm.assert_numpy_array_equal(left == pd.NaT, expected)
        tm.assert_numpy_array_equal(pd.NaT == right, expected)

        expected = np.array([True, True, True])
        tm.assert_numpy_array_equal(left != pd.NaT, expected)
        tm.assert_numpy_array_equal(pd.NaT != left, expected)

        expected = np.array([False, False, False])
        tm.assert_numpy_array_equal(left < pd.NaT, expected)
        tm.assert_numpy_array_equal(pd.NaT > left, expected)
class TestPeriodSeriesComparisons:
    def test_cmp_series_period_series_mixed_freq(self):
        # GH#13200: object-dtype Series of Periods with mixed freqs are
        # compared element-wise, pairing same-freq Periods per position.
        base = Series(
            [
                Period("2011", freq="A"),
                Period("2011-02", freq="M"),
                Period("2013", freq="A"),
                Period("2011-04", freq="M"),
            ]
        )
        ser = Series(
            [
                Period("2012", freq="A"),
                Period("2011-01", freq="M"),
                Period("2013", freq="A"),
                Period("2011-05", freq="M"),
            ]
        )

        # (computed result, expected truth values) for every comparison op
        checks = [
            (base == ser, [False, False, True, False]),
            (base != ser, [True, True, False, True]),
            (base > ser, [False, True, False, False]),
            (base < ser, [True, False, False, True]),
            (base >= ser, [False, True, True, False]),
            (base <= ser, [True, False, True, True]),
        ]
        for result, expected in checks:
            tm.assert_series_equal(result, Series(expected))
class TestPeriodIndexSeriesComparisonConsistency:
    """Check that comparison ops give the same answer whether the Period
    data is held in a PeriodIndex or in a Series."""

    # TODO: needs parametrization+de-duplication

    def _check(self, values, func, expected):
        # Run ``func`` against a PeriodIndex built from ``values`` and
        # against the equivalent Series; both must match ``expected``.
        idx = pd.PeriodIndex(values)
        result = func(idx)
        # check that we don't pass an unwanted type to tm.assert_equal
        assert isinstance(expected, (pd.Index, np.ndarray))
        tm.assert_equal(result, expected)

        s = pd.Series(values)
        result = func(s)
        exp = pd.Series(expected, name=values.name)
        tm.assert_series_equal(result, exp)

    def test_pi_comp_period(self):
        idx = PeriodIndex(
            ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
        )

        f = lambda x: x == pd.Period("2011-03", freq="M")
        exp = np.array([False, False, True, False], dtype=np.bool_)
        self._check(idx, f, exp)
        f = lambda x: pd.Period("2011-03", freq="M") == x
        self._check(idx, f, exp)

        f = lambda x: x != pd.Period("2011-03", freq="M")
        exp = np.array([True, True, False, True], dtype=np.bool_)
        self._check(idx, f, exp)
        f = lambda x: pd.Period("2011-03", freq="M") != x
        self._check(idx, f, exp)

        f = lambda x: pd.Period("2011-03", freq="M") >= x
        exp = np.array([True, True, True, False], dtype=np.bool_)
        self._check(idx, f, exp)

        f = lambda x: x > pd.Period("2011-03", freq="M")
        exp = np.array([False, False, False, True], dtype=np.bool_)
        self._check(idx, f, exp)

        # This block previously repeated the ``>=`` check above verbatim;
        # cover the previously-missing ``<`` case instead, mirroring
        # test_pi_comp_period_nat below.
        f = lambda x: x < pd.Period("2011-03", freq="M")
        exp = np.array([True, True, False, False], dtype=np.bool_)
        self._check(idx, f, exp)

    def test_pi_comp_period_nat(self):
        idx = PeriodIndex(
            ["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
        )

        f = lambda x: x == pd.Period("2011-03", freq="M")
        exp = np.array([False, False, True, False], dtype=np.bool_)
        self._check(idx, f, exp)
        f = lambda x: pd.Period("2011-03", freq="M") == x
        self._check(idx, f, exp)

        # NaT never compares equal ...
        f = lambda x: x == pd.NaT
        exp = np.array([False, False, False, False], dtype=np.bool_)
        self._check(idx, f, exp)
        f = lambda x: pd.NaT == x
        self._check(idx, f, exp)

        f = lambda x: x != pd.Period("2011-03", freq="M")
        exp = np.array([True, True, False, True], dtype=np.bool_)
        self._check(idx, f, exp)
        f = lambda x: pd.Period("2011-03", freq="M") != x
        self._check(idx, f, exp)

        # ... and always compares not-equal
        f = lambda x: x != pd.NaT
        exp = np.array([True, True, True, True], dtype=np.bool_)
        self._check(idx, f, exp)
        f = lambda x: pd.NaT != x
        self._check(idx, f, exp)

        f = lambda x: pd.Period("2011-03", freq="M") >= x
        exp = np.array([True, False, True, False], dtype=np.bool_)
        self._check(idx, f, exp)

        f = lambda x: x < pd.Period("2011-03", freq="M")
        exp = np.array([True, False, False, False], dtype=np.bool_)
        self._check(idx, f, exp)

        # ordered comparisons involving NaT are element-wise False
        f = lambda x: x > pd.NaT
        exp = np.array([False, False, False, False], dtype=np.bool_)
        self._check(idx, f, exp)

        f = lambda x: pd.NaT >= x
        exp = np.array([False, False, False, False], dtype=np.bool_)
        self._check(idx, f, exp)
# ------------------------------------------------------------------
# Arithmetic
class TestPeriodFrameArithmetic:
    def test_ops_frame_period(self):
        # GH#13043: scalar-Period minus a Period DataFrame, and
        # frame-minus-frame, return object-dtype frames of offsets.
        df = pd.DataFrame(
            {
                "A": [pd.Period("2015-01", freq="M"), pd.Period("2015-02", freq="M")],
                "B": [pd.Period("2014-01", freq="M"), pd.Period("2014-02", freq="M")],
            }
        )
        assert df["A"].dtype == "Period[M]"
        assert df["B"].dtype == "Period[M]"

        per = pd.Period("2015-03", freq="M")
        off = per.freq

        # dtype will be object because of original dtype
        expected = pd.DataFrame(
            {
                "A": np.array([2 * off, 1 * off], dtype=object),
                "B": np.array([14 * off, 13 * off], dtype=object),
            }
        )
        tm.assert_frame_equal(per - df, expected)
        tm.assert_frame_equal(df - per, -1 * expected)

        df2 = pd.DataFrame(
            {
                "A": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
                "B": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
            }
        )
        assert df2["A"].dtype == "Period[M]"
        assert df2["B"].dtype == "Period[M]"

        expected = pd.DataFrame(
            {
                "A": np.array([4 * off, 4 * off], dtype=object),
                "B": np.array([16 * off, 16 * off], dtype=object),
            }
        )
        tm.assert_frame_equal(df2 - df, expected)
        tm.assert_frame_equal(df - df2, -1 * expected)
class TestPeriodIndexArithmetic:
    """Arithmetic (+/-) for PeriodIndex/PeriodArray against other
    PeriodIndexes, integers, timedelta-likes, DateOffsets, and arrays
    thereof.  NOTE(review): ``box_with_array``, ``three_days``,
    ``two_hours``, ``not_daily``, ``not_hourly``, ``mismatched_freq``,
    ``tick_classes`` and ``one`` are pytest fixtures defined elsewhere —
    presumably the shared arithmetic-test conftest; confirm there."""

    # ---------------------------------------------------------------
    # __add__/__sub__ with PeriodIndex
    # PeriodIndex + other is defined for integers and timedelta-like others
    # PeriodIndex - other is defined for integers, timedelta-like others,
    # and PeriodIndex (with matching freq)

    def test_parr_add_iadd_parr_raises(self, box_with_array):
        rng = pd.period_range("1/1/2000", freq="D", periods=5)
        other = pd.period_range("1/6/2000", freq="D", periods=5)
        # TODO: parametrize over boxes for other?

        rng = tm.box_expected(rng, box_with_array)
        # An earlier implementation of PeriodIndex addition performed
        # a set operation (union). This has since been changed to
        # raise a TypeError. See GH#14164 and GH#13077 for historical
        # reference.
        msg = r"unsupported operand type\(s\) for \+: .* and .*"
        with pytest.raises(TypeError, match=msg):
            rng + other

        with pytest.raises(TypeError, match=msg):
            rng += other

    def test_pi_sub_isub_pi(self):
        # GH#20049
        # For historical reference see GH#14164, GH#13077.
        # PeriodIndex subtraction originally performed set difference,
        # then changed to raise TypeError before being implemented in GH#20049
        rng = pd.period_range("1/1/2000", freq="D", periods=5)
        other = pd.period_range("1/6/2000", freq="D", periods=5)

        off = rng.freq
        expected = pd.Index([-5 * off] * 5)
        result = rng - other
        tm.assert_index_equal(result, expected)

        rng -= other
        tm.assert_index_equal(rng, expected)

    def test_pi_sub_pi_with_nat(self):
        # NaT entries propagate through PeriodIndex - PeriodIndex
        rng = pd.period_range("1/1/2000", freq="D", periods=5)
        other = rng[1:].insert(0, pd.NaT)
        assert other[1:].equals(rng[1:])

        result = rng - other
        off = rng.freq
        expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off])
        tm.assert_index_equal(result, expected)

    def test_parr_sub_pi_mismatched_freq(self, box_with_array):
        rng = pd.period_range("1/1/2000", freq="D", periods=5)
        other = pd.period_range("1/6/2000", freq="H", periods=5)
        # TODO: parametrize over boxes for other?

        rng = tm.box_expected(rng, box_with_array)
        msg = r"Input has different freq=[HD] from PeriodArray\(freq=[DH]\)"
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng - other

    @pytest.mark.parametrize("n", [1, 2, 3, 4])
    def test_sub_n_gt_1_ticks(self, tick_classes, n):
        # GH 23878: subtraction with n>1 tick freqs matches the result
        # for the corresponding base (n=1) frequency
        p1_d = "19910905"
        p2_d = "19920406"
        p1 = pd.PeriodIndex([p1_d], freq=tick_classes(n))
        p2 = pd.PeriodIndex([p2_d], freq=tick_classes(n))

        expected = pd.PeriodIndex([p2_d], freq=p2.freq.base) - pd.PeriodIndex(
            [p1_d], freq=p1.freq.base
        )

        tm.assert_index_equal((p2 - p1), expected)

    @pytest.mark.parametrize("n", [1, 2, 3, 4])
    @pytest.mark.parametrize(
        "offset, kwd_name",
        [
            (pd.offsets.YearEnd, "month"),
            (pd.offsets.QuarterEnd, "startingMonth"),
            (pd.offsets.MonthEnd, None),
            (pd.offsets.Week, "weekday"),
        ],
    )
    def test_sub_n_gt_1_offsets(self, offset, kwd_name, n):
        # GH 23878: same as above but for non-tick (anchored) offsets
        kwds = {kwd_name: 3} if kwd_name is not None else {}
        p1_d = "19910905"
        p2_d = "19920406"
        freq = offset(n, normalize=False, **kwds)
        p1 = pd.PeriodIndex([p1_d], freq=freq)
        p2 = pd.PeriodIndex([p2_d], freq=freq)

        result = p2 - p1
        expected = pd.PeriodIndex([p2_d], freq=freq.base) - pd.PeriodIndex(
            [p1_d], freq=freq.base
        )

        tm.assert_index_equal(result, expected)

    # -------------------------------------------------------------
    # Invalid Operations

    @pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])])
    @pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
    def test_parr_add_sub_float_raises(self, op, other, box_with_array):
        dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")
        pi = dti.to_period("D")
        pi = tm.box_expected(pi, box_with_array)
        msg = (
            r"unsupported operand type\(s\) for [+-]: .* and .*|"
            "Concatenation operation is not implemented for NumPy arrays"
        )
        with pytest.raises(TypeError, match=msg):
            op(pi, other)

    @pytest.mark.parametrize(
        "other",
        [
            # datetime scalars
            pd.Timestamp.now(),
            pd.Timestamp.now().to_pydatetime(),
            pd.Timestamp.now().to_datetime64(),
            # datetime-like arrays
            pd.date_range("2016-01-01", periods=3, freq="H"),
            pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"),
            pd.date_range("2016-01-01", periods=3, freq="S")._data,
            pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data,
            # Miscellaneous invalid types
        ],
    )
    def test_parr_add_sub_invalid(self, other, box_with_array):
        # GH#23215: datetime scalars/arrays cannot be added to or
        # subtracted from a Period array in either operand order
        rng = pd.period_range("1/1/2000", freq="D", periods=3)
        rng = tm.box_expected(rng, box_with_array)

        msg = (
            r"(:?cannot add PeriodArray and .*)"
            r"|(:?cannot subtract .* from (:?a\s)?.*)"
            r"|(:?unsupported operand type\(s\) for \+: .* and .*)"
        )
        with pytest.raises(TypeError, match=msg):
            rng + other
        with pytest.raises(TypeError, match=msg):
            other + rng
        with pytest.raises(TypeError, match=msg):
            rng - other
        with pytest.raises(TypeError, match=msg):
            other - rng

    # -----------------------------------------------------------------
    # __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]

    def test_pi_add_sub_td64_array_non_tick_raises(self):
        # timedelta64 arithmetic is only defined for tick-like freqs,
        # so a quarterly index must raise
        rng = pd.period_range("1/1/2000", freq="Q", periods=3)
        tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
        tdarr = tdi.values

        msg = r"Cannot add or subtract timedelta64\[ns\] dtype from period\[Q-DEC\]"
        with pytest.raises(TypeError, match=msg):
            rng + tdarr
        with pytest.raises(TypeError, match=msg):
            tdarr + rng

        with pytest.raises(TypeError, match=msg):
            rng - tdarr
        msg = r"cannot subtract PeriodArray from timedelta64\[ns\]"
        with pytest.raises(TypeError, match=msg):
            tdarr - rng

    def test_pi_add_sub_td64_array_tick(self):
        # PeriodIndex + Timedelta-like is allowed only with
        # tick-like frequencies
        rng = pd.period_range("1/1/2000", freq="90D", periods=3)
        tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
        tdarr = tdi.values

        expected = pd.period_range("12/31/1999", freq="90D", periods=3)
        result = rng + tdi
        tm.assert_index_equal(result, expected)
        result = rng + tdarr
        tm.assert_index_equal(result, expected)
        result = tdi + rng
        tm.assert_index_equal(result, expected)
        result = tdarr + rng
        tm.assert_index_equal(result, expected)

        expected = pd.period_range("1/2/2000", freq="90D", periods=3)

        result = rng - tdi
        tm.assert_index_equal(result, expected)
        result = rng - tdarr
        tm.assert_index_equal(result, expected)

        # reversed subtraction (td - Period) is never defined
        msg = r"cannot subtract .* from .*"
        with pytest.raises(TypeError, match=msg):
            tdarr - rng

        with pytest.raises(TypeError, match=msg):
            tdi - rng

    @pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
    @pytest.mark.parametrize("tdi_freq", [None, "H"])
    def test_parr_sub_td64array(self, box_with_array, tdi_freq, pi_freq):
        box = box_with_array
        xbox = box if box is not tm.to_array else pd.Index

        tdi = TimedeltaIndex(["1 hours", "2 hours"], freq=tdi_freq)
        dti = Timestamp("2018-03-07 17:16:40") + tdi
        pi = dti.to_period(pi_freq)

        # TODO: parametrize over box for pi?
        td64obj = tm.box_expected(tdi, box)

        if pi_freq == "H":
            # hourly periods are compatible with an hourly timedelta
            result = pi - td64obj
            expected = (pi.to_timestamp("S") - tdi).to_period(pi_freq)
            expected = tm.box_expected(expected, xbox)
            tm.assert_equal(result, expected)

            # Subtract from scalar
            result = pi[0] - td64obj
            expected = (pi[0].to_timestamp("S") - tdi).to_period(pi_freq)
            expected = tm.box_expected(expected, box)
            tm.assert_equal(result, expected)

        elif pi_freq == "D":
            # Tick, but non-compatible
            msg = "Input has different freq=None from PeriodArray"
            with pytest.raises(IncompatibleFrequency, match=msg):
                pi - td64obj
            with pytest.raises(IncompatibleFrequency, match=msg):
                pi[0] - td64obj

        else:
            # With non-Tick freq, we could not add timedelta64 array regardless
            # of what its resolution is
            msg = "Cannot add or subtract timedelta64"
            with pytest.raises(TypeError, match=msg):
                pi - td64obj
            with pytest.raises(TypeError, match=msg):
                pi[0] - td64obj

    # -----------------------------------------------------------------
    # operations with array/Index of DateOffset objects

    @pytest.mark.parametrize("box", [np.array, pd.Index])
    def test_pi_add_offset_array(self, box):
        # GH#18849
        pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
        offs = box(
            [
                pd.offsets.QuarterEnd(n=1, startingMonth=12),
                pd.offsets.QuarterEnd(n=-2, startingMonth=12),
            ]
        )
        expected = pd.PeriodIndex([pd.Period("2015Q2"), pd.Period("2015Q4")])

        with tm.assert_produces_warning(PerformanceWarning):
            res = pi + offs
        tm.assert_index_equal(res, expected)

        with tm.assert_produces_warning(PerformanceWarning):
            res2 = offs + pi
        tm.assert_index_equal(res2, expected)

        unanchored = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
        # addition/subtraction ops with incompatible offsets should issue
        # a PerformanceWarning and _then_ raise a TypeError.
        msg = r"Input cannot be converted to Period\(freq=Q-DEC\)"
        with pytest.raises(IncompatibleFrequency, match=msg):
            with tm.assert_produces_warning(PerformanceWarning):
                pi + unanchored
        with pytest.raises(IncompatibleFrequency, match=msg):
            with tm.assert_produces_warning(PerformanceWarning):
                unanchored + pi

    @pytest.mark.parametrize("box", [np.array, pd.Index])
    def test_pi_sub_offset_array(self, box):
        # GH#18824
        pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
        other = box(
            [
                pd.offsets.QuarterEnd(n=1, startingMonth=12),
                pd.offsets.QuarterEnd(n=-2, startingMonth=12),
            ]
        )

        expected = PeriodIndex([pi[n] - other[n] for n in range(len(pi))])

        with tm.assert_produces_warning(PerformanceWarning):
            res = pi - other
        tm.assert_index_equal(res, expected)

        anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])

        # addition/subtraction ops with anchored offsets should issue
        # a PerformanceWarning and _then_ raise a TypeError.
        msg = r"Input has different freq=-1M from Period\(freq=Q-DEC\)"
        with pytest.raises(IncompatibleFrequency, match=msg):
            with tm.assert_produces_warning(PerformanceWarning):
                pi - anchored
        with pytest.raises(IncompatibleFrequency, match=msg):
            with tm.assert_produces_warning(PerformanceWarning):
                anchored - pi

    def test_pi_add_iadd_int(self, one):
        # Variants of `one` for #19012
        rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
        result = rng + one
        expected = pd.period_range("2000-01-01 10:00", freq="H", periods=10)
        tm.assert_index_equal(result, expected)
        rng += one
        tm.assert_index_equal(rng, expected)

    def test_pi_sub_isub_int(self, one):
        """
        PeriodIndex.__sub__ and __isub__ with several representations of
        the integer 1, e.g. int, np.int64, np.uint8, ...
        """
        rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
        result = rng - one
        expected = pd.period_range("2000-01-01 08:00", freq="H", periods=10)
        tm.assert_index_equal(result, expected)
        rng -= one
        tm.assert_index_equal(rng, expected)

    @pytest.mark.parametrize("five", [5, np.array(5, dtype=np.int64)])
    def test_pi_sub_intlike(self, five):
        # subtracting an integer is equivalent to adding its negation
        rng = period_range("2007-01", periods=50)

        result = rng - five
        exp = rng + (-five)
        tm.assert_index_equal(result, exp)

    def test_pi_sub_isub_offset(self):
        # offset
        # DateOffset
        rng = pd.period_range("2014", "2024", freq="A")
        result = rng - pd.offsets.YearEnd(5)
        expected = pd.period_range("2009", "2019", freq="A")
        tm.assert_index_equal(result, expected)
        rng -= pd.offsets.YearEnd(5)
        tm.assert_index_equal(rng, expected)

        rng = pd.period_range("2014-01", "2016-12", freq="M")
        result = rng - pd.offsets.MonthEnd(5)
        expected = pd.period_range("2013-08", "2016-07", freq="M")
        tm.assert_index_equal(result, expected)

        rng -= pd.offsets.MonthEnd(5)
        tm.assert_index_equal(rng, expected)

    @pytest.mark.parametrize("transpose", [True, False])
    def test_pi_add_offset_n_gt1(self, box_with_array, transpose):
        # GH#23215
        # add offset to PeriodIndex with freq.n > 1
        per = pd.Period("2016-01", freq="2M")
        pi = pd.PeriodIndex([per])

        expected = pd.PeriodIndex(["2016-03"], freq="2M")

        pi = tm.box_expected(pi, box_with_array, transpose=transpose)
        expected = tm.box_expected(expected, box_with_array, transpose=transpose)

        result = pi + per.freq
        tm.assert_equal(result, expected)

        result = per.freq + pi
        tm.assert_equal(result, expected)

    def test_pi_add_offset_n_gt1_not_divisible(self, box_with_array):
        # GH#23215
        # PeriodIndex with freq.n > 1 add offset with offset.n % freq.n != 0
        pi = pd.PeriodIndex(["2016-01"], freq="2M")
        expected = pd.PeriodIndex(["2016-04"], freq="2M")

        pi = tm.box_expected(pi, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = pi + to_offset("3M")
        tm.assert_equal(result, expected)

        result = to_offset("3M") + pi
        tm.assert_equal(result, expected)

    # ---------------------------------------------------------------
    # __add__/__sub__ with integer arrays

    @pytest.mark.parametrize("int_holder", [np.array, pd.Index])
    @pytest.mark.parametrize("op", [operator.add, ops.radd])
    def test_pi_add_intarray(self, int_holder, op):
        # GH#19959
        pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
        other = int_holder([4, -1])

        result = op(pi, other)
        expected = pd.PeriodIndex([pd.Period("2016Q1"), pd.Period("NaT")])
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize("int_holder", [np.array, pd.Index])
    def test_pi_sub_intarray(self, int_holder):
        # GH#19959
        pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
        other = int_holder([4, -1])

        result = pi - other
        expected = pd.PeriodIndex([pd.Period("2014Q1"), pd.Period("NaT")])
        tm.assert_index_equal(result, expected)

        msg = r"bad operand type for unary -: 'PeriodArray'"
        with pytest.raises(TypeError, match=msg):
            other - pi

    # ---------------------------------------------------------------
    # Timedelta-like (timedelta, timedelta64, Timedelta, Tick)
    # TODO: Some of these are misnomers because of non-Tick DateOffsets

    def test_pi_add_timedeltalike_minute_gt1(self, three_days):
        # GH#23031 adding a time-delta-like offset to a PeriodArray that has
        # minute frequency with n != 1. A more general case is tested below
        # in test_pi_add_timedeltalike_tick_gt1, but here we write out the
        # expected result more explicitly.
        other = three_days
        rng = pd.period_range("2014-05-01", periods=3, freq="2D")

        expected = pd.PeriodIndex(["2014-05-04", "2014-05-06", "2014-05-08"], freq="2D")

        result = rng + other
        tm.assert_index_equal(result, expected)

        result = other + rng
        tm.assert_index_equal(result, expected)

        # subtraction
        expected = pd.PeriodIndex(["2014-04-28", "2014-04-30", "2014-05-02"], freq="2D")
        result = rng - other
        tm.assert_index_equal(result, expected)

        msg = (
            r"(:?bad operand type for unary -: 'PeriodArray')"
            r"|(:?cannot subtract PeriodArray from timedelta64\[[hD]\])"
        )
        with pytest.raises(TypeError, match=msg):
            other - rng

    @pytest.mark.parametrize("freqstr", ["5ns", "5us", "5ms", "5s", "5T", "5h", "5d"])
    def test_pi_add_timedeltalike_tick_gt1(self, three_days, freqstr):
        # GH#23031 adding a time-delta-like offset to a PeriodArray that has
        # tick-like frequency with n != 1
        other = three_days
        rng = pd.period_range("2014-05-01", periods=6, freq=freqstr)

        expected = pd.period_range(rng[0] + other, periods=6, freq=freqstr)

        result = rng + other
        tm.assert_index_equal(result, expected)

        result = other + rng
        tm.assert_index_equal(result, expected)

        # subtraction
        expected = pd.period_range(rng[0] - other, periods=6, freq=freqstr)
        result = rng - other
        tm.assert_index_equal(result, expected)

        msg = (
            r"(:?bad operand type for unary -: 'PeriodArray')"
            r"|(:?cannot subtract PeriodArray from timedelta64\[[hD]\])"
        )
        with pytest.raises(TypeError, match=msg):
            other - rng

    def test_pi_add_iadd_timedeltalike_daily(self, three_days):
        # Tick
        other = three_days
        rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
        expected = pd.period_range("2014-05-04", "2014-05-18", freq="D")

        result = rng + other
        tm.assert_index_equal(result, expected)

        rng += other
        tm.assert_index_equal(rng, expected)

    def test_pi_sub_isub_timedeltalike_daily(self, three_days):
        # Tick-like 3 Days
        other = three_days
        rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
        expected = pd.period_range("2014-04-28", "2014-05-12", freq="D")

        result = rng - other
        tm.assert_index_equal(result, expected)

        rng -= other
        tm.assert_index_equal(rng, expected)

    def test_pi_add_sub_timedeltalike_freq_mismatch_daily(self, not_daily):
        other = not_daily
        rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
        msg = "Input has different freq(=.+)? from Period.*?\\(freq=D\\)"
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng + other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng += other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng - other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng -= other

    def test_pi_add_iadd_timedeltalike_hourly(self, two_hours):
        other = two_hours
        rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
        expected = pd.period_range("2014-01-01 12:00", "2014-01-05 12:00", freq="H")

        result = rng + other
        tm.assert_index_equal(result, expected)

        rng += other
        tm.assert_index_equal(rng, expected)

    def test_pi_add_timedeltalike_mismatched_freq_hourly(self, not_hourly):
        other = not_hourly
        rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
        msg = "Input has different freq(=.+)? from Period.*?\\(freq=H\\)"

        with pytest.raises(IncompatibleFrequency, match=msg):
            rng + other

        with pytest.raises(IncompatibleFrequency, match=msg):
            rng += other

    def test_pi_sub_isub_timedeltalike_hourly(self, two_hours):
        other = two_hours
        rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
        expected = pd.period_range("2014-01-01 08:00", "2014-01-05 08:00", freq="H")

        result = rng - other
        tm.assert_index_equal(result, expected)

        rng -= other
        tm.assert_index_equal(rng, expected)

    def test_add_iadd_timedeltalike_annual(self):
        # offset
        # DateOffset
        rng = pd.period_range("2014", "2024", freq="A")
        result = rng + pd.offsets.YearEnd(5)
        expected = pd.period_range("2019", "2029", freq="A")
        tm.assert_index_equal(result, expected)
        rng += pd.offsets.YearEnd(5)
        tm.assert_index_equal(rng, expected)

    def test_pi_add_sub_timedeltalike_freq_mismatch_annual(self, mismatched_freq):
        other = mismatched_freq
        rng = pd.period_range("2014", "2024", freq="A")
        msg = "Input has different freq(=.+)? from Period.*?\\(freq=A-DEC\\)"
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng + other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng += other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng - other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng -= other

    def test_pi_add_iadd_timedeltalike_M(self):
        rng = pd.period_range("2014-01", "2016-12", freq="M")
        expected = pd.period_range("2014-06", "2017-05", freq="M")

        result = rng + pd.offsets.MonthEnd(5)
        tm.assert_index_equal(result, expected)

        rng += pd.offsets.MonthEnd(5)
        tm.assert_index_equal(rng, expected)

    def test_pi_add_sub_timedeltalike_freq_mismatch_monthly(self, mismatched_freq):
        other = mismatched_freq
        rng = pd.period_range("2014-01", "2016-12", freq="M")
        msg = "Input has different freq(=.+)? from Period.*?\\(freq=M\\)"
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng + other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng += other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng - other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng -= other

    @pytest.mark.parametrize("transpose", [True, False])
    def test_parr_add_sub_td64_nat(self, box_with_array, transpose):
        # GH#23320 special handling for timedelta64("NaT")
        pi = pd.period_range("1994-04-01", periods=9, freq="19D")
        other = np.timedelta64("NaT")
        expected = pd.PeriodIndex(["NaT"] * 9, freq="19D")

        obj = tm.box_expected(pi, box_with_array, transpose=transpose)
        expected = tm.box_expected(expected, box_with_array, transpose=transpose)

        result = obj + other
        tm.assert_equal(result, expected)
        result = other + obj
        tm.assert_equal(result, expected)
        result = obj - other
        tm.assert_equal(result, expected)
        msg = r"cannot subtract .* from .*"
        with pytest.raises(TypeError, match=msg):
            other - obj

    @pytest.mark.parametrize(
        "other",
        [
            np.array(["NaT"] * 9, dtype="m8[ns]"),
            TimedeltaArray._from_sequence(["NaT"] * 9),
        ],
    )
    def test_parr_add_sub_tdt64_nat_array(self, box_with_array, other):
        # same as above but with an all-NaT timedelta64 array
        pi = pd.period_range("1994-04-01", periods=9, freq="19D")
        expected = pd.PeriodIndex(["NaT"] * 9, freq="19D")

        obj = tm.box_expected(pi, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = obj + other
        tm.assert_equal(result, expected)
        result = other + obj
        tm.assert_equal(result, expected)
        result = obj - other
        tm.assert_equal(result, expected)
        msg = r"cannot subtract .* from .*"
        with pytest.raises(TypeError, match=msg):
            other - obj

    # ---------------------------------------------------------------
    # Unsorted

    def test_parr_add_sub_index(self):
        # Check that PeriodArray defers to Index on arithmetic ops
        pi = pd.period_range("2000-12-31", periods=3)
        parr = pi.array

        result = parr - pi
        expected = pi - pi
        tm.assert_index_equal(result, expected)

    def test_parr_add_sub_object_array(self):
        # mixed object array of Timedelta / offset / int is applied
        # element-wise (hence the PerformanceWarning)
        pi = pd.period_range("2000-12-31", periods=3, freq="D")
        parr = pi.array

        other = np.array([pd.Timedelta(days=1), pd.offsets.Day(2), 3])

        with tm.assert_produces_warning(PerformanceWarning):
            result = parr + other

        expected = pd.PeriodIndex(
            ["2001-01-01", "2001-01-03", "2001-01-05"], freq="D"
        ).array
        tm.assert_equal(result, expected)

        with tm.assert_produces_warning(PerformanceWarning):
            result = parr - other

        expected = pd.PeriodIndex(["2000-12-30"] * 3, freq="D").array
        tm.assert_equal(result, expected)
class TestPeriodSeriesArithmetic:
    def test_ops_series_timedelta(self):
        # GH#13043: a one-day Timedelta or Day offset shifts every element
        # of a daily Period series, in either operand order.
        ser = pd.Series(
            [pd.Period("2015-01-01", freq="D"), pd.Period("2015-01-02", freq="D")],
            name="xxx",
        )
        assert ser.dtype == "Period[D]"

        expected = pd.Series(
            [pd.Period("2015-01-02", freq="D"), pd.Period("2015-01-03", freq="D")],
            name="xxx",
        )

        for shift in [pd.Timedelta("1 days"), pd.tseries.offsets.Day()]:
            tm.assert_series_equal(ser + shift, expected)
            tm.assert_series_equal(shift + ser, expected)

    def test_ops_series_period(self):
        # GH#13043: Period scalar minus Period series (and series minus
        # series) yields an object-dtype series of offsets.
        ser = pd.Series(
            [pd.Period("2015-01-01", freq="D"), pd.Period("2015-01-02", freq="D")],
            name="xxx",
        )
        assert ser.dtype == "Period[D]"

        per = pd.Period("2015-01-10", freq="D")
        off = per.freq

        # dtype will be object because of original dtype
        expected = pd.Series([9 * off, 8 * off], name="xxx", dtype=object)
        tm.assert_series_equal(per - ser, expected)
        tm.assert_series_equal(ser - per, -1 * expected)

        s2 = pd.Series(
            [pd.Period("2015-01-05", freq="D"), pd.Period("2015-01-04", freq="D")],
            name="xxx",
        )
        assert s2.dtype == "Period[D]"

        expected = pd.Series([4 * off, 2 * off], name="xxx", dtype=object)
        tm.assert_series_equal(s2 - ser, expected)
        tm.assert_series_equal(ser - s2, -1 * expected)
class TestPeriodIndexSeriesMethods:
""" Test PeriodIndex and Period Series Ops consistency """
def _check(self, values, func, expected):
    """Apply ``func`` to a PeriodIndex built from ``values`` and to the
    equivalent Series, verifying both results against ``expected``."""
    idx = pd.PeriodIndex(values)
    tm.assert_equal(func(idx), expected)

    ser = pd.Series(values)
    tm.assert_series_equal(func(ser), pd.Series(expected, name=values.name))
def test_pi_ops(self):
    """Integer add/sub shift a monthly PeriodIndex/Series by whole months;
    subtracting a Period yields an Index of offsets."""
    idx = PeriodIndex(
        ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
    )

    expected = PeriodIndex(
        ["2011-03", "2011-04", "2011-05", "2011-06"], freq="M", name="idx"
    )
    self._check(idx, lambda x: x + 2, expected)
    self._check(idx, lambda x: 2 + x, expected)
    self._check(idx + 2, lambda x: x - 2, idx)

    result = idx - Period("2011-01", freq="M")
    off = idx.freq
    exp = pd.Index([0 * off, 1 * off, 2 * off, 3 * off], name="idx")
    tm.assert_index_equal(result, exp)

    result = Period("2011-01", freq="M") - idx
    exp = pd.Index([0 * off, -1 * off, -2 * off, -3 * off], name="idx")
    tm.assert_index_equal(result, exp)
@pytest.mark.parametrize("ng", ["str", 1.5])
@pytest.mark.parametrize(
    "func",
    [
        lambda obj, ng: obj + ng,
        lambda obj, ng: ng + obj,
        lambda obj, ng: obj - ng,
        lambda obj, ng: ng - obj,
        lambda obj, ng: np.add(obj, ng),
        lambda obj, ng: np.add(ng, obj),
        lambda obj, ng: np.subtract(obj, ng),
        lambda obj, ng: np.subtract(ng, obj),
    ],
)
def test_parr_ops_errors(self, ng, func, box_with_array):
    # Arithmetic between a boxed Period array and a str or float must
    # raise TypeError, via both Python operators and numpy ufuncs.
    idx = PeriodIndex(
        ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
    )
    obj = tm.box_expected(idx, box_with_array)
    # alternation covers the messages raised by the different boxes
    msg = (
        r"unsupported operand type\(s\)|can only concatenate|"
        r"must be str|object to str implicitly"
    )
    with pytest.raises(TypeError, match=msg):
        func(obj, ng)
def test_pi_ops_nat(self):
    """Integer add/sub propagate NaT; with a multiplied freq the step is
    in units of that freq (so freq="2M" + 3 advances six months)."""
    cases = [
        # (freq, step, expected labels after adding ``step``)
        ("M", 2, ["2011-03", "2011-04", "NaT", "2011-06"]),
        ("2M", 3, ["2011-07", "2011-08", "NaT", "2011-10"]),
    ]
    for freq, step, exp_vals in cases:
        idx = PeriodIndex(
            ["2011-01", "2011-02", "NaT", "2011-04"], freq=freq, name="idx"
        )
        expected = PeriodIndex(exp_vals, freq=freq, name="idx")

        self._check(idx, lambda x: x + step, expected)
        self._check(idx, lambda x: step + x, expected)
        self._check(idx, lambda x: np.add(x, step), expected)

        self._check(idx + step, lambda x: x - step, idx)
        self._check(idx + step, lambda x: np.subtract(x, step), idx)
def test_pi_ops_array_int(self):
    """Element-wise add/sub of an integer ndarray shifts each Period by
    its own amount; NaT entries stay NaT."""
    idx = PeriodIndex(
        ["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx"
    )

    cases = [
        (lambda x: x + np.array([1, 2, 3, 4]),
         ["2011-02", "2011-04", "NaT", "2011-08"]),
        (lambda x: np.add(x, np.array([4, -1, 1, 2])),
         ["2011-05", "2011-01", "NaT", "2011-06"]),
        (lambda x: x - np.array([1, 2, 3, 4]),
         ["2010-12", "2010-12", "NaT", "2010-12"]),
        (lambda x: np.subtract(x, np.array([3, 2, 3, -2])),
         ["2010-10", "2010-12", "NaT", "2011-06"]),
    ]
    for func, exp_vals in cases:
        self._check(idx, func, PeriodIndex(exp_vals, freq="M", name="idx"))
def test_pi_ops_offset(self):
    """Adding/subtracting a Day offset shifts a daily PeriodIndex/Series."""
    idx = PeriodIndex(
        ["2011-01-01", "2011-02-01", "2011-03-01", "2011-04-01"],
        freq="D",
        name="idx",
    )

    cases = [
        (lambda x: x + pd.offsets.Day(),
         ["2011-01-02", "2011-02-02", "2011-03-02", "2011-04-02"]),
        (lambda x: x + pd.offsets.Day(2),
         ["2011-01-03", "2011-02-03", "2011-03-03", "2011-04-03"]),
        (lambda x: x - pd.offsets.Day(2),
         ["2010-12-30", "2011-01-30", "2011-02-27", "2011-03-30"]),
    ]
    for func, exp_vals in cases:
        self._check(idx, func, PeriodIndex(exp_vals, freq="D", name="idx"))
def test_pi_offset_errors(self):
    """An offset whose freq is incompatible with the index freq raises
    IncompatibleFrequency for both PeriodIndex and Series."""
    idx = PeriodIndex(
        ["2011-01-01", "2011-02-01", "2011-03-01", "2011-04-01"],
        freq="D",
        name="idx",
    )
    ser = pd.Series(idx)

    bad = pd.offsets.Hour(2)
    add_msg = r"Input has different freq=2H from Period.*?\(freq=D\)"
    sub_msg = r"Input has different freq=-2H from Period.*?\(freq=D\)"

    # Series op is applied per Period instance, thus error is raised
    # from Period
    for obj in [idx, ser]:
        with pytest.raises(IncompatibleFrequency, match=add_msg):
            obj + bad
        with pytest.raises(IncompatibleFrequency, match=add_msg):
            bad + obj
        with pytest.raises(IncompatibleFrequency, match=sub_msg):
            obj - bad
def test_pi_sub_period(self):
    # GH#13071: PeriodIndex minus a Period gives an Index of offsets;
    # the numpy ufunc must match the operator.
    idx = PeriodIndex(
        ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
    )
    per = pd.Period("2012-01", freq="M")
    off = idx.freq

    expected = pd.Index([-12 * off, -11 * off, -10 * off, -9 * off], name="idx")
    tm.assert_index_equal(idx - per, expected)
    tm.assert_index_equal(np.subtract(idx, per), expected)

    expected = pd.Index([12 * off, 11 * off, 10 * off, 9 * off], name="idx")
    tm.assert_index_equal(per - idx, expected)
    tm.assert_index_equal(np.subtract(per, idx), expected)

    # Subtracting a NaT Period yields an all-NaT TimedeltaIndex whose
    # freq matches the expected index in both directions.
    expected = pd.TimedeltaIndex([np.nan] * 4, name="idx")
    nat = pd.Period("NaT", freq="M")
    for result in (idx - nat, nat - idx):
        tm.assert_index_equal(result, expected)
        assert result.freq == expected.freq
def test_pi_sub_pdnat(self):
    # GH#13071: pd.NaT +- PeriodIndex -> all-NaT TimedeltaIndex.
    idx = PeriodIndex(
        ["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx"
    )
    expected = pd.TimedeltaIndex([pd.NaT] * 4, name="idx")
    for result in (pd.NaT - idx, idx - pd.NaT):
        tm.assert_index_equal(result, expected)
def test_pi_sub_period_nat(self):
    # GH#13071: NaT entries in the index propagate through Period
    # subtraction in both directions.
    idx = PeriodIndex(
        ["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
    )
    per = pd.Period("2012-01", freq="M")
    off = idx.freq

    expected = pd.Index([-12 * off, pd.NaT, -10 * off, -9 * off], name="idx")
    tm.assert_index_equal(idx - per, expected)

    expected = pd.Index([12 * off, pd.NaT, 10 * off, 9 * off], name="idx")
    tm.assert_index_equal(per - idx, expected)

    nat = pd.Period("NaT", freq="M")
    expected = pd.TimedeltaIndex([np.nan] * 4, name="idx")
    tm.assert_index_equal(idx - nat, expected)
    tm.assert_index_equal(nat - idx, expected)
@pytest.mark.parametrize("scalars", ["a", False, 1, 1.0, None])
def test_comparison_operations(self, scalars):
    # GH 28980: comparing period data against a non-Period scalar is
    # elementwise False rather than raising.
    ser = Series([pd.Period("2019"), pd.Period("2020")], dtype="period[A-DEC]")
    expected = Series([False, False])
    tm.assert_series_equal(ser == scalars, expected)
| {
"repo_name": "rs2/pandas",
"path": "pandas/tests/arithmetic/test_period.py",
"copies": "1",
"size": "56440",
"license": "bsd-3-clause",
"hash": 5386055590892858000,
"line_mean": 35.7926988266,
"line_max": 88,
"alpha_frac": 0.5589475549,
"autogenerated": false,
"ratio": 3.4676824772671417,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.952377835817405,
"avg_score": 0.000570334798618349,
"num_lines": 1534
} |
import struct, sys
from ast import literal_eval
errno = 0
def parse(expr):
    """Tokenize a whitespace-separated postfix expression.

    Returns a list of ints (numeric tokens) and one-character operator
    strings ('+', '-', '*', '/', '%', '?').  Unrecognized tokens are
    skipped with a warning.
    """
    out = []
    # str.split() never yields whitespace-only tokens, so the original
    # dead `token == ' '` branch was removed.
    for token in expr.split():
        if token.isdigit():
            out.append(int(token))
        elif token in ('+', '-', '*', '/', '%', '?'):
            out.append(token)
        else:
            # print(...) with one argument works on Python 2 and 3 alike
            # (the original `print "..."` statement is py2-only).
            print("Warning: Unrecognized token: " + str(token) + "\n")
    return out
def assemble(parse_stack):
    """Translate a parsed postfix token list into mini-vm bytecode.

    Each instruction is 4 bytes: opcode then three operands.  Ints are
    loaded ('$') into consecutive virtual-stack registers; a binary
    operator consumes the top two registers and writes the result into
    the lower one; '?' prints the stack top.  A '#' halt instruction is
    always appended.  On stack overflow/underflow the offending
    instruction is removed, the module-level ``errno`` is set to 1 and
    assembly stops.  (Only prints were changed to the py2/py3-compatible
    call form; the emitted bytecode is identical.)
    """
    out = []
    num_regs = 16
    stack_top = 0
    i = 0
    global errno
    for item in parse_stack:
        if isinstance(item, int):
            # Load immediate into the next free register.
            out += [ord('$'), stack_top, item, 0]
            stack_top += 1
            i += 1
        elif item in ['+', '-', '*', '/', '%']:
            # Binary op: pops two registers, result replaces the lower one.
            stack_top -= 1
            out += [ord(item), stack_top - 1, stack_top, stack_top - 1]
            i += 1
        elif item == '?':
            out += [ord('?'), stack_top - 1, 0, 0]
            i += 1
        else:
            print("Warning: Unknown opcode. Output may be incorrect.")
            i += 1
        # stack_top > num_regs means the push above already targeted an
        # out-of-range register (equivalent to the original `>= 17`).
        if stack_top > num_regs:
            print("Error: In term " + str(i) + ": Too many values pushed onto stack. Limit is: " + str(num_regs))
            del out[-4:]
            errno = 1
            break
        if stack_top <= 0:
            print("Error: In term " + str(i) + ": Too many operators. Stack will underflow.")
            del out[-4:]
            errno = 1
            break
    out += [ord('#'), 0, 0, 0]
    return out
# Example usage:
#   $ python arithc.py '1 2 3 + + ?'
if __name__ == '__main__':
    try:
        bytelist = assemble(parse(sys.argv[1]))
        result = [struct.pack("<B", b) for b in bytelist]
        if errno == 0:
            # On Python 3 raw bytes must go to the underlying binary
            # buffer; Python 2's stdout accepts them directly, so fall
            # back to sys.stdout when .buffer is absent.
            stream = getattr(sys.stdout, 'buffer', sys.stdout)
            for b in result:
                stream.write(b)
    except IndexError:
        # No expression given on the command line.
        print("USAGE: python arithc.py [POSTFIX_EXPR]")
        print("\nExample:\n\tpython arithc.py '1 2 3 + + ?'")
| {
"repo_name": "philipaconrad/mini-vm",
"path": "examples/arith/arithc.py",
"copies": "1",
"size": "2255",
"license": "mit",
"hash": 4979442437776330000,
"line_mean": 27.2857142857,
"line_max": 112,
"alpha_frac": 0.4740576497,
"autogenerated": false,
"ratio": 3.7583333333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4732390983033333,
"avg_score": null,
"num_lines": null
} |
"""Aritmetika na N, s razlikom što su + i * lijevo asocirani višemjesni.
Uz implicitno množenje ako desni faktor počinje zagradom (npr. 2(3+1)=8).
Implementiran je i optimizator, baš kao u originalnom aritmetika_N.py."""
from vepar import *
from backend import Python_eval
class T(TipoviTokena):
    """Token types: arithmetic over N with n-ary +, * and parentheses."""
    PLUS, PUTA, OTVORENA, ZATVORENA = '+*()'
    class BROJ(Token):
        # Value of a natural-number literal.
        def vrijednost(t): return int(t.sadržaj)
        # A literal is already fully optimized.
        def optim(t): return t
@lexer
def an(lex):
    # Tokenizer: skip whitespace, group decimal digits into BROJ tokens,
    # and emit any other character as the matching literal token (a
    # lexical error is raised for characters not declared in T).
    for znak in lex:
        if znak.isspace(): lex.zanemari()
        elif znak.isdecimal():
            lex.prirodni_broj(znak)
            yield lex.token(T.BROJ)
        else: yield lex.literal(T)
### Beskontekstna gramatika
# izraz -> član | izraz PLUS član
# član -> faktor | član PUTA faktor | član zagrade
# faktor -> BROJ | zagrade
# zagrade -> OTVORENA izraz ZATVORENA
class P(Parser):
    # Recursive-descent parser.  The vepar operators have side effects:
    # `p >= tok` consumes the token if it matches (else returns falsy),
    # `p > tok` only peeks, `p >> tok` consumes or raises a syntax error.
    def izraz(p) -> 'Zbroj|član':
        # izraz -> član | izraz PLUS član   (left-associative, n-ary)
        trenutni = [p.član()]
        while p >= T.PLUS: trenutni.append(p.član())
        return Zbroj.ili_samo(trenutni)
    def član(p) -> 'Umnožak|faktor':
        # član -> faktor | član PUTA faktor | član zagrade
        # The `p > T.OTVORENA` peek implements implicit multiplication:
        # a factor beginning with '(' continues the product, e.g. 2(3+1).
        trenutni = [p.faktor()]
        while p >= T.PUTA or p > T.OTVORENA:
            trenutni.append(p.faktor())
        return Umnožak.ili_samo(trenutni)
    def faktor(p) -> 'BROJ|izraz':
        # faktor -> BROJ | OTVORENA izraz ZATVORENA
        if broj := p >= T.BROJ: return broj
        elif p >> T.OTVORENA:
            u_zagradi = p.izraz()
            p >> T.ZATVORENA
            return u_zagradi
nula, jedan = Token(T.BROJ, '0'), Token(T.BROJ, '1')
class Zbroj(AST):
    """AST node: n-ary sum."""
    pribrojnici: 'izraz*'  # summands
    def vrijednost(zbroj):
        # Evaluate by summing the evaluated summands.
        return sum(pribrojnik.vrijednost() for pribrojnik in zbroj.pribrojnici)
    def optim(zbroj):
        # Optimize children and drop zero summands; an empty sum is 0.
        opt_pribr = [pribrojnik.optim() for pribrojnik in zbroj.pribrojnici]
        opt_pribr = [x for x in opt_pribr if x != nula]
        if not opt_pribr: return nula
        return Zbroj.ili_samo(opt_pribr)
class Umnožak(AST):
    """AST node: n-ary product."""
    faktori: 'izraz*'  # factors
    def vrijednost(umnožak):
        # NOTE(review): relies on `math` being in scope via
        # `from vepar import *` — this file does not import math itself;
        # confirm vepar re-exports it.
        return math.prod(faktor.vrijednost() for faktor in umnožak.faktori)
    def optim(umnožak):
        # Zero annihilates the product; unit factors are dropped; an
        # empty product is 1.
        opt_fakt = [faktor.optim() for faktor in umnožak.faktori]
        if nula in opt_fakt: return nula
        opt_fakt = [x for x in opt_fakt if x != jedan]
        if not opt_fakt: return jedan
        else: return Umnožak.ili_samo(opt_fakt)
def testiraj(izraz):
    """Parse, display, optimize and evaluate `izraz`, then compare the
    result with Python's own evaluation via the backend; prints the
    verdict and raises ArithmeticError on a mismatch."""
    print('-' * 60)
    prikaz(stablo := P(izraz), 3)
    prikaz(opt := stablo.optim(), 3)
    mi = opt.vrijednost()
    # Expressions Python's syntax rejects (e.g. implicit multiplication)
    # are still reported with our computed value.
    try: Python = Python_eval(izraz)
    except SyntaxError: return print('Python ovo ne zna!', izraz, '==', mi)
    if mi == Python: return print(izraz, '==', mi, 'OK')
    print(izraz, 'mi:', mi, 'Python:', Python, 'krivo')
    raise ArithmeticError
# Smoke tests.  The `with <Greška>` blocks assert that the enclosed call
# raises the corresponding vepar error (syntax or lexical).
an('(2+3)*4')
testiraj('(2+3)*4')
testiraj('2 + (0+1*1*2)')
testiraj('2(3+5)')
testiraj('(1+1) (0+2+0) (0+1) (3+4)')
with SintaksnaGreška: testiraj('(2+3)4')
with SintaksnaGreška: testiraj('2\t37')
with LeksičkaGreška: testiraj('2^3')
with LeksičkaGreška: testiraj('3+00')
with SintaksnaGreška: testiraj('+1')
with LeksičkaGreška: testiraj('-1')
| {
"repo_name": "vedgar/ip",
"path": "PJ/03_ar_N_višemjesni.py",
"copies": "1",
"size": "3067",
"license": "unlicense",
"hash": -4874865134944929000,
"line_mean": 28.1826923077,
"line_max": 79,
"alpha_frac": 0.618121911,
"autogenerated": false,
"ratio": 2.3637071651090342,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3481829076109034,
"avg_score": null,
"num_lines": null
} |
"""Aritmetika u Q, s deklaracijama varijabli, statičkim (sintaksnim) tipovima,
i provjerom tipova (typechecking). Podržana su tri tipa, totalno uređena s
obzirom na relaciju "biti podtip": nat (N), int (Z) i rat (Q)."""
from vepar import *
class T(TipoviTokena):
    """Token types for rational arithmetic with typed declarations."""
    NAT, INT, RAT, DIV, MOD = 'nat', 'int', 'rat', 'div', 'mod'
    PLUS, MINUS, PUTA, KROZ, NA, OTV, ZATV, JEDNAKO, UPIT = '+-*/^()=?'
    NOVIRED = '\n'
    class IME(Token):
        # A variable's type is its symbol-table entry (lookup fails for
        # undeclared names).
        def provjeri_tip(t): return rt.symtab[t]
    class BROJ(Token):
        # Numeric literals are natural numbers.
        def provjeri_tip(t): return Tip.N
@lexer
def aq(lex):
    for znak in lex:
        if znak == '\n': yield lex.literal(T)  # before the isspace check!
        elif znak.isspace(): lex.zanemari()
        elif znak.isdecimal():
            lex.prirodni_broj(znak)
            yield lex.token(T.BROJ)
        elif znak.isalpha():
            # Identifier: letters and underscores; keywords (nat, int,
            # rat, div, mod) are recognized case-insensitively.
            lex * {str.isalpha, '_'}
            yield lex.literal_ili(T.IME, case=False)
        else: yield lex.literal(T)
class Tip(enum.Enum):
    """The three static types, totally ordered by the subtype relation:
    N <= Z <= Q."""
    N = Token(T.NAT)
    Z = Token(T.INT)
    Q = Token(T.RAT)
    # N is a subtype of everything; everything is a subtype of Q.
    def __le__(t1, t2): return t1 is Tip.N or t2 is Tip.Q or t1 is t2
    def __lt__(t1, t2): return t1 <= t2 and t1 is not t2
### Beskontekstna gramatika
# start -> NOVIRED? niz_naredbi NOVIRED?
# niz_naredbi -> naredba | niz_naredbi NOVIRED naredba
# naredba -> UPIT izraz | (NAT|INT|RAT) IME JEDNAKO izraz
# izraz -> član | izraz (PLUS|MINUS) član
# član -> faktor | član (PUTA|KROZ|DIV|MOD) faktor
# faktor -> baza | baza NA faktor | MINUS faktor
# baza -> BROJ | IME | OTV izraz ZATV
class P(Parser):
    def start(self) -> 'Program':
        # start -> NOVIRED? niz_naredbi NOVIRED?
        self >= T.NOVIRED  # optionally consume a leading newline
        rt.symtab, naredbe = Memorija(), []
        while not self > KRAJ:
            naredbe.append(self.naredba())
            self >> {T.NOVIRED, KRAJ}
        return Program(naredbe)
    def naredba(self) -> 'izraz|Pridruživanje':
        # naredba -> UPIT izraz | (NAT|INT|RAT) IME JEDNAKO izraz
        if self >= T.UPIT: return self.izraz()
        token_za_tip = self >= {T.NAT, T.INT, T.RAT}
        # Declaration bookkeeping happens before the RHS is parsed.
        ažuriraj(varijabla := self >> T.IME, token_za_tip)
        self >> T.JEDNAKO
        return Pridruživanje(varijabla, token_za_tip, self.izraz())
    def izraz(self) -> 'član|Op':
        # izraz -> član ((PLUS|MINUS) član)*   left-associative
        t = self.član()
        while op := self >= {T.PLUS, T.MINUS}: t = Op(op, t, self.član())
        return t
    def član(self) -> 'faktor|Op':
        # član -> faktor ((PUTA|KROZ|DIV|MOD) faktor)*   left-associative
        trenutni = self.faktor()
        while operator := self >= {T.PUTA, T.KROZ, T.DIV, T.MOD}:
            trenutni = Op(operator, trenutni, self.faktor())
        return trenutni
    def faktor(self) -> 'baza|Op':
        # faktor -> MINUS faktor | baza (NA faktor)?   '^' right-assoc
        if op := self >= T.MINUS: return Op(op, nenavedeno, self.faktor())
        baza = self.baza()
        if op := self >= T.NA: return Op(op, baza, self.faktor())
        else: return baza
    def baza(self) -> 'BROJ|IME|izraz':
        # baza -> BROJ | IME | OTV izraz ZATV
        if broj := self >= T.BROJ: return broj
        elif varijabla := self >= T.IME:
            varijabla.provjeri_tip()  # really a declaration check
            return varijabla
        elif self >> T.OTV:
            u_zagradi = self.izraz()
            self >> T.ZATV
            return u_zagradi
### Apstraktna sintaksna stabla
# Program: naredbe:[naredba] # rt.symtab:Memorija
# Pridruživanje: varijabla:IME tip:T? vrijednost:izraz
# Op: operator:T lijevo:izraz desno:izraz
def ažuriraj(var, token_za_tip):
    """Update the symbol table for a declaration and return the
    variable's type.

    With a type token present this is a declaration: redeclaring an
    existing variable is an error (same type -> redeklaracija,
    different type -> krivi_tip).  Without a type token this is a plain
    assignment and only the existing entry is looked up.
    """
    if token_za_tip:
        tip = Tip(token_za_tip)
        if var in rt.symtab:
            pravi = var.provjeri_tip()
            if tip is pravi: raise var.redeklaracija()
            else: raise var.krivi_tip(tip, pravi)
        rt.symtab[var] = tip
    return var.provjeri_tip()
class Program(AST):
    naredbe: 'naredba*'  # statements, in source order
    def provjeri_tipove(self):
        """Typecheck every statement; print the type of each `?` query."""
        for naredba in self.naredbe:
            tip = naredba.provjeri_tip()
            # Only query expressions yield a type; assignments return None.
            if tip: print(tip)
class Pridruživanje(AST):
    varijabla: 'IME'
    tip: 'Tip?'       # declared type, or absent for a re-assignment
    vrijednost: 'izraz'
    def provjeri_tip(self):
        # The right-hand side's type must be a subtype of the variable's.
        lijevo = self.varijabla.provjeri_tip()
        desno = self.vrijednost.provjeri_tip()
        if not desno <= lijevo: raise self.varijabla.krivi_tip(lijevo, desno)
class Op(AST):
    """Binary operation; `lijevo` is `nenavedeno` for unary minus."""
    operator: 'T'
    lijevo: 'izraz'
    desno: 'izraz'
    def provjeri_tip(self):
        """Infer the static type of the operation, raising krivi_tip for
        an ill-typed exponent."""
        if self.lijevo is nenavedeno: prvi = Tip.N  # -x = 0 - x
        else: prvi = self.lijevo.provjeri_tip()
        # `o ^ tok` tests whether the operator token has the given type.
        o, drugi = self.operator, self.desno.provjeri_tip()
        if o ^ T.KROZ: return Tip.Q
        elif o ^ {T.PLUS, T.PUTA}: return max(prvi, drugi)
        elif o ^ T.MINUS: return max(prvi, drugi, Tip.Z)
        # semantics: a div b := floor(a/b), a mod b := a - a div b * b
        # BUG FIX: was `tip.Z` (a NameError at runtime) — the enum is Tip.
        elif o ^ T.DIV: return Tip.N if prvi is drugi is Tip.N else Tip.Z
        elif o ^ T.MOD: return Tip.Q if prvi is Tip.Q else drugi
        elif o ^ T.NA:
            # Natural exponent keeps the base type; integer exponent may
            # produce fractions; other exponents are ill-typed.
            if drugi is Tip.N: return prvi
            elif drugi is Tip.Z: return Tip.Q
            else: raise o.krivi_tip(prvi, drugi)
        else: assert False, f'nepokriveni slučaj operatora {o}!'
# Sample program: typed declarations, re-assignments, and `?` queries
# that print the inferred type of each queried expression.
ast = P('''
rat a = 6 / 2
a = a + 4
nat b = 8 + 1
int c = 6 ^ 2
rat d = 6
d = d ^ 5
? b mod 1
? c mod b
? 6 ^ -3 - 3
''')
prikaz(ast, 3)
ast.provjeri_tipove()
# TODO (exercise): add types R and C (to the static analysis; actually
# computing with them is a separate matter).
| {
"repo_name": "vedgar/ip",
"path": "PJ/18_ar_Q_statyping.py",
"copies": "1",
"size": "5206",
"license": "unlicense",
"hash": -7495866693834366000,
"line_mean": 30.8159509202,
"line_max": 78,
"alpha_frac": 0.5769379098,
"autogenerated": false,
"ratio": 2.52729044834308,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.36042283581430795,
"avg_score": null,
"num_lines": null
} |
"""Aritmetika u skupu racionalnih brojeva, s detekcijom grešaka.
Po uzoru na https://web.math.pmf.unizg.hr/~veky/B/IP.k2p.17-09-08.pdf."""
from vepar import *
import fractions
class T(TipoviTokena):
    """Token types for rational-number arithmetic with assignments."""
    PLUS, MINUS, PUTA, KROZ, JEDNAKO, OTV, ZATV, NOVIRED = '+-*/=()\n'
    class BROJ(Token):
        # Exact rational value of a numeric literal.
        def izračunaj(t): return fractions.Fraction(t.sadržaj)
    class IME(Token):
        def izračunaj(t):
            # Look up the variable; the error message names the
            # assignment that used the undeclared variable.
            if t in rt.memorija: return rt.memorija[t]
            else: raise t.nedeklaracija(f'pri pridruživanju {rt.pridruženo}')
@lexer
def aq(lex):
    for znak in lex:
        if znak.isdecimal():
            lex.prirodni_broj(znak)
            yield lex.token(T.BROJ)
        elif znak.isalnum():
            # Identifier: alphanumerics and underscores (cannot start
            # with a digit — digits are consumed by the branch above).
            lex * {str.isalnum, '_'}
            yield lex.token(T.IME)
        # Newline ends a statement, so only other whitespace is skipped.
        elif znak.isspace() and znak != '\n': lex.zanemari()
        else: yield lex.literal(T)
### BKG
# program -> '' | program naredba
# naredba -> IME JEDNAKO izraz NOVIRED
# izraz -> član | izraz PLUS član | izraz MINUS član
# član -> faktor | član PUTA faktor | član KROZ faktor
# faktor -> BROJ | IME | MINUS faktor | OTV izraz ZATV
class P(Parser):
    def program(p) -> 'Program':
        # program -> (IME JEDNAKO izraz NOVIRED)*
        pridruživanja = []
        while ime := p >= T.IME:
            p >> T.JEDNAKO
            pridruživanja.append((ime, p.izraz()))
            p >> T.NOVIRED
        return Program(pridruživanja)
    def izraz(p) -> 'član|Op':
        # izraz -> član ((PLUS|MINUS) član)*   left-associative
        t = p.član()
        while op := p >= {T.PLUS, T.MINUS}: t = Op(op, t, p.član())
        return t
    def član(p) -> 'faktor|Op':
        # član -> faktor ((PUTA|KROZ) faktor)*   left-associative
        t = p.faktor()
        while op := p >= {T.PUTA, T.KROZ}: t = Op(op, t, p.faktor())
        return t
    def faktor(p) -> 'Op|IME|BROJ|izraz':
        # faktor -> MINUS faktor | BROJ | IME | OTV izraz ZATV
        if op := p >= T.MINUS: return Op(op, nenavedeno, p.faktor())
        if elementarni := p >= {T.IME, T.BROJ}: return elementarni
        elif p >> T.OTV:
            u_zagradi = p.izraz()
            p >> T.ZATV
            return u_zagradi
### AST
# Program: pridruživanja:[(IME,izraz)]
# izraz: BROJ: Token
# IME: Token
# Op: op:PLUS|MINUS|PUTA|KROZ lijevo:izraz? desno:izraz
class Program(AST):
    pridruživanja: '(IME,izraz)*'  # assignments in source order
    def izvrši(program):
        """Execute all assignments in order; return the final memory."""
        rt.memorija = Memorija()
        for ime, vrijednost in program.pridruživanja:
            # Remember the current target so error messages can name it.
            rt.pridruženo = ime
            rt.memorija[ime] = vrijednost.izračunaj()
        del rt.pridruženo
        return rt.memorija
class Op(AST):
    op: 'T'
    lijevo: 'izraz?'  # absent (nenavedeno) for unary minus
    desno: 'izraz'
    def izračunaj(self):
        """Evaluate recursively; raises on division by zero."""
        if self.lijevo is nenavedeno: l = 0  # unary minus: -x = 0-x
        else: l = self.lijevo.izračunaj()
        o, d = self.op, self.desno.izračunaj()
        if o ^ T.PLUS: return l + d
        elif o ^ T.MINUS: return l - d
        elif o ^ T.PUTA: return l * d
        # Division only when the divisor is nonzero; otherwise report
        # which assignment divided by zero.
        elif d: return l / d
        else: raise self.iznimka(
            f'dijeljenje nulom pri pridruživanju {rt.pridruženo}')
# The leading backslash is needed so the program does not start with an
# empty line (newlines are significant).
ast = P('''\
a = 3 / 7
b = a + 3
c = b - b
b = a * -a
d = a / (c + 1)
e = -3 / 3
''')
prikaz(ast)
for ime, vrijednost in ast.izvrši(): print(ime.sadržaj, vrijednost, sep='=')
| {
"repo_name": "vedgar/ip",
"path": "PJ/17_aritmetika_Q.py",
"copies": "1",
"size": "3229",
"license": "unlicense",
"hash": -837983516521369300,
"line_mean": 28.2935779817,
"line_max": 77,
"alpha_frac": 0.5606013154,
"autogenerated": false,
"ratio": 2.475193798449612,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.35357951138496124,
"avg_score": null,
"num_lines": null
} |
"""Aritmética con curvas elípticas.
Este módulo permite operar con el grupo de puntos de una curva elíptica.
Para utilizar las funciones y las clases de este módulo, debe importarlo
previamente: ::
# reemplace ... por la función/clase que desea utilizar
from ccepy.curvas_elipticas import ...
Para operar con puntos de una curva elíptica, use las funciones de la forma
``curva_eliptica_sobre_*`` y los operadores aritméticos habituales.
>>> E = curva_eliptica_sobre_Fq(a=2, b=3, p=97) # y^2 = x^3 + 2x + 3 sobre F97
>>> E.coeficientes
Coeficientes(a=2, b=3)
>>> P = E(0, 10)
>>> P
(0,10)
>>> Q = E(3, 6)
>>> Q
(3,6)
>>> P + Q
(85,71)
>>> -P
(0,87)
>>> 3 * P
(23,24)
"""
import copy
from fractions import Fraction
from collections import namedtuple
from abc import ABCMeta, abstractmethod
EcuacionWeierstrass = namedtuple('Coeficientes', ['a', 'b'])
from ccepy.cuerpos_finitos import Fq, PolinomioZp # PolinomioZp para los test
class PuntoRacional(metaclass=ABCMeta):
    """Abstract base class for a rational point on an elliptic curve.

    The concrete ``Punto*Racional`` classes inherit from this one.  It
    cannot be instantiated directly; it is the starting point for
    defining elliptic curves over new fields.
    """
    __slots__ = ('_x', '_y')  # keeps many instances small
    @classmethod
    @abstractmethod
    def contiene(cls, x, y):
        """Return True when (x, y) lies on the curve."""
        return
    @abstractmethod
    def __init__(self, x, y):
        # Concrete classes must initialize self._x and self._y.
        return
    def es_elemento_neutro(self):
        """Return True when this is the neutral element (the point at
        infinity), encoded as (None, None)."""
        return self._x is None or self._y is None
    @property
    def x(self):
        """The x component; read-only.  Raises AttributeError for the
        neutral element."""
        if self.es_elemento_neutro():
            raise AttributeError("El elemento neutro no tiene componente x")
        else:
            return self._x
    @property
    def y(self):
        """The y component; read-only.  Raises AttributeError for the
        neutral element."""
        if self.es_elemento_neutro():
            raise AttributeError("El elemento neutro no tiene componente y")
        else:
            return self._y
    @classmethod
    def elemento_neutro(cls):
        """Return the neutral element of the group."""
        return cls(None, None)
    @abstractmethod
    def __eq__(self, other):
        return
    def __ne__(self, other):
        return not self.__eq__(other)
    @abstractmethod
    def __add__(self, other):
        return
    @abstractmethod
    def __neg__(self):
        return
    def __sub__(self, other):
        # Subtraction expressed through the abstract + and unary -.
        return self + (-other)
    @abstractmethod
    def __mul__(self, other):
        return
    @abstractmethod
    def __rmul__(self, other):
        return
    def __str__(self):
        if self.es_elemento_neutro():
            return "Elemento neutro"
        else:
            return "({0},{1})".format(self.x, self.y)
    __repr__ = __str__
def curva_eliptica_sobre_Fq(a, b, p, n=1, pol_irreducible=None):
    """Return the point constructor for an elliptic curve over a finite
    field of q = p**n elements, characteristic different from 2 and 3.

    >>> E = curva_eliptica_sobre_Fq(1, 1, 5, 2) # y^2 = x^3 + x + 1 sobre F25
    >>> E
    <class 'ccepy.curvas_elipticas.curva_eliptica_sobre_Fq.<locals>.PuntoFqRacional'>
    >>> E(0, 1)
    ({[0, 0]; 25},{[1, 0]; 25})

    The first two arguments are the coefficients of the simplified
    Weierstrass equation y^2 = x^3 + a x + b; they may be ``int`` or
    ``EnteroModuloP``/``ElementoFq`` values.  The last three arguments
    define the finite field of p**n elements the curve lives over.

    Args:
        a: coefficient of x in the Weierstrass equation.
        b: constant term of the Weierstrass equation.
        p (int): a prime number.
        n (Optional[int]): a natural number.
        pol_irreducible (Optional[PolinomioZp]): an irreducible
            polynomial of degree n.

    Return:
        PuntoFqRacional: the class representing points of the curve.

    Raises:
        ValueError: if p is 2 or 3, or the discriminant is zero.
    """
    # The class is defined inside the factory so every curve gets its own
    # class object with its own (a, b) and field bound as class attributes.
    class PuntoFqRacional(PuntoRacional):
        """A point of the curve y^2 = x^3 + a x + b over F_q.  Supports
        +, -, and multiplication by an integer.

        Attributes:
            coeficientes (Tuple): the (a, b) Weierstrass coefficients.
            discriminante: the curve's discriminant.  (class attribute)
            Fq: constructor for elements of the field.  (class attribute)
        """
        # (a, b) of y^2 = x^3 + a*x + b; filled in by the factory below.
        coeficientes = None
        discriminante = None
        Fq = None
        @classmethod
        def contiene(cls, x, y):
            # A point lies on the curve iff it satisfies the equation.
            a, b = cls.coeficientes
            lado_izquierdo_ecuacion = y**2
            lado_derecho_ecuacion = x**3 + a * x + b
            return lado_izquierdo_ecuacion == lado_derecho_ecuacion
        def __init__(self, x, y):
            if x is None or y is None:
                # (None, None) encodes the point at infinity.
                self._x = None
                self._y = None
            else:
                self._x = PuntoFqRacional.Fq(x)
                self._y = PuntoFqRacional.Fq(y)
                if not PuntoFqRacional.contiene(self._x, self._y):
                    raise ValueError("El punto ({0}, {1})".format(x, y) +
                                     " no pertenece a la curva.")
        def __eq__(self, other):
            if self.es_elemento_neutro():
                return other.es_elemento_neutro()
            elif other.es_elemento_neutro():
                return False
            return self.x == other.x and self.y == other.y
        def __add__(self, other):
            # Chord-and-tangent group law.
            if self.es_elemento_neutro():
                return other
            elif other.es_elemento_neutro():
                return self
            x1, y1 = self.x, self.y
            x2, y2 = other.x, other.y
            a = PuntoFqRacional.coeficientes.a
            Fq = PuntoFqRacional.Fq
            if self == other:
                if y1 == Fq(0):
                    # P = -P, so doubling yields the neutral element.
                    return PuntoFqRacional.elemento_neutro()
                else:
                    # Tangent-line doubling.
                    m = (Fq(3) * x1**2 + a) / (Fq(2) * y1)
                    x3 = m**2 - Fq(2) * x1
                    y3 = m * (x1 - x3) - y1
                    return PuntoFqRacional(x3, y3)
            elif x1 == x2:
                # y1 != y2: the points are inverses; the sum is neutral.
                return PuntoFqRacional.elemento_neutro()
            else:
                # Chord through two distinct points.
                m = (y2 - y1) / (x2 - x1)
                x3 = m**2 - x1 - x2
                y3 = m * (x1 - x3) - y1
                return PuntoFqRacional(x3, y3)
        def __neg__(self):
            if self.es_elemento_neutro():
                return self
            else:
                return PuntoFqRacional(self.x, -self.y)
        @classmethod
        def _multiplicacion_por_duplicacion(cls, punto, k):
            """Compute k * punto (k >= 0) by double-and-add, scanning the
            bits of k from most to least significant."""
            rep_binaria_k = "".join(bin(k)[2:])  # (k_t, k_{t-1},..., k_0)
            Q = PuntoFqRacional.elemento_neutro()
            P = punto
            for k_i in rep_binaria_k:
                Q = Q + Q  # double
                if k_i == "1":
                    Q = Q + P  # add
            return Q
        def __mul__(self, entero):
            if self.es_elemento_neutro():
                return self
            elif entero < 0:
                # k * P = (-k) * (-P) for negative k.
                return PuntoFqRacional._multiplicacion_por_duplicacion(-self, -entero)
            else:
                return PuntoFqRacional._multiplicacion_por_duplicacion(self, entero)
        __rmul__ = __mul__
    # The simplified equation form is only valid away from char 2 and 3.
    if p == 2 or p == 3:
        raise ValueError("p no puede ser ni 2 ni 3.")
    F_q = Fq(p, n, pol_irreducible)
    A = F_q(a)
    B = F_q(b)
    # A smooth curve requires a nonzero discriminant.
    discriminante = F_q(4) * A**3 + F_q(27) * B**2
    if discriminante == F_q.cero():
        raise ValueError("El discriminant, 4a^3 + 27b^2, no puede ser cero.")
    PuntoFqRacional.discriminante = discriminante
    PuntoFqRacional.coeficientes = EcuacionWeierstrass(A, B)
    PuntoFqRacional.Fq = F_q
    return PuntoFqRacional
def curva_eliptica_sobre_F2m(a, b, m, pol_irreducible=None):
    """Return the point constructor for an elliptic curve over the
    finite field of 2**m elements.

    >>> pol_irreducible = PolinomioZp([1, 1, 0, 0, 1], p=2)
    >>> F16 = Fq(2, 4, pol_irreducible)
    >>> a = F16([0, 0, 0, 1])
    >>> b = F16([1, 0, 0, 1])
    >>> E = curva_eliptica_sobre_F2m(a, b, 4, pol_irreducible)
    >>> E
    <class 'ccepy.curvas_elipticas.curva_eliptica_sobre_F2m.<locals>.PuntoF2mRacional'>
    >>> E(F16.uno(), F16.uno())
    ({[1, 0, 0, 0]; 16},{[1, 0, 0, 0]; 16})

    The first two arguments are the coefficients of the simplified
    (characteristic-2) Weierstrass equation y^2 + x y = x^3 + a x^2 + b;
    they may be ``int`` or ``EnteroModuloP``/``ElementoFq`` values.  The
    last two arguments define the field of 2**m elements.

    Args:
        a: coefficient of x^2 in the Weierstrass equation.
        b: constant term of the Weierstrass equation.
        m (int): a natural number.
        pol_irreducible (Optional[PolinomioZp]): an irreducible
            polynomial of degree m.

    Return:
        PuntoF2mRacional: the class representing points of the curve.

    Raises:
        ValueError: if the discriminant (b) is zero.
    """
    # Defined inside the factory so each curve binds its own class attrs.
    class PuntoF2mRacional(PuntoRacional):
        """A point of the curve y^2 + x y = x^3 + a x^2 + b over
        F_{2^m}.  Supports +, -, and multiplication by an integer.

        Attributes:
            coeficientes (Tuple): the (a, b) Weierstrass coefficients.
            discriminante: the curve's discriminant (b here).
            F2m: constructor for elements of the field of 2**m elements.
        """
        # (a, b) of y^2 + x y = x^3 + a x^2 + b; filled in below.
        coeficientes = None
        discriminante = None
        F2m = None
        @classmethod
        def contiene(cls, x, y):
            """Return True when (x, y) satisfies the curve equation."""
            a, b = cls.coeficientes
            lado_izquierdo_ecuacion = y**2 + x * y
            lado_derecho_ecuacion = x**3 + a * x**2 + b
            return lado_izquierdo_ecuacion == lado_derecho_ecuacion
        def __init__(self, x, y):
            if x is None or y is None:
                # (None, None) encodes the point at infinity.
                self._x = None
                self._y = None
            else:
                self._x = PuntoF2mRacional.F2m(x)
                self._y = PuntoF2mRacional.F2m(y)
                if not PuntoF2mRacional.contiene(self._x, self._y):
                    raise ValueError("El punto ({0}, {1})".format(x, y) +
                                     " no pertenece a la curva.")
        def __eq__(self, other):
            if self.es_elemento_neutro():
                return other.es_elemento_neutro()
            elif other.es_elemento_neutro():
                return False
            return self.x == other.x and self.y == other.y
        def __add__(self, other):
            # Group law for the characteristic-2 Weierstrass form.
            if self.es_elemento_neutro():
                return other
            elif other.es_elemento_neutro():
                return self
            x1, y1 = self.x, self.y
            x2, y2 = other.x, other.y
            a = PuntoF2mRacional.coeficientes.a
            F2m = PuntoF2mRacional.F2m
            if self == other:
                if x1 == F2m(0):
                    # P = Q and P = -P: doubling gives the neutral element.
                    return PuntoF2mRacional.elemento_neutro()
                else:
                    # P = Q, P != -P: compute 2P.
                    m = x1 + y1 / x1
                    x3 = m**2 + m + a
                    y3 = x1**2 + (m + 1) * x3
                    return PuntoF2mRacional(x3, y3)
            elif x1 == x2:
                # (y1 != y2): P != Q with P = -Q, so P + (-P) is neutral.
                return PuntoF2mRacional.elemento_neutro()
            else:
                # P != +-Q: generic addition.
                m = (y1 + y2) / (x1 + x2)
                x3 = m**2 + m + x1 + x2 + a
                y3 = m * (x1 + x3) + x3 + y1
                return PuntoF2mRacional(x3, y3)
        def __neg__(self):
            if self.es_elemento_neutro():
                return self
            else:
                # In characteristic 2, -(x, y) = (x, x + y).
                return PuntoF2mRacional(self.x, self.x + self.y)
        @classmethod
        def _multiplicacion_por_duplicacion(cls, punto, k):
            # double-and-add scalar multiplication k * punto (k >= 0)
            rep_binaria_k = "".join(bin(k)[2:])  # (k_t, k_{t-1},..., k_0)
            Q = PuntoF2mRacional.elemento_neutro()
            P = punto
            for k_i in rep_binaria_k:
                Q = Q + Q  # double
                if k_i == "1":
                    Q = Q + P  # add
            return Q
        def __mul__(self, entero):
            if self.es_elemento_neutro():
                return self
            elif entero < 0:
                # k * P = (-k) * (-P) for negative k.
                return PuntoF2mRacional._multiplicacion_por_duplicacion(-self, -entero)
            else:
                return PuntoF2mRacional._multiplicacion_por_duplicacion(self, entero)
        __rmul__ = __mul__
    F2m = Fq(2, m, pol_irreducible)
    A = F2m(a)
    B = F2m(b)
    # For this curve family the discriminant is simply b.
    discriminante = B
    if discriminante == F2m.cero():
        raise ValueError("El discriminant, b, no puede ser cero.")
    PuntoF2mRacional.discriminante = discriminante
    PuntoF2mRacional.coeficientes = EcuacionWeierstrass(A, B)
    PuntoF2mRacional.F2m = F2m
    return PuntoF2mRacional
def curva_eliptica_sobre_Q(a, b):
    """Return the point constructor for an elliptic curve over the
    rational numbers.

    >>> E = curva_eliptica_sobre_Q(0, 4) # y^2 = x^3 + 4 sobre Q
    >>> E(0, -2)
    (0,-2)

    The arguments are the coefficients of the simplified Weierstrass
    equation y^2 = x^3 + a x + b; they may be ``int`` or
    ``fractions.Fraction`` values.

    Args:
        a: coefficient of x in the Weierstrass equation.
        b: constant term of the Weierstrass equation.

    Return:
        PuntoQRacional: the class representing points of the curve.

    Raises:
        ValueError: if the discriminant 4a^3 + 27b^2 is zero.
    """
    # Defined inside the factory so each curve binds its own class attrs.
    class PuntoQRacional(PuntoRacional):
        """A point of the curve y^2 = x^3 + a x + b over Q.  Supports
        +, -, and multiplication by any int (negatives included).

        Attributes:
            coeficientes (Tuple): the (a, b) Weierstrass coefficients.
            discriminante: the curve's discriminant.  (class attribute)
        """
        coeficientes = None
        discriminante = None
        @classmethod
        def contiene(cls, x, y):
            """Return True when (x, y) satisfies the curve equation."""
            a, b = cls.coeficientes
            lado_izquierdo_ecuacion = y**2
            lado_derecho_ecuacion = x**3 + a * x + b
            return lado_izquierdo_ecuacion == lado_derecho_ecuacion
        def __init__(self, x, y):
            if x is None or y is None:
                # (None, None) encodes the point at infinity.
                self._x = None
                self._y = None
            else:
                if PuntoQRacional.contiene(x, y):
                    self._x = Fraction(x)  # also accepts plain ints
                    self._y = Fraction(y)
                else:
                    raise ValueError("El punto ({0}, {1})".format(x, y) +
                                     " no pertenece a la curva.")
        def __eq__(self, other):
            if self.es_elemento_neutro():
                return other.es_elemento_neutro()
            elif other.es_elemento_neutro():
                return False
            return self.x == other.x and self.y == other.y
        def __add__(self, other):
            # Chord-and-tangent group law over Q.
            if self.es_elemento_neutro():
                return other
            elif other.es_elemento_neutro():
                return self
            x1, y1 = self.x, self.y
            x2, y2 = other.x, other.y
            a = PuntoQRacional.coeficientes.a
            if self == other:
                if y1 == 0:
                    # P = -P: doubling gives the neutral element.
                    return PuntoQRacional.elemento_neutro()
                else:
                    # Tangent-line doubling.
                    m = (3 * x1**2 + a) / (2 * y1)
                    x3 = m**2 - 2 * x1
                    y3 = m * (x1 - x3) - y1
                    return PuntoQRacional(x3, y3)
            elif x1 == x2:
                # Inverse points: the sum is the neutral element.
                return PuntoQRacional.elemento_neutro()
            else:
                # Chord through two distinct points.
                m = (y2 - y1) / (x2 - x1)
                x3 = m**2 - x1 - x2
                y3 = m * (x1 - x3) - y1
                return PuntoQRacional(x3, y3)
        def __neg__(self):
            if self.es_elemento_neutro():
                return self
            else:
                return PuntoQRacional(self.x, -self.y)
        def __mul__(self, entero):
            # BUG FIX: the original iterated range(entero), which is
            # empty for negative `entero`, so (-k) * P silently returned
            # the neutral element.  Negative multiples now follow the
            # group rule k*P = (-k)*(-P), matching the Fq/F2m versions.
            if entero < 0:
                return (-self) * (-entero)
            producto = PuntoQRacional.elemento_neutro()
            for _ in range(entero):
                producto += self
            return producto
        __rmul__ = __mul__
    # A smooth curve requires a nonzero discriminant.
    discriminante = 4 * a**3 + 27 * b**2
    if discriminante == 0:
        raise ValueError("El discriminant, 4a^3 + 27b^2, no puede ser cero.")
    PuntoQRacional.discriminante = discriminante
    PuntoQRacional.coeficientes = EcuacionWeierstrass(a, b)
    return PuntoQRacional
| {
"repo_name": "ranea/ccepy",
"path": "ccepy/curvas_elipticas.py",
"copies": "1",
"size": "23389",
"license": "mit",
"hash": 7565633026684312000,
"line_mean": 34.7834101382,
"line_max": 108,
"alpha_frac": 0.5259497746,
"autogenerated": false,
"ratio": 3.025324675324675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9047773390818935,
"avg_score": 0.0007002118211481468,
"num_lines": 651
} |
"""Aritmética elemental con enteros y polinomios.
Este módulo permite operar con enteros módulo un primo p y polinomios
cuyos coeficientes sean enteros módulo un primo p.
Para utilizar las funciones y las clases de este módulo, debe importarlo
previamente: ::
# reemplace ... por la función/clase que desea utilizar
from ccepy.aritmetica_elemental import ...
Para operar con enteros módulo un primo p, use la función :func:`Zp` y
los operadores aritméticos habituales.
>>> Z7 = Zp(7)
>>> n, m = Z7(2), Z7(6)
>>> n
2
>>> m
6
>>> n + m
1
>>> n * m
5
>>> m ** (-1)
6
Para operar con polinomios con coeficientes enteros módulo un primo p,
use la función :func:`PolinomioZp` y los operadores aritméticos
habituales.
>>> f = PolinomioZp([0, 0, 1], p=2)
>>> f
X^2
>>> g = PolinomioZp([1, 1], p=2)
>>> g
X + 1
>>> f + g
X^2 + X + 1
>>> f * g
X^3 + X^2
>>> f ** 3
X^6
"""
from itertools import zip_longest
import functools
import math
import copy
import random
def alg_euclides(a, b):
    """Extended Euclidean algorithm for integers.

    Computes (x, y, d) such that :math:`a x + b y = d`, where d is the
    greatest common divisor of (a, b).

    >>> alg_euclides(54, 24)
    (1, -2, 6)

    Args:
        a (int): a non-negative integer.
        b (int): another non-negative integer.

    Returns:
        Tuple[int, int, int]: the tuple (x, y, d).
    """
    if b > a:
        # Solve for the swapped pair and exchange the Bezout coefficients.
        y, x, d = alg_euclides(b, a)
    elif b == 0:
        # gcd(a, 0) = a = 1*a + 0*b
        d, x, y = a, 1, 0
    else:
        # Iterative extended Euclid; (x2, y2) track the coefficients of
        # the previous remainder, (x1, y1) those of the current one.
        x2, x1, y2, y1 = 1, 0, 0, 1
        while b > 0:
            q, r = divmod(a, b)
            x, y = x2 - q * x1, y2 - q * y1
            a, b = b, r
            x2, x1, y2, y1 = x1, x, y1, y
        d, x, y = a, x2, y2
    return x, y, d
def alg_euclides_polinomios(g, h, p):
    """Extended Euclidean algorithm for polynomials over Z_p.

    Computes (s, t, d) such that :math:`s g + t h = d`,
    where d is the monic greatest common divisor of (g, h).

    >>> f = PolinomioZp([0, 0, 0, 1], p=2)
    >>> f
    X^3
    >>> g = PolinomioZp([1, 0, 1, 1], p=2)
    >>> g
    X^3 + X^2 + 1
    >>> alg_euclides_polinomios(f, g, p=2)
    (X^2 + X + 1, X^2 + 1, 1)

    Args:
        g (PolinomioZp): a nonzero polynomial with coefficients modulo p.
        h (PolinomioZp): another polynomial with coefficients modulo p.
        p (int): the prime p.

    Returns:
        Tuple[PolinomioZp]: the tuple (s, t, d).
    """
    cero = PolinomioZp([0], p)
    uno = PolinomioZp([1], p)
    # Defensive copies: the loop below rebinds these, never the caller's
    # polynomials.
    resto_prev = PolinomioZp(g.coeficientes, p)
    resto = PolinomioZp(h.coeficientes, p)
    if resto == cero:
        s, t, d = uno, cero, resto_prev
    else:
        # Iterative extended Euclid, tracking the Bezout coefficients of
        # the previous and current remainders.
        s_prev, s_cur = uno, cero
        t_prev, t_cur = cero, uno
        while resto != cero:
            cociente, nuevo_resto = divmod(resto_prev, resto)
            s_nuevo = s_prev - cociente * s_cur
            t_nuevo = t_prev - cociente * t_cur
            resto_prev, resto = resto, nuevo_resto
            s_prev, s_cur = s_cur, s_nuevo
            t_prev, t_cur = t_cur, t_nuevo
        s, t, d = s_prev, t_prev, resto_prev
    # Normalise so the returned gcd is monic (leading coefficient 1).
    lider = d.coeficiente_lider()
    if lider != 1:
        inverso = Zp(p)(lider).inverso()
        escala = PolinomioZp([inverso], p)
        s, t, d = s * escala, t * escala, d * escala
    return s, t, d
@functools.lru_cache()
def Zp(p):
    """Returns the constructor of the integers modulo a prime p.

    >>> Z2 = Zp(2)
    >>> Z2
    <class 'ccepy.aritmetica_elemental.Zp.<locals>.EnteroModuloP'>
    >>> Z2(11) # 11 mod 2
    1

    Args:
        p (int): a prime number.

    Return:
        EnteroModuloP: the class representing the integers modulo a prime p.
    """
    # The lru_cache guarantees every call with the same p returns the very
    # same class object, so all Zp(p) instances share a single type.
    # Copy the class outside the function so it shows up in the documentation
    class EnteroModuloP(int):
        """Represents an integer modulo a prime p.

        >>> Z7 = Zp(7)
        >>> n, m = Z7(2), Z7(6)
        >>> type(n)
        <class 'ccepy.aritmetica_elemental.Zp.<locals>.EnteroModuloP'>
        >>> n
        2
        >>> m
        6
        >>> n + m
        1
        >>> n * m
        5
        >>> m ** (-1)
        6

        Supports the operators ``+``, ``-``, ``*``, ``/`` and ``**`` with
        their usual meaning.

        Both operands may be of type :class:`EnteroModuloP`, or one of type
        :class:`EnteroModuloP` and the other of type :py:class:`int`. In both
        cases the result is of type :class:`EnteroModuloP`.

        Args:
            entero (int): the value of the integer.

        Attributes:
            p (int): the prime p (*class attribute*).
        """
        p = None  # filled in just before the class is returned below

        @classmethod
        def cero(cls):
            """Returns zero.

            Return:
                EnteroModuloP: zero.
            """
            return EnteroModuloP(0)

        @classmethod
        def uno(cls):
            """Returns one.

            Return:
                EnteroModuloP: one.
            """
            return EnteroModuloP(1)

        def __new__(cls, entero):
            if EnteroModuloP.p is None:
                raise RuntimeError("Instancie usando la función Zp()")
            # Reduce modulo p at construction; the int subclass stores
            # only the residue.
            return super().__new__(cls, entero % EnteroModuloP.p)

        # uncomment this method only for the documentation
        # def __init__(self, entero):
        #     pass

        def __eq__(self, m):
            # Coerce the other operand modulo p before comparing.
            return super().__eq__(EnteroModuloP(m))

        def __ne__(self, m):
            return not self.__eq__(m)

        def __add__(self, m):
            return EnteroModuloP(super().__add__(m))

        __radd__ = __add__

        def __neg__(self):
            return EnteroModuloP(super().__neg__())

        def __sub__(self, m):
            return EnteroModuloP(self + (-m))

        def __rsub__(self, m):
            # m - self == -(self - m)
            return -self.__sub__(m)

        def __mul__(self, m):
            return EnteroModuloP(super().__mul__(m))

        __rmul__ = __mul__

        def __pow__(self, m):
            # Negative exponents go through the modular inverse.
            if m < 0:
                inverso = self.inverso()
                return inverso ** (-m)
            else:
                # Three-argument pow: efficient modular exponentiation.
                return EnteroModuloP(super().__pow__(m, EnteroModuloP.p))

        def inverso(self):
            """Returns the inverse modulo p.

            >>> Z7 = Zp(7)
            >>> Z7(6).inverso()
            6

            Return:
                EnteroModuloP: the inverse.
            """
            if self == 0:
                raise ZeroDivisionError
            # Bezout: x*self + y*p = d. Since p is prime and self != 0,
            # d == 1 and x is the inverse of self modulo p.
            x, y, d = alg_euclides(self, EnteroModuloP.p)
            return EnteroModuloP(x)

        def __truediv__(self, m):
            return self * EnteroModuloP(m).inverso()

        def __rtruediv__(self, m):
            return EnteroModuloP(m).__truediv__(self)

        # Needed for @lru_cache
        def __hash__(self):
            return super().__hash__()

    EnteroModuloP.p = p
    EnteroModuloP.__name__ = "Z{0}".format(p)
    return EnteroModuloP
class PolinomioZp:
    """Represents a polynomial whose coefficients are integers modulo a prime p.

    >>> f = PolinomioZp([0, 0, 1], p=2)
    >>> f
    X^2
    >>> g = PolinomioZp([1, 1], p=2)
    >>> g
    X + 1
    >>> f + g
    X^2 + X + 1
    >>> f * g
    X^3 + X^2
    >>> f ** 3
    X^6

    Supports the operators ``+``, ``-``, ``*``, ``/``, ``%`` and ``**`` with
    their usual meaning.

    Both operands may be of type :class:`PolinomioZp`, or one of type
    :class:`PolinomioZp` and the other of type :py:class:`int`. In both
    cases the result is of type :class:`PolinomioZp`.

    Args:
        coeficientes (List[int]): the coefficients of the polynomial in
            ascending order, that is, the constant term first and the
            leading coefficient last.
        p (int): the prime p.
    """
    def __init__(self, coeficientes, p):
        # We want the last stored coefficient to be nonzero
        # (except for the zero polynomial).
        Z_p = Zp(p)
        ultimo_coef = None
        for indice, coef in enumerate(reversed(coeficientes)):
            if Z_p(coef) != 0:
                ultimo_coef = len(coeficientes) - indice - 1
                break
        else:
            # No nonzero coefficient: store the zero polynomial as [0].
            ultimo_coef = 0
        self._coeficientes = [Z_p(c) for c in coeficientes[:ultimo_coef + 1]]

    @property
    def coeficientes(self):
        """List[EnteroModuloP]: the coefficients of the polynomial in
        ascending order, that is, the constant term first and the leading
        coefficient last. Read-only attribute.
        """
        return self._coeficientes

    def primo(self):
        """Returns the prime p."""
        return self._coeficientes[0].p

    @classmethod
    def monomio(cls, coef, grado, p):
        """Returns the monomial with coefficient *coef* and degree *grado*.

        >>> PolinomioZp.monomio(-1, 7, 2)
        X^7

        Args:
            coef (int): the leading coefficient of the monomial.
            grado (int): the exponent of the monomial.
            p (int): the prime p.

        Returns:
            PolinomioZp: the monomial with that coefficient and degree.
        """
        return cls([0 for i in range(grado)] + [coef], p)

    def grado(self):
        """Returns the degree of the polynomial.

        >>> f = PolinomioZp([1, 0, 0, 1], p=2)
        >>> f
        X^3 + 1
        >>> f.grado()
        3

        The degree can be:

        * - :py:data:`math.inf` : if the polynomial is the zero polynomial.
        * ``n`` : if the leading term has exponent n.

        Returns:
            int: the degree of the polynomial.
        """
        if self == PolinomioZp([0], self.primo()):
            return -math.inf
        else:
            return len(self.coeficientes) - 1

    def coeficiente_lider(self):
        """Returns the coefficient of the highest-exponent term.

        >>> f = PolinomioZp([2, 0, 0, 1], p=3)
        >>> f
        X^3 + 2
        >>> f.coeficiente_lider()
        1

        Returns:
            EnteroModuloP: the coefficient of the highest exponent.
        """
        return self.coeficientes[-1]

    def es_irreducible(self):
        """Checks whether the polynomial is irreducible over Z_p.

        >>> f = PolinomioZp([1, 0, 1, 1], p=2)
        >>> f
        X^3 + X^2 + 1
        >>> f.es_irreducible()
        True

        Returns:
            bool: True or False.
        """
        p = self.primo()
        f = PolinomioZp(self.coeficientes, p)
        if f.coeficiente_lider() != 1:
            f = f / PolinomioZp([f.coeficiente_lider()], p) # make it monic
        m = f.grado()
        x = PolinomioZp([0, 1], p)
        u = copy.deepcopy(x)
        # Checks gcd(f, x^(p^i) - x) for every i <= m/2: any irreducible
        # factor of degree i divides x^(p^i) - x, so a nontrivial gcd
        # proves reducibility.
        for i in range(1, m // 2 + 1):
            u = (u ** p) % f  # u = x^(p^i) mod f
            _, _, d = alg_euclides_polinomios(f, u - x, p)
            if d != PolinomioZp([1], p):
                return False
        return True

    @classmethod
    def genera_irreducible(cls, grado, p):
        """Returns an irreducible polynomial of the given degree with
        coefficients modulo p.

        >>> f = PolinomioZp.genera_irreducible(3, 2)
        >>> f.es_irreducible()
        True

        Args:
            grado (int): the degree of the generated polynomial.
            p (int): the prime p.

        Returns:
            PolinomioZp: the irreducible polynomial.
        """
        Z_p = Zp(p)
        # Rejection sampling: draw random monic polynomials with a nonzero
        # constant term until an irreducible one is found.
        while True:
            a_0 = Z_p(1 + random.randrange(p - 1)) # we want it != 0
            a_m = Z_p(1)
            coeficientes = [a_0]
            coeficientes += [random.randrange(p) for i in range(1, grado)]
            coeficientes += [a_m]
            f = PolinomioZp(coeficientes, p)
            if f.es_irreducible():
                return f

    def __eq__(self, q):
        if isinstance(q, PolinomioZp):
            return self.coeficientes == q.coeficientes
        else:
            # If the polynomial is a constant, compare
            # against the coefficient itself.
            if self.grado() < 1:
                return self.coeficientes[0] == q
            else:
                return False

    def __ne__(self, q):
        return not self.__eq__(q)

    def __add__(self, q):
        if isinstance(q, PolinomioZp):
            # Coefficient-wise addition, padding the shorter list with zeros.
            return PolinomioZp([a + b for a, b in zip_longest(self.coeficientes,
                                                              q.coeficientes,
                                                              fillvalue=0)], self.primo())
        else:
            # Adding an integer only affects the constant term.
            coeficientes = [self.coeficientes[0] + q] + self.coeficientes[1:]
            return PolinomioZp(coeficientes, self.primo())

    __radd__ = __add__

    def __neg__(self):
        return PolinomioZp([-a for a in self.coeficientes], self.primo())

    def __sub__(self, q):
        return self + (-q)

    def __rsub__(self, q):
        return -self.__sub__(q)

    def __mul__(self, q):
        if isinstance(q, PolinomioZp):
            cero = PolinomioZp([0], self.primo())
            if self == cero or q == cero:
                return cero
            # Schoolbook convolution of the two coefficient lists.
            maximo_grado = len(self.coeficientes) + len(q.coeficientes)
            multiplicacion = [0 for _ in range(maximo_grado)]
            for i, a in enumerate(self.coeficientes):
                for j, b in enumerate(q.coeficientes):
                    multiplicacion[i + j] += a * b
            return PolinomioZp(multiplicacion, self.primo())
        else:
            # Scalar multiplication.
            return PolinomioZp([a * q for a in self.coeficientes], self.primo())

    __rmul__ = __mul__

    def __pow__(self, n):
        # Naive repeated multiplication (n is assumed to be >= 0).
        potencia = PolinomioZp([1], self.primo())
        for _ in range(n):
            potencia *= self
        return potencia

    def __divmod__(self, q):
        # Polynomial long division: returns (quotient, remainder).
        # NOTE(review): dividing by the zero polynomial loops until the
        # leading-coefficient inverse raises ZeroDivisionError — verify
        # callers never pass a zero divisor.
        p = self.primo()
        cero = PolinomioZp([0], self.primo())
        cociente, divisor, resto = cero, copy.deepcopy(q), copy.deepcopy(self)
        while resto != cero and resto.grado() >= divisor.grado():
            monomio_grado = resto.grado() - divisor.grado()
            monomio_cl = resto.coeficiente_lider() / divisor.coeficiente_lider()
            monomio_cociente = PolinomioZp.monomio(monomio_cl, monomio_grado, p)
            cociente += monomio_cociente
            resto -= monomio_cociente * divisor
        return cociente, resto

    def __truediv__(self, q):
        return divmod(self, q)[0]

    def __mod__(self, q):
        if self.grado() < q.grado():
            return self
        else:
            return divmod(self, q)[1]

    def __str__(self):
        if self == PolinomioZp([0], self.primo()):
            return str(0)
        else:
            monomios = []
            # Monomials are printed in descending order of degree.
            for indice, coef in enumerate(reversed(self.coeficientes)):
                if coef != 0:
                    exponente = len(self.coeficientes) - indice - 1
                    # The following cases print X instead of
                    # 1*X^1 and similar.
                    if exponente == 0:
                        monomios.append(str(coef))
                    elif exponente == 1:
                        if coef == 1:
                            monomios.append("X")
                        else:
                            monomios.append("{0}*X".format(coef))
                    else:
                        if coef == 1:
                            monomios.append("X^{0}".format(exponente))
                        else:
                            monomios.append("{0}*X^{1}".format(coef, exponente))
            return ' + '.join(monomios)

    __repr__ = __str__

    # Needed for @functools.lru_cache in Fq()
    def __hash__(self):
        return hash(tuple(self.coeficientes))
| {
"repo_name": "ranea/ccepy",
"path": "ccepy/aritmetica_elemental.py",
"copies": "1",
"size": "15736",
"license": "mit",
"hash": -4830021927142027000,
"line_mean": 28.1598513011,
"line_max": 80,
"alpha_frac": 0.494390617,
"autogenerated": false,
"ratio": 3.073667711598746,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9066868562994095,
"avg_score": 0.00023795312093019652,
"num_lines": 538
} |
"""ARK Survival RCON Interface. Connect, authenticate and transmit data to your favorite ARK Server.
by Torgrim "Fable" Ruud - torgrim.ruud@gmail.com
Class initialization requires host,port and password and will connect to the server unless specified.
Upon connecting commences authentication.
Through class inheritance you will not need to change core code (steam_socket, steam_socket_core class).
You can just add same name function to this class and use super().function()
This way you can alter without fearing breaking the core functionality.
For easy reading of code all transmittable RCON commands are in class RconCommands
The RCON class is simply just a collection of helper functions and a wrapper for core code.
"""
from ark.rcon_commands import RconCommands
from ark.steam.source_server_query import ArkSourceQuery
from .cli import *
from .thread_handler import ThreadHandler
from ark.storage import Storage
from ark.event_handler import EventHandler
from ark.database import Db
import time
from ark.server_control import ServerControl
from factory import Factory
# Shared singletons resolved through the service factory.
Config = Factory.get('Config')
Lang = Factory.get('Translation')
class Rcon(RconCommands):
    """Collection of RCON helper functions layered on top of RconCommands.

    Adds restart scheduling with broadcast warnings, admin lookup, online
    player lookup and Source-query conveniences.
    """
    @classmethod
    def callback_restart(cls,*args):
        """ Callback for broadcast on immediate restarts

        Broadcast does not happen if you restart immediately
        """
        # NOTE(review): "IMMEDIDATE" looks like a typo for "IMMEDIATE"
        # in the log message below.
        out('Issuing IMMEDIDATE server restart')
        ServerControl.restart_server()

    @classmethod
    def delayed_restart(cls,minutes, message=""):
        """Delayed restart of the server

        Will restart the server after 5,10,30,60 minutes.
        Notifies the server with broadcast on all these intervals and on 60 seconds

        Args:
            minutes: 5,10,30,60 minutes.

        Returns:
            Result: Bool
            Err: String / None
        """
        minutes = int(minutes)
        if minutes not in [5,10,30,60]:
            err = 'Unable to do delayed restart. Valid waiting times: 5, 10, 30, 60'
            out(err)
            return False, err

        def delayed_message(minutes,message=""):
            # Runs on a worker thread: cascades through the 60/30/10 minute
            # warnings, then the 5-minute/60-second/10-second countdown,
            # and finally triggers the actual restart.
            if minutes == 60:
                cls.broadcast(Lang.get('restart_default').format('60 minutes',message), cls.response_callback_response_only)
                time.sleep(30*60)
                minutes = 30
            if minutes == 30:
                cls.broadcast(Lang.get('restart_default').format('30 minutes',message), cls.response_callback_response_only)
                time.sleep(20*60)
                minutes = 10
            if minutes == 10:
                cls.broadcast(Lang.get('restart_default').format('10 minutes',message), cls.response_callback_response_only)
                time.sleep(5*60)
            cls.broadcast(Lang.get('restart_default').format('5 minutes',message), cls.response_callback_response_only)
            time.sleep(4*60)
            cls.broadcast(Lang.get('restart_default').format('60 seconds',message), cls.response_callback_response_only)
            time.sleep(50)
            cls.broadcast(Lang.get('restart_default').format('10 seconds',message), cls.response_callback_response_only)
            time.sleep(10)
            Storage.restart_timestamp = None
            ServerControl.restart_server()

        # NOTE(review): floor() is not among this module's visible imports;
        # presumably re-exported by `from .cli import *` — verify
        # (math.floor otherwise).
        Storage.restart_timestamp = floor(time.time() + (minutes*60))
        callback = lambda:delayed_message(minutes,message)
        ThreadHandler.create_thread(callback,looping=False)
        return True, None

    @classmethod
    def set_next_restart_time(cls,timestamp):
        # Keep only the earliest pending restart time.
        # NOTE(review): assumes a restart is already scheduled
        # (restart_timestamp is not None) — TypeError otherwise; verify.
        if timestamp < Storage.restart_timestamp:
            Storage.restart_timestamp = timestamp

    @classmethod
    def get_next_restart_string(cls):
        """Returns SecondsLeft or None, String Formatted Time
        """
        if not Storage.restart_timestamp:
            return None, 'No restart within the next 60 minutes'
        seconds_left = Storage.restart_timestamp - time.time()
        # time_countdown is presumably provided by `from .cli import *`.
        return seconds_left, time_countdown(seconds_left)

    @staticmethod
    def is_admin(steam_id=None, steam_name=None):
        """True if the stored player record exists and is flagged admin."""
        player = Db.find_player(steam_id=steam_id, steam_name=steam_name)
        if not player:
            return False
        if player.admin:
            return True
        return False

    @classmethod
    def reconnect(cls):
        # Flag everyone as disconnected before the socket-level reconnect.
        EventHandler.triggerEvent(EventHandler.E_DISCONNECT, Storage.players_online_steam_name)
        Storage.players_online_steam_name = {}
        super().reconnect()

    @staticmethod
    def find_online_steam_id(steam_name=None):
        """Reverse lookup: steam id of an online player name, else None."""
        for steam_id, name in Storage.players_online_steam_name.items():
            if steam_name == name:
                return steam_id
        return None

    @classmethod
    def debug_compare_packet_count(cls):
        # Debug helper: compare the in/out packet queue sizes.
        out("{} incoming packets and {} outgoing packets".format(len(Rcon.incoming_packets),len(Rcon.outgoing_packets)))

    @classmethod
    def init(cls,host,port,query_port,password,timeout=None):
        """Connect, authenticate and start the listener/communication threads."""
        if host is None or port is None:
            raise TypeError("Please initialize the rcon module with host and port")
        if password is None:
            raise TypeError("Please provide rcon password")
        result, err = cls.socket_connect(host,port,query_port,password,timeout)
        if not result:
            cls.reconnect()
        ThreadHandler.create_thread(cls.listen)
        ThreadHandler.create_thread(cls.loop_communication)

    @classmethod
    def response_callback_default(cls, packet):
        # Echo both the issued command and the server's reply.
        out('> {}\n[Response]: {}'.format(packet.outgoing_command,packet.decoded["body"].strip()))

    @classmethod
    def response_callback_response_only(cls,packet):
        out('[Response]: {}'.format(packet.decoded["body"].strip()))

    @classmethod
    def none_response_callback(cls,packet):
        # Intentionally discard the response.
        pass

    @staticmethod
    def query_server():
        """Query the server via the Source query protocol and cache the data."""
        Storage.query_data = ArkSourceQuery.query_info(Config.rcon_host,Config.query_port)
        return Storage.query_data
| {
"repo_name": "f4ble/pyarc",
"path": "ark/rcon.py",
"copies": "2",
"size": "5976",
"license": "apache-2.0",
"hash": 4630517662447208000,
"line_mean": 34.5714285714,
"line_max": 124,
"alpha_frac": 0.6599732262,
"autogenerated": false,
"ratio": 4.167364016736402,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0050164654281854,
"num_lines": 168
} |
import argparse
import os
import time
import datetime
cVersion = "0.1a"
cWaitingTimeForPageUpdate = 5 # seconds
cWaitingTimeForDownloadingToComplete = 10 # seconds
def main():
    """Entry point: download yesterday's Arlo videos and stage them for upload.

    Returns 0 on success and 1 on failure.
    """
    ShowApplicationTitle()
    inputs = ProcessCommandLineInputs()
    print "Accessing Arlo webpage.."
    result = DownloadAllTodaysVideo(inputs.account, inputs.password, inputs.verbose)
    if result:
        print "Processing Video.."
        MoveFilesToUploadFolder(inputs.download_path, inputs.upload_path)
    else:
        print "Download failed"
        return 1
    print "Done"
    return 0
def ShowApplicationTitle():
    # Print the startup banner with the script version.
    print "-----------------------------------------"
    print "          ArloBackup %s" % cVersion
    print "-----------------------------------------"
    print
def ProcessCommandLineInputs():
    """Parse the command line.

    Returns:
        argparse.Namespace with account, password, download_path,
        upload_path and verbose attributes.
    """
    parser = argparse.ArgumentParser(
        description="""This is a helper script to download Arlo videos from your account, and put them into
        a cloud storage. So that you can keep them forever without losing after certain time.""")
    parser.add_argument("account", help="Arlo site account id for login. ex> magoja@gmail.com")
    parser.add_argument("password", help="Arlo site password for login.")
    parser.add_argument(
        "-d", "--download_path",
        default="~/Downloads",
        help="Default download folder. This script will move all *.mp4 files from there.")
    parser.add_argument(
        "-u", "--upload_path",
        default="~/Uploads",
        help="Upload folder for cloud storage. I recommend you to use Dropbox, Google Photos or a similar service.")
    parser.add_argument("-v", "--verbose", action="store_true", help="Increase output verbosity for debug.")
    return parser.parse_args()
def DownloadAllTodaysVideo(account, password, verbose):
    """Log in and download yesterday's videos; returns False on login failure."""
    downloader = ArloVideoDownloader(verbose)
    if not downloader.Login(account, password):
        print "Error) Login Failed. Please check your account."
        # This might break if they change the login system.
        return False
    downloader.DownloadTodaysVideo()
    return True
class ArloVideoDownloader:
def __init__(self, verbose):
from splinter import Browser
self.browser = Browser("chrome")
self.verbose = verbose
def __del__(self):
if self.verbose:
return
# Leave the browser open if this is running with Verbose option.
if self.browser != None:
self.browser.quit()
def Login(self, account, password):
self.browser.visit("https://arlo.netgear.com/#/login")
self.browser.fill('userId', account)
self.browser.fill('password', password)
button = self.browser.find_by_id('loginButton')
if button.is_empty():
self.Debug("Cannot find loginButton.")
return False
button.click()
self.WaitForPageUpdate()
# Wait for page to load. This can take some time.
if self.browser.is_element_not_present_by_text('Library', wait_time = cWaitingTimeForPageUpdate):
return False
else:
return True
def DownloadTodaysVideo(self):
print "Logging in.."
if not self.OpenYesterdayPage():
self.Debug("Err> Cannot open library tab")
return False
print "Downloading Video.."
self.IterateToDownloadAll()
self.WaitForDownloading()
def WaitForPageUpdate(self):
self.Debug("Wait %d seconds.." % cWaitingTimeForPageUpdate)
time.sleep(cWaitingTimeForPageUpdate)
def IterateToDownloadAll(self):
self.SetSelectVideoMode()
previews = self.browser.find_by_css('.timeline-record')
# Go over for each video.
# I didn't try to download all at once, because I couldn't
# avoid the problem that Browser asking for a permission
# to download multiple files at once.
# So, download videos one by one
previousButton = None
for button in previews:
if previousButton is not None:
# Unselect last one.
previousButton.click()
# Select new one
button.click()
previousButton = button
self.WaitForPageUpdate()
self.PushDownload()
def OpenYesterdayPage(self):
#https://arlo.netgear.com/#/calendar/201512/all/all/20151226/day
# They have changed it! 2015/12/29
#https://arlo.netgear.com/#/calendar/201512/20151228/day
yesterday = self.GetYesterday()
url = "https://arlo.netgear.com/#/calendar/%d%d/%d%d%d/day" % (
yesterday.year, yesterday.month, yesterday.year, yesterday.month, yesterday.day)
self.Debug("Visiting: %s" % url)
# This breaks session! What should I do?
self.browser.visit(url)
self.WaitForPageUpdate()
return not self.browser.is_element_not_present_by_text('Select')
def SetSelectVideoMode(self):
self.browser.find_by_text('Select').click()
def GetYesterday(self):
return datetime.datetime.now() - datetime.timedelta(hours=24)
def PushDownload(self):
# TODO: Can we change the download folder?
buttons = self.browser.find_by_css('.download')
buttons[0].click()
pass
def WaitForDownloading(self):
# TODO: How can I know when all the downloading would be completed?
time.sleep(cWaitingTimeForDownloadingToComplete)
def Debug(self, message):
if self.verbose:
print message
def MoveFilesToUploadFolder(downloadPath, uploadPath):
    """Move every Arlo-looking *.mp4 from downloadPath into uploadPath."""
    # TODO: Go over all MP4 files. Get the timestamp from file name,
    # convert it to human readable format. Move it to target folder.
    print "Accessing '%s' folder.." % downloadPath
    expandedDownloadPath = os.path.expanduser(downloadPath)
    expandedUploadPath = os.path.expanduser(uploadPath)
    for root, dirs, filenames in os.walk(expandedDownloadPath):
        filenames.sort()
        for filename in filenames:
            if IsArloVideo(filename):
                # NOTE(review): uses expandedDownloadPath instead of `root`,
                # so matches inside subdirectories would be renamed from the
                # wrong path — verify walk depth is always 1.
                src = "%s/%s" % (expandedDownloadPath, filename)
                dst = "%s/%s" % (expandedUploadPath, filename)
                os.rename(src, dst)
def IsArloVideo(filename):
    """Heuristically decide whether *filename* looks like an Arlo download.

    Arlo names downloads as a 13-digit epoch-milliseconds timestamp with an
    ``.mp4`` extension, e.g. ``1450000000000.mp4``. This is not a perfect
    check, but it keeps unrelated files out of the upload folder.

    Returns True for matching names, False otherwise.
    """
    if not filename.endswith(".mp4"):
        return False
    filenameOnly = filename[:-4]
    if len(filenameOnly) != 13:  # 13-digit epoch in milliseconds
        return False
    # Round-trip through int to reject non-numeric stems and leading zeros.
    # The previous version let int() raise ValueError on names such as
    # "abcdefghijklm.mp4" instead of returning False.
    try:
        if "%d" % int(filenameOnly) != filenameOnly:
            return False
    except ValueError:
        return False
    return True
if __name__ == "__main__":
import sys, os
if main():
sys.exit(os.EX_OK)
else:
sys.exit(os.EX_SOFTWARE)
else:
print "This file must be the main entry point." | {
"repo_name": "Magoja/arloBackup",
"path": "arloBackup.py",
"copies": "1",
"size": "6276",
"license": "mit",
"hash": -360480508765450200,
"line_mean": 29.1778846154,
"line_max": 112,
"alpha_frac": 0.6854684512,
"autogenerated": false,
"ratio": 3.7990314769975786,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4984499928197579,
"avg_score": null,
"num_lines": null
} |
#arlobox.py
# Netgear Arlo camera automation
# Monitor the momentary toggle switch in the Raspberry Pi box,
# or the armed state of an external security alarm system,
# then change the arm/disarm mode of the Arlo cameras,
# and set the lights on the box appropriately.
#Copyright (c) 2016, Len Shustek
#Open source by the MIT License; see LICENSE.txt
# 11 Aug 2015, L. Shustek, first version
# 14 Sep 2016, L. Shustek, redo using HTTP instead of mouse movement
# 20 Sep 2016, L. Shustek, Add optional delay before arm
# 24 Sep 2016, L. Shustek, Add input from an external security alarm system
USERNAME = "xxxxxxxxx"  # Arlo account credentials — placeholders, fill in before use
PASSWORD = "yyyyyyyyy"

from Arlo import Arlo # from https://github.com/jeffreydwalter/arlo, as modified here
import RPi.GPIO as GPIO
import time

armdelay = 30 # optional delay before arming, in seconds

# Raspberry Pi's P-1 connector pin numbers for the switches and lights
upswitch = 21 # arm (momentary grounded input)
downswitch = 19 # disarm (momentary grounded input)
alarmswitch = 7 # alarm system on (grounded input)
redled = 23 # "camera armed" LED (output)
greenled = 15 # "camera disarmed" LED (output)

print("configuring I/O pins...")
GPIO.setmode(GPIO.BOARD)  # physical board pin numbering
GPIO.setwarnings(False)
# Switch inputs are active-low: internal pull-ups, grounded when closed.
GPIO.setup(upswitch, GPIO.IN, pull_up_down = GPIO.PUD_UP)
GPIO.setup(downswitch, GPIO.IN, pull_up_down = GPIO.PUD_UP)
GPIO.setup(alarmswitch, GPIO.IN, pull_up_down = GPIO.PUD_UP)
GPIO.setup(redled, GPIO.OUT, initial=1)
GPIO.setup(greenled, GPIO.OUT, initial=1)
def setleds(green,red):
    # Write both status-LED output levels in one call.
    GPIO.output(greenled,green)
    GPIO.output(redled,red)
def blinkleds(times):
    # Flash both LEDs together `times` times (startup/acknowledge signal).
    for i in range(times):
        setleds(0,0)
        time.sleep(0.33)
        setleds(1,1)
        time.sleep(0.33)
def errorleds(times):
    # Alternate the two LEDs rapidly, then leave both at level 0
    # (error signal, used when talking to Arlo fails).
    for i in range(times):
        setleds(1,0)
        time.sleep(0.2)
        setleds(0,1)
        time.sleep(0.2)
    setleds(0,0)
def waitleds(times):
    # Slow alternating pattern shown while counting down before arming;
    # each iteration takes about one second, so `times` ~ seconds.
    for i in range(times):
        setleds(1,1)
        time.sleep(0.25)
        setleds(1,0)
        time.sleep(0.25)
        setleds(1,1)
        time.sleep(0.25)
        setleds(0,1)
        time.sleep(0.25)
def changemode(arm):
    """Log in to Arlo and arm (arm=True) or disarm the first basestation.

    Returns True on success; on any failure prints the error, shows the
    error LED pattern and returns False.
    """
    try:
        arlo = Arlo(USERNAME,PASSWORD) #log in
        basestation = [device for device in arlo.GetDevices() if device['deviceType'] == 'basestation']
        print("basestation="+basestation[0]['deviceId'])
        if arm == True:
            arlo.Arm(basestation[0]['deviceId'], basestation[0]['xCloudId'])
        else:
            arlo.Disarm(basestation[0]['deviceId'], basestation[0]['xCloudId'])
        arlo.Logout()
        return True
    except Exception as e:
        print(e)
        errorleds(10)
        return False
def do_arm():
    # Optionally show the countdown pattern before arming, then arm the
    # cameras and set the LEDs to the "armed" state on success.
    if (armdelay > 0):
        waitleds(armdelay)
    blinkleds(2)
    if (changemode(True)): # arm
        setleds(0,1)
        print("arm succeeded")
def do_disarm():
    # Disarm the cameras and set the LEDs to the "disarmed" state on success.
    blinkleds(2)
    if (changemode(False)): # disarm
        setleds(1,0)
        print("disarm succeeded")
alarm_off = GPIO.input(alarmswitch) #current state of alarm system
blinkleds(5) # flash lights a bunch of times to show we're here
print("waiting for switch or alarm system...")
# Main polling loop: the momentary toggle arms/disarms directly; the
# external alarm system's armed state is followed on change, with a
# 0.5 s debounce re-read.
# NOTE(review): the loop never sleeps between polls, so it busy-waits a
# CPU core — consider a short time.sleep() per iteration.
while True:
    if (GPIO.input(upswitch) == 0):
        print("switch was pushed up")
        do_arm()
    if (GPIO.input(downswitch) == 0):
        print("switch was pushed down")
        do_disarm()
    if (GPIO.input(alarmswitch) != alarm_off): # alarm system state change?
        time.sleep(0.5) # debounce time
        if (GPIO.input(alarmswitch) != alarm_off):
            alarm_off = GPIO.input(alarmswitch)
            if (alarm_off):
                print("alarm system was disarmed")
                do_disarm()
            else:
                print("alarm system was armed")
                do_arm()
#eof
| {
"repo_name": "LenShustek/ArloCamera",
"path": "arlobox.py",
"copies": "1",
"size": "3823",
"license": "mit",
"hash": -4047601849973661700,
"line_mean": 28.6356589147,
"line_max": 103,
"alpha_frac": 0.6275176563,
"autogenerated": false,
"ratio": 3.167357083678542,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9175463040403551,
"avg_score": 0.023882339914998277,
"num_lines": 129
} |
"""ARMA and MA estimates, ARMA and MA PSD estimates.
.. topic:: ARMA model and Power Spectral Densities.
.. autosummary::
:nosignatures:
arma_estimate
ma
arma2psd
parma
pma
.. codeauthor:: Thomas Cokelaer 2011
:References: See [Marple]_
"""
import numpy as np
from numpy.fft import fft
from spectrum.correlation import CORRELATION
from spectrum.covar import arcovar, arcovar_marple
import spectrum.yulewalker as yulewalker
from spectrum.psd import ParametricSpectrum
__all__ = ["arma2psd", "arma_estimate", "ma", "pma", "parma"]
def arma2psd(A=None, B=None, rho=1., T=1., NFFT=4096, sides='default',
             norm=False):
    r"""Computes power spectral density given ARMA values.

    This function computes the power spectral density values
    given the ARMA parameters of an ARMA model. It assumes that
    the driving sequence is a white noise process of zero mean and
    variance :math:`\rho_w`. The sampling frequency and noise variance are
    used to scale the PSD output, which length is set by the user with the
    `NFFT` parameter.

    :param array A: Array of AR parameters (complex or real)
    :param array B: Array of MA parameters (complex or real)
    :param float rho: White noise variance to scale the returned PSD
    :param float T: Sampling frequency in Hertz to scale the PSD.
    :param int NFFT: Final size of the PSD
    :param str sides: Default PSD is two-sided, but sides can be set to centerdc.
    :param bool norm: If True, normalise the PSD by its maximum value.

    .. warning:: By convention, the AR or MA arrays does not contain the
        A0=1 value.

    If :attr:`B` is None, the model is a pure AR model. If :attr:`A` is None,
    the model is a pure MA model.

    :return: two-sided PSD

    .. rubric:: Details:

    ARMA case: the power spectral density is:

    .. math:: P_{ARMA}(f) = T \rho_w \left|\frac{B(f)}{A(f)}\right|^2

    where:

    .. math:: A(f) = 1 + \sum_{k=1}^p a(k) e^{-j2\pi fkT}
    .. math:: B(f) = 1 + \sum_{k=1}^q b(k) e^{-j2\pi fkT}

    .. rubric:: **Example:**

    .. plot::
        :width: 80%
        :include-source:

        import spectrum.arma
        from pylab import plot, log10, legend
        plot(10*log10(spectrum.arma.arma2psd([1,0.5],[0.5,0.5])), label='ARMA(2,2)')
        plot(10*log10(spectrum.arma.arma2psd([1,0.5],None)), label='AR(2)')
        plot(10*log10(spectrum.arma.arma2psd(None,[0.5,0.5])), label='MA(2)')
        legend()

    :References: [Marple]_
    """
    if NFFT is None:
        NFFT = 4096

    if A is None and B is None:
        raise ValueError("Either AR or MA model must be provided")

    if A is not None:
        # Denominator polynomial [1, a(1), ..., a(p)] zero-padded to NFFT.
        den = np.zeros(NFFT, dtype=complex)
        den[0] = 1. + 0j
        den[1:len(A) + 1] = A
        denf = fft(den, NFFT)

    if B is not None:
        # Numerator polynomial [1, b(1), ..., b(q)] zero-padded to NFFT.
        num = np.zeros(NFFT, dtype=complex)
        num[0] = 1. + 0j
        num[1:len(B) + 1] = B
        numf = fft(num, NFFT)

    # Changed in version 0.6.9 (divided by T instead of multiply)
    if A is not None and B is not None:
        psd = rho / T * abs(numf)**2. / abs(denf)**2.
    elif A is not None:
        psd = rho / T / abs(denf)**2.
    else:  # pure MA model
        psd = rho / T * abs(numf)**2.

    psd = np.real(psd)
    # The PSD is a two-sided PSD; optionally re-centre it around DC.
    if sides != 'default':
        from . import tools
        assert sides in ['centerdc']
        if sides == 'centerdc':
            psd = tools.twosided_2_centerdc(psd)

    if norm:
        psd /= max(psd)

    return psd
def arma_estimate(X, P, Q, lag):
    """Autoregressive and moving average estimators.

    This function provides an estimate of the autoregressive
    parameters, the moving average parameters, and the driving
    white noise variance of an ARMA(P,Q) for a complex or real data sequence.

    The parameters are estimated using three steps:

        * Estimate the AR parameters from the original data based on a least
          squares modified Yule-Walker technique,
        * Produce a residual time sequence by filtering the original data
          with a filter based on the AR parameters,
        * Estimate the MA parameters from the residual time sequence.

    :param array X: Array of data samples (length N)
    :param int P: Desired number of AR parameters
    :param int Q: Desired number of MA parameters
    :param int lag: Maximum lag to use for autocorrelation estimates

    :return:
        * A - Array of complex P AR parameter estimates
        * B - Array of complex Q MA parameter estimates
        * RHO - White noise variance estimate

    .. note::
        * lag must be >= Q (MA order)

    **dependencies**:
        * :meth:`spectrum.correlation.CORRELATION`
        * :meth:`spectrum.covar.arcovar`
        * :meth:`spectrum.arma.ma`

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import arma_estimate, arma2psd, marple_data
        import pylab
        a,b, rho = arma_estimate(marple_data, 15, 15, 30)
        psd = arma2psd(A=a, B=b, rho=rho, sides='centerdc', norm=True)
        pylab.plot(10 * pylab.log10(psd))
        pylab.ylim([-50,0])

    :reference: [Marple]_
    """
    # Unbiased autocorrelation estimates R[0..lag] of the input data.
    R = CORRELATION(X, maxlags=lag, norm='unbiased')
    R0 = R[0]
    # Estimate the AR parameters (no error weighting is used).
    # Number of equation errors is M-Q.
    MPQ = lag - Q + P
    N = len(X)
    Y = np.zeros(N-P, dtype=complex)
    # Build the right-hand side of the modified Yule-Walker equations from
    # the autocorrelation sequence; negative lags use the conjugate
    # symmetry R[-k] = conj(R[k]).
    for K in range(0, MPQ):
        KPQ = K + Q - P+1
        if KPQ < 0:
            Y[K] = R[-KPQ].conjugate()
        if KPQ == 0:
            Y[K] = R0
        if KPQ > 0:
            Y[K] = R[KPQ]
    # The resize to `lag` entries is very important for the normalisation.
    Y.resize(lag, refcheck=False)
    # Covariance-based AR fit, Eq. (10.12) in Marple; the Marple variant is
    # used for small orders, the generic arcovar otherwise.
    if P <= 4:
        res = arcovar_marple(Y.copy(), P)    #! Eq. (10.12)
        ar_params = res[0]
    else:
        res = arcovar(Y.copy(), P)    #! Eq. (10.12)
        ar_params = res[0]
    # NOTE(review): the .copy() works around an apparent aliasing bug in
    # arcovar*/arcovar_marple; still to be tracked down.
    Y.resize(N-P, refcheck=False)
    # Filter the original time series with the estimated AR polynomial to
    # obtain the residual sequence, Eq. (10.17).
    for k in range(P, N):
        SUM = X[k]
        for j in range(0, P):
            SUM = SUM + ar_params[j] * X[k-j-1]  #! Eq. (10.17)
        Y[k-P] = SUM
    # Estimate the MA parameters from the residuals; a "long" AR of order
    # at least 2*Q is suggested, Eq. (10.3).
    ma_params, rho = ma(Y, Q, 2*Q)  #! Eq. (10.3)
    return ar_params, ma_params, rho
class parma(ParametricSpectrum):
    """Class to create PSD using an ARMA estimator.

    See :func:`arma_estimate` for a description of the estimation method.

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import parma, marple_data
        p = parma(marple_data, 4, 4, 30, NFFT=4096)
        p.plot(sides='centerdc')

    """
    def __init__(self, data, P, Q, lag, NFFT=None, sampling=1.,
                 scale_by_freq=False):
        """**Constructor:**

        For a detailed description of the parameters, see :func:`arma_estimate`.

        :param array data: input data (list or numpy.array)
        :param int P: desired AR order
        :param int Q: desired MA order
        :param int lag: maximum lag for the autocorrelation estimates
        :param int NFFT: total length of the final data sets (padded with
            zero if needed; default is 4096)
        :param float sampling: sampling frequency of the input :attr:`data`.
        """
        super(parma, self).__init__(data, ma_order=Q, ar_order=P, lag=lag,
                                    NFFT=NFFT, sampling=sampling,
                                    scale_by_freq=scale_by_freq)
        # keep `lag` locally; __call__ needs it for arma_estimate
        self.lag = lag
    def __call__(self):
        # Estimate AR/MA parameters and noise variance, then build the PSD.
        ar_params, ma_params, rho = arma_estimate(self.data, self.ar_order,
                                                  self.ma_order, self.lag)
        self.ma = ma_params
        self.ar = ar_params
        self.rho = rho
        psd = arma2psd(A=self.ar, B=self.ma, rho=self.rho,
                       T=self.sampling, NFFT=self.NFFT)
        # For real data keep only the one-sided spectrum (the factor 2
        # preserves total power); complex data keep the two-sided PSD.
        if self.datatype == 'real':
            if self.NFFT % 2 == 0:
                newpsd = psd[0:int(self.NFFT/2+1)] * 2
            else:
                newpsd = psd[0:int((self.NFFT+1)/2)] * 2
            self.psd = newpsd
        else:
            self.psd = psd
        if self.scale_by_freq is True:
            self.scale()
        return self
    def _str_title(self):
        # Title line used by the base class __str__ and plot labels.
        return "ARMA PSD estimate\n"
    def __str__(self):
        return super(parma, self).__str__()
class pma(ParametricSpectrum):
    """Class to create PSD using a MA estimator.

    See :func:`ma` for a description of the estimation method.

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import pma, marple_data
        p = pma(marple_data, 15, 30, NFFT=4096)
        p.plot(sides='centerdc')

    """
    def __init__(self, data, Q, M, NFFT=None, sampling=1.,
                 scale_by_freq=False):
        """**Constructor:**

        For a detailed description of the parameters, see :func:`ma`.

        :param array data: input data (list or numpy.array)
        :param int Q: MA order
        :param int M: order of the long AR model used to estimate the MA
            parameters
        :param int NFFT: total length of the final data sets (padded with zero
            if needed; default is 4096)
        :param float sampling: sampling frequency of the input :attr:`data`.
        """
        super(pma, self).__init__(data, ma_order=Q, ar_order=M,
                                  NFFT=NFFT, sampling=sampling,
                                  scale_by_freq=scale_by_freq)
    def __call__(self):
        # Estimate MA parameters and noise variance, then build the PSD
        # from a pure-MA model (A=None).
        ma_params, rho = ma(self.data, self.ma_order, self.ar_order)
        self.ma = ma_params
        self.rho = rho
        psd = arma2psd(A=None, B=self.ma, rho=self.rho,
                       T=self.sampling, NFFT=self.NFFT)
        # For real data keep only the one-sided spectrum (the factor 2
        # preserves total power); complex data keep the two-sided PSD.
        if self.datatype == 'real':
            if self.NFFT % 2 == 0:
                newpsd = psd[0:int(self.NFFT/2+1)] * 2
            else:
                newpsd = psd[0:int((self.NFFT+1)/2)] * 2
            self.psd = newpsd
        else:
            self.psd = psd
        if self.scale_by_freq is True:
            self.scale()
        # NOTE(review): unlike parma.__call__, this does not return self —
        # confirm whether callers depend on the None return value.
        self.modified = False
    def _str_title(self):
        # Title line used by the base class __str__ and plot labels.
        return "MA (moving average) PSD estimate\n"
    def __str__(self):
        return super(pma, self).__str__()
def ma(X, Q, M):
    """Moving average estimator.

    Estimate the MA parameters and the driving noise variance of a data
    sequence by first fitting a long AR model and then performing a least
    squares fit on its coefficients.

    :param array X: The input data array
    :param int Q: Desired MA model order (must be >0 and <M)
    :param int M: Order of "long" AR model (suggest at least 2*Q)

    :return:
        * MA - Array of Q complex MA parameter estimates
        * RHO - Real scalar of white noise variance estimate

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import arma2psd, ma, marple_data
        import pylab

        b, rho = ma(marple_data, 15, 30)
        psd = arma2psd(B=b, rho=rho, sides='centerdc')
        pylab.plot(pylab.linspace(-0.5, 0.5, 4096), 10 * pylab.log10(psd/max(psd)))
        pylab.axis([-0.5, 0.5, -30, 0])

    :reference: [Marple]_
    """
    if Q <= 0 or Q >= M:
        raise ValueError('Q(MA) must be in ]0,lag[')

    # Step 1: fit a high-order AR model to the data (Eq. (10.5)).
    long_ar, noise_var, _refl = yulewalker.aryule(X, M, 'biased')
    # Prepend the implicit zero-lag coefficient A0 = 1.
    long_ar = np.insert(long_ar, 0, 1)
    # Step 2: derive the MA parameters from the AR coefficients by a
    # second Yule-Walker fit (Eq. (10.7)).
    estimates, _p, _c = yulewalker.aryule(long_ar, Q, 'biased')
    return estimates, noise_var
| {
"repo_name": "cokelaer/spectrum",
"path": "src/spectrum/arma.py",
"copies": "1",
"size": "12018",
"license": "bsd-3-clause",
"hash": 4337537801384424400,
"line_mean": 29.8946015424,
"line_max": 85,
"alpha_frac": 0.5681477783,
"autogenerated": false,
"ratio": 3.3692178301093354,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44373656084093355,
"avg_score": null,
"num_lines": null
} |
'''ARMA process and estimation with scipy.signal.lfilter
2009-09-06: copied from try_signal.py
reparameterized same as signal.lfilter (positive coefficients)
Notes
-----
* pretty fast
* checked with Monte Carlo and cross comparison with statsmodels yule_walker
for AR numbers are close but not identical to yule_walker
not compared to other statistics packages, no degrees of freedom correction
* ARMA(2,2) estimation (in Monte Carlo) requires longer time series to estimate parameters
without large variance. There might be different ARMA parameters
with similar impulse response function that cannot be well
distinguished with small samples (e.g. 100 observations)
* good for one time calculations for entire time series, not for recursive
prediction
* class structure not very clean yet
* many one-liners with scipy.signal, but takes time to figure out usage
* missing result statistics, e.g. t-values, but standard errors in examples
* no criteria for choice of number of lags
* no constant term in ARMA process
* no integration, differencing for ARIMA
* written without textbook, works but not sure about everything
briefly checked and it looks to be standard least squares, see below
* theoretical autocorrelation function of general ARMA
Done, relatively easy to guess solution, time consuming to get
theoretical test cases,
example file contains explicit formulas for acovf of MA(1), MA(2) and ARMA(1,1)
* two names for lag polynomials ar = rhoy, ma = rhoe ?
Properties:
Judge, ... (1985): The Theory and Practice of Econometrics
BigJudge p. 237ff:
If the time series process is a stationary ARMA(p,q), then
minimizing the sum of squares is asymptotically (as T -> inf)
equivalent to the exact Maximum Likelihood Estimator
Because Least Squares conditional on the initial information
does not use all information, in small samples exact MLE can
be better.
Without the normality assumption, the least squares estimator
is still consistent under suitable conditions, however not
efficient
Author: josefpktd
License: BSD
'''
from __future__ import print_function
from statsmodels.compat.python import range
import numpy as np
from scipy import signal, optimize, linalg
from statsmodels.base.model import LikelihoodModel
#this has been copied to new arma_mle.py - keep temporarily for easier lookup
class ARIMAProcess(LikelihoodModel):
    '''Currently ARMA only, no differencing used - no I.

    Parameterized as ``rhoy(L) y_t = rhoe(L) eta_t`` with positive lag
    coefficients, i.e. the same convention as ``scipy.signal.lfilter``.

    An instance of this class preserves state, so new class instances
    should be created for different examples.
    '''
    def __init__(self, endog, exog=None):
        super(ARIMAProcess, self).__init__(endog, exog)
        if endog.ndim == 1:
            endog = endog[:,None]
        elif endog.ndim > 1 and endog.shape[1] != 1:
            raise ValueError("Only the univariate case is implemented")
        self.endog = endog # overwrite endog
        if exog is not None:
            raise ValueError("Exogenous variables are not yet supported.")

    def fit(self, order=(0,0,0), method="ls", rhoy0=None, rhoe0=None):
        '''
        Estimate lag coefficients of an ARIMA process.

        Parameters
        ----------
        order : sequence
            p,d,q where p is the number of AR lags, d is the number of
            differences to induce stationarity, and q is the number of
            MA lags to estimate.
        method : str {"ls", "ssm"}
            Method of estimation.  LS is conditional least squares.
            SSM is state-space model and the Kalman filter is used to
            maximize the exact likelihood.
        rhoy0, rhoe0 : array_like (optional)
            starting values for estimation

        Returns
        -------
        rh, cov_x, infodict, mesg, ier : output of scipy.optimize.leastsq
            rh :
                estimate of lag parameters, concatenated [rhoy, rhoe]
            cov_x :
                unscaled (!) covariance matrix of coefficient estimates
        '''
        if not hasattr(order, '__iter__'):
            raise ValueError("order must be an iterable sequence. Got type \
%s instead" % type(order))
        p,d,q = order
        if d > 0:
            raise ValueError("Differencing not implemented yet")
            # assume no constant, ie mu = 0
            # unless overwritten then use w_bar for mu
            # BUG FIX (latent, unreachable until differencing is
            # implemented): `endog` was an undefined name here; the
            # instance attribute is self.endog.
            Y = np.diff(self.endog, d, axis=0)  #TODO: handle lags?

        x = self.endog.squeeze()  # remove the squeeze might be needed later
        # BUG FIX: store the series; predicted() reads self.x, which was
        # never assigned before and raised AttributeError.
        self.x = x

        def errfn(rho):
            # split stacked parameters and prepend the zero-lag 1s
            rhoy = np.concatenate(([1], rho[:p]))
            rhoe = np.concatenate(([1], rho[p:]))
            # one-step-ahead residuals of the candidate ARMA filter
            etahatr = signal.lfilter(rhoy, rhoe, x)
            return etahatr

        if rhoy0 is None:
            rhoy0 = 0.5 * np.ones(p)
        if rhoe0 is None:
            rhoe0 = 0.5 * np.ones(q)

        method = method.lower()
        if method == "ls":
            # conditional least squares on the residual series
            rh, cov_x, infodict, mesg, ier = \
                optimize.leastsq(errfn, np.r_[rhoy0, rhoe0], ftol=1e-10,
                                 full_output=True)
            #TODO: integrate this into the MLE.fit framework?
        elif method == "ssm":
            # BUG FIX: the original silently fell through and raised a
            # confusing NameError on `rh` below; fail explicitly instead.
            raise NotImplementedError("state-space estimation ('ssm') is "
                                      "not implemented yet")
        else:
            # fmin_bfgs is slow or doesn't work yet
            errfnsum = lambda rho: np.sum(errfn(rho)**2)
            rh, fopt, gopt, cov_x, _, _, ier = \
                optimize.fmin_bfgs(errfnsum, np.r_[rhoy0, rhoe0], maxiter=2,
                                   full_output=True)
            infodict, mesg = None, None
        self.rh = rh
        self.rhoy = np.concatenate(([1], rh[:p]))
        self.rhoe = np.concatenate(([1], rh[p:]))  #rh[-q:])) doesnt work for q=0
        self.error_estimate = errfn(rh)
        return rh, cov_x, infodict, mesg, ier

    def errfn(self, rho=None, p=None, x=None):
        '''Residuals of the ARMA filter; duplicate of the closure in fit.

        If `rho` is None the fitted coefficients (self.rhoy, self.rhoe)
        are used, otherwise `rho` is split at `p` into AR and MA parts.
        '''
        if not rho is None:
            rhoy = np.concatenate(([1], rho[:p]))
            rhoe = np.concatenate(([1], rho[p:]))
        else:
            rhoy = self.rhoy
            rhoe = self.rhoe
        etahatr = signal.lfilter(rhoy, rhoe, x)
        return etahatr

    def predicted(self, rhoy=None, rhoe=None):
        '''Past predicted values of the time series.

        just added, not checked yet
        '''
        if rhoy is None:
            rhoy = self.rhoy
        if rhoe is None:
            rhoe = self.rhoe
        # observed series plus residuals; self.x is set by fit()
        return self.x + self.error_estimate

    def forecast(self, ar=None, ma=None, nperiod=10):
        '''Forecast by filtering the (zero-padded) residual series.'''
        eta = np.r_[self.error_estimate, np.zeros(nperiod)]
        if ar is None:
            ar = self.rhoy
        if ma is None:
            ma = self.rhoe
        return signal.lfilter(ma, ar, eta)

    #TODO: is this needed as a method at all?
    @classmethod
    def generate_sample(cls, ar, ma, nsample, std=1):
        '''Generate a sample of an ARMA process with iid normal noise.'''
        eta = std * np.random.randn(nsample)
        return signal.lfilter(ma, ar, eta)
def arma_generate_sample(ar, ma, nsample, sigma=1, distrvs=np.random.randn, burnin=0):
    """Generate a random sample of an ARMA process.

    Parameters
    ----------
    ar : array_like, 1d
        coefficient for autoregressive lag polynomial, including zero lag
    ma : array_like, 1d
        coefficient for moving-average lag polynomial, including zero lag
    nsample : int
        length of simulated time series
    sigma : float
        standard deviation of noise
    distrvs : function, random number generator
        function that generates the random numbers, and takes sample size
        as argument; default: np.random.randn
        TODO: change to size argument
    burnin : integer (default: 0)
        to reduce the effect of initial conditions, burnin observations at
        the beginning of the sample are dropped

    Returns
    -------
    sample : array
        sample of ARMA process given by ar, ma of length nsample

    Notes
    -----
    Both lag polynomials include the zero-lag coefficient (typically 1).
    Due to the signal-processing conventions of ``scipy.signal.lfilter``,
    the AR parameters carry the opposite sign of the statistics
    convention — negate them before passing, e.g. ``np.r_[1, -ar]``.
    """
    # draw nsample + burnin innovations, filter them through the ARMA
    # transfer function ma/ar, then drop the burn-in head
    innovations = sigma * distrvs(nsample + burnin)
    simulated = signal.lfilter(ma, ar, innovations)
    return simulated[burnin:]
def arma_acovf(ar, ma, nobs=10):
    '''Theoretical autocovariance function of an ARMA process.

    Parameters
    ----------
    ar : array_like, 1d
        coefficient for autoregressive lag polynomial, including zero lag
    ma : array_like, 1d
        coefficient for moving-average lag polynomial, including zero lag
    nobs : int
        number of terms (lags plus zero lag) to include in returned acovf

    Returns
    -------
    acovf : array
        autocovariance of ARMA process given by ar, ma

    See Also
    --------
    arma_acf
    acovf

    Notes
    -----
    Tries some crude numerical speed improvements for cases with high
    persistence.  This algorithm is slow if the process is highly
    persistent and only a few autocovariances are desired.
    '''
    # Start with a longer impulse response when the AR part is close to a
    # unit root; exact break points are heuristic.
    if np.abs(np.sum(ar) - 1) > 0.9:
        n_ir = max(1000, 2 * nobs)
    else:
        n_ir = max(100, 2 * nobs)
    ir = arma_impulse_response(ar, ma, nobs=n_ir)
    # Grow the impulse response until its tail is negligible; the
    # threshold was only checked for AR(1).
    while ir[-1] > 5 * 1e-5:
        n_ir *= 10
        ir = arma_impulse_response(ar, ma, nobs=n_ir)
    # For a huge impulse response and few requested lags, direct dot
    # products are cheaper than one full correlation.
    if n_ir > 50000 and nobs < 1001:
        out = np.array([np.dot(ir[:nobs - t], ir[t:nobs])
                        for t in range(nobs)])
    else:
        out = np.correlate(ir, ir, 'full')[len(ir) - 1:]
    return out[:nobs]
def arma_acf(ar, ma, nobs=10):
    '''Theoretical autocorrelation function of an ARMA process.

    Parameters
    ----------
    ar : array_like, 1d
        coefficient for autoregressive lag polynomial, including zero lag
    ma : array_like, 1d
        coefficient for moving-average lag polynomial, including zero lag
    nobs : int
        number of terms (lags plus zero lag) to include in returned acf

    Returns
    -------
    acf : array
        autocorrelation of ARMA process given by ar, ma

    See Also
    --------
    arma_acovf
    acf
    acovf
    '''
    # normalize the autocovariance by the lag-0 variance
    gamma = arma_acovf(ar, ma, nobs)
    return gamma / gamma[0]
def arma_pacf(ar, ma, nobs=10):
    '''Partial autocorrelation function of an ARMA process.

    Parameters
    ----------
    ar : array_like, 1d
        coefficient for autoregressive lag polynomial, including zero lag
    ma : array_like, 1d
        coefficient for moving-average lag polynomial, including zero lag
    nobs : int
        number of terms (lags plus zero lag) to include in returned pacf

    Returns
    -------
    pacf : array
        partial autocorrelation of ARMA process given by ar, ma

    Notes
    -----
    Solves the Yule-Walker equations for each lag order up to nobs lags.

    not tested/checked yet
    '''
    out = np.zeros(nobs)
    out[0] = 1.
    rho = arma_acf(ar, ma, nobs=nobs + 1)
    for lag in range(2, nobs + 1):
        head = rho[:lag]
        # last Yule-Walker coefficient at this order is the partial
        # autocorrelation at lag-1 index (lag - 1)
        out[lag - 1] = linalg.solve(linalg.toeplitz(head[:-1]), head[1:])[-1]
    return out
def arma_periodogram(ar, ma, worN=None, whole=0):
    '''Periodogram for an ARMA process given by lag-polynomials ar and ma.

    Parameters
    ----------
    ar : array_like
        autoregressive lag-polynomial with leading 1 and lhs sign
    ma : array_like
        moving average lag-polynomial with leading 1
    worN : {None, int}, optional
        option for scipy.signal.freqz (read "w or N").
        If None, then compute at 512 frequencies around the unit circle.
        If a single integer, then compute at that many frequencies.
        Otherwise, compute the response at frequencies given in worN.
    whole : {0,1}, optional
        option for scipy.signal.freqz.
        Normally, frequencies are computed from 0 to pi (upper half of the
        unit circle); if whole is non-zero, from 0 to 2*pi.

    Returns
    -------
    w : array
        frequencies
    sd : array
        periodogram, spectral density

    Notes
    -----
    Normalization?

    This uses signal.freqz, which does not use fft.  There is a fft
    version somewhere.
    '''
    freqs, resp = signal.freqz(ma, ar, worN=worN, whole=whole)
    density = np.abs(resp) ** 2 / np.sqrt(2 * np.pi)
    if np.sum(np.isnan(resp)) > 0:
        # this happens with unit root or seasonal unit root
        print('Warning: nan in frequency response h, maybe a unit root')
    return freqs, density
def arma_impulse_response(ar, ma, nobs=100):
    '''Impulse response function (MA representation) of an ARMA process.

    Parameters
    ----------
    ma : array_like, 1d
        moving average lag polynomial
    ar : array_like, 1d
        auto regressive lag polynomial
    nobs : int
        number of observations to calculate

    Returns
    -------
    ir : array, 1d
        impulse response function with nobs elements

    Notes
    -----
    This is the same as finding the MA representation of an ARMA(p,q).
    By reversing the role of ar and ma in the function arguments, the
    returned result is the AR representation of an ARMA(p,q), i.e

    ma_representation = arma_impulse_response(ar, ma, nobs=100)
    ar_representation = arma_impulse_response(ma, ar, nobs=100)

    fully tested against matlab

    Examples
    --------
    AR(1)

    >>> arma_impulse_response([1.0, -0.8], [1.], nobs=10)
    array([ 1.        ,  0.8       ,  0.64      ,  0.512     ,  0.4096    ,
            0.32768   ,  0.262144  ,  0.2097152 ,  0.16777216,  0.13421773])

    this is the same as

    >>> 0.8**np.arange(10)
    array([ 1.        ,  0.8       ,  0.64      ,  0.512     ,  0.4096    ,
            0.32768   ,  0.262144  ,  0.2097152 ,  0.16777216,  0.13421773])

    MA(2)

    >>> arma_impulse_response([1.0], [1., 0.5, 0.2], nobs=10)
    array([ 1. ,  0.5,  0.2,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ])

    ARMA(1,2)

    >>> arma_impulse_response([1.0, -0.8], [1., 0.5, 0.2], nobs=10)
    array([ 1.        ,  1.3       ,  1.24      ,  0.992     ,  0.7936    ,
            0.63488   ,  0.507904  ,  0.4063232 ,  0.32505856,  0.26004685])
    '''
    # the impulse response is the filter output for a unit pulse input
    unit_pulse = np.zeros(nobs)
    unit_pulse[0] = 1.
    return signal.lfilter(ma, ar, unit_pulse)
# Alias: the MA representation of an ARMA process is exactly its impulse
# response, so reuse the function under an easier-to-remember name.
arma2ma = arma_impulse_response
# arma2ar below cannot be a plain alias because the roles of ar and ma
# must be swapped first.
def arma2ar(ar, ma, nobs=100):
    '''AR representation of an ARMA process.

    Parameters
    ----------
    ar : array_like, 1d
        auto regressive lag polynomial
    ma : array_like, 1d
        moving average lag polynomial
    nobs : int
        number of observations to calculate

    Returns
    -------
    ar : array, 1d
        coefficients of AR lag polynomial with nobs elements

    Notes
    -----
    Equivalent to ``arma_impulse_response(ma, ar, nobs=100)`` — the AR
    representation is the impulse response with the roles of the two lag
    polynomials exchanged.

    fully tested against matlab
    '''
    # swap ar and ma to obtain the AR (instead of MA) representation
    return arma_impulse_response(ma, ar, nobs=nobs)
#moved from sandbox.tsa.try_fi
def ar2arma(ar_des, p, q, n=20, mse='ar', start=None):
    '''Find an ARMA approximation to an AR process.

    This finds the ARMA(p,q) coefficients that minimize the integrated
    squared difference between the impulse_response functions
    (MA representation) of the AR and the ARMA process. This does
    currently not check whether the MA lagpolynomial of the ARMA
    process is invertible, neither does it check the roots of the AR
    lagpolynomial.

    Parameters
    ----------
    ar_des : array_like
        coefficients of original AR lag polynomial, including lag zero
    p, q : int
        length of desired ARMA lag polynomials
    n : int
        number of terms of the impulse_response function to include in the
        objective function for the approximation
    mse : string, 'ar'
        not used yet
    start : array_like or None
        starting values for the optimizer; defaults to -0.9 for the AR
        part and 0 for the MA part

    Returns
    -------
    ar_app, ma_app : arrays
        coefficients of the AR and MA lag polynomials of the approximation
    res : tuple
        result of optimize.leastsq

    Notes
    -----
    Extension is possible if we want to match autocovariance instead
    of impulse response function.

    TODO: convert MA lag polynomial, ma_app, to be invertible, by mirroring
    roots outside the unit interval to ones that are inside. How do we do
    this?
    '''
    def msear_err(arma, ar_des):
        # split the stacked parameter vector and prepend the zero-lag 1s
        ar, ma = np.r_[1, arma[:p - 1]], np.r_[1, arma[p - 1:]]
        ar_approx = arma_impulse_response(ma, ar, n)
        # residual between desired and approximated AR representation
        return ar_des - ar_approx

    if start is None:
        arma0 = np.r_[-0.9 * np.ones(p - 1), np.zeros(q - 1)]
    else:
        arma0 = start
    res = optimize.leastsq(msear_err, arma0, args=(ar_des,), maxfev=5000)
    arma_app = np.atleast_1d(res[0])
    # BUG FIX: the original line ended with a stray trailing comma, which
    # made ar_app a 1-tuple containing the array instead of the array.
    ar_app = np.r_[1, arma_app[:p - 1]]
    ma_app = np.r_[1, arma_app[p - 1:]]
    return ar_app, ma_app, res
def lpol2index(ar):
    '''Squeeze a lag polynomial: keep only the non-zero coefficients.

    Parameters
    ----------
    ar : array_like
        coefficients of lag polynomial

    Returns
    -------
    coeffs : array
        non-zero coefficients of lag polynomial
    index : array
        index (lags) of lagpolynomial with non-zero elements
    '''
    poly = np.asarray(ar)
    # lags at which the polynomial has non-zero coefficients
    nonzero_lags = np.nonzero(poly)[0]
    return poly[nonzero_lags], nonzero_lags
def index2lpol(coeffs, index):
    '''Expand non-zero coefficients back into a full lag polynomial.

    Inverse of :func:`lpol2index`.

    Parameters
    ----------
    coeffs : array
        non-zero coefficients of lag polynomial
    index : array
        index (lags) of lagpolynomial with non-zero elements

    Returns
    -------
    ar : array_like
        coefficients of lag polynomial
    '''
    # BUG FIX: the array needs max(index) + 1 entries so the highest lag
    # fits; np.zeros(max(index)) raised IndexError on the assignment below
    # for any non-empty index.
    n = max(index) + 1
    ar = np.zeros(n)
    ar[index] = coeffs
    return ar
#moved from sandbox.tsa.try_fi
def lpol_fima(d, n=20):
    '''MA representation of fractional integration.

    .. math:: (1-L)^{-d} for |d|<0.5 or |d|<1 (?)

    Parameters
    ----------
    d : float
        fractional power
    n : int
        number of terms to calculate, including lag zero

    Returns
    -------
    ma : array
        coefficients of lag polynomial
    '''
    #hide import inside function until we use this heavily
    from scipy.special import gammaln
    lags = np.arange(n)
    # binomial-series coefficients computed in log space for stability
    return np.exp(gammaln(d + lags) - gammaln(lags + 1) - gammaln(d))
#moved from sandbox.tsa.try_fi
def lpol_fiar(d, n=20):
    '''AR representation of fractional integration.

    .. math:: (1-L)^{d} for |d|<0.5 or |d|<1 (?)

    Parameters
    ----------
    d : float
        fractional power
    n : int
        number of terms to calculate, including lag zero

    Returns
    -------
    ar : array
        coefficients of lag polynomial

    Notes
    -----
    The first coefficient is 1; all remaining terms carry negative signs,
    as in ``ar(L)*x_t``.
    '''
    #hide import inside function until we use this heavily
    from scipy.special import gammaln
    lags = np.arange(n)
    # binomial-series coefficients (log space), negated per AR convention
    coeffs = -np.exp(gammaln(-d + lags) - gammaln(lags + 1) - gammaln(-d))
    coeffs[0] = 1
    return coeffs
#moved from sandbox.tsa.try_fi
def lpol_sdiff(s):
    '''Coefficients of the seasonal difference operator (1 - L^s).

    just a trivial convenience function

    Parameters
    ----------
    s : int
        number of periods in season

    Returns
    -------
    sdiff : list, length s+1
    '''
    # 1 at lag zero, -1 at lag s, zeros in between
    coeffs = [0] * (s + 1)
    coeffs[0] = 1
    coeffs[-1] = -1
    return coeffs
def deconvolve(num, den, n=None):
    """Deconvolve divisor out of signal: polynomial division for n terms.

    calculates den^{-1} * num

    Parameters
    ----------
    num : array_like
        signal or lag polynomial
    denom : array_like
        coefficients of lag polynomial (linear filter)
    n : None or int
        number of terms of quotient

    Returns
    -------
    quot : array
        quotient or filtered series
    rem : array
        remainder

    Notes
    -----
    If num is a time series, then this applies the linear filter den^{-1}.
    If both num and den are both lagpolynomials, then this calculates the
    quotient polynomial for n terms and also returns the remainder.

    This is copied from scipy.signal.signaltools and added n as optional
    parameter.
    """
    num = np.atleast_1d(num)
    den = np.atleast_1d(den)
    num_len, den_len = len(num), len(den)
    if den_len > num_len and n is None:
        # divisor longer than signal: quotient is empty, remainder is num
        quot = []
        rem = num
    else:
        if n is None:
            n = num_len - den_len + 1
        # filtering a unit pulse with num/den yields the quotient terms
        pulse = np.zeros(n, float)
        pulse[0] = 1
        quot = signal.lfilter(num, den, pulse)
        reconstructed = signal.convolve(den, quot, mode='full')
        if len(num) < len(reconstructed):  # 1d only ?
            num = np.concatenate((num, np.zeros(len(reconstructed) - len(num))))
        rem = num - reconstructed
    return quot, rem
class ArmaProcess(object):
    '''Represents an ARMA process for given lag-polynomials.

    This is a class to bring together properties of the process.
    It does not do any estimation or statistical analysis.

    Both ``ar`` and ``ma`` are full lag-polynomials including the leading 1
    (signal-processing sign convention, as used by scipy.signal.lfilter).

    maybe needs special handling for unit roots
    '''
    def __init__(self, ar, ma, nobs=None):
        self.ar = np.asarray(ar)
        self.ma = np.asarray(ma)
        # coefficients without the zero lag, statistics sign convention
        self.arcoefs = -self.ar[1:]
        self.macoefs = self.ma[1:]
        self.arpoly = np.polynomial.Polynomial(self.ar)
        self.mapoly = np.polynomial.Polynomial(self.ma)
        self.nobs = nobs

    @classmethod
    def from_coeffs(cls, arcoefs, macoefs, nobs=None):
        '''create ArmaProcess instance from coefficients of the lag-polynomials
        (statistics sign convention, zero-lag coefficients excluded)
        '''
        return cls(np.r_[1, -arcoefs], np.r_[1, macoefs], nobs=nobs)

    @classmethod
    def from_estimation(cls, model_results, nobs=None):
        '''create ArmaProcess instance from estimation results
        '''
        arcoefs = model_results.params[:model_results.nar]
        macoefs = model_results.params[model_results.nar:
                                       model_results.nar+model_results.nma]
        return cls(np.r_[1, -arcoefs], np.r_[1, macoefs], nobs=nobs)

    def __mul__(self, oth):
        # Multiplying processes multiplies their lag-polynomials; `oth`
        # may be another ArmaProcess or an (ar, ma) pair of coefficients.
        if isinstance(oth, self.__class__):
            ar = (self.arpoly * oth.arpoly).coef
            ma = (self.mapoly * oth.mapoly).coef
        else:
            try:
                aroth, maoth = oth
                arpolyoth = np.polynomial.Polynomial(aroth)
                mapolyoth = np.polynomial.Polynomial(maoth)
                ar = (self.arpoly * arpolyoth).coef
                ma = (self.mapoly * mapolyoth).coef
            except:
                print('other is not a valid type')
                raise
        return self.__class__(ar, ma, nobs=self.nobs)

    def __repr__(self):
        # BUG FIX: use %s for nobs — %d raised TypeError for the default
        # nobs=None; for integer nobs the rendering is unchanged.
        return 'ArmaProcess(%r, %r, nobs=%s)' % (self.ar.tolist(),
                                                 self.ma.tolist(), self.nobs)

    def __str__(self):
        return 'ArmaProcess\nAR: %r\nMA: %r' % (self.ar.tolist(), self.ma.tolist())

    def acovf(self, nobs=None):
        nobs = nobs or self.nobs
        return arma_acovf(self.ar, self.ma, nobs=nobs)
    acovf.__doc__ = arma_acovf.__doc__

    def acf(self, nobs=None):
        nobs = nobs or self.nobs
        return arma_acf(self.ar, self.ma, nobs=nobs)
    acf.__doc__ = arma_acf.__doc__

    def pacf(self, nobs=None):
        nobs = nobs or self.nobs
        return arma_pacf(self.ar, self.ma, nobs=nobs)
    pacf.__doc__ = arma_pacf.__doc__

    def periodogram(self, nobs=None):
        nobs = nobs or self.nobs
        return arma_periodogram(self.ar, self.ma, worN=nobs)
    periodogram.__doc__ = arma_periodogram.__doc__

    def impulse_response(self, nobs=None):
        nobs = nobs or self.nobs
        # BUG FIX: arma_impulse_response takes `nobs`, not `worN`; the old
        # keyword raised TypeError on every call.
        return arma_impulse_response(self.ar, self.ma, nobs=nobs)
    impulse_response.__doc__ = arma_impulse_response.__doc__

    def arma2ma(self, nobs=None):
        nobs = nobs or self.nobs
        return arma2ma(self.ar, self.ma, nobs=nobs)
    # NOTE: in the class body this refers to the method just defined (the
    # module-level arma2ma is shadowed), so this is effectively a no-op.
    arma2ma.__doc__ = arma2ma.__doc__

    def arma2ar(self, nobs=None):
        nobs = nobs or self.nobs
        return arma2ar(self.ar, self.ma, nobs=nobs)
    # NOTE: same shadowing as for arma2ma above.
    arma2ar.__doc__ = arma2ar.__doc__

    def ar_roots(self):
        '''roots of autoregressive lag-polynomial
        '''
        return self.arpoly.roots()

    def ma_roots(self):
        '''roots of moving average lag-polynomial
        '''
        return self.mapoly.roots()

    def isstationary(self):
        '''Arma process is stationary if AR roots are outside unit circle

        Returns
        -------
        isstationary : boolean
            True if autoregressive roots are outside unit circle
        '''
        if np.all(np.abs(self.ar_roots()) > 1):
            return True
        else:
            return False

    def isinvertible(self):
        '''Arma process is invertible if MA roots are outside unit circle

        Returns
        -------
        isinvertible : boolean
            True if moving average roots are outside unit circle
        '''
        if np.all(np.abs(self.ma_roots()) > 1):
            return True
        else:
            return False

    def invertroots(self, retnew=False):
        '''make MA polynomial invertible by inverting roots inside unit circle

        Parameters
        ----------
        retnew : boolean
            If False (default), then return the lag-polynomial as array.
            If True, then return a new instance with invertible MA-polynomial

        Returns
        -------
        manew : array
            new invertible MA lag-polynomial, returned if retnew is false.
        wasinvertible : boolean
            True if the MA lag-polynomial was already invertible, returned if
            retnew is false.
        armaprocess : new instance of class
            If retnew is true, then return a new instance with invertible
            MA-polynomial
        '''
        pr = self.ma_roots()
        insideroots = np.abs(pr) < 1
        if insideroots.any():
            # mirror roots inside the unit circle to their reciprocals
            pr[insideroots] = 1. / pr[insideroots]
            # BUG FIX: the original referenced undefined names `poly` and
            # `pn` (NameError); rebuild the polynomial from the mirrored
            # roots and normalize so the zero-lag coefficient is 1.
            pnew = np.polynomial.Polynomial.fromroots(pr)
            mainv = pnew.coef / pnew.coef[0]
            wasinvertible = False
        else:
            mainv = self.ma
            wasinvertible = True
        if retnew:
            return self.__class__(self.ar, mainv, nobs=self.nobs)
        else:
            return mainv, wasinvertible

    def generate_sample(self, size=100, scale=1, distrvs=None, axis=0, burnin=0):
        '''generate ARMA samples

        Parameters
        ----------
        size : int or tuple of ints
            If size is an integer, then this creates a 1d timeseries of
            length size.  If size is a tuple, then the timeseries is along
            axis.  All other axes have independent arma samples.

        Returns
        -------
        rvs : ndarray
            random sample(s) of arma process

        Notes
        -----
        Should work for n-dimensional with time series along axis, but not
        tested yet.  Processes are sampled independently.
        '''
        if distrvs is None:
            distrvs = np.random.normal
        if np.ndim(size) == 0:
            size = [size]
        if burnin:
            # sample burnin extra observations along `axis` and slice
            # them off afterwards to reduce initial-condition effects
            newsize = list(size)
            newsize[axis] += burnin
            newsize = tuple(newsize)
            fslice = [slice(None)] * len(newsize)
            fslice[axis] = slice(burnin, None, None)
            fslice = tuple(fslice)
        else:
            newsize = tuple(size)
            fslice = tuple([slice(None)] * np.ndim(newsize))
        eta = scale * distrvs(size=newsize)
        return signal.lfilter(self.ma, self.ar, eta, axis=axis)[fslice]
# Explicit public API for wildcard imports; ArmaProcess, ARIMAProcess and
# the lpol_* helpers are not exported here.
__all__ = ['arma_acf', 'arma_acovf', 'arma_generate_sample',
           'arma_impulse_response', 'arma2ar', 'arma2ma', 'deconvolve',
           'lpol2index', 'index2lpol']
if __name__ == '__main__':
    # Ad-hoc smoke-test / demo script for the estimation code above.
    # NOTE(review): this demo looks stale relative to the visible API:
    #  * ``ARIMA`` is never defined in this module, so the
    #    ``ARIMA.generate_sample(...)`` calls below should raise NameError
    #    -- confirm against the rest of the file.
    #  * ``arest.generate_sample(ar, ma, 1000, 0.1)`` passes (ar, ma)
    #    positionally, but the visible ``generate_sample`` signature is
    #    (size, scale, distrvs, axis, burnin) -- verify intent.
    # Simulate AR(1)
    #--------------
    # ar * y = ma * eta
    ar = [1, -0.8]
    ma = [1.0]
    # generate AR data
    eta = 0.1 * np.random.randn(1000)
    yar1 = signal.lfilter(ar, ma, eta)
    print("\nExample 0")
    arest = ARIMAProcess(yar1)
    rhohat, cov_x, infodict, mesg, ier = arest.fit((1,0,1))
    print(rhohat)
    print(cov_x)
    print("\nExample 1")
    ar = [1.0, -0.8]
    ma = [1.0, 0.5]
    # NOTE(review): see the signature-mismatch note at the top of this block.
    y1 = arest.generate_sample(ar,ma,1000,0.1)
    arest = ARIMAProcess(y1)
    rhohat1, cov_x1, infodict, mesg, ier = arest.fit((1,0,1))
    print(rhohat1)
    print(cov_x1)
    err1 = arest.errfn(x=y1)
    print(np.var(err1))
    import statsmodels.api as sm
    # Compare against Yule-Walker estimates as a sanity check.
    print(sm.regression.yule_walker(y1, order=2, inv=True))
    print("\nExample 2")
    nsample = 1000
    ar = [1.0, -0.6, -0.1]
    ma = [1.0, 0.3, 0.2]
    y2 = ARIMA.generate_sample(ar,ma,nsample,0.1)
    arest2 = ARIMAProcess(y2)
    rhohat2, cov_x2, infodict, mesg, ier = arest2.fit((1,0,2))
    print(rhohat2)
    print(cov_x2)
    # NOTE(review): uses ``arest`` (Example 1 fit), not ``arest2`` -- verify.
    err2 = arest.errfn(x=y2)
    print(np.var(err2))
    print(arest2.rhoy)
    print(arest2.rhoe)
    print("true")
    print(ar)
    print(ma)
    # Refit the same data with a larger AR order.
    rhohat2a, cov_x2a, infodict, mesg, ier = arest2.fit((2,0,2))
    print(rhohat2a)
    print(cov_x2a)
    err2a = arest.errfn(x=y2)
    print(np.var(err2a))
    print(arest2.rhoy)
    print(arest2.rhoe)
    print("true")
    print(ar)
    print(ma)
    print(sm.regression.yule_walker(y2, order=2, inv=True))
    print("\nExample 20")
    nsample = 1000
    ar = [1.0]#, -0.8, -0.4]
    ma = [1.0, 0.5, 0.2]
    y3 = ARIMA.generate_sample(ar,ma,nsample,0.01)
    arest20 = ARIMAProcess(y3)
    rhohat3, cov_x3, infodict, mesg, ier = arest20.fit((2,0,0))
    print(rhohat3)
    print(cov_x3)
    err3 = arest20.errfn(x=y3)
    print(np.var(err3))
    # Residual standard error of the fit.
    print(np.sqrt(np.dot(err3,err3)/nsample))
    print(arest20.rhoy)
    print(arest20.rhoe)
    print("true")
    print(ar)
    print(ma)
    rhohat3a, cov_x3a, infodict, mesg, ier = arest20.fit((0,0,2))
    print(rhohat3a)
    print(cov_x3a)
    err3a = arest20.errfn(x=y3)
    print(np.var(err3a))
    print(np.sqrt(np.dot(err3a,err3a)/nsample))
    print(arest20.rhoy)
    print(arest20.rhoe)
    print("true")
    print(ar)
    print(ma)
    print(sm.regression.yule_walker(y3, order=2, inv=True))
    print("\nExample 02")
    nsample = 1000
    ar = [1.0, -0.8, 0.4] #-0.8, -0.4]
    ma = [1.0]#, 0.8, 0.4]
    y4 = ARIMA.generate_sample(ar,ma,nsample)
    arest02 = ARIMAProcess(y4)
    rhohat4, cov_x4, infodict, mesg, ier = arest02.fit((2,0,0))
    print(rhohat4)
    print(cov_x4)
    err4 = arest02.errfn(x=y4)
    print(np.var(err4))
    sige = np.sqrt(np.dot(err4,err4)/nsample)
    print(sige)
    # Scale the leastsq covariance by the residual variance to get
    # parameter standard errors.
    print(sige * np.sqrt(np.diag(cov_x4)))
    print(np.sqrt(np.diag(cov_x4)))
    print(arest02.rhoy)
    print(arest02.rhoe)
    print("true")
    print(ar)
    print(ma)
    rhohat4a, cov_x4a, infodict, mesg, ier = arest02.fit((0,0,2))
    print(rhohat4a)
    print(cov_x4a)
    err4a = arest02.errfn(x=y4)
    print(np.var(err4a))
    sige = np.sqrt(np.dot(err4a,err4a)/nsample)
    print(sige)
    print(sige * np.sqrt(np.diag(cov_x4a)))
    print(np.sqrt(np.diag(cov_x4a)))
    print(arest02.rhoy)
    print(arest02.rhoe)
    print("true")
    print(ar)
    print(ma)
    # NOTE(review): statsmodels.api was already imported above as ``sm``.
    import statsmodels.api as sm
    print(sm.regression.yule_walker(y4, order=2, method='mle', inv=True))
    import matplotlib.pyplot as plt
    plt.plot(arest2.forecast()[-100:])
    #plt.show()
    # Check the module-level deconvolve against scipy's implementation.
    ar1, ar2 = ([1, -0.4], [1, 0.5])
    ar2 = [1, -1]
    lagpolyproduct = np.convolve(ar1, ar2)
    print(deconvolve(lagpolyproduct, ar2, n=None))
    print(signal.deconvolve(lagpolyproduct, ar2))
    print(deconvolve(lagpolyproduct, ar2, n=10))
| {
"repo_name": "rgommers/statsmodels",
"path": "statsmodels/tsa/arima_process.py",
"copies": "1",
"size": "32823",
"license": "bsd-3-clause",
"hash": -7523990398813536000,
"line_mean": 29.1959521619,
"line_max": 94,
"alpha_frac": 0.5996709624,
"autogenerated": false,
"ratio": 3.4219140950792326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9498236223614105,
"avg_score": 0.004669766773025482,
"num_lines": 1087
} |
"""ARMA process and estimation with scipy.signal.lfilter
Notes
-----
* written without textbook, works but not sure about everything
briefly checked and it looks to be standard least squares, see below
* theoretical autocorrelation function of general ARMA
Done, relatively easy to guess solution, time consuming to get
theoretical test cases, example file contains explicit formulas for
acovf of MA(1), MA(2) and ARMA(1,1)
Properties:
Judge, ... (1985): The Theory and Practise of Econometrics
Author: josefpktd
License: BSD
"""
import numpy as np
from scipy import signal, optimize, linalg
from statsmodels.compat.pandas import Appender
from statsmodels.tools.docstring import remove_parameters, Docstring
from statsmodels.tools.validation import array_like
__all__ = ['arma_acf', 'arma_acovf', 'arma_generate_sample',
'arma_impulse_response', 'arma2ar', 'arma2ma', 'deconvolve',
'lpol2index', 'index2lpol']
NONSTATIONARY_ERROR = """\
The model's autoregressive parameters (ar) indicate that the process
is non-stationary. arma_acovf can only be used with stationary processes.
"""
def arma_generate_sample(ar, ma, nsample, scale=1, distrvs=None,
                         axis=0, burnin=0):
    """
    Simulate data from an ARMA.

    Parameters
    ----------
    ar : array_like
        The coefficient for autoregressive lag polynomial, including zero lag.
    ma : array_like
        The coefficient for moving-average lag polynomial, including zero lag.
    nsample : int or tuple of ints
        If nsample is an integer, then this creates a 1d timeseries of
        length size. If nsample is a tuple, creates a len(nsample)
        dimensional time series where time is indexed along the input
        variable ``axis``. All series are unless ``distrvs`` generates
        dependent data.
    scale : float
        The standard deviation of noise.
    distrvs : function, random number generator
        A function that generates the random numbers, and takes ``size``
        as argument. The default is np.random.standard_normal.
    axis : int
        See nsample for details.
    burnin : int
        Number of observation at the beginning of the sample to drop.
        Used to reduce dependence on initial values.

    Returns
    -------
    ndarray
        Random sample(s) from an ARMA process.

    Notes
    -----
    As mentioned above, both the AR and MA components should include the
    coefficient on the zero-lag. This is typically 1. Further, due to the
    conventions used in signal processing used in signal.lfilter vs.
    conventions in statistics for ARMA processes, the AR parameters should
    have the opposite sign of what you might expect. See the examples below.

    Examples
    --------
    >>> import numpy as np
    >>> np.random.seed(12345)
    >>> arparams = np.array([.75, -.25])
    >>> maparams = np.array([.65, .35])
    >>> ar = np.r_[1, -arparams] # add zero-lag and negate
    >>> ma = np.r_[1, maparams] # add zero-lag
    >>> y = sm.tsa.arma_generate_sample(ar, ma, 250)
    >>> model = sm.tsa.ARMA(y, (2, 2)).fit(trend='nc', disp=0)
    >>> model.params
    array([ 0.79044189, -0.23140636, 0.70072904, 0.40608028])
    """
    if distrvs is None:
        distrvs = np.random.standard_normal
    shape = [nsample] if np.ndim(nsample) == 0 else nsample
    if burnin:
        # Simulate ``burnin`` extra observations and drop them afterwards to
        # reduce the dependence of the sample on the zero initial state.
        padded = list(shape)
        padded[axis] += burnin
        padded = tuple(padded)
        keep = [slice(None)] * len(padded)
        keep[axis] = slice(burnin, None, None)
        keep = tuple(keep)
    else:
        padded = tuple(shape)
        keep = tuple([slice(None)] * np.ndim(padded))
    eta = scale * distrvs(size=padded)
    return signal.lfilter(ma, ar, eta, axis=axis)[keep]
def arma_acovf(ar, ma, nobs=10, sigma2=1, dtype=None):
    """
    Theoretical autocovariances of stationary ARMA processes

    Parameters
    ----------
    ar : array_like, 1d
        The coefficients for autoregressive lag polynomial, including zero lag.
    ma : array_like, 1d
        The coefficients for moving-average lag polynomial, including zero lag.
    nobs : int
        The number of terms (lags plus zero lag) to include in returned acovf.
    sigma2 : float
        Variance of the innovation term.

    Returns
    -------
    ndarray
        The autocovariance of ARMA process given by ar, ma.

    See Also
    --------
    arma_acf : Autocorrelation function for ARMA processes.
    acovf : Sample autocovariance estimation.

    References
    ----------
    .. [*] Brockwell, Peter J., and Richard A. Davis. 2009. Time Series:
       Theory and Methods. 2nd ed. 1991. New York, NY: Springer.
    """
    # NOTE(review): ``dtype`` is undocumented above; it sets the dtype of the
    # returned array and defaults to the common type of ar, ma and sigma2.
    if dtype is None:
        dtype = np.common_type(np.array(ar), np.array(ma), np.array(sigma2))
    # AR order p, MA order q; m is the size of the linear system below.
    p = len(ar) - 1
    q = len(ma) - 1
    m = max(p, q) + 1
    # ``.real`` exists on all Python/numpy numeric scalars, so this also
    # handles complex sigma2 by checking its real part.
    if sigma2.real < 0:
        raise ValueError('Must have positive innovation variance.')
    # Short-circuit for trivial corner-case: white noise has acovf
    # sigma2 at lag 0 and zero elsewhere.
    if p == q == 0:
        out = np.zeros(nobs, dtype=dtype)
        out[0] = sigma2
        return out
    elif p > 0 and np.max(np.abs(np.roots(ar))) >= 1:
        # Any AR root on or inside the unit circle => not stationary.
        raise ValueError(NONSTATIONARY_ERROR)
    # Get the moving average representation coefficients that we need
    ma_coeffs = arma2ma(ar, ma, lags=m)
    # Solve for the first m autocovariances via the linear system
    # described by (BD, eq. 3.3.8)
    A = np.zeros((m, m), dtype=dtype)
    b = np.zeros((m, 1), dtype=dtype)
    # We need a zero-right-padded version of ar params
    tmp_ar = np.zeros(m, dtype=dtype)
    tmp_ar[:p + 1] = ar
    for k in range(m):
        # Row k collects the AR coefficients multiplying gamma(|k-j|).
        A[k, :(k + 1)] = tmp_ar[:(k + 1)][::-1]
        A[k, 1:m - k] += tmp_ar[(k + 1):m]
        b[k] = sigma2 * np.dot(ma[k:q + 1], ma_coeffs[:max((q + 1 - k), 0)])
    acovf = np.zeros(max(nobs, m), dtype=dtype)
    try:
        acovf[:m] = np.linalg.solve(A, b)[:, 0]
    except np.linalg.LinAlgError:
        # A singular system also indicates a non-stationary parameterization.
        raise ValueError(NONSTATIONARY_ERROR)
    # Iteratively apply (BD, eq. 3.3.9) to solve for remaining autocovariances
    if nobs > m:
        # Seed the AR recursion with the first m autocovariances via lfiltic.
        zi = signal.lfiltic([1], ar, acovf[:m:][::-1])
        acovf[m:] = signal.lfilter([1], ar, np.zeros(nobs - m, dtype=dtype),
                                   zi=zi)[0]
    return acovf[:nobs]
def arma_acf(ar, ma, lags=10):
    """
    Theoretical autocorrelation function of an ARMA process.

    Parameters
    ----------
    ar : array_like
        Coefficients for autoregressive lag polynomial, including zero lag.
    ma : array_like
        Coefficients for moving-average lag polynomial, including zero lag.
    lags : int
        The number of terms (lags plus zero lag) to include in returned acf.

    Returns
    -------
    ndarray
        The autocorrelations of ARMA process given by ar and ma.

    See Also
    --------
    arma_acovf : Autocovariances from ARMA processes.
    acf : Sample autocorrelation function estimation.
    acovf : Sample autocovariance function estimation.
    """
    # Normalize the autocovariances by the lag-0 value (process variance).
    autocov = arma_acovf(ar, ma, lags)
    return autocov / autocov[0]
def arma_pacf(ar, ma, lags=10):
    """
    Theoretical partial autocorrelation function of an ARMA process.

    Parameters
    ----------
    ar : array_like, 1d
        The coefficients for autoregressive lag polynomial, including zero lag.
    ma : array_like, 1d
        The coefficients for moving-average lag polynomial, including zero lag.
    lags : int
        The number of terms (lags plus zero lag) to include in returned pacf.

    Returns
    -------
    ndarrray
        The partial autocorrelation of ARMA process given by ar and ma.

    Notes
    -----
    Solves yule-walker equation for each lag order up to nobs lags.
    not tested/checked yet
    """
    # TODO: Should use rank 1 inverse update
    pacf_vals = np.zeros(lags)
    pacf_vals[0] = 1.
    corr = arma_acf(ar, ma, lags=lags + 1)
    # Solve the Yule-Walker system at each order; the last coefficient of
    # the order-k solution is the partial autocorrelation at lag k.
    for order in range(2, lags + 1):
        rho = corr[:order]
        yw_coeffs = linalg.solve(linalg.toeplitz(rho[:-1]), rho[1:])
        pacf_vals[order - 1] = yw_coeffs[-1]
    return pacf_vals
def arma_periodogram(ar, ma, worN=None, whole=0):
    """
    Periodogram for ARMA process given by lag-polynomials ar and ma.

    Parameters
    ----------
    ar : array_like
        The autoregressive lag-polynomial with leading 1 and lhs sign.
    ma : array_like
        The moving average lag-polynomial with leading 1.
    worN : {None, int}, optional
        An option for scipy.signal.freqz (read "w or N").
        If None, then compute at 512 frequencies around the unit circle.
        If a single integer, the compute at that many frequencies.
        Otherwise, compute the response at frequencies given in worN.
    whole : {0,1}, optional
        An options for scipy.signal.freqz/
        Normally, frequencies are computed from 0 to pi (upper-half of
        unit-circle. If whole is non-zero compute frequencies from 0 to 2*pi.

    Returns
    -------
    w : ndarray
        The frequencies.
    sd : ndarray
        The periodogram, also known as the spectral density.

    Notes
    -----
    Normalization ?
    This uses signal.freqz, which does not use fft. There is a fft version
    somewhere.
    """
    freqs, response = signal.freqz(ma, ar, worN=worN, whole=whole)
    spectrum = np.abs(response) ** 2 / np.sqrt(2 * np.pi)
    if np.any(np.isnan(response)):
        # this happens with unit root or seasonal unit root'
        import warnings
        warnings.warn('Warning: nan in frequency response h, maybe a unit '
                      'root', RuntimeWarning)
    return freqs, spectrum
def arma_impulse_response(ar, ma, leads=100):
    """
    Compute the impulse response function (MA representation) for ARMA process.

    Parameters
    ----------
    ar : array_like, 1d
        The auto regressive lag polynomial.
    ma : array_like, 1d
        The moving average lag polynomial.
    leads : int
        The number of observations to calculate.

    Returns
    -------
    ndarray
        The impulse response function with nobs elements.

    Notes
    -----
    This is the same as finding the MA representation of an ARMA(p,q).
    By reversing the role of ar and ma in the function arguments, the
    returned result is the AR representation of an ARMA(p,q), i.e

    ma_representation = arma_impulse_response(ar, ma, leads=100)
    ar_representation = arma_impulse_response(ma, ar, leads=100)

    Fully tested against matlab

    Examples
    --------
    AR(1)

    >>> arma_impulse_response([1.0, -0.8], [1.], leads=10)
    array([ 1.        ,  0.8       ,  0.64      ,  0.512     ,  0.4096    ,
            0.32768   ,  0.262144  ,  0.2097152 ,  0.16777216,  0.13421773])

    this is the same as

    >>> 0.8**np.arange(10)
    array([ 1.        ,  0.8       ,  0.64      ,  0.512     ,  0.4096    ,
            0.32768   ,  0.262144  ,  0.2097152 ,  0.16777216,  0.13421773])

    MA(2)

    >>> arma_impulse_response([1.0], [1., 0.5, 0.2], leads=10)
    array([ 1. ,  0.5,  0.2,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ])

    ARMA(1,2)

    >>> arma_impulse_response([1.0, -0.8], [1., 0.5, 0.2], leads=10)
    array([ 1.        ,  1.3       ,  1.24      ,  0.992     ,  0.7936    ,
            0.63488   ,  0.507904  ,  0.4063232 ,  0.32505856,  0.26004685])
    """
    # Filtering a unit impulse through ma/ar yields the MA(inf)
    # representation truncated after ``leads`` terms.
    delta = np.zeros(leads)
    delta[0] = 1.
    return signal.lfilter(ma, ar, delta)
def arma2ma(ar, ma, lags=100):
    """
    A finite-lag approximate MA representation of an ARMA process.

    Parameters
    ----------
    ar : ndarray
        The auto regressive lag polynomial.
    ma : ndarray
        The moving average lag polynomial.
    lags : int
        The number of coefficients to calculate.

    Returns
    -------
    ndarray
        The coefficients of the MA lag polynomial, with ``lags`` elements.

    Notes
    -----
    Equivalent to ``arma_impulse_response(ar, ma, leads=lags)``.
    """
    # BUG FIX (docs only): the docstring previously said this returns the AR
    # lag polynomial and was equivalent to arma_impulse_response(ma, ar, ...);
    # the code computes the MA representation via arma_impulse_response(ar, ma).
    return arma_impulse_response(ar, ma, leads=lags)
def arma2ar(ar, ma, lags=100):
    """
    A finite-lag AR approximation of an ARMA process.

    Parameters
    ----------
    ar : array_like
        The auto regressive lag polynomial.
    ma : array_like
        The moving average lag polynomial.
    lags : int
        The number of coefficients to calculate.

    Returns
    -------
    ndarray
        The coefficients of AR lag polynomial with nobs elements.

    Notes
    -----
    Equivalent to ``arma_impulse_response(ma, ar, leads=100)``
    """
    # Swapping the roles of ma and ar in the impulse response yields the
    # AR(inf) representation coefficients.
    ar_rep = arma_impulse_response(ma, ar, leads=lags)
    return ar_rep
# moved from sandbox.tsa.try_fi
def ar2arma(ar_des, p, q, n=20, mse='ar', start=None):
    """
    Find arma approximation to ar process.

    This finds the ARMA(p,q) coefficients that minimize the integrated
    squared difference between the impulse_response functions (MA
    representation) of the AR and the ARMA process. This does not check
    whether the MA lag polynomial of the ARMA process is invertible, neither
    does it check the roots of the AR lag polynomial.

    Parameters
    ----------
    ar_des : array_like
        The coefficients of original AR lag polynomial, including lag zero.
    p : int
        The length of desired AR lag polynomials.
    q : int
        The length of desired MA lag polynomials.
    n : int
        The number of terms of the impulse_response function to include in the
        objective function for the approximation.
    mse : str, 'ar'
        Not used.
    start : ndarray
        Initial values to use when finding the approximation.

    Returns
    -------
    ar_app : ndarray
        The coefficients of the AR lag polynomials of the approximation.
    ma_app : ndarray
        The coefficients of the MA lag polynomials of the approximation.
    res : tuple
        The result of optimize.leastsq.

    Notes
    -----
    Extension is possible if we want to match autocovariance instead
    of impulse response function.
    """
    # TODO: convert MA lag polynomial, ma_app, to be invertible, by mirroring
    # TODO: roots outside the unit interval to ones that are inside. How to do
    # TODO: this?
    # p,q = pq
    def msear_err(arma, ar_des):
        # Unpack the stacked parameter vector into AR and MA polynomials;
        # the leading 1 of each polynomial is fixed, not estimated.
        ar, ma = np.r_[1, arma[:p - 1]], np.r_[1, arma[p - 1:]]
        ar_approx = arma_impulse_response(ma, ar, n)
        # Residual vector for least squares.
        return (ar_des - ar_approx)  # ((ar - ar_approx)**2).sum()

    if start is None:
        arma0 = np.r_[-0.9 * np.ones(p - 1), np.zeros(q - 1)]
    else:
        arma0 = start
    # NOTE(review): the extra args are passed positionally; verify that
    # scipy's leastsq wraps a non-tuple ``args`` as expected here.
    res = optimize.leastsq(msear_err, arma0, ar_des, maxfev=5000)
    arma_app = np.atleast_1d(res[0])
    # BUG FIX: the original line ended with a stray trailing comma, which
    # made ar_app a 1-tuple containing the array instead of the array itself.
    ar_app = np.r_[1, arma_app[:p - 1]]
    ma_app = np.r_[1, arma_app[p - 1:]]
    return ar_app, ma_app, res
# Docstrings of the two conversion directions, keyed by target
# representation; used by downstream docstring tooling.
_arma_docs = {'ar': arma2ar.__doc__,
              'ma': arma2ma.__doc__}
def lpol2index(ar):
    """
    Remove zeros from lag polynomial

    Parameters
    ----------
    ar : array_like
        coefficients of lag polynomial

    Returns
    -------
    coeffs : ndarray
        non-zero coefficients of lag polynomial
    index : ndarray
        index (lags) of lag polynomial with non-zero elements
    """
    poly = array_like(ar, 'ar')
    # Positions of the non-zero coefficients are the active lags.
    lags = np.nonzero(poly)[0]
    return poly[lags], lags
def index2lpol(coeffs, index):
    """
    Expand coefficients to lag poly

    Parameters
    ----------
    coeffs : ndarray
        non-zero coefficients of lag polynomial
    index : ndarray
        index (lags) of lag polynomial with non-zero elements

    Returns
    -------
    ar : array_like
        coefficients of lag polynomial
    """
    # The polynomial runs from lag 0 through the largest listed lag;
    # unlisted lags keep a zero coefficient.
    poly = np.zeros(max(index) + 1)
    poly[index] = coeffs
    return poly
def lpol_fima(d, n=20):
    """MA representation of fractional integration

    .. math:: (1-L)^{-d} for |d|<0.5 or |d|<1 (?)

    Parameters
    ----------
    d : float
        fractional power
    n : int
        number of terms to calculate, including lag zero

    Returns
    -------
    ma : ndarray
        coefficients of lag polynomial
    """
    # hide import inside function until we use this heavily
    from scipy.special import gammaln
    lag = np.arange(n)
    # Binomial-series coefficients Gamma(d+j) / (Gamma(j+1) * Gamma(d)),
    # evaluated in log space via gammaln for numerical stability.
    log_coeffs = gammaln(d + lag) - gammaln(lag + 1) - gammaln(d)
    return np.exp(log_coeffs)
# moved from sandbox.tsa.try_fi
def lpol_fiar(d, n=20):
    """AR representation of fractional integration

    .. math:: (1-L)^{d} for |d|<0.5 or |d|<1 (?)

    Parameters
    ----------
    d : float
        fractional power
    n : int
        number of terms to calculate, including lag zero

    Returns
    -------
    ar : ndarray
        coefficients of lag polynomial

    Notes:
    first coefficient is 1, negative signs except for first term,
    ar(L)*x_t
    """
    # hide import inside function until we use this heavily
    from scipy.special import gammaln
    lag = np.arange(n)
    # Magnitudes from the binomial series of (1-L)^d, computed in log space;
    # every term after lag zero carries a negative sign.
    coeffs = -np.exp(gammaln(-d + lag) - gammaln(lag + 1) - gammaln(-d))
    coeffs[0] = 1
    return coeffs
# moved from sandbox.tsa.try_fi
def lpol_sdiff(s):
    """return coefficients for seasonal difference (1-L^s)

    just a trivial convenience function

    Parameters
    ----------
    s : int
        number of periods in season

    Returns
    -------
    sdiff : list, length s+1
    """
    # Lag 0 gets +1, lag s gets -1, everything in between is zero.
    interior = [0] * (s - 1)
    return [1] + interior + [-1]
def deconvolve(num, den, n=None):
    """Deconvolves divisor out of signal, division of polynomials for n terms

    calculates den^{-1} * num

    Parameters
    ----------
    num : array_like
        signal or lag polynomial
    denom : array_like
        coefficients of lag polynomial (linear filter)
    n : None or int
        number of terms of quotient

    Returns
    -------
    quot : ndarray
        quotient or filtered series
    rem : ndarray
        remainder

    Notes
    -----
    If num is a time series, then this applies the linear filter den^{-1}.
    If both num and den are both lag polynomials, then this calculates the
    quotient polynomial for n terms and also returns the remainder.

    This is copied from scipy.signal.signaltools and added n as optional
    parameter.
    """
    num = np.atleast_1d(num)
    den = np.atleast_1d(den)
    n_num = len(num)
    n_den = len(den)
    if n_den > n_num and n is None:
        # Divisor has higher degree: quotient is empty, remainder is num.
        return [], num
    if n is None:
        n = n_num - n_den + 1
    # Filtering a unit impulse by num/den produces the quotient terms.
    impulse = np.zeros(n, float)
    impulse[0] = 1
    quot = signal.lfilter(num, den, impulse)
    num_approx = signal.convolve(den, quot, mode='full')
    if n_num < len(num_approx):  # 1d only ?
        num = np.concatenate((num, np.zeros(len(num_approx) - n_num)))
    rem = num - num_approx
    return quot, rem
# Build the docstring for ``ArmaProcess.generate_sample`` from the
# module-level function's docstring: drop the ``ar``/``ma`` parameters
# (supplied by the instance) and the function-specific Notes and Examples.
_generate_sample_doc = Docstring(arma_generate_sample.__doc__)
_generate_sample_doc.remove_parameters(['ar', 'ma'])
_generate_sample_doc.replace_block('Notes', [])
_generate_sample_doc.replace_block('Examples', [])
class ArmaProcess(object):
    r"""
    Theoretical properties of an ARMA process for specified lag-polynomials.

    Parameters
    ----------
    ar : array_like
        Coefficient for autoregressive lag polynomial, including zero lag.
        Must be entered using the signs from the lag polynomial representation.
        See the notes for more information about the sign.
    ma : array_like
        Coefficient for moving-average lag polynomial, including zero lag.
    nobs : int, optional
        Length of simulated time series. Used, for example, if a sample is
        generated. See example.

    Notes
    -----
    Both the AR and MA components must include the coefficient on the
    zero-lag. In almost all cases these values should be 1. Further, due to
    using the lag-polynomial representation, the AR parameters should
    have the opposite sign of what one would write in the ARMA representation.
    See the examples below.

    The ARMA(p,q) process is described by

    .. math::

        y_{t}=\phi_{1}y_{t-1}+\ldots+\phi_{p}y_{t-p}+\theta_{1}\epsilon_{t-1}
        +\ldots+\theta_{q}\epsilon_{t-q}+\epsilon_{t}

    and the parameterization used in this function uses the lag-polynomial
    representation,

    .. math::

        \left(1-\phi_{1}L-\ldots-\phi_{p}L^{p}\right)y_{t} =
        \left(1+\theta_{1}L+\ldots+\theta_{q}L^{q}\right)\epsilon_{t}

    Examples
    --------
    ARMA(2,2) with AR coefficients 0.75 and -0.25, and MA coefficients 0.65 and 0.35

    >>> import statsmodels.api as sm
    >>> import numpy as np
    >>> np.random.seed(12345)
    >>> arparams = np.array([.75, -.25])
    >>> maparams = np.array([.65, .35])
    >>> ar = np.r_[1, -arparams] # add zero-lag and negate
    >>> ma = np.r_[1, maparams] # add zero-lag
    >>> arma_process = sm.tsa.ArmaProcess(ar, ma)
    >>> arma_process.isstationary
    True
    >>> arma_process.isinvertible
    True
    >>> arma_process.arroots
    array([1.5-1.32287566j, 1.5+1.32287566j])
    >>> y = arma_process.generate_sample(250)
    >>> model = sm.tsa.ARMA(y, (2, 2)).fit(trend='nc', disp=0)
    >>> model.params
    array([ 0.79044189, -0.23140636, 0.70072904, 0.40608028])

    The same ARMA(2,2) Using the from_coeffs class method

    >>> arma_process = sm.tsa.ArmaProcess.from_coeffs(arparams, maparams)
    >>> arma_process.arroots
    array([1.5-1.32287566j, 1.5+1.32287566j])
    """

    # TODO: Check unit root behavior
    def __init__(self, ar=None, ma=None, nobs=100):
        # Default both polynomials to the trivial [1] (white noise).
        if ar is None:
            ar = np.array([1.])
        if ma is None:
            ma = np.array([1.])
        self.ar = array_like(ar, 'ar')
        self.ma = array_like(ma, 'ma')
        # Coefficients in the usual statistics sign convention
        # (AR signs flipped relative to the lag polynomial).
        self.arcoefs = -self.ar[1:]
        self.macoefs = self.ma[1:]
        self.arpoly = np.polynomial.Polynomial(self.ar)
        self.mapoly = np.polynomial.Polynomial(self.ma)
        self.nobs = nobs

    @classmethod
    def from_coeffs(cls, arcoefs=None, macoefs=None, nobs=100):
        """
        Create ArmaProcess from an ARMA representation.

        Parameters
        ----------
        arcoefs : array_like
            Coefficient for autoregressive lag polynomial, not including zero
            lag. The sign is inverted to conform to the usual time series
            representation of an ARMA process in statistics. See the class
            docstring for more information.
        macoefs : array_like
            Coefficient for moving-average lag polynomial, excluding zero lag.
        nobs : int, optional
            Length of simulated time series. Used, for example, if a sample
            is generated.

        Returns
        -------
        ArmaProcess
            Class instance initialized with arcoefs and macoefs.

        Examples
        --------
        >>> arparams = [.75, -.25]
        >>> maparams = [.65, .35]
        >>> arma_process = sm.tsa.ArmaProcess.from_coeffs(ar, ma)
        >>> arma_process.isstationary
        True
        >>> arma_process.isinvertible
        True
        """
        arcoefs = [] if arcoefs is None else arcoefs
        macoefs = [] if macoefs is None else macoefs
        # Prepend the zero lag and negate the AR side for the
        # lag-polynomial representation.
        return cls(np.r_[1, -np.asarray(arcoefs)],
                   np.r_[1, np.asarray(macoefs)],
                   nobs=nobs)

    @classmethod
    def from_estimation(cls, model_results, nobs=None):
        """
        Create an ArmaProcess from the results of an ARMA estimation.

        Parameters
        ----------
        model_results : ARMAResults instance
            A fitted model.
        nobs : int, optional
            If None, nobs is taken from the results.

        Returns
        -------
        ArmaProcess
            Class instance initialized from model_results.
        """
        arcoefs = model_results.arparams
        macoefs = model_results.maparams
        nobs = nobs or model_results.nobs
        return cls(np.r_[1, -arcoefs], np.r_[1, macoefs], nobs=nobs)

    def __mul__(self, oth):
        # Multiplying two processes convolves (multiplies) the AR and MA
        # lag polynomials; ``oth`` may also be an (ar, ma) pair.
        if isinstance(oth, self.__class__):
            ar = (self.arpoly * oth.arpoly).coef
            ma = (self.mapoly * oth.mapoly).coef
        else:
            try:
                aroth, maoth = oth
                arpolyoth = np.polynomial.Polynomial(aroth)
                mapolyoth = np.polynomial.Polynomial(maoth)
                ar = (self.arpoly * arpolyoth).coef
                ma = (self.mapoly * mapolyoth).coef
            except Exception:
                # BUG FIX: was a bare ``except:``, which also intercepted
                # KeyboardInterrupt/SystemExit.
                raise TypeError('Other type is not a valid type')
        return self.__class__(ar, ma, nobs=self.nobs)

    def __repr__(self):
        msg = 'ArmaProcess({0}, {1}, nobs={2}) at {3}'
        return msg.format(self.ar.tolist(), self.ma.tolist(),
                          self.nobs, hex(id(self)))

    def __str__(self):
        return 'ArmaProcess\nAR: {0}\nMA: {1}'.format(self.ar.tolist(),
                                                      self.ma.tolist())

    @Appender(remove_parameters(arma_acovf.__doc__, ['ar', 'ma', 'sigma2']))
    def acovf(self, nobs=None):
        nobs = nobs or self.nobs
        return arma_acovf(self.ar, self.ma, nobs=nobs)

    @Appender(remove_parameters(arma_acf.__doc__, ['ar', 'ma']))
    def acf(self, lags=None):
        lags = lags or self.nobs
        return arma_acf(self.ar, self.ma, lags=lags)

    @Appender(remove_parameters(arma_pacf.__doc__, ['ar', 'ma']))
    def pacf(self, lags=None):
        lags = lags or self.nobs
        return arma_pacf(self.ar, self.ma, lags=lags)

    @Appender(remove_parameters(arma_periodogram.__doc__, ['ar', 'ma', 'worN',
                                                           'whole']))
    def periodogram(self, nobs=None):
        nobs = nobs or self.nobs
        return arma_periodogram(self.ar, self.ma, worN=nobs)

    @Appender(remove_parameters(arma_impulse_response.__doc__, ['ar', 'ma']))
    def impulse_response(self, leads=None):
        leads = leads or self.nobs
        return arma_impulse_response(self.ar, self.ma, leads=leads)

    @Appender(remove_parameters(arma2ma.__doc__, ['ar', 'ma']))
    def arma2ma(self, lags=None):
        # BUG FIX: previously read ``self.lags``, an attribute never set
        # anywhere on this class, so calling with lags=None raised
        # AttributeError.  Default to ``self.nobs`` like the other methods.
        lags = lags or self.nobs
        return arma2ma(self.ar, self.ma, lags=lags)

    @Appender(remove_parameters(arma2ar.__doc__, ['ar', 'ma']))
    def arma2ar(self, lags=None):
        # BUG FIX: same ``self.lags`` -> ``self.nobs`` fix as in arma2ma.
        lags = lags or self.nobs
        return arma2ar(self.ar, self.ma, lags=lags)

    @property
    def arroots(self):
        """Roots of autoregressive lag-polynomial"""
        return self.arpoly.roots()

    @property
    def maroots(self):
        """Roots of moving average lag-polynomial"""
        return self.mapoly.roots()

    @property
    def isstationary(self):
        """
        Arma process is stationary if AR roots are outside unit circle.

        Returns
        -------
        bool
            True if autoregressive roots are outside unit circle.
        """
        if np.all(np.abs(self.arroots) > 1.0):
            return True
        else:
            return False

    @property
    def isinvertible(self):
        """
        Arma process is invertible if MA roots are outside unit circle.

        Returns
        -------
        bool
            True if moving average roots are outside unit circle.
        """
        if np.all(np.abs(self.maroots) > 1):
            return True
        else:
            return False

    def invertroots(self, retnew=False):
        """
        Make MA polynomial invertible by inverting roots inside unit circle.

        Parameters
        ----------
        retnew : bool
            If False (default), then return the lag-polynomial as array.
            If True, then return a new instance with invertible MA-polynomial.

        Returns
        -------
        manew : ndarray
            A new invertible MA lag-polynomial, returned if retnew is false.
        wasinvertible : bool
            True if the MA lag-polynomial was already invertible, returned if
            retnew is false.
        armaprocess : new instance of class
            If retnew is true, then return a new instance with invertible
            MA-polynomial.
        """
        # TODO: variable returns like this?
        pr = self.maroots
        mainv = self.ma
        invertible = self.isinvertible
        if not invertible:
            # Reflect roots inside the unit circle to their reciprocals.
            pr[np.abs(pr) < 1] = 1. / pr[np.abs(pr) < 1]
            pnew = np.polynomial.Polynomial.fromroots(pr)
            # Rescale so the zero-lag coefficient is 1.
            mainv = pnew.coef / pnew.coef[0]
        if retnew:
            return self.__class__(self.ar, mainv, nobs=self.nobs)
        else:
            return mainv, invertible

    @Appender(str(_generate_sample_doc))
    def generate_sample(self, nsample=100, scale=1., distrvs=None, axis=0,
                        burnin=0):
        return arma_generate_sample(self.ar, self.ma, nsample, scale, distrvs,
                                    axis=axis, burnin=burnin)
| {
"repo_name": "jseabold/statsmodels",
"path": "statsmodels/tsa/arima_process.py",
"copies": "5",
"size": "28643",
"license": "bsd-3-clause",
"hash": -5893312728923830000,
"line_mean": 29.9989177489,
"line_max": 84,
"alpha_frac": 0.5959571274,
"autogenerated": false,
"ratio": 3.611069087241553,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00006684491978609626,
"num_lines": 924
} |
""" ARM architecture definition. """
import io
from ... import ir
from ...binutils.assembler import BaseAssembler
from ..arch import Architecture
from ..arch_info import ArchInfo, TypeInfo
from ..generic_instructions import Label, Alignment, RegisterUseDef
from ..data_instructions import Db, Dd, Dcd2, data_isa
from ..registers import RegisterClass
from ..stack import StackLocation
from .registers import ArmRegister, register_range, LowArmRegister, RegisterSet
from .registers import R0, R1, R2, R3, R4, all_registers
from .registers import R5, R6, R7, R8
from .registers import R9, R10, R11, LR, PC, SP
from .arm_instructions import LdrPseudo, arm_isa
from .thumb_instructions import thumb_isa
from . import thumb_instructions
from . import arm_instructions
class ArmCallingConvention:
    # Placeholder for the ARM procedure-call convention; no behavior yet.
    pass
class ArmArch(Architecture):
    """ Arm machine class. """
    # Architecture name used for lookup, plus the recognized option flags;
    # "thumb" switches code generation to the Thumb instruction set (see
    # the methods below which branch on has_option("thumb")).
    name = "arm"
    option_names = ("thumb", "jazelle", "neon", "vfpv1", "vfpv2")
def __init__(self, options=None):
super().__init__(options=options)
if self.has_option("thumb"):
self.assembler = ThumbAssembler()
self.isa = thumb_isa + data_isa
# We use r7 as frame pointer (in case of thumb ;)):
self.fp = R7
self.callee_save = (R5, R6)
# Registers usable by register allocator:
register_classes = [
RegisterClass(
"loreg",
[ir.i8, ir.i32, ir.ptr, ir.u8, ir.u32, ir.i16, ir.u16],
LowArmRegister,
[R0, R1, R2, R3, R4, R5, R6, R7],
)
]
else:
self.isa = arm_isa + data_isa
self.assembler = ArmAssembler()
self.fp = R11
self.callee_save = (R5, R6, R7, R8, R9, R10)
# Registers usable by register allocator:
register_classes = [
RegisterClass(
"loreg",
[],
LowArmRegister,
[R0, R1, R2, R3, R4, R5, R6, R7],
),
RegisterClass(
"reg",
[ir.i8, ir.i32, ir.u8, ir.u32, ir.i16, ir.u16, ir.ptr],
ArmRegister,
[R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11],
),
]
self.assembler.gen_asm_parser(self.isa)
self.gdb_registers = all_registers
self.gdb_pc = PC
self.info = ArchInfo(
type_infos={
ir.i8: TypeInfo(1, 1),
ir.u8: TypeInfo(1, 1),
ir.i16: TypeInfo(2, 2),
ir.u16: TypeInfo(2, 2),
ir.i32: TypeInfo(4, 4),
ir.u32: TypeInfo(4, 4),
ir.f32: TypeInfo(4, 4),
ir.f64: TypeInfo(8, 8),
"int": ir.i32,
"ptr": ir.u32,
ir.ptr: ir.u32,
},
register_classes=register_classes,
)
def get_runtime(self):
""" Implement compiler runtime functions """
from ...api import asm
if self.has_option("thumb"):
asm_src = ""
else:
asm_src = ARM_ASM_RT
return asm(io.StringIO(asm_src), self)
def move(self, dst, src):
""" Generate a move from src to dst """
if self.has_option("thumb"):
return thumb_instructions.Mov2(dst, src, ismove=True)
else:
return arm_instructions.Mov2(
dst, src, arm_instructions.NoShift(), ismove=True
)
    def gen_prologue(self, frame):
        """ Returns prologue instruction sequence.

        Reserve stack for this calling frame for:

        - local variables
        - save registers
        - parameters to called functions
        """
        # Label indication function:
        yield Label(frame.name)
        # Save the link register and the frame pointer:
        if self.has_option("thumb"):
            yield thumb_instructions.Push({LR, R7})
        else:
            yield arm_instructions.Push(RegisterSet({LR, R11}))
        # Setup frame pointer:
        if self.has_option("thumb"):
            yield thumb_instructions.Mov2(R7, SP)
        else:
            yield arm_instructions.Mov2(R11, SP, arm_instructions.NoShift())
        # Reserve stack for this calling frame for:
        # 1. local variables
        # 2. save registers
        # 3. parameters to called functions
        if frame.stacksize:
            # round_up is defined elsewhere in this module; presumably it
            # aligns the size -- TODO confirm.
            ssize = round_up(frame.stacksize)
            if self.has_option("thumb"):
                # Reserve stack space:
                # subSp cannot handle large numbers:
                while ssize > 0:
                    inc = min(124, ssize)
                    yield thumb_instructions.SubSp(inc)
                    ssize -= inc
            else:
                yield arm_instructions.SubImm(SP, SP, ssize)
        # Callee save registers:
        # Only push the callee-saved registers this frame actually uses.
        callee_save = {r for r in self.callee_save if r in frame.used_regs}
        if callee_save:
            if self.has_option("thumb"):
                yield thumb_instructions.Push(callee_save)
            else:
                yield arm_instructions.Push(RegisterSet(callee_save))
        # Allocate space for outgoing calls:
        # frame.out_calls holds the argument-space sizes of calls made by
        # this frame; reserve space for the largest one.
        extras = max(frame.out_calls) if frame.out_calls else 0
        if extras:
            ssize = round_up(extras)
            if self.has_option("thumb"):
                raise NotImplementedError()
            else:
                yield arm_instructions.SubImm(SP, SP, ssize)
def gen_epilogue(self, frame):
    """Yield the epilogue sequence for *frame*.

    Mirror image of gen_prologue: frees stack space for
    - parameters passed to called functions
    - saved registers
    - local variables
    then restores the frame pointer, returns, and emits the final
    literal pool.
    """
    # Free the space reserved for outgoing call arguments:
    extras = max(frame.out_calls) if frame.out_calls else 0
    if extras:
        ssize = round_up(extras)
        if self.has_option("thumb"):
            raise NotImplementedError()
        else:
            yield arm_instructions.AddImm(SP, SP, ssize)
    # Restore callee-saved registers:
    callee_save = {r for r in self.callee_save if r in frame.used_regs}
    if callee_save:
        if self.has_option("thumb"):
            yield thumb_instructions.Pop(callee_save)
        else:
            yield arm_instructions.Pop(RegisterSet(callee_save))
    # Free local variable space:
    if frame.stacksize > 0:
        ssize = round_up(frame.stacksize)
        if self.has_option("thumb"):
            # AddSp cannot handle large immediates, so split big
            # adjustments into steps of at most 124 bytes:
            while ssize > 0:
                inc = min(124, ssize)
                yield thumb_instructions.AddSp(inc)
                ssize -= inc
        else:
            yield arm_instructions.AddImm(SP, SP, ssize)
    # Restore the frame pointer and return by popping into PC:
    if self.has_option("thumb"):
        yield thumb_instructions.Pop({PC, R7})
    else:
        yield arm_instructions.Pop(RegisterSet({PC, R11}))
    # Add the final literal pool after the function body:
    for instruction in self.litpool(frame):
        yield instruction
    if not self.has_option("thumb"):
        yield Alignment(4)  # Align at 4 bytes
def gen_arm_memcpy(self, p1, p2, v3, size):
    """Yield a naive byte-by-byte copy of *size* bytes from p2 to p1.

    Runs before register allocation; v3 is a scratch register.
    """
    # TODO: for really big copies, bump p1/p2 with AddImm from time to
    # time instead of using ever-growing offsets.
    for offset in range(size):
        yield arm_instructions.Ldrb(v3, p2, offset)
        yield arm_instructions.Strb(v3, p1, offset)
def gen_call(self, frame, label, args, rv):
    """Yield instructions that call *label* with *args*.

    args is a sequence of (type, value) pairs; rv is a (type, value)
    pair or falsy when the result is unused.  label may be a register
    (indirect call) or a symbol.
    """
    arg_types = [a[0] for a in args]
    arg_locs = self.determine_arg_locations(arg_types)

    arg_regs = []
    stack_size = 0
    # Move each argument into its assigned register or stack slot:
    for arg_loc, arg2 in zip(arg_locs, args):
        arg = arg2[1]
        if isinstance(arg_loc, ArmRegister):
            arg_regs.append(arg_loc)
            yield self.move(arg_loc, arg)
        elif isinstance(arg_loc, StackLocation):
            stack_size += arg_loc.size
            if isinstance(arg, ArmRegister):
                # Store register on stack:
                if self.has_option("thumb"):
                    yield thumb_instructions.Str1(arg, SP, arg_loc.offset)
                else:
                    yield arm_instructions.Str1(arg, SP, arg_loc.offset)
            elif isinstance(arg, StackLocation):
                if self.has_option("thumb"):
                    raise NotImplementedError()
                else:
                    # Stack-to-stack blob argument: emit a memcpy now.
                    assert arg.size == arg_loc.size
                    p1 = frame.new_reg(ArmRegister)
                    p2 = frame.new_reg(ArmRegister)
                    v3 = frame.new_reg(ArmRegister)
                    # Destination location:
                    # Remember that the LR and FP are pushed in between
                    # So hence -8:
                    yield arm_instructions.AddImm(
                        p1, SP, arg_loc.offset - 8
                    )
                    # Source location (relative to the frame pointer):
                    yield arm_instructions.SubImm(p2, self.fp, -arg.offset)
                    for instruction in self.gen_arm_memcpy(
                        p1, p2, v3, arg.size
                    ):
                        yield instruction
            else:  # pragma: no cover
                raise NotImplementedError(str(arg))
        else:  # pragma: no cover
            raise NotImplementedError("Parameters in memory not impl")
    # Record that certain amount of stack is required:
    frame.add_out_call(stack_size)
    # Tell the register allocator which registers the call reads:
    yield RegisterUseDef(uses=arg_regs)
    clobbers = [R0, R1, R2, R3, R4]
    if self.has_option("thumb"):
        if isinstance(label, ArmRegister):
            # Set bit 0 of the target address to stay in thumb mode:
            yield thumb_instructions.AddImm(label, label, 1)
            yield thumb_instructions.Blx(label, clobbers=clobbers)
        else:
            yield thumb_instructions.Bl(label, clobbers=clobbers)
    else:
        if isinstance(label, ArmRegister):
            yield arm_instructions.Blx(label, clobbers=clobbers)
        else:
            yield arm_instructions.Bl(label, clobbers=clobbers)
    # Fetch the return value, if any:
    if rv:
        retval_loc = self.determine_rv_location(rv[0])
        yield RegisterUseDef(defs=(retval_loc,))
        yield self.move(rv[1], retval_loc)
def gen_function_enter(self, args):
    """Yield code that moves incoming arguments into their target values."""
    arg_locs = self.determine_arg_locations([a[0] for a in args])
    arg_regs = {loc for loc in arg_locs if isinstance(loc, ArmRegister)}
    yield RegisterUseDef(defs=arg_regs)
    for loc, pair in zip(arg_locs, args):
        arg = pair[1]
        if isinstance(loc, ArmRegister):
            yield self.move(arg, loc)
        elif isinstance(loc, StackLocation):
            # Stack parameters require no move here.
            pass
        else:  # pragma: no cover
            raise NotImplementedError("Parameters in memory not impl")
def gen_function_exit(self, rv):
    """Yield code that moves the return value (if any) into place."""
    live_out = set()
    if rv:
        rv_loc = self.determine_rv_location(rv[0])
        yield self.move(rv_loc, rv[1])
        live_out.add(rv_loc)
    yield RegisterUseDef(uses=live_out)
def litpool(self, frame):
    """Yield label/data directives for the pending literals of *frame*.

    Consumes frame.constants; yields nothing when the pool is empty.
    """
    pool = frame.constants
    if pool:
        yield Alignment(4)  # Literal pool is word aligned.
    while pool:
        label, value = pool.pop(0)
        yield Label(label)
        if isinstance(value, bytes):
            for b in value:
                yield Db(b)
            yield Alignment(4)  # Re-align after raw bytes.
        elif isinstance(value, str):
            yield Dcd2(value)
        elif isinstance(value, int):
            yield Dd(value)
        else:  # pragma: no cover
            raise NotImplementedError("Constant of type {}".format(value))
def between_blocks(self, frame):
    """Flush the literal pool between basic blocks."""
    yield from self.litpool(frame)
def determine_arg_locations(self, arg_types):
    """
    Given a set of argument types, determine location for argument
    ABI:
    pass arg1 in R1
    pass arg2 in R2
    pass arg3 in R3
    pass arg4 in R4
    return value in R0
    """
    # TODO: what ABI to use?  Perhaps follow the arm ABI spec?
    locations = []
    free_regs = [R1, R2, R3, R4]
    # Stack offsets start at 8; the pushed LR/FP pair sits below
    # (see the -8 correction in gen_call).
    offset = 8
    for arg_ty in arg_types:
        if arg_ty.is_blob:
            # Blob values always travel on the stack:
            loc = StackLocation(offset, arg_ty.size)
            offset += arg_ty.size
        elif free_regs:
            # Pass non-blob values in registers if possible:
            loc = free_regs.pop(0)
        else:
            # Out of registers; spill this argument to the stack:
            size = self.info.get_size(arg_ty)
            loc = StackLocation(offset, size)
            offset += size
        locations.append(loc)
    return locations
def determine_rv_location(self, ret_type):
    """The return value is always passed in R0."""
    return R0
class ArmAssembler(BaseAssembler):
    """ Assembler for the arm instruction set """

    def __init__(self):
        super().__init__()
        self.add_extra_rules()
        # Pending literal directives and a counter used to invent
        # unique labels for the ldr pseudo-instruction:
        self.lit_pool = []
        self.lit_counter = 0

    def add_extra_rules(self):
        # Implement register list syntax ({r0, r2-r5, ...}):
        reg_nt = "$reg_cls_armregister$"
        self.typ2nt[RegisterSet] = "reg_list"
        self.add_rule(
            "reg_list", ["{", "reg_list_inner", "}"], lambda rhs: rhs[1]
        )
        self.add_rule("reg_list_inner", ["reg_or_range"], lambda rhs: rhs[0])
        # Left-recursive rule for comma-separated register items:
        self.add_rule(
            "reg_list_inner",
            ["reg_list_inner", ",", "reg_or_range"],
            lambda rhs: RegisterSet(rhs[0] | rhs[2]),
        )
        self.add_rule(
            "reg_or_range", [reg_nt], lambda rhs: RegisterSet([rhs[0]])
        )
        self.add_rule(
            "reg_or_range",
            [reg_nt, "-", reg_nt],
            lambda rhs: RegisterSet(register_range(rhs[0], rhs[2])),
        )
        # Ldr pseudo instruction ("ldr rX, =SYMBOL"):
        # TODO: fix the add_literal other way:
        self.add_rule(
            "instruction",
            ["ldr", reg_nt, ",", "=", "ID"],
            lambda rhs: LdrPseudo(rhs[1], rhs[4].val, self.add_literal),
        )

    def flush(self):
        # Emit all pending literal labels and data words:
        assert not self.in_macro
        while self.lit_pool:
            i = self.lit_pool.pop(0)
            self.emit(i)

    def add_literal(self, v):
        """ For use in the pseudo instruction LDR r0, =SOMESYM """
        # Invent some label for the literal and store it.
        assert isinstance(v, str)
        self.lit_counter += 1
        label_name = "_lit_{}".format(self.lit_counter)
        self.lit_pool.append(Label(label_name))
        self.lit_pool.append(Dcd2(v))
        return label_name
class ThumbAssembler(BaseAssembler):
    """ Assembler for the thumb instruction set """

    def __init__(self):
        super().__init__()
        self.parser.assembler = self
        self.add_extra_rules()

    def add_extra_rules(self):
        # Implement register list syntax; thumb register lists are
        # plain sets rather than RegisterSet objects:
        reg_nt = "$reg_cls_armregister$"
        self.typ2nt[set] = "reg_list"
        self.add_rule(
            "reg_list", ["{", "reg_list_inner", "}"], lambda rhs: rhs[1]
        )
        self.add_rule("reg_list_inner", ["reg_or_range"], lambda rhs: rhs[0])
        # For a left right parser, or right left parser, this is important:
        self.add_rule(
            "reg_list_inner",
            ["reg_list_inner", ",", "reg_or_range"],
            lambda rhs: rhs[0] | rhs[2],
        )
        self.add_rule("reg_or_range", [reg_nt], lambda rhs: set([rhs[0]]))
        self.add_rule(
            "reg_or_range",
            [reg_nt, "-", reg_nt],
            lambda rhs: register_range(rhs[0], rhs[2]),
        )
def round_up(s):
    """Round stack size *s* up to the next multiple of 4 (ARM stack alignment).

    The previous implementation returned ``s + (4 - s % 4)`` which
    over-allocated 4 bytes whenever *s* was already a multiple of 4
    (e.g. round_up(4) == 8).  Values already aligned are now returned
    unchanged; this is safe because prologue and epilogue use the same
    rounded size symmetrically.
    """
    return (s + 3) & ~3
# Assembler source of the compiler runtime support routines; assembled
# on demand by get_runtime().
# NOTE(review): despite the __sdiv name, the routine is a plain
# shift-and-subtract loop with no sign handling — confirm how signed
# operands are treated at call sites.
ARM_ASM_RT = """
global __sdiv
__sdiv:
; Divide r1 by r2
; R4 is a work register.
; r0 is the quotient
push {r4}
mov r4, r2 ; mov divisor into temporary register.
; Blow up divisor until it is larger than the divident.
cmp r4, r1, lsr 1 ; If r4 < r1, then, shift left once more.
__sdiv_inc:
movls r4, r4, lsl 1
cmp r4, r1, lsr 1
bls __sdiv_inc
mov r0, 0 ; Initialize the result
; Repeatedly substract shifted divisor
__sdiv_dec:
cmp r1, r4 ; Can we substract the current temp value?
subcs r1, r1, r4 ; Substract temp from divisor if carry
adc r0, r0, r0 ; double (shift left) and add carry
mov r4, r4, lsr 1 ; Shift right one
cmp r4, r2 ; Is temp less than divisor?
bhs __sdiv_dec ; If so, repeat.
pop {r4}
mov pc, lr ; Return from function.
"""
| {
"repo_name": "windelbouwman/ppci-mirror",
"path": "ppci/arch/arm/arch.py",
"copies": "1",
"size": "17844",
"license": "bsd-2-clause",
"hash": -497073032770770700,
"line_mean": 33.6485436893,
"line_max": 79,
"alpha_frac": 0.5226967048,
"autogenerated": false,
"ratio": 3.862337662337662,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9884903167977337,
"avg_score": 0.000026239832065074783,
"num_lines": 515
} |
"""ARM inline assembler.
This module allows creation of new functions in ARM assembler
(machine language) which can be directly called from Python.
The assembler syntax parsed by this module follows as closely as practical
the official ARM syntax
"""
from functools import partial as _partial
import re as _re
import ctypes as _ctypes
from ctypes.util import find_library as _find_library
class AssemblerError(Exception):
    """Raised when a syntax error is found in the assembler source."""
# Map register names (and their APCS synonyms) to register numbers.
_registers = {"r%d" % _i: _i for _i in range(16)}
# Argument registers a1..a4 alias r0..r3:
for _i in range(4):
    _registers["a%d" % (_i + 1)] = _i
# Variable registers v1..v8 alias r4..r11:
for _i in range(8):
    _registers["v%d" % (_i + 1)] = _i + 4
# Special-purpose synonyms:
_registers.update(sb=9, ip=12, sp=13, lr=14, pc=15)

# Status registers, selected by the R bit in mrs/msr encodings:
_status_registers = {"cpsr" : 0, "spsr" : 1}

# Condition mnemonics indexed by their 4-bit condition code; index 14
# ("always") is the empty suffix.
_conditions = [
    "eq", "ne", "cs", "cc",
    "mi", "pl", "vs", "vc",
    "hi", "ls", "ge", "lt",
    "gt", "le", "", "nv"]
class _InstructionFormat:
    """Bit-level encoder for one ARM instruction format.

    The format is a space-separated string of field names, most
    significant bit first.  Literal "0"/"1" entries are fixed bits;
    a field whose registered width is 0 absorbs all leftover bits.
    """

    # Field name -> width in bits (0 means "use all leftover bits").
    format_fields = {
        "0" : 1,
        "1" : 1,
        "A" : 1,
        "B" : 1,
        "c" : 1,
        "x" : 1,
        "s" : 1,
        "f" : 1,
        "CPNum" : 4,
        "CRd" : 4,
        "CRm" : 4,
        "CRn" : 4,
        "Cond" : 4,
        "H" : 1,
        "I" : 1,
        "Imm24" : 24,
        "L" : 1,
        "N" : 1,
        "Offset" : 0,
        "Offset1" : 4,
        "Offset2" : 4,
        "Op1" : 0,
        "Op2" : 3,
        "Opcode" : 4,
        "Operand2" : 12,
        "P" : 1,
        "Rd" : 4,
        "RdHi" : 4,
        "RdLo" : 4,
        "RegisterList" : 16,
        "R" : 1,
        "Rm" : 4,
        "Rn" : 4,
        "Rs" : 4,
        "S" : 1,
        "Shift" : 3,
        "U" : 1,
        "W" : 1,
    }

    def __init__(self, format, length=32):
        """Precompute fixed bits, their mask, and per-field offsets."""
        self.format = format
        self.length = length
        # Walk the fields LSB-first:
        format = format.split()[::-1]
        # Bits not claimed by fixed-width fields go to a width-0 field:
        leftover = length - sum(self.format_fields[f] for f in format)
        bit = 0
        base = 0
        mask = 0
        offset = 0
        fields = {}
        for f in format:
            bits = self.format_fields[f]
            if bits == 0:
                bits = leftover
            if f == "1":
                base = base + (1 << offset)
            if f in "01":
                mask = mask + (1 << offset)
            else:
                fields[f] = (offset, bits)
            offset = offset + bits
        # All bits of the instruction word must be accounted for:
        assert offset == length
        self.base = base
        self.mask = mask
        self.fields = fields
        self.signature = " ".join(sorted(fields.keys()))

    def match(self, n):
        """Return True when opcode *n* carries this format's fixed bits."""
        return (n & self.mask) == self.base

    def encode(self, fields):
        """Encode a {field-name: value} mapping into an opcode integer.

        Raises ValueError when fields are missing or unknown.
        """
        if len(fields) != len(self.fields):
            missing = set(self.fields.keys()) - set(fields.keys())
            if missing:
                raise ValueError("Missing fields: " + " ".join(missing))
            spurious = set(fields.keys()) - set(self.fields.keys())
            raise ValueError("Spurious fields: " + " ".join(spurious))
        base = self.base
        for f in fields:
            offset, bits = self.fields[f]
            value = fields[f]
            # Mask each value to its field width, then shift into place:
            mask = (1 << bits) - 1
            base = base | ((value & mask) << offset)
        return base
class _ShiftSpec:
    """Encoding data for one shift mnemonic."""

    # By default immediates 1..32 are accepted, with 32 encoded as 0.
    allowed_immediates = {_i: _i % 32 for _i in range(1, 33)}

    def __init__(self, number, allowed_immediates=None, register_allowed=True):
        # Value of the shift-type bits in the Operand2 encoding:
        self.number = number
        # Per-mnemonic override of the accepted immediate amounts:
        if allowed_immediates is not None:
            self.allowed_immediates = allowed_immediates
        # Some shifts (rrx) cannot take a register-specified amount:
        self.register_allowed = register_allowed


# Shift mnemonic -> encoding specification.
_shifts = {
    "lsl": _ShiftSpec(0, allowed_immediates={_i: _i for _i in range(32)}),
    "lsr": _ShiftSpec(2),
    "asr": _ShiftSpec(4),
    "ror": _ShiftSpec(6, allowed_immediates={_i: _i for _i in range(1, 32)}),
    "rrx": _ShiftSpec(6, allowed_immediates={1: 0}, register_allowed=False),
}
# "asl" is the traditional alias for "lsl":
_shifts["asl"] = _shifts["lsl"]
# Matches runs that contain no top-level comma: bracketed groups,
# brace groups and "$x" escapes are kept intact; lone commas match
# the final "." alternative and are filtered out below.
_comma_split_re = _re.compile(r"(?:(?:\[[^\]]*\])|(?:{[^}]*})|(?:\$.)|[^,])+|.")


def _comma_split(str):
    """Split an operand string on top-level commas and strip each piece."""
    pieces = _comma_split_re.findall(str)
    return [piece.strip() for piece in pieces if piece != ","]
class _OperandParser:
    """Parses instruction operands and encodes instruction fields.

    One instance is used per `asm' call.  It carries the label table,
    the constant pool backing the LDR pseudo-instruction, and the
    libraries used to resolve symbolic immediates.
    """

    # Word address of the instruction currently being assembled.
    pc = 0
    # Label name -> word address; replaced per assembly run.
    labels = {}
    # Next free slot (word index) for the constant pool.
    constant_pool_offset = 0
    # Instruction currently being parsed; used for error reporting.
    instruction = None
    # Bit layout of the register form of Operand2 (shifted register).
    operand2_format = _InstructionFormat("Offset Shift Rm", 12)
    # Cache of loaded ctypes libraries, deliberately shared class-wide.
    library_cache = {}
    memory_re = _re.compile(r"^\[(.*)\]\s*(!?)$")
    regset_re = _re.compile(r"^{(.*)}$")
    # Character names usable after '$' in immediates:
    special_chars = {"space" : ord(' '), "newline" : ord('\n'), "tab" : ord('\t')}
    # Valid flag letters for msr CPSR_<flags>/SPSR_<flags>:
    control_flags = frozenset("cxsf")

    def __init__(self, libraries):
        self.constant_pool = []
        self.constant_pool_dict = {}
        self.libraries = [self.convert_library(lib) for lib in libraries]

    def error(self, message):
        """Raise an AssemblerError annotated with the current source line."""
        instruction = self.instruction
        full_message = "%s\nLine %d: %s" % (message, instruction.linenumber, instruction.code)
        error = AssemblerError(full_message)
        error.linenumber = instruction.linenumber
        error.code = instruction.code
        error.message = message
        raise error

    def convert_library(self, lib):
        """Turn a library name into a CDLL handle (cached); pass others through."""
        library_cache = self.library_cache
        if isinstance(lib, str):
            if lib not in library_cache:
                library_cache[lib] = _ctypes.CDLL(_find_library(lib))
            return library_cache[lib]
        else:
            return lib

    def get_constant_pool_address(self, constant):
        """Return the pool word-address of *constant*, adding it only once."""
        if constant in self.constant_pool_dict:
            return self.constant_pool_dict[constant]
        address = self.constant_pool_offset
        self.constant_pool_offset = self.constant_pool_offset + 1
        self.constant_pool.append(constant)
        self.constant_pool_dict[constant] = address
        return address

    def lookup_symbol(self, str):
        """Resolve *str* to an address in one of the libraries, or None."""
        for lib in self.libraries:
            try:
                return _ctypes.cast(getattr(lib, str), _ctypes.c_void_p).value
            except AttributeError:
                pass
        return None

    def encode_immediate(self, n, checked=True):
        """Encode *n* as an ARM data-processing immediate.

        The encoding is an 8-bit value rotated right by twice a 4-bit
        amount.  Returns None (or raises, when *checked*) if *n* has
        no such encoding.
        """
        r = 0
        b = n & 0xFFFFFFFF
        while r < 16:
            if b < 256:
                return (r << 8) | b
            r = r + 1
            b = ((b << 2) | (b >> 30)) & 0xFFFFFFFF  # rotate left by two bits
        if checked:
            self.error("Immediate value cannot be assembled: %d" % n)
        else:
            return None

    def encode_ldr_immediate(self, n, checked=True):
        """Validate an unsigned 12-bit load/store offset."""
        if n >= 0 and n < (1 << 12):
            return n
        elif checked:
            self.error("Immediate offset cannot be assembled: %d" % n)
        else:
            return None

    def parse_immediate(self, str, checked=False, prefix="#"):
        """Parse "#n", "$c" (character) or a library symbol; None if not one."""
        if str and str[0] == prefix:
            str = str[1:].strip()
        try:
            return int(str, base=0)
        except ValueError:
            pass
        if str and str[0] == '$':
            ch = str[1:]
            if len(ch) == 1:
                return ord(ch)
            elif ch in self.special_chars:
                return self.special_chars[ch]
        result = self.lookup_symbol(str)
        if checked and result is None:
            self.error("Expected immediate value, got: %s" % str)
        else:
            return result

    def parse_memory(self, str):
        """Split "[...]" into its comma-separated parts and the "!" suffix."""
        mo = self.memory_re.match(str)
        if mo is None:
            self.error("Expected memory location, got: %s" % str)
        return [s.strip() for s in _comma_split(mo.group(1))], mo.group(2)

    def parse_register(self, str, checked=False):
        """Parse a register name to its number; None when unknown (unchecked)."""
        reg = _registers.get(str.lower(), None)
        if reg is None and checked:
            self.error("Expected register, got: %s" % str)
        else:
            return reg

    def parse_status_register(self, str):
        """Parse "cpsr"/"spsr" into the value of the R encoding bit."""
        reg = _status_registers.get(str.lower(), None)
        if reg is None:
            self.error("Expected CPSR or SPSR, got: %s" % str)
        else:
            return reg

    def parse_status_register_flags(self, str):
        """Parse "CPSR_flags"/"SPSR_flags" into (R, {flag: 0 or 1})."""
        fields = str.split('_', 1)
        if len(fields) == 2:
            R = self.parse_status_register(fields[0])
            flags = set(fields[1].lower())
            if flags.issubset(self.control_flags):
                flags = {f : 1 if f in flags else 0 for f in self.control_flags}
                return (R, flags)
        # BUGFIX: the % operator used to sit inside the string literal,
        # so the message always showed the literal text "%s % str".
        self.error("Expected CPSR_flags or SPSR_flags, got: %s" % str)

    def parse_regset(self, str):
        """Parse "{r0, r2-r4, ...}" into a set of register numbers."""
        mo = self.regset_re.match(str)
        if mo is not None:
            str = mo.group(1)
        result = set()
        for r in _comma_split(str):
            r = r.strip()
            r = r.split("-", 1)
            if len(r) == 1:
                result.add(self.parse_register(r[0].strip()))
            else:
                r1, r2 = r
                r1 = self.parse_register(r1.strip(), checked=True)
                r2 = self.parse_register(r2.strip(), checked=True)
                result.update(range(min(r1, r2), max(r1, r2) + 1))
        return result

    def parse_signed_register(self, str, checked=False):
        """Parse a register with optional leading '-'; return (reg, U bit)."""
        U = 1
        if str and str[0] == "-":
            U = 0
            str = str[1:].strip()
        return self.parse_register(str, checked), U

    def parse_shift(self, str, allow_registers=True):
        """Parse "lsl rX" / "ror #n" into (shift-type bits, amount bits)."""
        shift = str[:3]
        shift_field = str[3:].strip()
        try:
            shift_spec = _shifts[shift.lower()]
        except KeyError:
            self.error("Expected shift, got: %s" % str)
        if allow_registers and shift_spec.register_allowed:
            shift_value = self.parse_register(shift_field)
            if shift_value is not None:
                # Register-specified shift amount:
                return (shift_spec.number + 1, shift_value << 1)
        shift_value = self.parse_immediate(shift_field, checked=True)
        if shift_value in shift_spec.allowed_immediates:
            return (shift_spec.number, shift_spec.allowed_immediates[shift_value])
        else:
            self.error("Shift with value of %d is not allowed" % shift_value)
        self.error("Expected shift, got: %s" % str)

    def parse_operand2(self, operands, encode_imm, allow_shift_register=True):
        """Parse a flexible second operand into I/Operand2/U fields.

        NOTE(review): more than two operand parts fall through and
        return None; current callers never pass more than two.
        """
        if len(operands) == 0:
            return {"I":1, "Operand2": 0, "U": 1}
        elif len(operands) == 1:
            Rm, U = self.parse_signed_register(operands[0])
            if Rm is not None:
                return {"I":0, "Operand2": Rm, "U": U}
            imm = self.parse_immediate(operands[0])
            if imm is not None:
                U = 1
                encoded_imm = encode_imm(imm, checked=False)
                if encoded_imm is None:
                    # Try the negated value with the U bit cleared:
                    U = 0
                    encoded_imm = encode_imm(-imm, checked=False)
                    if encoded_imm is None:
                        encode_imm(imm, checked=True)  # cause error
                return {"I":1, "Operand2": encoded_imm, "U": U}
            self.error("Expected register or immediate, got: %s" % operands[0])
        elif len(operands) == 2:
            Rm, U = self.parse_signed_register(operands[0], checked=True)
            t, c = self.parse_shift(operands[1], allow_shift_register)
            operand2 = self.operand2_format.encode({"Shift" : t, "Offset" : c, "Rm" : Rm})
            return {"I":0, "Operand2": operand2, "U": U}

    def parse_dpi_operand2(self, operands):
        """Operand2 for data-processing instructions (minus not allowed)."""
        fields = self.parse_operand2(operands, self.encode_immediate)
        if fields["U"] == 0:
            self.error("Minus sign (-) not allowed in this instruction")
        del fields["U"]
        return fields

    def parse_load_store(self, operands):
        """Parse the addressing part of ldr/str (pre- or post-indexed)."""
        W = 0
        if len(operands) == 1:
            # Pre-indexed: everything lives inside the brackets; a
            # trailing "!" requests base register writeback.
            pre_indexed = 1
            operands, bang = self.parse_memory(operands[0])
            if bang:
                W = 1
        else:
            # Post-indexed: "[Rn], offset" — the bracketed part must
            # be a bare base register and "!" is meaningless here.
            pre_indexed = 0
            operands0, bang = self.parse_memory(operands[0])
            if len(operands0) != 1:
                self.error("Expected [register], got: %s" % operands[0])
            if bang:
                # BUGFIX: message used to read "post-indexed _mode".
                self.error("In post-indexed mode, ! is not allowed")
            operands = operands0 + operands[1:]
        fields = self.parse_operand2(operands[1:], self.encode_ldr_immediate, allow_shift_register=False)
        fields["P"] = pre_indexed
        fields["W"] = W
        # The I bit has the inverted sense for load/store encodings:
        fields["I"] = 1 - fields["I"]
        fields["Rn"] = self.parse_register(operands[0], checked=True)
        return fields
# Opcode mnemonic -> parse/encode callable; filled in by the install
# loops further down.
_instructions = {}


class _Instruction:
    """One source line: optional label, opcode mnemonic and operands."""

    code = ""
    label = ""
    opcode = ""
    operands = []
    linenumber = 0
    pc = 0

    def __init__(self, code):
        self.code = code
        # Strip a ";" comment, then split off an optional "label:".
        text = code.split(";", 1)[0]
        parts = text.split(":", 1)
        if len(parts) == 1:
            text = parts[0]
        else:
            self.label = parts[0].strip()
            text = parts[1]
        text = text.strip()
        if text:
            pieces = text.split(None, 1)
            self.opcode = pieces[0].strip().lower()
            if len(pieces) > 1:
                self.operands = _comma_split(pieces[1])

    def parse(self, parser):
        """Encode this instruction into its 32-bit opcode via *parser*."""
        parser.instruction = self
        parser.pc = self.pc
        if self.opcode not in _instructions:
            parser.error("Invalid opcode: %s" % self.opcode)
        return _instructions[self.opcode](parser, self.operands)
# Instruction encodings of the 32-bit ARM instruction set; field
# layouts are written most-significant-bit first.
_dpi_format = _InstructionFormat("Cond 0 0 I Opcode S Rn Rd Operand2")
_branch_format = _InstructionFormat("Cond 1 0 1 L Offset")
_bx_format = _InstructionFormat("Cond 0 0 0 1 0 0 1 0 1 1 1 1 1 1 1 1 1 1 1 1 0 0 L 1 Rm")
_load_store_format = _InstructionFormat("Cond 0 1 I P U B W L Rn Rd Operand2")
_load_store_multi_format = _InstructionFormat("Cond 1 0 0 P U S W L Rn RegisterList")
_mul_format = _InstructionFormat("Cond 0 0 0 0 0 0 0 S Rd 0 0 0 0 Rs 1 0 0 1 Rm")
_mla_format = _InstructionFormat("Cond 0 0 0 0 0 0 1 S Rd Rn Rs 1 0 0 1 Rm")
_clz_format = _InstructionFormat("Cond 0 0 0 1 0 1 1 0 1 1 1 1 Rd 1 1 1 1 0 0 0 1 Rm")
_mrs_format = _InstructionFormat("Cond 0 0 0 1 0 R 0 0 1 1 1 1 Rd 0 0 0 0 0 0 0 0 0 0 0 0")
_msr_format_reg = _InstructionFormat("Cond 0 0 0 1 0 R 1 0 f s x c 1 1 1 1 0 0 0 0 0 0 0 0 Rm")
_msr_format_imm = _InstructionFormat("Cond 0 0 1 1 0 R 1 0 f s x c 1 1 1 1 Operand2")
_swi_format = _InstructionFormat("Cond 1 1 1 1 Imm24")
def _parse_dpi(opcode, condition, s, parser, operands):
    """Encode a data-processing instruction: op Rd, Rn, <Operand2>."""
    if len(operands) not in (3, 4):
        parser.error("Expected 3 or 4 arguments, got %d" % len(operands))
    fields = parser.parse_dpi_operand2(operands[2:])
    fields["Rd"] = parser.parse_register(operands[0], checked=True)
    fields["Rn"] = parser.parse_register(operands[1], checked=True)
    fields["Opcode"] = opcode
    fields["Cond"] = condition
    fields["S"] = s
    return _dpi_format.encode(fields)
def _parse_move(opcode, condition, s, parser, operands):
    """Encode mov/mvn: op Rd, <Operand2>; the Rn field is unused (0)."""
    if len(operands) not in (2, 3):
        parser.error("Expected 2 or 3 arguments, got %d" % len(operands))
    fields = parser.parse_dpi_operand2(operands[1:])
    fields["Rd"] = parser.parse_register(operands[0], checked=True)
    fields["Rn"] = 0
    fields["Opcode"] = opcode
    fields["Cond"] = condition
    fields["S"] = s
    return _dpi_format.encode(fields)
def _parse_cond(opcode, condition, s, parser, operands):
    """Encode tst/teq/cmp/cmn: op Rn, <Operand2>; the Rd field is unused (0)."""
    if len(operands) not in (2, 3):
        parser.error("Expected 2 or 3 arguments, got %d" % len(operands))
    fields = parser.parse_dpi_operand2(operands[1:])
    fields["Rd"] = 0
    fields["Rn"] = parser.parse_register(operands[0], checked=True)
    fields["Opcode"] = opcode
    fields["Cond"] = condition
    fields["S"] = s
    return _dpi_format.encode(fields)
def _parse_branch(condition, link, parser, operands):
    """Encode b/bl to a label; the offset is in words relative to pc+2."""
    if len(operands) != 1:
        parser.error("Expected 1 argument, got %d" % len(operands))
    label = operands[0]
    if label not in parser.labels:
        parser.error("Undefined label: %s" % label)
    # Branch offsets are relative to the pipeline pc (two words ahead):
    offset = parser.labels[label] - parser.pc - 2
    return _branch_format.encode({"L" : link, "Cond" : condition, "Offset" : offset})
def _parse_bx(condition, link, parser, operands):
    """Encode bx/blx to a register."""
    if len(operands) != 1:
        parser.error("Expected 1 argument, got %d" % len(operands))
    target = parser.parse_register(operands[0], checked=True)
    return _bx_format.encode({"L" : link, "Cond" : condition, "Rm" : target})
def _parse_load_store(condition, load, B, parser, operands):
    """Encode ldr/str (optionally byte-sized via B): op Rd, [Rn, ...]."""
    if len(operands) not in (2, 3, 4):
        parser.error("Expected 2, 3 or 4 arguments, got %d" % len(operands))
    Rd = parser.parse_register(operands[0], checked=True)
    fields = parser.parse_load_store(operands[1:])
    fields.update(Rd=Rd, L=load, B=B, Cond=condition)
    return _load_store_format.encode(fields)
def _parse_load_store_multi(condition, load, before, increment, parser, operands):
    """Encode ldm/stm: op Rn[!], {registers}[^]."""
    if len(operands) != 2:
        parser.error("Expected 2 arguments, got %d" % len(operands))
    writeback = 0
    psr_bit = 0
    base = operands[0]
    # A trailing "!" on the base register requests writeback:
    if base and base[-1] == '!':
        writeback = 1
        base = base[:-1].strip()
    regs = operands[1]
    # A trailing "^" on the register list sets the S bit:
    if regs and regs[-1] == '^':
        psr_bit = 1
        regs = regs[:-1].strip()
    Rn = parser.parse_register(base, checked=True)
    RegisterList = sum(1 << r for r in parser.parse_regset(regs))
    fields = {"P": before, "U": increment, "Cond" : condition, "L" : load, "W" : writeback, "S" : psr_bit, "Rn" : Rn, "RegisterList" : RegisterList}
    return _load_store_multi_format.encode(fields)
def _parse_push_pop(condition, load, parser, operands):
    """Encode push/pop as stmdb/ldmia on the stack pointer with writeback."""
    if len(operands) != 1:
        parser.error("Expected 1 argument, got %d" % len(operands))
    # push = store, decrement-before; pop = load, increment-after:
    RegisterList = sum(1 << r for r in parser.parse_regset(operands[0]))
    fields = {"P": 1 - load, "U": load, "Cond" : condition, "L" : load, "W" : 1, "S" : 0, "Rn" : 13, "RegisterList" : RegisterList}
    return _load_store_multi_format.encode(fields)
def _parse_mul(condition, S, parser, operands):
    """Encode mul Rd, Rm, Rs."""
    if len(operands) != 3:
        parser.error("Expected 3 arguments, got %d" % len(operands))
    Rd = parser.parse_register(operands[0], checked=True)
    Rm = parser.parse_register(operands[1], checked=True)
    Rs = parser.parse_register(operands[2], checked=True)
    if Rd == Rm:
        # Multiplication commutes, so swap to avoid Rd == Rm
        # (restricted in the classic mul encoding):
        Rm, Rs = Rs, Rm
    return _mul_format.encode({"Rd" : Rd, "Rm" : Rm, "Rs" : Rs, "Cond" : condition, "S" : S})
def _parse_mla(condition, S, parser, operands):
    """Encode mla Rd, Rm, Rs, Rn (multiply-accumulate)."""
    if len(operands) != 4:
        parser.error("Expected 4 arguments, got %d" % len(operands))
    Rd = parser.parse_register(operands[0], checked=True)
    Rm = parser.parse_register(operands[1], checked=True)
    Rs = parser.parse_register(operands[2], checked=True)
    Rn = parser.parse_register(operands[3], checked=True)
    if Rd == Rm:
        # Multiplication commutes, so swap to avoid Rd == Rm:
        Rm, Rs = Rs, Rm
    return _mla_format.encode({"Rd" : Rd, "Rm" : Rm, "Rs" : Rs, "Rn" : Rn, "Cond" : condition, "S" : S})
def _parse_clz(condition, parser, operands):
    """Encode clz Rd, Rm (count leading zeros)."""
    if len(operands) != 2:
        parser.error("Expected 2 arguments, got %d" % len(operands))
    regs = [parser.parse_register(op, checked=True) for op in operands]
    return _clz_format.encode({"Rd" : regs[0], "Rm" : regs[1], "Cond" : condition})
def _parse_mrs(condition, parser, operands):
    """Encode mrs Rd, CPSR/SPSR."""
    if len(operands) != 2:
        parser.error("Expected 2 arguments, got %d" % len(operands))
    dest = parser.parse_register(operands[0], checked=True)
    which = parser.parse_status_register(operands[1])
    return _mrs_format.encode({"Rd" : dest, "R" : which, "Cond" : condition})
def _parse_msr(condition, parser, operands):
    """Encode msr CPSR_flags/SPSR_flags, Rm-or-#immediate."""
    if len(operands) != 2:
        parser.error("Expected 2 arguments, got %d" % len(operands))
    R, fields = parser.parse_status_register_flags(operands[0])
    fields["R"] = R
    fields["Cond"] = condition
    imm = parser.parse_immediate(operands[1])
    if imm is None:
        # Register source form:
        fields["Rm"] = parser.parse_register(operands[1], checked=True)
        return _msr_format_reg.encode(fields)
    # Immediate source form:
    fields["Operand2"] = parser.encode_immediate(imm)
    return _msr_format_imm.encode(fields)
def _parse_swi(condition, parser, operands):
    """Encode swi/svc with a 24-bit immediate."""
    if len(operands) != 1:
        parser.error("Expected 1 argument, got %d" % len(operands))
    imm24 = parser.parse_immediate(operands[0], checked=True)
    top = 1 << 24
    if not (0 <= imm24 < top):
        parser.error("Immediate value should be between 0 and %d, got: %d" % (top - 1, imm24))
    return _swi_format.encode({"Cond": condition, "Imm24" : imm24})
# Install the data-processing, move and comparison instructions for
# every condition code; dpi and move also get an "s" (set-flags)
# variant, while the comparisons always set the flags.
_dpi_instructions = [("and", 0), ("eor", 1), ("sub", 2), ("rsb", 3), ("add", 4),
    ("adc", 5), ("sbc", 6), ("rsc", 7), ("orr", 12), ("bic", 14)]
_move_instructions = [("mov", 13), ("mvn", 15)]
_cond_instructions = [("tst", 8), ("teq", 9), ("cmp", 10), ("cmn", 11)]

for _i, _cond in enumerate(_conditions):
    for _name, _opcode in _dpi_instructions:
        _instructions[_name + _cond] = _partial(_parse_dpi, _opcode, _i, 0)
        _instructions[_name + _cond + "s"] = _partial(_parse_dpi, _opcode, _i, 1)
    for _name, _opcode in _move_instructions:
        _instructions[_name + _cond] = _partial(_parse_move, _opcode, _i, 0)
        _instructions[_name + _cond + "s"] = _partial(_parse_move, _opcode, _i, 1)
    for _name, _opcode in _cond_instructions:
        _instructions[_name + _cond] = _partial(_parse_cond, _opcode, _i, 1)
# Install branch instructions: b/bl to labels, bx/blx to registers.
for _i, _cond in enumerate(_conditions):
    _instructions["b" + _cond] = _partial(_parse_branch, _i, 0)
    _instructions["bl" + _cond] = _partial(_parse_branch, _i, 1)
    _instructions["bx" + _cond] = _partial(_parse_bx, _i, 0)
    _instructions["blx" + _cond] = _partial(_parse_bx, _i, 1)
# Install load/store instructions (word and byte variants).
# NOTE: this loop appeared twice verbatim in the original source; the
# second copy re-installed exactly the same handlers, so it was
# removed.  Behavior is unchanged because the assignments were
# idempotent.
for _i in range(len(_conditions)):
    _fullname = "ldr" + _conditions[_i]
    _instructions[_fullname] = _partial(_parse_load_store, _i, 1, 0)
    _fullname = "str" + _conditions[_i]
    _instructions[_fullname] = _partial(_parse_load_store, _i, 0, 0)
    _fullname = "ldr" + _conditions[_i] + "b"
    _instructions[_fullname] = _partial(_parse_load_store, _i, 1, 1)
    _fullname = "str" + _conditions[_i] + "b"
    _instructions[_fullname] = _partial(_parse_load_store, _i, 0, 1)
# Install the multi-register transfer instructions: push/pop plus the
# four ldm/stm addressing modes (ia/ib/da/db).
for _i, _cond in enumerate(_conditions):
    _instructions["push" + _cond] = _partial(_parse_push_pop, _i, 0)
    _instructions["pop" + _cond] = _partial(_parse_push_pop, _i, 1)
    for _increment in range(2):
        for _before in range(2):
            _mode = "di"[_increment] + "ab"[_before]
            _instructions["ldm" + _cond + _mode] = _partial(
                _parse_load_store_multi, _i, 1, _before, _increment)
            _instructions["stm" + _cond + _mode] = _partial(
                _parse_load_store_multi, _i, 0, _before, _increment)
# Install multiply instructions (each with an "s" set-flags variant):
for _i, _cond in enumerate(_conditions):
    _instructions["mul" + _cond] = _partial(_parse_mul, _i, 0)
    _instructions["mul" + _cond + "s"] = _partial(_parse_mul, _i, 1)
    _instructions["mla" + _cond] = _partial(_parse_mla, _i, 0)
    _instructions["mla" + _cond + "s"] = _partial(_parse_mla, _i, 1)
# Install count-leading-zeros, status register transfer and software
# interrupt instructions:
for _i, _cond in enumerate(_conditions):
    _instructions["clz" + _cond] = _partial(_parse_clz, _i)
    _instructions["mrs" + _cond] = _partial(_parse_mrs, _i)
    _instructions["msr" + _cond] = _partial(_parse_msr, _i)
    # "swi" and its modern spelling "svc" name the same instruction:
    for _name in ("swi", "svc"):
        _instructions[_name + _cond] = _partial(_parse_swi, _i)
# Support for the LDR pseudo-instruction: "ldr rX, =value" becomes a
# mov/mvn when the value (or its complement) fits an ARM immediate,
# otherwise a pc-relative load from the constant pool.
def _wrap_ldr(ldr, mov, mvn, parser, operands):
    if len(operands) == 2:
        imm = parser.parse_immediate(operands[1], checked=False, prefix="=")
        if imm is not None:
            parser.parse_register(operands[0], checked=True)
            if parser.encode_immediate(imm, checked=False) is not None:
                # Value fits an ARM immediate; emit a plain mov:
                operands = [operands[0], "#%d" % imm]
                return mov(parser, operands)
            elif parser.encode_immediate(~imm, checked=False) is not None:
                # The complemented value fits; emit mvn instead:
                operands = [operands[0], "#%d" % ~imm]
                return mvn(parser, operands)
            else:
                # Fall back to a pc-relative constant pool load; the
                # byte offset accounts for the pipeline pc (pc + 2 words):
                address = parser.get_constant_pool_address(imm)
                address = 4 * (address - parser.pc - 2)
                return ldr(parser, [operands[0], "[pc, #%d]" % address])
    # Not the "=value" form; defer to the regular ldr encoder:
    return ldr(parser, operands)

# Wrap the previously installed ldr handlers for every condition:
for _cond in _conditions:
    _name = "ldr" + _cond
    _instructions[_name] = _partial(_wrap_ldr, _instructions[_name],
        _instructions["mov" + _cond], _instructions["mvn" + _cond])
def _make_executable_array(opcodes):
    """Copy *opcodes* (32-bit words) into executable memory.

    Returns a ctypes uint32 array backed by an anonymous mmap with
    PROT_EXEC set, so its address can be called as machine code.
    NOTE(review): the mmap `prot` keyword is POSIX-only, so this will
    not work on Windows; an empty opcode list would also fail because
    mmap rejects a zero length — confirm callers always pass at least
    one word.
    """
    import mmap
    n = len(opcodes)
    m = mmap.mmap(-1, 4*n, prot=mmap.PROT_READ|mmap.PROT_WRITE|mmap.PROT_EXEC)
    result = (_ctypes.c_uint32 * n).from_buffer(m)
    for i in range(n):
        result[i] = opcodes[i]
    return result
# Type-code tokens accepted in prototype strings (struct-style letters
# plus a few spelled-out names) mapped to their ctypes equivalents;
# the empty string denotes a void result.
_type_flags = {
    "b" : _ctypes.c_int8,
    "B" : _ctypes.c_uint8,
    "h" : _ctypes.c_int16,
    "H" : _ctypes.c_uint16,
    "i" : _ctypes.c_int32,
    "I" : _ctypes.c_uint32,
    "l" : _ctypes.c_int64,
    "L" : _ctypes.c_uint64,
    "str" : _ctypes.c_char_p,
    "ch" : _ctypes.c_char,
    "bool" : _ctypes.c_bool,
    "p" : _ctypes.c_void_p,
    "" : None
}
def prototype(proto):
    """Build a ctypes CFUNCTYPE from an "argtypes -> result" string.

    Each whitespace-separated code on the left of "->" and the code on
    the right are looked up in _type_flags.  A non-string argument is
    assumed to already be a ctypes prototype and is returned as-is.
    """
    if not isinstance(proto, str):
        return proto
    arg_part, result_part = proto.split("->")
    restype = _type_flags[result_part.strip()]
    argtypes = [_type_flags[code.strip()] for code in arg_part.split()]
    return _ctypes.CFUNCTYPE(restype, *argtypes)
def _make_function(exec_array, proto):
    """Bind an executable opcode array to a ctypes function pointer."""
    functype = prototype(proto)
    fn = functype(_ctypes.addressof(exec_array))
    # Keep the machine code alive for as long as the function object lives.
    fn.__armasm_code__ = exec_array
    return fn
def asm(prototype, code, libraries=()):
    """Convert ARM assembler into a callable object.
    Required arguments:
    prototype -- either a `ctypes.CFUNCTYPE' object or a string acceptable to `armasm.prototype'
    code -- the actual assembler code, as a string
    Optional arguments:
    libraries -- a sequence of either `ctypes.CDLL' objects or strings acceptable to `ctypes.util.find_library'
    Examples:
    asm("i i -> i", "mul r0, r1, r0") -- returns a callable object which takes two integers and returns their product
    """
    # Pass 1: split the source into instructions, recording each label's
    # address and each instruction's program counter (in words).
    linenumber = 0
    pc = 0
    _instructions = []  # NOTE(review): local list shadows the module-level _instructions dict
    labels = {}
    for line in code.split("\n"):
        linenumber = linenumber + 1
        instruction = _Instruction(line)
        instruction.linenumber = linenumber
        instruction.pc = pc
        if instruction.label:
            labels[instruction.label] = pc
        if instruction.opcode:
            # Only lines carrying an opcode advance the pc.
            pc = pc + 1
        _instructions.append(instruction)
    # Pass 2: encode every instruction to its 32-bit opcode.
    opcodes = []
    parser = _OperandParser(libraries)
    parser.labels = labels
    # The constant pool begins one word past the "bx lr" epilogue below.
    parser.constant_pool_offset = pc + 1
    for instruction in _instructions:
        if instruction.opcode:
            v = instruction.parse(parser)
            opcodes.append(v)
    opcodes.append(0xe12fff1e) # bx lr
    opcodes.extend(parser.constant_pool)
    result = _make_executable_array(opcodes)
    return _make_function(result, prototype)
def dis(asm_function):
    """Disassemble assembled function object.
    Given a callable object created with `armasm.asm', this function
    prints its disassembled listing.
    This function uses the external `objdump' tool.
    It first tries the ARM-specific `arm-linux-gnueabihf-objdump', then
    falls back to the generic `objdump'.
    If neither exists or their invocation produces an error, this function
    will error out.
    """
    import tempfile
    import os
    import shutil
    import subprocess
    # shutil.which handles a missing tool portably; the previous
    # subprocess call to `which' crashed with OSError when `which'
    # itself was absent, and only caught CalledProcessError.
    executable = shutil.which("arm-linux-gnueabihf-objdump") or "objdump"
    f = tempfile.NamedTemporaryFile(delete=False)
    try:
        f.write(bytearray(asm_function.__armasm_code__))
        f.close()
        output = subprocess.check_output(
            (executable, "-D", f.name, "-m", "arm", "-b", "binary")).decode()
    finally:
        os.unlink(f.name)
    # Try to skip objdump's file headers; the listing proper starts at offset 0.
    start = " 0:"
    loc = output.find(start)
    if loc >= 0:
        output = output[loc:]
    print(output)
| {
"repo_name": "stephanh42/armasm",
"path": "armasm.py",
"copies": "1",
"size": "29874",
"license": "mit",
"hash": -1284570822462034000,
"line_mean": 34.90625,
"line_max": 134,
"alpha_frac": 0.5745129544,
"autogenerated": false,
"ratio": 3.3748305467690916,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4449343501169091,
"avg_score": null,
"num_lines": null
} |
# Armon Dadgar
# May 10th 2009
#
# This file houses the code used for interactions between repy and the node manager.
# Namely, it checks for a stopfile to inform us to terminate, and it periodically
# writes a status file informing the NM of our status.
#
# This is used to write out our status
import statusstorage
# This prevents updates to our current status when we are about to exit
statuslock = statusstorage.statuslock
# This is to sleep
import time
# This is for the thread
import threading
# This is for path checking and stuff
import os
# For ostype, harshexit
import harshexit
# For setting thread priority (import fails on non-windows)
try:
import windows_api
except:
windows_api = None
# This is to get around the safe module
safe_open = open
# Store our important variables
stopfilename = None
statusfilename_prefix = None
frequency = 1 # Check rate in seconds
# This lock is to allow the thread to run
# If the thread fails to acquire it (e.g. somebody else has it) it will stop
run_thread_lock = threading.Lock()
def init(stopfile=None, statusfile=None, freq=1):
"""
<Purpose>
Prepares the module to run.
<Arguments>
stopfile:
The name of the stopfile to check for. Set to None to disable checking for a stopfile.
statusfile:
The filename prefix for writing out our status. Set to None to disable a status file
freq:
The frequency of checks for the stopfile and status updates. 1 second is default.
"""
global stopfilename, statusfilename_prefix, frequency
# Check for the stopfile
if stopfile != None and os.path.exists(stopfile):
raise Exception, "Stop file already exists! File:"+stopfile
# Assign the values
stopfilename = stopfile
statusfilename_prefix = statusfile
frequency = freq
# Initialize statusstorage
statusstorage.init(statusfilename_prefix)
def launch(pid):
  """
  <Purpose>
    Starts a thread to handle status updates and stopfile checking.
  <Arguments>
    pid:
      The repy process id on unix, or None on Windows.
  <Side Effects>
    Starts a new thread, unless neither a stopfile nor a status file
    was configured via init().
  """
  # With no stopfile and no status file there is nothing to monitor.
  if stopfilename is None and statusfilename_prefix is None:
    return
  monitor = nm_interface_thread(pid)
  monitor.start()
def stop():
  """
  <Purpose>
    Stops the worker thread.
    WARNING: Do not call this twice. It will block indefinitely.
  """
  # The worker loop keeps running only while it can briefly acquire this
  # lock; holding it permanently makes the thread exit on its next check.
  global run_thread_lock
  run_thread_lock.acquire()
# This is an internal function called when the stopfile is found
# It handles some of the nonportable details for nm_interface_thread
def _stopfile_exit(exitcode, pid):
  # Internal: terminate repy with the given exit code once the stopfile
  # has been found.  Handles the Windows/*nix process-model difference.
  # On Windows, we are in the Repy process, so we can just use harshexit
  if harshexit.ostype in ["Windows"]:
    # Harshexit will store the appropriate status for us
    harshexit.harshexit(exitcode)
  else: # On NIX this monitor runs in a process external to repy
    try:
      if exitcode == 44:
        # Write out status information, repy was Stopped
        statusstorage.write_status("Stopped")
      else:
        # Status terminated
        statusstorage.write_status("Terminated")
    except:
      # Best-effort: a failed status write must not prevent the kill below
      pass
    # Disable the other status thread, in case the resource thread detects we've killed repy
    statusstorage.init(None)
    # Kill repy
    harshexit.portablekill(pid)
    # Sleep so the repy process dies before this monitor process exits
    # (fix proposed by Derek)
    time.sleep(1)
    # Exit
    harshexit.harshexit(78)
# This is the actual worker thread
class nm_interface_thread(threading.Thread):
  """Worker thread: periodically writes our status file and watches for
  a stopfile telling us to terminate.  Runs until stop() grabs
  run_thread_lock."""

  def __init__(self, pid):
    # pid: the repy process id on unix, or None on Windows
    self.repy_process_id = pid
    threading.Thread.__init__(self)

  def run(self):
    global stopfilename, frequency, run_thread_lock
    # On Windows elevate our priority above the user code.
    if harshexit.ostype in ["Windows"]:
      # Elevate our priority, above normal is higher than the usercode
      windows_api.set_current_thread_priority(windows_api.THREAD_PRIORITY_ABOVE_NORMAL)
    while True:
      # Attempt to get the lock; stop() holds it permanently to signal shutdown
      have_lock = run_thread_lock.acquire(False)
      # If we have the lock, release and continue. Else break and exit the thread
      if have_lock: run_thread_lock.release()
      else: break
      # Get the status lock
      statuslock.acquire()
      # Write out our status
      statusstorage.write_status("Started")
      # Release the status lock
      statuslock.release()
      # Look for the stopfile
      if stopfilename != None and os.path.exists(stopfilename):
        try:
          # Get a file object for the file
          fileobject = safe_open(stopfilename)
          # Read in the contents, close the object
          contents = fileobject.read()
          fileobject.close()
          # Check the length, if there is nothing then just close as stopped
          if len(contents) > 0:
            # Split, at most we have 2 parts, the exit code and message
            (exitcode, mesg) = contents.split(";",1)
            exitcode = int(exitcode)
            # Check if exitcode is 56, which stands for ThreadErr is specified
            # ThreadErr cannot be specified externally, since it has side-affects
            # such as changing global thread restrictions
            if exitcode == 56:
              raise Exception, "ThreadErr exit code specified. Exit code not allowed."
            # Print the message, then call harshexit with the exitcode
            if mesg != "":
              print mesg
            _stopfile_exit(exitcode, self.repy_process_id)
          else:
            raise Exception, "Stopfile has no content."
        except:
          # On any issue (bad content, bad exit code), exit as "Stopped" (44)
          _stopfile_exit(44, self.repy_process_id)
      # Sleep until the next loop around.
      time.sleep(frequency)
| {
"repo_name": "SeattleTestbed/repy_v2",
"path": "nmstatusinterface.py",
"copies": "1",
"size": "5993",
"license": "mit",
"hash": 5105458017284097000,
"line_mean": 27.2688679245,
"line_max": 92,
"alpha_frac": 0.6701151343,
"autogenerated": false,
"ratio": 3.9794156706507304,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.025801573614908146,
"num_lines": 212
} |
# Armon Dadgar
#
# Creates python interface for windows api calls that are required
#
# According to MSDN most of these calls are Windows 2K Pro and up
# Trying to replace the win32* stuff using ctypes
# Ctypes enable us to call the Windows API which written in C
import ctypes
# Needed so that we can sleep
import time
# Used for OS detection
import os
# Used for processing command output (netstat, etc)
import textops
# Detect whether or not it is Windows CE/Mobile
MobileCE = False
if os.name == 'ce':
MobileCE = True
else:
import portable_popen
# Main Libraries
# Loaded depending on OS
if MobileCE:
# kerneldll links to the library that has Windows Kernel Calls
kerneldll = ctypes.cdll.coredll
# Toolhelp library
# Contains Tool helper functions
toolhelp = ctypes.cdll.toolhelp
else:
# kerneldll links to the library that has Windows Kernel Calls
kerneldll = ctypes.windll.kernel32
# memdll links to the library that has Windows Process/Thread Calls
memdll = ctypes.windll.psapi
# Types
DWORD = ctypes.c_ulong # Map Microsoft DWORD type to C long
WORD = ctypes.c_ushort # Map microsoft WORD type to C ushort
HANDLE = ctypes.c_ulong # Map Microsoft HANDLE type to C long
LONG = ctypes.c_long # Map Microsoft LONG type to C long
SIZE_T = ctypes.c_ulong # Map Microsoft SIZE_T type to C long
ULONG_PTR = ctypes.c_ulong # Map Microsoft ULONG_PTR to C long
LPTSTR = ctypes.c_char_p # Map Microsoft LPTSTR to a pointer to a string
LPCSTR = ctypes.c_char_p # Map Microsoft LPCTSTR to a pointer to a string
ULARGE_INTEGER = ctypes.c_ulonglong # Map Microsoft ULARGE_INTEGER to 64 bit int
LARGE_INTEGER = ctypes.c_longlong # Map Microsoft ULARGE_INTEGER to 64 bit int
DWORDLONG = ctypes.c_ulonglong # Map Microsoft DWORDLONG to 64 bit int
# General Constants
ULONG_MAX = 4294967295 # Maximum value for an unsigned long, 2^32 -1
# Microsoft Constants
TH32CS_SNAPTHREAD = ctypes.c_ulong(0x00000004) # Create a snapshot of all threads
TH32CS_SNAPPROCESS = ctypes.c_ulong(0x00000002) # Create a snapshot of a process
TH32CS_SNAPHEAPLIST = ctypes.c_ulong(0x00000001) # Create a snapshot of a processes heap
INVALID_HANDLE_VALUE = -1
THREAD_QUERY_INFORMATION = 0x0040
THREAD_SET_INFORMATION = 0x0020
THREAD_SUSPEND_RESUME = 0x0002
THREAD_HANDLE_RIGHTS = THREAD_SET_INFORMATION | THREAD_SUSPEND_RESUME | THREAD_QUERY_INFORMATION
PROCESS_TERMINATE = 0x0001
PROCESS_QUERY_INFORMATION = 0x0400
SYNCHRONIZE = 0x00100000L
PROCESS_SET_INFORMATION = 0x0200
PROCESS_SET_QUERY_AND_TERMINATE = PROCESS_SET_INFORMATION | PROCESS_TERMINATE | PROCESS_QUERY_INFORMATION | SYNCHRONIZE
ERROR_ALREADY_EXISTS = 183
WAIT_FAILED = 0xFFFFFFFF
WAIT_OBJECT_0 = 0x00000000L
WAIT_ABANDONED = 0x00000080L
CE_FULL_PERMISSIONS = ctypes.c_ulong(0xFFFFFFFF)
NORMAL_PRIORITY_CLASS = ctypes.c_ulong(0x00000020)
HIGH_PRIORITY_CLASS = ctypes.c_ulong(0x00000080)
INFINITE = 0xFFFFFFFF
THREAD_PRIORITY_HIGHEST = 2
THREAD_PRIORITY_ABOVE_NORMAL = 1
THREAD_PRIORITY_NORMAL = 0
PROCESS_BELOW_NORMAL_PRIORITY_CLASS = 0x00004000
PROCESS_NORMAL_PRIORITY_CLASS = 0x00000020
PROCESS_ABOVE_NORMAL_PRIORITY_CLASS = 0x00008000
# How many times to attempt sleeping/resuming a thread or process
# before giving up with failure
ATTEMPT_MAX = 10
# Which threads should not be put to sleep?
EXCLUDED_THREADS = []
# Key Functions
# Maps Microsoft API calls to more convenient name for internal use
# Also abstracts the linking library for each function for more portability
# Load the Functions that have a common library between desktop and CE
#_suspend_thread = kerneldll.SuspendThread # Puts a thread to sleep
# This workaround is needed to keep the Python Global Interpreter Lock (GIL)
# Normal ctypes CFUNCTYPE or WINFUNCTYPE prototypes will release the GIL
# Which causes the process to infinitely deadlock
# The downside to this method, is that a ValueError Exception is always thrown
_suspend_thread_proto = ctypes.PYFUNCTYPE(DWORD)
# errcheck hook for SuspendThread: pass the raw return value through
# unchanged (errors from the PYFUNCTYPE workaround surface as ValueError,
# which _suspend_thread below swallows).
def _suspend_thread_err_check(result, func, args):
  return result
_suspend_thread_err = _suspend_thread_proto(("SuspendThread", kerneldll))
_suspend_thread_err.errcheck = _suspend_thread_err_check
def _suspend_thread(handle):
  """Call SuspendThread on a thread handle, via the GIL-holding binding.

  The PYFUNCTYPE-based binding always raises a spurious ValueError (a
  documented downside of that GIL workaround); treat it as a 0 result.
  """
  try:
    return _suspend_thread_err(handle)
  except ValueError:
    return 0
_resume_thread = kerneldll.ResumeThread # Resumes Thread execution
_open_process = kerneldll.OpenProcess # Returns Process Handle
_create_process = kerneldll.CreateProcessW # Launches new process
_set_thread_priority = kerneldll.SetThreadPriority # Sets a threads scheduling priority
_thread_times = kerneldll.GetThreadTimes # Gets CPU time data for a thread
_process_exit_code = kerneldll.GetExitCodeProcess # Gets Process Exit code
_terminate_process = kerneldll.TerminateProcess # Kills a process
_close_handle = kerneldll.CloseHandle # Closes any(?) handle object
_get_last_error = kerneldll.GetLastError # Gets last error number of last error
_wait_for_single_object = kerneldll.WaitForSingleObject # Waits to acquire mutex
_create_mutex = kerneldll.CreateMutexW # Creates a Mutex, Unicode version
_release_mutex = kerneldll.ReleaseMutex # Releases mutex
try:
_get_tick_count = kerneldll.GetTickCount64 # Try to get the 64 bit variant
except AttributeError: # This means the function does not exist
_get_tick_count = kerneldll.GetTickCount # Use the 32bit version
_free_disk_space = kerneldll.GetDiskFreeSpaceExW # Determines free disk space
# Load CE Specific function
if MobileCE:
# Uses kernel, but is slightly different on desktop
_global_memory_status = kerneldll.GlobalMemoryStatus
# Things using toolhelp
_create_snapshot = toolhelp.CreateToolhelp32Snapshot # Makes snapshot of threads
_close_snapshot = toolhelp.CloseToolhelp32Snapshot # destroys a snapshot
_first_thread = toolhelp.Thread32First # Reads from Thread from snapshot
_next_thread = toolhelp.Thread32Next # Reads next Thread from snapshot
# Things using kernel
# Windows CE uses thread identifiers and handles interchangeably
# Use internal ce method to handle this
# _open_thread_ce
# Non-Supported functions:
# _process_times, there is no tracking of this on a process level
# _process_memory, CE does not track memory usage
# _current_thread_id, CE has this defined inline in a header file, so we need to do it
# These must be handled specifically
# We override this later
_current_thread_id = None
# Heap functions only needed on CE for getting memory info
_heap_list_first = toolhelp.Heap32ListFirst # Initializes Heap List
_heap_list_next = toolhelp.Heap32ListNext # Iterates through the heap list
_heap_first = toolhelp.Heap32First # Initializes Heap Entry
_heap_next = toolhelp.Heap32Next # Iterates through the Heaps
# Non-officially supported methods
_get_current_permissions = kerneldll.GetCurrentPermissions
_set_process_permissions = kerneldll.SetProcPermissions
# Load the Desktop Specific functions
else:
# These are in the kernel library on the desktop
_open_thread = kerneldll.OpenThread # Returns Thread Handle
_create_snapshot = kerneldll.CreateToolhelp32Snapshot # Makes snapshot of threads
_first_thread = kerneldll.Thread32First # Reads from Thread from snapshot
_next_thread = kerneldll.Thread32Next # Reads next Thread from snapshot
_global_memory_status = kerneldll.GlobalMemoryStatusEx # Gets global memory info
_current_thread_id = kerneldll.GetCurrentThreadId # Returns the thread_id of the current thread
# These process specific functions are only available on the desktop
_process_times = kerneldll.GetProcessTimes # Returns data about Process CPU use
_process_memory = memdll.GetProcessMemoryInfo # Returns data on Process mem use
# This is only available for desktop, sets the process wide priority
_set_process_priority = kerneldll.SetPriorityClass
# Classes
# Python Class which is converted to a C struct
# It encapsulates Thread Data, and is used in
# Windows Thread calls
class _THREADENTRY32(ctypes.Structure):
  """ctypes mirror of the Win32 THREADENTRY32 struct (desktop),
  filled in by Thread32First/Thread32Next snapshot walks."""
  _fields_ = [('dwSize', DWORD),
              ('cntUsage', DWORD),
              ('th32thread_id', DWORD),
              ('th32OwnerProcessID', DWORD),
              ('tpBasePri', LONG),
              ('tpDeltaPri', LONG),
              ('dwFlags', DWORD)]
# It encapsulates Thread Data, and is used in
# Windows Thread calls, CE Version
class _THREADENTRY32CE(ctypes.Structure):
  """Windows CE variant of THREADENTRY32: same leading fields as the
  desktop struct plus two CE-specific trailing fields."""
  _fields_ = [('dwSize', DWORD),
              ('cntUsage', DWORD),
              ('th32thread_id', DWORD),
              ('th32OwnerProcessID', DWORD),
              ('tpBasePri', LONG),
              ('tpDeltaPri', LONG),
              ('dwFlags', DWORD),
              ('th32AccessKey', DWORD),
              ('th32CurrentProcessID', DWORD)]
# Python Class which is converted to a C struct
# It encapsulates Time data, with a low and high number
# We use it to get Process times (user/system/etc.)
class _FILETIME(ctypes.Structure):
  """Win32 FILETIME: a 64-bit timestamp split into low/high DWORDs.
  Used here for GetThreadTimes/GetProcessTimes results."""
  _fields_ = [('dwLowDateTime', DWORD),
              ('dwHighDateTime', DWORD)]
# Python Class which is converted to a C struct
# It encapsulates data about a Processes
# Memory usage. A pointer to the struct is passed
# to the Windows API
class _PROCESS_MEMORY_COUNTERS(ctypes.Structure):
  """Win32 PROCESS_MEMORY_COUNTERS; a pointer to an instance is passed
  to GetProcessMemoryInfo (cb must hold the struct size)."""
  _fields_ = [('cb', DWORD),
              ('PageFaultCount', DWORD),
              ('PeakWorkingSetSize', SIZE_T),
              ('WorkingSetSize', SIZE_T),
              ('QuotaPeakPagedPoolUsage', SIZE_T),
              ('QuotaPagedPoolUsage', SIZE_T),
              ('QuotaPeakNonPagedPoolUsage', SIZE_T),
              ('QuotaNonPagedPoolUsage', SIZE_T),
              ('PagefileUsage', SIZE_T),
              ('PeakPagefileUsage', SIZE_T)]
# Python Class which is converted to a C struct
# It encapsulates data about a heap space
# see http://msdn.microsoft.com/en-us/library/ms683443(VS.85).aspx
class _HEAPENTRY32(ctypes.Structure):
  """Win32 HEAPENTRY32, one block within a process heap.
  See http://msdn.microsoft.com/en-us/library/ms683443(VS.85).aspx"""
  _fields_ = [('dwSize', SIZE_T),
              ('hHandle', HANDLE),
              ('dwAddress', ULONG_PTR),
              ('dwBlockSize', SIZE_T),
              ('dwFlags', DWORD),
              ('dwLockCount', DWORD),
              ('dwResvd', DWORD),
              ('th32ProcessID', DWORD),
              ('th32HeapID', ULONG_PTR)]
# Python Class which is converted to a C struct
# It encapsulates data about a processes heaps
# see http://msdn.microsoft.com/en-us/library/ms683449(VS.85).aspx
class _HEAPLIST32(ctypes.Structure):
  """Win32 HEAPLIST32, one entry in a process's heap list.
  See http://msdn.microsoft.com/en-us/library/ms683449(VS.85).aspx"""
  _fields_ = [('dwSize', SIZE_T),
              ('th32ProcessID', DWORD),
              ('th32HeapID', ULONG_PTR),
              ('dwFlags', DWORD)]
# Python Class which is converted to a C struct
# It encapsulates data about a newly created process
# see http://msdn.microsoft.com/en-us/library/ms684873(VS.85).aspx
class _PROCESS_INFORMATION(ctypes.Structure):
  """Win32 PROCESS_INFORMATION, filled in by CreateProcess with the new
  process/thread handles and ids.
  See http://msdn.microsoft.com/en-us/library/ms684873(VS.85).aspx"""
  _fields_ = [('hProcess', HANDLE),
              ('hThread', HANDLE),
              ('dwProcessId', DWORD),
              ('dwThreadId', DWORD)]
# Python Class which is converted to a C struct
# It encapsulates data about a Processes
# after it is created
# see http://msdn.microsoft.com/en-us/library/ms686331(VS.85).aspx
class _STARTUPINFO(ctypes.Structure):
  """Win32 STARTUPINFO, passed to CreateProcess to describe the new
  process's window and standard handles.
  See http://msdn.microsoft.com/en-us/library/ms686331(VS.85).aspx

  NOTE(review): per MSDN, wShowWindow is a WORD and lpReserved2 is a
  pointer (LPBYTE); here they are declared DWORD and WORD respectively,
  so the layout/size may not match the real struct — confirm against
  the CreateProcessW call before relying on any field past dwFlags.
  """
  _fields_ = [('cb', DWORD),
              ('lpReserved', LPTSTR),
              ('lpDesktop', LPTSTR),
              ('lpTitle', LPTSTR),
              ('dwX', DWORD),
              ('dwY', DWORD),
              ('dwXSize', DWORD),
              ('dwYSize', DWORD),
              ('dwXCountChars', DWORD),
              ('dwYCountChars', DWORD),
              ('dwFillAttribute', DWORD),
              ('dwFlags', DWORD),
              ('wShowWindow', DWORD),
              ('cbReserved2', WORD),
              ('lpReserved2', WORD),
              ('hStdInput', HANDLE),
              ('hStdOutput', HANDLE),
              ('hStdError', HANDLE)]
# Python Class which is converted to a C struct
# It encapsulates data about global memory
# This version is for Windows Desktop, and is not limited to 4 gb of ram
# see http://msdn.microsoft.com/en-us/library/aa366770(VS.85).aspx
class _MEMORYSTATUSEX(ctypes.Structure):
  """Win32 MEMORYSTATUSEX for GlobalMemoryStatusEx (desktop); 64-bit
  fields, so not limited to 4 GB of RAM.
  See http://msdn.microsoft.com/en-us/library/aa366770(VS.85).aspx"""
  _fields_ = [('dwLength', DWORD),
              ('dwMemoryLoad', DWORD),
              ('ullTotalPhys', DWORDLONG),
              ('ullAvailPhys', DWORDLONG),
              ('ullTotalPageFile', DWORDLONG),
              ('ullAvailPageFile', DWORDLONG),
              ('ullTotalVirtual', DWORDLONG),
              ('ullAvailVirtual', DWORDLONG),
              ('ullAvailExtendedVirtual', DWORDLONG)]
# Python Class which is converted to a C struct
# It encapsulates data about global memory
# This version is for WinCE (< 4gb ram)
# see http://msdn.microsoft.com/en-us/library/bb202730.aspx
class _MEMORYSTATUS(ctypes.Structure):
  """Win32 MEMORYSTATUS for GlobalMemoryStatus (WinCE, < 4 GB of RAM).
  See http://msdn.microsoft.com/en-us/library/bb202730.aspx"""
  _fields_ = [('dwLength', DWORD),
              ('dwMemoryLoad', DWORD),
              ('dwTotalPhys', DWORD),
              ('dwAvailPhys', DWORD),
              ('dwTotalPageFile', DWORD),
              ('dwAvailPageFile', DWORD),
              ('dwTotalVirtual', DWORD),
              ('dwAvailVirtual', DWORD)]
# Exceptions
class DeadThread(Exception):
  """Gets thrown when a Thread Handle cannot be opened."""
  pass
class DeadProcess(Exception):
  """Gets thrown when a Process Handle cannot be opened.
  A DeadThread may be escalated to DeadProcess (see timeout_process)."""
  pass
class FailedMutex(Exception):
  """Gets thrown when a Mutex cannot be created, opened, or released."""
  pass
# Global variables
# For each Mutex, record the lock count to properly release
_mutex_lock_count = {}
# High level functions
# When getProcessTheads is called, it iterates through all the
# system threads, and this global counter stores the thead count
_system_thread_count = 0
# Returns list with the Thread ID of all threads associated with the pid
def get_process_threads(pid):
  """
  <Purpose>
    Many of the Windows functions for altering processes and threads require
    thread-based handles, as opposed to process based, so this function
    gets all of the threads associated with a given process.
  <Arguments>
    pid:
      The Process Identifier number for which the associated threads should be returned
  <Side Effects>
    Refreshes the module-level _system_thread_count with the total number
    of threads seen in the snapshot (used by get_system_thread_count).
  <Returns>
    Array of Thread Identifiers, these are not thread handles
  """
  global _system_thread_count
  # Mobile CE requires a different snapshot structure
  if MobileCE:
    thread_class = _THREADENTRY32CE
  else:
    thread_class = _THREADENTRY32
  threads = [] # List object for threads
  current_thread = thread_class() # Current Thread Pointer
  # dwSize must be set before the snapshot is read
  current_thread.dwSize = ctypes.sizeof(thread_class)
  # Create Handle to snapshot of all system threads
  handle = _create_snapshot(TH32CS_SNAPTHREAD, 0)
  # Check if handle was created successfully
  if handle == INVALID_HANDLE_VALUE:
    _close_handle( handle )
    return []
  # Attempt to read snapshot
  if not _first_thread( handle, ctypes.pointer(current_thread)):
    _close_handle( handle )
    return []
  # Reset the global counter
  _system_thread_count = 0
  # Loop through threads, check for threads associated with the right process
  more_threads = True
  while (more_threads):
    # Increment the global counter
    _system_thread_count += 1
    # Check if current thread belongs to the process we're looking for
    if current_thread.th32OwnerProcessID == ctypes.c_ulong(pid).value:
      threads.append(current_thread.th32thread_id)
    more_threads = _next_thread(handle, ctypes.pointer(current_thread))
  # Cleanup snapshot (CE has a dedicated close call in addition to the handle)
  if MobileCE:
    _close_snapshot(handle)
  _close_handle(handle)
  return threads
def get_system_thread_count():
  """
  <Purpose>
    Returns the number of active threads running on the system.
  <Returns>
    The thread count.
  """
  # Walking any process's thread snapshot refreshes the module-level
  # system-wide counter; use our own pid for the walk.
  get_process_threads(os.getpid())
  return _system_thread_count
# Returns a handle for thread_id
def get_thread_handle(thread_id):
  """
  <Purpose>
    Returns a thread handle for a given thread identifier. This is useful
    because a thread identifier cannot be used directly for most operations.
  <Arguments>
    thread_id:
      The Thread Identifier, for which a handle is returned
  <Side Effects>
    If running on a mobile CE platform, execution permissions will be elevated.
    close_thread_handle must be called before get_thread_handle is called again,
    or permissions will not be set to their original level.
  <Exceptions>
    DeadThread on bad parameters or general error
  <Returns>
    Thread Handle
  """
  # Check if it is CE
  if MobileCE:
    # Use the CE specific function (ids and handles are interchangeable there)
    handle = _open_thread_ce(thread_id)
  else:
    # Open handle to thread with suspend/resume/query rights
    handle = _open_thread(THREAD_HANDLE_RIGHTS, 0, thread_id)
  # Check for a successful handle
  if handle:
    return handle
  else: # Raise exception on failure
    raise DeadThread, "Error opening thread handle! thread_id: " + str(thread_id) + " Error Str: " + str(ctypes.WinError())
# Closes a thread handle
def close_thread_handle(thread_handle):
  """
  <Purpose>
    Closes a given thread handle.
  <Arguments>
    thread_handle:
      The Thread handle which is closed
  """
  # Check if it is CE
  if MobileCE:
    # Opening a thread raises permissions,
    # so we need to revert to default
    _revert_permissions();
  # Close thread handle
  _close_handle(thread_handle)
# Suspend a thread with given thread_id
def suspend_thread(thread_id):
  """
  <Purpose>
    Suspends the execution of a thread.
    Will not suspend threads on the exclusion whitelist.
  <Arguments>
    thread_id:
      The thread identifier for the thread to be suspended.
  <Exceptions>
    DeadThread on bad parameters or general error.
  <Side Effects>
    Will suspend execution of the thread until resumed or terminated.
  <Returns>
    True on success, False on failure
  """
  # Whitelisted threads are never suspended; report success.
  if thread_id in EXCLUDED_THREADS:
    return True
  handle = get_thread_handle(thread_id)
  status = _suspend_thread(handle)
  close_thread_handle(handle)
  # SuspendThread returns -1 on failure, anything else on success.
  return status != -1
# Resume a thread with given thread_id
def resume_thread(thread_id):
  """
  <Purpose>
    Resumes the execution of a thread.
  <Arguments>
    thread_id:
      The thread identifier for the thread to be resumed
  <Exceptions>
    DeadThread on bad parameter or general error.
  <Side Effects>
    Will resume execution of a thread.
  <Returns>
    True on success, False on failure
  """
  handle = get_thread_handle(thread_id)
  status = _resume_thread(handle)
  close_thread_handle(handle)
  # ResumeThread returns -1 on failure, anything else on success.
  return status != -1
# Suspend a process with given pid
def suspend_process(pid):
  """
  <Purpose>
    Instead of manually getting a list of threads for a process and individually
    suspending each, this function will do the work transparently.
  <Arguments>
    pid:
      The Process Identifier number to be suspended.
  <Side Effects>
    Suspends the given process indefinitely.
  <Returns>
    True on success, False on failure
  """
  # Suspend each of the process's threads serially, retrying each one a
  # bounded number of times before declaring failure.
  for tid in get_process_threads(pid):
    for _attempt in range(ATTEMPT_MAX + 1):
      try:
        if suspend_thread(tid):
          break
      except DeadThread:
        # A dead thread is as good as a suspended one; move on.
        break
    else:
      # Every attempt failed for this thread.
      return False
  return True
# Resume a process with given pid
def resume_process(pid):
  """
  <Purpose>
    Instead of manually resuming each thread in a process, this function
    handles that transparently.
  <Arguments>
    pid:
      The Process Identifier to be resumed.
  <Side Effects>
    Resumes thread execution
  <Returns>
    True on success, False on failure
  """
  # Resume each of the process's threads, retrying each one a bounded
  # number of times before declaring failure.
  for tid in get_process_threads(pid):
    for _attempt in range(ATTEMPT_MAX + 1):
      try:
        if resume_thread(tid):
          break
      except DeadThread:
        # A dead thread cannot be woken; treat it as done and continue.
        break
    else:
      # Every attempt failed for this thread.
      return False
  return True
# Suspends a process and restarts after a given time interval
def timeout_process(pid, stime):
  """
  <Purpose>
    Calls suspend_process and resume_process with a specified period of sleeping.
  <Arguments>
    pid:
      The process identifier to timeout execution.
    stime:
      The time period in seconds to timeout execution.
  <Exceptions>
    DeadProcess if there is a critical problem sleeping or resuming a thread.
  <Side Effects>
    Timeouts the execution of the process for the specified interval.
    The timeout period is blocking, and will cause a general timeout in the
    calling thread.
  <Returns>
    True on success, False on failure.
  """
  if stime==0: # Don't waste time
    return True
  try:
    # Attempt to suspend process, return immediately on failure
    if suspend_process(pid):
      # Sleep for user defined period
      time.sleep (stime)
      # Attempt to resume process and return whether that succeeded
      return resume_process(pid)
    else:
      return False
  except DeadThread: # Escalate DeadThread to DeadProcess, because that is the underlying cause
    raise DeadProcess, "Failed to sleep or resume a thread!"
# Sets the current threads priority level
def set_current_thread_priority(priority=THREAD_PRIORITY_NORMAL,exclude=True):
  """
  <Purpose>
    Sets the priority level of the currently executing thread.
  <Arguments>
    priority:
      Thread priority level. Must be a predefined constant.
      See THREAD_PRIORITY_NORMAL, THREAD_PRIORITY_ABOVE_NORMAL and THREAD_PRIORITY_HIGHEST
    exclude:
      If True, the thread will not be put to sleep when compensating for CPU use.
  <Exceptions>
    See get_thread_handle
  <Returns>
    True on success, False on failure.
  """
  global EXCLUDED_THREADS
  # Get thread identifier
  thread_id = _current_thread_id()
  # Check if we should exclude this thread
  if exclude:
    # Replace the exclusion list wholesale (copy, append, swap) so a
    # concurrently running CPU scheduler never sees a half-updated list.
    new_list = EXCLUDED_THREADS[:]
    new_list.append(thread_id)
    EXCLUDED_THREADS = new_list
  # Open handle to thread
  handle = get_thread_handle(thread_id)
  # Try to change the priority
  status = _set_thread_priority(handle, priority)
  # Close thread handle
  close_thread_handle(handle)
  # SetThreadPriority returns 0 on failure
  if status == 0:
    return False
  else:
    return True
# Gets a process handle
def get_process_handle(pid):
  """
  <Purpose>
    Get a process handle for a specified process identifier
  <Arguments>
    pid:
      The process identifier for which a handle is returned.
  <Exceptions>
    DeadProcess on bad parameter or general error.
  <Returns>
    Process handle
  """
  # Get handle to process with set/query/terminate/synchronize rights
  handle = _open_process(PROCESS_SET_QUERY_AND_TERMINATE, 0, pid)
  # Check if we successfully got a handle
  if handle:
    return handle
  else: # Raise exception on failure
    raise DeadProcess, "Error opening process handle! Process ID: " + str(pid) + " Error Str: " + str(ctypes.WinError())
# Launches a new process
def launch_process(application,cmdline = None, priority = NORMAL_PRIORITY_CLASS):
  """
  <Purpose>
    Launches a new process.
  <Arguments>
    application:
      The path to the application to be started
    cmdline:
      The command line parameters that are to be used
    priority:
      The priority of the process. See NORMAL_PRIORITY_CLASS and HIGH_PRIORITY_CLASS
  <Side Effects>
    A new process is created
  <Returns>
    Process ID on success, None on failure.
  """
  # Create struct to hold process info
  process_info = _PROCESS_INFORMATION()
  process_info_addr = ctypes.pointer(process_info)
  # CreateProcessW is the wide-character API, so the command line must be unicode
  if not (cmdline == None):
    cmdline_param = unicode(cmdline)
  else:
    cmdline_param = None
  # NOTE(review): on desktop, a cmdline of None makes the concatenation
  # below raise TypeError — callers appear to always pass cmdline; confirm.
  # Adjust for CE
  if MobileCE:
    # Priority classes and STARTUPINFO are not supported on CE
    priority = 0
    window_info_addr = 0
    # Always use absolute path
    application = unicode(os.path.abspath(application))
  else:
    # For some reason, Windows Desktop uses the first part of the second parameter as the
    # Application... This is documented on MSDN under CreateProcess in the user comments
    # Create struct to hold window info
    # NOTE(review): window_info.cb is left zero-initialized; MSDN says cb
    # should be set to sizeof(STARTUPINFO) — confirm.
    window_info = _STARTUPINFO()
    window_info_addr = ctypes.pointer(window_info)
    cmdline_param = unicode(application) + " " + cmdline_param
    application = None
  # Launch process, and save status
  status = _create_process(
    application,
    cmdline_param,
    None,
    None,
    False,
    priority,
    None,
    None,
    window_info_addr,
    process_info_addr)
  # Did we succeed?
  if status:
    # Close handles that we don't need
    _close_handle(process_info.hProcess)
    _close_handle(process_info.hThread)
    # Return pid
    return process_info.dwProcessId
  else:
    return None
# Helper function to launch a python script with some parameters
def launch_python_script(script, params=""):
  """
  <Purpose>
    Launches a python script with parameters
  <Arguments>
    script:
      The python script to be started. This should be an absolute path (and quoted if it contains spaces).
    params:
      A string command line parameter for the script
  <Side Effects>
    A new process is created
  <Returns>
    Process ID on success, None on failure.
  """
  # Get all repy constants
  import repy_constants
  # Build "interpreter-flags script params" as one command line string
  cmdline = " ".join((repy_constants.PYTHON_DEFAULT_FLAGS, script, params))
  return launch_process(repy_constants.PATH_PYTHON_INSTALL, cmdline)
# Sets the current process priority level
def set_current_process_priority(priority=PROCESS_NORMAL_PRIORITY_CLASS):
  """
  <Purpose>
    Sets the priority level of the currently executing process.
  <Arguments>
    Process priority level. Must be a predefined constant.
    See PROCESS_NORMAL_PRIORITY_CLASS, PROCESS_BELOW_NORMAL_PRIORITY_CLASS and PROCESS_ABOVE_NORMAL_PRIORITY_CLASS
  <Exceptions>
    See get_process_handle
  <Returns>
    True on success, False on failure.
  """
  # Priority levels are not supported on CE; report success
  if MobileCE:
    return True

  # Open a handle to our own process
  own_handle = get_process_handle(os.getpid())

  # Ask Windows to change the priority class, then release the handle
  result = _set_process_priority(own_handle, priority)
  _close_handle(own_handle)

  # A zero result from the API indicates failure
  return result != 0
# Kill a process with specified pid
def kill_process(pid):
"""
<Purpose>
Terminates a process.
<Arguments>
pid:
The process identifier to be killed.
<Exceptions>
DeadProcess on bad parameter or general error.
<Side Effects>
Terminates the process
<Returns>
True on success, false on failure.
"""
try:
# Get process handle
handle = get_process_handle(pid)
except DeadProcess: # This is okay, since we're trying to kill it anyways
return True
dead = False # Status of Process we're trying to kill
attempt = 0 # Attempt Number
# Keep hackin' away at it
while not dead:
if (attempt > ATTEMPT_MAX):
raise DeadProcess, "Failed to kill process! Process ID: " + str(pid) + " Error Str: " + str(ctypes.WinError())
# Increment attempt count
attempt = attempt + 1
# Attempt to terminate process
# 0 is return code for failure, convert it to True/False
dead = not 0 == _terminate_process(handle, 0)
# Close Process Handle
_close_handle(handle)
return True
# Get info about a processes CPU time, normalized to seconds
def get_process_cpu_time(pid):
  """
  <Purpose>
    See process_times
  <Arguments>
    See process_times
  <Exceptions>
    See process_times
  <Returns>
    The amount of CPU time used by the kernel and user in seconds.
  """
  # FILETIME values are in units of 100 nanoseconds
  HUNDRED_NS_PER_SECOND = 10000000.0

  # Fetch the raw times and convert kernel + user to seconds
  t = process_times(pid)
  return (t['KernelTime'] + t['UserTime']) / HUNDRED_NS_PER_SECOND
# Get information about a process CPU use times
def process_times(pid):
  """
  <Purpose>
    Gets information about a processes CPU time utilization.
    Because Windows CE does not keep track of this information at a process level,
    if a thread terminates (belonging to the pid), then it is possible for the
    KernelTime and UserTime to be lower than they were previously.
  <Arguments>
    pid:
      The process identifier about which the information is returned
  <Exceptions>
    DeadProcess on bad parameter or general error.
  <Returns>
    Dictionary with the following indices:
    CreationTime: the time at which the process was created
    KernelTime: the execution time of the process in the kernel
    UserTime: the time spent executing user code
    All values are the low 32 bits of the corresponding FILETIME (100ns units).
  """
  # Check if it is CE
  if MobileCE:
    # Use the CE specific function (emulates GetProcessTimes via thread times)
    return _process_times_ce(pid)

  # Open process handle
  handle = get_process_handle(pid)

  # Create all the structures needed to make API Call
  creation_time = _FILETIME()
  exit_time = _FILETIME()
  kernel_time = _FILETIME()
  user_time = _FILETIME()

  # Pass all the structures as pointers into process_times
  _process_times(handle, ctypes.pointer(creation_time), ctypes.pointer(exit_time), ctypes.pointer(kernel_time), ctypes.pointer(user_time))

  # Close Process Handle
  _close_handle(handle)

  # Extract the values from the structures, and return then in a dictionary
  # NOTE(review): only dwLowDateTime is read; long-running processes may
  # overflow into dwHighDateTime, which is ignored here -- confirm intended.
  return {"CreationTime":creation_time.dwLowDateTime,"KernelTime":kernel_time.dwLowDateTime,"UserTime":user_time.dwLowDateTime}
# Get the CPU time of the current thread
def get_current_thread_cpu_time():
"""
<Purpose>
Gets the total CPU time for the currently executing thread.
<Exceptions>
An Exception will be raised if the underlying system call fails.
<Returns>
A floating amount of time in seconds.
"""
# Get our thread identifier
thread_id = _current_thread_id()
# Open handle to thread
handle = get_thread_handle(thread_id)
# Create all the structures needed to make API Call
creation_time = _FILETIME()
exit_time = _FILETIME()
kernel_time = _FILETIME()
user_time = _FILETIME()
# Pass all the structures as pointers into threadTimes
res = _thread_times(handle, ctypes.pointer(creation_time), ctypes.pointer(exit_time), ctypes.pointer(kernel_time), ctypes.pointer(user_time))
# Close thread Handle
close_thread_handle(handle)
# Sum up the cpu time
time_sum = kernel_time.dwLowDateTime
time_sum += user_time.dwLowDateTime
# Units are 100 ns, so divide by 10M
time_sum /= 10000000.0
# Check the result, error if result is 0
if res == 0:
raise Exception,(res, _get_last_error(), "Error getting thread CPU time! Error Str: " + str(ctypes.WinError()))
# Return the time
return time_sum
# Wait for a process to exit
def wait_for_process(pid):
"""
<Purpose>
Blocks execution until the specified Process finishes execution.
<Arguments>
pid:
The process identifier to wait for
"""
try:
# Get process handle
handle = get_process_handle(pid)
except DeadProcess:
# Process is likely dead, so just return
return
# Pass in code as a pointer to store the output
status = _wait_for_single_object(handle, INFINITE)
if status != WAIT_OBJECT_0:
raise EnvironmentError, "Failed to wait for Process!"
# Close the Process Handle
_close_handle(handle)
# Get the exit code of a process
def process_exit_code(pid):
  """
  <Purpose>
    Get the exit code of a process
  <Arguments>
    pid:
      The process identifier for which the exit code is returned.
  <Returns>
    The process exit code, or 0 on failure.
  """
  try:
    # Get process handle
    handle = get_process_handle(pid)
  except DeadProcess:
    # Process is likely dead, so give anything other than 259
    return 0

  # The API writes the exit code through this pointer; 0 by default
  exit_code = ctypes.c_int(0)
  _process_exit_code(handle, ctypes.pointer(exit_code))

  # Close the Process Handle and hand back the code
  _close_handle(handle)
  return exit_code.value
# Get information on process memory use
def process_memory_info(pid):
  """
  <Purpose>
    Get information about a processes memory usage.
    On Windows CE, all of the dictionary indices will return the same
    value. This is due to the imprecision of CE's memory tracking,
    and all of the indices are only returned for compatibility reasons.
  <Arguments>
    pid:
      The process identifier for which memory info is returned
  <Exceptions>
    DeadProcess on bad parameters or general error.
  <Returns>
    Dictionary with memory data associated with description.
    Keys mirror the fields of the Win32 PROCESS_MEMORY_COUNTERS structure.
  """
  # Check if it is CE
  if MobileCE:
    # Use the CE specific function (walks the heap lists instead)
    return _process_memory_info_ce(pid)

  # Open process Handle
  handle = get_process_handle(pid)

  # Define structure to hold memory data
  meminfo = _PROCESS_MEMORY_COUNTERS()

  # Pass pointer to meminfo to processMemory to store the output;
  # the API also needs the structure size to validate the buffer
  _process_memory(handle, ctypes.pointer(meminfo), ctypes.sizeof(_PROCESS_MEMORY_COUNTERS))

  # Close Process Handle
  _close_handle(handle)

  # Extract data from meminfo structure and return as python
  # dictionary structure
  return {'PageFaultCount':meminfo.PageFaultCount,
          'PeakWorkingSetSize':meminfo.PeakWorkingSetSize,
          'WorkingSetSize':meminfo.WorkingSetSize,
          'QuotaPeakPagedPoolUsage':meminfo.QuotaPeakPagedPoolUsage,
          'QuotaPagedPoolUsage':meminfo.QuotaPagedPoolUsage,
          'QuotaPeakNonPagedPoolUsage':meminfo.QuotaPeakNonPagedPoolUsage,
          'QuotaNonPagedPoolUsage':meminfo.QuotaNonPagedPoolUsage,
          'PagefileUsage':meminfo.PagefileUsage,
          'PeakPagefileUsage':meminfo.PeakPagefileUsage}
# INFO: Pertaining to _mutex_lock_count:
# With Mutexes, each time they are acquired, they must be released the same number of times.
# For this reason we account for the number of times a mutex has been acquired, and release_mutex
# will call the underlying release enough that the mutex will actually be released.
# The entry for _mutex_lock_count is initialized in create_mutex, incremented in acquire_mutex
# and zero'd out in release_mutex
# Creates and returns a handle to a Mutex
def create_mutex(name):
  """
  <Purpose>
    Creates and returns a handle to a mutex
  <Arguments>
    name:
      The name of the mutex to be created
  <Exceptions>
    FailedMutex on bad parameters or failure to create mutex.
  <Side Effects>
    Creates a global mutex and retains control.
    Registers the handle in _mutex_lock_count with a count of 0.
  <Returns>
    handle to the mutex.
  """
  # Attempt to create Mutex (not initially owned, with the given name)
  handle = _create_mutex(None, 0, unicode(name))

  # Check for a successful handle
  if not handle == False:
    # Try to acquire the mutex for 200 milliseconds, check if it is abandoned
    val = _wait_for_single_object(handle, 200)

    # If the mutex is signaled, or abandoned release it
    # If it was abandoned, it will become normal now
    if (val == WAIT_OBJECT_0) or (val == WAIT_ABANDONED):
      _release_mutex(handle)

    # Initialize the lock count to 0, since it has not been signaled yet.
    _mutex_lock_count[handle] = 0

    return handle
  else: # Raise exception on failure
    raise FailedMutex, (_get_last_error(), "Error creating mutex! Mutex name: " + str(name) + " Error Str: " + str(ctypes.WinError()))
# Waits for specified interval to acquire Mutex
# time should be in milliseconds
def acquire_mutex(handle, time):
  """
  <Purpose>
    Acquires exclusive control of a mutex
  <Arguments>
    handle:
      Handle to a mutex object
    time:
      the time to wait in milliseconds to get control of the mutex
  <Side Effects>
    If successful, the calling thread had exclusive control of the mutex
    and the handle's entry in _mutex_lock_count is incremented.
  <Returns>
    True if the mutex is acquired, false otherwise.
  """
  # Wait up to time to acquire lock, fail otherwise
  val = _wait_for_single_object(handle, time)

  # WAIT_OBJECT_0 is returned on success, WAIT_ABANDONED if the previous
  # owner died holding it (ownership is still granted in that case)
  acquired = (val == WAIT_OBJECT_0) or (val == WAIT_ABANDONED)

  # BUG FIX: only count successful acquisitions. The old code incremented
  # the count unconditionally, so a failed wait caused release_mutex to
  # attempt one extra (non-owned) release.
  if acquired:
    _mutex_lock_count[handle] += 1

  return acquired
# Releases a mutex
def release_mutex(handle):
  """
  <Purpose>
    Releases control of a mutex
  <Arguments>
    handle:
      Handle to the mutex object to be release
  <Exceptions>
    FailedMutex if a general error is occurred when releasing the mutex.
    This is not raised if the mutex is not owned, and a release is attempted.
  <Side Effects>
    If controlled previous to calling, then control will be given up
    and the handle's entry in _mutex_lock_count is reset to 0.
  <Returns>
    None.
  """
  # Get the lock count (number of recorded acquisitions; see the
  # _mutex_lock_count note above create_mutex)
  count = _mutex_lock_count[handle]

  # 0 out the count
  _mutex_lock_count[handle] = 0

  # Release once per recorded acquisition so the mutex is fully released
  for i in range(0, count):
    try:
      release = _release_mutex(handle)

      # 0 return value means failure
      if release == 0:
        raise FailedMutex, (_get_last_error(), "Error releasing mutex! Mutex id: " + str(handle) + " Error Str: " + str(ctypes.WinError()))
    except FailedMutex, e:
      # e[0] is the error code from _get_last_error()
      if (e[0] == 288): # 288 is for non-owned mutex, which is ok
        pass
      else:
        raise
def exists_outgoing_network_socket(localip, localport, remoteip, remoteport):
  """
  <Purpose>
    Determines if there exists a network socket with the specified unique tuple.
    Assumes TCP.
    * Not supported on Windows Mobile.
  <Arguments>
    localip: The IP address of the local socket
    localport: The port of the local socket
    remoteip:  The IP of the remote host
    remoteport: The port of the remote host
  <Returns>
    A Tuple, indicating the existence and state of the socket. E.g. (Exists (True/False), State (String or None))
  """
  if MobileCE:
    return False

  # All four parts of the connection tuple are required
  if not (localip and localport and remoteip and remoteport):
    return (False, None)

  # Search tokens; trailing space so port 8 won't match 80
  local_token = localip + ":" + str(localport) + " "
  remote_token = remoteip + ":" + str(remoteport) + " "

  # Run netstat and collect its output
  netstat_process = portable_popen.Popen(["netstat", "-an"])
  netstat_output, _ = netstat_process.communicate()

  # Narrow the output down to TCP lines mentioning both endpoints
  candidate_lines = textops.textops_rawtexttolines(netstat_output, linedelimiter="\r\n")
  candidate_lines = textops.textops_grep(local_token, candidate_lines)
  candidate_lines = textops.textops_grep(remote_token, candidate_lines)
  candidate_lines = textops.textops_grep("tcp ", candidate_lines, case_sensitive=False)

  # The local socket must appear before the remote one on the line;
  # if the remote endpoint comes first the connection is inbound.
  for line in candidate_lines:
    local_index = line.find(local_token)
    remote_index = line.find(remote_token)
    if local_index != -1 and local_index <= remote_index:
      # Normalize whitespace; the last field is the TCP state
      fields = line.replace("\t", "").strip("\r\n").split()
      return (True, fields[-1])

  # No matching outgoing connection found
  return (False, None)
def exists_listening_network_socket(ip, port, tcp):
  """
  <Purpose>
    Determines if there exists a network socket with the specified ip and port which is the LISTEN state.
    *Note: Not currently supported on Windows CE. It will always return False on this platform.
  <Arguments>
    ip: The IP address of the listening socket
    port: The port of the listening socket
    tcp: Is the socket of TCP type, else UDP
  <Returns>
    True or False.
  """
  if MobileCE:
    return False

  # Both the ip and the port are required
  if not (ip and port):
    return False

  # TCP sockets must be in the LISTEN state; UDP is stateless, so any
  # bound UDP port counts.
  if tcp:
    grep_terms = ["tcp", "LISTEN"]
  else:
    grep_terms = ["udp"]

  # Run netstat and collect its output
  netstat_process = portable_popen.Popen(["netstat", "-an"])
  netstat_output, _ = netstat_process.communicate()

  # Filter for the endpoint (trailing space so port 8 won't match 80),
  # then for each protocol/state term
  matching_lines = textops.textops_rawtexttolines(netstat_output, linedelimiter="\r\n")
  matching_lines = textops.textops_grep(ip + ':' + str(port) + ' ', matching_lines)
  for term in grep_terms:
    matching_lines = textops.textops_grep(term, matching_lines, case_sensitive=False)

  # Any surviving line means such a socket exists
  return len(matching_lines) > 0
def _fetch_ipconfig_infomation():
  """
  <Purpose>
    Fetch's the information from ipconfig and stores it in a useful format.
    * Not Supported on Windows Mobile.
  <Returns>
    A dictionary object: {section heading: {key: value}}, where each section
    heading is a top-level line of "ipconfig /all" output (e.g. an adapter
    name or the generic "Windows IP Configuration" header).
  """
  # Launch up a shell, get the feedback
  process = portable_popen.Popen(["ipconfig", "/all"])

  # Get the output
  outputdata = process.stdout.readlines()

  # Close the pipe
  process.stdout.close()

  # Stores the info
  info_dict = {}

  # Store the current container (section the indented lines belong to)
  current_container = None

  # Process each line
  for line in outputdata:
    # Strip unwanted characters
    line = line.strip("\r\n")

    # Check if this line is blank, skip it
    if line.strip() == "":
      continue

    # This is a top-level line if it does not start with a space
    if not line.startswith(" "):
      # Do some cleanup (headings end with " :" in ipconfig output)
      line = line.strip(" :")

      # Check if this exists in the top return dictionary, if not add it
      if line not in info_dict:
        info_dict[line] = {}

      # Set the current container
      current_container = line

    # Otherwise, this line just contains some information
    else:
      # Check if we are in a container
      if not current_container:
        continue

      # Cleanup: drop the ". " leader dots between key and value
      line = line.strip()
      line = line.replace(". ", "")

      # Explode on the colon (split once only; values may contain colons)
      (key, value) = line.split(":",1)

      # More cleanup
      key = key.strip()
      value = value.strip()

      # Store this
      info_dict[current_container][key] = value

  # Return everything
  return info_dict
def get_available_interfaces():
  """
  <Purpose>
    Returns a list of available network interfaces.
    * Not Supported on Windows Mobile.
  <Returns>
    An array of string interfaces
  """
  if MobileCE:
    return []

  # Every top-level ipconfig section except the generic header names
  # an interface
  section_names = _fetch_ipconfig_infomation().keys()
  return [name for name in section_names if name != "Windows IP Configuration"]
def get_interface_ip_addresses(interfaceName):
  """
  <Purpose>
    Returns the IP address associated with the interface.
    * Not Supported on Windows Mobile.
  <Arguments>
    interfaceName: The string name of the interface, e.g. eth0
  <Returns>
    A list of IP addresses associated with the interface.
  """
  if MobileCE:
    return []

  # Look up the interface section and its "IP Address" entry; an unknown
  # interface or a missing address both yield the empty list
  ipconfig_data = _fetch_ipconfig_infomation()
  try:
    return [ipconfig_data[interfaceName]["IP Address"]]
  except KeyError:
    return []
# Windows CE Stuff
# Internal function, not public
# Get information about a process CPU use times
# Windows CE does not have a GetProcessTimes function, so we will emulate it
def _process_times_ce(pid):
  """Emulate GetProcessTimes on Windows CE by summing the times of every
  thread belonging to the process.  Returns the same dictionary shape as
  process_times (low 32 bits of each FILETIME).
  """
  # Get List of threads related to Process
  threads = get_process_threads(pid)

  # Create all the structures needed to make API Call
  creation_time = _FILETIME()
  exit_time = _FILETIME()
  kernel_time = _FILETIME()
  user_time = _FILETIME()

  # Create counters for each category
  # Only adds the "low date time" (see _FILETIME()), since thats what we return
  creation_time_sum = 0
  exit_time_sum = 0 # We don't return this, but we keep it anyways
  kernel_time_sum = 0
  user_time_sum = 0

  # Get the process times for each thread
  for t in threads:
    # Open handle to thread
    handle = get_thread_handle(t)

    # Pass all the structures as pointers into threadTimes
    # (the same structures are reused and overwritten on every iteration)
    _thread_times(handle, ctypes.pointer(creation_time), ctypes.pointer(exit_time), ctypes.pointer(kernel_time), ctypes.pointer(user_time))

    # Close thread Handle
    close_thread_handle(handle)

    # Update all the counters
    creation_time_sum += creation_time.dwLowDateTime
    exit_time_sum += exit_time.dwLowDateTime
    kernel_time_sum += kernel_time.dwLowDateTime
    user_time_sum += user_time.dwLowDateTime

  # Return the proper values in a dictionaries
  return {"CreationTime":creation_time_sum,"KernelTime":kernel_time_sum,"UserTime":user_time_sum}
# Windows CE does not have a GetProcessMemoryInfo function,
# so memory usage may be more inaccurate
# We iterate over all of the process's heap spaces, and tally up the
# total size, and return that value for all types of usage
def _process_memory_info_ce(pid):
  """Emulate GetProcessMemoryInfo on Windows CE by walking the process's
  heap lists via a toolhelp snapshot and summing all block sizes.  The
  single total is returned under every key for interface compatibility
  with process_memory_info.  Returns {} if the snapshot cannot be read.
  """
  heap_size = 0 # Keep track of heap size
  heap_list = _HEAPLIST32() # List of heaps
  heap_entry = _HEAPENTRY32() # Current Heap entry
  # The toolhelp API requires dwSize to be initialized before each call
  heap_list.dwSize = ctypes.sizeof(_HEAPLIST32)
  heap_entry.dwSize = ctypes.sizeof(_HEAPENTRY32)

  # Create Handle to snapshot of all system threads
  handle = _create_snapshot(TH32CS_SNAPHEAPLIST, pid)

  # Check if handle was created successfully
  if handle == INVALID_HANDLE_VALUE:
    return {}

  # Attempt to read snapshot
  if not _heap_list_first( handle, ctypes.pointer(heap_list)):
    _close_snapshot(handle)
    _close_handle(handle)
    return {}

  # Loop through threads, check for threads associated with the right process
  more_heaps = True
  while (more_heaps):
    # Check if there is a heap entry here
    if _heap_first(handle, ctypes.pointer(heap_entry), heap_list.th32ProcessID, heap_list.th32HeapID):
      # Loop through available heaps
      more_entries = True
      while more_entries:
        # Increment the total heap size by the current heap size
        heap_size += heap_entry.dwBlockSize

        # Re-initialize dwSize before the next toolhelp call
        heap_entry.dwSize = ctypes.sizeof(_HEAPENTRY32)
        more_entries = _heap_next(handle, ctypes.pointer(heap_entry)) # Go to next Heap entry

    heap_list.dwSize = ctypes.sizeof(_HEAPLIST32)
    more_heaps = _heap_list_next(handle, ctypes.pointer(heap_list)) # Go to next Heap List

  # Cleanup snapshot
  _close_snapshot(handle)
  _close_handle(handle)

  # Since we only have one value, return that for all different possible sets
  return {'PageFaultCount':heap_size,
          'PeakWorkingSetSize':heap_size,
          'WorkingSetSize':heap_size,
          'QuotaPeakPagedPoolUsage':heap_size,
          'QuotaPagedPoolUsage':heap_size,
          'QuotaPeakNonPagedPoolUsage':heap_size,
          'QuotaNonPagedPoolUsage':heap_size,
          'PagefileUsage':heap_size,
          'PeakPagefileUsage':heap_size}
# Windows CE does not have a separate handle for threads
# Since handles and identifiers are interoperable, just return the ID
# Set process permissions higher or else this will fail
def _open_thread_ce(thread_id):
  """CE replacement for OpenThread: thread ids and handles are
  interchangeable on CE, so raise our process permissions (saving the old
  level for _revert_permissions) and return the id unchanged.
  """
  # Save original permissions
  global _original_permissions_ce
  _original_permissions_ce = _get_process_permissions()

  # Get full system control
  _set_current_proc_permissions(CE_FULL_PERMISSIONS)

  return thread_id
# Sets the permission level of the current process
def _set_current_proc_permissions(permission):
  """Set the permission level of the current process (thin wrapper)."""
  _set_process_permissions(permission)

# Global variable to store permissions
# Holds the level saved by _open_thread_ce, or None if never raised.
_original_permissions_ce = None
# Returns the permission level of the current process
def _get_process_permissions():
  """Return the permission level of the current process (thin wrapper)."""
  return _get_current_permissions()
# Reverts permissions to original
def _revert_permissions():
  """Restore the permission level saved by _open_thread_ce, if any."""
  global _original_permissions_ce
  # Only revert when a level was actually saved
  if _original_permissions_ce is not None:
    _set_current_proc_permissions(_original_permissions_ce)
# Returns ID of current thread on WinCE
def _current_thread_id_ce():
  """Return the id of the current thread on Windows CE.

  CE exposes the current thread id at a fixed kernel address (0xFFFFC808);
  cast that address to a ulong pointer and dereference it.
  """
  # We need to check this specific memory address
  loc = ctypes.cast(0xFFFFC808, ctypes.POINTER(ctypes.c_ulong))

  # Then follow the pointer to get the value there
  return loc.contents.value
# Over ride this for CE
if MobileCE:
  # On CE the thread id must be read from a fixed memory location, so
  # replace the generic lookup with the CE-specific implementation.
  _current_thread_id = _current_thread_id_ce
## Resource Determining Functions
# For number of CPU's check the %NUMBER_OF_PROCESSORS% Environment variable
# Determines available and used disk space
def disk_util(directory):
  """
  <Purpose>
    Gets information about disk utilization, and free space.
  <Arguments>
    directory:
      The directory to be queried. This can be a folder, or a drive root.
      If set to None, then the current directory will be used.
  <Exceptions>
    EnvironmentError on bad parameter.
  <Returns>
    Dictionary with the following indices:
    bytesAvailable: The number of bytes available to the current user
    total_bytes: The total number of bytes
    freeBytes: The total number of free bytes
  """
  # (fixed: the docstring above previously opened with a stray 4th quote)
  # Define values that need to be passed to the function
  bytes_free = ULARGE_INTEGER(0)
  total_bytes = ULARGE_INTEGER(0)
  total_free_bytes = ULARGE_INTEGER(0)

  # Allow for a Null parameter (NULL means "current directory" to the API)
  dirchk = None
  if directory is not None:
    dirchk = unicode(directory)

  status = _free_disk_space(dirchk, ctypes.pointer(bytes_free), ctypes.pointer(total_bytes), ctypes.pointer(total_free_bytes))

  # Check if we succeeded
  if status == 0:
    # BUG FIX: use str(directory) so a None directory does not raise a
    # TypeError while building the error message.
    raise EnvironmentError("Failed to determine free disk space: Directory: " + str(directory))

  return {"bytesAvailable":bytes_free.value,"total_bytes":total_bytes.value,"freeBytes":total_free_bytes.value}
# Get global memory information
def global_memory_info():
  """
  <Purpose>
    Gets information about memory utilization
  <Exceptions>
    EnvironmentError on general error.
  <Returns>
    Dictionary with the following indices:
    load: The percentage of memory in use
    totalPhysical: The total amount of physical memory
    availablePhysical: The total free amount of physical memory
    totalPageFile: The current size of the committed memory limit, in bytes. This is physical memory plus the size of the page file, minus a small overhead.
    availablePageFile: The maximum amount of memory the current process can commit, in bytes.
    totalVirtual: The size of the user-mode portion of the virtual address space of the calling process, in bytes
    availableVirtual: The amount of unreserved and uncommitted memory currently in the user-mode portion of the virtual address space of the calling process, in bytes.
  """
  # (fixed: the docstring above previously opened with a stray 4th quote)
  # Check if it is CE
  if MobileCE:
    # Use the CE specific function
    return _global_memory_info_ce()

  # Initialize the data structure; dwLength must be set for the API call
  mem_info = _MEMORYSTATUSEX() # Memory usage ints
  mem_info.dwLength = ctypes.sizeof(_MEMORYSTATUSEX)

  # Make the call, save the status
  status = _global_memory_status(ctypes.pointer(mem_info))

  # Check if we succeeded
  if status == 0:
    raise EnvironmentError("Failed to get global memory info!")

  # Return Dictionary
  return {"load":mem_info.dwMemoryLoad,
          "totalPhysical":mem_info.ullTotalPhys,
          "availablePhysical":mem_info.ullAvailPhys,
          "totalPageFile":mem_info.ullTotalPageFile,
          "availablePageFile":mem_info.ullAvailPageFile,
          "totalVirtual":mem_info.ullTotalVirtual,
          "availableVirtual":mem_info.ullAvailVirtual}
def _global_memory_info_ce():
  """CE variant of global_memory_info: uses the 32-bit MEMORYSTATUS
  structure but returns the same dictionary keys.
  """
  # Initialize the structure; dwLength must be set before the API call
  status = _MEMORYSTATUS() # Memory usage ints
  status.dwLength = ctypes.sizeof(_MEMORYSTATUS)

  # Make the call
  _global_memory_status(ctypes.pointer(status))

  # Return Dictionary
  return {"load":status.dwMemoryLoad,
          "totalPhysical":status.dwTotalPhys,
          "availablePhysical":status.dwAvailPhys,
          "totalPageFile":status.dwTotalPageFile,
          "availablePageFile":status.dwAvailPageFile,
          "totalVirtual":status.dwTotalVirtual,
          "availableVirtual":status.dwAvailVirtual}
| {
"repo_name": "sburnett/seattle",
"path": "repy/windows_api.py",
"copies": "3",
"size": "53904",
"license": "mit",
"hash": 5328903439403555000,
"line_mean": 29.4027072758,
"line_max": 167,
"alpha_frac": 0.6872217275,
"autogenerated": false,
"ratio": 3.9058039272516485,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6093025654751649,
"avg_score": null,
"num_lines": null
} |
# armor/advection/semiLagrangian.py
# to calculate advected scalar (or vector too) fields
# will develop into the semi-Lagrangian scheme
# status: still under development
import copy
import time
import os
import scipy
import numpy
import numpy as np
import numpy.ma as ma
#import matplotlib
import matplotlib.pyplot as plt
#import scipy.misc.pilutil as smp
#import numpy.fft as fft
#import shutil
#import sys
from .. import pattern
def shift(phi0, m=0, n=0):
    """
    Shift an array or a masked array by (m, n) grid points with wrap-around
    (numpy.roll semantics), returning a masked array with fill_value -999.

    m may also be an (m, n) tuple.
    """
    if not isinstance(phi0, ma.MaskedArray):
        phi0 = ma.array(phi0)
    # standard missing-data sentinel used throughout armor
    phi0.fill_value = -999
    # BUG FIX: removed the leftover debug "print phi0, m, n", which dumped
    # the entire array to stdout on every call.
    if isinstance(m, tuple):
        m, n = m
    phi0 = np.roll(phi0, m, axis=0)
    phi0 = np.roll(phi0, n, axis=1)
    return phi0
def getCoordinateGrid(m=0, n=0):
    """get the coordinate grid, [[[0,0],[0,1],[0,2]...],[[1,0],[1,1]...]]
    input: two numbers, an ordered pair, or a numpy.ndarray
    output: an (m, n, 2) array XY with XY[i, j] == (i, j)
    """
    if isinstance(m, tuple):
        m, n = m
    if isinstance(m, np.ndarray):
        m, n = m.shape
    # BUG FIX: meshgrid(range(m), range(n)) produced (n, m)-shaped grids, so
    # for non-square shapes the flattened pairs did not line up with the
    # reshape to (m, n, 2).  Swapping the arguments gives (m, n) grids whose
    # row-major flattening pairs up as (i, j).
    X, Y = np.meshgrid(range(n), range(m))
    XX = X.flatten()
    YY = Y.flatten()
    XY = [(YY[i], XX[i]) for i in range(len(XX))]
    XY = np.array(XY).reshape(m, n, 2)
    return XY
def interpolate1pt(phi, i=0, j=0):
    """Bilinearly interpolate phi at the fractional position (i, j).

    Accepts either two scalars or a single (i, j) tuple.  When the 2x2
    neighbourhood runs off the end of the array the sentinel -999 is
    returned instead.
    """
    if isinstance(i, tuple):
        i, j = i
    i0, j0 = int(i), int(j)
    fi, fj = i % 1, j % 1
    try:
        # weighted sum of the four surrounding grid points
        return (phi[i0, j0] * (1 - fi) * (1 - fj)
                + phi[i0, j0 + 1] * (1 - fi) * fj
                + phi[i0 + 1, j0] * fi * (1 - fj)
                + phi[i0 + 1, j0 + 1] * fi * fj)
    except IndexError:
        return -999
def interpolate1(phi0, Ishifted, Jshifted):
    """Interpolate the scalar field phi0 at the positions given by the
    component arrays (Ishifted, Jshifted) of a vector field.

    input:  array phi0; position arrays Ishifted, Jshifted (same shape)
    output: masked array of interpolated values (fill_value -999)
    (reference timing from the original author: ~820 seconds)
    """
    nrows = len(phi0)
    ncols = len(phi0[0])
    # start fully masked; entries are filled in point by point below
    result = ma.zeros((nrows, ncols))
    result.mask = True
    result.fill_value = -999
    for r in range(nrows):
        for c in range(ncols):
            result[r, c] = interpolate1pt(phi0, Ishifted[r, c], Jshifted[r, c])
    return result
def interpolate2(phi0, vect, scope=(9,9)):
    """interpolation with matrix operations
    see how much the speed up is
    scope = size of window to check (i.e. max speed allowed)
    default = (7,7) i.e from -3 to -3 in both i, j directions (i=y, x=j)
    input: phi0 - an armor.pattern.DBZ object - a DBZ pattern
           vect - an armor.pattern.VectorField obejct - the advection field
    output: phi2 - another armor.pattern.DBZ object

    NOTE(review): the scheme splits the advection vector into an integer
    part (U_, V_) and a fractional part (u, v); each integer offset selects
    a pre-shifted copy of the image and the fractional part bilinearly
    blends the four neighbouring shifted copies.
    """
    verbose = phi0.verbose
    # symmetric search windows around zero, e.g. scope 9 -> -4..4
    I_window = range( -(scope[0]-1)/2, (scope[0]+1)/2)
    J_window = range( -(scope[1]-1)/2, (scope[1]+1)/2)
    print "I_window, J_window =", I_window, J_window
    # 0. order of appearance: initialisation of the variables
    # 1. set up:  get the various shifted images
    # 2. compute the sums
    # ========= 0. set up  ================================================
    matrix = phi0.matrix.copy()
    width = len(matrix[0])
    height = len(matrix)
    X, Y = np.meshgrid(range(width), range(height))  #standard stuff
    I_coord, J_coord = Y, X     #switching to I, J
    shiftedDBZ = {}     # the dictionary is the simplest to implement, though an object list
                        # may be slightly quicker
    matrix2 = ma.zeros((height,width))
    U = vect.U.copy()   # the vector field
    V = vect.V.copy()
    # fractional and integer parts of the advection vector
    u = U % 1
    v = V % 1
    U_ = U - u
    V_ = V - v
    # debug
    print U, V, U_, V_
    # ========= 1. set up:  get the various matrices ===============================
    # shifted images, one per integer offset in the search window
    for i in I_window:
        for j in J_window:
            shiftedDBZ[(i,j)] = phi0.shiftMatrix(i,j)
    # ========== 2. compute the sums =====================================================
    # the square (U_==i) *(V_==j)
    #
    #    shiftedDBZ(i,j+1)  |  shiftedDBZ(i,j)
    #                 .----> _._
    #                ..       |
    #    advected pt- *
    #      ________/          ^
    #     /                   |
    #    .          __________.__
    #   shiftedDBZ(i+1,j+1)   shiftedDBZ(i+1,j)
    #
    #
    #     u(1-v)     |    uv
    #  -------------+--------------
    #   (1-u)(1-v)   |   (1-u)v
    #
    for i in I_window[1:-1]:    # search window (trimmed so i+1/j+1 stay in range)
        for j in J_window[1:-1]:
            #key line:  to compute the contributions from the shifted images
            # need to sort this out.
            #??? 2013.1.31
            if verbose:
                print "\n-----------\ni = %d, j = %d, in I_window, J_window" % (i, j)
                print shiftedDBZ[(i  ,j  )].matrix.shape,
                print shiftedDBZ[(i+1,j  )].matrix.shape,
                print shiftedDBZ[(i  ,j+1)].matrix.shape,
                print shiftedDBZ[(i+1,j+1)].matrix.shape
            # bilinear blend of the four shifted images for this offset cell
            newTerm = shiftedDBZ[(i  ,j  )].matrix * (1-v) *(1-u) + \
                      shiftedDBZ[(i+1,j  )].matrix *    v  *(1-u) + \
                      shiftedDBZ[(i  ,j+1)].matrix * (1-v) *   u  + \
                      shiftedDBZ[(i+1,j+1)].matrix *    v  *   u
            #upper right corner   i,j
            #lower right corner   i+1,j
            #upper left corner    j, j+1
            #lower left corner    i+1,j+1
            if phi0.verbose:
                print "\n.....\nnewterm", (i, j)
                print newTerm   #Debug
            if ((U_==j)*(V_==i)).sum() >0:
                print "((U_==i)*(V_==j)).sum()", ((U_==j)*(V_==i)).sum()
            # keep only the cells whose integer advection equals this offset
            newTerm *= (U_==j) *(V_==i)
            if phi0.verbose:
                print "new term\n", newTerm
            matrix2 += newTerm
            print "(i, j), matrix2.sum()=\n", (i,j), matrix2.sum()   #debug
    #??? 2013.1.31
    # wrap the result in a new DBZ object alongside derived file paths
    name = phi0.name+"_advected_by_"+vect.name
    outputPath = phi0.outputPath + "_advected_by_"+vect.name+".dat"
    dataPath = outputPath
    imagePath = phi0.outputPath + "_advected_by_"+vect.name+".png"
    phi2 = pattern.DBZ(matrix=matrix2, name=name,\
                       dt=phi0.dt, dx=phi0.dx, dy=phi0.dy, dataPath=dataPath, outputPath=outputPath,\
                       imagePath=imagePath, database=phi0.database, cmap=phi0.cmap, verbose=phi0.verbose)
    return phi2
| {
"repo_name": "yaukwankiu/armor",
"path": "advection/semiLagrangian.py",
"copies": "1",
"size": "6700",
"license": "cc0-1.0",
"hash": -6458999019293658000,
"line_mean": 35.2162162162,
"line_max": 105,
"alpha_frac": 0.4849253731,
"autogenerated": false,
"ratio": 3.201146679407549,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4186072052507549,
"avg_score": null,
"num_lines": null
} |
# armor.geometry.edges
# module for edge detecting and stuff
import copy
import numpy as np
import numpy.ma as ma
from scipy.signal import fftconvolve
#from armor import pattern
def find(a):
    """
    use straightforward summing of mask criteria

    For each cell, m1 counts unmasked cells and m2 counts masked cells in
    its 3x3 neighbourhood; the product m1*m2 is nonzero exactly where the
    neighbourhood contains both, i.e. along the edge of the masked region.
    """
    m1 = ma.zeros(a.matrix.shape)
    m2 = ma.zeros(a.matrix.shape)
    # look around it's neighbourhood
    for i in [-1,0,1]:
        for j in [-1,0,1]:
            m1 += (a.shiftMatrix(i,j).matrix.mask==False)  # finding a point not masked
            m2 += (a.shiftMatrix(i,j).matrix.mask==True )  # finding a point masked
    return m1*m2
def complexity(a, windowSize=20):
    """
    local complexity map of a based on the proportion of edge elements in the region

    The map is piecewise constant over windowSize x windowSize tiles; each
    tile holds (edge-pixel count in the tile + 1) / windowSize**2.
    The result is also cached on the input object as a.complexityMap.
    """
    height, width = a.matrix.shape
    complexityMap = ma.zeros(a.matrix.shape)
    # EAFP: reuse a.edges if it was already computed (and is numeric),
    # otherwise compute the edge map now
    try:
        a.edges+1
        aEdges = a.edges
    except AttributeError:
        aEdges = find(a)
    nonEdge = (aEdges==0)  # NOTE(review): computed but never used below
    for i in range(0, height, windowSize):
        for j in range(0, width, windowSize):
            complexityMap[i:i+windowSize, j:j+windowSize] = \
                ((aEdges[i:i+windowSize, j:j+windowSize]>0).sum()+1.0) / windowSize**2
    a.complexityMap = complexityMap
    return complexityMap
def sobel(a):
    """
    sobel operator for edge detection
    (ref: <<image processing, analysis and machine vision>>,
    the big black book, p.95)

    Convolves a.matrix (full mode) with three 3x3 Sobel kernels --
    horizontal-edge, diagonal, and vertical-edge -- and returns the three
    response arrays.
    """
    kernels = (
        np.array([[ 1, 2, 1],
                  [ 0, 0, 0],
                  [-1,-2,-1]]),
        np.array([[ 0, 1, 2],
                  [-1, 0, 1],
                  [-2,-1, 0]]),
        np.array([[-1, 0, 1],
                  [-2, 0, 2],
                  [-1, 0, 1]]),
    )
    return tuple(fftconvolve(a.matrix, k) for k in kernels)
def distance(p1=(1, 1), p2=(2, 2)):
    """Euclidean distance between two 2-d points."""
    di = p1[0] - p2[0]
    dj = p1[1] - p2[1]
    return (di * di + dj * dj) ** .5
def neighbours(points=[(1,1), (2,2), (3,3), (4,4), (5,5)], gap=5, toBeSorted=True):
    """For each point, the list of points within *gap* of it (itself included),
    optionally sorted by increasing distance.  Greedy helper, one direction.

    Returns a dict mapping each point to its neighbour list.
    """
    nearby_lists = []
    for v in points:
        close = [u for u in points if distance(u, v) <= gap]
        if toBeSorted:
            close.sort(key=lambda u: distance(u, v))
        nearby_lists.append(close)
    return dict(zip(points, nearby_lists))
def connectTheDots(points=[(1,1), (2,2), (3,3), (4,4), (5,5)], gap=5, toBeSorted=True):
    """Greedily chain nearby points into a sequence.

    Starting from the last point of *points*, repeatedly hop to the first
    not-yet-visited neighbour within *gap* (nearest first when toBeSorted).
    NOTE: raises IndexError if the greedy walk gets stuck with no unvisited
    neighbour, as in the original implementation.
    """
    nearby = neighbours(points, gap, toBeSorted)
    remaining = copy.deepcopy(points)
    current = remaining.pop()
    sequence = []
    while remaining:
        sequence.append(current)
        candidates = [v for v in nearby[current] if v in remaining]  # drop marked points
        nxt = candidates[0]
        remaining = [v for v in remaining if v != nxt]  # mark nxt as visited
        current = nxt
    return sequence
if __name__ == '__main__':
time0=time.time()
import matplotlib.pyplot as plt
X, Y = np.meshgrid(range(-50,50), range(-50,50))
X = X.flatten()
Y = Y.flatten()
N = len(X)
allPoints = np.vstack([X,Y])
allPoints2 = [(allPoints[0,i], allPoints[1,i]) for i in range(N)]
circle = [v for v in allPoints2 if distance(v, (0,0))>47 and distance(v, (0,0))<=48]
circle2 = connectTheDots(circle)
print 'Time spent:', time.time()-time0
x = [v[1] for v in circle2]
y = [v[0] for v in circle2]
plt.plot(x,y, '.')
plt.title('dots')
plt.savefig('../pictures/circle-dots.png')
plt.show(block=True)
plt.plot(x,y)
plt.title('dots connected')
plt.savefig('../pictures/circle-dots-connected.png')
plt.show(block=True)
| {
"repo_name": "yaukwankiu/armor",
"path": "geometry/edges.py",
"copies": "1",
"size": "3878",
"license": "cc0-1.0",
"hash": -6307599924369710000,
"line_mean": 32.1452991453,
"line_max": 131,
"alpha_frac": 0.5683341929,
"autogenerated": false,
"ratio": 3.051140833988985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4119475026888985,
"avg_score": null,
"num_lines": null
} |
# armor.geometry.transforms
# functions: translation(I, J, T=(di, dj))
#            linear(I, J, T 2x2 matrix, origin)
#            affine(I, J, T 2x3 matrix, origin)
# in this module we compute the linear/affine transformations of arrays
# it is done in two steps
# 1. the corresponding coordinates are calculated
# 2. interpolation
##################################################
# imports
import numpy as np
ma = np.ma
cos = np.cos
sin = np.sin
import matplotlib.pyplot as plt
import scipy.interpolate
RBS = scipy.interpolate.RectBivariateSpline
from armor import pattern
dbz=pattern.DBZ
###################################################
# setups
#defaultOrigin = pattern.a.coordinateOrigin
#defaultOrigin = (492, 455)
defaultOrigin = (0,0)
def IJ(arr1):
    """Float coordinate grids (I, J) = (row, col) for the shape of *arr1*.

    Accepts either a plain 2-d array or a pattern.DBZ, whose ``.matrix`` is
    used instead.
    """
    if isinstance(arr1, pattern.DBZ):
        arr1 = arr1.matrix
    height, width = arr1.shape
    X, Y = np.meshgrid(range(width), range(height))
    return 1. * Y, 1. * X
def translation(I, J, T=(0, 0)):
    """Shift the coordinate grids (I, J) by -T, IN PLACE, and return them.

    The grids become pointers back to where each pixel of the target image
    came from on the source (need not be integers).
    vector convention: (i, j) = (y, x).  T may be a plain pair or a 2x1
    matrix/array, which is unwrapped first.
    """
    height, width = I.shape
    # unwrap a 2x1 column vector into a plain pair
    if hasattr(T, "shape") and T.shape == (2, 1):
        T = (T[0, 0], T[1, 0])
    I -= T[0]
    J -= T[1]
    return I, J
def rotation(rad=0):
    """2x2 rotation matrix for angle *rad* (radians, counter-clockwise)."""
    c, s = np.cos(rad), np.sin(rad)
    return np.matrix([[c, -s],
                      [s,  c]])
def rotationMatrix(*args, **kwargs):
    """Alias for :func:`rotation`; kept for backward compatibility."""
    return rotation(*args, **kwargs)
def linear(I, J, T=np.matrix([[1,0],[0,1]]), origin=defaultOrigin ):
    """Pull the coordinate grids back through the linear map *T* about *origin*.

    Returns grids of pointers to points on the source array (need not be
    integers).  Convention for T: (i, j) = (y, x); T is assumed nonsingular
    (the pseudo-inverse is used).  The grids are modified in place, pixel by
    pixel, which is slow but simple.
    """
    height, width = I.shape
    # move the origin to (0, 0), apply T^{-1} pixelwise, then move back
    I, J = translation(I, J, origin)
    T_inv = np.linalg.pinv(np.matrix(T))
    for row in range(height):
        for col in range(width):
            src = T_inv * [[I[row, col]],
                           [J[row, col]]]
            I[row, col] = src[0, 0]
            J[row, col] = src[1, 0]
    return translation(I, J, (-origin[0], -origin[1]))
def affine(I, J, T=np.array([[1,0,0], [0,1,0]]), origin=defaultOrigin ):
    """Pull-back through an affine map: linear part (about *origin*) applied
    forward, then the translation in T's third column applied backward.

    A plain 2x2 T is padded with a zero translation column first.
    """
    if T.shape == (2, 2):
        T = ma.hstack([T, [[0], [0]]])
    I, J = linear(I, J, T=T[:, 0:2], origin=origin)
    return translation(I, J, T=T[:, 2])
def interpolation(arr0, I, J, useRBS=True):
    """Sample *arr0* at the (generally non-integer) source coordinates (I, J).

    Returns a masked array of I's shape filled with -99999 wherever the
    source coordinates fall outside arr0's valid interior.  With useRBS a
    RectBivariateSpline is fitted to arr0; otherwise arr0 is indexed
    directly (which relies on I, J being integer-valued -- see note below).
    WARNING: mutates the caller's I in place.
    """
    # accept either a bare array or an object exposing .matrix (e.g. pattern.DBZ)
    try:
        height0, width0 = arr0.shape
    except:
        arr0 = arr0.matrix
        height0, width0 = arr0.shape
    if isinstance(arr0, ma.MaskedArray):
        # dilate the source mask by one cell (down/right and the diagonal)
        # so that sampling adjacent to masked cells is also rejected
        mask = arr0.mask.copy()
        mask1 = np.roll(mask, 1)
        mask += mask1 + np.roll(mask, 1, axis=0) + np.roll(mask1, 1, axis=0)
    else:
        mask = np.zeros((height0, width0))
    # arr0.mask = 0
    # arr0 = arr0 - mask*999999
    height, width = I.shape
    arr1 = ma.ones((height, width)) * (-99999)
    # push out-of-range source coordinates far negative so the I>=0 tests
    # below reject them (this is the in-place mutation of I)
    I += (-999999) * ((I < 0) + (I >= height0 - 1) + (J < 0) + (J >= width0 - 1))  # unwanted portions
    if useRBS:
        arr0_spline = RBS(range(height0), range(width0), arr0)
        for i in range(height):
            for j in range(width):
                # NOTE(review): mask is indexed with *target* coords (i, j),
                # not the source coords (I[i,j], J[i,j]) -- looks wrong when
                # the transform moves pixels or shapes differ; confirm intent
                if I[i, j] >= 0 and not mask[i, j]:
                    arr1[i, j] = arr0_spline(I[i, j], J[i, j])
                else:
                    continue
    else:
        # the original loop, replaced by the new one above 2013-10-03
        for i in range(height):
            for j in range(width):
                if I[i, j] >= 0:
                    # assumes I, J are integer-valued here (old numpy would
                    # truncate floats silently) -- TODO confirm
                    arr1[i, j] = arr0[I[i, j], J[i, j]]
                else:
                    continue
    return arr1
def test(a="", transformationMatrix = np.array([[1, 0, 100],[0, 1, 100]]), showImage=True, saveImage=False):
    """Demo: apply an affine transform to DBZ pattern *a*, show/save the result.

    With a == "" the default pattern armor.pattern.a is loaded from disk.
    Returns the transformed pattern as a new pattern.DBZ.
    """
    from armor import pattern
    if a == "":
        print 'loading armor.pattern...'
        a = pattern.a
        #b = pattern.b
        print 'loading a'
        a.load()
        #b.load()
    print 'computing the coordinates'
    # build the coordinate grids, pull them back through the affine map,
    # then resample the source at the pulled-back coordinates
    I, J = IJ(a.matrix)
    #I, J = affine(I, J, np.array([[0.7, -0.7, 100],[0.7, 0.7, 100]]))
    I, J = affine(I, J, transformationMatrix, origin=a.coordinateOrigin)
    print I, J
    print 'interpolating'
    c = interpolation(a.matrix, I, J, useRBS=False)
    c = pattern.DBZ(matrix=c, name=a.name + '_transformed_by_' + str(transformationMatrix.tolist()))
    if showImage:
        c.show4()
    if saveImage:
        print "saving to:", c.imagePath
        c.saveImage()
    return c
#########################################################################################
# codes from transformedCorrelations.py
# 2013-10-08
def weight(height=881, width=921):
    """
    Significance/reliability weight for the data point at each (i, j).
    Placeholder implementation: uniform weight 1 everywhere.
    """
    return np.ones((height, width))
def getCentroid(arr):
    """
    Weighted centroid of *arr*.

    input:  a 2-d array
    output: np.array([i_centroid, j_centroid])
    """
    height, width = arr.shape
    weighted = arr * weight(height, width)
    height, width = weighted.shape
    X, Y = np.meshgrid(range(width), range(height))
    I, J = Y, X  # numpy order: (i, j) = (row, col)
    mean_value = weighted.mean()
    centroid_i = (I * weighted).mean() / mean_value
    centroid_j = (J * weighted).mean() / mean_value
    return np.array([centroid_i, centroid_j])
def getMomentMatrix(arr, display=False):
height, width = arr.shape
a = arr * weight(height, width)
height, width = a.shape
X, Y = np.meshgrid(range(width), range(height))
I, J = Y,X # numpy order: (i,j) = (x,y)
a_mean = a.mean()
i0, j0 = getCentroid(a)
I -= i0 # resetting the centre
J -= j0
cov_ii = (I**2 * a).mean() / a_mean
cov_jj = (J**2 * a).mean() / a_mean
cov_ij = (I*J * a).mean() / a_mean
M = np.array([[cov_ii, cov_ij],
[cov_ij, cov_jj]])
if display:
print M
return M
def getAxes(M, display=False):
    #print 'getAxes'
    """
    Eigen-decomposition of the moment matrix, giving the principal axes.

    input:  moment matrix M
    output: eigenvalues, eigenvectors -- reordered (since 2013-10-15) so that
            eigenvalues[0] <= eigenvalues[1], with the eigenvector columns
            flipped to stay matched to the eigenvalues.
    """
    eigenvalues, eigenvectors = np.linalg.eig(M)
    #if eigenvalues[1]>eigenvalues[0]:
    if eigenvalues[1] < eigenvalues[0]:  # 2013-10-15
        # keep columns aligned with the swapped eigenvalues
        eigenvectors = np.fliplr(eigenvectors)  # flip
        eigenvalues[1], eigenvalues[0] = eigenvalues[0], eigenvalues[1]
    if display:
        print "eigenvalues, eigenvectors = ", eigenvalues, eigenvectors
    return eigenvalues, eigenvectors
def drawArrow(x=.5, y=.7, dx=.2, dy=-0.3, fc="k", ec="k"):
    """Thin wrapper around matplotlib.pyplot.arrow.

    Head width/length are scaled to 5% / 10% of the arrow's length so arrows
    of different sizes look proportionate.
    """
    # plt.arrow( x, y, dx, dy, **kwargs )
    length = (dx ** 2 + dy ** 2) ** .5
    plt.arrow(x, y, dx, dy, fc=fc, ec=ec,
              head_width=length * 0.05, head_length=length * 0.1)
def showArrayWithAxes(arr, cmap='jet', verbose=True, imagePath="", imageTopDown=""):
    """ Intermediate step. showing the array with the axes

    Computes the weighted centroid and moment (eigen-)axes of *arr*, overlays
    the two axes as arrows on an imshow plot, optionally saves and/or shows
    the figure, and returns the computed statistics in a dict.
    """
    ########
    # set up
    height, width = arr.shape
    if imageTopDown == "":
        imageTopDown = pattern.defaultImageTopDown  # default to false
    ########
    # computation
    i0, j0 = getCentroid(arr)
    M = getMomentMatrix(arr)
    eigenvalues, eigenvectors = getAxes(M)
    v0 = eigenvectors[:, 0]
    v1 = eigenvectors[:, 1]
    # arrow length = sqrt(eigenvalue) = spread along each principal axis
    v0_size = eigenvalues[0] ** .5
    v1_size = eigenvalues[1] ** .5
    ########
    # display
    #plt.xlim(0, width)    # or other attributes of arr if desired?!
    #plt.ylim(0, height)
    plt.imshow(arr, cmap=cmap)
    # arrows drawn in plot coordinates (x, y) = (j, i)
    drawArrow(x=j0, y=i0, dx=v0[1]*v0_size, dy=v0[0]*v0_size)
    drawArrow(x=j0, y=i0, dx=v1[1]*v1_size, dy=v1[0]*v1_size, fc="r", ec="r")
    # if not imageTopDown: FLIP Y-AXIS after all plotting
    if not imageTopDown:
        ax = plt.gca()
        ax.set_ylim(ax.get_ylim()[::-1])
    if imagePath != "":
        print 'saving image to', imagePath
        plt.savefig(imagePath, dpi=200)
    if verbose:
        plt.show()
    return {'eigenvalues' : eigenvalues,
            'eigenvectors' : eigenvectors,
            'momentMatrix' : M,
            'centroid' : (i0, j0),
            }
def transform(arr, centroid_i, centroid_j,
theta=-9999, scale_0=-9999, scale_1=-9999,
eigenvectors=-9999, eigenvalues=-9999):
"""
5. Transform and normalise:
translate to centre of array, rotate axes to x,y, scale x, scale y
first step: compute the coordinates in reverse
second step: carry out the interpolation
the former transform() function,
"""
#print 'transform'
if theta ==-9999:
v0 = eigenvectors[:,0]
v1 = eigenvectors[:,1]
theta = np.arctan(1.*v0[1]/v0[0]) # rotate the long axis to x
# two choices differing by pi - will try both later
# in the comparison function
print 'theta=', theta
if scale_0==-9999:
scale_0=eigenvalues[0]**.5 * 0.04
scale_1=eigenvalues[1]**.5 * 0.04
height, width = arr.shape
centre_i, centre_j = height//2, width//2
X, Y = np.meshgrid(range(width), range(height))
I, J = Y, X # python array convention: vertical first
# tracking backwards
I -= centroid_i # 1. moving from the centre of the board back to zero
J -= centroid_j
I *= scale_0 # 2. scale (2, 3 can't be interchanged)
J *= scale_1
I = cos(theta) * I - sin(theta) * J # 3. rotate to the appropriate angle
J = sin(theta) * I + cos(theta) * J
I += centroid_i # 4. moving to the centroid specified, finding
J += centroid_j # the point where it came from
return I, J
#
# end codes from transformedCorrelations.py
################################################################################
def momentNormalise(a1 = pattern.a, verbose=True, imagePath=""):
    """
    PURPOSE
        to set the image upright according to its moments
    USE
        cd /media/KINGSTON/ARMOR/python/
        python
        from armor import pattern
        from armor.geometry import transforms as tr
        import numpy as np
        dbz=pattern.DBZ
        #a = pattern.a
        #b = pattern.b
        a = dbz('20120612.0230')
        b = dbz('20120612.0900')
        a.load()
        a.setThreshold(0)
        b.load()
        b.setThreshold(0)
        a.show()
        b.show()
        x = tr.momentNormalise(a1=a)
        a2 = x['a2']
        centroid_a = x['centroid']
        eigenvalues_a = x['eigenvalues']
        y = tr.momentNormalise(a1=b)
        b2 = y['a2']
        centroid_b = y['centroid']
        eigenvalues_b = y['eigenvalues']
        a2.matrix = a2.drawCross(*centroid_a, radius=50).matrix
        a2.show()
        b2.matrix = b2.drawCross(*centroid_b, radius=50).matrix
        b2.show()
        a2.backupMatrix()
        b2.backupMatrix()
        I, J = tr.IJ(a2.matrix)
        I, J = tr.translation( I, J, (centroid_b - centroid_a))
        a2.matrix = tr.interpolation(a2.matrix, I, J)
        a2.setThreshold(0)
        a2.show()
        b2.show()
        # adding in axial scalings
        print 'eigenvalues_a:', eigenvalues_a
        print 'eigenvalues_b:', eigenvalues_b
        a2.restoreMatrix()
        b2.restoreMatrix()
        displacement = np.matrix(centroid_b - centroid_a)
        linearTr = np.diag( (eigenvalues_b / eigenvalues_a) **.5 )
        affineTr = np.hstack( [linearTr, displacement.T] )
        I, J = tr.IJ(a2.matrix)
        I, J = tr.affine(I, J, T=affineTr, origin=centroid_b)
        a2.matrix = tr.interpolation(a2.matrix, I, J)
        a2.setThreshold(0)
        a2.show()
        b2.show()
        #######################
        test: normalising the given pattern with moments
        1. load the pic, compute and draw the axes
            a. compute the centroid and eigenvalues
            b. get the IJs
            c. interpolate
        2. normalise (i.e. rotate the axes to x/y)
        3. draw the new pic
    """
    arr1 = a1.matrix
    # show the original with its moment axes overlaid
    showArrayWithAxes(arr1, verbose=verbose, imagePath=imagePath, imageTopDown=a1.imageTopDown)
    i0, j0 = getCentroid(arr1)
    M = getMomentMatrix(arr1)
    eigenvalues, eigenvectors = getAxes(M)
    #       | from transformedCorrelations.transform():  |
    #       v                                            v
    v0 = eigenvectors[:, 0]
    v1 = eigenvectors[:, 1]
    theta = np.arctan(1.*v0[1]/v0[0])  #- np.pi /2
    if verbose:
        print 'theta =', theta  # theta = angle of the short arm with the i-(y-)axis
    T = rotation(-theta)  # rotate by negative of theta
    # NOTE(review): this local name shadows the module-level translation()
    # function; harmless here only because that function is not called below
    translation = [[0], [0]]
    T = np.hstack([T, translation])
    rotationOrigin = i0, j0
    # pull back coordinates through the rotation and resample
    I, J = IJ(arr1)
    I, J = affine(I, J, T=T, origin=rotationOrigin)
    arr2 = interpolation(arr1, I, J)
    a2 = pattern.DBZ(name=a1.name + '_normalised',
                     matrix = arr2,
                     coordinateOrigin = (i0, j0),  # taichung park makes no sense any more
                     )
    if verbose:
        a2.show()
    plt.clf()
    showArrayWithAxes(a2.matrix, verbose=verbose, imagePath= a2.imagePath[:-4]+ '_with_axes' + a2.imagePath[-4:], imageTopDown=a2.imageTopDown)
    return {'IJ' : (I, J),
            'a2' : a2,
            'T' : T,
            'centroid' : np.array([i0, j0]),
            'eigenvalues' : eigenvalues,
            'eigenvectors' : eigenvectors,
            'theta' : theta,
            }
def test2(*args, **kwargs):
    """Alias for :func:`momentNormalise`; kept for backward compatibility."""
    return momentNormalise(*args, **kwargs)
def momentMatch(a=pattern.a, b=pattern.b, verbose=True, saveImage=False):
    """Normalise pattern *a* onto *b* by matching centroids and moment axes,
    then compare raw vs moment-normalised correlations.

    Returns a dict with the intermediate patterns, the affine transform used,
    and both correlation scores.  Side effects: shows/saves figures and writes
    a correlations.txt file next to a's image.
    """
    # moment-normalise both patterns (rotate principal axes upright)
    x = momentNormalise(a1=a, imagePath=a.imagePath[:-4] + '_withaxes' + a.imagePath[-4:])
    a1 = x['a2']
    a1.setThreshold(0)
    centroid_a = x['centroid']
    eigenvalues_a = x['eigenvalues']
    y = momentNormalise(a1=b, imagePath=b.imagePath[:-4] + '_withaxes' + b.imagePath[-4:])
    b1 = y['a2']
    b1.setThreshold(0)
    centroid_b = y['centroid']
    eigenvalues_b = y['eigenvalues']
    if saveImage:
        print "saving to", a1.imagePath
        a1.saveImage()
        print "saving to", b1.imagePath
        b1.saveImage()
    # mark the centroids for visual inspection
    a2 = a1.drawCross(*centroid_a, radius=50)
    b2 = b1.drawCross(*centroid_b, radius=50)
    if verbose:
        a2.show()
        b2.show()
    a2.backupMatrix()
    b2.backupMatrix()
    # translate a's centroid onto b's
    I, J = IJ(a2.matrix)
    I, J = translation( I, J, (centroid_b - centroid_a))
    a2.matrix = interpolation(a2.matrix, I, J)
    a2.setThreshold(0)
    if verbose:
        a2.show()
        b2.show()
    if saveImage:
        print "saving to", a2.imagePath
        a2.saveImage()
        print "saving to", b2.imagePath
        b2.saveImage()
    # adding in axial scalings
    print 'eigenvalues_a:', eigenvalues_a
    print 'eigenvalues_b:', eigenvalues_b
    a2.restoreMatrix()
    b2.restoreMatrix()
    # affine map: scale each axis by sqrt of the eigenvalue ratio, then shift
    displacement = np.matrix(centroid_b - centroid_a)
    linearTr = np.diag( (eigenvalues_b / eigenvalues_a) **.5 )
    affineTr = np.hstack( [linearTr, displacement.T] )
    I, J = IJ(a2.matrix)
    I, J = affine(I, J, T=affineTr, origin=centroid_b)
    a3 = a2.copy()
    a3.matrix = interpolation(a2.matrix, I, J)
    a3.setThreshold(0)
    if verbose:
        a3.show()
        b2.show()
    if saveImage:
        print "saving to", a.imagePath[:-4] + "_normalised_to_" + b.name + a.imagePath[-4:]
        a3.saveImage(imagePath= a.imagePath[:-4] + "_normalised_to_" + b.name + a.imagePath[-4:])
    # apply the same transform to the un-crossed normalised a for scoring
    a4 = a1.copy()
    a4.matrix = interpolation(a1.matrix, I, J)
    b1.setThreshold(0)
    a4.setThreshold(0)
    if verbose:
        print 'a4 = a normalised to b'
        a4.show()
    rawCorr = a.corr(b)
    normCorr = b1.corr(a4)
    if verbose:
        b1.show()
        a4.show()
    outputString = 'raw correlation: ' + str(rawCorr) + '\nnormalised correlation:' + str(normCorr)
    print outputString
    open(a.imagePath[:-4] + '_' + b.name + 'correlations.txt', 'w').write(outputString)
    return { 'a momentnormalised to b' : a4,
             'raw correlation' : rawCorr,
             'normalised correlation' : normCorr,
             'a-momentnormalised': x,
             'b-momentnormalised': y,
             'a2-translation' : a2,
             'a3-affineTr' : a3,
             'b2' : b2,
             'IJaffine' : (I, J),
             'centroid_a' : centroid_a,
             'centroid_b' : centroid_b,
             'affineTr' : affineTr
             }
def test3(aTime='0800', bTime='0900', verbose=False, saveImage=True):
    """Demo: moment-match two frames from 2012-06-12 and return the result dict.

    from armor.geometry import transforms as tr ; reload(tr) ; from armor import pattern ; reload(pattern) ; x= tr.test3()
    """
    c = dbz('20120612.' + aTime)
    d = dbz('20120612.' + bTime)
    c.load()
    d.load()
    #c.show()
    #d.show()
    c.setThreshold(0)
    d.setThreshold(0)
    if verbose:
        c.show()
    # pick up any interactive edits to armor.pattern before matching
    reload(pattern)
    x = momentMatch(c, d, verbose=verbose, saveImage=saveImage)
    return x
| {
"repo_name": "yaukwankiu/armor",
"path": "geometry/transforms.py",
"copies": "1",
"size": "17861",
"license": "cc0-1.0",
"hash": 1114190648917957100,
"line_mean": 30.9516994633,
"line_max": 147,
"alpha_frac": 0.5270701528,
"autogenerated": false,
"ratio": 3.318038268623444,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43451084214234437,
"avg_score": null,
"num_lines": null
} |
# armor.kmeans.stormTracking.py
# codes taken from armor.tests.trackingTest20140901
import numpy as np
#from armor import objects4 as ob
def stormTracking(a0,
                  upperThreshold=40, lowerThreshold=20,
                  lowerLimit1=10, upperLimit1=100000,
                  lowerLimit2=30000,
                  minAcceptedDensity = 5,
                  verbose=True, display=True, block=False,
                  minUnmaskedValue=-50, #hack
                  newObject=True,
                  *args, **kwargs):
    """Segment storm cells in DBZ pattern *a0* and return regions to track.

    Pipeline: threshold at upperThreshold -> connected components -> keep
    components with lowerLimit1 < size < upperLimit1 -> k-means at
    lowerThreshold, seeded at the component centroids -> drop regions whose
    windowed sum is below lowerLimit2 or whose mean value is below
    minAcceptedDensity.  Returns {'regionsToTrack', 'a', 'a2'}.
    """
    if newObject:
        a = a0.copy()
    else:
        a = a0
    a.backupMatrix(0)
    # strong cores: threshold, then label connected components
    a1 = a.above(upperThreshold)
    x = a1.connectedComponents()
    x.show(block=block)
    if verbose:
        print x.matrix.max()
    N = x.matrix.max()
    for i in range(N):
        size = x.levelSet(i).matrix.sum()
        if size > lowerLimit1 and size < upperLimit1:
            print i, ":", size, "||",
    # indices of components within the accepted size band
    S = [v for v in range(N) if x.levelSet(v).matrix.sum() > lowerLimit1 and x.levelSet(v).matrix.sum() < upperLimit1]
    print S
    centroids = [x.levelSet(v).getCentroid().astype(int) for v in S]
    if display:
        a.restoreMatrix(0)
        for i, j in centroids:
            try:
                # 60x60 box around each centroid, clipped to an 880x920 grid
                a.drawRectangle(max(0, i-30), max(0, j-30), min(880-30-i, 60), min(920-30-j, 60), newObject=False)
            except:
                pass
        a.show(block=block)
    a.restoreMatrix(0)  # for safety
    # k-means seeded at the component centroids
    y = a.getKmeans(threshold=lowerThreshold, k=np.vstack(centroids), minit='matrix')
    a2 = y['pattern']
    N2 = a2.matrix.max()
    regionsToTrack = [a2.getRegionForValue(v) for v in range(int(N2))]
    regionsToTrack = [v for v in regionsToTrack if v != (-1, -1, 0, 0)]
    a.restoreMatrix(0)
    for i, R in enumerate(regionsToTrack):
        print i, R
        a3 = a.getWindow(*R)
        a3.matrix.mask = (a3.matrix < minUnmaskedValue)  # hack
        #print a3.matrix.mask.sum()
        #print a3.matrix.sum(), a3.matrix.shape
        #a3.show(block=block)
        #time.sleep(2)
        if verbose:
            print a3.matrix.sum()
            print a3.matrix.shape[0]*a3.matrix.shape[1]*20
        # drop regions with too little total signal or too low mean density
        if a3.matrix.sum() < lowerLimit2 or (a3.matrix.sum() < a3.matrix.shape[0]*a3.matrix.shape[1] * minAcceptedDensity):
            regionsToTrack[i] = (-1, -1, 0, 0)  # to be removed
    regionsToTrack = [v for v in regionsToTrack if v != (-1, -1, 0, 0)]
    for R in regionsToTrack:
        print R
        a.drawRectangle(*R, newObject=False)
    if display:
        a.show(block=block)
    return {'regionsToTrack': regionsToTrack,
            'a': a,
            'a2': a2
            }
| {
"repo_name": "yaukwankiu/armor",
"path": "kmeans/stormTracking.py",
"copies": "1",
"size": "2616",
"license": "cc0-1.0",
"hash": -2941465321128190500,
"line_mean": 29.4186046512,
"line_max": 122,
"alpha_frac": 0.5718654434,
"autogenerated": false,
"ratio": 3.2019583843329253,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9146923695162987,
"avg_score": 0.025380026513987693,
"num_lines": 86
} |
# armor/shiiba/regression2.py
# adapted from regression.py
# see regression.py for further remarks
# with a change: c1,.., c6 -> c1,..,c9
# 19 February 2013.
# to calculate the advection coefficiations after Shi-iba et al
# Takasao T. and Shiiba M. 1984: Development of techniques for on-line forecasting of rainfall and flood runoff (Natural Disaster Science 6, 83)
# with upwind scheme etc
###################################################################
# imports
import numpy as np
import numpy.ma as ma
from .. import pattern
import copy
################################################################
# the functions
def centralDifference(phi0, phi1):
    """
    adapted from shiiba.py, internalising the parameters into the objects
    dt, dx, dy comes from the latter dbz image phi1
    25 January 2013, Yau Kwan Kiu.
    ----
    to compute the advection coefficients via the central difference scheme
    as a step to the shiiba method
    use numpy.linalg.lstsq for linear regression:
    http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.lstsq.html
    We shall use the C/python convention for coordinate axes, but with the first axis going up,
    following the convention of the Central Weather Bureau and matplotlib (meshgrid) usage
        first axis
        ^
        |
        |
        + ----> second axis
    input: phi0,phi1 - two armor.pattern.DBZ objects (wrapping a masked array)
    output: v - an armor.pattern.VectorField object (wrapping a pair of masked arrays)
        NOTE(review): the code below actually returns
        ([c1,...,c9], R_squared); the output line above looks stale -- confirm.
    """
    # setting up the parameters
    dt = phi0.dt  # use the attribute of the latter DBZ image
    dj = phi0.dx  # changed from phi1.dt to phi0.dt for compatibility, 13-3-2013
    di = phi0.dy
    ####################################################
    # defining the shifted masked arrays
    # [ref: http://docs.scipy.org/doc/numpy/reference/maskedarray.generic.html
    #       http://docs.scipy.org/doc/numpy/reference/routines.ma.html ]
    ######## initialise
    # phi0_up.matrix = np.roll(phi0.matrix,+1,axis=0)
    # phi0_down.matrix = np.roll(phi0.matrix,-1,axis=0)
    # phi0_left.matrix = np.roll(phi0.matrix,-1,axis=1)
    # phi0_right.matrix = np.roll(phi0.matrix,+1,axis=1)
    ######## to set the masks
    # phi0_up.mask[ 0,:] = 1   #mask the first (=bottom) row
    # phi0_down.mask[-1,:]= 1  #mask the last (=top) row
    # phi0_left.mask[:,-1]= 1  #mask the last (=right) column
    # phi0_right.mask[:,0]= 1  #mask the first (=left) column
    phi0_up = phi0.shiftMatrix( 1, 0)  # a new armor.pattern.DBZ object defined via DBZ's own methods
    phi0_down = phi0.shiftMatrix(-1, 0)
    phi0_left = phi0.shiftMatrix( 0,-1)
    phi0_right = phi0.shiftMatrix( 0, 1)
    ##############################################
    # applying the advection equation
    # see ARMOR annual report (1/3), section 4.2.1, esp. eqn (4.3)
    Y = (phi1.matrix-phi0.matrix)  # regression: Y = X.c, where c = (c1,...,c6,..,c9)
                                   # (up to signs or permutations) are the unknowns
    # setting up the proper mask for regression
    Y.mask = Y.mask + phi0_up.matrix.mask + phi0_down.matrix.mask +\
             phi0_left.matrix.mask + phi0_right.matrix.mask  # setting up the proper mask
                                                             # for regression
    if phi1.verbose or phi0.verbose:
        print 'sum(sum(Y.mask))=', sum(sum(Y.mask))
    X = np.zeros((phi0.matrix.shape[0], phi0.matrix.shape[1], 9))  # a 9 components for each dbz pixel
                                              # -> 9 vector for (c1,c2,...,c9)
                                              # where u=c1x+c2y+c3, v=c4x+c5y+c6,
                                              # q=c7x+c8y+c9, j=x, i=y,
                                              # therefore up to +- signs or permutations,
                                              # X=(A1c1, A1c2, A1c3, A2c1, A2c2, A2c3)
    # Central difference scheme: phi(i+1)-phi(i-1) / 2di, etc
    if phi1.verbose or phi0.verbose:
        print "phi0,1.matrix.shape", phi0.matrix.shape, phi1.matrix.shape  #debug
    A = ma.array([-dt*(phi0_down.matrix - phi0_up.matrix) /(2*di),\
                  -dt*(phi0_left.matrix - phi0_right.matrix)/(2*dj)])
    A = np.transpose(A, (1,2,0))  #swap the axes: pixel dim1, pixel dim2, internal
    A.fill_value = -999
    Bj, Bi = np.meshgrid( range(phi0.matrix.shape[1]), range(phi0.matrix.shape[0]) )  #location stuff, from the bottom left corner
    #############################
    #   IS THE ABOVE CORRECT???   #
    # or should that be something like
    #    meshgrid(phi0.matrix.shape[1], phi0.matrix.shape[0]
    # and we have transpose (1,2,0) below?
    #############################
    ##################################################
    # COORDINATE TRANSFORM ADDED 13 MARCH 2013
    Bi -= phi0.coordinateOrigin[0]
    Bj -= phi0.coordinateOrigin[1]
    #
    ##################################################
    B = ma.array([Bi,Bj]).transpose((1,2,0))  # (881x921x2), (i,j, (i,j) )
    #[a,b] * [[x,y],[z,w]] * [c,d].T = [acx+ady+bcz+bdw]; want: [ac, ad, bc, bd]
    # debug
    #if phi0.verbose or phi1.verbose:
    #    print '== shapes for X, A and B: =='
    #    print X.shape, A.shape, B.shape
    # assemble the design matrix columns for the least-squares fit
    X[:,:,0] = A[:,:,0]*B[:,:,0]  # coeffs for c1,..,c6,..,c9 up to a permutation i,j=y,x
                                  # which we don't care for now
    X[:,:,1] = A[:,:,0]*B[:,:,1]
    X[:,:,2] = A[:,:,0]  # the constant term
    X[:,:,3] = A[:,:,1]*B[:,:,0]
    X[:,:,4] = A[:,:,1]*B[:,:,1]
    X[:,:,5] = A[:,:,1]
    X[:,:,6] = dt *B[:,:,0]  # c.f. p.32 of the December 2012 report
    X[:,:,7] = dt *B[:,:,1]
    X[:,:,8] = dt
    if phi0.verbose or phi1.verbose:
        # debug
        print "== stats for X, A and B =="
        print "X.max, X.sum() = "
        print X.max(), X.sum()
        print "A.max, A.sum() = "
        print A.max(), A.sum()
        print "B.max, B.sum() = "
        print B.max(), B.sum()
        print Y.shape
    Y = Y.reshape(phi0.matrix.size,1)  # convert the pixel data into a column vector
    X = X.reshape(phi0.matrix.size,9)
    # HACK FOR numpy problem: dealing with numpy.linalg.lstsq which sees not the mask for masked arrays
    # (masked entries are zeroed on both sides so they contribute nothing)
    Y1 = ma.array(Y.view(np.ndarray)*(Y.mask==False))
    X1 = ma.array(X.view(np.ndarray)*(Y.mask==False))  #yes, that's right.  X* (Y.mask==0)d
    Y1.mask = Y.mask
    C, residues, rank, s = np.linalg.lstsq(X1,Y1)
    c1,c2,c3,c4,c5,c6,c7,c8,c9 = C
    #################################################################################
    # the following line was changed on 8 march 2013
    #SStotal = ((Y1-Y1.mean())**2).sum()   # total sum of squares
    # http://en.wikipedia.org/wiki/Coefficient_of_determination
    # array operations respect masks
    SStotal = phi1.var() * (1-phi1.mask).sum()
    # end the following line was changed on 8 march 2013
    #################################################################################
    # !!!! debug !!!!
    # print "SStotal=", SStotal
    # print "sum of residues=",residues[0]
    #
    #print C.shape, X1.shape, Y1.shape
    #print [v[0] for v in C]
    #C = np.array([v[0] for v in C])
    #residue2 = ((C * X1).sum(axis=1) - Y1)**2
    #
    #print "((C * X1).sum(axis=1) - Y1)**2 = ", residue2
    #print "difference:", residue-residue2
    print "Number of points effective, total variance, sum of squared residues:"
    print (1-Y.mask).sum(), ((Y-Y.mean())**2).sum(), ((Y1-Y1.mean())**2).sum(),
    print (1-Y.mask).sum() * Y.var(), (1-Y1.mask).sum() * Y1.var(), residues
    print '\n+++++ size of window (unmasked points): ', (Y.mask==False).sum()
    R_squared = 1 - (residues[0]/SStotal)
    print "c1,..c9 = "
    print C.reshape((3,3))
    print "R_squared=", R_squared, '\n=======================================\n'
    return ([c1[0],c2[0],c3[0],c4[0],c5[0],c6[0],c7[0],c8[0],c9[0]], R_squared)
###################################################################################################
def upWind(phi0, phi1, convergenceMark = 0.00000000001):
    """ adapted to object oriented form 27-1-2013
    -----
    to compute the advection coefficients via the central difference scheme
    as a step to the shiiba method
    u(k-1) and v(k-1) are given, possibly, from previous upWind steps
    or from the central differnece scheme
    builds upon the central difference scheme
    11-1-2013
    Returns ([c1,...,c9], R_squared), iterating the upwind-corrected
    regression until successive coefficient vectors differ (in squared
    Euclidean norm) by less than convergenceMark.
    """
    # algorithm: 1. start with the central difference scheme to obtain an initial (u0,v0)
    #            2. recursively regress for the next u(n+1), v(n+1) until convergence
    # to get the shifted arrays - copied from def centralDifference
    ###########
    dt = phi0.dt  # use the attribute phi1.dt of the latter DBZ image
    dj = phi0.dx  # or should we use phi0.dt? <-- decided on this for compatibility
                  # across functions - see. e.g. getPrediction() below
                  # 13 march 2013
    di = phi0.dy
    verbose = phi0.verbose or phi1.verbose
    if verbose:
        print '==========================================================================='
        print 'di, dj, dt, convergenceMark =', di, dj, dt, convergenceMark
    phi0_up = phi0.shiftMatrix( 1, 0)  # a new armor.pattern.DBZ object defined via DBZ's own methods
    phi0_down = phi0.shiftMatrix(-1, 0)
    phi0_left = phi0.shiftMatrix( 0,-1)
    phi0_right = phi0.shiftMatrix( 0, 1)
    # initial estimate from the plain central-difference regression
    [c1_, c2_, c3_, c4_, c5_, c6_,c7_,c8_,c9_ ], R_squared_ = centralDifference(phi0=phi0, phi1=phi1)
    if phi0.verbose and phi1.verbose:  # need to be very verbose to show pic!
        vect = getShiibaVectorField((c1_, c2_, c3_, c4_, c5_, c6_,c7_,c8_,c9_ ),phi1)
        vect.show()
    J, I = np.meshgrid(np.arange(0,phi0.matrix.shape[1]), np.arange(0,phi0.matrix.shape[0]))
    ##################################################
    # COORDINATE TRANSFORM ADDED 13 MARCH 2013
    I -= phi0.coordinateOrigin[0]
    J -= phi0.coordinateOrigin[1]
    #
    ##################################################
    # see our ARMOR December 2012 annual report (1/3), section 4.2.1, esp. eqn (4.3)
    # sentinel values far from any plausible coefficients, guaranteeing the
    # first convergence test fails and the loop is entered
    c1 = 9999; c2 = 9999 ; c3=-9999 ; c4=9999 ; c5=-9999 ; c6= -9999  #initialise
    c7 = 999.9; c8= 999; c9=-999
    # perhaps I should change the following convergence criterion
    # from absolute value to component-wise-scaled correlations
    while (c1_-c1)**2 + (c2_-c2)**2 + (c3_-c3)**2 + (c4_-c4)**2 + (c5_-c5)**2 + \
          (c6_-c6)**2 + (c7_-c7)**2 + (c8_-c8)**2 + (c9_-c9)**2 > convergenceMark:
        # NOTE(review): on the FIRST pass this copies the sentinel values into
        # c1_..c9_, discarding the centralDifference estimate above, so the
        # first U0/V0 below are computed from the sentinels -- looks like the
        # assignment direction is inverted; confirm intent
        c1_=c1; c2_= c2; c3_=c3; c4_=c4; c5_=c5; c6_=c6; c7_=c7; c8_=c8; c9_=c9
        #debug
        #print " print U0.shape, V0.shape, phi0.shape, \nprint di.shape, dj.shape "
        #print U0.shape, V0.shape, phi0.shape,
        #print di.shape, dj.shape
        U0 = c1_*I + c2_*J + c3_  # use old (c1,..c6,..,c9) to compute old U,V
        V0 = c4_*I + c5_*J + c6_  # to be used as estimates for the new U,V
        # Q0 = c7_*I + c8_*J + c9_   # not needed yet?
        upWindCorrectionTerm = abs(U0/(2*di)) * (2*phi0.matrix -phi0_down.matrix -phi0_up.matrix) +\
                               abs(V0/(2*dj)) * (2*phi0.matrix -phi0_left.matrix -phi0_right.matrix)
        upWindCorrectionTerm = pattern.DBZ(dataTime=phi1.dataTime, matrix=upWindCorrectionTerm)
        # the following line doesn't work: takes up too much computation resource
        #upWindCorrectionTerm = abs(U0/(2*di)) * (2*phi0 -phi0_down -phi0_up) +\
        #                       abs(V0/(2*dj)) * (2*phi0 -phi0_left -phi0_right)
        #print 'sum(upWindCorrectionTerm.mask==0)=',sum( (upWindCorrectionTerm.mask==0)) #debug
        [c1, c2, c3, c4, c5, c6, c7, c8, c9], R_squared = centralDifference(phi0=phi0 + dt *upWindCorrectionTerm,\
                                                                            phi1=phi1)
    if verbose:
        print "\n##################################################################\n"
        print "c1, c2, c3, c4, c5, c6, c7, c8, c9: ", c1, c2, c3, c4, c5, c6,c7,c8,c9
        print "\nR^2: ", R_squared
        print "\n##################################################################\n"
    return [c1, c2, c3, c4, c5, c6,c7,c8,c9], R_squared
def getShiibaVectorField(shiibaCoeffs, phi1, gridSize=25, name="",\
                         key="Shiiba vector field", title="UpWind Scheme"):
    """Build a pattern.VectorField from the nine shiiba coefficients.

    Coefficients come in (i, j) = (y, x) convention, Ui = c1.I + c2.J + c3
    and Vj = c4.I + c5.J + c6; they are relabelled here for the (x, y)
    convention so that Ux = c5.x + c4.y + c6 and Vy = c2.x + c1.y + c3.
    """
    # 1. relabel the coefficients for the x,y <- j,i switch
    c1, c2, c3, c4, c5, c6, c7, c8, c9 = shiibaCoeffs
    c5, c4, c6, c2, c1, c3, c8, c7, c9 = c1, c2, c3, c4, c5, c6, c7, c8, c9
    # 2. setting the stage
    height, width = phi1.matrix.shape
    mask = phi1.matrix.mask
    name = "shiiba vector field for " + phi1.name
    imagePath = phi1.name + "shiibaVectorField.png"
    X, Y = np.meshgrid(range(width), range(height))
    ##################################################
    # COORDINATE TRANSFORM ADDED 13 MARCH 2013
    Y -= phi1.coordinateOrigin[0]  # "latitude"
    X -= phi1.coordinateOrigin[1]  # "longitude"
    ##################################################
    # 3. evaluate the field and mask it like the source pattern
    Ux = ma.array(c1 * X + c2 * Y + c3, mask=mask)
    Vy = ma.array(c4 * X + c5 * Y + c6, mask=mask)
    # 4. wrap in a VectorField object
    return pattern.VectorField(Ux, Vy, name=name, imagePath=imagePath, key=key,
                               title=title, gridSize=gridSize)
def getShiibaSourceField(shiibaCoeffs, phi1, cmap='Spectral'):
    """Scalar source-term field Q = c7.x + c8.y + c9 as a pattern.DBZ.

    *shiibaCoeffs* may be the full 9-list (c1..c9) or just (c7, c8, c9).
    """
    height, width = phi1.matrix.shape
    mask = phi1.matrix.mask
    if len(shiibaCoeffs) == 9:
        c1, c2, c3, c4, c5, c6, c7, c8, c9 = shiibaCoeffs
    else:
        c7, c8, c9 = shiibaCoeffs
    c8, c7, c9 = c7, c8, c9  # coordinate transform, i=y, j=x
    X, Y = np.meshgrid(range(width), range(height))
    ##################################################
    # COORDINATE TRANSFORM ADDED 13 MARCH 2013
    Y -= phi1.coordinateOrigin[0]
    X -= phi1.coordinateOrigin[1]
    ##################################################
    Q = ma.array(c7 * X + c8 * Y + c9, mask=mask)
    return pattern.DBZ(name=phi1.name + '_shiiba_source_term', matrix=Q,
                       dataPath =phi1.dataPath + '_shiiba_source_term.dat',
                       outputPath=phi1.dataPath + '_shiiba_source_term.dat',
                       imagePath =phi1.dataPath + '_shiiba_source_term.png',
                       cmap=cmap, verbose=phi1.verbose)
def getPrediction(shiibaCoeffs, a, cmap='', coeffsUsed=9):
    """Advect DBZ object *a* one time step with the upwind scheme.

    equation (4.3), p.32, Annual Report December 2012
    input:  "a", a DBZ object; shiibaCoeffs is either the 9-tuple
            (c1..c9) or a pattern.VectorField carrying U, V directly
    output: "a1", the prediction, a DBZ object
    coeffsUsed=6 drops the source term Q from the update.
    """
    if cmap == '':
        cmap = a.cmap
    dt = a.dt
    dx = a.dx
    dy = a.dy
    di = dy
    dj = dx
    # neighbour frames, built via DBZ's own shift methods
    a_up = a.shiftMatrix( 1, 0)
    a_down = a.shiftMatrix(-1, 0)
    a_left = a.shiftMatrix( 0,-1)
    a_right = a.shiftMatrix( 0, 1)
    height,width= a.matrix.shape
    try:
        c1, c2, c3, c4, c5, c6, c7, c8, c9 = shiibaCoeffs # i want 9 coeffs
        # defining the grid (can change here for coord transforms,
        # e.g. -> range(-centre, width-centre)):
        X, Y = np.meshgrid(range(width), range(height))
        ##################################################
        # COORDINATE TRANSFORM ADDED 13 MARCH 2013
        Y -= a.coordinateOrigin[0]
        X -= a.coordinateOrigin[1]
        #
        ##################################################
        J, I = X, Y
        # calculating the U, V and Q:
        U = c1*I + c2*J + c3
        V = c4*I + c5*J + c6
        Q = c7*I + c8*J + c9
    except TypeError:
        # if the unpack fails, shiibaCoeffs is presumably a
        # pattern.VectorField object carrying U, V directly
        U = shiibaCoeffs.U
        V = shiibaCoeffs.V
        Q = 0
    #############
    # upwind correction, as before
    # NOTE(review): U pairs with di(=dy) here but with dx in A1 below
    # (and V with dj(=dx) vs dy) -- confirm the i/j vs x/y pairing is
    # intended when dx != dy
    upWindCorrectionTerm = abs(U/(2*di)) * (2*a.matrix -a_down.matrix -a_up.matrix)+\
                           abs(V/(2*dj)) * (2*a.matrix -a_left.matrix -a_right.matrix)
    A1 = (a_down.matrix -a_up.matrix) * (U/(2*dx))
    A2 = (a_left.matrix -a_right.matrix) * (V/(2*dy))
    if coeffsUsed==6:
        phi_hat = a.matrix - dt * (A1 + A2 ) - upWindCorrectionTerm
    else:
        phi_hat = a.matrix - dt * (A1 + A2 -Q) - upWindCorrectionTerm
    a1 = pattern.DBZ(name = "shiiba prediction for " + a.name,
                     matrix = phi_hat.copy(),
                     dt = dt,
                     dx = dx,
                     dy = dy,
                     outputPath= "shiiba_prediction_for_"+a.name+"_and_dt_"+str(dt) +".txt",
                     imagePath="shiiba_prediction_for_"+a.name+"_and_dt_"+str(dt) +".txt",
                     coastDataPath=a.coastDataPath,
                     database =a.database,
                     cmap =cmap,   # bug fix: honour the caller-supplied cmap (was a.cmap)
                     vmin =a.vmin,
                     vmax =a.vmax,
                     verbose =a.verbose)
    return a1
###
###
def convert(shiibaCoeffs, phi1, gridSize=25, name="",
            key="Shiiba vector field", title="UpWind Scheme"):
    """Alias for getShiibaVectorField."""
    return getShiibaVectorField(shiibaCoeffs, phi1, gridSize, name, key, title)
def convert2(shiibaCoeffs, phi1, cmap='Spectral'):
    """Alias for getShiibaSourceField."""
    return getShiibaSourceField(shiibaCoeffs=shiibaCoeffs, phi1=phi1, cmap=cmap)
def showShiibaVectorField(phi0, phi1):
    """Run the upwind regression, display its vector field, and return
    the coefficients and R^2."""
    coeffs, rsq = upWind(phi0, phi1)
    field = getShiibaVectorField(coeffs, phi0)
    field.show()
    return coeffs, rsq
#def shiiba(phi0, phi1, convergenceMark = 0.00001):
# ### a pointer for the moment
# ###
# return upWind(phi0, phi1, convergenceMark = 0.00001)
def shiiba(phi0, phi1):
    """Alias: regress, show the vector field, return (coeffs, R^2)."""
    return showShiibaVectorField(phi0, phi1)
def shiibaNonCFL(phi0, phi1, mask=None, windowHeight=5, windowWidth=5,\
convergenceMark=0.0000001,):
""" to find the shiiba coeffs without the CFL condition
plan:
to shift and regress, minimising the average R^2
"""
#parameters
verbose = phi0.verbose or phi1.verbose
#0. initialise a matrix for the r^2
#1. put the mask on phi0
#2. roll back phi1 by (m,n); per our convention, internal stuff we use (i,j), not (x,y); i=y, j=x
R2s = {} #dict to record the R^2s
ShiibaCoeffs = {} #dict to record the shiiba coeffs
if mask!=None:
phi0.matrix.mask = mask # put the mask on phi0
for m in range(-(windowHeight-1)/2, (windowHeight+1)/2):
for n in range(-(windowWidth-1)/2, (windowWidth+1)/2):
phi1_temp = phi1.shiftMatrix(m,n)
[c1, c2, c3, c4, c5, c6,c7,c8,c9], R2 = upWind(phi0=phi0, phi1=phi1_temp,\
convergenceMark=convergenceMark)
R2s [(m,n)] = R2
ShiibaCoeffs[(m,n)] = [c1, c2, c3, c4, c5, c6,c7,c8,c9]
if phi0.verbose and phi1.verbose:
print "\n-++++++++++++++++++++++++++++-\n(m,n), [c1,c2,c3,c4,c5,c6,..c9], R2 = \n",\
(m,n), [c1,c2,c3,c4,c5,c6,c7,c8,c9], R2
#getting the (m,n) for max(R2)
(m, n) = max(R2s, key=R2s.get)
if verbose:
print "\nfor the given mask, \nMax R2:+++++++++++++++++++++++++++-\n",\
"(m,n), [c1,c2,c3,c4,c5,c6,..,c9], R2 = \n", (m,n), [c1,c2,c3,c4,c5,c6,c7,c8,c9], R2
return (m,n), ShiibaCoeffs, R2s
def interpolation():
    """Stub: interpolate after movements (translation, advection, rotation, etc).

    Planned scheme:
    estimate phi1^(x,y) = sum_{s=x-1,x,x+1; t=y-1,y,y+1} H(s-x_pullback)*H(t-y_pullback)*phi0(s,t)
    where H = weight function: H(x)=x cut-off and levelled at two ends 0,1
          _
    H = _/
    """
    # not implemented yet
    pass
def semiLagrangeAdvect():
    """Stub: compute the semi-Lagrangian advection of a grid, given a
    velocity field.
    """
    # not implemented yet
    pass
| {
"repo_name": "yaukwankiu/armor",
"path": "shiiba/regression2.py",
"copies": "1",
"size": "21811",
"license": "cc0-1.0",
"hash": 1406079854444242400,
"line_mean": 41.269379845,
"line_max": 145,
"alpha_frac": 0.4996102884,
"autogenerated": false,
"ratio": 3.15643994211288,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.415605023051288,
"avg_score": null,
"num_lines": null
} |
# armor/spectral/powerSpec1.py
# migrated from armor/test/
# 2014-06-17
# powerSpec1.py
# test script for computing power spectrum
# 2014-06-10
"""
== Spectral analysis ==
0. RADAR domain -> normalise to WRF domain
tests to do -
1. average each 4x4 grid in RADAR then compare the spectrum of the resulting image
to the original RADAR image
2. filter (gaussian with various sigmas) and then averge each 4x4 grid
3. oversampling (compute 4x4 averages 16 times)
4. plot power spec for WRF and various preprocessings
A. WRF + RADAR/4x4 normalised (with or without oversampling)/no pre-filtering
B. WRF + RADAR/4x4 normalised (with or without oversampling)/pre-filter 1,2,3...
(unspecified/trial and error)
C. RADAR/normalise/no filtering + RADAR/normalised/pre-filtered 1,2,3...
+ difference
D. test successive gaussian filtering - is the result the same as doing it once
with a variance equal to the sum of variances?
USE
from armor.tests import powerSpec1 as ps
from armor import pattern
from armor import objects4 as ob
from armor import defaultParameters as dp
import numpy as np
import matplotlib.pyplot as plt
reload(ps); a.LOGspec = ps.testA(dbzList=ob.kongrey)
reload(ps); a.LOGspec = ps.testAwrf(dbzList=ob.kongreywrf)
"""
# imports
import pickle, os, shutil, time
from armor import defaultParameters as dp
from armor import pattern
from armor import objects4 as ob
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from scipy import ndimage
from scipy import signal
# ---- module-level handles and run configuration for the power-spectrum test ----
dbz=pattern.DBZ                     # convenience alias for the DBZ class
root = dp.rootFolder
timeString = str(int(time.time()))  # timestamp naming this run's output folder
# patch up the WRF data-set objects before use (side effect at import time)
ob.march2014wrf.fix()
ob.kongreywrf.fix()
###############################################################################
# defining the parameters
thisScript = "powerSpec1.py"
testName = "powerSpec1"
scriptFolder = root + "python/armor/tests/"
outputFolder = root + "labLogs/powerSpec1/" + timeString + "/"
sigmaPreprocessing=20
thresPreprocessing=0
radarLL = np.array([18., 115.]) # lat/longitude of the lower left corner for radar data grids
wrfLL = np.array([20.,117.5])   # lower-left corner of the WRF grid
wrfGrid = np.array([150,140])   # WRF grid dimensions
radarGrid=np.array([881,921])   # radar grid dimensions
wrfGridSize = 0.05 #degrees
radarGridSize=0.0125            #degrees
radar_wrf_grid_ratio = wrfGridSize / radarGridSize   # radar cells per WRF cell
#sigmas = [1, 2, 4, 5, 8 ,10 ,16, 20, 32, 40, 64, 80, 128, 160, 256,]
sigmas = [1, 2, 4, 5, 8 ,10 ,16, 20, 32, 40, 64, 80, 128]
bins=[0.01, 0.03, 0.1, 0.3, 1., 3., 10., 30.,100.]   # histogram bins for the 3d plots
#scaleSpacePower = 2 # <-- edit here
scaleSpacePower = 0 # <-- edit here
dbzList = ob.kongrey
############################################################################
# setting up the output folder (side effect at import time)
if not os.path.exists(outputFolder):
    os.makedirs(outputFolder)
if __name__ == "__main__":
    # keep a timestamped copy of this script next to its outputs
    shutil.copyfile(scriptFolder+thisScript, outputFolder+ str(int(time.time()))+thisScript)
# defining the functions:
# filtering, averaging, oversampling
def filtering(a, sigma=sigmaPreprocessing):
    """Smooth a.matrix in place with a Gaussian of the given sigma."""
    smoothed = a.gaussianFilter(sigma=sigma)
    a.matrix = smoothed.matrix
def averaging(a, starting=(0,0)):
"""4x4 to 1x1 averaging
oversampling 4x4 to 1x1 avaraging with various starting points"""
starting = (wrfLL - radarLL)/radarGridSize + starting
ending = starting + wrfGrid * radar_wrf_grid_ratio
mask = 1./16 * np.ones((4,4))
a1 = a.copy()
a1.matrix = signal.convolve2d(a1.matrix, mask, mode='same') #http://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.convolve2d.html
a1.matrix = a1.matrix[starting[0]:ending[0]:radar_wrf_grid_ratio,
starting[1]:ending[1]:radar_wrf_grid_ratio,
]
a1.matrix=np.ma.array(a1.matrix)
print 'starting, ending:',starting, ending #debug
return a1
def oversampling():
    """Stub: use averaging() to perform sampling.

    oversampling 4x4 to 1x1 averaging with various starting points
    and then average/compare."""
    # not implemented yet
    pass
def getLaplacianOfGaussianSpectrum(a, sigmas=sigmas, thres=thresPreprocessing, outputFolder=outputFolder,
                                   #spectrumType="numerical", #2014-06-23
                                   useLogScale= False, #2014-06-23
                                   responseThreshold=0.01 , #2014-06-23
                                   scaleSpacePower=scaleSpacePower, # 2014-06-24
                                   tipSideUp = True, #2014-06-24
                                   useOnlyPointsWithSignals=True, #2014-06-26
                                   toReload=True,
                                   toDumpResponseImages=True,
                                   bins = bins,
                                   ):
    """Laplacian-of-Gaussian scale-space analysis of DBZ object *a*.

    For each sigma in *sigmas*, filters a.matrix with a (sign-flipped if
    tipSideUp) gaussian_laplace kernel scaled by sigma**scaleSpacePower,
    masks weak responses, and stacks the response images.  From that
    stack it derives:
      a.LOGspec      -- per-pixel sigma of maximal response ("numerical spectrum")
      a.LOGtotalSpec -- total response per sigma
    plus histograms, 3d surface plots and pickled dumps in *outputFolder*.
    Returns a dict with the max spectrum, response stack and XYZ plot data.

    NOTE(review): indentation here was reconstructed from a
    whitespace-mangled copy; loop boundaries flagged below should be
    confirmed against the original file.
    """
    shutil.copyfile(scriptFolder+thisScript, outputFolder+ str(int(time.time())) +thisScript) #2014-06-25
    L=[]
    a.responseImages=[]
    if toReload:
        a.load()
    a.backupMatrix(0)
    for sigma in sigmas:
        print "sigma:", sigma
        a.restoreMatrix(0)
        a.setThreshold(thres)
        arr0 = a.matrix
        ###################################
        # key line: signed, scale-normalised LOG response
        arr1 = (-1) ** tipSideUp * \
               ndimage.filters.gaussian_laplace(arr0, sigma=sigma, mode="constant", cval=0.0) *\
               sigma**scaleSpacePower # #2014-06-25
        #
        ###################################
        a1 = dbz(matrix=arr1.real, name=a.name + "_" + testName + "_sigma" + str(sigma))
        L.append({ 'sigma' : sigma,
                   'a1' : a1,
                   'abssum1': abs(a1.matrix).sum(),
                   'sum1' : a1.matrix.sum(),
                   })
        print "abs sum", abs(a1.matrix.sum())
        #a1.show()
        #a2.show()
        plt.close()
        #a1.histogram(display=False, outputPath=outputFolder+a1.name+"_histogram.png")
        ##########################################
        # key - to set up an appropriate mask to cut out unwanted response signals
        # 2014-06-24
        mask = (arr1 < responseThreshold) #2014-06-24
        #mask += (ndimage.filters.median_filter(a.matrix,4)>0) #2014-06-25
        #mask += (arr1>10000)
        if useOnlyPointsWithSignals:
            mask += (a.matrix <=0)
        arr1 = np.ma.array(arr1, mask=mask, fill_value=0)
        #print "useOnlyPointsWithSignals", useOnlyPointsWithSignals
        #plt.imshow(arr1) ; plt.show() #debug
        #
        ############################################
        ###############################################################################
        # computing the spectrum, i.e. sigma for which the LOG has max response
        # 2014-05-02
        a.responseImages.append({'sigma' : sigma,
                                 'matrix' : arr1 * sigma**scaleSpacePower,
                                 })
    # end of the per-sigma loop -- NOTE(review): boundary reconstructed
    a.restoreMatrix(0)
    if toDumpResponseImages:
        pickle.dump(a.responseImages, open(outputFolder+ str(time.time()) +a.name+"responseImagesList.pydump",'w'))
    responseImages0 = a.responseImages
    #####################################
    #debug
    #print "a.responseImages", a.responseImages
    #print type(a.responseImages)
    logString = "sigmas:\n" + str(sigmas)
    logString += "DBZ object: " + a.name + "\n"
    logString += "a.matrix.shape = " + str(a.matrix.shape) + '\n'
    logString += "\na.responseImages: number of nonzero elements along each layer: \n"
    logString += str([(a.responseImages[v]['matrix']>0).sum() for v in range(len(sigmas)) ])
    open(outputFolder + str(time.time()) +'log.txt','w').write(logString)
    #
    ######################################
    # a1 below is the last sigma layer's DBZ object left over from the loop
    a.LOGspec = dbz(name= a.name + "Laplacian-of-Gaussian_numerical_spectrum",
                    imagePath=outputFolder+ str(time.time())+a1.name+"_LOG_numerical_spec.png",
                    outputPath = outputFolder+a1.name+"_LOG_numerical_spec.dat",
                    cmap = 'jet',
                    coastDataPath = a.coastDataPath
                    )
    a.responseImages = np.ma.dstack([v['matrix'] for v in a.responseImages])
    a.responseMax = a.responseImages.max(axis=2) # find max along the deepest dimension
    a.responseMax = np.ma.array(a.responseMax, mask = 0)
    a.responseMax.mask += (a.responseMax <responseThreshold)
    a.maxSpec = a.LOGspec
    if useLogScale:
        aResponseMax = np.log10(a.responseMax)
    else:
        aResponseMax = a.responseMax
    aResponseMax = np.ma.array(aResponseMax)
    #aResponseMax.mask = 0
    vmax = aResponseMax.max()
    vmin = aResponseMax.min()
    print "vmax, vmin for ", a.name, ":", vmax, vmin
    #try:
    #    a.drawCoast(matrix=aResponseMax)
    #except:
    #    pass
    a.saveImage(imagePath=outputFolder+ str(time.time()) + a.name+"LOG_max_response.png",
                matrix =aResponseMax,
                title=a.name+" Max Responses of L-O-G filter",
                vmax = vmax, vmin=vmin,
                cmap='jet')
    #a.restoreMatrix('goodcopy')
    # numerical spectrum: per pixel, sum the sigmas whose layer attains the max
    a.LOGspec.matrix = np.zeros(a.matrix.shape)
    for count, sigma in enumerate(sigmas):
        a.LOGspec.matrix += sigma * (a.responseMax == a.responseImages.filled()[:,:,count])
    mask = (a.LOGspec.matrix ==0)
    a.LOGspec.matrix = np.ma.array(a.LOGspec.matrix, mask=mask)
    #else:
    a.LOGtotalSpec = a.responseImages.sum(axis=0).sum(axis=0) # leaving the deepest dimension -the sigmas
    # end numerical spec / total spec fork
    ###
    if useLogScale:
        a.LOGspec.matrix = np.log10(a.LOGspec.matrix)
        a.LOGtotalSpec = np.log10(a.LOGtotalSpec)
    a.LOGspec.setMaxMin()
    ##########################################
    # 2014-06-24
    #mask = (a.LOGspec.matrix <=0.001)
    #a.LOGspec.matrix = np.ma.array(a.LOGspec.matrix, mask=mask, fill_value=-999.)
    #
    ##########################################
    pickle.dump(a.LOGspec, open(outputFolder+ str(time.time()) + a.LOGspec.name + ".pydump","w"))
    print a.LOGspec.outputPath
    print "saving to:", a.LOGspec.imagePath
    a.LOGspec.backupMatrix('goodCopy')
    #try:
    #    a.LOGspec.drawCoast()
    #except:
    #    pass
    print "saving a.LOGspec image to", a.LOGspec.imagePath
    a.LOGspec.saveImage()
    a.LOGspec.restoreMatrix('goodCopy')
    a.LOGspec.saveMatrix()
    a.LOGspec.histogram(display=False, matrix=a.LOGspec.matrix, outputPath=outputFolder+ str(time.time()) + a1.name+\
                        "_LOGspec_numerical" + ("_logScale" * useLogScale) + "_histogram.png")
    plt.close()
    plt.plot(sigmas, a.LOGtotalSpec) # plot(xs, ys)
    plt.title(a.name+" Total Spectrum for the L-O-G Kernel")
    plt.savefig(outputFolder + str(time.time()) +a.name + "_LOGspec_total"+ \
                ("_logScale" * useLogScale) + "_histogram.png")
    pickle.dump(a.LOGtotalSpec, open(outputFolder+ str(time.time()) +a.name + "LOGtotalSpec.pydump","w"))
    #a.LOGtotalSpec = dbz(matrix = a.LOGtotalSpec,
    #                     name= a.name + "Laplacian-of-Gaussian_total_spectrum",
    #                     imagePath=outputFolder+a1.name+"_LOG_total_spec.png",
    #                     outputPath = outputFolder+a1.name+"_LOG_total_spec.dat",
    #                     cmap = 'jet',
    #                     coastDataPath = a.coastDataPath
    #                     ) #2014-07-04
    ########################################################
    # 3d plots
    # 1. total/full spec
    # 2. max spec
    # 2014-06-27
    #bins=[0., 0.01, 0.03, 0.1, 0.3, 1., 3., 10., 30.,100.]
    #bins=[0.003, 0.01, 0.03, 0.1, 0.3, 1., 3., 10., 30.,100.]
    #bins=[0.008, 0.01, 0.03, 0.1, 0.3, 1., 3., 10., 30.,100.]
    #bins=[0.01, 0.03, 0.1, 0.3, 1., 3., 10., 30.,100.]
    dataSource = a.name
    #responseImages = pickle.load(open(outputFolder+a.name+"responseImagesList.pydump")) #load it back up
    responseImages = responseImages0
    X, Y = np.meshgrid(range(len(bins)-1), sigmas)
    I, J = Y, X
    Z = np.zeros(X.shape)
    z = np.zeros(X.shape)
    logString = "j sigma \t M.min() \t M.max()\n"
    for j in range(len(responseImages)):
        M = responseImages[j]['matrix']
        #M = M*(M>0)
        sigma = responseImages[j]['sigma']
        logString +=str(j) + " " + str(sigma)+ '\t'+str( M.min())+ '\t'+str( M.max()) +'\n'
        h = np.histogram(M, bins=bins )
        z[j,:] = h[0]
    open(outputFolder+str(time.time())+ \
         'totalSpec' + a.name+ \
         '.log.txt','w').write(logString)
    print logString
    print ".... saved to ", outputFolder
    Z +=z
    XYZ = {"X": X, "Y":Y, "Z":Z}
    pickle.dump(XYZ, open(outputFolder+str(time.time())+a.name+'XYZ.pydump','w'))
    plt.close()
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(X, np.log2(Y), np.log10(Z), rstride=1, cstride=1) #key line
    plt.title(dataSource+ " DBZ images\n"+\
              "x-axis: response intensity(from 0 to 20)\n"+\
              "y-axis: log_2(sigma)\n"+\
              "z-axis: log_10(count)\n")
    plt.xlabel('response intensity')
    plt.ylabel('log2(sigma)')
    # saving
    fig.savefig(outputFolder+ str(time.time())+a.name+"3d_numspec_plot_log2scale.png", dpi=200)
    ###############################################################
    # max spec
    #bins=[0.00000000001, 0.01, 0.03, 0.1, 0.3, 1., 3., 10., 30.,100.]
    #dataSource = a.name
    #responseImages = pickle.load(open(outputFolder+a.name+"responseImagesList.pydump")) #load it back up
    X, Y = np.meshgrid(range(len(bins)-1), sigmas)
    I, J = Y, X
    Z = np.zeros(X.shape)
    z = np.zeros(X.shape)
    logString = "j sigma \t M.min() \t M.max()\n"
    for j in range(len(responseImages)):
        M = responseImages[j]['matrix']
        sigma = responseImages[j]['sigma']
        # keep only positive responses at pixels whose max-response sigma is this layer
        M = M*(M>0)*(a.maxSpec.matrix==sigma)
        logString +=str(j) + " " + str(sigma)+ '\t'+str( M.min())+ '\t'+str( M.max()) +'\n'
        h = np.histogram(M, bins=bins )
        z[j,:] = h[0]
    open(outputFolder+str(time.time())+ \
         'maxSpec' + a.name+ \
         '.log.txt','w').write(logString)
    print logString
    print ".... saved to ", outputFolder
    Z +=z
    XYZ2 = {"X": X, "Y":Y, "Z":Z}
    pickle.dump(XYZ2, open(outputFolder+str(time.time())+a.name+'XYZmax.pydump','w'))
    # end 3d plots
    #########################################################
    return {'maxSpec' : a.maxSpec,
            'responseMax' : a.responseMax,
            'XYZtotal' : XYZ,
            'XYZmax' :XYZ2,
            'responseImages' : a.responseImages,
            'sigmas' : sigmas,
            'bins' : bins,
            }
def plotting(folder):
    """Stub: plot the results found in *folder*; not implemented yet."""
    pass
# defining the workflows
# testA, testB, testC, testD
def testA(dbzList=ob.march2014, sigmas=sigmas):
    """Workflow A: threshold, 4x4-average, filter, then LOG spectrum
    for every DBZ object in *dbzList*."""
    for frame in dbzList:
        frame.load()
        frame.matrix = frame.threshold(thresPreprocessing).matrix
        averaged = averaging(frame)
        filtering(averaged)
        frame.LOGspec = getLaplacianOfGaussianSpectrum(averaged, sigmas=sigmas)
    #return a.LOGspec
#def testAwrf(dbzList=ob.kongreywrf, sigmas=sigmas):
def testAwrf(dbzList=ob.march2014wrf, sigmas=sigmas):
    """Workflow A for WRF data: as testA, but WRF frames are already on
    the target grid, so the 4x4 averaging step is skipped."""
    for frame in dbzList:
        frame.load()
        frame.matrix = frame.threshold(thresPreprocessing).matrix
        #frame = averaging(frame)   # not needed for WRF
        filtering(frame)
        frame.LOGspec = getLaplacianOfGaussianSpectrum(frame, sigmas=sigmas)
    #return a.LOGspec
def testB():
    '''
    Stub: oversampling workflow (see module docstring, item 3);
    not implemented yet.
    '''
    pass
def testC():
    """Stub: workflow C (see module docstring); not implemented yet."""
    pass
def testD():
    """Stub: workflow D (see module docstring); not implemented yet."""
    pass
### loading /setting up the objects ################################
## old type
# kongrey -- module-level aliases for the shared data-set objects
kongrey = ob.kongrey
kongreywrf = ob.kongreywrf
# march2014
march2014 = ob.march2014
march2014wrf= ob.march2014wrf
# may2014
## new type
# may2014
# run
| {
"repo_name": "yaukwankiu/armor",
"path": "spectral/powerSpec1.py",
"copies": "1",
"size": "16676",
"license": "cc0-1.0",
"hash": -472652363812750900,
"line_mean": 35.9,
"line_max": 146,
"alpha_frac": 0.535080355,
"autogenerated": false,
"ratio": 3.368888888888889,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4403969243888889,
"avg_score": null,
"num_lines": null
} |
# armor.texture.analysis.py
# should work automatically
"""
script to analyse the texture and perform segmentation, given radar data
pipeline:
1. dbz radar data -> armor.DBZ object
2. (filter bank) -> feature layers
3. (clustering) -> texture layers
4. (neighbour aggregation) -> segmentation
== USE ==
cd /media/Seagate\ Expansion\ Drive/ARMOR/python
python
from armor.texture import analysis
reload(analysis)
analysis.main()
"""
######################################################
# 0. imports and setups
import time
import pickle
import os
import numpy as np
import matplotlib.pyplot as plt # just in case
timestamp = int(time.time())            # run identifier for the feature output folder
NumberOfOrientations = 5                # gabor filter-bank orientations
scales = [1, 2, 4, 8, 16, 32, 64, 128]  # gabor filter-bank scales
featureFolder = "armor/texture/gaborFeatures%d/" % timestamp
k = 72                                  # number of k-means clusters
textureFolder = "armor/texture/textureLayers%d/" % k
######################################################
# 1. dbz radar data -> armor.DBZ object
from armor import pattern
a = pattern.a                           # default DBZ object supplied by the package
######################################################
# 2. (filter bank) -> feature layers
# outputs to be found in: featureFolder
def computeFeatures(a=a,scales=scales, NumberOfOrientations = NumberOfOrientations,
                    outputFolder=featureFolder, memoryProblem=True):
    """Run the gabor filter bank on *a*; feature layers go to *outputFolder*.

    Returns the feature vector field from armor.filter.gabor.main.
    """
    from armor.filter import gabor
    # bug fix: forward the caller's outputFolder/memoryProblem instead of
    # the previously hard-coded featureFolder/True
    fvf = gabor.main(a, scales=scales, NumberOfOrientations=NumberOfOrientations,
                     outputFolder=outputFolder, memoryProblem=memoryProblem)
    return fvf
######################################################
# 3. (clustering) -> texture layers
############
# reading the feature vector field
def load(folder=featureFolder):
# or /media/Seagate Expansion Drive/ARMOR/python/armor/filter/gaborFeatures1369996409
t0 = time.time()
pydumpList = [fl for fl in os.listdir(folder) if fl[-7:]==".pydump"]
print '\n'.join(pydumpList)
if len(pydumpList) ==1:
d = pickle.load(open(folder+pydumpList[0],'r'))
data = d['content']
else:
# initialise
d = pickle.load(open(folder+pydumpList[0],'r'))
data = np.zeros((d.shape[0], d.shape[1], len(pydumpList)))
data[:,:,0] = d
print "array size:", (d.shape[0], d.shape[1], len(pydumpList))
for i in range(1,len(pydumpList)):
data[:,:,i] = pickle.load(open(folder+pydumpList[i],'r'))
timespent = time.time()-t0; print "time spent:",timespent
return data
############
# performing the clustering
def computeClustering(data, textureFolder=textureFolder):
outputFolder=textureFolder #self reminding alias
height, width, depth = data.shape
data = data.reshape(height*width, depth)
clust = kmeans2(data=data, k=k, iter=10, thresh=1e-05,\
minit='random', missing='warn')
# output to textureFolder
os.makedirs(textureFolder)
texturelayer= []
for i in range(k):
print i
texturelayer.append( (clust[1]==i).reshape(881,921) )
#plt.imshow(cluster[i])
#plt.show()
if texturelayer[i].sum()==0:
continue
pic = dbz( name='texture layer'+str(i),
matrix= np.flipud(texturelayer[i]), vmin=-2, vmax=1,
imagePath= textureFolder+ '/texturelayer'+ str(i) + '.png')
#pic.show()
pic.saveImage()
timespent= time.time()-t0; print "time spent:",timespent
pickle.dump({'content':texturelayer, 'notes':"%d texture layers from 'armor/filter/gaborFilterVectorField.pydump' " %k}, open(textureFolder+'/texturelayer.pydump','w'))
return texturelayer
######################################################
# 4. (neighbour aggregation) -> segmentation
def computeSegmentation(texturelayer):
    """Aggregate the texture layers into segments.

    Dilates each layer with a disc of radius 3, correlates the
    thickened layers pairwise, and groups layer pairs whose correlation
    exceeds 0.5 / 0.6 / 0.8.  Returns a dict of the three
    matched-layer lists.

    NOTE(review): this function references `clust` and `dbz`, neither of
    which is defined in this module (clust is local to
    computeClustering) -- as written it raises NameError; confirm
    against the original armor/texture sources.  Indentation below was
    reconstructed from a whitespace-mangled copy.
    """
    from armor.geometry import morphology as morph
    disc = morph.disc
    dilate = morph.dilate
    plt.imshow(texturelayer[20])
    plt.show()
    outputFolder = textureFolder + 'thick/'
    os.makedirs(outputFolder)
    t0=time.time()
    texturelayer_thick = []
    for i in range(k):
        thislayer = (clust[1]==i).reshape(881,921)  # NOTE(review): clust undefined here
        print i, '\tall / interior sums:',
        print thislayer.sum(), thislayer[20:-20,20:-20].sum()
        #if thislayer[50:-50,50:-50].sum() < 40: # only layer 53 is missing. should be ok
        #    continue
        #if thislayer.sum() < 3000: # an arbitrary threshold, first consider the bigger ones
        #    continue
        layer_thick = dilate(M=thislayer, neighbourhood = disc(3)) ## <--- dilation with disc of radius 3
        texturelayer_thick.append( layer_thick )
        #plt.imshow(cluster[i])
        #plt.show()
        # only those with values away from the border
        pic = dbz( name='texture layer %d thicked' %i,  # NOTE(review): dbz undefined here
                   matrix= np.flipud(layer_thick), vmin=-2, vmax=1,
                   imagePath= outputFolder+ '/texturelayer_thick'+ str(i) + '.png')
        #pic.show()
        pic.saveImage()
    print 'time spent:', time.time()-t0
    pickle.dump({'content':texturelayer_thick, 'notes':"%d texture layers from 'armor/filter/gaborFilterVectorField.pydump' " %k}, open(outputFolder+'/texturelayer_thick.pydump','w'))
    ########
    # segment via intersections of various texture layers
    # i. compute correlations between layers with thickened texture layers
    # ii. grouping
    #######
    # computing the correlations of thickened textures
    import numpy.ma as ma
    corr_matrix = ma.ones((k,k)) * (-999.)
    corr_matrix.mask = True
    corr_matrix.fill_value=-999.
    for i in range(len(texturelayer)):
        print '||||||\n||'
        for j in range(len(texturelayer)):
            layer1 = texturelayer_thick[i]
            layer2 = texturelayer_thick[j]
            # normalised cross-correlation of the two boolean layers
            corr_matrix[i,j] = (layer1*layer2).sum() / (layer1.sum() * layer2.sum())**.5
            print '||', i,',', j,',', corr_matrix[i,j],
    ###########
    # grouping: collect pairs exceeding three correlation thresholds
    matchedlayers1 =[]
    matchedlayers2 =[]
    matchedlayers3 =[]
    for i in range(k):
        for j in range(k):
            if i==j:
                continue
            if corr_matrix[i,j]>0.5:
                matchedlayers1.append((i,j))
            if corr_matrix[i,j]>0.6:
                matchedlayers2.append((i,j))
            if corr_matrix[i,j]>0.8:
                matchedlayers3.append((i,j))
    print matchedlayers1
    print matchedlayers2
    print matchedlayers3
    combinedtextureregions={}
    matchedlayers= matchedlayers1 #choosing one of the above
    """
    for L in set(v[0] for v in matchedlayers):
        L_partners = [v[1] for v in matchedlayers if v[0] == L]
        tt = texturelayer[L]
        print L, L_partners
        for j in L_partners:
            tt += texturelayer[j]
        combinedtextureregions[L] = tt
        plt.imshow(tt)
        plt.show()
    """
    return {"matchedlayers1":matchedlayers1,
            "matchedlayers2":matchedlayers2,
            "matchedlayers3":matchedlayers3,
            }
#####################################################
# run it all
def main(a=a, scales=scales, NumberOfOrientations = NumberOfOrientations,
         memoryProblem=True):
    """Run the texture pipeline: (features ->) clustering -> segmentation."""
    # feature computation temporarily switched off after debugging - 2013-6-4
    #fvf = computeFeatures(a=a,scales=scales, NumberOfOrientations = NumberOfOrientations,
    #                      outputFolder=featureFolder, memoryProblem=memoryProblem)
    #
    features = load(featureFolder)
    layers = computeClustering(data=features, textureFolder=textureFolder)
    segments = computeSegmentation(texturelayer=layers)
    return {'texturelayer': layers,
            'segmentation': segments}
if __name__=='__main__':
    # run the whole pipeline when invoked as a script
    main()
| {
"repo_name": "yaukwankiu/armor",
"path": "texture/analysis_0.py",
"copies": "1",
"size": "7735",
"license": "cc0-1.0",
"hash": 2358009601262023000,
"line_mean": 34.3196347032,
"line_max": 183,
"alpha_frac": 0.5903038138,
"autogenerated": false,
"ratio": 3.678078934854969,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4768382748654969,
"avg_score": null,
"num_lines": null
} |
""" ArnoldC -> Python translator
This file includes abstract model of blocks and statements.
"""
import reserved_words as rword
#Abstract syntax model
#---------------------
class Runnables(object):
    """Abstract base for runnable blocks and statements.

    Subclasses must override both the constructor and
    get_parsed_structure; calling either here raises
    NotImplementedError.
    """
    def __init__(self):
        raise NotImplementedError
    def get_parsed_structure(self):
        raise NotImplementedError
class Block(Runnables):
    """Shared behaviour for nestable blocks (Main, If and While)."""
    def __init__(self):
        self.child = []
    def add_child(self, child):
        """Append *child* and hand it back so the caller can descend into it."""
        self.child.append(child)
        return child
class Statement(Runnables):
    """Marker base class for leaf statements (no children)."""
    def __init__(self):
        pass
#Concrete blocks/statements model
#--------------------------------
class Main(Block):
    """The top-level Main method block."""
    def __init__(self):
        super().__init__()
    def get_parsed_structure(self):
        """Render all children; nested blocks are indented one level.

        Bug fix: the old check `type(i) in [type(If), type(While)]`
        compared against the metaclass `type` and never matched block
        instances, so nested If/While were rendered at nest level 0.
        """
        s = ""
        for i in self.child:
            if isinstance(i, (If, While)):
                s += i.get_parsed_structure(nest_lv=1)
            else:
                s += "".join([i.get_parsed_structure(), "\n"])
        # Collapse trailing blank lines; endswith is safe on short/empty
        # output, where the old (s[-1], s[-2]) check raised IndexError.
        while s.endswith("\n\n"):
            s = s[:-1]
        return s
class If(Block):
    """If block; an optional "else" marker in child splits the branches."""
    def __init__(self, exp):
        super().__init__()
        self.value = exp
    def add_else(self):
        """Record the start of the else-branch."""
        self.child.append("else")
    def has_else(self):
        return "else" in self.child
    def get_parsed_structure(self, nest_lv=0):
        """Render the if/else at indentation level *nest_lv*."""
        s = "".join([" " * nest_lv, "if %s:\n" % GetEvalExpression(self.value)])
        for i in self.child:
            if i == "else":
                s += "".join([" " * nest_lv, "else:\n"])
            # idiom fix: isinstance instead of comparing against the
            # types of throwaway If("")/While("") instances
            elif isinstance(i, (If, While)):
                s += i.get_parsed_structure(nest_lv=nest_lv+1)
            else:
                s += "".join([" " * (nest_lv+1), i.get_parsed_structure(), "\n"])
        return s
class While(Block):
    """While block."""
    def __init__(self, exp):
        super().__init__()
        self.value = exp
    def get_parsed_structure(self, nest_lv=0):
        """Render the loop at indentation level *nest_lv*."""
        s = "".join([" " * nest_lv, "while %s:\n" % GetEvalExpression(self.value)])
        for i in self.child:
            # idiom fix: isinstance instead of comparing against the
            # types of throwaway If("")/While("") instances
            if isinstance(i, (If, While)):
                s += i.get_parsed_structure(nest_lv=nest_lv+1)
            else:
                s += "".join([" " * (nest_lv+1), i.get_parsed_structure(), "\n"])
        return s
class Print(Statement):
    """A print(...) statement wrapping a raw source string."""
    def __init__(self, string):
        self.string = string
    def get_parsed_structure(self):
        return "print(%s)" % self.string
class DeclaringVariable(Statement):
    """A variable declaration: name = value."""
    def __init__(self, name, value):
        self.name, self.value = name, value
    def get_parsed_structure(self):
        return "%s = %s" % (self.name, self.value)
class Expression(Statement):
    """Builds the (left-folded, fully parenthesised) right-hand side of
    an assignment.

    Inherits Statement but is not itself a statement: AssigningValue
    composes it to render its equation.
    """
    def __init__(self, args, operations):
        self.args = args
        self.operations = operations
        # Pad with a leading no-op so the first argument folds in
        # without an operator in front of it.
        self.operations.insert(0, "")
    def get_parsed_structure(self):
        s = ""
        for op, arg in zip(self.operations, self.args):
            s = "(%s%s%s)" % (s, op, arg)
        return s
class AssigningValue(Statement):
    """A value-assignment statement: name = <expression>.

    Delegates the right-hand side to the Expression class.
    """
    def __init__(self, name, args, operations):
        self.name = name
        self.exp = Expression(args, operations)
    def get_parsed_structure(self):
        return "%s = %s" % (self.name, self.exp.get_parsed_structure())
#Functions for syntax analysis
#-----------------------------
def GetOprAndArgs(l):
    """Extract the operation and its arguments from line *l*.

    Matches the line's token set against every reserved phrase; when all
    of a phrase's words are present, the operation is taken to be the
    leading tokens of *l*, with the remainder as the argument tail.
    Returns (operation, argument) as strings.  An unmatched line -- or a
    line that is exactly an operation with no arguments -- comes back as
    (whole line, "<NONE>"); the dispatcher in Translate relies on that
    fallback for zero-argument operations.
    """
    r = rword.ReservedWords()
    lsp = set(l.split())
    opr = ""
    for i in r.word.values():
        isp = set(i.split())
        if lsp & isp == isp:
            # the argument tail has len(lsp - isp) tokens.
            # NOTE(review): when the line is exactly the phrase this
            # slice is [:-0] == empty, so opr stays "" and we take the
            # "<NONE>" return below -- intentional fallthrough.
            opr = " ".join( l.split()[:-len(lsp - isp)] )
    if opr == "":
        return " ".join(l.split()), "<NONE>"
    arg = " ".join( l.split()[len(opr.split()):] )
    return opr, arg
def GetEndOfBlock(code, end_op):
    """Return the index of the first line in *code* containing *end_op*.

    Returns -1 when no line matches.  (Idiom/perf fix: the original
    re-scanned the list with code.index(i); enumerate yields the
    position directly.)
    """
    for idx, line in enumerate(code):
        if end_op in line:
            return idx
    return -1
def GetArithmeticMembers(code, operator):
    """Collect the operators and arguments used in an equation body.

    Each line is split into a leading operation phrase and a final
    argument token; lines whose phrase appears in the *operator* mapping
    contribute (mapped operator, argument) to the results.
    Returns (op_list, arg_list).
    """
    op_list, arg_list = [], []
    for line in code:
        tokens = line.split()
        phrase = " ".join(tokens[:-1])
        tail = tokens[-1]
        if phrase in operator:
            op_list.append(operator[phrase])
            arg_list.append(tail)
    return op_list, arg_list
def ReplaceMacros(code):
    """Expand the reserved macro words for 1 and 0 into numerals."""
    w = rword.ReservedWords()
    for macro, literal in ((w.word["1"], "1"), (w.word["0"], "0")):
        code = code.replace(macro, literal)
    return code
def GetEvalExpression(value):
    """Wrap *value* in the generated truth test.

    Real bools pass through untouched; any other value is evaluated
    as *value* > 0, matching ArnoldC's evaluation rule.
    """
    template = "(%s if type(%s) == type(bool()) else %s > 0)"
    return template % (value, value, value)
#Main translator function
#------------------------
def Translate(inp, debug=False):
    """Translate the ArnoldC code read from file object *inp* into Python.

    Walks the lines with a program counter, maintaining *ptr* (the block
    currently receiving children) and *stack* (the enclosing blocks).
    Returns the generated Python source once the end of the Main method
    is reached; raises rword.WhatTheFuckDidIDoWrong on malformed input.
    NOTE(review): indentation reconstructed from a whitespace-mangled
    copy.
    """
    code = [ReplaceMacros(x) for x in inp.readlines()]
    w = rword.ReservedWords()
    tree = None                   # root Main block
    stack = [None]                # enclosing blocks, popped on If_end/While_end
    ptr = None                    # block currently receiving children
    pc = 0                        # program counter: index into code
    WTFException = rword.WhatTheFuckDidIDoWrong
    while True:
        #Get a line of program
        try:
            l = code[pc]
        except IndexError:
            raise WTFException(pc+1, "unexpected EOF")
        else:
            if l[-1] == "\n":
                l = l[:-1]
            op, arg = GetOprAndArgs(l)
        #Remove \n code -- also peek one line ahead for two-line statements
        try:
            l_ = code[pc+1]
        except IndexError:
            pass
        else:
            if l_[-1] == "\n":
                l_ = l_[:-1]
            op_, arg_ = GetOprAndArgs(l_)
        if debug:
            print("l:", l)
            print("op:", op)
            print("arg:", arg)
            print("")
            print("l_:", l_)
            print("op_:", op_)
            print("arg_:", arg_)
            print("\n")
        # dispatch on the recognised operation
        if w.word["Main"] == op:
            if ptr == None:
                tree = Main()
                ptr = tree
            else:
                raise WTFException(pc+1, "attempted to begin Main method in another method")
        elif w.word["Main_end"] == op:
            if type(ptr) == type(Main()):
                out = ptr.get_parsed_structure()
                if debug:
                    print(out)
                return out
            else:
                raise WTFException(pc+1, "unexpected end of Main: " + str(type(ptr)))
        elif w.word["If"] == op:
            stack.append(ptr)
            ptr = ptr.add_child(If(arg))
        elif w.word["Else"] == op:
            if type(ptr) == type(If("")):
                if ptr.has_else() == False:
                    ptr.add_else()
                else:
                    raise WTFException(pc+1, "there is already Else before this")
            else:
                raise WTFException(pc+1, "there is no If before Else:")
        elif w.word["While"] == op:
            stack.append(ptr)
            ptr = ptr.add_child(While(arg))
        elif op in [w.word["If_end"], w.word["While_end"]]:
            ptr = stack.pop()
        elif w.word["Print"] == op:
            ptr.add_child(Print(arg))
        elif (w.word["DecVar"] == op) & (w.word["DecVar_value"] == op_):
            # two-line declaration: consume the value line as well
            ptr.add_child(DeclaringVariable(arg, arg_))
            pc += 1
        elif (w.word["AssignVar"] == op) & (w.word["AssignVar_opr"] == op_):
            # multi-line assignment: gather lines up to its end marker
            pc += 1
            offset = GetEndOfBlock(code[pc:], w.word["AssignVar_end"])
            b = code[pc:pc + offset]
            op_list, arg_list = GetArithmeticMembers(b, w.operator)
            ptr.add_child(AssigningValue(arg, [arg_] + arg_list, op_list))
            pc += offset
        elif op == "":
            pass
        else:
            raise WTFException(pc+1, "unknown: \"%s\"" % op)
        pc += 1
| {
"repo_name": "puhitaku/PynoldC",
"path": "translator.py",
"copies": "1",
"size": "8531",
"license": "mit",
"hash": 1058452540661691500,
"line_mean": 27.7239057239,
"line_max": 91,
"alpha_frac": 0.5059195874,
"autogenerated": false,
"ratio": 3.723701440419031,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9704809297603154,
"avg_score": 0.004962346043175356,
"num_lines": 297
} |
"""Arnoldi iteration eigensolver using ARPACK-NG.
See also: https://github.com/opencollab/arpack-ng
"""
from ctypes import (byref, cdll, create_string_buffer, c_int, c_char,
c_char_p, c_double, POINTER, sizeof)
import ctypes.util
import logging
from typing import Optional, Tuple
import sys
import numba.cuda as cuda
import numpy as np
import scipy.sparse
from . import matrix_vector_product as mvp
from . import clock
EPSILON = sys.float_info.epsilon  # machine epsilon; used below as the ARPACK tolerance
MAX_ITERATIONS = int(1e7)  # hard upper bound on Arnoldi update iterations
# Load the system ARPACK shared library and bind dnaupd_ (double-precision,
# non-symmetric Arnoldi update, reverse-communication interface).
arpack = cdll.LoadLibrary(ctypes.util.find_library('arpack'))
dnaupd = arpack.dnaupd_
# Argument types mirror the Fortran signature of dnaupd (all passed by
# reference; character args as C strings).
dnaupd.argtypes = [POINTER(c_int), c_char_p, POINTER(c_int), c_char_p,
                   POINTER(c_int), POINTER(c_double), POINTER(c_double),
                   POINTER(c_int), POINTER(c_double), POINTER(c_int),
                   POINTER(c_int), POINTER(c_int), POINTER(c_double),
                   POINTER(c_double), POINTER(c_int), POINTER(c_int)]
dnaupd_messages = dict([
(0, "Normal exit."),
(1, "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found. "
"IPARAM(5) returns the number of wanted converged Ritz values."),
(2, "No longer an informational error. "
"Deprecated starting with release 2 of ARPACK."),
(3, "No shifts could be applied during a cycle of the Implicitly "
"restarted Arnoldi iteration. One possibility is to increase the"
" size of NCV relative to NEV."),
(-1, "N must be positive."),
(-2, "NEV must be positive."),
(-3, "NCV-NEV >= 2 and less than or equal to N."),
(-4, "The maximum number of Arnoldi update iteration must be "
"greater than zero."),
(-5, "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'"),
(-6, "BMAT must be one of 'I' or 'G'."),
(-7, "Length of private work array is not sufficient."),
(-8, "Error return from LAPACK eigenvalue calculation;"),
(-9, "Starting vector is zero."),
(-10, "IPARAM(7) must be 1,2,3,4."),
(-11, "IPARAM(7) = 1 and BMAT = 'G' are incompatible."),
(-12, "IPARAM(1) must be equal to 0 or 1."),
(-9999, "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi factorization.")])
dneupd = arpack.dneupd_
# dneupd.argtypes = [POINTER(c_int), c_char_p, POINTER(c_int),
# POINTER(c_double), POINTER(c_double), POINTER(c_double),
# POINTER(c_int), POINTER(c_double), POINTER(c_double),
# POINTER(c_double), c_char_p, POINTER(c_int), c_char_p,
# POINTER(c_int), POINTER(c_double), POINTER(c_double),
# POINTER(c_int), POINTER(c_double), POINTER(c_int),
# POINTER(c_int), POINTER(c_int), POINTER(c_double),
# POINTER(c_double), POINTER(c_int), POINTER(c_int)]
dneupd_messages = dict([
(0, "Normal exit."),
(1, "The Schur form computed by LAPACK routine dlahqr could not be "
"reordered by LAPACK routine dtrsen. Re-enter subroutine dneupd with "
"IPARAM(5)=NCV and increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least NCV columns "
"for Z. NOTE, \"Not necessary if Z and V share the same space. "
"Please notify the authors if this error occurs.\""),
(-1, "N must be positive."),
(-2, "NEV must be positive."),
(-3, "NCV-NEV >= 2 and less than or equal to N."),
(-5, "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'"),
(-6, "BMAT must be one of 'I' or 'G'."),
(-7, "Length of private work WORKL array is not sufficient."),
(-8, "Error return from calculation of a real Schur form. "
"Informational error from LAPACK routine dlahqr."),
(-9, "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine dtrevc."),
(-10, "IPARAM(7) must be 1,2,3,4."),
(-11, "IPARAM(7) = 1 and BMAT = 'G' are incompatible."),
(-12, "HOWMNY = 'S' not yet implemented."),
(-13, "HOWMNY must be one of 'A' or 'P' if RVEC = .true."),
(-14, "DNAUPD did not find any eigenvalues to sufficient accuracy."),
(-15, "DNEUPD got a different count of the number of converged Ritz "
"values than DNAUPD got. This indicates the user probably made an "
"error in passing data from DNAUPD to DNEUPD or that the data was "
"modified before entering DNEUPD.")])
class ArpackError(Exception):
    """Raised when DNAUPD/DNEUPD report a non-zero status code.

    The message is looked up in ``dnaupd_messages``/``dneupd_messages``.
    """
    pass
def eigensolver(matrix: scipy.sparse.csr_matrix,
                num_eigenpairs: Optional[int] = 10,
                sigma: Optional[float] = None,
                initial_vector: Optional[np.ndarray] = None) \
        -> Tuple[np.ndarray, np.ndarray]:
    """Solve eigenvalue problem for sparse matrix.

    Parameters
    ----------
    matrix : scipy.sparse.spmatrix
        A matrix in compressed sparse row format.
    num_eigenpairs : int, optional
        Number of eigenpairs to compute. Default is 10.
    sigma : float, optional
        Find eigenvalues close to the value of sigma. Currently
        unsupported on the GPU (passing a value raises RuntimeError).
    initial_vector : np.ndarray, optional
        Initial vector to use in the Arnoldi iteration. If not set, a vector
        with all entries equal to one will be used.

    Returns
    -------
    ew : np.ndarray
        Eigenvalues in descending order of magnitude.
    ev : np.ndarray
        Eigenvectors corresponding to the eigenvalues in `ew`.

    Raises
    ------
    RuntimeError
        If `sigma` is given (shift/invert mode is not implemented here).
    ArpackError
        If DNAUPD or DNEUPD return a non-zero status.
    """
    if sigma is not None:
        raise RuntimeError('Shift/invert mode not implemented on the GPU')

    N = matrix.shape[0]

    if initial_vector is None:
        initial_vector = np.ones(N)

    # --- ARPACK workspace setup (layouts follow the dnaupd docs) ---------
    n = c_int(N)                     # Dimension of the eigenproblem.
    maxn = n
    nev = c_int(num_eigenpairs + 1)  # Number of eigenvalues to compute.
    ncv = c_int(num_eigenpairs + 3)  # Number of columns of the matrix V.
    maxncv = ncv.value
    ldv = c_int(maxn.value)

    tol = c_double(EPSILON)
    d = (c_double * (3 * maxncv))()
    resid = initial_vector.ctypes.data_as(POINTER(c_double))
    vnp = np.zeros((maxncv, ldv.value), dtype=np.float64)
    v = vnp.ctypes.data_as(POINTER(c_double))
    workdnp = np.zeros(3 * maxn.value, dtype=np.float64)
    workd = workdnp.ctypes.data_as(POINTER(c_double))
    workev = (c_double * (3 * maxncv))()
    workl = (c_double * (3 * maxncv * maxncv + 6 * maxncv))()
    ipntr = (c_int * 14)()
    select = (c_int * maxncv)()

    bmat = create_string_buffer(b'I')    # B = I, standard eigenvalue problem.
    which = create_string_buffer(b'LM')  # Eigenvalues of largest magnitude.
    ido = c_int(0)                       # Reverse-communication flag.
    lworkl = c_int(len(workl))
    info = c_int(1)                      # 1: resid holds the starting vector.
    ishfts = c_int(1)                    # Use exact shifts.
    maxitr = c_int(MAX_ITERATIONS)
    mode = c_int(1)                      # A x = lambda x (OP = A, B = I)
    iparam = (c_int * 11)(ishfts, 0, maxitr, 0, 0, 0, mode)
    ierr = c_int(0)
    rvec = c_int(1)                      # Also compute eigenvectors.
    howmny = c_char(b'A')                # Compute all Ritz vectors.

    # sigma is always None here (checked above); keep the placeholders that
    # dneupd expects so a future shift/invert mode only has to fill them in.
    if sigma is not None:
        sigmar = c_double(np.real(sigma))
        sigmai = c_double(np.imag(sigma))
    else:
        sigmar = c_double()
        sigmai = c_double()

    MVP = mvp.MatrixVectorProduct(matrix)

    logging.debug('Running Arnoldi iteration with tolerance {:g}...'
                  .format(tol.value))

    clk = clock.Clock()
    clk.tic()
    for itr in range(maxitr.value):
        # Reverse communication: dnaupd asks us (via ido/ipntr) to apply the
        # operator to a vector stored inside workd.
        dnaupd(byref(ido), bmat, byref(n), which, byref(nev), byref(tol),
               resid, byref(ncv), v, byref(ldv), iparam, ipntr, workd, workl,
               byref(lworkl), byref(info))

        if info.value != 0:
            raise ArpackError(dnaupd_messages[info.value])

        if ido.value == 99:          # Convergence reached.
            break
        elif abs(ido.value) != 1:
            logging.warning('DNAUPD reported IDO = {}'.format(ido.value))
            break

        # ipntr holds 1-based Fortran offsets into workd.
        idx_rhs, idx_sol = ipntr[0] - 1, ipntr[1] - 1
        rhs = workdnp[idx_rhs:(idx_rhs+N)]
        # Apply the operator on the GPU and copy the result back into workd.
        sol = MVP.product(cuda.to_device(rhs.astype(np.float64)))
        workdnp[idx_sol:idx_sol+N] = sol.copy_to_host()  # XXX pin memory
    clk.toc()

    logging.debug('Done with Arnoldi iteration after {} steps. '
                  'Elapsed time: {} seconds'.format(itr, clk))

    logging.debug('Running post-processing step...')

    clk.tic()
    # d is laid out in thirds [DR | DI | ...]: pass pointers to the first
    # two thirds as the real and imaginary parts.
    d0 = byref(d, 0)
    d1 = byref(d, maxncv * sizeof(c_double))
    dneupd(byref(rvec), byref(howmny), select, d0, d1, v, byref(ldv),
           byref(sigmar), byref(sigmai), workev, byref(bmat), byref(n),
           which, byref(nev), byref(tol), resid, byref(ncv), v, byref(ldv),
           iparam, ipntr, workd, workl, byref(lworkl), byref(ierr))
    clk.toc()

    logging.debug('Done with postprocessing step after {} seconds. '
                  'Status: {}'.format(clk, ('OK' if ierr.value == 0
                                            else 'FAIL')))

    if ierr.value != 0:
        raise ArpackError(dneupd_messages[ierr.value])

    nconv = iparam[4] - 1
    logging.debug('Converged on {} eigenpairs.'.format(nconv))

    ew = np.array(d[:num_eigenpairs])
    # Sort by decreasing magnitude and reorder the eigenvectors to match.
    ii = np.argsort(np.abs(ew))[::-1]
    ev = vnp[ii, :]

    return ew[ii], ev
| {
"repo_name": "jmbr/diffusion-maps",
"path": "diffusion_maps/gpu_eigensolver.py",
"copies": "1",
"size": "9523",
"license": "mit",
"hash": 1774400967809592800,
"line_mean": 36.6403162055,
"line_max": 79,
"alpha_frac": 0.5977108054,
"autogenerated": false,
"ratio": 3.1988579106483037,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4296568716048304,
"avg_score": null,
"num_lines": null
} |
'''A Robot Framework listener that sends information to a socket in JSON format
This requires Python 2.6+
'''
import os
import socket
import sys
import json
from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
PORT = 8910
HOST = "localhost"
# Robot expects the class to be the same as the filename, so
# we can't use the convention of capitalizing the class name
class socket_listener(object):
"""Pass all listener events to a remote listener
If called with one argument, that argument is a port
If called with two, the first is a hostname, the second is a port
"""
ROBOT_LISTENER_API_VERSION = 2
def __init__(self, *args):
self.port = PORT
self.host = HOST
self.sock = None
if len(args) == 1:
self.port = int(args[0])
elif len(args) >= 2:
self.host = args[0]
self.port = int(args[1])
self._connect()
self._send_pid()
def _send_pid(self):
self._send_socket("pid", os.getpid())
def start_test(self, name, attrs):
self._send_socket("start_test", name, attrs)
def end_test(self, name, attrs):
self._send_socket("end_test", name, attrs)
def start_suite(self, name, attrs):
self._send_socket("start_suite", name, attrs)
def end_suite(self, name, attrs):
self._send_socket("end_suite", name, attrs)
def start_keyword(self, name, attrs):
self._send_socket("start_keyword", name, attrs)
def end_keyword(self, name, attrs):
self._send_socket("end_keyword", name, attrs)
def message(self, message):
self._send_socket("message", message)
def log_message(self, message):
self._send_socket("log_message", message)
def log_file(self, path):
self._send_socket("log_file", path)
def output_file(self, path):
self._send_socket("output_file", path)
def report_file(self, path):
self._send_socket("report_file", path)
def summary_file(self, path):
self._send_socket("summary_file", path)
def debug_file(self, path):
self._send_socket("debug_file", path)
def close(self):
self._send_socket("close")
if self.sock:
self.filehandler.close()
self.sock.close()
def _connect(self):
'''Establish a connection for sending JSON data'''
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.host, self.port))
self.filehandler = self.sock.makefile('w')
except socket.error, e:
print 'unable to open socket to "%s:%s" error: %s' % (self.host, self.port, str(e))
self.sock = None
def _send_socket(self, name, *args):
if self.sock:
try:
packet = json.dumps([name,] + list(args)) + "\n"
self.filehandler.write(packet)
self.filehandler.flush()
except Exception, e:
print "writing to the socket failed:", e
| {
"repo_name": "boakley/robotframework-workbench",
"path": "rwb/misc/socket_listener.py",
"copies": "1",
"size": "3071",
"license": "apache-2.0",
"hash": 2710207237830152700,
"line_mean": 29.1078431373,
"line_max": 95,
"alpha_frac": 0.5985021166,
"autogenerated": false,
"ratio": 3.754278728606357,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48527808452063564,
"avg_score": null,
"num_lines": null
} |
# A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).
#
# The robot can only move either down or right at any point in time. The robot is trying to reach the bottom-right
# corner of the grid (marked 'Finish' in the diagram below).
#
# How many possible unique paths are there?
#
# Above is a 3 x 7 grid. How many possible unique paths are there?
#
# Note: m and n will be at most 100.
import math
class Solution(object):
    """LeetCode 62: count unique monotone lattice paths in an m x n grid."""

    def uniquePaths(self, m, n):
        """
        Count the paths from the top-left to the bottom-right corner of an
        m x n grid, moving only right or down.

        :type m: int
        :type n: int
        :rtype: int

        A path is a sequence of (m-1) moves one way and (n-1) the other, in
        any order, so the answer is the binomial coefficient C(m+n-2, m-1).
        """
        m -= 1
        n -= 1
        # Use floor division so the result stays an exact int on Python 3
        # as well (true division would return a float and lose precision on
        # large grids); the quotient is always exact here.
        return math.factorial(m + n) // (math.factorial(m) * math.factorial(n))
# Note:
# Solving by dp and formula
| {
"repo_name": "jigarkb/CTCI",
"path": "LeetCode/062-M-UniquePaths.py",
"copies": "2",
"size": "1244",
"license": "mit",
"hash": -2237396285817897500,
"line_mean": 28.619047619,
"line_max": 114,
"alpha_frac": 0.5313504823,
"autogenerated": false,
"ratio": 3.2395833333333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9767311843103373,
"avg_score": 0.0007243945059920793,
"num_lines": 42
} |
"""A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).
The robot can only move either down or right at any point in time. The robot is trying to reach the
bottom-right corner of the grid (marked 'Finish' in the diagram below).
How many possible unique paths are there?
Grid of 3 x 7
S # # # # # #
# # # # # # #
# # # # # # E
"""
# TC: O(2^N), SP: O(1)
# TC: O(2^N), SP: O(1)
def find_paths_recursive(m, n):
    """Exponential-time recursive reference: count monotone paths from the
    top-left corner (0, 0) to (m - 1, n - 1), moving only +x or +y."""
    def _count(x, y):
        # Out of bounds contributes nothing.
        if x > m or y > n:
            return 0
        here = 1 if (x == m - 1 and y == n - 1) else 0
        return here + _count(x + 1, y) + _count(x, y + 1)

    return _count(0, 0)
# TC: O(N * M), SP: O(N * M)
# TC: O(N * M), SP: O(N * M)
def find_paths(m, n):
    """Dynamic-programming path count: number of monotone paths through an
    n-row, m-column grid (each cell is the sum of the cell above and the
    cell to the left; the first row and column are all 1)."""
    grid = [[1] * m]
    for _ in range(1, n):
        previous = grid[-1]
        current = [1] * m
        for col in range(1, m):
            current[col] = current[col - 1] + previous[col]
        grid.append(current)
    return grid[-1][-1]
if __name__ == "__main__":
    # Self-test: both implementations must agree with the expected counts.
    test_cases = [
        # (rows, columns, expected_result)
        (4, 5, 35),
        (3, 3, 6),
        (1, 2, 1),
        (4, 3, 10)
    ]
    for rows, columns, expected_result in test_cases:
        # Note the argument order: both functions take (columns, rows).
        result = find_paths(columns, rows)
        assert result == expected_result
        assert result == find_paths_recursive(columns, rows)
    # The DP version handles a large grid the recursion could not.
    print(find_paths(100, 100))
| {
"repo_name": "rcanepa/cs-fundamentals",
"path": "python/interview_questions/unique_paths.py",
"copies": "1",
"size": "1605",
"license": "mit",
"hash": 4877051687653773000,
"line_mean": 23.3181818182,
"line_max": 99,
"alpha_frac": 0.53894081,
"autogenerated": false,
"ratio": 3.0455407969639468,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40844816069639467,
"avg_score": null,
"num_lines": null
} |
'''A robots.txt cache.'''
from functools import partial
import threading
import time
from cachetools import LRUCache
from .policy import DefaultObjectPolicy, ReraiseExceptionPolicy
from ..robots import Robots, AllowNone, Agent
from .. import logger
class ExpiringObject(object):
    '''Wrap an object that expires over time.

    `factory` must return an `(expires, obj)` pair. If `obj` is an
    exception instance, `get` raises it (and keeps raising it) until a
    later refresh succeeds.
    '''

    def __init__(self, factory):
        self.factory = factory
        self.lock = threading.Lock()
        self.obj = None
        self.expires = 0
        self.exception = None

    def get(self):
        '''Get the wrapped object, refreshing it if it has expired.'''
        if (self.obj is None) or (time.time() >= self.expires):
            with self.lock:
                # Re-check under the lock: threads that queued up behind an
                # in-flight refresh must not each re-invoke the factory.
                if (self.obj is None) or (time.time() >= self.expires):
                    self.expires, self.obj = self.factory()
                    if isinstance(self.obj, BaseException):
                        self.exception = self.obj
                    else:
                        self.exception = None

        if self.exception:
            raise self.exception
        else:
            return self.obj
class BaseCache(object):
    '''Common machinery for caches keyed by robots.txt URL.

    Subclasses implement `fetch`, returning an (expiration, object) pair;
    results are memoized as ExpiringObject entries in an LRU cache.
    '''

    DEFAULT_CACHE_POLICY = ReraiseExceptionPolicy(ttl=600)
    DEFAULT_TTL_POLICY = Robots.DEFAULT_TTL_POLICY

    def __init__(self, capacity, cache_policy=None, ttl_policy=None, *args, **kwargs):
        self.cache_policy = cache_policy or self.DEFAULT_CACHE_POLICY
        self.ttl_policy = ttl_policy or self.DEFAULT_TTL_POLICY
        self.cache = LRUCache(maxsize=capacity)
        # Extra positional/keyword arguments are forwarded to fetches.
        self.args = args
        self.kwargs = kwargs

    def get(self, url):
        '''Get the entity that corresponds to URL.'''
        robots_url = Robots.robots_url(url)
        entry = self.cache.get(robots_url)
        if entry is None:
            entry = ExpiringObject(partial(self.factory, robots_url))
            self.cache[robots_url] = entry
        return entry.get()

    def factory(self, url):
        '''
        Return (expiration, obj) corresponding to provided url, exercising the
        cache_policy as necessary.
        '''
        try:
            return self.fetch(url)
        except BaseException as exc:
            logger.exception('Reppy cache fetch error on %s' % url)
            return self.cache_policy.exception(url, exc)

    def fetch(self, url):
        '''Return (expiration, obj) corresponding to provided url.'''
        raise NotImplementedError('BaseCache does not implement fetch.')
class RobotsCache(BaseCache):
    '''Cache mapping robots.txt URLs to parsed Robots objects.'''

    DEFAULT_CACHE_POLICY = DefaultObjectPolicy(ttl=600, factory=AllowNone)

    def allowed(self, url, agent):
        '''Return true if the provided URL is allowed to agent.'''
        return self.get(url).allowed(url, agent)

    def fetch(self, url):
        '''Return (expiration, Robots) for the robots.txt at the provided URL.'''
        fetched = Robots.fetch(
            url, ttl_policy=self.ttl_policy, *self.args, **self.kwargs)
        return (fetched.expires, fetched)
class AgentCache(BaseCache):
    '''Cache of Agent objects, specialized for a single user agent.'''

    DEFAULT_CACHE_POLICY = DefaultObjectPolicy(
        ttl=600, factory=lambda url: Agent().disallow('/'))

    def __init__(self, agent, *args, **kwargs):
        BaseCache.__init__(self, *args, **kwargs)
        self.agent = agent

    def allowed(self, url):
        '''Return true if the provided URL is allowed to self.agent.'''
        return self.get(url).allowed(url)

    def fetch(self, url):
        '''Return (expiration, Agent) for the robots.txt at the provided URL.'''
        fetched = Robots.fetch(
            url, ttl_policy=self.ttl_policy, *self.args, **self.kwargs)
        return (fetched.expires, fetched.agent(self.agent))
| {
"repo_name": "seomoz/reppy",
"path": "reppy/cache/__init__.py",
"copies": "1",
"size": "3586",
"license": "mit",
"hash": -8436773746767240000,
"line_mean": 31.6,
"line_max": 86,
"alpha_frac": 0.6196319018,
"autogenerated": false,
"ratio": 4.002232142857143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5121864044657143,
"avg_score": null,
"num_lines": null
} |
"""A rough translation of an example from the Java Tutorial
http://java.sun.com/docs/books/tutorial/
This example shows how to do very simple Canvas Drawing
"""
from java import applet, awt
from pawt import GridBag
class CoordinatesDemo(applet.Applet):
    """Jython applet demo: reports the coordinates of clicks made inside a
    framed drawing area."""

    def init(self):
        # Framed canvas on top, status label underneath, via GridBag.
        bag = GridBag(self)
        self.framedArea = FramedArea(self)
        bag.addRow(self.framedArea, weighty=1.0, fill='BOTH')
        self.label = awt.Label('Click within the framed area')
        bag.addRow(self.label, weightx=1.0, weighty=0.0, fill='HORIZONTAL')

    def updateLabel(self, point):
        # Called by CoordinateArea.paint to report the last click position.
        text = 'Click occurred at coordinate (%d, %d).'
        self.label.text = text % (point.x, point.y)
class FramedArea(awt.Panel):
    """Panel that draws a double 3D frame around a CoordinateArea."""

    def __init__(self, controller):
        # Jython bean-property idiom: assigning self.background sets the
        # AWT 'background' property, not a plain Python attribute.
        self.background = awt.Color.lightGray
        self.setLayout(awt.GridLayout(1,0))
        self.add(CoordinateArea(controller))

    def getInsets(self):
        # Leave room for the two frame rectangles drawn in paint().
        return awt.Insets(4,4,5,5)

    def paint(self, g):
        # Draw two nested 3D rectangles just inside the panel bounds.
        d = self.size
        g.color = self.background
        g.draw3DRect(0, 0, d.width-1, d.height-1, 1)
        g.draw3DRect(3, 3, d.width-7, d.height-7, 1)
class CoordinateArea(awt.Canvas):
    """Canvas that records mouse presses and paints a dot at the last one."""

    def __init__(self, controller):
        # Jython event-property idiom: assigning to mousePressed registers
        # self.push as the mouse-press handler.
        self.mousePressed = self.push
        self.controller = controller

    def push(self, e):
        # Reuse the stored point when it exists; create it on first click
        # ('point' is set lazily, hence the AttributeError fallback).
        try:
            self.point.x = e.x
            self.point.y = e.y
        except AttributeError:
            self.point = awt.Point(e.x, e.y)
        self.repaint()

    def paint(self, g):
        # Only draw once a click has happened ('point' exists).
        if hasattr(self, 'point'):
            self.controller.updateLabel(self.point)
            g.fillRect(self.point.x-1, self.point.y-1, 2, 2)
if __name__ == '__main__':
    # Standalone run (outside a browser) in a 300x200 pawt test frame.
    import pawt
    pawt.test(CoordinatesDemo(), size=(300, 200))
| {
"repo_name": "nelmiux/CarnotKE",
"path": "jyhton/Demo/applet/deprecated/CoordinatesDemo.py",
"copies": "12",
"size": "1581",
"license": "apache-2.0",
"hash": -5800794217332636000,
"line_mean": 21.9130434783,
"line_max": 68,
"alpha_frac": 0.6799493991,
"autogenerated": false,
"ratio": 2.7736842105263158,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.030194584445936746,
"num_lines": 69
} |
"""A rough translation of an example from the Java Tutorial
http://java.sun.com/docs/books/tutorial/
This example shows how to use List
"""
from java import applet, awt
from java.awt.event import ItemEvent
from pawt import GridBag
class ListDemo(applet.Applet):
    """Jython applet demo of AWT List widgets: two lists (Spanish and
    Italian number words) whose action and selection events are logged to
    a text area."""

    def fillList(self, list, names):
        # Jython bean-event idiom: assigning to these attributes registers
        # the bound methods as the list's action/item listeners.
        list.actionPerformed=self.action
        list.itemStateChanged=self.change
        for name in names:
            list.add(name)

    def init(self):
        # Build both lists, the read-only output area, and the layout.
        self.spanish = awt.List(4, 1)
        self.fillList(self.spanish, ['uno', 'dos', 'tres', 'cuatro',
                                     'cinco', 'seis', 'siete'])
        self.italian = awt.List()
        self.fillList(self.italian, ['uno', 'due', 'tre', 'quattro',
                                     'cinque', 'sei', 'sette'])
        self.output = awt.TextArea(10, 40, editable=0)
        bag = GridBag(self)
        bag.add(self.output,
                fill='BOTH', weightx=1.0, weighty=1.0,
                gridheight=2)
        bag.addRow(self.spanish, fill='VERTICAL')
        bag.addRow(self.italian, fill='VERTICAL')
        # Widget -> display name, used when formatting log messages.
        self.language = {self.spanish:'Spanish', self.italian:'Italian'}

    def action(self, e):
        # Log action events together with the currently selected item.
        list = e.source
        text = 'Action event occurred on "%s" in %s.\n'
        self.output.append(text % (list.selectedItem, self.language[list]))

    def change(self, e):
        # Log item select/deselect events, noting which kind occurred.
        list = e.source
        if e.stateChange == ItemEvent.SELECTED:
            select = 'Select'
        else:
            select = 'Deselect'
        text = '%s event occurred on item #%d (%s) in %s.\n'
        params = (select, e.item, list.getItem(e.item), self.language[list])
        self.output.append(text % params)
if __name__ == '__main__':
    # Standalone run (outside a browser) in a pawt test frame.
    import pawt
    pawt.test(ListDemo())
| {
"repo_name": "babble/babble",
"path": "include/jython/Demo/applet/ListDemo.py",
"copies": "12",
"size": "1524",
"license": "apache-2.0",
"hash": 2192309005405104600,
"line_mean": 25.275862069,
"line_max": 69,
"alpha_frac": 0.6666666667,
"autogenerated": false,
"ratio": 2.770909090909091,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.03428969244211728,
"num_lines": 58
} |
""" A route is a function that returns a dict consisting of {number: message}
key/values. I figured it'd be nice to keep them separate from
models, which seem strictly tied to database tables in django. The only
thing the rest of SMS should see is the ROUTES dispatch table.
routes are divided into the apps they require. IE routes.contacts requires
the panic/contacts app, routes.journal requires the journal app, etc.
The order of import is important. Later updates clobber earlier ones,
allowing us to define default functions in routes_base that other route
modules can extend.
In addition to aggregating the other apps, this file also contains routes
that effect general use of routes, ie help and echo. """
from sms.routes.sms import sms_routes
from sms.routes.contact import contact_routes
from sms.routes.journal import journal_routes
from collections import defaultdict
from functools import partial, update_wrapper
import sms.models as model
def echo(*args):
    """ Join all arguments into one space-separated string and return it.
    Serves as the fallback route. """
    separator = " "
    return separator.join(args)
def routes_help(route=None, routes_dict=None):
    """ Get information about the routes: either a sorted, comma-separated
    list of all available route names, or the docstring of a specific one.
    For keeping track of what things do.

    `routes_dict` defaults to None (treated as an empty table) rather than
    a mutable default argument; in practice it is pre-bound via
    functools.partial when this is installed as the 'info' route. """
    routes = {} if routes_dict is None else routes_dict
    if route:
        return routes[route].__doc__
    return ", ".join(sorted(routes.keys()))
def default_route(routes_dict):
    """ Given a routes dict, return the function in the route with the same key
    as the value of the 'default' config. If no default, use echo."""
    try:
        key = model.Config.objects.get(key='default').val
        if key in routes_dict.keys():
            return routes_dict[key]
        # Unknown key: fall through to echo (also prevents default_factory
        # recursion when the configured default is missing from the table).
        raise KeyError
    except (model.Config.DoesNotExist, KeyError):
        return echo
# Dispatch table mapping route names to handler functions. Later updates
# clobber earlier ones, so app-specific tables can override base entries.
ROUTES = defaultdict()
ROUTES.update(sms_routes)
ROUTES.update(contact_routes)
ROUTES.update(journal_routes)
# 'info' is routes_help pre-bound to this table; update_wrapper copies the
# original docstring so 'info info' can document itself.
ROUTES.update({"info": update_wrapper(partial(routes_help, routes_dict=ROUTES),
                                      wrapped=routes_help),
               "echo": echo,
               })
# Unknown route names fall back to the configured default route (or echo).
ROUTES.default_factory = lambda: default_route(ROUTES)
| {
"repo_name": "hwayne/safehouse",
"path": "sms/routes/routes.py",
"copies": "1",
"size": "2245",
"license": "apache-2.0",
"hash": -6803259418297146000,
"line_mean": 39.0892857143,
"line_max": 80,
"alpha_frac": 0.7024498886,
"autogenerated": false,
"ratio": 4.2120075046904315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017117958294428882,
"num_lines": 56
} |
"""A routine for generating measurement records that can be fed to different
estimators for comparison purposes
"""
from __future__ import division, print_function
import numpy as np
from model import HaarTestModel
from qubit_dst.dst_povm_sampling import DSTDistribution
from qinfer.smc import SMCUpdater
from qinfer.resamplers import LiuWestResampler
#TODO: Finish implementation
def generate_records(n_meas, n_meas_rep, estimators, n_trials=100,
                     n_particles=1000):
    """Generate measurement records. (Not implemented yet)

    :param n_meas: Number of copies of the system to be measured in each
                   trial.
    :type n_meas: int
    :param n_meas_rep: Number of measurement outcomes samples for each copy of
                       the system for each trial.
    :type n_meas_rep: int
    :param estimators: List of functions that take measurement records and
                       calculate estimates of the state that produced those
                       records.
    :type estimators: [function, ...]
    :param n_trials: Number of input pure states sampled from the prior
                     distribution.
    :type n_trials: int
    :param n_particles: Number of SMC particles to use.
    :type n_particles: int
    :returns: An array with the calculated average fidelities at the
              specified times for all tomographic runs
    :return type: numpy.array((n_trials, n_rec))
    """
    n_qubits = 1  # Not tested for n_qubits > 1
    dim = int(2**n_qubits)
    # Instantiate model and state prior
    model = HaarTestModel(n_qubits=n_qubits)
    # NOTE(review): HaarDistribution is not imported or defined in this
    # module (the imports provide HaarTestModel and DSTDistribution), so
    # this line would raise NameError if executed -- confirm intended class.
    prior = HaarDistribution(n_qubits=n_qubits)
    # Sample all the measurement directions used at once (since some samplers
    # might be more efficient doing things this way)
    # NOTE(review): meas_dist is undefined here; presumably a DSTDistribution
    # instance should be constructed first -- TODO confirm (function is
    # marked not implemented yet).
    raw_meas_dirs = meas_dist.sample(n_trials*n_meas)
    # Reshape the measurement directions to be a n_trials x n_meas array of
    # unit vectors in C^2
    meas_dirs = np.reshape(raw_meas_dirs.T, (n_trials, n_meas, 2))
| {
"repo_name": "jarthurgross/seq_mon_car",
"path": "src/seq_mon_car/gen_meas_record.py",
"copies": "1",
"size": "2073",
"license": "mit",
"hash": 3881934599493820400,
"line_mean": 39.6470588235,
"line_max": 80,
"alpha_frac": 0.6584659913,
"autogenerated": false,
"ratio": 4.001930501930502,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00121085903465419,
"num_lines": 51
} |
# A row measuring seven units in length has red blocks with a minimum
# length of three units placed on it, such that any two red blocks
# (which are allowed to be different lengths) are separated by at
# least one black square. There are exactly seventeen ways of doing
# this.
# B B B B B B B R R R B B B B B R R R B B B
# B B R R R B B B B B R R R B B B B B R R R
# R R R B R R R R R R R B B B B R R R R B B
# B B R R R R B B B B R R R R R R R R R B B
# B R R R R R B B B R R R R R R R R R R R B
# B R R R R R R R R R R R R R
# How many ways can a row measuring fifty units in length be filled?
# NOTE: Although the example above does not lend itself to the possibility,
# in general it is permitted to mix block sizes. For example, on a row
# measuring eight units in length you could use red (3), black (1), and
# red (4).
# Dynamic-programming count of rows (Project Euler 114): red blocks of
# length >= 3 separated by at least one black square, on a row of length n.
n = 50
# dp[k]: arrangements contributed at position k -- presumably the number of
# ways in which a red block ends at cell k; TODO confirm.
dp = [0 for i in range(n)]
count = 1
for i in range(n - 2):
    if i >= 4:
        # Earlier arrangements ending at i - 2 can precede a new block
        # starting at i (leaving the mandatory black separator at i - 1).
        count += dp[i - 2]
    for j in range(2, n - i):
        # Place a red block spanning cells i..i+j (length j + 1 >= 3).
        dp[i + j] += count
# +1 accounts for the all-black row. (Python 2 print statement.)
print sum(dp) + 1
| {
"repo_name": "cloudzfy/euler",
"path": "src/114.py",
"copies": "1",
"size": "1062",
"license": "mit",
"hash": 1203962665046048800,
"line_mean": 28.5,
"line_max": 75,
"alpha_frac": 0.6016949153,
"autogenerated": false,
"ratio": 2.744186046511628,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.881593534158823,
"avg_score": 0.0059891240446796,
"num_lines": 36
} |
# A row of five black square tiles is to have a number of its tiles
# replaced with coloured oblong tiles chosen from red (length two),
# green (length three), or blue (length four).
# If red tiles are chosen there are exactly seven ways this can be
# done.
# R R K K K K R R K K K K R R K K K K R R
# R R R R K R R K R R K R R R R
# If green tiles are chosen there are three ways.
# G G G K K K G G G K K K G G G
# And if blue tiles are chosen there are two ways.
# K B B B B B B B B K
# Assuming that colours cannot be mixed there are 7 + 3 + 2 = 12 ways
# of replacing the black tiles in a row measuring five units in length.
# How many different ways can the black tiles in a row measuring fifty
# units in length be replaced if colours cannot be mixed and at least
# one coloured tile must be used?
# NOTE: This is related to Problem 117.
# Project Euler 116: colours cannot be mixed, so count arrangements for each
# fixed tile length independently (red c=2, green c=3, blue c=4) and sum.
n = 50
ans = 0
for c in range(2, 5):
    # dp[k]: arrangements in which a tile ends at cell k -- presumably;
    # TODO confirm.
    dp = [0 for i in range(n)]
    count = 1
    for i in range(n - c + 1):
        # Place a tile of length c starting at cell i; `count` accumulates
        # the arrangements of everything before cell i (including none).
        dp[i + c - 1] += count
        count += dp[i]
    # sum(dp) excludes the empty (all-black) row, matching the requirement
    # that at least one coloured tile be used.
    ans += sum(dp)
# Python 2 print statement.
print ans
| {
"repo_name": "cloudzfy/euler",
"path": "src/116.py",
"copies": "1",
"size": "1068",
"license": "mit",
"hash": 7128411921409622000,
"line_mean": 26.3846153846,
"line_max": 71,
"alpha_frac": 0.643258427,
"autogenerated": false,
"ratio": 2.991596638655462,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9093360041072203,
"avg_score": 0.008299004916651975,
"num_lines": 39
} |
# Arpegiattors
import random
from itertools import cycle
from pprint import pformat
from zope.interface import Interface, Attribute, implements
from twisted.python import log
from bl.debug import debug, DEBUG
from bl.utils import getClock, exhaustCall
__all__ = [
'IArp', 'IndexedArp', 'AscArp', 'DescArp', 'OrderedArp', 'RevOrderedArp',
'RandomArp', 'ArpSwitcher', 'OctaveArp', 'Adder', 'PhraseRecordingArp',
'Paradiddle', 'SingleParadiddle', 'DoubleParadiddle', 'TripleParadiddle',
'ParadiddleDiddle', 'ArpMap', 'PatternArp', 'ChordPatternArp'
]
class IArp(Interface):
    """
    An interface for arpeggiators.

    This is a zope.interface declaration: the methods below document the
    contract and, by zope convention, take no `self` parameter.
    """

    values = Attribute("Values to arpeggiate")

    def reset(values):
        """
        Reset `values` to the given list.
        """

    def __call__():
        """
        Get the next value in the arpeggiation.
        """
class BaseArp(object):
    """Common base for arpeggiators: holds `values` and defines the API."""

    implements(IArp)  # zope.interface declaration (Python 2 style)

    values = ()

    def __init__(self, values=()):
        self.reset(values)

    def reset(self, values):
        # Default behaviour: simply replace the value list. Subclasses
        # override this to re-sort or rebuild internal state.
        self.values = values

    def __call__(self):
        # Subclasses must return the next value in their arpeggiation order.
        raise NotImplementedError
def sortNumeric(values, sort=None):
    """Reorder the numeric-ish entries of `values` (ints, floats, lists,
    tuples) according to `sort` (default: ascending), leaving every other
    entry in its original position. Returns a new list."""
    if sort is None:
        sort = lambda seq: list(sorted(seq))
    pool = sort([v for v in values if type(v) in (int, float, list, tuple)])
    reordered = []
    for original in values:
        if type(original) in (int, float):
            # Consume the next sorted numeric value for this slot.
            reordered.append(pool.pop(0))
        else:
            reordered.append(original)
    return reordered
class IndexedArp(BaseArp):
    """Arpeggiator that walks `values` by integer index.

    `direction` is +1 (forward) or -1 (backward); subclasses define `sort`
    to fix the traversal order.
    """

    index = 0
    count = 0
    direction = 1

    def sort(self, values):
        # Subclasses decide the ordering of the values.
        raise NotImplementedError

    def reset(self, values):
        values = self.sort(values)
        self.count = len(values)
        if self.values:
            # Rescale the current position so that resetting to a longer or
            # shorter value list keeps roughly the same phase.
            factor = len(values) / float(len(self.values))
            if factor != 1:
                self.index = int(self.index * factor)
            # NOTE(review): resetting a non-empty arp to an empty list makes
            # count == 0, so this modulo raises ZeroDivisionError -- confirm
            # whether empty resets are expected here.
            self.index = self.index % self.count
            # NOTE(review): after the modulo above, index is already within
            # [0, count), so both branches below look unreachable -- confirm
            # before removing.
            if self.index == self.count:
                self.index -= 1
            elif self.index >= self.count:
                self.index = self.index % self.count
        self.values = values

    def __call__(self):
        if not self.values:
            return
        if self.index >= len(self.values):
            # Values shrank since the last call; re-derive index and count.
            self.reset(self.values)
        v = self.values[self.index]
        self.index += self.direction
        self.index = self.index % self.count
        # Values may themselves be callables; resolve to a concrete value.
        return exhaustCall(v)
class AscArp(IndexedArp):
    """Arpeggiate the numeric values in ascending order."""

    def sort(self, values):
        # Default sortNumeric ordering is ascending.
        return sortNumeric(values)
class DescArp(IndexedArp):
    """Arpeggiate the numeric values in descending order."""

    def sort(self, values):
        descending = lambda seq: list(reversed(sorted(seq)))
        return sortNumeric(values, descending)
class OrderedArp(IndexedArp):
    """Arpeggiate the values exactly in the order they were given."""

    def sort(self, values):
        # Identity: preserve the caller's ordering.
        return values
def RevOrderedArp(values=()):
    """Build an OrderedArp that walks `values` from last to first."""
    arp = OrderedArp(values)
    arp.index = len(values) - 1
    arp.direction = -1
    return arp
class RandomArp(BaseArp):
    """Arpeggiate the values in random order, playing every value exactly
    once before any value repeats."""

    def reset(self, values):
        self._current = list(values)  # values not yet played this cycle
        self._next = []               # values already played this cycle
        self.count = len(values)
        self.values = values

    def __call__(self):
        if not self._current:
            # Cycle exhausted: the played pile becomes the new pool.
            # Starting a fresh empty pile here is essential: reusing the
            # same list object for both (as the previous code did via
            # `self._current = self._next` alone) aliased the two, so each
            # popped value was appended straight back into the pool and
            # values could repeat before the cycle was exhausted.
            self._current = self._next
            self._next = []
        if not self._current:
            return
        l = len(self._current)
        index = random.randint(0, l - 1)
        next = self._current.pop(index)
        self._next.append(next)
        return next
class PatternArp(BaseArp):
    """
    Play notes in order dictated by C{pattern}: a sequencing of indexes (or
    list of indexes for chords) into values. Note the C{pattern} can be a
    C{list} or callable that produces index or index lists/tuples.

    Example:

    >>> arp = PatternArp([60, 63, 67, 69], [0, 0, 0, 3, 2, 1, 2, 3, 0, 1])
    >>> [arp() for i in range(11)]
    [60, 60, 60, 69, 67, 63, 67, 69, 60, 63, 60]
    """

    def __init__(self, values=(), pattern=(0,)):
        BaseArp.__init__(self, values)
        self.resetPattern(pattern)

    def resetPattern(self, pattern):
        """
        Reset the pattern ugen. If pattern is not a callable then it is
        coerced to cycle(pattern).next.
        """
        if not callable(pattern):
            # Python 2 iterator protocol; under Python 3 this would be
            # cycle(pattern).__next__.
            pattern = cycle(pattern).next
        self._pattern = pattern

    def coerce(self, note):
        # Hook for subclasses to normalize the produced note/chord.
        return note

    def __call__(self):
        if not self.values:
            return
        p = self._pattern()
        if type(p) in (tuple, list):
            # A list/tuple of indexes selects a chord.
            next = [self.values[i] for i in p]
        else:
            next = self.values[p]
        return self.coerce(next)
class ChordPatternArp(PatternArp):
    """
    A PatternArp whose results are always chords: a bare note is wrapped
    in a 1-tuple, while lists and tuples pass through untouched.
    """

    def coerce(self, noteOrChord):
        if type(noteOrChord) in (list, tuple):
            return noteOrChord
        return (noteOrChord,)
class ArpSwitcher(BaseArp):
    """
    Wraps another arp so the underlying implementation can be swapped at
    runtime while the current values are retained.
    """

    def __init__(self, arp, values=None):
        values = arp.values if values is None else values
        self.arp = arp
        self.arp.reset(values)
        self.values = values
        self.count = len(self.values)

    def reset(self, values):
        """Push new values into the wrapped arp."""
        self.values = values
        self.arp.reset(values)
        self.count = len(values)

    def switch(self, arp):
        """Swap in a different arp, seeding it with the current values."""
        arp.reset(self.values)
        self.arp = arp

    def __call__(self):
        return self.arp()
class Paradiddle(OrderedArp):
    """
    Expand two values (the right and left "sticks") into a paradiddle
    sticking pattern and play it in order.
    """

    def reset(self, values):
        # Bug fix: the original asserted a (condition, message) tuple,
        # which is always truthy, so the length check could never fire.
        assert len(values) == 2, \
            'Paradiddles take exactly two values (R and L)'
        paradiddle = self.makeParadiddle(values[0], values[1])
        OrderedArp.reset(self, paradiddle)

    def makeParadiddle(self, r, l):
        """
        Make a single paradiddle: RlRR LRLL
        """
        return [r, l, r, r, l, r, l, l]
# Alias: the plain Paradiddle is the "single" paradiddle.
SingleParadiddle = Paradiddle
class DoubleParadiddle(Paradiddle):
    def makeParadiddle(self, r, l):
        """
        Make a double paradiddle: RLRLRR LRLRLL
        """
        # Alternating lead-in plus the closing double stroke, for each hand.
        return [r, l] * 2 + [r, r] + [l, r] * 2 + [l, l]
class TripleParadiddle(Paradiddle):
    def makeParadiddle(self, r, l):
        """
        Make a triple paradiddle: RLRLRLRR LRLRLRLL
        """
        # Three alternations, then the double stroke, for each hand.
        return [r, l] * 3 + [r, r] + [l, r] * 3 + [l, l]
class ParadiddleDiddle(Paradiddle):
    def makeParadiddle(self, r, l):
        """
        Make a paradiddle diddle: RLRRLL twice, then LRLLRR twice.
        """
        return [r, l, r, r, l, l] * 2 + [l, r, l, l, r, r] * 2
class OctaveArp(ArpSwitcher):
    """
    Wraps an arp and transposes its output, stepping through octaves: each
    time the wrapped arp completes one pass over the values, the current
    octave advances by ``direction``.
    """

    def __init__(self, arp, values=None, octaves=3, direction=1,
                 oscillate=False):
        ArpSwitcher.__init__(self, arp, values)
        self.octaves = octaves
        self.currentOctave = 0
        self.index = 0
        # Walking downward starts from the top octave.
        if direction == -1:
            self.currentOctave = octaves
        self.direction = direction
        self.oscillate = oscillate

    def __call__(self):
        if not self.count:
            return
        v = exhaustCall(self.arp())
        if v is not None:
            # Transpose by 12 semitones per octave.
            v += (self.currentOctave * 12)
            self.index += 1
            self.index = self.index % self.count
            # A full pass over the values completed: move to the next octave.
            if self.index == 0:
                self.currentOctave += self.direction
                if self.octaves:
                    self.currentOctave = self.currentOctave % (self.octaves + 1)
                    # Ping-pong between the lowest and highest octave instead
                    # of wrapping around.
                    if self.oscillate and self.currentOctave in (0, self.octaves):
                        self.direction *= -1
                else:
                    # octaves == 0 pins playback to the base octave.
                    self.currentOctave = 0
        return v
class ArpMap(ArpSwitcher):
    """
    An ArpSwitcher that maps values returning from underlying C{arp} through a
    function C{func}.

    Example:

        from pyo import midiToHz
        freqArp = ArpMap(midiToHz, RandomArp(range(128)))
    """

    def __init__(self, func, arp, values=None):
        ArpSwitcher.__init__(self, arp, values)
        self.func = func

    def __call__(self):
        value = exhaustCall(self.arp())
        return self.func(value)
class PhraseRecordingArp(BaseArp):
    """
    Records incoming note on/off events against a clock into a "tape" and,
    on every call, returns the recorded phrase as a list of
    (when, note, velocity, sustain) tuples relative to the phrase start.
    """

    def __init__(self, clock=None):
        self.clock = getClock(clock)
        # Tick count at which the current phrase started.
        self._phraseStartTicks = self.clock.ticks
        # Previous phrase's tape, kept so late note-offs can still resolve.
        self._last_tape = None
        self._tape = {}
        self._eraseTape()
        self.phrase = []

    def __call__(self):
        # Close the current phrase: remember its end ticks and start a new one.
        self.elapsed = self._phraseStartTicks = self.clock.ticks
        self._resetRecording()
        return list(self.phrase)

    def _resetRecording(self):
        """Convert the current tape into self.phrase and erase the tape."""
        whens = self._tape['whens']
        notes = self._tape['notes']
        velocities = self._tape['velocities']
        sustains = self._tape['sustains']
        indexes = self._tape['indexes']
        if DEBUG:
            log.msg('>tape===\n%s' % pformat(self._tape))
        self._eraseTape()
        # Nothing new recorded: replay the previous phrase if it holds data.
        if not whens and (self._last_tape and self._last_tape['dirty']):
            whens = self._last_tape['whens']
            notes = self._last_tape['notes']
            velocities = self._last_tape['velocities']
            sustains = self._last_tape['sustains']
            indexes = self._last_tape['indexes']
            self._last_tape['dirty'] = True
        if whens:
            # Match each recorded sustain back to its note-on via the
            # (tick, note) index map.
            sus = [None] * len(whens)
            for (ontick, onnote, sustain) in sustains:
                index = indexes.get((ontick, onnote))
                if index is not None:
                    sus[index] = sustain
                else:
                    log.err(ValueError(
                        'no index for tick=%s note=%s' % (ontick, onnote)))
            self.phrase = zip(whens, notes, velocities, sus)
            # Notes with no recorded note-off sustain to the end of the phrase.
            self.phrase = [
                (w, n, v, s or self.elapsed - w)
                for (w, n, v, s) in self.phrase
            ]
        if DEBUG:
            log.msg('>phrase===\n%s' % pformat(self.phrase))

    def _eraseTape(self):
        # Preserve a non-empty tape as _last_tape before wiping.
        if self._tape and self._tape['whens']:
            self._last_tape = dict(self._tape)
            self._last_tape['dirty'] = False
        self._tape = {'whens': [], 'notes': [], 'velocities': [],
                      'sustains': [], 'indexes': {}, 'last_ticks': {}}

    def recordNoteOn(self, note, velocity=100, ticks=None):
        """Record a note-on; ``ticks`` defaults to the current clock ticks."""
        if ticks is None:
            ticks = self.clock.ticks
        # NOTE(review): the index and last_ticks keys use self.clock.ticks
        # even when an explicit ``ticks`` was passed, while 'whens' uses the
        # ``ticks`` argument -- confirm that divergence is intentional.
        self._tape['indexes'][(self.clock.ticks, note)] = len(
            self._tape['notes'])
        self._tape['last_ticks'][note] = self.clock.ticks
        self._tape['notes'].append(note)
        self._tape['velocities'].append(velocity)
        self._tape['whens'].append(ticks - self._phraseStartTicks)

    def recordNoteOff(self, note):
        """Record a note-off, resolving against this or the previous phrase."""
        tape = self._tape
        last = tape['last_ticks'].get(note, None)
        if last is None:
            # Note-on may have happened in the previous phrase.
            if self._last_tape:
                last = self._last_tape['last_ticks'].get(note, None)
                debug('got last tick from past recording: %s' % last)
            if last is None:
                log.err(ValueError(
                    'woops, i have not seen noteon event in current '
                    'or last phrase for note: %s' % note))
                return
            tape = self._last_tape
            tape['dirty'] = True
        sustain = self.clock.ticks - last
        tape['sustains'].append((last, note, sustain))
class Adder(ArpSwitcher):
    """
    A simple wrapper over an Arp instance which adds ``amount`` to the
    value (or each value of a chord) returned by the wrapped ``arp``.
    ``amount`` can be changed on the fly while the arp is being called.
    """

    def __init__(self, arp, values=None):
        ArpSwitcher.__init__(self, arp, values)
        self.amount = 0

    def __call__(self):
        v = exhaustCall(self.arp())
        if v is None:
            return
        if type(v) in (list, tuple):
            return [self.amount + note for note in v]
        return self.amount + v
| {
"repo_name": "djfroofy/beatlounge",
"path": "bl/arp.py",
"copies": "1",
"size": "11748",
"license": "mit",
"hash": -634913482918320100,
"line_mean": 27.1052631579,
"line_max": 79,
"alpha_frac": 0.5498808308,
"autogenerated": false,
"ratio": 3.6360259981429897,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9683900137321821,
"avg_score": 0.00040133832423368216,
"num_lines": 418
} |
"""Arpegio's models."""
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.template.defaultfilters import slugify, striptags
from django.utils.timezone import now
from django.db import models
@python_2_unicode_compatible
class ContentMixin(models.Model):
    """
    Abstract model providing three fields: title, slug and content.
    The slug is derived from the title on save when not given explicitly.
    """
    title = models.CharField(max_length=100, blank=True, null=True)
    slug = models.SlugField(max_length=100, blank=True)
    content = models.TextField(blank=True, null=True)

    def __str__(self):
        return self.title or '(No title)'

    def save(self, *args, **kwargs):
        # A slug is mandatory: derive it from the title when missing.
        if not self.slug:
            if not self.title:
                raise ValueError('You have to give this object a slug.')
            self.slug = slugify(striptags(self.title))
        super(ContentMixin, self).save(*args, **kwargs)

    class Meta:
        abstract = True
class Timestampable(models.Model):  # pylint: disable=model-missing-unicode
    """
    This model adds creation and modification date fields.
    """
    # Set at creation time (may also be supplied explicitly).
    creation_date = models.DateTimeField(blank=True, default=now)
    # Refreshed automatically on every save(); hence not editable.
    modification_date = models.DateTimeField(editable=False)

    def save(self, *args, **kwargs):
        self.modification_date = now()
        super(Timestampable, self).save(*args, **kwargs)

    class Meta:
        abstract = True
class Sluggable(models.Model):  # pylint: disable=model-missing-unicode
    """
    This model adds ``name`` and ``slug`` fields; the slug is derived from
    the name field on every save.
    """
    name = models.CharField(max_length=100, unique=True)
    # Regenerated from ``name`` on save; not user-editable.
    slug = models.SlugField(max_length=100, editable=False)

    def save(self, *args, **kwargs):
        self.slug = slugify(self.name)
        super(Sluggable, self).save(*args, **kwargs)

    class Meta:
        abstract = True
| {
"repo_name": "arpegio-dj/arpegio",
"path": "arpegio/core/models.py",
"copies": "1",
"size": "1900",
"license": "bsd-3-clause",
"hash": -7445711271289052000,
"line_mean": 29.6451612903,
"line_max": 75,
"alpha_frac": 0.6705263158,
"autogenerated": false,
"ratio": 3.925619834710744,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5096146150510744,
"avg_score": null,
"num_lines": null
} |
"""Arpegio's templatetags."""
import re
from django import template
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.template.defaultfilters import stringfilter
register = template.Library()
@register.filter()
@stringfilter
def more(text, url='#'):
    """Adds a 'Read more' tag to the text."""
    parts = re.split(r'<!--\s*more\s*-->', text)
    more_tag = '<p><a href="%s" class="more-link">Read More</a></p>'
    more_link = more_tag % url
    if len(parts) > 1:
        # Keep only the teaser (everything before the first more marker).
        text = '%s%s' % (parts[0], more_link)
    return text
# Inline/phrasing-level HTML tags that linebreakshtml treats as allowed to
# open a paragraph block (pipe-separated for use inside a regex group).
ALLOWED_TAGS = 'a|abbr|area|audio|b|bdi|bdo|br|button|canvas|cite|'
ALLOWED_TAGS += 'code|data|datalist|del|dfn|em|embed|i|iframe|img|input|'
# Bug fix: 'noscrip' -> 'noscript'; the misspelled entry never matched the
# real <noscript> element.
ALLOWED_TAGS += 'ins|kbd|label|link|map|mark|math|meter|noscript|object|'
ALLOWED_TAGS += 'output|picture|progress|q|ruby|s|samp|script|select|small|'
ALLOWED_TAGS += 'span|strong|sub|sup|svg|template|textarea|time|u|var|video|'
ALLOWED_TAGS += 'wbr|text'
@register.filter(is_safe=True)
@stringfilter
def linebreakshtml(text):
    """Acts like the default breaklines but respect html tags."""
    blocks = re.split(r'\n{2,}', text.replace('\r', ''))
    blocks = [block.strip() for block in blocks]
    paragraphs = []
    for block in blocks:
        # Wrap the block in <p> only when it starts with plain text or an
        # allowed inline tag; otherwise leave it untouched.
        if re.match(r'^[\w\d\s]|<(%s)(\s+.*)*>' % ALLOWED_TAGS, block):
            paragraphs.append('<p>%s</p>' % block.replace('\n', '<br>'))
        else:
            paragraphs.append(block)
    return mark_safe(''.join(paragraphs))
@register.filter(is_safe=True)
def join_link(items_list, separator=''):
    """Join a list of elements inside a tags linking to get_absolute_url."""
    links = [
        format_html('<a href="{}">{}</a>',
                    element.get_absolute_url(),
                    element
                    )
        for element in items_list
    ]
    return mark_safe(separator.join(links))
| {
"repo_name": "arpegio-dj/arpegio",
"path": "arpegio/core/templatetags/arpegio_tags.py",
"copies": "1",
"size": "1925",
"license": "bsd-3-clause",
"hash": -3521376082170638000,
"line_mean": 32.7719298246,
"line_max": 79,
"alpha_frac": 0.6083116883,
"autogenerated": false,
"ratio": 3.3075601374570445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44158718257570445,
"avg_score": null,
"num_lines": null
} |
# ARP Suite - Run ARP Commands From Command Line
import sys
import arp_mitm as mitm
import arp_sslstrip as sslstrip
import arp_listen as listen
import arp_request as request
import arp_cache as cache
import arp_reconnaissance as recon
import arp_interactive as interactive
# Command-line dispatcher for the ARP suite (Python 2: print statements).
if __name__ == "__main__":
    arguments = sys.argv[1:]

    # Top-level help.
    # NOTE(review): -m/--mitm, --sslstrip and --recon are implemented below
    # but are missing from this FUNCTIONS listing.
    if '-h' in arguments or '--help' in arguments:
        print '[INFO]\tARP Suite\n'
        print '[USAGE] arp.py -c/i/L/r\n'
        print '[FUNCTIONS]'
        print '  -c --cache = Work with ARP Cache.'
        print '  -i --interactive = Runs Interactive ARP Suite.'
        print '  -L --listen = Runs an arpclient in listen Mode.'
        print '  -r --request = Generate an ARP Request Message.'
        print '\n\t* Use --h with any of these functions to learn more about them.'
        print '\t\tex. arp.py -c --h'
        print ''
        sys.exit(1)

    # Interactive mode.
    if '-i' in arguments or '--interactive' in arguments:
        interactive.run()
        sys.exit(1)

    # Listen mode: run an arpclient handling ARP messages/table.
    if '-L' in arguments or'--listen' in arguments:
        if '--h' in arguments:
            print '[INFO]\tCreates an instance of arpclient in listen mode.'
            print '\tHandles ARP Messages and ARP Table.'
            print ''
            # NOTE(review): the flag handled above is -L, but this usage
            # line advertises -l.
            print '[USAGE] arp.py -l\n'
            print '[ARGUMENTS]'
            print '\tNONE'
            sys.exit(1)
        listen.listen()
        sys.exit(1)

    # Send a single ARP request to the given IP.
    if '-r' in arguments or '--request' in arguments:
        if '--h' in arguments:
            print '[INFO]\tCreate an ARP Request message to given IP Address.'
            print '\tMake sure there is an instance of arpclient in listen mode'
            print '\tto handle ARP messages and manipulate ARP table ("arp.py -l").'
            print ''
            print '[USAGE] arp.py -r --ip [ip]\n'
            print '[ARGUMENTS]'
            print '\t"--ip" = IP Address You Wish To Resolve'
            print ''
            sys.exit(1)
        if '--ip' in arguments:
            option_index = arguments.index('--ip')
            ip = arguments[option_index+1]
        else:
            print 'Missing Argument!'
            print 'See help for mitm by typing "python arp.py -r --h"'
            sys.exit(0)
        request.send(ip)
        sys.exit(1)

    # ARP cache operations: display, look up, add, remove.
    if '-c' in arguments or '--cache' in arguments:
        if '--h' in arguments:
            print '[INFO]\tWork with the ARP Cache\n'
            print '[USAGE] arp.py -c --d/l/a/r --i [ip] --m [mac]\n'
            print '[ARGUMENTS]'
            print '"--d" = Display ARP Cache.'
            print '"--l" = Look Up ARP Cache. Must Specify Either Address'
            print '"--a" = Add ARP Cache Entry. Must Specify Both Addresses'
            print '"--r" = Remove ARP Cache Entry. Must Specify Both Addresses'
            print '"--i" = An IP Address'
            print '"--m" = A MAC Address'
            print ''
            # NOTE(review): unlike every other help branch, this one does
            # not sys.exit(), so execution falls through to the handlers
            # below -- confirm whether that is intentional.
        # Display
        if '--d' in arguments:
            cache.cache(1)
        # Look Up
        if '--l' in arguments:
            if '--i' in arguments:
                option_index = arguments.index('--i')
                ipoption = arguments[option_index+1]
                cache.cache(2,ip=ipoption)
                sys.exit(1)
            elif '--m' in arguments:
                option_index = arguments.index('--m')
                macoption = arguments[option_index+1]
                cache.cache(2,mac=macoption)
                sys.exit(1)
            else:
                print 'Missing Argument!'
                print 'See help for cache by typing "python arp.py -c --h"'
                sys.exit(0)
        # ADD an Entry
        if '--a' in arguments:
            if '--i' in arguments: # use --i to indicate you are giving an ip address
                option_index = arguments.index('--i')
                ipoption = arguments[option_index+1]
            else:
                print 'Missing Argument!'
                print 'See help for cache by typing "python arp.py -c --h"'
                sys.exit(0)
            if '--m' in arguments: # use --m to indicate you are giving a mac address
                option_index = arguments.index('--m')
                macoption = arguments[option_index+1]
            else:
                print 'Missing Argument!'
                print 'See help for cache by typing "python arp.py -c --h"'
                sys.exit(0)
            cache.cache(3,ip=ipoption,mac=macoption)
            sys.exit(1)
        # REMOVE an Entry
        if '--r' in arguments:
            if '--i' in arguments: # use --i to indicate you are giving an ip address
                option_index = arguments.index('--i')
                ipoption = arguments[option_index+1]
            else:
                print 'Missing Argument!'
                print 'See help for cache by typing "python arp.py -c --h"'
                sys.exit(0)
            if '--m' in arguments: # use --m to indicate you are giving a mac address
                option_index = arguments.index('--m')
                macoption = arguments[option_index+1]
            else:
                print 'Missing Argument!'
                print 'See help for cache by typing "python arp.py -c --h"'
                sys.exit(0)
            cache.cache(4,ip=ipoption,mac=macoption)
            sys.exit(1)

    # ARP-poisoning man-in-the-middle between hosts A and B.
    if '-m' in arguments or '--mitm' in arguments:
        if '--h' in arguments:
            print '[Info]\tLaunch an ARP Poisoning Man in the Middle Attack.\n'
            print '[Usage] arp.py -m --aI [ip] --aM [mac] --bI [ip] --bM [mac]\n'
            print '[Arguments]'
            print '\t"--aI" = target A\'s IP Address'
            print '\t"--aM" = target A\'s MAC Address'
            print '\t"--bI" = target B\'s IP Address'
            print '\t"--bM" = target B\'s MAC Address'
            print ''
            sys.exit(1)
        if '--aI' in arguments:
            option_index = arguments.index('--aI')
            aIP = arguments[option_index+1]
        else:
            print 'Missing Argument!'
            print 'See help for mitm by typing "python arp.py -m --h"'
            sys.exit(0)
        if '--aM' in arguments:
            option_index = arguments.index('--aM')
            aMAC = arguments[option_index+1]
        else:
            print 'Missing Argument!'
            print 'See help for mitm by typing "python arp.py -m --h"'
            sys.exit(0)
        if '--bI' in arguments:
            option_index = arguments.index('--bI')
            bIP = arguments[option_index+1]
        else:
            print 'Missing Argument!'
            print 'See help for mitm by typing "python arp.py -m --h"'
            sys.exit(0)
        if '--bM' in arguments:
            option_index = arguments.index('--bM')
            bMAC = arguments[option_index+1]
        else:
            print 'Missing Argument!'
            print 'See help for mitm by typing "python arp.py -m --h"'
            sys.exit(0)
        mitm.mitm(aIP,aMAC,bIP,bMAC)
        sys.exit(1)

    # SSL strip attack between a gateway and a target.
    if '--sslstrip' in arguments:
        if '--h' in arguments:
            print '[Info]\tLaunch a SSL Strip Attack.\n'
            print '[Usage] arp.py --sslstrip --gI [ip] --gM [mac] --tI [ip] --tM [mac]\n'
            print '[Arguments]'
            print '\t"--gI" = gateway\'s IP Address'
            print '\t"--gM" = gateway\'s MAC Address'
            print '\t"--tI" = target\'s IP Address'
            print '\t"--tM" = target\'s MAC Address'
            print ''
            sys.exit(1)
        if '--gI' in arguments:
            option_index = arguments.index('--gI')
            gIP = arguments[option_index+1]
        else:
            print 'Missing Argument!'
            print 'See help for mitm by typing "python arp.py -m --h"'
            sys.exit(0)
        if '--gM' in arguments:
            option_index = arguments.index('--gM')
            gMAC = arguments[option_index+1]
        else:
            print 'Missing Argument!'
            print 'See help for mitm by typing "python arp.py -m --h"'
            sys.exit(0)
        if '--tI' in arguments:
            option_index = arguments.index('--tI')
            tIP = arguments[option_index+1]
        else:
            print 'Missing Argument!'
            print 'See help for mitm by typing "python arp.py -m --h"'
            sys.exit(0)
        if '--tM' in arguments:
            option_index = arguments.index('--tM')
            tMAC = arguments[option_index+1]
        else:
            print 'Missing Argument!'
            print 'See help for mitm by typing "python arp.py -m --h"'
            sys.exit(0)
        sslstrip.sslstrip(gIP,gMAC,tIP,tMAC)
        sys.exit(1)

    # Network reconnaissance over a range of IPs.
    if '--recon' in arguments:
        if '--h' in arguments:
            # NOTE(review): this help branch does not sys.exit(); with only
            # --h given, the "Missing Argument!" path below also runs.
            print '[Info]\tLearn Address of Those on Network.\n'
            print '[Usage] arp.py --recon --ip [iprange], wildcards * allowed\n'
            print '[Arguments]'
            print '\t"--ip" = A Range of IP Adresses to Scan'
        if '--ip' in arguments:
            option_index = arguments.index('--ip')
            iprange = arguments[option_index+1]
            recon.run(str(iprange))
            sys.exit(1)
        else:
            print 'Missing Argument!'
            print 'See help for mitm by typing "python arp.py --recon --h"'
            sys.exit(0)
| {
"repo_name": "monkeesuit/school",
"path": "Network Security/ARP/arp suite/py/arp.py",
"copies": "1",
"size": "7562",
"license": "mit",
"hash": -2187622124215015700,
"line_mean": 31.3162393162,
"line_max": 80,
"alpha_frac": 0.632107908,
"autogenerated": false,
"ratio": 2.903993855606759,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4036101763606759,
"avg_score": null,
"num_lines": null
} |
# File containing the game's configuration parameters.
quantidade_jogadores = 2  # number of players
jogadores = []  # array holding the players (in playing order)
tamanho_tabuleiro = 40  # size of the board array (always a multiple of 4 so the board stays square)
quantidade_dados = 2  # how many dice are used
quantidade_reves = int(tamanho_tabuleiro/5)  # how many Chance/Setback squares exist on the board
dinheiro_inicial = 10000000  # each player's starting money
jogadas_default = 1  # number of moves each player gets (may change if the dice come up equal)
# The jail and "go to jail" squares must sit in opposite corners of the board.
# We divide the board in 4 and place "go to jail" on the first corner and the
# jail on the third corner.
pos_vai_para_cadeia = int(tamanho_tabuleiro/4)  # position of the "go to jail" square
pos_Cadeia = int(pos_vai_para_cadeia * 3)  # position of the "jail" square
# Building levels (keys/labels are game data in Portuguese: none/house/hotel).
contrucoes={
    '1': 'Nada',
    '2': 'Casa',
    '3': 'Hotel'
}
# Possible "Chance" (sorte) cards: description -> amount gained.
possiveis_sorte = [
    {"Ganhou na loteria!": "500"},
    {"Foi promovido no emprego!": "1500"}
]
# Possible "Setback" (reves) cards: description -> amount lost.
possiveis_reves = [
    {"Perdeu o mindinho da mao esquerda": "500"},
    {"Seu filho pegou Piolho": "50"},
    {"Policia Apreendeu seus 15 hectares de maconha, por pouco nao foi preso!": "3500"}
]
] | {
"repo_name": "fcrozetta/Text-Based-Monopoly",
"path": "Configuracoes.py",
"copies": "1",
"size": "1268",
"license": "mit",
"hash": 8191924199828407000,
"line_mean": 38.65625,
"line_max": 108,
"alpha_frac": 0.7247634069,
"autogenerated": false,
"ratio": 2.3656716417910446,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.8364578886422109,
"avg_score": 0.0451712324537868,
"num_lines": 32
} |
# The six xterm color-cube channel levels.
arr = [0x00, 0x5F, 0x87, 0xAF, 0xD7, 0xFF]


def get_place(num):
    """Map a 0-255 channel value to the index of the nearest cube level."""
    if num == 0xff:
        return 5
    for idx, level in enumerate(arr):
        if level > num:
            below = num - arr[idx - 1]
            above = level - num
            # Ties round toward the lower level.
            return idx - 1 if below <= above else idx


def color(num):
    """Convert a 24-bit RGB value to the closest xterm-256 palette index."""
    if num % 0xa0a0a == 0x80808:
        # Grays on the 0x080808 + k*0x0a0a0a ramp map onto the dedicated
        # grayscale entries 232-255.
        return 232 + int((num - 0x080808) / 0xa0a0a)
    pos = 16
    for shift in range(3):
        # Consume channels blue, green, red (base-6 digits of the cube).
        pos += get_place(num & 0xff) * 6 ** shift
        num >>= 8
    return pos
def mkcl(num):
    # Foreground color escape (SGR 38;5;n) for the xterm-256 palette.
    # NOTE(review): the literal should begin with the ESC control byte
    # (\x1b) before '['; it is invisible here -- verify it survived
    # copy/paste.
    return "[38;5;{}m".format(num)


def mkclb(num):
    # Background color escape (SGR 48;5;n); same ESC-byte caveat as mkcl.
    return "[48;5;{}m".format(num)
if __name__ == "__main__":
    import sys

    back = False
    result = ""
    # Bug fix: sys.argv always contains at least the script name, so the
    # original ``len(sys.argv) == 0`` check could never trigger; require at
    # least one color argument instead.
    if len(sys.argv) < 2:
        print("please color num")
    else:
        args = sys.argv[1:]
        # Optional leading -b switches to background-color escapes.
        if args[0] == "-b":
            back = True
            args = args[1:]
        for arg in args:
            cn = color(int(arg, 16))
            try:
                if back:
                    result += mkclb(cn) + " "
                else:
                    result += mkcl(cn) + " "
            except:  # NOTE(review): bare except silently skips failures.
                pass
        print(result)
| {
"repo_name": "CsTarepanda/cuiImage",
"path": "old/tocol.py",
"copies": "1",
"size": "1086",
"license": "mit",
"hash": -666249832171712300,
"line_mean": 20.2941176471,
"line_max": 72,
"alpha_frac": 0.4447513812,
"autogenerated": false,
"ratio": 3.1296829971181555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4074434378318156,
"avg_score": null,
"num_lines": null
} |
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list) -> list:
    """
    Get the Next Greatest Element (NGE) for all elements in a list.

    The NGE of an element is the first element after it that is greater
    than it, or -1 if no such element exists. Naive O(n^2) version using
    index-based loops.

    >>> next_greatest_element_slow(arr) == expect
    True
    """
    result = []
    for i in range(len(arr)):
        nge = -1
        for j in range(i + 1, len(arr)):
            if arr[i] < arr[j]:
                nge = arr[j]
                break
        result.append(nge)
    return result
def next_greatest_element_fast(arr: list) -> list:
    """
    Like next_greatest_element_slow() but iterates with enumerate() for the
    outer pass and scans a slice of arr for the inner pass.

    >>> next_greatest_element_fast(arr) == expect
    True
    """
    result = []
    for i, current in enumerate(arr):
        # First later element greater than the current one, or -1.
        nge = next((later for later in arr[i + 1 :] if current < later), -1)
        result.append(nge)
    return result
def next_greatest_element(arr: list) -> list:
    """
    Get the Next Greatest Element (NGE) for all elements in a list.

    Maximum element present after the current one which is also greater than the
    current one.

    A naive way to solve this is to take two loops and check for the next bigger
    number but that will make the time complexity as O(n^2). The better way to solve
    this would be to use a stack to keep track of maximum number giving a linear time
    solution.

    >>> next_greatest_element(arr) == expect
    True
    """
    stack = []
    result = [-1] * len(arr)
    # Scan right-to-left; the stack holds candidate NGEs, increasing from
    # top to bottom, so its top is always the nearest greater element.
    for index in reversed(range(len(arr))):
        # Idiom cleanup: replace the len()-truthiness checks and the
        # break-out-of-while guard with the standard ``while stack and ...``.
        # Candidates not greater than arr[index] can never be the NGE of
        # anything further left, so discard them.
        while stack and stack[-1] <= arr[index]:
            stack.pop()
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    # Verify the docstring examples, then show and time all three versions.
    testmod()

    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    # timeit executes the snippets in a fresh namespace, so the functions
    # and the shared test data must be imported explicitly.
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        " next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
| {
"repo_name": "TheAlgorithms/Python",
"path": "data_structures/stacks/next_greater_element.py",
"copies": "1",
"size": "2861",
"license": "mit",
"hash": -248818529745682200,
"line_mean": 28.193877551,
"line_max": 85,
"alpha_frac": 0.5704299196,
"autogenerated": false,
"ratio": 3.4763061968408264,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9542195245090441,
"avg_score": 0.0009081742700771708,
"num_lines": 98
} |
# Arranged probability
#
# Problem 100
#
# If a box contains twenty-one coloured discs, composed of fifteen blue discs and six red discs, and two discs were
# taken at random, it can be seen that the probability of taking two blue discs, P(BB) = (15/21)×(14/20) = 1/2.
# The next such arrangement, for which there is exactly 50% chance of taking two blue discs at random, is a box
# containing eighty-five blue discs and thirty-five red discs.
# By finding the first arrangement to contain over 10^12 = 1,000,000,000,000 discs in total, determine the number of
# blue discs that the box would contain.
# P(BB) = (B / N) * ((B - 1) / (N - 1)) = 0.5 = B * (B - 1) / N * (N - 1)
# -> B**2 - B = N*(N-1)/2
# -> B**2 - B - N*(N-1)/2 = 0
# quadratic formula:
# ax**2 + bx + c = 0, x = (-b +- sqrt(b**2 - 4ac))/2a
# -> a = 1, b = -1, c = -N*(N-1)/2
# by using this formula we can find the amount of blue disks necessary for a 50% chance for a given N.
# if this amount is an integer, we have a valid solution.
from itertools import count
from math import sqrt
def solve(a, b, c):
    """Return the larger root of a*x**2 + b*x + c = 0 (quadratic formula)."""
    discriminant = b ** 2 - 4 * a * c
    return (-b + sqrt(discriminant)) / (2 * a)


def is_valid(B):
    """A blue-disc count is only meaningful when the root is whole."""
    return B.is_integer()


def solve_for_50(N):
    """Blue discs giving P(two blue) == 1/2 out of N discs in total."""
    # From B**2 - B - N*(N-1)/2 = 0 with a=1, b=-1.
    return solve(1, -1, (-N * (N - 1)) / 2)
# Lazily search upward from N = 10**12 total discs for the first N whose
# 50% solution B is a whole number; runs at import time and may take long.
# NOTE(review): float sqrt precision is marginal at this magnitude (see the
# author's comment below); exact integer (Pell-equation) arithmetic would
# be more robust.
solutions = ((N + 10**12, int(B)) for N, B in enumerate(map(solve_for_50, count(10**12))) if is_valid(B))
print(next(solutions))

# not 100% correct due to precision (result is 0.49999999999999999) but I'll leave it at that since
# https://www.google.at/webhp?sourceid=chrome-instant&ion=1&espv=2&ie=UTF-8#q=707106783028%2F1000000002604++*+707106783027%2F1000000002603
| {
"repo_name": "chjdev/euler",
"path": "python/problem100.py",
"copies": "1",
"size": "1604",
"license": "bsd-2-clause",
"hash": 8523230152979735000,
"line_mean": 35.4318181818,
"line_max": 138,
"alpha_frac": 0.6562694947,
"autogenerated": false,
"ratio": 2.6672212978369383,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38234907925369377,
"avg_score": null,
"num_lines": null
} |
class Queue(object):
    """Fixed-capacity FIFO queue backed by a circular array.

    One extra slot is allocated so the head/tail indices alone can
    distinguish a full queue from an empty one.
    """

    def __init__(self, max_size):
        self.queue = [None] * (max_size + 1)
        self.enq = 0  # index where the next element will be stored
        self.deq = 0  # index of the element removed next
        self.n = 0    # number of elements currently held

    def increase(self, n):
        """Return the index following ``n``, wrapping around the array."""
        return (n + 1) % len(self.queue)

    def enqueue(self, v):
        """Store ``v`` at the tail; the queue must not be full."""
        assert not self.is_full()
        self.queue[self.enq] = v
        self.enq = self.increase(self.enq)
        self.n += 1

    def dequeue(self):
        """Remove and return the head element; the queue must not be empty."""
        assert not self.is_empty()
        v = self.queue[self.deq]
        self.deq = self.increase(self.deq)
        self.n -= 1
        return v

    def is_empty(self):
        # Invariant: head and tail coincide exactly when nothing is stored.
        assert (self.enq == self.deq) == (self.n == 0)
        return self.n == 0

    def is_full(self):
        return self.increase(self.enq) == self.deq

    def size(self):
        return self.n
if __name__ == "__main__":
    # Exercise the queue against the shared test harness.
    from queuetest import arrayqueuetest
    arrayqueuetest(Queue)
| {
"repo_name": "BartMassey/nb-misc",
"path": "arrayqueue.py",
"copies": "1",
"size": "1163",
"license": "mit",
"hash": -7759576620189280000,
"line_mean": 24.2608695652,
"line_max": 54,
"alpha_frac": 0.5662650602,
"autogenerated": false,
"ratio": 3.4277286135693217,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4493993673769322,
"avg_score": null,
"num_lines": null
} |
# Array based union find data structure
# P: The array, which encodes the set membership of all the elements
class UFarray:
    """Array-based union-find (disjoint set) structure.

    ``P[label]`` stores the parent of ``label``; a label is a root when it
    is its own parent. Roots are always the smallest label in their set.
    """

    def __init__(self):
        # Parent array encoding label -> set membership.
        self.P = []
        # Next label handed out by makeLabel().
        self.label = 0

    def makeLabel(self):
        """Create a fresh singleton set and return its label."""
        new = self.label
        self.label += 1
        self.P.append(new)
        return new

    def setRoot(self, i, root):
        """Point every node on the path from ``i`` upward at ``root``."""
        while self.P[i] < i:
            parent = self.P[i]
            self.P[i] = root
            i = parent
        self.P[i] = root

    def findRoot(self, i):
        """Return the root of the tree containing ``i`` (no compression)."""
        while self.P[i] < i:
            i = self.P[i]
        return i

    def find(self, i):
        """Return the root of ``i``'s tree, compressing the path to it."""
        root = self.findRoot(i)
        self.setRoot(i, root)
        return root

    def union(self, i, j):
        """Merge the sets containing ``i`` and ``j`` under the lesser root.

        Deliberately less aggressive about path compression than find();
        over-compression hurt performance here.
        """
        if i != j:
            root = min(self.findRoot(i), self.findRoot(j))
            self.setRoot(j, root)
            self.setRoot(i, root)

    def flatten(self):
        """One-pass halving: point each node at its grandparent."""
        for i in range(1, len(self.P)):
            self.P[i] = self.P[self.P[i]]

    def flattenL(self):
        """Flatten while relabeling roots with consecutive small integers."""
        k = 1
        for i in range(1, len(self.P)):
            if self.P[i] < i:
                self.P[i] = self.P[self.P[i]]
            else:
                self.P[i] = k
                k += 1
| {
"repo_name": "spwhitt/cclabel",
"path": "ufarray.py",
"copies": "3",
"size": "1751",
"license": "unlicense",
"hash": 1773855529449242000,
"line_mean": 27.2419354839,
"line_max": 68,
"alpha_frac": 0.5242718447,
"autogenerated": false,
"ratio": 3.6103092783505155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5634581123050515,
"avg_score": null,
"num_lines": null
} |
"""Array buffer class.
@todo: Implement slicing to allow for C{glVertexAttribPointer} with C{size}, C{stride}, and C{pointer} parameters.
@author: Stephan Wenger
@date: 2012-02-29
"""
import numpy as _np
import glitter.raw as _gl
from glitter.utils import buffer_dimensions_to_primitive, primitive_to_buffer_dimensions
from glitter.arrays.basebuffer import BaseBuffer
class ArrayBuffer(BaseBuffer):
    """OpenGL vertex attribute buffer (GL_ARRAY_BUFFER) object."""

    _binding = "array_buffer_binding"
    _target = _gl.GL_ARRAY_BUFFER

    def _use(self, index, num_components=None, stride=0, first=0):
        # Bind this buffer as the data source for generic vertex attribute
        # ``index``. ``stride`` and ``first`` are given in elements and
        # converted to bytes via the dtype size below.
        if num_components is None:
            # Infer components per vertex from the trailing dimension.
            if len(self.shape) == 1:
                num_components = 1
            elif 1 <= self.shape[-1] <= 4:
                num_components = self.shape[-1]
            else:
                raise ValueError("must specify num_components")
        if self.dtype.is_float():
            with self:
                _gl.glVertexAttribPointer(index, num_components, self.dtype._as_gl(), _gl.GL_FALSE, stride * self.dtype.nbytes, first * self.dtype.nbytes)
        else:
            # Integer attributes use the "I" entry point so values are not
            # implicitly converted to float.
            with self:
                _gl.glVertexAttribIPointer(index, num_components, self.dtype._as_gl(), stride * self.dtype.nbytes, first * self.dtype.nbytes)

    def draw(self, mode=None, count=None, first=0, instances=None):
        """Issue a glDrawArrays-style draw call over this buffer."""
        if mode is None:
            # Guess the primitive from the second-to-last dimension via the
            # lookup table; an ambiguous shape requires an explicit mode.
            if len(self.shape) > 2:
                mode = buffer_dimensions_to_primitive.get(self.shape[1], None)
            else:
                mode = buffer_dimensions_to_primitive.get(1, None)
            if mode is None:
                raise ValueError("must specify mode")
        if count is None:
            dim = primitive_to_buffer_dimensions.get(mode, None)
            if dim is not None and len(self.shape) > 2 and self.shape[-2] != dim:
                raise ValueError("buffer shape does not match mode")
            # Vertices = product of all dimensions except the component axis.
            count = _np.prod(self.shape[:-1]) if len(self.shape) > 1 else self.shape[0]
        if instances is None:
            with self:
                _gl.glDrawArrays(mode._value, first, count)
        else:
            # NOTE(review): the standard GL entry point is named
            # glDrawArraysInstanced; confirm the binding really exposes
            # "glDrawArraysInstances", otherwise this path fails at runtime.
            with self:
                _gl.glDrawArraysInstances(mode._value, first, count, instances)

__all__ = ["ArrayBuffer"]
"repo_name": "swenger/glitter",
"path": "glitter/arrays/arraybuffer.py",
"copies": "1",
"size": "2182",
"license": "mit",
"hash": 6779522705983344000,
"line_mean": 37.2807017544,
"line_max": 154,
"alpha_frac": 0.6008249313,
"autogenerated": false,
"ratio": 3.781629116117851,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9773786069693458,
"avg_score": 0.021733595544878595,
"num_lines": 57
} |
"""Array container types.
A parray.type is used to create a data structure that describes an list of a
particular subtype. The methods provided to a user expose a list-like interface
to the user. A parray.type's interface inherits from ptype.container and will
always have a .value that's a list. In most cases, a parray.type can be treated
as a python list.
The basic parray interface provides the following methods on top of the methods
required to provide an array-type interface.
class interface(parray.type):
# the sub-element that the array is composed of.
_object_ = sub-type
# the length of the array
length = count
def insert(self, index, object):
'''Insert ``object`` into the array at the specified ``index``.'''
def append(self, object):
'''Appends the specified ``object`` to the end of the array type.'''
def extend(self, iterable):
'''Appends all the objects provided in ``iterable`` to the end of the array type.'''
def pop(self, index):
'''Removes and returns the instance at the specified index of the array.'''
There are a couple of array types that can be used to describe the different data structures
one may encounter. They are as following:
parray.type -- The basic array type. /self.length/ specifies its length,
and /self._object_/ specifies its subtype.
parray.terminated -- An array type that is terminated by a specific element
type. In this array type, /self.length/ is initially
set to None due to the termination of this array being
defined by the result of a user-supplied
.isTerminator(sub-instance) method.
parray.uninitialized -- An array type that will read until an error or other
kind of interrupt happens. The size of this type is
determined dynamically.
parray.infinite -- An array type that will read indefinitely until it
consumes the blocksize of its parent element or the
entirety of its data source.
parray.block -- An array type that will read elements until it reaches the
length of its .blocksize() method. If a sub-element causes
the array to read past its .blocksize(), the sub-element
will remain partially uninitialized.
Example usage:
# define a basic type
from ptypes import parray
class type(parray.type):
_object_ = subtype
length = 4
# define a terminated array
class terminated(parray.terminated):
_object_ = subtype
def isTerminator(self, value):
return value is sentineltype or value == sentinelvalue
# define a block array
class block(parray.block):
_object_ = subtype
def blocksize(self):
return size-of-array
# instantiate and load a type
instance = type()
instance.load()
# fetch an element from the array
print(instance[index])
# print the length of the array
print(len(instance))
"""
import itertools, operator
from . import ptype, utils, error
__all__ = 'type,terminated,infinite,block'.split(',')
from . import config
Config = config.defaults
Log = Config.log.getChild('parray')
class __array_interface__(ptype.container):
    '''provides the generic features expected out of an array'''

    def __contains__(self, instance):
        '''L.__contains__(x) -> True if L has an item x, else False'''
        # Membership is identity-based: an element is contained only when the
        # exact instance is stored, not when an element merely compares equal.
        return any(item is instance for item in self.value)

    def __len__(self):
        '''x.__len__() <==> len(x)'''
        # Before the array is loaded, fall back to the declared length.
        if not self.initializedQ():
            return self.length
        return len(self.value)

    def insert(self, index, object):
        """Insert ``object`` into ``self`` at the specified ``index``.
        This will update the offsets within ``self``, so that all elements are
        contiguous when committing.
        """
        # Adopt the inserted element: give it the offset of the element it
        # displaces, and detach it from any previous parent/source.
        offset = self.value[index].getoffset()
        object.setoffset(offset, recurse=True)
        object.parent, object.source = self, None
        self.value.insert(index, object)
        # Re-flow the offsets of every element from the insertion point on so
        # that the array stays contiguous.
        for i in range(index, len(self.value)):
            item = self.value[i]
            item.setoffset(offset, recurse=True)
            offset += item.blocksize()
        return

    def __append__(self, object):
        # Append via the container implementation, then recompute the offset
        # from the previous element's extent (the value returned by the super
        # class is deliberately discarded and overwritten).
        idx = len(self.value)
        offset = super(__array_interface__, self).__append__(object)
        offset = (self.value[idx - 1].getoffset() + self.value[idx - 1].size()) if idx > 0 else self.getoffset()
        self.value[idx].setoffset(offset, recurse=True)
        return offset

    def append(self, object):
        """Append ``object`` to a ``self``. Return the offset it was inserted at.
        This will update the offset of ``object`` so that it will appear at the
        end of the array.
        """
        return self.__append__(object)

    def extend(self, iterable):
        # Append every item in order; offsets are maintained by append().
        [ self.append(item) for item in iterable ]
        return self

    def pop(self, index=-1):
        """Remove the element at ``index`` or the last element in the array.
        This will update all the offsets within ``self`` so that all elements are
        contiguous.
        """
        # determine the correct index (normalizes a negative index)
        idx = self.value.index(self.value[index])
        res = self.value.pop(idx)
        # Shift every following element back so the array stays contiguous.
        offset = res.getoffset()
        for i, item in enumerate(self.value[idx:]):
            item.setoffset(offset, recurse=True)
            offset += item.blocksize()
        return res

    def __getindex__(self, index):
        # Identity mapping; subclasses may translate keys to list indices.
        return index

    def __delitem__(self, index):
        '''x.__delitem__(y) <==> del x[y]'''
        if isinstance(index, slice):
            # NOTE(review): this builds the range from index.stop, which will
            # raise if the slice has no stop (e.g. a[1:]) -- confirm intended.
            origvalue = self.value[:]
            for idx in range(*slice(index.start or 0, index.stop, index.step or 1).indices(index.stop)):
                realidx = self.__getindex__(idx)
                self.value.pop( self.value.index(origvalue[realidx]) )
            return origvalue.__getitem__(index)
        return self.pop(index)

    def __setitem__(self, index, value):
        '''x.__setitem__(i, y) <==> x[i]=y'''
        if isinstance(index, slice):
            # A single instance is repeated across the whole slice; otherwise
            # the provided iterable is consumed one element at a time.
            ivalue = itertools.repeat(value) if isinstance(value, ptype.generic) else iter(value)
            res = self.value[:]
            # NOTE(review): same index.stop dependency as __delitem__.
            for idx in range(*slice(index.start or 0, index.stop, index.step or 1).indices(index.stop)):
                i = self.__getindex__(idx)
                self.value[i] = next(ivalue)
            return res.__getitem__(index)
        idx = self.__getindex__(index)
        result = super(__array_interface__, self).__setitem__(idx, value)
        result.__name__ = str(index)
        return result

    def __getitem__(self, index):
        '''x.__getitem__(y) <==> x[y]'''
        if isinstance(index, slice):
            # Slicing returns a new array instance (a clone of the module-level
            # parray.type) that shares the selected element instances.
            result = [ self.value[self.__getindex__(idx)] for idx in range(*index.indices(len(self))) ]
            t = ptype.clone(type, length=len(result), _object_=self._object_)
            return self.new(t, offset=result[0].getoffset() if len(result) else self.getoffset(), value=result)
        idx = self.__getindex__(index)
        # make python raise the correct exception (IndexError/TypeError) if so..
        ([None]*len(self))[idx]
        return super(__array_interface__, self).__getitem__(idx)

    def __element__(self):
        # Render a "typename[length]" description of the array, tolerating an
        # uninitialized instance by falling back to the declared length.
        try: length = len(self)
        except Exception: length = self.length or 0
        object = self._object_
        if object is None:
            res = '(untyped)'
        else:
            res = object.typename() if ptype.istype(object) else object.__name__
        return u"{:s}[{:d}]".format(res, length)

    def summary(self, **options):
        # Prefix the element description to the container summary.
        res = super(__array_interface__, self).summary(**options)
        # NOTE(review): both branches below are identical, so the
        # initializedQ() test is currently redundant -- confirm intent.
        if self.initializedQ():
            return ' '.join([self.__element__(), res])
        return ' '.join([self.__element__(), res])

    def __repr__(self):
        """Calls .repr() to display the details of a specific object"""
        try:
            prop = ','.join(u"{:s}={!r}".format(k, v) for k, v in self.properties().items())
        # If we got an InitializationError while fetching the properties (due to
        # a bunk user implementation), then we simply fall back to the internal
        # implementation.
        except error.InitializationError:
            prop = ','.join(u"{:s}={!r}".format(k, v) for k, v in self.__properties__().items())
        result, element = self.repr(), self.__element__()
        # multiline (includes element description)
        if result.count('\n') > 0 or utils.callable_eq(self.repr, __array_interface__.details):
            result = result.rstrip('\n')
            if prop:
                return u"{:s} '{:s}' {{{:s}}} {:s}\n{:s}".format(utils.repr_class(self.classname()), self.name(), prop, element, result)
            return u"{:s} '{:s}' {:s}\n{:s}".format(utils.repr_class(self.classname()), self.name(), element, result)
        # if the user chose to not use the default summary, then prefix the element description.
        if all(not utils.callable_eq(self.repr, item) for item in [__array_interface__.repr, __array_interface__.summary]):
            result = ' '.join([element, result])
        _hex, _precision = Config.pbinary.offset == config.partial.hex, 3 if Config.pbinary.offset == config.partial.fractional else 0
        # single-line
        descr = u"{:s} '{:s}'".format(utils.repr_class(self.classname()), self.name()) if self.value is None else utils.repr_instance(self.classname(), self.name())
        if prop:
            return u"[{:s}] {:s} {{{:s}}} {:s}".format(utils.repr_position(self.getposition(), hex=_hex, precision=_precision), descr, prop, result)
        return u"[{:s}] {:s} {:s}".format(utils.repr_position(self.getposition(), hex=_hex, precision=_precision), descr, result)
class type(__array_interface__):
    '''
    A container for managing ranges of a particular object.
    Settable properties:
        _object_:ptype.type<w>
            The type of the array
        length:int<w>
            The length of the array only used during initialization of the object
    '''
    _object_ = None     # subclass of ptype.type
    length = 0          # int

    # load ourselves lazily
    def __load_block(self, **attrs):
        # Create every element without loading it; offsets are laid out
        # contiguously using each element's blocksize.
        offset = self.getoffset()
        for index in range(self.length):
            item = self.new(self._object_, __name__=str(index), offset=offset, **attrs)
            self.value.append(item)
            offset += item.blocksize()
        return self

    # load ourselves incrementally
    def __load_container(self, **attrs):
        # Create and immediately load each element so that dynamic/container
        # types can compute their own blocksize before the next offset is used.
        offset = self.getoffset()
        for index in range(self.length):
            item = self.new(self._object_, __name__=str(index), offset=offset, **attrs)
            self.value.append(item)
            item.load()
            offset += item.blocksize()
        return self

    def copy(self, **attrs):
        # Duplicate the container and carry over the array-specific attributes.
        result = super(type, self).copy(**attrs)
        result._object_ = self._object_
        result.length = self.length
        return result

    def alloc(self, fields=(), **attrs):
        """Allocate the array, optionally initializing elements from ``fields``.

        ``fields`` may be a sequence of (index, value) tuples to set specific
        elements, or a plain sequence of values assigned in order.
        """
        result = super(type, self).alloc(**attrs)
        if len(fields) and isinstance(fields[0], tuple):
            # (index, value) form: replace or set only the named elements,
            # preserving each element's existing position.
            for name, val in fields:
                idx = result.__getindex__(name)
                position = result.value[idx].getposition()
                if ptype.istype(val) or ptype.isresolveable(val):
                    result.value[idx] = result.new(val, position=position).a
                elif isinstance(val, ptype.generic):
                    result.value[idx] = result.new(val, position=position)
                else:
                    result.value[idx].set(val)
                continue
        else:
            # Positional form: assign values in order, re-flowing offsets as
            # replacement elements may change each element's blocksize.
            offset = result.getoffset()
            for idx, val in enumerate(fields):
                name = "{:d}".format(idx)
                if ptype.istype(val) or ptype.isresolveable(val):
                    result.value[idx] = result.new(val, __name__=name, offset=offset).a
                elif isinstance(val, ptype.generic):
                    result.value[idx] = result.new(val, __name__=name, offset=offset)
                else:
                    result.value[idx].set(val)
                offset += result.value[idx].blocksize()
            # re-alloc elements that exist in the rest of the array
            for idx in range(len(fields), len(result.value)):
                result.value[idx].a
        result.setoffset(self.getoffset(), recurse=True)
        return result

    def load(self, **attrs):
        """Load ``self.length`` elements from the source."""
        try:
            with utils.assign(self, **attrs):
                object = self._object_
                self.value = []
                # which kind of load are we
                if ptype.istype(object) and not ptype.iscontainer(object):
                    self.__load_block()
                elif ptype.iscontainer(object) or ptype.isresolveable(object):
                    self.__load_container()
                else:
                    Log.info("type.load : {:s} : Unable to load array due to an unknown element type ({!s}).".format(self.instance(), object))
            return super(type, self).load(**attrs)
        except error.LoadError as E:
            raise error.LoadError(self, exception=E)
        # Unreachable: both paths above either return or raise.
        raise error.AssertionError(self, 'type.load')

    def __setvalue__(self, *values, **attrs):
        """Update self with the contents of the first argument in ``value``"""
        if not values:
            return self
        value, = values
        if self.initializedQ() and len(self) == len(value):
            # Same shape: delegate element-wise assignment to the container.
            return super(type, self).__setvalue__(*value)
        else:
            # Different shape (or uninitialized): rebuild the element list,
            # instantiating types/instances or coercing raw values as needed.
            iterable = enumerate(value)
            length, self.value = len(self), []
            for idx, ivalue in iterable:
                if ptype.isresolveable(ivalue) or ptype.istype(ivalue):
                    res = self.new(ivalue, __name__=str(idx)).a
                elif isinstance(ivalue, ptype.generic):
                    res = ivalue
                else:
                    res = self.new(self._object_, __name__=str(idx)).a.set(ivalue)
                self.value.append(res)
            # output a warning if the length is already set to something and the user explicitly changed it to something different.
            if length and length != len(self):
                Log.warning("type.__setvalue__ : {:s} : Length of array was explicitly changed ({:d} != {:d}).".format(self.instance(), length, len(self)))
            result = super(type, self).__setvalue__(*value)
            result.length = len(self)
        return self

    def __getstate__(self):
        # Pickle support: include the element type and declared length.
        return super(type, self).__getstate__(), self._object_, self.length

    def __setstate__(self, state):
        state, self._object_, self.length = state
        super(type, self).__setstate__(state)
class terminated(type):
    '''
    an array that terminates deserialization based on the value returned by
    .isTerminator()
    '''
    # The length is unknown until a terminator is encountered.
    length = None

    def isTerminator(self, value):
        '''intended to be overloaded. should return True if element /value/ represents the end of the array.'''
        raise error.ImplementationError(self, 'terminated.isTerminator')

    def __len__(self):
        '''x.__len__() <==> len(x)'''
        # With no declared length, the only meaningful length is the number of
        # elements actually read.
        if self.length is None:
            if self.value is None:
                raise error.InitializationError(self, 'terminated.__len__')
            return len(self.value)
        return super(terminated, self).__len__()

    def alloc(self, fields=(), **attrs):
        # Allocation cannot hit a terminator, so pin the length to the number
        # of provided fields and neutralize isTerminator.
        attrs.setdefault('length', len(fields))
        attrs.setdefault('isTerminator', lambda value: False)
        return super(terminated, self).alloc(fields, **attrs)

    def load(self, **attrs):
        """Read elements until isTerminator() accepts one (or self.length is reached)."""
        try:
            with utils.assign(self, **attrs):
                forever = itertools.count() if self.length is None else range(self.length)
                offset, self.value = self.getoffset(), []
                for index in forever:
                    item = self.new(self._object_, __name__=str(index), offset=offset)
                    self.value.append(item)
                    # The terminator element itself is kept in the array.
                    if self.isTerminator(item.load()):
                        break
                    size = item.blocksize()
                    # we only allow elements with a zero size when the object type is
                    # a call (meaning it's a dynamic type) or if its blocksize is dynamic.
                    if size <= 0:
                        if issubclass(self._object_, ptype.generic) and item.__blocksize_originalQ__():
                            Log.warning("terminated.load : {:s} : Terminated early due to zero-length element : {:s}".format(self.instance(), item.instance()))
                            break
                        # validate that the element size is a sane value, as the size returned
                        # by the user's implementation should _always_ be positive.
                        if size < 0:
                            raise error.AssertionError(self, 'terminated.load', message="Element size for {:s} is < 0".format(item.classname()))
                        Log.info("terminated.load : {:s} : Added a dynamic element with a {:d} length to a terminated array : {:s}".format(self.instance(), size, item.instance()))
                    offset += size
        except (Exception, error.LoadError) as E:
            raise error.LoadError(self, exception=E)
        return self

    def initializedQ(self):
        '''Returns True if all elements excluding the last one (sentinel) are initialized'''
        # Check to see if array contains any elements
        if self.value is None:
            return False
        # Check if all elements are initialized.
        return all(item.initializedQ() for item in self.value)
class uninitialized(terminated):
    """An array that can contain uninitialized or partially initialized elements.

    The array determines its size dynamically, ignoring partially- or
    uninitialized elements found near the end.
    """

    def size(self):
        """Return the number of bytes occupied by elements that have a value."""
        if self.value is not None:
            # Skip elements that were never assigned any contents.
            return sum(item.size() for item in self.value if item.value is not None)
        raise error.InitializationError(self, 'uninitialized.size')

    def __properties__(self):
        """Return the rendering properties, tagging arrays whose element count
        diverges from the declared length."""
        res = super(uninitialized, self).__properties__()
        # If we're really not initialized, then there's nothing to do.
        if self.value is None:
            return res
        # Otherwise, we're actually initialized (possibly only partially), so
        # annotate the instance when the loaded element count does not match
        # the declared length.  (The original had a duplicated dead
        # "return res" here; behavior is unchanged.)
        if self.length is not None:
            if self.length < len(self.value):
                res['inflated'] = True
            elif self.length > len(self.value):
                res['abated'] = True
        return res

    def initializedQ(self):
        '''Returns True if all elements are partial or completely initialized.'''
        # Check to see if array contains any elements
        if self.value is None:
            return False
        # Grab all initialized elements near the beginning...
        res = list(itertools.takewhile(operator.methodcaller('initializedQ'), self.value))
        # ...then succeed when everything is initialized, or when only a
        # contiguous tail is uninitialized.
        return len(res) == len(self.value) or all(not item.initializedQ() for item in self.value[len(res):])

    def serialize(self):
        '''Serialize all currently available content of the array.'''
        # Stop at the first element that is neither initialized nor sized.
        iterable = itertools.takewhile(lambda item: item.initializedQ() or item.size() > 0, self.value)
        return b''.join(item.serialize() for item in iterable)
class infinite(uninitialized):
    '''An array that reads elements until an exception or interrupt happens'''

    def __next_element(self, offset, **attrs):
        '''Utility method that returns a new element at a specified offset and loads it. intended to be overloaded.'''
        index = len(self.value)
        item = self.new(self._object_, __name__=str(index), offset=offset)
        try:
            item.load(**attrs)
        # A failed load is tolerated: the (possibly partial) element is still
        # returned so the caller can detect it via initializedQ().
        except (error.LoadError, error.InitializationError) as E:
            path = str().join(map("<{:s}>".format, self.backtrace()))
            Log.info("infinite.__next_element : {:s} : Unable to read terminal element {:s} : {:s}".format(self.instance(), item.instance(), path))
        return item

    def isTerminator(self, value):
        # An infinite array has no terminator by default; reading only stops
        # on an uninitialized element or when the parent's blocksize is hit.
        return False

    def __properties__(self):
        res = super(infinite, self).__properties__()
        # Check if we're really an underloaded parray.infinite
        if res.get('underload', False):
            # If the size of our partially initialized last element is larger
            # than what our expected size should be, then it's not.
            if self.value[-1].blocksize() >= self.blocksize() - self.size():
                res.pop('underload')
            return res
        # That was all we wanted..
        return res

    def load(self, **attrs):
        """Read elements until an error occurs or the parent's blocksize is consumed."""
        # fallback to regular loading if user has hardcoded the length
        if attrs.get('length', self.length) is not None:
            return super(infinite, self).load(**attrs)
        with utils.assign(self, **attrs):
            offset, self.value = self.getoffset(), []
            # Bound the read by the parent's blocksize when one is available.
            current, maximum = 0, None if self.parent is None else self.parent.blocksize()
            try:
                while True if maximum is None else current < maximum:
                    # read next element at the current offset
                    item = self.__next_element(offset)
                    if not item.initializedQ():
                        Log.debug("infinite.load : {:s} : Element {:d} left partially initialized : {:s}".format(self.instance(), len(self.value), item.instance()))
                    # The partial element is kept before terminating the loop.
                    self.value.append(item)
                    if not item.initializedQ():
                        break
                    if self.isTerminator(item):
                        break
                    size = item.blocksize()
                    # only allow elements with a zero size when the object type is a call
                    # or if its blocksize is dynamically calculated.
                    if size <= 0:
                        if issubclass(self._object_, ptype.generic) and item.__blocksize_originalQ__():
                            Log.warning("infinite.load : {:s} : Terminated early due to zero-length element : {:s}".format(self.instance(), item.instance()))
                            break
                        # check sanity of element size
                        if size < 0:
                            raise error.AssertionError(self, 'infinite.load', message="Element size for {:s} is < 0".format(item.classname()))
                        Log.info("infinite.load : {:s} : Added a dynamic element with a {:d} length to an infinite array : {:s}".format(self.instance(), size, item.instance()))
                    # next iteration
                    offset += size
                    current += size
            except (Exception, error.LoadError) as E:
                # When we have a parent, log how far we got before re-raising
                # as a LoadError so the failure can be traced.
                if self.parent is not None:
                    path = str().join(map("<{:s}>".format, self.backtrace()))
                    if len(self.value):
                        Log.warning("infinite.load : {:s} : Stopped reading at element {:s} : {:s}".format(self.instance(), self.value[-1].instance(), path), exc_info=True)
                    else:
                        Log.warning("infinite.load : {:s} : Stopped reading before load : {:s}".format(self.instance(), path), exc_info=True)
                raise error.LoadError(self, exception=E)
        return self

    def loadstream(self, **attr):
        '''an iterator that incrementally populates the array'''
        # Generator variant of load(): each element is yielded as it is read.
        with utils.assign(self, **attr):
            self.value = []
            offset = self.getoffset()
            current, maximum = 0, None if self.parent is None else self.parent.blocksize()
            try:
                while True if maximum is None else current < maximum:
                    # yield next element at the current offset
                    item = self.__next_element(offset)
                    self.value.append(item)
                    yield item
                    if not item.initializedQ():
                        break
                    if self.isTerminator(item):
                        break
                    size = item.blocksize()
                    # validate the size of the element, we only will allow zero-sized
                    # elements if our object is dynamically determined via a callable.
                    if size <= 0:
                        if issubclass(self._object_, ptype.generic) and item.__blocksize_originalQ__():
                            Log.warning("infinite.loadstream : {:s} : Terminated early due to zero-length element : {:s}".format(self.instance(), item.instance()))
                            break
                        # check sanity of element size
                        if size < 0:
                            raise error.AssertionError(self, 'infinite.loadstream', message="Element size for {:s} is < 0".format(item.classname()))
                        Log.info("infinite.loadstream : {:s} : Added a dynamic element with a {:d} length to an infinite array : {:s}".format(self.instance(), size, item.instance()))
                    # next iteration
                    offset += size
                    current += size
            except error.LoadError as E:
                if self.parent is not None:
                    path = str().join(map("<{:s}>".format, self.backtrace()))
                    Log.warning("infinite.loadstream : {:s} : Stopped reading at element {:s} : {:s}".format(self.instance(), item.instance(), path))
                raise error.LoadError(self, exception=E)
            pass
        # Read everything until we have a load error, because that's what this
        # method does...  (note: `type` here is the module-level parray.type,
        # so this invokes the plain container load to finalize the instance)
        try:
            super(type, self).load()
        except error.LoadError:
            pass
        return
class block(uninitialized):
    '''An array that reads elements until their size totals the same amount returned by .blocksize()'''

    def isTerminator(self, value):
        # Termination is determined by the accumulated size, not by a sentinel.
        return False

    def load(self, **attrs):
        """Read elements until their total size reaches self.blocksize()."""
        # fallback to regular loading if user has hardcoded the length
        if attrs.get('length', self.length) is not None:
            return super(block, self).load(**attrs)
        with utils.assign(self, **attrs):
            forever = itertools.count() if self.length is None else range(len(self))
            offset, self.value = self.getoffset(), []
            if self.blocksize() == 0:   # if array is empty...
                return self
            current = 0
            for index in forever:
                item = self.new(self._object_, __name__=str(index), offset=offset)
                try:
                    item = item.load()
                except error.LoadError as E:
                    #E = error.LoadError(self, exception=E)
                    o = current + item.blocksize()
                    # if we error'd while decoding too much, then let user know
                    if o > self.blocksize():
                        path = str().join(map("<{:s}>".format, item.backtrace()))
                        Log.warning("block.load : {:s} : Reached end of blockarray at {:s} : {:s}".format(self.instance(), item.instance(), path))
                        self.value.append(item)
                    # otherwise add the incomplete element to the array
                    elif o < self.blocksize():
                        Log.warning("block.load : {:s} : LoadError raised at {:s} : {!r}".format(self.instance(), item.instance(), E))
                        self.value.append(item)
                    # NOTE: when o == blocksize() exactly, the element is
                    # dropped; in every case the loop stops here.
                    break
                # validate the size of the element, we only will allow zero-sized
                # elements if our object is dynamically determined via a callable.
                size = item.blocksize()
                if size <= 0:
                    if issubclass(self._object_, ptype.generic) and item.__blocksize_originalQ__():
                        Log.warning("block.load : {:s} : Terminated early due to zero-length element : {:s}".format(self.instance(), item.instance()))
                        self.value.append(item)
                        break
                    # verify the sanity of the element size as lengths can't be less than zero.
                    if size < 0:
                        raise error.AssertionError(self, 'block.load', message="Element size for {:s} is < 0".format(item.classname()))
                    Log.info("block.load : {:s} : Added a dynamic element with a {:d} length to a block array : {:s}".format(self.instance(), size, item.instance()))
                # if our child element pushes us past the blocksize
                if current + size >= self.blocksize():
                    path = str().join(map("<{:s}>".format, item.backtrace()))
                    Log.debug("block.load : {:s} : Terminated at {:s} : {:s}".format(self.instance(), item.instance(), path))
                    self.value.append(item)
                    break
                # add to list, and check if we're done.
                self.value.append(item)
                if self.isTerminator(item):
                    break
                offset, current = offset+size, current+size
            pass
        return self

    def alloc(self, *args, **attrs):
        # With explicit fields, allocate like a block; with none, skip the
        # block semantics and allocate like a plain terminated array.
        return super(block if args else terminated, self).alloc(*args, **attrs)

    def initializedQ(self):
        # Initialized when the elements are initialized AND either the total
        # size has filled the blocksize (dynamic length) or the declared
        # length has been reached.
        return super(block, self).initializedQ() and (self.size() >= self.blocksize() if self.length is None else len(self.value) == self.length)
if __name__ == '__main__':
    # Minimal in-module test harness: a test raises Success to pass and
    # Failure (or anything else) to fail.
    class Result(Exception): pass
    class Success(Result): pass
    class Failure(Result): pass

    TestCaseList = []
    def TestCase(fn):
        """Decorator that wraps ``fn`` in a harness and registers it."""
        def harness(**kwds):
            name = fn.__name__
            try:
                res = fn(**kwds)
                # A test that returns without raising Success is a failure.
                raise Failure
            except Success as E:
                print('%s: %r'% (name, E))
                return True
            except Failure as E:
                print('%s: %r'% (name, E))
            except Exception as E:
                print('%s: %r : %r'% (name, Failure(), E))
            return False
        TestCaseList.append(harness)
        # Return the undecorated function so it can still be called directly.
        return fn
if __name__ == '__main__':
    import ptypes, sys, operator, array, string, random, functools
    from ptypes import pstruct, parray, pint, provider, utils, dynamic, ptype

    # Portable array-to-bytes helper: array.tostring was renamed to tobytes.
    arraytobytes = operator.methodcaller('tostring' if sys.version_info.major < 3 else 'tobytes')

    # A trivial two-byte record used by several tests.
    class RecordGeneral(pstruct.type):
        _fields_ = [
            (pint.uint8_t, 'start'),
            (pint.uint8_t, 'end'),
        ]

    # Fixed-size opaque types used as array elements throughout the tests.
    class qword(ptype.type): length = 8
    class dword(ptype.type): length = 4
    class word(ptype.type): length = 2
    class byte(ptype.type): length = 1

    random.seed()

    def function(self):
        # Dynamic element type: picks a random subtype per element.
#        if len(self.value) > 0:
#            self[0].load()
#            print(self[0])
        return random.sample([byte, word, dword, function2], 1)[0]

    def function2(self):
        # Dynamic element resolving to a qword instance.
        return qword()
    @TestCase
    def test_array_type_dword():
        # A fixed-length array loads exactly `length` elements from its source.
        class myarray(parray.type):
            length = 5
            _object_ = dword
        x = myarray()
        # print(x)
        # print(x.length,len(x), x.value)
        x.source = provider.bytes(b'AAAA'*15)
        x.l
        # print(x.length,len(x), x.value)
        # print("{!r}".format(x))
        if len(x) == 5 and x[4].serialize() == b'AAAA':
            raise Success

    @TestCase
    def test_array_type_function():
        # An array whose element type is resolved dynamically via a callable.
        class myarray(parray.type):
            length = 16
            _object_ = function
        x = myarray()
        x.source = provider.memory()
        x.setoffset(id(x))
        x.load()
        # print(x)
        if len(x) == 16:
            raise Success

    @TestCase
    def test_array_terminated_uint8():
        # A terminated array stops at (and includes) the sentinel element.
        class myarray(parray.terminated):
            _object_ = pint.uint8_t
            def isTerminator(self, v):
                if v.serialize() == b'H':
                    return True
                return False
        block = b'GFEDCBABCDHEFG'
        x = myarray(source=provider.bytes(block)).l
        if len(x) == 11:
            raise Success

    @TestCase
    def test_array_infinite_struct():
        # An infinite array reads records until the source is exhausted,
        # leaving a trailing partial record when the data is odd-sized.
        class RecordContainer(parray.infinite):
            _object_ = RecordGeneral
        chars = b'\xdd\xdd'
        string = chars * 8
        string = string[:-1]
        z = RecordContainer(source=provider.bytes(string)).l
        if len(z)-1 == int(len(string)/2.0) and len(string)%2 == 1:
            raise Success

    @TestCase
    def test_array_infinite_struct_partial():
        # Partial trailing records are kept but reported as uninitialized.
        class RecordContainer(parray.infinite):
            _object_ = RecordGeneral
        data = provider.bytes(b'AAAAA')
        z = RecordContainer(source=data).l
        s = RecordGeneral().a.blocksize()
        if z.blocksize() == len(z)*s and len(z) == 3 and z.size() == 5 and not z[-1].initializedQ():
            raise Success

    @TestCase
    def test_array_block_uint8():
        # A block array reads elements only up to its blocksize().
        class container(parray.block):
            _object_ = pint.uint8_t
            blocksize = lambda s:4
        block = bytes(bytearray(range(0x10)))
        a = container(source=provider.bytes(block)).l
        if len(a) == 4:
            raise Success

    @TestCase
    def test_array_infinite_type_partial():
        # An infinite array over a truncated source ends with a partial element.
        b = string.ascii_letters+string.digits
        count = 0x10
        child_type = pint.uint32_t
        class container_type(parray.infinite):
            _object_ = child_type
        block_length = child_type.length * count
        block = b'\0'*block_length
        n = container_type(source=provider.bytes(block)).l
        if len(n)-1 == count and not n[-1].initializedQ():
            raise Success

    @TestCase
    def test_array_block_uint32():
        # blocksize() bounds the number of fixed-size elements read.
        count = 8
        child_type = pint.uint32_t
        class container_type(parray.block):
            _object_ = child_type
        block_length = child_type.length * count
        block = b'\0'*block_length
        container_type.blocksize = lambda s: child_type.length * 4
        a = container_type(source=provider.bytes(block)).l
        if len(a) == 4:
            raise Success
    @TestCase
    def test_array_infinite_nested_array():
        # An infinite array of fixed-size sub-arrays, terminated by value.
        class subarray(parray.type):
            length = 4
            _object_ = pint.uint8_t
            def int(self):
                return functools.reduce(lambda agg, item: 256 * agg + item.int(), self.value, 0)
            def repr(self, **options):
                if self.initializedQ():
                    return self.classname() + " {:x}".format(self.int())
                return self.classname() + ' ???'
        class extreme(parray.infinite):
            _object_ = subarray
            def isTerminator(self, v):
                return v.int() == 0x42424242
        a = extreme(source=provider.bytes(b'A'*0x100 + b'B'*0x100 + b'C'*0x100 + b'DDDD'))
        a=a.l
        if len(a) == (0x100 / subarray.length)+1:
            raise Success

    @TestCase
    def test_array_infinite_nested_block():
        # An infinite array whose elements are randomly chosen block arrays.
        random.seed(0)
        class leaf(pint.uint32_t): pass
        class rootcontainer(parray.block):
            _object_ = leaf
        class acontainer(rootcontainer):
            blocksize = lambda x: 8
        class bcontainer(rootcontainer):
            _object_ = pint.uint16_t
            blocksize = lambda x: 8
        class ccontainer(rootcontainer):
            _object_ = pint.uint8_t
            blocksize = lambda x: 8
        class arr(parray.infinite):
            def randomcontainer(self):
                l = [ acontainer, bcontainer, ccontainer ]
                return random.sample(l, 1)[0]
            _object_ = randomcontainer
        iterable = (random.randint(*params) for params in [tuple(boundary for boundary in bytearray(b'AZ'))] * 0x100)
        string = bytes(bytearray(iterable))
        a = arr(source=provider.bytes(string))
        a=a.l
        if a.blocksize() == 0x108:
            raise Success

    @TestCase
    def test_array_infinite_nested_partial():
        # Streaming load (loadstream) over a fake file; the last element is
        # left partially initialized when the stream runs dry.
        class fakefile(object):
            d = array.array('L' if len(array.array('I', 4 * b'\0')) > 1 else 'I', ((item * 0xdead) & 0xffffffff for item in range(0x100)))
            d = array.array('B', bytearray(arraytobytes(d) + b'\xde\xad\xde\xad'))
            o = 0
            def seek(self, ofs):
                self.o = ofs
            def read(self, amount):
                r = arraytobytes(self.d[self.o : amount + self.o])
                self.o += amount
                return r
        strm = provider.stream(fakefile())
        class stoofoo(pstruct.type):
            _fields_ = [ (pint.uint32_t, 'a') ]
        class argh(parray.infinite):
            _object_ = stoofoo
        x = argh(source=strm)
        for a in x.loadstream():
            pass
        if not a.initializedQ() and x[-2].serialize() == b'\xde\xad\xde\xad':
            raise Success
    @TestCase
    def test_array_terminated_string():
        # A NUL-terminated string: trailing data past the sentinel is ignored.
        class szstring(parray.terminated):
            _object_ = pint.uint8_t
            def isTerminator(self, value):
                return value.int() == 0
        data = provider.bytes(b'hello world\x00not included\x00')
        a = szstring(source=data).l
        if len(a) == len(b'hello world\x00'):
            raise Success

    @TestCase
    def test_array_nested_terminated_string():
        # A terminated array of terminated strings, ended by the "end\0" entry.
        class szstring(parray.terminated):
            _object_ = pint.uint8_t
            def isTerminator(self, value):
                return value.int() == 0
        class argh(parray.terminated):
            _object_ = szstring
            def isTerminator(self, value):
                return value.serialize() == b'end\x00'
        data = provider.bytes(b'hello world\x00is included\x00end\x00not\x00')
        a = argh(source=data).l
        if len(a) == 3:
            raise Success

    @TestCase
    def test_array_block_nested_terminated_string():
        # A block array of NUL-terminated uint16 strings, bounded at 9000 bytes.
        class szstring(parray.terminated):
            _object_ = pint.uint16_t
            def isTerminator(self, value):
                return value.int() == 0
        class ninethousand(parray.block):
            _object_ = szstring
            blocksize = lambda x: 9000
        s = ((b'A'*498) + b'\x00\x00') + ((b'B'*498)+b'\x00\x00')
        a = ninethousand(source=provider.bytes(s*9000)).l
        if len(a) == 18 and a.size() == 9000:
            raise Success

    @TestCase
    def test_array_block_nested_terminated_block():
        # A block array of terminated arrays of fixed-size block arrays.
        class fiver(parray.block):
            _object_ = pint.uint8_t
            blocksize = lambda s: 5
        class feiverfrei(parray.terminated):
            _object_ = fiver
            def isTerminator(self, value):
                return value.serialize() == b'\x00\x00\x00\x00\x00'
        class dundundun(parray.block):
            _object_ = feiverfrei
            blocksize = lambda x: 50
        dat = b'A'*5
        end = b'\x00'*5
        s = (dat*4)+end + (dat*4)+end
        a = dundundun(source=provider.bytes(s*5)).l
        if len(a) == 2 and len(a[0]) == 5 and len(a[1]) == 5:
            raise Success

    @TestCase
    def test_array_block_blocksize():
        # blocksize() defined as a method (rather than a lambda) works too.
        class blocked(parray.block):
            _object_ = pint.uint32_t
            def blocksize(self):
                return 16
        data = b'\xAA\xAA\xAA\xAA'*4
        data+= b'\xBB'*4
        x = blocked(source=provider.bytes(data))
        x = x.l
        if len(x) == 4 and x.size() == 16:
            raise Success
@TestCase
def test_array_set_uninitialized():
class argh(parray.type):
_object_ = pint.int32_t
a = argh(source=provider.empty())
a.set([x for x in range(69)])
if len(a) == 69 and sum(x.int() for x in a) == 2346:
raise Success
@TestCase
def test_array_set_initialized():
class argh(parray.type):
_object_ = pint.int32_t
a = argh(source=provider.empty(), length=69)
a.a.set([42 for _ in range(69)])
if sum(x.int() for x in a) == 2898:
raise Success
@TestCase
def test_array_alloc_keyvalue_set():
class argh(parray.type):
_object_ = pint.int32_t
a = argh(length=4).alloc(((0,0x77777777),(3,-1)))
if a[0].int() == 0x77777777 and a[-1].int() == -1:
raise Success
@TestCase
def test_array_alloc_set_iterable():
class argh(parray.type):
_object_ = pint.int32_t
a = argh(length=4).alloc((0,2,4))
if tuple(s.int() for s in a) == (0,2,4,0):
raise Success
@TestCase
def test_array_alloc_keyvalue_instance():
class aigh(parray.type):
_object_ = pint.uint8_t
length = 4
class argh(parray.type):
_object_ = pint.uint32_t
x = aigh().alloc(list(bytearray(b'PE\0\0')))
a = argh(length=4).alloc(((0,x),(-1,0x5a4d)))
if a[0].serialize() == b'PE\0\0' and a[-1].serialize() == b'MZ\0\0':
raise Success
@TestCase
def test_array_set_initialized_value():
a = parray.type(_object_=pint.uint32_t,length=4).a
a.set((10,10,10,10))
if sum(x.int() for x in a) == 40:
raise Success
@TestCase
def test_array_set_initialized_type():
    """set() with ptype classes replaces each element's type (uint8 -> uint32, so size 4*4)."""
    a = parray.type(_object_=pint.uint8_t,length=4).a
    a.set((pint.uint32_t,)*4)
    if sum(x.size() for x in a) == 16:
        raise Success
@TestCase
def test_array_set_initialized_container():
    """set() with container (array) types replaces each element with a 4-byte sub-array."""
    b = ptype.clone(parray.type,_object_=pint.uint8_t,length=4)
    a = parray.type(_object_=pint.uint8_t,length=4).a
    a.set((b,)*4)
    if sum(x.size() for x in a) == 16:
        raise Success
@TestCase
def test_array_set_initialized_instance():
    """set() with pre-built instances adopts them, values included (4 * 0x40 == 256)."""
    b = ptype.clone(parray.type,_object_=pint.uint8_t,length=4)
    a = parray.type(_object_=pint.uint8_t,length=4).a
    a.set(tuple(pint.uint32_t().set(0x40) for x in range(4)))
    if sum(x.int() for x in a) == 256:
        raise Success
@TestCase
def test_array_set_uninitialized_dynamic_value():
    """set() with values drives a callable _object_ whose width cycles 0,1,2,3 bytes (sum 24)."""
    class blah(parray.type):
        def _object_(self):
            # Width depends on the previously appended element: cycles 0,1,2,3.
            length = 0 if len(self.value) == 0 else (self.value[-1].length+1)%4
            return ptype.clone(pint.uint_t,length=length)
        length = 16
    a = blah()
    a.set((0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3))
    if sum(x.size() for x in a) == 6*4:
        raise Success
@TestCase
def test_array_set_uninitialized_dynamic_type():
    """set() with explicit types overrides the dynamic _object_ callable (four 1-byte ints)."""
    class blah(parray.type):
        def _object_(self):
            length = 0 if len(self.value) == 0 else (self.value[-1].length+1)%4
            return ptype.clone(pint.uint_t,length=length)
        length = 4
    a = blah()
    a.set((pint.uint8_t,pint.uint8_t,pint.uint8_t,pint.uint8_t))
    if sum(x.size() for x in a) == 4:
        raise Success
@TestCase
def test_array_set_uninitialized_dynamic_instance():
    """set() with pre-set instances overrides the dynamic _object_ callable (4 * 2 == 8)."""
    class blah(parray.type):
        def _object_(self):
            length = 0 if len(self.value) == 0 else (self.value[-1].length+1)%4
            return ptype.clone(pint.uint_t,length=length)
        length = 4
    a = blah()
    a.set((pint.uint8_t().set(2),pint.uint8_t().set(2),pint.uint8_t().set(2),pint.uint8_t().set(2)))
    if sum(x.int() for x in a) == 8:
        raise Success
@TestCase
def test_array_alloc_value():
    """alloc() with plain values keeps the declared element type and sets each value."""
    class blah(parray.type):
        _object_ = pint.uint32_t
        length = 4
    a = blah().alloc((4,8,0xc,0x10))
    if all(x.size() == 4 for x in a) and tuple(x.int() for x in a) == (4,8,12,16):
        raise Success
@TestCase
def test_array_alloc_type():
    """alloc() with types replaces the declared element type (uint32 -> uint8)."""
    class blah(parray.type):
        _object_ = pint.uint32_t
        length = 4
    a = blah().alloc((pint.uint8_t,)*4)
    if all(x.size() == 1 for x in a):
        raise Success
@TestCase
def test_array_alloc_instance():
    """alloc() with instances adopts both their type and values (0+1+2+3 == 6)."""
    class blah(parray.type):
        _object_ = pint.uint32_t
        length = 4
    a = blah().alloc([pint.uint8_t().set(i) for i in range(4)])
    if all(x.size() == 1 for x in a) and sum(x.int() for x in a) == 6:
        raise Success
@TestCase
def test_array_alloc_partial():
    """alloc() with a short list overrides only the leading elements; the rest use _object_."""
    class blah(parray.type):
        _object_ = pint.uint32_t
        length = 4
    a = blah().alloc([pint.uint8_t])
    if a[0].size() == 1 and all(a[x].size() == 4 for x in range(1,4)):
        raise Success
@TestCase
def test_array_alloc_infinite_empty():
    """Allocating a parray.infinite with no source data yields an empty serialization."""
    class blah(parray.infinite):
        _object_ = pint.uint32_t
    a = blah().a
    if a.serialize() == b'':
        raise Success
#@TestCase
#def test_array_alloc_terminated_partial():
# class blah(parray.terminated):
# _object_ = pint.uint32_t
# def isTerminator(self, value):
# return value.int() == 1
# a = blah().a
# a.value.extend(map(a.new, (pint.uint32_t,)*2))
# a.a
# if a.serialize() == b'\x00\x00\x00\x00\x00\x00\x00\x00':
# raise Success
@TestCase
def test_array_alloc_infinite_sublement_infinite():
    """Allocating an infinite array of terminated sub-arrays initializes empty.

    NOTE(review): "sublement" in the name looks like a typo for "subelement";
    left unchanged because the name is how the case is registered.
    """
    class blah(parray.infinite):
        class _object_(parray.terminated):
            _object_ = pint.uint32_t
            def isTerminator(self, value):
                return value.int() == 1
    a = blah().a
    if a.initializedQ() and a.serialize() == b'':
        raise Success
@TestCase
def test_array_set_array_with_dict():
    """set() with dicts assigns struct fields by name; missing fields default to 0."""
    class blah(parray.type):
        length = 4
        class _object_(pstruct.type):
            _fields_ = [(pint.uint32_t, 'a'), (pint.uint32_t, 'b')]
    res = blah().a.set([dict(a=1, b=2), dict(a=3, b=4), dict(a=5), dict(b=6)])
    if res.get() == ((1, 2), (3, 4), (5, 0), (0, 6)):
        raise Success
@TestCase
def test_array_append_getoffset():
    """append() returns the offset of the new element (base + two uint32 elements)."""
    x = parray.type(length=2, _object_=pint.uint32_t, offset=0x10).a
    offset = x.append(pint.uint16_t)
    if offset == x.getoffset() + x[0].size() * 2:
        raise Success
@TestCase
def test_array_alloc_dynamic_element_blocksize_1():
    """alloc() with an (index, type) pair whose blocksize() depends on its offset."""
    class t(parray.type):
        _object_, length = pint.uint8_t, 4
    class dynamic_t(ptype.block):
        def blocksize(self):
            # 1 byte anywhere except offset 0.
            return 1 if self.getoffset() else 0
    res = t().alloc([(2, dynamic_t)])
    if res.size() == 4:
        raise Success
@TestCase
def test_array_alloc_dynamic_element_blocksize_2():
    """alloc() with an offset-dependent blocksize() element given positionally."""
    class t(parray.type):
        _object_, length = pint.uint8_t, 4
    class dynamic_t(ptype.block):
        def blocksize(self):
            # 1 byte anywhere except offset 0.
            return 1 if self.getoffset() else 0
    res = t().alloc([pint.uint8_t, pint.uint8_t, dynamic_t, pint.uint8_t])
    if res.size() == 4:
        raise Success
if __name__ == '__main__':
    # Run every registered test case with debug logging enabled.
    import logging
    ptypes.config.defaults.log.setLevel(logging.DEBUG)

    results = []
    for t in TestCaseList:
        results.append( t() )
| {
"repo_name": "arizvisa/syringe",
"path": "lib/ptypes/parray.py",
"copies": "1",
"size": "47839",
"license": "bsd-2-clause",
"hash": 4398554643739254300,
"line_mean": 37.149122807,
"line_max": 182,
"alpha_frac": 0.5524781036,
"autogenerated": false,
"ratio": 4.031942688579857,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003310128944649043,
"num_lines": 1254
} |
"""Array data-type implementations (abstraction points for GL array types"""
import ctypes
import OpenGL
from OpenGL import constants, plugins
from OpenGL.arrays import formathandler
from OpenGL import logs
log = logs.getLog( 'OpenGL.arrays.arraydatatype' )
from OpenGL import acceleratesupport
ADT = None
if acceleratesupport.ACCELERATE_AVAILABLE:
try:
from OpenGL_accelerate.arraydatatype import ArrayDatatype as ADT
except ImportError, err:
log.warn(
"Unable to load ArrayDatatype accelerator from OpenGL_accelerate"
)
if ADT is None:
# Python-coded version
class HandlerRegistry( dict ):
GENERIC_OUTPUT_PREFERENCES = ['numpy','numeric','ctypesarrays']
def __init__( self, plugin_match ):
self.match = plugin_match
self.output_handler = None
self.preferredOutput = None
self.all_output_handlers = []
def __call__( self, value ):
"""Lookup of handler for given value"""
try:
typ = value.__class__
except AttributeError, err:
typ = type(value)
handler = self.get( typ )
if not handler:
if hasattr( typ, '__mro__' ):
for base in typ.__mro__:
handler = self.get( base )
if not handler:
handler = self.match( base )
if handler:
handler = handler.load()
if handler:
handler = handler()
if handler:
self[ typ ] = handler
if hasattr( handler, 'registerEquivalent' ):
handler.registerEquivalent( typ, base )
return handler
raise TypeError(
"""No array-type handler for type %r (value: %s) registered"""%(
typ, repr(value)[:50]
)
)
return handler
def get_output_handler( self ):
"""Fast-path lookup for output handler object"""
if self.output_handler is None:
if not self:
formathandler.FormatHandler.loadAll()
if self.preferredOutput is not None:
self.output_handler = self.get( self.preferredOutput )
if not self.output_handler:
for preferred in self.GENERIC_OUTPUT_PREFERENCES:
self.output_handler = self.get( preferred )
if self.output_handler:
break
if not self.output_handler:
# look for anything that can do output...
for handler in self.all_output_handlers:
self.output_handler = handler
break
if not self.output_handler:
raise RuntimeError(
"""Unable to find any output handler at all (not even ctypes/numpy ones!)"""
)
return self.output_handler
def register( self, handler, types=None ):
"""Register this class as handler for given set of types"""
if not isinstance( types, (list,tuple)):
types = [ types ]
for type in types:
self[ type ] = handler
if handler.isOutput:
self.all_output_handlers.append( handler )
def registerReturn( self, handler ):
"""Register this handler as the default return-type handler"""
self.preferredOutput = handler
self.output_handler = None
GLOBAL_REGISTRY = HandlerRegistry( plugins.FormatHandler.match)
formathandler.FormatHandler.TYPE_REGISTRY = GLOBAL_REGISTRY
class ArrayDatatype( object ):
"""Mix-in for array datatype classes
The ArrayDatatype marker essentially is used to mark a particular argument
as having an "array" type, which means that it is eligible for handling
via the arrays sub-package and its registered handlers.
"""
typeConstant = None
handler = GLOBAL_REGISTRY
getHandler = GLOBAL_REGISTRY.__call__
returnHandler = GLOBAL_REGISTRY.get_output_handler
isAccelerated = False
@classmethod
def getRegistry( cls ):
"""Get our handler registry"""
return cls.handler
def from_param( cls, value ):
"""Given a value in a known data-pointer type, convert to a ctypes pointer"""
return cls.getHandler(value).from_param( value, cls.typeConstant )
from_param = classmethod( logs.logOnFail( from_param, log ) )
def dataPointer( cls, value ):
"""Given a value in a known data-pointer type, return long for pointer"""
try:
return cls.getHandler(value).dataPointer( value )
except Exception, err:
log.warn(
"""Failure in dataPointer for %s instance %s""", type(value), value,
)
raise
dataPointer = classmethod( logs.logOnFail( dataPointer, log ) )
def voidDataPointer( cls, value ):
"""Given value in a known data-pointer type, return void_p for pointer"""
pointer = cls.dataPointer( value )
try:
return ctypes.c_void_p(pointer)
except TypeError, err:
return pointer
voidDataPointer = classmethod( logs.logOnFail( voidDataPointer, log ) )
def typedPointer( cls, value ):
"""Return a pointer-to-base-type pointer for given value"""
return ctypes.cast( cls.dataPointer(value), ctypes.POINTER( cls.baseType ))
typedPointer = classmethod( typedPointer )
def asArray( cls, value, typeCode=None ):
"""Given a value, convert to preferred array representation"""
return cls.getHandler(value).asArray( value, typeCode or cls.typeConstant )
asArray = classmethod( logs.logOnFail( asArray, log ) )
def arrayToGLType( cls, value ):
"""Given a data-value, guess the OpenGL type of the corresponding pointer
Note: this is not currently used in PyOpenGL and may be removed
eventually.
"""
return cls.getHandler(value).arrayToGLType( value )
arrayToGLType = classmethod( logs.logOnFail( arrayToGLType, log ) )
def arraySize( cls, value, typeCode = None ):
"""Given a data-value, calculate dimensions for the array (number-of-units)"""
return cls.getHandler(value).arraySize( value, typeCode or cls.typeConstant )
arraySize = classmethod( logs.logOnFail( arraySize, log ) )
def unitSize( cls, value, typeCode=None ):
"""Determine unit size of an array (if possible)
Uses our local type if defined, otherwise asks the handler to guess...
"""
return cls.getHandler(value).unitSize( value, typeCode or cls.typeConstant )
unitSize = classmethod( logs.logOnFail( unitSize, log ) )
def zeros( cls, dims, typeCode=None ):
"""Allocate a return array of the given dimensions filled with zeros"""
return cls.returnHandler().zeros( dims, typeCode or cls.typeConstant )
zeros = classmethod( logs.logOnFail( zeros, log ) )
def dimensions( cls, value ):
"""Given a data-value, get the dimensions (assumes full structure info)"""
return cls.getHandler(value).dimensions( value )
dimensions = classmethod( logs.logOnFail( dimensions, log ) )
def arrayByteCount( cls, value ):
"""Given a data-value, try to determine number of bytes it's final form occupies
For most data-types this is arraySize() * atomic-unit-size
"""
return cls.getHandler(value).arrayByteCount( value )
arrayByteCount = classmethod( logs.logOnFail( arrayByteCount, log ) )
# the final array data-type classes...
class GLclampdArray( ArrayDatatype, ctypes.POINTER(constants.GLclampd )):
"""Array datatype for GLclampd types"""
baseType = constants.GLclampd
typeConstant = constants.GL_DOUBLE
class GLclampfArray( ArrayDatatype, ctypes.POINTER(constants.GLclampf )):
"""Array datatype for GLclampf types"""
baseType = constants.GLclampf
typeConstant = constants.GL_FLOAT
class GLfloatArray( ArrayDatatype, ctypes.POINTER(constants.GLfloat )):
"""Array datatype for GLfloat types"""
baseType = constants.GLfloat
typeConstant = constants.GL_FLOAT
class GLdoubleArray( ArrayDatatype, ctypes.POINTER(constants.GLdouble )):
"""Array datatype for GLdouble types"""
baseType = constants.GLdouble
typeConstant = constants.GL_DOUBLE
class GLbyteArray( ArrayDatatype, ctypes.POINTER(constants.GLbyte )):
"""Array datatype for GLbyte types"""
baseType = constants.GLbyte
typeConstant = constants.GL_BYTE
class GLcharArray( ArrayDatatype, ctypes.c_char_p):
"""Array datatype for ARB extension pointers-to-arrays"""
baseType = constants.GLchar
typeConstant = constants.GL_BYTE
GLcharARBArray = GLcharArray
class GLshortArray( ArrayDatatype, ctypes.POINTER(constants.GLshort )):
"""Array datatype for GLshort types"""
baseType = constants.GLshort
typeConstant = constants.GL_SHORT
class GLintArray( ArrayDatatype, ctypes.POINTER(constants.GLint )):
"""Array datatype for GLint types"""
baseType = constants.GLint
typeConstant = constants.GL_INT
class GLubyteArray( ArrayDatatype, ctypes.POINTER(constants.GLubyte )):
"""Array datatype for GLubyte types"""
baseType = constants.GLubyte
typeConstant = constants.GL_UNSIGNED_BYTE
GLbooleanArray = GLubyteArray
class GLushortArray( ArrayDatatype, ctypes.POINTER(constants.GLushort )):
"""Array datatype for GLushort types"""
baseType = constants.GLushort
typeConstant = constants.GL_UNSIGNED_SHORT
class GLuintArray( ArrayDatatype, ctypes.POINTER(constants.GLuint )):
"""Array datatype for GLuint types"""
baseType = constants.GLuint
typeConstant = constants.GL_UNSIGNED_INT
class GLint64Array( ArrayDatatype, ctypes.POINTER(constants.GLint64 )):
"""Array datatype for GLuint types"""
baseType = constants.GLint64
typeConstant = None # TODO: find out what this should be!
class GLuint64Array( ArrayDatatype, ctypes.POINTER(constants.GLuint64 )):
"""Array datatype for GLuint types"""
baseType = constants.GLuint64
typeConstant = constants.GL_UNSIGNED_INT64
class GLenumArray( ArrayDatatype, ctypes.POINTER(constants.GLenum )):
"""Array datatype for GLenum types"""
baseType = constants.GLenum
typeConstant = constants.GL_UNSIGNED_INT
class GLsizeiArray( ArrayDatatype, ctypes.POINTER(constants.GLsizei )):
"""Array datatype for GLenum types"""
baseType = constants.GLsizei
typeConstant = constants.GL_INT
class GLvoidpArray( ArrayDatatype, ctypes.POINTER(constants.GLvoid )):
"""Array datatype for GLenum types"""
baseType = constants.GLvoidp
typeConstant = constants.GL_VOID_P
else:
# Cython-coded array handler
log.info( 'Using accelerated ArrayDatatype' )
ArrayDatatype = ADT( None, None )
GLclampdArray = ADT( constants.GL_DOUBLE, constants.GLclampd )
GLclampfArray = ADT( constants.GL_FLOAT, constants.GLclampf )
GLdoubleArray = ADT( constants.GL_DOUBLE, constants.GLdouble )
GLfloatArray = ADT( constants.GL_FLOAT, constants.GLfloat )
GLbyteArray = ADT( constants.GL_BYTE, constants.GLbyte )
GLcharArray = GLcharARBArray = ADT( constants.GL_BYTE, constants.GLchar )
GLshortArray = ADT( constants.GL_SHORT, constants.GLshort )
GLintArray = ADT( constants.GL_INT, constants.GLint )
GLubyteArray = GLbooleanArray = ADT( constants.GL_UNSIGNED_BYTE, constants.GLubyte )
GLushortArray = ADT( constants.GL_UNSIGNED_SHORT, constants.GLushort )
GLuintArray = ADT( constants.GL_UNSIGNED_INT, constants.GLuint )
GLint64Array = ADT( None, constants.GLint64 )
GLuint64Array = ADT( constants.GL_UNSIGNED_INT64, constants.GLuint64 )
GLenumArray = ADT( constants.GL_UNSIGNED_INT, constants.GLenum )
GLsizeiArray = ADT( constants.GL_INT, constants.GLsizei )
GLvoidpArray = ADT( constants.GL_VOID_P, constants.GLvoidp )
# Map GL type constants to their array classes.
# NOTE: the original literal listed GL_DOUBLE and GL_FLOAT twice each
# (GLclampdArray/GLclampfArray first, GLdoubleArray/GLfloatArray second).
# Python dict literals keep only the *last* value for a duplicated key, so
# those first entries were silently dead.  The literal below produces exactly
# the same mapping without the misleading duplicate-key entries.
GL_CONSTANT_TO_ARRAY_TYPE = {
    constants.GL_DOUBLE : GLdoubleArray,
    constants.GL_FLOAT : GLfloatArray,
    constants.GL_BYTE : GLbyteArray,
    constants.GL_SHORT : GLshortArray,
    constants.GL_INT : GLintArray,
    constants.GL_UNSIGNED_BYTE : GLubyteArray,
    constants.GL_UNSIGNED_SHORT : GLushortArray,
    constants.GL_UNSIGNED_INT : GLuintArray,
    #constants.GL_UNSIGNED_INT : GLenumArray,
}
| {
"repo_name": "frederica07/Dragon_Programming_Process",
"path": "PyOpenGL-3.0.2/OpenGL/arrays/arraydatatype.py",
"copies": "2",
"size": "13457",
"license": "bsd-2-clause",
"hash": -8926495907233217000,
"line_mean": 44.9283276451,
"line_max": 100,
"alpha_frac": 0.6131381437,
"autogenerated": false,
"ratio": 4.503681392235609,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.020945691219601034,
"num_lines": 293
} |
"""Array definitions.
Contains arrays, beamforming, and null-steering.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as tri
from .geometry import GeometryMixin
from .element import MonopoleElement
class BaseArrayMixin(GeometryMixin):
    """Core array functionality, including plots and shared calculations.

    This class should not be used directly! Inherit, and override
    _get_beam_weights instead.
    """
    def __init__(self, n_ant, geometry_type, wavelength_spacing=.5,
                 random_seed=None):
        # n_ant: number of antenna elements; geometry_type is forwarded to
        # GeometryMixin, which supplies self.geometry and _get_propagation.
        self.n_ant = n_ant
        self.geometry_type = geometry_type
        GeometryMixin.__init__(self, n_ant, geometry_type, wavelength_spacing,
                               random_seed)
        self.wavelength_spacing = wavelength_spacing
        # Column weight vector computed by the subclass hook below.
        self.beam_weights = self._get_beam_weights()

    def _get_beam_weights(self):
        # Subclass hook: must return the complex weight column vector.
        raise AssertionError("""Arrays should override this method!""")

    def gain_response(self, az_arr, el_arr=None):
        """Calculate gain responses for input azimuths.

        Expects a numpy array of any shape for az_arr.
        If el_arr is specified, must be the same size as az_arr.
        Returns a numpy array of gains with the same shape as az_arr.
        """
        if el_arr is not None:
            assert az_arr.shape == el_arr.shape
        flat_az = az_arr.ravel()
        az_gains = np.zeros(flat_az.shape)
        for n, az in enumerate(flat_az):
            propagation = self._get_propagation(az)
            # Steering vector for this azimuth given the element geometry.
            response = np.matrix(np.exp(-2j * np.pi * np.dot(
                self.geometry, propagation) * self.wavelength_spacing))
            # |w^H r|: magnitude of the weighted array response.
            az_gains[n] = np.abs(np.dot(self.beam_weights.H, response))[0, 0]
        return az_gains.reshape(az_arr.shape)

    def plot_gain(self, n_pts=50, min_az=-np.pi, max_az=np.pi, log_scale=True):
        """Plot the gain over azimuth for an array."""
        all_az = np.linspace(min_az, max_az, n_pts)
        all_gain = self.gain_response(all_az)
        if log_scale is True:
            all_gain = 10 * np.log(all_gain)
        plt.plot(all_az, all_gain, color='steelblue')
        plt.xlim(min_az, max_az)

    def plot_gain2D(self, n_pts=720, log_scale=True):
        """Plot the 2D gain pattern of an array."""
        # Pad the plot window beyond the geometry extents (from GeometryMixin).
        x_plot_min = self.x_min - 1000
        x_plot_max = self.x_max + 1000
        y_plot_min = self.y_min - 1000
        y_plot_max = self.y_max + 1000
        # Based on tricontourf example from
        # http://matplotlib.org/examples/pylab_examples/tricontour_demo.html
        n_angles = n_pts
        n_radii = 10
        min_radius = 200
        radii = np.linspace(min_radius, y_plot_max, n_radii)
        angles = np.linspace(-np.pi, np.pi, n_angles, endpoint=False)
        angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
        # Stagger alternating radial rings for a better triangulation.
        angles[:, 1::2] += np.pi / n_angles
        x = (radii * np.cos(angles)).ravel()
        y = (radii * np.sin(angles)).ravel()
        z = self.gain_response(angles).ravel()
        # Roll so that 0 degrees is north
        # NOTE(review): z.shape[0] / 4 relies on Python 2 integer division;
        # under Python 3 it yields a float and np.roll would reject it -- confirm.
        z = np.roll(z, z.shape[0] / 4)
        if log_scale:
            z = 10 * np.log(z)
        triang = tri.Triangulation(x, y)
        xmid = x[triang.triangles].mean(axis=1)
        ymid = y[triang.triangles].mean(axis=1)
        # Mask out triangles inside the minimum radius (the center hole).
        mask = np.where(xmid * xmid + ymid * ymid < min_radius * min_radius,
                        1, 0)
        triang.set_mask(mask)
        ax = plt.gca()
        ax.set_aspect('equal')
        alpha = .8
        plt.tricontourf(triang, z, cmap=plt.cm.Purples, alpha=alpha)
        plt.colorbar(alpha=alpha)
        self.plot_geometry()
        plt.xlim(x_plot_min, x_plot_max)
        plt.ylim(y_plot_min, y_plot_max)
        ax.patch.set_facecolor('white')
        ax.xaxis.set_major_locator(plt.NullLocator())
        ax.yaxis.set_major_locator(plt.NullLocator())
        plt.setp(ax.get_xticklabels(), visible=False)
        plt.setp(ax.get_yticklabels(), visible=False)
class UniformArrayMixin(BaseArrayMixin):
    """Array whose elements all carry the same unit weight."""

    def _get_beam_weights(self):
        # One weight of 1.0 per antenna, shaped as a column vector.
        return np.matrix(np.ones(self.n_ant)).T
class ClassicalBeamformerMixin(BaseArrayMixin):
    """Classical beamforming."""

    def __init__(self, n_ant, beam_dir, geometry_type, wavelength_spacing=.5,
                 random_seed=None):
        # beam_dir: look direction (azimuth) the main lobe is steered toward.
        self.beam_dir = beam_dir
        BaseArrayMixin.__init__(self, n_ant, geometry_type, wavelength_spacing,
                                random_seed)

    def _get_beam_weights(self):
        # Steering vector for the look direction...
        propagation = self._get_propagation(self.beam_dir)
        response = np.matrix(np.exp(-2j * np.pi * np.dot(
            self.geometry, propagation) * self.wavelength_spacing))
        # ...normalized to unit norm.
        return response / np.sqrt(np.dot(response.H, response))
class MonopoleArray(UniformArrayMixin, MonopoleElement):
    """Monopole array with no beamforming (uniform unit weights)."""
    pass
class BeamformedMonopoleArray(ClassicalBeamformerMixin,
                              MonopoleElement):
    """Classically beamformed monopole array."""
    pass
| {
"repo_name": "kastnerkyle/arrayprocessing",
"path": "arrayprocessing/array.py",
"copies": "1",
"size": "5010",
"license": "bsd-3-clause",
"hash": -1060804765701736300,
"line_mean": 32.6241610738,
"line_max": 79,
"alpha_frac": 0.6027944112,
"autogenerated": false,
"ratio": 3.426812585499316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4529606996699316,
"avg_score": null,
"num_lines": null
} |
# array generator for _mm512_permutexvar_epi8
def main():
    """Generate the byte-shuffle table and print it as a _mm512_setr_epi32 literal."""
    arr = generate_array()
    code = render(arr)
    # Parenthesised so this line is valid on both Python 2 (statement with a
    # parenthesised expression) and Python 3 (function call).
    print(code)
def generate_array():
    """Build the 64-entry byte permutation table for _mm512_permutexvar_epi8.

    Each group of 3 source bytes expands to 4 output bytes in the order
    [1, 2, 0, 1], matching the base64-encoding byte shuffle.

    Returns a list of 64 source-byte indices.
    """
    shuffle_input_tbl = [0] * 64
    src_index = 0
    # range() instead of xrange() so the script runs on Python 2 and 3 alike.
    for i in range(0, 64, 4):
        # 32-bit input: [00000000|ccdddddd|bbbbcccc|aaaaaabb]
        #                            2        1        0
        # output order [1, 2, 0, 1], i.e.:
        # [bbbbcccc|ccdddddd|aaaaaabb|bbbbcccc]
        shuffle_input_tbl[i + 0] = src_index + 1
        shuffle_input_tbl[i + 1] = src_index + 0
        shuffle_input_tbl[i + 2] = src_index + 2
        shuffle_input_tbl[i + 3] = src_index + 1
        src_index += 3
    return shuffle_input_tbl
def render(table):
    """Format a 64-entry byte table as a C ``_mm512_setr_epi32(...)`` literal.

    Packs each group of four table bytes into one little-endian dword
    (byte 0 least significant) and renders all sixteen as hex.
    """
    dwords = []
    # range() instead of xrange() so the script runs on Python 2 and 3 alike.
    for i in range(0, 64, 4):
        b0 = table[i + 0]
        b1 = table[i + 1]
        b2 = table[i + 2]
        b3 = table[i + 3]
        dword = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0
        dwords.append(dword)
    return "_mm512_setr_epi32(%s)" % ', '.join('0x%08x' % v for v in dwords)
# Script entry point: emit the intrinsic literal to stdout.
if __name__ == '__main__':
    main()
| {
"repo_name": "WojciechMula/base64simd",
"path": "encode/script/permutexvar_parameters.py",
"copies": "1",
"size": "1085",
"license": "bsd-2-clause",
"hash": 4699333309745395000,
"line_mean": 24.2325581395,
"line_max": 76,
"alpha_frac": 0.4912442396,
"autogenerated": false,
"ratio": 2.940379403794038,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39316236433940377,
"avg_score": null,
"num_lines": null
} |
# array in Fortran starts from 0, in python starts from 0
# To avoid confusion for Python user, the phase and element index will +1
#
import os
import numpy as np
# &&&& tq function &&&&
from liboctqpy import pytq as ptq
# ============= initialize
def tqini():
ptq('ini',0,1.0,' ')
return None
# ============= read database
def tqrfil(filename):
ptq('tqrfil',0,1.0,filename)
return None
# ============= read database & read element
def tqrpfil(filename,elements):
global deg_freedom # degree of freedom in Thermodynamics
print elements,filename
if type(elements) is str:
no_element = 1
file_ele = filename.upper() + ' ' + elements.upper()
elif (type(elements) is tuple) or (type(elements) is list):
no_element = len(elements)
file_ele = filename.upper() + ' ' + ' '.join(elements).upper()
else:
print "=== Composition inputs error ==="
return None
print file_ele
ptq('tqrpfil',no_element,1.0,file_ele)
deg_freedom = no_element + 2
return None
# ============= change phase status
def tqphsts(phase,status,value):
phase_index = get_phase_index(phase)
if phase_index == -5:
return None
# nystat:-4 hidden, -3 suspended, -2 dormant, -1,0,1 entered, 2 fix
if status[0].lower() == "h":
status_index = -4
elif status[0].lower() == "s":
status_index = -3
elif status[0].lower() == "d":
status_index = -2
elif status[0].lower() == "e":
status_index = 0
elif status[0].lower() == "f":
status_index = 2
else:
print "ERROR: incorrect phase status"
return None
phase_prop = [phase_index,status_index]
ptq('tqphsts',phase_prop,value,' ')
return None
# ============= get composition name
def tqgcom():
dum, int_out, doub_out, char_out = ptq('tqgcom',0,1.,' ')
elem = ["".join(char_out[i]).split()[0] for i in range(int_out[0])]
return elem
# ============= get # phase
def tqgnp():
dum, int_out, doub_out, char_out = ptq('tqgnp',0,1.,' ')
return int_out[0]
# ============= get phase name
def tqgpn():
dum, int_out, doub_out, char_out = ptq('tqgpn',0,1.,' ')
phase = ["".join(char_out[i]).split()[0] for i in range(int_out[0])]
return phase
# ============= get phase index
def tqgpi(phase):
dum, int_out, doub_out, char_out = ptq('tqgpi',0,1.,phase)
return int_out[0]
# ============= set condition
def tqsetc(condition,element,value):
element_index = 0
if type(element) is str:
element_index = get_element_index(element.upper())
if element_index == -5:
return None
#print "element",element,element_index
dum, int_out, doub_out, char_out = ptq('tqsetc',element_index,value,condition)
return None
# ============= set multiple conditions
def tqsetcs(conditions):
if type(conditions) != dict:
print "use dictionary as inputs"
no_conditions = len(conditions.keys())
if no_conditions != deg_freedom:
print "Degree of freedom is not satisfied"
dict_key = conditions.keys()
for i in range(no_conditions):
new_keys = str(dict_key[i].lstrip().upper())
conditions[new_keys] = conditions.pop(dict_key[i])
for i in range(no_conditions):
element = ' '
element_index = 0
if conditions.keys()[i][0] == 'N' or conditions.keys()[i][0] == 'T' or conditions.keys()[i][0] == 'P':
condition = conditions.keys()[i][0]
elif ( conditions.keys()[i][0] == 'W' or conditions.keys()[i][0] == 'X'):
condition = conditions.keys()[i][0]
element = conditions.keys()[i][conditions.keys()[i].index("(")+1:conditions.keys()[i].index(")")]
if type(element) is str:
element_index = get_element_index(element)
if element_index == -5:
print "ERROR element inputs"
return None
else:
print "ERROR condition"
return None
value = conditions[conditions.keys()[i]]
print "condition",condition,element,element_index,value
dum, int_out, doub_out, char_out = ptq('tqsetc',element_index,value,condition)
return None
# ============= eq calculation
def tqce():
dum, int_out, doub_out, char_out = ptq('tqce',0,0.,' ')
return None
# ============= retrive the data
def tqgetv(condition,phase,element):
# phase name to phase index
phase_index = get_phase_index(phase)
if phase_index == -5:
return None
# element name to element index
element_index = get_element_index(element)
if element_index == -5:
return None
#print "phase",phase,phase_index," element:",element,element_index
#print "phase index: ",phase_index
i_var = [phase_index,element_index]
dum, int_out, doub_out, char_out = ptq('tqgetv',i_var,0.,condition)
#print doub_out
return doub_out[0]
# ============= retrive the sublattice information
# input: phase name
# output:
# no_sublattice - number of sublattice
# no_component_sublattice - number of component in each sublattice
# ele_names - component names in each sublattice
# composition - composition of component in each sublattice
# no_sites_sublattice - number of sites in each sublattice
# moles_atom - mole atoms of this phase
# net_charge - net charge of the phase
#
def tqgphc(phase):
# phase name to phase index
phase_index = get_phase_index(phase)
if phase_index == -5:
return None
i_var = phase_index
dum, int_out, doub_out, char_out = ptq('tqgphc',i_var,0.,' ')
no_sublattice = int_out[0]
#print no_sublattice
no_component_sublattice = int_out[1:1+no_sublattice]
#print no_component_sublattice
count_index = 0
element_index = []
composition = []
for i in range(no_sublattice):
element_index.append(int_out[count_index+1+no_sublattice:count_index+1+no_sublattice+no_component_sublattice[i]])
composition.append(list(doub_out[count_index:count_index+no_component_sublattice[i]]))
count_index = count_index+no_component_sublattice[i]
#print composition
#print element_index
element_index[:] = [x - 1 for x in element_index]
#print element_index
sys_ele_names = tqgcom()
sys_ele_names.insert(0,'VA')
#print sys_ele_names
ele_names = [[sys_ele_names[i] for i in element_index[j]] for j in range(no_sublattice)]
#print ele_names
no_sites_sublattice = doub_out[count_index:count_index+no_sublattice]
#print no_sites_sublattice
moles_atom = doub_out[count_index+no_sublattice]
net_charge = doub_out[count_index+no_sublattice+1]
#print moles_atom,net_charge
return (no_sublattice,no_component_sublattice,ele_names,composition,no_sites_sublattice,moles_atom,net_charge)
# ============= get diffusion coefficient
def tqgdif(conditions):
if type(conditions) != dict:
print "use dictionary as inputs"
return None
no_conditions = len(conditions.keys())
if no_conditions != deg_freedom+2: # gibbs phase rule + (phase name + (diffusion element + dependent element))
print "Degree of freedom is not satisfied"
return None
dict_key = conditions.keys()
for i in range(no_conditions):
new_keys = str(dict_key[i].lstrip().upper())
conditions[new_keys] = conditions.pop(dict_key[i])
ele_names = tqgcom()
# req_composition = ["X(" + ele_name + ")" for ele_name in ele_names]
# print "REQ ",req_composition
N = 0
ele_ = [0]*2
X = [0]*len(ele_names)
for i in range(no_conditions):
if conditions.keys()[i][0] == 'N':
N = N + conditions['N']
elif conditions.keys()[i][0] == 'T':
T = [conditions['T']]
elif conditions.keys()[i][0:2] == 'P':
P = [conditions['P']]
elif conditions.keys()[i][0:2] == 'PH':
phase = conditions['PHASE']
phase_index = get_phase_index(phase)
if phase_index == -5:
print "ERROR: phase name: ",phase
return None
elif conditions.keys()[i][0:4] == 'COMP':
if len(conditions[conditions.keys()[i]]) > 2:
print "ERROR: only 2 composition allowed for composition - (diffusion element, dependent element)"
return None
for j in range(2):
element = conditions[conditions.keys()[i]][j].upper()
ele_[j] = get_element_index(element)
if ele_[j] < 0:
print "ERROR name for diffusion element: ", element
return None
elif conditions.keys()[i][0] == 'X': # to fill the composition list, so the index is not the element index in OC
element = conditions.keys()[i][conditions.keys()[i].index("(")+1:conditions.keys()[i].index(")")]
ele_index = get_element_index(element) - 1 # -1 for python list
if ele_index >= 0:
X[ele_index] = conditions[conditions.keys()[i]]
N = N - conditions[conditions.keys()[i]]
else:
print "ERROR composition name: ", element
return None
else:
print "ERROR condition: ",conditions.keys()[i]
return None
if N < -1.001:
print "ERROR composition: total mole fraction > 1 -- ",abs(N)
return None
X_zero = [i for i, e in enumerate(X) if e == 0]
if N > 0. and len(X_zero) == 1:
X[X_zero[0]] = N
i_var = [phase_index] + ele_
d_var = T + P + X
print "python ",i_var,d_var
dum, int_out, doub_out, char_out = ptq('tqgdif',i_var,d_var,' ')
# ============= reset errors
def tqrseterr():
ptq('tqreset',0.,0.,' ')
return None
# ************
def get_phase_index(phase):
    """Map a phase name (or "*" wildcard) to its 1-based OpenCalphad index.

    Returns -1 for the "*" wildcard and -5 when the name cannot be resolved.
    """
    no_phase = tqgnp()  # NOTE(review): currently unused (kept for the commented loop)
    phase_names = tqgpn()
    #print "py",phase_names,len(phase_names)
    phase_index = -5
    if phase[0].lower() == "*":
        phase_index = -1
    else:
        if type(phase) is str:
            '''
            for i in range(no_phase):
                if phase_names[i].find(phase.upper()) == 0:
                    phase_index = i + 1
                    break
            '''
            # Substring match: the first phase name containing `phase` wins,
            # so an ambiguous fragment silently picks the earliest candidate.
            full_name = [fn for fn in phase_names if phase in fn]
            #print "PY: ",full_name
            if full_name:
                phase_index = phase_names.index(full_name[0]) + 1  # +1 for Fortran indexing
                #print full_name,phase_index
            else:
                print "ERROR: incorrect phase name"
    if phase_index == -5:
        print "ERROR: cannot find the phase"
    return phase_index
# ************
def get_element_index(element):
    """Map an element symbol (or "*"/"NA" wildcards) to its 1-based OC index.

    Returns -1 for wildcards and -5 on unresolvable input.
    """
    ele_names = tqgcom()
    element_index = -5
    if element.upper() == "*":
        element_index = -1
    elif type(element) is str:
        if element.upper() == "NA":
            # NOTE(review): "NA" is treated as a wildcard here, which makes the
            # element sodium (Na) unreachable by name -- confirm this is intended.
            element_index = -1
        else:
            # Substring match: first system element name containing `element`.
            full_name = [fn for fn in ele_names if element in fn]
            if not full_name:
                print "ERROR: incorrect element name"
                return element_index
            element_index = ele_names.index(full_name[0]) + 1 # +1 for Fortran array
    else:
        print "ERROR: incorrect element name"
    return element_index
| {
"repo_name": "dpttw/PyOC",
"path": "libocpy.py",
"copies": "1",
"size": "10094",
"license": "mit",
"hash": 6131151084555855000,
"line_mean": 27.3539325843,
"line_max": 115,
"alpha_frac": 0.6437487616,
"autogenerated": false,
"ratio": 2.865985235661556,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40097339972615553,
"avg_score": null,
"num_lines": null
} |
"""Array interaction tools."""
# --- import --------------------------------------------------------------------------------------
import numpy as np
from .. import exceptions as wt_exceptions
# --- define --------------------------------------------------------------------------------------
__all__ = [
"closest_pair",
"diff",
"fft",
"joint_shape",
"orthogonal",
"remove_nans_1D",
"share_nans",
"smooth_1D",
"svd",
"unique",
"valid_index",
"mask_reduce",
"enforce_mask_shape",
]
# --- functions -----------------------------------------------------------------------------------
def closest_pair(arr, give="indicies"):
    """Find the pair(s) of indices of the closest elements in *arr*.

    If several pairs are equally close, all of them are returned.
    Brute-force O(n^2) scan; adequate for small arrays.

    Parameters
    ----------
    arr : numpy.ndarray
        The array to search.
    give : {'indicies', 'distance'} (optional)
        'indicies' (default) returns the index pairs; 'distance' returns
        the minimal distance itself.

    Returns
    -------
    list of lists of two tuples
        Each inner list holds the two index tuples of a nearest pair.

    >>> arr = np.array([0, 1, 2, 3, 3, 4, 5, 6, 1])
    >>> closest_pair(arr)
    [[(1,), (8,)], [(3,), (4,)]]
    """
    all_idx = list(np.ndindex(arr.shape))
    best = arr.max() - arr.min()
    pairs = []
    for ia in all_idx:
        for ib in all_idx:
            if ia == ib:
                continue
            sep = abs(arr[ia] - arr[ib])
            if sep < best:
                best = sep
                pairs = [[ia, ib]]
            elif sep == best and [ib, ia] not in pairs:
                pairs.append([ia, ib])
    if give == "indicies":
        return pairs
    if give == "distance":
        return best
    raise KeyError("give not recognized in closest_pair")
def diff(xi, yi, order=1) -> np.ndarray:
    """Numerically differentiate a 1D array *order* times.

    Slopes are computed at midpoints and mapped back onto the original
    coordinates with linear interpolation.  Expects monotonic *xi*.

    Parameters
    ----------
    xi : 1D array-like
        Coordinates.
    yi : 1D array-like
        Values.
    order : positive integer (optional)
        Order of differentiation.  Default is 1.

    Returns
    -------
    1D numpy array
        Numerical derivative, same shape as the inputs.
    """
    values = np.array(yi).copy()
    descending = xi[-1] < xi[0]
    if descending:
        # work in ascending order, flip the result back at the end
        xi = np.flipud(xi.copy())
        values = np.flipud(values)
    mids = (xi[:-1] + xi[1:]) / 2
    for _ in range(order):
        slopes = np.diff(values) / np.diff(xi)
        values = np.interp(xi, mids, slopes)
    return np.flipud(values) if descending else values
def fft(xi, yi, axis=0) -> tuple:
    """FFT one axis of *yi* against evenly spaced coordinates *xi*.

    Both outputs are fftshift-ed so zero frequency sits in the middle.

    Parameters
    ----------
    xi : numpy.ndarray
        1D, evenly spaced array over which the points to be FFT'ed are defined.
    yi : numpy.ndarray
        ND array of values; transformed along *axis*.
    axis : int
        Axis of *yi* to perform the FFT over.

    Returns
    -------
    xi : 1D numpy.ndarray
        Conjugate coordinates (time in -> frequency out).
    yi : ND numpy.ndarray
        Shifted FFT; same shape as the input *yi*.

    Raises
    ------
    wt_exceptions.DimensionalityError
        If *xi* is not 1D.
    RuntimeError
        If *xi* is not evenly spaced.
    """
    if xi.ndim != 1:
        raise wt_exceptions.DimensionalityError(1, xi.ndim)
    step = np.diff(xi)
    if not np.allclose(step, step.mean()):
        raise RuntimeError("WrightTools.kit.fft: argument xi must be evenly spaced")
    transformed = np.fft.fft(yi, axis=axis)
    spacing = (xi.max() - xi.min()) / (xi.size - 1)
    freqs = np.fft.fftfreq(xi.size, d=spacing)
    return np.fft.fftshift(freqs), np.fft.fftshift(transformed, axes=axis)
def joint_shape(*args) -> tuple:
    """Given a set of arrays, return the joint (element-wise max) shape.

    Parameters
    ----------
    args : array-likes
        Arrays sharing a common number of dimensions.

    Returns
    -------
    tuple of int
        Joint shape.
    """
    if not args:
        return ()
    shapes = [a.shape for a in args]
    return tuple(max(s[axis] for s in shapes) for axis in range(args[0].ndim))
def orthogonal(*args) -> bool:
    """Determine if a set of arrays (or shapes) are orthogonal.

    Arrays are "orthogonal" when, along every axis, at most one of them
    has extent greater than one.

    Parameters
    ----------
    args : array-likes or array shapes

    Returns
    -------
    bool
        Array orthogonality condition.
    """
    # ``args`` is a tuple, so the previous in-place ``args[i] = arg.shape``
    # raised TypeError for any argument that actually had a ``shape``;
    # build a list of shapes instead.
    shapes = [arg.shape if hasattr(arg, "shape") else arg for arg in args]
    for s in zip(*shapes):
        # prod(s) == max(s) only when at most one entry on this axis exceeds 1
        if np.prod(s) != max(s):
            return False
    return True
def remove_nans_1D(*args) -> tuple:
    """Remove NaNs in a set of 1D arrays.

    Drops an index from every array if any array is NaN there.
    All input arrays must have the same size.

    Parameters
    ----------
    args : 1D arrays

    Returns
    -------
    tuple
        Arrays in the given order with the NaN indices removed.
    """
    bad = np.isnan(args[0])
    for arr in args[1:]:
        bad |= np.isnan(arr)
    keep = ~bad
    return tuple(np.array(arr)[keep] for arr in args)
def share_nans(*arrs) -> tuple:
    """Return copies of *arrs* with NaN positions synchronized.

    If any array is NaN at a (broadcast) position, every returned array
    is NaN there too.

    Parameters
    ----------
    *arrs : nD arrays.

    Returns
    -------
    tuple
        Arrays in the given order, NaN indices synchronized.
    """
    # Zeros stay zero under multiplication except where an input is NaN
    # (0 * nan == nan); adding the accumulator then poisons exactly those
    # positions in every array while leaving all other values unchanged.
    poison = np.zeros(joint_shape(*arrs))
    for arr in arrs:
        poison = poison * arr
    return tuple(arr + poison for arr in arrs)
def smooth_1D(arr, n=10, smooth_type="flat") -> np.ndarray:
    """Smooth 1D data by convolving it with a window function.

    Edge effects will be present.

    Parameters
    ----------
    arr : array_like
        Input array, 1D.
    n : int (optional)
        Window length.  Default is 10.
    smooth_type : {'flat', 'hanning', 'hamming', 'bartlett', 'blackman'} (optional)
        Window function; 'flat' produces a moving average.

    Returns
    -------
    array_like
        Smoothed 1D array.
    """
    if arr.ndim != 1:
        raise wt_exceptions.DimensionalityError(1, arr.ndim)
    if arr.size < n:
        raise wt_exceptions.ValueError("Input array size must be larger than window size.")
    if n < 3:
        # window too short to smooth anything
        return arr
    builders = {
        "flat": lambda: np.ones(n, dtype=arr.dtype),
        "hanning": lambda: np.hanning(n),
        "hamming": lambda: np.hamming(n),
        "bartlett": lambda: np.bartlett(n),
        "blackman": lambda: np.blackman(n),
    }
    if smooth_type not in builders:
        message = "Given smooth_type, {0}, not available.".format(str(smooth_type))
        raise wt_exceptions.ValueError(message)
    window = builders[smooth_type]()
    # normalize the window so the overall amplitude is preserved
    return np.convolve(window / window.sum(), arr, mode="same")
def svd(a, i=None) -> tuple:
    """Singular Value Decomposition.

    Factors ``a`` as ``u * np.diag(s) * v`` with unitary *u*, *v* and a
    1D array *s* of singular values.

    Parameters
    ----------
    a : array_like
        Input array.
    i : int or slice (optional)
        Which singular-value "slice" to return.  Default is None, which
        returns the full (transposed-u) unitary 2D arrays.

    Returns
    -------
    tuple
        Decomposed arrays in order `u`, `v`, `s`.
    """
    left, sing, right = np.linalg.svd(a, full_matrices=False, compute_uv=True)
    left = left.T  # rows of ``left`` now index singular components, like ``right``
    if i is not None:
        return left[i], right[i], sing[i]
    return left, right, sing
def unique(arr, tolerance=1e-6) -> np.ndarray:
    """Return unique elements of a 1D array, merged within *tolerance*.

    Parameters
    ----------
    arr : array_like
        Input array; flattened if not already 1D.
    tolerance : number (optional)
        Values closer than this are merged (averaged).  Default is 1e-6.

    Returns
    -------
    array
        The sorted unique values.
    """
    remaining = sorted(arr.flatten())
    merged = []
    while remaining:
        pivot = remaining[0]
        # everything within tolerance of the smallest remaining value ...
        cluster = [v for v in remaining if np.abs(pivot - v) < tolerance]
        # ... is averaged into one representative and dropped
        remaining = [v for v in remaining if not np.abs(cluster[0] - v) < tolerance]
        merged.append(sum(cluster) / len(cluster))
    return np.array(merged)
def valid_index(index, shape) -> tuple:
    """Get a valid index for a broadcastable shape.

    Missing trailing axes are filled with full slices; axes of extent one
    get index 0 (or a full slice, if a slice was given).

    Parameters
    ----------
    index : tuple
        Given index.
    shape : tuple of int
        Shape.

    Returns
    -------
    tuple
        Valid index.
    """
    # pad with full slices so the index covers every axis of shape
    padded = list(index) + [slice(None)] * (len(shape) - len(index))
    # walk axes right-aligned (broadcasting style), collapsing size-1 axes
    out = []
    for idx, extent in zip(padded[::-1], shape[::-1]):
        if extent != 1:
            out.append(idx)
        elif isinstance(idx, slice):
            out.append(slice(None))
        else:
            out.append(0)
    return tuple(out[::-1])
def mask_reduce(mask):
    """Reduce a boolean mask, removing slices that are entirely False.

    Parameters
    ----------
    mask : ndarray with bool dtype
        The mask which is to be reduced.

    Returns
    -------
    A boolean mask with no all-False slices.
    """
    out = mask.copy()
    for axis in range(out.ndim):
        other_axes = tuple(ax for ax in range(out.ndim) if ax != axis)
        # True wherever this axis position holds at least one True element
        occupied = out.max(axis=other_axes, keepdims=True).flatten()
        selector = [slice(None)] * out.ndim
        selector[axis] = [pos for pos, keep in enumerate(occupied) if keep]
        out = out[tuple(selector)]
    return out
def enforce_mask_shape(mask, shape):
    """Reduce a boolean mask to fit a given shape.

    Parameters
    ----------
    mask : ndarray with bool dtype
        The mask which is to be reduced.
    shape : tuple of int
        Shape which broadcasts to the mask shape.

    Returns
    -------
    A boolean mask, collapsed (by logical OR) along axes where *shape*
    has one element.
    """
    collapse_axes = tuple(axis for axis, extent in enumerate(shape) if extent == 1)
    return mask.max(axis=collapse_axes, keepdims=True)
| {
"repo_name": "wright-group/WrightTools",
"path": "WrightTools/kit/_array.py",
"copies": "1",
"size": "11038",
"license": "mit",
"hash": 7743844782244333000,
"line_mean": 25.280952381,
"line_max": 99,
"alpha_frac": 0.5549918463,
"autogenerated": false,
"ratio": 3.793127147766323,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4848118994066323,
"avg_score": null,
"num_lines": null
} |
# Array I/O to text files
#
# seb changed Numeric to numarray - 2003-Apr-07
# seb changed numarray to numpy - 2006-Jul-22
# Written by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# last revision: 2001-12-5
#
"""This module contains elementary support for I/O of one- and
two-dimensional numerical arrays to and from plain text files. The
text file format is very simple and used by many other programs as
well:
- each line corresponds to one row of the array
- the numbers within a line are separated by white space
- lines starting with # are ignored (comment lines)
An array containing only one line or one column is returned as a
one-dimensional array on reading. One-dimensional arrays are written
as one item per line.
Numbers in files to be read must conform to Python/C syntax. For
reading files containing Fortran-style double-precision numbers
(exponent prefixed by D), use the module Scientific.IO.FortranFormat.
"""
#from Scientific.IO.TextFile import TextFile
from .Scientific_IO_TextFile import TextFile
import string, numpy
def readArray(filename, comment='#', sep=None):
    """Return an array containing the data from file |filename|. This
    function works for arbitrary data types (every array element can be
    given by an arbitrary Python expression), but at the price of being
    slow. For large arrays, use readFloatArray or readIntegerArray
    if possible.
    ignore all lines that start with any character contained in comment
    """
    # SECURITY NOTE(review): each cell is passed through eval(), so this
    # must never be used on untrusted input files.
    data = []
    for line in TextFile(filename):
        if not line[0] in comment:
            data.append(list(map(eval, line.split(sep))))#string.split(line, sep))))
    a = numpy.array(data)
    # a single row or a single column collapses to a 1-D array
    if a.shape[0] == 1 or a.shape[1] == 1:
        a = numpy.ravel(a)
    return a
def readArray_conv(filename, fn, comment='#', dtype=None, sep=None):
    """
    Return an array containing the data from file |filename|. This
    function works for arbitrary data types (every array element can be
    given by an arbitrary Python expression), but at the price of being
    slow.
    fn is called for each "cell" value.
    if dtype is None, uses "minimum type" (ref. Numpy doc)
    if sep is None, any white space is seen as field separator
    ignore all lines that start with any character contained in comment
    """
    data = []
    for line in TextFile(filename):
        if not line[0] in comment:
            # str.split replaces string.split, which no longer exists in
            # Python 3 (matches the conversion already done in readArray)
            data.append(list(map(fn, line.split(sep))))
    # honor the documented dtype argument (it was previously ignored)
    a = numpy.array(data, dtype=dtype)
    if a.shape[0] == 1 or a.shape[1] == 1:
        a = numpy.ravel(a)
    return a
def readFloatArray(filename, dtype=numpy.float64, sep=None): ## seb added type argument
    """Return a floating-point array containing the data from file |filename|."""
    data = []
    for line in TextFile(filename):
        if line[0] != '#':
            # float()/str.split replace string.atof/string.split, which
            # were removed from Python 3's string module
            data.append(list(map(float, line.split(sep))))
    a = numpy.array(data, dtype=dtype) ## seb added type argument
    # a single row or a single column collapses to a 1-D array
    if a.shape[0] == 1 or a.shape[1] == 1:
        a = numpy.ravel(a)
    return a
def readIntegerArray(filename, dtype=numpy.int32, sep=None): ## seb added type argument
    """Return an integer array containing the data from file |filename|."""
    data = []
    for line in TextFile(filename):
        if line[0] != '#':
            # int()/str.split replace string.atoi/string.split, which
            # were removed from Python 3's string module
            data.append(list(map(int, line.split(sep))))
    a = numpy.array(data, dtype=dtype) ## seb added type argument
    # a single row or a single column collapses to a 1-D array
    if a.shape[0] == 1 or a.shape[1] == 1:
        a = numpy.ravel(a)
    return a
def writeArray(array, filename, mode='w', sep=' '):
    """Write array |a| to file |filename|. |mode| can be 'w' (new file)
    or 'a' (append)."""
    out = TextFile(filename, mode)
    if len(array.shape) == 1:
        # write 1-D data one item per line
        array = array[:, numpy.newaxis]
    for row in array:
        out.write(sep.join([repr(cell) for cell in row]))
        out.write('\n')
    out.close()
#
# Write several data sets (one point per line) to a text file,
# with a separator line between data sets. This is sufficient
# to make input files for most plotting programs.
#
def writeDataSets(datasets, filename, separator = ''):
    """Write each of the items in the sequence |datasets|
    to the file |filename|, separating the datasets by a line
    containing |separator|. The items in the data sets can be
    one- or two-dimensional arrays or equivalent nested sequences.
    The output file format is understood by many plot programs.
    """
    out = TextFile(filename, 'w')
    last = len(datasets) - 1
    for pos, dataset in enumerate(datasets):
        table = numpy.array(dataset)
        if len(table.shape) == 1:
            # one point per line for 1-D data
            table = table[:, numpy.newaxis]
        for row in table:
            for value in row:
                out.write(repr(value) + ' ')
            out.write('\n')
        if pos < last:
            out.write(separator + '\n')
    out.close()
| {
"repo_name": "macronucleus/chromagnon",
"path": "Chromagnon/Priithon/ArrayIO.py",
"copies": "1",
"size": "4907",
"license": "mit",
"hash": 1413770669290591000,
"line_mean": 35.3481481481,
"line_max": 87,
"alpha_frac": 0.6588546974,
"autogenerated": false,
"ratio": 3.708994708994709,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9840442174025129,
"avg_score": 0.005481446473915926,
"num_lines": 135
} |
# array matching index (strength) to numerical value
master_array = ['2','3','4','5','6','7','8','9','T','J','Q','K','A']
# pythonic hack to determine if an input string is an integer
def test(s):
    """Return int(s) when *s* parses as an integer, else -1.

    NOTE(review): a literal "-1" is indistinguishable from the failure
    value; callers below only test >= 0, so this is safe for the
    non-negative packet fields they inspect.
    """
    try:
        return int(s)
    except ValueError:
        return -1
# function that returns the index of the numLegalActions element of the packet,
# using the observation that numLegalActions is always the fourth integer in the GETACTION packet.
def splitPacket(data):
    """Return the token index of the fourth non-negative integer in *data*.

    Uses the observation that numLegalActions is always the fourth
    integer in the GETACTION packet.  Returns None when there are fewer
    than four integer tokens.
    """
    tokens = data.split()
    seen = 0
    for pos, tok in enumerate(tokens):
        try:
            value = int(tok)
        except ValueError:
            continue
        if value >= 0:
            seen += 1
            if seen == 4:
                return pos
    return None
# function that takes data packet and potential legal action, and determines
# whether said action is legal. Gives index of that action within packet, or -1 if not legal.
def canIDoThis(action, data):
    """Return the packet index of legal action *action* (e.g. 'BET').

    Scans the legal-action section of the GETACTION packet *data*;
    returns -1 when the action is not legal.
    """
    tokens = data.split()
    found = -1
    for pos in range(splitPacket(data), len(tokens)):
        if tokens[pos].startswith(action):
            found = pos
    return found
# preflop strategy that goes all in on a pocket pair or high card (T,J,Q,K,A in hand) and checkfolds otherwise
def getaction(myHand,data):
    """Return the preflop action string (newline-terminated).

    Goes all in (max BET, else max RAISE, else CALL) when holding a
    pocket pair or at least one card of rank T or higher; otherwise
    checks (check/fold line).
    """
    firstNum = myHand[0][0]   # rank character of first hole card
    secondNum = myHand[1][0]  # rank character of second hole card
    # pocket pair, or best card index > 7 (index 7 is '9') i.e. T/J/Q/K/A
    if firstNum == secondNum or max(master_array.index(firstNum),master_array.index(secondNum))>7:
        if canIDoThis('BET',data)>-1:
            # next line splits BET:minBet:maxBet action into its components,
            # then extracts the maxBet so we can go all in
            maxBet = data.split()[canIDoThis('BET',data)].split(':')[2]
            return 'BET:'+maxBet+'\n'
        elif canIDoThis('RAISE',data)>-1:
            maxRaise = data.split()[canIDoThis('RAISE',data)].split(':')[2]
            return 'RAISE:'+maxRaise+'\n'
        elif canIDoThis('CALL',data)>-1:
            return 'CALL\n'
        else:
            return 'CHECK\n'
    return 'CHECK\n'
"repo_name": "surgebiswas/poker",
"path": "PokerBots_2017/sample_bot/prefloplogic.py",
"copies": "1",
"size": "1735",
"license": "mit",
"hash": -5605990529002182000,
"line_mean": 35.9361702128,
"line_max": 110,
"alpha_frac": 0.6876080692,
"autogenerated": false,
"ratio": 3.0492091388400704,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8974648311874582,
"avg_score": 0.052433779233097444,
"num_lines": 47
} |
# Array methods which are called by the both the C-code for the method
# and the Python code for the NumPy-namespace function
from numpy.core import multiarray as mu
from numpy.core import umath as um
from numpy.core.numeric import asanyarray
def _amax(a, axis=None, out=None, keepdims=False):
    """maximum.reduce along *axis*; backs ndarray.max / np.amax."""
    return um.maximum.reduce(a, axis=axis,
                             out=out, keepdims=keepdims)
def _amin(a, axis=None, out=None, keepdims=False):
    """minimum.reduce along *axis*; backs ndarray.min / np.amin."""
    return um.minimum.reduce(a, axis=axis,
                             out=out, keepdims=keepdims)
def _sum(a, axis=None, dtype=None, out=None, keepdims=False):
    """add.reduce along *axis*; backs ndarray.sum / np.sum."""
    return um.add.reduce(a, axis=axis, dtype=dtype,
                         out=out, keepdims=keepdims)
def _prod(a, axis=None, dtype=None, out=None, keepdims=False):
    """multiply.reduce along *axis*; backs ndarray.prod / np.prod."""
    return um.multiply.reduce(a, axis=axis, dtype=dtype,
                              out=out, keepdims=keepdims)
def _any(a, axis=None, dtype=None, out=None, keepdims=False):
    """logical_or.reduce along *axis*; backs ndarray.any / np.any."""
    return um.logical_or.reduce(a, axis=axis, dtype=dtype, out=out,
                                keepdims=keepdims)
def _all(a, axis=None, dtype=None, out=None, keepdims=False):
    """logical_and.reduce along *axis*; backs ndarray.all / np.all."""
    return um.logical_and.reduce(a, axis=axis, dtype=dtype, out=out,
                                 keepdims=keepdims)
def _count_reduce_items(arr, axis):
if axis is None:
axis = tuple(xrange(arr.ndim))
if not isinstance(axis, tuple):
axis = (axis,)
items = 1
for ax in axis:
items *= arr.shape[ax]
return items
def _mean(a, axis=None, dtype=None, out=None, keepdims=False):
    """Mean implementation backing ndarray.mean: add.reduce, then
    true-divide by the number of reduced elements."""
    arr = asanyarray(a)

    # Upgrade bool, unsigned int, and int to float64
    if dtype is None and arr.dtype.kind in ['b','u','i']:
        ret = um.add.reduce(arr, axis=axis, dtype='f8',
                            out=out, keepdims=keepdims)
    else:
        ret = um.add.reduce(arr, axis=axis, dtype=dtype,
                            out=out, keepdims=keepdims)
    rcount = _count_reduce_items(arr, axis)
    if isinstance(ret, mu.ndarray):
        # divide in place (unsafe cast) so ``out`` semantics are preserved
        ret = um.true_divide(ret, rcount,
                             out=ret, casting='unsafe', subok=False)
    else:
        # full reduction produced a scalar
        ret = ret / float(rcount)
    return ret
def _var(a, axis=None, dtype=None, out=None, ddof=0,
         keepdims=False):
    """Variance implementation backing ndarray.var.

    Computes mean(|x - x.mean()|**2) with a ``ddof`` degrees-of-freedom
    correction in the divisor (n - ddof).
    """
    arr = asanyarray(a)

    # First compute the mean, saving 'rcount' for reuse later
    # (bool/uint/int inputs are accumulated as float64)
    if dtype is None and arr.dtype.kind in ['b','u','i']:
        arrmean = um.add.reduce(arr, axis=axis, dtype='f8', keepdims=True)
    else:
        arrmean = um.add.reduce(arr, axis=axis, dtype=dtype, keepdims=True)
    rcount = _count_reduce_items(arr, axis)
    if isinstance(arrmean, mu.ndarray):
        arrmean = um.true_divide(arrmean, rcount,
                                 out=arrmean, casting='unsafe', subok=False)
    else:
        arrmean = arrmean / float(rcount)

    # arr - arrmean (keepdims=True above makes this broadcast)
    x = arr - arrmean

    # (arr - arrmean) ** 2 -- |.|**2 via the conjugate for complex input
    if arr.dtype.kind == 'c':
        x = um.multiply(x, um.conjugate(x), out=x).real
    else:
        x = um.multiply(x, x, out=x)

    # add.reduce((arr - arrmean) ** 2, axis)
    ret = um.add.reduce(x, axis=axis, dtype=dtype, out=out, keepdims=keepdims)

    # add.reduce((arr - arrmean) ** 2, axis) / (n - ddof)
    if not keepdims and isinstance(rcount, mu.ndarray):
        rcount = rcount.squeeze(axis=axis)
    rcount -= ddof
    if isinstance(ret, mu.ndarray):
        ret = um.true_divide(ret, rcount,
                             out=ret, casting='unsafe', subok=False)
    else:
        ret = ret / float(rcount)
    return ret
def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
    """Standard deviation backing ndarray.std: square root of _var."""
    variance = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
                    keepdims=keepdims)
    if isinstance(variance, mu.ndarray):
        # take the root in place to honor the ``out`` argument
        return um.sqrt(variance, out=variance)
    return um.sqrt(variance)
| {
"repo_name": "lthurlow/Network-Grapher",
"path": "proj/external/numpy-1.7.0/numpy/core/_methods.py",
"copies": "11",
"size": "3842",
"license": "mit",
"hash": -7855904983546634000,
"line_mean": 34.247706422,
"line_max": 78,
"alpha_frac": 0.5913586674,
"autogenerated": false,
"ratio": 3.3583916083916083,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.944975027579161,
"avg_score": null,
"num_lines": null
} |
# Array methods which are called by the both the C-code for the method
# and the Python code for the NumPy-namespace function
#from numpy.core import multiarray as mu
#from numpy.core import umath as um
import _numpypy as mu
um = mu
from numpy.core.numeric import asanyarray
def _amax(a, axis=None, out=None, keepdims=False):
    """maximum.reduce along *axis*; backs ndarray.max / np.amax."""
    return um.maximum.reduce(a, axis=axis,
                             out=out, keepdims=keepdims)
def _amin(a, axis=None, out=None, keepdims=False):
    """minimum.reduce along *axis*; backs ndarray.min / np.amin."""
    return um.minimum.reduce(a, axis=axis,
                             out=out, keepdims=keepdims)
def _sum(a, axis=None, dtype=None, out=None, keepdims=False):
    """add.reduce along *axis*; backs ndarray.sum / np.sum."""
    return um.add.reduce(a, axis=axis, dtype=dtype,
                         out=out, keepdims=keepdims)
def _prod(a, axis=None, dtype=None, out=None, keepdims=False):
    """multiply.reduce along *axis*; backs ndarray.prod / np.prod."""
    return um.multiply.reduce(a, axis=axis, dtype=dtype,
                              out=out, keepdims=keepdims)
def _any(a, axis=None, dtype=None, out=None, keepdims=False):
    """logical_or.reduce along *axis*; backs ndarray.any / np.any."""
    return um.logical_or.reduce(a, axis=axis, dtype=dtype, out=out,
                                keepdims=keepdims)
def _all(a, axis=None, dtype=None, out=None, keepdims=False):
    """logical_and.reduce along *axis*; backs ndarray.all / np.all."""
    return um.logical_and.reduce(a, axis=axis, dtype=dtype, out=out,
                                 keepdims=keepdims)
def _count_reduce_items(arr, axis):
if axis is None:
axis = tuple(xrange(arr.ndim))
if not isinstance(axis, tuple):
axis = (axis,)
items = 1
for ax in axis:
items *= arr.shape[ax]
return items
def _mean(a, axis=None, dtype=None, out=None, keepdims=False):
    """Mean implementation backing ndarray.mean: add.reduce, then
    true-divide by the number of reduced elements."""
    arr = asanyarray(a)

    # Upgrade bool, unsigned int, and int to float64
    if dtype is None and arr.dtype.kind in ['b','u','i']:
        ret = um.add.reduce(arr, axis=axis, dtype='f8',
                            out=out, keepdims=keepdims)
    else:
        ret = um.add.reduce(arr, axis=axis, dtype=dtype,
                            out=out, keepdims=keepdims)
    rcount = _count_reduce_items(arr, axis)
    if isinstance(ret, mu.ndarray):
        # divide in place (unsafe cast) so ``out`` semantics are preserved
        ret = um.true_divide(ret, rcount,
                             out=ret, casting='unsafe', subok=False)
    else:
        # full reduction produced a scalar
        ret = ret / float(rcount)
    return ret
def _var(a, axis=None, dtype=None, out=None, ddof=0,
         keepdims=False):
    """Variance implementation backing ndarray.var.

    Computes mean(|x - x.mean()|**2) with a ``ddof`` degrees-of-freedom
    correction in the divisor (n - ddof).
    """
    arr = asanyarray(a)

    # First compute the mean, saving 'rcount' for reuse later
    # (bool/uint/int inputs are accumulated as float64)
    if dtype is None and arr.dtype.kind in ['b','u','i']:
        arrmean = um.add.reduce(arr, axis=axis, dtype='f8', keepdims=True)
    else:
        arrmean = um.add.reduce(arr, axis=axis, dtype=dtype, keepdims=True)
    rcount = _count_reduce_items(arr, axis)
    if isinstance(arrmean, mu.ndarray):
        arrmean = um.true_divide(arrmean, rcount,
                                 out=arrmean, casting='unsafe', subok=False)
    else:
        arrmean = arrmean / float(rcount)

    # arr - arrmean (keepdims=True above makes this broadcast)
    x = arr - arrmean

    # (arr - arrmean) ** 2 -- |.|**2 via the conjugate for complex input
    if arr.dtype.kind == 'c':
        x = um.multiply(x, um.conjugate(x), out=x).real
    else:
        x = um.multiply(x, x, out=x)

    # add.reduce((arr - arrmean) ** 2, axis)
    ret = um.add.reduce(x, axis=axis, dtype=dtype, out=out, keepdims=keepdims)

    # add.reduce((arr - arrmean) ** 2, axis) / (n - ddof)
    if not keepdims and isinstance(rcount, mu.ndarray):
        rcount = rcount.squeeze(axis=axis)
    rcount -= ddof
    if isinstance(ret, mu.ndarray):
        ret = um.true_divide(ret, rcount,
                             out=ret, casting='unsafe', subok=False)
    else:
        ret = ret / float(rcount)
    return ret
def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
    """Standard deviation backing ndarray.std: square root of _var."""
    variance = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
                    keepdims=keepdims)
    if isinstance(variance, mu.ndarray):
        # take the root in place to honor the ``out`` argument
        return um.sqrt(variance, out=variance)
    return um.sqrt(variance)
| {
"repo_name": "ojii/sandlib",
"path": "lib/lib_pypy/numpypy/core/_methods.py",
"copies": "1",
"size": "3874",
"license": "bsd-3-clause",
"hash": -6613248748276846000,
"line_mean": 33.9009009009,
"line_max": 78,
"alpha_frac": 0.5918946825,
"autogenerated": false,
"ratio": 3.348314606741573,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9419307803806558,
"avg_score": 0.004180297087002987,
"num_lines": 111
} |
# Array methods which are called by the both the C-code for the method
# and the Python code for the NumPy-namespace function
import multiarray as mu
import umath as um
from numeric import asanyarray
def _amax(a, axis=None, out=None, keepdims=False):
    """maximum.reduce along *axis*; backs ndarray.max / np.amax."""
    return um.maximum.reduce(a, axis=axis,
                             out=out, keepdims=keepdims)
def _amin(a, axis=None, out=None, keepdims=False):
    """minimum.reduce along *axis*; backs ndarray.min / np.amin."""
    return um.minimum.reduce(a, axis=axis,
                             out=out, keepdims=keepdims)
def _sum(a, axis=None, dtype=None, out=None, keepdims=False):
    """add.reduce along *axis*; backs ndarray.sum / np.sum."""
    return um.add.reduce(a, axis=axis, dtype=dtype,
                         out=out, keepdims=keepdims)
def _prod(a, axis=None, dtype=None, out=None, keepdims=False):
    """multiply.reduce along *axis*; backs ndarray.prod / np.prod."""
    return um.multiply.reduce(a, axis=axis, dtype=dtype,
                              out=out, keepdims=keepdims)
def _any(a, axis=None, dtype=None, out=None, keepdims=False):
    """logical_or.reduce along *axis*; backs ndarray.any / np.any."""
    return um.logical_or.reduce(a, axis=axis, dtype=dtype, out=out,
                                keepdims=keepdims)
def _all(a, axis=None, dtype=None, out=None, keepdims=False):
    """logical_and.reduce along *axis*; backs ndarray.all / np.all."""
    return um.logical_and.reduce(a, axis=axis, dtype=dtype, out=out,
                                 keepdims=keepdims)
def _count_reduce_items(arr, axis):
if axis is None:
axis = tuple(xrange(arr.ndim))
if not isinstance(axis, tuple):
axis = (axis,)
items = 1
for ax in axis:
items *= arr.shape[ax]
return items
def _mean(a, axis=None, dtype=None, out=None, keepdims=False):
    """Mean implementation backing ndarray.mean: add.reduce, then
    true-divide by the number of reduced elements."""
    arr = asanyarray(a)

    # Upgrade bool, unsigned int, and int to float64
    if dtype is None and arr.dtype.kind in ['b','u','i']:
        ret = um.add.reduce(arr, axis=axis, dtype='f8',
                            out=out, keepdims=keepdims)
    else:
        ret = um.add.reduce(arr, axis=axis, dtype=dtype,
                            out=out, keepdims=keepdims)
    rcount = _count_reduce_items(arr, axis)
    if isinstance(ret, mu.ndarray):
        # divide in place (unsafe cast) so ``out`` semantics are preserved
        ret = um.true_divide(ret, rcount,
                             out=ret, casting='unsafe', subok=False)
    else:
        # full reduction produced a scalar
        ret = ret / float(rcount)
    return ret
def _var(a, axis=None, dtype=None, out=None, ddof=0,
         keepdims=False):
    """Variance implementation backing ndarray.var.

    Computes mean(|x - x.mean()|**2) with a ``ddof`` degrees-of-freedom
    correction in the divisor (n - ddof).
    """
    arr = asanyarray(a)

    # First compute the mean, saving 'rcount' for reuse later
    # (bool/uint/int inputs are accumulated as float64)
    if dtype is None and arr.dtype.kind in ['b','u','i']:
        arrmean = um.add.reduce(arr, axis=axis, dtype='f8', keepdims=True)
    else:
        arrmean = um.add.reduce(arr, axis=axis, dtype=dtype, keepdims=True)
    rcount = _count_reduce_items(arr, axis)
    if isinstance(arrmean, mu.ndarray):
        arrmean = um.true_divide(arrmean, rcount,
                                 out=arrmean, casting='unsafe', subok=False)
    else:
        arrmean = arrmean / float(rcount)

    # arr - arrmean (keepdims=True above makes this broadcast)
    x = arr - arrmean

    # (arr - arrmean) ** 2 -- |.|**2 via the conjugate for complex input
    if arr.dtype.kind == 'c':
        x = um.multiply(x, um.conjugate(x), out=x).real
    else:
        x = um.multiply(x, x, out=x)

    # add.reduce((arr - arrmean) ** 2, axis)
    ret = um.add.reduce(x, axis=axis, dtype=dtype, out=out, keepdims=keepdims)

    # add.reduce((arr - arrmean) ** 2, axis) / (n - ddof)
    if not keepdims and isinstance(rcount, mu.ndarray):
        rcount = rcount.squeeze(axis=axis)
    rcount -= ddof
    if isinstance(ret, mu.ndarray):
        ret = um.true_divide(ret, rcount,
                             out=ret, casting='unsafe', subok=False)
    else:
        ret = ret / float(rcount)
    return ret
def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
    """Standard deviation backing ndarray.std: square root of _var."""
    variance = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
                    keepdims=keepdims)
    if isinstance(variance, mu.ndarray):
        # take the root in place to honor the ``out`` argument
        return um.sqrt(variance, out=variance)
    return um.sqrt(variance)
| {
"repo_name": "bussiere/pypyjs",
"path": "website/demo/home/rfk/repos/pypy/lib_pypy/numpypy/core/_methods.py",
"copies": "2",
"size": "3799",
"license": "mit",
"hash": -3786545950004664300,
"line_mean": 33.8532110092,
"line_max": 78,
"alpha_frac": 0.5888391682,
"autogenerated": false,
"ratio": 3.356007067137809,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9927046444917278,
"avg_score": 0.0035599580841063384,
"num_lines": 109
} |
""" Array of all possible menus """
menus = []
########## Module wrapping menus and their creation ##########
##
# @brief A class wrapping the menu objects.
#
# Note: These ARE the menus that go in the nav
class Menu:
    """A navigation menu entry (these ARE the menus that go in the nav)."""

    def __init__(self, name, route_func_name, visible=True, submenus=None):
        """Create a new menu object.

        @param name             Display string for the menu entry
        @param route_func_name  Route to the view for this menu entry
        @param visible          Whether the entry is shown
        @param submenus         Optional list of submenus.  A fresh list is
                                created when omitted -- the previous ``[]``
                                default was a single shared list that the
                                subclasses mutated across instantiations.
        """
        self._name = name
        self._route_func_name = route_func_name
        # never alias a shared default list
        self._submenus = [] if submenus is None else submenus
        self._visible = visible

    # Properties
    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name):
        self._name = name

    @property
    def route_func_name(self):
        return self._route_func_name

    @route_func_name.setter
    def route_func_name(self, route_func_name):
        self._route_func_name = route_func_name

    def visible(self, request):
        """Whether this entry should be rendered for *request*."""
        return self._visible

    @property
    def submenus(self):
        return self._submenus

    @submenus.setter
    def submenus(self, subs):
        self._submenus = subs

    def display_submenus(self):
        """Boolean of whether or not to render the submenu at all.

        Checks that there is at least one visible submenu.

        NOTE(review): ``m.visible`` is a bound method here, which is always
        truthy, so any non-empty submenu list renders; UserMenu shadows
        ``visible`` with a bool on some instances.  Kept as-is -- confirm
        intent before changing.
        """
        if self._submenus:
            for m in self._submenus:
                if m.visible:
                    return True
        return False
class AuthDependentMenu(Menu):
    """Menu whose rendering depends on the request's authentication state."""

    def __init__(self, name, route_func_name, visible=True, submenus=None):
        # avoid the shared mutable-default list; Menu always receives a list
        super().__init__(name, route_func_name, visible,
                         [] if submenus is None else submenus)

    def isAuthenticated(self, request):
        """True when *request* carries an authenticated user."""
        return request.user and \
            request.user.is_authenticated()
class UserMenu(AuthDependentMenu):
    """Login/username menu entry with a logout submenu.

    Bug fixed: the old ``submenus=[]`` default was one shared list, so the
    ``append`` below grew it by another logout entry on every UserMenu
    instantiation.  A private copy is used instead.
    """

    def __init__(self, name, route_func_name, visible=True, submenus=None):
        self.logoutMenu = Menu("Logout", "pswebsite:logout", False)
        submenus = [] if submenus is None else list(submenus)
        submenus.append(self.logoutMenu)
        super().__init__(name, route_func_name, visible, submenus)

    # Lets override visible() as a pseudo-hook
    def visible(self, request):
        """Switch between the username/user view and the login view."""
        if self.isAuthenticated(request):
            # note, the username will be the email
            # as this is being enforced in the app
            self.name = request.user.username
            self.route_func_name = "pswebsite:user"
            # Turn on the logout submenu
            # (NOTE(review): this shadows Menu.visible, a method, with a bool
            # on the logout instance -- display_submenus treats both as truthy)
            self.logoutMenu.visible = True
        else:
            self.name = "Login"
            self.route_func_name = "pswebsite:login"
            # Turn off the logout submenu
            self.logoutMenu.visible = False
        return super().visible(request)
class RegisterMenu(AuthDependentMenu):
    """Menu entry shown only to unauthenticated users."""

    def __init__(self, name, route_func_name, visible=True, submenus=None):
        # pass a fresh list instead of a shared mutable default
        super().__init__(name, route_func_name, visible,
                         [] if submenus is None else submenus)

    def visible(self, request):
        # hide "Register" once the user is logged in
        return not self.isAuthenticated(request)
def createApplicationMenus():
    """Populate the module-level ``menus`` list with the nav entries."""
    menus.append(Menu("Home", "pswebsite:index"))
    menus.append(Menu("About", "pswebsite:about"))
    menus.append(UserMenu("Login", "pswebsite:login"))
    menus.append(RegisterMenu("Register", "pswebsite:register"))
| {
"repo_name": "aabmass/pswebsite_django",
"path": "pswebsite/menus.py",
"copies": "1",
"size": "3447",
"license": "apache-2.0",
"hash": 1075642501937753700,
"line_mean": 28.4615384615,
"line_max": 73,
"alpha_frac": 0.6141572382,
"autogenerated": false,
"ratio": 4.0174825174825175,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004151084341324209,
"num_lines": 117
} |
"""array of bits, represented as int in C++"""
from collections import Sequence
from ctypes import cdll
bit_array = cdll.LoadLibrary('lib/lib_bit_array.so')
class BitArray(Sequence):
    """An array of bits backed by the C ``lib_bit_array`` library."""

    def __init__(self, length):
        """Allocate a bit array of *length* bits (initial values depend on
        the C ``_new`` implementation -- presumably zeroed; TODO confirm)."""
        assert length >= 0
        self.length = length
        # opaque handle to the C-side object
        self.obj = bit_array._new(length)

    def __len__(self):
        return self.length

    def __getitem__(self, position):
        # NOTE(review): negative indices and slices are rejected by the
        # assert, so not every Sequence convenience works on this type.
        assert(0 <= position < self.length), "outside array"
        return(bit_array._get_bit(self.obj, position))

    def __str__(self):
        """pretty print of the bit array"""
        # groups of 10 bits separated by spaces, line break every 50 bits
        result_string = []
        for i in range(self.length):
            result_string.append(str(self.__getitem__(i)))
            if (i+1) % 10 == 0:
                result_string.append(" ")
            if (i+1) % 50 == 0:
                result_string.append("{0}\n".format(i))
        return "".join(result_string)

    def __repr__(self):
        return self.__str__()

    def __del__(self):
        # release the C-side allocation when the Python wrapper dies
        bit_array._free(self.obj)

    def get_bits(self):
        """get the contiguous array of bits, in string form"""
        result_string = []
        for i in range(self.length):
            result_string.append(str(self.__getitem__(i)))
        return "".join(result_string)

    def set_bit(self, position):
        # set the bit at *position* to 1
        assert 0 <= position < self.length, "outside array"
        bit_array._set_bit(self.obj, position)

    def clear_bit(self, position):
        # set the bit at *position* to 0
        assert 0 <= position < self.length, "outside array"
        bit_array._clear_bit(self.obj, position)
__all__ = ["BitArray"]
| {
"repo_name": "in3rtial/huffman",
"path": "src/bit_array.py",
"copies": "2",
"size": "1626",
"license": "cc0-1.0",
"hash": -8206598734838193000,
"line_mean": 27.5263157895,
"line_max": 62,
"alpha_frac": 0.5621156212,
"autogenerated": false,
"ratio": 3.7379310344827585,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5300046655682759,
"avg_score": null,
"num_lines": null
} |
'''array of objects'''
# NOTE(review): this is a Rusthon (Python-like dialect) regression test, not
# plain Python -- `-> string`, `[]int` and `*A` are Rusthon type annotations.

class A:
    def foo(self) -> string:
        return 'xxxx'

class B(A):
    def foo(self) -> string:
        return 'hello'

class C(A):
    def foo(self) -> string:
        return 'world'

def push( arr:[]int, x:int ):
    # append an int to a typed int array
    arr.append( x )

def push2( arr:[]*A, x:*A ):
    # append a pointer-to-A to a typed pointer array
    arr.append( x )

def my_generic( s:A ):
    # dispatches through the A.foo interface for any subclass of A
    print( s.foo() )
def main():
arr = []int()
arr.append(1)
push( arr, 100)
TestError( len(arr)==2 )
print(arr)
a1 = A(); a2 = A(); a3 = A()
obarr = []A( a1, a2 )
print(obarr)
push2( obarr, a3 )
print(obarr)
TestError( len(obarr)==3 )
b1 = B()
print(b1)
#obarr.append( b1 ) ## fails because subclasses can not be cast to their base class
#################################################
barr = []B( b1, ) ## todo single item array should not require `,`
c1 = C()
barr.append( c1 )
print(barr)
bb = barr[0]
print('bb:', bb)
print(bb.foo())
cc = barr[1]
print('cc:', cc)
print(cc.foo())
#ccc = go.type_assert( cc, C )
#print(ccc.foo())
print('----testing generic----')
for subclass in barr:
print('subclass in bar:', subclass)
my_generic(subclass) | {
"repo_name": "tempbottle/Rusthon",
"path": "regtests/go/array_of_objects.py",
"copies": "2",
"size": "1228",
"license": "bsd-3-clause",
"hash": 3790739595576521700,
"line_mean": 18.8225806452,
"line_max": 88,
"alpha_frac": 0.4991856678,
"autogenerated": false,
"ratio": 2.9805825242718447,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9336358255769497,
"avg_score": 0.0286819872604698,
"num_lines": 62
} |
"""Array operations
"""
# Copyright (c) 2013 Clarinova. This file is licensed under the terms of the
# Revised BSD License, included in this distribution as LICENSE.txt
from numpy import *
from ambry.geo import Point
def std_norm(a):
    """Normalize to +-4 sigma on the range 0 to 1"""
    z = (a - a.mean()) / a.std()          # z-score
    scaled = (z.clip(-4, 4) + 4) / 8      # [-4, 4] -> [0, 1]
    try:
        scaled.set_fill_value(0)
    except AttributeError:
        # plain ndarray (not masked): nothing to do
        pass
    return scaled
def unity_norm(a):
    """Linearly rescale *a* onto the range 0 to 1.

    NOTE: a constant array has zero span, so the division then yields
    NaN/inf (numpy warns); callers are expected to pass varying data.
    """
    span = a.max() - a.min()  # renamed from `range`, which shadowed the builtin
    o = (a - a.min()) / span
    try:
        o.set_fill_value(0)
    except AttributeError:
        # If it isn't a masked array
        pass
    return o
def statistics(a):
    """Return a multi-line, human-readable summary of array *a*:
    min/max, range, average/mean/median, standard deviation, sum and a
    10-bin histogram.
    """
    from numpy import sum as asum
    r = ("Min, Max: {},{}\n".format(amin(a), amax(a)) +
         "Range : {}\n".format(ptp(a)) +
         "Average : {}\n".format(average(a)) +
         "Mean : {}\n".format(mean(a)) +
         "Median : {}\n".format(median(a)) +
         "StdDev : {}\n".format(std(a)) +
         "Sum : {}\n".format(asum(a))
         )
    try:
        # Masked arrays: histogram only the unmasked values; a plain ndarray
        # has no .compressed() and raises AttributeError. (Was a bare
        # ``except:`` which silently hid every other error.)
        r += "Histogram:{}".format(histogram(a.compressed())[0].ravel().tolist())
    except AttributeError:
        r += "Histogram: {}".format(histogram(a)[0].ravel().tolist())
    return r
def add(s, v, m):
    """Kernel combiner: scale *m* by *s* and add the result to *v*."""
    scaled = m * s
    return v + scaled
def apply_copy(kernel, a, func=add, nodata=None, mult=True):
    """For all cells in a, or all nonzero cells, apply the kernel
    to a new output array

    kernel: object exposing ``apply(out, point, func, value)`` -- presumably
        an ambry.geo kernel; TODO confirm the exact signature.
    a: 2-d input array (indexed as a[row, col]).
    func: combiner forwarded to ``kernel.apply`` (defaults to ``add``).
    nodata: cell value to skip. 0 means "visit only nonzero cells"; any other
        non-None value is excluded by comparison; None visits every cell.
    mult: not referenced in this body -- presumably consumed by the kernel or
        ``func``; verify against callers.

    NOTE(review): ``itertools.izip`` is Python 2 only; under Python 3 this
    raises ImportError (plain ``zip`` is the equivalent there).
    """
    from itertools import izip
    o = zeros_like(a)
    #
    # Generate indices of the cells to visit.
    if nodata == 0:
        indx = nonzero(a)
        z = izip(indx[0], indx[1])
    elif nodata is not None:
        indx = nonzero(a != nodata)
        z = izip(indx[0], indx[1])
    else:
        z = ndindex(a.shape)

    for row, col in z:
        # Point takes (x, y), i.e. (col, row).
        kernel.apply(o, Point(col, row), func, a[row, col])

    return o
| {
"repo_name": "kball/ambry",
"path": "ambry/geo/array.py",
"copies": "1",
"size": "2178",
"license": "bsd-2-clause",
"hash": 3836112349127994000,
"line_mean": 21.6875,
"line_max": 81,
"alpha_frac": 0.5101010101,
"autogenerated": false,
"ratio": 3.4245283018867925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9203453360593419,
"avg_score": 0.0462351902786747,
"num_lines": 96
} |
"""Array operations
"""
# Copyright (c) 2013 Clarinova. This file is licensed under the terms of the
# Revised BSD License, included in this distribution as LICENSE.txt
from numpy import *
from databundles.geo import Point
def std_norm(a):
    """Normalize to +-4 sigma on the range 0 to 1"""
    z = (a - a.mean()) / a.std()          # z-score
    scaled = (z.clip(-4, 4) + 4) / 8      # [-4, 4] -> [0, 1]
    try:
        scaled.set_fill_value(0)
    except AttributeError:
        # plain ndarray (not masked): nothing to do
        pass
    return scaled
def unity_norm(a):
    """Linearly rescale *a* onto the range 0 to 1.

    NOTE: a constant array has zero span, so the division then yields
    NaN/inf (numpy warns); callers are expected to pass varying data.
    """
    span = a.max() - a.min()  # renamed from `range`, which shadowed the builtin
    o = (a - a.min()) / span
    try:
        o.set_fill_value(0)
    except AttributeError:
        # If it isn't a masked array
        pass
    return o
def statistics(a):
    """Return a multi-line, human-readable summary of array *a*:
    min/max, range, average/mean/median, standard deviation, sum and a
    10-bin histogram.
    """
    from numpy import sum as asum
    r = ("Min, Max: {},{}\n".format(amin(a), amax(a)) +
         "Range : {}\n".format(ptp(a)) +
         "Average : {}\n".format(average(a)) +
         "Mean : {}\n".format(mean(a)) +
         "Median : {}\n".format(median(a)) +
         "StdDev : {}\n".format(std(a)) +
         "Sum : {}\n".format(asum(a))
         )
    try:
        # Masked arrays: histogram only the unmasked values; a plain ndarray
        # has no .compressed() and raises AttributeError. (Was a bare
        # ``except:`` which silently hid every other error.)
        r += "Histogram:{}".format(histogram(a.compressed())[0].ravel().tolist())
    except AttributeError:
        r += "Histogram: {}".format(histogram(a)[0].ravel().tolist())
    return r
def add(s, v, m):
    """Kernel combiner: scale *m* by *s* and add the result to *v*."""
    scaled = m * s
    return v + scaled
def apply_copy(kernel, a, func=add, nodata=None, mult=True):
    """For all cells in a, or all nonzero cells, apply the kernel
    to a new output array

    kernel: object exposing ``apply(out, point, func, value)`` -- presumably
        a databundles.geo kernel; TODO confirm the exact signature.
    a: 2-d input array (indexed as a[row, col]).
    func: combiner forwarded to ``kernel.apply`` (defaults to ``add``).
    nodata: cell value to skip. 0 means "visit only nonzero cells"; any other
        non-None value is excluded by comparison; None visits every cell.
    mult: not referenced in this body -- presumably consumed by the kernel or
        ``func``; verify against callers.

    NOTE(review): ``itertools.izip`` is Python 2 only; under Python 3 this
    raises ImportError (plain ``zip`` is the equivalent there).
    """
    from itertools import izip
    o = zeros_like(a)
    #
    # Generate indices of the cells to visit.
    if nodata == 0:
        indx = nonzero(a)
        z = izip(indx[0], indx[1])
    elif nodata is not None:
        indx = nonzero(a != nodata)
        z = izip(indx[0], indx[1])
    else:
        z = ndindex(a.shape)

    for row, col in z:
        # Point takes (x, y), i.e. (col, row).
        kernel.apply(o, Point(col, row), func, a[row, col])

    return o
| {
"repo_name": "treyhunner/databundles",
"path": "databundles/geo/array.py",
"copies": "1",
"size": "2184",
"license": "bsd-3-clause",
"hash": -5572227541791771000,
"line_mean": 21.75,
"line_max": 81,
"alpha_frac": 0.5114468864,
"autogenerated": false,
"ratio": 3.4285714285714284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44400183149714284,
"avg_score": null,
"num_lines": null
} |
# Array ops
# Given an array A of size n, print the number
# of distinct elements in A for every frame of
# size k
def printKElems(A,k):
for i in xrange(len(A)-k):
print len( set([i:k+i+1]) )
# From a sorted array, find all A[i] + A[j] = 0
def findIfij(A):
    """Collect and print index pairs whose values sum to zero.

    NOTE(review): i only ranges over the first half and j walks downward
    from the midpoint, so zero-sum pairs whose larger index lies in the
    upper half are never examined; a two-pointer sweep over the whole
    sorted array is the usual approach. Verify intent before reuse.
    (Python 2 file: integer `/` division and `print` statements.)
    """
    m = len(A)/2
    if len(A)%2 != 0: m = m + 1
    pairs = []
    for i in range(m):
        for j in xrange(m, -1, -1):
            if A[i] + A[j] == 0: pairs.append((i, j))
    print pairs
# Find the number of ocurrences of k in A
def occurencesOfKinA(A, k):
    """Unfinished binary-search count of k in sorted A.

    NOTE(review): stub -- both comparison branches are ``pass``, ``low`` and
    ``high`` are never narrowed, and nothing is counted or returned yet.
    """
    low = 0
    high = len(A)
    i = 0
    while i < len(A):
        n = (high-low)/2    # py2 integer division; midpoint offset
        p = A[n]
        if p > k:
            pass
        else:
            pass
        i = i + 1
# Given a binary array maxmize the number
# of zeros by flipping a single bit.
def maxZerosFlipped(A):
    """Two stitched-together attempts at "maximize zeros by flipping one bit".

    NOTE(review): looks unfinished. In the first attempt ``jndex`` can stay
    -1 (flipping the *last* element via A[-1]); in the second, ``count1`` and
    ``count0`` are initialized but never incremented, so the inner loop
    overwrites A with zeros and ``maxDiff`` stays 0. Review before use.
    """
    P = [0 for _ in range(len(A))]
    index, maxnum = -1, -1
    P[0] = 1
    # Use maximum contiguous subsequence of zeros, tracked in P.
    for i in range(1, len(A)):
        if A[i] == A[i - 1] and A[i] == 0:
            P[i] = P[i - 1] + 1
        else:
            if P[i - 1] > maxnum:
                index = i - 1
                maxnum = P[index]
            P[i] = 0
    # So we now have two options:
    # - Either we flip a bit on the longest subsequence, or
    # - We flip a bit on the maximum subsequence split between
    #   two.
    jndex = -1
    for i in range(1, len(P) - 1):
        if P[i] == 0:
            if P[i - 1] + P[i + 1] + 1 > P[index]:
                jndex = i
    A[jndex] = 1
    print A
    # Here's another
    maxDiff, zeroCount = 0, 0
    for i in range(len(A)):
        if A[i] == 0:
            zeroCount = zeroCount + 1
        count1, count0 = 0, 0
        for j in range(i, len(A)):
            A[j] = count1 if A[j] == 1 else count0
            maxDiff = max(maxDiff, count1 - count0)
    return zeroCount + maxDiff
# From a given array A, print all numbers i,j
# such that i*j = k
def triplet(A, k):
    """Print ordered pairs (i, j) of *indices* with A[i]*A[j] == k.

    NOTE(review): despite the header comment, these are index pairs, not the
    values; each match is emitted in both orientations (i, j) and (j, i).
    """
    ans = []
    for i in range(len(A)-1):
        for j in range(i+1, len(A)):
            if A[i]*A[j] == k:
                ans.append((i, j))
                ans.append((j, i))
    print ans
# Find n*rot(A)
def findNRot(A):
    """Find the rotation pivot of a rotated sorted array.

    The linear scan returns the first index whose value drops below its
    predecessor; if the array was never rotated, execution falls through
    to a second, binary-search sketch.

    NOTE(review): the fall-through half is unfinished -- A[high] indexes out
    of range (high starts at len(A)), the loop runs a fixed len(A) times
    instead of converging, and ``return n`` is unbound for empty input.
    """
    for i in range(1, len(A)):
        if A[i] < A[i - 1]:
            return i
    # We can do this with binary search:
    i = 0
    low, high = 0, len(A)
    while i < len(A):
        n = (high - low)/2    # py2 integer division
        p = A[n]
        # This subarray is ok
        if p < A[low] and p > A[high]:
            high = n
        else:
            low = n
        i = i + 1
    return n
# From a given array [ 1 1 1 1 0 0 1 0 ] output
# something like [ 0 0 0 1 1 1 1 1 ]
def groupZeros(A):
    """Move every zero of binary list *A* to the front, in place, then print.

    Bug fixed: the original wrote ``A[z] = 0`` before ``A[i] = 1``; when the
    write cursor and the scan index coincided (z == i, i.e. the prefix so far
    was all zeros) the second store clobbered the zero -- e.g. [0, 1] came
    out as [1, 1]. Writing the 1 first makes the coinciding case a no-op.
    """
    z = 0
    for i in range(len(A)):
        if A[i] == 0:
            A[i] = 1   # clear the scanned cell first...
            A[z] = 0   # ...then drop the zero at the write cursor (may be i)
            z = z + 1
    print(A)
# Split an array on two arrays of equal sum.
# Return whether it can be done.
# O(n log n). Can we do it in O(n)?
def splitArray(A):
    """Greedy attempt at the equal-sum partition problem.

    Sorts A, then grows the lighter of two piles from opposite ends.

    NOTE(review): the author's inline comment is right -- a greedy sweep does
    not solve partition in general (it can miss valid splits that require a
    different assignment); ``currentSum``/``globalMax`` are computed but never
    used. A subset-sum DP is the standard correct approach.
    """
    A.sort()
    currentSum = A[0]
    globalMax = currentSum
    # This is probably wrong.
    L, R = A[0], A[-1]
    larray, rarray = [A[0]], [A[-1]]
    j = len(A) - 2
    for i in range(1, len(A)-1):
        if j == i: break
        if L <= R:
            L = L + A[i]
            larray.append(A[i])
        else:
            R = R + A[j]
            rarray.append(A[j])
            j = j - 1
    if L != R: return False
    print(larray, rarray)
    return True
"repo_name": "adewynter/Tools",
"path": "Algorithms/arrayOps.py",
"copies": "1",
"size": "2864",
"license": "mit",
"hash": 746480485796998400,
"line_mean": 15.6569767442,
"line_max": 60,
"alpha_frac": 0.5597067039,
"autogenerated": false,
"ratio": 2.2712133227597144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3330920026659715,
"avg_score": null,
"num_lines": null
} |
"""Array printing function
$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $
"""
__all__ = ["array2string", "set_printoptions", "get_printoptions"]
__docformat__ = 'restructuredtext'
#
# Written by Konrad Hinsen <hinsenk@ere.umontreal.ca>
# last revision: 1996-3-13
# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details)
# and by Perry Greenfield 2000-4-1 for numarray
# and by Travis Oliphant 2005-8-22 for numpy
# adapted by Francesc Alted 20012-8-18 for carray
import sys
import numpy as np
from numpy.core import numerictypes as _nt
from numpy import maximum, minimum, absolute, not_equal, isnan, isinf
from numpy.core.multiarray import format_longfloat
from numpy.core.fromnumeric import ravel
try:
from numpy.core.multiarray import datetime_as_string, datetime_data
except ImportError:
pass
def product(x, y):
    """Two-argument product; combined with ``reduce`` to count array elements."""
    return x * y
# Module-level print-option state; mutated by set_printoptions() and read by
# every formatter below.
_summaryEdgeItems = 3     # repr N leading and trailing items of each dimension
_summaryThreshold = 1000  # total items > triggers array summarization
_float_output_precision = 8            # digits after the decimal point
_float_output_suppress_small = False   # True: never use scientific notation
_line_width = 75                       # wrap width for array output
_nan_str = 'nan'                       # rendering of NaN
_inf_str = 'inf'                       # rendering of infinity
_formatter = None  # formatting function for array elements
if sys.version_info[0] >= 3:
from functools import reduce
def set_printoptions(precision=None, threshold=None, edgeitems=None,
                     linewidth=None, suppress=None,
                     nanstr=None, infstr=None,
                     formatter=None):
    """
    Set printing options.

    These options determine the way floating point numbers, arrays and
    other NumPy objects are displayed.

    Parameters
    ----------
    precision : int, optional
        Number of digits of precision for floating point output (default 8).
    threshold : int, optional
        Total number of array elements which trigger summarization
        rather than full repr (default 1000).
    edgeitems : int, optional
        Number of array items in summary at beginning and end of
        each dimension (default 3).
    linewidth : int, optional
        The number of characters per line for the purpose of inserting
        line breaks (default 75).
    suppress : bool, optional
        Whether or not suppress printing of small floating point values
        using scientific notation (default False).
    nanstr : str, optional
        String representation of floating point not-a-number (default nan).
    infstr : str, optional
        String representation of floating point infinity (default inf).
    formatter : dict of callables, optional
        If not None, the keys should indicate the type(s) that the respective
        formatting function applies to. Callables should return a string.
        Types that are not specified (by their corresponding keys) are handled
        by the default formatters. Individual types for which a formatter
        can be set are::

            - 'bool'
            - 'int'
            - 'timedelta' : a `numpy.timedelta64`
            - 'datetime' : a `numpy.datetime64`
            - 'float'
            - 'longfloat' : 128-bit floats
            - 'complexfloat'
            - 'longcomplexfloat' : composed of two 128-bit floats
            - 'numpy_str' : types `numpy.string_` and `numpy.unicode_`
            - 'str' : all other strings

        Other keys that can be used to set a group of types at once are::

            - 'all' : sets all types
            - 'int_kind' : sets 'int'
            - 'float_kind' : sets 'float' and 'longfloat'
            - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
            - 'str_kind' : sets 'str' and 'numpystr'

    See Also
    --------
    get_printoptions, set_string_function, array2string

    Notes
    -----
    `formatter` is always reset with a call to `set_printoptions`.

    Examples
    --------
    Floating point precision can be set:

    >>> np.set_printoptions(precision=4)
    >>> print np.array([1.123456789])
    [ 1.1235]

    Long arrays can be summarised:

    >>> np.set_printoptions(threshold=5)
    >>> print np.arange(10)
    [0 1 2 ..., 7 8 9]

    Small results can be suppressed:

    >>> eps = np.finfo(float).eps
    >>> x = np.arange(4.)
    >>> x**2 - (x + eps)**2
    array([ -4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00])
    >>> np.set_printoptions(suppress=True)
    >>> x**2 - (x + eps)**2
    array([-0., -0., 0., 0.])

    A custom formatter can be used to display array elements as desired:

    >>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)})
    >>> x = np.arange(3)
    >>> x
    array([int: 0, int: -1, int: -2])
    >>> np.set_printoptions()  # formatter gets reset
    >>> x
    array([0, 1, 2])

    To put back the default options, you can use:

    >>> np.set_printoptions(edgeitems=3,infstr='inf',
    ... linewidth=75, nanstr='nan', precision=8,
    ... suppress=False, threshold=1000, formatter=None)
    """
    # All state lives in module-level globals; each one is only updated when
    # the caller actually supplied a value for it.
    global _summaryThreshold, _summaryEdgeItems, _float_output_precision, \
           _line_width, _float_output_suppress_small, _nan_str, _inf_str, \
           _formatter
    if linewidth is not None:
        _line_width = linewidth
    if threshold is not None:
        _summaryThreshold = threshold
    if edgeitems is not None:
        _summaryEdgeItems = edgeitems
    if precision is not None:
        _float_output_precision = precision
    if suppress is not None:
        _float_output_suppress_small = not not suppress  # coerce to bool
    if nanstr is not None:
        _nan_str = nanstr
    if infstr is not None:
        _inf_str = infstr
    # Unlike the options above, the formatter is unconditionally reset
    # (see Notes in the docstring).
    _formatter = formatter
def get_printoptions():
    """
    Return the current print options.

    Returns
    -------
    print_opts : dict
        Dictionary of current print options with keys ``precision``,
        ``threshold``, ``edgeitems``, ``linewidth``, ``suppress``,
        ``nanstr``, ``infstr`` and ``formatter``. For a full description
        of these options, see `set_printoptions`.

    See Also
    --------
    set_printoptions, set_string_function
    """
    return {
        'precision': _float_output_precision,
        'threshold': _summaryThreshold,
        'edgeitems': _summaryEdgeItems,
        'linewidth': _line_width,
        'suppress': _float_output_suppress_small,
        'nanstr': _nan_str,
        'infstr': _inf_str,
        'formatter': _formatter,
    }
def _leading_trailing(a):
    """Return *a* reduced, along every dimension, to its first and last
    ``_summaryEdgeItems`` entries; used to build summarized ("...") output."""
    import numpy.core.numeric as _nc
    if a.ndim == 1:
        if len(a) > 2*_summaryEdgeItems:
            b = _nc.concatenate((a[:_summaryEdgeItems],
                                 a[-_summaryEdgeItems:]))
        else:
            b = a
    else:
        if len(a) > 2*_summaryEdgeItems:
            # Recurse into the leading edge items...
            l = [_leading_trailing(a[i]) for i in range(
                min(len(a), _summaryEdgeItems))]
            # ...then the trailing ones: a[-i] for i = edgeitems..1 keeps
            # them in original order.
            l.extend([_leading_trailing(a[-i]) for i in range(
                min(len(a), _summaryEdgeItems), 0, -1)])
        else:
            l = [_leading_trailing(a[i]) for i in range(0, len(a))]
        b = _nc.concatenate(tuple(l))
    return b
def _boolFormatter(x):
if x:
return ' True'
else:
return 'False'
def repr_format(x):
    """Element formatter that defers to ``repr`` (used for numpy strings)."""
    return '%r' % (x,)
def _array2string(a, max_line_width, precision, suppress_small, separator=' ',
                  prefix="", formatter=None):
    """Worker for `array2string`: resolve options, pick an element formatter
    from the dtype, then delegate layout to `_formatArray`."""
    # Any option left as None falls back to the module-level print options.
    if max_line_width is None:
        max_line_width = _line_width
    if precision is None:
        precision = _float_output_precision
    if suppress_small is None:
        suppress_small = _float_output_suppress_small
    if formatter is None:
        formatter = _formatter

    if a.size > _summaryThreshold:
        # Too large to print in full: keep only the edges, insert "..., ".
        summary_insert = "..., "
        data = _leading_trailing(a)
    else:
        summary_insert = ""
        data = ravel(a)

    # Default per-kind formatters; several pre-scan `data` to pick a width.
    formatdict = {'bool': _boolFormatter,
                  'int': IntegerFormat(data),
                  'float': FloatFormat(data, precision, suppress_small),
                  'longfloat': LongFloatFormat(precision),
                  'complexfloat': ComplexFormat(data, precision,
                                                suppress_small),
                  'longcomplexfloat': LongComplexFormat(precision),
                  'datetime': DatetimeFormat(data),
                  'timedelta': TimedeltaFormat(data),
                  'numpystr': repr_format,
                  'str': str}

    if formatter is not None:
        # Apply user overrides: group keys ('all', '*_kind') first, then
        # exact type keys so the most specific entry wins.
        fkeys = [k for k in formatter.keys() if formatter[k] is not None]
        if 'all' in fkeys:
            for key in formatdict.keys():
                formatdict[key] = formatter['all']
        if 'int_kind' in fkeys:
            for key in ['int']:
                formatdict[key] = formatter['int_kind']
        if 'float_kind' in fkeys:
            for key in ['float', 'longfloat']:
                formatdict[key] = formatter['float_kind']
        if 'complex_kind' in fkeys:
            for key in ['complexfloat', 'longcomplexfloat']:
                formatdict[key] = formatter['complex_kind']
        if 'str_kind' in fkeys:
            for key in ['numpystr', 'str']:
                formatdict[key] = formatter['str_kind']
        for key in formatdict.keys():
            if key in fkeys:
                formatdict[key] = formatter[key]

    try:
        # Legacy hook: arrays may carry their own `_format` callable.
        format_function = a._format
        msg = "The `_format` attribute is deprecated in Numpy 2.0 and " \
              "will be removed in 2.1. Use the `formatter` kw instead."
        import warnings
        warnings.warn(msg, DeprecationWarning)
    except AttributeError:
        # find the right formatting function for the array
        dtypeobj = a.dtype.type
        if issubclass(dtypeobj, _nt.bool_):
            format_function = formatdict['bool']
        elif issubclass(dtypeobj, _nt.integer):
            if (hasattr(_nt, "timedelta64") and
                    issubclass(dtypeobj, _nt.timedelta64)):
                format_function = formatdict['timedelta']
            else:
                format_function = formatdict['int']
        elif issubclass(dtypeobj, _nt.floating):
            if issubclass(dtypeobj, _nt.longfloat):
                format_function = formatdict['longfloat']
            else:
                format_function = formatdict['float']
        elif issubclass(dtypeobj, _nt.complexfloating):
            if issubclass(dtypeobj, _nt.clongfloat):
                format_function = formatdict['longcomplexfloat']
            else:
                format_function = formatdict['complexfloat']
        elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)):
            format_function = formatdict['numpystr']
        elif (hasattr(_nt, "datetime64") and
                issubclass(dtypeobj, _nt.datetime64)):
            format_function = formatdict['datetime']
        else:
            format_function = formatdict['str']

    # skip over "["
    next_line_prefix = " "
    # skip over array(
    next_line_prefix += " "*len(prefix)

    # [:-1] drops the trailing newline _formatArray always appends.
    lst = _formatArray(a, format_function, len(a.shape), max_line_width,
                       next_line_prefix, separator,
                       _summaryEdgeItems, summary_insert)[:-1]
    return lst
def _convert_arrays(obj):
import numpy.core.numeric as _nc
newtup = []
for k in obj:
if isinstance(k, _nc.ndarray):
k = k.tolist()
elif isinstance(k, tuple):
k = _convert_arrays(k)
newtup.append(k)
return tuple(newtup)
def array2string(a, max_line_width=None, precision=None,
                 suppress_small=None, separator=' ', prefix="",
                 style=repr, formatter=None):
    """
    Return a string representation of an array.

    Parameters
    ----------
    a : ndarray
        Input array.
    max_line_width : int, optional
        The maximum number of columns the string should span. Newline
        characters splits the string appropriately after array elements.
    precision : int, optional
        Floating point precision. Default is the current printing
        precision (usually 8), which can be altered using `set_printoptions`.
    suppress_small : bool, optional
        Represent very small numbers as zero. A number is "very small" if it
        is smaller than the current printing precision.
    separator : str, optional
        Inserted between elements.
    prefix : str, optional
        An array is typically printed as::

            'prefix(' + array2string(a) + ')'

        The length of the prefix string is used to align the
        output correctly.
    style : function, optional
        A function that accepts an ndarray and returns a string. Used only
        when the shape of `a` is equal to ``()``, i.e. for 0-D arrays.
    formatter : dict of callables, optional
        If not None, the keys should indicate the type(s) that the respective
        formatting function applies to. Callables should return a string.
        Types that are not specified (by their corresponding keys) are handled
        by the default formatters. Individual types for which a formatter
        can be set are::

            - 'bool'
            - 'int'
            - 'timedelta' : a `numpy.timedelta64`
            - 'datetime' : a `numpy.datetime64`
            - 'float'
            - 'longfloat' : 128-bit floats
            - 'complexfloat'
            - 'longcomplexfloat' : composed of two 128-bit floats
            - 'numpy_str' : types `numpy.string_` and `numpy.unicode_`
            - 'str' : all other strings

        Other keys that can be used to set a group of types at once are::

            - 'all' : sets all types
            - 'int_kind' : sets 'int'
            - 'float_kind' : sets 'float' and 'longfloat'
            - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
            - 'str_kind' : sets 'str' and 'numpystr'

    Returns
    -------
    array_str : str
        String representation of the array.

    Raises
    ------
    TypeError : if a callable in `formatter` does not return a string.

    See Also
    --------
    array_str, array_repr, set_printoptions, get_printoptions

    Notes
    -----
    If a formatter is specified for a certain type, the `precision` keyword is
    ignored for that type.

    Examples
    --------
    >>> x = np.array([1e-16,1,2,3])
    >>> print np.array2string(x, precision=2, separator=',',
    ...                       suppress_small=True)
    [ 0., 1., 2., 3.]

    >>> x = np.arange(3.)
    >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})
    '[0.00 1.00 2.00]'

    >>> x = np.arange(3)
    >>> np.array2string(x, formatter={'int':lambda x: hex(x)})
    '[0x0L 0x1L 0x2L]'
    """
    if a.shape == ():
        # 0-d array: format the single scalar through `style` (or the
        # deprecated per-array `_format` hook if present).
        x = a.item()
        try:
            lst = a._format(x)
            msg = "The `_format` attribute is deprecated in Numpy " \
                  "2.0 and will be removed in 2.1. Use the " \
                  "`formatter` kw instead."
            import warnings
            warnings.warn(msg, DeprecationWarning)
        except AttributeError:
            if isinstance(x, tuple):
                x = _convert_arrays(x)
            lst = style(x)
    elif reduce(product, a.shape) == 0:
        # treat as a null array if any of shape elements == 0
        lst = "[]"
    else:
        lst = _array2string(a, max_line_width, precision, suppress_small,
                            separator, prefix, formatter=formatter)
    return lst
def _extendLine(s, line, word, max_line_len, next_line_prefix):
if len(line.rstrip()) + len(word.rstrip()) >= max_line_len:
s += line.rstrip() + "\n"
line = next_line_prefix
line += word
return s, line
def _formatArray(a, format_function, rank, max_line_len,
                 next_line_prefix, separator, edge_items, summary_insert):
    """formatArray is designed for two modes of operation:

    1. Full output

    2. Summarized output
    """
    if rank == 0:
        # 0-d: render the bare element (tuples of arrays listified first).
        obj = a.item()
        if isinstance(obj, tuple):
            obj = _convert_arrays(obj)
        return str(obj)

    if summary_insert and 2*edge_items < len(a):
        # Summarize: edge_items from each end with "..., " in the middle.
        leading_items, trailing_items, summary_insert1 = \
            edge_items, edge_items, summary_insert
    else:
        leading_items, trailing_items, summary_insert1 = 0, len(a), ""

    if rank == 1:
        # Innermost dimension: lay formatted elements out on wrapped lines.
        s = ""
        line = next_line_prefix
        for i in xrange(leading_items):
            word = format_function(a[i]) + separator
            s, line = _extendLine(s, line, word, max_line_len, next_line_prefix)

        if summary_insert1:
            s, line = _extendLine(s, line, summary_insert1, max_line_len, next_line_prefix)

        # a[-i] for i = trailing_items..2, then the final element without a
        # trailing separator.
        for i in xrange(trailing_items, 1, -1):
            word = format_function(a[-i]) + separator
            s, line = _extendLine(s, line, word, max_line_len, next_line_prefix)

        word = format_function(a[-1])
        s, line = _extendLine(s, line, word, max_line_len, next_line_prefix)
        s += line + "]\n"
        s = '[' + s[len(next_line_prefix):]
    else:
        # Outer dimensions: recurse, indenting one extra column per level and
        # separating sub-blocks with rank-1 blank lines.
        s = '['
        sep = separator.rstrip()
        for i in xrange(leading_items):
            if i > 0:
                s += next_line_prefix
            s += _formatArray(a[i], format_function, rank-1, max_line_len,
                              " " + next_line_prefix, separator, edge_items,
                              summary_insert)
            s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1, 1)

        if summary_insert1:
            s += next_line_prefix + summary_insert1 + "\n"

        for i in xrange(trailing_items, 1, -1):
            if leading_items or i != trailing_items:
                s += next_line_prefix
            s += _formatArray(a[-i], format_function, rank-1, max_line_len,
                              " " + next_line_prefix, separator, edge_items,
                              summary_insert)
            s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1, 1)
        if leading_items or trailing_items > 1:
            s += next_line_prefix
        s += _formatArray(a[-1], format_function, rank-1, max_line_len,
                          " " + next_line_prefix, separator, edge_items,
                          summary_insert).rstrip()+']\n'
    return s
class FloatFormat(object):
    """Fixed/exponential formatter for float arrays. The constructor scans
    the data once to choose a notation and a common column width so that
    printed values line up."""

    def __init__(self, data, precision, suppress_small, sign=False):
        self.precision = precision
        self.suppress_small = suppress_small
        self.sign = sign
        self.exp_format = False
        self.large_exponent = False
        self.max_str_len = 0
        try:
            self.fillFormat(data)
        except (TypeError, NotImplementedError):
            # if reduce(data) fails, this instance will not be called, just
            # instantiated in formatdict.
            pass

    def fillFormat(self, data):
        """Pick fixed vs. exponential notation and compute the field width
        from the finite, non-zero magnitudes in *data*."""
        import numpy.core.numeric as _nc
        errstate = _nc.seterr(all='ignore')
        try:
            special = isnan(data) | isinf(data)
            valid = not_equal(data, 0) & ~special
            non_zero = absolute(data.compress(valid))
            if len(non_zero) == 0:
                max_val = 0.
                min_val = 0.
            else:
                max_val = maximum.reduce(non_zero)
                min_val = minimum.reduce(non_zero)
                # Very large or very spread-out magnitudes force exponential
                # notation (unless small-value suppression is requested).
                if max_val >= 1.e8:
                    self.exp_format = True
                if not self.suppress_small and (min_val < 0.0001
                                                or max_val/min_val > 1000.):
                    self.exp_format = True
        finally:
            _nc.seterr(**errstate)

        if self.exp_format:
            # 3-digit exponents need one extra column.
            self.large_exponent = 0 < min_val < 1e-99 or max_val >= 1e100
            self.max_str_len = 8 + self.precision
            if self.large_exponent:
                self.max_str_len += 1

            if self.sign:
                format = '%+'
            else:
                format = '%'
            format = format + '%d.%de' % (self.max_str_len, self.precision)
        else:
            # Fixed notation: trim precision to the digits actually needed.
            format = '%%.%df' % (self.precision,)
            if len(non_zero):
                precision = max([_digits(x, self.precision, format)
                                 for x in non_zero])
            else:
                precision = 0
            precision = min(self.precision, precision)
            self.max_str_len = len(str(int(max_val))) + precision + 2
            if _nc.any(special):
                # Leave room for the nan/inf strings (inf may carry a sign).
                self.max_str_len = max(self.max_str_len,
                                       len(_nan_str),
                                       len(_inf_str)+1)
            if self.sign:
                format = '%#+'
            else:
                format = '%#'
            format = format + '%d.%df' % (self.max_str_len, precision)

        self.special_fmt = '%%%ds' % (self.max_str_len,)
        self.format = format

    def __call__(self, x, strip_zeros=True):
        import numpy.core.numeric as _nc
        err = _nc.seterr(invalid='ignore')
        try:
            # nan/inf are rendered with the module-level strings, padded to
            # the common width.
            if isnan(x):
                if self.sign:
                    return self.special_fmt % ('+' + _nan_str,)
                else:
                    return self.special_fmt % (_nan_str,)
            elif isinf(x):
                if x > 0:
                    if self.sign:
                        return self.special_fmt % ('+' + _inf_str,)
                    else:
                        return self.special_fmt % (_inf_str,)
                else:
                    return self.special_fmt % ('-' + _inf_str,)
        finally:
            _nc.seterr(**err)

        s = self.format % x
        if self.large_exponent:
            # 3-digit exponent: pad a 2-digit exponent with a leading zero.
            expsign = s[-3]
            if expsign == '+' or expsign == '-':
                s = s[1:-2] + '0' + s[-2:]
        elif self.exp_format:
            # 2-digit exponent
            if s[-3] == '0':
                s = ' ' + s[:-3] + s[-2:]
        elif strip_zeros:
            # Replace trailing zeros with spaces to keep columns aligned.
            z = s.rstrip('0')
            s = z + ' '*(len(s)-len(z))
        return s
def _digits(x, precision, format):
s = format % x
z = s.rstrip('0')
return precision - len(s) + len(z)
_MAXINT = sys.maxint
_MININT = -sys.maxint-1
class IntegerFormat(object):
    """Fixed-width '%d' formatter sized to the widest integer in the data."""

    def __init__(self, data):
        try:
            widest = maximum.reduce(data)
            narrowest = minimum.reduce(data)
            width = max(len(str(widest)), len(str(narrowest)))
            self.format = '%' + str(width) + 'd'
        except (TypeError, NotImplementedError):
            # reduce() can fail for exotic dtypes; the instance is then only
            # created for the formatdict and never actually called.
            pass
        except ValueError:
            # this occurs when everything is NA
            pass

    def __call__(self, x):
        # Values outside the platform word range fall back to plain str().
        if _MININT < x < _MAXINT:
            return self.format % x
        return "%s" % x
class LongFloatFormat(object):
    # XXX Have to add something to determine the width to use a la FloatFormat
    # Right now, things won't line up properly
    def __init__(self, precision, sign=False):
        self.precision = precision
        self.sign = sign

    def __call__(self, x):
        """Format one long-double; non-negative values get a '+' or ' ' pad."""
        pad = '+' if self.sign else ' '
        if isnan(x):
            return pad + _nan_str
        if isinf(x):
            if x > 0:
                return pad + _inf_str
            return '-' + _inf_str
        if x >= 0:
            return pad + format_longfloat(x, self.precision)
        return format_longfloat(x, self.precision)
class LongComplexFormat(object):
    """Format complex long-doubles as '<real><imag>j'; the imaginary part
    always carries an explicit sign so the two halves read unambiguously."""

    def __init__(self, precision):
        self.real_format = LongFloatFormat(precision)
        self.imag_format = LongFloatFormat(precision, sign=True)

    def __call__(self, x):
        real_part = self.real_format(x.real)
        imag_part = self.imag_format(x.imag)
        return '%s%sj' % (real_part, imag_part)
class ComplexFormat(object):
    """Format complex values with two FloatFormats; in fixed notation the
    imaginary part's trailing zeros become padding after the 'j' so columns
    stay aligned."""

    def __init__(self, x, precision, suppress_small):
        self.real_format = FloatFormat(x.real, precision, suppress_small)
        self.imag_format = FloatFormat(x.imag, precision, suppress_small,
                                       sign=True)

    def __call__(self, x):
        real_part = self.real_format(x.real, strip_zeros=False)
        imag_part = self.imag_format(x.imag, strip_zeros=False)
        if self.imag_format.exp_format:
            return real_part + imag_part + 'j'
        stripped = imag_part.rstrip('0')
        padding = ' ' * (len(imag_part) - len(stripped))
        return real_part + stripped + 'j' + padding
class DatetimeFormat(object):
    """Render datetime64 elements as quoted ISO strings."""

    def __init__(self, x, unit=None,
                 timezone=None, casting='same_kind'):
        # Default the unit from the array's dtype ('s' for non-datetime data).
        if unit is None:
            unit = datetime_data(x.dtype)[0] if x.dtype.kind == 'M' else 's'
        # Pure date units render in UTC; time-of-day units in local time.
        if timezone is None:
            timezone = 'UTC' if unit in ('Y', 'M', 'W', 'D') else 'local'
        self.timezone = timezone
        self.unit = unit
        self.casting = casting

    def __call__(self, x):
        stamp = datetime_as_string(x,
                                   unit=self.unit,
                                   timezone=self.timezone,
                                   casting=self.casting)
        return "'%s'" % stamp
class TimedeltaFormat(object):
    """Fixed-width formatter for timedelta64 data, printed as raw i8 counts."""

    def __init__(self, data):
        if data.dtype.kind == 'm':
            as_ints = data.view('i8')
            width = max(len(str(maximum.reduce(as_ints))),
                        len(str(minimum.reduce(as_ints))))
            self.format = '%' + str(width) + 'd'

    def __call__(self, x):
        return self.format % x.astype('i8')
| {
"repo_name": "seibert/blaze-core",
"path": "blaze/carray/arrayprint.py",
"copies": "1",
"size": "26135",
"license": "bsd-2-clause",
"hash": 1778063798763836000,
"line_mean": 33.4788918206,
"line_max": 91,
"alpha_frac": 0.5414960781,
"autogenerated": false,
"ratio": 3.9852089051540105,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5026704983254011,
"avg_score": null,
"num_lines": null
} |
"""Array printing function
$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $
"""
__all__ = ["array2string", "array_str", "array_repr", "set_string_function",
"set_printoptions", "get_printoptions", "printoptions",
"format_float_positional", "format_float_scientific"]
__docformat__ = 'restructuredtext'
#
# Written by Konrad Hinsen <hinsenk@ere.umontreal.ca>
# last revision: 1996-3-13
# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details)
# and by Perry Greenfield 2000-4-1 for numarray
# and by Travis Oliphant 2005-8-22 for numpy
# Note: Both scalartypes.c.src and arrayprint.py implement strs for numpy
# scalars but for different purposes. scalartypes.c.src has str/reprs for when
# the scalar is printed on its own, while arrayprint.py has strs for when
# scalars are printed inside an ndarray. Only the latter strs are currently
# user-customizable.
import functools
import numbers
try:
from _thread import get_ident
except ImportError:
from _dummy_thread import get_ident
import numpy as np
from . import numerictypes as _nt
from .umath import absolute, isinf, isfinite, isnat
from . import multiarray
from .multiarray import (array, dragon4_positional, dragon4_scientific,
datetime_as_string, datetime_data, ndarray,
set_legacy_print_mode)
from .fromnumeric import any
from .numeric import concatenate, asarray, errstate
from .numerictypes import (longlong, intc, int_, float_, complex_, bool_,
flexible)
from .overrides import array_function_dispatch, set_module
import operator
import warnings
import contextlib
# Module-wide print-option defaults; set_printoptions() mutates this dict in
# place and every formatting routine reads from it.
_format_options = {
    'edgeitems': 3,  # repr N leading and trailing items of each dimension
    'threshold': 1000,  # total items > triggers array summarization
    'floatmode': 'maxprec',
    'precision': 8,  # precision of floating point representations
    'suppress': False,  # suppress printing small floating values in exp format
    'linewidth': 75,
    'nanstr': 'nan',
    'infstr': 'inf',
    'sign': '-',
    'formatter': None,
    'legacy': False}
def _make_options_dict(precision=None, threshold=None, edgeitems=None,
linewidth=None, suppress=None, nanstr=None, infstr=None,
sign=None, formatter=None, floatmode=None, legacy=None):
""" make a dictionary out of the non-None arguments, plus sanity checks """
options = {k: v for k, v in locals().items() if v is not None}
if suppress is not None:
options['suppress'] = bool(suppress)
modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal']
if floatmode not in modes + [None]:
raise ValueError("floatmode option must be one of " +
", ".join('"{}"'.format(m) for m in modes))
if sign not in [None, '-', '+', ' ']:
raise ValueError("sign option must be one of ' ', '+', or '-'")
if legacy not in [None, False, '1.13']:
warnings.warn("legacy printing option can currently only be '1.13' or "
"`False`", stacklevel=3)
if threshold is not None:
# forbid the bad threshold arg suggested by stack overflow, gh-12351
if not isinstance(threshold, numbers.Number):
raise TypeError("threshold must be numeric")
if np.isnan(threshold):
raise ValueError("threshold must be non-NAN, try "
"sys.maxsize for untruncated representation")
if precision is not None:
# forbid the bad precision arg as suggested by issue #18254
try:
options['precision'] = operator.index(precision)
except TypeError as e:
raise TypeError('precision must be an integer') from e
return options
@set_module('numpy')
def set_printoptions(precision=None, threshold=None, edgeitems=None,
                     linewidth=None, suppress=None, nanstr=None, infstr=None,
                     formatter=None, sign=None, floatmode=None, *, legacy=None):
    """
    Set printing options.
    These options determine the way floating point numbers, arrays and
    other NumPy objects are displayed.
    Parameters
    ----------
    precision : int or None, optional
        Number of digits of precision for floating point output (default 8).
        May be None if `floatmode` is not `fixed`, to print as many digits as
        necessary to uniquely specify the value.
    threshold : int, optional
        Total number of array elements which trigger summarization
        rather than full repr (default 1000).
        To always use the full repr without summarization, pass `sys.maxsize`.
    edgeitems : int, optional
        Number of array items in summary at beginning and end of
        each dimension (default 3).
    linewidth : int, optional
        The number of characters per line for the purpose of inserting
        line breaks (default 75).
    suppress : bool, optional
        If True, always print floating point numbers using fixed point
        notation, in which case numbers equal to zero in the current precision
        will print as zero.  If False, then scientific notation is used when
        absolute value of the smallest number is < 1e-4 or the ratio of the
        maximum absolute value to the minimum is > 1e3. The default is False.
    nanstr : str, optional
        String representation of floating point not-a-number (default nan).
    infstr : str, optional
        String representation of floating point infinity (default inf).
    sign : string, either '-', '+', or ' ', optional
        Controls printing of the sign of floating-point types. If '+', always
        print the sign of positive values. If ' ', always prints a space
        (whitespace character) in the sign position of positive values. If
        '-', omit the sign character of positive values. (default '-')
    formatter : dict of callables, optional
        If not None, the keys should indicate the type(s) that the respective
        formatting function applies to.  Callables should return a string.
        Types that are not specified (by their corresponding keys) are handled
        by the default formatters.  Individual types for which a formatter
        can be set are:
        - 'bool'
        - 'int'
        - 'timedelta' : a `numpy.timedelta64`
        - 'datetime' : a `numpy.datetime64`
        - 'float'
        - 'longfloat' : 128-bit floats
        - 'complexfloat'
        - 'longcomplexfloat' : composed of two 128-bit floats
        - 'numpystr' : types `numpy.string_` and `numpy.unicode_`
        - 'object' : `np.object_` arrays
        Other keys that can be used to set a group of types at once are:
        - 'all' : sets all types
        - 'int_kind' : sets 'int'
        - 'float_kind' : sets 'float' and 'longfloat'
        - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
        - 'str_kind' : sets 'numpystr'
    floatmode : str, optional
        Controls the interpretation of the `precision` option for
        floating-point types. Can take the following values
        (default maxprec_equal):
        * 'fixed': Always print exactly `precision` fractional digits,
                even if this would print more or fewer digits than
                necessary to specify the value uniquely.
        * 'unique': Print the minimum number of fractional digits necessary
                to represent each value uniquely. Different elements may
                have a different number of digits. The value of the
                `precision` option is ignored.
        * 'maxprec': Print at most `precision` fractional digits, but if
                an element can be uniquely represented with fewer digits
                only print it with that many.
        * 'maxprec_equal': Print at most `precision` fractional digits,
                but if every element in the array can be uniquely
                represented with an equal number of fewer digits, use that
                many digits for all elements.
    legacy : string or `False`, optional
        If set to the string `'1.13'` enables 1.13 legacy printing mode. This
        approximates numpy 1.13 print output by including a space in the sign
        position of floats and different behavior for 0d arrays. If set to
        `False`, disables legacy mode. Unrecognized strings will be ignored
        with a warning for forward compatibility.
        .. versionadded:: 1.14.0
    See Also
    --------
    get_printoptions, printoptions, set_string_function, array2string
    Notes
    -----
    `formatter` is always reset with a call to `set_printoptions`.
    Use `printoptions` as a context manager to set the values temporarily.
    Examples
    --------
    Floating point precision can be set:
    >>> np.set_printoptions(precision=4)
    >>> np.array([1.123456789])
    [1.1235]
    Long arrays can be summarised:
    >>> np.set_printoptions(threshold=5)
    >>> np.arange(10)
    array([0, 1, 2, ..., 7, 8, 9])
    Small results can be suppressed:
    >>> eps = np.finfo(float).eps
    >>> x = np.arange(4.)
    >>> x**2 - (x + eps)**2
    array([-4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00])
    >>> np.set_printoptions(suppress=True)
    >>> x**2 - (x + eps)**2
    array([-0., -0., 0., 0.])
    A custom formatter can be used to display array elements as desired:
    >>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)})
    >>> x = np.arange(3)
    >>> x
    array([int: 0, int: -1, int: -2])
    >>> np.set_printoptions()  # formatter gets reset
    >>> x
    array([0, 1, 2])
    To put back the default options, you can use:
    >>> np.set_printoptions(edgeitems=3, infstr='inf',
    ... linewidth=75, nanstr='nan', precision=8,
    ... suppress=False, threshold=1000, formatter=None)
    Also to temporarily override options, use `printoptions` as a context manager:
    >>> with np.printoptions(precision=2, suppress=True, threshold=5):
    ...     np.linspace(0, 10, 10)
    array([ 0.  ,  1.11,  2.22, ...,  7.78,  8.89, 10.  ])
    """
    # validate and keep only the options the caller actually supplied
    opt = _make_options_dict(precision, threshold, edgeitems, linewidth,
                             suppress, nanstr, infstr, sign, formatter,
                             floatmode, legacy)
    # formatter is always reset (even to None when not supplied)
    opt['formatter'] = formatter
    # merge into the module-level options in place
    _format_options.update(opt)
    # set the C variable for legacy mode
    if _format_options['legacy'] == '1.13':
        set_legacy_print_mode(113)
        # reset the sign option in legacy mode to avoid confusion
        _format_options['sign'] = '-'
    elif _format_options['legacy'] is False:
        set_legacy_print_mode(0)
@set_module('numpy')
def get_printoptions():
    """
    Return a copy of the current print options.

    Returns
    -------
    print_opts : dict
        Dictionary of current print options with keys

        - precision : int
        - threshold : int
        - edgeitems : int
        - linewidth : int
        - suppress : bool
        - nanstr : str
        - infstr : str
        - formatter : dict of callables
        - sign : str

        For a full description of these options, see `set_printoptions`.

    See Also
    --------
    set_printoptions, printoptions, set_string_function
    """
    # hand back a shallow copy so callers cannot mutate the module state
    return dict(_format_options)
@set_module('numpy')
@contextlib.contextmanager
def printoptions(*args, **kwargs):
    """Context manager for setting print options.

    Applies `set_printoptions` for the duration of the ``with`` block and
    restores the previous options on exit.  The value yielded by the
    ``as``-clause is the dict of options in effect inside the block.

    Examples
    --------
    >>> from numpy.testing import assert_equal
    >>> with np.printoptions(precision=2):
    ...     np.array([2.0]) / 3
    array([0.67])

    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved = np.get_printoptions()
    try:
        np.set_printoptions(*args, **kwargs)
        yield np.get_printoptions()
    finally:
        # restore whatever was in effect before entering the block
        np.set_printoptions(**saved)
def _leading_trailing(a, edgeitems, index=()):
"""
Keep only the N-D corners (leading and trailing edges) of an array.
Should be passed a base-class ndarray, since it makes no guarantees about
preserving subclasses.
"""
axis = len(index)
if axis == a.ndim:
return a[index]
if a.shape[axis] > 2*edgeitems:
return concatenate((
_leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]),
_leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:])
), axis=axis)
else:
return _leading_trailing(a, edgeitems, index + np.index_exp[:])
def _object_format(o):
""" Object arrays containing lists should be printed unambiguously """
if type(o) is list:
fmt = 'list({!r})'
else:
fmt = '{!r}'
return fmt.format(o)
def repr_format(x):
    """Return ``repr(x)``; default formatter for string-typed elements."""
    return '{!r}'.format(x)
def str_format(x):
    """Return ``str(x)``; default formatter for unstructured void scalars."""
    return '{!s}'.format(x)
def _get_formatdict(data, *, precision, floatmode, suppress, sign, legacy,
formatter, **kwargs):
# note: extra arguments in kwargs are ignored
# wrapped in lambdas to avoid taking a code path with the wrong type of data
formatdict = {
'bool': lambda: BoolFormat(data),
'int': lambda: IntegerFormat(data),
'float': lambda: FloatingFormat(
data, precision, floatmode, suppress, sign, legacy=legacy),
'longfloat': lambda: FloatingFormat(
data, precision, floatmode, suppress, sign, legacy=legacy),
'complexfloat': lambda: ComplexFloatingFormat(
data, precision, floatmode, suppress, sign, legacy=legacy),
'longcomplexfloat': lambda: ComplexFloatingFormat(
data, precision, floatmode, suppress, sign, legacy=legacy),
'datetime': lambda: DatetimeFormat(data, legacy=legacy),
'timedelta': lambda: TimedeltaFormat(data),
'object': lambda: _object_format,
'void': lambda: str_format,
'numpystr': lambda: repr_format}
# we need to wrap values in `formatter` in a lambda, so that the interface
# is the same as the above values.
def indirect(x):
return lambda: x
if formatter is not None:
fkeys = [k for k in formatter.keys() if formatter[k] is not None]
if 'all' in fkeys:
for key in formatdict.keys():
formatdict[key] = indirect(formatter['all'])
if 'int_kind' in fkeys:
for key in ['int']:
formatdict[key] = indirect(formatter['int_kind'])
if 'float_kind' in fkeys:
for key in ['float', 'longfloat']:
formatdict[key] = indirect(formatter['float_kind'])
if 'complex_kind' in fkeys:
for key in ['complexfloat', 'longcomplexfloat']:
formatdict[key] = indirect(formatter['complex_kind'])
if 'str_kind' in fkeys:
formatdict['numpystr'] = indirect(formatter['str_kind'])
for key in formatdict.keys():
if key in fkeys:
formatdict[key] = indirect(formatter[key])
return formatdict
def _get_format_function(data, **options):
    """
    Pick the element formatting function appropriate for ``data.dtype``.
    """
    dtype_ = data.dtype
    dtypeobj = dtype_.type
    formatdict = _get_formatdict(data, **options)

    # checks are ordered to respect the numpy scalar type hierarchy
    # (e.g. timedelta64 subclasses integer, clongfloat subclasses
    # complexfloating)
    if issubclass(dtypeobj, _nt.bool_):
        return formatdict['bool']()
    if issubclass(dtypeobj, _nt.integer):
        key = 'timedelta' if issubclass(dtypeobj, _nt.timedelta64) else 'int'
        return formatdict[key]()
    if issubclass(dtypeobj, _nt.floating):
        key = 'longfloat' if issubclass(dtypeobj, _nt.longfloat) else 'float'
        return formatdict[key]()
    if issubclass(dtypeobj, _nt.complexfloating):
        key = ('longcomplexfloat' if issubclass(dtypeobj, _nt.clongfloat)
               else 'complexfloat')
        return formatdict[key]()
    if issubclass(dtypeobj, (_nt.unicode_, _nt.string_)):
        return formatdict['numpystr']()
    if issubclass(dtypeobj, _nt.datetime64):
        return formatdict['datetime']()
    if issubclass(dtypeobj, _nt.object_):
        return formatdict['object']()
    if issubclass(dtypeobj, _nt.void):
        # structured dtypes get a dedicated recursive formatter
        if dtype_.names is not None:
            return StructuredVoidFormat.from_data(data, **options)
        return formatdict['void']()
    # unknown scalar types fall back to their repr
    return formatdict['numpystr']()
def _recursive_guard(fillvalue='...'):
"""
Like the python 3.2 reprlib.recursive_repr, but forwards *args and **kwargs
Decorates a function such that if it calls itself with the same first
argument, it returns `fillvalue` instead of recursing.
Largely copied from reprlib.recursive_repr
"""
def decorating_function(f):
repr_running = set()
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
return f(self, *args, **kwargs)
finally:
repr_running.discard(key)
return wrapper
return decorating_function
# gracefully handle recursive calls, when object arrays contain themselves
@_recursive_guard()
def _array2string(a, options, separator=' ', prefix=""):
    # Core worker behind array2string(): `options` is the fully merged print
    # options dict; returns the bracketed string form of `a`.
    # The formatter __init__s in _get_format_function cannot deal with
    # subclasses yet, and we also need to avoid recursion issues in
    # _formatArray with subclasses which return 0d arrays in place of scalars
    data = asarray(a)
    if a.shape == ():
        a = data
    if a.size > options['threshold']:
        # summarize: keep only the edge items and insert "..." between them
        summary_insert = "..."
        data = _leading_trailing(data, options['edgeitems'])
    else:
        summary_insert = ""
    # find the right formatting function for the array
    format_function = _get_format_function(data, **options)
    # skip over "["
    next_line_prefix = " "
    # skip over array(
    next_line_prefix += " "*len(prefix)
    lst = _formatArray(a, format_function, options['linewidth'],
                       next_line_prefix, separator, options['edgeitems'],
                       summary_insert, options['legacy'])
    return lst
def _array2string_dispatcher(
a, max_line_width=None, precision=None,
suppress_small=None, separator=None, prefix=None,
style=None, formatter=None, threshold=None,
edgeitems=None, sign=None, floatmode=None, suffix=None,
*, legacy=None):
return (a,)
@array_function_dispatch(_array2string_dispatcher, module='numpy')
def array2string(a, max_line_width=None, precision=None,
                 suppress_small=None, separator=' ', prefix="",
                 style=np._NoValue, formatter=None, threshold=None,
                 edgeitems=None, sign=None, floatmode=None, suffix="",
                 *, legacy=None):
    """
    Return a string representation of an array.
    Parameters
    ----------
    a : ndarray
        Input array.
    max_line_width : int, optional
        Inserts newlines if text is longer than `max_line_width`.
        Defaults to ``numpy.get_printoptions()['linewidth']``.
    precision : int or None, optional
        Floating point precision.
        Defaults to ``numpy.get_printoptions()['precision']``.
    suppress_small : bool, optional
        Represent numbers "very close" to zero as zero; default is False.
        Very close is defined by precision: if the precision is 8, e.g.,
        numbers smaller (in absolute value) than 5e-9 are represented as
        zero.
        Defaults to ``numpy.get_printoptions()['suppress']``.
    separator : str, optional
        Inserted between elements.
    prefix : str, optional
    suffix : str, optional
        The length of the prefix and suffix strings are used to respectively
        align and wrap the output. An array is typically printed as::
          prefix + array2string(a) + suffix
        The output is left-padded by the length of the prefix string, and
        wrapping is forced at the column ``max_line_width - len(suffix)``.
        It should be noted that the content of prefix and suffix strings are
        not included in the output.
    style : _NoValue, optional
        Has no effect, do not use.
        .. deprecated:: 1.14.0
    formatter : dict of callables, optional
        If not None, the keys should indicate the type(s) that the respective
        formatting function applies to.  Callables should return a string.
        Types that are not specified (by their corresponding keys) are handled
        by the default formatters.  Individual types for which a formatter
        can be set are:
        - 'bool'
        - 'int'
        - 'timedelta' : a `numpy.timedelta64`
        - 'datetime' : a `numpy.datetime64`
        - 'float'
        - 'longfloat' : 128-bit floats
        - 'complexfloat'
        - 'longcomplexfloat' : composed of two 128-bit floats
        - 'void' : type `numpy.void`
        - 'numpystr' : types `numpy.string_` and `numpy.unicode_`
        Other keys that can be used to set a group of types at once are:
        - 'all' : sets all types
        - 'int_kind' : sets 'int'
        - 'float_kind' : sets 'float' and 'longfloat'
        - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
        - 'str_kind' : sets 'numpystr'
    threshold : int, optional
        Total number of array elements which trigger summarization
        rather than full repr.
        Defaults to ``numpy.get_printoptions()['threshold']``.
    edgeitems : int, optional
        Number of array items in summary at beginning and end of
        each dimension.
        Defaults to ``numpy.get_printoptions()['edgeitems']``.
    sign : string, either '-', '+', or ' ', optional
        Controls printing of the sign of floating-point types. If '+', always
        print the sign of positive values. If ' ', always prints a space
        (whitespace character) in the sign position of positive values. If
        '-', omit the sign character of positive values.
        Defaults to ``numpy.get_printoptions()['sign']``.
    floatmode : str, optional
        Controls the interpretation of the `precision` option for
        floating-point types.
        Defaults to ``numpy.get_printoptions()['floatmode']``.
        Can take the following values:
        - 'fixed': Always print exactly `precision` fractional digits,
          even if this would print more or fewer digits than
          necessary to specify the value uniquely.
        - 'unique': Print the minimum number of fractional digits necessary
          to represent each value uniquely. Different elements may
          have a different number of digits.  The value of the
          `precision` option is ignored.
        - 'maxprec': Print at most `precision` fractional digits, but if
          an element can be uniquely represented with fewer digits
          only print it with that many.
        - 'maxprec_equal': Print at most `precision` fractional digits,
          but if every element in the array can be uniquely
          represented with an equal number of fewer digits, use that
          many digits for all elements.
    legacy : string or `False`, optional
        If set to the string `'1.13'` enables 1.13 legacy printing mode. This
        approximates numpy 1.13 print output by including a space in the sign
        position of floats and different behavior for 0d arrays. If set to
        `False`, disables legacy mode. Unrecognized strings will be ignored
        with a warning for forward compatibility.
        .. versionadded:: 1.14.0
    Returns
    -------
    array_str : str
        String representation of the array.
    Raises
    ------
    TypeError
        if a callable in `formatter` does not return a string.
    See Also
    --------
    array_str, array_repr, set_printoptions, get_printoptions
    Notes
    -----
    If a formatter is specified for a certain type, the `precision` keyword is
    ignored for that type.
    This is a very flexible function; `array_repr` and `array_str` are using
    `array2string` internally so keywords with the same name should work
    identically in all three functions.
    Examples
    --------
    >>> x = np.array([1e-16,1,2,3])
    >>> np.array2string(x, precision=2, separator=',',
    ...                 suppress_small=True)
    '[0.,1.,2.,3.]'
    >>> x  = np.arange(3.)
    >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})
    '[0.00 1.00 2.00]'
    >>> x  = np.arange(3)
    >>> np.array2string(x, formatter={'int':lambda x: hex(x)})
    '[0x0 0x1 0x2]'
    """
    # merge the caller's explicit overrides onto the module-level defaults
    overrides = _make_options_dict(precision, threshold, edgeitems,
                                   max_line_width, suppress_small, None, None,
                                   sign, formatter, floatmode, legacy)
    options = _format_options.copy()
    options.update(overrides)
    if options['legacy'] == '1.13':
        if style is np._NoValue:
            style = repr
        # 1.13 printed 0d arrays via the (deprecated) `style` callable
        if a.shape == () and a.dtype.names is None:
            return style(a.item())
    elif style is not np._NoValue:
        # Deprecation 11-9-2017 v1.14
        warnings.warn("'style' argument is deprecated and no longer functional"
                      " except in 1.13 'legacy' mode",
                      DeprecationWarning, stacklevel=3)
    if options['legacy'] != '1.13':
        # reserve room for the suffix so wrapping happens before it
        options['linewidth'] -= len(suffix)
    # treat as a null array if any of shape elements == 0
    if a.size == 0:
        return "[]"
    return _array2string(a, options, separator, prefix)
def _extendLine(s, line, word, line_width, next_line_prefix, legacy):
needs_wrap = len(line) + len(word) > line_width
if legacy != '1.13':
# don't wrap lines if it won't help
if len(line) <= len(next_line_prefix):
needs_wrap = False
if needs_wrap:
s += line.rstrip() + "\n"
line = next_line_prefix
line += word
return s, line
def _extendLine_pretty(s, line, word, line_width, next_line_prefix, legacy):
"""
Extends line with nicely formatted (possibly multi-line) string ``word``.
"""
words = word.splitlines()
if len(words) == 1 or legacy == '1.13':
return _extendLine(s, line, word, line_width, next_line_prefix, legacy)
max_word_length = max(len(word) for word in words)
if (len(line) + max_word_length > line_width and
len(line) > len(next_line_prefix)):
s += line.rstrip() + '\n'
line = next_line_prefix + words[0]
indent = next_line_prefix
else:
indent = len(line)*' '
line += words[0]
for word in words[1::]:
s += line.rstrip() + '\n'
line = indent + word
suffix_length = max_word_length - len(words[-1])
line += suffix_length*' '
return s, line
def _formatArray(a, format_function, line_width, next_line_prefix,
                 separator, edge_items, summary_insert, legacy):
    """formatArray is designed for two modes of operation:
    1. Full output
    2. Summarized output

    Returns the bracketed, possibly multi-line string form of `a`; when
    `summary_insert` is non-empty, axes longer than 2*edge_items are
    summarized with it between the leading and trailing edges.
    """
    def recurser(index, hanging_indent, curr_width):
        """
        By using this local function, we don't need to recurse with all the
        arguments. Since this function is not created recursively, the cost is
        not significant
        """
        axis = len(index)
        axes_left = a.ndim - axis
        if axes_left == 0:
            # scalar reached: delegate to the per-element formatter
            return format_function(a[index])
        # when recursing, add a space to align with the [ added, and reduce the
        # length of the line by 1
        next_hanging_indent = hanging_indent + ' '
        if legacy == '1.13':
            next_width = curr_width
        else:
            next_width = curr_width - len(']')
        a_len = a.shape[axis]
        show_summary = summary_insert and 2*edge_items < a_len
        if show_summary:
            leading_items = edge_items
            trailing_items = edge_items
        else:
            leading_items = 0
            trailing_items = a_len
        # stringify the array with the hanging indent on the first line too
        s = ''
        # last axis (rows) - wrap elements if they would not fit on one line
        if axes_left == 1:
            # the length up until the beginning of the separator / bracket
            if legacy == '1.13':
                elem_width = curr_width - len(separator.rstrip())
            else:
                elem_width = curr_width - max(len(separator.rstrip()), len(']'))
            line = hanging_indent
            for i in range(leading_items):
                word = recurser(index + (i,), next_hanging_indent, next_width)
                s, line = _extendLine_pretty(
                    s, line, word, elem_width, hanging_indent, legacy)
                line += separator
            if show_summary:
                s, line = _extendLine(
                    s, line, summary_insert, elem_width, hanging_indent, legacy)
                if legacy == '1.13':
                    line += ", "
                else:
                    line += separator
            # trailing items; the very last one is handled below without a
            # trailing separator
            for i in range(trailing_items, 1, -1):
                word = recurser(index + (-i,), next_hanging_indent, next_width)
                s, line = _extendLine_pretty(
                    s, line, word, elem_width, hanging_indent, legacy)
                line += separator
            if legacy == '1.13':
                # width of the separator is not considered on 1.13
                elem_width = curr_width
            word = recurser(index + (-1,), next_hanging_indent, next_width)
            s, line = _extendLine_pretty(
                s, line, word, elem_width, hanging_indent, legacy)
            s += line
        # other axes - insert newlines between rows
        else:
            s = ''
            # deeper axes separate their sub-blocks with more blank lines
            line_sep = separator.rstrip() + '\n'*(axes_left - 1)
            for i in range(leading_items):
                nested = recurser(index + (i,), next_hanging_indent, next_width)
                s += hanging_indent + nested + line_sep
            if show_summary:
                if legacy == '1.13':
                    # trailing space, fixed nbr of newlines, and fixed separator
                    s += hanging_indent + summary_insert + ", \n"
                else:
                    s += hanging_indent + summary_insert + line_sep
            for i in range(trailing_items, 1, -1):
                nested = recurser(index + (-i,), next_hanging_indent,
                                  next_width)
                s += hanging_indent + nested + line_sep
            nested = recurser(index + (-1,), next_hanging_indent, next_width)
            s += hanging_indent + nested
        # remove the hanging indent, and wrap in []
        s = '[' + s[len(hanging_indent):] + ']'
        return s
    try:
        # invoke the recursive part with an initial index and prefix
        return recurser(index=(),
                        hanging_indent=next_line_prefix,
                        curr_width=line_width)
    finally:
        # recursive closures have a cyclic reference to themselves, which
        # requires gc to collect (gh-10620). To avoid this problem, for
        # performance and PyPy friendliness, we break the cycle:
        recurser = None
def _none_or_positive_arg(x, name):
if x is None:
return -1
if x < 0:
raise ValueError("{} must be >= 0".format(name))
return x
class FloatingFormat:
    """ Formatter for subtypes of np.floating

    Scans the data once in ``fillFormat`` to pick a common field width,
    precision and notation (positional vs. scientific), then formats each
    element via dragon4 in ``__call__``.
    """
    def __init__(self, data, precision, floatmode, suppress_small, sign=False,
                 *, legacy=None):
        # for backcompatibility, accept bools
        if isinstance(sign, bool):
            sign = '+' if sign else '-'
        self._legacy = legacy
        if self._legacy == '1.13':
            # when not 0d, legacy does not support '-'
            if data.shape != () and sign == '-':
                sign = ' '
        self.floatmode = floatmode
        if floatmode == 'unique':
            # 'unique' prints the shortest uniquely-identifying repr, so the
            # requested precision is ignored
            self.precision = None
        else:
            self.precision = precision
        # None becomes the -1 sentinel expected by the dragon4 routines
        self.precision = _none_or_positive_arg(self.precision, 'precision')
        self.suppress_small = suppress_small
        self.sign = sign
        self.exp_format = False
        self.large_exponent = False
        self.fillFormat(data)
    def fillFormat(self, data):
        """Scan `data` and set pad_left/pad_right/precision/trim/exp_size so
        that all elements print with a consistent width."""
        # only the finite values are used to compute the number of digits
        finite_vals = data[isfinite(data)]
        # choose exponential mode based on the non-zero finite values:
        abs_non_zero = absolute(finite_vals[finite_vals != 0])
        if len(abs_non_zero) != 0:
            max_val = np.max(abs_non_zero)
            min_val = np.min(abs_non_zero)
            with errstate(over='ignore'): # division can overflow
                if max_val >= 1.e8 or (not self.suppress_small and
                        (min_val < 0.0001 or max_val/min_val > 1000.)):
                    self.exp_format = True
        # do a first pass of printing all the numbers, to determine sizes
        if len(finite_vals) == 0:
            # nothing finite: widths only need to cover nan/inf strings below
            self.pad_left = 0
            self.pad_right = 0
            self.trim = '.'
            self.exp_size = -1
            self.unique = True
            self.min_digits = None
        elif self.exp_format:
            trim, unique = '.', True
            if self.floatmode == 'fixed' or self._legacy == '1.13':
                trim, unique = 'k', False
            strs = (dragon4_scientific(x, precision=self.precision,
                               unique=unique, trim=trim, sign=self.sign == '+')
                    for x in finite_vals)
            frac_strs, _, exp_strs = zip(*(s.partition('e') for s in strs))
            int_part, frac_part = zip(*(s.split('.') for s in frac_strs))
            self.exp_size = max(len(s) for s in exp_strs) - 1
            self.trim = 'k'
            self.precision = max(len(s) for s in frac_part)
            self.min_digits = self.precision
            self.unique = unique
            # for back-compat with np 1.13, use 2 spaces & sign and full prec
            if self._legacy == '1.13':
                self.pad_left = 3
            else:
                # this should be only 1 or 2. Can be calculated from sign.
                self.pad_left = max(len(s) for s in int_part)
            # pad_right is only needed for nan length calculation
            self.pad_right = self.exp_size + 2 + self.precision
        else:
            trim, unique = '.', True
            if self.floatmode == 'fixed':
                trim, unique = 'k', False
            strs = (dragon4_positional(x, precision=self.precision,
                                       fractional=True,
                                       unique=unique, trim=trim,
                                       sign=self.sign == '+')
                    for x in finite_vals)
            int_part, frac_part = zip(*(s.split('.') for s in strs))
            if self._legacy == '1.13':
                self.pad_left = 1 + max(len(s.lstrip('-+')) for s in int_part)
            else:
                self.pad_left = max(len(s) for s in int_part)
            self.pad_right = max(len(s) for s in frac_part)
            self.exp_size = -1
            self.unique = unique
            if self.floatmode in ['fixed', 'maxprec_equal']:
                self.precision = self.min_digits = self.pad_right
                self.trim = 'k'
            else:
                self.trim = '.'
                self.min_digits = 0
        if self._legacy != '1.13':
            # account for sign = ' ' by adding one to pad_left
            if self.sign == ' ' and not any(np.signbit(finite_vals)):
                self.pad_left += 1
        # if there are non-finite values, may need to increase pad_left
        if data.size != finite_vals.size:
            neginf = self.sign != '-' or any(data[isinf(data)] < 0)
            nanlen = len(_format_options['nanstr'])
            inflen = len(_format_options['infstr']) + neginf
            offset = self.pad_right + 1  # +1 for decimal pt
            self.pad_left = max(self.pad_left, nanlen - offset, inflen - offset)
    def __call__(self, x):
        """Return the padded string form of the scalar `x`."""
        if not np.isfinite(x):
            with errstate(invalid='ignore'):
                if np.isnan(x):
                    sign = '+' if self.sign == '+' else ''
                    ret = sign + _format_options['nanstr']
                else:  # isinf
                    sign = '-' if x < 0 else '+' if self.sign == '+' else ''
                    ret = sign + _format_options['infstr']
            # right-align within the common field width
            return ' '*(self.pad_left + self.pad_right + 1 - len(ret)) + ret
        if self.exp_format:
            return dragon4_scientific(x,
                                      precision=self.precision,
                                      min_digits=self.min_digits,
                                      unique=self.unique,
                                      trim=self.trim,
                                      sign=self.sign == '+',
                                      pad_left=self.pad_left,
                                      exp_digits=self.exp_size)
        else:
            return dragon4_positional(x,
                                      precision=self.precision,
                                      min_digits=self.min_digits,
                                      unique=self.unique,
                                      fractional=True,
                                      trim=self.trim,
                                      sign=self.sign == '+',
                                      pad_left=self.pad_left,
                                      pad_right=self.pad_right)
@set_module('numpy')
def format_float_scientific(x, precision=None, unique=True, trim='k',
                            sign=False, pad_left=None, exp_digits=None,
                            min_digits=None):
    """
    Format a floating-point scalar as a decimal string in scientific notation.
    Provides control over rounding, trimming and padding. Uses and assumes
    IEEE unbiased rounding. Uses the "Dragon4" algorithm.
    Parameters
    ----------
    x : python float or numpy floating scalar
        Value to format.
    precision : non-negative integer or None, optional
        Maximum number of digits to print. May be None if `unique` is
        `True`, but must be an integer if unique is `False`.
    unique : boolean, optional
        If `True`, use a digit-generation strategy which gives the shortest
        representation which uniquely identifies the floating-point number from
        other values of the same type, by judicious rounding. If `precision`
        is given fewer digits than necessary can be printed. If `min_digits`
        is given more can be printed, in which cases the last digit is rounded
        with unbiased rounding.
        If `False`, digits are generated as if printing an infinite-precision
        value and stopping after `precision` digits, rounding the remaining
        value with unbiased rounding
    trim : one of 'k', '.', '0', '-', optional
        Controls post-processing trimming of trailing digits, as follows:
        * 'k' : keep trailing zeros, keep decimal point (no trimming)
        * '.' : trim all trailing zeros, leave decimal point
        * '0' : trim all but the zero before the decimal point. Insert the
          zero if it is missing.
        * '-' : trim trailing zeros and any trailing decimal point
    sign : boolean, optional
        Whether to show the sign for positive values.
    pad_left : non-negative integer, optional
        Pad the left side of the string with whitespace until at least that
        many characters are to the left of the decimal point.
    exp_digits : non-negative integer, optional
        Pad the exponent with zeros until it contains at least this many digits.
        If omitted, the exponent will be at least 2 digits.
    min_digits : non-negative integer or None, optional
        Minimum number of digits to print. This only has an effect for
        `unique=True`. In that case more digits than necessary to uniquely
        identify the value may be printed and rounded unbiased.
        .. versionadded:: 1.21.0
    Returns
    -------
    rep : string
        The string representation of the floating point value
    See Also
    --------
    format_float_positional
    Examples
    --------
    >>> np.format_float_scientific(np.float32(np.pi))
    '3.1415927e+00'
    >>> s = np.float32(1.23e24)
    >>> np.format_float_scientific(s, unique=False, precision=15)
    '1.230000071797338e+24'
    >>> np.format_float_scientific(s, exp_digits=4)
    '1.23e+0024'
    """
    # translate None into the -1 sentinel expected by dragon4, rejecting
    # negative values with a clear error message
    precision = _none_or_positive_arg(precision, 'precision')
    pad_left = _none_or_positive_arg(pad_left, 'pad_left')
    exp_digits = _none_or_positive_arg(exp_digits, 'exp_digits')
    min_digits = _none_or_positive_arg(min_digits, 'min_digits')
    if min_digits > 0 and precision > 0 and min_digits > precision:
        raise ValueError("min_digits must be less than or equal to precision")
    return dragon4_scientific(x, precision=precision, unique=unique,
                              trim=trim, sign=sign, pad_left=pad_left,
                              exp_digits=exp_digits, min_digits=min_digits)
@set_module('numpy')
def format_float_positional(x, precision=None, unique=True,
                            fractional=True, trim='k', sign=False,
                            pad_left=None, pad_right=None, min_digits=None):
    """
    Format a floating-point scalar as a decimal string in positional notation.

    Provides control over rounding, trimming and padding. Uses and assumes
    IEEE unbiased rounding. Uses the "Dragon4" algorithm.

    Parameters
    ----------
    x : python float or numpy floating scalar
        Value to format.
    precision : non-negative integer or None, optional
        Maximum number of digits to print. May be None if `unique` is
        `True`, but must be an integer if unique is `False`.
    unique : boolean, optional
        If `True`, use a digit-generation strategy which gives the shortest
        representation which uniquely identifies the floating-point number from
        other values of the same type, by judicious rounding. If `precision`
        is given fewer digits than necessary can be printed, or if `min_digits`
        is given more can be printed, in which cases the last digit is rounded
        with unbiased rounding.
        If `False`, digits are generated as if printing an infinite-precision
        value and stopping after `precision` digits, rounding the remaining
        value with unbiased rounding
    fractional : boolean, optional
        If `True`, the cutoffs of `precision` and `min_digits` refer to the
        total number of digits after the decimal point, including leading
        zeros.
        If `False`, `precision` and `min_digits` refer to the total number of
        significant digits, before or after the decimal point, ignoring leading
        zeros.
    trim : one of 'k', '.', '0', '-', optional
        Controls post-processing trimming of trailing digits, as follows:

        * 'k' : keep trailing zeros, keep decimal point (no trimming)
        * '.' : trim all trailing zeros, leave decimal point
        * '0' : trim all but the zero before the decimal point. Insert the
          zero if it is missing.
        * '-' : trim trailing zeros and any trailing decimal point
    sign : boolean, optional
        Whether to show the sign for positive values.
    pad_left : non-negative integer, optional
        Pad the left side of the string with whitespace until at least that
        many characters are to the left of the decimal point.
    pad_right : non-negative integer, optional
        Pad the right side of the string with whitespace until at least that
        many characters are to the right of the decimal point.
    min_digits : non-negative integer or None, optional
        Minimum number of digits to print. Only has an effect if `unique=True`
        in which case additional digits past those necessary to uniquely
        identify the value may be printed, rounding the last additional digit.

        .. versionadded:: 1.21.0

    Returns
    -------
    rep : string
        The string representation of the floating point value

    See Also
    --------
    format_float_scientific

    Examples
    --------
    >>> np.format_float_positional(np.float32(np.pi))
    '3.1415927'
    >>> np.format_float_positional(np.float16(np.pi))
    '3.14'
    >>> np.format_float_positional(np.float16(0.3))
    '0.3'
    >>> np.format_float_positional(np.float16(0.3), unique=False, precision=10)
    '0.3000488281'
    """
    # NOTE(review): _none_or_positive_arg (defined elsewhere in this module)
    # presumably maps None to a negative sentinel so the `> 0` guards below
    # skip validation for omitted arguments — confirm against its definition.
    precision = _none_or_positive_arg(precision, 'precision')
    pad_left = _none_or_positive_arg(pad_left, 'pad_left')
    pad_right = _none_or_positive_arg(pad_right, 'pad_right')
    min_digits = _none_or_positive_arg(min_digits, 'min_digits')
    if not fractional and precision == 0:
        raise ValueError("precision must be greater than 0 if "
                         "fractional=False")
    if min_digits > 0 and precision > 0 and min_digits > precision:
        raise ValueError("min_digits must be less than or equal to precision")
    # The actual digit generation is done by the C-implemented Dragon4 code.
    return dragon4_positional(x, precision=precision, unique=unique,
                              fractional=fractional, trim=trim,
                              sign=sign, pad_left=pad_left,
                              pad_right=pad_right, min_digits=min_digits)
class IntegerFormat:
    """Right-justifying formatter for integer array elements.

    The field width is fixed once at construction, from the widest decimal
    rendering of the array's extreme values, so elements align in columns.
    """

    def __init__(self, data):
        # An empty array has no extremes; fall back to a zero-width field.
        if data.size == 0:
            width = 0
        else:
            extremes = (np.max(data), np.min(data))
            width = max(len(str(value)) for value in extremes)
        self.format = '%{}d'.format(width)

    def __call__(self, x):
        return self.format % x
class BoolFormat:
    """Formatter for boolean array elements.

    Inside an n-d array, " True" is padded to the width of "False" so the
    columns align; a 0-d array prints its lone value without padding.
    """

    def __init__(self, data, **kwargs):
        if data.shape == ():
            self.truestr = 'True'
        else:
            self.truestr = ' True'

    def __call__(self, x):
        if x:
            return self.truestr
        return "False"
class ComplexFloatingFormat:
    """ Formatter for subtypes of np.complexfloating """

    def __init__(self, x, precision, floatmode, suppress_small,
                 sign=False, *, legacy=None):
        # for backcompatibility, accept bools and normalize to '+'/'-'
        if isinstance(sign, bool):
            sign = '+' if sign else '-'

        if legacy == '1.13':
            # 1.13 used different float modes for the two components
            real_mode, imag_mode = 'maxprec_equal', 'maxprec'
        else:
            real_mode = imag_mode = floatmode

        self.real_format = FloatingFormat(
            x.real, precision, real_mode, suppress_small,
            sign=sign, legacy=legacy
        )
        # the imaginary part always carries an explicit sign so both halves
        # read as a single complex literal
        self.imag_format = FloatingFormat(
            x.imag, precision, imag_mode, suppress_small,
            sign='+', legacy=legacy
        )

    def __call__(self, x):
        real_str = self.real_format(x.real)
        imag_str = self.imag_format(x.imag)
        # add the 'j' before the terminal whitespace in the imaginary part
        cut = len(imag_str.rstrip())
        return real_str + imag_str[:cut] + 'j' + imag_str[cut:]
class _TimelikeFormat:
    # Shared base for datetime64/timedelta64 formatters: computes a common
    # field width from the non-NaT elements and right-justifies everything,
    # including the 'NaT' placeholder.

    def __init__(self, data):
        # Width is derived from valid values only; NaT has no rendering of
        # its own beyond the fixed placeholder handled below.
        non_nat = data[~isnat(data)]
        if len(non_nat) > 0:
            # Max str length of non-NaT elements
            max_str_len = max(len(self._format_non_nat(np.max(non_nat))),
                              len(self._format_non_nat(np.min(non_nat))))
        else:
            max_str_len = 0
        if len(non_nat) < data.size:
            # data contains a NaT; ensure the field fits "'NaT'" (5 chars)
            max_str_len = max(max_str_len, 5)
        self._format = '%{}s'.format(max_str_len)
        self._nat = "'NaT'".rjust(max_str_len)

    def _format_non_nat(self, x):
        # override in subclass
        raise NotImplementedError

    def __call__(self, x):
        if isnat(x):
            return self._nat
        else:
            return self._format % self._format_non_nat(x)
class DatetimeFormat(_TimelikeFormat):
    # Formatter for datetime64 array elements; values render as quoted
    # ISO strings via datetime_as_string.

    def __init__(self, x, unit=None, timezone=None, casting='same_kind',
                 legacy=False):
        # Get the unit from the dtype
        if unit is None:
            if x.dtype.kind == 'M':
                unit = datetime_data(x.dtype)[0]
            else:
                unit = 's'

        if timezone is None:
            timezone = 'naive'
        self.timezone = timezone
        self.unit = unit
        self.casting = casting
        self.legacy = legacy

        # must be called after the above are configured, since the base
        # __init__ calls _format_non_nat, which reads them
        super().__init__(x)

    def __call__(self, x):
        if self.legacy == '1.13':
            # 1.13 mode bypasses the common-width padding of the base class
            return self._format_non_nat(x)
        return super().__call__(x)

    def _format_non_nat(self, x):
        # quote the ISO string so it reads as a datetime literal in reprs
        return "'%s'" % datetime_as_string(x,
                                           unit=self.unit,
                                           timezone=self.timezone,
                                           casting=self.casting)
class TimedeltaFormat(_TimelikeFormat):
    # Formatter for timedelta64 array elements: non-NaT values print as
    # their underlying int64 tick count.
    def _format_non_nat(self, x):
        return str(x.astype('i8'))
class SubArrayFormat:
    """Render a sub-array field as nested "[...]" lists, applying an
    element formatter at the innermost level."""

    def __init__(self, format_function):
        self.format_function = format_function

    def __call__(self, arr):
        # Recurse through the leading dimensions; at the last dimension
        # switch from self to the per-element formatter.
        formatter = self.format_function if arr.ndim <= 1 else self
        return "[{}]".format(", ".join(formatter(item) for item in arr))
class StructuredVoidFormat:
    """
    Formatter for structured np.void objects.

    This does not work on structured alias types like np.dtype(('i4', 'i2,i2')),
    as alias scalars lose their field information, and the implementation
    relies upon np.void.__getitem__.
    """

    def __init__(self, format_functions):
        self.format_functions = format_functions

    @classmethod
    def from_data(cls, data, **options):
        """
        This is a second way to initialize StructuredVoidFormat, using the raw data
        as input. Added to avoid changing the signature of __init__.
        """
        format_functions = []
        for name in data.dtype.names:
            fn = _get_format_function(data[name], **options)
            # sub-array fields render as nested lists rather than scalars
            if data.dtype[name].shape != ():
                fn = SubArrayFormat(fn)
            format_functions.append(fn)
        return cls(format_functions)

    def __call__(self, x):
        pieces = [fn(field)
                  for field, fn in zip(x, self.format_functions)]
        if len(pieces) == 1:
            # trailing comma distinguishes a 1-field record from parentheses
            return "({},)".format(pieces[0])
        return "({})".format(", ".join(pieces))
def _void_scalar_repr(x):
    """
    Implements the repr for structured-void scalars. It is called from the
    scalartypes.c.src code, and is placed here because it uses the elementwise
    formatters defined above.
    """
    # Wrap the scalar in a 0-d array so from_data can inspect its dtype,
    # then format the scalar itself using the module-wide print options.
    return StructuredVoidFormat.from_data(array(x), **_format_options)(x)
# Scalar types whose repr round-trips without an explicit dtype= argument.
_typelessdata = [int_, float_, complex_, bool_]
# These checks can only succeed where the numpy C integer type subclasses
# the builtin int (a Python 2 possibility); in that case those types are
# typeless as well.
if issubclass(intc, int):
    _typelessdata.append(intc)
if issubclass(longlong, int):
    _typelessdata.append(longlong)
def dtype_is_implied(dtype):
    """
    Determine if the given dtype is implied by the representation of its values.

    Parameters
    ----------
    dtype : dtype
        Data type

    Returns
    -------
    implied : bool
        True if the dtype is implied by the representation of its values.

    Examples
    --------
    >>> np.core.arrayprint.dtype_is_implied(int)
    True
    >>> np.array([1, 2, 3], int)
    array([1, 2, 3])
    >>> np.core.arrayprint.dtype_is_implied(np.int8)
    False
    >>> np.array([1, 2, 3], np.int8)
    array([1, 2, 3], dtype=int8)
    """
    dtype = np.dtype(dtype)
    # legacy 1.13 mode always printed dtype=bool explicitly
    if _format_options['legacy'] == '1.13' and dtype.type == bool_:
        return False

    # not just void types can be structured, and names are not part of the repr
    if dtype.names is not None:
        return False
    return dtype.type in _typelessdata
def dtype_short_repr(dtype):
    """
    Convert a dtype to a short form which evaluates to the same dtype.

    The intent is roughly that the following holds

    >>> from numpy import *
    >>> dt = np.int64([1, 2]).dtype
    >>> assert eval(dtype_short_repr(dt)) == dt
    """
    if dtype.names is not None:
        # structured dtypes give a list or tuple repr
        return str(dtype)
    if issubclass(dtype.type, flexible):
        # handle these separately so they don't give garbage like str256
        return "'%s'" % str(dtype)

    name = dtype.name
    # quote typenames which can't be represented as python variable names
    if name:
        looks_like_identifier = name[0].isalpha() and name.isalnum()
        if not looks_like_identifier:
            name = repr(name)
    return name
def _array_repr_implementation(
        arr, max_line_width=None, precision=None, suppress_small=None,
        array2string=array2string):
    """Internal version of array_repr() that allows overriding array2string."""
    if max_line_width is None:
        max_line_width = _format_options['linewidth']

    # Subclasses print under their own class name instead of "array".
    if type(arr) is not ndarray:
        class_name = type(arr).__name__
    else:
        class_name = "array"

    # dtype can be omitted only when it is implied AND there is at least
    # one element from which to imply it.
    skipdtype = dtype_is_implied(arr.dtype) and arr.size > 0

    prefix = class_name + "("
    suffix = ")" if skipdtype else ","

    if (_format_options['legacy'] == '1.13' and
            arr.shape == () and not arr.dtype.names):
        # 1.13 printed unstructured 0d arrays via the scalar's repr
        lst = repr(arr.item())
    elif arr.size > 0 or arr.shape == (0,):
        lst = array2string(arr, max_line_width, precision, suppress_small,
                           ', ', prefix, suffix=suffix)
    else:  # show zero-length shape unless it is (0,)
        lst = "[], shape=%s" % (repr(arr.shape),)

    arr_str = prefix + lst + suffix

    if skipdtype:
        return arr_str

    dtype_str = "dtype={})".format(dtype_short_repr(arr.dtype))

    # compute whether we should put dtype on a new line: Do so if adding the
    # dtype would extend the last line past max_line_width.
    # Note: This line gives the correct result even when rfind returns -1.
    last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1)
    spacer = " "
    if _format_options['legacy'] == '1.13':
        if issubclass(arr.dtype.type, flexible):
            spacer = '\n' + ' '*len(class_name + "(")
    elif last_line_len + len(dtype_str) + 1 > max_line_width:
        spacer = '\n' + ' '*len(class_name + "(")

    return arr_str + spacer + dtype_str
def _array_repr_dispatcher(
        arr, max_line_width=None, precision=None, suppress_small=None):
    # __array_function__ dispatcher: only `arr` participates in dispatch.
    return (arr,)
@array_function_dispatch(_array_repr_dispatcher, module='numpy')
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
    """
    Return the string representation of an array.

    Parameters
    ----------
    arr : ndarray
        Input array.
    max_line_width : int, optional
        Inserts newlines if text is longer than `max_line_width`.
        Defaults to ``numpy.get_printoptions()['linewidth']``.
    precision : int, optional
        Floating point precision.
        Defaults to ``numpy.get_printoptions()['precision']``.
    suppress_small : bool, optional
        Represent numbers "very close" to zero as zero; default is False.
        Very close is defined by precision: if the precision is 8, e.g.,
        numbers smaller (in absolute value) than 5e-9 are represented as
        zero.
        Defaults to ``numpy.get_printoptions()['suppress']``.

    Returns
    -------
    string : str
        The string representation of an array.

    See Also
    --------
    array_str, array2string, set_printoptions

    Examples
    --------
    >>> np.array_repr(np.array([1,2]))
    'array([1, 2])'
    >>> np.array_repr(np.ma.array([0.]))
    'MaskedArray([0.])'
    >>> np.array_repr(np.array([], np.int32))
    'array([], dtype=int32)'
    >>> x = np.array([1e-6, 4e-7, 2, 3])
    >>> np.array_repr(x, precision=6, suppress_small=True)
    'array([0.000001, 0.      , 2.      , 3.      ])'
    """
    # Thin public wrapper; the real work is in _array_repr_implementation,
    # which is kept separate so the default (undispatched) version can be
    # bound by set_string_function.
    return _array_repr_implementation(
        arr, max_line_width, precision, suppress_small)
@_recursive_guard()
def _guarded_repr_or_str(v):
    # bytes keep their b'...' quoting via repr; everything else uses str.
    # The guard returns '...' should an object array contain itself.
    return repr(v) if isinstance(v, bytes) else str(v)
def _array_str_implementation(
        a, max_line_width=None, precision=None, suppress_small=None,
        array2string=array2string):
    """Internal version of array_str() that allows overriding array2string."""
    if (_format_options['legacy'] == '1.13' and
            a.shape == () and not a.dtype.names):
        # 1.13 printed unstructured 0d arrays via the scalar's str
        return str(a.item())

    # the str of 0d arrays is a special case: It should appear like a scalar,
    # so floats are not truncated by `precision`, and strings are not wrapped
    # in quotes. So we return the str of the scalar value.
    if a.shape == ():
        # obtain a scalar and call str on it, avoiding problems for subclasses
        # for which indexing with () returns a 0d instead of a scalar by using
        # ndarray's getindex. Also guard against recursive 0d object arrays.
        return _guarded_repr_or_str(np.ndarray.__getitem__(a, ()))

    return array2string(a, max_line_width, precision, suppress_small, ' ', "")
def _array_str_dispatcher(
        a, max_line_width=None, precision=None, suppress_small=None):
    # __array_function__ dispatcher: only `a` participates in dispatch.
    return (a,)
@array_function_dispatch(_array_str_dispatcher, module='numpy')
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
    """
    Return a string representation of the data in an array.

    The data in the array is returned as a single string. This function is
    similar to `array_repr`, the difference being that `array_repr` also
    returns information on the kind of array and its data type.

    Parameters
    ----------
    a : ndarray
        Input array.
    max_line_width : int, optional
        Inserts newlines if text is longer than `max_line_width`.
        Defaults to ``numpy.get_printoptions()['linewidth']``.
    precision : int, optional
        Floating point precision.
        Defaults to ``numpy.get_printoptions()['precision']``.
    suppress_small : bool, optional
        Represent numbers "very close" to zero as zero; default is False.
        Very close is defined by precision: if the precision is 8, e.g.,
        numbers smaller (in absolute value) than 5e-9 are represented as
        zero.
        Defaults to ``numpy.get_printoptions()['suppress']``.

    See Also
    --------
    array2string, array_repr, set_printoptions

    Examples
    --------
    >>> np.array_str(np.arange(3))
    '[0 1 2]'
    """
    # Thin public wrapper; the real work is in _array_str_implementation,
    # which is kept separate so the default (undispatched) version can be
    # bound by set_string_function.
    return _array_str_implementation(
        a, max_line_width, precision, suppress_small)
# needed if __array_function__ is disabled
_array2string_impl = getattr(array2string, '__wrapped__', array2string)
# Defaults installed by set_string_function(None); they are bound to the
# undecorated array2string so they bypass __array_function__ dispatch.
_default_array_str = functools.partial(_array_str_implementation,
                                       array2string=_array2string_impl)
_default_array_repr = functools.partial(_array_repr_implementation,
                                        array2string=_array2string_impl)
def set_string_function(f, repr=True):
    """
    Set a Python function to be used when pretty printing arrays.

    Parameters
    ----------
    f : function or None
        Function to be used to pretty print arrays. The function should expect
        a single array argument and return a string of the representation of
        the array. If None, the function is reset to the default NumPy function
        to print arrays.
    repr : bool, optional
        If True (default), the function for pretty printing (``__repr__``)
        is set, if False the function that returns the default string
        representation (``__str__``) is set.

    See Also
    --------
    set_printoptions, get_printoptions

    Examples
    --------
    >>> def pprint(arr):
    ...     return 'HA! - What are you going to do now?'
    ...
    >>> np.set_string_function(pprint)
    >>> a = np.arange(10)
    >>> a
    HA! - What are you going to do now?
    >>> _ = a
    >>> # [0 1 2 3 4 5 6 7 8 9]

    We can reset the function to the default:

    >>> np.set_string_function(None)
    >>> a
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    `repr` affects either pretty printing or normal string representation.
    Note that ``__repr__`` is still affected by setting ``__str__``
    because the width of each array element in the returned string becomes
    equal to the length of the result of ``__str__()``.

    >>> x = np.arange(4)
    >>> np.set_string_function(lambda x:'random', repr=False)
    >>> x.__str__()
    'random'
    >>> x.__repr__()
    'array([0, 1, 2, 3])'
    """
    if f is None:
        # Reset: reinstall the default repr/str implementations.
        # The second argument to the C-level setter selects repr (1) or str (0).
        if repr:
            return multiarray.set_string_function(_default_array_repr, 1)
        else:
            return multiarray.set_string_function(_default_array_str, 0)
    else:
        return multiarray.set_string_function(f, repr)
| {
"repo_name": "numpy/numpy",
"path": "numpy/core/arrayprint.py",
"copies": "7",
"size": "61625",
"license": "bsd-3-clause",
"hash": 7023058149433561000,
"line_mean": 36.0342548077,
"line_max": 83,
"alpha_frac": 0.5930547667,
"autogenerated": false,
"ratio": 4.091150501228175,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004299689181331946,
"num_lines": 1664
} |
"""Array printing function
$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $
"""
from __future__ import division, absolute_import, print_function
__all__ = ["array2string", "array_str", "array_repr", "set_string_function",
"set_printoptions", "get_printoptions", "format_float_positional",
"format_float_scientific"]
__docformat__ = 'restructuredtext'
#
# Written by Konrad Hinsen <hinsenk@ere.umontreal.ca>
# last revision: 1996-3-13
# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details)
# and by Perry Greenfield 2000-4-1 for numarray
# and by Travis Oliphant 2005-8-22 for numpy
# Note: Both scalartypes.c.src and arrayprint.py implement strs for numpy
# scalars but for different purposes. scalartypes.c.src has str/reprs for when
# the scalar is printed on its own, while arrayprint.py has strs for when
# scalars are printed inside an ndarray. Only the latter strs are currently
# user-customizable.
import sys
import functools
if sys.version_info[0] >= 3:
try:
from _thread import get_ident
except ImportError:
from _dummy_thread import get_ident
else:
try:
from thread import get_ident
except ImportError:
from dummy_thread import get_ident
import numpy as np
from . import numerictypes as _nt
from .umath import absolute, not_equal, isnan, isinf, isfinite, isnat
from . import multiarray
from .multiarray import (array, dragon4_positional, dragon4_scientific,
datetime_as_string, datetime_data, dtype, ndarray,
set_legacy_print_mode)
from .fromnumeric import ravel, any
from .numeric import concatenate, asarray, errstate
from .numerictypes import (longlong, intc, int_, float_, complex_, bool_,
flexible)
import warnings
# Module-wide print options, mutated in place by set_printoptions() and
# copied out by get_printoptions().
_format_options = {
    'edgeitems': 3,  # repr N leading and trailing items of each dimension
    'threshold': 1000,  # total items > triggers array summarization
    'floatmode': 'maxprec',
    'precision': 8,  # precision of floating point representations
    'suppress': False,  # suppress printing small floating values in exp format
    'linewidth': 75,
    'nanstr': 'nan',  # string used for floating-point not-a-number
    'infstr': 'inf',  # string used for floating-point infinity
    'sign': '-',  # sign handling for positive floats: '-', '+' or ' '
    'formatter': None,  # optional dict of user-supplied element formatters
    'legacy': False}  # '1.13' enables numpy-1.13-compatible printing
def _make_options_dict(precision=None, threshold=None, edgeitems=None,
                       linewidth=None, suppress=None, nanstr=None, infstr=None,
                       sign=None, formatter=None, floatmode=None, legacy=None):
    """ make a dictionary out of the non-None arguments, plus sanity checks """

    # NOTE: locals() is evaluated here, before any other local names are
    # bound, so it contains exactly the keyword arguments above.
    options = {k: v for k, v in locals().items() if v is not None}

    # normalize truthy values to a real bool
    if suppress is not None:
        options['suppress'] = bool(suppress)

    modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal']
    if floatmode not in modes + [None]:
        raise ValueError("floatmode option must be one of " +
                         ", ".join('"{}"'.format(m) for m in modes))

    if sign not in [None, '-', '+', ' ']:
        raise ValueError("sign option must be one of ' ', '+', or '-'")

    if legacy not in [None, False, '1.13']:
        # unknown legacy strings are warned about but still stored, for
        # forward compatibility with newer legacy modes
        warnings.warn("legacy printing option can currently only be '1.13' or "
                      "`False`", stacklevel=3)

    return options
def set_printoptions(precision=None, threshold=None, edgeitems=None,
                     linewidth=None, suppress=None, nanstr=None, infstr=None,
                     formatter=None, sign=None, floatmode=None, **kwarg):
    """
    Set printing options.

    These options determine the way floating point numbers, arrays and
    other NumPy objects are displayed.

    Parameters
    ----------
    precision : int or None, optional
        Number of digits of precision for floating point output (default 8).
        May be `None` if `floatmode` is not `fixed`, to print as many digits as
        necessary to uniquely specify the value.
    threshold : int, optional
        Total number of array elements which trigger summarization
        rather than full repr (default 1000).
    edgeitems : int, optional
        Number of array items in summary at beginning and end of
        each dimension (default 3).
    linewidth : int, optional
        The number of characters per line for the purpose of inserting
        line breaks (default 75).
    suppress : bool, optional
        If True, always print floating point numbers using fixed point
        notation, in which case numbers equal to zero in the current precision
        will print as zero. If False, then scientific notation is used when
        absolute value of the smallest number is < 1e-4 or the ratio of the
        maximum absolute value to the minimum is > 1e3. The default is False.
    nanstr : str, optional
        String representation of floating point not-a-number (default nan).
    infstr : str, optional
        String representation of floating point infinity (default inf).
    sign : string, either '-', '+', or ' ', optional
        Controls printing of the sign of floating-point types. If '+', always
        print the sign of positive values. If ' ', always prints a space
        (whitespace character) in the sign position of positive values. If
        '-', omit the sign character of positive values. (default '-')
    formatter : dict of callables, optional
        If not None, the keys should indicate the type(s) that the respective
        formatting function applies to. Callables should return a string.
        Types that are not specified (by their corresponding keys) are handled
        by the default formatters. Individual types for which a formatter
        can be set are::

            - 'bool'
            - 'int'
            - 'timedelta' : a `numpy.timedelta64`
            - 'datetime' : a `numpy.datetime64`
            - 'float'
            - 'longfloat' : 128-bit floats
            - 'complexfloat'
            - 'longcomplexfloat' : composed of two 128-bit floats
            - 'numpystr' : types `numpy.string_` and `numpy.unicode_`
            - 'object' : `np.object_` arrays
            - 'str' : all other strings

        Other keys that can be used to set a group of types at once are::

            - 'all' : sets all types
            - 'int_kind' : sets 'int'
            - 'float_kind' : sets 'float' and 'longfloat'
            - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
            - 'str_kind' : sets 'str' and 'numpystr'
    floatmode : str, optional
        Controls the interpretation of the `precision` option for
        floating-point types. Can take the following values:

        - 'fixed' : Always print exactly `precision` fractional digits,
                even if this would print more or fewer digits than
                necessary to specify the value uniquely.
        - 'unique' : Print the minimum number of fractional digits necessary
                to represent each value uniquely. Different elements may
                have a different number of digits. The value of the
                `precision` option is ignored.
        - 'maxprec' : Print at most `precision` fractional digits, but if
                an element can be uniquely represented with fewer digits
                only print it with that many.
        - 'maxprec_equal' : Print at most `precision` fractional digits,
                but if every element in the array can be uniquely
                represented with an equal number of fewer digits, use that
                many digits for all elements.
    legacy : string or `False`, optional
        If set to the string `'1.13'` enables 1.13 legacy printing mode. This
        approximates numpy 1.13 print output by including a space in the sign
        position of floats and different behavior for 0d arrays. If set to
        `False`, disables legacy mode. Unrecognized strings will be ignored
        with a warning for forward compatibility.

        .. versionadded:: 1.14.0

    See Also
    --------
    get_printoptions, set_string_function, array2string

    Notes
    -----
    `formatter` is always reset with a call to `set_printoptions`.

    Examples
    --------
    Floating point precision can be set:

    >>> np.set_printoptions(precision=4)
    >>> print(np.array([1.123456789]))
    [ 1.1235]

    Long arrays can be summarised:

    >>> np.set_printoptions(threshold=5)
    >>> print(np.arange(10))
    [0 1 2 ..., 7 8 9]

    Small results can be suppressed:

    >>> eps = np.finfo(float).eps
    >>> x = np.arange(4.)
    >>> x**2 - (x + eps)**2
    array([ -4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00])
    >>> np.set_printoptions(suppress=True)
    >>> x**2 - (x + eps)**2
    array([-0., -0., 0., 0.])

    A custom formatter can be used to display array elements as desired:

    >>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)})
    >>> x = np.arange(3)
    >>> x
    array([int: 0, int: -1, int: -2])
    >>> np.set_printoptions()  # formatter gets reset
    >>> x
    array([0, 1, 2])

    To put back the default options, you can use:

    >>> np.set_printoptions(edgeitems=3,infstr='inf',
    ...        linewidth=75, nanstr='nan', precision=8,
    ...        suppress=False, threshold=1000, formatter=None)
    """
    # `legacy` is keyword-only (taken from **kwarg) so it never collides
    # positionally with the documented parameters.
    legacy = kwarg.pop('legacy', None)
    if kwarg:
        msg = "set_printoptions() got unexpected keyword argument '{}'"
        raise TypeError(msg.format(kwarg.popitem()[0]))

    opt = _make_options_dict(precision, threshold, edgeitems, linewidth,
                             suppress, nanstr, infstr, sign, formatter,
                             floatmode, legacy)
    # formatter is always reset
    opt['formatter'] = formatter
    _format_options.update(opt)

    # set the C variable for legacy mode
    if _format_options['legacy'] == '1.13':
        set_legacy_print_mode(113)
        # reset the sign option in legacy mode to avoid confusion
        _format_options['sign'] = '-'
    elif _format_options['legacy'] is False:
        set_legacy_print_mode(0)
def get_printoptions():
    """
    Return the current print options.

    Returns
    -------
    print_opts : dict
        Dictionary of current print options with keys

        - precision : int
        - threshold : int
        - edgeitems : int
        - linewidth : int
        - suppress : bool
        - nanstr : str
        - infstr : str
        - formatter : dict of callables
        - sign : str

        For a full description of these options, see `set_printoptions`.

    See Also
    --------
    set_printoptions, set_string_function
    """
    # shallow copy so callers cannot mutate the live options dict
    return _format_options.copy()
def _leading_trailing(a, edgeitems, index=()):
    """
    Keep only the N-D corners (leading and trailing edges) of an array.

    Should be passed a base-class ndarray, since it makes no guarantees about
    preserving subclasses.
    """
    axis = len(index)  # the axis currently being reduced
    if axis == a.ndim:
        # `index` now addresses a fully-specified corner block
        return a[index]

    if a.shape[axis] > 2*edgeitems:
        # axis is long enough to summarize: keep only its two edges and
        # recurse into the remaining axes of each edge
        return concatenate((
            _leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]),
            _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:])
        ), axis=axis)
    else:
        # short axis: keep it whole and recurse on the next axis
        return _leading_trailing(a, edgeitems, index + np.index_exp[:])
def _object_format(o):
    """ Object arrays containing lists should be printed unambiguously """
    # A bare repr of a list would be indistinguishable from a nested array,
    # so lists are wrapped in an explicit list(...) call.
    template = 'list({!r})' if type(o) is list else '{!r}'
    return template.format(o)
def repr_format(x):
    """Format *x* using its repr (used for string-like array elements)."""
    return '{!r}'.format(x)
def str_format(x):
    """Format *x* with str() (plain, unquoted element rendering)."""
    return str(x)
def _get_formatdict(data, **opt):
    """Build the dtype-kind -> formatter-factory mapping for `data`,
    honouring any user-supplied `formatter` overrides in `opt`."""
    prec, fmode = opt['precision'], opt['floatmode']
    supp, sign = opt['suppress'], opt['sign']
    legacy = opt['legacy']

    # wrapped in lambdas to avoid taking a code path with the wrong type of data
    formatdict = {
        'bool': lambda: BoolFormat(data),
        'int': lambda: IntegerFormat(data),
        'float': lambda:
            FloatingFormat(data, prec, fmode, supp, sign, legacy=legacy),
        'longfloat': lambda:
            FloatingFormat(data, prec, fmode, supp, sign, legacy=legacy),
        'complexfloat': lambda:
            ComplexFloatingFormat(data, prec, fmode, supp, sign, legacy=legacy),
        'longcomplexfloat': lambda:
            ComplexFloatingFormat(data, prec, fmode, supp, sign, legacy=legacy),
        'datetime': lambda: DatetimeFormat(data, legacy=legacy),
        'timedelta': lambda: TimedeltaFormat(data),
        'object': lambda: _object_format,
        'void': lambda: str_format,
        'numpystr': lambda: repr_format,
        'str': lambda: str}

    # we need to wrap values in `formatter` in a lambda, so that the interface
    # is the same as the above values.
    def indirect(x):
        return lambda: x

    formatter = opt['formatter']
    if formatter is not None:
        fkeys = [k for k in formatter.keys() if formatter[k] is not None]
        # group keys are applied first ...
        if 'all' in fkeys:
            for key in formatdict.keys():
                formatdict[key] = indirect(formatter['all'])
        if 'int_kind' in fkeys:
            for key in ['int']:
                formatdict[key] = indirect(formatter['int_kind'])
        if 'float_kind' in fkeys:
            for key in ['float', 'longfloat']:
                formatdict[key] = indirect(formatter['float_kind'])
        if 'complex_kind' in fkeys:
            for key in ['complexfloat', 'longcomplexfloat']:
                formatdict[key] = indirect(formatter['complex_kind'])
        if 'str_kind' in fkeys:
            for key in ['numpystr', 'str']:
                formatdict[key] = indirect(formatter['str_kind'])
        # ... so that individual type keys override any group key above
        for key in formatdict.keys():
            if key in fkeys:
                formatdict[key] = indirect(formatter[key])

    return formatdict
def _get_format_function(data, **options):
    """
    find the right formatting function for the dtype_
    """
    dtype_ = data.dtype
    dtypeobj = dtype_.type
    formatdict = _get_formatdict(data, **options)
    if issubclass(dtypeobj, _nt.bool_):
        return formatdict['bool']()
    elif issubclass(dtypeobj, _nt.integer):
        # timedelta64 is checked inside the integer branch, before plain
        # ints, so it is not mistaken for an ordinary integer type
        if issubclass(dtypeobj, _nt.timedelta64):
            return formatdict['timedelta']()
        else:
            return formatdict['int']()
    elif issubclass(dtypeobj, _nt.floating):
        if issubclass(dtypeobj, _nt.longfloat):
            return formatdict['longfloat']()
        else:
            return formatdict['float']()
    elif issubclass(dtypeobj, _nt.complexfloating):
        if issubclass(dtypeobj, _nt.clongfloat):
            return formatdict['longcomplexfloat']()
        else:
            return formatdict['complexfloat']()
    elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)):
        return formatdict['numpystr']()
    elif issubclass(dtypeobj, _nt.datetime64):
        return formatdict['datetime']()
    elif issubclass(dtypeobj, _nt.object_):
        return formatdict['object']()
    elif issubclass(dtypeobj, _nt.void):
        # structured voids get a per-field formatter; plain voids fall back
        # to a simple str rendering
        if dtype_.names is not None:
            return StructuredVoidFormat.from_data(data, **options)
        else:
            return formatdict['void']()
    else:
        return formatdict['numpystr']()
def _recursive_guard(fillvalue='...'):
    """
    Like the python 3.2 reprlib.recursive_repr, but forwards *args and **kwargs

    Decorates a function such that if it calls itself with the same first
    argument, it returns `fillvalue` instead of recursing.

    Largely copied from reprlib.recursive_repr
    """
    def decorating_function(f):
        # (object id, thread id) pairs currently being formatted
        repr_running = set()

        @functools.wraps(f)
        def wrapper(self, *args, **kwargs):
            # key on both the object and the thread so concurrent printing
            # of the same object from different threads is not confused
            key = id(self), get_ident()
            if key in repr_running:
                return fillvalue
            repr_running.add(key)
            try:
                return f(self, *args, **kwargs)
            finally:
                # always unmark, even if f raised
                repr_running.discard(key)

        return wrapper

    return decorating_function
# gracefully handle recursive calls, when object arrays contain themselves
@_recursive_guard()
def _array2string(a, options, separator=' ', prefix=""):
    # The formatter __init__s in _get_format_function cannot deal with
    # subclasses yet, and we also need to avoid recursion issues in
    # _formatArray with subclasses which return 0d arrays in place of scalars
    data = asarray(a)
    if a.shape == ():
        a = data

    if a.size > options['threshold']:
        # summarize: keep only the edge items and insert '...' between them
        summary_insert = "..."
        data = _leading_trailing(data, options['edgeitems'])
    else:
        summary_insert = ""

    # find the right formatting function for the array
    format_function = _get_format_function(data, **options)

    # skip over "["
    next_line_prefix = " "
    # skip over array(
    next_line_prefix += " "*len(prefix)

    lst = _formatArray(a, format_function, options['linewidth'],
                       next_line_prefix, separator, options['edgeitems'],
                       summary_insert, options['legacy'])
    return lst
def array2string(a, max_line_width=None, precision=None,
                 suppress_small=None, separator=' ', prefix="",
                 style=np._NoValue, formatter=None, threshold=None,
                 edgeitems=None, sign=None, floatmode=None, suffix="",
                 **kwarg):
    """
    Return a string representation of an array.

    Parameters
    ----------
    a : array_like
        Input array.
    max_line_width : int, optional
        The maximum number of columns the string should span. Newline
        characters splits the string appropriately after array elements.
    precision : int or None, optional
        Floating point precision. Default is the current printing
        precision (usually 8), which can be altered using `set_printoptions`.
    suppress_small : bool, optional
        Represent very small numbers as zero. A number is "very small" if it
        is smaller than the current printing precision.
    separator : str, optional
        Inserted between elements.
    prefix : str, optional
    suffix: str, optional
        The length of the prefix and suffix strings are used to respectively
        align and wrap the output. An array is typically printed as::

          prefix + array2string(a) + suffix

        The output is left-padded by the length of the prefix string, and
        wrapping is forced at the column ``max_line_width - len(suffix)``.
    style : _NoValue, optional
        Has no effect, do not use.

        .. deprecated:: 1.14.0
    formatter : dict of callables, optional
        If not None, the keys should indicate the type(s) that the respective
        formatting function applies to. Callables should return a string.
        Types that are not specified (by their corresponding keys) are handled
        by the default formatters. Individual types for which a formatter
        can be set are::

            - 'bool'
            - 'int'
            - 'timedelta' : a `numpy.timedelta64`
            - 'datetime' : a `numpy.datetime64`
            - 'float'
            - 'longfloat' : 128-bit floats
            - 'complexfloat'
            - 'longcomplexfloat' : composed of two 128-bit floats
            - 'void' : type `numpy.void`
            - 'numpystr' : types `numpy.string_` and `numpy.unicode_`
            - 'str' : all other strings

        Other keys that can be used to set a group of types at once are::

            - 'all' : sets all types
            - 'int_kind' : sets 'int'
            - 'float_kind' : sets 'float' and 'longfloat'
            - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
            - 'str_kind' : sets 'str' and 'numpystr'
    threshold : int, optional
        Total number of array elements which trigger summarization
        rather than full repr.
    edgeitems : int, optional
        Number of array items in summary at beginning and end of
        each dimension.
    sign : string, either '-', '+', or ' ', optional
        Controls printing of the sign of floating-point types. If '+', always
        print the sign of positive values. If ' ', always prints a space
        (whitespace character) in the sign position of positive values. If
        '-', omit the sign character of positive values.
    floatmode : str, optional
        Controls the interpretation of the `precision` option for
        floating-point types. Can take the following values:

        - 'fixed' : Always print exactly `precision` fractional digits,
          even if this would print more or fewer digits than
          necessary to specify the value uniquely.
        - 'unique : Print the minimum number of fractional digits necessary
          to represent each value uniquely. Different elements may
          have a different number of digits. The value of the
          `precision` option is ignored.
        - 'maxprec' : Print at most `precision` fractional digits, but if
          an element can be uniquely represented with fewer digits
          only print it with that many.
        - 'maxprec_equal' : Print at most `precision` fractional digits,
          but if every element in the array can be uniquely
          represented with an equal number of fewer digits, use that
          many digits for all elements.
    legacy : string or `False`, optional
        If set to the string `'1.13'` enables 1.13 legacy printing mode. This
        approximates numpy 1.13 print output by including a space in the sign
        position of floats and different behavior for 0d arrays. If set to
        `False`, disables legacy mode. Unrecognized strings will be ignored
        with a warning for forward compatibility.

        .. versionadded:: 1.14.0

    Returns
    -------
    array_str : str
        String representation of the array.

    Raises
    ------
    TypeError
        if a callable in `formatter` does not return a string.

    See Also
    --------
    array_str, array_repr, set_printoptions, get_printoptions

    Notes
    -----
    If a formatter is specified for a certain type, the `precision` keyword is
    ignored for that type.
    This is a very flexible function; `array_repr` and `array_str` are using
    `array2string` internally so keywords with the same name should work
    identically in all three functions.

    Examples
    --------
    >>> x = np.array([1e-16,1,2,3])
    >>> print(np.array2string(x, precision=2, separator=',',
    ...                       suppress_small=True))
    [ 0., 1., 2., 3.]
    >>> x = np.arange(3.)
    >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})
    '[0.00 1.00 2.00]'
    >>> x = np.arange(3)
    >>> np.array2string(x, formatter={'int':lambda x: hex(x)})
    '[0x0L 0x1L 0x2L]'
    """
    # `legacy` is accepted only through **kwarg; any other keyword is an error
    legacy = kwarg.pop('legacy', None)
    if kwarg:
        msg = "array2string() got unexpected keyword argument '{}'"
        raise TypeError(msg.format(kwarg.popitem()[0]))
    overrides = _make_options_dict(precision, threshold, edgeitems,
                                   max_line_width, suppress_small, None, None,
                                   sign, formatter, floatmode, legacy)
    # start from the global print options, then layer per-call overrides on top
    options = _format_options.copy()
    options.update(overrides)
    if options['legacy'] == '1.13':
        if style is np._NoValue:
            style = repr
        # 1.13 printed unstructured 0d arrays via the scalar's repr/style
        if a.shape == () and not a.dtype.names:
            return style(a.item())
    elif style is not np._NoValue:
        # Deprecation 11-9-2017 v1.14
        warnings.warn("'style' argument is deprecated and no longer functional"
                      " except in 1.13 'legacy' mode",
                      DeprecationWarning, stacklevel=3)
    if options['legacy'] != '1.13':
        # reserve room so the suffix fits on the last line
        options['linewidth'] -= len(suffix)
    # treat as a null array if any of shape elements == 0
    if a.size == 0:
        return "[]"
    return _array2string(a, options, separator, prefix)
def _extendLine(s, line, word, line_width, next_line_prefix, legacy):
needs_wrap = len(line) + len(word) > line_width
if legacy != '1.13':
s# don't wrap lines if it won't help
if len(line) <= len(next_line_prefix):
needs_wrap = False
if needs_wrap:
s += line.rstrip() + "\n"
line = next_line_prefix
line += word
return s, line
def _formatArray(a, format_function, line_width, next_line_prefix,
                 separator, edge_items, summary_insert, legacy):
    """formatArray is designed for two modes of operation:
    1. Full output
    2. Summarized output

    Returns the bracketed string form of `a`, wrapping at `line_width` and
    inserting `summary_insert` ("..." or "") where items were elided.
    """
    def recurser(index, hanging_indent, curr_width):
        """
        By using this local function, we don't need to recurse with all the
        arguments. Since this function is not created recursively, the cost is
        not significant
        """
        axis = len(index)
        axes_left = a.ndim - axis
        if axes_left == 0:
            # fully indexed: format the single scalar element
            return format_function(a[index])
        # when recursing, add a space to align with the [ added, and reduce the
        # length of the line by 1
        next_hanging_indent = hanging_indent + ' '
        if legacy == '1.13':
            next_width = curr_width
        else:
            next_width = curr_width - len(']')
        a_len = a.shape[axis]
        # summarize this axis only when there are more than 2*edge_items items
        show_summary = summary_insert and 2*edge_items < a_len
        if show_summary:
            leading_items = edge_items
            trailing_items = edge_items
        else:
            leading_items = 0
            trailing_items = a_len
        # stringify the array with the hanging indent on the first line too
        s = ''
        # last axis (rows) - wrap elements if they would not fit on one line
        if axes_left == 1:
            # the length up until the beginning of the separator / bracket
            if legacy == '1.13':
                elem_width = curr_width - len(separator.rstrip())
            else:
                elem_width = curr_width - max(len(separator.rstrip()), len(']'))
            line = hanging_indent
            for i in range(leading_items):
                word = recurser(index + (i,), next_hanging_indent, next_width)
                s, line = _extendLine(
                    s, line, word, elem_width, hanging_indent, legacy)
                line += separator
            if show_summary:
                s, line = _extendLine(
                    s, line, summary_insert, elem_width, hanging_indent, legacy)
                if legacy == '1.13':
                    line += ", "
                else:
                    line += separator
            # trailing items, all but the last (which gets no separator)
            for i in range(trailing_items, 1, -1):
                word = recurser(index + (-i,), next_hanging_indent, next_width)
                s, line = _extendLine(
                    s, line, word, elem_width, hanging_indent, legacy)
                line += separator
            if legacy == '1.13':
                # width of the seperator is not considered on 1.13
                elem_width = curr_width
            word = recurser(index + (-1,), next_hanging_indent, next_width)
            s, line = _extendLine(
                s, line, word, elem_width, hanging_indent, legacy)
            s += line
        # other axes - insert newlines between rows
        else:
            s = ''
            line_sep = separator.rstrip() + '\n'*(axes_left - 1)
            for i in range(leading_items):
                nested = recurser(index + (i,), next_hanging_indent, next_width)
                s += hanging_indent + nested + line_sep
            if show_summary:
                if legacy == '1.13':
                    # trailing space, fixed nbr of newlines, and fixed separator
                    s += hanging_indent + summary_insert + ", \n"
                else:
                    s += hanging_indent + summary_insert + line_sep
            for i in range(trailing_items, 1, -1):
                nested = recurser(index + (-i,), next_hanging_indent,
                                  next_width)
                s += hanging_indent + nested + line_sep
            nested = recurser(index + (-1,), next_hanging_indent, next_width)
            s += hanging_indent + nested
        # remove the hanging indent, and wrap in []
        s = '[' + s[len(hanging_indent):] + ']'
        return s
    try:
        # invoke the recursive part with an initial index and prefix
        return recurser(index=(),
                        hanging_indent=next_line_prefix,
                        curr_width=line_width)
    finally:
        # recursive closures have a cyclic reference to themselves, which
        # requires gc to collect (gh-10620). To avoid this problem, for
        # performance and PyPy friendliness, we break the cycle:
        recurser = None
def _none_or_positive_arg(x, name):
if x is None:
return -1
if x < 0:
raise ValueError("{} must be >= 0".format(name))
return x
class FloatingFormat(object):
    """ Formatter for subtypes of np.floating

    Computes common padding/precision for the whole array in fillFormat, then
    formats individual elements consistently via __call__.
    """
    def __init__(self, data, precision, floatmode, suppress_small, sign=False,
                 **kwarg):
        # for backcompatibility, accept bools
        if isinstance(sign, bool):
            sign = '+' if sign else '-'
        self._legacy = kwarg.get('legacy', False)
        if self._legacy == '1.13':
            # when not 0d, legacy does not support '-'
            if data.shape != () and sign == '-':
                sign = ' '
        self.floatmode = floatmode
        if floatmode == 'unique':
            # 'unique' mode ignores the precision option entirely
            self.precision = None
        else:
            self.precision = precision
        self.precision = _none_or_positive_arg(self.precision, 'precision')
        self.suppress_small = suppress_small
        self.sign = sign
        self.exp_format = False
        self.large_exponent = False
        self.fillFormat(data)
    def fillFormat(self, data):
        """Scan `data` once to choose exponential vs positional mode and the
        shared padding/precision used to format every element."""
        # only the finite values are used to compute the number of digits
        finite_vals = data[isfinite(data)]
        # choose exponential mode based on the non-zero finite values:
        abs_non_zero = absolute(finite_vals[finite_vals != 0])
        if len(abs_non_zero) != 0:
            max_val = np.max(abs_non_zero)
            min_val = np.min(abs_non_zero)
            with errstate(over='ignore'): # division can overflow
                if max_val >= 1.e8 or (not self.suppress_small and
                        (min_val < 0.0001 or max_val/min_val > 1000.)):
                    self.exp_format = True
        # do a first pass of printing all the numbers, to determine sizes
        if len(finite_vals) == 0:
            # nothing finite to measure; minimal defaults
            self.pad_left = 0
            self.pad_right = 0
            self.trim = '.'
            self.exp_size = -1
            self.unique = True
        elif self.exp_format:
            trim, unique = '.', True
            if self.floatmode == 'fixed' or self._legacy == '1.13':
                trim, unique = 'k', False
            strs = (dragon4_scientific(x, precision=self.precision,
                               unique=unique, trim=trim, sign=self.sign == '+')
                    for x in finite_vals)
            frac_strs, _, exp_strs = zip(*(s.partition('e') for s in strs))
            int_part, frac_part = zip(*(s.split('.') for s in frac_strs))
            self.exp_size = max(len(s) for s in exp_strs) - 1
            self.trim = 'k'
            self.precision = max(len(s) for s in frac_part)
            # for back-compat with np 1.13, use 2 spaces & sign and full prec
            if self._legacy == '1.13':
                self.pad_left = 3
            else:
                # this should be only 1 or 2. Can be calculated from sign.
                self.pad_left = max(len(s) for s in int_part)
            # pad_right is only needed for nan length calculation
            self.pad_right = self.exp_size + 2 + self.precision
            self.unique = False
        else:
            # first pass printing to determine sizes
            trim, unique = '.', True
            if self.floatmode == 'fixed':
                trim, unique = 'k', False
            strs = (dragon4_positional(x, precision=self.precision,
                                       fractional=True,
                                       unique=unique, trim=trim,
                                       sign=self.sign == '+')
                    for x in finite_vals)
            int_part, frac_part = zip(*(s.split('.') for s in strs))
            if self._legacy == '1.13':
                self.pad_left = 1 + max(len(s.lstrip('-+')) for s in int_part)
            else:
                self.pad_left = max(len(s) for s in int_part)
            self.pad_right = max(len(s) for s in frac_part)
            self.exp_size = -1
            if self.floatmode in ['fixed', 'maxprec_equal']:
                # all elements share one fixed fractional width
                self.precision = self.pad_right
                self.unique = False
                self.trim = 'k'
            else:
                self.unique = True
                self.trim = '.'
        if self._legacy != '1.13':
            # account for sign = ' ' by adding one to pad_left
            if self.sign == ' ' and not any(np.signbit(finite_vals)):
                self.pad_left += 1
        # if there are non-finite values, may need to increase pad_left
        if data.size != finite_vals.size:
            neginf = self.sign != '-' or any(data[isinf(data)] < 0)
            nanlen = len(_format_options['nanstr'])
            inflen = len(_format_options['infstr']) + neginf
            offset = self.pad_right + 1 # +1 for decimal pt
            self.pad_left = max(self.pad_left, nanlen - offset, inflen - offset)
    def __call__(self, x):
        """Format one scalar `x` using the sizes computed in fillFormat."""
        if not np.isfinite(x):
            with errstate(invalid='ignore'):
                if np.isnan(x):
                    sign = '+' if self.sign == '+' else ''
                    ret = sign + _format_options['nanstr']
                else: # isinf
                    sign = '-' if x < 0 else '+' if self.sign == '+' else ''
                    ret = sign + _format_options['infstr']
                # right-justify to the common field width
                return ' '*(self.pad_left + self.pad_right + 1 - len(ret)) + ret
        if self.exp_format:
            return dragon4_scientific(x,
                                      precision=self.precision,
                                      unique=self.unique,
                                      trim=self.trim,
                                      sign=self.sign == '+',
                                      pad_left=self.pad_left,
                                      exp_digits=self.exp_size)
        else:
            return dragon4_positional(x,
                                      precision=self.precision,
                                      unique=self.unique,
                                      fractional=True,
                                      trim=self.trim,
                                      sign=self.sign == '+',
                                      pad_left=self.pad_left,
                                      pad_right=self.pad_right)
# for back-compatibility, we keep the classes for each float type too
class FloatFormat(FloatingFormat):
    """Deprecated alias of FloatingFormat (kept for backwards compatibility)."""
    def __init__(self, *args, **kwargs):
        # warn, then behave exactly like FloatingFormat
        warnings.warn("FloatFormat has been replaced by FloatingFormat",
                      DeprecationWarning, stacklevel=2)
        super(FloatFormat, self).__init__(*args, **kwargs)
class LongFloatFormat(FloatingFormat):
    """Deprecated alias of FloatingFormat (kept for backwards compatibility)."""
    def __init__(self, *args, **kwargs):
        # warn, then behave exactly like FloatingFormat
        warnings.warn("LongFloatFormat has been replaced by FloatingFormat",
                      DeprecationWarning, stacklevel=2)
        super(LongFloatFormat, self).__init__(*args, **kwargs)
def format_float_scientific(x, precision=None, unique=True, trim='k',
                            sign=False, pad_left=None, exp_digits=None):
    """
    Format a floating-point scalar as a decimal string in scientific notation.

    Provides control over rounding, trimming and padding. Uses and assumes
    IEEE unbiased rounding. Uses the "Dragon4" algorithm.

    Parameters
    ----------
    x : python float or numpy floating scalar
        Value to format.
    precision : non-negative integer or None, optional
        Maximum number of digits to print. May be None if `unique` is
        `True`, but must be an integer if unique is `False`.
    unique : boolean, optional
        If `True`, use a digit-generation strategy which gives the shortest
        representation which uniquely identifies the floating-point number from
        other values of the same type, by judicious rounding. If `precision`
        was omitted, print all necessary digits, otherwise digit generation is
        cut off after `precision` digits and the remaining value is rounded.
        If `False`, digits are generated as if printing an infinite-precision
        value and stopping after `precision` digits, rounding the remaining
        value.
    trim : one of 'k', '.', '0', '-', optional
        Controls post-processing trimming of trailing digits, as follows:

        k : keep trailing zeros, keep decimal point (no trimming)
        . : trim all trailing zeros, leave decimal point
        0 : trim all but the zero before the decimal point. Insert the
            zero if it is missing.
        - : trim trailing zeros and any trailing decimal point
    sign : boolean, optional
        Whether to show the sign for positive values.
    pad_left : non-negative integer, optional
        Pad the left side of the string with whitespace until at least that
        many characters are to the left of the decimal point.
    exp_digits : non-negative integer, optional
        Pad the exponent with zeros until it contains at least this many digits.
        If omitted, the exponent will be at least 2 digits.

    Returns
    -------
    rep : string
        The string representation of the floating point value

    See Also
    --------
    format_float_positional

    Examples
    --------
    >>> np.format_float_scientific(np.float32(np.pi))
    '3.1415927e+00'
    >>> s = np.float32(1.23e24)
    >>> np.format_float_scientific(s, unique=False, precision=15)
    '1.230000071797338e+24'
    >>> np.format_float_scientific(s, exp_digits=4)
    '1.23e+0024'
    """
    # None becomes the -1 sentinel understood by the dragon4 C routine
    precision = _none_or_positive_arg(precision, 'precision')
    pad_left = _none_or_positive_arg(pad_left, 'pad_left')
    exp_digits = _none_or_positive_arg(exp_digits, 'exp_digits')
    return dragon4_scientific(x, precision=precision, unique=unique,
                              trim=trim, sign=sign, pad_left=pad_left,
                              exp_digits=exp_digits)
def format_float_positional(x, precision=None, unique=True,
                            fractional=True, trim='k', sign=False,
                            pad_left=None, pad_right=None):
    """
    Format a floating-point scalar as a decimal string in positional notation.

    Provides control over rounding, trimming and padding. Uses and assumes
    IEEE unbiased rounding. Uses the "Dragon4" algorithm.

    Parameters
    ----------
    x : python float or numpy floating scalar
        Value to format.
    precision : non-negative integer or None, optional
        Maximum number of digits to print. May be None if `unique` is
        `True`, but must be an integer if unique is `False`.
    unique : boolean, optional
        If `True`, use a digit-generation strategy which gives the shortest
        representation which uniquely identifies the floating-point number from
        other values of the same type, by judicious rounding. If `precision`
        was omitted, print out all necessary digits, otherwise digit generation
        is cut off after `precision` digits and the remaining value is rounded.
        If `False`, digits are generated as if printing an infinite-precision
        value and stopping after `precision` digits, rounding the remaining
        value.
    fractional : boolean, optional
        If `True`, the cutoff of `precision` digits refers to the total number
        of digits after the decimal point, including leading zeros.
        If `False`, `precision` refers to the total number of significant
        digits, before or after the decimal point, ignoring leading zeros.
    trim : one of 'k', '.', '0', '-', optional
        Controls post-processing trimming of trailing digits, as follows:

        k : keep trailing zeros, keep decimal point (no trimming)
        . : trim all trailing zeros, leave decimal point
        0 : trim all but the zero before the decimal point. Insert the
            zero if it is missing.
        - : trim trailing zeros and any trailing decimal point
    sign : boolean, optional
        Whether to show the sign for positive values.
    pad_left : non-negative integer, optional
        Pad the left side of the string with whitespace until at least that
        many characters are to the left of the decimal point.
    pad_right : non-negative integer, optional
        Pad the right side of the string with whitespace until at least that
        many characters are to the right of the decimal point.

    Returns
    -------
    rep : string
        The string representation of the floating point value

    See Also
    --------
    format_float_scientific

    Examples
    --------
    >>> np.format_float_positional(np.float32(np.pi))
    '3.1415927'
    >>> np.format_float_positional(np.float16(np.pi))
    '3.14'
    >>> np.format_float_positional(np.float16(0.3))
    '0.3'
    >>> np.format_float_positional(np.float16(0.3), unique=False, precision=10)
    '0.3000488281'
    """
    # None becomes the -1 sentinel understood by the dragon4 C routine
    precision = _none_or_positive_arg(precision, 'precision')
    pad_left = _none_or_positive_arg(pad_left, 'pad_left')
    pad_right = _none_or_positive_arg(pad_right, 'pad_right')
    return dragon4_positional(x, precision=precision, unique=unique,
                              fractional=fractional, trim=trim,
                              sign=sign, pad_left=pad_left,
                              pad_right=pad_right)
class IntegerFormat(object):
    """Fixed-width decimal formatter for integer arrays."""
    def __init__(self, data):
        # The field width is the longest decimal rendering among the two
        # extreme values; empty arrays get zero padding.
        if data.size > 0:
            extremes = (np.max(data), np.min(data))
            width = max(len(str(value)) for value in extremes)
        else:
            width = 0
        self.format = '%{}d'.format(width)

    def __call__(self, x):
        return self.format % x
class BoolFormat(object):
    """Formatter for boolean arrays."""
    def __init__(self, data, **kwargs):
        # add an extra space so " True" and "False" have the same length and
        # array elements align nicely when printed, except in 0d arrays
        self.truestr = 'True' if data.shape == () else ' True'

    def __call__(self, x):
        if x:
            return self.truestr
        return "False"
class ComplexFloatingFormat(object):
    """ Formatter for subtypes of np.complexfloating """
    def __init__(self, x, precision, floatmode, suppress_small,
                 sign=False, **kwarg):
        # for backcompatibility, accept bools
        if isinstance(sign, bool):
            sign = '+' if sign else '-'

        # 1.13 legacy mode used different float modes for the two components
        if kwarg.get('legacy', False) == '1.13':
            floatmode_real, floatmode_imag = 'maxprec_equal', 'maxprec'
        else:
            floatmode_real = floatmode_imag = floatmode

        self.real_format = FloatingFormat(x.real, precision, floatmode_real,
                                          suppress_small, sign=sign, **kwarg)
        # the imaginary part always carries an explicit sign
        self.imag_format = FloatingFormat(x.imag, precision, floatmode_imag,
                                          suppress_small, sign='+', **kwarg)

    def __call__(self, x):
        real_str = self.real_format(x.real)
        imag_str = self.imag_format(x.imag)
        # splice the 'j' in just before the imaginary part's trailing padding
        cut = len(imag_str.rstrip())
        return real_str + imag_str[:cut] + 'j' + imag_str[cut:]
# for back-compatibility, we keep the classes for each complex type too
class ComplexFormat(ComplexFloatingFormat):
    """Deprecated alias of ComplexFloatingFormat (kept for back-compat)."""
    def __init__(self, *args, **kwargs):
        # warn, then behave exactly like ComplexFloatingFormat
        warnings.warn(
            "ComplexFormat has been replaced by ComplexFloatingFormat",
            DeprecationWarning, stacklevel=2)
        super(ComplexFormat, self).__init__(*args, **kwargs)
class LongComplexFormat(ComplexFloatingFormat):
    """Deprecated alias of ComplexFloatingFormat (kept for back-compat)."""
    def __init__(self, *args, **kwargs):
        # warn, then behave exactly like ComplexFloatingFormat
        warnings.warn(
            "LongComplexFormat has been replaced by ComplexFloatingFormat",
            DeprecationWarning, stacklevel=2)
        super(LongComplexFormat, self).__init__(*args, **kwargs)
class _TimelikeFormat(object):
    """
    Shared base for datetime64/timedelta64 formatters: right-justifies
    entries to a common width and renders NaT specially.
    """
    def __init__(self, data):
        non_nat = data[~isnat(data)]
        if len(non_nat) > 0:
            # width is driven by the longest rendering of the extreme values
            extremes = (np.max(non_nat), np.min(non_nat))
            width = max(len(self._format_non_nat(v)) for v in extremes)
        else:
            width = 0
        if len(non_nat) < data.size:
            # data contains at least one NaT; "'NaT'" is 5 characters wide
            width = max(width, 5)
        self._format = '%{}s'.format(width)
        self._nat = "'NaT'".rjust(width)

    def _format_non_nat(self, x):
        """Render one non-NaT element; subclasses must override."""
        raise NotImplementedError

    def __call__(self, x):
        if isnat(x):
            return self._nat
        return self._format % self._format_non_nat(x)
class DatetimeFormat(_TimelikeFormat):
    """Formatter for datetime64 arrays; values render as quoted strings."""
    def __init__(self, x, unit=None, timezone=None, casting='same_kind',
                 legacy=False):
        # Get the unit from the dtype
        if unit is None:
            if x.dtype.kind == 'M':
                unit = datetime_data(x.dtype)[0]
            else:
                unit = 's'
        if timezone is None:
            timezone = 'naive'
        self.timezone = timezone
        self.unit = unit
        self.casting = casting
        self.legacy = legacy
        # must be called after the above are configured
        super(DatetimeFormat, self).__init__(x)
    def __call__(self, x):
        if self.legacy == '1.13':
            # legacy mode bypasses the base class's NaT/width handling
            return self._format_non_nat(x)
        return super(DatetimeFormat, self).__call__(x)
    def _format_non_nat(self, x):
        # delegate the actual string conversion to the C helper
        return "'%s'" % datetime_as_string(x,
                                           unit=self.unit,
                                           timezone=self.timezone,
                                           casting=self.casting)
class TimedeltaFormat(_TimelikeFormat):
    """Formatter for timedelta64 arrays; values print as integer counts."""
    def _format_non_nat(self, x):
        # show the underlying int64 tick count rather than a unit string
        return str(x.astype('i8'))
class SubArrayFormat(object):
    """Formats sub-array fields by recursing down to the elements and
    applying the wrapped element formatter."""
    def __init__(self, format_function):
        self.format_function = format_function

    def __call__(self, arr):
        # recurse until we reach a 0d/1d array, then format each element
        if arr.ndim > 1:
            parts = [self.__call__(sub) for sub in arr]
        else:
            parts = [self.format_function(elem) for elem in arr]
        return "[" + ", ".join(parts) + "]"
class StructuredVoidFormat(object):
    """
    Formatter for structured np.void objects.

    This does not work on structured alias types like np.dtype(('i4', 'i2,i2')),
    as alias scalars lose their field information, and the implementation
    relies upon np.void.__getitem__.
    """
    def __init__(self, format_functions):
        self.format_functions = format_functions

    @classmethod
    def from_data(cls, data, **options):
        """
        Alternate constructor deriving one formatter per field from raw data.
        Added to avoid changing the signature of __init__.
        """
        format_functions = []
        for field_name in data.dtype.names:
            fmt = _get_format_function(data[field_name], **options)
            # sub-array fields need elementwise recursion over their shape
            if data.dtype[field_name].shape != ():
                fmt = SubArrayFormat(fmt)
            format_functions.append(fmt)
        return cls(format_functions)

    def __call__(self, x):
        pieces = [fmt(field)
                  for field, fmt in zip(x, self.format_functions)]
        # single-field records keep a trailing comma, like python tuples
        if len(pieces) == 1:
            return "({},)".format(pieces[0])
        return "({})".format(", ".join(pieces))
# for backwards compatibility
class StructureFormat(StructuredVoidFormat):
    """Deprecated alias of StructuredVoidFormat."""
    def __init__(self, *args, **kwargs):
        # NumPy 1.14, 2018-02-14
        warnings.warn(
            "StructureFormat has been replaced by StructuredVoidFormat",
            DeprecationWarning, stacklevel=2)
        super(StructureFormat, self).__init__(*args, **kwargs)
def _void_scalar_repr(x):
    """
    Implements the repr for structured-void scalars. It is called from the
    scalartypes.c.src code, and is placed here because it uses the elementwise
    formatters defined above.
    """
    # build per-field formatters from the scalar's own data, then format it
    return StructuredVoidFormat.from_data(array(x), **_format_options)(x)
# dtypes whose values' repr alone implies the dtype (no "dtype=" needed)
_typelessdata = [int_, float_, complex_, bool_]
# also include the C integer types when they compare as subclasses of the
# Python int type on this build/platform
if issubclass(intc, int):
    _typelessdata.append(intc)
if issubclass(longlong, int):
    _typelessdata.append(longlong)
def dtype_is_implied(dtype):
    """
    Determine if the given dtype is implied by the representation of its
    values (i.e. whether ``repr`` may omit the ``dtype=`` clause).

    Parameters
    ----------
    dtype : dtype
        Data type

    Returns
    -------
    implied : bool
        True if the dtype is implied by the representation of its values.

    Examples
    --------
    >>> np.core.arrayprint.dtype_is_implied(int)
    True
    >>> np.array([1, 2, 3], int)
    array([1, 2, 3])
    >>> np.core.arrayprint.dtype_is_implied(np.int8)
    False
    >>> np.array([1, 2, 3], np.int8)
    array([1, 2, 3], dtype=np.int8)
    """
    dtype = np.dtype(dtype)
    if _format_options['legacy'] == '1.13':
        # 1.13 legacy printing always spelled out bool dtypes
        if dtype.type == bool_:
            return False
    # structured dtypes are never implied; field names are not in the repr
    if dtype.names is not None:
        return False
    return dtype.type in _typelessdata
def dtype_short_repr(dtype):
    """
    Convert a dtype to a short form which evaluates to the same dtype.

    The intent is roughly that ``eval(dtype_short_repr(dt)) == dt`` holds
    when the numpy namespace is in scope.
    """
    # structured dtypes already repr as a list/tuple of field specs
    if dtype.names is not None:
        return str(dtype)
    # flexible types are quoted so they don't give garbage like str256
    if issubclass(dtype.type, flexible):
        return "'%s'" % str(dtype)

    typename = dtype.name
    # quote typenames which can't be represented as python variable names
    if typename and not (typename[0].isalpha() and typename.isalnum()):
        typename = repr(typename)
    return typename
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
    """
    Return the string representation of an array.

    Parameters
    ----------
    arr : ndarray
        Input array.
    max_line_width : int, optional
        The maximum number of columns the string should span. Newline
        characters split the string appropriately after array elements.
    precision : int, optional
        Floating point precision. Default is the current printing precision
        (usually 8), which can be altered using `set_printoptions`.
    suppress_small : bool, optional
        Represent very small numbers as zero, default is False. Very small
        is defined by `precision`, if the precision is 8 then
        numbers smaller than 5e-9 are represented as zero.

    Returns
    -------
    string : str
        The string representation of an array.

    See Also
    --------
    array_str, array2string, set_printoptions

    Examples
    --------
    >>> np.array_repr(np.array([1,2]))
    'array([1, 2])'
    >>> np.array_repr(np.ma.array([0.]))
    'MaskedArray([ 0.])'
    >>> np.array_repr(np.array([], np.int32))
    'array([], dtype=int32)'
    >>> x = np.array([1e-6, 4e-7, 2, 3])
    >>> np.array_repr(x, precision=6, suppress_small=True)
    'array([ 0.000001, 0. , 2. , 3. ])'
    """
    if max_line_width is None:
        max_line_width = _format_options['linewidth']
    # subclasses print their own class name instead of "array"
    if type(arr) is not ndarray:
        class_name = type(arr).__name__
    else:
        class_name = "array"
    # omit "dtype=" when the values alone imply it (and array is non-empty)
    skipdtype = dtype_is_implied(arr.dtype) and arr.size > 0
    prefix = class_name + "("
    suffix = ")" if skipdtype else ","
    if (_format_options['legacy'] == '1.13' and
            arr.shape == () and not arr.dtype.names):
        # 1.13 printed unstructured 0d arrays via the scalar's repr
        lst = repr(arr.item())
    elif arr.size > 0 or arr.shape == (0,):
        lst = array2string(arr, max_line_width, precision, suppress_small,
                           ', ', prefix, suffix=suffix)
    else: # show zero-length shape unless it is (0,)
        lst = "[], shape=%s" % (repr(arr.shape),)
    arr_str = prefix + lst + suffix
    if skipdtype:
        return arr_str
    dtype_str = "dtype={})".format(dtype_short_repr(arr.dtype))
    # compute whether we should put dtype on a new line: Do so if adding the
    # dtype would extend the last line past max_line_width.
    # Note: This line gives the correct result even when rfind returns -1.
    last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1)
    spacer = " "
    if _format_options['legacy'] == '1.13':
        if issubclass(arr.dtype.type, flexible):
            spacer = '\n' + ' '*len(class_name + "(")
    elif last_line_len + len(dtype_str) + 1 > max_line_width:
        spacer = '\n' + ' '*len(class_name + "(")
    return arr_str + spacer + dtype_str
# str() guarded against infinite recursion for self-containing object arrays
_guarded_str = _recursive_guard()(str)
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
    """
    Return a string representation of the data in an array.

    The data in the array is returned as a single string. This function is
    similar to `array_repr`, the difference being that `array_repr` also
    returns information on the kind of array and its data type.

    Parameters
    ----------
    a : ndarray
        Input array.
    max_line_width : int, optional
        Inserts newlines if text is longer than `max_line_width`. The
        default is, indirectly, 75.
    precision : int, optional
        Floating point precision. Default is the current printing precision
        (usually 8), which can be altered using `set_printoptions`.
    suppress_small : bool, optional
        Represent numbers "very close" to zero as zero; default is False.
        Very close is defined by precision: if the precision is 8, e.g.,
        numbers smaller (in absolute value) than 5e-9 are represented as
        zero.

    See Also
    --------
    array2string, array_repr, set_printoptions

    Examples
    --------
    >>> np.array_str(np.arange(3))
    '[0 1 2]'
    """
    if (_format_options['legacy'] == '1.13' and
            a.shape == () and not a.dtype.names):
        return str(a.item())
    # the str of 0d arrays is a special case: It should appear like a scalar,
    # so floats are not truncated by `precision`, and strings are not wrapped
    # in quotes. So we return the str of the scalar value.
    if a.shape == ():
        # obtain a scalar and call str on it, avoiding problems for subclasses
        # for which indexing with () returns a 0d instead of a scalar by using
        # ndarray's getindex. Also guard against recursive 0d object arrays.
        return _guarded_str(np.ndarray.__getitem__(a, ()))
    return array2string(a, max_line_width, precision, suppress_small, ' ', "")
def set_string_function(f, repr=True):
    """
    Set a Python function to be used when pretty printing arrays.

    Parameters
    ----------
    f : function or None
        Function to be used to pretty print arrays. The function should expect
        a single array argument and return a string of the representation of
        the array. If None, the function is reset to the default NumPy function
        to print arrays.
    repr : bool, optional
        If True (default), the function for pretty printing (``__repr__``)
        is set, if False the function that returns the default string
        representation (``__str__``) is set.

    See Also
    --------
    set_printoptions, get_printoptions

    Examples
    --------
    >>> def pprint(arr):
    ...     return 'HA! - What are you going to do now?'
    ...
    >>> np.set_string_function(pprint)
    >>> a = np.arange(10)
    >>> a
    HA! - What are you going to do now?
    >>> print(a)
    [0 1 2 3 4 5 6 7 8 9]

    We can reset the function to the default:

    >>> np.set_string_function(None)
    >>> a
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    `repr` affects either pretty printing or normal string representation.
    Note that ``__repr__`` is still affected by setting ``__str__``
    because the width of each array element in the returned string becomes
    equal to the length of the result of ``__str__()``.

    >>> x = np.arange(4)
    >>> np.set_string_function(lambda x:'random', repr=False)
    >>> x.__str__()
    'random'
    >>> x.__repr__()
    'array([ 0, 1, 2, 3])'
    """
    # the C-level setter's second argument selects repr (1) vs str (0)
    if f is None:
        if repr:
            return multiarray.set_string_function(array_repr, 1)
        else:
            return multiarray.set_string_function(array_str, 0)
    else:
        return multiarray.set_string_function(f, repr)
# install the default str/repr printers at module import time
set_string_function(array_str, 0)
set_string_function(array_repr, 1)
| {
"repo_name": "ryfeus/lambda-packs",
"path": "Keras_tensorflow_nightly/source2.7/numpy/core/arrayprint.py",
"copies": "2",
"size": "57352",
"license": "mit",
"hash": -4653335198745969000,
"line_mean": 36.5094833224,
"line_max": 83,
"alpha_frac": 0.5872332264,
"autogenerated": false,
"ratio": 4.145727916726905,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000468428052363823,
"num_lines": 1529
} |
"""Array printing function
$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $
"""
from __future__ import division, absolute_import, print_function
__all__ = ["array2string", "set_printoptions", "get_printoptions"]
__docformat__ = 'restructuredtext'
#
# Written by Konrad Hinsen <hinsenk@ere.umontreal.ca>
# last revision: 1996-3-13
# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details)
# and by Perry Greenfield 2000-4-1 for numarray
# and by Travis Oliphant 2005-8-22 for numpy
import sys
from functools import reduce
from . import numerictypes as _nt
from .umath import maximum, minimum, absolute, not_equal, isnan, isinf
from .multiarray import (array, format_longfloat, datetime_as_string,
datetime_data, dtype)
from .fromnumeric import ravel
from .numeric import asarray
# Portable platform-int limits: Python 3 dropped sys.maxint, so fall back
# to sys.maxsize there.  Used by IntegerFormat to decide when '%d' is safe.
if sys.version_info[0] >= 3:
    _MAXINT = sys.maxsize
    _MININT = -sys.maxsize - 1
else:
    _MAXINT = sys.maxint
    _MININT = -sys.maxint - 1
def product(x, y):
    """Multiply two values; reduction helper used to compute total array size."""
    result = x * y
    return result
# Module-level printing state, mutated in place by set_printoptions().
_summaryEdgeItems = 3     # repr N leading and trailing items of each dimension
_summaryThreshold = 1000  # total items > triggers array summarization
_float_output_precision = 8           # digits after the decimal point for floats
_float_output_suppress_small = False  # print tiny floats as 0 instead of exp form
_line_width = 75          # max characters per printed line
_nan_str = 'nan'          # text used for not-a-number
_inf_str = 'inf'          # text used for infinity
_formatter = None  # formatting function for array elements
def set_printoptions(precision=None, threshold=None, edgeitems=None,
                     linewidth=None, suppress=None,
                     nanstr=None, infstr=None,
                     formatter=None):
    """
    Set printing options.

    These options determine the way floating point numbers, arrays and
    other NumPy objects are displayed.

    Parameters
    ----------
    precision : int, optional
        Digits of precision for floating point output (default 8).
    threshold : int, optional
        Total number of array elements which trigger summarization
        rather than full repr (default 1000).
    edgeitems : int, optional
        Number of array items in summary at beginning and end of
        each dimension (default 3).
    linewidth : int, optional
        Characters per line for the purpose of inserting line breaks
        (default 75).
    suppress : bool, optional
        Whether to suppress printing of small floating point values
        using scientific notation (default False).
    nanstr : str, optional
        String representation of floating point not-a-number (default nan).
    infstr : str, optional
        String representation of floating point infinity (default inf).
    formatter : dict of callables, optional
        Maps type keys ('bool', 'int', 'timedelta', 'datetime', 'float',
        'longfloat', 'complexfloat', 'longcomplexfloat', 'numpystr',
        'str') or group keys ('all', 'int_kind', 'float_kind',
        'complex_kind', 'str_kind') to callables returning strings.
        Unlike the other options, `formatter` is always reset by a call
        to this function, even when it is None.

    See Also
    --------
    get_printoptions, set_string_function, array2string
    """
    global _summaryThreshold, _summaryEdgeItems, _float_output_precision
    global _line_width, _float_output_suppress_small, _nan_str, _inf_str
    global _formatter
    module_ns = globals()
    # Only overwrite options that were explicitly passed ...
    updates = {
        '_line_width': linewidth,
        '_summaryThreshold': threshold,
        '_summaryEdgeItems': edgeitems,
        '_float_output_precision': precision,
        '_nan_str': nanstr,
        '_inf_str': infstr,
    }
    for name, value in updates.items():
        if value is not None:
            module_ns[name] = value
    if suppress is not None:
        # coerce to a real bool, matching the original ``not not suppress``
        module_ns['_float_output_suppress_small'] = bool(suppress)
    # ... except `formatter`, which is unconditionally replaced.
    module_ns['_formatter'] = formatter
def get_printoptions():
    """
    Return the current print options as a dict.

    Keys are ``precision``, ``threshold``, ``edgeitems``, ``linewidth``,
    ``suppress``, ``nanstr``, ``infstr`` and ``formatter``; see
    `set_printoptions` for their meanings.

    See Also
    --------
    set_printoptions, set_string_function
    """
    return {
        'precision': _float_output_precision,
        'threshold': _summaryThreshold,
        'edgeitems': _summaryEdgeItems,
        'linewidth': _line_width,
        'suppress': _float_output_suppress_small,
        'nanstr': _nan_str,
        'infstr': _inf_str,
        'formatter': _formatter,
    }
def _leading_trailing(a):
    """Return only the edge items of `a`, recursively over dimensions,
    concatenated into one array (used when summarizing a large array)."""
    from . import numeric as _nc
    edge = _summaryEdgeItems
    if a.ndim == 1:
        if len(a) <= 2*edge:
            return a
        return _nc.concatenate((a[:edge], a[-edge:]))
    if len(a) > 2*edge:
        parts = [_leading_trailing(a[i]) for i in range(min(len(a), edge))]
        parts.extend(_leading_trailing(a[-i])
                     for i in range(min(len(a), edge), 0, -1))
    else:
        parts = [_leading_trailing(a[i]) for i in range(len(a))]
    return _nc.concatenate(tuple(parts))
def _boolFormatter(x):
if x:
return ' True'
else:
return 'False'
def repr_format(x):
    """Format `x` with repr(); used for string-like array elements."""
    return repr(x)
def _array2string(a, max_line_width, precision, suppress_small, separator=' ',
                  prefix="", formatter=None):
    """Build the string body for `a`; worker behind `array2string`.

    Falls back to the module-level print options for any of
    `max_line_width`, `precision`, `suppress_small` and `formatter` that
    is None, selects a per-dtype format function, and delegates the
    layout to `_formatArray`.
    """
    if max_line_width is None:
        max_line_width = _line_width
    if precision is None:
        precision = _float_output_precision
    if suppress_small is None:
        suppress_small = _float_output_suppress_small
    if formatter is None:
        formatter = _formatter
    if a.size > _summaryThreshold:
        # Too big to print in full: sample only the edge items so the
        # per-element formatters are sized from what actually gets shown.
        summary_insert = "..., "
        data = _leading_trailing(a)
    else:
        summary_insert = ""
        data = ravel(asarray(a))
    formatdict = {'bool': _boolFormatter,
                  'int': IntegerFormat(data),
                  'float': FloatFormat(data, precision, suppress_small),
                  'longfloat': LongFloatFormat(precision),
                  'complexfloat': ComplexFormat(data, precision,
                                                suppress_small),
                  'longcomplexfloat': LongComplexFormat(precision),
                  'datetime': DatetimeFormat(data),
                  'timedelta': TimedeltaFormat(data),
                  'numpystr': repr_format,
                  'str': str}
    if formatter is not None:
        # Apply group keys ('all', '*_kind') first, then individual type
        # keys, so the more specific setting wins.
        fkeys = [k for k in formatter.keys() if formatter[k] is not None]
        if 'all' in fkeys:
            for key in formatdict.keys():
                formatdict[key] = formatter['all']
        if 'int_kind' in fkeys:
            for key in ['int']:
                formatdict[key] = formatter['int_kind']
        if 'float_kind' in fkeys:
            for key in ['float', 'longfloat']:
                formatdict[key] = formatter['float_kind']
        if 'complex_kind' in fkeys:
            for key in ['complexfloat', 'longcomplexfloat']:
                formatdict[key] = formatter['complex_kind']
        if 'str_kind' in fkeys:
            for key in ['numpystr', 'str']:
                formatdict[key] = formatter['str_kind']
        for key in formatdict.keys():
            if key in fkeys:
                formatdict[key] = formatter[key]
    # find the right formatting function for the array
    dtypeobj = a.dtype.type
    if issubclass(dtypeobj, _nt.bool_):
        format_function = formatdict['bool']
    elif issubclass(dtypeobj, _nt.integer):
        # timedelta64 matches the integer branch, so test it first
        if issubclass(dtypeobj, _nt.timedelta64):
            format_function = formatdict['timedelta']
        else:
            format_function = formatdict['int']
    elif issubclass(dtypeobj, _nt.floating):
        if issubclass(dtypeobj, _nt.longfloat):
            format_function = formatdict['longfloat']
        else:
            format_function = formatdict['float']
    elif issubclass(dtypeobj, _nt.complexfloating):
        if issubclass(dtypeobj, _nt.clongfloat):
            format_function = formatdict['longcomplexfloat']
        else:
            format_function = formatdict['complexfloat']
    elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)):
        format_function = formatdict['numpystr']
    elif issubclass(dtypeobj, _nt.datetime64):
        format_function = formatdict['datetime']
    else:
        # object arrays and anything unrecognized fall back to repr()
        format_function = formatdict['numpystr']
    # skip over "["
    next_line_prefix = " "
    # skip over array(
    next_line_prefix += " "*len(prefix)
    # [:-1] drops the trailing newline emitted by _formatArray
    lst = _formatArray(a, format_function, len(a.shape), max_line_width,
                       next_line_prefix, separator,
                       _summaryEdgeItems, summary_insert)[:-1]
    return lst
def _convert_arrays(obj):
    """Recursively convert ndarrays inside the tuple `obj` to lists."""
    from . import numeric as _nc
    converted = []
    for item in obj:
        if isinstance(item, _nc.ndarray):
            item = item.tolist()
        elif isinstance(item, tuple):
            item = _convert_arrays(item)
        converted.append(item)
    return tuple(converted)
def array2string(a, max_line_width=None, precision=None,
                 suppress_small=None, separator=' ', prefix="",
                 style=repr, formatter=None):
    """
    Return a string representation of an array.

    Parameters
    ----------
    a : ndarray
        Input array.
    max_line_width : int, optional
        Maximum number of columns the string should span; line breaks
        are inserted between array elements.
    precision : int, optional
        Floating point precision (default: current print option).
    suppress_small : bool, optional
        Represent very small numbers as zero.
    separator : str, optional
        Inserted between elements.
    prefix : str, optional
        The array is typically printed as
        ``'prefix(' + array2string(a) + ')'``; ``len(prefix)`` is used
        to align continuation lines.
    style : function, optional
        Called on the scalar item for 0-D arrays; must return a string.
    formatter : dict of callables, optional
        Per-type formatting overrides; see `set_printoptions`.

    Returns
    -------
    array_str : str
        String representation of the array.

    See Also
    --------
    array_str, array_repr, set_printoptions, get_printoptions
    """
    if a.shape == ():
        # 0-D array: format the scalar item with `style`; record scalars
        # (tuples) get their nested ndarrays converted to lists first.
        item = a.item()
        if isinstance(item, tuple):
            item = _convert_arrays(item)
        return style(item)
    if reduce(product, a.shape) == 0:
        # Any zero in the shape means there are no elements to print.
        return "[]"
    return _array2string(a, max_line_width, precision, suppress_small,
                         separator, prefix, formatter=formatter)
def _extendLine(s, line, word, max_line_len, next_line_prefix):
if len(line.rstrip()) + len(word.rstrip()) >= max_line_len:
s += line.rstrip() + "\n"
line = next_line_prefix
line += word
return s, line
def _formatArray(a, format_function, rank, max_line_len,
                 next_line_prefix, separator, edge_items, summary_insert):
    """formatArray is designed for two modes of operation:

    1. Full output

    2. Summarized output

    Renders `a` recursively, one dimension per call; `rank` counts the
    dimensions left, `summary_insert` is "..., " when summarizing (else
    ""), and `edge_items` is how many leading/trailing items to show per
    dimension in that case.
    """
    if rank == 0:
        # Scalar (0-d) case: str() of the item, converting any nested
        # ndarrays in record tuples to lists first.
        obj = a.item()
        if isinstance(obj, tuple):
            obj = _convert_arrays(obj)
        return str(obj)
    if summary_insert and 2*edge_items < len(a):
        leading_items = edge_items
        trailing_items = edge_items
        summary_insert1 = summary_insert
    else:
        leading_items = 0
        trailing_items = len(a)
        summary_insert1 = ""
    if rank == 1:
        # Innermost dimension: join formatted elements, wrapping lines
        # via _extendLine; the last element gets no separator.
        # NOTE(review): a[-1] assumes len(a) > 0 — empty arrays are
        # short-circuited to "[]" by array2string before reaching here.
        s = ""
        line = next_line_prefix
        for i in range(leading_items):
            word = format_function(a[i]) + separator
            s, line = _extendLine(s, line, word, max_line_len, next_line_prefix)
        if summary_insert1:
            s, line = _extendLine(s, line, summary_insert1, max_line_len, next_line_prefix)
        for i in range(trailing_items, 1, -1):
            word = format_function(a[-i]) + separator
            s, line = _extendLine(s, line, word, max_line_len, next_line_prefix)
        word = format_function(a[-1])
        s, line = _extendLine(s, line, word, max_line_len, next_line_prefix)
        s += line + "]\n"
        s = '[' + s[len(next_line_prefix):]
    else:
        # Outer dimensions: recurse with one extra space of indentation,
        # separating sub-blocks with rank-1 blank lines.
        s = '['
        sep = separator.rstrip()
        for i in range(leading_items):
            if i > 0:
                s += next_line_prefix
            s += _formatArray(a[i], format_function, rank-1, max_line_len,
                              " " + next_line_prefix, separator, edge_items,
                              summary_insert)
            s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1, 1)
        if summary_insert1:
            s += next_line_prefix + summary_insert1 + "\n"
        for i in range(trailing_items, 1, -1):
            if leading_items or i != trailing_items:
                s += next_line_prefix
            s += _formatArray(a[-i], format_function, rank-1, max_line_len,
                              " " + next_line_prefix, separator, edge_items,
                              summary_insert)
            s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1, 1)
        if leading_items or trailing_items > 1:
            s += next_line_prefix
        s += _formatArray(a[-1], format_function, rank-1, max_line_len,
                          " " + next_line_prefix, separator, edge_items,
                          summary_insert).rstrip()+']\n'
    return s
class FloatFormat(object):
    """Formatter for float array elements, sized from the data so that a
    column of values lines up; switches to exponential notation for very
    large or very small magnitudes."""
    def __init__(self, data, precision, suppress_small, sign=False):
        # data: 1-d array of the values that will actually be printed,
        # used only to size the format.  sign=True forces a leading '+'
        # (used by ComplexFormat for the imaginary part).
        self.precision = precision
        self.suppress_small = suppress_small
        self.sign = sign
        self.exp_format = False
        self.large_exponent = False
        self.max_str_len = 0
        try:
            self.fillFormat(data)
        except (TypeError, NotImplementedError):
            # if reduce(data) fails, this instance will not be called, just
            # instantiated in formatdict.
            pass
    def fillFormat(self, data):
        # Choose the %-format and field width from the data's value range.
        from . import numeric as _nc
        with _nc.errstate(all='ignore'):
            special = isnan(data) | isinf(data)
            valid = not_equal(data, 0) & ~special
            non_zero = absolute(data.compress(valid))
            if len(non_zero) == 0:
                max_val = 0.
                min_val = 0.
            else:
                max_val = maximum.reduce(non_zero)
                min_val = minimum.reduce(non_zero)
            # Use exponential form for huge values, or for a wide/small
            # dynamic range unless small values are being suppressed.
            if max_val >= 1.e8:
                self.exp_format = True
            if not self.suppress_small and (min_val < 0.0001
                    or max_val/min_val > 1000.):
                self.exp_format = True
        if self.exp_format:
            # 3-digit exponents need one extra column.
            self.large_exponent = 0 < min_val < 1e-99 or max_val >= 1e100
            self.max_str_len = 8 + self.precision
            if self.large_exponent:
                self.max_str_len += 1
            if self.sign:
                format = '%+'
            else:
                format = '%'
            format = format + '%d.%de' % (self.max_str_len, self.precision)
        else:
            # Fixed-point: trim precision down to the fractional digits
            # actually needed by the data.
            format = '%%.%df' % (self.precision,)
            if len(non_zero):
                precision = max([_digits(x, self.precision, format)
                                 for x in non_zero])
            else:
                precision = 0
            precision = min(self.precision, precision)
            self.max_str_len = len(str(int(max_val))) + precision + 2
            if _nc.any(special):
                # leave room for the nan/inf strings (plus sign for -inf)
                self.max_str_len = max(self.max_str_len,
                                       len(_nan_str),
                                       len(_inf_str)+1)
            if self.sign:
                format = '%#+'
            else:
                format = '%#'
            format = format + '%d.%df' % (self.max_str_len, precision)
        # Right-justified fallback format used for nan/inf values.
        self.special_fmt = '%%%ds' % (self.max_str_len,)
        self.format = format
    def __call__(self, x, strip_zeros=True):
        # strip_zeros=False keeps trailing zeros (ComplexFormat uses it
        # so real and imaginary columns stay aligned).
        from . import numeric as _nc
        with _nc.errstate(invalid='ignore'):
            if isnan(x):
                if self.sign:
                    return self.special_fmt % ('+' + _nan_str,)
                else:
                    return self.special_fmt % (_nan_str,)
            elif isinf(x):
                if x > 0:
                    if self.sign:
                        return self.special_fmt % ('+' + _inf_str,)
                    else:
                        return self.special_fmt % (_inf_str,)
                else:
                    return self.special_fmt % ('-' + _inf_str,)
        s = self.format % x
        if self.large_exponent:
            # 3-digit exponent
            expsign = s[-3]
            if expsign == '+' or expsign == '-':
                # pad a 2-digit exponent to 3 digits with a leading '0'
                s = s[1:-2] + '0' + s[-2:]
        elif self.exp_format:
            # 2-digit exponent
            if s[-3] == '0':
                s = ' ' + s[:-3] + s[-2:]
        elif strip_zeros:
            # replace stripped zeros with spaces to keep the field width
            z = s.rstrip('0')
            s = z + ' '*(len(s)-len(z))
        return s
def _digits(x, precision, format):
if precision > 0:
s = format % x
z = s.rstrip('0')
return precision - len(s) + len(z)
else:
return 0
class IntegerFormat(object):
    """Formatter for integer elements, right-justified to the widest
    value (largest or smallest) present in `data`."""
    def __init__(self, data):
        try:
            largest = maximum.reduce(data)
            smallest = minimum.reduce(data)
            width = max(len(str(largest)), len(str(smallest)))
            self.format = '%' + str(width) + 'd'
        except (TypeError, NotImplementedError):
            # if reduce(data) fails, this instance will not be called, just
            # instantiated in formatdict.
            pass
        except ValueError:
            # this occurs when everything is NA
            pass
    def __call__(self, x):
        # Values outside the platform int range cannot go through '%d'.
        if _MININT < x < _MAXINT:
            return self.format % x
        return "%s" % x
class LongFloatFormat(object):
    """Formatter for long-double elements; non-negative values get a
    leading space (or '+' when sign=True) so signs roughly align."""
    # XXX Have to add something to determine the width to use a la FloatFormat
    # Right now, things won't line up properly
    def __init__(self, precision, sign=False):
        self.precision = precision
        self.sign = sign
    def __call__(self, x):
        lead = '+' if self.sign else ' '
        if isnan(x):
            return lead + _nan_str
        if isinf(x):
            if x > 0:
                return lead + _inf_str
            return '-' + _inf_str
        if x >= 0:
            return lead + format_longfloat(x, self.precision)
        return format_longfloat(x, self.precision)
class LongComplexFormat(object):
    """Formatter for long-double complex elements: real part followed by
    an always-signed imaginary part and a trailing 'j'."""
    def __init__(self, precision):
        self.real_format = LongFloatFormat(precision)
        self.imag_format = LongFloatFormat(precision, sign=True)
    def __call__(self, x):
        return self.real_format(x.real) + self.imag_format(x.imag) + 'j'
class ComplexFormat(object):
    """Formatter for complex elements built from two FloatFormats; the
    imaginary part always carries an explicit sign."""
    def __init__(self, x, precision, suppress_small):
        self.real_format = FloatFormat(x.real, precision, suppress_small)
        self.imag_format = FloatFormat(x.imag, precision, suppress_small,
                                       sign=True)
    def __call__(self, x):
        real_s = self.real_format(x.real, strip_zeros=False)
        imag_s = self.imag_format(x.imag, strip_zeros=False)
        if self.imag_format.exp_format:
            imag_s = imag_s + 'j'
        else:
            # place 'j' before the padding left by stripped trailing zeros
            trimmed = imag_s.rstrip('0')
            imag_s = trimmed + 'j' + ' '*(len(imag_s) - len(trimmed))
        return real_s + imag_s
class DatetimeFormat(object):
    """Formatter for datetime64 elements, rendered as a quoted ISO
    string via `datetime_as_string`."""
    def __init__(self, x, unit=None, timezone=None, casting='same_kind'):
        if unit is None:
            # Take the unit from the array's dtype when it is datetime64.
            unit = datetime_data(x.dtype)[0] if x.dtype.kind == 'M' else 's'
        self.timezone = 'naive' if timezone is None else timezone
        self.unit = unit
        self.casting = casting
    def __call__(self, x):
        stamp = datetime_as_string(x, unit=self.unit,
                                   timezone=self.timezone,
                                   casting=self.casting)
        return "'%s'" % stamp
class TimedeltaFormat(object):
    """Formatter for timedelta64 elements, printed as right-justified
    integer counts with NaT rendered as 'NaT'."""
    def __init__(self, data):
        if data.dtype.kind == 'm':
            nat_value = array(['NaT'], dtype=data.dtype)[0]
            int_dtype = dtype(data.dtype.byteorder + 'i8')
            int_view = data.view(int_dtype)
            finite = int_view[not_equal(int_view, nat_value.view(int_dtype))]
            if len(finite) > 0:
                # width of the widest non-NaT element
                width = max(len(str(maximum.reduce(finite))),
                            len(str(minimum.reduce(finite))))
            else:
                width = 0
            if len(finite) < len(data):
                # data contains a NaT; "'NaT'" needs five columns
                width = max(width, 5)
            self.format = '%' + str(width) + 'd'
            self._nat = "'NaT'".rjust(width)
    def __call__(self, x):
        # TODO: After NAT == NAT deprecation should be simplified:
        # adding 1 leaves the int64 view unchanged only for NaT, so this
        # detects NaT without comparing it to itself.
        if (x + 1).view('i8') == x.view('i8'):
            return self._nat
        else:
            return self.format % x.astype('i8')
| {
"repo_name": "gmcastil/numpy",
"path": "numpy/core/arrayprint.py",
"copies": "1",
"size": "25872",
"license": "bsd-3-clause",
"hash": 4814213765170498000,
"line_mean": 33.0421052632,
"line_max": 91,
"alpha_frac": 0.5441017316,
"autogenerated": false,
"ratio": 3.9571734475374734,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9997833370623825,
"avg_score": 0.000688361702729565,
"num_lines": 760
} |
"""Array printing function
"""
from __future__ import absolute_import, division, print_function
__all__ = ["array2string", "set_printoptions", "get_printoptions"]
__docformat__ = 'restructuredtext'
from ...py2help import xrange
#
# Written by Konrad Hinsen <hinsenk@ere.umontreal.ca>
# last revision: 1996-3-13
# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details)
# and by Perry Greenfield 2000-4-1 for numarray
# and by Travis Oliphant 2005-8-22 for numpy
# and by Oscar Villellas 2013-4-30 for blaze
# and by Andy R. Terrel 2013-12-17 for blaze
import sys
# import numerictypes as _nt
# from umath import maximum, minimum, absolute, not_equal, isnan, isinf
import numpy as np
import numpy.core.umath as _um
import datashape
from datashape import Fixed, Var
from ...datadescriptor import IDataDescriptor, dd_as_py
# These are undesired dependencies:
from numpy import ravel, maximum, minimum, absolute
import inspect
def _dump_data_info(x, ident=None):
    """Debug helper: print the kind of `x` (data descriptor, ndarray or
    unknown) tagged with `ident` (caller's line number by default)."""
    if ident is None:
        ident = inspect.currentframe().f_back.f_lineno
    if isinstance(x, IDataDescriptor):
        kind = 'DATA DESCRIPTOR'
    elif isinstance(x, np.ndarray):
        kind = 'NUMPY ARRAY'
    else:
        kind = 'UNKNOWN'
    print('-> %s: %s: %s' % (ident, kind, repr(x)))
def product(x, y):
    """Binary multiply; reduction helper for computing total array size."""
    result = x * y
    return result
def isnan(x):
    """NaN test that also accepts blaze data descriptors by converting
    them to Python values first."""
    # hacks to remove when isnan/isinf are available for data descriptors
    if isinstance(x, IDataDescriptor):
        x = dd_as_py(x)
    return _um.isnan(x)
def isinf(x):
    """Infinity test that also accepts blaze data descriptors by
    converting them to Python values first."""
    if isinstance(x, IDataDescriptor):
        x = dd_as_py(x)
    return _um.isinf(x)
def not_equal(x, val):
    """Elementwise ``x != val`` that also accepts blaze data descriptors.

    Mirrors `isnan`/`isinf` above: a data descriptor is converted to a
    Python value first, then compared with numpy's ufunc.
    """
    if isinstance(x, IDataDescriptor):
        # BUG fix: the original called _um.not_equal(dd_as_py(x)) without
        # `val`; not_equal is a binary ufunc, so that raised TypeError.
        return _um.not_equal(dd_as_py(x), val)
    else:
        return _um.not_equal(x, val)
# Module-level printing state, mutated in place by set_printoptions().
# repr N leading and trailing items of each dimension
_summaryEdgeItems = 3
# total items > triggers array summarization
_summaryThreshold = 1000
_float_output_precision = 8           # digits after the decimal point for floats
_float_output_suppress_small = False  # print tiny floats as 0 instead of exp form
_line_width = 75          # max characters per printed line
_nan_str = 'nan'          # text used for not-a-number
_inf_str = 'inf'          # text used for infinity
_formatter = None  # formatting function for array elements
# reduce() stopped being a builtin in Python 3; Python 2 keeps the builtin.
if sys.version_info[0] >= 3:
    from functools import reduce
def set_printoptions(precision=None, threshold=None, edgeitems=None,
                     linewidth=None, suppress=None,
                     nanstr=None, infstr=None,
                     formatter=None):
    """
    Set printing options.

    These options determine the way floating point numbers and arrays
    are displayed by this module.

    Parameters
    ----------
    precision : int, optional
        Digits of precision for floating point output (default 8).
    threshold : int, optional
        Total number of array elements which trigger summarization
        rather than full repr (default 1000).
    edgeitems : int, optional
        Number of array items in summary at beginning and end of
        each dimension (default 3).
    linewidth : int, optional
        Characters per line for the purpose of inserting line breaks
        (default 75).
    suppress : bool, optional
        Whether to suppress printing of small floating point values
        using scientific notation (default False).
    nanstr : str, optional
        String representation of floating point not-a-number (default nan).
    infstr : str, optional
        String representation of floating point infinity (default inf).
    formatter : dict of callables, optional
        Maps type keys ('bool', 'int', 'float', 'complexfloat',
        'longcomplexfloat', 'numpystr', 'str') or group keys ('all',
        'int_kind', 'float_kind', 'complex_kind', 'str_kind') to
        callables returning strings.  Unlike the other options,
        `formatter` is always reset by a call to this function, even
        when it is None.

    See Also
    --------
    get_printoptions, set_string_function, array2string
    """
    global _summaryThreshold, _summaryEdgeItems, _float_output_precision
    global _line_width, _float_output_suppress_small, _nan_str, _inf_str
    global _formatter
    namespace = globals()
    # Overwrite only the options that were explicitly passed ...
    passed = {
        '_line_width': linewidth,
        '_summaryThreshold': threshold,
        '_summaryEdgeItems': edgeitems,
        '_float_output_precision': precision,
        '_nan_str': nanstr,
        '_inf_str': infstr,
    }
    for name, value in passed.items():
        if value is not None:
            namespace[name] = value
    if suppress is not None:
        # coerce to a real bool, matching the original ``not not suppress``
        namespace['_float_output_suppress_small'] = bool(suppress)
    # ... except `formatter`, which is unconditionally replaced.
    namespace['_formatter'] = formatter
def get_printoptions():
    """
    Return the current print options as a dict.

    Keys are ``precision``, ``threshold``, ``edgeitems``, ``linewidth``,
    ``suppress``, ``nanstr``, ``infstr`` and ``formatter``; see
    `set_printoptions` for their meanings.

    See Also
    --------
    set_printoptions, set_string_function
    """
    return {
        'precision': _float_output_precision,
        'threshold': _summaryThreshold,
        'edgeitems': _summaryEdgeItems,
        'linewidth': _line_width,
        'suppress': _float_output_suppress_small,
        'nanstr': _nan_str,
        'infstr': _inf_str,
        'formatter': _formatter,
    }
def _extract_summary(a):
return l
def _leading_trailing(a):
    """Return nested lists holding only the edge items of `a` (the first
    and last `_summaryEdgeItems` along each dimension), with leaf values
    converted to Python objects via `dd_as_py`.

    Note: the original function-local ``import numpy.core.numeric`` was
    unused and has been dropped.
    """
    if len(a.dshape.shape) == 1:
        if len(a) > 2*_summaryEdgeItems:
            b = [dd_as_py(a[i]) for i in range(_summaryEdgeItems)]
            b.extend([dd_as_py(a[i]) for i in range(-_summaryEdgeItems, 0)])
        else:
            b = dd_as_py(a)
    else:
        if len(a) > 2*_summaryEdgeItems:
            b = [_leading_trailing(a[i])
                 for i in range(_summaryEdgeItems)]
            # BUG fix: the original indexed ``a[-i]`` with i in
            # range(-_summaryEdgeItems, 0), i.e. a[3], a[2], a[1] -- the
            # leading items again, in reverse -- instead of the trailing
            # items.  Index ``a[i]`` with negative i, exactly like the
            # 1-d branch above.
            b.extend([_leading_trailing(a[i])
                      for i in range(-_summaryEdgeItems, 0)])
        else:
            b = [_leading_trailing(a[i]) for i in range(0, len(a))]
    return b
def _boolFormatter(x):
if x:
return ' True'
else:
return 'False'
def repr_format(x):
    """Format `x` with repr(); used for string-like array elements."""
    return repr(x)
def _apply_formatter(format_dict, formatter):
fkeys = [k for k in formatter.keys() if formatter[k] is not None]
if 'all' in fkeys:
for key in formatdict.keys():
formatdict[key] = formatter['all']
if 'int_kind' in fkeys:
for key in ['int']:
formatdict[key] = formatter['int_kind']
if 'float_kind' in fkeys:
for key in ['float']:
formatdict[key] = formatter['float_kind']
if 'complex_kind' in fkeys:
for key in ['complexfloat', 'longcomplexfloat']:
formatdict[key] = formatter['complex_kind']
if 'str_kind' in fkeys:
for key in ['numpystr', 'str']:
formatdict[key] = formatter['str_kind']
for key in formatdict.keys():
if key in fkeys:
formatdict[key] = formatter[key]
def _choose_format(formatdict, ds):
    """Pick the formatting callable for datashape measure `ds`.

    Strings and any unrecognized measure both fall back to 'numpystr'.
    """
    if isinstance(ds, datashape.DataShape):
        # reduce a full shape to its measure (last component)
        ds = ds[-1]
    integer_types = [datashape.int8, datashape.int16,
                     datashape.int32, datashape.int64,
                     datashape.uint8, datashape.uint16,
                     datashape.uint32, datashape.uint64]
    if ds == datashape.bool_:
        return formatdict['bool']
    if ds in integer_types:
        return formatdict['int']
    if ds in [datashape.float32, datashape.float64]:
        return formatdict['float']
    if ds in [datashape.complex_float32, datashape.complex_float64]:
        return formatdict['complexfloat']
    # strings and everything else use the repr-based formatter
    return formatdict['numpystr']
def _array2string(a, shape, dtype, max_line_width, precision,
                  suppress_small, separator=' ', prefix="", formatter=None):
    """Build the string body for blaze array `a`; worker behind
    `array2string`.

    `shape`/`dtype` are the datashape components already split off by
    the caller.  Falls back to the module-level print options for any
    layout argument that is None.
    """
    # A Var (variable-length) dimension makes the total size unknowable
    # up front; -1 disables summarization below.
    if any(isinstance(s, Var) for s in shape):
        dim_size = -1
    else:
        dim_size = reduce(product, shape, 1)
    if max_line_width is None:
        max_line_width = _line_width
    if precision is None:
        precision = _float_output_precision
    if suppress_small is None:
        suppress_small = _float_output_suppress_small
    if formatter is None:
        formatter = _formatter
    if dim_size > _summaryThreshold:
        # Summarize: size the element formatters from the edge items only.
        summary_insert = "..., "
        data = ravel(np.array(_leading_trailing(a)))
    else:
        summary_insert = ""
        data = ravel(np.array(dd_as_py(a)))
    formatdict = {'bool': _boolFormatter,
                  'int': IntegerFormat(data),
                  'float': FloatFormat(data, precision, suppress_small),
                  'complexfloat': ComplexFormat(data, precision,
                                                suppress_small),
                  'numpystr': repr_format,
                  'str': str}
    if formatter is not None:
        _apply_formatter(formatdict, formatter)
    # NOTE(review): guards against a per-array '_format' attribute that
    # this code does not support; stripped when run under ``-O``.
    assert(not hasattr(a, '_format'))
    # find the right formatting function for the array
    format_function = _choose_format(formatdict, dtype)
    # skip over "["
    next_line_prefix = " "
    # skip over array(
    next_line_prefix += " "*len(prefix)
    # rstrip() drops the trailing newline emitted by _formatArray
    lst = _formatArray(a, format_function, len(shape), max_line_width,
                       next_line_prefix, separator,
                       _summaryEdgeItems, summary_insert).rstrip()
    return lst
def _convert_arrays(obj):
import numpy.core.numeric as _nc
newtup = []
for k in obj:
if isinstance(k, _nc.ndarray):
k = k.tolist()
elif isinstance(k, tuple):
k = _convert_arrays(k)
newtup.append(k)
return tuple(newtup)
def array2string(a, max_line_width=None, precision=None,
                 suppress_small=None, separator=' ', prefix="",
                 style=repr, formatter=None):
    """
    Return a string representation of a blaze array.

    Parameters
    ----------
    a : blaze array
        Input array (anything exposing a ``dshape``).
    max_line_width : int, optional
        Maximum number of columns the string should span; line breaks
        are inserted between array elements.
    precision : int, optional
        Floating point precision (default: current print option).
    suppress_small : bool, optional
        Represent very small numbers as zero.
    separator : str, optional
        Inserted between elements.
    prefix : str, optional
        The array is typically printed as
        ``'prefix(' + array2string(a) + ')'``; ``len(prefix)`` is used
        to align continuation lines.
    style : function, optional
        Accepted for numpy API compatibility; not used by this
        implementation.
    formatter : dict of callables, optional
        Per-type formatting overrides; see `set_printoptions`.

    Returns
    -------
    str
        String representation of the array.

    See Also
    --------
    set_printoptions, get_printoptions
    """
    # Split the datashape into dimensions and measure; Fixed dimensions
    # become plain ints so sizes can be multiplied out.
    dims, measure = a.dshape[:-1], a.dshape[-1]
    dims = tuple(int(d) if isinstance(d, Fixed) else d for d in dims)
    return _array2string(a, dims, measure, max_line_width,
                         precision, suppress_small,
                         separator, prefix, formatter=formatter)
def _extendLine(s, line, word, max_line_len, next_line_prefix):
if len(line.rstrip()) + len(word.rstrip()) >= max_line_len:
s += line.rstrip() + "\n"
line = next_line_prefix
line += word
return s, line
def _formatArray(a, format_function, rank, max_line_len,
                 next_line_prefix, separator, edge_items, summary_insert):
    """Recursively render data descriptor *a* as a bracketed string.

    Two modes of operation:

    1. Full output -- every element is formatted.
    2. Summarized output -- when *summary_insert* is non-empty and the
       dimension is long enough, only *edge_items* leading and trailing
       items are shown with *summary_insert* (e.g. ``"..., "``) between.

    ``format_function`` formats a single scalar; *rank* is the number of
    remaining dimensions; *next_line_prefix* is the indentation used for
    wrapped/nested lines.
    """
    # Scalars: format directly, no brackets.
    if rank == 0:
        return format_function(dd_as_py(a)).strip()
    if summary_insert and 2*edge_items < len(a):
        leading_items = edge_items
        trailing_items = edge_items
        summary_insert1 = summary_insert
    else:
        # Full output: no leading edge; "trailing" covers the whole axis.
        leading_items, trailing_items, summary_insert1 = 0, len(a), ""
    if rank == 1:
        # Innermost dimension: lay scalars out on wrapped lines.
        s = ""
        line = next_line_prefix
        for i in xrange(leading_items):
            word = format_function(dd_as_py(a[i])) + separator
            s, line = _extendLine(s, line, word, max_line_len,
                                  next_line_prefix)
        if summary_insert1:
            s, line = _extendLine(s, line, summary_insert1,
                                  max_line_len, next_line_prefix)
        # Count down so a[-i] walks the trailing items left to right;
        # the final element is handled separately (no trailing separator).
        for i in xrange(trailing_items, 1, -1):
            word = format_function(dd_as_py(a[-i])) + separator
            s, line = _extendLine(s, line, word, max_line_len,
                                  next_line_prefix)
        if len(a) > 0:
            word = format_function(dd_as_py(a[-1]))
            s, line = _extendLine(s, line, word, max_line_len, next_line_prefix)
        s += line + "]\n"
        # Replace the indentation of the first line with the opening bracket.
        s = '[' + s[len(next_line_prefix):]
    else:
        # Outer dimensions: recurse per sub-array, one (or more, for
        # higher ranks) newline between siblings.
        s = '['
        sep = separator.rstrip()
        for i in xrange(leading_items):
            if i > 0:
                s += next_line_prefix
            s += _formatArray(a[i], format_function, rank-1, max_line_len,
                              " " + next_line_prefix, separator, edge_items,
                              summary_insert)
            s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1, 1)
        if summary_insert1:
            s += next_line_prefix + summary_insert1 + "\n"
        for i in xrange(trailing_items, 1, -1):
            if leading_items or i != trailing_items:
                s += next_line_prefix
            s += _formatArray(a[-i], format_function, rank-1, max_line_len,
                              " " + next_line_prefix, separator, edge_items,
                              summary_insert)
            s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1, 1)
        if leading_items or trailing_items > 1:
            s += next_line_prefix
        s += _formatArray(a[-1], format_function, rank-1, max_line_len,
                          " " + next_line_prefix, separator, edge_items,
                          summary_insert).rstrip()+']\n'
    return s
class FloatFormat(object):
    """Fixed-width formatter for floating point data.

    Scans the data once (``fillFormat``) to choose between fixed-point
    and exponential notation and to compute a common field width, then
    formats individual scalars via ``__call__``.
    """
    def __init__(self, data, precision, suppress_small, sign=False):
        self.precision = precision
        self.suppress_small = suppress_small
        # sign=True forces an explicit leading '+' (used for imaginary parts).
        self.sign = sign
        self.exp_format = False
        self.large_exponent = False
        self.max_str_len = 0
        # Only numeric kinds can be scanned; other dtypes leave the
        # formatter unconfigured (it is then never called).
        if data.dtype.kind in ['f', 'i', 'u']:
            self.fillFormat(data)
    def fillFormat(self, data):
        """Inspect *data* and build the %-style format string."""
        import numpy.core.numeric as _nc
        # Silence warnings from nan/inf comparisons during the scan.
        errstate = _nc.seterr(all='ignore')
        try:
            special = isnan(data) | isinf(data)
            valid = not_equal(data, 0) & ~special
            non_zero = absolute(data.compress(valid))
            if len(non_zero) == 0:
                max_val = 0.
                min_val = 0.
            else:
                max_val = maximum.reduce(non_zero)
                min_val = minimum.reduce(non_zero)
                # Very large magnitudes, or a wide dynamic range with
                # small values, switch to exponential notation.
                if max_val >= 1.e8:
                    self.exp_format = True
                if not self.suppress_small and (min_val < 0.0001
                                                or max_val/min_val > 1000.):
                    self.exp_format = True
        finally:
            _nc.seterr(**errstate)
        if self.exp_format:
            # Three-digit exponents need one extra column.
            self.large_exponent = 0 < min_val < 1e-99 or max_val >= 1e100
            self.max_str_len = 8 + self.precision
            if self.large_exponent:
                self.max_str_len += 1
            if self.sign:
                format = '%+'
            else:
                format = '%'
            format = format + '%d.%de' % (self.max_str_len, self.precision)
        else:
            # Fixed point: trim the requested precision down to the number
            # of digits actually significant in the data.
            format = '%%.%df' % (self.precision,)
            if len(non_zero):
                precision = max([_digits(x, self.precision, format)
                                 for x in non_zero])
            else:
                precision = 0
            precision = min(self.precision, precision)
            self.max_str_len = len(str(int(max_val))) + precision + 2
            # Make room for 'nan'/'inf' (and a possible '-') if present.
            if _nc.any(special):
                self.max_str_len = max(self.max_str_len,
                                       len(_nan_str),
                                       len(_inf_str)+1)
            if self.sign:
                format = '%#+'
            else:
                format = '%#'
            format = format + '%d.%df' % (self.max_str_len, precision)
        # Right-justified slot used for nan/inf strings.
        self.special_fmt = '%%%ds' % (self.max_str_len,)
        self.format = format
    def __call__(self, x, strip_zeros=True):
        """Format a single scalar *x* using the precomputed field width."""
        import numpy.core.numeric as _nc
        err = _nc.seterr(invalid='ignore')
        try:
            if isnan(x):
                if self.sign:
                    return self.special_fmt % ('+' + _nan_str,)
                else:
                    return self.special_fmt % (_nan_str,)
            elif isinf(x):
                if x > 0:
                    if self.sign:
                        return self.special_fmt % ('+' + _inf_str,)
                    else:
                        return self.special_fmt % (_inf_str,)
                else:
                    return self.special_fmt % ('-' + _inf_str,)
        finally:
            _nc.seterr(**err)
        s = self.format % x
        if self.large_exponent:
            # 3-digit exponent: pad a 2-digit exponent with a leading '0'.
            expsign = s[-3]
            if expsign == '+' or expsign == '-':
                s = s[1:-2] + '0' + s[-2:]
        elif self.exp_format:
            # 2-digit exponent: drop a redundant leading exponent zero.
            if s[-3] == '0':
                s = ' ' + s[:-3] + s[-2:]
        elif strip_zeros:
            # Replace trailing zeros with spaces to keep column alignment.
            z = s.rstrip('0')
            s = z + ' '*(len(s)-len(z))
        return s
def _digits(x, precision, format):
s = format % x
z = s.rstrip('0')
return precision - len(s) + len(z)
# Bounds used by IntegerFormat: values inside (_MININT, _MAXINT) get the
# fixed-width format, everything else falls back to plain str().
# NOTE(review): on Python 3 this caps at 32 bits, which is narrower than
# the Python 2 ``sys.maxint`` on 64-bit builds -- confirm this is intended.
if sys.version_info >= (3, 0):
    _MAXINT = 2**32 - 1
    _MININT = -2**32
else:
    _MAXINT = sys.maxint
    _MININT = -sys.maxint-1
class IntegerFormat(object):
    """Fixed-width formatter for integer data.

    The field width is derived from the widest value in *data*.
    """
    def __init__(self, data):
        try:
            max_str_len = max(len(str(maximum.reduce(data))),
                              len(str(minimum.reduce(data))))
            self.format = '%' + str(max_str_len) + 'd'
        except (TypeError, NotImplementedError):
            # if reduce(data) fails, this instance will not be called, just
            # instantiated in formatdict.
            pass
        except ValueError:
            # this occurs when everything is NA
            pass
    def __call__(self, x):
        # Values outside the representable range fall back to plain str
        # formatting with no fixed width.
        if _MININT < x < _MAXINT:
            return self.format % x
        else:
            return "%s" % x
class ComplexFormat(object):
    """Formatter for complex data: real part followed by a signed
    imaginary part with a trailing ``j``."""
    def __init__(self, x, precision, suppress_small):
        self.real_format = FloatFormat(x.real, precision, suppress_small)
        # sign=True forces an explicit +/- so the imaginary part always
        # reads as an addition/subtraction.
        self.imag_format = FloatFormat(x.imag, precision, suppress_small,
                                       sign=True)
    def __call__(self, x):
        r = self.real_format(x.real, strip_zeros=False)
        i = self.imag_format(x.imag, strip_zeros=False)
        if not self.imag_format.exp_format:
            # Insert the 'j' before the padding left by stripped zeros so
            # column alignment is preserved.
            z = i.rstrip('0')
            i = z + 'j' + ' '*(len(i)-len(z))
        else:
            i = i + 'j'
        return r + i
def _test():
    """Ad-hoc smoke test: print a small 1-D array, then a 3-D array large
    enough to exercise the summarization path."""
    import blaze
    arr = blaze.array([2, 3, 4.0])
    print(arr.dshape)
    print(array2string(arr._data))
    arr = blaze.zeros('30, 30, 30, float32')
    print(arr.dshape)
    print(array2string(arr._data))
| {
"repo_name": "AbhiAgarwal/blaze",
"path": "blaze/io/_printing/_arrayprint.py",
"copies": "7",
"size": "23372",
"license": "bsd-3-clause",
"hash": -7550935390355388000,
"line_mean": 30.7554347826,
"line_max": 80,
"alpha_frac": 0.5571624166,
"autogenerated": false,
"ratio": 3.8108592858307517,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7868021702430751,
"avg_score": null,
"num_lines": null
} |
"""Array printing function
"""
from __future__ import absolute_import, division, print_function
from ...py2help import xrange
__all__ = ["array2string", "set_printoptions", "get_printoptions"]
__docformat__ = 'restructuredtext'
#
# Written by Konrad Hinsen <hinsenk@ere.umontreal.ca>
# last revision: 1996-3-13
# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details)
# and by Perry Greenfield 2000-4-1 for numarray
# and by Travis Oliphant 2005-8-22 for numpy
# and by Oscar Villellas 2013-4-30 for blaze
# and by Andy R. Terrel 2013-12-17 for blaze
import sys
# import numerictypes as _nt
# from umath import maximum, minimum, absolute, not_equal, isnan, isinf
import numpy as np
import numpy.core.umath as _um
import datashape
from datashape import Fixed, Var
from ...datadescriptor import IDataDescriptor, dd_as_py
# These are undesired dependencies:
from numpy import ravel, maximum, minimum, absolute
import inspect
def _dump_data_info(x, ident=None):
    """Debug helper: print what kind of object *x* is, tagged with *ident*.

    When *ident* is omitted, the caller's line number is used so output
    can be traced back to its call site.
    """
    if ident is None:
        ident = inspect.currentframe().f_back.f_lineno
    if isinstance(x, IDataDescriptor):
        kind = 'DATA DESCRIPTOR'
    elif isinstance(x, np.ndarray):
        kind = 'NUMPY ARRAY'
    else:
        kind = 'UNKNOWN'
    print('-> %s: %s: %s' % (ident, kind, repr(x)))
def product(x, y):
    """Binary multiply; used as the reducer when computing array sizes."""
    result = x * y
    return result
def isnan(x):
    """NaN test that also accepts blaze data descriptors.

    Hack to remove once isnan/isinf are available for data descriptors.
    """
    if not isinstance(x, IDataDescriptor):
        return _um.isnan(x)
    return _um.isnan(dd_as_py(x))
def isinf(x):
    """Infinity test that also accepts blaze data descriptors.

    Hack to remove once isnan/isinf are available for data descriptors.
    """
    if not isinstance(x, IDataDescriptor):
        return _um.isinf(x)
    return _um.isinf(dd_as_py(x))
def not_equal(x, val):
    """Element-wise ``x != val`` that also accepts blaze data descriptors.

    Hack to remove once comparison ufuncs handle data descriptors
    directly.
    """
    if isinstance(x, IDataDescriptor):
        # BUG FIX: the second operand was previously dropped
        # (``_um.not_equal(dd_as_py(x))``), which always raised because
        # numpy's not_equal is a binary ufunc.
        return _um.not_equal(dd_as_py(x), val)
    else:
        return _um.not_equal(x, val)
# Module-level print options.  All of these are mutated by
# ``set_printoptions`` and read back by ``get_printoptions``.
# repr N leading and trailing items of each dimension
_summaryEdgeItems = 3
# total items > triggers array summarization
_summaryThreshold = 1000
# digits of precision for floating point output
_float_output_precision = 8
# suppress scientific notation for small floats when True
_float_output_suppress_small = False
# characters per output line before wrapping
_line_width = 75
# textual representations of IEEE special values
_nan_str = 'nan'
_inf_str = 'inf'
_formatter = None  # formatting function for array elements
# Python 3 removed the ``reduce`` builtin; import it so the element-count
# reduction in ``_array2string`` works on both major versions.
if sys.version_info[0] >= 3:
    from functools import reduce
def set_printoptions(precision=None, threshold=None, edgeitems=None,
                     linewidth=None, suppress=None,
                     nanstr=None, infstr=None,
                     formatter=None):
    """Set module-level printing options.

    Parameters
    ----------
    precision : int, optional
        Digits of precision for floating point output (default 8).
    threshold : int, optional
        Total number of array elements which trigger summarization
        rather than full repr (default 1000).
    edgeitems : int, optional
        Number of array items in summary at beginning and end of
        each dimension (default 3).
    linewidth : int, optional
        Characters per line for the purpose of inserting line breaks
        (default 75).
    suppress : bool, optional
        Whether to suppress printing of small floating point values
        using scientific notation (default False).
    nanstr : str, optional
        String representation of floating point not-a-number
        (default nan).
    infstr : str, optional
        String representation of floating point infinity (default inf).
    formatter : dict of callables, optional
        Per-type formatting functions; see `array2string` for the
        recognized keys.

    Notes
    -----
    `formatter` is always reset with a call to `set_printoptions`; every
    other option is only changed when explicitly given.

    See Also
    --------
    get_printoptions, array2string
    """
    global _summaryThreshold, _summaryEdgeItems, _float_output_precision
    global _line_width, _float_output_suppress_small, _nan_str, _inf_str
    global _formatter
    if precision is not None:
        _float_output_precision = precision
    if threshold is not None:
        _summaryThreshold = threshold
    if edgeitems is not None:
        _summaryEdgeItems = edgeitems
    if linewidth is not None:
        _line_width = linewidth
    if suppress is not None:
        _float_output_suppress_small = bool(suppress)
    if nanstr is not None:
        _nan_str = nanstr
    if infstr is not None:
        _inf_str = infstr
    # Unconditional: calling with no formatter clears any previous one.
    _formatter = formatter
def get_printoptions():
    """Return the current print options as a dict.

    Keys: ``precision``, ``threshold``, ``edgeitems``, ``linewidth``,
    ``suppress``, ``nanstr``, ``infstr``, ``formatter``.  See
    `set_printoptions` for the meaning of each option.
    """
    return {'precision': _float_output_precision,
            'threshold': _summaryThreshold,
            'edgeitems': _summaryEdgeItems,
            'linewidth': _line_width,
            'suppress': _float_output_suppress_small,
            'nanstr': _nan_str,
            'infstr': _inf_str,
            'formatter': _formatter}
def _extract_summary(a):
    """Return the leading/trailing edge items of *a* for summarized output.

    BUG FIX: this previously returned the undefined name ``l`` and raised
    ``NameError`` on every call.  Delegate to ``_leading_trailing``, which
    implements the intended edge-item extraction.
    """
    return _leading_trailing(a)
def _leading_trailing(a):
    """Recursively collect the first and last ``_summaryEdgeItems`` items
    of each dimension of data descriptor *a*, as nested Python lists.

    Dimensions short enough to print in full are converted whole.
    """
    if len(a.dshape.shape) == 1:
        if len(a) > 2*_summaryEdgeItems:
            b = [dd_as_py(a[i]) for i in range(_summaryEdgeItems)]
            b.extend([dd_as_py(a[i]) for i in range(-_summaryEdgeItems, 0)])
        else:
            b = dd_as_py(a)
    else:
        if len(a) > 2*_summaryEdgeItems:
            b = [_leading_trailing(a[i])
                 for i in range(_summaryEdgeItems)]
            # BUG FIX: this previously indexed ``a[-i]`` with i in
            # range(-edge, 0), i.e. a[edge]..a[1] -- leading items again,
            # in reverse -- instead of the trailing items a[-edge]..a[-1].
            b.extend([_leading_trailing(a[i])
                      for i in range(-_summaryEdgeItems, 0)])
        else:
            b = [_leading_trailing(a[i]) for i in range(0, len(a))]
    return b
def _boolFormatter(x):
if x:
return ' True'
else:
return 'False'
def repr_format(x):
    """Format *x* via its ``repr`` (used for string-like elements)."""
    text = repr(x)
    return text
def _apply_formatter(format_dict, formatter):
fkeys = [k for k in formatter.keys() if formatter[k] is not None]
if 'all' in fkeys:
for key in formatdict.keys():
formatdict[key] = formatter['all']
if 'int_kind' in fkeys:
for key in ['int']:
formatdict[key] = formatter['int_kind']
if 'float_kind' in fkeys:
for key in ['float']:
formatdict[key] = formatter['float_kind']
if 'complex_kind' in fkeys:
for key in ['complexfloat', 'longcomplexfloat']:
formatdict[key] = formatter['complex_kind']
if 'str_kind' in fkeys:
for key in ['numpystr', 'str']:
formatdict[key] = formatter['str_kind']
for key in formatdict.keys():
if key in fkeys:
formatdict[key] = formatter[key]
def _choose_format(formatdict, ds):
    """Pick the formatting function from *formatdict* that matches the
    datashape measure *ds*.

    Strings and any unrecognized measure fall back to the repr-based
    ``'numpystr'`` formatter.
    """
    if isinstance(ds, datashape.DataShape):
        ds = ds[-1]
    integer_types = (datashape.int8, datashape.int16,
                     datashape.int32, datashape.int64,
                     datashape.uint8, datashape.uint16,
                     datashape.uint32, datashape.uint64)
    if ds == datashape.bool_:
        key = 'bool'
    elif ds in integer_types:
        key = 'int'
    elif ds in (datashape.float32, datashape.float64):
        key = 'float'
    elif ds in (datashape.complex_float32, datashape.complex_float64):
        key = 'complexfloat'
    else:
        key = 'numpystr'
    return formatdict[key]
def _array2string(a, shape, dtype, max_line_width, precision,
                  suppress_small, separator=' ', prefix="", formatter=None):
    """Core implementation behind ``array2string``.

    Resolves unspecified options from the module-level print options,
    builds the per-type formatter table, and delegates the actual layout
    to ``_formatArray``.
    """
    # Total element count decides whether output is summarized.  A
    # variable-length dimension makes the size unknowable, so use -1
    # (which can never exceed the threshold).
    if any(isinstance(s, Var) for s in shape):
        dim_size = -1
    else:
        dim_size = reduce(product, shape, 1)
    if max_line_width is None:
        max_line_width = _line_width
    if precision is None:
        precision = _float_output_precision
    if suppress_small is None:
        suppress_small = _float_output_suppress_small
    if formatter is None:
        formatter = _formatter
    if dim_size > _summaryThreshold:
        summary_insert = "..., "
        # Only convert the edge items when summarizing.
        data = ravel(np.array(_leading_trailing(a)))
    else:
        summary_insert = ""
        data = ravel(np.array(dd_as_py(a)))
    formatdict = {'bool': _boolFormatter,
                  'int': IntegerFormat(data),
                  'float': FloatFormat(data, precision, suppress_small),
                  'complexfloat': ComplexFormat(data, precision,
                                                suppress_small),
                  'numpystr': repr_format,
                  'str': str}
    if formatter is not None:
        _apply_formatter(formatdict, formatter)
    assert(not hasattr(a, '_format'))
    # find the right formatting function for the array
    format_function = _choose_format(formatdict, dtype)
    # skip over "["
    next_line_prefix = " "
    # skip over array(
    next_line_prefix += " "*len(prefix)
    lst = _formatArray(a, format_function, len(shape), max_line_width,
                       next_line_prefix, separator,
                       _summaryEdgeItems, summary_insert).rstrip()
    return lst
def _convert_arrays(obj):
import numpy.core.numeric as _nc
newtup = []
for k in obj:
if isinstance(k, _nc.ndarray):
k = k.tolist()
elif isinstance(k, tuple):
k = _convert_arrays(k)
newtup.append(k)
return tuple(newtup)
def array2string(a, max_line_width=None, precision=None,
                 suppress_small=None, separator=' ', prefix="",
                 style=repr, formatter=None):
    """
    Return a string representation of an array.
    Parameters
    ----------
    a : ndarray
        Input array.
    max_line_width : int, optional
        The maximum number of columns the string should span. Newline
        characters splits the string appropriately after array elements.
    precision : int, optional
        Floating point precision. Default is the current printing
        precision (usually 8), which can be altered using `set_printoptions`.
    suppress_small : bool, optional
        Represent very small numbers as zero. A number is "very small" if it
        is smaller than the current printing precision.
    separator : str, optional
        Inserted between elements.
    prefix : str, optional
        An array is typically printed as::
          'prefix(' + array2string(a) + ')'
        The length of the prefix string is used to align the
        output correctly.
    style : function, optional
        A function that accepts an ndarray and returns a string. Used only
        when the shape of `a` is equal to ``()``, i.e. for 0-D arrays.
    formatter : dict of callables, optional
        If not None, the keys should indicate the type(s) that the respective
        formatting function applies to. Callables should return a string.
        Types that are not specified (by their corresponding keys) are handled
        by the default formatters. Individual types for which a formatter
        can be set are::
            - 'bool'
            - 'int'
            - 'float'
            - 'complexfloat'
            - 'longcomplexfloat' : composed of two 128-bit floats
            - 'numpy_str' : types `numpy.string_` and `numpy.unicode_`
            - 'str' : all other strings
        Other keys that can be used to set a group of types at once are::
            - 'all' : sets all types
            - 'int_kind' : sets 'int'
            - 'float_kind' : sets 'float'
            - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
            - 'str_kind' : sets 'str' and 'numpystr'
    Returns
    -------
    array_str : str
        String representation of the array.
    Raises
    ------
    TypeError : if a callable in `formatter` does not return a string.
    See Also
    --------
    array_str, array_repr, set_printoptions, get_printoptions
    Notes
    -----
    If a formatter is specified for a certain type, the `precision` keyword is
    ignored for that type.
    Examples
    --------
    >>> x = np.array([1e-16,1,2,3])
    >>> print(np.array2string(x, precision=2, separator=',',
    ...                       suppress_small=True))
    [ 0., 1., 2., 3.]
    >>> x = np.arange(3.)
    >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})
    '[0.00 1.00 2.00]'
    >>> x = np.arange(3)
    >>> np.array2string(x, formatter={'int':lambda x: hex(x)})
    '[0x0L 0x1L 0x2L]'
    """
    # Split the datashape into the dimension part and the measure (dtype).
    # assumes ``a`` is a blaze data descriptor exposing ``dshape`` -- TODO
    # confirm against callers.
    shape, dtype = (a.dshape[:-1], a.dshape[-1])
    # Fixed dimensions become plain ints; Var dimensions pass through so
    # _array2string can detect them.
    shape = tuple(int(x) if isinstance(x, Fixed) else x for x in shape)
    lst = _array2string(a, shape, dtype, max_line_width,
                        precision, suppress_small,
                        separator, prefix, formatter=formatter)
    return lst
def _extendLine(s, line, word, max_line_len, next_line_prefix):
if len(line.rstrip()) + len(word.rstrip()) >= max_line_len:
s += line.rstrip() + "\n"
line = next_line_prefix
line += word
return s, line
def _formatArray(a, format_function, rank, max_line_len,
                 next_line_prefix, separator, edge_items, summary_insert):
    """Recursively render data descriptor *a* as a bracketed string.

    Two modes of operation:

    1. Full output -- every element is formatted.
    2. Summarized output -- when *summary_insert* is non-empty and the
       dimension is long enough, only *edge_items* leading and trailing
       items are shown with *summary_insert* (e.g. ``"..., "``) between.

    ``format_function`` formats a single scalar; *rank* is the number of
    remaining dimensions; *next_line_prefix* is the indentation used for
    wrapped/nested lines.
    """
    # Scalars: format directly, no brackets.
    if rank == 0:
        return format_function(dd_as_py(a)).strip()
    if summary_insert and 2*edge_items < len(a):
        leading_items = edge_items
        trailing_items = edge_items
        summary_insert1 = summary_insert
    else:
        # Full output: no leading edge; "trailing" covers the whole axis.
        leading_items, trailing_items, summary_insert1 = 0, len(a), ""
    if rank == 1:
        # Innermost dimension: lay scalars out on wrapped lines.
        s = ""
        line = next_line_prefix
        for i in xrange(leading_items):
            word = format_function(dd_as_py(a[i])) + separator
            s, line = _extendLine(s, line, word, max_line_len,
                                  next_line_prefix)
        if summary_insert1:
            s, line = _extendLine(s, line, summary_insert1,
                                  max_line_len, next_line_prefix)
        # Count down so a[-i] walks the trailing items left to right;
        # the final element is handled separately (no trailing separator).
        for i in xrange(trailing_items, 1, -1):
            word = format_function(dd_as_py(a[-i])) + separator
            s, line = _extendLine(s, line, word, max_line_len,
                                  next_line_prefix)
        if len(a) > 0:
            word = format_function(dd_as_py(a[-1]))
            s, line = _extendLine(s, line, word, max_line_len, next_line_prefix)
        s += line + "]\n"
        # Replace the indentation of the first line with the opening bracket.
        s = '[' + s[len(next_line_prefix):]
    else:
        # Outer dimensions: recurse per sub-array, one (or more, for
        # higher ranks) newline between siblings.
        s = '['
        sep = separator.rstrip()
        for i in xrange(leading_items):
            if i > 0:
                s += next_line_prefix
            s += _formatArray(a[i], format_function, rank-1, max_line_len,
                              " " + next_line_prefix, separator, edge_items,
                              summary_insert)
            s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1, 1)
        if summary_insert1:
            s += next_line_prefix + summary_insert1 + "\n"
        for i in xrange(trailing_items, 1, -1):
            if leading_items or i != trailing_items:
                s += next_line_prefix
            s += _formatArray(a[-i], format_function, rank-1, max_line_len,
                              " " + next_line_prefix, separator, edge_items,
                              summary_insert)
            s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1, 1)
        if leading_items or trailing_items > 1:
            s += next_line_prefix
        s += _formatArray(a[-1], format_function, rank-1, max_line_len,
                          " " + next_line_prefix, separator, edge_items,
                          summary_insert).rstrip()+']\n'
    return s
class FloatFormat(object):
    """Fixed-width formatter for floating point data.

    Scans the data once (``fillFormat``) to choose between fixed-point
    and exponential notation and to compute a common field width, then
    formats individual scalars via ``__call__``.
    """
    def __init__(self, data, precision, suppress_small, sign=False):
        self.precision = precision
        self.suppress_small = suppress_small
        # sign=True forces an explicit leading '+' (used for imaginary parts).
        self.sign = sign
        self.exp_format = False
        self.large_exponent = False
        self.max_str_len = 0
        # Only numeric kinds can be scanned; other dtypes leave the
        # formatter unconfigured (it is then never called).
        if data.dtype.kind in ['f', 'i', 'u']:
            self.fillFormat(data)
    def fillFormat(self, data):
        """Inspect *data* and build the %-style format string."""
        import numpy.core.numeric as _nc
        # Silence warnings from nan/inf comparisons during the scan.
        errstate = _nc.seterr(all='ignore')
        try:
            special = isnan(data) | isinf(data)
            valid = not_equal(data, 0) & ~special
            non_zero = absolute(data.compress(valid))
            if len(non_zero) == 0:
                max_val = 0.
                min_val = 0.
            else:
                max_val = maximum.reduce(non_zero)
                min_val = minimum.reduce(non_zero)
                # Very large magnitudes, or a wide dynamic range with
                # small values, switch to exponential notation.
                if max_val >= 1.e8:
                    self.exp_format = True
                if not self.suppress_small and (min_val < 0.0001
                                                or max_val/min_val > 1000.):
                    self.exp_format = True
        finally:
            _nc.seterr(**errstate)
        if self.exp_format:
            # Three-digit exponents need one extra column.
            self.large_exponent = 0 < min_val < 1e-99 or max_val >= 1e100
            self.max_str_len = 8 + self.precision
            if self.large_exponent:
                self.max_str_len += 1
            if self.sign:
                format = '%+'
            else:
                format = '%'
            format = format + '%d.%de' % (self.max_str_len, self.precision)
        else:
            # Fixed point: trim the requested precision down to the number
            # of digits actually significant in the data.
            format = '%%.%df' % (self.precision,)
            if len(non_zero):
                precision = max([_digits(x, self.precision, format)
                                 for x in non_zero])
            else:
                precision = 0
            precision = min(self.precision, precision)
            self.max_str_len = len(str(int(max_val))) + precision + 2
            # Make room for 'nan'/'inf' (and a possible '-') if present.
            if _nc.any(special):
                self.max_str_len = max(self.max_str_len,
                                       len(_nan_str),
                                       len(_inf_str)+1)
            if self.sign:
                format = '%#+'
            else:
                format = '%#'
            format = format + '%d.%df' % (self.max_str_len, precision)
        # Right-justified slot used for nan/inf strings.
        self.special_fmt = '%%%ds' % (self.max_str_len,)
        self.format = format
    def __call__(self, x, strip_zeros=True):
        """Format a single scalar *x* using the precomputed field width."""
        import numpy.core.numeric as _nc
        err = _nc.seterr(invalid='ignore')
        try:
            if isnan(x):
                if self.sign:
                    return self.special_fmt % ('+' + _nan_str,)
                else:
                    return self.special_fmt % (_nan_str,)
            elif isinf(x):
                if x > 0:
                    if self.sign:
                        return self.special_fmt % ('+' + _inf_str,)
                    else:
                        return self.special_fmt % (_inf_str,)
                else:
                    return self.special_fmt % ('-' + _inf_str,)
        finally:
            _nc.seterr(**err)
        s = self.format % x
        if self.large_exponent:
            # 3-digit exponent: pad a 2-digit exponent with a leading '0'.
            expsign = s[-3]
            if expsign == '+' or expsign == '-':
                s = s[1:-2] + '0' + s[-2:]
        elif self.exp_format:
            # 2-digit exponent: drop a redundant leading exponent zero.
            if s[-3] == '0':
                s = ' ' + s[:-3] + s[-2:]
        elif strip_zeros:
            # Replace trailing zeros with spaces to keep column alignment.
            z = s.rstrip('0')
            s = z + ' '*(len(s)-len(z))
        return s
def _digits(x, precision, format):
s = format % x
z = s.rstrip('0')
return precision - len(s) + len(z)
# Bounds used by IntegerFormat: values inside (_MININT, _MAXINT) get the
# fixed-width format, everything else falls back to plain str().
# NOTE(review): on Python 3 this caps at 32 bits, which is narrower than
# the Python 2 ``sys.maxint`` on 64-bit builds -- confirm this is intended.
if sys.version_info >= (3, 0):
    _MAXINT = 2**32 - 1
    _MININT = -2**32
else:
    _MAXINT = sys.maxint
    _MININT = -sys.maxint-1
class IntegerFormat(object):
    """Fixed-width formatter for integer data.

    The field width is derived from the widest value in *data*.
    """
    def __init__(self, data):
        try:
            max_str_len = max(len(str(maximum.reduce(data))),
                              len(str(minimum.reduce(data))))
            self.format = '%' + str(max_str_len) + 'd'
        except (TypeError, NotImplementedError):
            # if reduce(data) fails, this instance will not be called, just
            # instantiated in formatdict.
            pass
        except ValueError:
            # this occurs when everything is NA
            pass
    def __call__(self, x):
        # Values outside the representable range fall back to plain str
        # formatting with no fixed width.
        if _MININT < x < _MAXINT:
            return self.format % x
        else:
            return "%s" % x
class ComplexFormat(object):
    """Formatter for complex data: real part followed by a signed
    imaginary part with a trailing ``j``."""
    def __init__(self, x, precision, suppress_small):
        self.real_format = FloatFormat(x.real, precision, suppress_small)
        # sign=True forces an explicit +/- so the imaginary part always
        # reads as an addition/subtraction.
        self.imag_format = FloatFormat(x.imag, precision, suppress_small,
                                       sign=True)
    def __call__(self, x):
        r = self.real_format(x.real, strip_zeros=False)
        i = self.imag_format(x.imag, strip_zeros=False)
        if not self.imag_format.exp_format:
            # Insert the 'j' before the padding left by stripped zeros so
            # column alignment is preserved.
            z = i.rstrip('0')
            i = z + 'j' + ' '*(len(i)-len(z))
        else:
            i = i + 'j'
        return r + i
def _test():
    """Ad-hoc smoke test: print a small 1-D array, then a 3-D array large
    enough to exercise the summarization path."""
    import blaze
    arr = blaze.array([2, 3, 4.0])
    print(arr.dshape)
    print(array2string(arr._data))
    arr = blaze.zeros('30, 30, 30, float32')
    print(arr.dshape)
    print(array2string(arr._data))
| {
"repo_name": "aaronmartin0303/blaze",
"path": "blaze/io/_printing/_arrayprint.py",
"copies": "2",
"size": "23373",
"license": "bsd-3-clause",
"hash": -4295731572777492000,
"line_mean": 30.7137042062,
"line_max": 80,
"alpha_frac": 0.5571385787,
"autogenerated": false,
"ratio": 3.811022338170553,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5368160916870554,
"avg_score": null,
"num_lines": null
} |
"""Arrays of variable-length arrays.
Examples
========
Create a JaggedArray that stores link IDs for the links attached to the
nodes of a 3x3 grid.
>>> from landlab.utils.jaggedarray import JaggedArray
>>> links_at_node = JaggedArray([
... [0, 6],
... [1, 7, 0],
... [8, 1],
... [2, 9, 6],
... [3, 10, 2, 7],
... [11, 3, 8],
... [4, 7],
... [5, 10, 4],
... [5, 11]])
Make up some data that provides values at each of the links.
>>> import numpy as np
>>> value_at_link = np.arange(12, dtype=float)
Create another JaggedArray. Here we store the values at each of the links
attached to nodes of the grid.
>>> values_at_node = JaggedArray.empty_like(links_at_node, dtype=float)
>>> values_at_node.array[:] = value_at_link[links_at_node.array]
Now operate on the link values for each node.
>>> values_at_node.foreach_row(sum)
array([ 6., 8., 9., 17., 22., 22., 11., 19., 16.])
>>> values_at_node.foreach_row(min)
array([ 0., 0., 1., 2., 2., 3., 4., 4., 5.])
>>> values_at_node.foreach_row(np.ptp)
array([ 6., 7., 7., 7., 8., 8., 3., 6., 6.])
"""
import numpy as np
from six.moves import range
class JaggedArray(object):
    """A store for rows of varying length backed by one flat numpy array.

    Row boundaries are tracked with an offsets array: row ``i`` occupies
    ``array[offset[i]:offset[i + 1]]`` of the underlying 1-D values.
    """
    def __init__(self, *args):
        """JaggedArray([row0, row1, ...])
        JaggedArray(values, values_per_row)
        Examples
        --------
        Create a JaggedArray with an array of arrays.
        >>> from landlab.utils.jaggedarray import JaggedArray
        >>> x = JaggedArray([[0, 1, 2], [3, 4]])
        >>> x.array
        array([0, 1, 2, 3, 4])
        Create a JaggedArray as a 1D array and a list or row lengths.
        >>> x = JaggedArray([0, 1, 2, 3, 4], (3, 2))
        >>> x.array
        array([0, 1, 2, 3, 4])
        """
        if len(args) == 1:
            # Single argument: a sequence of rows -- flatten it and record
            # each row's length.
            values, values_per_row = (np.concatenate(args[0]),
                                      [len(row) for row in args[0]])
        else:
            # Two arguments: already-flat values plus per-row lengths.
            values, values_per_row = (np.array(args[0]), args[1])
        self._values = values
        self._number_of_rows = len(values_per_row)
        self._offsets = JaggedArray._offsets_from_values_per_row(values_per_row)
        # Freeze the offsets so row boundaries cannot be corrupted.
        self._offsets.flags['WRITEABLE'] = False
    @property
    def array(self):
        """The jagged array as a 1D array.
        Returns
        -------
        array :
            A view of the underlying 1D array.
        Examples
        --------
        >>> from landlab.utils.jaggedarray import JaggedArray
        >>> x = JaggedArray([[0, 1, 2], [3, 4]])
        >>> x.array
        array([0, 1, 2, 3, 4])
        >>> x.array[0] = 1
        >>> x.array
        array([1, 1, 2, 3, 4])
        """
        return self._values
    @property
    def offset(self):
        """Offsets to rows of a 1D array.
        Returns
        -------
        array :
            Offsets into the underlying 1D array.
        Examples
        --------
        >>> from landlab.utils.jaggedarray import JaggedArray
        >>> x = JaggedArray([[0, 1, 2], [3, 4]])
        >>> x.offset
        array([0, 3, 5])
        From the offsets you can get values for rows of the jagged array.
        >>> x.array[x.offset[0]:x.offset[1]]
        array([0, 1, 2])
        Once the array is created, you can't change the offsets.
        >>> x.offset[0] = 1
        Traceback (most recent call last):
        ValueError: assignment destination is read-only
        """
        return self._offsets
    @property
    def size(self):
        """Number of array elements.
        Returns
        -------
        int :
            Number of values in the array.
        Examples
        --------
        >>> from landlab.utils.jaggedarray import JaggedArray
        >>> x = JaggedArray([[0, 1, 2], [3, 4]])
        >>> x.size
        5
        """
        return len(self._values)
    @property
    def number_of_rows(self):
        """Number of array rows.
        Returns
        -------
        int :
            Number of rows in the array.
        Examples
        --------
        >>> from landlab.utils.jaggedarray import JaggedArray
        >>> x = JaggedArray([[0, 1, 2], [3, 4]])
        >>> x.number_of_rows
        2
        """
        return self._number_of_rows
    @staticmethod
    def _offsets_from_values_per_row(values_per_row):
        """Build the offsets array (cumulative row lengths, leading 0)."""
        offset = np.empty(len(values_per_row) + 1, dtype=int)
        np.cumsum(values_per_row, out=offset[1:])
        offset[0] = 0
        return offset
    @staticmethod
    def empty_like(jagged, dtype=None):
        """Return an uninitialized JaggedArray with *jagged*'s row shape."""
        return JaggedArray(np.empty_like(jagged.array, dtype=dtype),
                           np.diff(jagged.offset))
    def length_of_row(self, row):
        """Number of values in a given row.
        Parameters
        ----------
        row : int
            Index to a row.
        Returns
        -------
        int :
            Number of values in the row.
        Examples
        --------
        >>> from landlab.utils.jaggedarray import JaggedArray
        >>> x = JaggedArray([[0, 1, 2], [3, 4]])
        >>> x.length_of_row(0)
        3
        >>> x.length_of_row(1)
        2
        """
        return self._offsets[row + 1] - self._offsets[row]
    def row(self, row):
        """Values of a row
        Parameters
        ----------
        row : int
            Index to a row.
        Returns
        -------
        array :
            Values in the row as a slice of the underlying array.
        Examples
        --------
        >>> from landlab.utils.jaggedarray import JaggedArray
        >>> x = JaggedArray([[0, 1, 2], [3, 4]])
        >>> x.row(0)
        array([0, 1, 2])
        >>> x.row(1)
        array([3, 4])
        >>> y = x.row(0)
        >>> y[0] = 1
        >>> x.row(0)
        array([1, 1, 2])
        """
        return self._values[self._offsets[row]:self._offsets[row + 1]]
    def __iter__(self):
        """Iterate over the rows of the array.
        Examples
        --------
        >>> from landlab.utils.jaggedarray import JaggedArray
        >>> x = JaggedArray([[0, 1, 2], [3, 4]])
        >>> for row in x: row
        array([0, 1, 2])
        array([3, 4])
        """
        for n in range(self._number_of_rows):
            yield self.row(n)
    def foreach_row(self, func, out=None):
        """Apply an operator row-by-row
        Examples
        --------
        >>> import numpy as np
        >>> from landlab.utils.jaggedarray import JaggedArray
        >>> x = JaggedArray([[0, 1, 2], [3, 4]])
        >>> x.foreach_row(sum)
        array([3, 7])
        >>> out = np.empty(2, dtype=int)
        >>> x.foreach_row(sum, out=out) is out
        True
        >>> out
        array([3, 7])
        """
        if out is None:
            out = np.empty(self.number_of_rows, dtype=self._values.dtype)
        for (m, row) in enumerate(self):
            out[m] = func(row)
        return out
| {
"repo_name": "decvalts/landlab",
"path": "landlab/utils/jaggedarray.py",
"copies": "1",
"size": "6829",
"license": "mit",
"hash": 50325160701264320,
"line_mean": 24.9657794677,
"line_max": 80,
"alpha_frac": 0.491872895,
"autogenerated": false,
"ratio": 3.5456905503634477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4537563445363448,
"avg_score": null,
"num_lines": null
} |
"""Arrays of variable-length arrays.
Implements a JaggedArray class using numpy masked arrays.
Examples
========
Create a JaggedArray that stores link IDs for the links attached to the
nodes of a 3x3 grid.
>>> from landlab.utils.jaggedarray_ma import JaggedArray
>>> links_at_node = JaggedArray([
... [0, 6],
... [1, 7, 0],
... [8, 1],
... [2, 9, 6],
... [3, 10, 2, 7],
... [11, 3, 8],
... [4, 7],
... [5, 10, 4],
... [5, 11]])
Make up some data that provides values at each of the links.
>>> import numpy as np
>>> value_at_link = np.arange(12, dtype=float)
Create another JaggedArray. Here we store the values at each of the links
attached to nodes of the grid.
>>> values_at_node = JaggedArray.empty_like(links_at_node, dtype=float)
>>> values_at_node.array = value_at_link[links_at_node.array]
Now operate on the link values for each node.
>>> values_at_node.foreach_row(np.sum)
array([ 6., 8., 9., 17., 22., 22., 11., 19., 16.])
>>> values_at_node.foreach_row(np.min)
array([ 0., 0., 1., 2., 2., 3., 4., 4., 5.])
>>> values_at_node.foreach_row(np.ptp)
array([ 6., 7., 7., 7., 8., 8., 3., 6., 6.])
"""
import numpy as np
class JaggedArray(object):

    """Rows of unequal length packed into a single numpy masked array."""

    def __init__(self, *args):
        """JaggedArray([row0, row1, ...])
        JaggedArray(values, values_per_row)

        Examples
        --------
        Create a JaggedArray with an array of arrays.

        >>> from landlab.utils.jaggedarray_ma import JaggedArray
        >>> x = JaggedArray([[0, 1, 2], [3, 4]])
        >>> x.array
        array([0, 1, 2, 3, 4])

        Create a JaggedArray as a 1D array and a list or row lengths.

        >>> x = JaggedArray([0, 1, 2, 3, 4], (3, 2))
        >>> x.array
        array([0, 1, 2, 3, 4])
        """
        if len(args) != 1:
            # (flat_values, values_per_row) form
            values = JaggedArray.ma_from_flat_array(args[0], args[1])
        elif isinstance(args[0], np.ma.core.MaskedArray):
            # Adopt an existing masked array as-is
            values = args[0]
        else:
            values = JaggedArray.ma_from_list_of_lists(args[0])
        self._values = values
        self._number_of_rows = values.shape[0]

    @staticmethod
    def ma_from_list_of_lists(rows, dtype=None):
        """Pad *rows* into a 2D masked array; cells past a row's end are masked."""
        lengths = [len(row) for row in rows]
        padded = np.ma.masked_all((len(rows), max(lengths)),
                                  dtype=dtype or int)
        for (row_number, row) in enumerate(rows):
            padded[row_number, :lengths[row_number]] = row
        return padded

    @staticmethod
    def ma_from_flat_array(array, values_per_row):
        """Cut a flat *array* into masked rows of the given lengths."""
        flat = np.array(array)
        padded = np.ma.masked_all((len(values_per_row), max(values_per_row)),
                                  dtype=flat.dtype)
        start = 0
        for (row_number, count) in enumerate(values_per_row):
            padded[row_number, :count] = flat[start:start + count]
            start += count
        return padded

    @property
    def array(self):
        """The jagged array as a 1D array.

        Returns
        -------
        array :
            A view of the underlying 1D array.

        Examples
        --------
        >>> from landlab.utils.jaggedarray_ma import JaggedArray
        >>> x = JaggedArray([[0, 1, 2], [3, 4]])
        >>> x.array
        array([0, 1, 2, 3, 4])

        >>> x.array = np.array([1, 1, 2, 3, 4])
        >>> x.array
        array([1, 1, 2, 3, 4])
        """
        return self._values.compressed()

    @array.setter
    def array(self, array):
        # Only the unmasked (valid) cells receive the new values.
        self._values[~ self._values.mask] = array

    @property
    def size(self):
        """Number of array elements.

        Returns
        -------
        int :
            Number of values in the array.

        Examples
        --------
        >>> from landlab.utils.jaggedarray_ma import JaggedArray
        >>> x = JaggedArray([[0, 1, 2], [3, 4]])
        >>> x.size
        5
        """
        return self.array.size

    @property
    def number_of_rows(self):
        """Number of array rows.

        Returns
        -------
        int :
            Number of rows in the array.

        Examples
        --------
        >>> from landlab.utils.jaggedarray_ma import JaggedArray
        >>> x = JaggedArray([[0, 1, 2], [3, 4]])
        >>> x.number_of_rows == 2
        True
        """
        return self._number_of_rows

    @staticmethod
    def _offsets_from_values_per_row(values_per_row):
        # Starting index of each row within the flat array, plus a final
        # sentinel equal to the total number of values.
        offset = np.empty(len(values_per_row) + 1, dtype=int)
        offset[0] = 0
        np.cumsum(values_per_row, out=offset[1:])
        return offset

    @staticmethod
    def empty_like(jagged, dtype=None):
        """New JaggedArray with the row layout of *jagged*, data uninitialized."""
        return JaggedArray(np.ma.empty_like(jagged._values, dtype=dtype))

    def length_of_row(self, row):
        """Number of values in a given row.

        Parameters
        ----------
        row : int
            Index to a row.

        Returns
        -------
        int :
            Number of values in the row.

        Examples
        --------
        >>> from landlab.utils.jaggedarray_ma import JaggedArray
        >>> x = JaggedArray([[0, 1, 2], [3, 4]])
        >>> x.length_of_row(0)
        3
        >>> x.length_of_row(1)
        2
        """
        return len(self.row(row))

    def row(self, row):
        """Values of a row

        Parameters
        ----------
        row : int
            Index to a row.

        Returns
        -------
        array :
            Values in the row as a slice of the underlying array.

        Examples
        --------
        >>> from landlab.utils.jaggedarray_ma import JaggedArray
        >>> x = JaggedArray([[0, 1, 2], [3, 4]])
        >>> x.row(0)
        array([0, 1, 2])
        >>> x.row(1)
        array([3, 4])
        """
        return self._values[row].compressed()

    def __iter__(self):
        """Iterate over the rows of the array.

        Examples
        --------
        >>> from landlab.utils.jaggedarray_ma import JaggedArray
        >>> x = JaggedArray([[0, 1, 2], [3, 4]])
        >>> for row in x: row
        array([0, 1, 2])
        array([3, 4])
        """
        for masked_row in self._values:
            yield masked_row.compressed()

    def foreach_row(self, func, out=None):
        """Apply an operator row-by-row

        Examples
        --------
        >>> from landlab.utils.jaggedarray_ma import JaggedArray
        >>> x = JaggedArray([[0, 1, 2], [3, 4]])
        >>> x.foreach_row(np.sum)
        array([3, 7])

        >>> out = np.empty(2, dtype=int)
        >>> x.foreach_row(np.sum, out=out) is out
        True
        >>> out
        array([3, 7])
        """
        if out is not None:
            return func(self._values, axis=1, out=out)
        return func(self._values, axis=1).compressed()
| {
"repo_name": "decvalts/landlab",
"path": "landlab/utils/jaggedarray_ma.py",
"copies": "1",
"size": "6719",
"license": "mit",
"hash": -8999810821059787000,
"line_mean": 25.557312253,
"line_max": 74,
"alpha_frac": 0.4978419408,
"autogenerated": false,
"ratio": 3.4813471502590674,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44791890910590676,
"avg_score": null,
"num_lines": null
} |
# arrays.py 19/01/2016 D.J.Whale
class Array():

    """Lazily grown 1D array with list-flattening construction.

    Unset positions read as 0, and assigning past the current end pads
    the gap with zeros.  Constructor arguments that are lists contribute
    their items; any other argument is appended as a single element.
    """

    def __init__(self, *args):
        self.data = []
        for a in args:
            # isinstance (rather than type(a)==list) also accepts list
            # subclasses; extend replaces the manual item-by-item loop.
            if isinstance(a, list):
                self.data.extend(a)
            else:
                self.data.append(a)

    def __setitem__(self, index, value):
        # Grow the backing list with zeros so any index is assignable.
        shortfall = index - len(self.data) + 1
        if shortfall > 0:
            self.data.extend([0] * shortfall)
        self.data[index] = value

    def __getitem__(self, index):
        if index >= len(self.data):
            return 0  # lazy construction: unset cells read as 0
        return self.data[index]

    def __len__(self):
        return len(self.data)

    def __repr__(self):
        return str(self.data)
class Array2D():

    """Lazily grown 2D array: rows (Array instances) appear on first access."""

    def __init__(self):
        self.rows = []

    def __getitem__(self, index):
        # Grow the row list on demand so any row index is addressable.
        shortfall = index - len(self.rows) + 1
        if shortfall > 0:
            self.rows.extend(Array() for _ in range(shortfall))
        return self.rows[index]

    def __repr__(self):
        return str(self.rows)
# END
| {
"repo_name": "whaleygeek/pc_parser",
"path": "build/release/arrays.py",
"copies": "2",
"size": "1151",
"license": "mit",
"hash": 4193036217346688500,
"line_mean": 22.9791666667,
"line_max": 63,
"alpha_frac": 0.4761077324,
"autogenerated": false,
"ratio": 3.749185667752443,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5225293400152442,
"avg_score": null,
"num_lines": null
} |
""" Array support
Ideas based on numpy ndarray, but limited functionality.
Implementation heavily influenced by GDA scisoftpy,
Copyright 2010 Diamond Light Source Ltd.
@author: Kay Kasemir
"""
import math
import org.csstudio.ndarray.NDType as NDType
import org.csstudio.ndarray.NDArray as NDArray
import org.csstudio.ndarray.NDMath as NDMath
import org.csstudio.ndarray.NDMatrix as NDMatrix
import org.csstudio.ndarray.NDCompare as NDCompare
import org.csstudio.ndarray.NDShape as NDShape
import jarray
import java.lang.Class
# Use float to get nan, because float will be replaced with ndarray data type
nan = float('nan')
# Keep references to the Python builtins before the names below shadow them.
_float = float
_int = int
_bool = bool
# Data types
float = float64 = NDType.FLOAT64
float32 = NDType.FLOAT32
int = int64 = NDType.INT64
# NOTE(review): int32 is mapped to INT64 -- the unit tests note "int32 vs.
# int64 not perfectly handled and differing from numpy"; confirm whether
# NDType offers a true 32-bit type before changing.
int32 = NDType.INT64
int16 = NDType.INT16
byte = int8 = NDType.INT8
bool = NDType.BOOL
def __isBoolArray__(array):
    """Check if array is boolean

    For non-ndarray, only checks first element
    """
    if isinstance(array, ndarray):
        return array.dtype == NDType.BOOL
    # _bool holds the original Python builtin (the module shadows 'bool')
    return len(array) > 0 and isinstance(array[0], _bool)
def __toNDShape__(shape):
    """Create shape for scalar as well as list"""
    if not isinstance(shape, (tuple, list)):
        # Bare scalar: wrap as a one-dimensional shape
        return NDShape([shape])
    if len(shape) == 1:
        # Unwrap single-element sequences (may themselves be sequences)
        return __toNDShape__(shape[0])
    return NDShape(shape)
class ndarray_iter:
"""Iterator for elements in ND array
Performs 'flat' iteration over all elements
"""
def __init__(self, iter):
self.iter = iter
def __iter__(self):
return self
def next(self):
if self.iter.hasNext():
return self.iter.nextDouble()
else:
raise StopIteration
class ndarray:
    """N-Dimensional array
    Example:
    array([ 0, 1, 2, 3 ])
    array([ [ 0, 1 ], [ 2, 3 ], [ 4, 5 ] ])
    """
    # Thin wrapper around the Java org.csstudio.ndarray.NDArray; 'nda'
    # holds the wrapped Java instance, and math/compare operations are
    # delegated to NDMath/NDCompare/NDMatrix.
    def __init__(self, nda):
        self.nda = nda
    def getBase(self):
        """Base array, None if this array has no base"""
        if self.nda.getBase() is None:
            return
        return ndarray(self.nda.getBase())
    base = property(getBase)
    def getShape(self):
        """Shape of the array, one element per dimension"""
        return tuple(self.nda.getShape().getSizes())
    shape = property(getShape)
    def getType(self):
        """Get Data type of array elements"""
        return self.nda.getType()
    dtype = property(getType)
    def getRank(self):
        """Get number of dimensions"""
        return self.nda.getRank()
    ndim = property(getRank)
    def getStrides(self):
        """Get strides
        Note that these are array index strides,
        not raw byte buffer strides as in NumPy
        """
        return tuple(self.nda.getStrides().getStrides())
    strides = property(getStrides)
    def copy(self):
        """Create a copy of this array"""
        return ndarray(self.nda.clone())
    def reshape(self, *shape):
        """reshape(shape):
        Create array view with new shape
        Example:
        arange(6).reshape(3, 2)
        results in array([ [ 0, 1 ], [ 2, 3 ], [ 4, 5 ] ])
        """
        return ndarray(NDMatrix.reshape(self.nda, __toNDShape__(shape)))
    def transpose(self):
        """Compute transposed array, i.e. swap 'rows' and 'columns'"""
        return ndarray(NDMatrix.transpose(self.nda))
    T = property(transpose)
    def __len__(self):
        """Returns number of elements for the first dimension"""
        if len(self.shape) > 0:
            return self.shape[0]
        return 0
    def __getSlice__(self, indices):
        """@param indices: Indices that may address a slice
        @return: NDArray for slice, or None if indices don't refer to slice
        """
        # Turn single argument into tuple to allow following iteration code
        if not isinstance(indices, tuple):
            indices = ( indices, )
        given = len(indices)
        dim = self.nda.getRank()
        any_slice = False
        starts = []
        stops = []
        steps = []
        i = 0
        # Build one (start, stop, step) triple per dimension.
        for i in range(dim):
            if i < given:
                index = indices[i]
                if isinstance(index, slice):
                    # Slice provided
                    any_slice = True
                    # Replace 'None' in any portion of the slice
                    start = 0 if index.start is None else index.start
                    stop = self.nda.getShape().getSize(i) if index.stop is None else index.stop
                    step = 1 if index.step is None else index.step
                else:
                    # Simple index provided: stop = step = 0 indicates
                    # to NDArray.getSlice() to 'collapse' this axis,
                    # using start as index
                    start = index
                    stop = step = 0
            else:
                # Nothing provided for this dimension, use full axis
                any_slice = True
                start = 0
                stop = self.nda.getShape().getSize(i)
                step = 1
            starts.append(start)
            stops.append(stop)
            steps.append(step)
        if any_slice:
            return self.nda.getSlice(starts, stops, steps)
        # There was a plain index for every dimension, no slice at all
        return None
    def __getitem__(self, indices):
        """Get element of array, or fetch sub-array
        Example:
        a = array([ [ 0, 1 ], [ 2, 3 ], [ 4, 5 ] ])
        a[1, 1] # Result is 3
        a[1] # Result is second row of the 3x2 array
        May also provide slice:
        a = arange(10)
        a[1:6:2] # Result is [ 1, 3, 5 ]
        Differing from numpy, this returns all values as float,
        so if they are later used for indexing, int() needs to be used.
        """
        # 'slice' deliberately shadows the builtin here; it holds an
        # NDArray view or None.
        slice = self.__getSlice__(indices)
        if slice is None:
            if isinstance(indices, (list, ndarray)):
                N = len(indices)
                if __isBoolArray__(indices):
                    # Boolean mask: collect elements where the mask is true
                    result = []
                    for i in range(N):
                        if indices[i]:
                            result.append(self.nda.getDouble(i))
                    return array(result)
                else:
                    # Array of indices, each addresses one element of the array
                    result = zeros(N)
                    for i in range(N):
                        # Need _int because int is now set to the NDType name 'int'
                        result[i] = self.nda.getDouble(_int(indices[i]))
                    return result
            else:
                # Indices address one element of the array
                return self.nda.getDouble(indices)
        # else: Need to return slice/view of array
        return ndarray(slice)
    def __setitem__(self, indices, value):
        """Set element of array
        Example:
        a = zeros(3)
        a[1] = 1
        a = array([ [ 0, 1 ], [ 2, 3 ], [ 4, 5 ] ])
        a[1] = array([ 20, 30 ])
        """
        if isinstance(value, ndarray):
            # Create view for requested section of self,
            # then assign the provided value to it
            slice = self.__getSlice__(indices)
            slice.set(value.nda)
        else:
            self.nda.setDouble(value, indices)
    def __iter__(self):
        # Flat iteration over all elements via the Java iterator.
        return ndarray_iter(self.nda.getIterator())
    def __neg__(self):
        """Return array where sign of each element has been reversed"""
        result = self.nda.clone()
        NDMath.negative(result)
        return ndarray(result)
    def __add__(self, value):
        """Add scalar to all elements, or add other array element-by-element"""
        if isinstance(value, ndarray):
            return ndarray(NDMath.add(self.nda, value.nda))
        else:
            result = self.nda.clone()
            NDMath.increment(result, value)
            return ndarray(result)
    def __radd__(self, value):
        """Add scalar to all elements, or add other array element-by-element"""
        return self.__add__(value)
    def __iadd__(self, value):
        """Add scalar to all elements, or add other array element-by-element"""
        if isinstance(value, ndarray):
            NDMath.increment(self.nda, value.nda)
        else:
            NDMath.increment(self.nda, value)
        return self
    def __sub__(self, value):
        """Subtract scalar from all elements, or sub. other array element-by-element"""
        if isinstance(value, ndarray):
            return ndarray(NDMath.subtract(self.nda, value.nda))
        else:
            result = self.nda.clone()
            NDMath.increment(result, -value)
            return ndarray(result)
    def __rsub__(self, value):
        """Subtract scalar from all elements, or sub. other array element-by-element"""
        # value - self, computed as (-self) + value
        result = self.nda.clone()
        NDMath.negative(result)
        NDMath.increment(result, value)
        return ndarray(result)
    def __isub__(self, value):
        """Subtract scalar from all elements, or sub. other array element-by-element"""
        return self.__iadd__(-value)
    def __mul__(self, value):
        """Multiply by scalar or by other array elements"""
        # Scalars are wrapped as a one-element array for NDMath
        if not isinstance(value, ndarray):
            value = array([ value ])
        return ndarray(NDMath.multiply(self.nda, value.nda))
    def __rmul__(self, value):
        """Multiply by scalar or by other array elements"""
        return self.__mul__(value)
    def __imul__(self, value):
        """Scale value by scalar or element-by-element"""
        if isinstance(value, ndarray):
            NDMath.scale(self.nda, value.nda)
        else:
            NDMath.scale(self.nda, value)
        return self
    def __div__(self, value):
        """Divide by scalar or by other array elements"""
        if not isinstance(value, ndarray):
            value = array([ value ])
        return ndarray(NDMath.divide(self.nda, value.nda))
    def __rdiv__(self, value):
        """Divide by scalar or by other array elements"""
        if not isinstance(value, ndarray):
            value = array([ value ])
        return ndarray(NDMath.divide(value.nda, self.nda))
    def __idiv__(self, value):
        """Divide value by scalar or element-by-element"""
        if isinstance(value, ndarray):
            NDMath.divide_elements(self.nda, value.nda)
        else:
            NDMath.divide_elements(self.nda, value)
        return self
    def __pow__(self, value):
        """Raise array elements to power specified by value"""
        if not isinstance(value, ndarray):
            value = array([ value ])
        return ndarray(NDMath.power(self.nda, value.nda))
    def __rpow__(self, value):
        """Raise array elements to power specified by value"""
        if not isinstance(value, ndarray):
            value = array([ value ])
        return ndarray(NDMath.power(value.nda, self.nda))
    def __eq__(self, value):
        """Element-wise comparison"""
        if not isinstance(value, ndarray):
            value = array([ value ])
        return ndarray(NDCompare.equal_to(self.nda, value.nda))
    def __ne__(self, value):
        """Element-wise comparison"""
        if not isinstance(value, ndarray):
            value = array([ value ])
        return ndarray(NDCompare.not_equal_to(self.nda, value.nda))
    def __lt__(self, value):
        """Element-wise comparison"""
        if not isinstance(value, ndarray):
            value = array([ value ])
        return ndarray(NDCompare.less_than(self.nda, value.nda))
    def __le__(self, value):
        """Element-wise comparison"""
        if not isinstance(value, ndarray):
            value = array([ value ])
        return ndarray(NDCompare.less_equal(self.nda, value.nda))
    def __gt__(self, value):
        """Element-wise comparison"""
        if not isinstance(value, ndarray):
            value = array([ value ])
        return ndarray(NDCompare.greater_than(self.nda, value.nda))
    def __ge__(self, value):
        """Element-wise comparison"""
        if not isinstance(value, ndarray):
            value = array([ value ])
        return ndarray(NDCompare.greater_equal(self.nda, value.nda))
    def __abs__(self):
        """Element-wise absolute values"""
        return ndarray(NDMath.abs(self.nda))
    def any(self):
        """Determine if any element is True (not zero)"""
        return NDCompare.any(self.nda)
    def all(self):
        """Determine if all elements are True (not zero)"""
        return NDCompare.all(self.nda)
    def sum(self):
        """Returns sum over all array elements"""
        return NDMath.sum(self.nda)
    def max(self):
        """Returns maximum array element"""
        return NDMath.max(self.nda)
    def min(self):
        """Returns minimum array element"""
        return NDMath.min(self.nda)
    def nonzero(self):
        """Return the indices of the elements that are non-zero.
        Returns a tuple of arrays, one for each dimension of a, containing the indices of the non-zero elements in that dimension.
        Compared to numpy, it does not return a tuple of arrays but a matrix,
        but either one allows addressing as [dimension, i] to get the index of the i'th non-zero element
        """
        return ndarray(NDCompare.nonzero(self.nda))
    def __str__(self):
        return self.nda.toString()
    def __repr__(self):
        # Omit dtype for the default (float) type, mirroring numpy's repr
        if self.dtype == float:
            return "array(" + self.nda.toString() + ")"
        return "array(" + self.nda.toString() + ", dtype=" + str(self.dtype) + ")"
def zeros(shape, dtype=float):
    """zeros(shape, dtype=float)
    Create an array filled with zeros, for example zeros( (2, 3) ).
    """
    nd_shape = __toNDShape__(shape)
    return ndarray(NDMatrix.zeros(dtype, nd_shape))
def ones(shape, dtype=float):
    """ones(shape, dtype=float)
    Create an array filled with ones, for example ones( (2, 3) ).
    """
    nd_shape = __toNDShape__(shape)
    return ndarray(NDMatrix.ones(dtype, nd_shape))
def array(arg, dtype=None):
    """Create N-dimensional array from data
    Example:
        array([1, 2, 3])
        array([ [1, 2], [3, 4]])
    """
    if dtype is not None:
        return ndarray(NDArray.create(arg, dtype))
    if isinstance(arg, ndarray):
        # Copy-construct from another numjy array
        return ndarray(arg.nda.clone())
    return ndarray(NDArray.create(arg))
def arange(start, stop=None, step=1, dtype=None):
    """arange([start,] stop[, step=1])
    Return evenly spaced values within the half-open interval
    ``[start, stop)`` -- `start` included, `stop` excluded.
    Parameters
    ----------
    start : number, optional
        Inclusive start of the interval; defaults to 0.
    stop : number
        Exclusive end of the interval.
    step : number, optional
        Distance between two adjacent values, ``out[i+1] - out[i]``;
        defaults to 1.  If `step` is given, `start` must be given too.
    Examples:
        arange(5)
        arange(1, 5, 0.5)
    """
    if stop is None:
        # Single-argument form: that argument is the 'stop' value
        start, stop = 0, start
    if dtype is not None:
        return ndarray(NDMatrix.arange(start, stop, step, dtype))
    return ndarray(NDMatrix.arange(start, stop, step))
def linspace(start, stop, num=50, dtype=float):
    """linspace(start, stop, num=50, dtype=float)
    Return `num` evenly spaced values from start to stop, stop included.
    Example:
        linspace(2, 10, 5)
    """
    samples = NDMatrix.linspace(start, stop, num, dtype)
    return ndarray(samples)
def any(value):
    """True if at least one element of `value` is True (non-zero)"""
    return value.any()
def all(value):
    """True only if every element of `value` is True (non-zero)"""
    return value.all()
def sum(array):
    """Sum over all elements of `array`"""
    return array.sum()
def sqrt(value):
    """Square root of a scalar or, element-wise, of an array"""
    if isinstance(value, ndarray):
        return ndarray(NDMath.sqrt(value.nda))
    if value < 0:
        # Negative scalar: return NaN instead of raising, as numpy does
        return nan
    return math.sqrt(value)
def exp(value):
    """Exponential (e**x) of a scalar or, element-wise, of an array"""
    if not isinstance(value, ndarray):
        return math.exp(value)
    return ndarray(NDMath.exp(value.nda))
def log(value):
    """Natural logarithm of a scalar or, element-wise, of an array"""
    if isinstance(value, ndarray):
        return ndarray(NDMath.log(value.nda))
    return math.log(value)
def log10(value):
    """Base-10 logarithm of a scalar or, element-wise, of an array"""
    if isinstance(value, ndarray):
        return ndarray(NDMath.log10(value.nda))
    return math.log10(value)
def copy(a):
    """copy(array):
    Create a copy of an array
    """
    duplicate = a.nda.clone()
    return ndarray(duplicate)
def reshape(a, shape):
    """reshape(array, shape):
    Create array view with new shape

    Example:
        reshape(arange(6), (3, 2))
    results in array([ [ 0, 1 ], [ 2, 3 ], [ 4, 5 ] ])
    """
    # Bug fix: this module-level function previously referenced 'self.nda',
    # which does not exist here and raised NameError on every call; it must
    # operate on the array argument 'a', mirroring a.reshape(...).
    return ndarray(NDMatrix.reshape(a.nda, __toNDShape__(shape)))
def transpose(a, axes=None):
    """transpose(a, axes=None):
    Permute the axes of an array.
    By default, they are reversed.
    In a 2D array this would swap 'rows' and 'columns'
    """
    if axes is not None:
        return ndarray(NDMatrix.transpose(a.nda, axes))
    return a.transpose()
def dot(a, b):
    """dot(a, b):
    Matrix 'dot' product of arrays a and b.
    A 1x1 result is collapsed to a plain scalar.
    """
    product = ndarray(NDMatrix.dot(a.nda, b.nda))
    if product.ndim == 1 and len(product) == 1:
        return product[0]
    return product
| {
"repo_name": "ESSICS/cs-studio",
"path": "applications/scan/scan-plugins/org.csstudio.numjy/jython/numjy/ndarray.py",
"copies": "3",
"size": "18024",
"license": "epl-1.0",
"hash": -3145576177343833600,
"line_mean": 30.4006968641,
"line_max": 133,
"alpha_frac": 0.5679094541,
"autogenerated": false,
"ratio": 4.016042780748663,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.020359569529588766,
"num_lines": 574
} |
""" Array support unit test
@author: Kay Kasemir
"""
import unittest
import math
orig_int = int
from numjy import *
class ScanDataTest(unittest.TestCase):
    """Unit tests for the numjy array support.

    Note: written for Python 2 / Jython ('print version' statement below).
    """
    def testVersion(self):
        print version
        self.assertTrue(version_info > (0, 0))
    def testCreateZeros(self):
        a = zeros(5)
        for i in range(5):
            self.assertEqual(0, a[i]);
        a = zeros([5, 5])
        for i in range(5):
            for j in range(5):
                self.assertEqual(0, a[i,j]);
    def testCreateOnes(self):
        a = ones(5)
        for i in range(5):
            self.assertEqual(1, a[i]);
        a = ones([3, 2])
        for i in range(3):
            for j in range(2):
                self.assertEqual(1, a[i,j]);
    def testCreateRange(self):
        a = arange(5)
        for i in range(5):
            self.assertEqual(i, a[i]);
        a = arange(25).reshape(5, 5)
        for i in range(5):
            for j in range(5):
                self.assertEqual(i*5+j, a[i,j]);
    def testCreateLinspace(self):
        a = linspace(0, 10, 5)
        for i in range(5):
            self.assertEqual(i*2.5, a[i]);
    def testShape(self):
        # Shape returned as tuple. Zeros uses tuple for desired size
        a = zeros(5, dtype=byte)
        self.assertEqual((5,), a.shape)
        self.assertEqual(1, a.ndim)
        # When using byte, the NumPy and NumJy strides match.
        # Otherwise the NumJy strides address a flat array,
        # while NumPy strides through a byte buffer, so
        # its strides scale with the element size
        self.assertEqual((1,), a.strides)
        a = zeros((3,2), dtype=byte)
        self.assertEqual((3,2), a.shape)
        self.assertEqual(2, a.ndim)
        self.assertEqual((2,1), a.strides)
        # reshape takes var-arg numbers or tuple
        a = zeros(6, dtype=byte).reshape(3,2)
        self.assertEqual((3,2), a.shape)
        self.assertEqual((2,1), a.strides)
        a = zeros(6, dtype=byte).reshape([2,3])
        self.assertEqual((2,3), a.shape)
        self.assertEqual((3,1), a.strides)
        # len()
        a = zeros(6)
        self.assertEqual(6, len(a))
        a = zeros(6).reshape(3, 2)
        self.assertEqual(3, len(a))
    def testTypes(self):
        # Select type
        a = array([ 1, 2, 3, 4 ], dtype=float32)
        self.assertEqual(float32, a.dtype)
        a = array([ 0, 1 ], dtype=bool)
        self.assertEqual(bool, a.dtype)
        # Pick type automatically
        a = array([ 1, 2, 3, 4 ])
        # int32 vs. int64 not perfectly handled and differing from numpy
        # self.assertEqual(int64, a.dtype)
        a = array([ 1.0, 2.0, 3.0, 4.0 ])
        self.assertEqual(float64, a.dtype)
        a = array([ True, False ])
        self.assertEqual(bool, a.dtype)
    def testWriteToElements(self):
        a = zeros(5)
        for i in range(5):
            a[i] = i
        self.assertTrue(any(a == arange(5)))
    def testView(self):
        a=arange(6)
        # 'b' should be a view of a
        b=a.reshape(2, 3)
        # Changing 'b' also changes corresponding element in 'a'
        b[1,2]=666
        self.assertEqual(666, b[1,2])
        self.assertEqual(666, a[5])
        # Views with different offsets
        a = arange(6)
        b = a[2:5]
        self.assertTrue(all(b == array([ 2, 3, 4 ])))
        c = b[1:3]
        self.assertTrue(all(c == array([ 3, 4 ])))
    def testSlicing(self):
        # Simple 1-D subsection
        a = arange(10)
        sub = a[2:4]
        self.assertTrue(all(sub == array([ 2, 3 ])))
        sub = a[1:6:2]
        self.assertTrue(all(sub == array([ 1.0, 3.0, 5.0 ])))
        sub = a[1:]
        self.assertTrue(all(sub == array([ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0 ])))
        sub = a[:-1]
        self.assertTrue(all(sub == array([ 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0 ])))
        sub = a[:3]
        self.assertTrue(all(sub == array([ 0.0, 1.0, 2.0 ])))
        sub = a[::2]
        self.assertTrue(all(sub == array([ 0.0, 2.0, 4.0, 6.0, 8.0 ])))
        # Assignment to slice changes original array
        sub[1] = 666;
        self.assertEqual(666, a[2])
        # Get view into second 'row' of data
        a = arange(6).reshape(2, 3)
        sub = a[1]
        self.assertTrue(all(sub == array([ 3.0, 4.0, 5.0 ])))
        sub[1] = 666
        self.assertEqual(666, sub[1])
        self.assertEqual(666, a[1, 1])
        # Write to 1-D slice with matching array
        a = zeros(5)
        sub = a[2:4]
        sub[:] = array([ 2, 3 ])
        self.assertTrue(all(a == array([ 0, 0, 2, 3, 0 ])))
        self.assertTrue(all(sub == array([ 2, 3 ])))
        self.assertTrue(all(sub[:] == array([ 2, 3 ])))
        # Update 2x2 section of 2x3 array with matching array
        orig = arange(6).reshape(2, 3)
        orig[:, ::2] = array([ [ 40, 42], [ 43, 45 ]])
        self.assertTrue(all(orig == array([ [ 40.0, 1.0, 42.0 ],
                                            [ 43.0, 4.0, 45.0 ] ] ) ) )
        # Update 2x2 section of 2x3 array with 'column' vector (broadcast)
        orig = arange(6).reshape(2, 3)
        orig[:, ::2] = array([ [ 41], [ 42 ]])
        self.assertTrue(all(orig == array([ [ 41.0, 1.0, 41.0 ],
                                            [ 42.0, 4.0, 42.0 ] ] ) ) )
    def testArraysAsIndices(self):
        a = array([ 1, 2, 5, 3, 7, 1, 3 ])
        sub = a[ [ 2, 3, 5 ] ]
        self.assertTrue(all(sub == array([ 5, 3, 1 ] ) ) )
        sub = a[ array([ 2, 3, 5 ]) ]
        self.assertTrue(all(sub == array([ 5, 3, 1 ] ) ) )
        sel = a > 3
        sub = a[sel]
        self.assertTrue(all(sub == array([ 5.0, 7.0 ] ) ) )
    def testIteration(self):
        # Iteration is always over flat array
        a = arange(10)
        i = 0
        for n in a:
            self.assertEqual(i, n)
            i += 1
        # View iterates over its elements, not the base array
        b = a[2:5]
        i = 2
        for n in b:
            self.assertEqual(i, n)
            i += 1
        self.assertEqual(i, 5)
    def testComparisons(self):
        a = array([[ False, False ], [ False, True ]])
        self.assertTrue(any(a))
        self.assertTrue(a.any())
        self.assertFalse(all(a))
        self.assertFalse(a.all())
        # Compare flat arrays
        a = arange(4)
        b = arange(4)
        c = a == b
        self.assertEquals(4, len(c))
        self.assertEquals(True, c[0])
        self.assertEquals(True, c[1])
        self.assertEquals(True, c[2])
        self.assertEquals(True, c[3])
        # array and scalar
        c = a == 2
        self.assertEquals(4, len(c))
        self.assertEquals(False, c[0])
        self.assertEquals(False, c[1])
        self.assertEquals(True, c[2])
        self.assertEquals(False, c[3])
        # Same shape and content, but different strides into orig. data
        a = array([ 1, 2, 1, 1, 2, 2], dtype=byte)
        b = a[0:2]
        c = a[2:5:2]
        self.assertEqual((1,), b.strides)
        self.assertEqual((2,), c.strides)
        d = b == c
        self.assertEquals(2, len(d))
        self.assertEquals(True, d[0])
        self.assertEquals(True, d[1])
        # Broadcast
        a = array([ [ 1, 1 ], [ 2, 2] ])
        b = array([ [ 1 ], [ 2 ] ])
        c = a == b
        self.assertEquals((2,2), c.shape)
        self.assertEquals(True, c[0, 0])
        self.assertEquals(True, c[0, 1])
        self.assertEquals(True, c[1, 0])
        self.assertEquals(True, c[1, 1])
        # More...
        a = array([[ 1, 2 ], [ 3, 4]])
        b = array([[ 1, 1 ], [ 1, 1]]) + array([[ 0, 1 ], [ 2, 3]])
        c = a == b
        self.assertTrue(all(c))
        c = a != b
        self.assertFalse(any(c))
        c = array([ 1, 2 ]) < array([ 2, 3 ])
        self.assertTrue(all(c))
        c = array([ 1, 3 ]) <= array([ 2, 3 ])
        self.assertTrue(all(c))
        c = array([ 2, 3 ]) > array([ 1, 2 ])
        self.assertTrue(all(c))
        c = array([ 2, 3 ]) >= array([ 1, 3 ])
        self.assertTrue(all(c))
    def testNonZero(self):
        a = arange(12)
        n = a.nonzero()
        self.assertTrue(all(n == array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 ])))
        a = a.reshape(4, 3)
        a[3,0] = 1
        a[3,1] = 2
        a[3,2] = 3
        n = a.nonzero()
        self.assertTrue(all(n[0] == array([0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3])))
        self.assertTrue(all(n[1] == array([1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2])))
        sel = a>=5
        n = sel.nonzero()
        self.assertTrue(all(n[0] == array([1, 2, 2, 2])))
        self.assertTrue(all(n[1] == array([2, 0, 1, 2])))
        n = transpose(n)
        for i in range(len(n)):
            # orig_int keeps the original builtin 'int' (module shadows it)
            self.assertTrue( a[orig_int(n[i, 0]), orig_int(n[i, 1])] >= 5 )
    def testNeg(self):
        self.assertTrue(all(-arange(5) == array([ 0, -1, -2, -3, -4 ])))
        # View into subsection
        a = arange(5)
        b = a[2:4]
        self.assertTrue(all(-b == array([ -2, -3 ])))
    def testSum(self):
        self.assertEqual(3, sum(array([ 0, 1, 2 ])))
        self.assertEqual(-2, sum(array([ -2 ])))
        self.assertEqual(0, sum(array([ ])))
    def testMinMax(self):
        self.assertEqual(2, max(array([ -2, 1, 2 ])))
        self.assertEqual(-2, min(array([ -2, 1, 2 ])))
    def testAdd(self):
        # Flat array
        self.assertTrue(all(arange(5) + arange(5) == array([ 0, 2, 4, 6, 8 ])))
        # Array + scalar
        self.assertTrue(all(arange(5) + 42 == array([ 42.0, 43.0, 44.0, 45.0, 46.0 ])))
        self.assertTrue(all(42 + arange(5) == array([ 42.0, 43.0, 44.0, 45.0, 46.0 ])))
        # in-place
        a = arange(5)
        a += 42
        self.assertTrue(all(a == array([ 42.0, 43.0, 44.0, 45.0, 46.0 ])))
        a = arange(5)
        a += arange(5)
        self.assertTrue(all(a == array([ 0, 2, 4, 6, 8 ])))
        # Same shape, but different strides
        a = arange(6).reshape(2, 3)
        b = a[:, 0:2]
        c = a[:, ::2]
        self.assertTrue(all(b + c == array([ [ 0.0, 3.0 ],
                                             [ 6.0, 9.0 ] ])))
        # Broadcast
        a = arange(12).reshape(2, 3, 2)
        b = array([[ 10, 20, 30]]).T
        c = a + b
        self.assertTrue(all(c == array([ [ [10, 11], [22, 23], [34, 35]],
                                         [ [16, 17], [28, 29], [40, 41]]])))
    def testSub(self):
        self.assertTrue(all(arange(5) - arange(5) == array([ 0, 0, 0, 0, 0 ])))
        self.assertTrue(all(arange(5) - 42 == array([ -42.0, -41.0, -40.0, -39.0, -38.0 ])))
        self.assertTrue(all(42 - arange(5) == array([ 42.0, 41.0, 40.0, 39.0, 38.0 ])))
        a = arange(5)
        a -= arange(5)
        self.assertTrue(all(a == array([ 0, 0, 0, 0, 0 ])))
    def testMul(self):
        self.assertTrue(all(arange(5) * arange(5) == array([ 0, 1, 4, 9, 16 ])))
        self.assertTrue(all(arange(3) * 42 == array([ 0, 42, 84 ])))
        self.assertTrue(all(42 * arange(3) == array([ 0, 42, 84 ])))
        a = arange(3)
        a *= 42
        self.assertTrue(all(a == array([ 0, 42, 84 ])))
        a = arange(3)
        a *= array([ 42, 21, 7])
        self.assertTrue(all(a == array([ 0, 21, 14 ])))
    def testDiv(self):
        self.assertTrue(all(arange(1, 5) / arange(1, 5) == array([ 1, 1, 1, 1 ])))
        self.assertTrue(all((arange(3) * 42) / 42 == array([ 0, 1, 2 ])) )
        self.assertTrue(all((arange(3) * 42) / array([ 42, 42, 42 ]) == array([ 0, 1, 2 ])))
        self.assertTrue(all(42 / arange(1, 3) == array([ 42.0, 21.0 ])))
        a = arange(5, dtype=float)
        a /= 5
        self.assertTrue(all(a == array([ 0.0, 0.2, 0.4, 0.6, 0.8 ])))
        a = arange(1, 5)
        a /= arange(1, 5)
        self.assertTrue(all(a == array([ 1, 1, 1, 1 ])))
    def testPower(self):
        a = arange(3)
        a = a ** 2
        self.assertTrue(all(a == array([ 0, 1, 4 ])))
        a = arange(3)
        a **= 2
        self.assertTrue(all(a == array([ 0, 1, 4 ])))
        a = pow(arange(3), array([2]))
        self.assertTrue(all(a == array([ 0, 1, 4 ])))
        a = arange(3)
        a = 2 ** a
        self.assertTrue(all(a == array([ 1, 2, 4 ])))
    def testAbs(self):
        a = -arange(3)
        a = abs(a)
        self.assertTrue(all(a == arange(3)))
        self.assertEquals(2.0, abs(-2))
    def testSqrt(self):
        a = array([ 0, 1, 4, 9, 16 ])
        a = sqrt(a)
        self.assertTrue(all(a == arange(5)))
        self.assertEquals(2.0, sqrt(4))
    def testExp(self):
        a = array([ 0, 1, 2, -2 ])
        a = exp(a)
        b = array([ 1.0, exp(1), exp(2), exp(-2) ])
        self.assertTrue(all(a == b))
    def testLog(self):
        a = array([ 1, 2, 4 ])
        a = log(a)
        b = array([ 0.0, log(2.0), log(4.0) ])
        self.assertTrue(all(a == b))
    def testLog10(self):
        a = array([ 1, 10, 100, 1000, 10000 ])
        a = log10(a)
        b = arange(5)
        self.assertTrue(all(a == b))
    def testTranspose(self):
        a = arange(4).T
        self.assertTrue(all(a == arange(4)))
        a = arange(4).reshape(2, 2).T
        self.assertTrue(all(a == array([ [ 0, 2], [ 1, 3 ]])))
        a = arange(6).reshape(3, 2).T
        self.assertTrue(all(a == array([ [ 0, 2, 4], [ 1, 3, 5 ]])))
        a = arange(6).reshape(1, 2, 3).T
        self.assertTrue(all(a == array([ [[0], [3]], [[1], [4]], [[2], [5]] ])))
        # Specifically request the 'default' axis ordering of transpose
        a = transpose(arange(6).reshape(1, 2, 3), ( 2, 1, 0 ))
        self.assertTrue(all(a == array([ [[0], [3]], [[1], [4]], [[2], [5]] ])))
        # Request axis actually stay unchanged
        a = transpose(arange(6).reshape(1, 2, 3), ( 0, 1, 2))
        self.assertTrue(all(a == arange(6).reshape(1, 2, 3)))
        # Request odd axis order
        a = transpose(arange(6).reshape(1, 2, 3), ( 0, 2, 1))
        self.assertTrue(all(a == array([ [ [0, 3], [1, 4], [2, 5] ] ] )))
    def testDot(self):
        a = arange(6)
        b = arange(6)
        c = dot(a, b)
        self.assertEquals(55, c)
        a = array([[ 1, 2], [ 3, 4] ])
        b = array([[ 1, 2], [ 3, 4] ])
        c = dot(a, b)
        self.assertTrue(all(c == array([[ 7, 10], [15, 22]])))
        a = array([[ 1, 2], [ 3, 4] ])
        b = array([[ 1, 2, 3, 4], [ 5, 6, 7, 8] ])
        c = dot(a, b)
        self.assertTrue(all(c == array([[11, 14, 17, 20], [23, 30, 37, 44]])))
        a = array([[ 1, 2], [ 3, 4] ])
        b = array([10, 20])
        c = dot(a, b)
        self.assertTrue(all(c == array([[ 50, 110]])))
        angle = math.radians(90)
        rotate = array( [ [ math.cos(angle), -math.sin(angle) ], [ math.sin(angle), math.cos(angle) ] ] )
        vec = array( [ 1, 0 ])
        c = dot(rotate, vec)
        self.assertTrue(abs(0 - c[0]) < 0.001)
        self.assertEquals(1, c[1])
# Allow running this module directly to execute the whole test suite.
if __name__ == '__main__':
    unittest.main()
| {
"repo_name": "ControlSystemStudio/cs-studio",
"path": "applications/scan/scan-plugins/org.csstudio.numjy/jython/numjy_test.py",
"copies": "3",
"size": "15201",
"license": "epl-1.0",
"hash": -5402065485102217000,
"line_mean": 31.9025974026,
"line_max": 105,
"alpha_frac": 0.4652983356,
"autogenerated": false,
"ratio": 3.1143208358942838,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5079619171494283,
"avg_score": null,
"num_lines": null
} |
"""array_to_latex converts Numpy and Pandas arrays to formatted LaTeX."""
from setuptools import setup
import os
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Used to feed README.rst into ``long_description`` below.  The original
    implementation leaked the file handle (``open(...).read()`` with no
    close); a ``with`` block guarantees it is closed.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as fobj:
        return fobj.read()
# Extract __version__ from the package without importing it (importing
# would pull in the package's runtime dependencies during install).
with open('array_to_latex/__init__.py', 'rb') as fid:
    for raw_line in fid:
        decoded = raw_line.decode('utf-8')
        if decoded.startswith('__version__'):
            # Line looks like: __version__ = 'x.y.z' -> take 'x.y.z'
            version = decoded.strip().split()[-1][1:-1]
            break
# Package metadata; invoking this module runs the setuptools build/install.
setup(name='array_to_latex',
      # Note: Version must also be set in __init__.py
      # Version must also be set in download_url.
      version=version,
      description='Return Numpy and Pandas arrays as formatted LaTeX arrays.',
      author='Joseph C. Slater',
      author_email='joseph.c.slater@gmail.com',
      url='https://github.com/josephcslater/array_to_latex/',
      # download_url='https://github.com/josephcslater
      # /array_to_latex/archive/0.42.tar.gz',
      packages=['array_to_latex'],
      # README.rst doubles as the PyPI long description (see read() above).
      long_description=read('README.rst'),
      keywords=['latex', 'array', 'format', 'numpy', 'scipy'],
      install_requires=['numpy', 'pandas', 'clipboard'],
      classifiers=['Development Status :: 5 - Production/Stable',
                   'Intended Audience :: Science/Research',
                   'License :: OSI Approved :: MIT License',
                   'Environment :: Console',
                   'Intended Audience :: End Users/Desktop',
                   'Intended Audience :: Education',
                   'Intended Audience :: Science/Research',
                   'Programming Language :: Python',
                   'Programming Language :: Python :: 3.4',
                   'Programming Language :: Python :: 3.5',
                   'Programming Language :: Python :: 3.6',
                   'Topic :: Scientific/Engineering',
                   'Topic :: Text Processing :: Markup :: LaTeX',
                   'Operating System :: Microsoft :: Windows',
                   'Operating System :: POSIX',
                   'Operating System :: Unix',
                   'Operating System :: MacOS',
                   'Topic :: Utilities']
      )
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
| {
"repo_name": "josephcslater/array_to_latex",
"path": "setup.py",
"copies": "1",
"size": "2458",
"license": "mit",
"hash": -2347454406275935000,
"line_mean": 40.6610169492,
"line_max": 79,
"alpha_frac": 0.5687550854,
"autogenerated": false,
"ratio": 4.2822299651567945,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 59
} |
""" Array utilities
"""
from __future__ import absolute_import
import numpy as np
def strides_from(shape, dtype, order='C'):
    """ Return strides for a contiguous array of `shape` and `dtype`

    Parameters
    ----------
    shape : sequence
        shape of array to calculate strides from
    dtype : dtype-like
        dtype specifier for array
    order : {'C', 'F'}, optional
        whether array is C or FORTRAN ordered

    Returns
    -------
    strides : tuple
        sequence of length ``len(shape)`` giving strides for a contiguous
        array with given `shape`, `dtype` and `order`

    Examples
    --------
    >>> strides_from((2,3,4), 'i4')
    (48, 16, 4)
    >>> strides_from((3,2), 'f8')
    (16, 8)
    >>> strides_from((5,4,3), '?', order='F')
    (1, 5, 20)
    """
    dt = np.dtype(dtype)
    if dt.itemsize == 0:
        raise ValueError('Empty dtype "%s"' % dt)
    if order == 'F':
        # Fortran order: first axis is fastest-varying.
        return tuple(np.cumprod([dt.itemsize] + list(shape[:-1])))
    elif order == 'C':
        # C order: build strides for the reversed shape, then flip back.
        rev = np.cumprod([dt.itemsize] + list(shape)[::-1][:-1])
        return tuple(rev[::-1])
    raise ValueError('Unexpected order "%s"' % order)
| {
"repo_name": "alexis-roche/nipy",
"path": "nipy/utils/arrays.py",
"copies": "3",
"size": "1213",
"license": "bsd-3-clause",
"hash": -3136404276239238000,
"line_mean": 27.2093023256,
"line_max": 78,
"alpha_frac": 0.5630667766,
"autogenerated": false,
"ratio": 3.7208588957055215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5783925672305521,
"avg_score": null,
"num_lines": null
} |
""" Array utilities
"""
import numpy as np
def strides_from(shape, dtype, order='C'):
    """ Return strides for a contiguous array of `shape` and `dtype`

    Parameters
    ----------
    shape : sequence
        shape of array to calculate strides from
    dtype : dtype-like
        dtype specifier for array
    order : {'C', 'F'}, optional
        whether array is C or FORTRAN ordered

    Returns
    -------
    strides : tuple
        sequence of length ``len(shape)`` giving strides for a contiguous
        array with given `shape`, `dtype` and `order`

    Examples
    --------
    >>> strides_from((2,3,4), 'i4')
    (48, 16, 4)
    >>> strides_from((3,2), 'f8')
    (16, 8)
    >>> strides_from((5,4,3), '?', order='F')
    (1, 5, 20)
    """
    dt = np.dtype(dtype)
    if dt.itemsize == 0:
        raise ValueError('Empty dtype "%s"' % dt)
    if order not in ('C', 'F'):
        raise ValueError('Unexpected order "%s"' % order)
    if order == 'F':
        # Column-major: strides grow with the leading dimensions.
        dims = [dt.itemsize] + list(shape[:-1])
        return tuple(np.cumprod(dims))
    # Row-major: compute on the reversed shape, then undo the reversal.
    dims = [dt.itemsize] + list(shape)[::-1][:-1]
    return tuple(np.cumprod(dims)[::-1])
| {
"repo_name": "arokem/nipy",
"path": "nipy/utils/arrays.py",
"copies": "3",
"size": "1174",
"license": "bsd-3-clause",
"hash": 4027894527222715400,
"line_mean": 26.9523809524,
"line_max": 78,
"alpha_frac": 0.5562180579,
"autogenerated": false,
"ratio": 3.7034700315457414,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5759688089445741,
"avg_score": null,
"num_lines": null
} |
"""Arridity"""
import datetime
from pandas.io.sql import read_sql
import numpy as np
import matplotlib.dates as mdates
from pyiem.plot.use_agg import plt
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
def get_description():
    """ Return a dict describing how to call this plotter """
    today = datetime.date.today()
    sts = today - datetime.timedelta(days=180)
    desc = {"data": True, "cache": 86400}
    desc["description"] = """
This plot presents a time series of Arridity Index.
This index computes the standardized high temperature departure subtracted
by the standardized precipitation departure. For the purposes of this
plot, this index is computed daily over a trailing period of days of your
choice. The climatology is based on the present period of record
statistics. You can optionally plot this index for two other period of
days of your choice. Entering '0' will disable additional lines appearing
on the plot.
<br />You can also optionally generate this plot for the same period of
days over different years of your choice. When plotted over multiple
years, only "Number of Days #1' is considered. An additional year is
plotted representing the best root mean squared error fit to the selected
year's data.
"""
    desc["arguments"] = [
        {
            "type": "station",
            "name": "station",
            "default": "IA0200",
            "network": "IACLIMATE",
            "label": "Select Station:",
        },
        {"type": "int", "name": "days", "default": 91, "label": "Number of Days #1"},
        {
            "type": "int",
            "name": "days2",
            "default": 0,
            "label": "Number of Days #2 (0 disables)",
        },
        {
            "type": "int",
            "name": "days3",
            "default": 0,
            "label": "Number of Days #3 (0 disables)",
        },
        {
            "type": "year",
            "name": "year2",
            "default": 2004,
            "optional": True,
            "label": "Compare with year (optional):",
        },
        {
            "type": "year",
            "name": "year3",
            "default": 2012,
            "optional": True,
            "label": "Compare with year (optional)",
        },
        {
            "type": "date",
            "name": "sdate",
            "default": sts.strftime("%Y/%m/%d"),
            "min": "1893/01/01",
            "label": "Start Date of Plot",
        },
        {
            "type": "date",
            "name": "edate",
            "default": today.strftime("%Y/%m/%d"),
            "min": "1893/01/01",
            "label": "End Date of Plot",
        },
    ]
    return desc
def plotter(fdict):
    """ Go """
    # Resolve form inputs against the defaults from get_description().
    ctx = get_autoplot_context(fdict, get_description())
    station = ctx["station"]
    days = ctx["days"]
    days2 = ctx["days2"]
    # The SQL always computes three trailing windows; a disabled window (0)
    # is computed over a harmless 1-day span and simply never plotted.
    _days2 = days2 if days2 > 0 else 1
    days3 = ctx["days3"]
    _days3 = days3 if days3 > 0 else 1
    sts = ctx["sdate"]
    ets = ctx["edate"]
    yrrange = ets.year - sts.year
    year2 = ctx.get("year2")  # could be null!
    year3 = ctx.get("year3")  # could be null!
    pgconn = get_dbconn("coop")
    table = "alldata_%s" % (station[:2],)
    # agg: trailing-window mean high temp / precip sum per day;
    # agg2: per day-of-year climatology (mean and stddev of those windows);
    # outer SELECT: standardized departures t*, p* per day.
    df = read_sql(
        f"""
    WITH agg as (
        SELECT o.day, o.sday,
        avg(high) OVER (ORDER by day ASC ROWS %s PRECEDING) as avgt,
        sum(precip) OVER (ORDER by day ASC ROWS %s PRECEDING) as sump,
        count(*) OVER (ORDER by day ASC ROWS %s PRECEDING) as cnt,
        avg(high) OVER (ORDER by day ASC ROWS %s PRECEDING) as avgt2,
        sum(precip) OVER (ORDER by day ASC ROWS %s PRECEDING) as sump2,
        count(*) OVER (ORDER by day ASC ROWS %s PRECEDING) as cnt2,
        avg(high) OVER (ORDER by day ASC ROWS %s PRECEDING) as avgt3,
        sum(precip) OVER (ORDER by day ASC ROWS %s PRECEDING) as sump3,
        count(*) OVER (ORDER by day ASC ROWS %s PRECEDING) as cnt3
        from {table} o WHERE station = %s),
    agg2 as (
        SELECT sday,
        avg(avgt) as avg_avgt, stddev(avgt) as std_avgt,
        avg(sump) as avg_sump, stddev(sump) as std_sump,
        avg(avgt2) as avg_avgt2, stddev(avgt2) as std_avgt2,
        avg(sump2) as avg_sump2, stddev(sump2) as std_sump2,
        avg(avgt3) as avg_avgt3, stddev(avgt3) as std_avgt3,
        avg(sump3) as avg_sump3, stddev(sump3) as std_sump3
        from agg WHERE cnt = %s GROUP by sday)

    SELECT day,
    (a.avgt - b.avg_avgt) / b.std_avgt as t,
    (a.sump - b.avg_sump) / b.std_sump as p,
    (a.avgt2 - b.avg_avgt2) / b.std_avgt2 as t2,
    (a.sump2 - b.avg_sump2) / b.std_sump2 as p2,
    (a.avgt3 - b.avg_avgt3) / b.std_avgt3 as t3,
    (a.sump3 - b.avg_sump3) / b.std_sump3 as p3
    from agg a JOIN agg2 b on (a.sday = b.sday)
    ORDER by day ASC
    """,
        pgconn,
        params=(
            days - 1,
            days - 1,
            days - 1,
            _days2 - 1,
            _days2 - 1,
            _days2 - 1,
            _days3 - 1,
            _days3 - 1,
            _days3 - 1,
            station,
            days,
        ),
        index_col="day",
    )
    if df.empty:
        raise NoDataFound("No Data Found.")
    # Aridity index = standardized temperature departure minus standardized
    # precipitation departure, for each of the three window lengths.
    df["arridity"] = df["t"] - df["p"]
    df["arridity2"] = df["t2"] - df["p2"]
    df["arridity3"] = df["t3"] - df["p3"]
    (fig, ax) = plt.subplots(1, 1)
    if year2 is None:
        # Single-period mode: plot against real dates, optionally adding the
        # second and third window lengths.
        df2 = df.loc[sts:ets]
        ax.plot(
            df2.index.values,
            df2["arridity"],
            color="r",
            lw=2,
            label="%s days" % (days,),
        )
        maxval = df2["arridity"].abs().max() + 0.25
        if days2 > 0:
            ax.plot(
                df2.index.values,
                df2["arridity2"],
                color="b",
                lw=2,
                label="%s days" % (days2,),
            )
            maxval = max([maxval, df2["arridity2"].abs().max() + 0.25])
        if days3 > 0:
            ax.plot(
                df2.index.values,
                df2["arridity3"],
                color="g",
                lw=2,
                label="%s days" % (days3,),
            )
            maxval = max([maxval, df2["arridity3"].abs().max() + 0.25])
        ax.xaxis.set_major_formatter(mdates.DateFormatter("%-d %b\n%Y"))
        title = ""
    else:
        # Year-comparison mode: plot against a day index so different years
        # overlay; only the first window length is used.
        df2 = df.loc[sts:ets]
        ax.plot(
            np.arange(len(df2.index)),
            df2["arridity"],
            color="r",
            lw=2,
            label="%s" % (ets.year,),
        )
        maxval = df2["arridity"].abs().max() + 0.25
        if year2 is not None:
            sts2 = sts.replace(year=(year2 - yrrange))
            ets2 = ets.replace(year=year2)
            # Month tick positions: walk the comparison range one day at a
            # time and mark each first-of-month.
            xticks = []
            xticklabels = []
            now = sts2
            i = 0
            while now < ets2:
                if now.day == 1:
                    xticks.append(i)
                    xticklabels.append(now.strftime("%b"))
                i += 1
                now += datetime.timedelta(days=1)
            ax.set_xticks(xticks)
            ax.set_xticklabels(xticklabels)
            df2 = df.loc[sts2:ets2]
            ax.plot(
                np.arange(len(df2.index)),
                df2["arridity"],
                color="b",
                lw=2,
                label="%s" % (year2,),
            )
            maxval = max([maxval, df2["arridity"].abs().max() + 0.25])
        if year3 is not None:
            sts2 = sts.replace(year=(year3 - yrrange))
            ets2 = ets.replace(year=year3)
            df2 = df.loc[sts2:ets2]
            ax.plot(
                np.arange(len(df2.index)),
                df2["arridity"],
                color="g",
                lw=2,
                label="%s" % (year3,),
            )
            maxval = max([maxval, df2["arridity"].abs().max() + 0.25])
        # Compute year of best fit
        # (despite the name `mae`, the quantity below is a root mean squared
        # error; the year with the smallest RMSE vs the selected year wins)
        arridity = df.loc[sts:ets, "arridity"].values
        mae = 100
        useyear = None
        for _year in range(1951, datetime.date.today().year + 1):
            if _year == ets.year:
                continue
            sts2 = sts.replace(year=(_year - yrrange))
            ets2 = ets.replace(year=_year)
            arridity2 = df.loc[sts2:ets2, "arridity"].values
            # Compare only over the overlapping length of the two series.
            sz = min([len(arridity2), len(arridity)])
            error = (np.mean((arridity2[:sz] - arridity[:sz]) ** 2)) ** 0.5
            if error < mae:
                mae = error
                useyear = _year
        if useyear:
            sts2 = sts.replace(year=(useyear - yrrange))
            ets2 = ets.replace(year=useyear)
            df2 = df.loc[sts2:ets2]
            ax.plot(
                np.arange(len(df2.index)),
                df2["arridity"],
                color="k",
                lw=2,
                label="%s (%s best match)" % (useyear, ets.year),
            )
            maxval = max([maxval, df2["arridity"].abs().max() + 0.25])
        title = "%s Day" % (days,)
    ax.set_xlabel(
        "%s to %s" % (sts.strftime("%-d %b"), ets.strftime("%-d %b"))
    )
    ax.grid(True)
    ax.set_title(
        (
            "%s [%s] %s Arridity Index\n"
            "Std. High Temp Departure minus Std. Precip Departure"
        )
        % (ctx["_nt"].sts[station]["name"], station, title)
    )
    # Symmetric y-limits so the zero (climatology) line sits mid-axis.
    ax.set_ylim(0 - maxval, maxval)
    ax.set_ylabel("Arridity Index")
    ax.text(
        1.01,
        0.75,
        "<-- More Water Stress",
        ha="left",
        va="center",
        transform=ax.transAxes,
        rotation=-90,
    )
    ax.text(
        1.01,
        0.25,
        "Less Water Stress -->",
        ha="left",
        va="center",
        transform=ax.transAxes,
        rotation=-90,
    )
    ax.legend(ncol=4, loc="best", fontsize=10)
    return fig, df
# Allow exercising this autoplot from the command line with all defaults.
if __name__ == "__main__":
    plotter(dict())
| {
"repo_name": "akrherz/iem",
"path": "htdocs/plotting/auto/scripts100/p149.py",
"copies": "1",
"size": "9852",
"license": "mit",
"hash": 3203322699647321600,
"line_mean": 31.3016393443,
"line_max": 78,
"alpha_frac": 0.4935038571,
"autogenerated": false,
"ratio": 3.3682051282051284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43617089853051283,
"avg_score": null,
"num_lines": null
} |
arr = [
"37107287533902102798797998220837590246510135740250",
"46376937677490009712648124896970078050417018260538",
"74324986199524741059474233309513058123726617309629",
"91942213363574161572522430563301811072406154908250",
"23067588207539346171171980310421047513778063246676",
"89261670696623633820136378418383684178734361726757",
"28112879812849979408065481931592621691275889832738",
"44274228917432520321923589422876796487670272189318",
"47451445736001306439091167216856844588711603153276",
"70386486105843025439939619828917593665686757934951",
"62176457141856560629502157223196586755079324193331",
"64906352462741904929101432445813822663347944758178",
"92575867718337217661963751590579239728245598838407",
"58203565325359399008402633568948830189458628227828",
"80181199384826282014278194139940567587151170094390",
"35398664372827112653829987240784473053190104293586",
"86515506006295864861532075273371959191420517255829",
"71693888707715466499115593487603532921714970056938",
"54370070576826684624621495650076471787294438377604",
"53282654108756828443191190634694037855217779295145",
"36123272525000296071075082563815656710885258350721",
"45876576172410976447339110607218265236877223636045",
"17423706905851860660448207621209813287860733969412",
"81142660418086830619328460811191061556940512689692",
"51934325451728388641918047049293215058642563049483",
"62467221648435076201727918039944693004732956340691",
"15732444386908125794514089057706229429197107928209",
"55037687525678773091862540744969844508330393682126",
"18336384825330154686196124348767681297534375946515",
"80386287592878490201521685554828717201219257766954",
"78182833757993103614740356856449095527097864797581",
"16726320100436897842553539920931837441497806860984",
"48403098129077791799088218795327364475675590848030",
"87086987551392711854517078544161852424320693150332",
"59959406895756536782107074926966537676326235447210",
"69793950679652694742597709739166693763042633987085",
"41052684708299085211399427365734116182760315001271",
"65378607361501080857009149939512557028198746004375",
"35829035317434717326932123578154982629742552737307",
"94953759765105305946966067683156574377167401875275",
"88902802571733229619176668713819931811048770190271",
"25267680276078003013678680992525463401061632866526",
"36270218540497705585629946580636237993140746255962",
"24074486908231174977792365466257246923322810917141",
"91430288197103288597806669760892938638285025333403",
"34413065578016127815921815005561868836468420090470",
"23053081172816430487623791969842487255036638784583",
"11487696932154902810424020138335124462181441773470",
"63783299490636259666498587618221225225512486764533",
"67720186971698544312419572409913959008952310058822",
"95548255300263520781532296796249481641953868218774",
"76085327132285723110424803456124867697064507995236",
"37774242535411291684276865538926205024910326572967",
"23701913275725675285653248258265463092207058596522",
"29798860272258331913126375147341994889534765745501",
"18495701454879288984856827726077713721403798879715",
"38298203783031473527721580348144513491373226651381",
"34829543829199918180278916522431027392251122869539",
"40957953066405232632538044100059654939159879593635",
"29746152185502371307642255121183693803580388584903",
"41698116222072977186158236678424689157993532961922",
"62467957194401269043877107275048102390895523597457",
"23189706772547915061505504953922979530901129967519",
"86188088225875314529584099251203829009407770775672",
"11306739708304724483816533873502340845647058077308",
"82959174767140363198008187129011875491310547126581",
"97623331044818386269515456334926366572897563400500",
"42846280183517070527831839425882145521227251250327",
"55121603546981200581762165212827652751691296897789",
"32238195734329339946437501907836945765883352399886",
"75506164965184775180738168837861091527357929701337",
"62177842752192623401942399639168044983993173312731",
"32924185707147349566916674687634660915035914677504",
"99518671430235219628894890102423325116913619626622",
"73267460800591547471830798392868535206946944540724",
"76841822524674417161514036427982273348055556214818",
"97142617910342598647204516893989422179826088076852",
"87783646182799346313767754307809363333018982642090",
"10848802521674670883215120185883543223812876952786",
"71329612474782464538636993009049310363619763878039",
"62184073572399794223406235393808339651327408011116",
"66627891981488087797941876876144230030984490851411",
"60661826293682836764744779239180335110989069790714",
"85786944089552990653640447425576083659976645795096",
"66024396409905389607120198219976047599490197230297",
"64913982680032973156037120041377903785566085089252",
"16730939319872750275468906903707539413042652315011",
"94809377245048795150954100921645863754710598436791",
"78639167021187492431995700641917969777599028300699",
"15368713711936614952811305876380278410754449733078",
"40789923115535562561142322423255033685442488917353",
"44889911501440648020369068063960672322193204149535",
"41503128880339536053299340368006977710650566631954",
"81234880673210146739058568557934581403627822703280",
"82616570773948327592232845941706525094512325230608",
"22918802058777319719839450180888072429661980811197",
"77158542502016545090413245809786882778948721859617",
"72107838435069186155435662884062257473692284509516",
"20849603980134001723930671666823555245252804609722",
"53503534226472524250874054075591789781264330331690"
]
# Long-addition of the 100 fifty-digit numbers in ``arr``, one decimal
# column at a time with an explicit carry (Project Euler #13: first ten
# digits of the sum).  Fixes: the original was Python 2 only (``print``
# statement, ``/`` as integer division) and printed a debug line for every
# single digit processed (5000 lines of noise).
carry = 0
sstr = ""
for j in range(len(arr[0]) - 1, -1, -1):
    # Column total = carry plus the j-th digit of every number.
    column_total = carry + sum(int(row[j]) for row in arr)
    sstr = str(column_total % 10) + sstr
    carry = column_total // 10
# Prepend the remaining carry; when it is zero the string is already
# complete (matches the original's carry*10 + leading-digit splice).
if carry:
    sstr = str(carry) + sstr
print(sstr[0:10])
| {
"repo_name": "parin2092/cook",
"path": "euler/p13.py",
"copies": "2",
"size": "5700",
"license": "mit",
"hash": 4656719075403551000,
"line_mean": 46.5,
"line_max": 53,
"alpha_frac": 0.9050877193,
"autogenerated": false,
"ratio": 2.2118742724097786,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4116961991709779,
"avg_score": null,
"num_lines": null
} |
##Arrr, me mateys! Yer' challenge fer' today be a tough one. It be gettin awfully borein' on the high seas, so yer' job be to create a pirate based fightin' game! This game oughter' be turn based, and you oughter' be able to pick yer attacks every turn. The best game'll be winnin' some custom flair, and all the rest o' ya will be walkin the plank!
##Translations from http://funtranslations.com/pirate
from random import randint

# Parallel lists indexed by player: pirate name, remaining health, and
# [min, max] damage range used by randint() when an attack lands.
pirates = []
piratesHealth= []
piratesAttack = []
def evaluateBattle(one, two):
    """Resolve a Scissors/Paper/Stone round from the attacker's viewpoint.

    Returns "Win" / "Lose" / "Draw", or "Again" when either sign is not a
    recognised choice.
    """
    beats = {"Scissors": "Paper", "Paper": "Stone", "Stone": "Scissors"}
    if one not in beats or two not in beats:
        return "Again"
    if beats[one] == two:
        return "Win"
    if beats[two] == one:
        return "Lose"
    return "Draw"
# Main game driver.  NOTE(review): the original indentation was lost in
# extraction; the nesting below is a best-effort reconstruction -- confirm
# against the repository.  The outer `while True` appears to restart setup
# after a game ends, but the pirate lists are never cleared between games.
while True:
    # --- Setup: collect each player's pirate. ---
    number = input("How many players? ")
    for player in range(int(number)):
        name = input("Player " + str(player + 1) + "\'s name? ")
        health = input("Player " + str(player + 1) + "\'s health? ")
        attackMin = input("Player " + str(player + 1) + "\'s minimum attack? ")
        attackMax = input("Player " + str(player + 1) + "\'s maximum attack? ")
        pirates.append(name)
        piratesHealth.append(int(health))
        piratesAttack.append([int(attackMin), int(attackMax)])
    print("Let's begin!")
    currentPlayer = 0
    # --- Turn loop: runs until a single pirate remains. ---
    while len(pirates) > 1:
        # Remove the current pirate if already dead (all three parallel
        # lists must be popped together to stay aligned).
        if piratesHealth[currentPlayer] < 1:
            pirates.pop(currentPlayer)
            piratesHealth.pop(currentPlayer)
            piratesAttack.pop(currentPlayer)
            currentPlayer += 1
        # Wrap the turn index back to the first pirate.
        if currentPlayer > (len(pirates) - 1):
            currentPlayer = 0
        if len(pirates) == 1:
            break
        target = input("Player " + str(currentPlayer + 1) + ", who do ye want \'t attack? Check \'t check health. ")
        if target == "check":
            # "check" is a free action: show everyone's health, no turn used.
            print(", ".join(pirates) + " have " + ", ".join(str(pirate) for pirate in piratesHealth) + " health respectively.")
        else:
            # Both pirates throw a sign; the winner deals random damage
            # drawn from their own [min, max] attack range.
            attackerSign = input("Player " + str(currentPlayer + 1) + ", Scissors, Paper, or Stone? ")
            targetSign = input("Player " + str(pirates.index(target) + 1) + ", Scissors, Paper, or Stone? ")
            result = evaluateBattle(attackerSign, targetSign)
            if result == "Win":
                damage = randint(piratesAttack[currentPlayer][0], piratesAttack[currentPlayer][1])
                piratesHealth[pirates.index(target)] -= damage
                print("Aye, avast! " + target + " be left with " + str(piratesHealth[pirates.index(target)]) + " health!")
                currentPlayer += 1
            elif result == "Lose":
                # The defender counter-attacks with their own damage range.
                damage = randint(piratesAttack[pirates.index(target)][0], piratesAttack[pirates.index(target)][1])
                piratesHealth[currentPlayer] -= damage
                print("Nay, by Blackbeard's sword! Ye be left with " + str(piratesHealth[currentPlayer]) + " health!")
                currentPlayer += 1
            elif result == "Draw":
                print("Nobody gets hurt!")
                currentPlayer += 1
            elif result == "Again":
                # Invalid sign entered: same player's turn repeats.
                print("Try again!")
    print(pirates[0] + " be th\' winner!")
| {
"repo_name": "ngmhprogramming/dailyprogrammer",
"path": "Python/python_hard_5.py",
"copies": "1",
"size": "3324",
"license": "mit",
"hash": 5689617239030351000,
"line_mean": 48.3636363636,
"line_max": 349,
"alpha_frac": 0.5767148014,
"autogenerated": false,
"ratio": 3.5286624203821657,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4605377221782166,
"avg_score": null,
"num_lines": null
} |
# TODO: ARRUMAR ("fix up" in Portuguese) -- author's cleanup marker; kept as a
# comment because a bare name at module level raises NameError on import.
import Pmw
import Tkinter as tk
import matplotlib as mpl
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from os import chdir,listdir,path,getcwd
class PlotWindow():
    """Matplotlib figure embedded in a Tk frame that shows density profiles
    for one shot, with a time slider (data read from ../PROC/<shot>/level_0).
    """

    def __init__(self, shot=0, plot_frame=0):
        # `plot_frame` is the Tk container widget; the default 0 is a
        # placeholder -- real callers pass a tk.Frame (see PlotProf).
        self.shot = shot
        self.frame = plot_frame
        self.plot_profiles()

    def changeshot(self, shot):
        # Only records the new shot number; the plot is NOT redrawn here.
        self.shot = shot

    def plot_profiles(self):
        """Load every radius file in the shot's level_0 folder and draw an
        interactive density-vs-radius plot with a time slider."""
        prof_folder = path.join(getcwd(), "..", "PROC", "%s" % self.shot, "level_0")
        chdir(prof_folder)
        # ne.dat: density grid shared by all profiles.  prof_info.dat:
        # only info[1] and info[2] (slider time bounds) are used below --
        # the meaning of the remaining entries is not visible here.
        ne = np.loadtxt('ne.dat')
        info = np.loadtxt('prof_info.dat')
        prof_list = listdir(prof_folder)
        prof_list.sort()
        # One row per time file; the folder also holds ne.dat and
        # prof_info.dat, hence the -2.  Radii are converted to cm (*1e2).
        position = np.empty(shape=((len(prof_list) - 2), len(ne)))
        times = np.empty(len(prof_list) - 2)
        i = 0
        for r_file in prof_list:
            # NOTE(review): str.strip('.dat') strips *characters*, not a
            # suffix -- it also removes leading/trailing a/d/t/. characters
            # from the stem; confirm the file naming makes this safe.
            name = r_file.strip('.dat')
            if name not in ('prof_info', 'ne'):
                position[i] = np.loadtxt(r_file) * 1e2
                times[i] = name  # file stem parsed as the profile time (ms)
                i += 1
        self.fig = plt.figure()
        self.canvas = FigureCanvasTkAgg(self.fig, master=self.frame)
        self.canvas.get_tk_widget().grid(column=0, row=1)
        ax = self.fig.add_subplot(111)
        plt.subplots_adjust(left=0.25, bottom=0.25)
        l, = ax.plot(position[0], ne, lw=2, color='blue')
        ax.axis([0, 20, ne[0], ne[-1]])
        ax.legend(loc='upper left')
        ax.set_xlabel('r (cm)')
        ax.set_ylabel('density (10^19 m^-3)')
        ax.set_title("# %s" % self.shot)
        axcolor = 'lightgoldenrodyellow'
        # NOTE(review): `axisbg` was renamed `facecolor` and later removed in
        # newer Matplotlib -- confirm the Matplotlib version targeted here.
        axfreq = mpl.axes.Axes(self.fig, [0.25, 0.4, 0.65, 0.03], axisbg=axcolor)
        stime = Slider(axfreq, 'time', info[1], info[2], valinit=info[1] + (info[2] - info[1]) * 0.2)

        def update(val):
            # Slider value appears to be seconds, stored times in ms
            # (hence *1e3); pick the nearest stored profile and redraw.
            time = stime.val
            i = (abs(time * 1e3 - times)).argmin()
            l.set_xdata(position[i])
            self.fig.canvas.draw_idle()
        stime.on_changed(update)
        plt.show()
class PlotProf():
    """Tk/Pmw front end: a shot-number entry, action buttons, a scrolled
    info box, and a frame into which PlotWindow draws the profiles."""

    def __init__(self, root):
        self.root = root
        self.draw()

    def draw(self):
        """Build all widgets: info box, entry fields, buttons, plot frame."""
        self.infobox(70)
        self.st.appendtext('Input shot')
        # Parallel lists describing the entry fields (currently just one,
        # but the loop below supports adding more).
        self.entries_caption = ['Shot:']
        self.entries_default = ['32214']
        self.keys_entries = ['shot']
        self.number_entries = len(self.keys_entries)
        self.entries = {}
        for e in range(self.number_entries):
            self.makeentry(self.keys_entries[e], self.entries_caption[e], self.entries_default[e])
        Pmw.alignlabels(self.entries.values())
        self.buttons()
        # Pressing <Return> in the shot field acts like the Set button.
        self.entries['shot'].configure(command=self.choose_shot)
        self.entries['shot'].focus_set()
        self.plot_frame = tk.Frame()
        self.plot_frame.pack()

    def infobox(self, height=150):
        """Create the read-only scrolled text area used for status messages."""
        # Create the ScrolledText with headers.
        # 'Helvetica', 'Times', 'Fixed', 'Courier' or 'Typewriter'
        # fixedFont = Pmw.logicalfont('Fixed',size=12)
        self.st = Pmw.ScrolledText(self.root,
                                   borderframe=1,
                                   usehullsize=1,
                                   hull_height=height,
                                   text_wrap='word',
                                   # text_font = fixedFont,
                                   text_padx=4,
                                   text_pady=4,
                                   )
        self.st.pack(side='bottom', padx=5, pady=5, fill='both', expand=1)
        # Prevent users' modifying text
        self.st.configure(text_state='disabled')

    def buttons(self):
        """Create the About / Close / Set button row."""
        # Create the button box
        self.buttonBox = Pmw.ButtonBox(self.root, labelpos='nw')
        self.buttonBox.pack(fill='x', expand=1, padx=3, pady=3)
        # Add buttons to the ButtonBox.
        self.buttonBox.add('About', command=self.about_gui)
        self.buttonBox.add('Close', command=self.close)
        self.buttonBox.add('Set', command=self.choose_shot)
        # Make all the buttons the same width.
        self.buttonBox.alignbuttons()

    def makeentry(self, key, caption, default):
        """Create one labelled Pmw entry field and register it under *key*."""
        self.entries[key] = Pmw.EntryField(self.root,
                                           labelpos='w',
                                           label_text=caption,
                                           value=default)
        self.entries[key].pack(side='top', fill='x', expand=1, padx=10, pady=5)

    def choose_shot(self):
        """Read the shot number and create (or retarget) the plot window."""
        self.shot = int(self.entries['shot'].get())
        self.st.clear()
        self.st.appendtext('Reflectometry parameters:')
        if hasattr(self, 'plotwindow'):
            # NOTE(review): changeshot() only stores the number; the existing
            # window is not redrawn for the new shot -- confirm intent.
            self.plotwindow.changeshot(self.shot)
        else:
            self.plotwindow = PlotWindow(self.shot, self.plot_frame)

    def close(self):
        self.root.destroy()

    def about_gui(self):
        """Populate the Pmw about metadata and show the About dialog."""
        Pmw.aboutversion('1.0\n Mar 15 2015')
        Pmw.aboutcopyright('Author: Cassio H. S. Amador')
        Pmw.aboutcontact(
            'For more informations/bug reporting:\n' +
            ' email: cassioamador@yahoo.com.br'
        )
        self.about = Pmw.AboutDialog(self.root, applicationname='Ref Setup')
        self.about.withdraw()
        self.about.show()
# Run the profile viewer as a standalone Tk application.
if __name__== '__main__':
    root = tk.Tk()
    root.title('Ref Profiles')
    Pmw.initialise(fontScheme='pmw2')
    plotprof = PlotProf(root)
    root.mainloop()
| {
"repo_name": "CassioAmador/profile_tcabr",
"path": "visualization_tools/plot_prof2.py",
"copies": "1",
"size": "5175",
"license": "mit",
"hash": -547969588773308500,
"line_mean": 33.0526315789,
"line_max": 98,
"alpha_frac": 0.5785507246,
"autogenerated": false,
"ratio": 3.400131406044678,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9373282473251332,
"avg_score": 0.021079931478669208,
"num_lines": 152
} |
# TODO: ARRUMAR ("fix up" in Portuguese) -- author's cleanup marker; kept as a
# comment because a bare name at module level raises NameError on import.
"""
Function: Evaluates the maximum density of the plasma, if it is lower than the maximum
probing frequency. Try to find a jump in the group delay
Authors:
Cassio Amador (cassioamador at yahoo.com.br)
Gilson Ronchi (gronchi at if.usp.br)
TODO: choose best one and erase the other
"""
from os import path, getcwd, makedirs
import pylab as p
import numpy as np
import scipy.signal
from proc_group_delay import ProcGroupDelay
def find_ne_max(self):
    """Append the maximum detected density to ``self.ne_max``.

    Detects where the group delay ``self.gd_m`` either drops to <= 0 or
    jumps by more than 3.5, converts the probing frequency at that point to
    a density, and NaNs out the group delay beyond it.

    NOTE(review): written like a method but defined at module level with an
    explicit ``self``; ``rf`` is never imported in this module, so the
    ``rf.freq2den`` calls would raise NameError -- presumably a
    ref_functions-style helper, confirm against the repository.
    """
    # Lazily create the accumulator the first time this is called.
    if not hasattr(self, "ne_max"):
        self.ne_max = []
    # Indices where the measured group delay is non-positive.
    zeros_gd = p.where(self.gd_m <= 0.0)[0]
    # print(max(p.diff(self.gd_m)))
    if len(zeros_gd) != 0:
        # "pequeno" = "small" (pt): cutoff reached within the sweep.
        print("pequeno")
        # Probing frequency (GHz -> Hz via 1e9) at the first zero crossing.
        self.ne_max.append(rf.freq2den(1e9 * self.X[zeros_gd[0]]))
        # Invalidate the group delay beyond the detected cutoff.
        self.gd_m[zeros_gd[0]:] = float("NaN")
    else:
        # Otherwise look for a large jump (> 3.5) in the group delay.
        dif_gd = p.where(p.diff(self.gd_m) > 3.5)[0]
        if len(dif_gd) != 0:
            # "grande" = "large" (pt): jump detected.
            print("grande")
            # NOTE(review): density uses the *last* jump index but the NaN
            # masking uses the *first* -- confirm this asymmetry is intended.
            self.ne_max.append(rf.freq2den(1e9 * self.X[dif_gd[-1]]))
            self.gd_m[dif_gd[0]:] = float("NaN")
        else:
            # No cutoff signature found in this sweep.
            self.ne_max.append(float("NaN"))
    # zero_jumps = p.where(p.diff(zero_gd) > 1)[0]
def find_ne_max2(self):
    """Find maximum density.

    Variant detector using the two-band (K and Ka) group delays
    ``self.gd_k`` / ``self.gd_ka``.  Sets ``self.ne_max``, ``self.gd2`` /
    ``self.freqs2`` (the usable portion of the sweep) and
    ``self.no_plasma``.

    NOTE(review): module-level function with an explicit ``self``;
    ``rf`` and ``wall_corr`` are not defined/imported in this module --
    confirm where they come from.  Also note this *assigns* ``self.ne_max``
    while find_ne_max() *appends* to a list -- the two are inconsistent.
    Indentation below is reconstructed; verify against the repository.
    """
    # Full probing-frequency axis (both bands) and its density equivalent.
    self.freqs_full = np.sort(np.concatenate((self.X_k, self.X_ka)))
    self.ne_full = rf.freq2den(1e9 * self.freqs_full)
    # Cutoff signature in the K band: negative delay or delay near the
    # wall-reflection value (0.95 * wall_corr).
    index = np.where(np.logical_or(self.gd_k < 0, self.gd_k > 0.95 * wall_corr))[0]
    if len(index) > 0:
        self.ne_max = rf.freq2den(1e9 * self.X_k[index[0]])
        # No plasma?
        if self.ne_max < 0.45e19:
            self.no_plasma = True
            self.ne_max = np.nan
            self.gd2 = []
            self.freqs2 = []
        else:
            self.no_plasma = False
            # Keep only the K-band sweep up to the cutoff.
            self.gd2 = self.gd_k[:index[0]]
            self.freqs2 = self.X_k[:index[0]]
    else:
        # No cutoff in K band: look for it in the Ka band.
        index = np.where(np.logical_or(self.gd_ka < 0, self.gd_ka > 0.95 * wall_corr))[0]
        if len(index) > 0:
            self.ne_max = rf.freq2den(1e9 * self.X_ka[index[0]])
            if index[0] == 0:
                # Cutoff right at the start of Ka: only the K band is usable.
                self.gd2 = self.gd_k[:index[0]]
                self.freqs2 = self.X_k[:index[0]]
                return
        else:
            # plasma density > max probing density
            index = [-1]
            self.ne_max = np.nan
        # Merge the K band with the usable part of the Ka band, sorted by
        # frequency so the combined sweep is monotonic.
        freqs = np.concatenate((self.X_k, self.X_ka[:index[0]]))
        sort_index = np.argsort(freqs)
        self.gd2 = np.concatenate((self.gd_k, self.gd_ka[:index[0]]))[sort_index]
        self.freqs2 = freqs[sort_index]
        self.no_plasma = False
        return
| {
"repo_name": "CassioAmador/profile_tcabr",
"path": "src/proc_ne_max.py",
"copies": "1",
"size": "2727",
"license": "mit",
"hash": -2557700547420067300,
"line_mean": 37.4084507042,
"line_max": 93,
"alpha_frac": 0.5137513751,
"autogenerated": false,
"ratio": 3.0744081172491544,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40881594923491543,
"avg_score": null,
"num_lines": null
} |
# TODO: ARRUMAR ("fix up" in Portuguese) -- author's cleanup marker; kept as a
# comment because a bare name at module level raises NameError on import.
"""
Given a shot_number, it reads all files in 'prof_folder' to plot it.
With a slider, it is possible to change time of profile with a mouse.
author: Cassio Amador
TODO: read from 'ini' file.
???
"""
from os import chdir, getcwd, listdir, path

import pylab as p

from input_explorer import inputExplorer
from proc_profile import ProcProfile
# Load every reconstructed profile for the shot from the PROC folder and
# prepare the arrays used by the interactive plot below.
# Fixes: the original used the Python-2-only ``print len(times)`` statement,
# and assigned shot_number twice (the first value was dead code).
prof_folder = path.join(getcwd(), "..", "PROC")
chdir(prof_folder)
# ne.dat: density grid shared by every profile; prof_info.dat: slider
# metadata (info[1]/info[2] are used as the time bounds further down).
ne = p.loadtxt('ne.dat')
info = p.loadtxt('prof_info.dat')
shot_number = 28749  # previously 28061; the later assignment always won
prof_list = listdir(prof_folder)
prof_list.sort()
# One row per time file; the folder also holds ne.dat and prof_info.dat,
# hence the -2.  Radii are converted to cm (*1e2).
position = p.empty(shape=((len(prof_list) - 2), len(ne)))
times = p.empty(len(prof_list) - 2)
print(len(times))
i = 0
for r_file in prof_list:
    # NOTE(review): str.strip('.dat') strips characters, not a suffix.
    name = r_file.strip('.dat')
    if name not in ('prof_info', 'ne'):
        position[i] = p.loadtxt(r_file) * 1e2
        times[i] = name  # file stem parsed as the profile time (ms)
        i += 1
fig, ax = p.subplots(1)
# NOTE(review): Axes.hold() was removed in Matplotlib 3 -- confirm the
# Matplotlib version targeted before keeping this call.
ax.hold(False)
def plotDynamics(time):
    """Redraw the density profile closest to *time* (slider value, s)."""
    # Stored times are in ms, the slider is in seconds: pick the nearest.
    nearest = (abs(time * 1e3 - times)).argmin()
    ax.plot(position[nearest], ne, label='%.2f ms' % time)
    ax.legend(loc='upper left')
    ax.set_xlabel('r (cm)')
    ax.set_ylabel('density (10^19 m^-3)')
    ax.set_xlim(0, 20)
    ax.set_title("# %s" % shot_number)
    fig.canvas.draw()
sliders = [ { 'label' : label, 'valmin': info[1] , 'valmax': info[2],
'valinit': info[1]+(info[2]-info[1])*0.2 }
for label in [ 'time' ] ]
inputExplorer(plotDynamics,sliders)
| {
"repo_name": "CassioAmador/profile_tcabr",
"path": "visualization_tools/plot_prof4.py",
"copies": "1",
"size": "1414",
"license": "mit",
"hash": 979322776490369000,
"line_mean": 23.3793103448,
"line_max": 71,
"alpha_frac": 0.6371994342,
"autogenerated": false,
"ratio": 2.72972972972973,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.386692916392973,
"avg_score": null,
"num_lines": null
} |
'''Arsenal API bulk node_groups.'''
# Copyright 2015 CityGrid Media, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from datetime import datetime
from pyramid.view import view_config
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm.exc import MultipleResultsFound
from arsenalweb.models.common import (
DBSession,
)
from arsenalweb.models.nodes import (
NodeAudit,
)
from arsenalweb.views import (
get_authenticated_user,
)
from arsenalweb.views.api.common import (
api_200,
api_400,
api_404,
api_500,
)
from arsenalweb.views.api.nodes import (
find_node_by_id,
)
LOG = logging.getLogger(__name__)
def remove_node_groups(node_ids, auth_user):
    '''Remove all node_groups from a list of node_ids.

    Args:
        node_ids : Iterable of node ids; each is resolved via find_node_by_id.
        auth_user: Dict for the authenticated user; only ``user_id`` is read
                   (recorded on each audit row).

    Returns:
        ``{'nodes': [<node name>, ...]}`` on success, or an api_400/api_404/
        api_500 response object on failure.
    '''
    resp = {'nodes': []}
    try:
        for node_id in node_ids:
            node = find_node_by_id(node_id)
            LOG.debug('Removing all node_groups from node: {0} '
                      'node_groups: {1}'.format(node.name,
                                                [ng.name for ng in node.node_groups]))
            resp['nodes'].append(node.name)
            utcnow = datetime.utcnow()
            LOG.debug('node_group objects: {0}'.format(node.node_groups))
            # Iterate over a copy: node.node_groups is mutated in the loop.
            for node_group in list(node.node_groups):
                LOG.debug('Removing node_group: {0}'.format(node_group.name))
                try:
                    # Audit row is created before the removal so a failed
                    # removal can roll it back in the handlers below.
                    audit = NodeAudit(object_id=node.id,
                                      field='node_group_id',
                                      old_value=node_group.id,
                                      new_value='deleted',
                                      updated_by=auth_user['user_id'],
                                      created=utcnow)
                    DBSession.add(audit)
                    LOG.debug('Trying to remove node_group: {0} from '
                              'node: {1}'.format(node_group.name, node.name,))
                    node.node_groups.remove(node_group)
                    LOG.debug('Successfully removed node_group: {0} from '
                              'node: {1}'.format(node_group.name, node.name,))
                # NOTE(review): both handlers below are identical, so the
                # first could be dropped. Also, scoped_session.remove() takes
                # no arguments and disposes the whole session -- to discard
                # the pending audit row this probably should be
                # DBSession.expunge(audit); confirm intent.
                except (ValueError, AttributeError) as ex:
                    LOG.debug('Died removing node_group: {0}'.format(ex))
                    DBSession.remove(audit)
                except Exception as ex:
                    LOG.debug('Died removing node_group: {0}'.format(ex))
                    DBSession.remove(audit)
            LOG.debug('Final node_groups: {0}'.format([ng.name for ng in node.node_groups]))
            DBSession.add(node)
        DBSession.flush()
    except (NoResultFound, AttributeError):
        return api_404(msg='node_group not found')
    except MultipleResultsFound:
        # node_id is bound here: find_node_by_id raised inside the loop.
        msg = 'Bad request: node_id is not unique: {0}'.format(node_id)
        return api_400(msg=msg)
    except Exception as ex:
        LOG.error('Error removing all node_groups from node. exception={0}'.format(ex))
        return api_500()
    return resp
@view_config(route_name='api_b_node_groups_deassign', permission='node_group_delete', request_method='DELETE', renderer='json')
def api_b_node_groups_deassign(request):
    '''Process delete requests for the /api/bulk/node_groups/deassign route.

    Takes a list of nodes and deassigns all node_groups from them.
    Expects a JSON body of the form ``{"node_ids": [<id>, ...]}``.

    Returns api_200 with the affected node names, api_400 when the
    ``node_ids`` parameter is missing, api_500 on any other failure.
    '''
    try:
        payload = request.json_body
        auth_user = get_authenticated_user(request)
        LOG.debug('Updating {0}'.format(request.url))
        try:
            # Extract the parameter inside this try: in the original the
            # lookup happened outside it, so a missing 'node_ids' key fell
            # through to the generic 500 handler instead of the intended
            # 400 "Missing required parameter" response below.
            node_ids = payload['node_ids']
            resp = remove_node_groups(node_ids, auth_user)
        except KeyError:
            msg = 'Missing required parameter: {0}'.format(payload)
            return api_400(msg=msg)
        except Exception as ex:
            LOG.error('Error removing all node_groups from '
                      'node={0},exception={1}'.format(request.url, ex))
            return api_500(msg=str(ex))
        return api_200(results=resp)
    except Exception as ex:
        LOG.error('Error updating node_groups={0},exception={1}'.format(request.url, ex))
        return api_500(msg=str(ex))
| {
"repo_name": "CityGrid/arsenal",
"path": "server/arsenalweb/views/api/bulk_node_groups.py",
"copies": "1",
"size": "4689",
"license": "apache-2.0",
"hash": 380222964193164100,
"line_mean": 38.075,
"line_max": 127,
"alpha_frac": 0.5886116443,
"autogenerated": false,
"ratio": 4.000853242320819,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5089464886620818,
"avg_score": null,
"num_lines": null
} |
'''Arsenal API data_centers.'''
# Copyright 2015 CityGrid Media, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from datetime import datetime
from pyramid.view import view_config
from sqlalchemy.orm.exc import NoResultFound
from arsenalweb.views import (
get_authenticated_user,
)
from arsenalweb.views.api.common import (
api_200,
api_400,
api_404,
api_500,
api_501,
collect_params,
)
from arsenalweb.models.common import (
DBSession,
)
from arsenalweb.models.data_centers import (
DataCenter,
DataCenterAudit,
)
from arsenalweb.models.statuses import (
Status,
)
LOG = logging.getLogger(__name__)
# Functions
def find_status_by_name(status_name):
    '''Return the single Status row whose name matches ``status_name``.

    Raises NoResultFound / MultipleResultsFound as per Query.one().
    '''
    return DBSession.query(Status).filter(Status.name == status_name).one()
def find_data_center_by_name(name):
    '''Look up a DataCenter by its name.

    Returns the DataCenter object if exactly one matches; raises
    NoResultFound (or MultipleResultsFound) otherwise, per Query.one().
    '''
    LOG.debug('Searching for datacenter by name: {0}'.format(name))
    query = DBSession.query(DataCenter).filter(DataCenter.name == name)
    return query.one()
def find_data_center_by_id(data_center_id):
    '''Look up a DataCenter by its primary id.

    Raises NoResultFound if no row matches, per Query.one().
    '''
    LOG.debug('Searching for datacenter by id: {0}'.format(data_center_id))
    query = DBSession.query(DataCenter).filter(DataCenter.id == data_center_id)
    return query.one()
def create_data_center(name=None, updated_by=None, **kwargs):
    '''Create a new data_center.

    Required params:
        name      : A string that is the name of the datacenter.
        updated_by: A string that is the user making the update.

    Optional kwargs:
        status    : A status name; resolved to status_id via
                    find_status_by_name (takes precedence over status_id).
        status_id : An integer representing the status_id from the statuses
                    table. If not sent, the data_center will be set to
                    status_id 2.

    Returns:
        api_200 wrapping the new DataCenter, or api_500 on any failure.
    '''
    try:
        LOG.info('Creating new data_center name: {0}'.format(name))
        utcnow = datetime.utcnow()
        # Set status to setup if the client doesn't send it.
        try:
            # A 'status' name keyword wins: translate it to status_id and
            # drop it so it isn't passed to the DataCenter constructor.
            stat = kwargs['status']
            LOG.debug('status keyword sent: {0}'.format(stat))
            my_status = find_status_by_name(stat)
            kwargs['status_id'] = my_status.id
            del kwargs['status']
        except KeyError:
            if 'status_id' not in kwargs or not kwargs['status_id']:
                LOG.debug('status_id not present, setting status_id to 2')
                kwargs['status_id'] = 2
        data_center = DataCenter(name=name,
                                 updated_by=updated_by,
                                 created=utcnow,
                                 updated=utcnow,
                                 **kwargs)
        DBSession.add(data_center)
        # flush() assigns data_center.id, which the audit row below needs.
        DBSession.flush()
        audit = DataCenterAudit(object_id=data_center.id,
                                field='name',
                                old_value='created',
                                new_value=data_center.name,
                                updated_by=updated_by,
                                created=utcnow)
        DBSession.add(audit)
        DBSession.flush()
        return api_200(results=data_center)
    except Exception as ex:
        msg = 'Error creating new data_center name: {0} exception: {1}'.format(name,
                                                                               ex)
        LOG.error(msg)
        return api_500(msg=msg)
def update_data_center(data_center, **kwargs):
    '''Update an existing data_center.

    Required params:
        data_center: The DataCenter object to update (not a name string).

    Required kwargs:
        updated_by : A string that is the user making the update (read when
                     writing audit rows).

    Optional kwargs:
        status_id : An integer representing the status_id from the statuses
                    table. Any other attribute of the DataCenter may also be
                    passed; 'name' is explicitly skipped.

    Returns:
        api_200 wrapping the updated DataCenter; re-raises on failure after
        logging.
    '''
    try:
        # Convert everything that is defined to a string.
        my_attribs = kwargs.copy()
        for my_attr in my_attribs:
            if my_attribs.get(my_attr):
                my_attribs[my_attr] = str(my_attribs[my_attr])
        LOG.info('Updating data_center: {0}'.format(data_center.name))
        utcnow = datetime.utcnow()
        for attribute in my_attribs:
            if attribute == 'name':
                # Renames are not supported through this path.
                LOG.debug('Skipping update to data_center.name.')
                continue
            old_value = getattr(data_center, attribute)
            new_value = my_attribs[attribute]
            # Only audit and apply real changes; empty/None new values are
            # ignored.
            if old_value != new_value and new_value:
                if not old_value:
                    old_value = 'None'
                LOG.debug('Updating data_center: {0} attribute: '
                          '{1} new_value: {2}'.format(data_center.name,
                                                      attribute,
                                                      new_value))
                audit = DataCenterAudit(object_id=data_center.id,
                                        field=attribute,
                                        old_value=old_value,
                                        new_value=new_value,
                                        updated_by=my_attribs['updated_by'],
                                        created=utcnow)
                DBSession.add(audit)
                setattr(data_center, attribute, new_value)
        DBSession.flush()
        return api_200(results=data_center)
    except Exception as ex:
        # NOTE(review): if the failure happens before my_attribs is bound,
        # or 'updated_by' was not passed, building this message raises a
        # secondary error -- confirm callers always supply updated_by.
        msg = 'Error updating data_center name: {0} updated_by: {1} exception: ' \
              '{2}'.format(data_center.name,
                           my_attribs['updated_by'],
                           repr(ex))
        LOG.error(msg)
        raise
# Routes
@view_config(route_name='api_data_centers', request_method='GET', request_param='schema=true', renderer='json')
def api_data_centers_schema(request):
    '''Schema document for the data_centers API.

    No schema fields have been defined yet, so this is an empty document.
    '''
    return {}
@view_config(route_name='api_data_centers', permission='data_center_write', request_method='PUT', renderer='json')
def api_data_centers_write(request):
    '''Process write requests for /api/data_centers route.

    Upserts a data_center by name: updates it when it exists, creates it
    otherwise. Returns an api_200 response wrapping the DataCenter, or
    api_500 on failure.
    '''
    try:
        req_params = [
            'name',
        ]
        opt_params = [
            'status',
        ]
        params = collect_params(request, req_params, opt_params)
        LOG.debug('Searching for data_center name: {0}'.format(params['name']))
        try:
            data_center = find_data_center_by_name(params['name'])
            # Capture the api_200 response from the update. The original
            # discarded it and returned the raw ORM object, which was
            # inconsistent with the create path below.
            data_center = update_data_center(data_center, **params)
        except NoResultFound:
            data_center = create_data_center(**params)
        return data_center
    except Exception as ex:
        msg = 'Error writing to data_centers API: {0} exception: {1}'.format(request.url, ex)
        LOG.error(msg)
        return api_500(msg=msg)
@view_config(route_name='api_data_center_r', permission='data_center_delete', request_method='DELETE', renderer='json')
@view_config(route_name='api_data_center_r', permission='data_center_write', request_method='PUT', renderer='json')
def api_data_center_write_attrib(request):
    '''Process write requests for the /api/data_centers/{id}/{resource} route.

    No sub-resources are actually supported yet, so real requests get
    api_501; a payload missing the resource key gets api_400.
    '''
    resource = request.matchdict['resource']
    payload = request.json_body
    auth_user = get_authenticated_user(request)
    LOG.debug('Updating {0}'.format(request.url))
    # First get the data_center, then figure out what to do to it.
    data_center = find_data_center_by_id(request.matchdict['id'])
    LOG.debug('data_center is: {0}'.format(data_center))
    # List of resources allowed
    resources = [
        'nothing_yet',
    ]
    # There's nothing to do here yet. Maybe add updates to existing datacenters?
    if resource in resources:
        try:
            actionable = payload[resource]
            # Nothing is done with `actionable` yet; acknowledge the request.
            # The original fell through to `return resp` with `resp` never
            # defined, which raised NameError on this path.
            return api_200(results=data_center)
        except KeyError:
            msg = 'Missing required parameter: {0}'.format(resource)
            return api_400(msg=msg)
        except Exception as ex:
            LOG.error('Error updating data_centers: {0} exception: {1}'.format(request.url, ex))
            return api_500(msg=str(ex))
    else:
        return api_501()
| {
"repo_name": "CityGrid/arsenal",
"path": "server/arsenalweb/views/api/data_centers.py",
"copies": "1",
"size": "8778",
"license": "apache-2.0",
"hash": 8244757016190514000,
"line_mean": 32.632183908,
"line_max": 119,
"alpha_frac": 0.5830485304,
"autogenerated": false,
"ratio": 4.082790697674419,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5165839228074419,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.