code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
Calculus functionality (differentiation and integration) for polynomials (objects implementing the
:class:`~polynomials_on_simplices.polynomial.polynomials_base.PolynomialBase` interface).
"""
import copy
import numpy as np
import polynomials_on_simplices.algebra.multiindex as multiindex
from polynomials_on_simplices.calculus.polynomial.polynomials_simplex_bernstein_basis_calculus import (
integrate_bernstein_polynomial_simplex, integrate_bernstein_polynomial_unit_simplex)
from polynomials_on_simplices.calculus.polynomial.polynomials_simplex_lagrange_basis_calculus import (
integrate_lagrange_polynomial_simplex, integrate_lagrange_polynomial_unit_simplex)
from polynomials_on_simplices.calculus.polynomial.polynomials_simplex_monomial_basis_calculus import (
integrate_polynomial_unit_simplex)
from polynomials_on_simplices.polynomial.polynomials_base import PolynomialBase
from polynomials_on_simplices.polynomial.polynomials_monomial_basis import unique_identifier_monomial_basis
from polynomials_on_simplices.polynomial.polynomials_unit_simplex_bernstein_basis import (
unique_identifier_bernstein_basis)
from polynomials_on_simplices.polynomial.polynomials_unit_simplex_lagrange_basis import unique_identifier_lagrange_basis
def gradient(p):
    r"""
    Compute the gradient of a polynomial,
    .. math:: (\nabla p)_i = \frac{\partial p}{\partial x^i}.
    :param p: Polynomial whose gradient we want to compute.
    :type p: Implementation of the :class:`~polynomials_on_simplices.polynomial.polynomials_base.PolynomialBase`
        interface
    :return: Gradient of the polynomial as a list of polynomials (where the i:th entry is the i:th partial derivative
        of p).
    """
    assert isinstance(p, PolynomialBase)
    # Vector valued polynomials are delegated to the Jacobian computation.
    if p.target_dimension() > 1:
        return jacobian(p)
    # Scalar valued: collect all first order partial derivatives.
    return [p.partial_derivative(axis) for axis in range(p.domain_dimension())]
def jacobian(p):
    r"""
    Compute the Jacobian of a polynomial,
    .. math:: (J_p)^i_j = \frac{\partial p^i}{\partial x^j}.
    :param p: Polynomial whose Jacobian we want to compute.
    :type p: Implementation of the :class:`~polynomials_on_simplices.polynomial.polynomials_base.PolynomialBase`
        interface
    :return: Jacobian of the polynomial as a list of list of polynomials (where the i:th entry is the gradient of the
        i:th entry of p).
    """
    assert isinstance(p, PolynomialBase)
    rows = []
    # Row i of the Jacobian is the gradient of the i:th component of p.
    for component in range(p.target_dimension()):
        rows.append(gradient(p[component]))
    return rows
def derivative(p, alpha):
    r"""
    Compute the higher order partial derivative :math:`\partial^{\alpha}` of the given polynomial.
    .. math:: \partial^{\alpha} : \mathcal{P} \to \mathcal{P},
    .. math::
        (\partial^{\alpha} p)(x)
        = \frac{\partial^{|\alpha|} p(x)}{\partial (x^1)^{\alpha_1} \ldots \partial (x^n)^{\alpha_n}}.
    :param p: Polynomial we want to take the derivative of.
    :type p: Implementation of the :class:`~polynomials_on_simplices.polynomial.polynomials_base.PolynomialBase`
        interface
    :param alpha: Multi-index defining the higher order partial derivative.
    :type alpha: int or :class:`~polynomials_on_simplices.algebra.multiindex.MultiIndex` or Tuple[int, ...]
    :return: Higher order partial derivative of the given polynomial.
    """
    assert isinstance(p, PolynomialBase)
    # Work on a deep copy so the input polynomial is left untouched.
    dp = copy.deepcopy(p)
    # EAFP dispatch: len(alpha) raises TypeError when alpha is a plain int,
    # in which case we fall through to the univariate branch below.
    try:
        m = len(alpha)
        assert m == p.domain_dimension()
        # Multivariate polynomial
        for i in range(m):
            # Differentiate alpha[i] times with respect to the i:th variable.
            for j in range(alpha[i]):
                dp = dp.partial_derivative(i)
        return dp
    except TypeError:
        m = 1
        assert m == p.domain_dimension()
        # univariate polynomial: alpha is the derivative order itself
        for i in range(alpha):
            dp = dp.partial_derivative()
        return dp
def hessian(p):
    r"""
    Compute the Hessian matrix of a polynomial,
    .. math:: (H_p)_{ij} = \frac{\partial^2 p}{\partial x^i \partial x^j}.
    :param p: Polynomial whose Hessian we want to compute.
    :type p: Implementation of the :class:`~polynomials_on_simplices.polynomial.polynomials_base.PolynomialBase`
        interface
    :return: Hessian of the polynomial as a size (n, n) 2d-array of polynomials (where element (i, j) of the array
        is equal to :math:`\frac{\partial^2 p}{\partial x^i \partial x^j}` and n is the dimension of the polynomial
        domain).
    :rtype: :class:`Numpy array <numpy.ndarray>`
    """
    assert isinstance(p, PolynomialBase)
    m = p.domain_dimension()
    n = p.target_dimension()
    # Can only compute the Hessian of a scalar valued polynomial
    assert n == 1
    h = np.empty((m, m), dtype=object)
    # Fill the upper triangle and mirror it; the Hessian is symmetric since
    # mixed partial derivatives of a polynomial commute.
    for row in range(m):
        for col in range(row, m):
            alpha = multiindex.zero_multiindex(m)
            alpha[row] += 1
            alpha[col] += 1
            entry = derivative(p, alpha)
            h[row][col] = entry
            if row != col:
                h[col][row] = entry
    return h
def partial_derivatives_array(p, r):
    r"""
    Compute the multi-array consisting of all the degree r partial derivatives of a polynomial,
    .. math::
        (D_p)_{i_1, i_2, \ldots, i_r} = \frac{\partial^r p}{\partial x^{i_1} \partial x^{i_2} \cdots \partial x^{i_r}}.
    For r = 1 this gives the gradient of p and for r = 2 it gives the Hessian matrix.
    :param p: Polynomial whose derivative multi-array we want to compute.
    :type p: Implementation of the :class:`~polynomials_on_simplices.polynomial.polynomials_base.PolynomialBase`
        interface
    :param int r: Degree of the partial derivatives we want to compute.
    :return: Degree r derivatives of the polynomial as an r-dimensional array where each array is indexed from 0 to
        n-1 (where element :math:`(i_1, i_2, \ldots, i_r)` of the array is equal to
        :math:`\frac{\partial^r p}{\partial x^{i_1} \partial x^{i_2} \cdots \partial x^{i_r}}` and n is the
        dimension of the polynomial domain).
    :rtype: :class:`Numpy array <numpy.ndarray>`
    """
    assert isinstance(p, PolynomialBase)
    m = p.domain_dimension()
    n = p.target_dimension()
    # Can only compute the derivative multi-array of a scalar valued polynomial
    assert n == 1
    # Partial derivatives can't have negative degree
    assert r >= 0
    # Handle the special case of zero:th order partial derivatives
    if r == 0:
        return p
    d = np.empty((m,) * r, dtype=object)
    # Only compute the derivative once per non-decreasing multi-index; mixed
    # partial derivatives commute, so it is shared by all index permutations.
    for i in multiindex.generate_all_non_decreasing(r, m - 1):
        alpha = multiindex.zero_multiindex(m)
        # Fix: use a dedicated loop variable instead of reusing the name `r`,
        # which shadowed (and clobbered) the degree parameter in the original.
        for k in range(len(i)):
            alpha[i[k]] += 1
        pd = derivative(p, alpha)
        for ip in _unique_permutations(i.to_tuple()):
            d[ip] = pd
    return d
def integrate_unit_simplex(p):
    r"""
    Integrate a general degree r polynomial over the n-dimensional unit simplex :math:`\Delta_c^n`.
    .. math::
        \int_{\Delta_c^n} p(x) \, dx.
    :param p: Polynomial that should be integrated over the unit simplex.
    :type p: Implementation of the :class:`~polynomials_on_simplices.polynomial.polynomials_base.PolynomialBase`
        interface
    :return: Evaluation of the integral.
    """
    assert isinstance(p, PolynomialBase)
    basis_id = p.basis()
    # Dispatch to the specialized integration routine for each known basis.
    if basis_id == unique_identifier_monomial_basis():
        return integrate_polynomial_unit_simplex(p.degree(), p.domain_dimension(), p.coeff)
    if basis_id == unique_identifier_lagrange_basis():
        return integrate_lagrange_polynomial_unit_simplex(p.degree(), p.coeff, p.domain_dimension())
    if basis_id == unique_identifier_bernstein_basis():
        return integrate_bernstein_polynomial_unit_simplex(p.degree(), p.coeff, p.domain_dimension())
    raise TypeError("Cannot integrate polynomial: Unknown polynomial basis")
def integrate_simplex(p, vertices):
    r"""
    Integrate a general degree r polynomial over an n-dimensional simplex T.
    .. math::
        \int_{T} p(x) \, dx.
    :param p: Polynomial that should be integrated over the simplex.
    :type p: Implementation of the :class:`~polynomials_on_simplices.polynomial.polynomials_base.PolynomialBase`
        interface
    :param vertices: Vertices of the simplex T ((n + 1) x n matrix where row i contains the i:th vertex of the
        simplex).
    :return: Evaluation of the integral.
    """
    assert isinstance(p, PolynomialBase)
    basis_id = p.basis()
    # Dispatch on the basis family encoded in the basis identifier string.
    if basis_id.startswith("Lagrange"):
        return integrate_lagrange_polynomial_simplex(p.degree(), p.coeff, vertices)
    if basis_id.startswith("Bernstein"):
        return integrate_bernstein_polynomial_simplex(p.degree(), p.coeff, vertices)
    raise TypeError("Cannot integrate polynomial: Unknown polynomial basis")
def _unique_permutations(t):
    """
    Compute all unique permutations of the elements of a tuple.
    :param t: Tuple of elements which we want to find all permutations of.
    :type t: tuple
    :return: List of all permutations of the input tuple.
    :rtype: List[tuple].
    .. rubric:: Examples
    >>> t = (0, 0, 1)
    >>> p = set(_unique_permutations(t))
    >>> p_expected = {(1, 0, 0), (0, 1, 0), (0, 0, 1)}
    >>> p == p_expected
    True
    >>> t = (0, 0, 1, 2)
    >>> p = set(_unique_permutations(t))
    >>> p_expected_1 = {(0, 0, 1, 2), (0, 0, 2, 1), (0, 1, 0, 2), (0, 1, 2, 0), (0, 2, 0, 1), (0, 2, 1, 0)}
    >>> p_expected_2 = {(1, 0, 0, 2), (1, 0, 2, 0), (1, 2, 0, 0), (2, 0, 0, 1), (2, 0, 1, 0), (2, 1, 0, 0)}
    >>> p_expected = p_expected_1.union(p_expected_2)
    >>> p == p_expected
    True
    """
    # Collect permuted tuples in a set so duplicates (arising from repeated
    # elements in t) are kept only once.
    permutations = set()
    # Local imports -- presumably to avoid a circular import at module load
    # time; TODO confirm, otherwise these could move to the top of the file.
    from polynomials_on_simplices.algebra.permutations import permutations as generate_permutations
    from polynomials_on_simplices.algebra.permutations import permute_positions
    for permutation in generate_permutations(len(t)):
        permutations.add(tuple(permute_positions(permutation, list(t))))
    return list(permutations)
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| [
"polynomials_on_simplices.algebra.multiindex.generate_all_non_decreasing",
"polynomials_on_simplices.algebra.multiindex.zero_multiindex",
"polynomials_on_simplices.polynomial.polynomials_unit_simplex_lagrange_basis.unique_identifier_lagrange_basis",
"polynomials_on_simplices.polynomial.polynomials_monomial_ba... | [((3372, 3388), 'copy.deepcopy', 'copy.deepcopy', (['p'], {}), '(p)\n', (3385, 3388), False, 'import copy\n'), ((4662, 4692), 'numpy.empty', 'np.empty', (['(m, m)'], {'dtype': 'object'}), '((m, m), dtype=object)\n', (4670, 4692), True, 'import numpy as np\n'), ((6394, 6426), 'numpy.empty', 'np.empty', (['((m,) * r)'], {'dtype': 'object'}), '((m,) * r, dtype=object)\n', (6402, 6426), True, 'import numpy as np\n'), ((6440, 6488), 'polynomials_on_simplices.algebra.multiindex.generate_all_non_decreasing', 'multiindex.generate_all_non_decreasing', (['r', '(m - 1)'], {}), '(r, m - 1)\n', (6478, 6488), True, 'import polynomials_on_simplices.algebra.multiindex as multiindex\n'), ((9925, 9942), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (9940, 9942), False, 'import doctest\n'), ((6506, 6535), 'polynomials_on_simplices.algebra.multiindex.zero_multiindex', 'multiindex.zero_multiindex', (['m'], {}), '(m)\n', (6532, 6535), True, 'import polynomials_on_simplices.algebra.multiindex as multiindex\n'), ((7234, 7268), 'polynomials_on_simplices.polynomial.polynomials_monomial_basis.unique_identifier_monomial_basis', 'unique_identifier_monomial_basis', ([], {}), '()\n', (7266, 7268), False, 'from polynomials_on_simplices.polynomial.polynomials_monomial_basis import unique_identifier_monomial_basis\n'), ((7382, 7416), 'polynomials_on_simplices.polynomial.polynomials_unit_simplex_lagrange_basis.unique_identifier_lagrange_basis', 'unique_identifier_lagrange_basis', ([], {}), '()\n', (7414, 7416), False, 'from polynomials_on_simplices.polynomial.polynomials_unit_simplex_lagrange_basis import unique_identifier_lagrange_basis\n'), ((7539, 7574), 'polynomials_on_simplices.polynomial.polynomials_unit_simplex_bernstein_basis.unique_identifier_bernstein_basis', 'unique_identifier_bernstein_basis', ([], {}), '()\n', (7572, 7574), False, 'from polynomials_on_simplices.polynomial.polynomials_unit_simplex_bernstein_basis 
import unique_identifier_bernstein_basis\n'), ((4766, 4795), 'polynomials_on_simplices.algebra.multiindex.zero_multiindex', 'multiindex.zero_multiindex', (['m'], {}), '(m)\n', (4792, 4795), True, 'import polynomials_on_simplices.algebra.multiindex as multiindex\n')] |
import numpy as np
from numba import jit
# Euler angles in matrix representation
@jit(nopython=True)
def euler2rotmat(ang):
    """Convert Euler angles (phi1, Phi, phi2) into a 3x3 rotation matrix.

    NOTE(review): the angles are fed straight into np.sin/np.cos, so they
    must be given in radians -- callers passing degrees will get wrong
    results. Confirm against call sites.
    """
    g = np.zeros((3, 3))
    sa = np.sin(ang[0])
    ca = np.cos(ang[0])
    sb = np.sin(ang[1])
    cb = np.cos(ang[1])
    sc = np.sin(ang[2])
    cc = np.cos(ang[2])
    # Rotation matrix assembled entry by entry (numba-friendly form, instead
    # of a single np.array literal).
    g[0, 0] = ca*cc - sa*sc*cb
    g[0, 1] = sa*cc + ca*sc*cb
    g[0, 2] = sc*sb
    g[1, 0] = -ca*sc - sa*cc*cb
    g[1, 1] = -sa*sc + ca*cc*cb
    g[1, 2] = cc*sb
    g[2, 0] = sa*sb
    g[2, 1] = -ca*sb
    g[2, 2] = cb
    return g
# axis/angle in Euler angles representation
def axisangle2euler(axisangle):
    """Convert an axis/angle rotation into Euler angles.

    :param axisangle: Sequence (angle, x, y, z) where angle is in degrees and
        (x, y, z) is the (not necessarily normalized) rotation axis.
    :return: Tuple (phi1, Phi, phi2) of Euler angles in radians.
    """
    c = np.cos(axisangle[0]*np.pi/180)
    s = np.sin(axisangle[0]*np.pi/180)
    t = 1 - c
    # Normalize the rotation axis; compute the norm once instead of three
    # times (the original recomputed it per component and also built three
    # unused rotation-matrix rows).
    norm = (axisangle[1]**2 + axisangle[2]**2 + axisangle[3]**2)**0.5
    u = axisangle[1]/norm
    v = axisangle[2]/norm
    w = axisangle[3]/norm
    # Read the Euler angles off the axis/angle rotation matrix entries.
    phi1 = np.arctan2(-(t*u*w - v*s), (t*v*w + u*s))
    Phi = np.arccos(t*w*w + c)
    phi2 = np.arctan2((t*u*w + v*s), (t*v*w - u*s))
    return phi1, Phi, phi2
# axis/angle in rotation matrix representation
def axisangle2rotmat(angle,axis):
    """Convert an axis/angle rotation into a 3x3 rotation matrix.

    :param angle: Rotation angle in degrees.
    :param axis: Length-3 sequence, the (not necessarily normalized) axis.
    :return: 3x3 rotation matrix as np.matrix (kept, although deprecated,
        because downstream code multiplies these with `*` expecting matrix
        semantics).
    """
    c = np.cos(angle*np.pi/180)
    s = np.sin(angle*np.pi/180)
    t = 1 - c
    # Normalize the axis; compute the norm once instead of per component.
    norm = (axis[0]**2 + axis[1]**2 + axis[2]**2)**0.5
    u = axis[0]/norm
    v = axis[1]/norm
    w = axis[2]/norm
    # Rodrigues' rotation formula, written out row by row.
    g1 = [t*u*u + c, t*u*v - w*s, t*u*w + v*s]
    g2 = [t*u*v + w*s, t*v*v + c, t*v*w - u*s]
    g3 = [t*u*w - v*s, t*v*w + u*s, t*w*w + c]
    g = np.matrix([g1, g2, g3])
    return g
def ggt(x, y):
    """Return the greatest common divisor of x and y (Euclidean algorithm)."""
    while y:
        remainder = x % y
        x = y
        y = remainder
    return x
def round_miller(omega):
    """Round a direction vector to small integer Miller indices.

    Scans integer multipliers 1..19 for the multiple of `omega` whose
    components are closest to integers, rounds that multiple, and reduces it
    by a common divisor when all pairwise gcds agree.

    :param omega: Length-3 sequence of (generally non-integer) components.
    :return: List of three Miller-index components.
    """
    min_idx = 1
    p_min = 1
    # The highest uvw value is chosen to be 20
    for i in range(1, 20, 1):
        omega_2 = [r*i for r in omega]
        # Bug fix: the original wrote this sum as three separate statements
        # (no line continuation), so only the first term was ever assigned
        # to p; the two trailing `+ abs(...)` lines were dead no-ops.
        p = (abs(omega_2[0] - round(omega_2[0]))
             + abs(omega_2[1] - round(omega_2[1]))
             + abs(omega_2[2] - round(omega_2[2])))
        if p < p_min:
            p_min = p
            min_idx = i
    omega = [int(round(component*min_idx)) for component in omega]
    # Reduce by the common divisor only when all pairwise gcds agree.
    # NOTE(review): ggt(0, 0) == 0, so omega == [0, 0, 0] would divide by
    # zero here -- presumably callers never pass the zero vector; confirm.
    if ggt(abs(omega[0]), abs(omega[1])) == ggt(abs(omega[0]), abs(omega[2])) == ggt(abs(omega[1]), abs(omega[2])):
        omega = [x/abs(ggt(omega[0], omega[1])) for x in omega]
    return omega
# Calculate ideal orientations in [hkl]<uvw> representation
def euler2miller(ang):
    """Convert Euler angles into the ideal orientation (hkl)[uvw].

    The rotation matrix g is built row by row from the Euler angles; its
    first column gives [uvw] and its last column gives (hkl), both rounded
    to small Miller indices via round_miller.

    NOTE(review): angles are used directly with np.sin/np.cos, i.e. radians
    are expected -- confirm against callers.
    """
    sa = np.sin(ang[0])
    ca = np.cos(ang[0])
    sb = np.sin(ang[1])
    cb = np.cos(ang[1])
    sc = np.sin(ang[2])
    cc = np.cos(ang[2])
    g1 = [ca*cc - sa*sc*cb, sa*cc + ca*sc*cb, sc*sb]
    g2 = [-ca*sc - sa*cc*cb, -sa*sc + ca*cc*cb, cc*sb]
    g3 = [sa*sb, -ca*sb, cb]
    # First column of g is the direction [uvw], last column the normal (hkl).
    uvw = [g1[0], g2[0], g3[0]]
    hkl = [g1[2], g2[2], g3[2]]
    uvw = round_miller(uvw)
    hkl = round_miller(hkl)
    return hkl, uvw
# Calculate misorientation axis/angle and misorientation angle
def rotmat2misor_axisangle(rotmat, Symmetry_group):
    """Compute the misorientation axis (Miller indices) and angle (degrees).

    NOTE(review): `*` and `.trace()` below rely on np.matrix semantics
    (matrix product / matrix trace); with plain ndarrays the behavior
    differs -- confirm the caller passes np.matrix operands.
    """
    # Apply every symmetry operator; the variant with maximal trace has the
    # smallest rotation angle (Tr(R) = 1 + 2 cos(Theta)).
    rotmat_sym = [rotmat*x for x in Symmetry_group]
    x_trace = [x.trace() for x in rotmat_sym]
    min_idx = 0
    for i in range(len(x_trace)):
        if x_trace[min_idx] < x_trace[i]:
            min_idx = i
    Theta = np.arccos(((x_trace[min_idx]) - 1)/2)
    # Rotation axis from the antisymmetric part of R, scaled by 1/(2 sin Theta).
    omega = (1/(2*np.sin(Theta))
            *[rotmat_sym[min_idx][2,1] - rotmat_sym[min_idx][1,2],
            rotmat_sym[min_idx][0,2] - rotmat_sym[min_idx][2,0],
            rotmat_sym[min_idx][1,0] - rotmat_sym[min_idx][0,1]])
    omega = omega.tolist()
    # NOTE(review): omega[0] assumes tolist() yields a nested list (matrix
    # result); for a flat 1-D result this would pass a scalar -- verify.
    omega = round_miller(omega[0])
    return omega, Theta*180/np.pi
@jit(nopython=True)
def rotmat2misor_angle(rotmat, Symmetry_group):
    # Find the symmetry variant with the largest trace (smallest rotation
    # angle) and convert that trace to a misorientation angle in degrees.
    # NOTE(review): x_trace starts at 0, which assumes the maximal trace is
    # non-negative -- confirm this holds for the symmetry group used.
    x_trace = 0
    for i in range(len(Symmetry_group)):
        x = np.dot(rotmat, Symmetry_group[i])
        # Trace computed manually (numba nopython friendly).
        x_tr = x[0, 0] + x[1, 1] + x[2, 2]
        if x_trace < x_tr:
            x_trace = x_tr
    # Tr(R) = 1 + 2 cos(Theta)
    Theta = np.arccos(((x_trace) - 1)/2)
    return Theta*180/np.pi
def get_IPF_color_vals(ang):
    """Map Euler angles to an (r, g, b) color triple for an inverse pole
    figure plot.

    The plane normal (h, k, l) is computed from the Euler angles, rescaled,
    folded into a standard triangle by pairwise swaps, and converted to a
    normalized color triple.
    """
    h = np.sin(ang[1]) * np.sin(ang[2])
    k = np.sin(ang[1]) * np.cos(ang[2])
    l = np.cos(ang[1])
    # Rescale by the norm (the h:k:l ratios are preserved either way).
    scale = (h * h + k * k + l * l) ** 0.5
    h = h * scale
    k = k * scale
    l = l * scale
    smallest = min(abs(h), abs(k), abs(l))
    if smallest != 0:
        h = abs(h) / smallest
        k = abs(k) / smallest
        l = abs(l) / smallest
    # Fold into the standard triangle via the fixed swap sequence.
    if h < k:
        h, k = k, h
    if k > l:
        k, l = l, k
    if h > l:
        h, l = l, h
    c_max = max(abs(l - h), abs(h - k), abs(k))
    red = (l - h) / c_max
    green = (h - k) / c_max
    blue = k / c_max
    return abs(red), abs(green), abs(blue)
if __name__ == '__main__':
    # Example orientations given as (phi1, Phi, phi2) Euler angles.
    Euler_angles1 = [0, 0, 0]
    Euler_angles2 = [90, 45, 0]
    Euler_angles3 = [149, 54, 45]
    Euler_angles_sigma_3 = [63.43, 48.18, 333.4]
    #Euler_angles = [x*np.pi/180 for x in Euler_angles]
    #r,g,b = get_IPF_color_vals(Euler_angles)
    #print(Euler_angles)
    # NOTE(review): euler2rotmat feeds angles straight into np.sin/np.cos
    # (radians), but the values above look like degrees -- the commented-out
    # degree-to-radian conversion above probably needs to be applied first.
    g1 = euler2rotmat(Euler_angles2)
    g2 = euler2rotmat(Euler_angles3)
    print(g1)
    print(g2)
    print(np.dot(g1,g2))
    #ideal_or = euler2miller(Euler_angles)
#print(ideal_or) | [
"numpy.arccos",
"numpy.zeros",
"numba.jit",
"numpy.arctan2",
"numpy.cos",
"numpy.dot",
"numpy.sin",
"numpy.matrix"
] | [((84, 102), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (87, 102), False, 'from numba import jit\n'), ((4119, 4137), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (4122, 4137), False, 'from numba import jit\n'), ((134, 150), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (142, 150), True, 'import numpy as np\n'), ((160, 174), 'numpy.sin', 'np.sin', (['ang[0]'], {}), '(ang[0])\n', (166, 174), True, 'import numpy as np\n'), ((184, 198), 'numpy.cos', 'np.cos', (['ang[0]'], {}), '(ang[0])\n', (190, 198), True, 'import numpy as np\n'), ((209, 223), 'numpy.sin', 'np.sin', (['ang[1]'], {}), '(ang[1])\n', (215, 223), True, 'import numpy as np\n'), ((233, 247), 'numpy.cos', 'np.cos', (['ang[1]'], {}), '(ang[1])\n', (239, 247), True, 'import numpy as np\n'), ((257, 271), 'numpy.sin', 'np.sin', (['ang[2]'], {}), '(ang[2])\n', (263, 271), True, 'import numpy as np\n'), ((281, 295), 'numpy.cos', 'np.cos', (['ang[2]'], {}), '(ang[2])\n', (287, 295), True, 'import numpy as np\n'), ((997, 1031), 'numpy.cos', 'np.cos', (['(axisangle[0] * np.pi / 180)'], {}), '(axisangle[0] * np.pi / 180)\n', (1003, 1031), True, 'import numpy as np\n'), ((1036, 1070), 'numpy.sin', 'np.sin', (['(axisangle[0] * np.pi / 180)'], {}), '(axisangle[0] * np.pi / 180)\n', (1042, 1070), True, 'import numpy as np\n'), ((1471, 1522), 'numpy.arctan2', 'np.arctan2', (['(-(t * u * w - v * s))', '(t * v * w + u * s)'], {}), '(-(t * u * w - v * s), t * v * w + u * s)\n', (1481, 1522), True, 'import numpy as np\n'), ((1524, 1548), 'numpy.arccos', 'np.arccos', (['(t * w * w + c)'], {}), '(t * w * w + c)\n', (1533, 1548), True, 'import numpy as np\n'), ((1557, 1605), 'numpy.arctan2', 'np.arctan2', (['(t * u * w + v * s)', '(t * v * w - u * s)'], {}), '(t * u * w + v * s, t * v * w - u * s)\n', (1567, 1605), True, 'import numpy as np\n'), ((1721, 1748), 'numpy.cos', 'np.cos', (['(angle * np.pi / 180)'], {}), '(angle * np.pi / 180)\n', (1727, 1748), 
True, 'import numpy as np\n'), ((1753, 1780), 'numpy.sin', 'np.sin', (['(angle * np.pi / 180)'], {}), '(angle * np.pi / 180)\n', (1759, 1780), True, 'import numpy as np\n'), ((2113, 2136), 'numpy.matrix', 'np.matrix', (['[g1, g2, g3]'], {}), '([g1, g2, g3])\n', (2122, 2136), True, 'import numpy as np\n'), ((2948, 2962), 'numpy.sin', 'np.sin', (['ang[0]'], {}), '(ang[0])\n', (2954, 2962), True, 'import numpy as np\n'), ((2972, 2986), 'numpy.cos', 'np.cos', (['ang[0]'], {}), '(ang[0])\n', (2978, 2986), True, 'import numpy as np\n'), ((2997, 3011), 'numpy.sin', 'np.sin', (['ang[1]'], {}), '(ang[1])\n', (3003, 3011), True, 'import numpy as np\n'), ((3021, 3035), 'numpy.cos', 'np.cos', (['ang[1]'], {}), '(ang[1])\n', (3027, 3035), True, 'import numpy as np\n'), ((3045, 3059), 'numpy.sin', 'np.sin', (['ang[2]'], {}), '(ang[2])\n', (3051, 3059), True, 'import numpy as np\n'), ((3069, 3083), 'numpy.cos', 'np.cos', (['ang[2]'], {}), '(ang[2])\n', (3075, 3083), True, 'import numpy as np\n'), ((3723, 3760), 'numpy.arccos', 'np.arccos', (['((x_trace[min_idx] - 1) / 2)'], {}), '((x_trace[min_idx] - 1) / 2)\n', (3732, 3760), True, 'import numpy as np\n'), ((4402, 4430), 'numpy.arccos', 'np.arccos', (['((x_trace - 1) / 2)'], {}), '((x_trace - 1) / 2)\n', (4411, 4430), True, 'import numpy as np\n'), ((4577, 4591), 'numpy.cos', 'np.cos', (['ang[1]'], {}), '(ang[1])\n', (4583, 4591), True, 'import numpy as np\n'), ((4255, 4288), 'numpy.dot', 'np.dot', (['rotmat', 'Symmetry_group[i]'], {}), '(rotmat, Symmetry_group[i])\n', (4261, 4288), True, 'import numpy as np\n'), ((4501, 4515), 'numpy.sin', 'np.sin', (['ang[1]'], {}), '(ang[1])\n', (4507, 4515), True, 'import numpy as np\n'), ((4516, 4530), 'numpy.sin', 'np.sin', (['ang[2]'], {}), '(ang[2])\n', (4522, 4530), True, 'import numpy as np\n'), ((4539, 4553), 'numpy.sin', 'np.sin', (['ang[1]'], {}), '(ang[1])\n', (4545, 4553), True, 'import numpy as np\n'), ((4554, 4568), 'numpy.cos', 'np.cos', (['ang[2]'], {}), '(ang[2])\n', (4560, 
4568), True, 'import numpy as np\n'), ((5483, 5497), 'numpy.dot', 'np.dot', (['g1', 'g2'], {}), '(g1, g2)\n', (5489, 5497), True, 'import numpy as np\n'), ((3784, 3797), 'numpy.sin', 'np.sin', (['Theta'], {}), '(Theta)\n', (3790, 3797), True, 'import numpy as np\n')] |
# --------------
# Exploratory analysis of an IPL (cricket) ball-by-ball dataset.
import numpy as np
# Read the data using numpy module
# NOTE(review): `path` is not defined in this snippet -- presumably it is
# injected by the execution environment (e.g. an exercise grader); confirm.
data_ipl = np.genfromtxt(path, delimiter=',', skip_header=1, dtype=str)
# Calculate the unique no. of matches in the provided dataset ?
# NOTE(review): this only reports the shape of column 3, not a unique
# count -- a unique count would need np.unique or set().
data_ipl[:,3].shape
# Find the set of all unique teams that played in the matches in the data set.
# this exercise deals with you getting to know that which are all those six teams that played in the tournament.
team1_set = set(data_ipl[:, 3])
team2_set = set(data_ipl[:, 4])
unique_teams = team1_set.union(team2_set)
unique_teams
# Find sum of all extras in all deliveries in all matches in the dataset
# An exercise to make you familiar with indexing and slicing up within data.
extras = data_ipl[:, 17]
extras_int = extras.astype(np.int16)
extras_int.sum()
# Get the array of all delivery numbers when a given player got out. Also mention the wicket type.
# NOTE(review): column 20 presumably holds the dismissed player's name and
# column 11 the delivery number -- verify against the CSV header.
wicket_filter = (data_ipl[:, 20] == 'SR Tendulkar')
wickets_arr = data_ipl[wicket_filter]
wickets_arr[:, 11]
# How many matches the team Mumbai Indians has won the toss?
# this exercise will help you get the statistics on one particular team
team_records = data_ipl[data_ipl[:, 5] == 'Mumbai Indians']
unique_matches = set(team_records[:, 0])
len(unique_matches)
# Create a filter that filters only those records where the batsman scored 6 runs. Also who has scored the maximum no. of sixes overall ?
# An exercise to know who is the most aggresive player or maybe the scoring player
sixes = data_ipl[data_ipl[:, 16].astype(np.int16) == 6]
from collections import Counter
most_sixes_scored = Counter(sixes[:,13],)
most_sixes_scored.most_common(3)
| [
"collections.Counter",
"numpy.genfromtxt"
] | [((83, 143), 'numpy.genfromtxt', 'np.genfromtxt', (['path'], {'delimiter': '""","""', 'skip_header': '(1)', 'dtype': 'str'}), "(path, delimiter=',', skip_header=1, dtype=str)\n", (96, 143), True, 'import numpy as np\n'), ((1572, 1593), 'collections.Counter', 'Counter', (['sixes[:, 13]'], {}), '(sixes[:, 13])\n', (1579, 1593), False, 'from collections import Counter\n')] |
import numpy as np
def random_draw_vectors(number_draws, number_frames, first_uniform=False):
    """Generate draw or weight vectors for bootstrap resampling.

    Args:
        number_draws (int):
            Number of draws.
        number_frames (int):
            Number of frames to pick from.
        first_uniform (bool):
            Set True to receive the first draw as uniform weights/ all ones.

    Returns:
        sample_draw_vectors (numpy.ndarray):
            Array of shape (number_draws, number_frames) whose entry
            (draw, frame) counts how often that frame was picked.
    """
    shape = (number_draws, number_frames)
    # Randomly pick `number_frames` frame indices for every draw.
    draw_indizes = np.random.randint(number_frames, size=shape)
    # Convert the picked indices into per-frame counts ("weights").
    sample_draw_vectors = np.zeros(shape=shape)
    for row in range(number_draws):
        picked, counts = np.unique(draw_indizes[row], return_counts=True)
        sample_draw_vectors[row][picked] = counts
    if first_uniform:
        # Overwrite the first vector with homogeneous weights (all ones).
        sample_draw_vectors[0] = 1
    return sample_draw_vectors
| [
"numpy.random.randint",
"numpy.unique",
"numpy.zeros"
] | [((724, 768), 'numpy.random.randint', 'np.random.randint', (['number_frames'], {'size': 'shape'}), '(number_frames, size=shape)\n', (741, 768), True, 'import numpy as np\n'), ((856, 877), 'numpy.zeros', 'np.zeros', ([], {'shape': 'shape'}), '(shape=shape)\n', (864, 877), True, 'import numpy as np\n'), ((992, 1054), 'numpy.unique', 'np.unique', (['draw_indizes[sample_draw_index]'], {'return_counts': '(True)'}), '(draw_indizes[sample_draw_index], return_counts=True)\n', (1001, 1054), True, 'import numpy as np\n')] |
'''
This is a implementation of Quantum State Tomography for Qutrits,
using techniques of following papars.
'Iterative algorithm for reconstruction of entangled states(10.1103/PhysRevA.63.040303)'
'Diluted maximum-likelihood algorithm for quantum tomography(10.1103/PhysRevA.75.042108)'
'Qudit Quantum State Tomography(10.1103/PhysRevA.66.012303)'
'On-chip generation of high-dimensional entangled quantum states and their coherent control(Nature volume 546, pages622-626(2017))'
'''
import numpy as np
from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random
from scipy.linalg import sqrtm
from datetime import datetime
from concurrent import futures
import os
from pathlib import Path
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pickle
"""
Definition of Three Frequency Bases:
fb1 = array([1, 0, 0])
fb2 = array([0, 1, 0])
fb3 = array([0, 0, 1])
"""
# Build the three computational (frequency) basis row vectors fb1, fb2, fb3
# as 1x3 arrays.
zero_base_array1 = zeros((1,3))
zero_base_array1[0][0] = 1
fb1 = zero_base_array1
zero_base_array2 = zeros((1,3))
zero_base_array2[0][1] = 1
fb2 = zero_base_array2
zero_base_array3 = zeros((1,3))
zero_base_array3[0][2] = 1
fb3 = zero_base_array3
""" Make Measurement Bases """
# Each mbX is a rank-1 projector |psi><psi| onto an (equal-weight)
# superposition of two frequency basis states, normalized by 1/2.
mb1 = (conjugate((fb1 + fb2).T) @ (fb1 + fb2)) / 2
mb2 = (conjugate((fb1 + fb3).T) @ (fb1 + fb3)) / 2
mb3 = (conjugate((fb2 + fb3).T) @ (fb2 + fb3)) / 2
# mb4..mb9 use relative phases exp(+-2*pi*i/3) between the two components.
mb4 = (conjugate((exp( 2*pi*1j/3) * fb1 + (exp(-2*pi*1j/3)) * fb2).T) @ (exp( 2*pi*1j/3) * fb1 + (exp(-2*pi*1j/3) * fb2))) / 2
mb5 = (conjugate((exp(-2*pi*1j/3) * fb1 + (exp( 2*pi*1j/3)) * fb2).T) @ (exp(-2*pi*1j/3) * fb1 + (exp( 2*pi*1j/3) * fb2))) / 2
mb6 = (conjugate((exp( 2*pi*1j/3) * fb1 + (exp(-2*pi*1j/3)) * fb3).T) @ (exp( 2*pi*1j/3) * fb1 + (exp(-2*pi*1j/3) * fb3))) / 2
mb7 = (conjugate((exp(-2*pi*1j/3) * fb1 + (exp( 2*pi*1j/3)) * fb3).T) @ (exp(-2*pi*1j/3) * fb1 + (exp( 2*pi*1j/3) * fb3))) / 2
mb8 = (conjugate((exp( 2*pi*1j/3) * fb2 + (exp(-2*pi*1j/3)) * fb3).T) @ (exp( 2*pi*1j/3) * fb2 + (exp(-2*pi*1j/3) * fb3))) / 2
mb9 = (conjugate((exp(-2*pi*1j/3) * fb2 + (exp( 2*pi*1j/3)) * fb3).T) @ (exp(-2*pi*1j/3) * fb2 + (exp( 2*pi*1j/3) * fb3))) / 2
bases = array([mb1, mb2, mb3, mb4, mb5, mb6, mb7, mb8, mb9])
def makeBases(numberOfQutrits, bases):
    """Build the measurement bases for a multi-qutrit system.

    Starting from the single-qutrit bases, the set is expanded
    (numberOfQutrits - 1) times by taking the Kronecker product of every
    current base with every *single-qutrit* base, yielding
    len(bases)**numberOfQutrits product bases of matching dimension.

    Bug fix: the original re-bound `bases` each iteration and kroned the
    current set with itself, so for numberOfQutrits >= 3 the matrix
    dimensions doubled per iteration instead of growing by one qutrit,
    which is inconsistent with doIterativeAlgorithm's 9**numberOfQutrits
    loop. Behavior for numberOfQutrits <= 2 is unchanged.

    :param numberOfQutrits: Number of qutrits in the system.
    :param bases: Iterable of single-qutrit measurement base matrices.
    :return: np.array of all product base matrices.
    """
    single_qutrit_bases = list(bases)
    product_bases = list(bases)
    for _ in range(numberOfQutrits - 1):
        product_bases = [kron(left, right)
                         for left in product_bases
                         for right in single_qutrit_bases]
    return array(product_bases)
""" Get Experimental Data """
def getExperimentalData(pathOfExperimentalData):
"""
getExperimentalData(pathOfExperimentalData(string)):
This function is getting experimental data from file at "pathOfExperimentalData",
----------------------------------------------------------------------------------------
return:
np.array of them.
"""
with open(pathOfExperimentalData) as f:
experimentalData = []
for s in f.readlines():
experimentalData.extend(map(int, s.strip().split()))
return array(experimentalData)
""" Iterative Algorithm for Quantum Tomography """
def doIterativeAlgorithm(numberOfQutrits, bases, listOfExperimentalData):
    """
    doIterativeAlgorithm():
    This function is to do iterative algorithm(10.1103/PhysRevA.63.040303) and diluted MLE algorithm(10.1103/PhysRevA.75.042108) to a set of data given from a experiment.
    This recieve four variables (numberOfQutrits, bases, maxNumberOfIteration, listAsExperimentalData),
    and return most likely estimated density matrix (np.array) and total time of calculation(datetime.timedelta).
    First quantum state is an Identity: Maximum Mixed State.
    --------------------------------------------------------------------------------------------------------------
    Return:
        most likely estimated density matrix(np.array)
    """
    """ Setting initial parameters """
    iter = 0
    # epsilon is the dilution parameter of the diluted MLE step; it is
    # shrunk whenever the likelihood would decrease.
    epsilon = 1000
    endDiff = 10e-10
    diff = 100
    # TolFun = 10e-11
    # traceDistance = 100
    maxNumberOfIteration = 100000
    dataList = listOfExperimentalData
    totalCountOfData = sum(dataList)
    nDataList = dataList / totalCountOfData # nDataList is a list of normarized data
    densityMatrix = identity(3 ** numberOfQutrits) # Initial State: Maximun Mixed State
    """ Start iteration """
    # Iterate until the likelihood gain drops below endDiff or the iteration
    # budget is exhausted.
    # while traceDistance > TolFun and iter <= maxNumberOfIteration:
    while diff > endDiff and iter <= maxNumberOfIteration:
        # Born-rule probabilities for the current state estimate.
        probList = [trace(bases[i] @ densityMatrix) for i in range(len(bases))]
        nProbList = probList / sum(probList)
        rotationMatrix = sum([(nDataList[i] / probList[i]) * bases[i] for i in range(9 ** numberOfQutrits)])
        """ Normalization of Matrices for Measurement Bases """
        U = np.linalg.inv(sum(bases)) / sum(probList)
        # Diluted update operators: (1 + eps*R)/(1 + eps) applied from both sides.
        rotationMatrixLeft = (identity(3 ** numberOfQutrits) + epsilon * U @ rotationMatrix) / (1 + epsilon)
        rotationMatrixRight = (identity(3 ** numberOfQutrits) + epsilon * rotationMatrix @ U) / (1 + epsilon)
        """ Calculation of updated density matrix """
        modifiedDensityMatrix = rotationMatrixLeft @ densityMatrix @ rotationMatrixRight / trace(rotationMatrixLeft @ densityMatrix @ rotationMatrixRight)
        eigValueArray, eigVectors = np.linalg.eig(densityMatrix - modifiedDensityMatrix)
        traceDistance = sum(np.absolute(eigValueArray)) / 2
        """ Update Likelihood Function, and Compared with older one """
        LikelihoodFunction = sum([nDataList[i] * np.log(nProbList[i]) for i in range(9 ** numberOfQutrits)])
        probList = [trace(bases[i] @ modifiedDensityMatrix) for i in range(len(bases))]
        nProbList = probList / sum(probList)
        modifiedLikelihoodFunction = sum([nDataList[i] * np.log(nProbList[i]) for i in range(9 ** numberOfQutrits)])
        diff = modifiedLikelihoodFunction - LikelihoodFunction
        """ Show Progress of Calculation """
        progress = 100 * iter / maxNumberOfIteration
        if progress % 5 == 0:
            msg = "Progress of calculation: " + str(int(progress)) + "%"
            print(msg)
        """ Increment """
        iter += 1
        """ Check Increasing of Likelihood Function """
        # If the likelihood decreased, dilute the step (shrink epsilon) and
        # retry without accepting the update.
        if diff < 0:
            epsilon = epsilon * 0.1
            continue
        """ Update Density Matrix """
        densityMatrix = modifiedDensityMatrix.copy()
    """ Check That Max Iteration Number was appropriate """
    if iter >= maxNumberOfIteration:
        print("----------------------------------------------")
        print("Iteration time reached max iteration number.")
        print("The number of max iteration times is too small.")
        print("----------------------------------------------")
    """ Show the total number of iteration """
    endIterationTimes = iter
    emsg = "Iteration was '" + str(endIterationTimes) + "' times."
    print(emsg)
    return modifiedDensityMatrix
""" Calculate Fidelity """
def calculateFidelity(idealDensityMatrix, estimatedDensityMatrix):
"""
calculateFidelity(idealDensityMatrix, estimatedDensityMatrix):
"""
fidelity = np.real(trace(sqrtm(sqrtm(idealDensityMatrix) @ estimatedDensityMatrix @ sqrtm(idealDensityMatrix))))
return fidelity
""" Iterative Simulation """
def doIterativeSimulation(numberOfQutrits, bases, pathOfExperimentalData, idealDensityMatrix, resultDirectoryName):
"""
doIterativeSimulation(numberOfQutrits, bases, pathOfExperimentalData, idealDensityMatrix, resultDirectoryName)
"""
""" Get Experimental Data"""
listOfExperimentalData = getExperimentalData(pathOfExperimentalData)
""" Calculate """
estimatedDensityMatrix = doIterativeAlgorithm(numberOfQutrits, bases, listOfExperimentalData)
fidelity = calculateFidelity(idealDensityMatrix, estimatedDensityMatrix)
""" Make File Name of result """
l = 0
r = len(pathOfExperimentalData)-1
for i in range(len(pathOfExperimentalData)):
if pathOfExperimentalData[len(pathOfExperimentalData)-1-i] == ".":
r = len(pathOfExperimentalData)-1-i
if pathOfExperimentalData[len(pathOfExperimentalData)-1-i] == "/" or pathOfExperimentalData[len(pathOfExperimentalData)-1-i] == "\\":
l = len(pathOfExperimentalData)-i
break
resultFileName = pathOfExperimentalData[l:r]
resultFilePath = '.\\result\\qutrit\\iterative\\' + resultDirectoryName + '\\' + resultFileName + '_result' + '.txt'
""" Save Result """
with open(resultFilePath, mode='a') as f:
f.writelines(str(fidelity) + '\n')
""" Make 3D Plot """
plotResult(numberOfQutrits, estimatedDensityMatrix)
""" Poisson Distributed Simulation """
def doPoissonDistributedSimulation(numberOfQutrits, bases, pathOfExperimentalData, idealDensityMatrix, resultDirectoryName):
"""
doPoissonDistributedSimulation(numberOfQutrits, bases, pathOfExperimentalData, idealDensityMatrix, resultDirectoryName)
"""
""" Get Experimental Data"""
listOfExperimentalData = getExperimentalData(pathOfExperimentalData)
""" Calculate """
estimatedDensityMatrix = doIterativeAlgorithm(numberOfQutrits, bases, random.poisson(listOfExperimentalData))
fidelity = calculateFidelity(idealDensityMatrix, estimatedDensityMatrix)
""" Make File Name of result """
l = 0
r = len(pathOfExperimentalData)-1
for i in range(len(pathOfExperimentalData)):
if pathOfExperimentalData[len(pathOfExperimentalData)-1-i] == ".":
r = len(pathOfExperimentalData)-1-i
if pathOfExperimentalData[len(pathOfExperimentalData)-1-i] == "/" or pathOfExperimentalData[len(pathOfExperimentalData)-1-i] == "\\":
l = len(pathOfExperimentalData)-i
break
resultFileName = pathOfExperimentalData[l:r]
resultFilePath = '.\\result\\qutrit\\iterative\\' + resultDirectoryName + "\\" + resultFileName + '_result' + '.txt'
""" Save Result """
with open(resultFilePath, mode='a') as f:
f.write(str(fidelity) + '\n')
def plotResult(numberOfQutrits, densityMatrix):
"""
plotResult(numberOfQutrits, densityMatrix)
"""
baseNames = np.array([])
""" Plot Setting """
xedges, yedges = np.arange(3**numberOfQutrits), np.arange(3**numberOfQutrits)
xpos, ypos = np.meshgrid(xedges, yedges) # x,y座標を3D用の形式に変換(その1)
zpos = 0 # zは常に0を始点にする
dx = 1 # x座標の幅を設定
dy = 1 # y座標の幅を設定
dz = densityMatrix.ravel() # z座標の幅は棒の長さに相当
xpos = xpos.ravel() # x座標を3D用の形式に変換(その2)
ypos = ypos.ravel() # y座標を3D用の形式に変換(その2)
fig = plt.figure() # 描画領域を作成
ax1 = fig.add_subplot(121, projection="3d") # 3Dの軸を作成
ax1.bar3d(xpos,ypos,zpos,dx,dy,np.real(dz), edgecolor='black') # ヒストグラムを3D空間に表示
plt.title("Real Part") # タイトル表示
# plt.xlabel("X") # x軸の内容表示
plt.xticks(np.arange(0, 3**numberOfQutrits, 1), labels=baseNames)
# plt.ylabel("Y") # y軸の内容表示
plt.yticks(np.arange(0, 3**numberOfQutrits, 1), labels=baseNames)
# ax1.set_zlabel("Z") # z軸の内容表示
ax1.set_zlim(-0.1, 0.5)
ax2 = fig.add_subplot(122, projection="3d") # 3Dの軸を作成
ax2.bar3d(xpos,ypos,zpos,dx,dy,np.imag(dz), edgecolor='black') # ヒストグラムを3D空間に表示
plt.title("Imaginary Part") # タイトル表示
# plt.xlabel("X") # x軸の内容表示
plt.xticks(np.arange(0, 3**numberOfQutrits, 1), labels=baseNames)
# plt.ylabel("Y") # y軸の内容表示
plt.yticks(np.arange(0, 3**numberOfQutrits, 1), labels=baseNames)
# ax2.set_zlabel("Z") # z軸の内容表示
ax2.set_zlim(-0.1, 0.5)
plt.show()
with open('qutritplottest'+'_plot.pkl', mode='wb') as f:
pickle.dump(fig, f)
""" Get Number of Qutrits """
def getNumberOfQutrits():
"""
getNumberOfQutrits()
"""
print("------------------------------------------------------------")
print("PLEASE ENTER NUMBER OF QUTRITS")
print("------------------------------------------------------------")
print(">>")
numberOfQutrits = int(input())
return numberOfQutrits
""" Get Path of Experimental Data Directory """
def getExperimentalDataDirectoryPath():
"""
getExperimentalDataDirectoryPath()
"""
print("------------------------------------------------------------")
print("PLEASE ENTER PATH OF EXPERIMENTAL DATA DIRECTORY")
print("")
print("LIKE THIS >> .\\datadirectory")
print("------------------------------------------------------------")
print(">>")
return Path(input())
""" Get Name of Result Directory AND FILE """
def getNameOfResultDirectory():
"""
getNameOfResultDirectory()
"""
print("------------------------------------------------------------")
print("PLEASE ENTER NAME OF RESULT DIRECTORY ")
print("")
print("THE RESULT DATA WILL SAVED AT ")
print("'.\\result\\qutrit\\iterative(or poisson)\\{ YOUR ENTED DIRECTORY NAME }\\{ EXPERIMENTAL DATA FILE NAME }_result.txt'")
print("")
print("IF EMPTY, THE NAME OF RESULT DIRECTORY IS 'default'")
print("------------------------------------------------------------")
print(">>")
nameOfResultDirectory = input()
if nameOfResultDirectory == "":
nameOfResultDirectory = "default"
return nameOfResultDirectory
""" Whether Do Poisson Distributed Simulation """
def checkPoisson():
"""
checkPoisson()
"""
print("------------------------------------------------------------")
print("PLEASE ENTER ANSWER WHETHER DO POISSON DISTRIBUTED SIMULATION")
print("IF YOU DO, PLEASE ENTER 'yes'")
print("IF YOU ENTER ANOTHER WORD OR EMPTY, YOUR ANSWER IS REGARED AS 'no'")
print("------------------------------------------------------------")
print(">>")
answer = input()
if answer == "yes" or answer == "Yes" or answer == "YES":
print("YOUR ANSWER IS: 'yes'")
poissonPaths = getExperimentalDataPaths()
eachIterationTime = getEachIterationTime()
return True, poissonPaths*eachIterationTime
else:
print("YOUR ANSWER IS: 'no'")
return False, []
""" Get Each Iteration Time """
def getEachIterationTime():
"""
getEachIterationTime()
"""
print("------------------------------------------------------------")
print("PLEASE ENTER ITERATION TIME OF EACH POISSON SIMULATION")
print("------------------------------------------------------------")
print(">>")
eachIterationTime = input()
if eachIterationTime == "":
eachIterationTime = 0
else:
eachIterationTime = int(eachIterationTime)
return eachIterationTime
""" Get Number of Parallel Comuting """
def getNumberOfParallelComputing():
"""
getNumberOfParallelComputing()
"""
print("------------------------------------------------------------")
print("HOW MANY TIMES DO YOU WANT TO PARALLELIZE?")
print("IF THE NUMBER IS TOO LARGE, THE PARFORMANCE OF SIMULATION BECOME LOWER.")
print("THE NUMBER OF LOGICAL PROCESSOR OF YOUR COMPUTER IS >>")
print(os.cpu_count())
print("RECOMENDED NUMBER IS LESS THAN THE ABOVE NUMBER.")
print("------------------------------------------------------------")
print(">>")
n = input()
if n != '':
numberOfParallelComputing = int(n)
else:
numberOfParallelComputing = 1
return numberOfParallelComputing
if __name__ == "__main__":
""" Get Number of Qutrits """
numberOfQutrits = getNumberOfQutrits()
""" Get Paths of Experimental Data Directory """
directoryPath = getExperimentalDataDirectoryPath()
paths = list(directoryPath.glob("*.txt"))
""" Get Name of Result Directory """
resultDirectoryName = getNameOfResultDirectory()
""" Check Poisson Distributed Simulation """
check, poissonPaths = checkPoisson()
""" Get Number of Parallel Computing """
numberOfParallelComputing = getNumberOfParallelComputing()
""" Make Bases """
basesOfQutrits = makeBases(numberOfQutrits, bases)
""" Make Ideal Density Matrix """
baseVecter = np.zeros([1, 3**numberOfQutrits])
baseVecter[0][0] = 1 / sqrt(3)
baseVecter[0][4] = 1 / sqrt(3)
baseVecter[0][3**numberOfQutrits-1] = 1 / sqrt(3)
idealDensityMatrix = baseVecter.T @ baseVecter
""" Make Result Directory """
if not os.path.exists('.\\result\\qutrit\\iterative\\' + resultDirectoryName):
os.makedirs('.\\result\\qutrit\\iterative\\' + resultDirectoryName)
""" Start Tomography """
with futures.ProcessPoolExecutor(max_workers=numberOfParallelComputing) as executor:
for path in paths:
executor.submit(fn=doIterativeSimulation, numberOfQutrits=numberOfQutrits, bases=basesOfQutrits, pathOfExperimentalData=str(path), idealDensityMatrix=idealDensityMatrix, resultDirectoryName=resultDirectoryName)
""" Start Poisson Distributed Simulation """
if check:
""" Make Result Directory for Poisson Distributed Simulation """
if not os.path.exists('.\\result\\qutrit\\posson\\' + resultDirectoryName):
os.makedirs('.\\result\\qutrit\\poisson\\' + resultDirectoryName)
with futures.ProcessPoolExecutor(max_workers=numberOfParallelComputing) as executor:
for poissonPath in poissonPaths:
executor.submit(fn=doPoissonDistributedSimulation, numberOfQutrits=numberOfQutrits, bases=basesOfQutrits, pathOfExperimentalData=poissonPath, idealDensityMatrix=idealDensityMatrix, resultDirectoryName=resultDirectoryName)
| [
"numpy.trace",
"numpy.sqrt",
"numpy.log",
"numpy.array",
"os.cpu_count",
"numpy.imag",
"numpy.arange",
"os.path.exists",
"numpy.random.poisson",
"numpy.conjugate",
"numpy.exp",
"numpy.real",
"numpy.meshgrid",
"numpy.identity",
"scipy.linalg.sqrtm",
"numpy.linalg.eig",
"numpy.kron",
... | [((959, 972), 'numpy.zeros', 'zeros', (['(1, 3)'], {}), '((1, 3))\n', (964, 972), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((1042, 1055), 'numpy.zeros', 'zeros', (['(1, 3)'], {}), '((1, 3))\n', (1047, 1055), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((1125, 1138), 'numpy.zeros', 'zeros', (['(1, 3)'], {}), '((1, 3))\n', (1130, 1138), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((2147, 2199), 'numpy.array', 'array', (['[mb1, mb2, mb3, mb4, mb5, mb6, mb7, mb8, mb9]'], {}), '([mb1, mb2, mb3, mb4, mb5, mb6, mb7, mb8, mb9])\n', (2152, 2199), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((2448, 2460), 'numpy.array', 'array', (['bases'], {}), '(bases)\n', (2453, 2460), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((3046, 3069), 'numpy.array', 'array', (['experimentalData'], {}), '(experimentalData)\n', (3051, 3069), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((4279, 4309), 'numpy.identity', 'identity', (['(3 ** numberOfQutrits)'], {}), '(3 ** numberOfQutrits)\n', (4287, 4309), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((10240, 10252), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (10248, 10252), True, 'import numpy as np\n'), ((10380, 10407), 'numpy.meshgrid', 'np.meshgrid', (['xedges', 'yedges'], {}), '(xedges, yedges)\n', (10391, 10407), True, 'import numpy as np\n'), ((10660, 10672), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10670, 10672), True, 'import matplotlib.pyplot as plt\n'), ((10833, 10855), 'matplotlib.pyplot.title', 'plt.title', (['"""Real Part"""'], {}), "('Real Part')\n", (10842, 10855), True, 'import matplotlib.pyplot as plt\n'), 
((11284, 11311), 'matplotlib.pyplot.title', 'plt.title', (['"""Imaginary Part"""'], {}), "('Imaginary Part')\n", (11293, 11311), True, 'import matplotlib.pyplot as plt\n'), ((11594, 11604), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11602, 11604), True, 'import matplotlib.pyplot as plt\n'), ((16102, 16137), 'numpy.zeros', 'np.zeros', (['[1, 3 ** numberOfQutrits]'], {}), '([1, 3 ** numberOfQutrits])\n', (16110, 16137), True, 'import numpy as np\n'), ((1230, 1254), 'numpy.conjugate', 'conjugate', (['(fb1 + fb2).T'], {}), '((fb1 + fb2).T)\n', (1239, 1254), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((1281, 1305), 'numpy.conjugate', 'conjugate', (['(fb1 + fb3).T'], {}), '((fb1 + fb3).T)\n', (1290, 1305), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((1332, 1356), 'numpy.conjugate', 'conjugate', (['(fb2 + fb3).T'], {}), '((fb2 + fb3).T)\n', (1341, 1356), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((5330, 5382), 'numpy.linalg.eig', 'np.linalg.eig', (['(densityMatrix - modifiedDensityMatrix)'], {}), '(densityMatrix - modifiedDensityMatrix)\n', (5343, 5382), True, 'import numpy as np\n'), ((9245, 9283), 'numpy.random.poisson', 'random.poisson', (['listOfExperimentalData'], {}), '(listOfExperimentalData)\n', (9259, 9283), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((10300, 10331), 'numpy.arange', 'np.arange', (['(3 ** numberOfQutrits)'], {}), '(3 ** numberOfQutrits)\n', (10309, 10331), True, 'import numpy as np\n'), ((10331, 10362), 'numpy.arange', 'np.arange', (['(3 ** numberOfQutrits)'], {}), '(3 ** numberOfQutrits)\n', (10340, 10362), True, 'import numpy as np\n'), ((10780, 10791), 'numpy.real', 'np.real', (['dz'], {}), '(dz)\n', (10787, 10791), True, 'import numpy as np\n'), ((10912, 10949), 'numpy.arange', 'np.arange', (['(0)', 
'(3 ** numberOfQutrits)', '(1)'], {}), '(0, 3 ** numberOfQutrits, 1)\n', (10921, 10949), True, 'import numpy as np\n'), ((11014, 11051), 'numpy.arange', 'np.arange', (['(0)', '(3 ** numberOfQutrits)', '(1)'], {}), '(0, 3 ** numberOfQutrits, 1)\n', (11023, 11051), True, 'import numpy as np\n'), ((11231, 11242), 'numpy.imag', 'np.imag', (['dz'], {}), '(dz)\n', (11238, 11242), True, 'import numpy as np\n'), ((11368, 11405), 'numpy.arange', 'np.arange', (['(0)', '(3 ** numberOfQutrits)', '(1)'], {}), '(0, 3 ** numberOfQutrits, 1)\n', (11377, 11405), True, 'import numpy as np\n'), ((11470, 11507), 'numpy.arange', 'np.arange', (['(0)', '(3 ** numberOfQutrits)', '(1)'], {}), '(0, 3 ** numberOfQutrits, 1)\n', (11479, 11507), True, 'import numpy as np\n'), ((11675, 11694), 'pickle.dump', 'pickle.dump', (['fig', 'f'], {}), '(fig, f)\n', (11686, 11694), False, 'import pickle\n'), ((15071, 15085), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (15083, 15085), False, 'import os\n'), ((16163, 16170), 'numpy.sqrt', 'sqrt', (['(3)'], {}), '(3)\n', (16167, 16170), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((16198, 16205), 'numpy.sqrt', 'sqrt', (['(3)'], {}), '(3)\n', (16202, 16205), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((16252, 16259), 'numpy.sqrt', 'sqrt', (['(3)'], {}), '(3)\n', (16256, 16259), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((16357, 16427), 'os.path.exists', 'os.path.exists', (["('.\\\\result\\\\qutrit\\\\iterative\\\\' + resultDirectoryName)"], {}), "('.\\\\result\\\\qutrit\\\\iterative\\\\' + resultDirectoryName)\n", (16371, 16427), False, 'import os\n'), ((16437, 16504), 'os.makedirs', 'os.makedirs', (["('.\\\\result\\\\qutrit\\\\iterative\\\\' + resultDirectoryName)"], {}), "('.\\\\result\\\\qutrit\\\\iterative\\\\' + resultDirectoryName)\n", (16448, 16504), False, 'import os\n'), 
((16544, 16610), 'concurrent.futures.ProcessPoolExecutor', 'futures.ProcessPoolExecutor', ([], {'max_workers': 'numberOfParallelComputing'}), '(max_workers=numberOfParallelComputing)\n', (16571, 16610), False, 'from concurrent import futures\n'), ((4529, 4560), 'numpy.trace', 'trace', (['(bases[i] @ densityMatrix)'], {}), '(bases[i] @ densityMatrix)\n', (4534, 4560), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((5230, 5293), 'numpy.trace', 'trace', (['(rotationMatrixLeft @ densityMatrix @ rotationMatrixRight)'], {}), '(rotationMatrixLeft @ densityMatrix @ rotationMatrixRight)\n', (5235, 5293), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((5645, 5684), 'numpy.trace', 'trace', (['(bases[i] @ modifiedDensityMatrix)'], {}), '(bases[i] @ modifiedDensityMatrix)\n', (5650, 5684), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((17026, 17093), 'os.path.exists', 'os.path.exists', (["('.\\\\result\\\\qutrit\\\\posson\\\\' + resultDirectoryName)"], {}), "('.\\\\result\\\\qutrit\\\\posson\\\\' + resultDirectoryName)\n", (17040, 17093), False, 'import os\n'), ((17107, 17172), 'os.makedirs', 'os.makedirs', (["('.\\\\result\\\\qutrit\\\\poisson\\\\' + resultDirectoryName)"], {}), "('.\\\\result\\\\qutrit\\\\poisson\\\\' + resultDirectoryName)\n", (17118, 17172), False, 'import os\n'), ((17187, 17253), 'concurrent.futures.ProcessPoolExecutor', 'futures.ProcessPoolExecutor', ([], {'max_workers': 'numberOfParallelComputing'}), '(max_workers=numberOfParallelComputing)\n', (17214, 17253), False, 'from concurrent import futures\n'), ((1449, 1471), 'numpy.exp', 'exp', (['(2 * pi * 1.0j / 3)'], {}), '(2 * pi * 1.0j / 3)\n', (1452, 1471), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((1474, 1497), 'numpy.exp', 'exp', (['(-2 * pi * 1.0j / 3)'], {}), '(-2 * pi * 1.0j / 
3)\n', (1477, 1497), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((1576, 1599), 'numpy.exp', 'exp', (['(-2 * pi * 1.0j / 3)'], {}), '(-2 * pi * 1.0j / 3)\n', (1579, 1599), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((1601, 1623), 'numpy.exp', 'exp', (['(2 * pi * 1.0j / 3)'], {}), '(2 * pi * 1.0j / 3)\n', (1604, 1623), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((1703, 1725), 'numpy.exp', 'exp', (['(2 * pi * 1.0j / 3)'], {}), '(2 * pi * 1.0j / 3)\n', (1706, 1725), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((1728, 1751), 'numpy.exp', 'exp', (['(-2 * pi * 1.0j / 3)'], {}), '(-2 * pi * 1.0j / 3)\n', (1731, 1751), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((1830, 1853), 'numpy.exp', 'exp', (['(-2 * pi * 1.0j / 3)'], {}), '(-2 * pi * 1.0j / 3)\n', (1833, 1853), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((1855, 1877), 'numpy.exp', 'exp', (['(2 * pi * 1.0j / 3)'], {}), '(2 * pi * 1.0j / 3)\n', (1858, 1877), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((1957, 1979), 'numpy.exp', 'exp', (['(2 * pi * 1.0j / 3)'], {}), '(2 * pi * 1.0j / 3)\n', (1960, 1979), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((1982, 2005), 'numpy.exp', 'exp', (['(-2 * pi * 1.0j / 3)'], {}), '(-2 * pi * 1.0j / 3)\n', (1985, 2005), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((2084, 2107), 'numpy.exp', 'exp', (['(-2 * pi * 1.0j / 3)'], {}), '(-2 * pi * 1.0j / 3)\n', (2087, 2107), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((2109, 
2131), 'numpy.exp', 'exp', (['(2 * pi * 1.0j / 3)'], {}), '(2 * pi * 1.0j / 3)\n', (2112, 2131), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((4895, 4925), 'numpy.identity', 'identity', (['(3 ** numberOfQutrits)'], {}), '(3 ** numberOfQutrits)\n', (4903, 4925), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((5005, 5035), 'numpy.identity', 'identity', (['(3 ** numberOfQutrits)'], {}), '(3 ** numberOfQutrits)\n', (5013, 5035), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((5411, 5437), 'numpy.absolute', 'np.absolute', (['eigValueArray'], {}), '(eigValueArray)\n', (5422, 5437), True, 'import numpy as np\n'), ((2362, 2380), 'numpy.kron', 'kron', (['base1', 'base2'], {}), '(base1, base2)\n', (2366, 2380), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((5565, 5585), 'numpy.log', 'np.log', (['nProbList[i]'], {}), '(nProbList[i])\n', (5571, 5585), True, 'import numpy as np\n'), ((5815, 5835), 'numpy.log', 'np.log', (['nProbList[i]'], {}), '(nProbList[i])\n', (5821, 5835), True, 'import numpy as np\n'), ((7264, 7289), 'scipy.linalg.sqrtm', 'sqrtm', (['idealDensityMatrix'], {}), '(idealDensityMatrix)\n', (7269, 7289), False, 'from scipy.linalg import sqrtm\n'), ((1394, 1416), 'numpy.exp', 'exp', (['(2 * pi * 1.0j / 3)'], {}), '(2 * pi * 1.0j / 3)\n', (1397, 1416), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((1419, 1442), 'numpy.exp', 'exp', (['(-2 * pi * 1.0j / 3)'], {}), '(-2 * pi * 1.0j / 3)\n', (1422, 1442), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((1521, 1544), 'numpy.exp', 'exp', (['(-2 * pi * 1.0j / 3)'], {}), '(-2 * pi * 1.0j / 3)\n', (1524, 1544), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, 
conjugate, random\n'), ((1546, 1568), 'numpy.exp', 'exp', (['(2 * pi * 1.0j / 3)'], {}), '(2 * pi * 1.0j / 3)\n', (1549, 1568), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((1648, 1670), 'numpy.exp', 'exp', (['(2 * pi * 1.0j / 3)'], {}), '(2 * pi * 1.0j / 3)\n', (1651, 1670), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((1673, 1696), 'numpy.exp', 'exp', (['(-2 * pi * 1.0j / 3)'], {}), '(-2 * pi * 1.0j / 3)\n', (1676, 1696), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((1775, 1798), 'numpy.exp', 'exp', (['(-2 * pi * 1.0j / 3)'], {}), '(-2 * pi * 1.0j / 3)\n', (1778, 1798), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((1800, 1822), 'numpy.exp', 'exp', (['(2 * pi * 1.0j / 3)'], {}), '(2 * pi * 1.0j / 3)\n', (1803, 1822), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((1902, 1924), 'numpy.exp', 'exp', (['(2 * pi * 1.0j / 3)'], {}), '(2 * pi * 1.0j / 3)\n', (1905, 1924), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((1927, 1950), 'numpy.exp', 'exp', (['(-2 * pi * 1.0j / 3)'], {}), '(-2 * pi * 1.0j / 3)\n', (1930, 1950), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((2029, 2052), 'numpy.exp', 'exp', (['(-2 * pi * 1.0j / 3)'], {}), '(-2 * pi * 1.0j / 3)\n', (2032, 2052), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((2054, 2076), 'numpy.exp', 'exp', (['(2 * pi * 1.0j / 3)'], {}), '(2 * pi * 1.0j / 3)\n', (2057, 2076), False, 'from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random\n'), ((7211, 7236), 'scipy.linalg.sqrtm', 'sqrtm', (['idealDensityMatrix'], {}), '(idealDensityMatrix)\n', (7216, 
7236), False, 'from scipy.linalg import sqrtm\n')] |
'''
Created on 09.11.2017
@author: sfs
'''
import numpy as np
from matplotlib import pylab
from pylab import *
nums = np.arange(51)
probs = np.random.randint(low = 0, high = 10, size= (1, 51))
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 1.0, 0.7),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.5, 1.0, 0.0),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(0.5, 1.0, 0.0),
(1.0, 0.5, 1.0))}
def display(probs):
my_cmap = matplotlib.colors.LinearSegmentedColormap('my_colormap',cdict,256)
pcolor(probs,cmap=my_cmap)
colorbar()
# without the line below, the figure won't show
pylab.show()
display(probs) | [
"numpy.random.randint",
"matplotlib.pylab.show",
"numpy.arange"
] | [((141, 154), 'numpy.arange', 'np.arange', (['(51)'], {}), '(51)\n', (150, 154), True, 'import numpy as np\n'), ((166, 213), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(10)', 'size': '(1, 51)'}), '(low=0, high=10, size=(1, 51))\n', (183, 213), True, 'import numpy as np\n'), ((679, 691), 'matplotlib.pylab.show', 'pylab.show', ([], {}), '()\n', (689, 691), False, 'from matplotlib import pylab\n')] |
import operator as op
import numpy as np
import jax
import jax.scipy.signal as signal
import pytest
from scico.linop import Convolve, ConvolveByX, LinearOperator
from scico.random import randn
from scico.test.linop.test_linop import AbsMatOp, adjoint_AAt_test, adjoint_AtA_test
class TestConvolve:
def setup_method(self, method):
self.key = jax.random.PRNGKey(12345)
@pytest.mark.parametrize("input_dtype", [np.float32, np.complex64])
@pytest.mark.parametrize("input_shape", [(32,), (32, 48)])
@pytest.mark.parametrize("mode", ["full", "valid", "same"])
@pytest.mark.parametrize("jit", [False, True])
def test_eval(self, input_shape, input_dtype, mode, jit):
ndim = len(input_shape)
filter_shape = (3, 4)[:ndim]
x, key = randn(input_shape, dtype=input_dtype, key=self.key)
psf, key = randn(filter_shape, dtype=input_dtype, key=key)
A = Convolve(h=psf, input_shape=input_shape, input_dtype=input_dtype, mode=mode, jit=jit)
Ax = A @ x
y = signal.convolve(x, psf, mode=mode)
np.testing.assert_allclose(Ax.ravel(), y.ravel(), rtol=1e-4)
@pytest.mark.parametrize("input_dtype", [np.float32, np.complex64])
@pytest.mark.parametrize("input_shape", [(32,), (32, 48)])
@pytest.mark.parametrize("mode", ["full", "valid", "same"])
@pytest.mark.parametrize("jit", [False, True])
def test_adjoint(self, input_shape, mode, jit, input_dtype):
ndim = len(input_shape)
filter_shape = (3, 4)[:ndim]
x, key = randn(input_shape, dtype=input_dtype, key=self.key)
psf, key = randn(filter_shape, dtype=input_dtype, key=key)
A = Convolve(h=psf, input_shape=input_shape, input_dtype=input_dtype, mode=mode, jit=jit)
adjoint_AtA_test(A, self.key)
adjoint_AAt_test(A, self.key)
class ConvolveTestObj:
def __init__(self):
dtype = np.float32
key = jax.random.PRNGKey(12345)
self.psf_A, key = randn((3,), dtype=dtype, key=key)
self.psf_B, key = randn((3,), dtype=dtype, key=key)
self.psf_C, key = randn((5,), dtype=dtype, key=key)
self.A = Convolve(input_shape=(32,), h=self.psf_A)
self.B = Convolve(input_shape=(32,), h=self.psf_B)
self.C = Convolve(input_shape=(32,), h=self.psf_C)
# Matrix for a 'generic linop'
m = self.A.output_shape[0]
n = self.A.input_shape[0]
G_mat, key = randn((m, n), dtype=dtype, key=key)
self.G = AbsMatOp(G_mat)
self.x, key = randn((32,), dtype=dtype, key=key)
self.scalar = 3.141
@pytest.fixture
def testobj(request):
yield ConvolveTestObj()
@pytest.mark.parametrize("operator", [op.mul, op.truediv])
def test_scalar_left(testobj, operator):
A = operator(testobj.A, testobj.scalar)
x = testobj.x
B = Convolve(input_shape=(32,), h=operator(testobj.psf_A, testobj.scalar))
np.testing.assert_allclose(A @ x, B @ x, rtol=5e-5)
@pytest.mark.parametrize("operator", [op.mul, op.truediv])
def test_scalar_right(testobj, operator):
if operator == op.truediv:
pytest.xfail("scalar / LinearOperator is not supported")
A = operator(testobj.scalar, testobj.A)
x = testobj.x
B = Convolve(input_shape=(32,), h=operator(testobj.scalar, testobj.psf_A))
np.testing.assert_allclose(A @ x, B @ x, rtol=5e-5)
@pytest.mark.parametrize("operator", [op.add, op.sub])
def test_convolve_add_sub(testobj, operator):
A = testobj.A
B = testobj.B
C = testobj.C
x = testobj.x
# Two operators of same size
AB = operator(A, B)
ABx = AB @ x
AxBx = operator(A @ x, B @ x)
np.testing.assert_allclose(ABx, AxBx, rtol=5e-5)
# Two operators of different size
with pytest.raises(ValueError):
operator(A, C)
@pytest.mark.parametrize("operator", [op.add, op.sub])
def test_add_sub_different_mode(testobj, operator):
# These tests get caught inside of the _wrap_add_sub input/output shape checks,
# not the explicit mode check inside of the wrapped __add__ method
B_same = Convolve(input_shape=(32,), h=testobj.psf_B, mode="same")
with pytest.raises(ValueError):
operator(testobj.A, B_same)
@pytest.mark.parametrize("operator", [op.add, op.sub])
def test_add_sum_generic_linop(testobj, operator):
# Combine a AbsMatOp and Convolve, get a generic LinearOperator
AG = operator(testobj.A, testobj.G)
assert isinstance(AG, LinearOperator)
# Check evaluation
a = AG @ testobj.x
b = operator(testobj.A @ testobj.x, testobj.G @ testobj.x)
np.testing.assert_allclose(a, b, rtol=5e-5)
@pytest.mark.parametrize("operator", [op.add, op.sub])
def test_add_sum_conv(testobj, operator):
# Combine a AbsMatOp and Convolve, get a generic LinearOperator
AA = operator(testobj.A, testobj.A)
assert isinstance(AA, Convolve)
# Check evaluation
a = AA @ testobj.x
b = operator(testobj.A @ testobj.x, testobj.A @ testobj.x)
np.testing.assert_allclose(a, b, rtol=5e-5)
@pytest.mark.parametrize("operator", [op.mul, op.truediv])
def test_mul_div_generic_linop(testobj, operator):
# not defined between Convolve and AbsMatOp
with pytest.raises(TypeError):
operator(testobj.A, testobj.G)
def test_invalid_mode(testobj):
# mode that doesn't exist
with pytest.raises(ValueError):
Convolve(input_shape=(32,), h=testobj.psf_A, mode="foo")
def test_dimension_mismatch(testobj):
with pytest.raises(ValueError):
# 2-dim input shape, 1-dim filter
Convolve(input_shape=(32, 32), h=testobj.psf_A)
def test_ndarray_h():
h = np.random.randn(3, 3).astype(np.float32)
A = Convolve(input_shape=(32, 32), h=h)
assert isinstance(A.h, jax.interpreters.xla.DeviceArray)
class TestConvolveByX:
def setup_method(self, method):
self.key = jax.random.PRNGKey(12345)
@pytest.mark.parametrize("input_dtype", [np.float32, np.complex64])
@pytest.mark.parametrize("input_shape", [(32,), (32, 48)])
@pytest.mark.parametrize("mode", ["full", "valid", "same"])
@pytest.mark.parametrize("jit", [False, True])
def test_eval(self, input_shape, input_dtype, mode, jit):
ndim = len(input_shape)
x_shape = (3, 4)[:ndim]
h, key = randn(input_shape, dtype=input_dtype, key=self.key)
x, key = randn(x_shape, dtype=input_dtype, key=key)
A = ConvolveByX(x=x, input_shape=input_shape, input_dtype=input_dtype, mode=mode, jit=jit)
Ax = A @ h
y = signal.convolve(x, h, mode=mode)
np.testing.assert_allclose(Ax.ravel(), y.ravel(), rtol=1e-4)
@pytest.mark.parametrize("input_dtype", [np.float32, np.complex64])
@pytest.mark.parametrize("input_shape", [(32,), (32, 48)])
@pytest.mark.parametrize("mode", ["full", "valid", "same"])
@pytest.mark.parametrize("jit", [False, True])
def test_adjoint(self, input_shape, mode, jit, input_dtype):
ndim = len(input_shape)
x_shape = (3, 4)[:ndim]
x, key = randn(input_shape, dtype=input_dtype, key=self.key)
x, key = randn(x_shape, dtype=input_dtype, key=key)
A = ConvolveByX(x=x, input_shape=input_shape, input_dtype=input_dtype, mode=mode, jit=jit)
adjoint_AtA_test(A, self.key)
adjoint_AAt_test(A, self.key)
class ConvolveByXTestObj:
def __init__(self):
dtype = np.float32
key = jax.random.PRNGKey(12345)
self.x_A, key = randn((3,), dtype=dtype, key=key)
self.x_B, key = randn((3,), dtype=dtype, key=key)
self.x_C, key = randn((5,), dtype=dtype, key=key)
self.A = ConvolveByX(input_shape=(32,), x=self.x_A)
self.B = ConvolveByX(input_shape=(32,), x=self.x_B)
self.C = ConvolveByX(input_shape=(32,), x=self.x_C)
# Matrix for a 'generic linop'
m = self.A.output_shape[0]
n = self.A.input_shape[0]
G_mat, key = randn((m, n), dtype=dtype, key=key)
self.G = AbsMatOp(G_mat)
self.h, key = randn((32,), dtype=dtype, key=key)
self.scalar = 3.141
@pytest.fixture
def cbx_testobj(request):
yield ConvolveByXTestObj()
@pytest.mark.parametrize("operator", [op.mul, op.truediv])
def test_cbx_scalar_left(cbx_testobj, operator):
A = operator(cbx_testobj.A, cbx_testobj.scalar)
h = cbx_testobj.h
B = ConvolveByX(input_shape=(32,), x=operator(cbx_testobj.x_A, cbx_testobj.scalar))
np.testing.assert_allclose(A @ h, B @ h, rtol=5e-5)
@pytest.mark.parametrize("operator", [op.mul, op.truediv])
def test_cbx_scalar_right(cbx_testobj, operator):
if operator == op.truediv:
pytest.xfail("scalar / LinearOperator is not supported")
A = operator(cbx_testobj.scalar, cbx_testobj.A)
h = cbx_testobj.h
B = ConvolveByX(input_shape=(32,), x=operator(cbx_testobj.scalar, cbx_testobj.x_A))
np.testing.assert_allclose(A @ h, B @ h, rtol=5e-5)
@pytest.mark.parametrize("operator", [op.add, op.sub])
def test_convolve_add_sub(cbx_testobj, operator):
A = cbx_testobj.A
B = cbx_testobj.B
C = cbx_testobj.C
h = cbx_testobj.h
# Two operators of same size
AB = operator(A, B)
ABh = AB @ h
AfiltBh = operator(A @ h, B @ h)
np.testing.assert_allclose(ABh, AfiltBh, rtol=5e-5)
# Two operators of different size
with pytest.raises(ValueError):
operator(A, C)
@pytest.mark.parametrize("operator", [op.add, op.sub])
def test_add_sub_different_mode(cbx_testobj, operator):
# These tests get caught inside of the _wrap_add_sub input/output shape checks,
# not the explicit mode check inside of the wrapped __add__ method
B_same = ConvolveByX(input_shape=(32,), x=cbx_testobj.x_B, mode="same")
with pytest.raises(ValueError):
operator(cbx_testobj.A, B_same)
@pytest.mark.parametrize("operator", [op.add, op.sub])
def test_add_sum_generic_linop(cbx_testobj, operator):
    """ConvolveByX combined with AbsMatOp yields a generic LinearOperator."""
    combined = operator(cbx_testobj.A, cbx_testobj.G)
    assert isinstance(combined, LinearOperator)
    # The combination must still evaluate pointwise.
    signal = cbx_testobj.h
    expected = operator(cbx_testobj.A @ signal, cbx_testobj.G @ signal)
    np.testing.assert_allclose(combined @ signal, expected, rtol=5e-5)
@pytest.mark.parametrize("operator", [op.add, op.sub])
def test_add_sum_conv(cbx_testobj, operator):
    """Combining a ConvolveByX with itself stays a ConvolveByX."""
    combined = operator(cbx_testobj.A, cbx_testobj.A)
    assert isinstance(combined, ConvolveByX)
    # The combination must still evaluate pointwise.
    signal = cbx_testobj.h
    expected = operator(cbx_testobj.A @ signal, cbx_testobj.A @ signal)
    np.testing.assert_allclose(combined @ signal, expected, rtol=5e-5)
@pytest.mark.parametrize("operator", [op.mul, op.truediv])
def test_mul_div_generic_linop(cbx_testobj, operator):
    """Multiplication/division between ConvolveByX and AbsMatOp is undefined."""
    with pytest.raises(TypeError):
        operator(cbx_testobj.A, cbx_testobj.G)
def test_invalid_mode(cbx_testobj):
    """An unknown convolution mode is rejected at construction time."""
    with pytest.raises(ValueError):
        ConvolveByX(input_shape=(32,), x=cbx_testobj.x_A, mode="foo")
def test_dimension_mismatch(cbx_testobj):
    """A 2-dim input shape is incompatible with a 1-dim kernel."""
    with pytest.raises(ValueError):
        ConvolveByX(input_shape=(32, 32), x=cbx_testobj.x_A)
def test_ndarray_x():
    """A plain NumPy kernel is converted to a jax DeviceArray on construction."""
    kernel = np.random.randn(3, 3).astype(np.float32)
    conv_op = ConvolveByX(input_shape=(32, 32), x=kernel)
    assert isinstance(conv_op.x, jax.interpreters.xla.DeviceArray)
| [
"jax.random.PRNGKey",
"scico.random.randn",
"jax.scipy.signal.convolve",
"scico.test.linop.test_linop.adjoint_AtA_test",
"numpy.testing.assert_allclose",
"scico.linop.ConvolveByX",
"pytest.mark.parametrize",
"scico.linop.Convolve",
"pytest.raises",
"scico.test.linop.test_linop.AbsMatOp",
"scico.... | [((2669, 2726), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""operator"""', '[op.mul, op.truediv]'], {}), "('operator', [op.mul, op.truediv])\n", (2692, 2726), False, 'import pytest\n'), ((2968, 3025), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""operator"""', '[op.mul, op.truediv]'], {}), "('operator', [op.mul, op.truediv])\n", (2991, 3025), False, 'import pytest\n'), ((3364, 3417), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""operator"""', '[op.add, op.sub]'], {}), "('operator', [op.add, op.sub])\n", (3387, 3417), False, 'import pytest\n'), ((3799, 3852), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""operator"""', '[op.add, op.sub]'], {}), "('operator', [op.add, op.sub])\n", (3822, 3852), False, 'import pytest\n'), ((4206, 4259), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""operator"""', '[op.add, op.sub]'], {}), "('operator', [op.add, op.sub])\n", (4229, 4259), False, 'import pytest\n'), ((4622, 4675), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""operator"""', '[op.add, op.sub]'], {}), "('operator', [op.add, op.sub])\n", (4645, 4675), False, 'import pytest\n'), ((5023, 5080), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""operator"""', '[op.mul, op.truediv]'], {}), "('operator', [op.mul, op.truediv])\n", (5046, 5080), False, 'import pytest\n'), ((8144, 8201), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""operator"""', '[op.mul, op.truediv]'], {}), "('operator', [op.mul, op.truediv])\n", (8167, 8201), False, 'import pytest\n'), ((8472, 8529), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""operator"""', '[op.mul, op.truediv]'], {}), "('operator', [op.mul, op.truediv])\n", (8495, 8529), False, 'import pytest\n'), ((8897, 8950), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""operator"""', '[op.add, op.sub]'], {}), "('operator', [op.add, op.sub])\n", (8920, 8950), False, 'import pytest\n'), ((9358, 9411), 
'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""operator"""', '[op.add, op.sub]'], {}), "('operator', [op.add, op.sub])\n", (9381, 9411), False, 'import pytest\n'), ((9778, 9831), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""operator"""', '[op.add, op.sub]'], {}), "('operator', [op.add, op.sub])\n", (9801, 9831), False, 'import pytest\n'), ((10229, 10282), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""operator"""', '[op.add, op.sub]'], {}), "('operator', [op.add, op.sub])\n", (10252, 10282), False, 'import pytest\n'), ((10668, 10725), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""operator"""', '[op.mul, op.truediv]'], {}), "('operator', [op.mul, op.truediv])\n", (10691, 10725), False, 'import pytest\n'), ((391, 457), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_dtype"""', '[np.float32, np.complex64]'], {}), "('input_dtype', [np.float32, np.complex64])\n", (414, 457), False, 'import pytest\n'), ((463, 520), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_shape"""', '[(32,), (32, 48)]'], {}), "('input_shape', [(32,), (32, 48)])\n", (486, 520), False, 'import pytest\n'), ((526, 584), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['full', 'valid', 'same']"], {}), "('mode', ['full', 'valid', 'same'])\n", (549, 584), False, 'import pytest\n'), ((590, 635), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""jit"""', '[False, True]'], {}), "('jit', [False, True])\n", (613, 635), False, 'import pytest\n'), ((1145, 1211), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_dtype"""', '[np.float32, np.complex64]'], {}), "('input_dtype', [np.float32, np.complex64])\n", (1168, 1211), False, 'import pytest\n'), ((1217, 1274), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_shape"""', '[(32,), (32, 48)]'], {}), "('input_shape', [(32,), (32, 48)])\n", (1240, 1274), False, 'import pytest\n'), ((1280, 1338), 
'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['full', 'valid', 'same']"], {}), "('mode', ['full', 'valid', 'same'])\n", (1303, 1338), False, 'import pytest\n'), ((1344, 1389), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""jit"""', '[False, True]'], {}), "('jit', [False, True])\n", (1367, 1389), False, 'import pytest\n'), ((2913, 2965), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(A @ x)', '(B @ x)'], {'rtol': '(5e-05)'}), '(A @ x, B @ x, rtol=5e-05)\n', (2939, 2965), True, 'import numpy as np\n'), ((3309, 3361), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(A @ x)', '(B @ x)'], {'rtol': '(5e-05)'}), '(A @ x, B @ x, rtol=5e-05)\n', (3335, 3361), True, 'import numpy as np\n'), ((3649, 3698), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ABx', 'AxBx'], {'rtol': '(5e-05)'}), '(ABx, AxBx, rtol=5e-05)\n', (3675, 3698), True, 'import numpy as np\n'), ((4073, 4130), 'scico.linop.Convolve', 'Convolve', ([], {'input_shape': '(32,)', 'h': 'testobj.psf_B', 'mode': '"""same"""'}), "(input_shape=(32,), h=testobj.psf_B, mode='same')\n", (4081, 4130), False, 'from scico.linop import Convolve, ConvolveByX, LinearOperator\n'), ((4575, 4619), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['a', 'b'], {'rtol': '(5e-05)'}), '(a, b, rtol=5e-05)\n', (4601, 4619), True, 'import numpy as np\n'), ((4976, 5020), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['a', 'b'], {'rtol': '(5e-05)'}), '(a, b, rtol=5e-05)\n', (5002, 5020), True, 'import numpy as np\n'), ((5674, 5709), 'scico.linop.Convolve', 'Convolve', ([], {'input_shape': '(32, 32)', 'h': 'h'}), '(input_shape=(32, 32), h=h)\n', (5682, 5709), False, 'from scico.linop import Convolve, ConvolveByX, LinearOperator\n'), ((5883, 5949), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_dtype"""', '[np.float32, np.complex64]'], {}), "('input_dtype', [np.float32, np.complex64])\n", (5906, 
5949), False, 'import pytest\n'), ((5955, 6012), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_shape"""', '[(32,), (32, 48)]'], {}), "('input_shape', [(32,), (32, 48)])\n", (5978, 6012), False, 'import pytest\n'), ((6018, 6076), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['full', 'valid', 'same']"], {}), "('mode', ['full', 'valid', 'same'])\n", (6041, 6076), False, 'import pytest\n'), ((6082, 6127), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""jit"""', '[False, True]'], {}), "('jit', [False, True])\n", (6105, 6127), False, 'import pytest\n'), ((6624, 6690), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_dtype"""', '[np.float32, np.complex64]'], {}), "('input_dtype', [np.float32, np.complex64])\n", (6647, 6690), False, 'import pytest\n'), ((6696, 6753), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_shape"""', '[(32,), (32, 48)]'], {}), "('input_shape', [(32,), (32, 48)])\n", (6719, 6753), False, 'import pytest\n'), ((6759, 6817), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['full', 'valid', 'same']"], {}), "('mode', ['full', 'valid', 'same'])\n", (6782, 6817), False, 'import pytest\n'), ((6823, 6868), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""jit"""', '[False, True]'], {}), "('jit', [False, True])\n", (6846, 6868), False, 'import pytest\n'), ((8417, 8469), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(A @ h)', '(B @ h)'], {'rtol': '(5e-05)'}), '(A @ h, B @ h, rtol=5e-05)\n', (8443, 8469), True, 'import numpy as np\n'), ((8842, 8894), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(A @ h)', '(B @ h)'], {'rtol': '(5e-05)'}), '(A @ h, B @ h, rtol=5e-05)\n', (8868, 8894), True, 'import numpy as np\n'), ((9205, 9257), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ABh', 'AfiltBh'], {'rtol': '(5e-05)'}), '(ABh, AfiltBh, rtol=5e-05)\n', (9231, 9257), True, 'import numpy 
as np\n'), ((9636, 9698), 'scico.linop.ConvolveByX', 'ConvolveByX', ([], {'input_shape': '(32,)', 'x': 'cbx_testobj.x_B', 'mode': '"""same"""'}), "(input_shape=(32,), x=cbx_testobj.x_B, mode='same')\n", (9647, 9698), False, 'from scico.linop import Convolve, ConvolveByX, LinearOperator\n'), ((10182, 10226), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['a', 'b'], {'rtol': '(5e-05)'}), '(a, b, rtol=5e-05)\n', (10208, 10226), True, 'import numpy as np\n'), ((10621, 10665), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['a', 'b'], {'rtol': '(5e-05)'}), '(a, b, rtol=5e-05)\n', (10647, 10665), True, 'import numpy as np\n'), ((11349, 11387), 'scico.linop.ConvolveByX', 'ConvolveByX', ([], {'input_shape': '(32, 32)', 'x': 'x'}), '(input_shape=(32, 32), x=x)\n', (11360, 11387), False, 'from scico.linop import Convolve, ConvolveByX, LinearOperator\n'), ((359, 384), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(12345)'], {}), '(12345)\n', (377, 384), False, 'import jax\n'), ((786, 837), 'scico.random.randn', 'randn', (['input_shape'], {'dtype': 'input_dtype', 'key': 'self.key'}), '(input_shape, dtype=input_dtype, key=self.key)\n', (791, 837), False, 'from scico.random import randn\n'), ((857, 904), 'scico.random.randn', 'randn', (['filter_shape'], {'dtype': 'input_dtype', 'key': 'key'}), '(filter_shape, dtype=input_dtype, key=key)\n', (862, 904), False, 'from scico.random import randn\n'), ((918, 1007), 'scico.linop.Convolve', 'Convolve', ([], {'h': 'psf', 'input_shape': 'input_shape', 'input_dtype': 'input_dtype', 'mode': 'mode', 'jit': 'jit'}), '(h=psf, input_shape=input_shape, input_dtype=input_dtype, mode=mode,\n jit=jit)\n', (926, 1007), False, 'from scico.linop import Convolve, ConvolveByX, LinearOperator\n'), ((1035, 1069), 'jax.scipy.signal.convolve', 'signal.convolve', (['x', 'psf'], {'mode': 'mode'}), '(x, psf, mode=mode)\n', (1050, 1069), True, 'import jax.scipy.signal as signal\n'), ((1542, 1593), 'scico.random.randn', 
'randn', (['input_shape'], {'dtype': 'input_dtype', 'key': 'self.key'}), '(input_shape, dtype=input_dtype, key=self.key)\n', (1547, 1593), False, 'from scico.random import randn\n'), ((1613, 1660), 'scico.random.randn', 'randn', (['filter_shape'], {'dtype': 'input_dtype', 'key': 'key'}), '(filter_shape, dtype=input_dtype, key=key)\n', (1618, 1660), False, 'from scico.random import randn\n'), ((1674, 1763), 'scico.linop.Convolve', 'Convolve', ([], {'h': 'psf', 'input_shape': 'input_shape', 'input_dtype': 'input_dtype', 'mode': 'mode', 'jit': 'jit'}), '(h=psf, input_shape=input_shape, input_dtype=input_dtype, mode=mode,\n jit=jit)\n', (1682, 1763), False, 'from scico.linop import Convolve, ConvolveByX, LinearOperator\n'), ((1769, 1798), 'scico.test.linop.test_linop.adjoint_AtA_test', 'adjoint_AtA_test', (['A', 'self.key'], {}), '(A, self.key)\n', (1785, 1798), False, 'from scico.test.linop.test_linop import AbsMatOp, adjoint_AAt_test, adjoint_AtA_test\n'), ((1807, 1836), 'scico.test.linop.test_linop.adjoint_AAt_test', 'adjoint_AAt_test', (['A', 'self.key'], {}), '(A, self.key)\n', (1823, 1836), False, 'from scico.test.linop.test_linop import AbsMatOp, adjoint_AAt_test, adjoint_AtA_test\n'), ((1927, 1952), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(12345)'], {}), '(12345)\n', (1945, 1952), False, 'import jax\n'), ((1980, 2013), 'scico.random.randn', 'randn', (['(3,)'], {'dtype': 'dtype', 'key': 'key'}), '((3,), dtype=dtype, key=key)\n', (1985, 2013), False, 'from scico.random import randn\n'), ((2040, 2073), 'scico.random.randn', 'randn', (['(3,)'], {'dtype': 'dtype', 'key': 'key'}), '((3,), dtype=dtype, key=key)\n', (2045, 2073), False, 'from scico.random import randn\n'), ((2100, 2133), 'scico.random.randn', 'randn', (['(5,)'], {'dtype': 'dtype', 'key': 'key'}), '((5,), dtype=dtype, key=key)\n', (2105, 2133), False, 'from scico.random import randn\n'), ((2152, 2193), 'scico.linop.Convolve', 'Convolve', ([], {'input_shape': '(32,)', 'h': 'self.psf_A'}), 
'(input_shape=(32,), h=self.psf_A)\n', (2160, 2193), False, 'from scico.linop import Convolve, ConvolveByX, LinearOperator\n'), ((2211, 2252), 'scico.linop.Convolve', 'Convolve', ([], {'input_shape': '(32,)', 'h': 'self.psf_B'}), '(input_shape=(32,), h=self.psf_B)\n', (2219, 2252), False, 'from scico.linop import Convolve, ConvolveByX, LinearOperator\n'), ((2270, 2311), 'scico.linop.Convolve', 'Convolve', ([], {'input_shape': '(32,)', 'h': 'self.psf_C'}), '(input_shape=(32,), h=self.psf_C)\n', (2278, 2311), False, 'from scico.linop import Convolve, ConvolveByX, LinearOperator\n'), ((2442, 2477), 'scico.random.randn', 'randn', (['(m, n)'], {'dtype': 'dtype', 'key': 'key'}), '((m, n), dtype=dtype, key=key)\n', (2447, 2477), False, 'from scico.random import randn\n'), ((2495, 2510), 'scico.test.linop.test_linop.AbsMatOp', 'AbsMatOp', (['G_mat'], {}), '(G_mat)\n', (2503, 2510), False, 'from scico.test.linop.test_linop import AbsMatOp, adjoint_AAt_test, adjoint_AtA_test\n'), ((2534, 2568), 'scico.random.randn', 'randn', (['(32,)'], {'dtype': 'dtype', 'key': 'key'}), '((32,), dtype=dtype, key=key)\n', (2539, 2568), False, 'from scico.random import randn\n'), ((3107, 3163), 'pytest.xfail', 'pytest.xfail', (['"""scalar / LinearOperator is not supported"""'], {}), "('scalar / LinearOperator is not supported')\n", (3119, 3163), False, 'import pytest\n'), ((3746, 3771), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3759, 3771), False, 'import pytest\n'), ((4140, 4165), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4153, 4165), False, 'import pytest\n'), ((5189, 5213), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (5202, 5213), False, 'import pytest\n'), ((5327, 5352), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5340, 5352), False, 'import pytest\n'), ((5362, 5418), 'scico.linop.Convolve', 'Convolve', ([], {'input_shape': '(32,)', 'h': 'testobj.psf_A', 'mode': 
'"""foo"""'}), "(input_shape=(32,), h=testobj.psf_A, mode='foo')\n", (5370, 5418), False, 'from scico.linop import Convolve, ConvolveByX, LinearOperator\n'), ((5468, 5493), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5481, 5493), False, 'import pytest\n'), ((5545, 5592), 'scico.linop.Convolve', 'Convolve', ([], {'input_shape': '(32, 32)', 'h': 'testobj.psf_A'}), '(input_shape=(32, 32), h=testobj.psf_A)\n', (5553, 5592), False, 'from scico.linop import Convolve, ConvolveByX, LinearOperator\n'), ((5851, 5876), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(12345)'], {}), '(12345)\n', (5869, 5876), False, 'import jax\n'), ((6273, 6324), 'scico.random.randn', 'randn', (['input_shape'], {'dtype': 'input_dtype', 'key': 'self.key'}), '(input_shape, dtype=input_dtype, key=self.key)\n', (6278, 6324), False, 'from scico.random import randn\n'), ((6342, 6384), 'scico.random.randn', 'randn', (['x_shape'], {'dtype': 'input_dtype', 'key': 'key'}), '(x_shape, dtype=input_dtype, key=key)\n', (6347, 6384), False, 'from scico.random import randn\n'), ((6398, 6489), 'scico.linop.ConvolveByX', 'ConvolveByX', ([], {'x': 'x', 'input_shape': 'input_shape', 'input_dtype': 'input_dtype', 'mode': 'mode', 'jit': 'jit'}), '(x=x, input_shape=input_shape, input_dtype=input_dtype, mode=\n mode, jit=jit)\n', (6409, 6489), False, 'from scico.linop import Convolve, ConvolveByX, LinearOperator\n'), ((6516, 6548), 'jax.scipy.signal.convolve', 'signal.convolve', (['x', 'h'], {'mode': 'mode'}), '(x, h, mode=mode)\n', (6531, 6548), True, 'import jax.scipy.signal as signal\n'), ((7016, 7067), 'scico.random.randn', 'randn', (['input_shape'], {'dtype': 'input_dtype', 'key': 'self.key'}), '(input_shape, dtype=input_dtype, key=self.key)\n', (7021, 7067), False, 'from scico.random import randn\n'), ((7085, 7127), 'scico.random.randn', 'randn', (['x_shape'], {'dtype': 'input_dtype', 'key': 'key'}), '(x_shape, dtype=input_dtype, key=key)\n', (7090, 7127), False, 'from 
scico.random import randn\n'), ((7141, 7232), 'scico.linop.ConvolveByX', 'ConvolveByX', ([], {'x': 'x', 'input_shape': 'input_shape', 'input_dtype': 'input_dtype', 'mode': 'mode', 'jit': 'jit'}), '(x=x, input_shape=input_shape, input_dtype=input_dtype, mode=\n mode, jit=jit)\n', (7152, 7232), False, 'from scico.linop import Convolve, ConvolveByX, LinearOperator\n'), ((7237, 7266), 'scico.test.linop.test_linop.adjoint_AtA_test', 'adjoint_AtA_test', (['A', 'self.key'], {}), '(A, self.key)\n', (7253, 7266), False, 'from scico.test.linop.test_linop import AbsMatOp, adjoint_AAt_test, adjoint_AtA_test\n'), ((7275, 7304), 'scico.test.linop.test_linop.adjoint_AAt_test', 'adjoint_AAt_test', (['A', 'self.key'], {}), '(A, self.key)\n', (7291, 7304), False, 'from scico.test.linop.test_linop import AbsMatOp, adjoint_AAt_test, adjoint_AtA_test\n'), ((7398, 7423), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(12345)'], {}), '(12345)\n', (7416, 7423), False, 'import jax\n'), ((7449, 7482), 'scico.random.randn', 'randn', (['(3,)'], {'dtype': 'dtype', 'key': 'key'}), '((3,), dtype=dtype, key=key)\n', (7454, 7482), False, 'from scico.random import randn\n'), ((7507, 7540), 'scico.random.randn', 'randn', (['(3,)'], {'dtype': 'dtype', 'key': 'key'}), '((3,), dtype=dtype, key=key)\n', (7512, 7540), False, 'from scico.random import randn\n'), ((7565, 7598), 'scico.random.randn', 'randn', (['(5,)'], {'dtype': 'dtype', 'key': 'key'}), '((5,), dtype=dtype, key=key)\n', (7570, 7598), False, 'from scico.random import randn\n'), ((7617, 7659), 'scico.linop.ConvolveByX', 'ConvolveByX', ([], {'input_shape': '(32,)', 'x': 'self.x_A'}), '(input_shape=(32,), x=self.x_A)\n', (7628, 7659), False, 'from scico.linop import Convolve, ConvolveByX, LinearOperator\n'), ((7677, 7719), 'scico.linop.ConvolveByX', 'ConvolveByX', ([], {'input_shape': '(32,)', 'x': 'self.x_B'}), '(input_shape=(32,), x=self.x_B)\n', (7688, 7719), False, 'from scico.linop import Convolve, ConvolveByX, LinearOperator\n'), ((7737, 
7779), 'scico.linop.ConvolveByX', 'ConvolveByX', ([], {'input_shape': '(32,)', 'x': 'self.x_C'}), '(input_shape=(32,), x=self.x_C)\n', (7748, 7779), False, 'from scico.linop import Convolve, ConvolveByX, LinearOperator\n'), ((7910, 7945), 'scico.random.randn', 'randn', (['(m, n)'], {'dtype': 'dtype', 'key': 'key'}), '((m, n), dtype=dtype, key=key)\n', (7915, 7945), False, 'from scico.random import randn\n'), ((7963, 7978), 'scico.test.linop.test_linop.AbsMatOp', 'AbsMatOp', (['G_mat'], {}), '(G_mat)\n', (7971, 7978), False, 'from scico.test.linop.test_linop import AbsMatOp, adjoint_AAt_test, adjoint_AtA_test\n'), ((8002, 8036), 'scico.random.randn', 'randn', (['(32,)'], {'dtype': 'dtype', 'key': 'key'}), '((32,), dtype=dtype, key=key)\n', (8007, 8036), False, 'from scico.random import randn\n'), ((8619, 8675), 'pytest.xfail', 'pytest.xfail', (['"""scalar / LinearOperator is not supported"""'], {}), "('scalar / LinearOperator is not supported')\n", (8631, 8675), False, 'import pytest\n'), ((9305, 9330), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (9318, 9330), False, 'import pytest\n'), ((9708, 9733), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (9721, 9733), False, 'import pytest\n'), ((10841, 10865), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (10854, 10865), False, 'import pytest\n'), ((10991, 11016), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11004, 11016), False, 'import pytest\n'), ((11026, 11087), 'scico.linop.ConvolveByX', 'ConvolveByX', ([], {'input_shape': '(32,)', 'x': 'cbx_testobj.x_A', 'mode': '"""foo"""'}), "(input_shape=(32,), x=cbx_testobj.x_A, mode='foo')\n", (11037, 11087), False, 'from scico.linop import Convolve, ConvolveByX, LinearOperator\n'), ((11141, 11166), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11154, 11166), False, 'import pytest\n'), ((11215, 11267), 'scico.linop.ConvolveByX', 
'ConvolveByX', ([], {'input_shape': '(32, 32)', 'x': 'cbx_testobj.x_A'}), '(input_shape=(32, 32), x=cbx_testobj.x_A)\n', (11226, 11267), False, 'from scico.linop import Convolve, ConvolveByX, LinearOperator\n'), ((5625, 5646), 'numpy.random.randn', 'np.random.randn', (['(3)', '(3)'], {}), '(3, 3)\n', (5640, 5646), True, 'import numpy as np\n'), ((11300, 11321), 'numpy.random.randn', 'np.random.randn', (['(3)', '(3)'], {}), '(3, 3)\n', (11315, 11321), True, 'import numpy as np\n')] |
import os
import random
import numpy as np
import tensorflow as tf
import mahotas
import cv2
import itertools
import time
import csv
from typing import List, Dict
from tkinter import messagebox
from PIL import Image
import matplotlib.pyplot as plt
class Algorithms:
    """Texture-based BI-RADS classification of mammography images.

    Images are loaded from ``imagens/<class>`` directories, requantized to a
    configurable number of gray shades and described by up to 13 Haralick
    texture features plus the 7 invariant Hu moments.  A small dense Keras
    network is trained on these descriptors and evaluated via a confusion
    matrix and the derived sensitivity/specificity/accuracy metrics.
    """

    # Class labels; each one maps to a subdirectory of the "imagens" folder.
    BIRADS_CLASSES = ["1", "2", "3", "4"]

    def __init__(self) -> None:
        # Loaded images keyed by BI-RADS class ("1" .. "4").
        self.images: Dict[str, List[Image.Image]] = {}
        self.set_number_of_shades_of_gray(32)
        # Enable all 13 Haralick descriptors plus the Hu moments by default.
        self.set_used_descriptors(
            True, True, True, True, True, True, True,
            True, True, True, True, True, True, True
        )

    @staticmethod
    def get_7_invariant_hu_moments(image):
        """Return the 7 invariant Hu moments of ``image`` as a flat array."""
        all_hu_moments = cv2.moments(image)
        return cv2.HuMoments(all_hu_moments).flatten()

    @staticmethod
    def get_haralick_descriptors(image):
        """Return the 13 Haralick texture descriptors of ``image``.

        Descriptors are computed for co-occurrence distances 1, 2, 4, 8 and
        16 pixels, averaged over the four co-occurrence directions
        (left-to-right, top-to-bottom, top-left-to-bottom-right,
        top-right-to-bottom-left) and then averaged over the distances.
        The 13 features are:

        1.  Angular Second Moment (Energy / Uniformity)
        2.  Contrast
        3.  Correlation
        4.  Sum of Squares (Variance)
        5.  Inverse Difference Moment (Texture Homogeneity)
        6.  Sum Average
        7.  Sum Variance
        8.  Sum Entropy
        9.  Entropy
        10. Difference Variance
        11. Difference Entropy
        12. Information Measures of Correlation
        13. Information Measures of Correlation
        """
        radii = [1, 2, 4, 8, 16]
        num_of_haralick_descriptors = 13
        haralick_descriptors_for_all_radii: np.ndarray = np.empty(
            shape=(len(radii), num_of_haralick_descriptors))
        for i, radius in enumerate(radii):
            haralick_descriptors: np.ndarray = np.array(mahotas.features.haralick(
                image, distance=radius
            ))
            # Average over the four directions (one row per direction).
            haralick_descriptors_for_all_radii[i] = haralick_descriptors.mean(axis=0)
        # Average over the co-occurrence distances as well.
        return haralick_descriptors_for_all_radii.mean(axis=0)

    def get_all_image_descriptors(self, image):
        """Return all 20 descriptors (13 Haralick + 7 Hu moments) of ``image``."""
        descriptors = self.get_haralick_descriptors(image)
        descriptors = np.append(
            descriptors, self.get_7_invariant_hu_moments(image), axis=0)
        return descriptors

    def get_image_descriptors(self, image):
        """Return only the descriptors selected via :meth:`set_used_descriptors`."""
        descriptors = self.get_all_image_descriptors(image)
        return np.array([descriptors[i] for i in self.indexes_of_the_used_descriptors])

    def resample_image(self, image):
        """Requantize ``image`` to the configured number of gray shades (uint8)."""
        return np.round(np.array(image) / self.resample_ratio).astype(np.uint8)

    def set_number_of_shades_of_gray(self, number_of_shades: int):
        """Set the gray-level quantization and the matching resampling ratio."""
        self.number_of_shades = number_of_shades
        # Integer divisor that maps the 0-255 range onto the reduced gray scale.
        self.resample_ratio = int(round(255 / number_of_shades))

    def get_training_and_test_set_for_birads_class(self, birads_class):
        """Build descriptor matrices for one BI-RADS class.

        A quarter of the samples is moved (without replacement) from the
        training set into the testing set, so the two sets are disjoint.

        :param birads_class: Class key ("1".."4") into ``self.images``.
        :return: Tuple ``(training_set, testing_set)`` of descriptor matrices.
        """
        images = self.images[birads_class]
        testing_set_size = int(len(images) / 4)
        training_set: np.ndarray = np.empty(
            shape=(len(images), len(self.indexes_of_the_used_descriptors)),
            dtype=np.float64
        )
        for i in range(len(images)):
            image = self.resample_image(images[i])
            training_set[i] = self.get_image_descriptors(image)
        testing_set: np.ndarray = np.empty(
            shape=(testing_set_size, len(self.indexes_of_the_used_descriptors)),
            dtype=np.float64
        )
        for i in range(testing_set_size):
            # Pick a random sample and move it from training to testing.
            random_index = random.randint(0, len(training_set) - 1)
            testing_set[i] = training_set[random_index]
            training_set = np.delete(training_set, random_index, axis=0)
        return (training_set, testing_set)

    def get_training_and_test_set(self):
        """Build the combined training/testing sets and labels for all classes.

        Populates ``images_training_set``, ``images_training_labels_set``,
        ``images_testing_set`` and ``images_testing_labels_set``.  Labels are
        zero-based class indices (BI-RADS class minus one).
        """
        self.images_training_set: np.ndarray = np.empty(
            shape=(0, len(self.indexes_of_the_used_descriptors)),
            dtype=np.float64
        )
        self.images_training_labels_set = []
        self.images_testing_set: np.ndarray = np.empty(
            shape=(0, len(self.indexes_of_the_used_descriptors)),
            dtype=np.float64
        )
        self.images_testing_labels_set = []
        for birads_class in self.BIRADS_CLASSES:
            training_set, testing_set = self.get_training_and_test_set_for_birads_class(
                birads_class)
            self.images_training_set = np.append(self.images_training_set, training_set, axis=0)
            training_labels = [int(birads_class) - 1] * len(training_set)
            self.images_training_labels_set.extend(training_labels)
            self.images_testing_set = np.append(self.images_testing_set, testing_set, axis=0)
            testing_labels = [int(birads_class) - 1] * len(testing_set)
            self.images_testing_labels_set.extend(testing_labels)
        self.images_testing_labels_set: np.ndarray = np.array(
            self.images_testing_labels_set
        )
        self.images_training_labels_set: np.ndarray = np.array(
            self.images_training_labels_set
        )

    def set_used_descriptors(
        self,
        energyCheck: bool,
        contrastCheck: bool,
        correlationCheck: bool,
        varianceCheck: bool,
        homogeneityCheck: bool,
        sumAverageCheck: bool,
        sumVarianceCheck: bool,
        sumEntropyCheck: bool,
        entropyCheck: bool,
        differenceVarianceCheck: bool,
        differenceEntropyCheck: bool,
        informationMeasuresOfCorrelation12Check: bool,
        informationMeasuresOfCorrelation13Check: bool,
        sevenInvariantHuMomentsCheck: bool
    ):
        """Select which descriptors participate in training and prediction.

        The first 13 flags correspond to the Haralick features (indices
        0-12); the last flag enables all 7 Hu moments (indices 13-19).
        """
        descriptors_flags = [
            energyCheck,
            contrastCheck,
            correlationCheck,
            varianceCheck,
            homogeneityCheck,
            sumAverageCheck,
            sumVarianceCheck,
            sumEntropyCheck,
            entropyCheck,
            differenceVarianceCheck,
            differenceEntropyCheck,
            informationMeasuresOfCorrelation12Check,
            informationMeasuresOfCorrelation13Check
        ]
        self.indexes_of_the_used_descriptors = [
            i for i in range(len(descriptors_flags)) if descriptors_flags[i]
        ]
        if sevenInvariantHuMomentsCheck:
            # Hu moments occupy the 7 positions after the Haralick features.
            self.indexes_of_the_used_descriptors.extend(
                list(range(len(descriptors_flags), len(descriptors_flags) + 7))
            )

    def create_and_compile_model(self, num_neurons):
        """Create ``self.model``: a dense net with one hidden layer of
        ``num_neurons`` ReLU units and a 4-way softmax output."""
        input_shape = (len(self.indexes_of_the_used_descriptors),)
        self.model = tf.keras.Sequential([
            tf.keras.layers.Flatten(input_shape=input_shape),
            tf.keras.layers.Dense(num_neurons, activation=tf.nn.relu),
            tf.keras.layers.Dense(4, activation=tf.nn.softmax),
        ])
        self.model.compile(
            optimizer=tf.keras.optimizers.Adam(),
            loss=tf.keras.losses.SparseCategoricalCrossentropy(),
            metrics=[tf.keras.metrics.SparseCategoricalAccuracy()]
        )

    def get_confusion_matrix(self) -> np.ndarray:
        """Return the confusion matrix of the model on the testing set."""
        predictions = self.model.predict(self.images_testing_set)
        # Convert per-class probabilities to a single predicted class index.
        predictions = np.argmax(predictions, axis=1)
        return tf.math.confusion_matrix(
            self.images_testing_labels_set,
            predictions
        ).numpy()

    @staticmethod
    def plot_confusion_matrix(
        confusion_matrix: np.ndarray,
        classes,
        normalize=False,
        title='Confusion matrix',
        color_map=None
    ):
        """Display ``confusion_matrix`` with matplotlib.

        :param classes: Tick labels for both axes.
        :param normalize: If True, show per-row (true-label) fractions.
        :param color_map: Matplotlib colormap; defaults to ``plt.cm.Blues``.
            (Resolved lazily so matplotlib is not touched at import time.)
        """
        if color_map is None:
            color_map = plt.cm.Blues
        plt.imshow(confusion_matrix, interpolation='nearest', cmap=color_map)
        plt.title(title)
        plt.colorbar()
        tick_marks = np.arange(len(classes))
        plt.xticks(tick_marks, classes, rotation=45)
        plt.yticks(tick_marks, classes)
        if normalize:
            confusion_matrix = (
                confusion_matrix.astype('float') /
                confusion_matrix.sum(axis=1)[:, np.newaxis]
            )
        # Use white text on dark cells and black text on light ones.
        thresh = confusion_matrix.max() / 2.
        for i, j in itertools.product(
                range(confusion_matrix.shape[0]),
                range(confusion_matrix.shape[1])):
            plt.text(j, i, confusion_matrix[i, j],
                     horizontalalignment="center",
                     color="white" if confusion_matrix[i, j] > thresh else "black")
        plt.tight_layout()
        plt.ylabel('True label')
        plt.xlabel('Predicted label')
        plt.show()

    @staticmethod
    def get_metrics(confusion_matrix: np.ndarray) -> Dict[str, np.ndarray]:
        """Derive per-class and aggregate metrics from a confusion matrix.

        :return: Dict with per-class FP/FN/TP/TN/TPR/TNR/ACC arrays and the
            scalar ``sensitivity``, ``specificity`` (means rounded to two
            decimals) and overall ``accuracy``.
        """
        FP = confusion_matrix.sum(axis=0) - np.diag(confusion_matrix)
        FN = confusion_matrix.sum(axis=1) - np.diag(confusion_matrix)
        TP = np.diag(confusion_matrix)
        TN = confusion_matrix.sum() - (FP + FN + TP)
        TPR = TP / (TP + FN)  # Sensitivity, hit rate, recall, or true positive rate
        TNR = TN / (TN + FP)  # Specificity or true negative rate
        ACC = (TP + TN) / (TP + FP + FN + TN)  # Overall accuracy per class
        sensitivity = round(TPR.mean(), 2)
        specificity = round(TNR.mean(), 2)
        accuracy = TP.sum() / confusion_matrix.sum()
        return {
            'FP': FP,
            'FN': FN,
            'TP': TP,
            'TN': TN,
            'TPR': TPR,
            'TNR': TNR,
            'ACC': ACC,
            'sensitivity': sensitivity,
            'specificity': specificity,
            'accuracy': accuracy
        }

    def train(self, num_neurons = 256, num_epochs = 2000):
        """Build, train and evaluate the model.

        Plots the confusion matrix as a side effect.

        :return: Tuple ``(execution_time_seconds, metrics_dict)``.
        """
        self.create_and_compile_model(num_neurons)
        self.get_training_and_test_set()
        start = time.time()
        self.model.fit(
            self.images_training_set,
            self.images_training_labels_set,
            epochs=num_epochs
        )
        execution_time = time.time() - start
        confusion_matrix = self.get_confusion_matrix()
        self.plot_confusion_matrix(confusion_matrix, self.BIRADS_CLASSES)
        return execution_time, self.get_metrics(confusion_matrix)

    def train_different_number_of_neurons_and_epochs(self):
        """Grid-search over hidden-layer sizes and epoch counts.

        Writes all results to ``train_results.csv`` and returns them as a
        list of rows ``[time, accuracy, sensitivity, specificity, neurons,
        epochs]``.
        """
        num_neurons_arr = [32, 64, 128, 256, 512, 1024]
        num_epochs_arr = [100, 500, 1000, 2000, 3000, 4000, 5000]
        results: List[List[float]] = []
        for num_neurons in num_neurons_arr:
            for num_epochs in num_epochs_arr:
                execution_time, metrics = self.train(num_neurons, num_epochs)
                results.append([
                    execution_time,
                    metrics['accuracy'],
                    metrics['sensitivity'],
                    metrics['specificity'],
                    num_neurons,
                    num_epochs,
                ])
        # newline='' is required by the csv module to avoid blank rows on Windows.
        with open('train_results.csv', 'w', newline='') as results_file:
            csv_writer = csv.writer(results_file)
            csv_writer.writerow([
                'Execution time (sec)',
                'Accuracy',
                'Sensitivity',
                'Specificity',
                'Num. of neurons',
                'Num. of epochs',
            ])
            csv_writer.writerows(results)
        return results

    def predict(self, image):
        """Return the predicted zero-based class index (0-3) for ``image``."""
        predictions = self.model.predict(np.array([
            self.get_image_descriptors(image)
        ]))
        return np.argmax(predictions[0])

    @staticmethod
    def load_images_from_dir(dirname: str):
        """Load every original (non-"_cropped") PNG under ``imagens/<dirname>``."""
        images: List[Image.Image] = []
        # Walk every file and subdirectory under e.g. imagens/1.
        for dir_path, _dirs, files in os.walk(os.path.join("imagens", dirname)):
            for file in files:
                if "_cropped" not in file and ".png" in file:
                    complete_path = os.path.join(dir_path, file)
                    images.append(Image.open(complete_path))
        return images

    def load_images(self, show_msg_box = True):
        """Load the image sets for every BI-RADS class into ``self.images``.

        Shows an error dialog and exits if the "imagens" folder is missing;
        optionally shows a completion dialog.
        """
        if not os.path.exists('imagens'):
            messagebox.showinfo('Error', 'The images folder was not found')
            exit(0)
        for birads_class in self.BIRADS_CLASSES:
            self.images[birads_class] = self.load_images_from_dir(birads_class)
        if show_msg_box: messagebox.showinfo('Concluded', 'Images were loaded')
| [
"matplotlib.pyplot.ylabel",
"numpy.array",
"tensorflow.keras.layers.Dense",
"mahotas.features.haralick",
"matplotlib.pyplot.imshow",
"os.path.exists",
"numpy.delete",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"tensorflow.math.confusion_matrix",
"tkinter.messagebox.showinfo",
"mat... | [((689, 707), 'cv2.moments', 'cv2.moments', (['image'], {}), '(image)\n', (700, 707), False, 'import cv2\n'), ((2349, 2421), 'numpy.array', 'np.array', (['[descriptors[i] for i in self.indexes_of_the_used_descriptors]'], {}), '([descriptors[i] for i in self.indexes_of_the_used_descriptors])\n', (2357, 2421), True, 'import numpy as np\n'), ((4800, 4840), 'numpy.array', 'np.array', (['self.images_testing_labels_set'], {}), '(self.images_testing_labels_set)\n', (4808, 4840), True, 'import numpy as np\n'), ((4917, 4958), 'numpy.array', 'np.array', (['self.images_training_labels_set'], {}), '(self.images_training_labels_set)\n', (4925, 4958), True, 'import numpy as np\n'), ((7206, 7236), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (7215, 7236), True, 'import numpy as np\n'), ((7575, 7644), 'matplotlib.pyplot.imshow', 'plt.imshow', (['confusion_matrix'], {'interpolation': '"""nearest"""', 'cmap': 'color_map'}), "(confusion_matrix, interpolation='nearest', cmap=color_map)\n", (7585, 7644), True, 'import matplotlib.pyplot as plt\n'), ((7653, 7669), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (7662, 7669), True, 'import matplotlib.pyplot as plt\n'), ((7678, 7692), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (7690, 7692), True, 'import matplotlib.pyplot as plt\n'), ((7746, 7790), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(45)'}), '(tick_marks, classes, rotation=45)\n', (7756, 7790), True, 'import matplotlib.pyplot as plt\n'), ((7799, 7830), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'classes'], {}), '(tick_marks, classes)\n', (7809, 7830), True, 'import matplotlib.pyplot as plt\n'), ((8384, 8402), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8400, 8402), True, 'import matplotlib.pyplot as plt\n'), ((8411, 8435), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True 
label')\n", (8421, 8435), True, 'import matplotlib.pyplot as plt\n'), ((8444, 8473), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (8454, 8473), True, 'import matplotlib.pyplot as plt\n'), ((8482, 8492), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8490, 8492), True, 'import matplotlib.pyplot as plt\n'), ((8743, 8768), 'numpy.diag', 'np.diag', (['confusion_matrix'], {}), '(confusion_matrix)\n', (8750, 8768), True, 'import numpy as np\n'), ((9648, 9659), 'time.time', 'time.time', ([], {}), '()\n', (9657, 9659), False, 'import time\n'), ((11303, 11328), 'numpy.argmax', 'np.argmax', (['predictions[0]'], {}), '(predictions[0])\n', (11312, 11328), True, 'import numpy as np\n'), ((3559, 3604), 'numpy.delete', 'np.delete', (['training_set', 'random_index'], {'axis': '(0)'}), '(training_set, random_index, axis=0)\n', (3568, 3604), True, 'import numpy as np\n'), ((4313, 4370), 'numpy.append', 'np.append', (['self.images_training_set', 'training_set'], {'axis': '(0)'}), '(self.images_training_set, training_set, axis=0)\n', (4322, 4370), True, 'import numpy as np\n'), ((4552, 4607), 'numpy.append', 'np.append', (['self.images_testing_set', 'testing_set'], {'axis': '(0)'}), '(self.images_testing_set, testing_set, axis=0)\n', (4561, 4607), True, 'import numpy as np\n'), ((8211, 8347), 'matplotlib.pyplot.text', 'plt.text', (['j', 'i', 'confusion_matrix[i, j]'], {'horizontalalignment': '"""center"""', 'color': "('white' if confusion_matrix[i, j] > thresh else 'black')"}), "(j, i, confusion_matrix[i, j], horizontalalignment='center', color=\n 'white' if confusion_matrix[i, j] > thresh else 'black')\n", (8219, 8347), True, 'import matplotlib.pyplot as plt\n'), ((8632, 8657), 'numpy.diag', 'np.diag', (['confusion_matrix'], {}), '(confusion_matrix)\n', (8639, 8657), True, 'import numpy as np\n'), ((8704, 8729), 'numpy.diag', 'np.diag', (['confusion_matrix'], {}), '(confusion_matrix)\n', (8711, 8729), True, 'import 
numpy as np\n'), ((9831, 9842), 'time.time', 'time.time', ([], {}), '()\n', (9840, 9842), False, 'import time\n'), ((10807, 10831), 'csv.writer', 'csv.writer', (['results_file'], {}), '(results_file)\n', (10817, 10831), False, 'import csv\n'), ((11567, 11599), 'os.path.join', 'os.path.join', (['"""imagens"""', 'dirname'], {}), "('imagens', dirname)\n", (11579, 11599), False, 'import os\n'), ((11977, 12002), 'os.path.exists', 'os.path.exists', (['"""imagens"""'], {}), "('imagens')\n", (11991, 12002), False, 'import os\n'), ((12016, 12079), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Error"""', '"""The images folder was not found"""'], {}), "('Error', 'The images folder was not found')\n", (12035, 12079), False, 'from tkinter import messagebox\n'), ((12256, 12310), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Concluded"""', '"""Images were loaded"""'], {}), "('Concluded', 'Images were loaded')\n", (12275, 12310), False, 'from tkinter import messagebox\n'), ((723, 752), 'cv2.HuMoments', 'cv2.HuMoments', (['all_hu_moments'], {}), '(all_hu_moments)\n', (736, 752), False, 'import cv2\n'), ((1650, 1701), 'mahotas.features.haralick', 'mahotas.features.haralick', (['image'], {'distance': 'radii[i]'}), '(image, distance=radii[i])\n', (1675, 1701), False, 'import mahotas\n'), ((6512, 6560), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {'input_shape': 'input_shape'}), '(input_shape=input_shape)\n', (6535, 6560), True, 'import tensorflow as tf\n'), ((6574, 6631), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_neurons'], {'activation': 'tf.nn.relu'}), '(num_neurons, activation=tf.nn.relu)\n', (6595, 6631), True, 'import tensorflow as tf\n'), ((6782, 6832), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(4)'], {'activation': 'tf.nn.softmax'}), '(4, activation=tf.nn.softmax)\n', (6803, 6832), True, 'import tensorflow as tf\n'), ((6896, 6922), 'tensorflow.keras.optimizers.Adam', 
'tf.keras.optimizers.Adam', ([], {}), '()\n', (6920, 6922), True, 'import tensorflow as tf\n'), ((6941, 6988), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {}), '()\n', (6986, 6988), True, 'import tensorflow as tf\n'), ((7253, 7322), 'tensorflow.math.confusion_matrix', 'tf.math.confusion_matrix', (['self.images_testing_labels_set', 'predictions'], {}), '(self.images_testing_labels_set, predictions)\n', (7277, 7322), True, 'import tensorflow as tf\n'), ((7011, 7055), 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {}), '()\n', (7053, 7055), True, 'import tensorflow as tf\n'), ((11731, 11759), 'os.path.join', 'os.path.join', (['dir_path', 'file'], {}), '(dir_path, file)\n', (11743, 11759), False, 'import os\n'), ((2484, 2499), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2492, 2499), True, 'import numpy as np\n'), ((11855, 11880), 'PIL.Image.open', 'Image.open', (['complete_path'], {}), '(complete_path)\n', (11865, 11880), False, 'from PIL import Image\n')] |
import sys
sys.path.append("..")
import os
import math
import torch
import torch.nn as nn
import torchvision
import model.E.E_Blur as BE
import model.E.E_PG as BE_PG
import model.E.E_BIG as BE_BIG
from model.utils.custom_adam import LREQAdam
import lpips
from metric.grad_cam import GradCAM, GradCamPlusPlus, GuidedBackPropagation, mask2cam
import tensorboardX
import numpy as np
import argparse
from model.stylegan1.net import Generator, Mapping #StyleGANv1
import model.stylegan2_generator as model_v2 #StyleGANv2
import model.pggan.pggan_generator as model_pggan #PGGAN
from model.biggan_generator import BigGAN #BigGAN
from model.utils.biggan_config import BigGANConfig
from training_utils import *
#torch.backends.cudnn.enabled = True
#torch.backends.cudnn.benchmark = True
#torch.backends.cudnn.deterministic = False # faster
def train(tensor_writer = None, args = None):
    """Train the encoder E to invert a frozen, pre-trained generator.

    Each iteration samples a batch of latents, decodes them into images
    with the (frozen) generator, re-encodes them with E, and decodes the
    reconstruction.  E is optimized with losses in image space (pixels,
    Grad-CAM++ masks, CAM overlays, guided-backprop gradients) and in
    latent space (w vectors and const/conditional vectors).

    Parameters
    ----------
    tensor_writer : tensorboardX.SummaryWriter
        Writer used to log every loss term.
    args : argparse.Namespace
        Parsed command-line options; ``args.mtype`` selects the
        generator: 1=StyleGANv1, 2=StyleGANv2, 3=PGGAN, 4=BigGAN.

    NOTE(review): relies on module-level globals set in ``__main__``
    (``device``, ``resultPath``, ``resultPath1_1``, ``resultPath1_2``,
    ``resultPath_grad_cam``) and on helpers from ``training_utils``
    (``set_seed``, ``space_loss``, ``one_hot``,
    ``truncated_noise_sample``) — confirm against that module.
    """
    type = args.mtype
    model_path = args.checkpoint_dir_GAN
    # --- Build the frozen generator G and a fresh encoder E ------------
    if type == 1: # StyleGAN1
        Gs = Generator(startf=args.start_features, maxf=512, layer_count=int(math.log(args.img_size,2)-1), latent_size=512, channels=3)
        Gs.load_state_dict(torch.load(model_path+'Gs_dict.pth'))
        Gm = Mapping(num_layers=int(math.log(args.img_size,2)-1)*2, mapping_layers=8, latent_size=512, dlatent_size=512, mapping_fmaps=512) #num_layers: 14->256 / 16->512 / 18->1024
        Gm.load_state_dict(torch.load(model_path+'/Gm_dict.pth'))
        Gm.buffer1 = torch.load(model_path+'/center_tensor.pt')
        const_ = Gs.const
        const1 = const_.repeat(args.batch_size,1,1,1).detach().clone().cuda()
        layer_num = int(math.log(args.img_size,2)-1)*2 # 14->256 / 16 -> 512 / 18->1024
        layer_idx = torch.arange(layer_num)[np.newaxis, :, np.newaxis] # shape:[1,18,1], layer_idx = [0,1,2,...,17]
        ones = torch.ones(layer_idx.shape, dtype=torch.float32) # shape:[1,18,1], ones = [1,1,1,1,1,1,1,1]
        # truncation: scale the first half of the style layers by psi=0.7, keep the rest at 1.0
        coefs = torch.where(layer_idx < layer_num//2, 0.7 * ones, ones)
        Gs.cuda()
        Gm.eval()
        E = BE.BE(startf=args.start_features, maxf=512, layer_count=int(math.log(args.img_size,2)-1), latent_size=512, channels=3)
    elif type == 2: # StyleGAN2
        generator = model_v2.StyleGAN2Generator(resolution=args.img_size).to(device)
        checkpoint = torch.load(model_path) #map_location='cpu'
        if 'generator_smooth' in checkpoint: #default
            generator.load_state_dict(checkpoint['generator_smooth'])
        else:
            generator.load_state_dict(checkpoint['generator'])
        synthesis_kwargs = dict(trunc_psi=0.7,trunc_layers=8,randomize_noise=False)
        #Gs = generator.synthesis
        #Gm = generator.mapping
        const_r = torch.randn(args.batch_size)
        const1 = generator.synthesis.early_layer(const_r).detach().clone() #[n,512,4,4]
        #E = BE.BE(startf=64, maxf=512, layer_count=7, latent_size=512, channels=3) # 256
        E = BE.BE(startf=args.start_features, maxf=512, layer_count=int(math.log(args.img_size,2)-1), latent_size=512, channels=3) # layer_count: 7->256 8->512 9->1024
    elif type == 3: # PGGAN
        generator = model_pggan.PGGANGenerator(resolution=args.img_size).to(device)
        checkpoint = torch.load(model_path) #map_location='cpu'
        if 'generator_smooth' in checkpoint: # this is the default
            generator.load_state_dict(checkpoint['generator_smooth'])
        else:
            generator.load_state_dict(checkpoint['generator'])
        const1 = torch.tensor(0)
        E = BE_PG.BE(startf=args.start_features, maxf=512, layer_count=int(math.log(args.img_size,2)-1), latent_size=512, channels=3, pggan=True)
    elif type == 4:
        config = BigGANConfig.from_json_file(args.config_dir)
        generator = BigGAN(config).to(device)
        generator.load_state_dict(torch.load(model_path))
        E = BE_BIG.BE(startf=args.start_features, maxf=512, layer_count=int(math.log(args.img_size,2)-1), latent_size=512, channels=3, biggan=True)
    else:
        print('error')
        return
    # Optionally resume the encoder from a checkpoint.
    if args.checkpoint_dir_E != None:
        E.load_state_dict(torch.load(args.checkpoint_dir_E))
    E.cuda()
    writer = tensor_writer
    E_optimizer = LREQAdam([{'params': E.parameters()},], lr=0.0015, betas=(0.0, 0.99), weight_decay=0)
    # LREQAdam avoids: "RuntimeError: one of the variables needed for gradient
    # computation has been modified by an inplace operation"
    loss_lpips = lpips.LPIPS(net='vgg').to('cuda')
    batch_size = args.batch_size
    it_d = 0
    #vgg16->Grad-CAM
    # The last Conv2d layer of VGG16 is the target layer for Grad-CAM++.
    vgg16 = torchvision.models.vgg16(pretrained=True).cuda()
    final_layer = None
    for name, m in vgg16.named_modules():
        if isinstance(m, nn.Conv2d):
            final_layer = name
    grad_cam_plus_plus = GradCamPlusPlus(vgg16, final_layer)
    gbp = GuidedBackPropagation(vgg16)
    it_d = 0
    for iteration in range(0,args.iterations):
        set_seed(iteration%30000)
        z = torch.randn(batch_size, args.z_dim) #[32, 512]
        # --- Sample latents and generate the reference images imgs1 ----
        if type == 1:
            with torch.no_grad(): # generate images and latent variables here
                w1 = Gm(z,coefs_m=coefs).cuda() #[batch_size,18,512]
                imgs1 = Gs.forward(w1,int(math.log(args.img_size,2)-2)) # 7->512 / 6->256
        elif type == 2:
            with torch.no_grad():
                result_all = generator(z.cuda(), **synthesis_kwargs)
                imgs1 = result_all['image']
                w1 = result_all['wp']
        elif type == 3:
            with torch.no_grad(): # generate images and latent variables here
                w1 = z.cuda()
                result_all = generator(w1)
                imgs1 = result_all['image']
        elif type == 4:
            z = truncated_noise_sample(truncation=0.4, batch_size=batch_size, seed=iteration%30000)
            #label = np.random.randint(1000,size=batch_size) # generate labels
            # one random class id shared by the whole batch
            flag = np.random.randint(1000)
            label = np.ones(batch_size)
            label = flag * label
            label = one_hot(label)
            w1 = torch.tensor(z, dtype=torch.float).cuda()
            conditions = torch.tensor(label, dtype=torch.float).cuda() # as label
            truncation = torch.tensor(0.4, dtype=torch.float).cuda()
            with torch.no_grad(): # generate images and latent variables here
                imgs1, const1 = generator(w1, conditions, truncation) # const1 are conditional vectors in BigGAN
        # --- Encode imgs1 back to latents and re-decode to imgs2 -------
        if type != 4:
            const2,w2 = E(imgs1)
        else:
            const2,w2 = E(imgs1, const1)
        if type == 1:
            imgs2=Gs.forward(w2,int(math.log(args.img_size,2)-2))
        elif type == 2 or type == 3:
            imgs2=generator.synthesis(w2)['image']
        elif type == 4:
            imgs2, _ = generator(w2, conditions, truncation)
        else:
            print('model type error')
            return
        E_optimizer.zero_grad()
        #Image Vectors
        # Grad-CAM++ saliency masks and guided-backprop gradients for both
        # the reference and the reconstructed images.
        mask_1 = grad_cam_plus_plus(imgs1,None) #[c,1,h,w]
        mask_2 = grad_cam_plus_plus(imgs2,None)
        # imgs1.retain_grad()
        # imgs2.retain_grad()
        imgs1_ = imgs1.detach().clone()
        imgs1_.requires_grad = True
        imgs2_ = imgs2.detach().clone()
        imgs2_.requires_grad = True
        grad_1 = gbp(imgs1_) # [n,c,h,w]
        grad_2 = gbp(imgs2_)
        heatmap_1,cam_1 = mask2cam(mask_1,imgs1)
        heatmap_2,cam_2 = mask2cam(mask_2,imgs2)
        # Each loss term below is backpropagated and stepped separately.
        loss_grad, loss_grad_info = space_loss(grad_1,grad_2,lpips_model=loss_lpips)
        ##--Image
        loss_imgs, loss_imgs_info = space_loss(imgs1,imgs2,lpips_model=loss_lpips)
        E_optimizer.zero_grad()
        loss_imgs.backward(retain_graph=True)
        E_optimizer.step()
        ##--Mask_Cam as AT1 (HeatMap from Mask)
        mask_1 = mask_1.float().to(device)
        mask_1.requires_grad=True
        mask_2 = mask_2.float().to(device)
        mask_2.requires_grad=True
        loss_mask, loss_mask_info = space_loss(mask_1,mask_2,lpips_model=loss_lpips)
        loss_mask = loss_mask * 5.0
        E_optimizer.zero_grad()
        loss_mask.backward(retain_graph=True)
        E_optimizer.step()
        ##--Grad_CAM as AT2 (from mask with img)
        cam_1 = cam_1.float().to(device)
        cam_1.requires_grad=True
        cam_2 = cam_2.float().to(device)
        cam_2.requires_grad=True
        loss_Gcam, loss_Gcam_info = space_loss(cam_1,cam_2,lpips_model=loss_lpips)
        loss_Gcam = loss_Gcam * 9.0
        E_optimizer.zero_grad()
        loss_Gcam.backward(retain_graph=True)
        E_optimizer.step()
        #Latent Vectors
        ##--C
        loss_c, loss_c_info = space_loss(const1,const2,image_space = False)
        ##--W
        loss_w, loss_w_info = space_loss(w1,w2,image_space = False)
        E_optimizer.zero_grad()
        loss_w.backward()
        E_optimizer.step()
        print('ep_%d_iter_%d'%(iteration//30000,iteration%30000))
        print('[loss_imgs_mse[img,img_mean,img_std], loss_imgs_ssim, loss_imgs_cosine, loss_kl_imgs, loss_imgs_lpips]')
        print('---------ImageSpace--------')
        print('loss_mask_info: %s'%loss_mask_info)
        print('loss_grad_info: %s'%loss_grad_info)
        print('loss_imgs_info: %s'%loss_imgs_info)
        print('loss_Gcam_info: %s'%loss_Gcam_info)
        print('---------LatentSpace--------')
        print('loss_w_info: %s'%loss_w_info)
        print('loss_c_info: %s'%loss_c_info)
        # --- TensorBoard logging ---------------------------------------
        it_d += 1
        writer.add_scalar('loss_mask_mse', loss_mask_info[0][0], global_step=it_d)
        writer.add_scalar('loss_mask_mse_mean', loss_mask_info[0][1], global_step=it_d)
        writer.add_scalar('loss_mask_mse_std', loss_mask_info[0][2], global_step=it_d)
        writer.add_scalar('loss_mask_kl', loss_mask_info[1], global_step=it_d)
        writer.add_scalar('loss_mask_cosine', loss_mask_info[2], global_step=it_d)
        writer.add_scalar('loss_mask_ssim', loss_mask_info[3], global_step=it_d)
        writer.add_scalar('loss_mask_lpips', loss_mask_info[4], global_step=it_d)
        writer.add_scalar('loss_grad_mse', loss_grad_info[0][0], global_step=it_d)
        writer.add_scalar('loss_grad_mse_mean', loss_grad_info[0][1], global_step=it_d)
        writer.add_scalar('loss_grad_mse_std', loss_grad_info[0][2], global_step=it_d)
        writer.add_scalar('loss_grad_kl', loss_grad_info[1], global_step=it_d)
        writer.add_scalar('loss_grad_cosine', loss_grad_info[2], global_step=it_d)
        writer.add_scalar('loss_grad_ssim', loss_grad_info[3], global_step=it_d)
        writer.add_scalar('loss_grad_lpips', loss_grad_info[4], global_step=it_d)
        writer.add_scalar('loss_imgs_mse', loss_imgs_info[0][0], global_step=it_d)
        writer.add_scalar('loss_imgs_mse_mean', loss_imgs_info[0][1], global_step=it_d)
        writer.add_scalar('loss_imgs_mse_std', loss_imgs_info[0][2], global_step=it_d)
        writer.add_scalar('loss_imgs_kl', loss_imgs_info[1], global_step=it_d)
        writer.add_scalar('loss_imgs_cosine', loss_imgs_info[2], global_step=it_d)
        writer.add_scalar('loss_imgs_ssim', loss_imgs_info[3], global_step=it_d)
        writer.add_scalar('loss_imgs_lpips', loss_imgs_info[4], global_step=it_d)
        writer.add_scalar('loss_Gcam', loss_Gcam_info[0][0], global_step=it_d)
        writer.add_scalar('loss_Gcam_mean', loss_Gcam_info[0][1], global_step=it_d)
        writer.add_scalar('loss_Gcam_std', loss_Gcam_info[0][2], global_step=it_d)
        writer.add_scalar('loss_Gcam_kl', loss_Gcam_info[1], global_step=it_d)
        writer.add_scalar('loss_Gcam_cosine', loss_Gcam_info[2], global_step=it_d)
        writer.add_scalar('loss_Gcam_ssim', loss_Gcam_info[3], global_step=it_d)
        writer.add_scalar('loss_Gcam_lpips', loss_Gcam_info[4], global_step=it_d)
        writer.add_scalar('loss_w_mse', loss_w_info[0][0], global_step=it_d)
        writer.add_scalar('loss_w_mse_mean', loss_w_info[0][1], global_step=it_d)
        writer.add_scalar('loss_w_mse_std', loss_w_info[0][2], global_step=it_d)
        writer.add_scalar('loss_w_kl', loss_w_info[1], global_step=it_d)
        writer.add_scalar('loss_w_cosine', loss_w_info[2], global_step=it_d)
        writer.add_scalar('loss_w_ssim', loss_w_info[3], global_step=it_d)
        writer.add_scalar('loss_w_lpips', loss_w_info[4], global_step=it_d)
        writer.add_scalar('loss_c_mse', loss_c_info[0][0], global_step=it_d)
        writer.add_scalar('loss_c_mse_mean', loss_c_info[0][1], global_step=it_d)
        writer.add_scalar('loss_c_mse_std', loss_c_info[0][2], global_step=it_d)
        writer.add_scalar('loss_c_kl', loss_c_info[1], global_step=it_d)
        writer.add_scalar('loss_c_cosine', loss_c_info[2], global_step=it_d)
        writer.add_scalar('loss_c_ssim', loss_c_info[3], global_step=it_d)
        writer.add_scalar('loss_c_lpips', loss_c_info[4], global_step=it_d)
        writer.add_scalars('Image_Space_MSE', {'loss_mask_mse':loss_mask_info[0][0],'loss_grad_mse':loss_grad_info[0][0],'loss_img_mse':loss_imgs_info[0][0]}, global_step=it_d)
        writer.add_scalars('Image_Space_KL', {'loss_mask_kl':loss_mask_info[1],'loss_grad_kl':loss_grad_info[1],'loss_imgs_kl':loss_imgs_info[1]}, global_step=it_d)
        writer.add_scalars('Image_Space_Cosine', {'loss_mask_cosine':loss_mask_info[2],'loss_grad_cosine':loss_grad_info[2],'loss_imgs_cosine':loss_imgs_info[2]}, global_step=it_d)
        writer.add_scalars('Image_Space_SSIM', {'loss_mask_ssim':loss_mask_info[3],'loss_grad_ssim':loss_grad_info[3],'loss_img_ssim':loss_imgs_info[3]}, global_step=it_d)
        writer.add_scalars('Image_Space_Lpips', {'loss_mask_lpips':loss_mask_info[4],'loss_grad_lpips':loss_grad_info[4],'loss_img_lpips':loss_imgs_info[4]}, global_step=it_d)
        writer.add_scalars('Latent Space W', {'loss_w_mse':loss_w_info[0][0],'loss_w_mse_mean':loss_w_info[0][1],'loss_w_mse_std':loss_w_info[0][2],'loss_w_kl':loss_w_info[1],'loss_w_cosine':loss_w_info[2]}, global_step=it_d)
        writer.add_scalars('Latent Space C', {'loss_c_mse':loss_c_info[0][0],'loss_c_mse_mean':loss_c_info[0][1],'loss_c_mse_std':loss_c_info[0][2],'loss_c_kl':loss_w_info[1],'loss_c_cosine':loss_w_info[2]}, global_step=it_d)
        # --- Periodic image dumps and loss log -------------------------
        if iteration % 100 == 0:
            n_row = batch_size
            test_img = torch.cat((imgs1[:n_row],imgs2[:n_row]))*0.5+0.5
            torchvision.utils.save_image(test_img, resultPath1_1+'/ep%d_iter%d.png'%(iteration//30000,iteration%30000),nrow=n_row) # nrow=3
            heatmap=torch.cat((heatmap_1,heatmap_2))
            cam=torch.cat((cam_1,cam_2))
            grads = torch.cat((grad_1,grad_2))
            grads = grads.data.cpu().numpy() # [n,c,h,w]
            # normalize gradients to [0,1] for visualization
            grads -= np.max(np.min(grads), 0)
            grads /= np.max(grads)
            torchvision.utils.save_image(torch.tensor(heatmap),resultPath_grad_cam+'/heatmap_%d.png'%(iteration),nrow=n_row)
            torchvision.utils.save_image(torch.tensor(cam),resultPath_grad_cam+'/cam_%d.png'%(iteration),nrow=n_row)
            torchvision.utils.save_image(torch.tensor(grads),resultPath_grad_cam+'/gb_%d.png'%(iteration),nrow=n_row)
            with open(resultPath+'/Loss.txt', 'a+') as f:
                print('i_'+str(iteration),file=f)
                print('[loss_imgs_mse[img,img_mean,img_std], loss_imgs_kl, loss_imgs_cosine, loss_imgs_ssim, loss_imgs_lpips]',file=f)
                print('---------ImageSpace--------',file=f)
                print('loss_mask_info: %s'%loss_mask_info,file=f)
                print('loss_grad_info: %s'%loss_grad_info,file=f)
                print('loss_imgs_info: %s'%loss_imgs_info,file=f)
                print('loss_Gcam_info: %s'%loss_Gcam_info,file=f)
                print('---------LatentSpace--------',file=f)
                print('loss_w_info: %s'%loss_w_info,file=f)
                print('loss_c_info: %s'%loss_c_info,file=f)
        # --- Periodic encoder checkpoint -------------------------------
        if iteration % 5000 == 0:
            torch.save(E.state_dict(), resultPath1_2+'/E_model_ep%d_iter%d.pth'%(iteration//30000,iteration%30000))
            #torch.save(Gm.buffer1,resultPath1_2+'/center_tensor_iter%d.pt'%iteration)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='the training args')
parser.add_argument('--iterations', type=int, default=120001)
parser.add_argument('--lr', type=float, default=0.0015)
parser.add_argument('--beta_1', type=float, default=0.0)
parser.add_argument('--batch_size', type=int, default=8)
parser.add_argument('--experiment_dir', default=None)
parser.add_argument('--checkpoint_dir_GAN', default='../checkpoint/stylegan_v2/stylegan2_cat256.pth')
parser.add_argument('--config_dir', default='./checkpoint/biggan/256/biggan-deep-256-config.json') # BigGAN needs it
parser.add_argument('--checkpoint_dir_E', default=None)#'./result/StyleGAN2-CAT256-MisAligned-solveDetach&Clone-FronterImageVecvtors/models/E_model_ep0_iter20000.pth'
parser.add_argument('--img_size',type=int, default=256)
parser.add_argument('--img_channels', type=int, default=3)# RGB:3 ,L:1
parser.add_argument('--z_dim', type=int, default=512) # BigGAN,z=128, PGGAN and StyleGANs = 512
parser.add_argument('--mtype', type=int, default=2) # StyleGANv1=1, StyleGANv2=2, PGGAN=3, BigGAN00
parser.add_argument('--start_features', type=int, default=64) # 16->1024 32->512 64->256
args = parser.parse_args()
if not os.path.exists('./result'): os.mkdir('./result')
resultPath = args.experiment_dir
if resultPath == None:
resultPath = "./result/StyleGAN2-Cat256-Case2-MisAligned-w"
if not os.path.exists(resultPath): os.mkdir(resultPath)
resultPath1_1 = resultPath+"/imgs"
if not os.path.exists(resultPath1_1): os.mkdir(resultPath1_1)
resultPath1_2 = resultPath+"/models"
if not os.path.exists(resultPath1_2): os.mkdir(resultPath1_2)
resultPath_grad_cam = resultPath+"/grad_cam"
if not os.path.exists(resultPath_grad_cam): os.mkdir(resultPath_grad_cam)
use_gpu = True
device = torch.device("cuda" if use_gpu else "cpu")
writer_path = os.path.join(resultPath, './summaries')
if not os.path.exists(writer_path): os.mkdir(writer_path)
writer = tensorboardX.SummaryWriter(writer_path)
train(tensor_writer=writer, args= args) | [
"model.utils.biggan_config.BigGANConfig.from_json_file",
"math.log",
"torchvision.utils.save_image",
"sys.path.append",
"torch.arange",
"os.path.exists",
"lpips.LPIPS",
"tensorboardX.SummaryWriter",
"argparse.ArgumentParser",
"numpy.max",
"os.mkdir",
"numpy.min",
"model.biggan_generator.BigG... | [((11, 32), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (26, 32), False, 'import sys\n'), ((4789, 4824), 'metric.grad_cam.GradCamPlusPlus', 'GradCamPlusPlus', (['vgg16', 'final_layer'], {}), '(vgg16, final_layer)\n', (4804, 4824), False, 'from metric.grad_cam import GradCAM, GradCamPlusPlus, GuidedBackPropagation, mask2cam\n'), ((4835, 4863), 'metric.grad_cam.GuidedBackPropagation', 'GuidedBackPropagation', (['vgg16'], {}), '(vgg16)\n', (4856, 4863), False, 'from metric.grad_cam import GradCAM, GradCamPlusPlus, GuidedBackPropagation, mask2cam\n'), ((16026, 16082), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""the training args"""'}), "(description='the training args')\n", (16049, 16082), False, 'import argparse\n'), ((17883, 17925), 'torch.device', 'torch.device', (["('cuda' if use_gpu else 'cpu')"], {}), "('cuda' if use_gpu else 'cpu')\n", (17895, 17925), False, 'import torch\n'), ((17945, 17984), 'os.path.join', 'os.path.join', (['resultPath', '"""./summaries"""'], {}), "(resultPath, './summaries')\n", (17957, 17984), False, 'import os\n'), ((18060, 18099), 'tensorboardX.SummaryWriter', 'tensorboardX.SummaryWriter', (['writer_path'], {}), '(writer_path)\n', (18086, 18099), False, 'import tensorboardX\n'), ((1446, 1490), 'torch.load', 'torch.load', (["(model_path + '/center_tensor.pt')"], {}), "(model_path + '/center_tensor.pt')\n", (1456, 1490), False, 'import torch\n'), ((1821, 1869), 'torch.ones', 'torch.ones', (['layer_idx.shape'], {'dtype': 'torch.float32'}), '(layer_idx.shape, dtype=torch.float32)\n', (1831, 1869), False, 'import torch\n'), ((1929, 1986), 'torch.where', 'torch.where', (['(layer_idx < layer_num // 2)', '(0.7 * ones)', 'ones'], {}), '(layer_idx < layer_num // 2, 0.7 * ones, ones)\n', (1940, 1986), False, 'import torch\n'), ((4972, 5007), 'torch.randn', 'torch.randn', (['batch_size', 'args.z_dim'], {}), '(batch_size, args.z_dim)\n', (4983, 5007), False, 
'import torch\n'), ((7274, 7297), 'metric.grad_cam.mask2cam', 'mask2cam', (['mask_1', 'imgs1'], {}), '(mask_1, imgs1)\n', (7282, 7297), False, 'from metric.grad_cam import GradCAM, GradCamPlusPlus, GuidedBackPropagation, mask2cam\n'), ((7323, 7346), 'metric.grad_cam.mask2cam', 'mask2cam', (['mask_2', 'imgs2'], {}), '(mask_2, imgs2)\n', (7331, 7346), False, 'from metric.grad_cam import GradCAM, GradCamPlusPlus, GuidedBackPropagation, mask2cam\n'), ((17263, 17289), 'os.path.exists', 'os.path.exists', (['"""./result"""'], {}), "('./result')\n", (17277, 17289), False, 'import os\n'), ((17291, 17311), 'os.mkdir', 'os.mkdir', (['"""./result"""'], {}), "('./result')\n", (17299, 17311), False, 'import os\n'), ((17559, 17588), 'os.path.exists', 'os.path.exists', (['resultPath1_1'], {}), '(resultPath1_1)\n', (17573, 17588), False, 'import os\n'), ((17590, 17613), 'os.mkdir', 'os.mkdir', (['resultPath1_1'], {}), '(resultPath1_1)\n', (17598, 17613), False, 'import os\n'), ((17667, 17696), 'os.path.exists', 'os.path.exists', (['resultPath1_2'], {}), '(resultPath1_2)\n', (17681, 17696), False, 'import os\n'), ((17698, 17721), 'os.mkdir', 'os.mkdir', (['resultPath1_2'], {}), '(resultPath1_2)\n', (17706, 17721), False, 'import os\n'), ((17783, 17818), 'os.path.exists', 'os.path.exists', (['resultPath_grad_cam'], {}), '(resultPath_grad_cam)\n', (17797, 17818), False, 'import os\n'), ((17820, 17849), 'os.mkdir', 'os.mkdir', (['resultPath_grad_cam'], {}), '(resultPath_grad_cam)\n', (17828, 17849), False, 'import os\n'), ((17996, 18023), 'os.path.exists', 'os.path.exists', (['writer_path'], {}), '(writer_path)\n', (18010, 18023), False, 'import os\n'), ((18025, 18046), 'os.mkdir', 'os.mkdir', (['writer_path'], {}), '(writer_path)\n', (18033, 18046), False, 'import os\n'), ((1137, 1175), 'torch.load', 'torch.load', (["(model_path + 'Gs_dict.pth')"], {}), "(model_path + 'Gs_dict.pth')\n", (1147, 1175), False, 'import torch\n'), ((1385, 1424), 'torch.load', 'torch.load', (["(model_path + 
'/Gm_dict.pth')"], {}), "(model_path + '/Gm_dict.pth')\n", (1395, 1424), False, 'import torch\n'), ((1703, 1726), 'torch.arange', 'torch.arange', (['layer_num'], {}), '(layer_num)\n', (1715, 1726), False, 'import torch\n'), ((2342, 2364), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (2352, 2364), False, 'import torch\n'), ((2754, 2782), 'torch.randn', 'torch.randn', (['args.batch_size'], {}), '(args.batch_size)\n', (2765, 2782), False, 'import torch\n'), ((4143, 4176), 'torch.load', 'torch.load', (['args.checkpoint_dir_E'], {}), '(args.checkpoint_dir_E)\n', (4153, 4176), False, 'import torch\n'), ((4467, 4489), 'lpips.LPIPS', 'lpips.LPIPS', ([], {'net': '"""vgg"""'}), "(net='vgg')\n", (4478, 4489), False, 'import lpips\n'), ((4582, 4623), 'torchvision.models.vgg16', 'torchvision.models.vgg16', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (4606, 4623), False, 'import torchvision\n'), ((14220, 14352), 'torchvision.utils.save_image', 'torchvision.utils.save_image', (['test_img', "(resultPath1_1 + '/ep%d_iter%d.png' % (iteration // 30000, iteration % 30000))"], {'nrow': 'n_row'}), "(test_img, resultPath1_1 + '/ep%d_iter%d.png' %\n (iteration // 30000, iteration % 30000), nrow=n_row)\n", (14248, 14352), False, 'import torchvision\n'), ((14368, 14401), 'torch.cat', 'torch.cat', (['(heatmap_1, heatmap_2)'], {}), '((heatmap_1, heatmap_2))\n', (14377, 14401), False, 'import torch\n'), ((14417, 14442), 'torch.cat', 'torch.cat', (['(cam_1, cam_2)'], {}), '((cam_1, cam_2))\n', (14426, 14442), False, 'import torch\n'), ((14462, 14489), 'torch.cat', 'torch.cat', (['(grad_1, grad_2)'], {}), '((grad_1, grad_2))\n', (14471, 14489), False, 'import torch\n'), ((14613, 14626), 'numpy.max', 'np.max', (['grads'], {}), '(grads)\n', (14619, 14626), True, 'import numpy as np\n'), ((17459, 17485), 'os.path.exists', 'os.path.exists', (['resultPath'], {}), '(resultPath)\n', (17473, 17485), False, 'import os\n'), ((17487, 17507), 'os.mkdir', 'os.mkdir', 
(['resultPath'], {}), '(resultPath)\n', (17495, 17507), False, 'import os\n'), ((3264, 3286), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (3274, 3286), False, 'import torch\n'), ((3523, 3538), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (3535, 3538), False, 'import torch\n'), ((5059, 5074), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5072, 5074), False, 'import torch\n'), ((14574, 14587), 'numpy.min', 'np.min', (['grads'], {}), '(grads)\n', (14580, 14587), True, 'import numpy as np\n'), ((14668, 14689), 'torch.tensor', 'torch.tensor', (['heatmap'], {}), '(heatmap)\n', (14680, 14689), False, 'import torch\n'), ((14793, 14810), 'torch.tensor', 'torch.tensor', (['cam'], {}), '(cam)\n', (14805, 14810), False, 'import torch\n'), ((14910, 14929), 'torch.tensor', 'torch.tensor', (['grads'], {}), '(grads)\n', (14922, 14929), False, 'import torch\n'), ((1617, 1643), 'math.log', 'math.log', (['args.img_size', '(2)'], {}), '(args.img_size, 2)\n', (1625, 1643), False, 'import math\n'), ((2256, 2309), 'model.stylegan2_generator.StyleGAN2Generator', 'model_v2.StyleGAN2Generator', ([], {'resolution': 'args.img_size'}), '(resolution=args.img_size)\n', (2283, 2309), True, 'import model.stylegan2_generator as model_v2\n'), ((3723, 3767), 'model.utils.biggan_config.BigGANConfig.from_json_file', 'BigGANConfig.from_json_file', (['args.config_dir'], {}), '(args.config_dir)\n', (3750, 3767), False, 'from model.utils.biggan_config import BigGANConfig\n'), ((5289, 5304), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5302, 5304), False, 'import torch\n'), ((14159, 14200), 'torch.cat', 'torch.cat', (['(imgs1[:n_row], imgs2[:n_row])'], {}), '((imgs1[:n_row], imgs2[:n_row]))\n', (14168, 14200), False, 'import torch\n'), ((1051, 1077), 'math.log', 'math.log', (['args.img_size', '(2)'], {}), '(args.img_size, 2)\n', (1059, 1077), False, 'import math\n'), ((2143, 2169), 'math.log', 'math.log', (['args.img_size', '(2)'], {}), '(args.img_size, 
2)\n', (2151, 2169), False, 'import math\n'), ((3179, 3231), 'model.pggan.pggan_generator.PGGANGenerator', 'model_pggan.PGGANGenerator', ([], {'resolution': 'args.img_size'}), '(resolution=args.img_size)\n', (3205, 3231), True, 'import model.pggan.pggan_generator as model_pggan\n'), ((3848, 3870), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (3858, 3870), False, 'import torch\n'), ((5498, 5513), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5511, 5513), False, 'import torch\n'), ((5856, 5879), 'numpy.random.randint', 'np.random.randint', (['(1000)'], {}), '(1000)\n', (5873, 5879), True, 'import numpy as np\n'), ((5900, 5919), 'numpy.ones', 'np.ones', (['batch_size'], {}), '(batch_size)\n', (5907, 5919), True, 'import numpy as np\n'), ((6528, 6554), 'math.log', 'math.log', (['args.img_size', '(2)'], {}), '(args.img_size, 2)\n', (6536, 6554), False, 'import math\n'), ((1212, 1238), 'math.log', 'math.log', (['args.img_size', '(2)'], {}), '(args.img_size, 2)\n', (1220, 1238), False, 'import math\n'), ((3033, 3059), 'math.log', 'math.log', (['args.img_size', '(2)'], {}), '(args.img_size, 2)\n', (3041, 3059), False, 'import math\n'), ((3788, 3802), 'model.biggan_generator.BigGAN', 'BigGAN', (['config'], {}), '(config)\n', (3794, 3802), False, 'from model.biggan_generator import BigGAN\n'), ((5200, 5226), 'math.log', 'math.log', (['args.img_size', '(2)'], {}), '(args.img_size, 2)\n', (5208, 5226), False, 'import math\n'), ((6215, 6230), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6228, 6230), False, 'import torch\n'), ((3614, 3640), 'math.log', 'math.log', (['args.img_size', '(2)'], {}), '(args.img_size, 2)\n', (3622, 3640), False, 'import math\n'), ((6005, 6039), 'torch.tensor', 'torch.tensor', (['z'], {'dtype': 'torch.float'}), '(z, dtype=torch.float)\n', (6017, 6039), False, 'import torch\n'), ((6072, 6110), 'torch.tensor', 'torch.tensor', (['label'], {'dtype': 'torch.float'}), '(label, dtype=torch.float)\n', (6084, 6110), False, 
'import torch\n'), ((6154, 6190), 'torch.tensor', 'torch.tensor', (['(0.4)'], {'dtype': 'torch.float'}), '(0.4, dtype=torch.float)\n', (6166, 6190), False, 'import torch\n'), ((3957, 3983), 'math.log', 'math.log', (['args.img_size', '(2)'], {}), '(args.img_size, 2)\n', (3965, 3983), False, 'import math\n')] |
#!/usr/bin/env python3
"""
Script and class for creating a tf.Record dataset for the simulated disc
tracking task
"""
import numpy as np
import logging
import os
import cv2
import matplotlib.pyplot as plt
import argparse
import sys
from differentiable_filters.utils import recordio as tfr
class DiscTrackingData():
    """Generator for a tf.Record dataset of a simulated disc-tracking task.

    A red target disc moves under a spring/drag process model among a set of
    colored distractor discs; each example is a sequence of rendered images
    plus the ground-truth state and process-noise information.
    """

    def __init__(self, name: str, out_dir: str, width: int,
                 num_examples: int, sequence_length: int, file_size: int,
                 rescale: bool = False, debug: bool = False):
        """
        Class for creating a tf.Record dataset for the simulated disc tracking
        task.
        Parameters
        ----------
        name : str
            Name of the dataset.
        out_dir : str
            Output directory
        width : int
            Width (and height) of the image observations. Note: Images are
            always generated with size [120, 120, 3]. If width is set to
            a different value, the images are rescaled accordingly.
        num_examples : int
            Maximum number of training examples to generate.
        sequence_length : int
            Number of timesteps in the sequence.
        file_size : int
            Maximum number of examples stored in one file.
        rescale : bool, optional
            If true, the state-space is rescaled to be in [-1, 1].
            The default is False.
        debug : bool, optional
            Turns on debugging output. The default is False.
        Returns
        -------
        None.
        """
        # Images are always rendered at 120x120; "factor" converts between
        # the render resolution and the requested observation width.
        self.im_size = 120
        self.factor = self.im_size / width
        self.out_dir = out_dir
        self.name = name
        self.num_examples = num_examples
        self.sequence_length = sequence_length
        # Never store more examples per file than we will generate in total.
        self.file_size = min(self.num_examples, file_size)
        self.rescale = rescale
        self.debug = debug
        # parameters of the process model
        self.spring_force = 0.05
        self.drag_force = 0.0075
        # colors for the distractor discs (RGB tuples; the target disc is
        # always pure red and is drawn separately)
        self.cols = [(0, 255, 0), (0, 0, 255), (0, 255, 255), (255, 0, 255),
                     (255, 255, 0), (255, 255, 255)]
        if not os.path.exists(self.out_dir):
            os.makedirs(self.out_dir)
        # setup logging
        self.log = logging.getLogger(name)
        self.log.setLevel(logging.DEBUG)
        # create formatter and add it to the handlers
        formatter = logging.Formatter('%(asctime)s: [%(name)s] ' +
                                      '[%(levelname)s] %(message)s')
        # create console handler
        ch = logging.StreamHandler(sys.stdout)
        ch.setLevel(logging.DEBUG)
        ch.setFormatter(formatter)
        self.log.addHandler(ch)
        # create file handler which logs warnings errors and criticals;
        # remove any stale log file from a previous run first
        if os.path.exists(os.path.join(self.out_dir,
                                      self.name + '_error.log')):
            os.remove(os.path.join(self.out_dir,
                                   self.name + '_error.log'))
        fh = logging.FileHandler(os.path.join(self.out_dir,
                                             self.name + '_error.log'))
        fh.setLevel(logging.WARNING)
        fh.setFormatter(formatter)
        self.log.addHandler(fh)

    def create_dataset(self, num_distractors: int, hetero_q: bool,
                       corr_q: bool, pos_noise: float):
        """
        Creates a tf.Record dataset with the desired characteristics.
        Parameters
        ----------
        num_distractors : int
            The number of distractor discs.
        hetero_q : bool
            If true, the process noise on the velocity components is
            heteroscedastic.
        corr_q : bool
            If true, the process noise is correlated. In this case, the full
            covariance matrix Q is stored, otherwise, we only store the
            diagonal.
        pos_noise : float
            Magnitude of the process noise on the position components.
        Returns
        -------
        None.
        """
        # setup directory for debug output if desired
        if self.debug:
            self.debug_dir = os.path.join(self.out_dir, 'debug', self.name)
            if not os.path.exists(self.debug_dir):
                os.makedirs(self.debug_dir)
        train_count = 0
        val_count = 0
        test_count = 0
        # Field names stored per example in the tf.Record files.
        self.keys = ['start_image', 'start_state', 'image', 'state', 'q',
                     'visible']
        train_data = {key: [] for key in self.keys}
        # One writer/meta pair per dataset split.
        self.record_writer_train = \
            tfr.RecordioWriter(self.out_dir, self.file_size,
                               self.name + '_train_')
        self.record_meta_train = tfr.RecordMeta(self.name + '_train_')
        self.record_writer_val = \
            tfr.RecordioWriter(self.out_dir, self.file_size,
                               self.name + '_val_')
        self.record_meta_val = tfr.RecordMeta(self.name + '_val_')
        self.record_writer_test = \
            tfr.RecordioWriter(self.out_dir, self.file_size,
                               self.name + '_test_')
        self.record_meta_test = tfr.RecordMeta(self.name + '_test_')
        # Counts generated sequences; also used to limit debug plotting to
        # the first few sequences.
        self.ct = 0
        self.log.info('Starting to generate dataset ' + self.name)
        while train_count < self.num_examples:
            values = self._get_data(num_distractors, hetero_q, corr_q,
                                    pos_noise)
            self.ct += 1
            for key in self.keys:
                train_data[key] += [values[key]]
            # Flush to disk once the buffered training share (8/10 of the
            # buffer) exceeds the per-file example limit.
            if len(train_data['image']) * 8 // 10 > self.file_size:
                train_size, val_size, test_size = self._save(train_data)
                train_count += train_size
                val_count += val_size
                test_count += test_size
                train_data = {key: [] for key in self.keys}
            # Periodic progress report (approximately every 250 examples).
            if (len(train_data['image']) * 8 / 10 + train_count) % 250 == 0:
                num = len(train_data['image']) * 8 // 10 + train_count
                self.log.info('Done ' + str(num) + ' of ' +
                              str(self.num_examples))
        # Flush whatever remains in the buffer.
        if len(train_data['image']) > 0:
            train_size, val_size, test_size = self._save(train_data)
            train_count += train_size
            val_count += val_size
            test_count += test_size
        # save the meta information
        count = train_count + val_count + test_count
        fi = open(os.path.join(self.out_dir, 'info_' + self.name + '.txt'),
                  'w')
        fi.write('Num data points: ' + str(count) + '\n')
        fi.write('Num train: ' + str(train_count) + '\n')
        fi.write('Num val: ' + str(val_count) + '\n')
        fi.write('Num test: ' + str(test_count) + '\n')
        fi.close()
        self.log.info('Done')
        self.record_writer_train.close()
        self.record_writer_test.close()
        self.record_writer_val.close()
        return

    def _get_data(self, num_distractors: int, hetero_q: bool, corr_q: bool,
                  pos_noise: float) -> dict:
        """
        Generates one example.
        Parameters
        ----------
        num_distractors : int
            The number of distractor discs.
        hetero_q : bool
            If true, the process noise on the velocity components is
            heteroscedastic.
        corr_q : bool
            If true, the process noise is correlated. In this case, the full
            covariance matrix Q is stored, otherwise, we only store the
            diagonal.
        pos_noise : float
            Magnitude of the process noise on the position components.
        Returns
        -------
        example : dict
            Dictionary containing the data
        """
        states = []
        images = []
        qs = []
        viss = []
        # the state consists of the red disc's position and velocity
        # draw a random position
        pos = np.random.uniform(-self.im_size//2, self.im_size//2, size=(2))
        # draw a random velocity
        vel = np.random.normal(loc=0., scale=1., size=(2)) * 3
        initial_state = np.array([pos[0], pos[1], vel[0], vel[1]])
        distractors = []
        for dist in range(num_distractors):
            # also draw a random starting positions for distractors
            pos = np.random.uniform(-self.im_size//2, self.im_size//2,
                                    size=(2))
            # draw a random velocity
            vel = np.random.normal(loc=0, scale=1, size=(2)) * 3
            # and a random radius
            rad = np.random.choice(np.arange(3, 10))
            # draw a random color
            col = np.random.choice(len(self.cols))
            # Each distractor is (radius, state, color-index).
            distractors += [(rad, np.array([pos[0], pos[1], vel[0], vel[1]]),
                             col)]
        # generate the initial image
        initial_im, initial_vis = self._observation_model(initial_state,
                                                          distractors)
        last_state = initial_state
        for step in range(self.sequence_length):
            # get the next state
            state, q = self._process_model(last_state, hetero_q, corr_q,
                                           pos_noise)
            # also move the distractors
            new_distractors = []
            for d in distractors:
                d_new, _ = self._process_model(d[1], hetero_q, corr_q,
                                               pos_noise)
                new_distractors += [(d[0], d_new, d[2])]
            # get the new image
            im, vis = self._observation_model(state, new_distractors)
            states += [state]
            images += [im]
            qs += [q]
            viss += [vis]
            distractors = new_distractors
            last_state = state
        # Debug visualization of the first three sequences: plot current
        # position (blue), next position (green) and velocity vector.
        if self.ct < 3 and self.debug:
            for i, im in enumerate(images):
                fig, ax = plt.subplots()
                ax.set_axis_off()
                ax.imshow(im)
                # +60 shifts centered coordinates to image coordinates
                ax.plot(states[i][0]+60, states[i][1]+60, 'bo')
                if i + 1 < len(images):
                    ax.plot(states[i+1][0]+60, states[i+1][1]+60, 'go')
                    ax.plot([states[i][0]+60, states[i][0] + states[i][2]+60],
                            [states[i][1]+60, states[i][1] + states[i][3]+60],
                            'g')
                fig.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9,
                                    wspace=0.1, hspace=0.1)
                fig.savefig(os.path.join(self.debug_dir, str(self.ct) +
                                          "_tracking_" + str(i)),
                            bbox_inches="tight")
                plt.close(fig)
        # we found it helpful to rescale the state space to be roughly in
        # [-1, 1]
        if self.rescale:
            initial_state /= self.im_size / 2
            states = np.array(states) / (self.im_size / 2)
            qs = np.array(qs) / (self.im_size / 2)
            if corr_q:
                # Covariance scales quadratically, hence a second division.
                qs /= self.im_size / 2.
        # compress the images by encoding them as png byte-strings
        for ind, im in enumerate(images):
            # cv2 expects BGR channel order for encoding
            im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
            im = cv2.imencode('.png', im)[1].tobytes()
            images[ind] = im
        initial_im = cv2.cvtColor(initial_im, cv2.COLOR_RGB2BGR)
        initial_im = cv2.imencode('.png', initial_im)[1].tobytes()
        return {'start_image': initial_im, 'start_state': initial_state,
                'image': np.array(images), 'state': states,
                'q': qs, 'visible': np.array(viss)}

    def _observation_model(self, state, distractors):
        """
        Generates an observation image for the current state.
        Parameters
        ----------
        state : np.array
            The state (position and velocity) of the target disc
        distractors : list
            List with the radius, state and color of each distractor disc
        Returns
        -------
        im : np.array
            The image data.
        vis : float
            The number of visible pixels of the target disc.
        """
        im = np.zeros((self.im_size, self.im_size, 3), dtype=np.uint8)
        # draw the red disc (states are centered on the image, so shift by
        # half the image size)
        cv2.circle(im, (int(state[0]+self.im_size//2),
                        int(state[1]+self.im_size//2)),
                   radius=7, color=[255, 0, 0], thickness=-1)
        # draw the other distractors; later discs occlude earlier ones
        for d in distractors:
            cv2.circle(im, (int(d[1][0]+self.im_size//2),
                            int(d[1][1]+self.im_size//2)),
                       radius=d[0], color=self.cols[d[2]], thickness=-1)
        # get the number of pixels visible from the red disc
        mask = np.logical_and(im[:, :, 0] == 255,
                              np.logical_and(im[:, :, 1] == 0,
                                             im[:, :, 2] == 0))
        vis = np.sum(mask.astype(np.float32))
        im = cv2.resize(im, (int(self.im_size/self.factor),
                            int(self.im_size/self.factor)))
        # NOTE(review): vis is a pixel *count* measured at full resolution;
        # after resizing, area scales with factor**2, yet only one division
        # by factor is applied — confirm whether this linear scaling is
        # intentional.
        vis /= self.factor
        return im, vis

    def _process_model(self, state, hetero_q: bool, correlated: bool,
                       pos_noise: float):
        """
        Calculates the next state of the target disc.
        Parameters
        ----------
        state : np.array
            The state (position and velocity) of the target disc
        hetero_q : bool
            If true, the process noise on the velocity components is
            heteroscedastic.
        correlated : bool
            If true, the process noise is correlated. In this case, the full
            covariance matrix Q is stored, otherwise, we only store the
            diagonal.
        pos_noise : float
            Magnitude of the process noise on the position components.
        Returns
        -------
        new_state : np.array
            The next state (position and velocity) of the target disc
        q : np.array
            The process noise used in this step
        """
        new_state = np.copy(state)
        # Spring pulls the disc towards the image center; drag opposes
        # motion quadratically in speed.
        pull_force = - self.spring_force * state[:2]
        drag_force = - self.drag_force * state[2:]**2 * np.sign(state[2:])
        new_state[0] += state[2]
        new_state[1] += state[3]
        new_state[2] += pull_force[0] + drag_force[0]
        new_state[3] += pull_force[1] + drag_force[1]
        if not correlated:
            position_noise = np.random.normal(loc=0, scale=pos_noise, size=(2))
            if hetero_q:
                # Heteroscedastic case: velocity-noise level depends on how
                # close the disc is to the image border (three zones).
                if np.abs(state[0]) > self.im_size//2 - self.im_size//6 or \
                        np.abs(state[1]) > self.im_size//2 - self.im_size//6:
                    velocity_noise = np.random.normal(loc=0, scale=0.1,
                                                     size=(2))
                    q = 0.1
                elif np.abs(state[0]) > self.im_size//2 - self.im_size//3 or \
                        np.abs(state[1]) > self.im_size//2 - self.im_size//3:
                    velocity_noise = np.random.normal(loc=0, scale=1.,
                                                     size=(2))
                    q = 1.
                else:
                    velocity_noise = np.random.normal(loc=0, scale=3.,
                                                     size=(2))
                    q = 3.
            else:
                velocity_noise = np.random.normal(loc=0, scale=2.,
                                                 size=(2))
                q = 2.
            new_state[:2] += position_noise
            new_state[2:] += velocity_noise
            # q holds the diagonal of the noise covariance (std. deviations
            # per state dimension as used above).
            q = np.array([pos_noise, pos_noise, q, q])
        else:
            # Correlated case: fixed full 4x4 covariance matrix built from
            # position scale pn, velocity scale cn and correlations c1..c5.
            pn = 3.0
            cn = 2
            c1 = -0.4
            c2 = 0.2
            c3 = 0.9
            c4 = -0.1
            c5 = 0
            covar = np.array([[pn**2, c1*pn*pn, c2*pn*cn, c3*pn*cn],
                              [c1*pn*pn, pn**2, c4*pn*cn, c5*pn*cn],
                              [c2*pn*cn, c4*pn*cn, cn**2, 0],
                              [c3*pn*cn, c5*pn*cn, 0, cn**2]])
            mean = np.zeros((4))
            noise = np.random.multivariate_normal(mean, covar)
            q = covar
            new_state += noise
        return new_state, q

    def _save(self, data):
        """
        Save a portion of the data to file and splits it into training,
        validation and test data
        Parameters
        ----------
        data : dict of lists
            A dictionary containing the example data
        Returns
        -------
        train_size : int
            Number of training examples saved
        val_size : int
            Number of validation examples saved.
        test_size : int
            Number of test examples saved.
        """
        length = len(data['image'])
        # convert lists to numpy arrays
        for key in self.keys:
            # NOTE(review): data[key] is a Python list at this point, so
            # this condition can never be True and the float32 conversion
            # below is dead code — confirm whether the values were meant to
            # be converted to arrays before this check.
            if type(data[key]) == np.ndarray and data[key].dtype == np.float64:
                data[key] = np.array(data[key]).astype(np.float32)
        # shuffle the arrays together (same permutation for every key)
        permutation = np.random.permutation(length)
        for key in self.keys:
            # np.copy also converts the list of values into an array so that
            # fancy indexing with the permutation works
            vals = np.copy(data[key])
            data[key] = vals[permutation]
        # 80/10/10 split; the test split absorbs rounding remainders.
        train_size = int(np.floor(length * 8. / 10.))
        val_size = int(np.floor(length * 1. / 10.))
        test_size = length - train_size - val_size
        if train_size > 0:
            train_data = {}
            for key in self.keys:
                train_data[key] = np.copy(data[key][:train_size])
            rw = self.record_writer_train
            rm = self.record_meta_train
            tfr.write_tfr(train_data, rw, rm, self.out_dir)
        if val_size > 0:
            val_data = {}
            for key in self.keys:
                val_data[key] = \
                    np.copy(data[key][train_size:train_size+val_size])
            rw = self.record_writer_val
            rm = self.record_meta_val
            tfr.write_tfr(val_data, rw, rm, self.out_dir)
        if test_size > 0:
            test_data = {}
            for key in self.keys:
                test_data[key] = np.copy(data[key][train_size+val_size:])
            rw = self.record_writer_test
            rm = self.record_meta_test
            tfr.write_tfr(test_data, rw, rm, self.out_dir)
        return train_size, val_size, test_size
def main(argv=None):
    """Command-line entry point for generating a disc tracking dataset.

    Parses the command-line options, builds the dataset name from the noise
    configuration and generates the dataset unless one with the same name
    already exists in the output directory.

    Parameters
    ----------
    argv : list of str, optional
        Argument list to parse; when None, argparse falls back to sys.argv.

    Returns
    -------
    None.
    """
    # BUG FIX: the string is a description of the tool, not the program
    # name; the original passed it positionally, which set ``prog`` instead.
    parser = argparse.ArgumentParser(description='create disc tracking datset')
    parser.add_argument('--out-dir', dest='out_dir', type=str, required=True,
                        help='where to store results')
    parser.add_argument('--name', dest='name', type=str,
                        default='disc_tracking')
    parser.add_argument('--sequence-length', dest='sequence_length', type=int,
                        default=50, help='length of the generated sequences')
    parser.add_argument('--width', dest='width', type=int, default=120,
                        help='width (= height) of the generated observations')
    parser.add_argument('--num-examples', dest='num_examples', type=int,
                        default=2000,
                        help='how many training examples should be generated')
    parser.add_argument('--file-size', dest='file_size', type=int,
                        default=500,
                        help='how many examples should be saved in one file')
    parser.add_argument('--hetero-q', dest='hetero_q', type=int,
                        default=0, choices=[0, 1],
                        help='if the process noise should be heteroscedastic '
                        + 'or contstant')
    parser.add_argument('--correlated-q', dest='correlated_q', type=int,
                        default=0, choices=[0, 1],
                        help='if the process noise should have a full or a '
                        + 'diagonal covariance matrix')
    parser.add_argument('--pos-noise', dest='pos_noise', type=float,
                        default=0.1,
                        help='sigma for the positional process noise')
    parser.add_argument('--num-distractors', dest='num_distractors', type=int,
                        default=5, help='number of distractor disc')
    parser.add_argument('--rescale', dest='rescale', type=int,
                        default=0, choices=[0, 1],
                        help='Rescale the state space to be roughly in [-1, 1]?')
    parser.add_argument('--debug', dest='debug', type=int,
                        default=0, choices=[0, 1],
                        help='Write out images for three sequences as debug ' +
                        'output')
    args = parser.parse_args(argv)

    # Encode the noise configuration in the dataset name so that different
    # configurations do not overwrite each other.
    name = args.name + '_pn=' + str(args.pos_noise) \
        + '_d=' + str(args.num_distractors)
    if args.correlated_q:
        name += '_corr'
    if args.hetero_q:
        name += '_hetero'
    else:
        name += '_const'

    if not os.path.exists(os.path.join(args.out_dir, 'info_' + name + '.txt')):
        # BUG FIX: the original call passed args.debug positionally into the
        # ``rescale`` parameter of DiscTrackingData and dropped args.rescale
        # entirely, so --rescale never worked and --debug toggled rescaling.
        # Pass both flags by keyword so each option controls what it claims.
        c = DiscTrackingData(name, args.out_dir, args.width,
                             args.num_examples, args.sequence_length,
                             args.file_size, rescale=args.rescale,
                             debug=args.debug)
        c.create_dataset(args.num_distractors, args.hetero_q,
                         args.correlated_q, args.pos_noise)
    else:
        print('A dataset with this name already exists at ' + args.out_dir)


if __name__ == "__main__":
    main()
| [
"logging.getLogger",
"logging.StreamHandler",
"numpy.array",
"numpy.arange",
"differentiable_filters.utils.recordio.RecordioWriter",
"os.path.exists",
"argparse.ArgumentParser",
"matplotlib.pyplot.close",
"numpy.random.permutation",
"numpy.random.normal",
"numpy.abs",
"differentiable_filters.u... | [((18232, 18286), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""create disc tracking datset"""'], {}), "('create disc tracking datset')\n", (18255, 18286), False, 'import argparse\n'), ((2212, 2235), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (2229, 2235), False, 'import logging\n'), ((2351, 2428), 'logging.Formatter', 'logging.Formatter', (["('%(asctime)s: [%(name)s] ' + '[%(levelname)s] %(message)s')"], {}), "('%(asctime)s: [%(name)s] ' + '[%(levelname)s] %(message)s')\n", (2368, 2428), False, 'import logging\n'), ((2513, 2546), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (2534, 2546), False, 'import logging\n'), ((4460, 4531), 'differentiable_filters.utils.recordio.RecordioWriter', 'tfr.RecordioWriter', (['self.out_dir', 'self.file_size', "(self.name + '_train_')"], {}), "(self.out_dir, self.file_size, self.name + '_train_')\n", (4478, 4531), True, 'from differentiable_filters.utils import recordio as tfr\n'), ((4596, 4633), 'differentiable_filters.utils.recordio.RecordMeta', 'tfr.RecordMeta', (["(self.name + '_train_')"], {}), "(self.name + '_train_')\n", (4610, 4633), True, 'from differentiable_filters.utils import recordio as tfr\n'), ((4681, 4750), 'differentiable_filters.utils.recordio.RecordioWriter', 'tfr.RecordioWriter', (['self.out_dir', 'self.file_size', "(self.name + '_val_')"], {}), "(self.out_dir, self.file_size, self.name + '_val_')\n", (4699, 4750), True, 'from differentiable_filters.utils import recordio as tfr\n'), ((4813, 4848), 'differentiable_filters.utils.recordio.RecordMeta', 'tfr.RecordMeta', (["(self.name + '_val_')"], {}), "(self.name + '_val_')\n", (4827, 4848), True, 'from differentiable_filters.utils import recordio as tfr\n'), ((4897, 4967), 'differentiable_filters.utils.recordio.RecordioWriter', 'tfr.RecordioWriter', (['self.out_dir', 'self.file_size', "(self.name + '_test_')"], {}), "(self.out_dir, self.file_size, 
self.name + '_test_')\n", (4915, 4967), True, 'from differentiable_filters.utils import recordio as tfr\n'), ((5031, 5067), 'differentiable_filters.utils.recordio.RecordMeta', 'tfr.RecordMeta', (["(self.name + '_test_')"], {}), "(self.name + '_test_')\n", (5045, 5067), True, 'from differentiable_filters.utils import recordio as tfr\n'), ((7777, 7841), 'numpy.random.uniform', 'np.random.uniform', (['(-self.im_size // 2)', '(self.im_size // 2)'], {'size': '(2)'}), '(-self.im_size // 2, self.im_size // 2, size=2)\n', (7794, 7841), True, 'import numpy as np\n'), ((7960, 8002), 'numpy.array', 'np.array', (['[pos[0], pos[1], vel[0], vel[1]]'], {}), '([pos[0], pos[1], vel[0], vel[1]])\n', (7968, 8002), True, 'import numpy as np\n'), ((11164, 11207), 'cv2.cvtColor', 'cv2.cvtColor', (['initial_im', 'cv2.COLOR_RGB2BGR'], {}), '(initial_im, cv2.COLOR_RGB2BGR)\n', (11176, 11207), False, 'import cv2\n'), ((12010, 12067), 'numpy.zeros', 'np.zeros', (['(self.im_size, self.im_size, 3)'], {'dtype': 'np.uint8'}), '((self.im_size, self.im_size, 3), dtype=np.uint8)\n', (12018, 12067), True, 'import numpy as np\n'), ((13902, 13916), 'numpy.copy', 'np.copy', (['state'], {}), '(state)\n', (13909, 13916), True, 'import numpy as np\n'), ((16925, 16954), 'numpy.random.permutation', 'np.random.permutation', (['length'], {}), '(length)\n', (16946, 16954), True, 'import numpy as np\n'), ((2100, 2128), 'os.path.exists', 'os.path.exists', (['self.out_dir'], {}), '(self.out_dir)\n', (2114, 2128), False, 'import os\n'), ((2142, 2167), 'os.makedirs', 'os.makedirs', (['self.out_dir'], {}), '(self.out_dir)\n', (2153, 2167), False, 'import os\n'), ((2747, 2799), 'os.path.join', 'os.path.join', (['self.out_dir', "(self.name + '_error.log')"], {}), "(self.out_dir, self.name + '_error.log')\n", (2759, 2799), False, 'import os\n'), ((2985, 3037), 'os.path.join', 'os.path.join', (['self.out_dir', "(self.name + '_error.log')"], {}), "(self.out_dir, self.name + '_error.log')\n", (2997, 3037), False, 'import 
os\n'), ((4039, 4085), 'os.path.join', 'os.path.join', (['self.out_dir', '"""debug"""', 'self.name'], {}), "(self.out_dir, 'debug', self.name)\n", (4051, 4085), False, 'import os\n'), ((6340, 6396), 'os.path.join', 'os.path.join', (['self.out_dir', "('info_' + self.name + '.txt')"], {}), "(self.out_dir, 'info_' + self.name + '.txt')\n", (6352, 6396), False, 'import os\n'), ((7887, 7931), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(1.0)', 'size': '(2)'}), '(loc=0.0, scale=1.0, size=2)\n', (7903, 7931), True, 'import numpy as np\n'), ((8159, 8223), 'numpy.random.uniform', 'np.random.uniform', (['(-self.im_size // 2)', '(self.im_size // 2)'], {'size': '(2)'}), '(-self.im_size // 2, self.im_size // 2, size=2)\n', (8176, 8223), True, 'import numpy as np\n'), ((11023, 11058), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_RGB2BGR'], {}), '(im, cv2.COLOR_RGB2BGR)\n', (11035, 11058), False, 'import cv2\n'), ((11374, 11390), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (11382, 11390), True, 'import numpy as np\n'), ((11445, 11459), 'numpy.array', 'np.array', (['viss'], {}), '(viss)\n', (11453, 11459), True, 'import numpy as np\n'), ((12670, 12720), 'numpy.logical_and', 'np.logical_and', (['(im[:, :, 1] == 0)', '(im[:, :, 2] == 0)'], {}), '(im[:, :, 1] == 0, im[:, :, 2] == 0)\n', (12684, 12720), True, 'import numpy as np\n'), ((14026, 14044), 'numpy.sign', 'np.sign', (['state[2:]'], {}), '(state[2:])\n', (14033, 14044), True, 'import numpy as np\n'), ((14276, 14324), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': 'pos_noise', 'size': '(2)'}), '(loc=0, scale=pos_noise, size=2)\n', (14292, 14324), True, 'import numpy as np\n'), ((15448, 15486), 'numpy.array', 'np.array', (['[pos_noise, pos_noise, q, q]'], {}), '([pos_noise, pos_noise, q, q])\n', (15456, 15486), True, 'import numpy as np\n'), ((15667, 15876), 'numpy.array', 'np.array', (['[[pn ** 2, c1 * pn * pn, c2 * pn * cn, c3 * pn * cn], [c1 * pn * 
pn, pn ** \n 2, c4 * pn * cn, c5 * pn * cn], [c2 * pn * cn, c4 * pn * cn, cn ** 2, 0\n ], [c3 * pn * cn, c5 * pn * cn, 0, cn ** 2]]'], {}), '([[pn ** 2, c1 * pn * pn, c2 * pn * cn, c3 * pn * cn], [c1 * pn *\n pn, pn ** 2, c4 * pn * cn, c5 * pn * cn], [c2 * pn * cn, c4 * pn * cn, \n cn ** 2, 0], [c3 * pn * cn, c5 * pn * cn, 0, cn ** 2]])\n', (15675, 15876), True, 'import numpy as np\n'), ((15930, 15941), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (15938, 15941), True, 'import numpy as np\n'), ((15964, 16006), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'covar'], {}), '(mean, covar)\n', (15993, 16006), True, 'import numpy as np\n'), ((17004, 17022), 'numpy.copy', 'np.copy', (['data[key]'], {}), '(data[key])\n', (17011, 17022), True, 'import numpy as np\n'), ((17091, 17120), 'numpy.floor', 'np.floor', (['(length * 8.0 / 10.0)'], {}), '(length * 8.0 / 10.0)\n', (17099, 17120), True, 'import numpy as np\n'), ((17143, 17172), 'numpy.floor', 'np.floor', (['(length * 1.0 / 10.0)'], {}), '(length * 1.0 / 10.0)\n', (17151, 17172), True, 'import numpy as np\n'), ((17473, 17520), 'differentiable_filters.utils.recordio.write_tfr', 'tfr.write_tfr', (['train_data', 'rw', 'rm', 'self.out_dir'], {}), '(train_data, rw, rm, self.out_dir)\n', (17486, 17520), True, 'from differentiable_filters.utils import recordio as tfr\n'), ((17802, 17847), 'differentiable_filters.utils.recordio.write_tfr', 'tfr.write_tfr', (['val_data', 'rw', 'rm', 'self.out_dir'], {}), '(val_data, rw, rm, self.out_dir)\n', (17815, 17847), True, 'from differentiable_filters.utils import recordio as tfr\n'), ((18102, 18148), 'differentiable_filters.utils.recordio.write_tfr', 'tfr.write_tfr', (['test_data', 'rw', 'rm', 'self.out_dir'], {}), '(test_data, rw, rm, self.out_dir)\n', (18115, 18148), True, 'from differentiable_filters.utils import recordio as tfr\n'), ((20740, 20791), 'os.path.join', 'os.path.join', (['args.out_dir', "('info_' + name + '.txt')"], {}), 
"(args.out_dir, 'info_' + name + '.txt')\n", (20752, 20791), False, 'import os\n'), ((2863, 2915), 'os.path.join', 'os.path.join', (['self.out_dir', "(self.name + '_error.log')"], {}), "(self.out_dir, self.name + '_error.log')\n", (2875, 2915), False, 'import os\n'), ((4105, 4135), 'os.path.exists', 'os.path.exists', (['self.debug_dir'], {}), '(self.debug_dir)\n', (4119, 4135), False, 'import os\n'), ((4153, 4180), 'os.makedirs', 'os.makedirs', (['self.debug_dir'], {}), '(self.debug_dir)\n', (4164, 4180), False, 'import os\n'), ((8313, 8353), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': '(1)', 'size': '(2)'}), '(loc=0, scale=1, size=2)\n', (8329, 8353), True, 'import numpy as np\n'), ((8429, 8445), 'numpy.arange', 'np.arange', (['(3)', '(10)'], {}), '(3, 10)\n', (8438, 8445), True, 'import numpy as np\n'), ((9754, 9768), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9766, 9768), True, 'import matplotlib.pyplot as plt\n'), ((10544, 10558), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (10553, 10558), True, 'import matplotlib.pyplot as plt\n'), ((10744, 10760), 'numpy.array', 'np.array', (['states'], {}), '(states)\n', (10752, 10760), True, 'import numpy as np\n'), ((10799, 10811), 'numpy.array', 'np.array', (['qs'], {}), '(qs)\n', (10807, 10811), True, 'import numpy as np\n'), ((15225, 15267), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': '(2.0)', 'size': '(2)'}), '(loc=0, scale=2.0, size=2)\n', (15241, 15267), True, 'import numpy as np\n'), ((17347, 17378), 'numpy.copy', 'np.copy', (['data[key][:train_size]'], {}), '(data[key][:train_size])\n', (17354, 17378), True, 'import numpy as np\n'), ((17661, 17713), 'numpy.copy', 'np.copy', (['data[key][train_size:train_size + val_size]'], {}), '(data[key][train_size:train_size + val_size])\n', (17668, 17713), True, 'import numpy as np\n'), ((17969, 18011), 'numpy.copy', 'np.copy', (['data[key][train_size + val_size:]'], {}), 
'(data[key][train_size + val_size:])\n', (17976, 18011), True, 'import numpy as np\n'), ((8566, 8608), 'numpy.array', 'np.array', (['[pos[0], pos[1], vel[0], vel[1]]'], {}), '([pos[0], pos[1], vel[0], vel[1]])\n', (8574, 8608), True, 'import numpy as np\n'), ((11229, 11261), 'cv2.imencode', 'cv2.imencode', (['""".png"""', 'initial_im'], {}), "('.png', initial_im)\n", (11241, 11261), False, 'import cv2\n'), ((14544, 14586), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': '(0.1)', 'size': '(2)'}), '(loc=0, scale=0.1, size=2)\n', (14560, 14586), True, 'import numpy as np\n'), ((11076, 11100), 'cv2.imencode', 'cv2.imencode', (['""".png"""', 'im'], {}), "('.png', im)\n", (11088, 11100), False, 'import cv2\n'), ((14371, 14387), 'numpy.abs', 'np.abs', (['state[0]'], {}), '(state[0])\n', (14377, 14387), True, 'import numpy as np\n'), ((14453, 14469), 'numpy.abs', 'np.abs', (['state[1]'], {}), '(state[1])\n', (14459, 14469), True, 'import numpy as np\n'), ((14865, 14907), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': '(1.0)', 'size': '(2)'}), '(loc=0, scale=1.0, size=2)\n', (14881, 14907), True, 'import numpy as np\n'), ((15049, 15091), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': '(3.0)', 'size': '(2)'}), '(loc=0, scale=3.0, size=2)\n', (15065, 15091), True, 'import numpy as np\n'), ((16825, 16844), 'numpy.array', 'np.array', (['data[key]'], {}), '(data[key])\n', (16833, 16844), True, 'import numpy as np\n'), ((14692, 14708), 'numpy.abs', 'np.abs', (['state[0]'], {}), '(state[0])\n', (14698, 14708), True, 'import numpy as np\n'), ((14774, 14790), 'numpy.abs', 'np.abs', (['state[1]'], {}), '(state[1])\n', (14780, 14790), True, 'import numpy as np\n')] |
import datetime as dt
from dateutil.relativedelta import relativedelta
import numpy as np
import pandas as pds
# Orbits period is a pandas.Timedelta kwarg, and the pandas repr
# does not include a module name. Import required to run eval
# on Orbit representation
from pandas import Timedelta # noqa: F401
import pytest
import pysat
class TestOrbitsUserInterface():
    """Unit tests for user-facing error handling of pysat orbit support."""

    def setup(self):
        """ Set up User Interface unit tests
        """
        # Common positional/keyword arguments used to build test Instruments.
        self.in_args = ['pysat', 'testing']
        self.in_kwargs = {'clean_level': 'clean', 'update_files': True}
        self.testInst = None
        self.stime = dt.datetime(2009, 1, 1)

    def teardown(self):
        """ Tear down user interface tests
        """
        del self.in_args, self.in_kwargs, self.testInst, self.stime

    def test_orbit_w_bad_kind(self):
        """ Test orbit failure with bad 'kind' input
        """
        # 'cats' is not a supported orbit kind, so instantiation must fail.
        self.in_kwargs['orbit_info'] = {'index': 'mlt', 'kind': 'cats'}
        with pytest.raises(ValueError):
            self.testInst = pysat.Instrument(*self.in_args, **self.in_kwargs)

    # Each parametrization pairs a valid orbit kind with an index name that
    # does not exist in the test instrument's data (or omits orbit_info).
    @pytest.mark.parametrize("info", [({'index': 'magnetic local time',
                                        'kind': 'longitude'}),
                                      (None),
                                      ({'index': 'magnetic local time',
                                        'kind': 'lt'}),
                                      ({'index': 'magnetic local time',
                                        'kind': 'polar'}),
                                      ({'index': 'magnetic local time',
                                        'kind': 'orbit'})])
    def test_orbit_w_bad_orbit_info(self, info):
        """ Test orbit failure on iteration with orbit initialization
        """
        self.in_kwargs['orbit_info'] = info
        self.testInst = pysat.Instrument(*self.in_args, **self.in_kwargs)
        self.testInst.load(date=self.stime)
        # The bad index is only detected when orbit iteration starts.
        with pytest.raises(ValueError):
            self.testInst.orbits.next()

    @pytest.mark.parametrize("info", [({'index': 'magnetic local time',
                                        'kind': 'polar'}),
                                      ({'index': 'magnetic local time',
                                        'kind': 'orbit'}),
                                      ({'index': 'magnetic local time',
                                        'kind': 'longitude'}),
                                      ({'index': 'magnetic local time',
                                        'kind': 'lt'})])
    def test_orbit_polar_w_missing_orbit_index(self, info):
        """ Test orbit failure on iteration with missing orbit index
        """
        self.in_kwargs['orbit_info'] = info
        self.testInst = pysat.Instrument(*self.in_args, **self.in_kwargs)
        # Force index to None before loading and iterating
        self.testInst.orbits.orbit_index = None
        self.testInst.load(date=self.stime)
        with pytest.raises(ValueError):
            self.testInst.orbits.next()

    def test_orbit_repr(self):
        """ Test the Orbit representation
        """
        self.in_kwargs['orbit_info'] = {'index': 'mlt'}
        self.testInst = pysat.Instrument(*self.in_args, **self.in_kwargs)
        out_str = self.testInst.orbits.__repr__()
        assert out_str.find("Orbits(") >= 0

    def test_orbit_str(self):
        """ Test the Orbit string representation with data
        """
        self.in_kwargs['orbit_info'] = {'index': 'mlt'}
        self.testInst = pysat.Instrument(*self.in_args, **self.in_kwargs)
        self.testInst.load(date=self.stime)
        out_str = self.testInst.orbits.__str__()
        assert out_str.find("Orbit Settings") >= 0
        # NOTE(review): "Orbit Lind" looks like a typo for "Orbit Kind"; as
        # written this substring can never occur, making the assertion
        # vacuous — confirm the intended substring against Orbits.__str__.
        assert out_str.find("Orbit Lind: local time") < 0
class TestSpecificUTOrbits():
    def setup(self):
        """Runs before every method to create a clean testing setup
        """
        # Test instrument whose orbits are determined by the 'mlt'
        # (magnetic local time) index.
        self.testInst = pysat.Instrument('pysat', 'testing',
                                         clean_level='clean',
                                         orbit_info={'index': 'mlt'},
                                         update_files=True)
        # First date with data available for the test instrument.
        self.stime = pysat.instruments.pysat_testing._test_dates['']['']
        # Orbit period of the test instrument, in minutes.
        self.inc_min = 97
        # End time placeholder, filled in by the individual tests.
        self.etime = None
    def teardown(self):
        """Runs after every method to clean up previous testing
        """
        # Drop all fixtures so no state leaks between test methods.
        del self.testInst, self.stime, self.inc_min, self.etime
    # Cover positive, negative and boundary orbit indices within one day.
    @pytest.mark.parametrize('orbit_inc', [(0), (1), (-1), (-2), (14)])
    def test_single_orbit_call_by_index(self, orbit_inc):
        """Test successful orbit call by index
        """
        # Load the data
        self.testInst.load(date=self.stime)
        self.testInst.orbits[orbit_inc]
        # Increment the time
        if orbit_inc >= 0:
            self.stime += dt.timedelta(minutes=orbit_inc * self.inc_min)
        else:
            # Negative indices count back from the last orbit of the day;
            # ceil(1440 / period) is the number of orbits in a day.
            self.stime += dt.timedelta(minutes=self.inc_min
                                       * (np.ceil(1440.0 / self.inc_min)
                                          + orbit_inc))
        # One orbit spans inc_min minutes minus the final second.
        self.etime = self.stime + dt.timedelta(seconds=(self.inc_min * 60 - 1))
        # Test the time
        assert (self.testInst.index[0] == self.stime)
        assert (self.testInst.index[-1] == self.etime)
    # 17 is past the last orbit of the day; None is not a valid index type.
    @pytest.mark.parametrize("orbit_ind,raise_err", [(17, Exception),
                                                     (None, TypeError)])
    def test_single_orbit_call_bad_index(self, orbit_ind, raise_err):
        """ Test orbit failure with bad index
        """
        self.testInst.load(date=self.stime)
        with pytest.raises(raise_err):
            self.testInst.orbits[orbit_ind]
    # NOTE(review): "oribt" in the method name looks like a typo for "orbit";
    # renaming would change the discovered test name, so it is left as-is.
    def test_oribt_number_via_current_multiple_orbit_calls_in_day(self):
        """ Test orbit number with multiple orbits calls in a day
        """
        self.testInst.load(date=self.stime)
        self.testInst.bounds = (self.stime, None)
        # Expected orbit numbers 0..14; the 15th iteration wraps to the
        # first orbit of the next day, hence the final 0.
        true_vals = np.arange(15)
        true_vals[-1] = 0
        test_vals = []
        for i, inst in enumerate(self.testInst.orbits):
            if i > 14:
                break
            test_vals.append(inst.orbits.current)
            # The yielded instrument and the bound instrument must agree.
            assert inst.orbits.current == self.testInst.orbits.current
        assert np.all(test_vals == true_vals)
def test_all_single_orbit_calls_in_day(self):
""" Test all single orbit calls in a day
"""
self.testInst.load(date=self.stime)
self.testInst.bounds = (self.stime, None)
for i, inst in enumerate(self.testInst.orbits):
if i > 14:
break
# Test the start index
self.etime = self.stime + i * relativedelta(minutes=self.inc_min)
assert inst.index[0] == self.etime
assert self.testInst.index[0] == self.etime
# Test the end index
self.etime += relativedelta(seconds=((self.inc_min * 60) - 1))
assert inst.index[-1] == self.etime
assert self.testInst.index[-1] == self.etime
def test_orbit_next_call_no_loaded_data(self):
""" Test orbit next call without loading data
"""
self.testInst.orbits.next()
assert (self.testInst.index[0] == dt.datetime(2008, 1, 1))
assert (self.testInst.index[-1] == dt.datetime(2008, 1, 1, 0, 38, 59))
def test_orbit_prev_call_no_loaded_data(self):
""" Test orbit previous call without loading data
"""
self.testInst.orbits.prev()
# this isn't a full orbit
assert (self.testInst.index[-1]
== dt.datetime(2010, 12, 31, 23, 59, 59))
assert (self.testInst.index[0] == dt.datetime(2010, 12, 31, 23, 49))
def test_single_orbit_call_orbit_starts_0_UT_using_next(self):
""" Test orbit next call with data
"""
self.testInst.load(date=self.stime)
self.testInst.orbits.next()
self.etime = self.stime + dt.timedelta(seconds=(self.inc_min * 60 - 1))
assert (self.testInst.index[0] == self.stime)
assert (self.testInst.index[-1] == self.etime)
def test_single_orbit_call_orbit_starts_0_UT_using_prev(self):
""" Test orbit prev call with data
"""
self.testInst.load(date=self.stime)
self.testInst.orbits.prev()
self.stime += 14 * relativedelta(minutes=self.inc_min)
self.etime = self.stime + dt.timedelta(seconds=((self.inc_min * 60)
- 1))
assert self.testInst.index[0] == self.stime
assert self.testInst.index[-1] == self.etime
def test_single_orbit_call_orbit_starts_off_0_UT_using_next(self):
""" Test orbit next call with data for previous day
"""
self.stime -= dt.timedelta(days=1)
self.testInst.load(date=self.stime)
self.testInst.orbits.next()
assert (self.testInst.index[0] == dt.datetime(2008, 12, 30, 23, 45))
assert (self.testInst.index[-1]
== (dt.datetime(2008, 12, 30, 23, 45)
+ relativedelta(seconds=(self.inc_min * 60 - 1))))
def test_single_orbit_call_orbit_starts_off_0_UT_using_prev(self):
self.stime -= dt.timedelta(days=1)
self.testInst.load(date=self.stime)
self.testInst.orbits.prev()
assert (self.testInst.index[0]
== (dt.datetime(2009, 1, 1)
- relativedelta(minutes=self.inc_min)))
assert (self.testInst.index[-1]
== (dt.datetime(2009, 1, 1) - relativedelta(seconds=1)))
class TestGeneralOrbitsMLT():
    """General orbit breakdown and iteration tests using an MLT orbit index.

    Subclassed repeatedly below to rerun the same tests with different
    instruments and orbit definitions.
    """
    def setup(self):
        """Runs before every method to create a clean testing setup."""
        self.testInst = pysat.Instrument('pysat', 'testing',
                                         clean_level='clean',
                                         orbit_info={'index': 'mlt'},
                                         update_files=True)
        self.stime = pysat.instruments.pysat_testing._test_dates['']['']
        return
    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.testInst, self.stime
        return
    def test_equality_with_copy(self):
        """Test that copy is the same as original"""
        self.out = self.testInst.orbits.copy()
        assert self.out == self.testInst.orbits
        return
    def test_equality_with_data_with_copy(self):
        """Test that copy is the same as original"""
        # Load data
        self.testInst.load(date=self.stime)
        # Load up an orbit
        self.testInst.orbits[0]
        self.out = self.testInst.orbits.copy()
        assert self.out == self.testInst.orbits
        return
    def test_inequality_different_data(self):
        """Test that equality is false if different data"""
        # Load data
        self.testInst.load(date=self.stime)
        # Load up an orbit
        self.testInst.orbits[0]
        # Make copy
        self.out = self.testInst.orbits.copy()
        # Modify data
        self.out._full_day_data = self.testInst._null_data
        assert self.out != self.testInst.orbits
        return
    def test_inequality_modified_object(self):
        """Test that equality is false if other missing attributes"""
        self.out = self.testInst.orbits.copy()
        # Remove attribute
        del self.out.orbit_index
        assert self.testInst.orbits != self.out
        return
    def test_inequality_reduced_object(self):
        """Test that equality is false if self missing attributes"""
        self.out = self.testInst.orbits.copy()
        self.out.hi_there = 'hi'
        assert self.testInst.orbits != self.out
        return
    def test_inequality_different_type(self):
        """Test that equality is false if different type"""
        assert self.testInst.orbits != self.testInst
        return
    def test_eval_repr(self):
        """Test eval of repr recreates object"""
        # eval and repr don't play nice for custom functions
        if len(self.testInst.custom_functions) != 0:
            self.testInst.custom_clear()
        self.out = eval(self.testInst.orbits.__repr__())
        assert self.out == self.testInst.orbits
        return
    def test_repr_and_copy(self):
        """Test repr consistent with object copy"""
        # Not tested with eval due to issues with datetime
        self.out = self.testInst.orbits.__repr__()
        second_out = self.testInst.orbits.copy().__repr__()
        assert self.out == second_out
        return
    def test_load_orbits_w_empty_data(self):
        """ Test orbit loading outside of the instrument data range
        """
        self.stime -= dt.timedelta(days=365 * 100)
        self.testInst.load(date=self.stime)
        self.testInst.orbits[0]
        with pytest.raises(StopIteration):
            self.testInst.orbits.next()
    def test_less_than_one_orbit_of_data(self):
        """Test successful load with less than one orbit of data
        """
        def filter_data(inst):
            """ Local helper function to reduce available data
            """
            inst.data = inst[0:20]
        self.testInst.custom_attach(filter_data)
        self.testInst.load(date=self.stime)
        self.testInst.orbits.next()
        # a recursion issue has been observed in this area
        # checking for date to limit reintroduction potential
        assert self.testInst.date == self.stime
    def test_less_than_one_orbit_of_data_two_ways(self):
        """Test that orbit access by next() and by index agree on partial data.
        """
        def filter_data(inst):
            # Reduce the loaded data to less than a single orbit
            inst.data = inst[0:5]
        self.testInst.custom_attach(filter_data)
        self.testInst.load(date=self.stime)
        # starting from no orbit calls next loads first orbit
        self.testInst.orbits.next()
        # store comparison data
        saved_data = self.testInst.copy()
        self.testInst.load(date=self.stime)
        self.testInst.orbits[0]
        assert all(self.testInst.data == saved_data.data)
        # a recursion issue has been observed in this area
        # checking for date to limit reintroduction potential
        d1check = self.testInst.date == saved_data.date
        assert d1check
    def test_less_than_one_orbit_of_data_four_ways_two_days(self):
        """ Test successful loading of different partial orbits
        """
        # create situation where the < 1 orbit split across two days
        def filter_data(inst):
            """Local function for breaking up orbits
            """
            if inst.date == dt.datetime(2009, 1, 5):
                inst.data = inst[0:20]
            elif inst.date == dt.datetime(2009, 1, 4):
                inst.data = inst[-20:]
            return
        self.testInst.custom_attach(filter_data)
        self.stime += dt.timedelta(days=3)
        self.testInst.load(date=self.stime)
        # starting from no orbit calls next loads first orbit
        self.testInst.orbits.next()
        # store comparison data
        saved_data = self.testInst.copy()
        self.testInst.load(date=self.stime + dt.timedelta(days=1))
        self.testInst.orbits[0]
        if self.testInst.orbits.num == 1:
            # equivalence only when only one orbit
            # some test settings can violate this assumption
            assert all(self.testInst.data == saved_data.data)
        self.testInst.load(date=self.stime)
        self.testInst.orbits[0]
        assert all(self.testInst.data == saved_data.data)
        self.testInst.load(date=self.stime + dt.timedelta(days=1))
        self.testInst.orbits.prev()
        if self.testInst.orbits.num == 1:
            assert all(self.testInst.data == saved_data.data)
        # a recursion issue has been observed in this area
        # checking for date to limit reintroduction potential
        d1check = self.testInst.date == saved_data.date
        assert d1check
    def test_repeated_orbit_calls_symmetric_single_day_start_with_last(self):
        """Test equal numbers of next and prev calls return to the start orbit.
        """
        self.testInst.load(date=self.stime)
        # start on last orbit of last day
        self.testInst.orbits[0]
        self.testInst.orbits.prev()
        control = self.testInst.copy()
        for j in range(10):
            self.testInst.orbits.next()
        for j in range(10):
            self.testInst.orbits.prev()
        assert all(control.data == self.testInst.data)
    def test_repeated_orbit_calls_symmetric_single_day_0_UT(self):
        """Test next/prev symmetry within a day starting at 00:00 UT.
        """
        self.testInst.load(date=self.stime)
        self.testInst.orbits.next()
        control = self.testInst.copy()
        for j in range(10):
            self.testInst.orbits.next()
        for j in range(10):
            self.testInst.orbits.prev()
        assert all(control.data == self.testInst.data)
    def test_repeated_orbit_calls_symmetric_multi_day_0_UT(self):
        """Test next/prev symmetry across days starting at 00:00 UT.
        """
        self.testInst.load(date=self.stime)
        self.testInst.orbits.next()
        control = self.testInst.copy()
        for j in range(20):
            self.testInst.orbits.next()
        for j in range(20):
            self.testInst.orbits.prev()
        assert all(control.data == self.testInst.data)
    def test_repeated_orbit_calls_symmetric_single_day_off_0_UT(self):
        """ Test successful orbit calls for a day about a time off 00:00 UT
        """
        self.stime -= dt.timedelta(days=1)
        self.testInst.load(date=self.stime)
        self.testInst.orbits.next()
        control = self.testInst.copy()
        for j in range(10):
            self.testInst.orbits.next()
        for j in range(10):
            self.testInst.orbits.prev()
        assert all(control.data == self.testInst.data)
    def test_repeated_orbit_calls_symmetric_multi_day_off_0_UT(self):
        """ Test successful orbit calls for days about a time off 00:00 UT
        """
        self.stime -= dt.timedelta(days=1)
        self.testInst.load(date=self.stime)
        self.testInst.orbits.next()
        control = self.testInst.copy()
        for j in range(20):
            self.testInst.orbits.next()
        for j in range(20):
            self.testInst.orbits.prev()
        assert all(control.data == self.testInst.data)
    def test_repeated_orbit_calls_antisymmetric_multi_day_off_0_UT(self):
        """ Test successful orbit calls for different days about a time off 0 UT
        """
        self.stime -= dt.timedelta(days=1)
        self.testInst.load(date=self.stime)
        self.testInst.orbits.next()
        control = self.testInst.copy()
        for j in range(10):
            self.testInst.orbits.next()
        for j in range(20):
            self.testInst.orbits.prev()
        for j in range(10):
            self.testInst.orbits.next()
        assert all(control.data == self.testInst.data)
    def test_repeated_orbit_calls_antisymmetric_multi_multi_day_off_0_UT(self):
        """ Test successful orbit calls for more days about a time off 0 UT
        """
        self.stime -= dt.timedelta(days=1)
        self.testInst.load(date=self.stime)
        self.testInst.orbits.next()
        control = self.testInst.copy()
        for j in range(20):
            self.testInst.orbits.next()
        for j in range(40):
            self.testInst.orbits.prev()
        for j in range(20):
            self.testInst.orbits.next()
        assert all(control.data == self.testInst.data)
    def test_repeated_orbit_calls_antisymmetric_multi_day_0_UT(self):
        """Test asymmetric next/prev/next sequences starting at 00:00 UT.
        """
        self.testInst.load(date=self.stime)
        self.testInst.orbits.next()
        control = self.testInst.copy()
        for j in range(10):
            self.testInst.orbits.next()
        for j in range(20):
            self.testInst.orbits.prev()
        for j in range(10):
            self.testInst.orbits.next()
        assert all(control.data == self.testInst.data)
    def test_repeated_orbit_calls_antisymmetric_multi_multi_day_0_UT(self):
        """Test longer asymmetric next/prev/next sequences at 00:00 UT.
        """
        self.testInst.load(date=self.stime)
        self.testInst.orbits.next()
        control = self.testInst.copy()
        for j in range(20):
            self.testInst.orbits.next()
        for j in range(40):
            self.testInst.orbits.prev()
        for j in range(20):
            self.testInst.orbits.next()
        assert all(control.data == self.testInst.data)
    def test_repeat_orbit_calls_asym_multi_day_0_UT_long_time_gap(self):
        """Test successful orbit calls for many different days with a long gap
        """
        self.stime += dt.timedelta(days=334)
        self.testInst.load(date=self.stime)
        self.testInst.orbits.next()
        control = self.testInst.copy()
        for j in range(20):
            self.testInst.orbits.next()
        for j in range(20):
            self.testInst.orbits.prev()
        assert all(control.data == self.testInst.data)
    def test_repeat_orbit_calls_asym_multi_day_0_UT_really_long_time_gap(self):
        """Test next/prev symmetry over hundreds of orbits.
        """
        self.testInst.load(date=self.stime)
        self.testInst.orbits.next()
        control = self.testInst.copy()
        for j in range(400):
            self.testInst.orbits.next()
        for j in range(400):
            self.testInst.orbits.prev()
        assert all(control.data == self.testInst.data)
    def test_repeat_orbit_calls_asym_multi_day_0_UT_multiple_time_gaps(self):
        """Test that orbit start times match going forward and backward.
        """
        self.testInst.load(date=self.stime)
        self.testInst.orbits.next()
        control = self.testInst.copy()
        n_time = []
        p_time = []
        for j in range(40):
            n_time.append(self.testInst.index[0])
            self.testInst.orbits.next()
        for j in range(40):
            self.testInst.orbits.prev()
            p_time.append(self.testInst.index[0])
        # Forward start times should mirror the reversed backward start times
        check = np.all(p_time == n_time[::-1])
        assert all(control.data == self.testInst.data) & check
class TestGeneralOrbitsMLTxarray(TestGeneralOrbitsMLT):
    """Repeat the general MLT orbit tests with an xarray test instrument."""
    def setup(self):
        """Runs before every method to create a clean testing setup."""
        inst_kwargs = {'clean_level': 'clean',
                       'orbit_info': {'index': 'mlt'},
                       'update_files': True}
        self.testInst = pysat.Instrument('pysat', 'testing_xarray',
                                         **inst_kwargs)
        self.stime = pysat.instruments.pysat_testing_xarray._test_dates['']['']
    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.stime
        del self.testInst
class TestGeneralOrbitsNonStandardIteration():
    """Create an iteration window that is larger than step size.
    Ensure the overlapping data doesn't end up in the orbit iteration."""
    def setup(self):
        """Runs before every method to create a clean testing setup."""
        self.testInst = pysat.Instrument('pysat', 'testing',
                                         clean_level='clean',
                                         orbit_info={'index': 'mlt'},
                                         update_files=True)
        # 2-day step with a 3-day width: consecutive loads overlap by a day
        self.testInst.bounds = (self.testInst.files.files.index[0],
                                self.testInst.files.files.index[11],
                                '2D', dt.timedelta(days=3))
        # Orbit start/stop times accumulated by the tests
        self.orbit_starts = []
        self.orbit_stops = []
    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.testInst, self.orbit_starts, self.orbit_stops
    def test_no_orbit_overlap_with_overlapping_iteration(self):
        """Ensure error when overlap in iteration data."""
        with pytest.raises(ValueError):
            self.testInst.orbits.next()
        return
    @pytest.mark.parametrize("bounds_type", ['by_date', 'by_file'])
    def test_no_orbit_overlap_with_nonoverlapping_iteration(self, bounds_type):
        """Test no orbit data overlap when overlap in iteration data"""
        if bounds_type == 'by_date':
            # 2-day step and 2-day width: loads no longer overlap
            bounds = (self.testInst.files.files.index[0],
                      self.testInst.files.files.index[11],
                      '2D', dt.timedelta(days=2))
        elif bounds_type == 'by_file':
            bounds = (self.testInst.files[0], self.testInst.files[11], 2, 2)
        self.testInst.bounds = bounds
        for inst in self.testInst.orbits:
            self.orbit_starts.append(inst.index[0])
            self.orbit_stops.append(inst.index[-1])
        self.orbit_starts = pds.Series(self.orbit_starts)
        self.orbit_stops = pds.Series(self.orbit_stops)
        # Monotonic start/stop times imply no duplicated orbit data
        assert self.orbit_starts.is_monotonic_increasing
        assert self.orbit_stops.is_monotonic_increasing
        return
class TestGeneralOrbitsLong(TestGeneralOrbitsMLT):
    """Repeat the general orbit tests using a longitude-based orbit index."""
    def setup(self):
        """Runs before every method to create a clean testing setup."""
        orbit_info = {'index': 'longitude', 'kind': 'longitude'}
        self.testInst = pysat.Instrument('pysat', 'testing',
                                         clean_level='clean',
                                         orbit_info=orbit_info,
                                         update_files=True)
        self.stime = pysat.instruments.pysat_testing._test_dates['']['']
    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.stime
        del self.testInst
class TestGeneralOrbitsLongxarray(TestGeneralOrbitsMLT):
    """Repeat the longitude-index orbit tests with an xarray instrument."""
    def setup(self):
        """Runs before every method to create a clean testing setup."""
        orbit_info = {'index': 'longitude', 'kind': 'longitude'}
        self.testInst = pysat.Instrument('pysat', 'testing_xarray',
                                         clean_level='clean',
                                         orbit_info=orbit_info,
                                         update_files=True)
        self.stime = pysat.instruments.pysat_testing._test_dates['']['']
    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.stime
        del self.testInst
class TestGeneralOrbitsOrbitNumber(TestGeneralOrbitsMLT):
    """Repeat the general orbit tests using an orbit-number index."""
    def setup(self):
        """Runs before every method to create a clean testing setup."""
        orbit_info = {'index': 'orbit_num', 'kind': 'orbit'}
        self.testInst = pysat.Instrument('pysat', 'testing',
                                         clean_level='clean',
                                         orbit_info=orbit_info,
                                         update_files=True)
        self.stime = pysat.instruments.pysat_testing._test_dates['']['']
    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.stime
        del self.testInst
class TestGeneralOrbitsOrbitNumberXarray(TestGeneralOrbitsMLT):
    """Repeat the orbit-number index tests with an xarray instrument."""
    def setup(self):
        """Runs before every method to create a clean testing setup."""
        orbit_info = {'index': 'orbit_num', 'kind': 'orbit'}
        self.testInst = pysat.Instrument('pysat', 'testing_xarray',
                                         clean_level='clean',
                                         orbit_info=orbit_info,
                                         update_files=True)
        self.stime = pysat.instruments.pysat_testing_xarray._test_dates['']['']
    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.stime
        del self.testInst
class TestGeneralOrbitsLatitude(TestGeneralOrbitsMLT):
    """Repeat the general orbit tests using a polar (latitude) index."""
    def setup(self):
        """Runs before every method to create a clean testing setup."""
        orbit_info = {'index': 'latitude', 'kind': 'polar'}
        self.testInst = pysat.Instrument('pysat', 'testing',
                                         clean_level='clean',
                                         orbit_info=orbit_info,
                                         update_files=True)
        self.stime = pysat.instruments.pysat_testing._test_dates['']['']
    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.stime
        del self.testInst
class TestGeneralOrbitsLatitudeXarray(TestGeneralOrbitsMLT):
    """Repeat the polar (latitude) index tests with an xarray instrument."""
    def setup(self):
        """Runs before every method to create a clean testing setup."""
        orbit_info = {'index': 'latitude', 'kind': 'polar'}
        self.testInst = pysat.Instrument('pysat', 'testing_xarray',
                                         clean_level='clean',
                                         orbit_info=orbit_info,
                                         update_files=True)
        self.stime = pysat.instruments.pysat_testing_xarray._test_dates['']['']
    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.stime
        del self.testInst
def filter_data(inst):
    """Remove data from instrument, simulating gaps"""
    # (start, stop) windows of data to excise from the loaded instrument
    gap_windows = ((dt.datetime(2009, 1, 1, 1, 37),
                    dt.datetime(2009, 1, 1, 3, 14)),
                   (dt.datetime(2009, 1, 1, 10), dt.datetime(2009, 1, 1, 12)),
                   (dt.datetime(2009, 1, 1, 22), dt.datetime(2009, 1, 2, 2)),
                   (dt.datetime(2009, 1, 13), dt.datetime(2009, 1, 15)),
                   (dt.datetime(2009, 1, 20, 1), dt.datetime(2009, 1, 25, 23)),
                   (dt.datetime(2009, 1, 25, 23, 30),
                    dt.datetime(2009, 1, 26, 3)))
    for start, stop in gap_windows:
        # Keep only samples strictly outside the [start, stop] window
        keep, = np.where((inst.index < start) | (inst.index > stop))
        inst.data = inst[keep]
def filter_data2(inst, times=None):
    """Remove data from instrument inside given time windows, simulating gaps.

    Parameters
    ----------
    inst : pysat.Instrument
        Instrument whose loaded data is reduced in place.
    times : list or NoneType
        List of [start, stop] datetime pairs whose interior samples are
        removed from the data. If None, the data is left unchanged.
        (default=None)

    """
    # Guard: iterating over the default None would raise TypeError; treat
    # it as "no gaps requested" and leave the data untouched.
    if times is None:
        return
    for time in times:
        # Keep only samples strictly outside the [start, stop] window
        idx, = np.where((inst.index > time[1]) | (inst.index < time[0]))
        inst.data = inst[idx]
class TestOrbitsGappyData(TestGeneralOrbitsMLT):
    """Repeat the general MLT orbit tests with artificial data gaps."""
    def setup(self):
        """Runs before every method to create a clean testing setup."""
        inst_kwargs = {'clean_level': 'clean',
                       'orbit_info': {'index': 'mlt'},
                       'update_files': True}
        self.testInst = pysat.Instrument('pysat', 'testing', **inst_kwargs)
        # Introduce gaps into the loaded data
        self.testInst.custom_attach(filter_data)
        self.stime = pysat.instruments.pysat_testing._test_dates['']['']
    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.stime
        del self.testInst
class TestOrbitsGappyDataXarray(TestGeneralOrbitsMLT):
    """Repeat the gappy-data MLT orbit tests with an xarray instrument."""
    def setup(self):
        """Runs before every method to create a clean testing setup."""
        inst_kwargs = {'clean_level': 'clean',
                       'orbit_info': {'index': 'mlt'},
                       'update_files': True}
        self.testInst = pysat.Instrument('pysat', 'testing_xarray',
                                         **inst_kwargs)
        # Introduce gaps into the loaded data
        self.testInst.custom_attach(filter_data)
        self.stime = pysat.instruments.pysat_testing._test_dates['']['']
    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.stime
        del self.testInst
class TestOrbitsGappyData2(TestGeneralOrbitsMLT):
    """Repeat the general MLT orbit tests with parameterized data gaps."""
    def setup(self):
        """Runs before every method to create a clean testing setup."""
        self.testInst = pysat.Instrument('pysat', 'testing',
                                         clean_level='clean',
                                         orbit_info={'index': 'mlt'})
        self.stime = pysat.instruments.pysat_testing._test_dates['']['']
        # Two fixed gaps followed by a gap of growing length on each day
        gap_times = [[dt.datetime(2008, 12, 31, 4),
                      dt.datetime(2008, 12, 31, 5, 37)],
                     [dt.datetime(2009, 1, 1),
                      dt.datetime(2009, 1, 1, 1, 37)]]
        for shift in np.arange(38):
            day = dt.datetime(2009, 1, 2) + dt.timedelta(days=int(shift))
            stop = (day
                    + dt.timedelta(hours=1, minutes=37, seconds=int(shift))
                    - dt.timedelta(seconds=20))
            gap_times.append([day, stop])
        self.testInst.custom_attach(filter_data2, kwargs={'times': gap_times})
    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.stime
        del self.testInst
class TestOrbitsGappyData2Xarray(TestGeneralOrbitsMLT):
    """Repeat the parameterized-gap MLT tests with an xarray instrument."""
    def setup(self):
        """Runs before every method to create a clean testing setup."""
        self.testInst = pysat.Instrument('pysat', 'testing_xarray',
                                         clean_level='clean',
                                         orbit_info={'index': 'mlt'})
        self.stime = pysat.instruments.pysat_testing._test_dates['']['']
        # Two fixed gaps followed by a gap of growing length on each day
        gap_times = [[dt.datetime(2008, 12, 31, 4),
                      dt.datetime(2008, 12, 31, 5, 37)],
                     [dt.datetime(2009, 1, 1),
                      dt.datetime(2009, 1, 1, 1, 37)]]
        for shift in np.arange(38):
            day = dt.datetime(2009, 1, 2) + dt.timedelta(days=int(shift))
            stop = (day
                    + dt.timedelta(hours=1, minutes=37, seconds=int(shift))
                    - dt.timedelta(seconds=20))
            gap_times.append([day, stop])
        self.testInst.custom_attach(filter_data2, kwargs={'times': gap_times})
    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.stime
        del self.testInst
class TestOrbitsGappyLongData(TestGeneralOrbitsMLT):
    """Repeat the longitude-index orbit tests with artificial data gaps."""
    def setup(self):
        """Runs before every method to create a clean testing setup."""
        orbit_info = {'index': 'longitude', 'kind': 'longitude'}
        self.testInst = pysat.Instrument('pysat', 'testing',
                                         clean_level='clean',
                                         orbit_info=orbit_info)
        # Introduce gaps into the loaded data
        self.testInst.custom_attach(filter_data)
        self.stime = pysat.instruments.pysat_testing._test_dates['']['']
    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.stime
        del self.testInst
class TestOrbitsGappyLongDataXarray(TestGeneralOrbitsMLT):
    """Repeat the gappy longitude-index tests with an xarray instrument."""
    def setup(self):
        """Runs before every method to create a clean testing setup."""
        orbit_info = {'index': 'longitude', 'kind': 'longitude'}
        self.testInst = pysat.Instrument('pysat', 'testing_xarray',
                                         clean_level='clean',
                                         orbit_info=orbit_info)
        # Introduce gaps into the loaded data
        self.testInst.custom_attach(filter_data)
        self.stime = pysat.instruments.pysat_testing._test_dates['']['']
    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.stime
        del self.testInst
class TestOrbitsGappyOrbitNumData(TestGeneralOrbitsMLT):
    """Repeat the orbit-number index tests with artificial data gaps."""
    def setup(self):
        """Runs before every method to create a clean testing setup."""
        orbit_info = {'index': 'orbit_num', 'kind': 'orbit'}
        self.testInst = pysat.Instrument('pysat', 'testing',
                                         clean_level='clean',
                                         orbit_info=orbit_info)
        # Introduce gaps into the loaded data
        self.testInst.custom_attach(filter_data)
        self.stime = pysat.instruments.pysat_testing._test_dates['']['']
    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.stime
        del self.testInst
class TestOrbitsGappyOrbitNumDataXarray(TestGeneralOrbitsMLT):
    """Repeat the gappy orbit-number tests with an xarray instrument."""
    def setup(self):
        """Runs before every method to create a clean testing setup."""
        orbit_info = {'index': 'orbit_num', 'kind': 'orbit'}
        self.testInst = pysat.Instrument('pysat', 'testing_xarray',
                                         clean_level='clean',
                                         orbit_info=orbit_info)
        # Introduce gaps into the loaded data
        self.testInst.custom_attach(filter_data)
        self.stime = pysat.instruments.pysat_testing._test_dates['']['']
    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.stime
        del self.testInst
class TestOrbitsGappyOrbitLatData(TestGeneralOrbitsMLT):
    """Repeat the polar (latitude) index tests with artificial data gaps."""
    def setup(self):
        """Runs before every method to create a clean testing setup."""
        orbit_info = {'index': 'latitude', 'kind': 'polar'}
        self.testInst = pysat.Instrument('pysat', 'testing',
                                         clean_level='clean',
                                         orbit_info=orbit_info)
        # Introduce gaps into the loaded data
        self.testInst.custom_attach(filter_data)
        self.stime = pysat.instruments.pysat_testing._test_dates['']['']
    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.stime
        del self.testInst
class TestOrbitsGappyOrbitLatDataXarray(TestGeneralOrbitsMLT):
    """Repeat the gappy polar (latitude) tests with an xarray instrument."""
    def setup(self):
        """Runs before every method to create a clean testing setup."""
        orbit_info = {'index': 'latitude', 'kind': 'polar'}
        self.testInst = pysat.Instrument('pysat', 'testing_xarray',
                                         clean_level='clean',
                                         orbit_info=orbit_info)
        # Introduce gaps into the loaded data
        self.testInst.custom_attach(filter_data)
        self.stime = pysat.instruments.pysat_testing._test_dates['']['']
    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.stime
        del self.testInst
| [
"datetime.datetime",
"pandas.Series",
"numpy.ceil",
"pysat.Instrument",
"dateutil.relativedelta.relativedelta",
"numpy.where",
"datetime.timedelta",
"pytest.mark.parametrize",
"pytest.raises",
"numpy.all",
"numpy.arange"
] | [((1086, 1343), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""info"""', "[{'index': 'magnetic local time', 'kind': 'longitude'}, None, {'index':\n 'magnetic local time', 'kind': 'lt'}, {'index': 'magnetic local time',\n 'kind': 'polar'}, {'index': 'magnetic local time', 'kind': 'orbit'}]"], {}), "('info', [{'index': 'magnetic local time', 'kind':\n 'longitude'}, None, {'index': 'magnetic local time', 'kind': 'lt'}, {\n 'index': 'magnetic local time', 'kind': 'polar'}, {'index':\n 'magnetic local time', 'kind': 'orbit'}])\n", (1109, 1343), False, 'import pytest\n'), ((2032, 2282), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""info"""', "[{'index': 'magnetic local time', 'kind': 'polar'}, {'index':\n 'magnetic local time', 'kind': 'orbit'}, {'index':\n 'magnetic local time', 'kind': 'longitude'}, {'index':\n 'magnetic local time', 'kind': 'lt'}]"], {}), "('info', [{'index': 'magnetic local time', 'kind':\n 'polar'}, {'index': 'magnetic local time', 'kind': 'orbit'}, {'index':\n 'magnetic local time', 'kind': 'longitude'}, {'index':\n 'magnetic local time', 'kind': 'lt'}])\n", (2055, 2282), False, 'import pytest\n'), ((4474, 4530), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""orbit_inc"""', '[0, 1, -1, -2, 14]'], {}), "('orbit_inc', [0, 1, -1, -2, 14])\n", (4497, 4530), False, 'import pytest\n'), ((5319, 5407), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""orbit_ind,raise_err"""', '[(17, Exception), (None, TypeError)]'], {}), "('orbit_ind,raise_err', [(17, Exception), (None,\n TypeError)])\n", (5342, 5407), False, 'import pytest\n'), ((23497, 23559), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bounds_type"""', "['by_date', 'by_file']"], {}), "('bounds_type', ['by_date', 'by_file'])\n", (23520, 23559), False, 'import pytest\n'), ((615, 638), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(1)'], {}), '(2009, 1, 1)\n', (626, 638), True, 'import datetime as dt\n'), ((1851, 1900), 
'pysat.Instrument', 'pysat.Instrument', (['*self.in_args'], {}), '(*self.in_args, **self.in_kwargs)\n', (1867, 1900), False, 'import pysat\n'), ((2763, 2812), 'pysat.Instrument', 'pysat.Instrument', (['*self.in_args'], {}), '(*self.in_args, **self.in_kwargs)\n', (2779, 2812), False, 'import pysat\n'), ((3212, 3261), 'pysat.Instrument', 'pysat.Instrument', (['*self.in_args'], {}), '(*self.in_args, **self.in_kwargs)\n', (3228, 3261), False, 'import pysat\n'), ((3539, 3588), 'pysat.Instrument', 'pysat.Instrument', (['*self.in_args'], {}), '(*self.in_args, **self.in_kwargs)\n', (3555, 3588), False, 'import pysat\n'), ((3949, 4059), 'pysat.Instrument', 'pysat.Instrument', (['"""pysat"""', '"""testing"""'], {'clean_level': '"""clean"""', 'orbit_info': "{'index': 'mlt'}", 'update_files': '(True)'}), "('pysat', 'testing', clean_level='clean', orbit_info={\n 'index': 'mlt'}, update_files=True)\n", (3965, 4059), False, 'import pysat\n'), ((5978, 5991), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (5987, 5991), True, 'import numpy as np\n'), ((6279, 6309), 'numpy.all', 'np.all', (['(test_vals == true_vals)'], {}), '(test_vals == true_vals)\n', (6285, 6309), True, 'import numpy as np\n'), ((8782, 8802), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (8794, 8802), True, 'import datetime as dt\n'), ((9219, 9239), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (9231, 9239), True, 'import datetime as dt\n'), ((9725, 9835), 'pysat.Instrument', 'pysat.Instrument', (['"""pysat"""', '"""testing"""'], {'clean_level': '"""clean"""', 'orbit_info': "{'index': 'mlt'}", 'update_files': '(True)'}), "('pysat', 'testing', clean_level='clean', orbit_info={\n 'index': 'mlt'}, update_files=True)\n", (9741, 9835), False, 'import pysat\n'), ((12712, 12740), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(365 * 100)'}), '(days=365 * 100)\n', (12724, 12740), True, 'import datetime as dt\n'), ((14781, 14801), 'datetime.timedelta', 
'dt.timedelta', ([], {'days': '(3)'}), '(days=3)\n', (14793, 14801), True, 'import datetime as dt\n'), ((17278, 17298), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (17290, 17298), True, 'import datetime as dt\n'), ((17789, 17809), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (17801, 17809), True, 'import datetime as dt\n'), ((18310, 18330), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (18322, 18330), True, 'import datetime as dt\n'), ((18900, 18920), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (18912, 18920), True, 'import datetime as dt\n'), ((20390, 20412), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(334)'}), '(days=334)\n', (20402, 20412), True, 'import datetime as dt\n'), ((21608, 21638), 'numpy.all', 'np.all', (['(p_time == n_time[::-1])'], {}), '(p_time == n_time[::-1])\n', (21614, 21638), True, 'import numpy as np\n'), ((21877, 21994), 'pysat.Instrument', 'pysat.Instrument', (['"""pysat"""', '"""testing_xarray"""'], {'clean_level': '"""clean"""', 'orbit_info': "{'index': 'mlt'}", 'update_files': '(True)'}), "('pysat', 'testing_xarray', clean_level='clean', orbit_info\n ={'index': 'mlt'}, update_files=True)\n", (21893, 21994), False, 'import pysat\n'), ((22629, 22739), 'pysat.Instrument', 'pysat.Instrument', (['"""pysat"""', '"""testing"""'], {'clean_level': '"""clean"""', 'orbit_info': "{'index': 'mlt'}", 'update_files': '(True)'}), "('pysat', 'testing', clean_level='clean', orbit_info={\n 'index': 'mlt'}, update_files=True)\n", (22645, 22739), False, 'import pysat\n'), ((24247, 24276), 'pandas.Series', 'pds.Series', (['self.orbit_starts'], {}), '(self.orbit_starts)\n', (24257, 24276), True, 'import pandas as pds\n'), ((24304, 24332), 'pandas.Series', 'pds.Series', (['self.orbit_stops'], {}), '(self.orbit_stops)\n', (24314, 24332), True, 'import pandas as pds\n'), ((24632, 24769), 'pysat.Instrument', 'pysat.Instrument', 
(['"""pysat"""', '"""testing"""'], {'clean_level': '"""clean"""', 'orbit_info': "{'index': 'longitude', 'kind': 'longitude'}", 'update_files': '(True)'}), "('pysat', 'testing', clean_level='clean', orbit_info={\n 'index': 'longitude', 'kind': 'longitude'}, update_files=True)\n", (24648, 24769), False, 'import pysat\n'), ((25322, 25466), 'pysat.Instrument', 'pysat.Instrument', (['"""pysat"""', '"""testing_xarray"""'], {'clean_level': '"""clean"""', 'orbit_info': "{'index': 'longitude', 'kind': 'longitude'}", 'update_files': '(True)'}), "('pysat', 'testing_xarray', clean_level='clean', orbit_info\n ={'index': 'longitude', 'kind': 'longitude'}, update_files=True)\n", (25338, 25466), False, 'import pysat\n'), ((26020, 26153), 'pysat.Instrument', 'pysat.Instrument', (['"""pysat"""', '"""testing"""'], {'clean_level': '"""clean"""', 'orbit_info': "{'index': 'orbit_num', 'kind': 'orbit'}", 'update_files': '(True)'}), "('pysat', 'testing', clean_level='clean', orbit_info={\n 'index': 'orbit_num', 'kind': 'orbit'}, update_files=True)\n", (26036, 26153), False, 'import pysat\n'), ((26713, 26853), 'pysat.Instrument', 'pysat.Instrument', (['"""pysat"""', '"""testing_xarray"""'], {'clean_level': '"""clean"""', 'orbit_info': "{'index': 'orbit_num', 'kind': 'orbit'}", 'update_files': '(True)'}), "('pysat', 'testing_xarray', clean_level='clean', orbit_info\n ={'index': 'orbit_num', 'kind': 'orbit'}, update_files=True)\n", (26729, 26853), False, 'import pysat\n'), ((27411, 27543), 'pysat.Instrument', 'pysat.Instrument', (['"""pysat"""', '"""testing"""'], {'clean_level': '"""clean"""', 'orbit_info': "{'index': 'latitude', 'kind': 'polar'}", 'update_files': '(True)'}), "('pysat', 'testing', clean_level='clean', orbit_info={\n 'index': 'latitude', 'kind': 'polar'}, update_files=True)\n", (27427, 27543), False, 'import pysat\n'), ((28100, 28239), 'pysat.Instrument', 'pysat.Instrument', (['"""pysat"""', '"""testing_xarray"""'], {'clean_level': '"""clean"""', 'orbit_info': "{'index': 
'latitude', 'kind': 'polar'}", 'update_files': '(True)'}), "('pysat', 'testing_xarray', clean_level='clean', orbit_info\n ={'index': 'latitude', 'kind': 'polar'}, update_files=True)\n", (28116, 28239), False, 'import pysat\n'), ((29198, 29255), 'numpy.where', 'np.where', (['((inst.index > time[1]) | (inst.index < time[0]))'], {}), '((inst.index > time[1]) | (inst.index < time[0]))\n', (29206, 29255), True, 'import numpy as np\n'), ((29418, 29475), 'numpy.where', 'np.where', (['((inst.index > time[1]) | (inst.index < time[0]))'], {}), '((inst.index > time[1]) | (inst.index < time[0]))\n', (29426, 29475), True, 'import numpy as np\n'), ((29674, 29784), 'pysat.Instrument', 'pysat.Instrument', (['"""pysat"""', '"""testing"""'], {'clean_level': '"""clean"""', 'orbit_info': "{'index': 'mlt'}", 'update_files': '(True)'}), "('pysat', 'testing', clean_level='clean', orbit_info={\n 'index': 'mlt'}, update_files=True)\n", (29690, 29784), False, 'import pysat\n'), ((30330, 30447), 'pysat.Instrument', 'pysat.Instrument', (['"""pysat"""', '"""testing_xarray"""'], {'clean_level': '"""clean"""', 'orbit_info': "{'index': 'mlt'}", 'update_files': '(True)'}), "('pysat', 'testing_xarray', clean_level='clean', orbit_info\n ={'index': 'mlt'}, update_files=True)\n", (30346, 30447), False, 'import pysat\n'), ((30988, 31079), 'pysat.Instrument', 'pysat.Instrument', (['"""pysat"""', '"""testing"""'], {'clean_level': '"""clean"""', 'orbit_info': "{'index': 'mlt'}"}), "('pysat', 'testing', clean_level='clean', orbit_info={\n 'index': 'mlt'})\n", (31004, 31079), False, 'import pysat\n'), ((31448, 31461), 'numpy.arange', 'np.arange', (['(38)'], {}), '(38)\n', (31457, 31461), True, 'import numpy as np\n'), ((32157, 32255), 'pysat.Instrument', 'pysat.Instrument', (['"""pysat"""', '"""testing_xarray"""'], {'clean_level': '"""clean"""', 'orbit_info': "{'index': 'mlt'}"}), "('pysat', 'testing_xarray', clean_level='clean', orbit_info\n ={'index': 'mlt'})\n", (32173, 32255), False, 'import pysat\n'), 
((32624, 32637), 'numpy.arange', 'np.arange', (['(38)'], {}), '(38)\n', (32633, 32637), True, 'import numpy as np\n'), ((33330, 33448), 'pysat.Instrument', 'pysat.Instrument', (['"""pysat"""', '"""testing"""'], {'clean_level': '"""clean"""', 'orbit_info': "{'index': 'longitude', 'kind': 'longitude'}"}), "('pysat', 'testing', clean_level='clean', orbit_info={\n 'index': 'longitude', 'kind': 'longitude'})\n", (33346, 33448), False, 'import pysat\n'), ((34011, 34136), 'pysat.Instrument', 'pysat.Instrument', (['"""pysat"""', '"""testing_xarray"""'], {'clean_level': '"""clean"""', 'orbit_info': "{'index': 'longitude', 'kind': 'longitude'}"}), "('pysat', 'testing_xarray', clean_level='clean', orbit_info\n ={'index': 'longitude', 'kind': 'longitude'})\n", (34027, 34136), False, 'import pysat\n'), ((34696, 34810), 'pysat.Instrument', 'pysat.Instrument', (['"""pysat"""', '"""testing"""'], {'clean_level': '"""clean"""', 'orbit_info': "{'index': 'orbit_num', 'kind': 'orbit'}"}), "('pysat', 'testing', clean_level='clean', orbit_info={\n 'index': 'orbit_num', 'kind': 'orbit'})\n", (34712, 34810), False, 'import pysat\n'), ((35376, 35497), 'pysat.Instrument', 'pysat.Instrument', (['"""pysat"""', '"""testing_xarray"""'], {'clean_level': '"""clean"""', 'orbit_info': "{'index': 'orbit_num', 'kind': 'orbit'}"}), "('pysat', 'testing_xarray', clean_level='clean', orbit_info\n ={'index': 'orbit_num', 'kind': 'orbit'})\n", (35392, 35497), False, 'import pysat\n'), ((36057, 36170), 'pysat.Instrument', 'pysat.Instrument', (['"""pysat"""', '"""testing"""'], {'clean_level': '"""clean"""', 'orbit_info': "{'index': 'latitude', 'kind': 'polar'}"}), "('pysat', 'testing', clean_level='clean', orbit_info={\n 'index': 'latitude', 'kind': 'polar'})\n", (36073, 36170), False, 'import pysat\n'), ((36737, 36857), 'pysat.Instrument', 'pysat.Instrument', (['"""pysat"""', '"""testing_xarray"""'], {'clean_level': '"""clean"""', 'orbit_info': "{'index': 'latitude', 'kind': 'polar'}"}), "('pysat', 
'testing_xarray', clean_level='clean', orbit_info\n ={'index': 'latitude', 'kind': 'polar'})\n", (36753, 36857), False, 'import pysat\n'), ((975, 1000), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (988, 1000), False, 'import pytest\n'), ((1030, 1079), 'pysat.Instrument', 'pysat.Instrument', (['*self.in_args'], {}), '(*self.in_args, **self.in_kwargs)\n', (1046, 1079), False, 'import pysat\n'), ((1959, 1984), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1972, 1984), False, 'import pytest\n'), ((2979, 3004), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2992, 3004), False, 'import pytest\n'), ((4849, 4895), 'datetime.timedelta', 'dt.timedelta', ([], {'minutes': '(orbit_inc * self.inc_min)'}), '(minutes=orbit_inc * self.inc_min)\n', (4861, 4895), True, 'import datetime as dt\n'), ((5133, 5176), 'datetime.timedelta', 'dt.timedelta', ([], {'seconds': '(self.inc_min * 60 - 1)'}), '(seconds=self.inc_min * 60 - 1)\n', (5145, 5176), True, 'import datetime as dt\n'), ((5642, 5666), 'pytest.raises', 'pytest.raises', (['raise_err'], {}), '(raise_err)\n', (5655, 5666), False, 'import pytest\n'), ((6894, 6938), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'seconds': '(self.inc_min * 60 - 1)'}), '(seconds=self.inc_min * 60 - 1)\n', (6907, 6938), False, 'from dateutil.relativedelta import relativedelta\n'), ((7244, 7267), 'datetime.datetime', 'dt.datetime', (['(2008)', '(1)', '(1)'], {}), '(2008, 1, 1)\n', (7255, 7267), True, 'import datetime as dt\n'), ((7312, 7346), 'datetime.datetime', 'dt.datetime', (['(2008)', '(1)', '(1)', '(0)', '(38)', '(59)'], {}), '(2008, 1, 1, 0, 38, 59)\n', (7323, 7346), True, 'import datetime as dt\n'), ((7599, 7636), 'datetime.datetime', 'dt.datetime', (['(2010)', '(12)', '(31)', '(23)', '(59)', '(59)'], {}), '(2010, 12, 31, 23, 59, 59)\n', (7610, 7636), True, 'import datetime as dt\n'), ((7680, 7713), 'datetime.datetime', 'dt.datetime', 
(['(2010)', '(12)', '(31)', '(23)', '(49)'], {}), '(2010, 12, 31, 23, 49)\n', (7691, 7713), True, 'import datetime as dt\n'), ((7952, 7995), 'datetime.timedelta', 'dt.timedelta', ([], {'seconds': '(self.inc_min * 60 - 1)'}), '(seconds=self.inc_min * 60 - 1)\n', (7964, 7995), True, 'import datetime as dt\n'), ((8337, 8372), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'minutes': 'self.inc_min'}), '(minutes=self.inc_min)\n', (8350, 8372), False, 'from dateutil.relativedelta import relativedelta\n'), ((8407, 8450), 'datetime.timedelta', 'dt.timedelta', ([], {'seconds': '(self.inc_min * 60 - 1)'}), '(seconds=self.inc_min * 60 - 1)\n', (8419, 8450), True, 'import datetime as dt\n'), ((8925, 8958), 'datetime.datetime', 'dt.datetime', (['(2008)', '(12)', '(30)', '(23)', '(45)'], {}), '(2008, 12, 30, 23, 45)\n', (8936, 8958), True, 'import datetime as dt\n'), ((12830, 12858), 'pytest.raises', 'pytest.raises', (['StopIteration'], {}), '(StopIteration)\n', (12843, 12858), False, 'import pytest\n'), ((23033, 23053), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(3)'}), '(days=3)\n', (23045, 23053), True, 'import datetime as dt\n'), ((23409, 23434), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (23422, 23434), False, 'import pytest\n'), ((28717, 28747), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(1)', '(1)', '(37)'], {}), '(2009, 1, 1, 1, 37)\n', (28728, 28747), True, 'import datetime as dt\n'), ((28749, 28779), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(1)', '(3)', '(14)'], {}), '(2009, 1, 1, 3, 14)\n', (28760, 28779), True, 'import datetime as dt\n'), ((28796, 28823), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(1)', '(10)'], {}), '(2009, 1, 1, 10)\n', (28807, 28823), True, 'import datetime as dt\n'), ((28825, 28852), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(1)', '(12)'], {}), '(2009, 1, 1, 12)\n', (28836, 28852), True, 'import datetime as dt\n'), ((28869, 28896), 
'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(1)', '(22)'], {}), '(2009, 1, 1, 22)\n', (28880, 28896), True, 'import datetime as dt\n'), ((28898, 28924), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(2)', '(2)'], {}), '(2009, 1, 2, 2)\n', (28909, 28924), True, 'import datetime as dt\n'), ((28941, 28965), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(13)'], {}), '(2009, 1, 13)\n', (28952, 28965), True, 'import datetime as dt\n'), ((28967, 28991), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(15)'], {}), '(2009, 1, 15)\n', (28978, 28991), True, 'import datetime as dt\n'), ((29008, 29035), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(20)', '(1)'], {}), '(2009, 1, 20, 1)\n', (29019, 29035), True, 'import datetime as dt\n'), ((29037, 29065), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(25)', '(23)'], {}), '(2009, 1, 25, 23)\n', (29048, 29065), True, 'import datetime as dt\n'), ((29082, 29114), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(25)', '(23)', '(30)'], {}), '(2009, 1, 25, 23, 30)\n', (29093, 29114), True, 'import datetime as dt\n'), ((29116, 29143), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(26)', '(3)'], {}), '(2009, 1, 26, 3)\n', (29127, 29143), True, 'import datetime as dt\n'), ((9020, 9053), 'datetime.datetime', 'dt.datetime', (['(2008)', '(12)', '(30)', '(23)', '(45)'], {}), '(2008, 12, 30, 23, 45)\n', (9031, 9053), True, 'import datetime as dt\n'), ((9076, 9120), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'seconds': '(self.inc_min * 60 - 1)'}), '(seconds=self.inc_min * 60 - 1)\n', (9089, 9120), False, 'from dateutil.relativedelta import relativedelta\n'), ((9379, 9402), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(1)'], {}), '(2009, 1, 1)\n', (9390, 9402), True, 'import datetime as dt\n'), ((9425, 9460), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'minutes': 'self.inc_min'}), '(minutes=self.inc_min)\n', (9438, 
9460), False, 'from dateutil.relativedelta import relativedelta\n'), ((9523, 9546), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(1)'], {}), '(2009, 1, 1)\n', (9534, 9546), True, 'import datetime as dt\n'), ((9549, 9573), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'seconds': '(1)'}), '(seconds=1)\n', (9562, 9573), False, 'from dateutil.relativedelta import relativedelta\n'), ((14532, 14555), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(5)'], {}), '(2009, 1, 5)\n', (14543, 14555), True, 'import datetime as dt\n'), ((23895, 23915), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(2)'}), '(days=2)\n', (23907, 23915), True, 'import datetime as dt\n'), ((31248, 31276), 'datetime.datetime', 'dt.datetime', (['(2008)', '(12)', '(31)', '(4)'], {}), '(2008, 12, 31, 4)\n', (31259, 31276), True, 'import datetime as dt\n'), ((31296, 31328), 'datetime.datetime', 'dt.datetime', (['(2008)', '(12)', '(31)', '(5)', '(37)'], {}), '(2008, 12, 31, 5, 37)\n', (31307, 31328), True, 'import datetime as dt\n'), ((31349, 31372), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(1)'], {}), '(2009, 1, 1)\n', (31360, 31372), True, 'import datetime as dt\n'), ((31392, 31422), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(1)', '(1)', '(37)'], {}), '(2009, 1, 1, 1, 37)\n', (31403, 31422), True, 'import datetime as dt\n'), ((31482, 31505), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(2)'], {}), '(2009, 1, 2)\n', (31493, 31505), True, 'import datetime as dt\n'), ((32424, 32452), 'datetime.datetime', 'dt.datetime', (['(2008)', '(12)', '(31)', '(4)'], {}), '(2008, 12, 31, 4)\n', (32435, 32452), True, 'import datetime as dt\n'), ((32472, 32504), 'datetime.datetime', 'dt.datetime', (['(2008)', '(12)', '(31)', '(5)', '(37)'], {}), '(2008, 12, 31, 5, 37)\n', (32483, 32504), True, 'import datetime as dt\n'), ((32525, 32548), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(1)'], {}), '(2009, 1, 1)\n', (32536, 32548), 
True, 'import datetime as dt\n'), ((32568, 32598), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(1)', '(1)', '(37)'], {}), '(2009, 1, 1, 1, 37)\n', (32579, 32598), True, 'import datetime as dt\n'), ((32658, 32681), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(2)'], {}), '(2009, 1, 2)\n', (32669, 32681), True, 'import datetime as dt\n'), ((6695, 6730), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'minutes': 'self.inc_min'}), '(minutes=self.inc_min)\n', (6708, 6730), False, 'from dateutil.relativedelta import relativedelta\n'), ((14626, 14649), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(4)'], {}), '(2009, 1, 4)\n', (14637, 14649), True, 'import datetime as dt\n'), ((15065, 15085), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (15077, 15085), True, 'import datetime as dt\n'), ((15516, 15536), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (15528, 15536), True, 'import datetime as dt\n'), ((31748, 31772), 'datetime.timedelta', 'dt.timedelta', ([], {'seconds': '(20)'}), '(seconds=20)\n', (31760, 31772), True, 'import datetime as dt\n'), ((32924, 32948), 'datetime.timedelta', 'dt.timedelta', ([], {'seconds': '(20)'}), '(seconds=20)\n', (32936, 32948), True, 'import datetime as dt\n'), ((5012, 5042), 'numpy.ceil', 'np.ceil', (['(1440.0 / self.inc_min)'], {}), '(1440.0 / self.inc_min)\n', (5019, 5042), True, 'import numpy as np\n')] |
import numpy as np
from . import opt_abc as opt
class BsmNdMc(opt.OptMaABC):
    """
    Monte-Carlo simulation of multiasset (N-d) BSM (geometric Brownian Motion).

    Paths are simulated with unit initial price (the actual spot is multiplied
    in `price_european`) under the drift ``intr - divr``.

    Examples:
        >>> import pyfeng as pf
        >>> spot = np.ones(4)*100
        >>> sigma = np.ones(4)*0.4
        >>> texp = 5
        >>> payoff = lambda x: np.fmax(np.mean(x,axis=1) - strike, 0) # Basket option
        >>> strikes = np.arange(80, 121, 10)
        >>> m = pf.BsmNdMc(sigma, cor=0.5, rn_seed=1234)
        >>> m.simulate(n_path=20000, tobs=[texp])
        >>> p = []
        >>> for strike in strikes:
        >>>     p.append(m.price_european(spot, texp, payoff))
        >>> np.array(p)
        array([36.31612946, 31.80861014, 27.91269315, 24.55319506, 21.62677625])
    """

    # default parameter values; presumably overwritten via the parent class
    # __init__ — TODO confirm against OptMaABC
    spot = np.ones(2)
    sigma = np.ones(2) * 0.1

    # MC params
    rn_seed = None  # seed forwarded to np.random.default_rng
    rng = None  # np.random.Generator, created in __init__
    antithetic = True  # generate paths in antithetic (mirrored) pairs

    # tobs and path stored in the class by simulate(store=True)
    n_path = 0
    path = tobs = None

    def __init__(self, sigma, cor=None, intr=0.0, divr=0.0, rn_seed=None, antithetic=True):
        """
        Args:
            sigma: volatilities of the assets
            cor: correlation between assets; interpretation is handled by the
                parent class (NOTE(review): presumably scalar or matrix — confirm)
            intr: interest (discounting) rate
            divr: dividend rate
            rn_seed: random number seed
            antithetic: if True (default), use antithetic variates
        """
        self.rn_seed = rn_seed
        self.rng = np.random.default_rng(rn_seed)
        self.antithetic = antithetic
        super().__init__(sigma, cor=cor, intr=intr, divr=divr, is_fwd=False)

    def _bm_incr(self, tobs, n_path):
        """
        Calculate incremental Brownian Motions

        Args:
            tobs: array of observation times
            n_path: number of paths to simulate

        Returns:
            price path (time, path, asset)
        """
        # time steps between consecutive observations (first step from t=0)
        dt = np.diff(np.atleast_1d(tobs), prepend=0)
        n_t = len(dt)

        # with antithetic sampling only half of the paths are actually drawn;
        # the other half are the negated twins appended below
        n_path_gen = n_path // 2 if self.antithetic else n_path
        # generate random number in the order of path, time, asset and transposed
        # in this way, the same paths are generated when increasing n_path
        bm_incr = self.rng.standard_normal((n_path_gen, n_t, self.n_asset)).transpose((1, 0, 2))
        # scale by sqrt(dt) in place to turn normals into Brownian increments
        np.multiply(bm_incr, np.sqrt(dt[:, None, None]), out=bm_incr)
        # correlate the assets; chol_m comes from the parent class
        # (NOTE(review): presumably a Cholesky factor of the correlation — confirm)
        bm_incr = np.dot(bm_incr, self.chol_m.T)
        if self.antithetic:
            # interleave each path with its negated twin -> (n_t, n_path, n_asset)
            bm_incr = np.stack([bm_incr, -bm_incr], axis=2).reshape(
                (n_t, n_path, self.n_asset)
            )
        return bm_incr

    def simulate(self, tobs, n_path, store=True):
        """
        Simulate the price paths and store in the class.
        The initial log-prices are normalized to 0 (i.e. the price paths start
        from 1 after exponentiation) and spot should be multiplied later.

        Args:
            tobs: array of observation times
            n_path: number of paths to simulate
            store: if True (default), store path, tobs, and n_path in the class

        Returns:
            price path (time, path, asset) if store is False
        """
        # (n_t, n_path, n_asset) * (n_asset, n_asset)
        tobs = np.atleast_1d(tobs)
        path = self._bm_incr(tobs=tobs, n_path=n_path)
        # Add drift and convexity
        dt = np.diff(tobs, prepend=0)
        path += (self.intr - self.divr - 0.5 * self.sigma ** 2) * dt[:, None, None]
        # cumulative log-return, then exponentiate to prices (both in place)
        np.cumsum(path, axis=0, out=path)
        np.exp(path, out=path)
        if store:
            self.n_path = n_path
            self.path = path
            self.tobs = tobs
        else:
            return path

    def price_european(self, spot, texp, payoff):
        """
        The European price of that payoff at the expiry.

        Args:
            spot: array of spot prices
            texp: time-to-expiry
            payoff: payoff function applicable to the time-slice of price path

        Returns:
            The MC price of the payoff

        Raises:
            ValueError: if simulate() has not been run, or texp is not in tobs
        """
        if self.n_path == 0:
            raise ValueError("Simulated paths are not available. Run simulate() first.")

        # check if texp is in tobs
        ind, *_ = np.where(np.isclose(self.tobs, texp))
        if len(ind) == 0:
            raise ValueError(f"Stored tobs does not contain t={texp}")

        # scale the normalized paths by spot, then discount the mean payoff
        path = self.path[ind[0], ] * spot
        price = np.exp(-self.intr * texp) * np.mean(payoff(path), axis=0)
        return price
class NormNdMc(BsmNdMc):
    """
    Monte-Carlo simulation of multiasset (N-d) Normal/Bachelier model (arithmetic Brownian Motion)

    Examples:
        >>> import pyfeng as pf
        >>> spot = np.ones(4)*100
        >>> sigma = np.ones(4)*0.4
        >>> texp = 5
        >>> payoff = lambda x: np.fmax(np.mean(x,axis=1) - strike, 0) # Basket option
        >>> strikes = np.arange(80, 121, 10)
        >>> m = pf.NormNdMc(sigma*spot, cor=0.5, rn_seed=1234)
        >>> m.simulate(tobs=[texp], n_path=20000)
        >>> p = []
        >>> for strike in strikes:
        >>>     p.append(m.price_european(spot, texp, payoff))
        >>> np.array(p)
        array([39.42304794, 33.60383167, 28.32667559, 23.60383167, 19.42304794])
    """

    def simulate(self, tobs, n_path, store=True):
        """
        Simulate arithmetic-Brownian price paths and (by default) cache them.
        Paths are normalized to start at 0; the spot level is added later.

        Args:
            tobs: array of observation times
            n_path: number of paths to simulate
            store: if True (default), keep path, tobs, and n_path on the instance

        Returns:
            price path of shape (time, path, asset) when store is False
        """
        obs_times = np.atleast_1d(tobs)
        paths = self._bm_incr(obs_times, n_path)
        # arithmetic BM: cumulative sum of increments, no drift/convexity terms
        np.cumsum(paths, axis=0, out=paths)
        if not store:
            return paths
        self.tobs = obs_times
        self.path = paths
        self.n_path = n_path

    def price_european(self, spot, texp, payoff):
        """
        MC estimate of the European price of `payoff` at expiry `texp`.

        Args:
            spot: array of spot prices
            texp: time-to-expiry
            payoff: payoff function applicable to the time-slice of price path

        Returns:
            The MC price of the payoff

        Raises:
            ValueError: when no simulated paths are stored, or `texp` is not
                among the stored observation times
        """
        if self.n_path == 0:
            raise ValueError("Simulated paths are not available. Run simulate() first.")

        # locate the stored time slice matching texp
        time_idx = np.where(np.isclose(self.tobs, texp))[0]
        if time_idx.size == 0:
            raise ValueError(f"Stored tobs does not contain t={texp}")

        slice_at_expiry = self.path[time_idx[0]] + spot
        discount = np.exp(-self.intr * texp)
        return discount * np.mean(payoff(slice_at_expiry), axis=0)
| [
"numpy.sqrt",
"numpy.ones",
"numpy.random.default_rng",
"numpy.isclose",
"numpy.diff",
"numpy.exp",
"numpy.stack",
"numpy.dot",
"numpy.cumsum",
"numpy.atleast_1d"
] | [((782, 792), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (789, 792), True, 'import numpy as np\n'), ((805, 815), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (812, 815), True, 'import numpy as np\n'), ((1117, 1147), 'numpy.random.default_rng', 'np.random.default_rng', (['rn_seed'], {}), '(rn_seed)\n', (1138, 1147), True, 'import numpy as np\n'), ((2024, 2054), 'numpy.dot', 'np.dot', (['bm_incr', 'self.chol_m.T'], {}), '(bm_incr, self.chol_m.T)\n', (2030, 2054), True, 'import numpy as np\n'), ((2787, 2806), 'numpy.atleast_1d', 'np.atleast_1d', (['tobs'], {}), '(tobs)\n', (2800, 2806), True, 'import numpy as np\n'), ((2909, 2933), 'numpy.diff', 'np.diff', (['tobs'], {'prepend': '(0)'}), '(tobs, prepend=0)\n', (2916, 2933), True, 'import numpy as np\n'), ((3026, 3059), 'numpy.cumsum', 'np.cumsum', (['path'], {'axis': '(0)', 'out': 'path'}), '(path, axis=0, out=path)\n', (3035, 3059), True, 'import numpy as np\n'), ((3068, 3090), 'numpy.exp', 'np.exp', (['path'], {'out': 'path'}), '(path, out=path)\n', (3074, 3090), True, 'import numpy as np\n'), ((5276, 5295), 'numpy.atleast_1d', 'np.atleast_1d', (['tobs'], {}), '(tobs)\n', (5289, 5295), True, 'import numpy as np\n'), ((5347, 5380), 'numpy.cumsum', 'np.cumsum', (['path'], {'axis': '(0)', 'out': 'path'}), '(path, axis=0, out=path)\n', (5356, 5380), True, 'import numpy as np\n'), ((1562, 1581), 'numpy.atleast_1d', 'np.atleast_1d', (['tobs'], {}), '(tobs)\n', (1575, 1581), True, 'import numpy as np\n'), ((1965, 1991), 'numpy.sqrt', 'np.sqrt', (['dt[:, None, None]'], {}), '(dt[:, None, None])\n', (1972, 1991), True, 'import numpy as np\n'), ((3775, 3802), 'numpy.isclose', 'np.isclose', (['self.tobs', 'texp'], {}), '(self.tobs, texp)\n', (3785, 3802), True, 'import numpy as np\n'), ((3960, 3985), 'numpy.exp', 'np.exp', (['(-self.intr * texp)'], {}), '(-self.intr * texp)\n', (3966, 3985), True, 'import numpy as np\n'), ((6065, 6092), 'numpy.isclose', 'np.isclose', (['self.tobs', 'texp'], {}), '(self.tobs, 
texp)\n', (6075, 6092), True, 'import numpy as np\n'), ((6250, 6275), 'numpy.exp', 'np.exp', (['(-self.intr * texp)'], {}), '(-self.intr * texp)\n', (6256, 6275), True, 'import numpy as np\n'), ((2105, 2142), 'numpy.stack', 'np.stack', (['[bm_incr, -bm_incr]'], {'axis': '(2)'}), '([bm_incr, -bm_incr], axis=2)\n', (2113, 2142), True, 'import numpy as np\n')] |
import copy
import itertools
from typing import List, Tuple, Union
import numpy as np
from scipy.sparse import csr_matrix
from scipy.linalg import kron
from quara.objects.elemental_system import ElementalSystem
from quara.objects.matrix_basis import MatrixBasis, get_comp_basis
class CompositeSystem:
"""Class for describing Composite system"""
def __init__(self, systems: List[ElementalSystem]):
"""Constructor
Parameters
----------
systems : List[ElementalSystem]
list of ElementalSystem of this CompositeSystem.
Raises
------
ValueError
duplicate ElementalSystem instance.
ValueError
duplicate ElementalSystem name.
"""
# Validation
# Check for duplicate ElementalSystem
names: List[int] = []
e_sys_ids: List[int] = []
for e_sys in systems:
if e_sys.system_id in e_sys_ids:
raise ValueError(
f"Duplicate ElementalSystem. \n system_id={e_sys.system_id}, name={e_sys.name}"
)
e_sys_ids.append(e_sys.system_id)
if e_sys.name in names:
raise ValueError(f"Duplicate ElementalSystem name. name={e_sys.name}")
names.append(e_sys.name)
# Sort by name of ElementalSystem
# Copy to avoid affecting the original source.
# ElementalSystem should remain the same instance as the original source
# to check if the instances are the same in the tensor product calculation.
# Therefore, use `copy.copy` instead of `copy.deepcopy`.
sored_e_syses = copy.copy(systems)
sored_e_syses.sort(key=lambda x: x.name)
# Set
self._elemental_systems: Tuple[ElementalSystem, ...] = tuple(sored_e_syses)
is_orthonormal_hermitian_0thpropIs = [
e_sys.is_orthonormal_hermitian_0thprop_identity
for e_sys in self._elemental_systems
]
self._is_orthonormal_hermitian_0thprop_identity = all(
is_orthonormal_hermitian_0thpropIs
)
is_hermitian_list = [e_sys.is_hermitian for e_sys in self._elemental_systems]
self._is_basis_hermitian = all(is_hermitian_list)
# calculate tensor product of ElamentalSystem list for getting total MatrixBasis
if len(self._elemental_systems) == 1:
self._total_basis = self._elemental_systems[0].basis
else:
basis_list = [e_sys.basis for e_sys in self._elemental_systems]
temp = basis_list[0]
for elem in basis_list[1:]:
temp = [
kron(val1, val2) for val1, val2 in itertools.product(temp, elem)
]
self._total_basis = MatrixBasis(temp)
self._basis_basisconjugate = None
self._dict_from_hs_to_choi = None
self._dict_from_choi_to_hs = None
self._basis_T_sparse = None
self._basisconjugate_sparse = None
self._basisconjugate_basis_sparse = None
self._basis_basisconjugate_T_sparse = None
self._basis_basisconjugate_T_sparse_from_1 = None
self._basishermitian_basis_T_from_1 = None
def comp_basis(self, mode: str = "row_major") -> MatrixBasis:
"""returns computational basis of CompositeSystem.
Parameters
----------
mode : str, optional
specify whether the order of basis is "row_major" or "column_major", by default "row_major".
Returns
-------
MatrixBasis
computational basis of CompositeSystem.
Raises
------
ValueError
``mode`` is unsupported.
"""
# calculate tensor product of ElamentalSystem list for getting new MatrixBasis
basis_tmp: MatrixBasis
if len(self._elemental_systems) == 1:
basis_tmp = self._elemental_systems[0].comp_basis(mode=mode)
else:
basis_tmp = get_comp_basis(self.dim, mode=mode)
return basis_tmp
def basis(self) -> MatrixBasis:
"""returns MatrixBasis of CompositeSystem.
Returns
-------
MatrixBasis
MatrixBasis of CompositeSystem.
"""
return self._total_basis
@property
def dim(self) -> int:
"""returns dim of CompositeSystem.
the dim of CompositeSystem equals the dim of basis.
Returns
-------
int
dim of CompositeSystem.
"""
return self.basis()[0].shape[0]
@property
def num_e_sys(self) -> int:
"""returns the number of ElementalSystem.
the number of ElementalSystem.
Returns
-------
int
num of ElementalSystem.
"""
return len(self._elemental_systems)
def dim_e_sys(self, i: int) -> int:
"""returns the dimension of the i-th ElementalSystem.
the dim of the i-th ElementalSystem.
Parameters
----------
i: int
the id of an ElementalSystem
Returns
-------
int
the dim of the i-th ElementalSystem
"""
return self._elemental_systems[i].dim
def get_basis(self, index: Union[int, Tuple]) -> MatrixBasis:
"""returns basis specified by index.
Parameters
----------
index : Union[int, Tuple]
index of basis.
- if type is int, then regardes it as the index after calculating the basis of CompositeSystem.
- if type is Tuple, then regardes it as the indices of the basis of ElementalSystems.
Returns
-------
MatrixBasis
basis specified by index.
Raises
------
ValueError
length of tuple does not equal length of the list of the basis.
IndexError
specified index does not exist in the list of the basis.
"""
if type(index) == tuple:
# whether size of tuple equals length of the list of ElementalSystems
if len(index) != len(self._elemental_systems):
raise ValueError(
f"length of tuple must equal length of the list of ElementalSystems. length of tuple={len(index)}, length of the list of ElementalSystems={len(self._elemental_systems)}"
)
# calculate index in _basis by traversing the tuple from the back.
# for example, if length of ElementalSystem is 3 and each dim are dim1, dim2, dim3,
# then index in _basis of tuple(x1, x2, x3) can be calculated the following expression:
# x1 * (dim2 ** 2) * (dim3 ** 2) + x2 * (dim3 ** 2) + x3
temp_grobal_index = 0
temp_dim = 1
for e_sys_position, local_index in enumerate(reversed(index)):
temp_grobal_index += local_index * temp_dim
temp_dim = temp_dim * (self._elemental_systems[e_sys_position].dim ** 2)
return self.basis()[temp_grobal_index]
else:
return self.basis()[index]
def basis_basisconjugate(
self, basis_index: Union[int, Tuple[int, int]]
) -> np.ndarray:
"""returns :math:`B_{\\alpha} \\otimes \\bar{B_{\\beta}}`, where basis_index = :math:`(\\alpha, \\beta)` and :math:`B_{i}` are the elements of basis.
Parameters
----------
basis_index : Union[int, Tuple[int, int]]
index of basis.
- if type is int, then regardes it as the indices (basis_index / num_of_basis, basis_index % num_of_basis) of the basis of CompositeSystem.
- if type is Tuple, then regardes (i, j) as the indices of the basis of CompositeSystem.
Returns
-------
np.ndarray
:math:`B_{\\alpha} \\otimes \\bar{B_{\\beta}}`
"""
# calculate _basis_basisconjugate if it is None
if self._basis_basisconjugate is None:
self._basis_basisconjugate = dict()
basis_no = len(self._total_basis.basis)
basis = copy.deepcopy(self._total_basis.basis)
for alpha, beta in itertools.product(range(basis_no), range(basis_no)):
b_alpha = basis[alpha]
b_beta_conj = np.conjugate(basis[beta])
matrix = np.kron(b_alpha, b_beta_conj)
self._basis_basisconjugate[(alpha, beta)] = matrix
# return basis_basisconjugate
if type(basis_index) == tuple:
return self._basis_basisconjugate[(basis_index)]
else:
basis_index = divmod(basis_index, len(self.basis()))
return self._basis_basisconjugate[(basis_index)]
@property
def dict_from_hs_to_choi(self) -> dict:
# calculate _dict_from_hs_to_choi if it is None
if self._dict_from_hs_to_choi is None:
self._dict_from_hs_to_choi = dict()
basis_no = len(self._total_basis.basis)
basis = copy.deepcopy(self._total_basis.basis)
for alpha, beta in itertools.product(range(basis_no), range(basis_no)):
b_alpha = basis[alpha]
b_beta_conj = np.conjugate(basis[beta])
matrix = np.kron(b_alpha, b_beta_conj)
# calc _dict_from_hs_to_choi
row_indices, column_indices = np.where(matrix != 0)
for row_index, column_index in zip(row_indices, column_indices):
if (row_index, column_index) in self._dict_from_hs_to_choi:
self._dict_from_hs_to_choi[(row_index, column_index)].append(
(alpha, beta, matrix[row_index, column_index])
)
else:
self._dict_from_hs_to_choi[(row_index, column_index)] = [
(alpha, beta, matrix[row_index, column_index])
]
# return _dict_from_hs_to_choi
return self._dict_from_hs_to_choi
def delete_dict_from_hs_to_choi(self) -> None:
"""delete ``dict_from_hs_to_choi`` property to save memory.
If you use ``dict_from_hs_to_choi`` again, call ``dict_from_hs_to_choi`` again.
"""
self._dict_from_hs_to_choi = None
@property
def dict_from_choi_to_hs(self) -> dict:
if self._dict_from_choi_to_hs is None:
self._dict_from_choi_to_hs = dict()
basis_no = len(self._total_basis.basis)
basis = copy.deepcopy(self._total_basis.basis)
for alpha, beta in itertools.product(range(basis_no), range(basis_no)):
b_alpha = basis[alpha]
b_beta_conj = np.conjugate(basis[beta])
matrix = np.kron(b_alpha, b_beta_conj)
# calc _dict_from_choi_to_hs
row_indices, column_indices = np.where(matrix != 0)
for row_index, column_index in zip(row_indices, column_indices):
if (alpha, beta) in self._dict_from_choi_to_hs:
self._dict_from_choi_to_hs[(alpha, beta)].append(
(row_index, column_index, matrix[row_index, column_index])
)
else:
self._dict_from_choi_to_hs[(alpha, beta)] = [
(row_index, column_index, matrix[row_index, column_index])
]
# return _dict_from_choi_to_hs
return self._dict_from_choi_to_hs
def delete_dict_from_choi_to_hs(self) -> None:
"""delete ``dict_from_choi_to_hs`` property to save memory.
If you use ``dict_from_choi_to_hs`` again, call ``dict_from_choi_to_hs`` again.
"""
self._dict_from_choi_to_hs = None
def _calc_basis_sparse(self) -> None:
basis = copy.deepcopy(self._total_basis.basis)
basis_tmp = []
basisconjugate_tmp = []
for b_alpha in basis:
basis_tmp.append(b_alpha.flatten())
basisconjugate_tmp.append(b_alpha.conjugate().flatten())
basis_tmp = np.array(basis_tmp)
self._basis_T_sparse = csr_matrix(basis_tmp.T)
self._basisconjugate_sparse = csr_matrix(basisconjugate_tmp)
@property
def basis_T_sparse(self) -> np.ndarray:
if self._basis_T_sparse is None:
self._calc_basis_sparse()
return self._basis_T_sparse
def delete_basis_T_sparse(self) -> None:
"""delete ``basis_T_sparse`` property to save memory.
If you use ``basis_T_sparse`` again, call ``basis_T_sparse`` again.
"""
self._basis_T_sparse = None
@property
def basisconjugate_sparse(self) -> np.ndarray:
if self._basisconjugate_sparse is None:
self._calc_basis_sparse()
return self._basisconjugate_sparse
def delete_basisconjugate_sparse(self) -> None:
"""delete ``basisconjugate_sparse`` property to save memory.
If you use ``basisconjugate_sparse`` again, call ``basisconjugate_sparse`` again.
"""
self._basisconjugate_sparse = None
def _calc_basis_basisconjugate_sparse(self) -> None:
basis_no = len(self._total_basis.basis)
basis = copy.deepcopy(self._total_basis.basis)
basis_basisconjugate_tmp = []
basis_basisconjugate_tmp_from_1 = []
basishermitian_basis_tmp_from_1 = []
for alpha, beta in itertools.product(range(basis_no), range(basis_no)):
b_alpha = basis[alpha]
b_beta_conj = np.conjugate(basis[beta])
matrix = np.kron(b_alpha, b_beta_conj)
basis_basisconjugate_tmp.append(matrix.flatten())
if alpha != 0 and beta != 0:
basis_basisconjugate_tmp_from_1.append(matrix.flatten())
matrix_2 = basis[beta].conj().T @ b_alpha
basishermitian_basis_tmp_from_1.append(matrix_2.flatten())
# set _basisconjugate_basis_sparse and _basis_basisconjugate_T_sparse
basis_basisconjugate_tmp = np.array(basis_basisconjugate_tmp)
self._basisconjugate_basis_sparse = csr_matrix(
basis_basisconjugate_tmp.conjugate()
)
self._basis_basisconjugate_T_sparse = csr_matrix(basis_basisconjugate_tmp.T)
# set _basis_basisconjugate_T_sparse_from_1
basis_basisconjugate_tmp_from_1 = np.array(basis_basisconjugate_tmp_from_1)
self._basis_basisconjugate_T_sparse_from_1 = csr_matrix(
basis_basisconjugate_tmp_from_1.T
)
# set _basishermitian_basis_T_from
basishermitian_basis_tmp_from_1 = np.array(basishermitian_basis_tmp_from_1)
self._basishermitian_basis_T_from_1 = csr_matrix(
basishermitian_basis_tmp_from_1.T
)
@property
def basisconjugate_basis_sparse(self) -> np.ndarray:
if self._basisconjugate_basis_sparse is None:
self._calc_basis_basisconjugate_sparse()
return self._basisconjugate_basis_sparse
def delete_basisconjugate_basis_sparse(self) -> None:
"""delete ``basisconjugate_basis_sparse`` property to save memory.
If you use ``basisconjugate_basis_sparse`` again, call ``basisconjugate_basis_sparse`` again.
"""
self._basisconjugate_basis_sparse = None
@property
def basis_basisconjugate_T_sparse(self) -> np.ndarray:
if self._basis_basisconjugate_T_sparse is None:
self._calc_basis_basisconjugate_sparse()
return self._basis_basisconjugate_T_sparse
def delete_basis_basisconjugate_T_sparse(self) -> None:
"""delete ``basis_basisconjugate_T_sparse`` property to save memory.
If you use ``basis_basisconjugate_T_sparse`` again, call ``basis_basisconjugate_T_sparse`` again.
"""
self._basis_basisconjugate_T_sparse = None
@property
def basis_basisconjugate_T_sparse_from_1(self) -> np.ndarray:
if self._basis_basisconjugate_T_sparse_from_1 is None:
self._calc_basis_basisconjugate_sparse()
return self._basis_basisconjugate_T_sparse_from_1
def delete_basis_basisconjugate_T_sparse_from_1(self) -> None:
"""delete ``basis_basisconjugate_T_sparse_from_1`` property to save memory.
If you use ``basis_basisconjugate_T_sparse_from_1`` again, call ``basis_basisconjugate_T_sparse_from_1`` again.
"""
self._basis_basisconjugate_T_sparse_from_1 = None
    @property
    def basishermitian_basis_T_from_1(self) -> "csr_matrix":
        """Sparse (CSR) transpose of the basishermitian-basis matrix starting from index 1.

        Computed lazily on first access and cached; use
        ``delete_basishermitian_basis_T_from_1`` to free the cache.
        """
        if self._basishermitian_basis_T_from_1 is None:
            # first access: build all related sparse caches in one pass
            self._calc_basis_basisconjugate_sparse()
        return self._basishermitian_basis_T_from_1
    def delete_basishermitian_basis_T_from_1(self) -> None:
        """Free the cached ``basishermitian_basis_T_from_1`` matrix to save memory.

        The matrix is recomputed automatically the next time the
        ``basishermitian_basis_T_from_1`` property is accessed.
        """
        self._basishermitian_basis_T_from_1 = None
    @property
    def elemental_systems(self) -> Tuple[ElementalSystem]:
        """Return the ElementalSystem objects composing this CompositeSystem.

        Returns
        -------
        Tuple[ElementalSystem]
            the ElementalSystem objects of this CompositeSystem, in order.
        """
        return self._elemental_systems
    @property
    def is_orthonormal_hermitian_0thprop_identity(self) -> bool:
        """Return whether all ElementalSystem of this CompositeSystem are orthonormal, Hermitian and 0th prop identity.

        Returns
        -------
        bool
            True when every ElementalSystem of this CompositeSystem is
            orthonormal, Hermitian and has 0th prop identity (flag is
            precomputed and cached at construction time).
        """
        return self._is_orthonormal_hermitian_0thprop_identity
    @property
    def is_basis_hermitian(self) -> bool:
        """Return the cached flag indicating whether the basis is Hermitian."""
        return self._is_basis_hermitian
    def __len__(self) -> int:
        """Return the number of elemental systems in this composite system."""
        return len(self._elemental_systems)
    def __getitem__(self, key: int) -> ElementalSystem:
        """Return the elemental system at position ``key``."""
        return self._elemental_systems[key]
    def __iter__(self):
        """Iterate over the elemental systems in order."""
        return iter(self._elemental_systems)
def __eq__(self, other) -> bool:
if not isinstance(other, CompositeSystem):
return False
if len(self) != len(other):
return False
for s, o in zip(self, other):
if s is not o:
return False
return True
def __str__(self):
desc = "elemental_systems:\n"
for i, e_sys in enumerate(self._elemental_systems):
desc += f"[{i}] {e_sys.name} (system_id={e_sys.system_id})\n"
desc += "\n"
desc += f"dim: {self.dim}\n"
desc += f"basis:\n"
desc += str(self.basis())
return desc
def __repr__(self):
return f"{self.__class__.__name__}(systems={repr(self.elemental_systems)})"
| [
"quara.objects.matrix_basis.MatrixBasis",
"scipy.linalg.kron",
"quara.objects.matrix_basis.get_comp_basis",
"numpy.where",
"numpy.conjugate",
"itertools.product",
"copy.copy",
"numpy.kron",
"numpy.array",
"copy.deepcopy",
"scipy.sparse.csr_matrix"
] | [((1665, 1683), 'copy.copy', 'copy.copy', (['systems'], {}), '(systems)\n', (1674, 1683), False, 'import copy\n'), ((11863, 11901), 'copy.deepcopy', 'copy.deepcopy', (['self._total_basis.basis'], {}), '(self._total_basis.basis)\n', (11876, 11901), False, 'import copy\n'), ((12125, 12144), 'numpy.array', 'np.array', (['basis_tmp'], {}), '(basis_tmp)\n', (12133, 12144), True, 'import numpy as np\n'), ((12176, 12199), 'scipy.sparse.csr_matrix', 'csr_matrix', (['basis_tmp.T'], {}), '(basis_tmp.T)\n', (12186, 12199), False, 'from scipy.sparse import csr_matrix\n'), ((12238, 12268), 'scipy.sparse.csr_matrix', 'csr_matrix', (['basisconjugate_tmp'], {}), '(basisconjugate_tmp)\n', (12248, 12268), False, 'from scipy.sparse import csr_matrix\n'), ((13261, 13299), 'copy.deepcopy', 'copy.deepcopy', (['self._total_basis.basis'], {}), '(self._total_basis.basis)\n', (13274, 13299), False, 'import copy\n'), ((14073, 14107), 'numpy.array', 'np.array', (['basis_basisconjugate_tmp'], {}), '(basis_basisconjugate_tmp)\n', (14081, 14107), True, 'import numpy as np\n'), ((14269, 14307), 'scipy.sparse.csr_matrix', 'csr_matrix', (['basis_basisconjugate_tmp.T'], {}), '(basis_basisconjugate_tmp.T)\n', (14279, 14307), False, 'from scipy.sparse import csr_matrix\n'), ((14403, 14444), 'numpy.array', 'np.array', (['basis_basisconjugate_tmp_from_1'], {}), '(basis_basisconjugate_tmp_from_1)\n', (14411, 14444), True, 'import numpy as np\n'), ((14498, 14543), 'scipy.sparse.csr_matrix', 'csr_matrix', (['basis_basisconjugate_tmp_from_1.T'], {}), '(basis_basisconjugate_tmp_from_1.T)\n', (14508, 14543), False, 'from scipy.sparse import csr_matrix\n'), ((14652, 14693), 'numpy.array', 'np.array', (['basishermitian_basis_tmp_from_1'], {}), '(basishermitian_basis_tmp_from_1)\n', (14660, 14693), True, 'import numpy as np\n'), ((14740, 14785), 'scipy.sparse.csr_matrix', 'csr_matrix', (['basishermitian_basis_tmp_from_1.T'], {}), '(basishermitian_basis_tmp_from_1.T)\n', (14750, 14785), False, 'from 
scipy.sparse import csr_matrix\n'), ((2788, 2805), 'quara.objects.matrix_basis.MatrixBasis', 'MatrixBasis', (['temp'], {}), '(temp)\n', (2799, 2805), False, 'from quara.objects.matrix_basis import MatrixBasis, get_comp_basis\n'), ((4000, 4035), 'quara.objects.matrix_basis.get_comp_basis', 'get_comp_basis', (['self.dim'], {'mode': 'mode'}), '(self.dim, mode=mode)\n', (4014, 4035), False, 'from quara.objects.matrix_basis import MatrixBasis, get_comp_basis\n'), ((8108, 8146), 'copy.deepcopy', 'copy.deepcopy', (['self._total_basis.basis'], {}), '(self._total_basis.basis)\n', (8121, 8146), False, 'import copy\n'), ((9010, 9048), 'copy.deepcopy', 'copy.deepcopy', (['self._total_basis.basis'], {}), '(self._total_basis.basis)\n', (9023, 9048), False, 'import copy\n'), ((10526, 10564), 'copy.deepcopy', 'copy.deepcopy', (['self._total_basis.basis'], {}), '(self._total_basis.basis)\n', (10539, 10564), False, 'import copy\n'), ((13571, 13596), 'numpy.conjugate', 'np.conjugate', (['basis[beta]'], {}), '(basis[beta])\n', (13583, 13596), True, 'import numpy as np\n'), ((13618, 13647), 'numpy.kron', 'np.kron', (['b_alpha', 'b_beta_conj'], {}), '(b_alpha, b_beta_conj)\n', (13625, 13647), True, 'import numpy as np\n'), ((8301, 8326), 'numpy.conjugate', 'np.conjugate', (['basis[beta]'], {}), '(basis[beta])\n', (8313, 8326), True, 'import numpy as np\n'), ((8352, 8381), 'numpy.kron', 'np.kron', (['b_alpha', 'b_beta_conj'], {}), '(b_alpha, b_beta_conj)\n', (8359, 8381), True, 'import numpy as np\n'), ((9203, 9228), 'numpy.conjugate', 'np.conjugate', (['basis[beta]'], {}), '(basis[beta])\n', (9215, 9228), True, 'import numpy as np\n'), ((9254, 9283), 'numpy.kron', 'np.kron', (['b_alpha', 'b_beta_conj'], {}), '(b_alpha, b_beta_conj)\n', (9261, 9283), True, 'import numpy as np\n'), ((9376, 9397), 'numpy.where', 'np.where', (['(matrix != 0)'], {}), '(matrix != 0)\n', (9384, 9397), True, 'import numpy as np\n'), ((10719, 10744), 'numpy.conjugate', 'np.conjugate', (['basis[beta]'], {}), 
'(basis[beta])\n', (10731, 10744), True, 'import numpy as np\n'), ((10770, 10799), 'numpy.kron', 'np.kron', (['b_alpha', 'b_beta_conj'], {}), '(b_alpha, b_beta_conj)\n', (10777, 10799), True, 'import numpy as np\n'), ((10892, 10913), 'numpy.where', 'np.where', (['(matrix != 0)'], {}), '(matrix != 0)\n', (10900, 10913), True, 'import numpy as np\n'), ((2673, 2689), 'scipy.linalg.kron', 'kron', (['val1', 'val2'], {}), '(val1, val2)\n', (2677, 2689), False, 'from scipy.linalg import kron\n'), ((2708, 2737), 'itertools.product', 'itertools.product', (['temp', 'elem'], {}), '(temp, elem)\n', (2725, 2737), False, 'import itertools\n')] |
import numpy as np
def is_shadowed(probe_building, main_building):
    """Return True when ``probe_building`` lies inside the shadow cast by
    ``main_building``.

    Both arguments are expected to expose a ``centroid`` (x, y) pair and a
    ``max_height`` attribute.  The shadow extends only toward larger y
    values, reaches five times the caster's height, and its height decays
    linearly with distance.
    """
    probe_x, probe_y = probe_building.centroid
    main_x, main_y = main_building.centroid
    main_height = main_building.max_height
    probe_height = probe_building.max_height
    # shadow is cast only in the +y direction
    if probe_y < main_y:
        return False
    offset = np.array([abs(probe_x - main_x), abs(probe_y - main_y)])
    distance = np.linalg.norm(offset)
    # beyond five building heights there is no shadow at all
    if distance > main_height * 5:
        return False
    # the shadow's effective height shrinks linearly with distance
    shadow_height = main_height - distance / 5
    return not probe_height > shadow_height
| [
"numpy.array",
"numpy.linalg.norm"
] | [((375, 409), 'numpy.array', 'np.array', (['[distance_x, distance_y]'], {}), '([distance_x, distance_y])\n', (383, 409), True, 'import numpy as np\n'), ((424, 441), 'numpy.linalg.norm', 'np.linalg.norm', (['d'], {}), '(d)\n', (438, 441), True, 'import numpy as np\n')] |
import geopy
import numpy as np
import greengraph_module
class Greengraph(object):
    """Sample the 'greenness' of map imagery along the straight line
    between two geocoded places."""

    def __init__(self, start, end):
        """Store the two place names and create a Google geocoder."""
        self.start = start
        self.end = end
        self.geocoder = geopy.geocoders.GoogleV3(domain="maps.google.co.uk")

    def steps_test(self, steps):
        """Validate the ``steps`` argument.

        Raises
        ------
        TypeError
            if ``steps`` is not an integer.
        ValueError
            if ``steps`` is smaller than 2.

        Returns 0 on success (kept for backward compatibility).
        """
        # idiomatic check instead of comparing isinstance() to 0
        if not isinstance(steps, int):
            raise TypeError('The argument "steps" has to be an integer.')
        if steps < 2:
            raise ValueError('The argument "steps" has to be at least 2.')
        return 0

    def geolocate(self, place):
        """Return the (latitude, longitude) of the first geocoding match."""
        return self.geocoder.geocode(place, exactly_one=False)[0][1]

    def location_sequence(self, start, end, steps):
        """Return ``steps`` evenly spaced (lat, long) points from ``start``
        to ``end`` (inclusive) as an array of shape (steps, 2)."""
        self.steps_test(steps)
        lats = np.linspace(start[0], end[0], steps)
        longs = np.linspace(start[1], end[1], steps)
        return np.vstack([lats, longs]).transpose()

    def green_between(self, steps):
        """Return the green-pixel count of the map tile at each of ``steps``
        points between ``self.start`` and ``self.end``."""
        self.steps_test(steps)
        return [greengraph_module.Map(*location).count_green() for location in self.location_sequence(
            self.geolocate(self.start), self.geolocate(self.end), steps)]
| [
"greengraph_module.Map",
"numpy.linspace",
"numpy.vstack",
"geopy.geocoders.GoogleV3"
] | [((189, 241), 'geopy.geocoders.GoogleV3', 'geopy.geocoders.GoogleV3', ([], {'domain': '"""maps.google.co.uk"""'}), "(domain='maps.google.co.uk')\n", (213, 241), False, 'import geopy\n'), ((875, 911), 'numpy.linspace', 'np.linspace', (['start[0]', 'end[0]', 'steps'], {}), '(start[0], end[0], steps)\n', (886, 911), True, 'import numpy as np\n'), ((928, 964), 'numpy.linspace', 'np.linspace', (['start[1]', 'end[1]', 'steps'], {}), '(start[1], end[1], steps)\n', (939, 964), True, 'import numpy as np\n'), ((979, 1003), 'numpy.vstack', 'np.vstack', (['[lats, longs]'], {}), '([lats, longs])\n', (988, 1003), True, 'import numpy as np\n'), ((1100, 1132), 'greengraph_module.Map', 'greengraph_module.Map', (['*location'], {}), '(*location)\n', (1121, 1132), False, 'import greengraph_module\n')] |
'''Transfer CIFAR-10 model'''
from __future__ import print_function
import logging
import os
import pdb
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
from lib.cifar_resnet import *
from lib.dataset_utils import *
from lib.nin import *
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def evaluate(net, dataloader, device):
    """Return the summed loss divided by the number of samples in
    ``dataloader``.

    Note: only ``net.fc.training`` is toggled (rather than ``net.eval()``),
    mirroring the training loop where the feature extractor is frozen.
    """
    net.fc.training = False
    total_loss = 0.0
    n_samples = 0
    with torch.no_grad():
        for inputs, targets in dataloader:
            inputs, targets = inputs.to(device), targets.to(device)
            predictions = net(inputs)
            total_loss += loss_function(predictions, targets).item()
            n_samples += targets.size(0)
    return total_loss / n_samples
def train(net, trainloader, validloader, optimizer, epoch, device,
          log, save_best_only=True, best_loss=1e9, model_path='./model.pt'):
    """Run one training epoch, evaluate on the validation set, and
    checkpoint the model weights.

    Returns the lowest validation loss seen so far (``best_loss`` updated
    whenever the checkpoint is written).
    """
    # only the classifier head is trained; the backbone stays frozen
    net.fc.training = True
    running_loss = 0.0
    n_samples = 0
    for inputs, targets in trainloader:
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        predictions = net(inputs)
        batch_loss = loss_function(predictions, targets)
        batch_loss.backward()
        optimizer.step()
        running_loss += batch_loss.item()
        n_samples += targets.size(0)
    val_loss = evaluate(net, validloader, device)
    log.info(' %5d | %.4f | %.4f', epoch, running_loss / n_samples, val_loss)
    # Save model weights (always when save_best_only is False, otherwise
    # only on improvement)
    if not save_best_only or val_loss < best_loss:
        log.info('Saving model...')
        torch.save(net.state_dict(), model_path)
        best_loss = val_loss
    return best_loss
def loss_function(outputs, targets):
    """Distance-based embedding loss over a batch.

    For every sample ``i`` in the batch the loss
      * subtracts the distance to the nearest sample of a *different*
        class, clipped at 100 (pushes other classes away), and
      * adds the distance to the nearest *other* sample of the *same*
        class (pulls same-class samples together).

    Requires CUDA: helper tensors are created with ``.cuda()``.
    The commented-out variants are kept as a record of earlier
    experiments (soft NCA-style losses, class-mean clustering).
    """
    batch_size = outputs.size(0)
    loss = torch.zeros(1).cuda()
    for i in range(batch_size):
        # mask_same[j] == 1 iff sample j has the same label as sample i
        # (this includes j == i itself)
        mask_same = (targets[i] == targets).type(torch.float32)
        # if mask_same.sum() == 1:
        #     break
        mask_diff = (targets[i] != targets).type(torch.float32)
        # mask_self excludes sample i itself
        mask_self = torch.ones(batch_size).cuda()
        mask_self[i] = 0
        # squared euclidean distance from sample i to every batch member
        dist = ((outputs[i] - outputs) ** 2).sum(1)
        # upper bound distance to prevent overflow
        # exp = torch.exp(- torch.min(dist, torch.tensor(50.).cuda()))
        # exp = torch.exp(- dist)
        # loss -= torch.log(torch.sum(mask_same * mask_self * exp) /
        #                   torch.sum(mask_self * exp))
        if mask_diff.sum() > 0:
            # 1e20 * mask_same masks out same-class entries so the inner
            # min picks the closest different-class sample; clipped at 100
            loss -= torch.min(torch.min(1e20 * mask_same + dist),
                              torch.tensor(100.).cuda())
        # additional regularization to pull same class
        const = 1e0
        # exp = torch.exp(- torch.min(dist * self.it.exp(),
        #                             torch.tensor(50.).cuda()))
        # loss -= const * torch.log(torch.sum(mask_same * mask_self * exp) /
        #                           torch.sum(mask_self * exp))
        # TODO
        # loss += const * \
        #     torch.log(torch.sum(mask_diff * exp) / torch.sum(mask_self * exp))
        if mask_same.sum() > 1:
            # mask_diff + 1 - mask_self masks self and other classes, so
            # the min picks the closest *other* same-class sample
            loss += const * \
                torch.min(1e20 * (mask_diff + 1 - mask_self) + dist)
    # loss = F.cross_entropy(outputs, targets, reduction='sum')
    # class mean clustering
    # batch_size = outputs.size(0)
    # loss = torch.zeros(1).cuda()
    # mean = torch.zeros((10, outputs.size(1))).cuda()
    # for i in range(10):
    #     mask = targets == i
    #     mean[i] = outputs[mask].mean(0).detach()
    # for i in range(batch_size):
    #     dist = torch.sum((outputs[i] - mean) ** 2, 1)
    #     exp = torch.exp(- torch.min(dist, torch.tensor(50.).cuda()))
    #     loss -= torch.log(exp[targets[i]] / exp.sum())
    return loss
def main():
    """Run the full transfer-learning experiment: load CIFAR-10, wrap a
    pretrained rotation-prediction ResNet with a trainable embedding head,
    and train the head with the metric ``loss_function``.

    All hyper-parameters are hard-coded below; checkpoints and the log
    file are derived from ``exp_id``.
    """
    # Set experiment id
    exp_id = 18
    model_name = 'transfer_cifar10_exp%d' % exp_id
    # Training parameters
    batch_size = 32
    epochs = 120
    data_augmentation = False
    learning_rate = 1e-3
    l1_reg = 0
    l2_reg = 1e-2
    # which backbone block to cut at (affects the head architecture below)
    block = 3
    # Subtracting pixel mean improves accuracy
    subtract_pixel_mean = False
    # Set all random seeds
    seed = 2019
    np.random.seed(seed)
    torch.manual_seed(seed)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # Set up model directory
    save_dir = os.path.join(os.getcwd(), 'saved_models')
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    model_path = os.path.join(save_dir, model_name + '.h5')
    # Get logger
    log_file = model_name + '.log'
    log = logging.getLogger('train_cifar10')
    log.setLevel(logging.DEBUG)
    # Create formatter and add it to the handlers
    formatter = logging.Formatter(
        '[%(levelname)s %(asctime)s %(name)s] %(message)s')
    # Create file handler
    fh = logging.FileHandler(log_file, mode='w')
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(formatter)
    log.addHandler(fh)
    log.info(log_file)
    log.info(('CIFAR-10 | exp_id: {}, seed: {}, init_learning_rate: {}, ' +
              'batch_size: {}, l2_reg: {}, l1_reg: {}, epochs: {}, ' +
              'data_augmentation: {}, subtract_pixel_mean: {}').format(
        exp_id, seed, learning_rate, batch_size, l2_reg, l1_reg,
        epochs, data_augmentation, subtract_pixel_mean))
    log.info('Preparing data...')
    trainloader, validloader, testloader = load_cifar10(batch_size,
                                                       data_dir='/data',
                                                       val_size=0.1,
                                                       normalize=False,
                                                       augment=False,
                                                       shuffle=True,
                                                       seed=seed)
    log.info('Building model...')
    # --- earlier backbone variants kept for reference ---
    # net = PreActResNet(PreActBlock, [2, 2, 2, 2])
    # net = net.to(device)
    # opt = {'num_classes': 4, 'num_stages': 4}
    # net = NetworkInNetwork(opt)
    # net.load_state_dict(torch.load(
    #     'saved_models/model_net_epoch200')['network'])
    # net = net._feature_blocks
    # net_wrap = NINWrapper(net, block=block)
    # for param in net_wrap.parameters():
    #     param.requires_grad = False
    # # net_wrap.fc = nn.Linear(3072, 128)
    # if block == 2:
    #     net_wrap.fc = nn.Sequential(
    #         nn.BatchNorm1d(12288),
    #         nn.Linear(12288, 2000),
    #         nn.ReLU(inplace=True),
    #         nn.BatchNorm1d(2000),
    #         nn.Linear(2000, 400),
    #         nn.ReLU(inplace=True),
    #         nn.BatchNorm1d(400),
    #         nn.Linear(400, 128),
    #     )
    # elif block == 3:
    #     net_wrap.fc = nn.Sequential(
    #         nn.Linear(3072, 200),
    #         nn.ReLU(inplace=True),
    #         nn.Linear(200, 200),
    #         nn.ReLU(inplace=True),
    #         nn.Linear(200, 128),
    #     )
    # net_wrap = net_wrap.to('cuda')
    # backbone pretrained on the rotation task; weights stay frozen
    net = PreActResNet(PreActBlock, [2, 2, 2, 2], num_classes=4)
    net.load_state_dict(torch.load('saved_models/rot_cifar10_exp0.h5'))
    net_wrap = ResNetWrapper(net, block=block, dim=16384)
    for param in net_wrap.parameters():
        param.requires_grad = False
    # net_wrap.fc = nn.Linear(3072, 128)
    net_wrap.eval()
    # trainable embedding head; architecture depends on the cut block
    if block == 4:
        net_wrap.fc = nn.Sequential(
            nn.Linear(8192, 200),
            nn.ReLU(inplace=True),
            nn.Linear(200, 200),
            nn.ReLU(inplace=True),
            nn.Linear(200, 128),
        )
    elif block == 3:
        net_wrap.fc = nn.Sequential(
            nn.BatchNorm1d(16384),
            nn.Linear(16384, 2000),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(2000),
            nn.Linear(2000, 400),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(400),
            nn.Linear(400, 128),
        )
    net_wrap = net_wrap.to('cuda')
    # mean = pickle.load(open('resnet_block3_mean.p', 'rb'))
    # std = pickle.load(open('resnet_block3_std.p', 'rb'))
    # net_wrap.mean.data = torch.tensor(mean).cuda()
    # net_wrap.std.data = torch.tensor(std).cuda()
    # if device == 'cuda':
    #     net = torch.nn.DataParallel(net)
    #     cudnn.benchmark = True
    optimizer = optim.Adam(
        net_wrap.parameters(), lr=learning_rate, weight_decay=l2_reg)
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, [50, 70, 90], gamma=0.1)
    log.info(' epoch | loss | val_loss')
    best_loss = 1e9
    for epoch in range(epochs):
        # NOTE(review): scheduler.step() is called before the epoch's
        # optimizer steps — pre-1.1 PyTorch ordering; confirm intended.
        lr_scheduler.step()
        best_loss = train(net_wrap, trainloader, validloader, optimizer,
                          epoch, device, log, save_best_only=True,
                          best_loss=best_loss, model_path=model_path)
    test_loss = evaluate(net_wrap, testloader, device)
    log.info('Test loss: %.4f', test_loss)
# Entry point: run the full transfer-learning experiment as a script.
if __name__ == '__main__':
    main()
| [
"logging.getLogger",
"torch.nn.ReLU",
"torch.optim.lr_scheduler.MultiStepLR",
"torch.min",
"torch.nn.BatchNorm1d",
"torch.cuda.is_available",
"os.path.isdir",
"logging.FileHandler",
"numpy.random.seed",
"torch.manual_seed",
"os.makedirs",
"logging.Formatter",
"torch.load",
"os.path.join",
... | [((4271, 4291), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4285, 4291), True, 'import numpy as np\n'), ((4296, 4319), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (4313, 4319), False, 'import torch\n'), ((4551, 4593), 'os.path.join', 'os.path.join', (['save_dir', "(model_name + '.h5')"], {}), "(save_dir, model_name + '.h5')\n", (4563, 4593), False, 'import os\n'), ((4657, 4691), 'logging.getLogger', 'logging.getLogger', (['"""train_cifar10"""'], {}), "('train_cifar10')\n", (4674, 4691), False, 'import logging\n'), ((4790, 4859), 'logging.Formatter', 'logging.Formatter', (['"""[%(levelname)s %(asctime)s %(name)s] %(message)s"""'], {}), "('[%(levelname)s %(asctime)s %(name)s] %(message)s')\n", (4807, 4859), False, 'import logging\n'), ((4904, 4943), 'logging.FileHandler', 'logging.FileHandler', (['log_file'], {'mode': '"""w"""'}), "(log_file, mode='w')\n", (4923, 4943), False, 'import logging\n'), ((8483, 8555), 'torch.optim.lr_scheduler.MultiStepLR', 'torch.optim.lr_scheduler.MultiStepLR', (['optimizer', '[50, 70, 90]'], {'gamma': '(0.1)'}), '(optimizer, [50, 70, 90], gamma=0.1)\n', (8519, 8555), False, 'import torch\n'), ((531, 546), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (544, 546), False, 'import torch\n'), ((4344, 4369), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4367, 4369), False, 'import torch\n'), ((4439, 4450), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4448, 4450), False, 'import os\n'), ((4479, 4502), 'os.path.isdir', 'os.path.isdir', (['save_dir'], {}), '(save_dir)\n', (4492, 4502), False, 'import os\n'), ((4512, 4533), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (4523, 4533), False, 'import os\n'), ((7179, 7225), 'torch.load', 'torch.load', (['"""saved_models/rot_cifar10_exp0.h5"""'], {}), "('saved_models/rot_cifar10_exp0.h5')\n", (7189, 7225), False, 'import torch\n'), ((1914, 1928), 'torch.zeros', 'torch.zeros', (['(1)'], {}), 
'(1)\n', (1925, 1928), False, 'import torch\n'), ((7490, 7510), 'torch.nn.Linear', 'nn.Linear', (['(8192)', '(200)'], {}), '(8192, 200)\n', (7499, 7510), True, 'import torch.nn as nn\n'), ((7524, 7545), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (7531, 7545), True, 'import torch.nn as nn\n'), ((7559, 7578), 'torch.nn.Linear', 'nn.Linear', (['(200)', '(200)'], {}), '(200, 200)\n', (7568, 7578), True, 'import torch.nn as nn\n'), ((7592, 7613), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (7599, 7613), True, 'import torch.nn as nn\n'), ((7627, 7646), 'torch.nn.Linear', 'nn.Linear', (['(200)', '(128)'], {}), '(200, 128)\n', (7636, 7646), True, 'import torch.nn as nn\n'), ((2171, 2193), 'torch.ones', 'torch.ones', (['batch_size'], {}), '(batch_size)\n', (2181, 2193), False, 'import torch\n'), ((2621, 2656), 'torch.min', 'torch.min', (['(1e+20 * mask_same + dist)'], {}), '(1e+20 * mask_same + dist)\n', (2630, 2656), False, 'import torch\n'), ((3257, 3310), 'torch.min', 'torch.min', (['(1e+20 * (mask_diff + 1 - mask_self) + dist)'], {}), '(1e+20 * (mask_diff + 1 - mask_self) + dist)\n', (3266, 3310), False, 'import torch\n'), ((7728, 7749), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(16384)'], {}), '(16384)\n', (7742, 7749), True, 'import torch.nn as nn\n'), ((7763, 7785), 'torch.nn.Linear', 'nn.Linear', (['(16384)', '(2000)'], {}), '(16384, 2000)\n', (7772, 7785), True, 'import torch.nn as nn\n'), ((7799, 7820), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (7806, 7820), True, 'import torch.nn as nn\n'), ((7834, 7854), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(2000)'], {}), '(2000)\n', (7848, 7854), True, 'import torch.nn as nn\n'), ((7868, 7888), 'torch.nn.Linear', 'nn.Linear', (['(2000)', '(400)'], {}), '(2000, 400)\n', (7877, 7888), True, 'import torch.nn as nn\n'), ((7902, 7923), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (7909, 
7923), True, 'import torch.nn as nn\n'), ((7937, 7956), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(400)'], {}), '(400)\n', (7951, 7956), True, 'import torch.nn as nn\n'), ((7970, 7989), 'torch.nn.Linear', 'nn.Linear', (['(400)', '(128)'], {}), '(400, 128)\n', (7979, 7989), True, 'import torch.nn as nn\n'), ((2687, 2706), 'torch.tensor', 'torch.tensor', (['(100.0)'], {}), '(100.0)\n', (2699, 2706), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from numpy import exp
from numpy import linspace
def trapezoidal(f, a, b, n):
    """Composite trapezoidal approximation of the integral of ``f`` over
    ``[a, b]`` using ``n`` equal subintervals.

    ``f`` must accept a numpy array (it is evaluated on all nodes at once).
    """
    width = (b - a) / float(n)
    nodes = linspace(a, b, n + 1)
    # endpoints enter the trapezoid sum with half weight
    weighted_sum = sum(f(nodes)) - 0.5 * (f(a) + f(b))
    return width * weighted_sum
def trapezoidal_double(f, a, b, c, d, nx, ny):
    """Composite trapezoidal rule for the double integral of ``f`` over
    the rectangle ``[a, b] x [c, d]`` with ``nx`` by ``ny`` subintervals.

    Corner nodes carry weight 1/4, edge nodes 1/2 and interior nodes 1.
    """
    hx = (b - a) / float(nx)
    hy = (d - c) / float(ny)
    corner_term = 0.25 * (f(a, c) + f(a, d) + f(b, c) + f(b, d))
    x_edge_term = sum(f(a + i * hx, c) + f(a + i * hx, d)
                      for i in range(1, nx))
    y_edge_term = sum(f(a, c + j * hy) + f(b, c + j * hy)
                      for j in range(1, ny))
    interior_term = sum(f(a + i * hx, c + j * hy)
                        for i in range(1, nx)
                        for j in range(1, ny))
    total = corner_term + 0.5 * x_edge_term + 0.5 * y_edge_term + interior_term
    return hx * hy * total
def application():
    """Interactive demo: integrate v(t) = 3 t^2 exp(t^3) over [0, 1] with a
    user-chosen number of trapezoids and compare against the exact value."""
    def v(t):
        # integrand; its antiderivative is exp(t**3)
        return 3 * (t ** 2) * exp(t ** 3)

    def antiderivative(t):
        return exp(t ** 3)

    n = int(input('n: '))
    numerical = trapezoidal(v, 0, 1, n)
    print(numerical)
    # exact result from the fundamental theorem of calculus
    exact = antiderivative(1) - antiderivative(0)
    print(exact)
    error = exact - numerical
    print('n=%d: exact=%.16f, calc=%.16f, error: %g' % (n, exact, numerical, error))
# Entry point: run the interactive integration demo.
if __name__ == '__main__':
    application()
| [
"numpy.exp",
"numpy.linspace"
] | [((155, 176), 'numpy.linspace', 'linspace', (['a', 'b', '(n + 1)'], {}), '(a, b, n + 1)\n', (163, 176), False, 'from numpy import linspace\n'), ((992, 1003), 'numpy.exp', 'exp', (['(t ** 3)'], {}), '(t ** 3)\n', (995, 1003), False, 'from numpy import exp\n'), ((844, 855), 'numpy.exp', 'exp', (['(t ** 3)'], {}), '(t ** 3)\n', (847, 855), False, 'from numpy import exp\n')] |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def read_data(input_file, index):
    """Convert one column of a comma-separated file into a monthly pandas
    Series.

    Each row of ``input_file`` is expected to start with year and month
    columns; ``index`` selects which column becomes the series values.
    """
    raw = np.loadtxt(input_file, delimiter=',')

    def fmt(year, month):
        # build a 'YYYY-M' string pandas can parse as a date
        return str(int(year)) + '-' + str(int(month))

    # start date comes from the first row
    start = fmt(raw[0, 0], raw[0, 1])
    # end date is one month past the last row (rolling over December)
    last_year, last_month = raw[-1, 0], raw[-1, 1]
    if last_month == 12:
        end = fmt(last_year + 1, 1)
    else:
        end = fmt(last_year, last_month + 1)
    # month-end timestamps covering the data range
    stamps = pd.date_range(start, end, freq='M')
    return pd.Series(raw[:, index], index=stamps)
# Script entry point: plot each selected column as a monthly time series.
if __name__=='__main__':
    # Input filename
    input_file = 'data_2D.txt'
    # Specify the columns that need to be converted
    # into time-series data
    indices = [2, 3]
    # Iterate through the columns and plot the data
    for index in indices:
        # Convert the column to timeseries format
        timeseries = read_data(input_file, index)
        # Plot the data
        plt.figure()
        timeseries.plot()
        plt.title('Dimension ' + str(index - 1))
        plt.show()
| [
"pandas.Series",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"pandas.date_range",
"matplotlib.pyplot.show"
] | [((163, 200), 'numpy.loadtxt', 'np.loadtxt', (['input_file'], {'delimiter': '""","""'}), "(input_file, delimiter=',')\n", (173, 200), True, 'import numpy as np\n'), ((708, 743), 'pandas.date_range', 'pd.date_range', (['start', 'end'], {'freq': '"""M"""'}), "(start, end, freq='M')\n", (721, 743), True, 'import pandas as pd\n'), ((824, 875), 'pandas.Series', 'pd.Series', (['input_data[:, index]'], {'index': 'date_indices'}), '(input_data[:, index], index=date_indices)\n', (833, 875), True, 'import pandas as pd\n'), ((1382, 1392), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1390, 1392), True, 'import matplotlib.pyplot as plt\n'), ((1289, 1301), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1299, 1301), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python
import unittest
import numpy as np
from hico_multi_classification.tfrecord_converter import ImageCoder
class ImageCoderTest(unittest.TestCase):
    """Smoke tests for the ImageCoder image-conversion helpers."""

    @staticmethod
    def _load_bytes(path):
        # read a fixture image and wrap its raw bytes in a numpy array,
        # which is the input format the coder methods expect
        with open(path, 'rb') as image_file:
            return np.array(image_file.read())

    def setUp(self):
        self._image_coder = ImageCoder()

    def test_init(self):
        self.assertIsNotNone(self._image_coder)

    def test_png_to_jpeg(self):
        png_bytes = self._load_bytes('testdata/python_logo.png')
        self.assertIsNotNone(self._image_coder.png_to_jpeg(png_bytes))

    def test_cmyk_to_rgb(self):
        jpg_bytes = self._load_bytes('testdata/python_logo.jpg')
        self.assertIsNotNone(self._image_coder.cmyk_to_rgb(jpg_bytes))

    def test_decode_jpeg(self):
        jpg_bytes = self._load_bytes('testdata/python_logo.jpg')
        self.assertIsNotNone(self._image_coder.decode_jpeg(jpg_bytes))
| [
"numpy.array",
"hico_multi_classification.tfrecord_converter.ImageCoder"
] | [((219, 231), 'hico_multi_classification.tfrecord_converter.ImageCoder', 'ImageCoder', ([], {}), '()\n', (229, 231), False, 'from hico_multi_classification.tfrecord_converter import ImageCoder\n'), ((461, 475), 'numpy.array', 'np.array', (['file'], {}), '(file)\n', (469, 475), True, 'import numpy as np\n'), ((726, 740), 'numpy.array', 'np.array', (['file'], {}), '(file)\n', (734, 740), True, 'import numpy as np\n'), ((989, 1003), 'numpy.array', 'np.array', (['file'], {}), '(file)\n', (997, 1003), True, 'import numpy as np\n')] |
import warnings
import glob
import os
from scipy.stats import linregress, norm
import xarray as xr
import pandas as pd
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ImportWarning)
import cartopy.crs as ccrs
import cartopy.mpl.geoaxes
from utils import (
ensemble_mean_wind_speed,
annual_mean,
add_letters,
open_picontrol,
selbox,
open_datasets,
LONMAX,
LONMIN,
LATMIN,
LATMAX,
)
P_THRESHOLD = 5
def selHadISD(ds, path_to_data):
    """Average ``ds`` over the HadISD station locations used in
    Zeng et al. (2019).

    Stations are read from the supplementary Excel sheet, restricted to
    the European box defined in ``utils``, and ``ds`` is linearly
    interpolated to each station before taking the station mean.

    :param ds: xarray dataset with ``lat``/``lon`` coordinates
    :param path_to_data: root directory containing the ``HadISD`` folder
    :return: station-mean dataset
    """
    # load HadISD station list from the paper's supplementary data
    stations = pd.read_excel(
        f"{path_to_data}/HadISD/Zeng_SIData2_HadISDv202.xlsx",
        usecols=["lons", "lats"], sheet_name="stations"
    )
    # keep only stations inside the region of interest
    inside_box = (
        (stations.lons < LONMAX)
        & (stations.lons > LONMIN)
        & (stations.lats > LATMIN)
        & (stations.lats < LATMAX)
    )
    stations = stations.where(inside_box).dropna()
    # interpolate the input data to every station location
    interpolated = [
        ds.interp(lat=row["lats"], lon=row["lons"], method="linear")
        for _, row in stations.iterrows()
    ]
    return xr.concat(interpolated, dim="station_number").mean(dim="station_number")
def slope_if_significant(y, p_threshold=P_THRESHOLD, trend_length=20):
    """Return the linear trend of ``y`` (per decade) if it is statistically
    significant, otherwise NaN.

    A linear fit of length ``trend_length`` is applied to ``y``; the slope
    is returned only when its p-value is below ``p_threshold`` percent.

    :param y: timeseries of length ``trend_length``
    :param p_threshold: significance threshold in percent
    :param trend_length: number of timesteps in the fit window
    :return: slope per decade, or NaN
    """
    alpha = p_threshold / 100  # percentage -> fraction
    # timesteps are annual; dividing by 10 expresses the slope per decade
    fit = linregress(np.arange(trend_length) / 10, y)
    slope, pvalue = fit[0], fit[3]
    return slope if pvalue < alpha else np.nan
def calc_frac_partoftrend(y):
    """Return the percentage of timesteps that belong to a 20-timestep
    trend period.

    ``y`` holds one slope per possible trend onset (NaN where no
    significant trend starts).  Each finite entry marks a trend onset and
    receives a weight counting the timesteps it covers until the next
    onset (at most 20, extended past the series end if needed).  The
    percentage is taken relative to the full series length ``y.size + 19``.
    """
    weights = y.copy()
    weights[np.isfinite(weights)] = 1  # trend onsets
    weights[np.isnan(weights)] = 0  # no trend here
    size = weights.size
    for onset in range(size):
        if weights[onset] != 1:
            continue
        for ahead in range(1, 20):
            if onset + ahead >= size:
                # remaining years of the 20y window extend past the series end
                weights[onset] += 20 - ahead
                break
            if weights[onset + ahead] == 0:
                # no new trend starts here, so this year is covered by the
                # current trend period
                weights[onset] += 1
            else:
                break
    return np.round(weights.sum() / (size + 19) * 100)
def test_calc_frac_partoftrend():
    """Self-test for ``calc_frac_partoftrend`` with hand-computed values."""
    # an 81-entry slope timeseries corresponds to 100 years of input data
    slopes = np.full(81, np.nan)
    slopes[3] = 3
    assert calc_frac_partoftrend(slopes) == 20.0 / 100 * 100
    slopes[-1] = 2
    assert calc_frac_partoftrend(slopes) == 40.0 / 100 * 100
    slopes[4] = 1
    assert calc_frac_partoftrend(slopes) == 41.0 / 100 * 100
    slopes[:] = 2
    assert calc_frac_partoftrend(slopes) == 100.0
    print("Test of function `calc_frac_partoftrend` completed succesfully")
def plot_histo(
    slopes,
    ax,
    experiment,
    full_output=False,
    bins=50,
    trend_length=20,
    p_threshold=P_THRESHOLD,
):
    """
    Plots a histogram of the significant wind speed trends and overlays
    the observation-based estimates from Vautard et al. (2010) and
    Zeng et al. (2019) as vertical reference lines.

    :param slopes: array of trend slopes (NaN where not significant)
    :param ax: matplotlib Axes to draw into
    :param experiment: experiment label used in the plot title
    :param full_output: if True also return histogram values and the
        fraction of years that are part of a trend
    :param bins: number of histogram bins
    :param trend_length: trend window length (used only in the title)
    :param p_threshold: significance threshold in percent (axis label)
    :return: bins, or (n, bins, frac_partoftrend) if ``full_output``
    """
    n, bins, patches = ax.hist(
        slopes[np.isfinite(slopes)],
        bins=bins,
        density=True,
        color="darkorange",
        alpha=0.7,
    )
    ax.set_xlim(xmin=-0.2, xmax=0.2)
    # shared style for the rotated annotation labels
    textdic = {
        "horizontalalignment": "center",
        "verticalalignment": "center",
        "rotation": 90,
        "fontsize": 8,
    }
    # literature reference values (source locations noted per line)
    ax.axvline(x=-0.09, color="purple", ls="--")  # p.1, 2nd column, 1st paragraph
    ax.text(-0.083, n.max() * 3.0 / 4, "Vautard et al. [2010] 1979 - 2008", textdic)
    ax.axvline(x=-0.1, color="purple", ls="--")  # from SI Fig. 4e
    ax.text(-0.107, n.max() * 3.0 / 4, "Zeng et al. [2019] 1978 - 2003", textdic)
    ax.axvline(x=0.11, color="purple", ls="--")  # from SI Fig. 4e
    ax.text(0.103, n.max() * 3.0 / 4, "Zeng et al. [2019] 2004 - 2017", textdic)
    frac_partoftrend = calc_frac_partoftrend(slopes)
    xlabel = f"Significant wind speed trends at {100 - p_threshold}% level [m/s/decade]"
    ax.set_xlabel(xlabel, fontsize=12)
    plot_title = f"{experiment}: {int(frac_partoftrend)}% of years belong to a {trend_length}y trend period"
    ax.set_title(plot_title)
    if full_output:
        return n, bins, frac_partoftrend
    else:
        return bins
def plot_full_timeseries_with_trend_marks(path_to_data, path_to_plots):
    """
    Plots annual and 20y running-mean wind speed timeseries during pre-industrial control.
    Markers of red and green color denote onsets of significant upward and downward
    20y trend periods, respectively.
    A map of the considered domain is added as an inset.

    :param path_to_data: path to the directory containing the input data
    :param path_to_plots: path to the directory where the figure is saved
    :return:
    """
    # plot full timeseries and mark trends
    ds_picontrol = open_picontrol(path_to_data)
    # slope of every 20y window; NaN where the trend is not significant at P_THRESHOLD
    slopes = np.asarray(
        [
            slope_if_significant(
                ds_picontrol["sfcWind"][x : x + 20], p_threshold=P_THRESHOLD
            )
            for x in range(1980)
        ]
    )
    slopes_ts = xr.DataArray(
        slopes, dims="time", coords={"time": ds_picontrol["sfcWind"].time[:1980]}
    )
    # plot slopes and mark trend onsets
    f, ax = plt.subplots(figsize=(12, 5))
    ds_picontrol["sfcWind"].plot(ax=ax, alpha=0.5, label="Annual mean")
    ds_picontrol["sfcWind"].rolling(time=20, center=True).mean().dropna(
        dim="time"
    ).plot(ax=ax, color="black", label="20y mean")
    # color-coded markers at the years where a significant 20y trend starts
    ds_picontrol["sfcWind"].where(slopes_ts > 0).plot.line(
        marker="o", linewidth=0, color="red", alpha=0.7, label="onset upward trend"
    )
    ds_picontrol["sfcWind"].where(slopes_ts < 0).plot.line(
        marker="o", linewidth=0, color="green", alpha=0.7, label="onset downward trend"
    )
    # add inset with map of focus region
    axins = inset_axes(
        ax,
        width="10%",
        height="20%",
        loc="upper left",
        axes_class=cartopy.mpl.geoaxes.GeoAxes,
        axes_kwargs=dict(map_projection=ccrs.PlateCarree()),
    )
    axins.set_extent((LONMIN - 1, LONMAX + 1, LATMIN - 1.5, LATMAX + 0.5))
    axins.add_feature(cartopy.feature.COASTLINE.with_scale("50m"), lw=0.2)
    axins.add_feature(cartopy.feature.BORDERS.with_scale("50m"), lw=0.15)
    # blue rectangle marks the averaging box on the inset map
    axins.add_patch(
        mpatches.Rectangle(
            xy=[LONMIN, LATMIN],
            width=LONMAX - LONMIN,
            height=LATMAX - LATMIN,
            facecolor="blue",
            alpha=0.2,
            transform=ccrs.PlateCarree(),
        )
    )
    axins.outline_patch.set_visible(False)
    ax.legend(loc="upper right", ncol=2)
    ax.set_xlabel("Year of pi-control simulation", fontsize=12)
    ax.set_ylabel("European mean wind speed [m/s]", fontsize=12)
    ax.set_title("")
    ax.set_ylim(ymax=5.42)
    ax.set_xlim(xmin=ds_picontrol.time[0].values, xmax=ds_picontrol.time[-1].values)
    plt.tight_layout()
    plt.savefig(f"{path_to_plots}/timeseries_picontrol_Europe.jpeg", dpi=300)
    plt.close("all")
def plot_trend_histograms(path_to_data, path_to_plots):
    """
    Plots trend histograms for different combinations of
    - trend lengths (15, 20, 25 years)
    - aggregation types (box average or interpolated to HadISD station locations)
    - significance levels (p values of 0.05, 0.1, 0.15, 1)
    A Gaussian is fitted to those plots where no significance screening is applied (i.e. p=1)

    :param path_to_data: path to the directory containing the input data
    :param path_to_plots: path to the directory where the figures are saved
    :return:
    """
    ds_picontrol = open_picontrol(path_to_data)
    ds_list_HadISD = [
        selHadISD(annual_mean(xr.open_dataset(x, use_cftime=True)), path_to_data)
        for x in sorted(glob.glob(f"{path_to_data}/pi-control/*.nc"))
    ]  # use_cftime needed after 2200. Otherwise SerializationWarning is raised
    ds_picontrol_HadISD = xr.concat(ds_list_HadISD, dim="time")
    # PI-CONTROL plot trend histograms for different p-values
    for trend_length in [15, 20, 25]:
        # HadISD is sensitivity test with data averaged to European HadISD stations
        for agg_type in ["HadISD", "box"]:
            for p_threshold in [5, 10, 15, 100]:
                if agg_type == "box":
                    ds_tmp = ds_picontrol.copy()
                else:
                    ds_tmp = ds_picontrol_HadISD.copy()
                # slope of every trend_length window; NaN where not significant
                slopes = np.asarray(
                    [
                        slope_if_significant(
                            ds_tmp["sfcWind"][x : x + trend_length],
                            p_threshold=p_threshold,
                            trend_length=trend_length,
                        )
                        for x in range(ds_tmp.time.size - trend_length)
                    ]
                )
                f, ax = plt.subplots()
                bins = plot_histo(
                    slopes,
                    ax,
                    "Pi-control",
                    trend_length=trend_length,
                    p_threshold=p_threshold,
                )
                # fit Gaussian to histogram without significance screening
                if p_threshold == 100:
                    mu, std = norm.fit(slopes)
                    ax.plot(bins, norm.pdf(bins, mu, std), color="red")
                ax.set_ylabel("MPI-GE PDF", fontsize=12)
                add_letters(ax)
                plt.tight_layout()
                if agg_type == "box":
                    fig_path = f"{path_to_plots}/picontrol_wind_trends_Europe_{p_threshold}_{trend_length}y.jpeg"
                else:
                    fig_path = f"{path_to_plots}/picontrol_HadISD_wind_trends_Europe_{p_threshold}_{trend_length}y.jpeg"
                plt.savefig(fig_path, dpi=300)
                plt.close("all")
def plot_pi_control_cmip6_trend_histograms(path_to_data, path_to_plots):
    """
    Plots pre-industrial control wind speed trend histograms for every CMIP6
    model, plus a CMIP6 ensemble-mean histogram with observation-based trend
    estimates from earlier studies marked as vertical lines.

    :param path_to_data: path to the directory containing the CMIP6_annual data
    :param path_to_plots: path to the directory where the figures are saved
    :return:
    """
    # PI-CONTROL trend histograms for CMIP6 ensemble
    filelist = glob.glob(f"{path_to_data}/CMIP6_annual/*.nc")
    # model name is the third "_"-separated token of each file name
    models = np.unique([x.split("/")[-1].split("_")[2] for x in filelist])
    CMIP6_histos = {}
    CMIP6_bins = np.arange(-0.2, 0.2, 0.005)
    for i, model in enumerate(models):
        print(str(int(i / len(models) * 100)) + "%")  # progress indicator
        ds_list = [
            selbox(xr.open_dataset(x, use_cftime=True))
            for x in sorted(glob.glob(f"{path_to_data}/CMIP6_annual/*{model}*.nc"))
        ]  # use_cftime needed after 2200. Otherwise SerializationWarning is raised
        ds_CMIP6 = xr.concat(ds_list, dim="time")
        # slope of every 20y window; NaN where not significant at P_THRESHOLD
        slopes = np.asarray(
            [
                slope_if_significant(
                    ds_CMIP6["sfcWind"][x : x + 20], p_threshold=P_THRESHOLD
                )
                for x in range(ds_CMIP6.time.size - 20)
            ]
        )
        f, ax = plt.subplots()
        CMIP6_histos[model] = plot_histo(
            slopes,
            ax,
            "Pi-control " + model,
            full_output=True,
            bins=CMIP6_bins,
            p_threshold=P_THRESHOLD,
        )
        ax.set_ylabel("PDF")
        plt.tight_layout()
        os.makedirs(f"{path_to_plots}/CMIP6", exist_ok=True)
        fig_path = f"{path_to_plots}/CMIP6/{model}_picontrol_wind_trends_Europe_{P_THRESHOLD}.jpeg"
        plt.savefig(fig_path, dpi=300)
        plt.close("all")
    # ensemble mean histo
    del CMIP6_histos["EC-Earth3-CC"]  # has no data
    del CMIP6_histos["AWI-ESM-1-1-LR"]  # only has negative trends of -0.07 m/s/dec
    df_CMIP6 = pd.DataFrame(CMIP6_histos, index=["n", "bins", "fracoftrends"])
    n_mean, frac_mean = df_CMIP6.loc["n"].mean(), df_CMIP6.loc["fracoftrends"].mean()
    f, ax = plt.subplots()
    ax.bar(CMIP6_bins[1:], n_mean, width=0.005, color="Darkorange", alpha=0.7)
    textdic = {
        "horizontalalignment": "center",
        "verticalalignment": "center",
        "rotation": 90,
        "fontsize": 8,
    }
    ax.axvline(x=-0.09, color="purple", ls="--")  # p.1, 2nd column, 1st paragraph
    # label restored from a "<NAME>" placeholder to "Vautard et al." for
    # consistency with the identical reference line drawn in plot_histo
    ax.text(
        -0.083, n_mean.max() * 3.0 / 4, "Vautard et al. [2010] 1979 - 2008", textdic
    )
    ax.axvline(x=-0.1, color="purple", ls="--")  # from SI Fig. 4e
    ax.text(-0.107, n_mean.max() * 3.0 / 4, "Zeng et al. [2019] 1978 - 2003", textdic)
    ax.axvline(x=0.11, color="purple", ls="--")  # from SI Fig. 4e
    ax.text(0.103, n_mean.max() * 3.0 / 4, "Zeng et al. [2019] 2004 - 2017", textdic)
    ax.set_ylabel("CMIP6 ensemble mean PDF", fontsize=12)
    xlabel = f"Significant wind speed trends at {100 - P_THRESHOLD}% level [m/s/decade]"
    ax.set_xlabel(xlabel, fontsize=12)
    ax.set_title(f"Pi-control: {int(frac_mean)}% of years belong to a 20y trend period")
    ax.set_xlim(xmin=-0.2, xmax=0.2)
    add_letters(ax, letter_offset=1)
    plt.tight_layout()
    os.makedirs(f"{path_to_plots}/CMIP6", exist_ok=True)
    fig_path = (
        f"{path_to_plots}/CMIP6/Ensmean_picontrol_wind_trends_Europe_{P_THRESHOLD}.jpeg"
    )
    plt.savefig(fig_path, dpi=300)
    plt.close("all")
def plot_experiment_trend_histograms(path_to_data, path_to_plots):
    """
    Plots wind speed trend histograms for the historical and scenario
    (rcp26, rcp45, rcp85) experiments. Trends of the individual ensemble
    members (internal variability, i.e. member minus ensemble mean) and of the
    ensemble mean are drawn on twin axes, for p thresholds of 5 and 100
    (the latter meaning no significance screening).

    :param path_to_data: path to the directory containing the input data
    :param path_to_plots: path to the directory where the figures are saved
    :return:
    """
    # trend histograms in other periods
    for letter_index, experiment in enumerate(
        ["historical", "rcp26", "rcp45", "rcp85"]
    ):
        print(experiment)
        windfiles = sorted(glob.glob(f"{path_to_data}/{experiment}/sfcWind*.nc"))
        ds = open_datasets(windfiles)
        ds_ensmean = annual_mean(
            selbox(ensemble_mean_wind_speed(path_to_data, experiment))
        )
        # Get internal variability as wind speed minus ensemble mean wind speed
        ds_internal = ds - ds_ensmean
        for p_threshold in [5, 100]:
            # calculate trend slopes in individual ens members
            slopes = []
            for ens_member in ds_internal.ensemble_member:
                da_internal = ds_internal["sfcWind"].sel(
                    {"ensemble_member": ens_member}
                )
                slopes.extend(
                    [
                        slope_if_significant(
                            da_internal[x : x + 20], p_threshold=p_threshold
                        )
                        for x in range(da_internal.size - 20)
                    ]
                )
            slopes = np.asarray(slopes)
            # calculate trend slopes in ensemble mean
            slopes_ensmean = np.asarray(
                [
                    slope_if_significant(
                        ds_ensmean["sfcWind"][x : x + 20], p_threshold=p_threshold
                    )
                    for x in range(ds_ensmean.time.size - 20)
                ]
            )
            # plotting
            f, ax = plt.subplots()
            ax2 = ax.twinx()  # second y-axis for the ensemble-mean histogram
            bins = plot_histo(slopes, ax, experiment, p_threshold=p_threshold)
            ax2.hist(
                slopes_ensmean[np.isfinite(slopes_ensmean)],
                bins=50,
                density=True,
                color="darkgreen",
                alpha=0.7,
                label="ensemble mean",
            )
            ax.set_ylabel("PDF ensemble members", color="darkorange", fontsize=12)
            ax2.set_ylabel("PDF ensemble mean", color="darkgreen", fontsize=12)
            # fit Gaussian to histogram without significance screening
            if p_threshold == 100:
                mu, std = norm.fit(slopes)
                ax.plot(bins, norm.pdf(bins, mu, std), color="red")
            add_letters(ax, letter_offset=letter_index)
            plt.tight_layout()
            fig_path = f"{path_to_plots}/{experiment}_wind_trends_Europe_{p_threshold}_all.jpeg"
            plt.savefig(fig_path, dpi=300)
plt.close("all") | [
"utils.ensemble_mean_wind_speed",
"xarray.concat",
"numpy.isfinite",
"pandas.read_excel",
"utils.open_picontrol",
"numpy.arange",
"numpy.asarray",
"matplotlib.pyplot.close",
"scipy.stats.norm.fit",
"pandas.DataFrame",
"warnings.simplefilter",
"utils.add_letters",
"glob.glob",
"matplotlib.p... | [((276, 301), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (299, 301), False, 'import warnings\n'), ((307, 362), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {'category': 'ImportWarning'}), "('ignore', category=ImportWarning)\n", (328, 362), False, 'import warnings\n'), ((880, 1001), 'pandas.read_excel', 'pd.read_excel', (['f"""{path_to_data}/HadISD/Zeng_SIData2_HadISDv202.xlsx"""'], {'usecols': "['lons', 'lats']", 'sheet_name': '"""stations"""'}), "(f'{path_to_data}/HadISD/Zeng_SIData2_HadISDv202.xlsx',\n usecols=['lons', 'lats'], sheet_name='stations')\n", (893, 1001), True, 'import pandas as pd\n'), ((5638, 5666), 'utils.open_picontrol', 'open_picontrol', (['path_to_data'], {}), '(path_to_data)\n', (5652, 5666), False, 'from utils import ensemble_mean_wind_speed, annual_mean, add_letters, open_picontrol, selbox, open_datasets, LONMAX, LONMIN, LATMIN, LATMAX\n'), ((5893, 5985), 'xarray.DataArray', 'xr.DataArray', (['slopes'], {'dims': '"""time"""', 'coords': "{'time': ds_picontrol['sfcWind'].time[:1980]}"}), "(slopes, dims='time', coords={'time': ds_picontrol['sfcWind'].\n time[:1980]})\n", (5905, 5985), True, 'import xarray as xr\n'), ((6048, 6077), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 5)'}), '(figsize=(12, 5))\n', (6060, 6077), True, 'import matplotlib.pyplot as plt\n'), ((7697, 7715), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7713, 7715), True, 'import matplotlib.pyplot as plt\n'), ((7720, 7793), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{path_to_plots}/timeseries_picontrol_Europe.jpeg"""'], {'dpi': '(300)'}), "(f'{path_to_plots}/timeseries_picontrol_Europe.jpeg', dpi=300)\n", (7731, 7793), True, 'import matplotlib.pyplot as plt\n'), ((7798, 7814), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (7807, 7814), True, 'import matplotlib.pyplot as plt\n'), ((8315, 8343), 'utils.open_picontrol', 
'open_picontrol', (['path_to_data'], {}), '(path_to_data)\n', (8329, 8343), False, 'from utils import ensemble_mean_wind_speed, annual_mean, add_letters, open_picontrol, selbox, open_datasets, LONMAX, LONMIN, LATMIN, LATMAX\n'), ((8625, 8662), 'xarray.concat', 'xr.concat', (['ds_list_HadISD'], {'dim': '"""time"""'}), "(ds_list_HadISD, dim='time')\n", (8634, 8662), True, 'import xarray as xr\n'), ((10671, 10717), 'glob.glob', 'glob.glob', (['f"""{path_to_data}/CMIP6_annual/*.nc"""'], {}), "(f'{path_to_data}/CMIP6_annual/*.nc')\n", (10680, 10717), False, 'import glob\n'), ((10833, 10860), 'numpy.arange', 'np.arange', (['(-0.2)', '(0.2)', '(0.005)'], {}), '(-0.2, 0.2, 0.005)\n', (10842, 10860), True, 'import numpy as np\n'), ((12212, 12275), 'pandas.DataFrame', 'pd.DataFrame', (['CMIP6_histos'], {'index': "['n', 'bins', 'fracoftrends']"}), "(CMIP6_histos, index=['n', 'bins', 'fracoftrends'])\n", (12224, 12275), True, 'import pandas as pd\n'), ((12374, 12388), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (12386, 12388), True, 'import matplotlib.pyplot as plt\n'), ((13423, 13455), 'utils.add_letters', 'add_letters', (['ax'], {'letter_offset': '(1)'}), '(ax, letter_offset=1)\n', (13434, 13455), False, 'from utils import ensemble_mean_wind_speed, annual_mean, add_letters, open_picontrol, selbox, open_datasets, LONMAX, LONMIN, LATMIN, LATMAX\n'), ((13460, 13478), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13476, 13478), True, 'import matplotlib.pyplot as plt\n'), ((13483, 13535), 'os.makedirs', 'os.makedirs', (['f"""{path_to_plots}/CMIP6"""'], {'exist_ok': '(True)'}), "(f'{path_to_plots}/CMIP6', exist_ok=True)\n", (13494, 13535), False, 'import os\n'), ((13652, 13682), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_path'], {'dpi': '(300)'}), '(fig_path, dpi=300)\n', (13663, 13682), True, 'import matplotlib.pyplot as plt\n'), ((13687, 13703), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", 
(13696, 13703), True, 'import matplotlib.pyplot as plt\n'), ((2300, 2314), 'numpy.isfinite', 'np.isfinite', (['y'], {}), '(y)\n', (2311, 2314), True, 'import numpy as np\n'), ((2346, 2357), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (2354, 2357), True, 'import numpy as np\n'), ((2994, 3006), 'numpy.zeros', 'np.zeros', (['(81)'], {}), '(81)\n', (3002, 3006), True, 'import numpy as np\n'), ((11216, 11246), 'xarray.concat', 'xr.concat', (['ds_list'], {'dim': '"""time"""'}), "(ds_list, dim='time')\n", (11225, 11246), True, 'import xarray as xr\n'), ((11519, 11533), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (11531, 11533), True, 'import matplotlib.pyplot as plt\n'), ((11790, 11808), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11806, 11808), True, 'import matplotlib.pyplot as plt\n'), ((11817, 11869), 'os.makedirs', 'os.makedirs', (['f"""{path_to_plots}/CMIP6"""'], {'exist_ok': '(True)'}), "(f'{path_to_plots}/CMIP6', exist_ok=True)\n", (11828, 11869), False, 'import os\n'), ((11978, 12008), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_path'], {'dpi': '(300)'}), '(fig_path, dpi=300)\n', (11989, 12008), True, 'import matplotlib.pyplot as plt\n'), ((12017, 12033), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (12026, 12033), True, 'import matplotlib.pyplot as plt\n'), ((14039, 14063), 'utils.open_datasets', 'open_datasets', (['windfiles'], {}), '(windfiles)\n', (14052, 14063), False, 'from utils import ensemble_mean_wind_speed, annual_mean, add_letters, open_picontrol, selbox, open_datasets, LONMAX, LONMIN, LATMIN, LATMAX\n'), ((1472, 1516), 'xarray.concat', 'xr.concat', (['ds_stations'], {'dim': '"""station_number"""'}), "(ds_stations, dim='station_number')\n", (1481, 1516), True, 'import xarray as xr\n'), ((2012, 2035), 'numpy.arange', 'np.arange', (['trend_length'], {}), '(trend_length)\n', (2021, 2035), True, 'import numpy as np\n'), ((4027, 4046), 'numpy.isfinite', 'np.isfinite', 
(['slopes'], {}), '(slopes)\n', (4038, 4046), True, 'import numpy as np\n'), ((13971, 14024), 'glob.glob', 'glob.glob', (['f"""{path_to_data}/{experiment}/sfcWind*.nc"""'], {}), "(f'{path_to_data}/{experiment}/sfcWind*.nc')\n", (13980, 14024), False, 'import glob\n'), ((14935, 14953), 'numpy.asarray', 'np.asarray', (['slopes'], {}), '(slopes)\n', (14945, 14953), True, 'import numpy as np\n'), ((15353, 15367), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (15365, 15367), True, 'import matplotlib.pyplot as plt\n'), ((16122, 16165), 'utils.add_letters', 'add_letters', (['ax'], {'letter_offset': 'letter_index'}), '(ax, letter_offset=letter_index)\n', (16133, 16165), False, 'from utils import ensemble_mean_wind_speed, annual_mean, add_letters, open_picontrol, selbox, open_datasets, LONMAX, LONMIN, LATMIN, LATMAX\n'), ((16178, 16196), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (16194, 16196), True, 'import matplotlib.pyplot as plt\n'), ((16306, 16336), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_path'], {'dpi': '(300)'}), '(fig_path, dpi=300)\n', (16317, 16336), True, 'import matplotlib.pyplot as plt\n'), ((16349, 16365), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (16358, 16365), True, 'import matplotlib.pyplot as plt\n'), ((7311, 7329), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (7327, 7329), True, 'import cartopy.crs as ccrs\n'), ((8397, 8432), 'xarray.open_dataset', 'xr.open_dataset', (['x'], {'use_cftime': '(True)'}), '(x, use_cftime=True)\n', (8412, 8432), True, 'import xarray as xr\n'), ((8473, 8517), 'glob.glob', 'glob.glob', (['f"""{path_to_data}/pi-control/*.nc"""'], {}), "(f'{path_to_data}/pi-control/*.nc')\n", (8482, 8517), False, 'import glob\n'), ((9549, 9563), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9561, 9563), True, 'import matplotlib.pyplot as plt\n'), ((10102, 10117), 'utils.add_letters', 'add_letters', (['ax'], {}), 
'(ax)\n', (10113, 10117), False, 'from utils import ensemble_mean_wind_speed, annual_mean, add_letters, open_picontrol, selbox, open_datasets, LONMAX, LONMIN, LATMIN, LATMAX\n'), ((10134, 10152), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10150, 10152), True, 'import matplotlib.pyplot as plt\n'), ((10464, 10494), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_path'], {'dpi': '(300)'}), '(fig_path, dpi=300)\n', (10475, 10494), True, 'import matplotlib.pyplot as plt\n'), ((10511, 10527), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (10520, 10527), True, 'import matplotlib.pyplot as plt\n'), ((10992, 11027), 'xarray.open_dataset', 'xr.open_dataset', (['x'], {'use_cftime': '(True)'}), '(x, use_cftime=True)\n', (11007, 11027), True, 'import xarray as xr\n'), ((14118, 14168), 'utils.ensemble_mean_wind_speed', 'ensemble_mean_wind_speed', (['path_to_data', 'experiment'], {}), '(path_to_data, experiment)\n', (14142, 14168), False, 'from utils import ensemble_mean_wind_speed, annual_mean, add_letters, open_picontrol, selbox, open_datasets, LONMAX, LONMIN, LATMIN, LATMAX\n'), ((16025, 16041), 'scipy.stats.norm.fit', 'norm.fit', (['slopes'], {}), '(slopes)\n', (16033, 16041), False, 'from scipy.stats import linregress, norm\n'), ((6831, 6849), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (6847, 6849), True, 'import cartopy.crs as ccrs\n'), ((9939, 9955), 'scipy.stats.norm.fit', 'norm.fit', (['slopes'], {}), '(slopes)\n', (9947, 9955), False, 'from scipy.stats import linregress, norm\n'), ((11057, 11111), 'glob.glob', 'glob.glob', (['f"""{path_to_data}/CMIP6_annual/*{model}*.nc"""'], {}), "(f'{path_to_data}/CMIP6_annual/*{model}*.nc')\n", (11066, 11111), False, 'import glob\n'), ((15529, 15556), 'numpy.isfinite', 'np.isfinite', (['slopes_ensmean'], {}), '(slopes_ensmean)\n', (15540, 15556), True, 'import numpy as np\n'), ((16072, 16095), 'scipy.stats.norm.pdf', 'norm.pdf', (['bins', 'mu', 'std'], 
{}), '(bins, mu, std)\n', (16080, 16095), False, 'from scipy.stats import linregress, norm\n'), ((9990, 10013), 'scipy.stats.norm.pdf', 'norm.pdf', (['bins', 'mu', 'std'], {}), '(bins, mu, std)\n', (9998, 10013), False, 'from scipy.stats import linregress, norm\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Title :TODO
@File : data_provider_test.py
@Author : minjianxu
@Time : 2019/12/19 2:39 下午
@Version : 1.0
'''
import cv2
import numpy as np
import image_test
# 1. Test: what does drawing multi-point polygon coordinates onto an image look like?
def test1():
    """Draw a multi-point polygon onto a test image and display it before and after."""
    print("")
    img = cv2.imread("0002.jpg")
    # seg_map = cv2.fillPoly(seg_map, [np.array(shrinked_poly).astype(np.int32)], 1)
    # flat list of x,y coordinate pairs describing the polygon
    shrinked_poly = [1333,813,2014,957,0,0,113,0,227,0,340,0,454,0,567,0,681,0,681,144,567,144,454,144,340,144,227,144,113,144,0,144]
    # TODO which point comes first, and is the order clockwise or counter-clockwise!!
    # shrinked_poly = [50,50,300,50,300,300,50,400]
    # TODO convert 1-D to 2-D; TODO rotate clockwise
    shrinked_poly = image_test.poly_convert(shrinked_poly)
    image_test.show(img, "划线前")
    shrinked_poly = np.asarray(shrinked_poly)
    # TODO does fillPoly require a single-channel image?
    img = cv2.fillPoly(img, shrinked_poly.astype(np.int32)[np.newaxis, :, :], 0)
    # cv2.polylines(img,shrinked_poly,False)
    cv2.polylines(img, shrinked_poly, False, color=(0, 255, 0), thickness=5)
    # seg_map = cv2.drawContours(img, [np.array(shrinked_poly).astype(np.int32)], -1, 1, -1)
    image_test.show(img,"划线后")
if __name__ == '__main__':
test1() | [
"cv2.polylines",
"image_test.poly_convert",
"numpy.asarray",
"image_test.show",
"cv2.imread"
] | [((286, 308), 'cv2.imread', 'cv2.imread', (['"""0002.jpg"""'], {}), "('0002.jpg')\n", (296, 308), False, 'import cv2\n'), ((660, 698), 'image_test.poly_convert', 'image_test.poly_convert', (['shrinked_poly'], {}), '(shrinked_poly)\n', (683, 698), False, 'import image_test\n'), ((703, 730), 'image_test.show', 'image_test.show', (['img', '"""划线前"""'], {}), "(img, '划线前')\n", (718, 730), False, 'import image_test\n'), ((753, 778), 'numpy.asarray', 'np.asarray', (['shrinked_poly'], {}), '(shrinked_poly)\n', (763, 778), True, 'import numpy as np\n'), ((929, 1001), 'cv2.polylines', 'cv2.polylines', (['img', 'shrinked_poly', '(False)'], {'color': '(0, 255, 0)', 'thickness': '(5)'}), '(img, shrinked_poly, False, color=(0, 255, 0), thickness=5)\n', (942, 1001), False, 'import cv2\n'), ((1100, 1127), 'image_test.show', 'image_test.show', (['img', '"""划线后"""'], {}), "(img, '划线后')\n", (1115, 1127), False, 'import image_test\n')] |
import json
import numpy as np
def unsquash(X):
    """Transform a vector of dim (n,) or (1, n) into a column of dim (n, 1).

    Arrays that are already 2-D with more than one row are returned unchanged.
    """
    if len(X.shape) == 1 or X.shape[0] == 1:
        arr = np.asarray(X)
        # Reshape by total element count: handles both (n,) and (1, n) inputs.
        # The previous reshape((len(X), 1)) raised on row vectors, because
        # len(X) is 1 for a (1, n) array while it holds n elements.
        return arr.reshape((arr.size, 1))
    else:
        return X
def squash(X):
    """Transform vector of dim (n,1) into (n,).

    Any singleton dimensions are removed via numpy's squeeze.
    """
    return np.asarray(X).squeeze()
def extract_json(text, fields):
    """Extract specified fields from a JSON text and join their values with spaces.

    Fields that are missing or have falsy values are skipped. A single field
    name may be passed instead of a list.
    """
    if not isinstance(fields, list):
        fields = [fields]
    record = json.loads(text)
    values = (record.get(name) for name in fields)
    return " ".join(value for value in values if value)
| [
"json.loads",
"numpy.asarray"
] | [((482, 498), 'json.loads', 'json.loads', (['text'], {}), '(text)\n', (492, 498), False, 'import json\n'), ((314, 327), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (324, 327), True, 'import numpy as np\n'), ((162, 175), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (172, 175), True, 'import numpy as np\n')] |
"""
This file contains all the joint data and functions to parse the configuration file
"""
import numpy as np
from json import loads
import math
class Joint:
    """
    Contains all the information related to one joint of the kinematic chain
    and its relation to its parent and child joints.
    """
    def __init__(self, config, parent=None):
        """
        Initialises the object with the following data:
        origin - The xyz coordinates of the joint (Numpy array)
        angles - The rpy angles of the joint (Numpy array)
        type - The joint type, currently supporting "revolute" and "end"
        parent - The parent Joint object
        child - The child Joint object (filled in later)
        name - The name of the joint (Preferably unique)
        axis - The rotation or translation axis of the joint
        childName - The string name of the child (None when no child is configured)
        """
        self.origin = np.array(config["xyz"])
        self.angles = np.array(config["rpy"])
        self.type = config["type"]
        self.parent = parent
        self.name = config["name"]
        self.child = None
        self.axis = config["axis"]
        self.childName = None
        if "child" in config:
            self.childName = config["child"]

    def __str__(self):
        """
        Converts the object into a nicely formatted string for output purposes,
        including the zero-angle transformation matrix and the child chain.
        """
        toOutput = "---------- %s ----------\n" % self.name
        toOutput += str(self.getTransformationMatrix(0))
        if self.type != "end":
            toOutput += "\n######## %s's Child #########\n" % self.name
            toOutput += str(self.child)
        return toOutput

    def forwardKinematic(self, transMat=None, angles=None, jointsPos=None):
        """
        Calculates the forward kinematics from a list of angles.
        transMat - The current transformation matrix. Defaults to the identity matrix.
        angles - The joint angles ordered from the first one to the last one
        jointsPos - Accumulator list of xyz joint positions (a fresh list by default)
        Returns the final transformation matrix and the list of joint positions.
        """
        # None sentinels replace the previous mutable defaults: the original
        # jointsPos=[] list was shared across calls, so every invocation kept
        # appending to the very same list.
        if transMat is None:
            transMat = np.eye(4)
        if angles is None:
            angles = []
        if jointsPos is None:
            jointsPos = []
        angle = 0
        if len(angles):
            # consume the first angle, forward the remainder down the chain
            angle, angles = angles[0], angles[1:]
        curMat = np.dot(transMat, self.getTransformationMatrix(angle))
        jointsPos.append(curMat[:3, -1])  # xyz translation part of the matrix
        if self.child:
            return self.child.forwardKinematic(curMat, angles, jointsPos)
        else:
            return curMat, jointsPos

    def getTransformationMatrix(self, jointAngle):
        """
        Calculates the homogeneous transformation matrix relative to the parent.
        jointAngle - The angle of the joint about its configured axis
        """
        parentOrigin = np.array([0, 0, 0])
        parentAngles = np.array([0, 0, 0])
        if self.parent:
            parentOrigin = self.parent.origin
            parentAngles = self.parent.angles
        # only the configured axis receives the joint angle
        jointAngles = np.array([0, 0, 0], dtype=float)
        jointAngles[self.axis] = jointAngle
        transMat = np.append(rpyAnglesToRot(self.angles - parentAngles + jointAngles), np.transpose([self.origin - parentOrigin]), axis=1)
        transMat = np.append(transMat, [[0,0,0,1]], axis=0)
        return transMat
def createChildJoints(configsByName, parent):
    """
    Creates the joints recursively and assign the childs to their parent
    configsByName - Dictionary of the joint configurations by name
    parent - The Joint object of the parent
    """
    child = Joint(configsByName[parent.childName], parent)
    parent.child = child
    # recurse only while the freshly created joint itself declares a child
    if "child" in configsByName[child.name]:
        createChildJoints(configsByName, child)
def parseJointsConfigFile(filePath):
    """
    Parses the configuration file and outputs the root joint with the number of joints.
    filePath - The path to the configuration file (JSON list of joint configs)
    """
    configsByName = {}
    rootConfig = {}
    numJoints = 0
    # Read the robot's configuration from the conf.json file.
    # open() replaces the Python-2-only file() builtin so the function
    # also runs under Python 3.
    with open(filePath) as f:
        robotDefinition = loads(f.read())
        numJoints = len(robotDefinition) - 1  # Must substract the end effector since it is not a joint
        for joint in robotDefinition:
            configsByName[joint["name"]] = joint
            if "isRoot" in joint and joint["isRoot"]:
                rootConfig = joint
    # Create the joints object tree from the root down
    rootJoint = Joint(rootConfig)
    createChildJoints(configsByName, rootJoint)
    return rootJoint, numJoints
def rpyAnglesToRot(rpy):
    """
    Converts the euler angles to a rotation matrix using Numpy
    rpy - An array containing the roll, pitch, yaw angles
    """
    # precompute the sines and cosines once per axis for readability
    c_r, s_r = math.cos(rpy[0]), math.sin(rpy[0])
    c_p, s_p = math.cos(rpy[1]), math.sin(rpy[1])
    c_y, s_y = math.cos(rpy[2]), math.sin(rpy[2])
    rot_x = np.array([[1, 0, 0],
                      [0, c_r, -s_r],
                      [0, s_r, c_r]])
    rot_y = np.array([[c_p, 0, s_p],
                      [0, 1, 0],
                      [-s_p, 0, c_p]])
    rot_z = np.array([[c_y, -s_y, 0],
                      [s_y, c_y, 0],
                      [0, 0, 1]])
    # composition order: R = Rz * Ry * Rx
    return np.dot(rot_z, np.dot(rot_y, rot_x))
return R | [
"numpy.eye",
"math.sin",
"numpy.append",
"numpy.array",
"numpy.dot",
"math.cos",
"numpy.transpose"
] | [((736, 759), 'numpy.array', 'np.array', (["config['xyz']"], {}), "(config['xyz'])\n", (744, 759), True, 'import numpy as np\n'), ((776, 799), 'numpy.array', 'np.array', (["config['rpy']"], {}), "(config['rpy'])\n", (784, 799), True, 'import numpy as np\n'), ((1401, 1410), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1407, 1410), True, 'import numpy as np\n'), ((2143, 2162), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (2151, 2162), True, 'import numpy as np\n'), ((2180, 2199), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (2188, 2199), True, 'import numpy as np\n'), ((2309, 2341), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': 'float'}), '([0, 0, 0], dtype=float)\n', (2317, 2341), True, 'import numpy as np\n'), ((2526, 2569), 'numpy.append', 'np.append', (['transMat', '[[0, 0, 0, 1]]'], {'axis': '(0)'}), '(transMat, [[0, 0, 0, 1]], axis=0)\n', (2535, 2569), True, 'import numpy as np\n'), ((4348, 4364), 'numpy.dot', 'np.dot', (['R_y', 'R_x'], {}), '(R_y, R_x)\n', (4354, 4364), True, 'import numpy as np\n'), ((2461, 2503), 'numpy.transpose', 'np.transpose', (['[self.origin - parentOrigin]'], {}), '([self.origin - parentOrigin])\n', (2473, 2503), True, 'import numpy as np\n'), ((3939, 3955), 'math.cos', 'math.cos', (['rpy[0]'], {}), '(rpy[0])\n', (3947, 3955), False, 'import math\n'), ((3998, 4014), 'math.sin', 'math.sin', (['rpy[0]'], {}), '(rpy[0])\n', (4006, 4014), False, 'import math\n'), ((4016, 4032), 'math.cos', 'math.cos', (['rpy[0]'], {}), '(rpy[0])\n', (4024, 4032), False, 'import math\n'), ((4055, 4071), 'math.cos', 'math.cos', (['rpy[1]'], {}), '(rpy[1])\n', (4063, 4071), False, 'import math\n'), ((4076, 4092), 'math.sin', 'math.sin', (['rpy[1]'], {}), '(rpy[1])\n', (4084, 4092), False, 'import math\n'), ((4163, 4179), 'math.cos', 'math.cos', (['rpy[1]'], {}), '(rpy[1])\n', (4171, 4179), False, 'import math\n'), ((4202, 4218), 'math.cos', 'math.cos', (['rpy[2]'], {}), '(rpy[2])\n', (4210, 4218), False, 
'import math\n'), ((4261, 4277), 'math.sin', 'math.sin', (['rpy[2]'], {}), '(rpy[2])\n', (4269, 4277), False, 'import math\n'), ((4279, 4295), 'math.cos', 'math.cos', (['rpy[2]'], {}), '(rpy[2])\n', (4287, 4295), False, 'import math\n'), ((3958, 3974), 'math.sin', 'math.sin', (['rpy[0]'], {}), '(rpy[0])\n', (3966, 3974), False, 'import math\n'), ((4142, 4158), 'math.sin', 'math.sin', (['rpy[1]'], {}), '(rpy[1])\n', (4150, 4158), False, 'import math\n'), ((4221, 4237), 'math.sin', 'math.sin', (['rpy[2]'], {}), '(rpy[2])\n', (4229, 4237), False, 'import math\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 5 20:42:24 2019
@author: nico
"""
import os
import numpy as np
from scipy import signal as sig
import matplotlib.pyplot as plt
from scipy.fftpack import fft
import scipy.io as sio
from time import time
import pandas as pd
os.system ("clear") # clears the python terminal
plt.close("all") # closes all open figures
# figure-styling defaults (not all of them are applied below)
fig_sz_x = 14
fig_sz_y = 13
fig_dpi = 80 # dpi
fig_font_family = 'Ubuntu'
fig_font_size = 16
#%% load the ECG_TP4.mat file
# to list the variables stored in the file:
#sio.whosmat('ECG_TP4.mat')
mat_struct = sio.loadmat('ECG_TP4.mat')
ecg_one_lead = mat_struct['ecg_lead']
# NOTE(review): flatten(1) relies on legacy numpy accepting 1 as the order
# argument (Fortran order); recent numpy versions expect 'F' — confirm version
ecg_one_lead = ecg_one_lead.flatten(1)
cant_muestras = len(ecg_one_lead)
#%% define the sampling frequency and the time axis
fs = 1000
tt = np.linspace(0, cant_muestras, cant_muestras)
#%% baseline estimation with the original cascaded median filters
the_start = time()
median1 = sig.medfilt(ecg_one_lead, 201) # 200 ms window (201 samples at fs = 1 kHz)
median2 = sig.medfilt(median1, 601) # 600 ms window
the_end = time()
tiempodft = the_end - the_start
# remove the estimated baseline wander from the ECG
signal = ecg_one_lead - median2
del the_start, the_end
print('El tiempo demorado por este tipo de filtrado es: ',tiempodft)
#%% Plots
plt.figure("Estimación de la interpolante", constrained_layout=True)
plt.title("Estimación de la interpolante")
plt.plot(tt, median2)
plt.xlabel('Muestras')
plt.ylabel("Amplitud ")
plt.axhline(0, color="black")
plt.axvline(0, color="black")
plt.grid()
plt.legend()
plt.show()
plt.figure("ECG", constrained_layout=True)
plt.title("ECG")
plt.plot(tt, ecg_one_lead, label='ECG original')
plt.plot(tt, signal, label='ECG filtrada')
plt.xlabel('Muestras')
plt.ylabel("Amplitud ")
plt.axhline(0, color="black")
plt.axvline(0, color="black")
plt.grid()
plt.legend()
plt.show()
#%% Zoom regions
# Segments of interest
regs_interes = (
    np.array([1.6, 2.6]) *60*fs, # minutes to samples
    np.array([4, 5]) *60*fs, # minutes to samples
    np.array([10, 10.5]) *60*fs, # minutes to samples
    np.array([12, 12.7]) *60*fs, # minutes to samples
    np.array([14.6, 15.7]) *60*fs, # minutes to samples
    )
for ii in regs_interes:
    # interval limited to the range [0, cant_muestras]
    zoom_region = np.arange(np.max([0, ii[0]]), np.min([cant_muestras, ii[1]]), dtype='uint')
    # clips the indices so they stay valid; another option is modulo N (adding N
    # makes the index re-enter from the other end, circular; in 'C' it is x % 5)
    plt.figure(figsize=(fig_sz_x, fig_sz_y), dpi= fig_dpi, facecolor='w', edgecolor='k')
    plt.plot(zoom_region, ecg_one_lead[zoom_region], label='ECG', lw=2)
    plt.plot(zoom_region, signal[zoom_region], label='interpolante')
    plt.title('ECG filtering example from ' + str(ii[0]) + ' to ' + str(ii[1]) )
    plt.ylabel('Adimensional')
    plt.xlabel('Muestras (#)')
    axes_hdl = plt.gca()
    axes_hdl.legend()
    axes_hdl.set_yticks(())
    plt.show()
#%% measurement of the multirate filter cut-off frequency (keep 95% of the energy, using cumsum)
K = 30
L = cant_muestras/K
ff2,Swelch = sig.welch(median2,fs=fs,nperseg=L,window='bartlett')
Swelch2 = 10*np.log10(Swelch)
plt.figure("Estimación de la señal interpolante con el método de Welch")
plt.title(" Estimación de la señal interpolante con el método de Welch")
plt.plot(ff2,Swelch2)
plt.xlabel('frecuecnia [Hz]')
plt.ylabel('Amplitud db')
plt.grid()
plt.show()
# compute the cut-off frequency that contains 95% of the energy
energia=np.zeros((int(L/2)+1))
np.cumsum(Swelch, out=energia)
limfreq = energia < 0.95*energia[-1]
# first index where the cumulative energy exceeds the 95% threshold
for ii in range(len(limfreq)) :
    if limfreq[ii] == False:
        freq = ii
        break
# compute the number of decimation passes
nyq_frec = fs / 2
cant_pasadas = nyq_frec/freq
cant_pasadas = np.log2(cant_pasadas) # because each pass halves the bandwidth
cant_pasadas = int(np.round(cant_pasadas))
#%% generate the interpolating baseline using the multirate technique
the_start = time()
decimation = ecg_one_lead
for jj in range(cant_pasadas):
    decimation = sig.decimate(decimation, 2)
median1_dec = sig.medfilt(decimation, 3) # 200 ms at the decimated rate
median2_dec = sig.medfilt(median1_dec, 5) # 600 ms at the decimated rate
interpolation = median2_dec
# upsample back to the original rate, doubling the length on each pass
for jj in range(cant_pasadas):
    interpolation = sig.resample(interpolation,2*len(interpolation))
signal_int = ecg_one_lead - interpolation[0:len(ecg_one_lead)]
the_end = time()
tiempodft_dec = the_end - the_start
del the_start, the_end
#%% save a clean ECG for item 5b
# NOTE(review): np.object is deprecated and removed in numpy >= 1.24; the
# builtin `object` would be required there — confirm the numpy version in use
obj_arr = np.zeros((1), dtype=np.object)
obj_arr = signal_int
sio.savemat('./ECG_Limpio.mat', mdict={'ECG_Limpio': obj_arr})
#%% comparo los dos métodos en tiempo y en error absoluto
tiempo = tiempodft / tiempodft_dec
error = median2 - interpolation[0:len(ecg_one_lead)]
error_cuadratico = (median2 - interpolation[0:len(ecg_one_lead)])**2
valor_medio_real = np.mean(median2)
valor_medio_interpolate_signal = np.mean(interpolation)
sesgo = np.abs(valor_medio_real - valor_medio_interpolate_signal)
error_cuadratico_medio = np.mean(error_cuadratico)
error__medio = np.mean(error)
var_error = np.var(error, axis=0)
plt.figure("ECG 2", constrained_layout=True)
plt.title("ECG 2")
plt.plot(tt, ecg_one_lead, label='ECG original')
plt.plot(tt, signal, label='ECG filtrada completa')
plt.plot(tt, signal_int, label = 'ECG filtrada con resampleo')
plt.xlabel('Muestras')
plt.ylabel("Amplitud ")
plt.axhline(0, color="black")
plt.axvline(0, color="black")
plt.grid()
plt.legend()
plt.show()
plt.figure("Comapración de estimadores", constrained_layout=True)
plt.title("Comparación de estimadores")
plt.plot(tt, median2, label='est med original')
plt.plot(tt, interpolation[0:len(ecg_one_lead)], label='est med resampling')
plt.xlabel('Muestras')
plt.ylabel("Amplitud ")
plt.axhline(0, color="black")
plt.axvline(0, color="black")
plt.grid()
plt.legend()
plt.show()
plt.figure("Error cuadrático de estimadores", constrained_layout=True)
plt.title("Error cuadrático de estimadores")
plt.plot(tt, error_cuadratico, label='error cuadrático')
plt.plot(tt, np.ones((len(ecg_one_lead)))*error_cuadratico_medio, label='media')
plt.xlabel('Muestras')
plt.ylabel("Amplitud ")
plt.axhline(0, color="black")
plt.axvline(0, color="black")
plt.grid()
plt.legend()
plt.show()
plt.figure("Histograma de errores")
plt.hist(error, bins=50, alpha=1, edgecolor = 'black', linewidth=1, label="error")
plt.legend(loc = 'upper right')
plt.ylabel('frecuencia')
plt.xlabel('valores')
plt.title('Histograma de errores' )
plt.show()
#%% Zoom regions
# Segmentos de interés
regs_interes = (
np.array([1.6, 2.6]) *60*fs, # minutos a muestras
np.array([4, 5]) *60*fs, # minutos a muestras
np.array([10, 10.5]) *60*fs, # minutos a muestras
np.array([12, 12.7]) *60*fs, # minutos a muestras
np.array([14.6, 15.7]) *60*fs, # minutos a muestras
)
for ii in regs_interes:
# intervalo limitado de 0 a cant_muestras
zoom_region = np.arange(np.max([0, ii[0]]), np.min([cant_muestras, ii[1]]), dtype='uint')
#hace el clipeo para salvar a los indices otra forma es el modulo N (le sumas N para que ingece
#por el otro extremo y queda circular en 'C' se hace x % 5 )
plt.figure(figsize=(fig_sz_x, fig_sz_y), dpi= fig_dpi, facecolor='w', edgecolor='k')
plt.plot(zoom_region, ecg_one_lead[zoom_region], label='ECG', lw=2)
plt.plot(zoom_region, interpolation[zoom_region], label='interpolante resamplig')
plt.plot(zoom_region, median2[zoom_region], label='interpolante')
plt.title('ECG filtering example from ' + str(ii[0]) + ' to ' + str(ii[1]) )
plt.ylabel('Adimensional')
plt.xlabel('Muestras (#)')
axes_hdl = plt.gca()
axes_hdl.legend()
axes_hdl.set_yticks(())
plt.show()
#%% Presentación de resultados
tus_resultados_per = [
[ tiempodft,valor_medio_real, '-' , '-'], # <-- acá debería haber numeritos :)
[ tiempodft_dec, valor_medio_interpolate_signal, '-', '-'], # <-- acá debería haber numeritos :)
]
df = pd.DataFrame(tus_resultados_per, columns=['$tiempo', '$media', 'media_error', 'varianza'],
index=['interpolante real','interpolante resamplleada'])
print("\n")
print(df)
| [
"matplotlib.pyplot.grid",
"scipy.io.savemat",
"matplotlib.pyplot.hist",
"numpy.log10",
"matplotlib.pyplot.ylabel",
"scipy.io.loadmat",
"numpy.array",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axhline",
"nu... | [((296, 314), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (305, 314), False, 'import os\n'), ((347, 363), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (356, 363), True, 'import matplotlib.pyplot as plt\n'), ((617, 643), 'scipy.io.loadmat', 'sio.loadmat', (['"""ECG_TP4.mat"""'], {}), "('ECG_TP4.mat')\n", (628, 643), True, 'import scipy.io as sio\n'), ((808, 852), 'numpy.linspace', 'np.linspace', (['(0)', 'cant_muestras', 'cant_muestras'], {}), '(0, cant_muestras, cant_muestras)\n', (819, 852), True, 'import numpy as np\n'), ((907, 913), 'time.time', 'time', ([], {}), '()\n', (911, 913), False, 'from time import time\n'), ((924, 954), 'scipy.signal.medfilt', 'sig.medfilt', (['ecg_one_lead', '(201)'], {}), '(ecg_one_lead, 201)\n', (935, 954), True, 'from scipy import signal as sig\n'), ((973, 998), 'scipy.signal.medfilt', 'sig.medfilt', (['median1', '(601)'], {}), '(median1, 601)\n', (984, 998), True, 'from scipy import signal as sig\n'), ((1017, 1023), 'time.time', 'time', ([], {}), '()\n', (1021, 1023), False, 'from time import time\n'), ((1195, 1263), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Estimación de la interpolante"""'], {'constrained_layout': '(True)'}), "('Estimación de la interpolante', constrained_layout=True)\n", (1205, 1263), True, 'import matplotlib.pyplot as plt\n'), ((1264, 1306), 'matplotlib.pyplot.title', 'plt.title', (['"""Estimación de la interpolante"""'], {}), "('Estimación de la interpolante')\n", (1273, 1306), True, 'import matplotlib.pyplot as plt\n'), ((1307, 1328), 'matplotlib.pyplot.plot', 'plt.plot', (['tt', 'median2'], {}), '(tt, median2)\n', (1315, 1328), True, 'import matplotlib.pyplot as plt\n'), ((1329, 1351), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Muestras"""'], {}), "('Muestras')\n", (1339, 1351), True, 'import matplotlib.pyplot as plt\n'), ((1352, 1375), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitud """'], {}), "('Amplitud ')\n", (1362, 1375), 
True, 'import matplotlib.pyplot as plt\n'), ((1376, 1405), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0)'], {'color': '"""black"""'}), "(0, color='black')\n", (1387, 1405), True, 'import matplotlib.pyplot as plt\n'), ((1406, 1435), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(0)'], {'color': '"""black"""'}), "(0, color='black')\n", (1417, 1435), True, 'import matplotlib.pyplot as plt\n'), ((1436, 1446), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1444, 1446), True, 'import matplotlib.pyplot as plt\n'), ((1447, 1459), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1457, 1459), True, 'import matplotlib.pyplot as plt\n'), ((1460, 1470), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1468, 1470), True, 'import matplotlib.pyplot as plt\n'), ((1473, 1515), 'matplotlib.pyplot.figure', 'plt.figure', (['"""ECG"""'], {'constrained_layout': '(True)'}), "('ECG', constrained_layout=True)\n", (1483, 1515), True, 'import matplotlib.pyplot as plt\n'), ((1516, 1532), 'matplotlib.pyplot.title', 'plt.title', (['"""ECG"""'], {}), "('ECG')\n", (1525, 1532), True, 'import matplotlib.pyplot as plt\n'), ((1533, 1581), 'matplotlib.pyplot.plot', 'plt.plot', (['tt', 'ecg_one_lead'], {'label': '"""ECG original"""'}), "(tt, ecg_one_lead, label='ECG original')\n", (1541, 1581), True, 'import matplotlib.pyplot as plt\n'), ((1582, 1624), 'matplotlib.pyplot.plot', 'plt.plot', (['tt', 'signal'], {'label': '"""ECG filtrada"""'}), "(tt, signal, label='ECG filtrada')\n", (1590, 1624), True, 'import matplotlib.pyplot as plt\n'), ((1625, 1647), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Muestras"""'], {}), "('Muestras')\n", (1635, 1647), True, 'import matplotlib.pyplot as plt\n'), ((1648, 1671), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitud """'], {}), "('Amplitud ')\n", (1658, 1671), True, 'import matplotlib.pyplot as plt\n'), ((1672, 1701), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0)'], {'color': '"""black"""'}), "(0, 
color='black')\n", (1683, 1701), True, 'import matplotlib.pyplot as plt\n'), ((1702, 1731), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(0)'], {'color': '"""black"""'}), "(0, color='black')\n", (1713, 1731), True, 'import matplotlib.pyplot as plt\n'), ((1732, 1742), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1740, 1742), True, 'import matplotlib.pyplot as plt\n'), ((1743, 1755), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1753, 1755), True, 'import matplotlib.pyplot as plt\n'), ((1756, 1766), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1764, 1766), True, 'import matplotlib.pyplot as plt\n'), ((3122, 3177), 'scipy.signal.welch', 'sig.welch', (['median2'], {'fs': 'fs', 'nperseg': 'L', 'window': '"""bartlett"""'}), "(median2, fs=fs, nperseg=L, window='bartlett')\n", (3131, 3177), True, 'from scipy import signal as sig\n'), ((3206, 3278), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Estimación de la señal interpolante con el método de Welch"""'], {}), "('Estimación de la señal interpolante con el método de Welch')\n", (3216, 3278), True, 'import matplotlib.pyplot as plt\n'), ((3279, 3351), 'matplotlib.pyplot.title', 'plt.title', (['""" Estimación de la señal interpolante con el método de Welch"""'], {}), "(' Estimación de la señal interpolante con el método de Welch')\n", (3288, 3351), True, 'import matplotlib.pyplot as plt\n'), ((3352, 3374), 'matplotlib.pyplot.plot', 'plt.plot', (['ff2', 'Swelch2'], {}), '(ff2, Swelch2)\n', (3360, 3374), True, 'import matplotlib.pyplot as plt\n'), ((3374, 3404), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""frecuecnia [Hz]"""'], {}), "('frecuecnia [Hz]')\n", (3384, 3404), True, 'import matplotlib.pyplot as plt\n'), ((3405, 3430), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitud db"""'], {}), "('Amplitud db')\n", (3415, 3430), True, 'import matplotlib.pyplot as plt\n'), ((3431, 3441), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3439, 3441), True, 'import 
matplotlib.pyplot as plt\n'), ((3442, 3452), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3450, 3452), True, 'import matplotlib.pyplot as plt\n'), ((3542, 3572), 'numpy.cumsum', 'np.cumsum', (['Swelch'], {'out': 'energia'}), '(Swelch, out=energia)\n', (3551, 3572), True, 'import numpy as np\n'), ((3809, 3830), 'numpy.log2', 'np.log2', (['cant_pasadas'], {}), '(cant_pasadas)\n', (3816, 3830), True, 'import numpy as np\n'), ((3990, 3996), 'time.time', 'time', ([], {}), '()\n', (3994, 3996), False, 'from time import time\n'), ((4118, 4144), 'scipy.signal.medfilt', 'sig.medfilt', (['decimation', '(3)'], {}), '(decimation, 3)\n', (4129, 4144), True, 'from scipy import signal as sig\n'), ((4167, 4194), 'scipy.signal.medfilt', 'sig.medfilt', (['median1_dec', '(5)'], {}), '(median1_dec, 5)\n', (4178, 4194), True, 'from scipy import signal as sig\n'), ((4412, 4418), 'time.time', 'time', ([], {}), '()\n', (4416, 4418), False, 'from time import time\n'), ((4537, 4565), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'np.object'}), '(1, dtype=np.object)\n', (4545, 4565), True, 'import numpy as np\n'), ((4589, 4651), 'scipy.io.savemat', 'sio.savemat', (['"""./ECG_Limpio.mat"""'], {'mdict': "{'ECG_Limpio': obj_arr}"}), "('./ECG_Limpio.mat', mdict={'ECG_Limpio': obj_arr})\n", (4600, 4651), True, 'import scipy.io as sio\n'), ((4887, 4903), 'numpy.mean', 'np.mean', (['median2'], {}), '(median2)\n', (4894, 4903), True, 'import numpy as np\n'), ((4937, 4959), 'numpy.mean', 'np.mean', (['interpolation'], {}), '(interpolation)\n', (4944, 4959), True, 'import numpy as np\n'), ((4969, 5026), 'numpy.abs', 'np.abs', (['(valor_medio_real - valor_medio_interpolate_signal)'], {}), '(valor_medio_real - valor_medio_interpolate_signal)\n', (4975, 5026), True, 'import numpy as np\n'), ((5053, 5078), 'numpy.mean', 'np.mean', (['error_cuadratico'], {}), '(error_cuadratico)\n', (5060, 5078), True, 'import numpy as np\n'), ((5094, 5108), 'numpy.mean', 'np.mean', (['error'], {}), 
'(error)\n', (5101, 5108), True, 'import numpy as np\n'), ((5121, 5142), 'numpy.var', 'np.var', (['error'], {'axis': '(0)'}), '(error, axis=0)\n', (5127, 5142), True, 'import numpy as np\n'), ((5144, 5188), 'matplotlib.pyplot.figure', 'plt.figure', (['"""ECG 2"""'], {'constrained_layout': '(True)'}), "('ECG 2', constrained_layout=True)\n", (5154, 5188), True, 'import matplotlib.pyplot as plt\n'), ((5189, 5207), 'matplotlib.pyplot.title', 'plt.title', (['"""ECG 2"""'], {}), "('ECG 2')\n", (5198, 5207), True, 'import matplotlib.pyplot as plt\n'), ((5208, 5256), 'matplotlib.pyplot.plot', 'plt.plot', (['tt', 'ecg_one_lead'], {'label': '"""ECG original"""'}), "(tt, ecg_one_lead, label='ECG original')\n", (5216, 5256), True, 'import matplotlib.pyplot as plt\n'), ((5257, 5308), 'matplotlib.pyplot.plot', 'plt.plot', (['tt', 'signal'], {'label': '"""ECG filtrada completa"""'}), "(tt, signal, label='ECG filtrada completa')\n", (5265, 5308), True, 'import matplotlib.pyplot as plt\n'), ((5309, 5369), 'matplotlib.pyplot.plot', 'plt.plot', (['tt', 'signal_int'], {'label': '"""ECG filtrada con resampleo"""'}), "(tt, signal_int, label='ECG filtrada con resampleo')\n", (5317, 5369), True, 'import matplotlib.pyplot as plt\n'), ((5372, 5394), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Muestras"""'], {}), "('Muestras')\n", (5382, 5394), True, 'import matplotlib.pyplot as plt\n'), ((5395, 5418), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitud """'], {}), "('Amplitud ')\n", (5405, 5418), True, 'import matplotlib.pyplot as plt\n'), ((5419, 5448), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0)'], {'color': '"""black"""'}), "(0, color='black')\n", (5430, 5448), True, 'import matplotlib.pyplot as plt\n'), ((5449, 5478), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(0)'], {'color': '"""black"""'}), "(0, color='black')\n", (5460, 5478), True, 'import matplotlib.pyplot as plt\n'), ((5479, 5489), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (5487, 5489), True, 
'import matplotlib.pyplot as plt\n'), ((5490, 5502), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5500, 5502), True, 'import matplotlib.pyplot as plt\n'), ((5503, 5513), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5511, 5513), True, 'import matplotlib.pyplot as plt\n'), ((5515, 5580), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Comapración de estimadores"""'], {'constrained_layout': '(True)'}), "('Comapración de estimadores', constrained_layout=True)\n", (5525, 5580), True, 'import matplotlib.pyplot as plt\n'), ((5581, 5620), 'matplotlib.pyplot.title', 'plt.title', (['"""Comparación de estimadores"""'], {}), "('Comparación de estimadores')\n", (5590, 5620), True, 'import matplotlib.pyplot as plt\n'), ((5621, 5668), 'matplotlib.pyplot.plot', 'plt.plot', (['tt', 'median2'], {'label': '"""est med original"""'}), "(tt, median2, label='est med original')\n", (5629, 5668), True, 'import matplotlib.pyplot as plt\n'), ((5746, 5768), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Muestras"""'], {}), "('Muestras')\n", (5756, 5768), True, 'import matplotlib.pyplot as plt\n'), ((5769, 5792), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitud """'], {}), "('Amplitud ')\n", (5779, 5792), True, 'import matplotlib.pyplot as plt\n'), ((5793, 5822), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0)'], {'color': '"""black"""'}), "(0, color='black')\n", (5804, 5822), True, 'import matplotlib.pyplot as plt\n'), ((5823, 5852), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(0)'], {'color': '"""black"""'}), "(0, color='black')\n", (5834, 5852), True, 'import matplotlib.pyplot as plt\n'), ((5853, 5863), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (5861, 5863), True, 'import matplotlib.pyplot as plt\n'), ((5864, 5876), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5874, 5876), True, 'import matplotlib.pyplot as plt\n'), ((5877, 5887), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5885, 5887), True, 'import 
matplotlib.pyplot as plt\n'), ((5889, 5959), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Error cuadrático de estimadores"""'], {'constrained_layout': '(True)'}), "('Error cuadrático de estimadores', constrained_layout=True)\n", (5899, 5959), True, 'import matplotlib.pyplot as plt\n'), ((5960, 6004), 'matplotlib.pyplot.title', 'plt.title', (['"""Error cuadrático de estimadores"""'], {}), "('Error cuadrático de estimadores')\n", (5969, 6004), True, 'import matplotlib.pyplot as plt\n'), ((6005, 6061), 'matplotlib.pyplot.plot', 'plt.plot', (['tt', 'error_cuadratico'], {'label': '"""error cuadrático"""'}), "(tt, error_cuadratico, label='error cuadrático')\n", (6013, 6061), True, 'import matplotlib.pyplot as plt\n'), ((6143, 6165), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Muestras"""'], {}), "('Muestras')\n", (6153, 6165), True, 'import matplotlib.pyplot as plt\n'), ((6166, 6189), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitud """'], {}), "('Amplitud ')\n", (6176, 6189), True, 'import matplotlib.pyplot as plt\n'), ((6190, 6219), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0)'], {'color': '"""black"""'}), "(0, color='black')\n", (6201, 6219), True, 'import matplotlib.pyplot as plt\n'), ((6220, 6249), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(0)'], {'color': '"""black"""'}), "(0, color='black')\n", (6231, 6249), True, 'import matplotlib.pyplot as plt\n'), ((6250, 6260), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (6258, 6260), True, 'import matplotlib.pyplot as plt\n'), ((6261, 6273), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6271, 6273), True, 'import matplotlib.pyplot as plt\n'), ((6274, 6284), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6282, 6284), True, 'import matplotlib.pyplot as plt\n'), ((6286, 6321), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Histograma de errores"""'], {}), "('Histograma de errores')\n", (6296, 6321), True, 'import matplotlib.pyplot as plt\n'), ((6322, 6407), 
'matplotlib.pyplot.hist', 'plt.hist', (['error'], {'bins': '(50)', 'alpha': '(1)', 'edgecolor': '"""black"""', 'linewidth': '(1)', 'label': '"""error"""'}), "(error, bins=50, alpha=1, edgecolor='black', linewidth=1, label='error'\n )\n", (6330, 6407), True, 'import matplotlib.pyplot as plt\n'), ((6406, 6435), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (6416, 6435), True, 'import matplotlib.pyplot as plt\n'), ((6438, 6462), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""frecuencia"""'], {}), "('frecuencia')\n", (6448, 6462), True, 'import matplotlib.pyplot as plt\n'), ((6463, 6484), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""valores"""'], {}), "('valores')\n", (6473, 6484), True, 'import matplotlib.pyplot as plt\n'), ((6485, 6519), 'matplotlib.pyplot.title', 'plt.title', (['"""Histograma de errores"""'], {}), "('Histograma de errores')\n", (6494, 6519), True, 'import matplotlib.pyplot as plt\n'), ((6521, 6531), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6529, 6531), True, 'import matplotlib.pyplot as plt\n'), ((8090, 8246), 'pandas.DataFrame', 'pd.DataFrame', (['tus_resultados_per'], {'columns': "['$tiempo', '$media', 'media_error', 'varianza']", 'index': "['interpolante real', 'interpolante resamplleada']"}), "(tus_resultados_per, columns=['$tiempo', '$media',\n 'media_error', 'varianza'], index=['interpolante real',\n 'interpolante resamplleada'])\n", (8102, 8246), True, 'import pandas as pd\n'), ((2464, 2551), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(fig_sz_x, fig_sz_y)', 'dpi': 'fig_dpi', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(figsize=(fig_sz_x, fig_sz_y), dpi=fig_dpi, facecolor='w',\n edgecolor='k')\n", (2474, 2551), True, 'import matplotlib.pyplot as plt\n'), ((2553, 2620), 'matplotlib.pyplot.plot', 'plt.plot', (['zoom_region', 'ecg_one_lead[zoom_region]'], {'label': '"""ECG"""', 'lw': '(2)'}), "(zoom_region, ecg_one_lead[zoom_region], label='ECG', 
lw=2)\n", (2561, 2620), True, 'import matplotlib.pyplot as plt\n'), ((2625, 2689), 'matplotlib.pyplot.plot', 'plt.plot', (['zoom_region', 'signal[zoom_region]'], {'label': '"""interpolante"""'}), "(zoom_region, signal[zoom_region], label='interpolante')\n", (2633, 2689), True, 'import matplotlib.pyplot as plt\n'), ((2780, 2806), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Adimensional"""'], {}), "('Adimensional')\n", (2790, 2806), True, 'import matplotlib.pyplot as plt\n'), ((2811, 2837), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Muestras (#)"""'], {}), "('Muestras (#)')\n", (2821, 2837), True, 'import matplotlib.pyplot as plt\n'), ((2858, 2867), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2865, 2867), True, 'import matplotlib.pyplot as plt\n'), ((2935, 2945), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2943, 2945), True, 'import matplotlib.pyplot as plt\n'), ((3188, 3204), 'numpy.log10', 'np.log10', (['Swelch'], {}), '(Swelch)\n', (3196, 3204), True, 'import numpy as np\n'), ((3889, 3911), 'numpy.round', 'np.round', (['cant_pasadas'], {}), '(cant_pasadas)\n', (3897, 3911), True, 'import numpy as np\n'), ((4073, 4100), 'scipy.signal.decimate', 'sig.decimate', (['decimation', '(2)'], {}), '(decimation, 2)\n', (4085, 4100), True, 'from scipy import signal as sig\n'), ((7227, 7314), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(fig_sz_x, fig_sz_y)', 'dpi': 'fig_dpi', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(figsize=(fig_sz_x, fig_sz_y), dpi=fig_dpi, facecolor='w',\n edgecolor='k')\n", (7237, 7314), True, 'import matplotlib.pyplot as plt\n'), ((7316, 7383), 'matplotlib.pyplot.plot', 'plt.plot', (['zoom_region', 'ecg_one_lead[zoom_region]'], {'label': '"""ECG"""', 'lw': '(2)'}), "(zoom_region, ecg_one_lead[zoom_region], label='ECG', lw=2)\n", (7324, 7383), True, 'import matplotlib.pyplot as plt\n'), ((7388, 7474), 'matplotlib.pyplot.plot', 'plt.plot', (['zoom_region', 'interpolation[zoom_region]'], {'label': 
'"""interpolante resamplig"""'}), "(zoom_region, interpolation[zoom_region], label=\n 'interpolante resamplig')\n", (7396, 7474), True, 'import matplotlib.pyplot as plt\n'), ((7474, 7539), 'matplotlib.pyplot.plot', 'plt.plot', (['zoom_region', 'median2[zoom_region]'], {'label': '"""interpolante"""'}), "(zoom_region, median2[zoom_region], label='interpolante')\n", (7482, 7539), True, 'import matplotlib.pyplot as plt\n'), ((7630, 7656), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Adimensional"""'], {}), "('Adimensional')\n", (7640, 7656), True, 'import matplotlib.pyplot as plt\n'), ((7661, 7687), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Muestras (#)"""'], {}), "('Muestras (#)')\n", (7671, 7687), True, 'import matplotlib.pyplot as plt\n'), ((7708, 7717), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7715, 7717), True, 'import matplotlib.pyplot as plt\n'), ((7785, 7795), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7793, 7795), True, 'import matplotlib.pyplot as plt\n'), ((2228, 2246), 'numpy.max', 'np.max', (['[0, ii[0]]'], {}), '([0, ii[0]])\n', (2234, 2246), True, 'import numpy as np\n'), ((2248, 2278), 'numpy.min', 'np.min', (['[cant_muestras, ii[1]]'], {}), '([cant_muestras, ii[1]])\n', (2254, 2278), True, 'import numpy as np\n'), ((6992, 7010), 'numpy.max', 'np.max', (['[0, ii[0]]'], {}), '([0, ii[0]])\n', (6998, 7010), True, 'import numpy as np\n'), ((7012, 7042), 'numpy.min', 'np.min', (['[cant_muestras, ii[1]]'], {}), '([cant_muestras, ii[1]])\n', (7018, 7042), True, 'import numpy as np\n'), ((1835, 1855), 'numpy.array', 'np.array', (['[1.6, 2.6]'], {}), '([1.6, 2.6])\n', (1843, 1855), True, 'import numpy as np\n'), ((1893, 1909), 'numpy.array', 'np.array', (['[4, 5]'], {}), '([4, 5])\n', (1901, 1909), True, 'import numpy as np\n'), ((1947, 1967), 'numpy.array', 'np.array', (['[10, 10.5]'], {}), '([10, 10.5])\n', (1955, 1967), True, 'import numpy as np\n'), ((2005, 2025), 'numpy.array', 'np.array', (['[12, 12.7]'], {}), '([12, 
12.7])\n', (2013, 2025), True, 'import numpy as np\n'), ((2063, 2085), 'numpy.array', 'np.array', (['[14.6, 15.7]'], {}), '([14.6, 15.7])\n', (2071, 2085), True, 'import numpy as np\n'), ((6599, 6619), 'numpy.array', 'np.array', (['[1.6, 2.6]'], {}), '([1.6, 2.6])\n', (6607, 6619), True, 'import numpy as np\n'), ((6657, 6673), 'numpy.array', 'np.array', (['[4, 5]'], {}), '([4, 5])\n', (6665, 6673), True, 'import numpy as np\n'), ((6711, 6731), 'numpy.array', 'np.array', (['[10, 10.5]'], {}), '([10, 10.5])\n', (6719, 6731), True, 'import numpy as np\n'), ((6769, 6789), 'numpy.array', 'np.array', (['[12, 12.7]'], {}), '([12, 12.7])\n', (6777, 6789), True, 'import numpy as np\n'), ((6827, 6849), 'numpy.array', 'np.array', (['[14.6, 15.7]'], {}), '([14.6, 15.7])\n', (6835, 6849), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
The :mod:`parsimony.algorithms.proximal` module contains several algorithms
that involve proximal operators.
Algorithms may not store states. I.e., if they are classes, do not keep
references to objects with state in the algorithm objects. It should be
possible to copy and share algorithms between e.g. estimators, and thus they
should not depend on any state.
Created on Mon Jun 2 15:42:13 2014
Copyright (c) 2013-2014, CEA/DSV/I2BM/Neurospin. All rights reserved.
@author: <NAME>, <NAME>, <NAME>
@email: <EMAIL>, <EMAIL>,
<EMAIL>
@license: BSD 3-clause.
"""
import numpy as np
import warnings
try:
from scipy.interpolate import PchipInterpolator as interp1
except ImportError:
from scipy.interpolate import interp1d as interp1
try:
from . import bases # Only works when imported as a package.
except (ValueError, SystemError):
import parsimony.algorithms.bases as bases # When run as a program.
import parsimony.utils as utils
import parsimony.utils.maths as maths
import parsimony.utils.consts as consts
from parsimony.algorithms.utils import Info
import parsimony.functions.properties as properties
__all__ = ["ISTA", "FISTA", "CONESTA", "StaticCONESTA",
"ADMM",
"DykstrasProjectionAlgorithm",
"ParallelDykstrasProjectionAlgorithm"]
class ISTA(bases.ExplicitAlgorithm,
           bases.IterativeAlgorithm,
           bases.InformationAlgorithm):
    """The iterative shrinkage-thresholding algorithm.

    Parameters
    ----------
    eps : float
        Positive float. Tolerance for the stopping criterion.

    info : List or tuple of utils.consts.Info
        What, if any, extra run information should be stored. Default is an
        empty list, which means that no run information is computed nor
        returned.

    max_iter : int
        Non-negative integer. Maximum allowed number of iterations.

    min_iter : int
        Non-negative integer less than or equal to max_iter. Minimum number of
        iterations that must be performed. Default is 1.

    inexact_start_iteration : int, optional
        When ISTA is used repeatedly in some outer iteration procedure, it is
        useful to be able to set the actual iteration count from outside. This
        count is used when deriving ``inexact_eps``. Default is None, which
        means to use ``inexact_eps``, if given, or default inexact behaviour
        otherwise.

    inexact_eps : float, optional
        The precision used in the approximation of the proximal operator. This
        is only used/relevant if your penalties require the approximation of
        a projection or proximal operator. Default is None, which means to
        derive ``inexact_eps`` from ``inexact_start_iteration``, if given, or
        to use ``eps`` otherwise.

    inexact_max_iter : int, optional
        The number of iterations to allow in the inexact approximation of the
        projection or proximal operator. Default is None, which means to use
        ``max_iter``.

    callback: Callable
        A callable object that will be called at the end of each iteration with
        locals() as arguments.

    Examples
    --------
    >>> from parsimony.algorithms.proximal import ISTA
    >>> from parsimony.functions import LinearRegressionL1L2TV
    >>> import scipy.sparse as sparse
    >>> import numpy as np
    >>>
    >>> np.random.seed(42)
    >>> X = np.random.rand(100, 50)
    >>> y = np.random.rand(100, 1)
    >>> A = sparse.csr_matrix((50, 50))  # Unused here
    >>> function = LinearRegressionL1L2TV(X, y, 0.0, 0.0, 0.0,
    ...                                   A=A, mu=0.0)
    >>> ista = ISTA(max_iter=10000)
    >>> beta1 = ista.run(function, np.random.rand(50, 1))
    >>> beta2 = np.dot(np.linalg.pinv(X), y)
    >>> np.linalg.norm(beta1 - beta2)  # doctest: +ELLIPSIS
    0.00031215...
    >>>
    >>> np.random.seed(42)
    >>> X = np.random.rand(100, 50)
    >>> y = np.random.rand(100, 1)
    >>> A = sparse.csr_matrix((50, 50))  # Unused here
    >>> function = LinearRegressionL1L2TV(X, y, 0.1, 0.0, 0.0,
    ...                                   A=A, mu=0.0)
    >>> ista = ISTA(max_iter=10000)
    >>> beta1 = ista.run(function, np.random.rand(50, 1))
    >>> beta2 = np.dot(np.linalg.pinv(X), y)
    >>> np.linalg.norm(beta1 - beta2)  # doctest: +ELLIPSIS
    0.82723303...
    >>> int(np.linalg.norm(beta2.ravel(), 0))
    50
    >>> int(np.linalg.norm(beta1.ravel(), 0))
    7
    """
    INTERFACES = [properties.Function,
                  properties.Gradient,
                  properties.StepSize,
                  properties.ProximalOperator]

    INFO_PROVIDED = [Info.ok,
                     Info.num_iter,
                     Info.time,
                     Info.fvalue,  # <-- To be deprecated!
                     Info.func_val,
                     Info.smooth_func_val,
                     Info.converged]

    def __init__(self,
                 eps=consts.TOLERANCE,
                 info=None,
                 max_iter=20000,
                 min_iter=1,
                 inexact_start_iteration=None,
                 inexact_eps=None,
                 inexact_max_iter=None,
                 callback=None):
        # NOTE: the default used to be ``info=[]`` — a mutable default
        # argument shared between every ISTA instance. ``None`` keeps the
        # interface backward-compatible while giving each call a fresh list.
        if info is None:
            info = []

        super(ISTA, self).__init__(info=info,
                                   max_iter=max_iter,
                                   min_iter=min_iter)

        self.eps = max(consts.FLOAT_EPSILON, float(eps))
        if inexact_eps is None:
            self.inexact_eps = inexact_eps
        else:
            self.inexact_eps = max(consts.FLOAT_EPSILON, float(inexact_eps))
        if inexact_start_iteration is None:
            self.inexact_start_iteration = inexact_start_iteration
        else:
            self.inexact_start_iteration = max(0, int(inexact_start_iteration))
        if inexact_max_iter is None:
            self.inexact_max_iter = self.max_iter
        else:
            self.inexact_max_iter = max(1, int(inexact_max_iter))
        self.callback = callback

    @bases.force_reset
    @bases.check_compatibility
    def run(self, function, beta):
        """Find the minimiser of the given function, starting at beta.

        Parameters
        ----------
        function : Function
            The function to minimise.

        beta : numpy.ndarray
            The start vector.
        """
        if self.info_requested(Info.ok):
            self.info_set(Info.ok, False)

        betanew = betaold = beta

        # Set up the requested run-information accumulators.
        if self.info_requested(Info.time):
            _t = []
        if self.info_requested(Info.fvalue) \
                or self.info_requested(Info.func_val):
            _f = []
        if self.info_requested(Info.smooth_func_val):
            _fmu = []
        if self.info_requested(Info.converged):
            self.info_set(Info.converged, False)

        for i in range(1, self.max_iter + 1):

            if self.info_requested(Info.time):
                tm = utils.time_cpu()

            step = function.step(betanew)

            betaold = betanew

            # Precision for an inexact proximal operator: either the fixed
            # user-supplied value, or a summable 1 / i^(2 + eps) sequence
            # (optionally offset by an externally maintained iteration count).
            if self.inexact_eps is not None:
                inexact_eps = self.inexact_eps
            else:
                if self.inexact_start_iteration is None:
                    inexact_eps = \
                        1.0 / (float(i) ** (2.0 + consts.FLOAT_EPSILON))
                else:
                    ii = self.inexact_start_iteration
                    inexact_eps = \
                        1.0 / (float(i + ii) ** (2.0 + consts.FLOAT_EPSILON))

            # Forward (gradient) step followed by backward (proximal) step.
            betanew = function.prox(betaold - step * function.grad(betaold),
                                    step,
                                    eps=inexact_eps,
                                    max_iter=self.inexact_max_iter)

            if self.info_requested(Info.time):
                _t.append(utils.time_cpu() - tm)
            if self.info_requested(Info.fvalue) \
                    or self.info_requested(Info.func_val):
                _f.append(function.f(betanew))
            if self.info_requested(Info.smooth_func_val):
                if hasattr(function, "fmu"):
                    _fmu.append(function.fmu(betanew))

            if self.callback is not None:
                self.callback(locals())

            # Stop when the scaled step length falls below the tolerance.
            if (1.0 / step) * maths.norm(betanew - betaold) < self.eps \
                    and i >= self.min_iter:

                if self.info_requested(Info.converged):
                    self.info_set(Info.converged, True)

                break

        self.num_iter = i

        if self.info_requested(Info.num_iter):
            self.info_set(Info.num_iter, i)
        if self.info_requested(Info.time):
            self.info_set(Info.time, _t)
        if self.info_requested(Info.fvalue):
            self.info_set(Info.fvalue, _f)
        if self.info_requested(Info.func_val):
            self.info_set(Info.func_val, _f)
        if self.info_requested(Info.smooth_func_val):
            self.info_set(Info.smooth_func_val, _fmu)
        if self.info_requested(Info.ok):
            self.info_set(Info.ok, True)

        return betanew
class FISTA(bases.ExplicitAlgorithm,
            bases.IterativeAlgorithm,
            bases.InformationAlgorithm):
    """The fast iterative shrinkage-thresholding algorithm.

    Parameters
    ----------
    eps : float
        Must be positive. The tolerance for the stopping criterion.
    use_gap : bool
        If true, FISTA will use a dual gap, from the interface DualFunction,
        in the stopping criterion as

            if function.gap(beta) < eps:
                break

        Default is False, since the gap may be very expensive to compute.
    info : List or tuple of utils.consts.Info
        What, if any, extra run information should be stored. Default is an
        empty list, which means that no run information is computed nor
        returned.
    max_iter : int
        Non-negative integer. Maximum allowed number of iterations.
    min_iter : int
        Non-negative integer less than or equal to max_iter. Minimum number of
        iterations that must be performed. Default is 1.
    callback: Callable
        A callable object that will be called at the end of each iteration
        with locals() as arguments.
    simulation : bool
        If true, convergence checks are skipped so that exactly max_iter
        iterations are performed. Default is False.
    return_best : bool
        If true, return the iterate with the lowest seen function value
        instead of the last iterate. Default is False.

    Example
    -------
    >>> from parsimony.algorithms.proximal import FISTA
    >>> from parsimony.functions import LinearRegressionL1L2TV
    >>> import scipy.sparse as sparse
    >>> import numpy as np
    >>>
    >>> np.random.seed(42)
    >>> X = np.random.rand(100, 50)
    >>> y = np.random.rand(100, 1)
    >>> A = sparse.csr_matrix((50, 50))  # Unused here
    >>> function = LinearRegressionL1L2TV(X, y, 0.0, 0.0, 0.0,
    ...                                   A=A, mu=0.0)
    >>> fista = FISTA(max_iter=10000)
    >>> beta1 = fista.run(function, np.random.rand(50, 1))
    >>> beta2 = np.dot(np.linalg.pinv(X), y)
    >>> np.linalg.norm(beta1 - beta2)  # doctest: +ELLIPSIS
    4.618281...e-06
    >>>
    >>> np.random.seed(42)
    >>> X = np.random.rand(100, 50)
    >>> y = np.random.rand(100, 1)
    >>> A = sparse.csr_matrix((50, 50))  # Unused here
    >>> function = LinearRegressionL1L2TV(X, y, 0.1, 0.0, 0.0,
    ...                                   A=A, mu=0.0)
    >>> fista = FISTA(max_iter=10000)
    >>> beta1 = fista.run(function, np.random.rand(50, 1))
    >>> beta2 = np.dot(np.linalg.pinv(X), y)
    >>> np.linalg.norm(beta1 - beta2)  # doctest: +ELLIPSIS
    0.82723292...
    >>> int(np.linalg.norm(beta2.ravel(), 0))
    50
    >>> int(np.linalg.norm(beta1.ravel(), 0))
    7
    """
    # Function-interface properties a target function must implement.
    INTERFACES = [properties.Function,
                  properties.Gradient,
                  properties.StepSize,
                  properties.ProximalOperator]
    # Run-information keys this algorithm is able to compute and return.
    INFO_PROVIDED = [Info.ok,
                     Info.num_iter,
                     Info.time,
                     Info.fvalue,  # <-- To be deprecated!
                     Info.func_val,
                     Info.converged,
                     Info.gap,
                     Info.verbose]

    def __init__(self, use_gap=False,
                 info=None, eps=consts.TOLERANCE, max_iter=10000, min_iter=1,
                 callback=None,
                 simulation=False,
                 return_best=False):
        # A mutable default (info=[]) would be shared across all instances;
        # use a None sentinel instead. Backward compatible for all callers.
        if info is None:
            info = []
        super(FISTA, self).__init__(info=info,
                                    max_iter=int(max_iter),
                                    min_iter=int(min_iter))
        self.use_gap = bool(use_gap)
        self.eps = max(consts.FLOAT_EPSILON, float(eps))
        self.callback = callback
        self.simulation = bool(simulation)
        self.return_best = bool(return_best)

    @bases.force_reset
    @bases.check_compatibility
    def run(self, function, beta):
        """Find the minimiser of the given function, starting at beta.

        Parameters
        ----------
        function : Function. The function to minimise.
        beta : Numpy array. The start vector.
        """
        if self.info_requested(Info.ok):
            self.info_set(Info.ok, False)
        z = betanew = betaold = beta
        # Allocate the requested run-information accumulators.
        if self.info_requested(Info.time):
            t_ = []
        if self.info_requested(Info.fvalue) \
                or self.info_requested(Info.func_val):
            f_ = []
        if self.info_requested(Info.converged):
            self.info_set(Info.converged, False)
        if self.info_requested(Info.gap):
            gap_ = []
        if self.return_best:
            best_f = np.inf
            best_beta = None
        for i in range(1, max(self.min_iter, self.max_iter) + 1):
            if self.info_requested(Info.time):
                tm = utils.time_cpu()
            # Momentum (auxiliary) point; at i == 1 this is just beta.
            z = betanew + ((i - 2.0) / (i + 1.0)) * (betanew - betaold)
            step = function.step(z)
            betaold = betanew
            # Forward-backward step with an inexact prox whose precision
            # decreases as ~1 / i**4.
            betanew = function.prox(z - step * function.grad(z),
                                    step,
                                    eps=1.0 / (float(i) ** (4.0 + consts.FLOAT_EPSILON)),
                                    max_iter=self.max_iter)
            if self.info_requested(Info.time):
                t_.append(utils.time_cpu() - tm)
            if self.info_requested(Info.fvalue) \
                    or self.info_requested(Info.func_val):
                func_val = function.f(betanew)
                f_.append(func_val)
                if self.return_best and func_val < best_f:
                    best_f = func_val
                    best_beta = betanew
            # NOTE: the callback receives locals(), so the local variable
            # names in this method are part of its observable behaviour.
            if self.callback is not None:
                self.callback(locals())
            if self.use_gap:
                gap = function.gap(betanew,
                                   eps=self.eps,
                                   max_iter=self.max_iter)
                # TODO: Warn if G_new < -consts.TOLERANCE.
                gap = abs(gap)  # May happen close to machine epsilon.
                if self.info_requested(Info.gap):
                    gap_.append(gap)
                if not self.simulation:
                    if self.info_requested(Info.verbose):
                        print("FISTA ite:%i, gap:%g" % (i, gap))
                    if gap < self.eps:
                        if self.info_requested(Info.converged):
                            self.info_set(Info.converged, True)
                        break
            else:
                if not self.simulation:
                    eps_cur = maths.norm(betanew - z)
                    if self.info_requested(Info.verbose):
                        print("FISTA ite: %i, eps_cur:%g" % (i, eps_cur))
                    if step > 0.0:
                        # Step-scaled update length below tolerance.
                        if (1.0 / step) * eps_cur < self.eps \
                                and i >= self.min_iter:
                            if self.info_requested(Info.converged):
                                self.info_set(Info.converged, True)
                            break
                    else:  # TODO: Fix this!
                        if maths.norm(betanew - z) < self.eps \
                                and i >= self.min_iter:
                            if self.info_requested(Info.converged):
                                self.info_set(Info.converged, True)
                            break
        self.num_iter = i
        if self.info_requested(Info.num_iter):
            self.info_set(Info.num_iter, i)
        if self.info_requested(Info.time):
            self.info_set(Info.time, t_)
        if self.info_requested(Info.fvalue):
            self.info_set(Info.fvalue, f_)
        if self.info_requested(Info.func_val):
            self.info_set(Info.func_val, f_)
        if self.info_requested(Info.gap):
            self.info_set(Info.gap, gap_)
        if self.info_requested(Info.ok):
            self.info_set(Info.ok, True)
        if self.return_best and best_beta is not None:
            return best_beta
        else:
            return betanew
class CONESTA(bases.ExplicitAlgorithm,
              bases.IterativeAlgorithm,
              bases.InformationAlgorithm):
    """COntinuation with NEsterov smoothing in a Soft-Thresholding Algorithm,
    or CONESTA for short.

    Parameters
    ----------
    mu_min : float
        A non-negative float. A "very small" mu to use as a lower bound for
        mu.
    tau : float
        A float between 0 < tau < 1. The rate at which eps is decreasing.
        Default is 0.5.
    eps : float
        A positive float. Tolerance for the stopping criterion.
    info : List or tuple of utils.Info.
        What, if any, extra run information should be stored. Default is an
        empty list, which means that no run information is computed nor
        returned.
    max_iter : int
        Non-negative integer. Maximum allowed number of iterations.
    min_iter : int
        Non-negative integer less than or equal to max_iter. Minimum number
        of iterations that must be performed. Default is 1.
    eps_max: float
        A maximum value for eps computed from the gap. If
        np.isfinite(tau * gap(beta)) then use eps_max to avoid NaN. Default
        is a large value: 10.
    callback: Callable
        A callable object that will be called at the end of each iteration
        with locals() as arguments.
    simulation : bool
        If true, convergence checks in the inner FISTA loop are relaxed so
        that runs are reproducible for simulation studies. Default is False.
    """
    # Function-interface properties a target function must implement.
    INTERFACES = [properties.NesterovFunction,
                  properties.StepSize,
                  properties.ProximalOperator,
                  properties.Continuation,
                  properties.DualFunction]
    # Run-information keys this algorithm is able to compute and return.
    INFO_PROVIDED = [Info.ok,
                     Info.converged,
                     Info.num_iter,
                     Info.continuations,
                     Info.time,
                     Info.fvalue,
                     Info.func_val,
                     Info.gap,
                     Info.mu,
                     Info.verbose]

    def __init__(self, mu_min=consts.TOLERANCE, tau=0.5,
                 info=None, eps=consts.TOLERANCE, max_iter=10000, min_iter=1,
                 eps_max=10.,
                 callback=None,
                 simulation=False):
        # A mutable default (info=[]) would be shared across all instances;
        # use a None sentinel instead. Backward compatible for all callers.
        if info is None:
            info = []
        super(CONESTA, self).__init__(info=info,
                                      max_iter=max_iter, min_iter=min_iter)
        self.mu_min = max(consts.FLOAT_EPSILON, float(mu_min))
        self.eps_max = eps_max
        # Clamp tau to the open interval (0, 1).
        self.tau = max(consts.TOLERANCE,
                       min(float(tau), 1.0 - consts.TOLERANCE))
        self.eps = max(consts.TOLERANCE, float(eps))
        self.callback = callback
        self.simulation = bool(simulation)

    @bases.force_reset
    @bases.check_compatibility
    def run(self, function, beta):
        """Find the minimiser of the given smoothed function, starting at
        beta, by solving a sequence of increasingly accurate FISTA problems.
        """
        # Copy the allowed info keys for FISTA.
        fista_info = list()
        for nfo in self.info_copy():
            if nfo in FISTA.INFO_PROVIDED:
                fista_info.append(nfo)
        # CONESTA always asks for the gap.
        if Info.gap not in fista_info:
            fista_info.append(Info.gap)
        # Create the inner algorithm.
        algorithm = FISTA(use_gap=True, info=fista_info, eps=self.eps,
                          max_iter=self.max_iter, min_iter=self.min_iter)
        # Not ok until the end.
        if self.info_requested(Info.ok):
            self.info_set(Info.ok, False)
        # Time the init computation (essentialy Lipchitz constant in mu_opt).
        if self.info_requested(Info.time):
            init_time = utils.time_cpu()
        # Compute current gap, precision eps (gap decreased by tau) and mu.
        function.set_mu(consts.TOLERANCE)
        gap = function.gap(beta, eps=self.eps, max_iter=self.max_iter)
        eps = self.tau * abs(gap)
        # Warning below if gap < -consts.TOLERANCE: See Special case 1
        gM = function.eps_max(1.0)
        loop = True
        # Special case 1: gap is very small: stopping criterion satisfied
        if gap < self.eps:  # "- mu * gM" has been removed since mu == 0
            warnings.warn(
                "Stopping criterion satisfied before the first iteration."
                " Either beta is the solution (given eps),"
                " or if beta is null the problem might be over-penalized "
                " - then try smaller penalization.")
            loop = False
        # Special case 2: gap infinite or NaN => eps is not finite or NaN
        # => mu is NaN etc. Force eps to a large value, to force some FISTA
        # iteration to getbetter starting point
        if not np.isfinite(eps):
            eps = self.eps_max
        if loop:  # mu is useless if loop is False
            mu = function.mu_opt(eps)
            function.set_mu(mu)
        # Initialise info variables. Info variables have the suffix "_".
        if self.info_requested(Info.time):
            t_ = []
            init_time = utils.time_cpu() - init_time
        if self.info_requested(Info.fvalue) \
                or self.info_requested(Info.func_val):
            f_ = []
        if self.info_requested(Info.gap):
            gap_ = []
        if self.info_requested(Info.converged):
            self.info_set(Info.converged, False)
        if self.info_requested(Info.mu):
            mu_ = []
        i = 0  # Iteration counter.
        while loop:
            converged = False
            # Current precision for the inner solver (may temporarily be
            # negative if mu * gM dominates).
            eps_mu = max(eps, self.eps) - mu * gM
            # Set current parameters to algorithm.
            algorithm.set_params(eps=eps_mu,
                                 max_iter=self.max_iter - self.num_iter)
            # Run FISTA.
            beta = algorithm.run(function, beta)
            # Update global iteration counter.
            self.num_iter += algorithm.num_iter
            # Get info from algorithm.
            if Info.time in algorithm.info and \
                    self.info_requested(Info.time):
                t_ += algorithm.info_get(Info.time)
                if i == 0:  # Add init time to first iteration.
                    t_[0] += init_time
            if Info.func_val in algorithm.info and \
                    self.info_requested(Info.func_val):
                f_ += algorithm.info_get(Info.func_val)
            elif Info.fvalue in algorithm.info and \
                    self.info_requested(Info.fvalue):
                f_ += algorithm.info_get(Info.fvalue)
            if self.info_requested(Info.mu):
                mu_ += [mu] * algorithm.num_iter
            if self.info_requested(Info.gap):
                gap_ += algorithm.info_get(Info.gap)
            # Obtain the gap from the last FISTA run. May be small and
            # negative close to machine epsilon.
            gap_mu = abs(algorithm.info_get(Info.gap)[-1])
            # TODO: Warn if gap_mu < -consts.TOLERANCE.
            if not self.simulation:
                if gap_mu + mu * gM < self.eps:
                    if self.info_requested(Info.converged):
                        self.info_set(Info.converged, True)
                    converged = True
            # NOTE: the callback receives locals(), so the local variable
            # names in this method are part of its observable behaviour.
            if self.callback is not None:
                self.callback(locals())
            if self.info_requested(Info.verbose):
                print("CONESTA ite:%i, gap_mu: %g, eps: %g, mu: %g, "
                      "eps_mu: %g" % (i, gap_mu, eps, mu, eps_mu))
            # Stopping criteria.
            if (converged or self.num_iter >= self.max_iter) \
                    and self.num_iter >= self.min_iter:
                break
            # Update the precision eps.
            # eps = self.tau * (gap_mu + mu * gM)
            eps = max(self.eps, self.tau * (gap_mu + mu * gM))
            # Compute and update mu.
            # mu = max(self.mu_min, min(function.mu_opt(eps), mu))
            mu = min(function.mu_opt(eps), mu)
            function.set_mu(mu)
            i = i + 1
        if self.info_requested(Info.num_iter):
            self.info_set(Info.num_iter, self.num_iter)
        if self.info_requested(Info.continuations):
            self.info_set(Info.continuations, i + 1)
        if self.info_requested(Info.time):
            self.info_set(Info.time, t_)
        if self.info_requested(Info.func_val):
            self.info_set(Info.func_val, f_)
        if self.info_requested(Info.fvalue):
            self.info_set(Info.fvalue, f_)
        if self.info_requested(Info.gap):
            self.info_set(Info.gap, gap_)
        if self.info_requested(Info.mu):
            self.info_set(Info.mu, mu_)
        if self.info_requested(Info.ok):
            self.info_set(Info.ok, True)
        return beta
class StaticCONESTA(bases.ExplicitAlgorithm,
                    bases.IterativeAlgorithm,
                    bases.InformationAlgorithm):
    """COntinuation with NEsterov smoothing in a Soft-Thresholding Algorithm,
    or CONESTA for short, with a statically decreasing sequence of eps and mu.

    Parameters
    ----------
    mu_min : float
        Non-negative. A "very small" mu to use as a lower bound for mu.
    tau : float
        Within 0 < tau < 1. The rate at which eps is decreasing. Default is
        0.5.
    exponent : float
        Within [1.001, 2.0]. The assumed convergence rate of
        ||beta* - beta_k||_2 for k=1,2,... is O(1 / k^exponent). Default is
        1.5.
    eps : float
        Positive float. Tolerance for the stopping criterion.
    info : List or tuple of utils.Info.
        What, if any, extra run information should be stored. Default is an
        empty list, which means that no run information is computed nor
        returned.
    max_iter : int
        Non-negative integer. Maximum allowed number of iterations.
    min_iter : int
        Non-negative integer less than or equal to max_iter. Minimum number
        of iterations that must be performed. Default is 1.
    callback: Callable
        A callable object that will be called at the end of each iteration
        with locals() as arguments.

    Example
    -------
    >>> from parsimony.algorithms.proximal import StaticCONESTA
    >>> from parsimony.functions.nesterov import l1tv
    >>> from parsimony.functions import LinearRegressionL1L2TV
    >>> import scipy.sparse as sparse
    >>> import numpy as np
    >>>
    >>> np.random.seed(42)
    >>> X = np.random.rand(100, 50)
    >>> y = np.random.rand(100, 1)
    >>> A = sparse.csr_matrix((50, 50))  # Unused here
    >>> function = LinearRegressionL1L2TV(X, y, 0.0, 0.0, 0.0,
    ...                                   A=[A], mu=0.0)
    >>> static_conesta = StaticCONESTA(max_iter=10000)
    >>> beta1 = static_conesta.run(function, np.random.rand(50, 1))
    >>> beta2 = np.dot(np.linalg.pinv(X), y)
    >>> round(np.linalg.norm(beta1 - beta2), 13)
    3.0183961e-06
    >>>
    >>> np.random.seed(42)
    >>> X = np.random.rand(100, 50)
    >>> y = np.random.rand(100, 1)
    >>> A = sparse.csr_matrix((50, 50))
    >>> function = LinearRegressionL1L2TV(X, y, 0.1, 0.0, 0.0,
    ...                                   A=[A], mu=0.0)
    >>> static_conesta = StaticCONESTA(max_iter=10000)
    >>> beta1 = static_conesta.run(function, np.random.rand(50, 1))
    >>> beta2 = np.dot(np.linalg.pinv(X), y)
    >>> np.linalg.norm(beta1 - beta2)  # doctest: +ELLIPSIS
    0.82723295...
    >>> int(np.linalg.norm(beta2.ravel(), 0))
    50
    >>> int(np.linalg.norm(beta1.ravel(), 0))
    7
    >>>
    >>> np.random.seed(42)
    >>> X = np.random.rand(100, 50)
    >>> y = np.random.rand(100, 1)
    >>> A = l1tv.linear_operator_from_shape((1, 1, 50), 50)
    >>> function = LinearRegressionL1L2TV(X, y, 0.1, 0.1, 0.1,
    ...                                   A=A, mu=0.0)
    >>> static_conesta = StaticCONESTA(max_iter=10000)
    >>> beta1 = static_conesta.run(function, np.zeros((50, 1)))
    >>> beta2 = np.dot(np.linalg.pinv(X), y)
    >>> np.linalg.norm(beta1 - beta2)  # doctest: +ELLIPSIS
    0.96629070...
    """
    # Function-interface properties a target function must implement.
    INTERFACES = [properties.NesterovFunction,
                  properties.StepSize,
                  properties.ProximalOperator,
                  properties.Continuation,
                  properties.DualFunction]
    # Run-information keys this algorithm is able to compute and return.
    INFO_PROVIDED = [Info.ok,
                     Info.converged,
                     Info.num_iter,
                     Info.continuations,
                     Info.time,
                     Info.fvalue,
                     Info.func_val,
                     Info.mu,
                     Info.verbose]

    def __init__(self, mu_min=consts.TOLERANCE, tau=0.5, exponent=1.52753,
                 info=None, eps=consts.TOLERANCE, max_iter=10000, min_iter=1,
                 callback=None,
                 simulation=False):
        # A mutable default (info=[]) would be shared across all instances;
        # use a None sentinel instead. Backward compatible for all callers.
        if info is None:
            info = []
        super(StaticCONESTA, self).__init__(info=info,
                                            max_iter=max_iter,
                                            min_iter=min_iter)
        self.mu_min = max(consts.FLOAT_EPSILON, float(mu_min))
        # Clamp tau to the open interval (0, 1).
        self.tau = max(consts.TOLERANCE,
                       min(float(tau), 1.0 - consts.TOLERANCE))
        # Clamp exponent to the supported interpolation range.
        self.exponent = max(1.001, min(float(exponent), 2.0))
        self.eps = max(consts.TOLERANCE, float(eps))
        self.callback = callback
        self.simulation = bool(simulation)
        self._harmonic = None  # Cached harmonic-number approximation.

    def _harmonic_number_approx(self):
        """Approximate the generalised harmonic number H_inf(s) at
        s = self.exponent by interpolating tabulated values.
        """
        if self._harmonic is None:
            # Local import to keep scipy optional until actually needed.
            from scipy.interpolate import interp1d
            x = [1.001, 1.00125, 1.0025, 1.005, 1.01, 1.025, 1.05, 1.075, 1.1,
                 1.2, 1.3, 1.4, 1.5, 1.52753, 1.6, 1.7, 1.8, 1.9, 1.95, 2.0]
            y = [1000.58, 800.577, 400.577, 200.578, 100.578, 40.579, 20.5808,
                 13.916, 10.5844, 5.59158, 3.93195, 3.10555, 2.61238, 2.50988,
                 2.28577, 2.05429, 1.88223, 1.74975, 1.69443, 1.6449340668]
            # Fix: the previous call target, "interp1", is not a defined
            # name; scipy.interpolate.interp1d is the intended interpolator.
            f = interp1d(x, y)
            self._harmonic = f(self.exponent)
        return self._harmonic

    def _approximate_eps(self, function, beta0):
        """Estimate an initial eps from the first proximal step taken at
        mu = mu_min, scaled by the assumed convergence rate.
        """
        old_mu = function.set_mu(self.mu_min)
        step = function.step(beta0)
        D1 = maths.norm(function.prox(-step * function.grad(beta0),
                                      step,
                                      # Arbitrary eps ...
                                      eps=np.sqrt(consts.TOLERANCE),
                                      max_iter=self.max_iter))
        function.set_mu(old_mu)
        return (2.0 / step) * D1 * self._harmonic_number_approx()

    @bases.force_reset
    @bases.check_compatibility
    def run(self, function, beta):
        """Find the minimiser of the given smoothed function, starting at
        beta, with a statically decreasing sequence of eps and mu.
        """
        # Copy the allowed info keys for FISTA.
        fista_info = list()
        for nfo in self.info_copy():
            if nfo in FISTA.INFO_PROVIDED:
                fista_info.append(nfo)
        # Create the inner algorithm.
        algorithm = FISTA(info=fista_info, eps=self.eps,
                          max_iter=self.max_iter, min_iter=self.min_iter)
        # Not ok until the end.
        if self.info_requested(Info.ok):
            self.info_set(Info.ok, False)
        # Time the init computation.
        if self.info_requested(Info.time):
            init_time = utils.time()
        # Estimate the initial precision, eps, and the smoothing parameter
        # mu.
        gM = function.eps_max(1.0)  # gamma * M
        if maths.norm(beta) > consts.TOLERANCE:
            mu = function.estimate_mu(beta)
            eps = mu * gM
        else:
            eps = self._approximate_eps(function, beta)
            mu = eps / gM
        function.set_mu(mu)
        # Initialise info variables. Info variables have the suffix "_".
        if self.info_requested(Info.time):
            t_ = []
            init_time = utils.time() - init_time
        if self.info_requested(Info.fvalue) \
                or self.info_requested(Info.func_val):
            f_ = []
        if self.info_requested(Info.converged):
            self.info_set(Info.converged, False)
        if self.info_requested(Info.mu):
            mu_ = []
        i = 0  # Iteration counter.
        while True:
            converged = False
            # Give current parameters to the algorithm.
            algorithm.set_params(eps=eps,
                                 max_iter=self.max_iter - self.num_iter)
            # Run FISTA.
            beta_new = algorithm.run(function, beta)
            # Update global iteration count.
            self.num_iter += algorithm.num_iter
            # Get info from algorithm.
            if Info.time in algorithm.info and \
                    self.info_requested(Info.time):
                t_ += algorithm.info_get(Info.time)
                if i == 0:  # Add init time to first iteration.
                    t_[0] += init_time
            if Info.func_val in algorithm.info \
                    and self.info_requested(Info.func_val):
                f_ += algorithm.info_get(Info.func_val)
            elif Info.fvalue in algorithm.info \
                    and self.info_requested(Info.fvalue):
                f_ += algorithm.info_get(Info.fvalue)
            if self.info_requested(Info.mu):
                mu_ += [mu] * algorithm.num_iter
            # Unless this is a simulation, you want the algorithm to stop
            # when it has converged.
            if not self.simulation:
                # Stopping criterion.
                step = function.step(beta_new)
                if maths.norm(beta_new - beta) < step * self.eps:
                    if self.info_requested(Info.converged):
                        self.info_set(Info.converged, True)
                    converged = True
            beta = beta_new
            # NOTE: the callback receives locals(), so the local variable
            # names in this method are part of its observable behaviour.
            if self.callback is not None:
                self.callback(locals())
            if self.info_requested(Info.verbose):
                print("StaticCONESTA ite: %i, eps: %g, mu: %g" % (i, eps, mu))
            # All combined stopping criteria.
            if (converged or self.num_iter >= self.max_iter) \
                    and self.num_iter >= self.min_iter:
                break
            # Update the precision eps.
            eps = self.tau * eps
            # Compute and update mu.
            mu = max(self.mu_min, eps / gM)
            function.set_mu(mu)
            i = i + 1
        if self.info_requested(Info.num_iter):
            self.info_set(Info.num_iter, self.num_iter)
        if self.info_requested(Info.continuations):
            self.info_set(Info.continuations, i + 1)
        if self.info_requested(Info.time):
            self.info_set(Info.time, t_)
        if self.info_requested(Info.func_val):
            self.info_set(Info.func_val, f_)
        if self.info_requested(Info.fvalue):
            self.info_set(Info.fvalue, f_)
        if self.info_requested(Info.mu):
            self.info_set(Info.mu, mu_)
        if self.info_requested(Info.ok):
            self.info_set(Info.ok, True)
        return beta
#class ProjectionADMM(bases.ExplicitAlgorithm):
# """ The Alternating direction method of multipliers, where the functions
# have projection operators onto the corresponding convex sets.
# """
# INTERFACES = [properties.Function,
# properties.ProjectionOperator]
#
# def __init__(self, output=False,
# eps=consts.TOLERANCE,
# max_iter=consts.MAX_ITER, min_iter=1):
#
# self.output = output
# self.eps = eps
# self.max_iter = max_iter
# self.min_iter = min_iter
#
# def run(self, function, x):
# """Finds the projection onto the intersection of two sets.
#
# Parameters
# ----------
# function : List or tuple with two Functions. The two functions.
#
# x : Numpy array. The point that we wish to project.
# """
# self.check_compatibility(function[0], self.INTERFACES)
# self.check_compatibility(function[1], self.INTERFACES)
#
# z = x
# u = np.zeros(x.shape)
# for i in xrange(1, self.max_iter + 1):
# x = function[0].proj(z - u)
# z = function[1].proj(x + u)
# u = u + x - z
#
# if maths.norm(z - x) / maths.norm(z) < self.eps \
# and i >= self.min_iter:
# break
#
# return z
class ADMM(bases.ExplicitAlgorithm,
           bases.IterativeAlgorithm,
           bases.InformationAlgorithm):
    """The alternating direction method of multipliers (ADMM). Computes the
    minimum of the sum of two functions with associated proximal or
    projection operators. Solves problems on the form

        min. f(x, y) = g(x) + h(y)
        s.t. y = x

    The functions have associated proximal or projection operators.

    Parameters
    ----------
    rho : Positive float. The penalty parameter.
    mu : Float, greater than 1. The factor within which the primal and dual
            variables should be kept. Set to less than or equal to 1 if you
            don't want to update the penalty parameter rho dynamically.
    tau : Float, greater than 1. Increase rho by a factor tau.
    info : List or tuple of utils.consts.Info. What, if any, extra run
            information should be stored. Default is an empty list, which
            means that no run information is computed nor returned.
    eps : Positive float. Tolerance for the stopping criterion.
    max_iter : Non-negative integer. Maximum allowed number of iterations.
    min_iter : Non-negative integer less than or equal to max_iter. Minimum
            number of iterations that must be performed. Default is 1.
    simulation : Bool. If true, the convergence check is skipped so that
            exactly max_iter iterations are run. Default is False.
    """
    # The split function and the operators its parts must expose.
    INTERFACES = [properties.SplittableFunction,
                  properties.AugmentedProximalOperator,
                  properties.OR(properties.ProximalOperator,
                                properties.ProjectionOperator)]
    # Run-information keys this algorithm is able to compute and return.
    INFO_PROVIDED = [Info.ok,
                     Info.num_iter,
                     Info.time,
                     Info.fvalue,
                     Info.converged]

    def __init__(self, rho=1.0, mu=10.0, tau=2.0,
                 info=None,
                 eps=consts.TOLERANCE, max_iter=consts.MAX_ITER, min_iter=1,
                 simulation=False):
        # TODO: Investigate what is a good default value here!
        # A mutable default (info=[]) would be shared across all instances;
        # use a None sentinel instead. Backward compatible for all callers.
        if info is None:
            info = []
        super(ADMM, self).__init__(info=info,
                                   max_iter=max_iter,
                                   min_iter=min_iter)
        self.rho = max(consts.FLOAT_EPSILON, float(rho))
        self.mu = max(1.0, float(mu))
        self.tau = max(1.0, float(tau))
        self.eps = max(consts.FLOAT_EPSILON, float(eps))
        self.simulation = bool(simulation)

    @bases.force_reset
    @bases.check_compatibility
    def run(self, functions, xy):
        """Finds the minimum of two functions with associated proximal
        operators.

        Parameters
        ----------
        functions : List or tuple with two Functions or a SplittableFunction.
            The two functions.
        xy : List or tuple with two elements, numpy arrays. The starting
            points for the minimisation.
        """
        if self.info_requested(Info.ok):
            self.info_set(Info.ok, False)
        if self.info_requested(Info.time):
            t = []
        if self.info_requested(Info.fvalue):
            f = []
        if self.info_requested(Info.converged):
            self.info_set(Info.converged, False)
        funcs = [functions.g, functions.h]
        x_new = xy[0]
        y_new = xy[1]
        z_new = x_new.copy()
        u_new = y_new.copy()
        for i in range(1, self.max_iter + 1):
            if self.info_requested(Info.time):
                tm = utils.time_cpu()
            x_old = x_new
            z_old = z_new
            u_old = u_new
            # x-update: prox (or projection) of g.
            if isinstance(funcs[0], properties.ProximalOperator):
                x_new = funcs[0].prox(z_old - u_old)
            else:
                x_new = funcs[0].proj(z_old - u_old)
            y_new = x_new  # TODO: Allow a linear operator here.
            # z-update: prox (or projection) of h.
            if isinstance(funcs[1], properties.ProximalOperator):
                z_new = funcs[1].prox(y_new + u_old)
            else:
                z_new = funcs[1].proj(y_new + u_old)
            # Dual update. The order here is important! Do not change!
            u_new = (y_new - z_new) + u_old
            if self.info_requested(Info.time):
                t.append(utils.time_cpu() - tm)
            if self.info_requested(Info.fvalue):
                fval = funcs[0].f(z_new) + funcs[1].f(z_new)
                f.append(fval)
            if not self.simulation:
                if i == 1:
                    # No x_old to scale by on the first iteration; use the
                    # absolute change instead.
                    if maths.norm(x_new - x_old) < self.eps \
                            and i >= self.min_iter:
                        # print "Stopping criterion kicked in!"
                        if self.info_requested(Info.converged):
                            self.info_set(Info.converged, True)
                        break
                else:
                    if maths.norm(x_new - x_old) / maths.norm(x_old) < self.eps \
                            and i >= self.min_iter:
                        # print "Stopping criterion kicked in!"
                        if self.info_requested(Info.converged):
                            self.info_set(Info.converged, True)
                        break
            # Update the penalty parameter, rho, dynamically.
            if self.mu > 1.0:
                r = x_new - z_new           # Primal residual.
                s = (z_new - z_old) * -self.rho  # Dual residual.
                norm_r = maths.norm(r)
                norm_s = maths.norm(s)
                # print "norm(r): ", norm_r, ", norm(s): ", norm_s, ", rho:", \
                #     self.rho
                # Keep the residual norms within a factor mu of each other.
                if norm_r > self.mu * norm_s:
                    self.rho *= self.tau
                    u_new *= 1.0 / self.tau  # Rescale dual variable.
                elif norm_s > self.mu * norm_r:
                    self.rho /= self.tau
                    u_new *= self.tau  # Rescale dual variable.
                # Update the penalty parameter in the functions.
                functions.set_rho(self.rho)
        self.num_iter = i
        if self.info_requested(Info.num_iter):
            self.info_set(Info.num_iter, i)
        if self.info_requested(Info.time):
            self.info_set(Info.time, t)
        if self.info_requested(Info.fvalue):
            self.info_set(Info.fvalue, f)
        if self.info_requested(Info.ok):
            self.info_set(Info.ok, True)
        return z_new
class DykstrasProximalAlgorithm(bases.ExplicitAlgorithm):
    """Dykstra's proximal algorithm.

    Computes the proximal operator of the sum of two functions, each of
    which exposes its own proximal operator (ProximalOperator.prox).
    """
    INTERFACES = [properties.Function,
                  properties.ProximalOperator]

    def __init__(self, eps=consts.TOLERANCE, max_iter=1000, min_iter=1):
        # TODO: Investigate what good default value are here!
        self.eps = eps
        self.max_iter = max_iter
        self.min_iter = min_iter

    def run(self, function, x, factor=1.0):
        """Finds the proximal operator of the sum of two proximal operators.

        Parameters
        ----------
        function : list or tuple with two Functions
            The two functions.
        x : numpy array (p-by-1)
            The point at which we want to compute the proximal operator.
        """
        self.check_compatibility(function[0], self.INTERFACES)
        self.check_compatibility(function[1], self.INTERFACES)

        x_curr = x
        p_corr = np.zeros(x.shape)  # Correction term for function[0].
        q_corr = np.zeros(x.shape)  # Correction term for function[1].
        for it in range(1, self.max_iter + 1):
            x_prev = x_curr
            # Prox of the first function, with its accumulated correction.
            s = x_prev + p_corr
            y = function[0].prox(s, factor=factor)
            p_corr = s - y
            # Prox of the second function, with its accumulated correction.
            t = y + q_corr
            x_curr = function[1].prox(t, factor=factor)
            q_corr = t - x_curr
            # Stop once the relative change is small enough.
            if maths.norm(x_curr - x_prev) / maths.norm(x_prev) < self.eps \
                    and it >= self.min_iter:
                break
        return x_curr
class DykstrasProjectionAlgorithm(bases.ExplicitAlgorithm):
    """Dykstra's projection algorithm.

    Computes the projection onto the intersection of two convex sets, each
    represented by a function with a projection operator
    (ProjectionOperator.proj).
    """
    INTERFACES = [properties.Function,
                  properties.ProjectionOperator]

    def __init__(self, eps=consts.TOLERANCE, max_iter=1000, min_iter=1):
        # TODO: Investigate what good default values are here!
        self.eps = eps
        self.max_iter = max_iter
        self.min_iter = min_iter

    def run(self, function, x):
        """Finds the projection onto the intersection of two sets.

        Parameters
        ----------
        function : list or tuple with two Functions
            The two functions.
        x : numpy array (p-by-1)
            The point that we wish to project.
        """
        self.check_compatibility(function[0], self.INTERFACES)
        self.check_compatibility(function[1], self.INTERFACES)

        x_curr = x
        p_corr = np.zeros(x.shape)  # Correction term for function[0].
        q_corr = np.zeros(x.shape)  # Correction term for function[1].
        for it in range(1, self.max_iter + 1):
            x_prev = x_curr
            # Project onto the first set, with its accumulated correction.
            s = x_prev + p_corr
            y = function[0].proj(s)
            p_corr = s - y
            # Project onto the second set, with its accumulated correction.
            t = y + q_corr
            x_curr = function[1].proj(t)
            q_corr = t - x_curr
            # Stop once the relative change is small enough.
            if maths.norm(x_curr - x_prev) / maths.norm(x_prev) < self.eps \
                    and it >= self.min_iter:
                break
        return x_curr
class ParallelDykstrasProjectionAlgorithm(bases.ExplicitAlgorithm):
    """Parallel variant of Dykstra's projection algorithm for two or more
    functions. Computes the projection onto the intersection of the convex
    sets represented by the functions' projection operators
    (ProjectionOperator.proj).
    """
    INTERFACES = [properties.Function,
                  properties.ProjectionOperator]

    def __init__(self, eps=consts.TOLERANCE,
                 max_iter=100, min_iter=1):
        # TODO: Investigate what is a good default value here!
        self.eps = eps
        self.max_iter = max_iter
        self.min_iter = min_iter

    def run(self, functions, x, weights=None):
        """Finds the projection onto the intersection of two sets.

        Parameters
        ----------
        functions : List or tuple with two or more elements. The functions.
        x : Numpy array. The point that we wish to project.
        weights : List or tuple with floats. Weights for the functions.
                Default is that they all have the same weight. The elements
                of the list or tuple must sum to 1.
        """
        for fun in functions:
            self.check_compatibility(fun, self.INTERFACES)

        num = len(functions)
        if weights is None:
            weights = [1.0 / float(num)] * num  # Uniform by default.

        x_curr = x_prev = x
        # One auxiliary point per function, all initialised to copies of x.
        z = [np.copy(x) for _ in range(num)]
        for it in range(self.max_iter):
            # Project every auxiliary point onto its set.
            p = [fun.proj(zi) for fun, zi in zip(functions, z)]
            # TODO: Does the weights really matter when the function is the
            # indicator function?
            x_prev = x_curr
            x_curr = sum(w * pi for w, pi in zip(weights, p))
            # Update the auxiliary points.
            z = [x_curr + zi - pi for zi, pi in zip(z, p)]
            # Stop once the relative change is small enough.
            if maths.norm(x_curr - x_prev) / maths.norm(x_prev) < self.eps \
                    and it + 1 >= self.min_iter:
                break
        return x_curr
class ParallelDykstrasProximalAlgorithm(bases.ExplicitAlgorithm):
    """Parallel variant of Dykstra's algorithm for two or more functions.
    Computes the proximal operator of a sum of functions. These functions
    may be indicator functions for convex sets (ProjectionOperator) or
    ProximalOperators.

    If all functions are ProjectionOperators, this algorithm finds the
    projection onto the intersection of the convex sets.
    """
    INTERFACES = [properties.Function,
                  properties.OR(properties.ProjectionOperator,
                                properties.ProximalOperator)]

    def __init__(self, eps=consts.TOLERANCE,
                 max_iter=100, min_iter=1):
        # TODO: Investigate what is a good default value here!
        self.eps = eps
        self.max_iter = max_iter
        self.min_iter = min_iter

    def run(self, x, prox=[], proj=[], factor=1.0, weights=None):
        """Finds the projection onto the intersection of two sets.

        Parameters
        ----------
        prox : List or tuple with two or more elements. The functions that
                are ProximalOperators. Either prox or proj must be non-empty.
        proj : List or tuple with two or more elements. The functions that
                are ProjectionOperators. Either proj or prox must be
                non-empty.
        factor : Positive float. A factor by which the Lagrange multiplier
                is scaled. This is usually the step size.
        x : Numpy array. The point that we wish to project.
        weights : List or tuple with floats. Weights for the functions.
                Default is that they all have the same weight. The elements
                of the list or tuple must sum to 1.
        """
        for fun in prox:
            self.check_compatibility(fun, self.INTERFACES)
        for fun in proj:
            self.check_compatibility(fun, self.INTERFACES)

        num_prox = len(prox)
        num_proj = len(proj)
        total = num_prox + num_proj
        if weights is None:
            weights = [1. / float(total)] * total  # Uniform by default.

        x_curr = x_prev = x
        # One auxiliary point per operator, all initialised to copies of x.
        z = [np.copy(x) for _ in range(total)]
        for it in range(self.max_iter):
            # Apply every operator to its auxiliary point: first the
            # proximal operators, then the projections.
            p = [op.prox(zi, factor) for op, zi in zip(prox, z[:num_prox])]
            p += [op.proj(zi) for op, zi in zip(proj, z[num_prox:])]
            x_prev = x_curr
            x_curr = sum(w * pi for w, pi in zip(weights, p))
            # Converged only if the iterate is both stationary and feasible
            # w.r.t. every projection operator.
            if maths.norm(x_curr - x_prev) / maths.norm(x_prev) < self.eps \
                    and it + 1 >= self.min_iter:
                feasible = [op.f(pi) <= 0.0
                            for op, pi in zip(proj, p[num_prox:])]
                if all(feasible):
                    break
            # Update the auxiliary points (skipped on the break above).
            z = [x_curr + zi - pi for zi, pi in zip(z, p)]
        return x_curr
# Run the module's doctests when this file is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| [
"parsimony.utils.time_cpu",
"numpy.copy",
"numpy.sqrt",
"parsimony.utils.time",
"scipy.interpolate.interp1d",
"numpy.zeros",
"parsimony.utils.maths.norm",
"doctest.testmod",
"numpy.isfinite",
"warnings.warn",
"parsimony.functions.properties.OR"
] | [((52077, 52094), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (52092, 52094), False, 'import doctest\n'), ((38548, 38621), 'parsimony.functions.properties.OR', 'properties.OR', (['properties.ProximalOperator', 'properties.ProjectionOperator'], {}), '(properties.ProximalOperator, properties.ProjectionOperator)\n', (38561, 38621), True, 'import parsimony.functions.properties as properties\n'), ((44424, 44441), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (44432, 44441), True, 'import numpy as np\n'), ((44458, 44475), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (44466, 44475), True, 'import numpy as np\n'), ((46073, 46090), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (46081, 46090), True, 'import numpy as np\n'), ((46107, 46124), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (46115, 46124), True, 'import numpy as np\n'), ((49389, 49462), 'parsimony.functions.properties.OR', 'properties.OR', (['properties.ProjectionOperator', 'properties.ProximalOperator'], {}), '(properties.ProjectionOperator, properties.ProximalOperator)\n', (49402, 49462), True, 'import parsimony.functions.properties as properties\n'), ((20471, 20487), 'parsimony.utils.time_cpu', 'utils.time_cpu', ([], {}), '()\n', (20485, 20487), True, 'import parsimony.utils as utils\n'), ((20998, 21211), 'warnings.warn', 'warnings.warn', (['"""Stopping criterion satisfied before the first iteration. Either beta is the solution (given eps), or if beta is null the problem might be over-penalized - then try smaller penalization."""'], {}), "(\n 'Stopping criterion satisfied before the first iteration. 
Either beta is the solution (given eps), or if beta is null the problem might be over-penalized - then try smaller penalization.'\n )\n", (21011, 21211), False, 'import warnings\n'), ((21515, 21531), 'numpy.isfinite', 'np.isfinite', (['eps'], {}), '(eps)\n', (21526, 21531), True, 'import numpy as np\n'), ((30717, 30730), 'scipy.interpolate.interp1d', 'interp1', (['x', 'y'], {}), '(x, y)\n', (30724, 30730), True, 'from scipy.interpolate import interp1d as interp1\n'), ((32021, 32033), 'parsimony.utils.time', 'utils.time', ([], {}), '()\n', (32031, 32033), True, 'import parsimony.utils as utils\n'), ((32173, 32189), 'parsimony.utils.maths.norm', 'maths.norm', (['beta'], {}), '(beta)\n', (32183, 32189), True, 'import parsimony.utils.maths as maths\n'), ((48111, 48121), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (48118, 48121), True, 'import numpy as np\n'), ((48401, 48422), 'numpy.zeros', 'np.zeros', (['x_old.shape'], {}), '(x_old.shape)\n', (48409, 48422), True, 'import numpy as np\n'), ((51142, 51152), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (51149, 51152), True, 'import numpy as np\n'), ((51432, 51453), 'numpy.zeros', 'np.zeros', (['x_old.shape'], {}), '(x_old.shape)\n', (51440, 51453), True, 'import numpy as np\n'), ((6989, 7005), 'parsimony.utils.time_cpu', 'utils.time_cpu', ([], {}), '()\n', (7003, 7005), True, 'import parsimony.utils as utils\n'), ((13764, 13780), 'parsimony.utils.time_cpu', 'utils.time_cpu', ([], {}), '()\n', (13778, 13780), True, 'import parsimony.utils as utils\n'), ((21883, 21899), 'parsimony.utils.time_cpu', 'utils.time_cpu', ([], {}), '()\n', (21897, 21899), True, 'import parsimony.utils as utils\n'), ((32566, 32578), 'parsimony.utils.time', 'utils.time', ([], {}), '()\n', (32576, 32578), True, 'import parsimony.utils as utils\n'), ((40503, 40519), 'parsimony.utils.time_cpu', 'utils.time_cpu', ([], {}), '()\n', (40517, 40519), True, 'import parsimony.utils as utils\n'), ((42365, 42378), 'parsimony.utils.maths.norm', 
'maths.norm', (['r'], {}), '(r)\n', (42375, 42378), True, 'import parsimony.utils.maths as maths\n'), ((42404, 42417), 'parsimony.utils.maths.norm', 'maths.norm', (['s'], {}), '(s)\n', (42414, 42417), True, 'import parsimony.utils.maths as maths\n'), ((15541, 15564), 'parsimony.utils.maths.norm', 'maths.norm', (['(betanew - z)'], {}), '(betanew - z)\n', (15551, 15564), True, 'import parsimony.utils.maths as maths\n'), ((31155, 31180), 'numpy.sqrt', 'np.sqrt', (['consts.TOLERANCE'], {}), '(consts.TOLERANCE)\n', (31162, 31180), True, 'import numpy as np\n'), ((34271, 34298), 'parsimony.utils.maths.norm', 'maths.norm', (['(beta_new - beta)'], {}), '(beta_new - beta)\n', (34281, 34298), True, 'import parsimony.utils.maths as maths\n'), ((7862, 7878), 'parsimony.utils.time_cpu', 'utils.time_cpu', ([], {}), '()\n', (7876, 7878), True, 'import parsimony.utils as utils\n'), ((8313, 8342), 'parsimony.utils.maths.norm', 'maths.norm', (['(betanew - betaold)'], {}), '(betanew - betaold)\n', (8323, 8342), True, 'import parsimony.utils.maths as maths\n'), ((14253, 14269), 'parsimony.utils.time_cpu', 'utils.time_cpu', ([], {}), '()\n', (14267, 14269), True, 'import parsimony.utils as utils\n'), ((41223, 41239), 'parsimony.utils.time_cpu', 'utils.time_cpu', ([], {}), '()\n', (41237, 41239), True, 'import parsimony.utils as utils\n'), ((44836, 44861), 'parsimony.utils.maths.norm', 'maths.norm', (['(x_new - x_old)'], {}), '(x_new - x_old)\n', (44846, 44861), True, 'import parsimony.utils.maths as maths\n'), ((44864, 44881), 'parsimony.utils.maths.norm', 'maths.norm', (['x_old'], {}), '(x_old)\n', (44874, 44881), True, 'import parsimony.utils.maths as maths\n'), ((46455, 46480), 'parsimony.utils.maths.norm', 'maths.norm', (['(x_new - x_old)'], {}), '(x_new - x_old)\n', (46465, 46480), True, 'import parsimony.utils.maths as maths\n'), ((46483, 46500), 'parsimony.utils.maths.norm', 'maths.norm', (['x_old'], {}), '(x_old)\n', (46493, 46500), True, 'import parsimony.utils.maths as 
maths\n'), ((48592, 48617), 'parsimony.utils.maths.norm', 'maths.norm', (['(x_new - x_old)'], {}), '(x_new - x_old)\n', (48602, 48617), True, 'import parsimony.utils.maths as maths\n'), ((48620, 48637), 'parsimony.utils.maths.norm', 'maths.norm', (['x_old'], {}), '(x_old)\n', (48630, 48637), True, 'import parsimony.utils.maths as maths\n'), ((51562, 51587), 'parsimony.utils.maths.norm', 'maths.norm', (['(x_new - x_old)'], {}), '(x_new - x_old)\n', (51572, 51587), True, 'import parsimony.utils.maths as maths\n'), ((51590, 51607), 'parsimony.utils.maths.norm', 'maths.norm', (['x_old'], {}), '(x_old)\n', (51600, 51607), True, 'import parsimony.utils.maths as maths\n'), ((41474, 41499), 'parsimony.utils.maths.norm', 'maths.norm', (['(x_new - x_old)'], {}), '(x_new - x_old)\n', (41484, 41499), True, 'import parsimony.utils.maths as maths\n'), ((16096, 16119), 'parsimony.utils.maths.norm', 'maths.norm', (['(betanew - z)'], {}), '(betanew - z)\n', (16106, 16119), True, 'import parsimony.utils.maths as maths\n'), ((41832, 41857), 'parsimony.utils.maths.norm', 'maths.norm', (['(x_new - x_old)'], {}), '(x_new - x_old)\n', (41842, 41857), True, 'import parsimony.utils.maths as maths\n'), ((41860, 41877), 'parsimony.utils.maths.norm', 'maths.norm', (['x_old'], {}), '(x_old)\n', (41870, 41877), True, 'import parsimony.utils.maths as maths\n')] |
import json
from os import listdir
from os.path import exists, join, isfile
from argparse import ArgumentParser
from collections import defaultdict
from copy import deepcopy
import torch
import numpy as np
from tqdm import tqdm
from mmcv import Config
from mmcv.parallel import scatter, collate, MMDataParallel
from mmaction.core import load_checkpoint
from mmaction.core.utils import propagate_root_dir
from mmaction.datasets import build_dataset
from mmaction.datasets.pipelines import Compose
from mmaction.models import build_model
# Label conventions used throughout this script.
NO_MOTION_LABEL = 12  # assigned when no motion segment is found for a dynamic gesture
NEGATIVE_LABEL = 13  # assigned to records for which no valid candidate remains
IGNORE_LABELS = {NO_MOTION_LABEL, NEGATIVE_LABEL}  # records passed through unchanged
STATIC_LABELS = {0, 1, 2, 3, 4, 5, 6, 7}  # processed with dynamic=False (no motion cue)
DYNAMIC_LABELS = {8, 9, 10, 11}  # processed with dynamic=True (motion cue required)
class RawFramesSegmentedRecord:
    """One annotation row of 7 string fields.

    Field layout: path, label, clip_start, clip_end, video_start, video_end,
    plus one trailing field kept verbatim. Typed accessors are provided for
    the first six fields; the raw string list is preserved so the record can
    be written back unchanged via ``repr``/``raw``.
    """

    def __init__(self, row):
        self._data = row
        assert len(self._data) == 7

    def __repr__(self):
        return ' '.join(self._data)

    def _int_field(self, idx):
        """Return field ``idx`` converted to int."""
        return int(self._data[idx])

    def _set_field(self, idx, value):
        """Store ``value`` into field ``idx`` as a string."""
        self._data[idx] = str(value)

    @property
    def raw(self):
        # Deep copy so callers cannot mutate this record through the result.
        return deepcopy(self._data)

    @property
    def path(self):
        return self._data[0]

    @property
    def label(self):
        return self._int_field(1)

    @label.setter
    def label(self, value):
        self._set_field(1, value)

    @property
    def clip_start(self):
        return self._int_field(2)

    @clip_start.setter
    def clip_start(self, value):
        self._set_field(2, value)

    @property
    def clip_end(self):
        return self._int_field(3)

    @clip_end.setter
    def clip_end(self, value):
        self._set_field(3, value)

    @property
    def video_start(self):
        return self._int_field(4)

    @video_start.setter
    def video_start(self, value):
        self._set_field(4, value)

    @property
    def video_end(self):
        return self._int_field(5)

    @video_end.setter
    def video_end(self, value):
        self._set_field(5, value)
def load_annotation(ann_full_path):
    """Read an annotation file and wrap each line in a record.

    Each line is split on single spaces and must yield the 7 fields that
    RawFramesSegmentedRecord expects.

    :param ann_full_path: path to the annotation text file.
    :return: list of RawFramesSegmentedRecord, one per line.
    """
    # BUG FIX: the original iterated ``open(ann_full_path)`` directly and
    # never closed the file handle; use a context manager instead.
    with open(ann_full_path) as ann_file:
        return [RawFramesSegmentedRecord(line.strip().split(' '))
                for line in ann_file]
def parse_predictions_file(file_path):
    """Parse a per-window predictions file.

    Each valid line has 15 ';'-separated fields: a window start position
    followed by 14 class scores. Lines of any other arity are skipped.

    :return: (sorted list of start positions, float32 score matrix whose
        rows follow that order). At least 3 windows are required.
    """
    scores_by_start = {}
    with open(file_path) as stream:
        for raw_line in stream:
            parts = raw_line.strip().split(';')
            if len(parts) != 15:
                continue
            scores_by_start[int(parts[0])] = [float(v) for v in parts[1:]]

    starts = sorted(scores_by_start)
    scores = np.array([scores_by_start[s] for s in starts], dtype=np.float32)
    assert len(starts) >= 3
    return starts, scores
def parse_movements_file(file_path):
    """Parse a per-frame motion-detection file.

    Each valid line is ``frame_id;flag`` (flag 0 or 1). Frame ids must form
    a contiguous range starting at 0.

    :return: uint8 array of motion flags indexed by frame id.
    """
    detected = {}
    with open(file_path) as stream:
        for raw_line in stream:
            parts = raw_line.strip().split(';')
            if len(parts) != 2:
                continue
            detected[int(parts[0])] = bool(int(parts[1]))

    ordered_ids = sorted(detected)
    assert ordered_ids[0] == 0
    assert len(ordered_ids) == ordered_ids[-1] + 1
    return np.array([detected[i] for i in ordered_ids], dtype=np.uint8)
def parse_kpts_file(file_path):
    """Parse a hand-keypoints JSON file into a hand-presence lookup.

    The JSON maps keypoint index -> {frame_id: data}. Frame ids in the file
    are 1-based; the returned dict is keyed by 0-based frame id.

    :return: dict {frame_id: True} for frames where any keypoint is present,
        or None when the file contains no keypoints at all.
    """
    with open(file_path) as stream:
        keypoints = json.load(stream)

    if not keypoints:
        return None

    frames_with_hand = {}
    for track in keypoints.values():
        for frame_id in track:
            # Shift from 1-based file frame ids to 0-based indices.
            frames_with_hand[int(frame_id) - 1] = True
    return frames_with_hand
def load_distributed_data(root_dir, proc_fun, extension):
    """Parse every ``*.<extension>`` file in ``root_dir`` with ``proc_fun``.

    :return: dict mapping file name (suffix stripped) to the parsed value;
        files for which ``proc_fun`` returns None are omitted.
    """
    suffix = '.{}'.format(extension)
    names = [f for f in listdir(root_dir)
             if f.endswith(suffix) and isfile(join(root_dir, f))]

    parsed_by_name = dict()
    for name in tqdm(names, desc='Loading data'):
        parsed = proc_fun(join(root_dir, name))
        if parsed is not None:
            parsed_by_name[name.replace(suffix, '')] = parsed
    return parsed_by_name
def flat_predictions(start_positions, all_scores, trg_label, window_size, num_frames):
    """Rasterize windowed class predictions onto a per-frame timeline.

    For each prediction window, frames in (roughly) the second half of the
    window are marked when the window's argmax class equals ``trg_label``,
    and receive the window's max score; all other frames keep score -1.

    :return: (uint8 match mask of length num_frames,
              float32 score array of length num_frames).
    """
    labels = np.argmax(all_scores, axis=1)
    confidences = np.max(all_scores, axis=1)
    hits = labels == trg_label

    segm = np.zeros([num_frames], dtype=np.uint8)
    frame_scores = np.full([num_frames], -1.0, dtype=np.float32)

    for idx, start in enumerate(start_positions):
        end = min(start + window_size, num_frames)
        # Only the trailing half of the window is attributed to the label.
        begin = max(0, start, end - window_size // 2)
        if 0 <= begin < end:
            segm[begin:end] = hits[idx]
            frame_scores[begin:end] = confidences[idx] if hits[idx] else -1.0

    return segm, frame_scores
def find_hands(sparse_mask, num_frames):
    """Expand a sparse {frame_id: flag} mask into a dense per-frame array.

    Frame ids outside [0, num_frames) are ignored. Returns None when the
    sparse mask itself is None (no keypoint data available).
    """
    if sparse_mask is None:
        return None

    dense = np.zeros([num_frames], dtype=np.uint8)
    for frame_id in sparse_mask:
        if 0 <= frame_id < num_frames:
            dense[frame_id] = True
    return dense
def split_subsequences(values, trg_label=None, min_size=None):
    """Split a 1-D array into maximal runs of equal values.

    Each run is returned as ``[start, end, value]`` with ``end`` exclusive.
    Optionally keep only runs whose value equals ``trg_label`` and/or whose
    length is at least ``min_size``.
    """
    boundaries = [0]
    boundaries += (np.argwhere(values[1:] != values[:-1]).reshape([-1]) + 1).tolist()
    boundaries.append(len(values))

    segments = []
    for start, end in zip(boundaries[:-1], boundaries[1:]):
        segments.append([start, end, values[start]])

    if trg_label is not None:
        segments = [seg for seg in segments if seg[2] == trg_label]
    if min_size is not None:
        segments = [seg for seg in segments if seg[1] - seg[0] >= min_size]
    return segments
def get_longest_segment(segments):
    """Return the ``[start, end, ...]`` segment spanning the most frames.

    Ties are resolved in favour of the earliest such segment.
    """
    span = lambda seg: seg[1] - seg[0]
    return max(segments, key=span)
def merge_closest(segments, max_distance=1):
    """Merge consecutive segments whose gap is at most ``max_distance``.

    The input segments are assumed ordered; the merged segment keeps the
    value of the first segment in the run. Input segments are not mutated.
    """
    merged = []
    current = None
    for seg in segments:
        if current is not None and seg[0] - current[1] <= max_distance:
            # Close enough to the previous run: extend it.
            current[1] = seg[1]
        else:
            if current is not None:
                merged.append(current)
            current = deepcopy(seg)
    if current is not None:
        merged.append(current)
    return merged
def get_ignore_candidates(records, ignore_labels):
    """Pair each record whose label is in ``ignore_labels`` with an empty
    candidate list (such records are passed through the pipeline unchanged).
    """
    return [(record, []) for record in records if record.label in ignore_labels]
def get_regular_candidates(records, all_predictions, all_motions, all_hand_kpts,
                           window_size, dynamic,
                           target_labels, negative_label, no_motion_label,
                           min_score=0.99, min_length=5, max_distance=1):
    """For each record with a label in ``target_labels``, propose temporal
    segments (candidates) that likely contain the annotated gesture.

    Combines three cues: per-window model scores (``all_predictions``),
    a per-frame motion mask (``all_motions``) and, when available, a
    per-frame hand-presence mask (``all_hand_kpts``). Records for which no
    valid candidate can be found are relabelled in place (to
    ``no_motion_label`` or ``negative_label``) and returned with an empty
    candidate list.

    :return: (list of (record, candidate (start, end) list),
              defaultdict mapping ignore reason -> affected records).
    """
    out_data = []
    ignores = defaultdict(list)
    for record in tqdm(records, desc='Processing gestures'):
        if record.label not in target_labels:
            continue
        if record.path not in all_predictions or record.path not in all_motions:
            continue
        pred_starts, scores = all_predictions[record.path]
        det_motion = all_motions[record.path]
        person_hand_kpts = all_hand_kpts[record.path] if record.path in all_hand_kpts else None
        # Dense per-frame mask/scores of windows predicted as the record's
        # own label.
        trg_label_mask, trg_label_scores = flat_predictions(
            pred_starts, scores, record.label, window_size, len(det_motion))
        trg_hand_mask = find_hands(person_hand_kpts, len(det_motion))
        if dynamic:
            # Dynamic gestures must overlap a sufficiently long motion run.
            motion_segments = split_subsequences(det_motion, trg_label=1, min_size=min_length)
            if len(motion_segments) == 0:
                record.label = no_motion_label
                out_data.append((record, []))
                ignores['no_motion'].append(record)
                continue
            if trg_hand_mask is not None:
                # Within each motion run keep the longest sub-range where a
                # hand is visible (merging near-adjacent hand runs first).
                interest_segments = []
                for motion_start, motion_end, _ in motion_segments:
                    segment_mask = trg_hand_mask[motion_start:motion_end]
                    hand_presented_segments = split_subsequences(segment_mask, trg_label=1, min_size=min_length)
                    hand_presented_segments = merge_closest(hand_presented_segments, max_distance)
                    if len(hand_presented_segments) == 0:
                        continue
                    hand_start, hand_end, _ = get_longest_segment(hand_presented_segments)
                    interest_segments.append((hand_start + motion_start, hand_end + motion_start, 1))
                if len(interest_segments) == 0:
                    record.label = negative_label
                    out_data.append((record, []))
                    ignores['no_hands'].append(record)
                    continue
            else:
                interest_segments = motion_segments
        elif trg_hand_mask is not None:
            # Static gestures only require the hand to be visible.
            interest_segments = split_subsequences(trg_hand_mask, trg_label=1, min_size=min_length)
            if len(interest_segments) == 0:
                record.label = negative_label
                out_data.append((record, []))
                ignores['no_hands'].append(record)
                continue
        else:
            # No auxiliary cue available: consider the whole clip.
            interest_segments = [[0, len(det_motion), 1]]
        candidates = []
        for segment_start, segment_end, _ in interest_segments:
            glob_shift = segment_start
            segment_mask = trg_label_mask[segment_start:segment_end]
            segment_scores = trg_label_scores[segment_start:segment_end]
            # Longest run predicted as the target label inside this segment.
            gesture_segments = split_subsequences(segment_mask, trg_label=1)
            if len(gesture_segments) == 0:
                continue
            gesture_start, gesture_end, _ = get_longest_segment(gesture_segments)
            glob_shift += gesture_start
            # Then the longest high-confidence run inside that gesture
            # (after merging near-adjacent runs).
            gesture_mask = segment_scores[gesture_start:gesture_end] > min_score
            movement_segments = split_subsequences(gesture_mask.astype(np.uint8), trg_label=1)
            if len(movement_segments) == 0:
                continue
            movement_segments = merge_closest(movement_segments, max_distance)
            movement_start, movement_end, _ = get_longest_segment(movement_segments)
            clip_start = glob_shift + movement_start
            clip_end = glob_shift + movement_end
            if clip_end - clip_start >= min_length:
                candidates.append((clip_start, clip_end))
        if len(candidates) == 0:
            record.label = negative_label
            out_data.append((record, []))
            ignores['no_candidates'].append(record)
            continue
        out_data.append((record, candidates))
    return out_data, ignores
def find_best_match(candidates, model, dataset, negative_label, input_clip_length, pipeline):
    """Run the model on every candidate segment of each record and keep the
    best-scoring segment whose prediction matches the record's label.

    The kept record is trimmed to that segment; the leftover frames before
    and after it are emitted as extra negative records. Records with no
    matching segment are relabelled to ``negative_label``.

    :return: (list of output records,
              dict with key 'invalid_matches' listing relabelled records,
              empty when there were none).
    """
    idx_map = dict()
    for idx in range(len(dataset)):
        ann = dataset.get_ann_info(idx)
        idx_map[ann['rel_path']] = idx

    out_records, empty_records = [], []
    for record, segments in tqdm(candidates, desc='Fixing annotation'):
        if len(segments) == 0:
            out_records.append(record)
            continue

        # BUG FIX: the original rebound the loop variable ``record`` to the
        # pipeline dict inside this loop, clobbering the annotation record
        # that is used below (record.label, record.clip_start, ...) and
        # breaking idx_map lookups for all but the first segment. Use
        # distinct names and look the index up once.
        dataset_idx = idx_map[record.path]
        data = []
        for segment in segments:
            indices = generate_indices(segment[0], segment[1], input_clip_length)
            clip_info = deepcopy(dataset.video_infos[dataset_idx])
            clip_info['modality'] = dataset.modality
            clip_info['frame_inds'] = indices + dataset.start_index
            clip_info['num_clips'] = 1
            clip_info['clip_len'] = input_clip_length
            data.append(pipeline(clip_info))

        data_gpu = scatter(collate(data, samples_per_gpu=len(segments)),
                           [torch.cuda.current_device()])[0]
        with torch.no_grad():
            net_output = model(return_loss=False, **data_gpu)
        if isinstance(net_output, (list, tuple)):
            assert len(net_output) == 1
            net_output = net_output[0]
        pred_labels = np.argmax(net_output, axis=1)
        pred_scores = np.max(net_output, axis=1)

        valid_segments = []
        for segment, pred_label, pred_score in zip(segments, pred_labels, pred_scores):
            if pred_label == record.label:
                valid_segments.append((segment, pred_score))

        if len(valid_segments) == 0:
            record.label = negative_label
            out_records.append(record)
            empty_records.append(record)
            continue

        # Keep the segment with the highest matching score.
        best_segment, _ = max(valid_segments, key=lambda tup: tup[1])

        # Trim the record to the best segment (positive clip).
        record.clip_start = best_segment[0]
        record.clip_end = best_segment[1]
        out_records.append(record)

        # Frames before the clip become a negative sample.
        if record.clip_start > record.video_start:
            record_before = RawFramesSegmentedRecord(record.raw)
            record_before.clip_start = record.video_start
            record_before.clip_end = record.clip_start
            record_before.video_end = record.clip_start
            record_before.label = negative_label
            out_records.append(record_before)

        # Frames after the clip become a negative sample.
        if record.video_end > record.clip_end:
            record_after = RawFramesSegmentedRecord(record.raw)
            record_after.video_start = record.clip_end
            record_after.clip_start = record.clip_end
            record_after.clip_end = record.video_end
            record_after.label = negative_label
            out_records.append(record_after)

    out_stat = dict()
    if len(empty_records) > 0:
        out_stat['invalid_matches'] = empty_records
    return out_records, out_stat
def generate_indices(start, end, out_length, invalid_idx=-2):
    """Produce exactly ``out_length`` frame indices for the range [start, end).

    Ranges shorter than ``out_length`` are padded (centred) on both sides
    with ``invalid_idx``; longer ranges are cropped to a centred window of
    ``out_length`` consecutive frames.
    """
    span = end - start
    if span < out_length:
        core = np.arange(start, end)
        pad_total = out_length - len(core)
        if pad_total > 0:
            pad_left = pad_total // 2
            pad_right = pad_total - pad_left
            core = np.concatenate((np.full(pad_left, invalid_idx, dtype=np.int32),
                                   core,
                                   np.full(pad_right, invalid_idx, dtype=np.int32)))
        return core

    # Centre an out_length-long window within [start, end).
    first = (start + (end - out_length + 1)) // 2
    return np.array([first + i for i in range(out_length)])
def dump_records(records, out_file_path):
    """Write ``str(record)`` for each record, one per line, to the file."""
    lines = [str(record) + '\n' for record in records]
    with open(out_file_path, 'w') as output_stream:
        output_stream.writelines(lines)
def update_config(cfg, args, trg_name):
    """Patch ``cfg`` so that all splits read dataset ``trg_name`` and use the
    validation pipeline (this script only runs inference)."""
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    if cfg.test_cfg is None:
        # NOTE(review): ``args.average_clips`` is not defined by main()'s
        # argument parser, so this branch would raise AttributeError --
        # confirm whether the configs in use always define test_cfg.
        cfg.test_cfg = dict(average_clips=args.average_clips)
    # NOTE(review): the trailing commas make these values 1-element tuples,
    # and main() later reads ``cfg.data.train.sources[0]`` (plural) -- the
    # key name/type here looks inconsistent; confirm against the configs.
    cfg.data.train.source = trg_name,
    cfg.data.val.source = trg_name,
    cfg.data.test.source = trg_name,
    cfg.data.train.pipeline = cfg.val_pipeline
    cfg.data.val.pipeline = cfg.val_pipeline
    cfg.data.test.pipeline = cfg.val_pipeline
    cfg.data.train.ann_file = 'train.txt'
    cfg.data.val.ann_file = 'val.txt'
    cfg.data.test.ann_file = 'test.txt'
    return cfg
def update_stat(old_state, new_state):
    """Merge ``new_state`` into ``old_state`` in place and return it.

    For keys present in both, the old list is extended with the new values.
    For brand-new keys the list object from ``new_state`` is stored as-is
    (aliased), matching the original behaviour.
    """
    for key, value in new_state.items():
        if key in old_state:
            old_state[key].extend(value)
        else:
            old_state[key] = value
    return old_state
def main():
    """Refine gesture clip boundaries in a dataset annotation using model
    predictions, per-frame motion masks and hand keypoints, then write the
    fixed annotation file."""
    parser = ArgumentParser()
    parser.add_argument('--config', '-c', type=str, required=True)
    parser.add_argument('--checkpoint', '-w', type=str, required=True)
    parser.add_argument('--dataset_name', '-n', type=str, required=True)
    parser.add_argument('--data_dir', '-d', type=str, required=True)
    parser.add_argument('--predictions', '-p', type=str, required=True)
    parser.add_argument('--movements', '-m', type=str, required=True)
    parser.add_argument('--keypoints', '-k', type=str, required=True)
    parser.add_argument('--out_annotation', '-o', type=str, required=True)
    args = parser.parse_args()

    assert exists(args.config)
    # BUG FIX: was ``exists(args.weights)`` -- no such attribute is defined
    # (the option is stored as ``args.checkpoint``), raising AttributeError.
    assert exists(args.checkpoint)
    assert exists(args.data_dir)
    assert exists(args.predictions)
    assert exists(args.movements)
    assert exists(args.keypoints)
    assert args.dataset_name is not None and args.dataset_name != ''
    assert args.out_annotation is not None and args.out_annotation != ''

    cfg = Config.fromfile(args.config)
    cfg = update_config(cfg, args, trg_name=args.dataset_name)
    cfg = propagate_root_dir(cfg, args.data_dir)

    dataset = build_dataset(cfg.data, 'train', dict(test_mode=True))
    data_pipeline = Compose(dataset.pipeline.transforms[1:])
    # BUG FIX: was ``'{} dataset:'.format(args.mode)`` -- ``args.mode`` is
    # not a defined argument; this script always loads the train split.
    print('train dataset:\n' + str(dataset))

    model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    load_checkpoint(model, args.checkpoint, strict=False)
    model = MMDataParallel(model, device_ids=[0])
    model.eval()

    annotation_path = join(args.data_dir, cfg.data.train.sources[0], cfg.data.train.ann_file)
    records = load_annotation(annotation_path)
    predictions = load_distributed_data(args.predictions, parse_predictions_file, 'txt')
    movements = load_distributed_data(args.movements, parse_movements_file, 'txt')
    hand_kpts = load_distributed_data(args.keypoints, parse_kpts_file, 'json')
    print('Loaded records: {}'.format(len(records)))

    invalid_stat = dict()
    all_candidates = []

    # Records with ignore labels pass through untouched.
    ignore_candidates = get_ignore_candidates(records, IGNORE_LABELS)
    all_candidates += ignore_candidates

    static_candidates, static_invalids = get_regular_candidates(
        records, predictions, movements, hand_kpts,
        cfg.data.output.length, False,
        STATIC_LABELS, NEGATIVE_LABEL, NO_MOTION_LABEL,
        min_score=0.9, min_length=4, max_distance=1)
    all_candidates += static_candidates
    invalid_stat = update_stat(invalid_stat, static_invalids)
    print('Static candidates: {}'.format(len(static_candidates)))
    if len(invalid_stat) > 0:
        print('Ignored records after static analysis:')
        for ignore_label, ignore_values in invalid_stat.items():
            print('   - {}: {}'.format(ignore_label.replace('_', ' '), len(ignore_values)))

    dynamic_candidates, dynamic_invalids = get_regular_candidates(
        records, predictions, movements, hand_kpts,
        cfg.data.output.length, True,
        DYNAMIC_LABELS, NEGATIVE_LABEL, NO_MOTION_LABEL,
        min_score=0.9, min_length=4, max_distance=1)
    all_candidates += dynamic_candidates
    invalid_stat = update_stat(invalid_stat, dynamic_invalids)
    print('Dynamic candidates: {}'.format(len(dynamic_candidates)))
    if len(invalid_stat) > 0:
        print('Ignored records after dynamic analysis:')
        for ignore_label, ignore_values in invalid_stat.items():
            print('   - {}: {}'.format(ignore_label.replace('_', ' '), len(ignore_values)))

    # BUG FIX: the original call passed only 4 arguments although
    # find_best_match requires the clip length and the data pipeline too
    # (``data_pipeline`` was built above but never used).
    fixed_records, fix_stat = find_best_match(
        all_candidates, model, dataset, NEGATIVE_LABEL,
        cfg.data.output.length, data_pipeline)
    invalid_stat = update_stat(invalid_stat, fix_stat)
    print('Final records: {}'.format(len(fixed_records)))
    if len(invalid_stat) > 0:
        print('Final ignored records:')
        for ignore_label, ignore_values in invalid_stat.items():
            print('   - {}: {}'.format(ignore_label.replace('_', ' '), len(ignore_values)))
            for ignored_record in ignore_values:
                print('      - {}'.format(ignored_record.path))

    dump_records(fixed_records, args.out_annotation)
    print('Fixed annotation has been stored at: {}'.format(args.out_annotation))
# Script entry point.
if __name__ == '__main__':
    main()
| [
"numpy.array",
"copy.deepcopy",
"mmcv.Config.fromfile",
"numpy.arange",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"numpy.max",
"torch.cuda.current_device",
"mmcv.parallel.MMDataParallel",
"numpy.argmax",
"mmaction.core.load_checkpoint",
"torch.no_grad",
"mmaction.models.bu... | [((2422, 2482), 'numpy.array', 'np.array', (['[predictions[s] for s in starts]'], {'dtype': 'np.float32'}), '([predictions[s] for s in starts], dtype=np.float32)\n', (2430, 2482), True, 'import numpy as np\n'), ((3088, 3161), 'numpy.array', 'np.array', (['[movements[frame_id] for frame_id in frame_ids]'], {'dtype': 'np.uint8'}), '([movements[frame_id] for frame_id in frame_ids], dtype=np.uint8)\n', (3096, 3161), True, 'import numpy as np\n'), ((3806, 3838), 'tqdm.tqdm', 'tqdm', (['files'], {'desc': '"""Loading data"""'}), "(files, desc='Loading data')\n", (3810, 3838), False, 'from tqdm import tqdm\n'), ((4186, 4215), 'numpy.argmax', 'np.argmax', (['all_scores'], {'axis': '(1)'}), '(all_scores, axis=1)\n', (4195, 4215), True, 'import numpy as np\n'), ((4234, 4260), 'numpy.max', 'np.max', (['all_scores'], {'axis': '(1)'}), '(all_scores, axis=1)\n', (4240, 4260), True, 'import numpy as np\n'), ((4321, 4359), 'numpy.zeros', 'np.zeros', (['[num_frames]'], {'dtype': 'np.uint8'}), '([num_frames], dtype=np.uint8)\n', (4329, 4359), True, 'import numpy as np\n'), ((4377, 4422), 'numpy.full', 'np.full', (['[num_frames]', '(-1.0)'], {'dtype': 'np.float32'}), '([num_frames], -1.0, dtype=np.float32)\n', (4384, 4422), True, 'import numpy as np\n'), ((4932, 4970), 'numpy.zeros', 'np.zeros', (['[num_frames]'], {'dtype': 'np.uint8'}), '([num_frames], dtype=np.uint8)\n', (4940, 4970), True, 'import numpy as np\n'), ((6808, 6825), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6819, 6825), False, 'from collections import defaultdict\n'), ((6844, 6885), 'tqdm.tqdm', 'tqdm', (['records'], {'desc': '"""Processing gestures"""'}), "(records, desc='Processing gestures')\n", (6848, 6885), False, 'from tqdm import tqdm\n'), ((10981, 11023), 'tqdm.tqdm', 'tqdm', (['candidates'], {'desc': '"""Fixing annotation"""'}), "(candidates, desc='Fixing annotation')\n", (10985, 11023), False, 'from tqdm import tqdm\n'), ((15530, 15546), 
'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (15544, 15546), False, 'from argparse import ArgumentParser\n'), ((16157, 16176), 'os.path.exists', 'exists', (['args.config'], {}), '(args.config)\n', (16163, 16176), False, 'from os.path import exists, join, isfile\n'), ((16188, 16208), 'os.path.exists', 'exists', (['args.weights'], {}), '(args.weights)\n', (16194, 16208), False, 'from os.path import exists, join, isfile\n'), ((16220, 16241), 'os.path.exists', 'exists', (['args.data_dir'], {}), '(args.data_dir)\n', (16226, 16241), False, 'from os.path import exists, join, isfile\n'), ((16253, 16277), 'os.path.exists', 'exists', (['args.predictions'], {}), '(args.predictions)\n', (16259, 16277), False, 'from os.path import exists, join, isfile\n'), ((16289, 16311), 'os.path.exists', 'exists', (['args.movements'], {}), '(args.movements)\n', (16295, 16311), False, 'from os.path import exists, join, isfile\n'), ((16323, 16345), 'os.path.exists', 'exists', (['args.keypoints'], {}), '(args.keypoints)\n', (16329, 16345), False, 'from os.path import exists, join, isfile\n'), ((16499, 16527), 'mmcv.Config.fromfile', 'Config.fromfile', (['args.config'], {}), '(args.config)\n', (16514, 16527), False, 'from mmcv import Config\n'), ((16601, 16639), 'mmaction.core.utils.propagate_root_dir', 'propagate_root_dir', (['cfg', 'args.data_dir'], {}), '(cfg, args.data_dir)\n', (16619, 16639), False, 'from mmaction.core.utils import propagate_root_dir\n'), ((16730, 16770), 'mmaction.datasets.pipelines.Compose', 'Compose', (['dataset.pipeline.transforms[1:]'], {}), '(dataset.pipeline.transforms[1:])\n', (16737, 16770), False, 'from mmaction.datasets.pipelines import Compose\n'), ((16844, 16905), 'mmaction.models.build_model', 'build_model', (['cfg.model'], {'train_cfg': 'None', 'test_cfg': 'cfg.test_cfg'}), '(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)\n', (16855, 16905), False, 'from mmaction.models import build_model\n'), ((16910, 16963), 
'mmaction.core.load_checkpoint', 'load_checkpoint', (['model', 'args.checkpoint'], {'strict': '(False)'}), '(model, args.checkpoint, strict=False)\n', (16925, 16963), False, 'from mmaction.core import load_checkpoint\n'), ((16976, 17013), 'mmcv.parallel.MMDataParallel', 'MMDataParallel', (['model'], {'device_ids': '[0]'}), '(model, device_ids=[0])\n', (16990, 17013), False, 'from mmcv.parallel import scatter, collate, MMDataParallel\n'), ((17054, 17125), 'os.path.join', 'join', (['args.data_dir', 'cfg.data.train.sources[0]', 'cfg.data.train.ann_file'], {}), '(args.data_dir, cfg.data.train.sources[0], cfg.data.train.ann_file)\n', (17058, 17125), False, 'from os.path import exists, join, isfile\n'), ((937, 957), 'copy.deepcopy', 'deepcopy', (['self._data'], {}), '(self._data)\n', (945, 957), False, 'from copy import deepcopy\n'), ((3287, 3310), 'json.load', 'json.load', (['input_stream'], {}), '(input_stream)\n', (3296, 3310), False, 'import json\n'), ((3860, 3885), 'os.path.join', 'join', (['root_dir', 'file_name'], {}), '(root_dir, file_name)\n', (3864, 3885), False, 'from os.path import exists, join, isfile\n'), ((12021, 12050), 'numpy.argmax', 'np.argmax', (['net_output'], {'axis': '(1)'}), '(net_output, axis=1)\n', (12030, 12050), True, 'import numpy as np\n'), ((12073, 12099), 'numpy.max', 'np.max', (['net_output'], {'axis': '(1)'}), '(net_output, axis=1)\n', (12079, 12099), True, 'import numpy as np\n'), ((13864, 13885), 'numpy.arange', 'np.arange', (['start', 'end'], {}), '(start, end)\n', (13873, 13885), True, 'import numpy as np\n'), ((3686, 3703), 'os.listdir', 'listdir', (['root_dir'], {}), '(root_dir)\n', (3693, 3703), False, 'from os import listdir\n'), ((5882, 5899), 'copy.deepcopy', 'deepcopy', (['segment'], {}), '(segment)\n', (5890, 5899), False, 'from copy import deepcopy\n'), ((11311, 11345), 'copy.deepcopy', 'deepcopy', (['dataset.video_infos[idx]'], {}), '(dataset.video_infos[idx])\n', (11319, 11345), False, 'from copy import deepcopy\n'), 
((11778, 11793), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11791, 11793), False, 'import torch\n'), ((6181, 6198), 'copy.deepcopy', 'deepcopy', (['segment'], {}), '(segment)\n', (6189, 6198), False, 'from copy import deepcopy\n'), ((3714, 3731), 'os.path.join', 'join', (['root_dir', 'f'], {}), '(root_dir, f)\n', (3718, 3731), False, 'from os.path import exists, join, isfile\n'), ((11731, 11758), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (11756, 11758), False, 'import torch\n'), ((14080, 14128), 'numpy.full', 'np.full', (['num_before', 'invalid_idx'], {'dtype': 'np.int32'}), '(num_before, invalid_idx, dtype=np.int32)\n', (14087, 14128), True, 'import numpy as np\n'), ((14215, 14262), 'numpy.full', 'np.full', (['num_after', 'invalid_idx'], {'dtype': 'np.int32'}), '(num_after, invalid_idx, dtype=np.int32)\n', (14222, 14262), True, 'import numpy as np\n'), ((5187, 5225), 'numpy.argwhere', 'np.argwhere', (['(values[1:] != values[:-1])'], {}), '(values[1:] != values[:-1])\n', (5198, 5225), True, 'import numpy as np\n')] |
# Machine Learning/Data Science Precourse Work
# ###
# LAMBDA SCHOOL
# ###
# MIT LICENSE
# ###
# Free example function definition
# This function passes one of the 11 tests contained inside of test.py. Write the rest, defined in README.md, here, and execute python test.py to test. Passing this precourse work will greatly increase your odds of acceptance into the program.
import numpy as np
import math
def f(x):
    """Quadratic: return x squared."""
    return x * x
def f_2(x):
    """Cubic: return x cubed."""
    return x * x * x
def f_3(x):
    """Return x**3 + 5*x."""
    return x * x * x + 5 * x
def d_f(x):
    """Derivative of f: 2x."""
    return 2 * x
def d_f_2(x):
    """Derivative of f_2: 3x**2."""
    return 3 * x * x
def d_f_3(x):
    """Derivative of f_3: 3x**2 + 5."""
    return 3 * x * x + 5
def vector_sum(v1, v2):
    """Element-wise sum of two vectors, returned as a numpy array."""
    return np.array(v1) + np.array(v2)
def vector_less(v1, v2):
    """Element-wise difference v1 - v2, returned as a numpy array."""
    return np.array(v1) - np.array(v2)
def vector_magnitude(x):
    """Return the Euclidean (L2) norm of vector ``x``.

    Fixes the original's shadowing of the builtin ``sum`` with a local
    accumulator and replaces the manual ``range(len(x))`` index loop with
    the idiomatic generator expression. An empty vector yields 0.0, as
    before (sqrt of an empty sum).
    """
    return math.sqrt(sum(c * c for c in x))
def vec5():
    """A length-5 vector of ones."""
    return np.ones(5)
def vec3():
    """A length-3 zero vector."""
    return np.zeros(3)
def vec2_1():
    """Standard basis vector e1 of R^2."""
    return np.array([1, 0])
def vec2_2():
    """Standard basis vector e2 of R^2."""
    return np.array([0, 1])
def matrix_multiply(m, v):
    """Multiply matrix ``m`` by vector (or matrix) ``v`` as numpy arrays."""
    return np.dot(np.array(m), np.array(v))
| [
"numpy.array",
"numpy.zeros",
"math.sqrt",
"numpy.ones"
] | [((619, 631), 'numpy.array', 'np.array', (['v1'], {}), '(v1)\n', (627, 631), True, 'import numpy as np\n'), ((637, 649), 'numpy.array', 'np.array', (['v2'], {}), '(v2)\n', (645, 649), True, 'import numpy as np\n'), ((706, 718), 'numpy.array', 'np.array', (['v1'], {}), '(v1)\n', (714, 718), True, 'import numpy as np\n'), ((724, 736), 'numpy.array', 'np.array', (['v2'], {}), '(v2)\n', (732, 736), True, 'import numpy as np\n'), ((850, 864), 'math.sqrt', 'math.sqrt', (['sum'], {}), '(sum)\n', (859, 864), False, 'import math\n'), ((884, 894), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (891, 894), True, 'import numpy as np\n'), ((921, 932), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (929, 932), True, 'import numpy as np\n'), ((961, 977), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (969, 977), True, 'import numpy as np\n'), ((1005, 1021), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1013, 1021), True, 'import numpy as np\n'), ((1063, 1074), 'numpy.array', 'np.array', (['m'], {}), '(m)\n', (1071, 1074), True, 'import numpy as np\n'), ((1080, 1091), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (1088, 1091), True, 'import numpy as np\n')] |
import unittest
import numpy as np
import bayesnet as bn
class TestProduct(unittest.TestCase):
    def test_product(self):
        """Check prod() forward/backward for scalars and n-d arrays."""
        # Each case: (input array, prod axis, keepdims flag, expected gradient).
        cases = [
            (1, None, False, 1),
            (np.arange(1, 5), None, False,
             np.array([24., 12., 8., 6.])),
            (np.arange(1, 7).reshape(2, 3), 1, True,
             np.array([
                 [6., 3., 2.],
                 [30., 24., 20.]
             ])),
            (np.arange(1, 7).reshape(2, 3, 1), (0, 2), False,
             np.array([4., 5., 6., 1., 2., 3.]).reshape(2, 3, 1)),
        ]
        for arr, axis, keep, expected in cases:
            param = bn.Parameter(arr)
            out = param.prod(axis, keep)
            out.backward(np.ones(out.shape))
            if isinstance(expected, int):
                self.assertEqual(expected, param.grad)
            else:
                self.assertTrue((expected == param.grad).all())
if __name__ == '__main__':
    # Run the test suite when this file is executed as a script.
    unittest.main()
| [
"numpy.ones",
"numpy.arange",
"numpy.array",
"unittest.main",
"bayesnet.Parameter"
] | [((1099, 1114), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1112, 1114), False, 'import unittest\n'), ((172, 187), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {}), '(1, 5)\n', (181, 187), True, 'import numpy as np\n'), ((534, 566), 'numpy.array', 'np.array', (['[24.0, 12.0, 8.0, 6.0]'], {}), '([24.0, 12.0, 8.0, 6.0])\n', (542, 566), True, 'import numpy as np\n'), ((576, 623), 'numpy.array', 'np.array', (['[[6.0, 3.0, 2.0], [30.0, 24.0, 20.0]]'], {}), '([[6.0, 3.0, 2.0], [30.0, 24.0, 20.0]])\n', (584, 623), True, 'import numpy as np\n'), ((824, 841), 'bayesnet.Parameter', 'bn.Parameter', (['arr'], {}), '(arr)\n', (836, 841), True, 'import bayesnet as bn\n'), ((898, 914), 'numpy.ones', 'np.ones', (['b.shape'], {}), '(b.shape)\n', (905, 914), True, 'import numpy as np\n'), ((201, 216), 'numpy.arange', 'np.arange', (['(1)', '(7)'], {}), '(1, 7)\n', (210, 216), True, 'import numpy as np\n'), ((244, 259), 'numpy.arange', 'np.arange', (['(1)', '(7)'], {}), '(1, 7)\n', (253, 259), True, 'import numpy as np\n'), ((677, 717), 'numpy.array', 'np.array', (['[4.0, 5.0, 6.0, 1.0, 2.0, 3.0]'], {}), '([4.0, 5.0, 6.0, 1.0, 2.0, 3.0])\n', (685, 717), True, 'import numpy as np\n')] |
import numpy as np
import pickle
class CorrelationList:
    """Streaming Pearson correlation accumulator over a grid of entries.

    Keeps running sums (x, y, x*y, x^2, y^2) and sample counts per entry so
    that correlation coefficients can be computed incrementally from batches
    of paired samples without retaining the raw data.
    """

    def __init__(self, shape):
        """Allocate zeroed accumulators of the given shape."""
        self._sumx = np.zeros(shape, dtype=float)
        self._sumy = np.zeros(shape, dtype=float)
        self._sumxy = np.zeros(shape, dtype=float)
        self._sumxsq = np.zeros(shape, dtype=float)
        self._sumysq = np.zeros(shape, dtype=float)
        # Per-entry sample count.
        # TODO: Since this should be the same for every point, we can maybe use a single point for it
        self._n = np.zeros(shape, dtype=float)

    def __getitem__(self, key):
        """Return the Pearson correlation coefficient(s) at ``key``.

        :raises NotImplementedError: for slice keys (not supported yet).
        :raises TypeError: for any other key type.
        """
        if isinstance(key, (int, tuple, list)):
            num = self._sumxy[key] - (self._sumx[key] * self._sumy[key] / self._n[key])
            denom1 = self._sumxsq[key] - (self._sumx[key]**2 / self._n[key])
            denom2 = self._sumysq[key] - (self._sumy[key]**2 / self._n[key])
            # Clamp the denominator to avoid division by zero for constant inputs.
            return num / np.maximum(np.sqrt(denom1 * denom2), 1e-15)
        if isinstance(key, slice):
            raise NotImplementedError
        raise TypeError

    def update(self, key, x, y):
        """Fold a batch of paired samples ``(x, y)`` into the accumulators at ``key``."""
        self._sumx[key] += np.sum(x)
        self._sumy[key] += np.sum(y)
        self._sumxy[key] += np.dot(x, y)
        self._sumxsq[key] += np.sum(np.square(x))
        self._sumysq[key] += np.sum(np.square(y))
        self._n[key] += len(x)

    def merge(self, correlation_array):
        """Merge another CorrelationList's accumulators into this one.

        :raises TypeError: if ``correlation_array`` is not a CorrelationList.
        """
        if not isinstance(correlation_array, CorrelationList):
            raise TypeError
        self._sumx += correlation_array._sumx
        self._sumy += correlation_array._sumy
        self._sumxy += correlation_array._sumxy
        self._sumxsq += correlation_array._sumxsq
        self._sumysq += correlation_array._sumysq
        self._n += correlation_array._n

    def save(self):
        """Pickle this accumulator to /tmp/correlations.p.

        BUG FIX: the original passed an unclosed ``open(...)`` handle to
        ``pickle.dump``, leaking the file descriptor; use a context manager.
        """
        with open("/tmp/correlations.p", "wb") as out_file:
            pickle.dump(self, out_file)
| [
"numpy.sqrt",
"numpy.square",
"numpy.sum",
"numpy.dot",
"numpy.zeros"
] | [((110, 138), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'float'}), '(shape, dtype=float)\n', (118, 138), True, 'import numpy as np\n'), ((160, 188), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'float'}), '(shape, dtype=float)\n', (168, 188), True, 'import numpy as np\n'), ((211, 239), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'float'}), '(shape, dtype=float)\n', (219, 239), True, 'import numpy as np\n'), ((263, 291), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'float'}), '(shape, dtype=float)\n', (271, 291), True, 'import numpy as np\n'), ((315, 343), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'float'}), '(shape, dtype=float)\n', (323, 343), True, 'import numpy as np\n'), ((362, 390), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'float'}), '(shape, dtype=float)\n', (370, 390), True, 'import numpy as np\n'), ((1114, 1123), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (1120, 1123), True, 'import numpy as np\n'), ((1151, 1160), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (1157, 1160), True, 'import numpy as np\n'), ((1189, 1201), 'numpy.dot', 'np.dot', (['x', 'y'], {}), '(x, y)\n', (1195, 1201), True, 'import numpy as np\n'), ((1238, 1250), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (1247, 1250), True, 'import numpy as np\n'), ((1288, 1300), 'numpy.square', 'np.square', (['y'], {}), '(y)\n', (1297, 1300), True, 'import numpy as np\n'), ((881, 905), 'numpy.sqrt', 'np.sqrt', (['(denom1 * denom2)'], {}), '(denom1 * denom2)\n', (888, 905), True, 'import numpy as np\n')] |
import click
import cv2
import numpy as np
class Configuration():
    """Holds the corner-refinement settings and chessboard object points
    shared by the CLI commands.
    """
    def __init__(self, criteria=None):
        """Set up termination criteria and the reference object-point grid.

        :param criteria: optional cv2 termination-criteria tuple; when omitted
            a default (EPS+MAX_ITER, 30 iterations, 0.001 resolution) is built.

        BUG FIX: the original required ``criteria`` positionally but then
        ignored it and overwrote ``self.criteria`` unconditionally; it also
        computed the object-point grid into a discarded local. The parameter
        now has a default (so zero-arg construction works) and is honoured
        when supplied, and the grid is kept on ``self.objp``.
        """
        self.MAXIMUM_ITERATIONS = 30
        self.SUBPIXEL_RESOLUTION = 0.001
        if criteria is not None:
            self.criteria = criteria
        else:
            self.criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,
                             self.MAXIMUM_ITERATIONS,
                             self.SUBPIXEL_RESOLUTION)
        # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
        # Set it the the maximum size array needed.
        self.objp = np.zeros((7 * 6, 3), np.float32)
        self.objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)
pass_configuration = click.make_pass_decorator(Configuration)


@click.command()
@click.argument('images', nargs=-1)
@pass_configuration
def cli(configuration, images):
    """Display each image path given on the command line.

    BUG FIXES: the original stacked ``@click.group()`` on top of
    ``@click.command()`` (mutually exclusive — a function can only be one
    of them), invoked the pass decorator as ``@pass_configuration()``
    instead of applying it directly, and did not accept the Configuration
    object that the decorator injects as the first argument. The
    side-effect-only list comprehension is also replaced by a plain loop.
    """
    for image in images:
        show_image(image)
def show_image(filename):
    """Echo the file name and raw pixel data, then display the image briefly.

    NOTE(review): cv2.imread returns None for unreadable paths, in which
    case cv2.imshow below would raise — confirm callers pass valid paths.
    """
    click.echo("\n" + filename + "\n")
    image = cv2.imread(filename)
    click.echo(image)  # dumps the ndarray (or None) to stdout
    cv2.imshow('image', image)
    cv2.waitKey(500)  # keep the window open for 500 ms
| [
"click.argument",
"click.make_pass_decorator",
"click.group",
"cv2.imshow",
"click.echo",
"numpy.zeros",
"cv2.waitKey",
"click.command",
"cv2.imread"
] | [((660, 700), 'click.make_pass_decorator', 'click.make_pass_decorator', (['Configuration'], {}), '(Configuration)\n', (685, 700), False, 'import click\n'), ((704, 717), 'click.group', 'click.group', ([], {}), '()\n', (715, 717), False, 'import click\n'), ((719, 734), 'click.command', 'click.command', ([], {}), '()\n', (732, 734), False, 'import click\n'), ((736, 770), 'click.argument', 'click.argument', (['"""images"""'], {'nargs': '(-1)'}), "('images', nargs=-1)\n", (750, 770), False, 'import click\n'), ((551, 583), 'numpy.zeros', 'np.zeros', (['(7 * 6, 3)', 'np.float32'], {}), '((7 * 6, 3), np.float32)\n', (559, 583), True, 'import numpy as np\n'), ((1065, 1099), 'click.echo', 'click.echo', (["('\\n' + filename + '\\n')"], {}), "('\\n' + filename + '\\n')\n", (1075, 1099), False, 'import click\n'), ((1112, 1132), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (1122, 1132), False, 'import cv2\n'), ((1137, 1154), 'click.echo', 'click.echo', (['image'], {}), '(image)\n', (1147, 1154), False, 'import click\n'), ((1159, 1185), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'image'], {}), "('image', image)\n", (1169, 1185), False, 'import cv2\n'), ((1190, 1206), 'cv2.waitKey', 'cv2.waitKey', (['(500)'], {}), '(500)\n', (1201, 1206), False, 'import cv2\n')] |
#!/usr/bin/env python
"""
Copyright (C) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import sys
import os
from argparse import ArgumentParser, SUPPRESS
import cv2
import cv2 as cv
import numpy as np
import logging as log
from time import time
from openvino.inference_engine import IENetwork, IECore
import json
from telepyth import TelepythClient
from model import model
class classifier(model):
    """Runs an Inception-v4 IR network through OpenVINO on a MYRIAD device.

    Can grab a frame from the default webcam (`shot`), classify a frame
    (`classify`), overlay the top prediction on it (`process`), and push
    notifications through Telegram (`telegram_send`).
    """
    def __init__(self):
        # NOTE(review): bare `super()` builds the proxy object but never
        # calls model.__init__ — confirm whether the parent initialiser
        # was meant to run here.
        super()
        self.model = 'inception-v4.xml'  # IR network definition file
        self.device = 'MYRIAD'  # target OpenVINO device name
        self.number_top = 10  # how many top-scoring classes to rank
        self.input=['input.jpg']  # input names; length doubles as batch size
        self.load_model()
    def load_model(self):
        """Load the IR network, compile it for the device and read the
        ImageNet class-index mapping from disk."""
        log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
        model_xml = self.model
        # The weights (.bin) file shares the .xml file's stem.
        model_bin = os.path.splitext(model_xml)[0] + ".bin"
        # Plugin initialization for specified device and load extensions library if specified
        log.info("Creating Inference Engine")
        ie = IECore()
        # Read IR
        log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
        self.net = ie.read_network(model=model_xml, weights=model_bin)
        assert len(self.net.inputs.keys()) == 1, "Sample supports only single input topologies"
        assert len(self.net.outputs) == 1, "Sample supports only single output topologies"
        log.info("Preparing input blobs")
        # Single-input/single-output network: grab the first blob of each.
        self.input_blob = next(iter(self.net.inputs))
        self.out_blob = next(iter(self.net.outputs))
        self.net.batch_size = len(self.input)
        # Loading model to the plugin
        log.info("Loading model to the plugin")
        self.exec_net = ie.load_network(network=self.net, device_name=self.device)
        #load classes
        # Maps class index (as a string key) -> [wordnet id, human label].
        with open("/home/pi/inception/imagenet_class_index.json",'r') as file:
            self.labels_map=json.load(file)
    def shot(self):
        """Capture and return one frame from the default camera (BGR ndarray)."""
        #log.info('Shoting')
        # capture camera
        cap = cv2.VideoCapture(0)
        # Read an image.
        #frame = cv.imread('input.jpg')
        #if frame is None:
        #    raise Exception('Image not found!')
        ret, frame = cap.read()
        # Display the resulting frame
        #Save the frame to an image file.
        #cv.imwrite('input.jpg', frame)
        # When everything done, release the capture
        cap.release()
        cv2.destroyAllWindows()
        return frame
    def telegram_send(self, fig=None, text=None, key='8884910787382816523'):
        """Send a figure and/or a text message via TelePyth.

        NOTE(review): stores the client in a module-level global `tp` and
        recreates it on every call — confirm this is intended.
        """
        global tp
        tp = TelepythClient(key)
        if fig is not None:
            tp.send_figure(fig)
        if text is not None:
            tp.send_text(text)
    def classify(self, image):
        """Classify a BGR image and return the top-5 predictions.

        Returns a list of 5 formatted strings "label<tab>probability".
        NOTE(review): the spacing variables used in the return statement
        leak out of the top-10 loop above, so all five strings reuse the
        padding computed for the *last* ranked label — confirm intended.
        """
        # Read and pre-process input images
        n, c, h, w = self.net.inputs[self.input_blob].shape
        images = np.ndarray(shape=(n, c, h, w))
        #image = cv2.imread(self.input[0])
        # Resize to the network's expected spatial size if needed.
        if image.shape[:-1] != (h, w):
            #log.warning("Image {} is resized from {} to {}".format(self.input[0], image.shape[:-1], (h, w)))
            image = cv2.resize(image, (w, h))
        image = image.transpose((2, 0, 1))  # Change data layout from HWC to CHW
        images[0] = image
        #log.info("Batch size is {}".format(n))
        # Start sync inference
        #log.info("Starting inference in synchronous mode")
        res = self.exec_net.infer(inputs={self.input_blob: images})
        # Processing output blob
        #log.info("Processing output blob")
        res = res[self.out_blob]
        #log.info("Top {} results: ".format(self.number_top))
        classid_str = "classid"
        probability_str = "probability"
        for i, probs in enumerate(res):
            probs = np.squeeze(probs)
            # Indices of the self.number_top highest scores, descending.
            top_ind = np.argsort(probs)[-self.number_top:][::-1]
            #print("Image {}\n".format(self.input[i]))
            #print(classid_str, probability_str)
            #print("{} {}".format('-' * len(classid_str), '-' * len(probability_str)))
            # `id` shadows the builtin; kept as-is to preserve behavior.
            for id in top_ind:
                det_label = self.labels_map[str(id)][1] if self.labels_map else "{}".format(id)
                label_length = len(det_label)
                # Center the label within the "classid" column width.
                space_num_before = (len(classid_str) - label_length) // 2
                space_num_after = len(classid_str) - (space_num_before + label_length) + 2
                space_num_before_prob = (len(probability_str) - len(str(probs[id]))) // 2
                #print("{}{}\t{}{}{:.7f}".format(' ' * space_num_before, det_label,
                #' ' * space_num_after, ' ' * space_num_before_prob,
                #probs[id]))
        #print("\n")
        #telegram_send(text="%s with p: %f"%(self.labels_map[str(0)][1], probs[0]))
        return ["{}{}\t{}{}{:.7f}".format(' ' * space_num_before, self.labels_map[str(top_ind[0])][1] if self.labels_map else "{}".format(top_ind[0]),
                          ' ' * space_num_after, ' ' * space_num_before_prob,
                          probs[top_ind[0]]),
                "{}{}\t{}{}{:.7f}".format(' ' * space_num_before, self.labels_map[str(top_ind[1])][1] if self.labels_map else "{}".format(top_ind[0]),
                          ' ' * space_num_after, ' ' * space_num_before_prob,
                          probs[top_ind[1]]),
                "{}{}\t{}{}{:.7f}".format(' ' * space_num_before, self.labels_map[str(top_ind[2])][1] if self.labels_map else "{}".format(top_ind[0]),
                          ' ' * space_num_after, ' ' * space_num_before_prob,
                          probs[top_ind[2]]),
                "{}{}\t{}{}{:.7f}".format(' ' * space_num_before, self.labels_map[str(top_ind[3])][1] if self.labels_map else "{}".format(top_ind[0]),
                          ' ' * space_num_after, ' ' * space_num_before_prob,
                          probs[top_ind[3]]),
                "{}{}\t{}{}{:.7f}".format(' ' * space_num_before, self.labels_map[str(top_ind[4])][1] if self.labels_map else "{}".format(top_ind[0]),
                          ' ' * space_num_after, ' ' * space_num_before_prob,
                          probs[top_ind[4]])
        ]
    def process(self, frame):
        """Classify a frame and draw the top prediction onto it; returns the
        annotated frame (modified in place)."""
        classes = self.classify(frame)
        font = cv2.FONT_HERSHEY_SIMPLEX
        bottomLeftCornerOfText = (10,250)
        fontScale = 1
        fontColor = (50,50,255)  # BGR: red-ish
        lineType = 2
        cv2.putText(frame,classes[0],
            bottomLeftCornerOfText,
            font,
            fontScale,
            fontColor,
            lineType)
        return frame
if __name__ == '__main__':
    # Instantiating classifier() loads and compiles the network; exit with
    # its truthy value or 0 (the instance itself is discarded).
    sys.exit(classifier() or 0)
| [
"logging.basicConfig",
"telepyth.TelepythClient",
"os.path.splitext",
"cv2.putText",
"numpy.squeeze",
"numpy.argsort",
"openvino.inference_engine.IECore",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"numpy.ndarray",
"json.load",
"cv2.resize",
"logging.info"
] | [((1177, 1271), 'logging.basicConfig', 'log.basicConfig', ([], {'format': '"""[ %(levelname)s ] %(message)s"""', 'level': 'log.INFO', 'stream': 'sys.stdout'}), "(format='[ %(levelname)s ] %(message)s', level=log.INFO,\n stream=sys.stdout)\n", (1192, 1271), True, 'import logging as log\n'), ((1462, 1499), 'logging.info', 'log.info', (['"""Creating Inference Engine"""'], {}), "('Creating Inference Engine')\n", (1470, 1499), True, 'import logging as log\n'), ((1513, 1521), 'openvino.inference_engine.IECore', 'IECore', ([], {}), '()\n', (1519, 1521), False, 'from openvino.inference_engine import IENetwork, IECore\n'), ((1892, 1925), 'logging.info', 'log.info', (['"""Preparing input blobs"""'], {}), "('Preparing input blobs')\n", (1900, 1925), True, 'import logging as log\n'), ((2126, 2165), 'logging.info', 'log.info', (['"""Loading model to the plugin"""'], {}), "('Loading model to the plugin')\n", (2134, 2165), True, 'import logging as log\n'), ((2484, 2503), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (2500, 2503), False, 'import cv2\n'), ((2882, 2905), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2903, 2905), False, 'import cv2\n'), ((3036, 3055), 'telepyth.TelepythClient', 'TelepythClient', (['key'], {}), '(key)\n', (3050, 3055), False, 'from telepyth import TelepythClient\n'), ((3338, 3368), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(n, c, h, w)'}), '(shape=(n, c, h, w))\n', (3348, 3368), True, 'import numpy as np\n'), ((6582, 6678), 'cv2.putText', 'cv2.putText', (['frame', 'classes[0]', 'bottomLeftCornerOfText', 'font', 'fontScale', 'fontColor', 'lineType'], {}), '(frame, classes[0], bottomLeftCornerOfText, font, fontScale,\n fontColor, lineType)\n', (6593, 6678), False, 'import cv2\n'), ((2379, 2394), 'json.load', 'json.load', (['file'], {}), '(file)\n', (2388, 2394), False, 'import json\n'), ((3581, 3606), 'cv2.resize', 'cv2.resize', (['image', '(w, h)'], {}), '(image, (w, h))\n', (3591, 3606), False, 'import 
cv2\n'), ((4232, 4249), 'numpy.squeeze', 'np.squeeze', (['probs'], {}), '(probs)\n', (4242, 4249), True, 'import numpy as np\n'), ((1319, 1346), 'os.path.splitext', 'os.path.splitext', (['model_xml'], {}), '(model_xml)\n', (1335, 1346), False, 'import os\n'), ((4272, 4289), 'numpy.argsort', 'np.argsort', (['probs'], {}), '(probs)\n', (4282, 4289), True, 'import numpy as np\n')] |
'''
@Author: <NAME>
@Date: 2021-01-07 15:04:21
@Description: Training entry point for the mixed (CENTIME) model.
@LastEditTime: 2021-02-06 22:40:20
'''
import os
import numpy as np
import time
import torch
from torch import nn, optim
from TrafficFlowClassification.TrafficLog.setLog import logger
from TrafficFlowClassification.utils.setConfig import setup_config
# 下面是一些可以使用的模型
from TrafficFlowClassification.models.resnet1d_ae import resnet_AE
from TrafficFlowClassification.data.dataLoader import data_loader
from TrafficFlowClassification.data.tensordata import get_tensor_data
from TrafficFlowClassification.utils.helper import adjust_learning_rate, save_checkpoint
from TrafficFlowClassification.utils.evaluate_tools import display_model_performance_metrics
# 针对这个训练修改的 train process
from TrafficFlowClassification.utils.helper import AverageMeter, accuracy
# Per-feature mean of the 26 statistical features, precomputed over the
# training set; used below to standardise `statistic` inputs.
mean_val = np.array([2.86401660e-03, 0.00000000e+00, 3.08146750e-03, 1.17455448e-02,
                     5.75561597e-03, 6.91365004e-04, 6.64955585e-02, 2.41380099e-02,
                     9.75861990e-01, 0.00000000e+00, 2.89814456e+02, 6.42617944e+01,
                     6.89227965e+00, 2.56964887e+02, 1.36799462e+02, 9.32648320e+01,
                     7.83185943e+01, 1.32048335e+02, 2.09555592e+01, 1.70122810e-02,
                     6.28544986e+00, 3.27195426e-03, 3.60230735e+01, 9.15340653e+00,
                     2.17694894e-06, 7.32748605e+01])

# Per-feature standard deviation of the same features; 0.001 is added to
# every entry to avoid division by zero for (near-)constant features.
std_val = np.array([3.44500263e-02, 0.00000000e+00, 3.09222563e-02, 8.43027570e-02,
                    4.87519125e-02, 1.48120354e-02, 2.49138903e-01, 1.53477827e-01,
                    1.53477827e-01, 0.00000000e+00, 8.48196659e+02, 1.94163550e+02,
                    1.30259798e+02, 7.62370125e+02, 4.16966374e+02, 1.25455838e+02,
                    2.30658312e+01, 8.78612984e+02, 1.84367543e+02, 1.13978421e-01,
                    1.19289813e+02, 1.45965914e-01, 8.76535415e+02, 1.78680040e+02,
                    4.91812227e-04, 4.40298923e+03]) + 0.001

# Move both normalisation tensors to the training device once, up front.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
mean_val = torch.from_numpy(mean_val).float().to(device)
std_val = torch.from_numpy(std_val).float().to(device)
def train_process(train_loader, model, alpha, criterion_c, criterion_r, optimizer, epoch, device, print_freq):
    """Run one training epoch of the mixed classification + reconstruction model.

    Args:
        train_loader (DataLoader): yields (pcap, statistic, target) batches.
        model: network returning (classification logits, reconstructed statistics).
        alpha (float): weight of the classification loss in the combined loss.
        criterion_c: classification loss (e.g. cross-entropy).
        criterion_r: reconstruction loss (e.g. L1).
        optimizer: torch optimizer for `model`.
        epoch (int): current epoch index (for logging only).
        device (torch.device): device to run on.
        print_freq (int): log every `print_freq` batches.
    """
    c_loss = AverageMeter()
    r_loss = AverageMeter()
    losses = AverageMeter() # combined-loss tracker over this train loader
    top1 = AverageMeter() # accuracy tracker over this train loader
    model.train() # switch to training mode
    for i, (pcap, statistic, target) in enumerate(train_loader):
        pcap = (pcap/255).to(device) # normalise raw bytes to [0, 1]
        statistic = statistic.to(device)
        statistic = (statistic - mean_val)/std_val # standardise the statistic features first
        target = target.to(device)
        classific_result, fake_statistic = model(pcap, statistic) # classification and reconstruction outputs
        loss_c = criterion_c(classific_result, target) # classification loss
        loss_r = criterion_r(statistic, fake_statistic) # reconstruction loss
        loss = alpha * loss_c + loss_r # combine the two losses
        # compute accuracy, record loss and accuracy
        prec1 = accuracy(classific_result.data, target)
        c_loss.update(loss_c.item(), pcap.size(0))
        r_loss.update(loss_r.item(), pcap.size(0))
        losses.update(loss.item(), pcap.size(0))
        top1.update(prec1[0].item(), pcap.size(0))
        # backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i + 1) % print_freq == 0:
            logger.info(
                'Epoch: [{0}][{1}/{2}], Loss {loss.val:.4f} ({loss.avg:.4f}), Loss_c {loss_c.val:.4f} ({loss_c.avg:.4f}), Loss_r {loss_r.val:.4f} ({loss_r.avg:.4f}), Prec@1 {top1.val:.3f} ({top1.avg:.3f})'
                .format(epoch,
                        i,
                        len(train_loader),
                        loss=losses,
                        loss_c=c_loss,
                        loss_r=r_loss,
                        top1=top1))
def validate_process(val_loader, model, device, print_freq):
    """Evaluate the model on a validation loader and return average top-1 accuracy.

    Args:
        val_loader (DataLoader): yields (pcap, statistic, target) batches.
        model: network returning (classification logits, reconstructed statistics).
        device (torch.device): device to run on.
        print_freq (int): log every `print_freq` batches.

    Returns:
        float: average top-1 accuracy over the whole loader.
    """
    top1 = AverageMeter()
    model.eval()  # switch to evaluate mode
    for i, (pcap, statistic, target) in enumerate(val_loader):
        pcap = (pcap/255).to(device) # normalise raw bytes to [0, 1]
        statistic = statistic.to(device)
        statistic = (statistic - mean_val)/std_val # standardise the statistic features first
        target = target.to(device)
        with torch.no_grad():
            output, _ = model(pcap, statistic) # compute output
        # measure accuracy and record loss
        prec1 = accuracy(output.data, target)
        top1.update(prec1[0].item(), pcap.size(0))
        if (i + 1) % print_freq == 0:
            logger.info('Test: [{0}/{1}], Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.
                        format(i, len(val_loader), top1=top1))
    logger.info(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
    return top1.avg
def CENTIME_train_pipeline(alpha=1):
    """Train and evaluate the CENTIME mixed model end to end.

    Args:
        alpha (float): weight of the classification loss in the combined
            classification + reconstruction loss. BUG FIX: this parameter now
            has a default so the module's ``__main__`` guard (which calls the
            pipeline with no arguments) no longer raises TypeError; existing
            callers passing ``alpha`` explicitly are unaffected.
    """
    cfg = setup_config()  # load the config file
    logger.info(cfg)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info('是否使用 GPU 进行训练, {}'.format(device))
    model_path = os.path.join(cfg.train.model_dir, cfg.train.model_name)  # model checkpoint path
    model = resnet_AE(model_path, pretrained=False, num_classes=12).to(device)  # initialise the model
    criterion_c = nn.CrossEntropyLoss()  # classification loss
    criterion_r = nn.L1Loss()  # reconstruction loss
    optimizer = optim.Adam(model.parameters(), lr=cfg.train.lr)  # optimizer
    logger.info('成功初始化模型.')
    train_loader = data_loader(
        pcap_file=cfg.train.train_pcap,
        label_file=cfg.train.train_label,
        statistic_file=cfg.train.train_statistic,
        trimed_file_len=cfg.train.TRIMED_FILE_LEN)  # training dataloader
    test_loader = data_loader(
        pcap_file=cfg.train.test_pcap,
        label_file=cfg.train.test_label,
        statistic_file=cfg.train.test_statistic,
        trimed_file_len=cfg.train.TRIMED_FILE_LEN)  # test dataloader
    logger.info('成功加载数据集.')
    best_prec1 = 0
    for epoch in range(cfg.train.epochs):
        adjust_learning_rate(optimizer, epoch, cfg.train.lr)  # decay the learning rate
        train_process(train_loader, model, alpha, criterion_c, criterion_r, optimizer, epoch, device, 80)  # train for one epoch
        prec1 = validate_process(test_loader, model, device, 20)  # evaluate on validation set
        # remember the best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        # keep the best model on disk
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict()
            }, is_best, model_path)
    # Test phase: compute detailed per-class accuracy with the best model.
    logger.info('进入测试模式.')
    model = resnet_AE(model_path, pretrained=True, num_classes=12).to(device)  # reload the best model
    index2label = {j: i for i, j in cfg.test.label2index.items()}  # index -> label mapping
    label_list = [index2label.get(i) for i in range(12)]  # the 12 class labels
    pcap_data, statistic_data, label_data = get_tensor_data(
        pcap_file=cfg.train.test_pcap,
        statistic_file=cfg.train.test_statistic,
        label_file=cfg.train.test_label,
        trimed_file_len=cfg.train.TRIMED_FILE_LEN)  # numpy -> tensors
    pcap_data = (pcap_data/255).to(device)  # normalised traffic bytes
    statistic_data = (statistic_data.to(device) - mean_val)/std_val  # standardise the statistics
    y_pred, _ = model(pcap_data, statistic_data)  # run the model
    _, pred = y_pred.topk(1, 1, largest=True, sorted=True)
    Y_data_label = [index2label.get(i.tolist()) for i in label_data]  # human-readable ground truth
    pred_label = [index2label.get(i.tolist()) for i in pred.view(-1).cpu().detach()]
    logger.info('Alpha:{}'.format(alpha))
    display_model_performance_metrics(true_labels=Y_data_label,
                                      predicted_labels=pred_label,
                                      classes=label_list)
    logger.info('Finished! (* ̄︶ ̄)')
def alpha_experiment_CENTIME():
    """Sweep the loss-weighting hyperparameter ``alpha`` over a fixed grid.

    For each candidate value the full CENTIME training pipeline is run from
    scratch, with a short pause between runs.
    """
    for weight in (0, 0.001, 0.01, 0.1, 0.5, 1, 5, 10, 100):
        CENTIME_train_pipeline(weight)
        time.sleep(10)  # brief pause between consecutive training runs
if __name__ == "__main__":
    # Run a single training pass with the default alpha (smoke test).
    CENTIME_train_pipeline() # for testing
| [
"TrafficFlowClassification.utils.helper.adjust_learning_rate",
"torch.nn.CrossEntropyLoss",
"TrafficFlowClassification.data.tensordata.get_tensor_data",
"torch.nn.L1Loss",
"TrafficFlowClassification.utils.setConfig.setup_config",
"os.path.join",
"TrafficFlowClassification.data.dataLoader.data_loader",
... | [((854, 1205), 'numpy.array', 'np.array', (['[0.0028640166, 0.0, 0.0030814675, 0.0117455448, 0.00575561597, \n 0.000691365004, 0.0664955585, 0.0241380099, 0.97586199, 0.0, 289.814456,\n 64.2617944, 6.89227965, 256.964887, 136.799462, 93.264832, 78.3185943, \n 132.048335, 20.9555592, 0.017012281, 6.28544986, 0.00327195426, \n 36.0230735, 9.15340653, 2.17694894e-06, 73.2748605]'], {}), '([0.0028640166, 0.0, 0.0030814675, 0.0117455448, 0.00575561597, \n 0.000691365004, 0.0664955585, 0.0241380099, 0.97586199, 0.0, 289.814456,\n 64.2617944, 6.89227965, 256.964887, 136.799462, 93.264832, 78.3185943, \n 132.048335, 20.9555592, 0.017012281, 6.28544986, 0.00327195426, \n 36.0230735, 9.15340653, 2.17694894e-06, 73.2748605])\n', (862, 1205), True, 'import numpy as np\n'), ((1334, 1678), 'numpy.array', 'np.array', (['[0.0344500263, 0.0, 0.0309222563, 0.084302757, 0.0487519125, 0.0148120354, \n 0.249138903, 0.153477827, 0.153477827, 0.0, 848.196659, 194.16355, \n 130.259798, 762.370125, 416.966374, 125.455838, 23.0658312, 878.612984,\n 184.367543, 0.113978421, 119.289813, 0.145965914, 876.535415, 178.68004,\n 0.000491812227, 4402.98923]'], {}), '([0.0344500263, 0.0, 0.0309222563, 0.084302757, 0.0487519125, \n 0.0148120354, 0.249138903, 0.153477827, 0.153477827, 0.0, 848.196659, \n 194.16355, 130.259798, 762.370125, 416.966374, 125.455838, 23.0658312, \n 878.612984, 184.367543, 0.113978421, 119.289813, 0.145965914, \n 876.535415, 178.68004, 0.000491812227, 4402.98923])\n', (1342, 1678), True, 'import numpy as np\n'), ((2480, 2494), 'TrafficFlowClassification.utils.helper.AverageMeter', 'AverageMeter', ([], {}), '()\n', (2492, 2494), False, 'from TrafficFlowClassification.utils.helper import AverageMeter, accuracy\n'), ((2508, 2522), 'TrafficFlowClassification.utils.helper.AverageMeter', 'AverageMeter', ([], {}), '()\n', (2520, 2522), False, 'from TrafficFlowClassification.utils.helper import AverageMeter, accuracy\n'), ((2536, 2550), 
'TrafficFlowClassification.utils.helper.AverageMeter', 'AverageMeter', ([], {}), '()\n', (2548, 2550), False, 'from TrafficFlowClassification.utils.helper import AverageMeter, accuracy\n'), ((2593, 2607), 'TrafficFlowClassification.utils.helper.AverageMeter', 'AverageMeter', ([], {}), '()\n', (2605, 2607), False, 'from TrafficFlowClassification.utils.helper import AverageMeter, accuracy\n'), ((4200, 4214), 'TrafficFlowClassification.utils.helper.AverageMeter', 'AverageMeter', ([], {}), '()\n', (4212, 4214), False, 'from TrafficFlowClassification.utils.helper import AverageMeter, accuracy\n'), ((5094, 5108), 'TrafficFlowClassification.utils.setConfig.setup_config', 'setup_config', ([], {}), '()\n', (5106, 5108), False, 'from TrafficFlowClassification.utils.setConfig import setup_config\n'), ((5129, 5145), 'TrafficFlowClassification.TrafficLog.setLog.logger.info', 'logger.info', (['cfg'], {}), '(cfg)\n', (5140, 5145), False, 'from TrafficFlowClassification.TrafficLog.setLog import logger\n'), ((5291, 5346), 'os.path.join', 'os.path.join', (['cfg.train.model_dir', 'cfg.train.model_name'], {}), '(cfg.train.model_dir, cfg.train.model_name)\n', (5303, 5346), False, 'import os\n'), ((5462, 5483), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (5481, 5483), False, 'from torch import nn, optim\n'), ((5514, 5525), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (5523, 5525), False, 'from torch import nn, optim\n'), ((5616, 5639), 'TrafficFlowClassification.TrafficLog.setLog.logger.info', 'logger.info', (['"""成功初始化模型."""'], {}), "('成功初始化模型.')\n", (5627, 5639), False, 'from TrafficFlowClassification.TrafficLog.setLog import logger\n'), ((5660, 5832), 'TrafficFlowClassification.data.dataLoader.data_loader', 'data_loader', ([], {'pcap_file': 'cfg.train.train_pcap', 'label_file': 'cfg.train.train_label', 'statistic_file': 'cfg.train.train_statistic', 'trimed_file_len': 'cfg.train.TRIMED_FILE_LEN'}), '(pcap_file=cfg.train.train_pcap, 
label_file=cfg.train.\n train_label, statistic_file=cfg.train.train_statistic, trimed_file_len=\n cfg.train.TRIMED_FILE_LEN)\n', (5671, 5832), False, 'from TrafficFlowClassification.data.dataLoader import data_loader\n'), ((5898, 6066), 'TrafficFlowClassification.data.dataLoader.data_loader', 'data_loader', ([], {'pcap_file': 'cfg.train.test_pcap', 'label_file': 'cfg.train.test_label', 'statistic_file': 'cfg.train.test_statistic', 'trimed_file_len': 'cfg.train.TRIMED_FILE_LEN'}), '(pcap_file=cfg.train.test_pcap, label_file=cfg.train.test_label,\n statistic_file=cfg.train.test_statistic, trimed_file_len=cfg.train.\n TRIMED_FILE_LEN)\n', (5909, 6066), False, 'from TrafficFlowClassification.data.dataLoader import data_loader\n'), ((6119, 6142), 'TrafficFlowClassification.TrafficLog.setLog.logger.info', 'logger.info', (['"""成功加载数据集."""'], {}), "('成功加载数据集.')\n", (6130, 6142), False, 'from TrafficFlowClassification.TrafficLog.setLog import logger\n'), ((6951, 6973), 'TrafficFlowClassification.TrafficLog.setLog.logger.info', 'logger.info', (['"""进入测试模式."""'], {}), "('进入测试模式.')\n", (6962, 6973), False, 'from TrafficFlowClassification.TrafficLog.setLog import logger\n'), ((7268, 7441), 'TrafficFlowClassification.data.tensordata.get_tensor_data', 'get_tensor_data', ([], {'pcap_file': 'cfg.train.test_pcap', 'statistic_file': 'cfg.train.test_statistic', 'label_file': 'cfg.train.test_label', 'trimed_file_len': 'cfg.train.TRIMED_FILE_LEN'}), '(pcap_file=cfg.train.test_pcap, statistic_file=cfg.train.\n test_statistic, label_file=cfg.train.test_label, trimed_file_len=cfg.\n train.TRIMED_FILE_LEN)\n', (7283, 7441), False, 'from TrafficFlowClassification.data.tensordata import get_tensor_data\n'), ((7950, 8062), 'TrafficFlowClassification.utils.evaluate_tools.display_model_performance_metrics', 'display_model_performance_metrics', ([], {'true_labels': 'Y_data_label', 'predicted_labels': 'pred_label', 'classes': 'label_list'}), '(true_labels=Y_data_label,\n 
predicted_labels=pred_label, classes=label_list)\n', (7983, 8062), False, 'from TrafficFlowClassification.utils.evaluate_tools import display_model_performance_metrics\n'), ((8144, 8175), 'TrafficFlowClassification.TrafficLog.setLog.logger.info', 'logger.info', (['"""Finished! (* ̄︶ ̄)"""'], {}), "('Finished! (* ̄︶ ̄)')\n", (8155, 8175), False, 'from TrafficFlowClassification.TrafficLog.setLog import logger\n'), ((1845, 1870), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1868, 1870), False, 'import torch\n'), ((3267, 3306), 'TrafficFlowClassification.utils.helper.accuracy', 'accuracy', (['classific_result.data', 'target'], {}), '(classific_result.data, target)\n', (3275, 3306), False, 'from TrafficFlowClassification.utils.helper import AverageMeter, accuracy\n'), ((6213, 6265), 'TrafficFlowClassification.utils.helper.adjust_learning_rate', 'adjust_learning_rate', (['optimizer', 'epoch', 'cfg.train.lr'], {}), '(optimizer, epoch, cfg.train.lr)\n', (6233, 6265), False, 'from TrafficFlowClassification.utils.helper import adjust_learning_rate, save_checkpoint\n'), ((8344, 8358), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (8354, 8358), False, 'import time\n'), ((4538, 4553), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4551, 4553), False, 'import torch\n'), ((4689, 4718), 'TrafficFlowClassification.utils.helper.accuracy', 'accuracy', (['output.data', 'target'], {}), '(output.data, target)\n', (4697, 4718), False, 'from TrafficFlowClassification.utils.helper import AverageMeter, accuracy\n'), ((5183, 5208), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5206, 5208), False, 'import torch\n'), ((5368, 5423), 'TrafficFlowClassification.models.resnet1d_ae.resnet_AE', 'resnet_AE', (['model_path'], {'pretrained': '(False)', 'num_classes': '(12)'}), '(model_path, pretrained=False, num_classes=12)\n', (5377, 5423), False, 'from TrafficFlowClassification.models.resnet1d_ae import resnet_AE\n'), ((6986, 7040), 
'TrafficFlowClassification.models.resnet1d_ae.resnet_AE', 'resnet_AE', (['model_path'], {'pretrained': '(True)', 'num_classes': '(12)'}), '(model_path, pretrained=True, num_classes=12)\n', (6995, 7040), False, 'from TrafficFlowClassification.models.resnet1d_ae import resnet_AE\n'), ((1894, 1920), 'torch.from_numpy', 'torch.from_numpy', (['mean_val'], {}), '(mean_val)\n', (1910, 1920), False, 'import torch\n'), ((1950, 1975), 'torch.from_numpy', 'torch.from_numpy', (['std_val'], {}), '(std_val)\n', (1966, 1975), False, 'import torch\n')] |
"""10. Introducing Decord: an efficient video reader
====================================================
Training deep neural networks on videos is very time consuming. For example, training a state-of-the-art SlowFast network
on Kinetics400 dataset using a server with 8 V100 GPUs takes more than 10 days. Slow training causes long research cycles
and is not friendly for new comers and students to work on video related problems. There are several reasons causing the slowness,
big batch of data, inefficiency of video reader and huge model computation.
Another troubling matter is the complex data preprocessing and huge storage cost. Take Kinetics400 dataset as an example, this dataset
has about 240K training and 20K validation videos. All the videos take 450G disk space.
However, if we decode the videos to frames and use image loader to train the model, the decoded frames will take 6.8T disk space, which
is unacceptable to most people. In addition, the decoding process is slow. It takes 1.5 days using 60 workers to decode all the videos to frames.
If we use 8 workers (as in common laptop or standard workstation), it will take a week to perform such data preprocessing even before your actual training.
Given the challenges mentioned above, in this tutorial, we introduce a new video reader, `Decord <https://github.com/zhreshold/decord>`_.
Decord is efficient and flexible. It provides convenient video slicing methods based on a wrapper on top of hardware accelerated video decoders,
e.g. FFMPEG/LibAV and Nvidia Codecs. It is designed to handle awkward video shuffling experience in order to provide smooth experiences
similar to random image loader for deep learning. In addition, it works cross-platform, e.g., Linux, Windows and Mac OS.
With the new video reader, you don't need to decode videos to frames anymore, just start training on your video dataset with even higher training speed.
"""
########################################################################
# Install
# -------
#
# Decord is easy to install, just
# ::
#
# pip install decord
########################################################################
# Usage
# -----
#
# We provide some usage cases here to get you started. For complete API, please refer to official documentation.
################################################################
# Suppose we want to read a video. Let's download the example video first.
from gluoncv import utils
url = 'https://github.com/bryanyzhu/tiny-ucf101/raw/master/abseiling_k400.mp4'
video_fname = utils.download(url)
from decord import VideoReader
vr = VideoReader(video_fname)
################################################################
# If we want to load the video in a specific dimension so that it can be fed into a CNN for processing,
vr = VideoReader(video_fname, width=320, height=256)
################################################################
# Now we have loaded the video, if we want to know how many frames are there in the video,
duration = len(vr)
print('The video contains %d frames' % duration)
################################################################
# If we want to access the frame at index 9 (i.e. the 10th frame),
frame = vr[9]
print(frame.shape)
################################################################
# For deep learning, usually we want to get multiple frames at once. Now you can use ``get_batch`` function,
# Suppose we want to get a 32-frame video clip by skipping one frame in between,
frame_id_list = range(0, 64, 2)
frames = vr.get_batch(frame_id_list).asnumpy()
print(frames.shape)
################################################################
# There is another advanced functionality, you can get all the key frames as below,
key_indices = vr.get_key_indices()
key_frames = vr.get_batch(key_indices)
print(key_frames.shape)
################################################################
# Pretty flexible, right? Try it on your videos.
################################################################
# Speed comparison
# ----------------
################################################################
# Now we want to compare its speed with Opencv VideoCapture to demonstrate its efficiency.
# Let's load the same video and get all the frames randomly using both decoders to compare their performance.
# We will run the loading for 11 times: use the first one as warming up, and average the rest 10 runs as the average speed.
import cv2
import time
import numpy as np
# Build a random access pattern covering every frame index in the video.
frames_list = np.arange(duration)
np.random.shuffle(frames_list)
# Decord: 11 runs in total; run 0 warms up caches, runs 1-10 are timed and averaged.
for run_idx in range(11):
    if run_idx == 1:
        start_time = time.time()
    reader = VideoReader(video_fname)
    frames = reader.get_batch(frames_list)
end_time = time.time()
print('Decord takes %4.4f seconds.' % ((end_time - start_time) / 10))
# OpenCV: same protocol, but each frame must be fetched individually via seek + read.
for run_idx in range(11):
    if run_idx == 1:
        start_time = time.time()
    capture = cv2.VideoCapture(video_fname)
    for target_frame in frames_list:
        capture.set(1, target_frame)
        _, frame = capture.read()
    capture.release()
end_time = time.time()
print('OpenCV takes %4.4f seconds.' % ((end_time - start_time) / 10))
################################################################
# We can see that Decord is 2x faster than OpenCV VideoCapture.
# We also compare with `Pyav container <https://github.com/mikeboers/PyAV>`_ and demonstrate 2x speed up as well.
#
# In conclusion, Decord is an efficient and flexible video reader. It supports get_batch, GPU loading, fast random access, etc, which is
# perfectly designed for training video deep neural networks. We use Decord in our video model training for large-scale datasets and observe
# similar speed as using image loaders on decoded video frames. This significantly reduces the data preprocessing time and the storage
# cost for large-scale video datasets.
| [
"decord.VideoReader",
"cv2.VideoCapture",
"gluoncv.utils.download",
"time.time",
"numpy.arange",
"numpy.random.shuffle"
] | [((2546, 2565), 'gluoncv.utils.download', 'utils.download', (['url'], {}), '(url)\n', (2560, 2565), False, 'from gluoncv import utils\n'), ((2603, 2627), 'decord.VideoReader', 'VideoReader', (['video_fname'], {}), '(video_fname)\n', (2614, 2627), False, 'from decord import VideoReader\n'), ((2804, 2851), 'decord.VideoReader', 'VideoReader', (['video_fname'], {'width': '(320)', 'height': '(256)'}), '(video_fname, width=320, height=256)\n', (2815, 2851), False, 'from decord import VideoReader\n'), ((4492, 4511), 'numpy.arange', 'np.arange', (['duration'], {}), '(duration)\n', (4501, 4511), True, 'import numpy as np\n'), ((4512, 4542), 'numpy.random.shuffle', 'np.random.shuffle', (['frames_list'], {}), '(frames_list)\n', (4529, 4542), True, 'import numpy as np\n'), ((4719, 4730), 'time.time', 'time.time', ([], {}), '()\n', (4728, 4730), False, 'import time\n'), ((5052, 5063), 'time.time', 'time.time', ([], {}), '()\n', (5061, 5063), False, 'import time\n'), ((4637, 4661), 'decord.VideoReader', 'VideoReader', (['video_fname'], {}), '(video_fname)\n', (4648, 4661), False, 'from decord import VideoReader\n'), ((4890, 4919), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_fname'], {}), '(video_fname)\n', (4906, 4919), False, 'import cv2\n'), ((4609, 4620), 'time.time', 'time.time', ([], {}), '()\n', (4618, 4620), False, 'import time\n'), ((4865, 4876), 'time.time', 'time.time', ([], {}), '()\n', (4874, 4876), False, 'import time\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from sklearn.base import BaseEstimator, MetaEstimatorMixin
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.exceptions import NotFittedError
import numpy as np
import scipy.sparse as sp
try:
from .base import RetriEvalMixin
except SystemError:
from base import RetriEvalMixin
class Retrieval(BaseEstimator, MetaEstimatorMixin, RetriEvalMixin):
    """Meta estimator for an end to end information retrieval process.

    Chains an optional query expansion step, an optional candidate matching
    step, and a retrieval model into one fit/query pipeline.
    """

    def __init__(self, retrieval_model, matching=None,
                 query_expansion=None, name='RM',
                 labels=None):
        """Assemble the retrieval pipeline.

        :retrieval_model: A retrieval model satisfying fit and query.
        :matching: A matching operation satisfying fit and predict.
        :query_expansion: A query operation satisfying fit and transform.
        :name: Display name of this retrieval pipeline.
        :labels: Pre-defined mapping of indices to identifiers, will be inferred during fit, if not given.
        """
        BaseEstimator.__init__(self)
        self._retrieval_model = retrieval_model
        self._matching = matching
        self._query_expansion = query_expansion
        self.name = name
        # Stored as ndarray so that `query` can fancy-index into it.
        self.labels_ = np.asarray(labels) if labels is not None else None

    def fit(self, X, y=None):
        """ Fit vectorizer to raw_docs, transform them and fit the
        retrieval_model. Matching and Query expansion are fit separatly on the
        `raw_docs` to allow dedicated analysis.

        :X: Raw documents to index.
        :y: Optional identifiers for the documents (one per document).
        """
        assert y is None or len(X) == len(y)
        if self.labels_ is None:
            # If labels were not specified, infer them from y
            self.labels_ = np.asarray(y) if y is not None else np.arange(len(X))
        matching = self._matching
        query_expansion = self._query_expansion
        retrieval_model = self._retrieval_model
        # Each component is fit on the raw documents independently.
        if query_expansion:
            query_expansion.fit(X)
        if matching:
            matching.fit(X)
        retrieval_model.fit(X)
        return self

    def query(self, q, k=None, return_scores=False):
        """Retrieve (at most `k`) labels of documents matching query `q`.

        :q: The query.
        :k: Optional cut-off on the number of results.
        :return_scores: If True, also return the retrieval scores
            (requires support by the underlying retrieval model).
        :raises NotFittedError: If `fit` has not been called yet.
        """
        labels = self.labels_
        if labels is None:
            raise NotFittedError
        matching = self._matching
        retrieval_model = self._retrieval_model
        query_expansion = self._query_expansion
        if query_expansion:
            q = query_expansion.transform(q)
        if matching:
            ind = matching.predict(q)
            # print('{} documents matched.'.format(len(ind)))
            if len(ind) == 0:
                # No candidates matched: short-circuit with empty results.
                if return_scores:
                    return [], []
                else:
                    return []
            labels = labels[ind]  # Reduce our own view
        else:
            ind = None
        # pass matched indices to query method of retrieval model
        # The retrieval model is assumed to reduce its representation of X
        # to the given indices and the returned indices are relative to the
        # reduction
        if return_scores:
            try:
                ind, scores = retrieval_model.query(q, k=k, indices=ind,
                                                    return_scores=return_scores)
            except TypeError:
                raise NotImplementedError("Underlying retrieval model does not support `return_scores`")
            if k is not None:
                ind = ind[:k]
                scores = scores[:k]
            return labels[ind], scores
        else:
            retrieved_indices = retrieval_model.query(q, k=k, indices=ind)
            if k is not None:
                # Just assert that it did not cheat
                retrieved_indices = retrieved_indices[:k]
            return labels[retrieved_indices]  # Unfold retrieved indices
class EmbeddedVectorizer(TfidfVectorizer):
    """Tf-idf vectorizer that maps documents into a word-embedding space.

    The vocabulary is taken from the embedding, so every tf-idf dimension
    corresponds to exactly one word vector; ``transform`` then projects the
    sparse tf-idf weights through the embedding matrix.
    """

    def __init__(self, embedding, **kwargs):
        """Store the embedding and initialise the tf-idf vocabulary from it.

        `embedding` must be the word-vector object itself (e.g. gensim's
        ``model.wv``), exposing ``index2word`` and ``vectors``.
        """
        if not hasattr(embedding, 'index2word'):
            raise ValueError("No `index2word` attribute found."
                             " Supply the word vectors (`.wv`) instead.")
        if not hasattr(embedding, 'vectors'):
            raise ValueError("No `vectors` attribute found."
                             " Supply the word vectors (`.wv`) instead.")
        self.embedding = embedding
        print("Embedding shape:", embedding.vectors.shape)
        TfidfVectorizer.__init__(self, vocabulary=embedding.index2word,
                                 **kwargs)

    def fit(self, raw_docs, y=None):
        """Fit the underlying tf-idf model and return self for chaining."""
        super().fit(raw_docs)
        return self

    def transform(self, raw_documents, y=None):
        """Return dense document embeddings (tf-idf weighted word vectors)."""
        weights = super().transform(raw_documents)  # sparse tf-idf matrix
        return weights @ self.embedding.vectors

    def fit_transform(self, X, y=None):
        """Equivalent to calling ``fit`` followed by ``transform``."""
        return self.fit(X, y).transform(X, y)
| [
"sklearn.feature_extraction.text.TfidfVectorizer.__init__",
"numpy.asarray",
"sklearn.base.BaseEstimator.__init__"
] | [((1092, 1120), 'sklearn.base.BaseEstimator.__init__', 'BaseEstimator.__init__', (['self'], {}), '(self)\n', (1114, 1120), False, 'from sklearn.base import BaseEstimator, MetaEstimatorMixin\n'), ((4549, 4612), 'sklearn.feature_extraction.text.TfidfVectorizer.__init__', 'TfidfVectorizer.__init__', (['self'], {'vocabulary': 'vocabulary'}), '(self, vocabulary=vocabulary, **kwargs)\n', (4573, 4612), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((1300, 1318), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (1310, 1318), True, 'import numpy as np\n'), ((1757, 1770), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (1767, 1770), True, 'import numpy as np\n')] |
from encodings.utf_8 import encode
from enum import auto
from re import M
from tensorflow.keras import Model
from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose, ReLU, BatchNormalization, Flatten, Dense, Reshape, Activation, Concatenate
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import MeanSquaredError
import numpy as np
import os
import pickle
from mss.utils.dataloader import DataLoader
from mss.utils.visualisation import visualize_loss
from tensorflow.keras.utils import Progbar
import tensorflow as tf
# import keras as keras
from pathlib import Path
import random
from math import prod
class AutoEncoder():
"""
Autoencoder represents a Deep Convolutional autoencoder architecture with
mirrored encoder and decoder components.
- skip connections are added compared to Vanilla auto encoder -> changed model structure by building model only after encode+decode st it now concatenates like (unet)
"""
def __init__(self, input_shape, # width x height x nr channels (rgb) -> or [28 x 28 x 1] for black/white
# list of filter sizes for each layer [2,4,8 ] 1st layer 2x2, 2nd layer 4x4 etc..
conv_filters: list,
# list of kernel sizes for each layer [3,5,3 ] 1st layer 3x3, 2nd layer 5x5 etc..
conv_kernels: list,
# list of stride sizes for each layer [1,2,2 ] 1st layer 1x1, 2nd layer 2x2 etc..
conv_strides: list,
latent_space_dim): # int #number of dimensions of bottleneck
self.input_shape = input_shape
self.conv_filters = conv_filters
self.conv_kernels = conv_kernels
self.conv_strides = conv_strides
self.latent_space_dim = latent_space_dim
self.encoder = None
self.decoder = None
self.model = None
self._num_conv_layers = len(conv_filters) # dimension of amnt kernels
self._shape_before_bottleneck = None
self._model_input = None
# tf.compat.v1.disable_eager_execution() # works for random seed
tf.random.set_seed(1)
# self.weight_initializer = tf.initializers. TruncatedNormal(mean=0., stddev=1/1024)
self.weight_initializer = tf.keras.initializers.TruncatedNormal(
mean=0.0, stddev=0.05, seed=None
)
'''private and protected does not exist in python, so this is just convention, but not neccesary!'''
# _variables or _functions are protected variables/functions and can only be used in subclasses, but can be overwritten by subclasses
# __variables or __functions are private classes and can not EASILY be used in other classes/subclasses because the name does not show up on top!
self._build()
def save(self, save_folder="."):
print("saved:",save_folder)
self._create_folder_if_it_doesnt_exist(save_folder)
self._save_parameters(save_folder)
self._save_weights(save_folder)
def reconstruct(self, images):
latent_representations = self.encoder.predict(images)
reconstructed_images = self.decoder.predict(latent_representations)
return reconstructed_images, latent_representations
@classmethod
def load(cls, save_folder="."):
save_folder = "trained_models"/Path(save_folder)
parameters_path = os.path.join(save_folder, "parameters.pkl")
with open(parameters_path, "rb") as f:
parameters = pickle.load(f)
variational_auto_encoder = AutoEncoder(*parameters) # star for positional arguments!
weights_path = os.path.join(save_folder, "weights.h5")
variational_auto_encoder.load_weights(weights_path)
return variational_auto_encoder
def load_weights(self, weights_path):
self.model.load_weights(weights_path)
def _create_folder_if_it_doesnt_exist(self, folder):
folder = "trained_models"/Path(folder)
if not os.path.exists(folder):
os.makedirs(folder)
def _save_parameters(self, save_folder):
parameters = [
self.input_shape,
self.conv_filters,
self.conv_kernels,
self.conv_strides,
self.latent_space_dim
]
save_folder = "trained_models"/Path(save_folder)
save_path = os.path.join(save_folder, "parameters.pkl")
with open(save_path, "wb") as f:
pickle.dump(parameters, f)
def _save_weights(self, save_folder):
save_folder = "trained_models"/Path(save_folder)
save_path = os.path.join(save_folder, "weights.h5")
self.model.save_weights(save_path)
def summary(self, save_image=False):
# self.encoder.summary()
# self.decoder.summary()
import tensorflow
self.model.summary()
if save_image:
tensorflow.keras.utils.plot_model(self.model, "input_skip.png", show_shapes=True)
# keras.utils.plot_model(self.decoder, "decoder_model.png", show_shapes=True)
def compile(self, learning_rate=0.0001):
optimizer = Adam(learning_rate=learning_rate)
mse_loss = MeanSquaredError()
self.model.compile(optimizer=optimizer, loss=mse_loss)
def train(self,x_train,y_train,batch_size,num_epoch):
# since we try to reconstruct the input, the output y_train is basically also x_train
# y_train=x_train
self.model.fit(x_train, y_train,
batch_size=batch_size,
epochs=num_epoch,
shuffle=True)
def train_on_batch(self, batch_size, num_epoch):
metrics_names = ['train loss','mean loss','val_loss','mean val_loss']
self.dataloader = DataLoader(batch_size=batch_size,num_epoch=num_epoch)
self.loss = []
meanloss = 0
self.val_loss_m = []
meanloss_val = 0
val_loss2 = 0
total_train_loss = []
total_val_loss = []
try:
total_train_loss = list(np.load("visualisation/total_train_loss.npy"))
total_val_loss = list(np.load("visualisation/total_val_loss.npy"))
print("loaded loss files")
except:
print("no file of previous loss yet")
for epoch_nr in range(0, num_epoch):
pb_i = Progbar(self.dataloader.len_train_data, stateful_metrics=metrics_names)
print("\nepoch {}/{}".format(epoch_nr+1,num_epoch))
for batch_nr in range(self.dataloader.nr_batches):
# try:
x_train, y_train = self.dataloader.load_data(batch_nr=batch_nr)
loss = self.model.train_on_batch(x_train, y_train)
loss2 = float(str(loss))# [0:15])
self.loss.append(loss)
meanloss = np.mean(self.loss)
meanloss = float(str(meanloss))#[0:15])
if batch_nr % 6 == 0 :
x_val, y_val = self.dataloader.load_val(batch_nr=batch_nr)
# val_loss = self.model.train_on_batch(x_val, y_val)
# print("val loss 1",val_loss)
y_pred = self.model.predict(x_val)
y_pred = tf.convert_to_tensor(y_pred,dtype=tf.float32)
y_val = tf.cast(y_val, y_pred.dtype)
val_loss = K.mean(tf.math.squared_difference(y_pred, y_val), axis=-1)
# val_loss = val_loss.eval(session=tf.compat.v1.Session()) # if eager execution
val_loss = np.mean(val_loss.numpy())
# print("val loss 2",val_loss)
val_loss2 = float(str(val_loss))#)[0:15])
self.val_loss_m.append(val_loss)
meanloss_val = np.mean(self.val_loss_m)
meanloss_val = float(str(meanloss_val))#[0:15])
if batch_nr %100 == 0:
total_train_loss.append(loss)
total_val_loss.append(val_loss)
values=[('train loss',loss2),("mean loss",meanloss),("val_loss",val_loss2),("mean val_loss",meanloss_val)] # add comma after last ) to add another metric!
pb_i.add(batch_size, values=values)
# except:
# pass
self.dataloader.shuffle_data()
self.dataloader.reset_counter() # makes it work after last epoch
visualize_loss(total_train_loss,total_val_loss)
if epoch_nr%1 == 0:
# self.save(f"model_train_on_batch_vocals3-{epoch_nr}-{round(meanloss,5)}")
pass
self.loss = []
self.val_loss_m = []
def _build(self):
# self._build_encoder()
# self._build_decoder()
self._build_autoencoder()
def _add_encoder_input(self):
'''returns Input Object - Keras Input Layer object'''
self.encoder_list = []
inp = Input(shape=self.input_shape, name="encoder_input")
self.encoder_list.append(inp)
return inp # returns the input shape of your data
def _add_conv_layers(self, encoder_input):
'''Creates all convolutional blocks in the encoder'''
model = encoder_input
# layer_index tells us at which layer we pass in the specific conv layer
for layer_index in range(self._num_conv_layers):
# will now be a graph of layers
model = self._add_conv_layer(layer_index, model)
return model
def _add_conv_layer(self, layer_index, model):
'''adds a conv layer to the total neural network network that started with only Input()'''
'''
Adds a convolutional block to a graph of layers, consisting of
conv 2d +
Relu activation +
Batch normalization
'''
layer_number = layer_index + 1
conv_layer = Conv2D(
# (int) amount of kernels we use -> output dimensionality of this conv layer -> how many filters we use
filters=self.conv_filters[layer_index],
# filter size over input (4 x 4) -> can also be rectengular
kernel_size=(self.conv_kernels[layer_index],self.conv_kernels[layer_index]),
strides=self.conv_strides[layer_index],
# keeps dimensionality same -> adds 0's outside the "image" to make w/e stride u pick work
padding="same",
name=f"encoder_conv_layer{layer_number}",
kernel_initializer=self.weight_initializer
)
'''adding Conv, Relu, and Batch normalisation to each layer -> x is now the model'''
# model = model (Input) + Conv layers
# add the convolutional layers to whatever x was
model = conv_layer(model)
model = ReLU(name=f"encoder_relu_{layer_number}")(model)
model = BatchNormalization(name=f"encoder_bn_{layer_number}")(model)
# print("shape",model)
self.encoder_list.append(model)
return model
def _add_bottle_neck(self, model):
'''Flatten data and add bottleneck ( Dense Layer ). '''
self._shape_before_bottleneck = K.int_shape(model)[
1:] # [2, 7 ,7 , 32] # 4 dimensional array ( batch size x width x height x channels )
model = Flatten()(model)
model = Dense(self.latent_space_dim, name="encoder_output")(model) # dimensionality of latent space -> outputshape
# each output layer in dense layer is value between 0 and 1, if the value is highest -> then we pick that output
# for duo classification you have 1 layer between 0 and 1 , if the value is > 0.5 then we pick that output
return model
def _add_decoder_input(self):
return Input(shape=self.latent_space_dim, name="decoder_input")
def _add_dense_layer(self, decoder_input):
# product of neurons from previous conv output in dense layer
num_neurons = np.prod(self._shape_before_bottleneck)
dense_layer = Dense(num_neurons, name="decoder_dense")(decoder_input)
return dense_layer
def _add_reshape_layer(self, dense_layer):
reshape_layer = Reshape(self._shape_before_bottleneck)(dense_layer)
reshape_layer = Concatenate(axis=3)([self.encoder_list[len(self.encoder_list)-1], reshape_layer]) # U-net skip connections
return reshape_layer
def _add_conv_transpose_layers(self, x):
'''add convolutional transpose blocks -> conv2d -> relu -> batch normalisation'''
# loop through all the conv layers in reverse order and stop at the first layer
# [0, 1 , 2 ] -> [ 2, 1 ] remove first value
for layer_index in reversed(range(1, self._num_conv_layers)):
x = self._add_conv_transpose_layer(layer_index, x)
return x
def _add_conv_transpose_layer(self, layer_index, x):
layer_number = self._num_conv_layers - layer_index
conv_transpose_layer = Conv2DTranspose(
filters=self.conv_filters[layer_index ],
kernel_size=(self.conv_kernels[layer_index],self.conv_kernels[layer_index]),
strides=self.conv_strides[layer_index],
padding="same",
name=f"decoder_conv_transpose_layer_{layer_number}",
kernel_initializer=self.weight_initializer
)
x = conv_transpose_layer(x)
if layer_index == 1:
pass
x = ReLU(name=f"decoder_relu_{layer_number}")(x)
x = BatchNormalization(name=f"decoder_bn_{layer_number}")(x)
x = Concatenate(axis=3)([self.encoder_list[layer_index ], x]) # U-net skip connections
return x
    def _add_decoder_output(self, x):
        """Build the decoder output head.

        A Conv2DTranspose collapses the feature maps to one channel, the raw
        encoder input is concatenated in as the outermost U-net skip
        connection, a 1x1 Conv2DTranspose merges that back to a single
        channel, and a tanh activation produces the final output (tanh rather
        than sigmoid because the data is normalised).
        """
        conv_transpose_layer = Conv2DTranspose(
            # single output channel
            filters=1,
            # mirrors the first encoder conv, which _add_conv_transpose_layers skipped
            kernel_size=self.conv_kernels[0],
            # stride of the first (skipped) encoder conv
            strides=self.conv_strides[0],
            padding="same",
            name=f"decoder_conv_transpose_layer_{self._num_conv_layers}",
            kernel_initializer=self.weight_initializer
        )
        x = conv_transpose_layer(x)
        x = Concatenate(axis=3)([self.encoder_list[0], x])  # U-net skip connections
        conv_transpose_layer = Conv2DTranspose(
            # merge the skip-concatenated channels back down to one
            filters=1,
            # 1x1 kernel: per-pixel channel mixing only
            kernel_size=(1,1),
            strides=(1,1),
            padding="same",
            name=f"decoder_conv_transpose_layer_{self._num_conv_layers+1}",
            kernel_initializer=self.weight_initializer
        )
        x = conv_transpose_layer(x)
        output_layer = Activation("tanh", name="tanh_output_layer")(x)  # sigmoid -> tanh because normalisation
        return output_layer
    def _build_autoencoder(self):
        """Wire encoder and decoder into one end-to-end Keras model.

        The decoder is fed directly from the encoder bottleneck (rather than
        from a separate decoder Input), so the skip connections stored in
        ``self.encoder_list`` remain part of a single connected graph.
        """
        # --- encoder ---
        encoder_input = self._add_encoder_input()
        conv_layers = self._add_conv_layers(encoder_input)
        bottleneck = self._add_bottle_neck(conv_layers)
        # --- decoder, consuming the bottleneck directly ---
        dense_layer = self._add_dense_layer(bottleneck)
        reshape_layer = self._add_reshape_layer(dense_layer)
        conv_transpose_layers = self._add_conv_transpose_layers(reshape_layer)
        decoder_output = self._add_decoder_output(conv_transpose_layers)
        self.model = Model(encoder_input, decoder_output, name="Autoencoder")
def main():
    """Build a small U-net style autoencoder and print/plot its summary."""
    model = AutoEncoder(
        input_shape=(112, 112, 1),
        conv_filters=(16, 32, 64, 128),
        conv_kernels=(3, 3, 3, 3),
        conv_strides=(2, 2, 2, 2),  # stride 2 halves each spatial dimension
        latent_space_dim=2,
    )
    model.summary(save_image=True)


if __name__ == "__main__":
    main()
| [
"numpy.prod",
"tensorflow.keras.losses.MeanSquaredError",
"tensorflow.keras.utils.plot_model",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dense",
"mss.utils.visualisation.visualize_loss",
"tensorflow.cast",
"tensorflow.keras.layers.Input",
"os.path.exists",
"numpy.mean"... | [((2160, 2181), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(1)'], {}), '(1)\n', (2178, 2181), True, 'import tensorflow as tf\n'), ((2309, 2380), 'tensorflow.keras.initializers.TruncatedNormal', 'tf.keras.initializers.TruncatedNormal', ([], {'mean': '(0.0)', 'stddev': '(0.05)', 'seed': 'None'}), '(mean=0.0, stddev=0.05, seed=None)\n', (2346, 2380), True, 'import tensorflow as tf\n'), ((3419, 3462), 'os.path.join', 'os.path.join', (['save_folder', '"""parameters.pkl"""'], {}), "(save_folder, 'parameters.pkl')\n", (3431, 3462), False, 'import os\n'), ((3667, 3706), 'os.path.join', 'os.path.join', (['save_folder', '"""weights.h5"""'], {}), "(save_folder, 'weights.h5')\n", (3679, 3706), False, 'import os\n'), ((4385, 4428), 'os.path.join', 'os.path.join', (['save_folder', '"""parameters.pkl"""'], {}), "(save_folder, 'parameters.pkl')\n", (4397, 4428), False, 'import os\n'), ((4629, 4668), 'os.path.join', 'os.path.join', (['save_folder', '"""weights.h5"""'], {}), "(save_folder, 'weights.h5')\n", (4641, 4668), False, 'import os\n'), ((5148, 5181), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (5152, 5181), False, 'from tensorflow.keras.optimizers import Adam\n'), ((5201, 5219), 'tensorflow.keras.losses.MeanSquaredError', 'MeanSquaredError', ([], {}), '()\n', (5217, 5219), False, 'from tensorflow.keras.losses import MeanSquaredError\n'), ((5786, 5840), 'mss.utils.dataloader.DataLoader', 'DataLoader', ([], {'batch_size': 'batch_size', 'num_epoch': 'num_epoch'}), '(batch_size=batch_size, num_epoch=num_epoch)\n', (5796, 5840), False, 'from mss.utils.dataloader import DataLoader\n'), ((9198, 9249), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'self.input_shape', 'name': '"""encoder_input"""'}), "(shape=self.input_shape, name='encoder_input')\n", (9203, 9249), False, 'from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose, ReLU, BatchNormalization, 
Flatten, Dense, Reshape, Activation, Concatenate\n'), ((10134, 10420), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'self.conv_filters[layer_index]', 'kernel_size': '(self.conv_kernels[layer_index], self.conv_kernels[layer_index])', 'strides': 'self.conv_strides[layer_index]', 'padding': '"""same"""', 'name': 'f"""encoder_conv_layer{layer_number}"""', 'kernel_initializer': 'self.weight_initializer'}), "(filters=self.conv_filters[layer_index], kernel_size=(self.\n conv_kernels[layer_index], self.conv_kernels[layer_index]), strides=\n self.conv_strides[layer_index], padding='same', name=\n f'encoder_conv_layer{layer_number}', kernel_initializer=self.\n weight_initializer)\n", (10140, 10420), False, 'from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose, ReLU, BatchNormalization, Flatten, Dense, Reshape, Activation, Concatenate\n'), ((11996, 12052), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'self.latent_space_dim', 'name': '"""decoder_input"""'}), "(shape=self.latent_space_dim, name='decoder_input')\n", (12001, 12052), False, 'from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose, ReLU, BatchNormalization, Flatten, Dense, Reshape, Activation, Concatenate\n'), ((12193, 12231), 'numpy.prod', 'np.prod', (['self._shape_before_bottleneck'], {}), '(self._shape_before_bottleneck)\n', (12200, 12231), True, 'import numpy as np\n'), ((13203, 13509), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', ([], {'filters': 'self.conv_filters[layer_index]', 'kernel_size': '(self.conv_kernels[layer_index], self.conv_kernels[layer_index])', 'strides': 'self.conv_strides[layer_index]', 'padding': '"""same"""', 'name': 'f"""decoder_conv_transpose_layer_{layer_number}"""', 'kernel_initializer': 'self.weight_initializer'}), "(filters=self.conv_filters[layer_index], kernel_size=(self.\n conv_kernels[layer_index], self.conv_kernels[layer_index]), strides=\n self.conv_strides[layer_index], padding='same', name=\n 
f'decoder_conv_transpose_layer_{layer_number}', kernel_initializer=self\n .weight_initializer)\n", (13218, 13509), False, 'from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose, ReLU, BatchNormalization, Flatten, Dense, Reshape, Activation, Concatenate\n'), ((13972, 14198), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', ([], {'filters': '(1)', 'kernel_size': 'self.conv_kernels[0]', 'strides': 'self.conv_strides[0]', 'padding': '"""same"""', 'name': 'f"""decoder_conv_transpose_layer_{self._num_conv_layers}"""', 'kernel_initializer': 'self.weight_initializer'}), "(filters=1, kernel_size=self.conv_kernels[0], strides=self.\n conv_strides[0], padding='same', name=\n f'decoder_conv_transpose_layer_{self._num_conv_layers}',\n kernel_initializer=self.weight_initializer)\n", (13987, 14198), False, 'from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose, ReLU, BatchNormalization, Flatten, Dense, Reshape, Activation, Concatenate\n'), ((14631, 14833), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', ([], {'filters': '(1)', 'kernel_size': '(1, 1)', 'strides': '(1, 1)', 'padding': '"""same"""', 'name': 'f"""decoder_conv_transpose_layer_{self._num_conv_layers + 1}"""', 'kernel_initializer': 'self.weight_initializer'}), "(filters=1, kernel_size=(1, 1), strides=(1, 1), padding=\n 'same', name=\n f'decoder_conv_transpose_layer_{self._num_conv_layers + 1}',\n kernel_initializer=self.weight_initializer)\n", (14646, 14833), False, 'from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose, ReLU, BatchNormalization, Flatten, Dense, Reshape, Activation, Concatenate\n'), ((16170, 16226), 'tensorflow.keras.Model', 'Model', (['encoder_input', 'decoder_output'], {'name': '"""Autoencoder"""'}), "(encoder_input, decoder_output, name='Autoencoder')\n", (16175, 16226), False, 'from tensorflow.keras import Model\n'), ((3375, 3392), 'pathlib.Path', 'Path', (['save_folder'], {}), '(save_folder)\n', (3379, 3392), False, 'from pathlib 
import Path\n'), ((3535, 3549), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3546, 3549), False, 'import pickle\n'), ((3988, 4000), 'pathlib.Path', 'Path', (['folder'], {}), '(folder)\n', (3992, 4000), False, 'from pathlib import Path\n'), ((4016, 4038), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (4030, 4038), False, 'import os\n'), ((4052, 4071), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (4063, 4071), False, 'import os\n'), ((4347, 4364), 'pathlib.Path', 'Path', (['save_folder'], {}), '(save_folder)\n', (4351, 4364), False, 'from pathlib import Path\n'), ((4482, 4508), 'pickle.dump', 'pickle.dump', (['parameters', 'f'], {}), '(parameters, f)\n', (4493, 4508), False, 'import pickle\n'), ((4591, 4608), 'pathlib.Path', 'Path', (['save_folder'], {}), '(save_folder)\n', (4595, 4608), False, 'from pathlib import Path\n'), ((4910, 4996), 'tensorflow.keras.utils.plot_model', 'tensorflow.keras.utils.plot_model', (['self.model', '"""input_skip.png"""'], {'show_shapes': '(True)'}), "(self.model, 'input_skip.png', show_shapes\n =True)\n", (4943, 4996), False, 'import tensorflow\n'), ((6378, 6449), 'tensorflow.keras.utils.Progbar', 'Progbar', (['self.dataloader.len_train_data'], {'stateful_metrics': 'metrics_names'}), '(self.dataloader.len_train_data, stateful_metrics=metrics_names)\n', (6385, 6449), False, 'from tensorflow.keras.utils import Progbar\n'), ((8660, 8708), 'mss.utils.visualisation.visualize_loss', 'visualize_loss', (['total_train_loss', 'total_val_loss'], {}), '(total_train_loss, total_val_loss)\n', (8674, 8708), False, 'from mss.utils.visualisation import visualize_loss\n'), ((11021, 11062), 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {'name': 'f"""encoder_relu_{layer_number}"""'}), "(name=f'encoder_relu_{layer_number}')\n", (11025, 11062), False, 'from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose, ReLU, BatchNormalization, Flatten, Dense, Reshape, Activation, Concatenate\n'), ((11086, 
11139), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': 'f"""encoder_bn_{layer_number}"""'}), "(name=f'encoder_bn_{layer_number}')\n", (11104, 11139), False, 'from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose, ReLU, BatchNormalization, Flatten, Dense, Reshape, Activation, Concatenate\n'), ((11384, 11402), 'tensorflow.keras.backend.int_shape', 'K.int_shape', (['model'], {}), '(model)\n', (11395, 11402), True, 'from tensorflow.keras import backend as K\n'), ((11547, 11556), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (11554, 11556), False, 'from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose, ReLU, BatchNormalization, Flatten, Dense, Reshape, Activation, Concatenate\n'), ((11580, 11631), 'tensorflow.keras.layers.Dense', 'Dense', (['self.latent_space_dim'], {'name': '"""encoder_output"""'}), "(self.latent_space_dim, name='encoder_output')\n", (11585, 11631), False, 'from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose, ReLU, BatchNormalization, Flatten, Dense, Reshape, Activation, Concatenate\n'), ((12254, 12294), 'tensorflow.keras.layers.Dense', 'Dense', (['num_neurons'], {'name': '"""decoder_dense"""'}), "(num_neurons, name='decoder_dense')\n", (12259, 12294), False, 'from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose, ReLU, BatchNormalization, Flatten, Dense, Reshape, Activation, Concatenate\n'), ((12409, 12447), 'tensorflow.keras.layers.Reshape', 'Reshape', (['self._shape_before_bottleneck'], {}), '(self._shape_before_bottleneck)\n', (12416, 12447), False, 'from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose, ReLU, BatchNormalization, Flatten, Dense, Reshape, Activation, Concatenate\n'), ((12485, 12504), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(3)'}), '(axis=3)\n', (12496, 12504), False, 'from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose, ReLU, BatchNormalization, Flatten, Dense, Reshape, 
Activation, Concatenate\n'), ((13667, 13708), 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {'name': 'f"""decoder_relu_{layer_number}"""'}), "(name=f'decoder_relu_{layer_number}')\n", (13671, 13708), False, 'from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose, ReLU, BatchNormalization, Flatten, Dense, Reshape, Activation, Concatenate\n'), ((13724, 13777), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': 'f"""decoder_bn_{layer_number}"""'}), "(name=f'decoder_bn_{layer_number}')\n", (13742, 13777), False, 'from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose, ReLU, BatchNormalization, Flatten, Dense, Reshape, Activation, Concatenate\n'), ((13793, 13812), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(3)'}), '(axis=3)\n', (13804, 13812), False, 'from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose, ReLU, BatchNormalization, Flatten, Dense, Reshape, Activation, Concatenate\n'), ((14518, 14537), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(3)'}), '(axis=3)\n', (14529, 14537), False, 'from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose, ReLU, BatchNormalization, Flatten, Dense, Reshape, Activation, Concatenate\n'), ((15163, 15207), 'tensorflow.keras.layers.Activation', 'Activation', (['"""tanh"""'], {'name': '"""tanh_output_layer"""'}), "('tanh', name='tanh_output_layer')\n", (15173, 15207), False, 'from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose, ReLU, BatchNormalization, Flatten, Dense, Reshape, Activation, Concatenate\n'), ((6082, 6127), 'numpy.load', 'np.load', (['"""visualisation/total_train_loss.npy"""'], {}), "('visualisation/total_train_loss.npy')\n", (6089, 6127), True, 'import numpy as np\n'), ((6163, 6206), 'numpy.load', 'np.load', (['"""visualisation/total_val_loss.npy"""'], {}), "('visualisation/total_val_loss.npy')\n", (6170, 6206), True, 'import numpy as np\n'), ((6907, 6925), 'numpy.mean', 'np.mean', 
(['self.loss'], {}), '(self.loss)\n', (6914, 6925), True, 'import numpy as np\n'), ((7339, 7385), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['y_pred'], {'dtype': 'tf.float32'}), '(y_pred, dtype=tf.float32)\n', (7359, 7385), True, 'import tensorflow as tf\n'), ((7417, 7445), 'tensorflow.cast', 'tf.cast', (['y_val', 'y_pred.dtype'], {}), '(y_val, y_pred.dtype)\n', (7424, 7445), True, 'import tensorflow as tf\n'), ((7944, 7968), 'numpy.mean', 'np.mean', (['self.val_loss_m'], {}), '(self.val_loss_m)\n', (7951, 7968), True, 'import numpy as np\n'), ((7488, 7529), 'tensorflow.math.squared_difference', 'tf.math.squared_difference', (['y_pred', 'y_val'], {}), '(y_pred, y_val)\n', (7514, 7529), True, 'import tensorflow as tf\n')] |
from itertools import product
from logging import getLogger
from typing import Callable, Iterable, Mapping, Sequence, Tuple

import numpy as np
import pandas as pd

import toys
from toys.common import BaseEstimator
from toys.data import Dataset
logger = getLogger(__name__)
Fold = Tuple[Dataset, Dataset]
CrossValSplitter = Callable[[Dataset], Iterable[Fold]]
ParamGrid = Mapping[str, Sequence]
def combinations(grid):
    '''Iterates over all combinations of parameters in a parameter grid.

    A parameter grid is a mapping from parameter names to a sequence of
    possible values. This function yields dictionaries mapping all names in
    the grid to exactly one value, for all possible combinations; the first
    parameter varies fastest. The argument ``grid`` may also be a sequence of
    parameter grids, which is equivalent to chaining the iterators for each
    individual grid. If a ``grid`` is :obj:`None`, this function yields a
    single empty dictionary.

    Arguments:
        grid (ParamGrid or Iterable[ParamGrid] or None):
            One or more parameter grids to search.

    Yields:
        A dictionary mapping parameter names to values.
    '''
    if not grid:
        yield {}
    elif isinstance(grid, Mapping):
        # ``itertools.product`` replaces the previous hand-rolled odometer,
        # which sized its loop with ``np.prod`` and could silently overflow
        # int64 on very large grids. Iterating the keys in reverse preserves
        # the original yield order (first parameter varies fastest).
        keys = list(grid)
        for values in product(*(grid[k] for k in reversed(keys))):
            yield dict(zip(keys, reversed(values)))
    else:
        for subgrid in grid:
            yield from combinations(subgrid)
class KFold(CrossValSplitter):
    '''A splitter for simple k-fold cross validation.

    K-folding partitions a dataset into ``k`` subsets of roughly equal size
    and produces one fold per subset: a pair ``(train, test)`` where ``test``
    is that subset and ``train`` is the concatenation of all the others.

    Instances are callables that take a dataset and return an iterator over
    its folds.

    With ``shuffle`` enabled the partition members are drawn at random;
    otherwise each partition is a contiguous slice of the dataset.

    Arguments:
        k (int):
            The number of folds. Must be at least 2.
        shuffle (bool):
            Whether to shuffle the indices before splitting.
    '''

    def __init__(self, k=3, shuffle=True):
        if k < 2:
            raise ValueError('The number of folds must be at least 2.')
        self.k = k
        self.shuffle = shuffle

    def __call__(self, dataset):
        fold_indices = np.arange(len(dataset))
        if self.shuffle:
            np.random.shuffle(fold_indices)
        partitions = np.array_split(fold_indices, self.k)
        for held_out in partitions:
            kept = np.concatenate([p for p in partitions if p is not held_out])
            train = toys.subset(dataset, kept)
            test = toys.subset(dataset, held_out)
            yield train, test
class TunedEstimator(BaseEstimator):
    '''An estimator wrapped with default kwargs.

    These are often returned by meta-estimators performing a parameter
    search, e.g. :class:`~toys.model_selection.GridSearchCV`.

    Attributes:
        estimator (Estimator):
            The underlying estimator.
        kwargs (Dict[str, Any]):
            Overrides for the default kwargs of the estimator.
        cv_results (pandas.DataFrame or None):
            An optional table attached to the instance.
    '''

    def __init__(self, estimator, kwargs, cv_results=None):
        super().__init__()
        self.estimator = estimator
        self.kwargs = kwargs
        # Bug fix: ``pd`` was referenced here without ever being imported, so
        # constructing a TunedEstimator always raised NameError. pandas is now
        # imported at module level, and ``None`` stays ``None`` instead of
        # becoming an empty DataFrame.
        self.cv_results = pd.DataFrame(cv_results) if cv_results is not None else None

    def fit(self, *args, **kwargs):
        '''Fit the wrapped estimator with the stored defaults merged in.

        Keyword arguments passed here override the stored defaults.
        '''
        kwargs = {**self.kwargs, **kwargs}
        model = self.estimator(*args, **kwargs)
        return model
| [
"logging.getLogger",
"numpy.array_split",
"toys.subset",
"numpy.concatenate",
"numpy.random.shuffle"
] | [((206, 225), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (215, 225), False, 'from logging import getLogger\n'), ((2776, 2807), 'numpy.array_split', 'np.array_split', (['indices', 'self.k'], {}), '(indices, self.k)\n', (2790, 2807), True, 'import numpy as np\n'), ((2732, 2758), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (2749, 2758), True, 'import numpy as np\n'), ((2945, 2974), 'numpy.concatenate', 'np.concatenate', (['train_indices'], {}), '(train_indices)\n', (2959, 2974), True, 'import numpy as np\n'), ((2995, 3030), 'toys.subset', 'toys.subset', (['dataset', 'train_indices'], {}), '(dataset, train_indices)\n', (3006, 3030), False, 'import toys\n'), ((3050, 3084), 'toys.subset', 'toys.subset', (['dataset', 'test_indices'], {}), '(dataset, test_indices)\n', (3061, 3084), False, 'import toys\n')] |
import numpy as np
import pandas as pd
import pytest
from activitysim.abm.models.util.trip import get_time_windows
@pytest.mark.parametrize("duration, levels, expected",
                         [(24, 3, 2925), (24, 2, 325), (24, 1, 25),
                          (48, 3, 20825), (48, 2, 1225), (48, 1, 49)])
def test_get_time_windows(duration, levels, expected):
    """Check shape, count, and feasibility of the generated time windows."""
    windows = get_time_windows(duration, levels)

    if levels == 1:
        # Flat result: every window must fit inside the duration.
        assert windows.ndim == 1
        assert len(windows) == expected
        assert np.count_nonzero(windows <= duration) == expected
    else:
        assert len(windows) == levels
        assert len(windows[0]) == expected
        # Summed across levels, each combination must still fit the duration.
        totals = windows.sum(axis=0)
        assert np.count_nonzero(totals <= duration) == expected
        # No duplicate combinations.
        frame = pd.DataFrame(np.transpose(windows))
        assert len(frame) == len(frame.drop_duplicates())
| [
"pytest.mark.parametrize",
"activitysim.abm.models.util.trip.get_time_windows",
"numpy.transpose",
"numpy.sum"
] | [((119, 265), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""duration, levels, expected"""', '[(24, 3, 2925), (24, 2, 325), (24, 1, 25), (48, 3, 20825), (48, 2, 1225), (\n 48, 1, 49)]'], {}), "('duration, levels, expected', [(24, 3, 2925), (24, \n 2, 325), (24, 1, 25), (48, 3, 20825), (48, 2, 1225), (48, 1, 49)])\n", (142, 265), False, 'import pytest\n'), ((386, 420), 'activitysim.abm.models.util.trip.get_time_windows', 'get_time_windows', (['duration', 'levels'], {}), '(duration, levels)\n', (402, 420), False, 'from activitysim.abm.models.util.trip import get_time_windows\n'), ((711, 739), 'numpy.sum', 'np.sum', (['time_windows'], {'axis': '(0)'}), '(time_windows, axis=0)\n', (717, 739), True, 'import numpy as np\n'), ((825, 851), 'numpy.transpose', 'np.transpose', (['time_windows'], {}), '(time_windows)\n', (837, 851), True, 'import numpy as np\n'), ((540, 572), 'numpy.sum', 'np.sum', (['(time_windows <= duration)'], {}), '(time_windows <= duration)\n', (546, 572), True, 'import numpy as np\n'), ((755, 789), 'numpy.sum', 'np.sum', (['(total_duration <= duration)'], {}), '(total_duration <= duration)\n', (761, 789), True, 'import numpy as np\n')] |
"""Machine learning utilities."""
from collections import Counter, defaultdict
from datetime import datetime
import importlib
import pathlib
import warnings
import numpy as np
import pytorch_lightning as pl
import torch
import tqdm
from mltype.utils import get_cache_dir, get_mlflow_artifacts_path, print_section
warnings.filterwarnings("ignore")
def create_data_language(
    text, vocabulary, window_size=2, fill_strategy="zeros", verbose=False
):
    """Create a supervised dataset for the character-level language model.

    Parameters
    ----------
    text : str
        Some text.

    vocabulary : list
        Unique list of supported characters. Their corresponding indices
        are going to be used for the one hot encoding.

    window_size : int
        The number of previous characters to condition on.

    fill_strategy : str, {"skip", "zeros"}
        Strategy for handling initial characters and unknown characters.

    verbose : bool
        If True, progress bar is showed.

    Returns
    -------
    X : np.ndarray
        Features array of shape `(len(text), window_size)` if
        `fill_strategy=zeros`, otherwise it might be shorter. The dtype is
        `np.uint8`. If applicable, the integer `len(vocabulary)` represents
        a zero vector (out of vocabulary token).

    y : np.ndarray
        Targets array of shape `(len(text),)` if `fill_strategy=zeros`,
        otherwise it might be shorter. The dtype is `np.uint8`.

    indices : np.ndarray
        For each sample an index of the character we are trying to predict.
        Note that for `fill_strategy="zeros"` it is going to be
        `np.arange(len(text))`. However, for different strategies might
        have gaps. It helps us to keep track of the sample - character
        correspondence.
    """
    if not vocabulary:
        raise ValueError("The vocabulary is empty.")

    if len(vocabulary) != len(set(vocabulary)):
        raise ValueError("There are duplicates in the vocabulary.")

    vocab_size = len(vocabulary)
    if vocab_size >= 255:  # we need one extra integer for out of vocabulary
        raise ValueError("The maximum vocabulary size is 255")

    text_size = len(text)

    # Unknown characters (and the leading padding) map to the extra index
    # ``vocab_size``, which downstream code decodes as a zero vector.
    ch2ix = defaultdict(lambda: vocab_size)
    ch2ix.update({ch: ix for ix, ch in enumerate(vocabulary)})

    text_l = window_size * [None] + list(text)

    X_lines = []
    y_lines = []
    indices_lines = []

    iterable = range(text_size)
    if verbose:
        iterable = tqdm.tqdm(iterable)

    for i in iterable:
        feature_ixs = [
            ch2ix[text_l[i + offset]] for offset in range(window_size)
        ]
        target_ix = ch2ix[text_l[i + window_size]]
        if fill_strategy == "skip":
            if vocab_size in feature_ixs or vocab_size == target_ix:
                continue

        X_lines.append(feature_ixs)
        y_lines.append(target_ix)
        indices_lines.append(i)

    if not X_lines:
        X = np.empty((0, window_size), dtype=np.uint8)
        y = np.empty((0,), dtype=np.uint8)
    else:
        # Bug fix: np.uint8 (0..255) instead of np.int8 (-128..127). The int8
        # dtype overflowed for vocabularies larger than 127 characters even
        # though vocabularies up to 254 characters are accepted above.
        X = np.array(X_lines, dtype=np.uint8)
        y = np.array(y_lines, dtype=np.uint8)

    indices = np.array(indices_lines)

    return X, y, indices
def text2features(text, vocabulary):
    """Create per character one hot encoding.

    Note that we employ the zeros strategy for out of vocabulary characters.

    Parameters
    ----------
    text : str
        Text.

    vocabulary : list
        Vocabulary to be used for the encoding.

    Returns
    -------
    res : np.ndarray
        Array of shape `(len(text), len(vocabulary))` of boolean dtype.
        Each row represents the one hot encoding of the respective character.
        Note that out of vocabulary characters are encoded with a zero
        vector.
    """
    text_size = len(text)
    vocab_size = len(vocabulary)

    ch2ix = {ch: ix for ix, ch in enumerate(vocabulary)}

    # Bug fix: the ``np.bool`` alias was removed in NumPy 1.24 and raised
    # AttributeError; the builtin ``bool`` is the documented replacement.
    output = np.zeros((text_size, vocab_size), dtype=bool)

    for i, ch in enumerate(text):
        try:
            output[i, ch2ix[ch]] = True
        except KeyError:
            # Out of vocabulary: leave the zero vector in place.
            pass

    return output
def sample_char(
    network,
    vocabulary,
    h=None,
    c=None,
    previous_chars=None,
    random_state=None,
    top_k=None,
    device=None,
):
    """Sample a character given network probability prediction (with a state).

    Parameters
    ----------
    network : torch.nn.Module
        Trained neural network that outputs a probability distribution over
        `vocabulary`.

    vocabulary : list
        List of unique characters.

    h, c : torch.Tensor
        Hidden states with shape `(n_layers, batch_size=1, hidden_size)`.
        Note that if both of them are None we are at the very first character.

    previous_chars : None or str
        Previous characters. None or an empty string if we are at the very
        first character.

    random_state : None or int
        Guarantees reproducibility.

    top_k : None or int
        If specified, we only sample from the top k most probable characters.
        Otherwise all of them.

    device : None or torch.device
        By default `torch.device("cpu")`.

    Returns
    -------
    ch : str
        A character from the vocabulary.
    """
    device = device or torch.device("cpu")

    if previous_chars:
        features = text2features(previous_chars, vocabulary)
    else:
        # No history yet: feed a single zero vector. Bug fix: ``np.bool`` was
        # removed in NumPy 1.24; use the builtin ``bool`` dtype instead.
        features = np.zeros((1, len(vocabulary)), dtype=bool)

    network.eval()
    features = features[None, ...]  # add batch dimension

    if random_state is not None:
        np.random.seed(random_state)

    x = torch.from_numpy(features).to(dtype=torch.float32, device=device)
    out, h_n, c_n = network(x, h, c)
    probs = out[0].detach().cpu().numpy()

    if top_k is not None:
        # Zero out everything outside the k most probable characters and
        # renormalise so the values still form a distribution.
        probs_new = np.zeros_like(probs)
        top_k_indices = probs.argsort()[-top_k:]
        probs_new[top_k_indices] = probs[top_k_indices]
        probs = probs_new / probs_new.sum()

    return np.random.choice(vocabulary, p=probs), h_n, c_n
def sample_text(
    n_chars,
    network,
    vocabulary,
    initial_text=None,
    random_state=None,
    top_k=None,
    verbose=False,
    device=None,
):
    """Sample text by unrolling character by character predictions.

    The hidden state is carried across predictions, so after the first step
    only the most recent character needs to be fed back in.

    Parameters
    ----------
    n_chars : int
        Number of characters to sample.

    network : torch.nn.Module
        Pretrained character level network.

    vocabulary : list
        List of unique characters.

    initial_text : None or str
        If specified, initial text to condition on.

    random_state : None or int
        Allows reproducibility.

    top_k : None or int
        If specified, we only sample from the top k most probable
        characters. Otherwise all of them.

    verbose : bool
        Controls verbosity.

    device : None or torch.device
        By default `torch.device("cpu")`.

    Returns
    -------
    text : str
        Generated text of length `n_chars + len(initial_text)`.
    """
    device = device or torch.device("cpu")
    network.eval()

    seed_text = initial_text or ""
    generated = seed_text
    h = c = None

    steps = range(n_chars)
    if verbose:
        steps = tqdm.tqdm(steps)

    if random_state is not None:
        np.random.seed(random_state)

    for _ in steps:
        # First step conditions on the whole seed; afterwards the hidden
        # state carries the context and only the last character is needed.
        context = seed_text if generated == seed_text else generated[-1]
        next_ch, h, c = sample_char(
            network,
            vocabulary,
            h=h,
            c=c,
            previous_chars=context,
            top_k=top_k,
            device=device,
        )
        generated += next_ch

    return generated
class LanguageDataset(torch.utils.data.Dataset):
    """Language dataset.

    All the inputs of this class should be generated via
    `create_data_language`.

    Parameters
    ----------
    X : np.ndarray
        Array of shape (n_samples, window_size) of an integer dtype.
        It represents the features.

    y : np.ndarray
        Array of shape (n_samples,) of an integer dtype.
        It represents the targets.

    vocabulary : list
        List of characters in the vocabulary.

    transform : callable or None
        Some callable that inputs `X` and `y` and returns some
        modified instances of them.

    Attributes
    ----------
    ohv_matrix : np.ndarray
        Matrix of shape `(vocab_size + 1, vocab_size)`. The submatrix
        `ohv_matrix[:vocab_size, :]` is an identity matrix and is used
        for fast creation of one hot vectors. The last row of `ohv_matrix`
        is a zero vector and is used for encoding out-of-vocabulary characters.
    """

    def __init__(self, X, y, vocabulary, transform=None):
        self.X = X
        self.y = y
        self.vocabulary = vocabulary
        self.transform = transform

        # Dead code removed: a ``ch2ix`` defaultdict used to be built here
        # but was never read anywhere in the class.
        vocab_size = len(vocabulary)
        # Identity rows are the one hot vectors; the appended zero row
        # encodes out-of-vocabulary characters (index == vocab_size).
        ohv_matrix = np.eye(vocab_size, dtype=np.float32)
        self.ohv_matrix = np.concatenate(
            [ohv_matrix, np.zeros((1, vocab_size), dtype=np.float32)], axis=0
        )

    def __len__(self):
        """Compute the number of samples."""
        return len(self.X)

    def __getitem__(self, ix):
        """Get a single sample.

        Parameters
        ----------
        ix : int
            Index of the sample.

        Returns
        -------
        X_sample : np.ndarray
            Array of shape `(window_size, vocab_size)` where each
            row is either an one hot vector (inside of vocabulary character) or
            a zero vector (out of vocabulary character).

        y_sample : np.ndarray
            Array of shape `(vocab_size,)` representing either the one hot
            encoding of the character to be predicted (inside of vocabulary
            character) or a zero vector (out of vocabulary character).

        vocabulary : list
            The vocabulary. The reason why we want to provide this too
            is to have access to it during validation.
        """
        X_sample = torch.from_numpy(self.ohv_matrix[self.X[ix]])
        y_sample = torch.from_numpy(self.ohv_matrix[self.y[ix]])

        if self.transform is not None:
            X_sample, y_sample = self.transform(X_sample, y_sample)

        # unfortunately vocab will get collated to a batch, but whatever
        return X_sample, y_sample, self.vocabulary
class SingleCharacterLSTM(pl.LightningModule):
"""Single character recurrent neural network.
Given some string of characters, we generate the probability distribution
of the next character.
Architecture starts with an LSTM (`hidden_size`, `n_layers`, `vocab_size`)
network and then we feed the last hidden state to a fully
connected network with one hidden layer (`dense_size`).
Parameters
----------
vocab_size : int
Size of the vocabulary. Necessary since we are encoding each
character as a one hot vector.
hidden_size : int
Hidden size of the recurrent cell.
n_layers : int
Number of layers in the recurrent network.
dense_size : int
Size of the single layer of the feed forward network.
Attributes
----------
rnn_layer : torch.nn.Module
The recurrent network layer.
linear_layer1 : torch.nn.Module
Linear layer connecting the last hidden state and the single
layer of the feedforward network.
linear_layer2 : torch.nn.Module
Linear layer connecting the single layer of the feedforward network
with the output (of size `vocabulary_size`).
activation_layer : torch.nn.Module
Softmax layer making sure we get a probability distribution.
"""
    def __init__(self, vocab_size, hidden_size=16, n_layers=1, dense_size=128):
        """Construct the LSTM plus two-layer feed-forward head (see class docstring)."""
        super().__init__()
        # pytorch-lightning helper that records the constructor arguments.
        self.save_hyperparameters()

        # Recurrent stack consuming one-hot characters; batch dimension first.
        self.rnn_layer = torch.nn.LSTM(
            input_size=vocab_size,
            hidden_size=hidden_size,
            num_layers=n_layers,
            batch_first=True,
        )
        # Feed-forward head mapping the (layer-averaged, see ``forward``)
        # final hidden state back to a distribution over the vocabulary.
        self.linear_layer1 = torch.nn.Linear(hidden_size, dense_size)
        self.linear_layer2 = torch.nn.Linear(dense_size, vocab_size)
        self.activation_layer = torch.nn.Softmax(dim=1)
def forward(self, x, h=None, c=None):
"""Perform forward pass.
Parameters
----------
x : torch.Tensor
Input features of shape `(batch_size, window_size, vocab_size)`.
Note that the provided `vocab_size` needs to be equal to the one
provided in the constructor. The remaining dimensions
(`batch_size` and `window_size`) can be any positive integers.
h, c : torch.Tensor
Hidden states of shape `(n_layers, batch_size, hidden_size)`. Note
that if provided we enter a continuation mode. In this case
to generate the prediction we just use the last character and the
hidden state for the prediction. Note that in this case
we enforce that `x.shape=(batch_size, 1, vocab_size)`.
Returns
-------
probs : torch.Tensor
Tensor of shape `(batch_size, vocab_size)`. For each sample
it represents the probability distribution over all characters
in the vocabulary.
h_n, c_n : torch.Tensor
New Hidden states of shape `(n_layers, batch_size, hidden_size)`.
"""
continuation_mode = h is not None and c is not None
if continuation_mode:
if not (x.ndim == 3 and x.shape[1] == 1):
raise ValueError("Wrong input for the continuation mode")
_, (h_n, c_n) = self.rnn_layer(x, (h, c))
else:
_, (h_n, c_n) = self.rnn_layer(x)
average_h_n = h_n.mean(dim=0)
x = self.linear_layer1(average_h_n)
logits = self.linear_layer2(x)
probs = self.activation_layer(logits)
return probs, h_n, c_n
def training_step(self, batch, batch_idx):
"""Run training step.
Necessary for pytorch-lightning.
Parameters
----------
batch : tuple
Batch of training samples. The exact definition depends
on the dataloader.
batch_idx : idx
Index of the batch.
Returns
-------
loss : torch.Tensor
Tensor scalar representing the mean binary cross entropy
over the batch.
"""
x, y, _ = batch
probs, _, _ = self.forward(x)
loss = torch.nn.functional.binary_cross_entropy(probs, y)
self.log("train_loss", loss, prog_bar=False)
return loss
def validation_step(self, batch, batch_idx):
"""Run validation step.
Optional for pytorch-lightning.
Parameters
----------
batch : tuple
Batch of validation samples. The exact definition depends
on the dataloader.
batch_idx : idx
Index of the batch.
Returns
-------
vocabulary : list
Vocabulary in order to have access in
`validation_epoch_end`.
"""
x, y, vocabulary = batch
probs, _, _ = self.forward(x)
loss = torch.nn.functional.binary_cross_entropy(probs, y)
self.log("val_loss", loss, prog_bar=True)
return vocabulary
def validation_epoch_end(self, outputs):
"""Run epoch end validation logic.
We sample 5 times 100 characters from the current network. We
then print to the standard output.
Parameters
----------
outputs : list
List of batches that were collected over the validation
set with `validation_step`.
"""
if self.logger is None:
return
vocabulary = np.array(outputs[-1])[:, 0]
n_samples = 5
n_chars = 100
lines = [
sample_text(n_chars, self, vocabulary, device=self.device)
for _ in range(n_samples)
]
text = "\n".join(lines)
artifacts_path = get_mlflow_artifacts_path(
self.logger.experiment, self.logger.run_id
)
output_path = artifacts_path / f"{datetime.now()}.txt"
output_path.write_text(text)
def configure_optimizers(self):
"""Configure optimizers.
Necessary for pytorch-lightning.
Returns
-------
optimizer : Optimizer
The chosen optimizer.
"""
optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
return optimizer
def run_train(
    texts,
    name,
    max_epochs=10,
    window_size=50,
    batch_size=32,
    vocab_size=None,
    fill_strategy="skip",
    illegal_chars="",
    train_test_split=0.5,
    hidden_size=32,
    dense_size=32,
    n_layers=1,
    checkpoint_path=None,
    output_path=None,
    use_mlflow=True,
    early_stopping=True,
    gpus=None,
):
    """Run the training loop.
    Note that the parameters are also explained in the cli of `mlt train`.
    Parameters
    ----------
    texts : list
        List of str representing all texts we would like to train on.
    name : str
        Name of the model. This name is only used when we save the model -
        it is not hardcoded anywhere in the serialization.
    max_epochs : int
        Maximum number of epochs. Note that the number of actual epochs
        can be lower if we activate the `early_stopping` flag.
    window_size : int
        Number of previous characters to consider when predicting the next
        character. The higher the number the longer the memory we are
        enforcing. However, at the same time, the training becomes slower.
    batch_size : int
        Number of samples in one batch.
    vocab_size : int
        Maximum number of characters to be put in the vocabulary. Note that
        one can explicitly exclude characters via `illegal_chars`. The higher
        this number the bigger the feature vectors are and the slower the
        training.
    fill_strategy : str, {"zeros", "skip"}
        Determines how to deal with out of vocabulary characters. When
        "zeros" then we simply encode them as zero vectors. If "skip", we
        skip a given sample if any of the characters in the window or the
        predicted character are not in the vocabulary.
    illegal_chars : str or None
        If specified, then each character of the str represents a forbidden
        character that we do not put in the vocabulary.
    train_test_split : float
        Float in the range (0, 1) representing the percentage of the training
        set with respect to the entire dataset.
    hidden_size : int
        Hidden size of LSTM cells (equal in all layers).
    dense_size : int
        Size of the dense layer that is bridging the hidden state outputted
        by the LSTM and the final output probabilities over the vocabulary.
    n_layers : int
        Number of layers inside of the LSTM.
    checkpoint_path : None or pathlib.Path or str
        If specified, it is pointing to a checkpoint file (generated
        by Pytorch-lightning). This file does not contain the vocabulary.
        It can be used to continue the training.
    output_path : None or pathlib.Path or str
        If specified, it is an alternative output folder when the trained
        models and logging information will be stored. If not specified
        the output folder is by default set to `~/.mltype`.
    use_mlflow : bool
        If active, then we use mlflow for logging of training and validation
        loss. Additionally, at the end of each epoch we generate a few
        sample texts to demonstrate how good/bad the current network is.
    early_stopping : bool
        If True, then we monitor the validation loss and if it does not
        improve for a certain number of epochs then we stop the training.
    gpus : int or None
        If None or 0, no GPUs are used (only CPUs). Otherwise, it represents
        the number of GPUs to be used (using the data parallelization
        strategy).
    """
    illegal_chars = illegal_chars or ""
    cache_dir = get_cache_dir(output_path)
    languages_path = cache_dir / "languages" / name
    checkpoints_path = cache_dir / "checkpoints" / name
    # Refuse to silently overwrite an already-trained model of the same name.
    if languages_path.exists():
        raise FileExistsError(f"The model {name} already exists")
    with print_section(" Computing vocabulary ", drop_end=True):
        # Most frequent characters first; illegal characters are filtered out
        # before the list is truncated to at most `vocab_size` entries.
        vocabulary = sorted(
            [
                x[0]
                for x in Counter("".join(texts)).most_common()
                if x[0] not in illegal_chars
            ][:vocab_size]
        )  # works for None
        vocab_size = len(vocabulary)
        print(f"# characters: {vocab_size}")
        print(vocabulary)
    with print_section(" Creating training set ", drop_end=True):
        X_list = []
        y_list = []
        for text in tqdm.tqdm(texts):
            X_, y_, _ = create_data_language(
                text,
                vocabulary,
                window_size=window_size,
                verbose=False,
                fill_strategy=fill_strategy,
            )
            X_list.append(X_)
            y_list.append(y_)
        # Avoid an unnecessary copy when there is only a single text.
        X = np.concatenate(X_list, axis=0) if len(X_list) != 1 else X_list[0]
        y = np.concatenate(y_list, axis=0) if len(y_list) != 1 else y_list[0]
        print(f"X.dtype={X.dtype}, y.dtype={y.dtype}")
        # Random permutation of sample indices -> random train/val split.
        split_ix = int(len(X) * train_test_split)
        indices = np.random.permutation(len(X))
        train_indices = indices[:split_ix]
        val_indices = indices[split_ix:]
        print(f"Train: {len(train_indices)}\nValidation: {len(val_indices)}")
        dataset = LanguageDataset(X, y, vocabulary=vocabulary)
        dataloader_t = torch.utils.data.DataLoader(
            dataset,
            batch_size=batch_size,
            sampler=torch.utils.data.SubsetRandomSampler(train_indices),
        )
        dataloader_v = torch.utils.data.DataLoader(
            dataset,
            batch_size=batch_size,
            sampler=torch.utils.data.SubsetRandomSampler(val_indices),
        )
    if checkpoint_path is None:
        network = SingleCharacterLSTM(
            vocab_size,
            hidden_size=hidden_size,
            dense_size=dense_size,
            n_layers=n_layers,
        )
    else:
        # Continue training from a previous pytorch-lightning checkpoint.
        print(f"Loading a checkpointed network: {checkpoint_path}")
        network = SingleCharacterLSTM.load_from_checkpoint(str(checkpoint_path))
    # Checkpoint file names embed the epoch number and the validation loss.
    chp_name_template = str(checkpoints_path / "{epoch}-{val_loss:.3f}")
    chp_callback = pl.callbacks.ModelCheckpoint(
        filepath=chp_name_template,
        save_last=True,  # last epoch always there
        save_top_k=1,
        verbose=True,
        monitor="val_loss",
        mode="min",
        save_weights_only=False,
    )
    callbacks = []
    if use_mlflow:
        print("Logging with MLflow")
        logger = pl.loggers.MLFlowLogger(
            "mltype", save_dir=get_cache_dir(output_path) / "logs" / "mlruns"
        )
        print(f"Run ID: {logger.run_id}")
        logger.log_hyperparams(
            {
                "fill_strategy": fill_strategy,
                "model_name": name,
                "train_test_split": train_test_split,
                "vocab_size": vocab_size,
                "window_size": window_size,
            }
        )
    else:
        logger = None
    if early_stopping:
        print("Activating early stopping")
        callbacks.append(
            pl.callbacks.EarlyStopping(monitor="val_loss", verbose=True)
        )
    with print_section(" Training ", drop_end=True):
        trainer = pl.Trainer(
            gpus=gpus,
            max_epochs=max_epochs,
            logger=logger,
            callbacks=callbacks,
            checkpoint_callback=chp_callback,
        )
        trainer.fit(network, dataloader_t, dataloader_v)
    with print_section(" Saving the model ", drop_end=False):
        # Prefer the best checkpoint (lowest val_loss) over the final weights.
        if chp_callback.best_model_path:
            print(f"Using the checkpoint {chp_callback.best_model_path}")
            network = SingleCharacterLSTM.load_from_checkpoint(
                chp_callback.best_model_path
            )
        else:
            print("No checkpoint found, using the current network")
        print(f"The final model is saved to: {languages_path}")
        save_model(network, vocabulary, languages_path)
def load_model(path):
    """Load serialized model and vocabulary.
    Parameters
    ----------
    path : pathlib.Path
        Path to where the file lies. This file was created by
        `save_model` method.
    Returns
    -------
    model_inst : SingleCharacterLSTM
        Instance of the model. Note that all of its parameters
        will be lying on a CPU.
    vocabulary : list
        Corresponding vocabulary.
    """
    # Force every tensor onto the CPU regardless of where it was trained.
    payload = torch.load(path, map_location=torch.device("cpu"))
    # Resolve the model class by name so that the file itself never stores
    # a pickled class object - only its name.
    ml_module = importlib.import_module("mltype.ml")
    model_class = getattr(ml_module, payload["model_class_name"])
    # Rebuild the architecture from the stored hyperparameters, then load
    # the trained weights into it.
    model_inst = model_class(**payload["kwargs"])
    model_inst.load_state_dict(payload["state_dict"])
    return model_inst, payload["vocabulary"]
def save_model(model, vocabulary, path):
    """Serialize a model.
    Note that we require that the model has a property `hparams` that
    we can unpack into the constructor of the class and get the same
    network architecture. This is automatically the case if we subclass
    from `pl.LightningModule`.
    Parameters
    ----------
    model : SingleCharacterLSTM
        Torch model to be saved. Additionally, we require that it has
        the `hparams` property that contains all necessary hyperparameters
        to instantiate the model.
    vocabulary : list
        The corresponding vocabulary.
    path : pathlib.Path
        Path to the file that will hold the serialized object.
    """
    # Make sure the parent directory exists before writing.
    pathlib.Path(path).parent.mkdir(parents=True, exist_ok=True)
    # Only the class *name* is stored; `load_model` resolves it at load time.
    torch.save(
        {
            "kwargs": model.hparams,
            "model_class_name": model.__class__.__name__,
            "state_dict": model.state_dict(),
            "vocabulary": vocabulary,
        },
        path,
    )
| [
"mltype.utils.print_section",
"torch.from_numpy",
"numpy.array",
"pytorch_lightning.Trainer",
"pytorch_lightning.callbacks.ModelCheckpoint",
"pathlib.Path",
"torch.nn.LSTM",
"torch.nn.functional.binary_cross_entropy",
"numpy.empty",
"numpy.random.seed",
"numpy.concatenate",
"mltype.utils.get_c... | [((316, 349), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (339, 349), False, 'import warnings\n'), ((2216, 2248), 'collections.defaultdict', 'defaultdict', (['(lambda : vocab_size)'], {}), '(lambda : vocab_size)\n', (2227, 2248), False, 'from collections import Counter, defaultdict\n'), ((3152, 3175), 'numpy.array', 'np.array', (['indices_lines'], {}), '(indices_lines)\n', (3160, 3175), True, 'import numpy as np\n'), ((3915, 3963), 'numpy.zeros', 'np.zeros', (['(text_size, vocab_size)'], {'dtype': 'np.bool'}), '((text_size, vocab_size), dtype=np.bool)\n', (3923, 3963), True, 'import numpy as np\n'), ((20504, 20530), 'mltype.utils.get_cache_dir', 'get_cache_dir', (['output_path'], {}), '(output_path)\n', (20517, 20530), False, 'from mltype.utils import get_cache_dir, get_mlflow_artifacts_path, print_section\n'), ((22907, 23072), 'pytorch_lightning.callbacks.ModelCheckpoint', 'pl.callbacks.ModelCheckpoint', ([], {'filepath': 'chp_name_template', 'save_last': '(True)', 'save_top_k': '(1)', 'verbose': '(True)', 'monitor': '"""val_loss"""', 'mode': '"""min"""', 'save_weights_only': '(False)'}), "(filepath=chp_name_template, save_last=True,\n save_top_k=1, verbose=True, monitor='val_loss', mode='min',\n save_weights_only=False)\n", (22935, 23072), True, 'import pytorch_lightning as pl\n'), ((26664, 26693), 'torch.save', 'torch.save', (['output_dict', 'path'], {}), '(output_dict, path)\n', (26674, 26693), False, 'import torch\n'), ((2485, 2504), 'tqdm.tqdm', 'tqdm.tqdm', (['iterable'], {}), '(iterable)\n', (2494, 2504), False, 'import tqdm\n'), ((2952, 2993), 'numpy.empty', 'np.empty', (['(0, window_size)'], {'dtype': 'np.int8'}), '((0, window_size), dtype=np.int8)\n', (2960, 2993), True, 'import numpy as np\n'), ((3006, 3035), 'numpy.empty', 'np.empty', (['(0,)'], {'dtype': 'np.int8'}), '((0,), dtype=np.int8)\n', (3014, 3035), True, 'import numpy as np\n'), ((3059, 3091), 'numpy.array', 
'np.array', (['X_lines'], {'dtype': 'np.int8'}), '(X_lines, dtype=np.int8)\n', (3067, 3091), True, 'import numpy as np\n'), ((3104, 3136), 'numpy.array', 'np.array', (['y_lines'], {'dtype': 'np.int8'}), '(y_lines, dtype=np.int8)\n', (3112, 3136), True, 'import numpy as np\n'), ((5265, 5284), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5277, 5284), False, 'import torch\n'), ((5566, 5594), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (5580, 5594), True, 'import numpy as np\n'), ((5796, 5816), 'numpy.zeros_like', 'np.zeros_like', (['probs'], {}), '(probs)\n', (5809, 5816), True, 'import numpy as np\n'), ((5979, 6016), 'numpy.random.choice', 'np.random.choice', (['vocabulary'], {'p': 'probs'}), '(vocabulary, p=probs)\n', (5995, 6016), True, 'import numpy as np\n'), ((7184, 7203), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (7196, 7203), False, 'import torch\n'), ((7372, 7391), 'tqdm.tqdm', 'tqdm.tqdm', (['iterable'], {}), '(iterable)\n', (7381, 7391), False, 'import tqdm\n'), ((7434, 7462), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (7448, 7462), True, 'import numpy as np\n'), ((9034, 9066), 'collections.defaultdict', 'defaultdict', (['(lambda : vocab_size)'], {}), '(lambda : vocab_size)\n', (9045, 9066), False, 'from collections import Counter, defaultdict\n'), ((9155, 9191), 'numpy.eye', 'np.eye', (['vocab_size'], {'dtype': 'np.float32'}), '(vocab_size, dtype=np.float32)\n', (9161, 9191), True, 'import numpy as np\n'), ((10274, 10319), 'torch.from_numpy', 'torch.from_numpy', (['self.ohv_matrix[self.X[ix]]'], {}), '(self.ohv_matrix[self.X[ix]])\n', (10290, 10319), False, 'import torch\n'), ((10339, 10384), 'torch.from_numpy', 'torch.from_numpy', (['self.ohv_matrix[self.y[ix]]'], {}), '(self.ohv_matrix[self.y[ix]])\n', (10355, 10384), False, 'import torch\n'), ((12152, 12257), 'torch.nn.LSTM', 'torch.nn.LSTM', ([], {'input_size': 'vocab_size', 
'hidden_size': 'hidden_size', 'num_layers': 'n_layers', 'batch_first': '(True)'}), '(input_size=vocab_size, hidden_size=hidden_size, num_layers=\n n_layers, batch_first=True)\n', (12165, 12257), False, 'import torch\n'), ((12341, 12381), 'torch.nn.Linear', 'torch.nn.Linear', (['hidden_size', 'dense_size'], {}), '(hidden_size, dense_size)\n', (12356, 12381), False, 'import torch\n'), ((12411, 12450), 'torch.nn.Linear', 'torch.nn.Linear', (['dense_size', 'vocab_size'], {}), '(dense_size, vocab_size)\n', (12426, 12450), False, 'import torch\n'), ((12484, 12507), 'torch.nn.Softmax', 'torch.nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (12500, 12507), False, 'import torch\n'), ((14838, 14888), 'torch.nn.functional.binary_cross_entropy', 'torch.nn.functional.binary_cross_entropy', (['probs', 'y'], {}), '(probs, y)\n', (14878, 14888), False, 'import torch\n'), ((15551, 15601), 'torch.nn.functional.binary_cross_entropy', 'torch.nn.functional.binary_cross_entropy', (['probs', 'y'], {}), '(probs, y)\n', (15591, 15601), False, 'import torch\n'), ((16408, 16477), 'mltype.utils.get_mlflow_artifacts_path', 'get_mlflow_artifacts_path', (['self.logger.experiment', 'self.logger.run_id'], {}), '(self.logger.experiment, self.logger.run_id)\n', (16433, 16477), False, 'from mltype.utils import get_cache_dir, get_mlflow_artifacts_path, print_section\n'), ((20748, 20802), 'mltype.utils.print_section', 'print_section', (['""" Computing vocabulary """'], {'drop_end': '(True)'}), "(' Computing vocabulary ', drop_end=True)\n", (20761, 20802), False, 'from mltype.utils import get_cache_dir, get_mlflow_artifacts_path, print_section\n'), ((21149, 21204), 'mltype.utils.print_section', 'print_section', (['""" Creating training set """'], {'drop_end': '(True)'}), "(' Creating training set ', drop_end=True)\n", (21162, 21204), False, 'from mltype.utils import get_cache_dir, get_mlflow_artifacts_path, print_section\n'), ((21266, 21282), 'tqdm.tqdm', 'tqdm.tqdm', (['texts'], {}), '(texts)\n', 
(21275, 21282), False, 'import tqdm\n'), ((23916, 23958), 'mltype.utils.print_section', 'print_section', (['""" Training """'], {'drop_end': '(True)'}), "(' Training ', drop_end=True)\n", (23929, 23958), False, 'from mltype.utils import get_cache_dir, get_mlflow_artifacts_path, print_section\n'), ((23978, 24097), 'pytorch_lightning.Trainer', 'pl.Trainer', ([], {'gpus': 'gpus', 'max_epochs': 'max_epochs', 'logger': 'logger', 'callbacks': 'callbacks', 'checkpoint_callback': 'chp_callback'}), '(gpus=gpus, max_epochs=max_epochs, logger=logger, callbacks=\n callbacks, checkpoint_callback=chp_callback)\n', (23988, 24097), True, 'import pytorch_lightning as pl\n'), ((24231, 24282), 'mltype.utils.print_section', 'print_section', (['""" Saving the model """'], {'drop_end': '(False)'}), "(' Saving the model ', drop_end=False)\n", (24244, 24282), False, 'from mltype.utils import get_cache_dir, get_mlflow_artifacts_path, print_section\n'), ((25464, 25500), 'importlib.import_module', 'importlib.import_module', (['"""mltype.ml"""'], {}), "('mltype.ml')\n", (25487, 25500), False, 'import importlib\n'), ((26582, 26600), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (26594, 26600), False, 'import pathlib\n'), ((5604, 5630), 'torch.from_numpy', 'torch.from_numpy', (['features'], {}), '(features)\n', (5620, 5630), False, 'import torch\n'), ((16139, 16160), 'numpy.array', 'np.array', (['outputs[-1]'], {}), '(outputs[-1])\n', (16147, 16160), True, 'import numpy as np\n'), ((21583, 21613), 'numpy.concatenate', 'np.concatenate', (['X_list'], {'axis': '(0)'}), '(X_list, axis=0)\n', (21597, 21613), True, 'import numpy as np\n'), ((21661, 21691), 'numpy.concatenate', 'np.concatenate', (['y_list'], {'axis': '(0)'}), '(y_list, axis=0)\n', (21675, 21691), True, 'import numpy as np\n'), ((22217, 22268), 'torch.utils.data.SubsetRandomSampler', 'torch.utils.data.SubsetRandomSampler', (['train_indices'], {}), '(train_indices)\n', (22253, 22268), False, 'import torch\n'), ((22389, 
22438), 'torch.utils.data.SubsetRandomSampler', 'torch.utils.data.SubsetRandomSampler', (['val_indices'], {}), '(val_indices)\n', (22425, 22438), False, 'import torch\n'), ((23835, 23895), 'pytorch_lightning.callbacks.EarlyStopping', 'pl.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'verbose': '(True)'}), "(monitor='val_loss', verbose=True)\n", (23861, 23895), True, 'import pytorch_lightning as pl\n'), ((25230, 25249), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (25242, 25249), False, 'import torch\n'), ((9259, 9302), 'numpy.zeros', 'np.zeros', (['(1, vocab_size)'], {'dtype': 'np.float32'}), '((1, vocab_size), dtype=np.float32)\n', (9267, 9302), True, 'import numpy as np\n'), ((16543, 16557), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (16555, 16557), False, 'from datetime import datetime\n'), ((23304, 23330), 'mltype.utils.get_cache_dir', 'get_cache_dir', (['output_path'], {}), '(output_path)\n', (23317, 23330), False, 'from mltype.utils import get_cache_dir, get_mlflow_artifacts_path, print_section\n')] |
import os
from tqdm import tqdm
import sys
import subprocess
import pandas as pd
import numpy as np
from amrtime import parsers
import math
import itertools
import re
from sklearn.preprocessing import normalize
class Homology():
    """
    Generate a read encoding from homology (alignment) scores.

    Reads are aligned against the CARD protein reference with DIAMOND or
    MMseqs2, and `encode` turns the alignment results into one feature
    vector per read: one field per AMR gene family (or per ARO), holding
    the best alignment score of that read for the label.
    """
    def __init__(self, simulated_reads, data_type, card, tool):
        """
        Parameters
        ----------
        simulated_reads : str
            Path to the fastq file of simulated reads.
        data_type : str
            Label granularity for the encoding: 'family', or 'gene'/'aro'.
        card : object
            CARD ontology wrapper; must provide `write_proteins` plus the
            mappings and bitscore helpers used by `encode`.
        tool : str
            Aligner to use: 'DIAMOND' or 'MMSEQS2'.
        """
        self.reads = simulated_reads
        self.data_type = data_type
        self.db = 'training_data/card_proteins.faa'
        card.write_proteins(self.db)
        if tool == 'DIAMOND':
            if not os.path.exists(f'training_data/{data_type}_diamond.out6'):
                self.alignment_fp = self.run_diamond_alignment(self.reads, self.db)
            else:
                print(f"training_data/{data_type}_diamond.out6 already exists so re-using, use --redo to rebuild")
                self.alignment_fp = f'training_data/{data_type}_diamond.out6'
        elif tool == 'MMSEQS2':
            if not os.path.exists(f'training_data/{data_type}_mmseqs.out6'):
                # bug fix: this used to call self.run_alignment, a method that
                # does not exist (it is named run_mmseqs_alignment)
                self.alignment_fp = self.run_mmseqs_alignment(self.reads, self.db)
            else:
                print(f"training_data/{data_type}_mmseqs.out6 already exists so re-using, use --redo to rebuild")
                self.alignment_fp = f'training_data/{data_type}_mmseqs.out6'
        else:
            # previously an unknown tool silently left self.alignment_fp unset
            raise ValueError("tool must be {DIAMOND,MMSEQS2}")

    def run_diamond_alignment(self, reads, db):
        """
        Perform a DIAMOND BLASTX search to gather homology data.

        Returns the path of the tabular (outfmt 6) alignment file.
        """
        # build the DIAMOND database if it doesn't exist yet
        if not os.path.exists(db + '.dmnd'):
            subprocess.check_call('diamond makedb --in {0} --db {0}'.format(db),
                                  shell=True)
        # run alignment
        subprocess.check_call(f'diamond blastx --db {db} --out training_data/{self.data_type}_diamond.out6 --outfmt 6 --threads 2 --query {reads} --more-sensitive', shell=True)
        return f'training_data/{self.data_type}_diamond.out6'

    def run_mmseqs_alignment(self, reads, db):
        """
        Perform an MMseqs2 easy-search to gather homology data.

        Returns the path of the tabular alignment file.
        """
        subprocess.check_call(f'mmseqs easy-search {db} {reads} training_data/{self.data_type}_mmseqs.out6 /tmp', shell=True)
        # bug fix: previously nothing was returned, so __init__ stored None
        # in self.alignment_fp after a fresh MMseqs2 run
        return f'training_data/{self.data_type}_mmseqs.out6'

    def encode(self, card, metric, norm=False, dissimilarity=False):
        """
        Build the per-read feature matrix from the alignment output.

        The matrix is cached on disk; if it already exists it is re-used.

        Parameters
        ----------
        card : object
            CARD ontology wrapper with `aro_to_gene_family` and the
            maximum-bitscore helpers.
        metric : str
            Alignment score to use: 'bitscore', 'evalue' or 'pident'.
        norm : bool
            If True (bitscore only), L1-normalise each read's vector.
        dissimilarity : bool
            Currently a no-op; kept for interface compatibility.

        Returns
        -------
        numpy.ndarray
            Matrix of shape (n_reads_with_hits, n_labels).
        """
        alignment_fh = open(self.alignment_fp)
        reads_fh = open(self.reads)
        # build reference to get the correct column index for each label
        if self.data_type == 'family':
            label_to_field = {family: ix for ix, family in enumerate(set(card.aro_to_gene_family.values()))}
            field_to_label = {v: k for k, v in label_to_field.items()}
        elif self.data_type in ('gene', 'aro'):
            label_to_field = {aro: ix for ix, aro in enumerate(set(card.aro_to_gene_family.keys()))}
            # bug fix: this comprehension previously iterated over
            # `field_to_label` before it was assigned (NameError)
            field_to_label = {v: k for k, v in label_to_field.items()}
        else:
            raise ValueError("data_type must be {family,gene,aro}")
        # column of the requested metric in the outfmt 6 table
        if metric == 'bitscore':
            out_field = 11
        elif metric == 'evalue':
            out_field = 10
        elif metric == 'pident':
            out_field = 2
        else:
            raise ValueError('metric must be: {bitscore,evalue,pident}')
        if self.data_type in ('aro', 'gene'):
            folder = 'training_data/subfamily_training_data'
        elif self.data_type == 'family':
            folder = 'training_data/family_training_data'
        encoding_fp = f'{folder}/{self.data_type}_X.txt'
        read_names_fp = f'{folder}/{self.data_type}_read_names.txt'
        if os.path.exists(encoding_fp) and os.path.exists(read_names_fp):
            print(f'Encoded {self.data_type} already exists so re-using'
                  ', use --redo to rebuild')
            alignment_fh.close()
            reads_fh.close()
        else:
            encoding_fh = open(encoding_fp, 'w')
            read_names_fh = open(read_names_fp, 'w')
            # group alignment rows by query accession; rows for one read are
            # contiguous in the alignment file so no pre-sorting is needed
            align_iter = itertools.groupby(alignment_fh,
                                           lambda x: x.split('\t')[0])
            for query_acc, query_hits in align_iter:
                # one score vector per read; keep the best score per label
                vector = np.zeros(len(label_to_field))
                for hit in query_hits:
                    alignment = hit.strip().split('\t')
                    alignment_aro = alignment[1].split('|')[2]
                    score = float(alignment[out_field])
                    if self.data_type == 'family':
                        label = card.aro_to_gene_family[alignment_aro]
                    else:
                        # 'gene'/'aro': the label is the ARO accession itself
                        label = alignment_aro
                    field = label_to_field[label]
                    if metric == 'evalue':
                        # log-transform e-values; smaller (more negative
                        # log) is a better hit, so keep the minimum
                        score = math.log(score)
                        if score < vector[field]:
                            vector[field] = score
                    else:
                        if score > vector[field]:
                            vector[field] = score
                # dump the finished vector for this read
                encoded_vector = "\t".join([str(x) for x in np.nditer(vector)])
                encoding_fh.write(encoded_vector + "\n")
                read_names_fh.write(query_acc + "\n")
            encoding_fh.close()
            read_names_fh.close()
            alignment_fh.close()
            # bug fix: reads_fh used to leak on this branch
            reads_fh.close()
        x_encoding = np.loadtxt(encoding_fp, delimiter='\t')
        print(x_encoding.shape)
        # pre-compute maximum bitscores per label (cached on the card object;
        # currently only used by the normalisation variants below)
        if self.data_type == 'family':
            card.calculate_maximum_bitscores_per_family()
            max_bitscores = card.max_family_bitscores
        else:
            card.calculate_maximum_bitscores_per_aro()
            max_bitscores = card.max_aro_bitscores
        if norm and metric == 'bitscore':
            print("Normalising encoding")
            x_encoding = normalize(x_encoding, axis=1, norm='l1')
        elif norm and metric != 'bitscore':
            print("Can't normalise non bitscore metrics currently, must set metric to bitscore")
            sys.exit(1)
        return x_encoding
class Kmer():
    """Encode fastq reads as k-mer count vectors."""

    def __init__(self, metagenome_fp, k):
        # Path of the fastq file and the k-mer length used for counting.
        self.metagenome_fp = metagenome_fp
        self.k = k

    def encode(self):
        """Return a matrix with one k-mer count vector per fastq read."""
        alphabet = ['A', 'T', 'G', 'C', 'N']
        # Map each possible k-mer over the alphabet to a column index.
        kmer_index = {}
        for position, letters in enumerate(itertools.product(alphabet, repeat=self.k)):
            kmer_index["".join(letters)] = position
        rows = []
        with open(self.metagenome_fp) as handle:
            for line_number, raw_line in enumerate(handle):
                # In a fastq file, every 4th line starting at index 1 holds
                # the sequence itself.
                if line_number % 4 != 1:
                    continue
                # Collapse ambiguity codes into 'N' (note: the pattern is a
                # character class, so a literal comma would also become 'N').
                cleaned = re.sub("[M,X,R,S,Y,K]", 'N', raw_line.strip())
                self.seq = cleaned
                rows.append(self.read_encode(cleaned, kmer_index))
        return np.vstack(rows)

    def read_encode(self, seq, tnf):
        """Count the occurrences of each k-mer of the current read into a
        vector indexed by the `tnf` lookup table."""
        counts = np.zeros(len(tnf))
        for kmer in self.window(self.k):
            counts[tnf[kmer]] += 1
        return counts

    def window(self, window_size):
        """Yield every substring of `self.seq` of length `window_size`
        (a sliding window with step 1)."""
        iterator = iter(self.seq)
        current = tuple(itertools.islice(iterator, window_size))
        if len(current) == window_size:
            yield "".join(current)
        for character in iterator:
            current = current[1:] + (character,)
            yield "".join(current)
# dna2vec kmer embedding
class KMer_embedding():
    """Placeholder for a learned (dna2vec-style) k-mer embedding encoder; not implemented yet."""
    pass
| [
"os.path.exists",
"itertools.islice",
"subprocess.check_call",
"numpy.nditer",
"itertools.product",
"math.log",
"numpy.loadtxt",
"numpy.vstack",
"sys.exit",
"sklearn.preprocessing.normalize"
] | [((1664, 1842), 'subprocess.check_call', 'subprocess.check_call', (['f"""diamond blastx --db {db} --out training_data/{self.data_type}_diamond.out6 --outfmt 6 --threads 2 --query {reads} --more-sensitive"""'], {'shell': '(True)'}), "(\n f'diamond blastx --db {db} --out training_data/{self.data_type}_diamond.out6 --outfmt 6 --threads 2 --query {reads} --more-sensitive'\n , shell=True)\n", (1685, 1842), False, 'import subprocess\n'), ((1951, 2078), 'subprocess.check_call', 'subprocess.check_call', (['f"""mmseqs easy-search {db} {reads} training_data/{self.data_type}_mmseqs.out6 /tmp"""'], {'shell': '(True)'}), "(\n f'mmseqs easy-search {db} {reads} training_data/{self.data_type}_mmseqs.out6 /tmp'\n , shell=True)\n", (1972, 2078), False, 'import subprocess\n'), ((6467, 6506), 'numpy.loadtxt', 'np.loadtxt', (['encoding_fp'], {'delimiter': '"""\t"""'}), "(encoding_fp, delimiter='\\t')\n", (6477, 6506), True, 'import numpy as np\n'), ((8753, 8765), 'numpy.vstack', 'np.vstack', (['X'], {}), '(X)\n', (8762, 8765), True, 'import numpy as np\n'), ((1500, 1528), 'os.path.exists', 'os.path.exists', (["(db + '.dmnd')"], {}), "(db + '.dmnd')\n", (1514, 1528), False, 'import os\n'), ((4381, 4408), 'os.path.exists', 'os.path.exists', (['encoding_fp'], {}), '(encoding_fp)\n', (4395, 4408), False, 'import os\n'), ((4413, 4442), 'os.path.exists', 'os.path.exists', (['read_names_fp'], {}), '(read_names_fp)\n', (4427, 4442), False, 'import os\n'), ((6976, 7016), 'sklearn.preprocessing.normalize', 'normalize', (['x_encoding'], {'axis': '(1)', 'norm': '"""l1"""'}), "(x_encoding, axis=1, norm='l1')\n", (6985, 7016), False, 'from sklearn.preprocessing import normalize\n'), ((9374, 9407), 'itertools.islice', 'itertools.islice', (['it', 'window_size'], {}), '(it, window_size)\n', (9390, 9407), False, 'import itertools\n'), ((553, 610), 'os.path.exists', 'os.path.exists', (['f"""training_data/{data_type}_diamond.out6"""'], {}), "(f'training_data/{data_type}_diamond.out6')\n", (567, 610), 
False, 'import os\n'), ((7464, 7475), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7472, 7475), False, 'import sys\n'), ((8254, 8313), 'itertools.product', 'itertools.product', (["['A', 'T', 'G', 'C', 'N']"], {'repeat': 'self.k'}), "(['A', 'T', 'G', 'C', 'N'], repeat=self.k)\n", (8271, 8313), False, 'import itertools\n'), ((959, 1015), 'os.path.exists', 'os.path.exists', (['f"""training_data/{data_type}_mmseqs.out6"""'], {}), "(f'training_data/{data_type}_mmseqs.out6')\n", (973, 1015), False, 'import os\n'), ((5829, 5844), 'math.log', 'math.log', (['score'], {}), '(score)\n', (5837, 5844), False, 'import math\n'), ((6214, 6231), 'numpy.nditer', 'np.nditer', (['vector'], {}), '(vector)\n', (6223, 6231), True, 'import numpy as np\n')] |
import warnings
from itertools import product
from numbers import Real
from typing import Dict, List, Set, Tuple, Union
import numpy as np
import pandas as pd
from gbd_mapping import (
Cause,
ModelableEntity,
RiskFactor,
causes,
covariates,
risk_factors,
)
from vivarium.framework.artifact import EntityKey
from vivarium_gbd_access import constants as gbd_constants
from vivarium_gbd_access import gbd
from vivarium_gbd_access.utilities import get_draws, query
from vivarium_inputs import globals as vi_globals
from vivarium_inputs import utilities as vi_utils
from vivarium_inputs import utility_data
from vivarium_inputs.mapping_extension import (
AlternativeRiskFactor,
alternative_risk_factors,
)
from vivarium_inputs.validation.raw import check_metadata
from vivarium_gates_child_iv_iron.constants.metadata import AGE_GROUP, GBD_2019_ROUND_ID
def get_data(key: EntityKey, entity: ModelableEntity, location: str, source: str, gbd_id_type: str,
             age_group_ids: Set[int], gbd_round_id: int, decomp_step: str = 'iterative') -> pd.DataFrame:
    """Pull raw GBD draws for *entity* at *location*.

    Mirrors the call chain interface.get_measure -> core.get_data ->
    extract.extract_data, but lets the caller pin the age groups, round and
    decomp step explicitly.

    :return: raw draws as returned by ``vivarium_gbd_access.utilities.get_draws``.
    """
    # interface.get_measure / core.get_data: resolve a location name to its id.
    if isinstance(location, str):
        location_id = utility_data.get_location_id(location)
    else:
        location_id = location
    # extract.extract_data: make sure the requested measure exists for the entity.
    check_metadata(entity, key.measure)
    # gbd.get_{measure}: fetch draws for both sexes over the requested ages.
    return get_draws(
        gbd_id_type=gbd_id_type,
        gbd_id=entity.gbd_id,
        source=source,
        location_id=location_id,
        sex_id=gbd_constants.SEX.MALE + gbd_constants.SEX.FEMALE,
        age_group_id=list(age_group_ids),
        gbd_round_id=gbd_round_id,
        decomp_step=decomp_step,
        status='best',
    )
def get_entity(key: str):
    """Resolve an EntityKey string to its gbd_mapping entity.

    The key's ``type`` selects the mapping (cause / covariate / risk_factor /
    alternative_risk_factor) and the key's ``name`` indexes into it.
    """
    entity_key = EntityKey(key)
    mappings = {
        'cause': causes,
        'covariate': covariates,
        'risk_factor': risk_factors,
        'alternative_risk_factor': alternative_risk_factors,
    }
    return mappings[entity_key.type][entity_key.name]
def process_exposure(data: pd.DataFrame, key: str, entity: Union[RiskFactor, AlternativeRiskFactor],
                     location: str, gbd_round_id: int, age_group_ids: List[int] = None) -> pd.DataFrame:
    """Clean raw GBD exposure draws and reshape them for simulation use.

    Filters the draws to the single valid measure, drops residual-category
    artifacts, clips exposures to the minimum allowed value, and for
    categorical risks renormalizes so the categories sum to one before
    validating and reshaping the result.

    :raises vi_globals.DataAbnormalError: if the draws do not contain exactly
        one of the allowed measure ids.
    """
    data['rei_id'] = entity.gbd_id
    # from vivarium_inputs.extract.extract_exposure
    allowable_measures = [vi_globals.MEASURES['Proportion'], vi_globals.MEASURES['Continuous'],
                          vi_globals.MEASURES['Prevalence']]
    proper_measure_id = set(data.measure_id).intersection(allowable_measures)
    if len(proper_measure_id) != 1:
        raise vi_globals.DataAbnormalError(f'Exposure data have {len(proper_measure_id)} measure id(s). '
                                           f'Data should have exactly one id out of {allowable_measures} '
                                           f'but came back with {proper_measure_id}.')
    data = data[data.measure_id == proper_measure_id.pop()]
    # from vivarium_inputs.core.get_exposure
    # FIX: pass `columns=` by keyword -- the positional `axis` argument of
    # DataFrame.drop was deprecated in pandas 1.x and removed in pandas 2.0.
    data = data.drop(columns='modelable_entity_id')
    if entity.name in vi_globals.EXTRA_RESIDUAL_CATEGORY:
        # noinspection PyUnusedLocal
        cat = vi_globals.EXTRA_RESIDUAL_CATEGORY[entity.name]
        data = data.drop(labels=data.query('parameter == @cat').index)
    data[vi_globals.DRAW_COLUMNS] = data[vi_globals.DRAW_COLUMNS].clip(lower=vi_globals.MINIMUM_EXPOSURE_VALUE)
    if entity.distribution in ['dichotomous', 'ordered_polytomous', 'unordered_polytomous']:
        tmrel_cat = utility_data.get_tmrel_category(entity)
        exposed = data[data.parameter != tmrel_cat]
        unexposed = data[data.parameter == tmrel_cat]
        # FIXME: We fill 1 as exposure of tmrel category, which is not correct.
        data = pd.concat([normalize_age_and_years(exposed, fill_value=0, gbd_round_id=gbd_round_id),
                          normalize_age_and_years(unexposed, fill_value=1, gbd_round_id=gbd_round_id)],
                         ignore_index=True)
        # normalize so all categories sum to 1
        cols = list(set(data.columns).difference(vi_globals.DRAW_COLUMNS + ['parameter']))
        data = data.set_index(cols + ['parameter'])
        sums = (
            data.groupby(cols)[vi_globals.DRAW_COLUMNS].sum()
            .reindex(index=data.index)
        )
        data = data.divide(sums).reset_index()
    else:
        data = vi_utils.normalize(data, fill_value=0)
    data = data.filter(vi_globals.DEMOGRAPHIC_COLUMNS + vi_globals.DRAW_COLUMNS + ['parameter'])
    data = validate_and_reshape_gbd_data(data, entity, key, location, gbd_round_id, age_group_ids)
    return data
def process_relative_risk(data: pd.DataFrame, key: str, entity: Union[RiskFactor, AlternativeRiskFactor],
                          location: str, gbd_round_id: int, age_group_ids: List[int] = None,
                          whitelist_sids: bool = False) -> pd.DataFrame:
    """Clean raw GBD relative-risk draws and reshape them for simulation use.

    Tags draws with the risk id, filters to most-detailed non-YLL-only causes
    (optionally whitelisting SIDS), maps morbidity/mortality flags to affected
    measures, applies cause age restrictions, normalizes over age and year,
    snaps near-one TMREL draws to exactly one for categorical risks, and
    finally validates and reshapes the result.
    """
    # from vivarium_gbd_access.gbd.get_relative_risk
    data['rei_id'] = entity.gbd_id
    # from vivarium_inputs.extract.extract_relative_risk
    data = vi_utils.filter_to_most_detailed_causes(data)
    # from vivarium_inputs.core.get_relative_risk
    # YLL-only causes carry no morbidity effect; drop them (SIDS may be kept).
    yll_only_causes = set([c.gbd_id for c in causes if c.restrictions.yll_only
                           and (c != causes.sudden_infant_death_syndrome if whitelist_sids else True)])
    data = data[~data.cause_id.isin(yll_only_causes)]
    data = vi_utils.convert_affected_entity(data, 'cause_id')
    # Map the (morbidity, mortality) flag pair to the measure the RR affects.
    morbidity = data.morbidity == 1
    mortality = data.mortality == 1
    data.loc[morbidity & mortality, 'affected_measure'] = 'incidence_rate'
    data.loc[morbidity & ~mortality, 'affected_measure'] = 'incidence_rate'
    data.loc[~morbidity & mortality, 'affected_measure'] = 'excess_mortality_rate'
    data = filter_relative_risk_to_cause_restrictions(data)
    data = data.filter(vi_globals.DEMOGRAPHIC_COLUMNS + ['affected_entity', 'affected_measure', 'parameter']
                       + vi_globals.DRAW_COLUMNS)
    # Normalize age/year within each (cause, category) group; missing cells get RR=1.
    data = (data.groupby(['affected_entity', 'parameter'])
            .apply(normalize_age_and_years, fill_value=1, gbd_round_id=gbd_round_id, age_group_ids=age_group_ids)
            .reset_index(drop=True))
    if entity.distribution in ['dichotomous', 'ordered_polytomous', 'unordered_polytomous']:
        # Snap TMREL-category draws that are numerically close to 1.0 to exactly 1.0.
        tmrel_cat = utility_data.get_tmrel_category(entity)
        tmrel_mask = data.parameter == tmrel_cat
        data.loc[tmrel_mask, vi_globals.DRAW_COLUMNS] = (data.loc[tmrel_mask, vi_globals.DRAW_COLUMNS]
                                                         .mask(np.isclose(data.loc[tmrel_mask, vi_globals.DRAW_COLUMNS],
                                                                          1.0), 1.0))
    data = validate_and_reshape_gbd_data(data, entity, key, location, gbd_round_id, age_group_ids)
    return data
def normalize_age_and_years(data: pd.DataFrame, fill_value: Real = None,
                            cols_to_fill: List[str] = vi_globals.DRAW_COLUMNS,
                            gbd_round_id: int = GBD_2019_ROUND_ID,
                            age_group_ids: List[int] = AGE_GROUP.GBD_2019) -> pd.DataFrame:
    """Expand *data* onto the full annual year range and the GBD age groups.

    Sex is normalized first; year-invariant data is replicated per year and
    binned estimation years are interpolated to annual values; finally the
    data is restricted to the annual range and age-normalized.
    """
    data = vi_utils.normalize_sex(data, fill_value, cols_to_fill)
    # vi_inputs.normalize_year(data)
    binned_years = get_gbd_estimation_years(gbd_round_id)
    annual_years = list(range(min(binned_years), max(binned_years) + 1))
    if 'year_id' not in data:
        # Year-invariant data: replicate one copy per annual year.
        yearly_copies = []
        for year in annual_years:
            annual = data.copy()
            annual['year_id'] = year
            yearly_copies.append(annual)
        data = pd.concat(yearly_copies, ignore_index=True)
    elif set(data.year_id) == set(binned_years):
        data = vi_utils.interpolate_year(data)
    # otherwise the data is already annual; nothing to do.
    data = data[data.year_id.isin(annual_years)]
    return _normalize_age(data, fill_value, cols_to_fill, age_group_ids)
def get_gbd_estimation_years(gbd_round_id: int) -> List[int]:
    """Return the binned estimation years for a particular GBD round."""
    # Imported lazily because db_queries needs database access at import time.
    from db_queries import get_demographics
    warnings.filterwarnings("default", module="db_queries")
    demographics = get_demographics(gbd_constants.CONN_DEFS.EPI, gbd_round_id=gbd_round_id)
    return demographics['year_id']
def _normalize_age(data: pd.DataFrame, fill_value: Real, cols_to_fill: List[str],
                   age_group_ids: List[int] = None) -> pd.DataFrame:
    """Expand *data* onto the full set of GBD age groups.

    Four cases: no age column (untouched), the 'all ages' group (broadcast to
    every age), a subset of ages (missing ages filled with *fill_value*), or
    already complete (untouched).
    """
    if 'age_group_id' in data.columns:
        data_ages = set(data.age_group_id.unique())
    else:
        data_ages = set()
    if age_group_ids:
        gbd_ages = set(age_group_ids)
    else:
        gbd_ages = set(utility_data.get_age_group_ids())
    if not data_ages:
        # Data does not correspond to individuals, so no age column necessary.
        return data
    if data_ages == {vi_globals.SPECIAL_AGES['all_ages']}:
        # Broadcast the all-ages rows to every individual age group.
        broadcast_copies = []
        for age in gbd_ages:
            per_age = data.copy()
            per_age.loc[:, 'age_group_id'] = age
            broadcast_copies.append(per_age)
        return pd.concat(broadcast_copies, ignore_index=True)
    if data_ages < gbd_ages:
        # Reindex onto the full age set, filling missing cells with fill_value.
        key_columns = list(data.columns.difference(cols_to_fill))
        key_columns.remove('age_group_id')
        expected_index = pd.MultiIndex.from_product(
            [data[c].unique() for c in key_columns] + [gbd_ages],
            names=key_columns + ['age_group_id'])
        return (data.set_index(key_columns + ['age_group_id'])
                .reindex(expected_index, fill_value=fill_value)
                .reset_index())
    # data_ages == gbd_ages: already complete.
    return data
def validate_and_reshape_gbd_data(data: pd.DataFrame, entity: ModelableEntity, key: EntityKey,
                                  location: str, gbd_round_id: int, age_group_ids: List[int] = None) -> pd.DataFrame:
    """Scrub GBD conventions from *data*, split age and year intervals into
    start/end columns, and sort hierarchically (dropping the location level).
    """
    # from vivarium_inputs.core.get_data
    data = vi_utils.reshape(data, value_cols=vi_globals.DRAW_COLUMNS)
    # from interface.get_measure
    data = _scrub_gbd_conventions(data, location, age_group_ids)
    estimation_years = get_gbd_estimation_years(gbd_round_id)
    validation_years = pd.DataFrame({'year_start': range(min(estimation_years), max(estimation_years) + 1)})
    validation_years['year_end'] = validation_years['year_start'] + 1
    # NOTE(review): simulation validation is deliberately disabled here.
    # validate_for_simulation(data, entity, key.measure, location, years=validation_years,
    #                        age_bins=get_gbd_age_bins(age_group_ids))
    data = vi_utils.split_interval(data, interval_column='age', split_column_prefix='age')
    data = vi_utils.split_interval(data, interval_column='year', split_column_prefix='year')
    data = vi_utils.sort_hierarchical_data(data).droplevel('location')
    return data
def _scrub_gbd_conventions(data: pd.DataFrame, location: str, age_group_ids: List[int] = None) -> pd.DataFrame:
    """Strip GBD id conventions from *data*: location, sex, age, year and
    affected-entity representations are scrubbed in turn."""
    scrubbed = vi_utils.scrub_location(data, location)
    scrubbed = vi_utils.scrub_sex(scrubbed)
    scrubbed = _scrub_age(scrubbed, age_group_ids)
    scrubbed = vi_utils.scrub_year(scrubbed)
    return vi_utils.scrub_affected_entity(scrubbed)
def _scrub_age(data: pd.DataFrame, age_group_ids: List[int] = None) -> pd.DataFrame:
    """Replace an 'age_group_id' index level with left-closed age Intervals.

    Data without an age level is returned unchanged.
    """
    if 'age_group_id' not in data.index.names:
        return data
    age_bins = get_gbd_age_bins(age_group_ids).set_index('age_group_id')
    level_position = data.index.names.index('age_group_id')
    id_levels = data.index.levels[level_position]
    intervals = [pd.Interval(age_bins.age_start[age_id], age_bins.age_end[age_id], closed='left')
                 for age_id in id_levels]
    data.index = data.index.rename('age', 'age_group_id').set_levels(intervals, 'age')
    return data
def get_gbd_age_bins(age_group_ids: List[int] = None) -> pd.DataFrame:
    """Fetch age-bin boundaries for the given age group ids.

    Defaults to the standard GBD 2019 age groups when no ids are given. The
    birth-prevalence bin (age_end == 0) gets age_start -1 so downstream
    validation accepts it.
    """
    if not age_group_ids:
        age_group_ids = gbd.get_age_group_id()
    # from gbd.get_age_bins()
    q = f"""
    SELECT age_group_id,
           age_group_years_start,
           age_group_years_end,
           age_group_name
    FROM age_group
    WHERE age_group_id IN ({','.join([str(a) for a in age_group_ids])})
    """
    raw_age_bins = query(q, 'shared')
    # from utility_data.get_age_bins()
    keep_columns = ['age_group_id', 'age_group_name', 'age_group_years_start', 'age_group_years_end']
    age_bins = raw_age_bins[keep_columns].rename(
        columns={'age_group_years_start': 'age_start', 'age_group_years_end': 'age_end'})
    # set age start for birth prevalence age bin to -1 to avoid validation issues
    age_bins.loc[age_bins['age_end'] == 0.0, 'age_start'] = -1.0
    return age_bins
def filter_relative_risk_to_cause_restrictions(data: pd.DataFrame) -> pd.DataFrame:
    """Apply cause-specific age restrictions to relative-risk data.

    Rows affecting incidence_rate are filtered with the cause's YLD age
    restrictions; rows affecting excess_mortality_rate with its YLL age
    restrictions.
    """
    filtered_frames = []
    affected_entities = set(data.affected_entity)
    affected_measures = set(data.affected_measure)
    for cause_name, measure in product(affected_entities, affected_measures):
        subset = data[(data.affected_entity == cause_name) & (data.affected_measure == measure)]
        cause = get_entity(EntityKey(f'cause.{cause_name}.{measure}'))
        restriction = 'yll' if measure == 'excess_mortality_rate' else 'yld'
        start, end = vi_utils.get_age_group_ids_by_restriction(cause, restriction)
        filtered_frames.append(subset[subset.age_group_id.isin(range(start, end + 1))])
    return pd.concat(filtered_frames)
def get_intervals_from_categories(lbwsg_type: str, categories: Dict[str, str]) -> pd.Series:
    """Parse LBWSG category descriptions into a Series of pd.Interval endpoints.

    :param lbwsg_type: either 'low_birth_weight' or 'short_gestation'.
    :param categories: mapping of category name to its description string.
    :raises ValueError: for any other *lbwsg_type*.
    """
    parsers = {
        'low_birth_weight': parse_low_birth_weight_description,
        'short_gestation': parse_short_gestation_description,
    }
    if lbwsg_type not in parsers:
        raise ValueError(f'Unrecognized risk type {lbwsg_type}. Risk type must be low_birth_weight or short_gestation')
    parse = parsers[lbwsg_type]
    category_endpoints = pd.Series(
        {cat: parse(description) for cat, description in categories.items()},
        name=f'{lbwsg_type}.endpoints'
    )
    category_endpoints.index.name = 'parameter'
    return category_endpoints
def parse_low_birth_weight_description(description: str) -> pd.Interval:
    """Extract the birth-weight interval (grams) from an LBWSG description.

    Descriptions look like: 'Birth prevalence - [34, 36) wks, [2000, 2500) g'.
    """
    weight_section = description.split(', [')[1]
    lower, upper = weight_section.split(')')[0].split(', ')
    return pd.Interval(float(lower), float(upper))
def parse_short_gestation_description(description: str) -> pd.Interval:
    """Extract the gestation interval (weeks) from an LBWSG description.

    Descriptions look like: 'Birth prevalence - [34, 36) wks, [2000, 2500) g'.
    """
    gestation_section = description.split('- [')[1]
    lower, upper = gestation_section.split(')')[0].split(', ')
    return pd.Interval(float(lower), float(upper))
| [
"pandas.Interval",
"vivarium_inputs.utilities.split_interval",
"vivarium_gbd_access.gbd.get_age_group_id",
"vivarium_inputs.utilities.scrub_affected_entity",
"vivarium_inputs.utilities.sort_hierarchical_data",
"vivarium_inputs.utility_data.get_tmrel_category",
"itertools.product",
"vivarium.framework.... | [((1404, 1439), 'vivarium_inputs.validation.raw.check_metadata', 'check_metadata', (['entity', 'key.measure'], {}), '(entity, key.measure)\n', (1418, 1439), False, 'from vivarium_inputs.validation.raw import check_metadata\n'), ((1554, 1816), 'vivarium_gbd_access.utilities.get_draws', 'get_draws', ([], {'gbd_id_type': 'gbd_id_type', 'gbd_id': 'entity.gbd_id', 'source': 'source', 'location_id': 'location_id', 'sex_id': '(gbd_constants.SEX.MALE + gbd_constants.SEX.FEMALE)', 'age_group_id': 'age_group_ids', 'gbd_round_id': 'gbd_round_id', 'decomp_step': 'decomp_step', 'status': '"""best"""'}), "(gbd_id_type=gbd_id_type, gbd_id=entity.gbd_id, source=source,\n location_id=location_id, sex_id=gbd_constants.SEX.MALE + gbd_constants.\n SEX.FEMALE, age_group_id=age_group_ids, gbd_round_id=gbd_round_id,\n decomp_step=decomp_step, status='best')\n", (1563, 1816), False, 'from vivarium_gbd_access.utilities import get_draws, query\n'), ((2253, 2267), 'vivarium.framework.artifact.EntityKey', 'EntityKey', (['key'], {}), '(key)\n', (2262, 2267), False, 'from vivarium.framework.artifact import EntityKey\n'), ((5349, 5394), 'vivarium_inputs.utilities.filter_to_most_detailed_causes', 'vi_utils.filter_to_most_detailed_causes', (['data'], {}), '(data)\n', (5388, 5394), True, 'from vivarium_inputs import utilities as vi_utils\n'), ((5695, 5745), 'vivarium_inputs.utilities.convert_affected_entity', 'vi_utils.convert_affected_entity', (['data', '"""cause_id"""'], {}), "(data, 'cause_id')\n", (5727, 5745), True, 'from vivarium_inputs import utilities as vi_utils\n'), ((7435, 7489), 'vivarium_inputs.utilities.normalize_sex', 'vi_utils.normalize_sex', (['data', 'fill_value', 'cols_to_fill'], {}), '(data, fill_value, cols_to_fill)\n', (7457, 7489), True, 'from vivarium_inputs import utilities as vi_utils\n'), ((8502, 8557), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""default"""'], {'module': '"""db_queries"""'}), "('default', module='db_queries')\n", 
(8525, 8557), False, 'import warnings\n'), ((10327, 10385), 'vivarium_inputs.utilities.reshape', 'vi_utils.reshape', (['data'], {'value_cols': 'vi_globals.DRAW_COLUMNS'}), '(data, value_cols=vi_globals.DRAW_COLUMNS)\n', (10343, 10385), True, 'from vivarium_inputs import utilities as vi_utils\n'), ((10902, 10981), 'vivarium_inputs.utilities.split_interval', 'vi_utils.split_interval', (['data'], {'interval_column': '"""age"""', 'split_column_prefix': '"""age"""'}), "(data, interval_column='age', split_column_prefix='age')\n", (10925, 10981), True, 'from vivarium_inputs import utilities as vi_utils\n'), ((10993, 11079), 'vivarium_inputs.utilities.split_interval', 'vi_utils.split_interval', (['data'], {'interval_column': '"""year"""', 'split_column_prefix': '"""year"""'}), "(data, interval_column='year', split_column_prefix=\n 'year')\n", (11016, 11079), True, 'from vivarium_inputs import utilities as vi_utils\n'), ((11287, 11326), 'vivarium_inputs.utilities.scrub_location', 'vi_utils.scrub_location', (['data', 'location'], {}), '(data, location)\n', (11310, 11326), True, 'from vivarium_inputs import utilities as vi_utils\n'), ((11338, 11362), 'vivarium_inputs.utilities.scrub_sex', 'vi_utils.scrub_sex', (['data'], {}), '(data)\n', (11356, 11362), True, 'from vivarium_inputs import utilities as vi_utils\n'), ((11417, 11442), 'vivarium_inputs.utilities.scrub_year', 'vi_utils.scrub_year', (['data'], {}), '(data)\n', (11436, 11442), True, 'from vivarium_inputs import utilities as vi_utils\n'), ((11454, 11490), 'vivarium_inputs.utilities.scrub_affected_entity', 'vi_utils.scrub_affected_entity', (['data'], {}), '(data)\n', (11484, 11490), True, 'from vivarium_inputs import utilities as vi_utils\n'), ((12649, 12667), 'vivarium_gbd_access.utilities.query', 'query', (['q', '"""shared"""'], {}), "(q, 'shared')\n", (12654, 12667), False, 'from vivarium_gbd_access.utilities import get_draws, query\n'), ((13628, 13673), 'itertools.product', 'product', (['affected_entities', 
'affected_measures'], {}), '(affected_entities, affected_measures)\n', (13635, 13673), False, 'from itertools import product\n'), ((14150, 14165), 'pandas.concat', 'pd.concat', (['temp'], {}), '(temp)\n', (14159, 14165), True, 'import pandas as pd\n'), ((1223, 1261), 'vivarium_inputs.utility_data.get_location_id', 'utility_data.get_location_id', (['location'], {}), '(location)\n', (1251, 1261), False, 'from vivarium_inputs import utility_data\n'), ((3795, 3834), 'vivarium_inputs.utility_data.get_tmrel_category', 'utility_data.get_tmrel_category', (['entity'], {}), '(entity)\n', (3826, 3834), False, 'from vivarium_inputs import utility_data\n'), ((4666, 4704), 'vivarium_inputs.utilities.normalize', 'vi_utils.normalize', (['data'], {'fill_value': '(0)'}), '(data, fill_value=0)\n', (4684, 4704), True, 'from vivarium_inputs import utilities as vi_utils\n'), ((6596, 6635), 'vivarium_inputs.utility_data.get_tmrel_category', 'utility_data.get_tmrel_category', (['entity'], {}), '(entity)\n', (6627, 6635), False, 'from vivarium_inputs import utility_data\n'), ((7956, 7988), 'pandas.concat', 'pd.concat', (['df'], {'ignore_index': '(True)'}), '(df, ignore_index=True)\n', (7965, 7988), True, 'import pandas as pd\n'), ((8570, 8642), 'db_queries.get_demographics', 'get_demographics', (['gbd_constants.CONN_DEFS.EPI'], {'gbd_round_id': 'gbd_round_id'}), '(gbd_constants.CONN_DEFS.EPI, gbd_round_id=gbd_round_id)\n', (8586, 8642), False, 'from db_queries import get_demographics\n'), ((12264, 12286), 'vivarium_gbd_access.gbd.get_age_group_id', 'gbd.get_age_group_id', ([], {}), '()\n', (12284, 12286), False, 'from vivarium_gbd_access import gbd\n'), ((6851, 6913), 'numpy.isclose', 'np.isclose', (['data.loc[tmrel_mask, vi_globals.DRAW_COLUMNS]', '(1.0)'], {}), '(data.loc[tmrel_mask, vi_globals.DRAW_COLUMNS], 1.0)\n', (6861, 6913), True, 'import numpy as np\n'), ((8056, 8087), 'vivarium_inputs.utilities.interpolate_year', 'vi_utils.interpolate_year', (['data'], {}), '(data)\n', (8081, 
8087), True, 'from vivarium_inputs import utilities as vi_utils\n'), ((8919, 8951), 'vivarium_inputs.utility_data.get_age_group_ids', 'utility_data.get_age_group_ids', ([], {}), '()\n', (8949, 8951), False, 'from vivarium_inputs import utility_data\n'), ((9395, 9428), 'pandas.concat', 'pd.concat', (['dfs'], {'ignore_index': '(True)'}), '(dfs, ignore_index=True)\n', (9404, 9428), True, 'import pandas as pd\n'), ((11086, 11123), 'vivarium_inputs.utilities.sort_hierarchical_data', 'vi_utils.sort_hierarchical_data', (['data'], {}), '(data)\n', (11117, 11123), True, 'from vivarium_inputs import utilities as vi_utils\n'), ((11819, 11904), 'pandas.Interval', 'pd.Interval', (['age_bins.age_start[age_id]', 'age_bins.age_end[age_id]'], {'closed': '"""left"""'}), "(age_bins.age_start[age_id], age_bins.age_end[age_id], closed='left'\n )\n", (11830, 11904), True, 'import pandas as pd\n'), ((13790, 13827), 'vivarium.framework.artifact.EntityKey', 'EntityKey', (['f"""cause.{cause}.{measure}"""'], {}), "(f'cause.{cause}.{measure}')\n", (13799, 13827), False, 'from vivarium.framework.artifact import EntityKey\n'), ((13901, 13956), 'vivarium_inputs.utilities.get_age_group_ids_by_restriction', 'vi_utils.get_age_group_ids_by_restriction', (['cause', '"""yll"""'], {}), "(cause, 'yll')\n", (13942, 13956), True, 'from vivarium_inputs import utilities as vi_utils\n'), ((14014, 14069), 'vivarium_inputs.utilities.get_age_group_ids_by_restriction', 'vi_utils.get_age_group_ids_by_restriction', (['cause', '"""yld"""'], {}), "(cause, 'yld')\n", (14055, 14069), True, 'from vivarium_inputs import utilities as vi_utils\n')] |
import numpy as np
from numpy import sum as npsum
from numpy import zeros, tile, r_, squeeze
from numpy.linalg import solve, norm
from functions_legacy.SmartInverse import SmartInverse
def MaxLikelihoodFPLocDispT(epsi, p, nu, threshold, last=0, smartinverse=0, maxiter=10 ** 5):
    """Maximum-likelihood location/dispersion with Flexible Probabilities under
    a Student t assumption, via fixed-point (EM-style) iteration.

    :param epsi: [matrix](i_ x t_end) timeseries of invariants
    :param p: [vector](1 x t_end) flexible probabilities of the invariants
    :param nu: [scalar] degrees of freedom of the t distribution
    :param threshold: [scalar] or [vector](1 x 2) convergence threshold
    :param last: if nonzero, only the last mean/covariance are returned
    :param smartinverse: unused here; kept for interface compatibility
    :param maxiter: maximum number of iterations
    :return: (mu_MLFP, sigma2_MLFP, error) where the first two hold the full
        iteration history (or only the last iterate when ``last != 0``) and
        error holds the two relative errors at the final iteration.
    """
    if isinstance(threshold, float):
        threshold = np.array([threshold, threshold])

    i_, t_ = epsi.shape

    # Initialization: flexible-probability mean and covariance.
    mu_path = epsi @ p.T                                       # (i_, 1)
    centered = epsi - mu_path[:, [0]]
    sigma2_path = (centered @ (np.diagflat(p) @ centered.T))[:, :, np.newaxis]

    error = [10 ** 6, 10 ** 6]
    k = 0
    while npsum(error > threshold) >= 1 and k < maxiter:
        k = k + 1
        # E-step: t-weights from the squared Mahalanobis distances.
        centered = epsi - mu_path[:, [k - 1]]
        mahalanobis2 = npsum(centered * solve(sigma2_path[:, :, k - 1], centered), 0)
        w = (nu + i_) / (nu + mahalanobis2)
        # M-step: weighted FP mean ...
        mu_new = (npsum((p * w) * epsi, 1) / (p @ w.T))[..., np.newaxis]
        mu_path = np.concatenate([mu_path, mu_new], axis=1)
        # ... and weighted FP dispersion, symmetrized against round-off.
        centered = epsi - mu_path[:, [k]]
        sigma2_new = (centered @ np.diagflat(p * w) @ centered.T)[..., np.newaxis]
        sigma2_path = np.concatenate([sigma2_path, sigma2_new], axis=2)
        sigma2_path[:, :, k] = (squeeze(sigma2_path[:, :, k]) + squeeze(sigma2_path[:, :, k]).T) / 2
        # Relative changes of location and dispersion drive convergence.
        error[0] = norm(mu_path[:, k] - mu_path[:, k - 1]) / norm(mu_path[:, k])
        error[1] = norm(sigma2_path[:, :, k] - sigma2_path[:, :, k - 1], ord='fro') / norm(sigma2_path[:, :, k],
                                                                                           ord='fro')
    if last != 0:
        return mu_path[:, -1], sigma2_path[:, :, -1], error
    return mu_path, sigma2_path, error
| [
"numpy.tile",
"numpy.linalg.solve",
"numpy.squeeze",
"numpy.sum",
"numpy.zeros",
"numpy.linalg.norm",
"numpy.diagflat"
] | [((1553, 1567), 'numpy.zeros', 'zeros', (['(i_, 1)'], {}), '((i_, 1))\n', (1558, 1567), False, 'from numpy import zeros, tile, r_, squeeze\n'), ((1586, 1604), 'numpy.zeros', 'zeros', (['(i_, i_, 1)'], {}), '((i_, i_, 1))\n', (1591, 1604), False, 'from numpy import zeros, tile, r_, squeeze\n'), ((1659, 1689), 'numpy.tile', 'tile', (['mu_MLFP[:, [0]]', '(1, t_)'], {}), '(mu_MLFP[:, [0]], (1, t_))\n', (1663, 1689), False, 'from numpy import zeros, tile, r_, squeeze\n'), ((1728, 1742), 'numpy.diagflat', 'np.diagflat', (['p'], {}), '(p)\n', (1739, 1742), True, 'import numpy as np\n'), ((1807, 1831), 'numpy.sum', 'npsum', (['(error > threshold)'], {}), '(error > threshold)\n', (1812, 1831), True, 'from numpy import sum as npsum\n'), ((1921, 1955), 'numpy.tile', 'tile', (['mu_MLFP[:, [k - 1]]', '(1, t_)'], {}), '(mu_MLFP[:, [k - 1]], (1, t_))\n', (1925, 1955), False, 'from numpy import zeros, tile, r_, squeeze\n'), ((2221, 2251), 'numpy.tile', 'tile', (['mu_MLFP[:, [k]]', '(1, t_)'], {}), '(mu_MLFP[:, [k]], (1, t_))\n', (2225, 2251), False, 'from numpy import zeros, tile, r_, squeeze\n'), ((2497, 2536), 'numpy.linalg.norm', 'norm', (['(mu_MLFP[:, k] - mu_MLFP[:, k - 1])'], {}), '(mu_MLFP[:, k] - mu_MLFP[:, k - 1])\n', (2501, 2536), False, 'from numpy.linalg import solve, norm\n'), ((2539, 2558), 'numpy.linalg.norm', 'norm', (['mu_MLFP[:, k]'], {}), '(mu_MLFP[:, k])\n', (2543, 2558), False, 'from numpy.linalg import solve, norm\n'), ((2578, 2642), 'numpy.linalg.norm', 'norm', (['(sigma2_MLFP[:, :, k] - sigma2_MLFP[:, :, k - 1])'], {'ord': '"""fro"""'}), "(sigma2_MLFP[:, :, k] - sigma2_MLFP[:, :, k - 1], ord='fro')\n", (2582, 2642), False, 'from numpy.linalg import solve, norm\n'), ((2645, 2682), 'numpy.linalg.norm', 'norm', (['sigma2_MLFP[:, :, k]'], {'ord': '"""fro"""'}), "(sigma2_MLFP[:, :, k], ord='fro')\n", (2649, 2682), False, 'from numpy.linalg import solve, norm\n'), ((2387, 2416), 'numpy.squeeze', 'squeeze', (['sigma2_MLFP[:, :, k]'], {}), '(sigma2_MLFP[:, :, 
k])\n', (2394, 2416), False, 'from numpy import zeros, tile, r_, squeeze\n'), ((1993, 2032), 'numpy.linalg.solve', 'solve', (['sigma2_MLFP[:, :, k - 1]', 'epsi_c'], {}), '(sigma2_MLFP[:, :, k - 1], epsi_c)\n', (1998, 2032), False, 'from numpy.linalg import solve, norm\n'), ((2419, 2448), 'numpy.squeeze', 'squeeze', (['sigma2_MLFP[:, :, k]'], {}), '(sigma2_MLFP[:, :, k])\n', (2426, 2448), False, 'from numpy import zeros, tile, r_, squeeze\n'), ((2306, 2324), 'numpy.diagflat', 'np.diagflat', (['(p * w)'], {}), '(p * w)\n', (2317, 2324), True, 'import numpy as np\n'), ((2134, 2154), 'numpy.tile', 'tile', (['(p * w)', '(i_, 1)'], {}), '(p * w, (i_, 1))\n', (2138, 2154), False, 'from numpy import zeros, tile, r_, squeeze\n')] |
import socket
import struct
import numpy as np
import serial
from psychopy import visual, core, sound
from serial.tools.list_ports import comports
from Online.AmpInterface import TriggerUnit
from thirdparty.collections import AttrDict
class TriggerNeuracle(TriggerUnit):
    """Trigger unit driving a Neuracle TriggerBox.

    Event codes are always written out through the TriggerBox; when
    *useLightSensor* is set, a white/black dot is additionally drawn in the
    bottom-right corner of the stimulus window so a photo sensor can
    time-lock the trigger to the actual screen refresh.
    """

    # Class-level defaults; __init__ installs a live TriggerBox instance.
    triggerbox = None
    dotSize = 50  # light-sensor dot diameter in pixels

    def __init__(self, useLightSensor=False, **kwargs):
        super(TriggerNeuracle, self).__init__()
        self.__useLightSensor = useLightSensor
        if useLightSensor:
            # Filled in later by config().
            self.window = None
            self.__dot = None
        self.kwargs = kwargs
        # initiate triggerbox
        self.triggerbox = TriggerBox()

    def config(self, window):
        """Bind the stimulus *window*, build the sensor dot if enabled, and
        calibrate the box's light sensor."""
        if self.__useLightSensor:
            assert isinstance(window, visual.Window)
            self.window = window
            radius = self.dotSize / 2
            win_size = self.window.size
            # Place the dot flush against the bottom-right corner.
            self.__dot = visual.Circle(self.window,
                                        radius=radius,
                                        pos=(win_size[0] / 2 - radius, radius - win_size[1] / 2),
                                        units='pix',
                                        fillColor='white',
                                        )
        # Pick the first light sensor reported by the box and calibrate it.
        sensorID = None
        for index, sensor in enumerate(self.triggerbox.sensorInfo):
            if 'Light' == sensor.Type:
                sensorID = index
                break
        self.triggerbox.InitLightSensor(sensorID=sensorID, screen_index=self.kwargs['screen_index'])

    def send_trigger(self, data):
        """Emit event code *data*; with the light sensor, also paint the dot white."""
        self.triggerbox.OutputEventData(data)
        if self.__useLightSensor:
            self.__dot.setFillColor('white')
            self.__dot.draw()

    def reset_trigger(self):
        """Paint the dot black again; the serial output needs no reset."""
        if self.__useLightSensor:
            self.__dot.setFillColor('black')
            self.__dot.draw()

    def after_flip(self):
        """Whether the trigger should be sent after the window flip (serial-only mode)."""
        return not self.__useLightSensor
class TriggerBox(object):
    """Driver for the Neuracle TriggerBox over a serial port.

    Implements the request/response framing of the box (function ids below)
    and exposes device queries, sensor configuration/sampling and event
    trigger output. Acknowledgements can optionally be mirrored to a local
    TCP socket.
    """
    # Protocol function ids used in each command frame.
    functionIDSensorParaGet = 1
    functionIDSensorParaSet = 2
    functionIDDeviceInfoGet = 3
    functionIDDeviceNameGet = 4
    functionIDSensorSampleGet = 5
    functionIDSensorInfoGet = 6
    functionIDOutputEventData = 225
    functionIDError = 131
    # Sensor type codes reported by the device.
    sensorTypeDigitalIN = 1
    sensorTypeLight = 2
    sensorTypeLineIN = 3
    sensorTypeMic = 4
    sensorTypeKey = 5
    sensorTypeTemperature = 6
    sensorTypeHumidity = 7
    sensorTypeAmbientlight = 8
    sensorTypeDebug = 9
    sensorTypeAll = 255
    deviceID = 1
    # TODO: get device ID
    # properties
    comportHandle = None  # serial.Serial handle, set in __init__
    deviceName = None     # cached by GetDeviceName()
    deviceInfo = None     # cached by GetDeviceInfo()
    sensorInfo = None     # cached by GetSensorInfo()
    tcpOutput = None      # optional TCP socket mirroring acknowledgements
    def __init__(self, port=None, tcpPort=None):
        """Open the TriggerBox on *port*, or auto-detect one if port is None.

        :param port: serial port name; when None, candidate ports containing
            'cu.usbserial' or 'COM' are probed with isValidDevice.
        :param tcpPort: when given, connect to localhost:tcpPort and mirror
            device acknowledgements there.
        :raises Exception: when no serial port responds as a TriggerBox.
        """
        if tcpPort is not None:
            self.tcpOutput = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.tcpOutput.connect(('localhost', tcpPort))
        if port is None:
            plist = comports()
            if not plist:
                raise Exception('No available port')
            validPort = None
            for p in plist:
                port = p.device
                # Only probe ports that look like USB-serial adapters.
                if 'cu.usbserial' in port or 'COM' in port:
                    isValidDevice = TriggerBox.isValidDevice(port)
                    if isValidDevice:
                        validPort = port
                        break
            if validPort is None:
                raise Exception('No available port')
        # `port` now holds either the caller's port or the detected one.
        self.comportHandle = serial.Serial(port, 115200, timeout=0.05)
        self.comportHandle.flush()
        # Prime the cached device name, info and sensor list.
        self.GetDeviceName()
        self.GetDeviceInfo()
        self.GetSensorInfo()
@staticmethod
def isValidDevice(portName):
'''
ValidateDevice
'''
try:
handle = serial.Serial(portName, 115200, timeout=0.05)
handle.flush()
except serial.SerialException:
return False
# send device message
message = struct.pack('<2BH', *[TriggerBox.deviceID, 4, 0])
handle.write(message)
message = handle.read(size=4)
handle.flush()
if not message:
return False
return True
def InitLightSensor(self, sensorID, **kwargs):
'''
InitLightSensor: Init light sensor
'''
sensorPara = self.GetSensorPara(sensorID)
sensorPara.OutputChannel = 3
sensorPara.TriggerToBeOut = 0
sensorPara.EventData = 0
self.SetSensorPara(sensorID, sensorPara)
self.SetLightSensorThreshold(sensorID, **kwargs)
    def SetLightSensorThreshold(self, sensorID, dotSize=50, screen_index=-1):
        """Calibrate the light sensor threshold of sensor *sensorID*.

        Opens a fullscreen window, samples the sensor while showing a white
        and then a black dot in the bottom-right corner, and sets the device
        threshold at 80% of the white/black span above the black level.
        Leaves the threshold unchanged if the span is too small.
        """
        w = visual.Window(fullscr=True,
                          screen=screen_index,
                          units='pix'
                          )
        w.setColor((0.4, 0.4, 0.4))
        w.flip()
        size = w.size
        sensorPara = self.GetSensorPara(sensorID)
        # draw white dot for 0.5s
        radius = dotSize / 2
        dot = visual.Circle(w,
                            radius=radius,
                            pos=(size[0] / 2 - radius, radius - size[1] / 2),
                            units='pix',
                            fillColor='white',
                            )
        dot.draw()
        w.flip()
        core.wait(0.5)
        sensorWhite = self.GetSensorSample(sensorID)
        # draw black dot for 0.5s
        dot.setFillColor('black')
        dot.draw()
        w.flip()
        core.wait(0.5)
        sensorBlack = self.GetSensorSample(sensorID)
        print('Light sensor data')
        print('White:', sensorWhite)
        print('Black:', sensorBlack)
        # Require the white reading to exceed black by at least 50% of black;
        # otherwise the readings are too close to set a reliable threshold.
        if sensorWhite - sensorBlack < sensorBlack * 0.5:
            print('Light sensor data out of range.')
        else:
            sensorPara.Threshold = int(round(0.8 * (sensorWhite - sensorBlack) + sensorBlack))
            print('Light sensor threshold: ', sensorPara.Threshold)
            self.SetSensorPara(sensorID, sensorPara)
        w.close()
def InitAudioSensor(self, sensorID):
sensorPara = self.GetSensorPara(sensorID)
sensorPara.OutputChannel = 3
sensorPara.TriggerToBeOut = 0
sensorPara.EventData = 0
self.SetSensorPara(sensorID, sensorPara)
self.SetAudioSensorThreshold(sensorID)
    def SetAudioSensorThreshold(self, sensorID):
        """Calibrate the microphone sensor threshold of sensor *sensorID*.

        Plays a 1 kHz tone three times, sampling the sensor during playback
        ('white') and during silence ('black'), then sets the device
        threshold at 80% of the averaged span above the silence level.
        """
        sensorPara = self.GetSensorPara(sensorID)
        # generate a pure tone, 1s long
        # 1000 Hz, 48000 Hz sample rate
        # played for 3 times
        sensorWhite = []
        sensorBlack = []
        for i in range(3):
            pahandle = sound.Sound(
                value=1000,
                secs=1,
                sampleRate=48000
            )
            pahandle.play()
            # wait
            core.wait(0.55)
            sensorWhite.append(self.GetSensorSample(sensorID))
            pahandle.stop()
            core.wait(0.55)
            sensorBlack.append(self.GetSensorSample(sensorID))
        sensorWhite = np.mean(sensorWhite)
        sensorBlack = np.mean(sensorBlack)
        print('Mic sensor data')
        print('White:', sensorWhite)
        print('Black:', sensorBlack)
        sensorPara.Threshold = int(round(0.8 * (sensorWhite - sensorBlack) + sensorBlack))
        print('Mic sensor threshold: ', sensorPara.Threshold)
        self.SetSensorPara(sensorID, sensorPara)
def OutputEventData(self, eventData):
# directly mark trigger with serial
# eventData is an unsigned short
assert isinstance(eventData, int)
msg = struct.pack('<H', eventData)
self.SendCommand(self.functionIDOutputEventData, msg)
resp = self.ReadResponse(self.functionIDOutputEventData)
if self.tcpOutput is not None:
self.tcpOutput.send(resp)
def SetEventData(self, sensorID, eventData, triggerToBeOut=1):
assert isinstance(eventData, int)
sensorPara = self.GetSensorPara(sensorID)
sensorPara.TriggerToBeOut = triggerToBeOut
sensorPara.EventData = eventData
self.SetSensorPara(sensorID, sensorPara)
def GetDeviceName(self):
self.SendCommand(self.functionIDDeviceNameGet, 1)
name = self.ReadResponse(self.functionIDDeviceNameGet)
name = name.decode()
self.deviceName = name
return name
    def GetDeviceInfo(self):
        """Query hardware/firmware versions, sensor count and device ID.

        The result is cached on ``self.deviceInfo`` and returned.
        """
        self.SendCommand(self.functionIDDeviceInfoGet, 1)
        info = self.ReadResponse(self.functionIDDeviceInfoGet)
        deviceInfo = AttrDict({
            'HardwareVersion': info[0],
            'FirmwareVersion': info[1],
            'SensorSum': info[2],
            # NOTE(review): struct.unpack always returns a tuple, so 'ID' is a
            # 1-tuple rather than a plain int, and payload byte 3 is skipped
            # (padding?) -- confirm both against the device protocol/callers.
            'ID': struct.unpack('<I', info[4:])
        })
        self.deviceInfo = deviceInfo
        return deviceInfo
def GetSensorInfo(self):
switch = {
self.sensorTypeDigitalIN: 'DigitalIN',
self.sensorTypeLight: 'Light',
self.sensorTypeLineIN: 'LineIN',
self.sensorTypeMic: 'Mic',
self.sensorTypeKey: 'Key',
self.sensorTypeTemperature: 'Temperature',
self.sensorTypeHumidity: 'Humidity',
self.sensorTypeAmbientlight: 'Ambientlight',
self.sensorTypeDebug: 'Debug'
}
self.SendCommand(self.functionIDSensorInfoGet)
info = self.ReadResponse(self.functionIDSensorInfoGet)
sensorInfo = []
for i in range(0, len(info), 2):
# print(info[i], info[i+1])
sensor_type = info[i]
try:
sensorType = switch[sensor_type]
except KeyError:
sensorType = 'Undefined'
# print('Undefined sensor type')
sensorNum = info[i + 1]
sensorInfo.append(AttrDict(Type=sensorType, Number=sensorNum))
self.sensorInfo = sensorInfo
return sensorInfo
def GetSensorPara(self, sensorID):
sensor = self.sensorInfo[sensorID]
cmd = [self.SensorType(sensor.Type), sensor.Number]
cmd = struct.pack('<2B', *cmd)
self.SendCommand(self.functionIDSensorParaGet, cmd)
para = self.ReadResponse(self.functionIDSensorParaGet)
para = struct.unpack('<2B3H', para)
sensorPara = AttrDict({
'Edge': para[0],
'OutputChannel': para[1],
'TriggerToBeOut': para[2],
'Threshold': para[3],
'EventData': para[4]
})
return sensorPara
def SetSensorPara(self, sensorID, sensorPara):
sensor = self.sensorInfo[sensorID]
cmd = [self.SensorType(sensor.Type), sensor.Number] + [sensorPara[key]
for key in sensorPara.keys()]
cmd = struct.pack('<4B3H', *cmd)
self.SendCommand(self.functionIDSensorParaSet, cmd)
resp = self.ReadResponse(self.functionIDSensorParaSet)
isSucceed = (resp[0] == self.SensorType(sensor.Type)) and (resp[1] == sensor.Number)
return isSucceed
def GetSensorSample(self, sensorID):
sensor = self.sensorInfo[sensorID]
cmd = [self.SensorType(sensor.Type), sensor.Number]
self.SendCommand(self.functionIDSensorSampleGet, struct.pack('<2B', *cmd))
result = self.ReadResponse(self.functionIDSensorSampleGet)
if result[0] != self.SensorType(sensor.Type) or result[1] != sensor.Number:
raise Exception('Get sensor sample error')
adcResult = struct.unpack('<H', result[2:])[0]
return adcResult
def SensorType(self, typeString):
switch = {
'DigitalIN': self.sensorTypeDigitalIN,
'Light': self.sensorTypeLight,
'LineIN': self.sensorTypeLineIN,
'Mic': self.sensorTypeMic,
'Key': self.sensorTypeKey,
'Temperature': self.sensorTypeTemperature,
'Humidity': self.sensorTypeHumidity,
'Ambientlight': self.sensorTypeAmbientlight,
'Debug': self.sensorTypeDebug
}
try:
typeNum = switch[typeString]
except KeyError:
raise Exception('Undefined sensor type')
return typeNum
def SendCommand(self, functionID, command=None):
if command is not None:
# process command data structure
if isinstance(command, int):
command = struct.pack('<B', command)
# make sure command finally becomes 'bytes'
assert isinstance(command, bytes)
payload = len(command)
else:
payload = 0
value = (self.deviceID, functionID, payload)
message = struct.pack('<2BH', *value)
if command is not None:
message += command
self.comportHandle.write(message)
def ReadResponse(self, functionID):
errorCases = {
0: 'None',
1: 'FrameHeader',
2: 'FramePayload',
3: 'ChannelNotExist',
4: 'DeviceID',
5: 'FunctionID',
6: 'SensorType'
}
message = self.comportHandle.read(4)
message = struct.unpack('<2BH', message)
if message[0] != self.deviceID:
raise Exception('Response error: request deviceID %d, \
return deviceID %d', self.deviceID, message[0])
if message[1] != functionID:
if message[1] == self.functionIDError:
errorType = self.comportHandle.read(1)
try:
errorMessage = errorCases[errorType]
except KeyError:
raise Exception('Undefined error type')
raise Exception('Response error: ', errorMessage)
else:
raise Exception('Response error: request functionID %d, \
return functionID %d', functionID, message[1])
payload = message[2]
DataBuf = self.comportHandle.read(payload)
return DataBuf
| [
"numpy.mean",
"serial.tools.list_ports.comports",
"psychopy.sound.Sound",
"thirdparty.collections.AttrDict",
"socket.socket",
"psychopy.visual.Circle",
"struct.pack",
"struct.unpack",
"serial.Serial",
"psychopy.visual.Window",
"psychopy.core.wait"
] | [((3650, 3691), 'serial.Serial', 'serial.Serial', (['port', '(115200)'], {'timeout': '(0.05)'}), '(port, 115200, timeout=0.05)\n', (3663, 3691), False, 'import serial\n'), ((4132, 4181), 'struct.pack', 'struct.pack', (['"""<2BH"""', '*[TriggerBox.deviceID, 4, 0]'], {}), "('<2BH', *[TriggerBox.deviceID, 4, 0])\n", (4143, 4181), False, 'import struct\n'), ((4900, 4961), 'psychopy.visual.Window', 'visual.Window', ([], {'fullscr': '(True)', 'screen': 'screen_index', 'units': '"""pix"""'}), "(fullscr=True, screen=screen_index, units='pix')\n", (4913, 4961), False, 'from psychopy import visual, core, sound\n'), ((5246, 5363), 'psychopy.visual.Circle', 'visual.Circle', (['w'], {'radius': 'radius', 'pos': '(size[0] / 2 - radius, radius - size[1] / 2)', 'units': '"""pix"""', 'fillColor': '"""white"""'}), "(w, radius=radius, pos=(size[0] / 2 - radius, radius - size[1] /\n 2), units='pix', fillColor='white')\n", (5259, 5363), False, 'from psychopy import visual, core, sound\n'), ((5546, 5560), 'psychopy.core.wait', 'core.wait', (['(0.5)'], {}), '(0.5)\n', (5555, 5560), False, 'from psychopy import visual, core, sound\n'), ((5728, 5742), 'psychopy.core.wait', 'core.wait', (['(0.5)'], {}), '(0.5)\n', (5737, 5742), False, 'from psychopy import visual, core, sound\n'), ((7266, 7286), 'numpy.mean', 'np.mean', (['sensorWhite'], {}), '(sensorWhite)\n', (7273, 7286), True, 'import numpy as np\n'), ((7309, 7329), 'numpy.mean', 'np.mean', (['sensorBlack'], {}), '(sensorBlack)\n', (7316, 7329), True, 'import numpy as np\n'), ((7823, 7851), 'struct.pack', 'struct.pack', (['"""<H"""', 'eventData'], {}), "('<H', eventData)\n", (7834, 7851), False, 'import struct\n'), ((10259, 10283), 'struct.pack', 'struct.pack', (['"""<2B"""', '*cmd'], {}), "('<2B', *cmd)\n", (10270, 10283), False, 'import struct\n'), ((10422, 10450), 'struct.unpack', 'struct.unpack', (['"""<2B3H"""', 'para'], {}), "('<2B3H', para)\n", (10435, 10450), False, 'import struct\n'), ((10472, 10601), 
'thirdparty.collections.AttrDict', 'AttrDict', (["{'Edge': para[0], 'OutputChannel': para[1], 'TriggerToBeOut': para[2],\n 'Threshold': para[3], 'EventData': para[4]}"], {}), "({'Edge': para[0], 'OutputChannel': para[1], 'TriggerToBeOut': para\n [2], 'Threshold': para[3], 'EventData': para[4]})\n", (10480, 10601), False, 'from thirdparty.collections import AttrDict\n'), ((10974, 11000), 'struct.pack', 'struct.pack', (['"""<4B3H"""', '*cmd'], {}), "('<4B3H', *cmd)\n", (10985, 11000), False, 'import struct\n'), ((12870, 12897), 'struct.pack', 'struct.pack', (['"""<2BH"""', '*value'], {}), "('<2BH', *value)\n", (12881, 12897), False, 'import struct\n'), ((13342, 13372), 'struct.unpack', 'struct.unpack', (['"""<2BH"""', 'message'], {}), "('<2BH', message)\n", (13355, 13372), False, 'import struct\n'), ((902, 1029), 'psychopy.visual.Circle', 'visual.Circle', (['self.window'], {'radius': 'radius', 'pos': '(size[0] / 2 - radius, radius - size[1] / 2)', 'units': '"""pix"""', 'fillColor': '"""white"""'}), "(self.window, radius=radius, pos=(size[0] / 2 - radius, radius -\n size[1] / 2), units='pix', fillColor='white')\n", (915, 1029), False, 'from psychopy import visual, core, sound\n'), ((2964, 3013), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (2977, 3013), False, 'import socket\n'), ((3118, 3128), 'serial.tools.list_ports.comports', 'comports', ([], {}), '()\n', (3126, 3128), False, 'from serial.tools.list_ports import comports\n'), ((3947, 3992), 'serial.Serial', 'serial.Serial', (['portName', '(115200)'], {'timeout': '(0.05)'}), '(portName, 115200, timeout=0.05)\n', (3960, 3992), False, 'import serial\n'), ((6874, 6923), 'psychopy.sound.Sound', 'sound.Sound', ([], {'value': '(1000)', 'secs': '(1)', 'sampleRate': '(48000)'}), '(value=1000, secs=1, sampleRate=48000)\n', (6885, 6923), False, 'from psychopy import visual, core, sound\n'), ((7045, 7060), 'psychopy.core.wait', 'core.wait', 
(['(0.55)'], {}), '(0.55)\n', (7054, 7060), False, 'from psychopy import visual, core, sound\n'), ((7164, 7179), 'psychopy.core.wait', 'core.wait', (['(0.55)'], {}), '(0.55)\n', (7173, 7179), False, 'from psychopy import visual, core, sound\n'), ((11444, 11468), 'struct.pack', 'struct.pack', (['"""<2B"""', '*cmd'], {}), "('<2B', *cmd)\n", (11455, 11468), False, 'import struct\n'), ((11696, 11727), 'struct.unpack', 'struct.unpack', (['"""<H"""', 'result[2:]'], {}), "('<H', result[2:])\n", (11709, 11727), False, 'import struct\n'), ((8903, 8932), 'struct.unpack', 'struct.unpack', (['"""<I"""', 'info[4:]'], {}), "('<I', info[4:])\n", (8916, 8932), False, 'import struct\n'), ((9994, 10037), 'thirdparty.collections.AttrDict', 'AttrDict', ([], {'Type': 'sensorType', 'Number': 'sensorNum'}), '(Type=sensorType, Number=sensorNum)\n', (10002, 10037), False, 'from thirdparty.collections import AttrDict\n'), ((12597, 12623), 'struct.pack', 'struct.pack', (['"""<B"""', 'command'], {}), "('<B', command)\n", (12608, 12623), False, 'import struct\n')] |
import numpy as np
# Cyamites imports
from Utilities import normalize, norm
# A "triangle soup" mesh, with an array of vertices and list of face indices
# Generally, this class exists only for interfacing with external components, such as IO and visualization. Cyamites encourages
# all mesh computation to be performed on a halfedge mesh.
class TriSoupMesh(object):
    """A "triangle soup" mesh: an array of vertices plus a list of face indices.

    This class exists mainly for interfacing with external components such as
    IO and visualization; Cyamites encourages all mesh computation to be
    performed on a halfedge mesh instead.
    """

    def __init__(self, verts, tris, faceAttr=None, vertAttr=None):
        """Construct a mesh from a vertex array and a triangle list.

        :param verts: Nx3 sequence of vertex positions.
        :param tris: Mx3 sequence of (i,j,k) CCW triangle vertex indices.
        :param faceAttr: dict of per-face attribute lists, e.g.
            faceAttr['value'] = [val1, val2, ...].
        :param vertAttr: dict of per-vertex attribute lists.
        """
        # A Nx3 array of vertex positions and a Mx3 index array.
        self.verts = np.array(verts)
        self.tris = np.array(tris, dtype=np.uint32)
        self.nVerts = len(verts)
        self.nTris = len(tris)
        # Bug fix: the previous `faceAttr=dict()` defaults were mutable
        # default arguments, silently shared between every instance
        # constructed without them; use None sentinels instead.
        self.faceAttr = {} if faceAttr is None else faceAttr
        self.vertAttr = {} if vertAttr is None else vertAttr
        # Numpy-ify all the attr data.
        # TODO: needs a more general solution; this will fail on ragged or
        # non-numeric attribute data.
        for key in self.faceAttr:
            self.faceAttr[key] = np.array(self.faceAttr[key])
        for key in self.vertAttr:
            self.vertAttr[key] = np.array(self.vertAttr[key])

    def computeNormals(self, useExisting=False):
        """Compute face and vertex normals with vectorized numpy operations.

        Face normals go to faceAttr['normal']; area-weighted vertex normals
        go to vertAttr['normal']. When *useExisting* is True and vertex
        normals are already present, this is a no-op.
        """
        if useExisting and 'normal' in self.vertAttr:
            return
        # Expand out an indexed view of the vertices: (nTris, 3, 3).
        faceVerts = self.verts[self.tris]
        # Face normal = cross product of two edge vectors.
        faceNormals = np.cross(faceVerts[:, 1] - faceVerts[:, 0],
                               faceVerts[:, 2] - faceVerts[:, 0])
        # The cross product's magnitude is twice the triangle area; keep it
        # for the weighted average below.
        faceAreas = 0.5 * norm(faceNormals, axis=1).reshape((self.nTris, 1))
        faceNormals = normalize(faceNormals)
        self.faceAttr['normal'] = faceNormals
        # Vertex normals: area-weighted average of incident face normals.
        vertNormals = np.zeros(self.verts.shape, dtype=self.verts.dtype)
        # Bug fix: `vertNormals[idx] += vals` is buffered, so repeated vertex
        # indices (a vertex appearing as the same corner of several faces)
        # only received ONE contribution; np.add.at accumulates them all.
        np.add.at(vertNormals, self.tris[:, 0], faceNormals * faceAreas)
        np.add.at(vertNormals, self.tris[:, 1], faceNormals * faceAreas)
        np.add.at(vertNormals, self.tris[:, 2], faceNormals * faceAreas)
        # Bug fix: the result of normalize() was previously discarded
        # (normalize is used as a pure function on faceNormals above), so
        # the stored vertex normals were unnormalized sums.
        vertNormals = normalize(vertNormals)
        self.vertAttr['normal'] = vertNormals

    def computeCurvature(self):
        """Not implemented yet."""
        pass
| [
"numpy.cross",
"Utilities.normalize",
"numpy.array",
"numpy.zeros",
"Utilities.norm"
] | [((567, 582), 'numpy.array', 'np.array', (['verts'], {}), '(verts)\n', (575, 582), True, 'import numpy as np\n'), ((603, 634), 'numpy.array', 'np.array', (['tris'], {'dtype': 'np.uint32'}), '(tris, dtype=np.uint32)\n', (611, 634), True, 'import numpy as np\n'), ((2084, 2162), 'numpy.cross', 'np.cross', (['(faceVerts[:, 1] - faceVerts[:, 0])', '(faceVerts[:, 2] - faceVerts[:, 0])'], {}), '(faceVerts[:, 1] - faceVerts[:, 0], faceVerts[:, 2] - faceVerts[:, 0])\n', (2092, 2162), True, 'import numpy as np\n'), ((2375, 2397), 'Utilities.normalize', 'normalize', (['faceNormals'], {}), '(faceNormals)\n', (2384, 2397), False, 'from Utilities import normalize, norm\n'), ((2553, 2603), 'numpy.zeros', 'np.zeros', (['self.verts.shape'], {'dtype': 'self.verts.dtype'}), '(self.verts.shape, dtype=self.verts.dtype)\n', (2561, 2603), True, 'import numpy as np\n'), ((2808, 2830), 'Utilities.normalize', 'normalize', (['vertNormals'], {}), '(vertNormals)\n', (2817, 2830), False, 'from Utilities import normalize, norm\n'), ((1208, 1236), 'numpy.array', 'np.array', (['self.faceAttr[key]'], {}), '(self.faceAttr[key])\n', (1216, 1236), True, 'import numpy as np\n'), ((1304, 1332), 'numpy.array', 'np.array', (['self.vertAttr[key]'], {}), '(self.vertAttr[key])\n', (1312, 1332), True, 'import numpy as np\n'), ((2302, 2327), 'Utilities.norm', 'norm', (['faceNormals'], {'axis': '(1)'}), '(faceNormals, axis=1)\n', (2306, 2327), False, 'from Utilities import normalize, norm\n')] |
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import matplotlib.pyplot as plt
import os
from pickle import Pickler, Unpickler
print(tf.__version__)
# Training hyperparameters and data-pipeline switches.
EPOCHS = 200
NUM_CHANNELS=128          # conv filter count for the "original" network
KERNEL_SIZE=(4,4)
DROPOUT_RATE=0
BATCHNORM=True
BATCH_SIZE=128
SPLIT=0.2                 # validation split passed to model.fit
CACHE=True                # reuse CACHEFILE instead of rebuilding the dataset
REMOVE_DUPLICATES=False   # keep only the last example per duplicate board
AVERAGE_DUPLICATES=True   # merge duplicate boards by averaging (takes priority)
#DATAFILE="/home/doma945/amoba_teleport/temp_1000sim_medium"
DATAFILE="/home/doma945/amoba_teleport/temp_1000sim_big"
#DATAFILE="/home/doma945/amoba_teleport/temp_4000sim"
CACHEFILE = "/home/zombori/tmp/amoba_cache.npz"
#CACHEFILE = "/home/doma945/tmp/amoba_cache.npz"
NETWORK="linear"          # one of: "original", "linear", "linear2", "local"
def load_history():
    """Load the self-play examples accumulated over all training iterations.

    Reads the pickled train history stored next to the model file under
    DATAFILE (one entry per iteration) and flattens it into a single list
    of examples.
    """
    # ===== load history file =====
    modelFile = os.path.join(DATAFILE, "trainhistory.pth.tar")
    examplesFile = modelFile + ".examples"
    trainhistory = []
    if not os.path.isfile(examplesFile):
        print(examplesFile)
    else:
        print("File with trainExamples found. Read it.")
        with open(examplesFile, "rb") as f:
            for entry in Unpickler(f).load():
                trainhistory.append(entry)
        f.closed
    print("The trainhistory containes {} iteration of data".format(len(trainhistory)))
    # ===== Extract data =====
    trainExamples = []
    for iteration in trainhistory:
        trainExamples.extend(np.array(iteration))
    print("Number of all trainexamples: {}".format(len(trainExamples)))
    return trainExamples
def remove_duplicates(xs, ps, vs):
    """Drop duplicate board states, keeping the last occurrence of each.

    Rows count as duplicates when their string representations match.
    Returns the filtered (xs, ps, vs) triple and logs the size reduction.
    """
    last_index = {}
    for idx in range(xs.shape[0]):
        last_index[str(xs[idx])] = idx
    keep = list(last_index.values())
    xs_out = xs[keep]
    ps_out = ps[keep]
    vs_out = vs[keep]
    print("Reduced shapes {}, {}, {} to {}, {}, {}".format(xs.shape, ps.shape, vs.shape, xs_out.shape, ps_out.shape, vs_out.shape))
    return xs_out, ps_out, vs_out
def average_duplicates(xs, ps, vs):
    """Merge duplicate board states by averaging their policies and values.

    Rows are grouped by their string representation; each group keeps one
    board, the mean policy vector and the mean value. Returns the reduced
    (xs, ps, vs) triple and logs the size reduction.
    """
    groups = {}
    for idx in range(xs.shape[0]):
        key = str(xs[idx])
        if key not in groups:
            groups[key] = {"x": xs[idx], "ps": [], "vs": []}
        groups[key]["ps"].append(ps[idx])
        groups[key]["vs"].append(vs[idx])
    xs_out = []
    ps_out = []
    vs_out = []
    for key in groups:
        entry = groups[key]
        xs_out.append(entry["x"])
        ps_out.append(np.mean(entry["ps"], axis=0))
        vs_out.append(np.mean(entry["vs"]))
    xs_out = np.array(xs_out)
    ps_out = np.array(ps_out)
    vs_out = np.array(vs_out)
    print("Average: reduced shapes {}, {}, {} to {}, {}, {}".format(xs.shape, ps.shape, vs.shape, xs_out.shape, ps_out.shape, vs_out.shape))
    return xs_out, ps_out, vs_out
def preprocess_data(cache=True):
    """Build the (inputs, target policies, target values) training arrays.

    When *cache* is true and CACHEFILE exists, the arrays are loaded from
    disk; otherwise they are rebuilt from the pickled self-play history and
    written back to CACHEFILE.
    """
    if cache and os.path.isfile(CACHEFILE):
        npz = np.load(CACHEFILE)
        xs = npz['xs']
        ps = npz['ps']
        vs = npz['vs']
    else:
        trainExamples = load_history()
        xs = []
        ps = []
        vs = []
        curPlayers = []
        for (allBoard, curPlayer, pi, action) in trainExamples:
            xs.append(allBoard)
            curPlayers.append(curPlayer)
            ps.append(pi)
            vs.append(action)
        xs = np.array(xs)
        curPlayers = np.array(curPlayers)
        ps = np.array(ps)
        vs = np.array(vs)
        # Channel 0 is the signed board; the remaining channels are
        # heuristic features.
        board = np.expand_dims(xs[:,:,:,0], axis = 3)
        heur_channels = xs[:,:,:,1:]
        # Split the signed board into two planes: for board values in
        # {-1, 0, +1}, white_board is +1 on white stones and -1 elsewhere,
        # black_board is +1 on black stones and -1 elsewhere.
        white_board = board * (board+1) -1
        black_board = board * (board-1) -1
        # Broadcast the scalar current-player flag to a full board plane.
        curPlayers = curPlayers.reshape((-1, 1, 1, 1))
        player_channel = curPlayers * np.ones_like(board)
        xs = np.concatenate([white_board, black_board, heur_channels, player_channel], axis=3)
        # AVERAGE takes priority over REMOVE when both flags are set.
        if AVERAGE_DUPLICATES:
            xs, ps, vs = average_duplicates(xs, ps, vs)
        elif REMOVE_DUPLICATES:
            xs, ps, vs = remove_duplicates(xs, ps, vs)
        np.savez(CACHEFILE, xs=xs, ps=ps, vs=vs)
    print("Input shape: ", xs.shape)
    print("Target policy shape: ", ps.shape)
    print("Target value shape: ", vs.shape)
    return (xs, ps, vs)
# Materialize the training arrays once at module load (honours the CACHE flag).
(xs, ps, vs) = preprocess_data(cache=CACHE)
def show(i):
    """Pretty-print training example *i*: board, policy, value and player.

    Reads the module-level xs/ps/vs arrays produced by preprocess_data.
    """
    example = xs[i]
    # Channels 0/1 hold the white/black planes in [-1, 1]; map to {0, 1}.
    white = (example[:,:,0] + 1) / 2
    black = (example[:,:,1] + 1) / 2
    player = example[0,0,10]
    board = white - black
    policy = ps[i].reshape((12,4))
    value = vs[i]
    print(np.transpose(board))
    print(np.transpose(policy))
    print("value: ", value)
    print("player: ", player)
# players = np.mean(xs[:,:,:,10], axis=(1,2))
# print(len(players))
# print(np.sum(players))
# Network input shape (rows, cols, channels) and flattened policy-head size,
# derived from the preprocessed training arrays.
input_shape = xs.shape[1:]
policy_shape = ps.shape[1:]
pi_output_count = np.prod(policy_shape)
# Build one of several policy+value architectures, selected by NETWORK.
# Every variant ends in a flat "policy" head (logits) and a tanh-squashed
# scalar "value" head.
if NETWORK=="original":
    # Conv tower over the board plus a parallel dense ("flat") branch,
    # concatenated before the shared dense trunk.
    inputs = keras.Input(shape=input_shape)
    outputs = inputs
    outputs = layers.Conv2D(NUM_CHANNELS, KERNEL_SIZE, padding="same")(outputs)
    if BATCHNORM: outputs = tf.keras.layers.BatchNormalization()(outputs)
    outputs = layers.Activation(tf.nn.relu)(outputs)
    outputs = layers.Conv2D(NUM_CHANNELS, KERNEL_SIZE, padding="same")(outputs)
    if BATCHNORM: outputs = tf.keras.layers.BatchNormalization()(outputs)
    outputs = layers.Activation(tf.nn.relu)(outputs)
    outputs = layers.Conv2D(NUM_CHANNELS, KERNEL_SIZE, padding="same", strides=(2,1))(outputs)
    if BATCHNORM: outputs = tf.keras.layers.BatchNormalization()(outputs)
    outputs = layers.Activation(tf.nn.relu)(outputs)
    outputs = layers.Conv2D(NUM_CHANNELS, KERNEL_SIZE, padding="same", strides=(2,2))(outputs)
    if BATCHNORM: outputs = tf.keras.layers.BatchNormalization()(outputs)
    outputs = layers.Activation(tf.nn.relu)(outputs)
    outputs = layers.Flatten()(outputs)
    outputs_flat = layers.Flatten()(inputs)
    outputs_flat = layers.Dense(1512)(outputs_flat)
    # NOTE(review): the BatchNorm/Activation pairs below operate on
    # `outputs` (the conv branch) although the preceding Dense layers write
    # to `outputs_flat` -- looks like a copy-paste slip; the flat branch
    # gets no nonlinearity between its Dense layers. Confirm intent.
    if BATCHNORM: outputs = tf.keras.layers.BatchNormalization()(outputs)
    outputs = layers.Activation(tf.nn.relu)(outputs)
    outputs_flat = layers.Dropout(DROPOUT_RATE)(outputs_flat)
    outputs_flat = layers.Dense(1256)(outputs_flat)
    if BATCHNORM: outputs = tf.keras.layers.BatchNormalization()(outputs)
    outputs = layers.Activation(tf.nn.relu)(outputs)
    outputs_flat = layers.Dropout(DROPOUT_RATE)(outputs_flat)
    outputs = layers.Concatenate(axis=1)([outputs_flat, outputs])
    outputs = layers.Dense(1024)(outputs)
    outputs = layers.Dropout(DROPOUT_RATE)(outputs)
    if BATCHNORM: outputs = tf.keras.layers.BatchNormalization()(outputs)
    outputs = layers.Activation(tf.nn.relu)(outputs)
    outputs = layers.Dense(512)(outputs)
    outputs = layers.Dropout(DROPOUT_RATE)(outputs)
    if BATCHNORM: outputs = tf.keras.layers.BatchNormalization()(outputs)
    outputs = layers.Activation(tf.nn.relu)(outputs)
    pi = layers.Dense(pi_output_count, name="policy")(outputs)
    v0 = layers.Dense(1)(outputs)
    v = layers.Activation(tf.math.tanh, name="value")(v0)
    model = keras.Model(inputs=inputs, outputs=(pi, v))
elif NETWORK=="linear":
    # Single 1x1 conv policy head: one weight per input channel per cell.
    inputs = keras.Input(shape=input_shape)
    pi = layers.Conv2D(1, (1,1), padding="same", name="conv1")(inputs)
    pi = layers.Flatten(name="policy")(pi)
    v = layers.Flatten()(inputs)
    # v = layers.Dense(1, activation="relu")(v)
    v = layers.Dense(1)(v)
    v = layers.Activation(tf.math.tanh, name="value")(v)
    model = keras.Model(inputs=inputs, outputs=(pi, v))
elif NETWORK=="linear2":
    # Two stacked 1x1 convs (one hidden relu layer per cell).
    inputs = keras.Input(shape=input_shape)
    pi = layers.Conv2D(10, (1,1), padding="same", name="conv1", activation="relu")(inputs)
    pi = layers.Conv2D(1, (1,1), padding="same", name="conv2")(pi)
    pi = layers.Flatten(name="policy")(pi)
    v = layers.Flatten()(inputs)
    # v = layers.Dense(1, activation="relu")(v)
    v = layers.Dense(1)(v)
    v = layers.Activation(tf.math.tanh, name="value")(v)
    model = keras.Model(inputs=inputs, outputs=(pi, v))
elif NETWORK=="local":
    # 3x3 conv policy head: each cell sees its immediate neighbourhood.
    inputs = keras.Input(shape=input_shape)
    outputs = layers.Conv2D(1, (3,3), padding="same", name="conv1")(inputs)
    pi = layers.Flatten(name="policy")(outputs)
    v0 = layers.Dense(1)(pi)
    v = layers.Activation(tf.math.tanh, name="value")(v0)
    model = keras.Model(inputs=inputs, outputs=(pi, v))
# elif NETWORK=="dense": # todo two heads
#     model = keras.Sequential([
#         keras.layers.Flatten(input_shape=input_shape),
#         keras.layers.Dense(1128, activation='relu'),
#         keras.layers.Dense(1256, activation='relu'),
#         keras.layers.Dense(1128, activation='relu'),
#         keras.layers.Dense(output_count),
#         keras.layers.Reshape(output_shape)
#     ])
# Two-headed loss: cross-entropy on the policy logits, MSE on the value,
# with the value term weighted 10x.
loss = {
    "policy": keras.losses.CategoricalCrossentropy(from_logits=True),
    "value": keras.losses.MeanSquaredError(),
}
loss_weights = {
    "policy": 1,
    "value": 10,
}
metrics = {
    "policy": ['categorical_accuracy',
               keras.metrics.TopKCategoricalAccuracy(2, "top2"),
               keras.metrics.TopKCategoricalAccuracy(3, "top3"),
               keras.metrics.TopKCategoricalAccuracy(4, "top4"),
               keras.metrics.TopKCategoricalAccuracy(5, "top5")],
    "value": 'mse',
}
# prob = layers.Softmax()(pi)
# model = keras.Model(inputs=inputs, outputs=prob)
# loss = keras.losses.MeanSquaredError()
model.compile(optimizer="adam",
              loss=loss,
              loss_weights=loss_weights,
              metrics=metrics
              )
model.fit(xs, (ps, vs), epochs=EPOCHS, batch_size=BATCH_SIZE, validation_split=SPLIT, verbose=2)
# Dump the learned 1x1 policy filters, one per input channel.
# NOTE(review): a layer named "conv1" only exists in the "linear",
# "linear2" and "local" graphs above -- this would raise for
# NETWORK=="original".
mylayer = model.get_layer(name="conv1")
myweights = mylayer.trainable_weights[0]
myweights = myweights.numpy()[:,:,:,0]
print(myweights.shape)
for i in range(11):
    print("Filter ", i)
    print(myweights[:,:,i])
class Model_Arena:
    """Pit the trained model against the heuristic player on a Gobang board."""
    def get_input_for_model(self, board, player):
        """Build the network input tensor for one board/player pair.

        Mirrors the channel layout used by preprocess_data: white plane,
        black plane, field-strength matrix, heuristic components and a
        broadcast current-player plane, concatenated along the channel axis.
        """
        mtx = self.heuristic.get_field_stregth_mtx(board, 1)
        heuristic_components = self.heuristic.get_x_line_mtx(board, 1)
        shape = list(np.shape(board))+[1]
        # board in {-1, 0, +1}: +1 plane per colour, -1 elsewhere.
        white_board = board * (board+1) -1
        black_board = board * (board-1) -1
        player_channel = player*np.ones(shape)
        new_board = np.concatenate([np.reshape(white_board,shape),np.reshape(black_board,shape),
                                np.reshape(mtx, shape),
                                heuristic_components,
                                player_channel], axis=2)
        return new_board
    def model_player(self,b,p, model):
        """Return the model's move: argmax of policy over valid moves.

        The tiny valids bonus keeps masked-out moves from winning the
        argmax when all valid-move probabilities are zero.
        """
        board = np.array([self.get_input_for_model(b, p)])
        valids = self.game.getValidMoves(b, 1)
        probs = model.predict(board)[0][0]
        move = np.argmax(probs*valids+valids*0.00001)
        #print(probs, move)
        return move
    def __init__(self, model):
        """Set up the 12x4 Gobang game, the heuristic opponent and the arena."""
        self.game = GobangGame(col=12, row=4, nir=7, defender=-1)
        self.heuristic = Heuristic(self.game)
        #heuristic_player = Heuristic(self.game).random_play
        heuristic_player = Heuristic(self.game).play
        model_player = lambda b, p: Model_Arena.model_player(self,b,p,model)
        self.arena = Arena.Arena(model_player, heuristic_player, self.game, display=display)
    def play(self, number_of_games=100):
        """Play *number_of_games* arena games and return the score tuple."""
        return self.arena.playGames(number_of_games, verbose=True)
# Evaluation entry point: import the game framework from the parent
# directory and play the freshly trained model against the heuristic.
if 1:
    # === Ugly hack for reaching parent directory packages ===
    from inspect import getsourcefile
    import os.path
    import sys
    # getsourcefile(lambda:0) resolves to this file's path.
    current_path = os.path.abspath(getsourcefile(lambda:0))
    current_dir = os.path.dirname(current_path)
    parent_dir = current_dir[:current_dir.rfind(os.path.sep)]
    sys.path.insert(0, parent_dir)
    # ========================================================
    import Arena
    from gobang.GobangGame import GobangGame, display
    from gobang.GobangPlayers import *
    from gobang.tensorflow.NNet import NNetWrapper as NNet
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    #set gpu memory grow
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth=True
    sess = tf.compat.v1.Session(config=config)
    arena = Model_Arena(model)
    print(arena.play())
| [
"numpy.prod",
"sys.path.insert",
"tensorflow.keras.losses.MeanSquaredError",
"tensorflow.keras.layers.BatchNormalization",
"numpy.array",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.losses.CategoricalCrossentropy",
"tensorflow.compat.v1.Session",
"numpy.mean",
"numpy.savez",
"inspect.gets... | [((4521, 4542), 'numpy.prod', 'np.prod', (['policy_shape'], {}), '(policy_shape)\n', (4528, 4542), True, 'import numpy as np\n'), ((802, 848), 'os.path.join', 'os.path.join', (['DATAFILE', '"""trainhistory.pth.tar"""'], {}), "(DATAFILE, 'trainhistory.pth.tar')\n", (814, 848), False, 'import os\n'), ((2372, 2385), 'numpy.array', 'np.array', (['xs2'], {}), '(xs2)\n', (2380, 2385), True, 'import numpy as np\n'), ((2396, 2409), 'numpy.array', 'np.array', (['ps2'], {}), '(ps2)\n', (2404, 2409), True, 'import numpy as np\n'), ((2420, 2433), 'numpy.array', 'np.array', (['vs2'], {}), '(vs2)\n', (2428, 2433), True, 'import numpy as np\n'), ((4581, 4611), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (4592, 4611), False, 'from tensorflow import keras\n'), ((6733, 6776), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inputs', 'outputs': '(pi, v)'}), '(inputs=inputs, outputs=(pi, v))\n', (6744, 6776), False, 'from tensorflow import keras\n'), ((8435, 8489), 'tensorflow.keras.losses.CategoricalCrossentropy', 'keras.losses.CategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (8471, 8489), False, 'from tensorflow import keras\n'), ((8504, 8535), 'tensorflow.keras.losses.MeanSquaredError', 'keras.losses.MeanSquaredError', ([], {}), '()\n', (8533, 8535), False, 'from tensorflow import keras\n'), ((11248, 11277), 'os.path.dirname', 'os.path.dirname', (['current_path'], {}), '(current_path)\n', (11263, 11277), False, 'import os\n'), ((11345, 11375), 'sys.path.insert', 'sys.path.insert', (['(0)', 'parent_dir'], {}), '(0, parent_dir)\n', (11360, 11375), False, 'import sys\n'), ((11694, 11720), 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {}), '()\n', (11718, 11720), True, 'import tensorflow as tf\n'), ((11777, 11812), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'config': 'config'}), '(config=config)\n', (11797, 11812), True, 'import 
tensorflow as tf\n'), ((923, 951), 'os.path.isfile', 'os.path.isfile', (['examplesFile'], {}), '(examplesFile)\n', (937, 951), False, 'import os\n'), ((2648, 2673), 'os.path.isfile', 'os.path.isfile', (['CACHEFILE'], {}), '(CACHEFILE)\n', (2662, 2673), False, 'import os\n'), ((2689, 2707), 'numpy.load', 'np.load', (['CACHEFILE'], {}), '(CACHEFILE)\n', (2696, 2707), True, 'import numpy as np\n'), ((3105, 3117), 'numpy.array', 'np.array', (['xs'], {}), '(xs)\n', (3113, 3117), True, 'import numpy as np\n'), ((3139, 3159), 'numpy.array', 'np.array', (['curPlayers'], {}), '(curPlayers)\n', (3147, 3159), True, 'import numpy as np\n'), ((3173, 3185), 'numpy.array', 'np.array', (['ps'], {}), '(ps)\n', (3181, 3185), True, 'import numpy as np\n'), ((3199, 3211), 'numpy.array', 'np.array', (['vs'], {}), '(vs)\n', (3207, 3211), True, 'import numpy as np\n'), ((3229, 3267), 'numpy.expand_dims', 'np.expand_dims', (['xs[:, :, :, 0]'], {'axis': '(3)'}), '(xs[:, :, :, 0], axis=3)\n', (3243, 3267), True, 'import numpy as np\n'), ((3529, 3614), 'numpy.concatenate', 'np.concatenate', (['[white_board, black_board, heur_channels, player_channel]'], {'axis': '(3)'}), '([white_board, black_board, heur_channels, player_channel],\n axis=3)\n', (3543, 3614), True, 'import numpy as np\n'), ((3794, 3834), 'numpy.savez', 'np.savez', (['CACHEFILE'], {'xs': 'xs', 'ps': 'ps', 'vs': 'vs'}), '(CACHEFILE, xs=xs, ps=ps, vs=vs)\n', (3802, 3834), True, 'import numpy as np\n'), ((4240, 4259), 'numpy.transpose', 'np.transpose', (['board'], {}), '(board)\n', (4252, 4259), True, 'import numpy as np\n'), ((4271, 4291), 'numpy.transpose', 'np.transpose', (['policy'], {}), '(policy)\n', (4283, 4291), True, 'import numpy as np\n'), ((4647, 4703), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['NUM_CHANNELS', 'KERNEL_SIZE'], {'padding': '"""same"""'}), "(NUM_CHANNELS, KERNEL_SIZE, padding='same')\n", (4660, 4703), False, 'from tensorflow.keras import layers\n'), ((4801, 4830), 
'tensorflow.keras.layers.Activation', 'layers.Activation', (['tf.nn.relu'], {}), '(tf.nn.relu)\n', (4818, 4830), False, 'from tensorflow.keras import layers\n'), ((4854, 4910), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['NUM_CHANNELS', 'KERNEL_SIZE'], {'padding': '"""same"""'}), "(NUM_CHANNELS, KERNEL_SIZE, padding='same')\n", (4867, 4910), False, 'from tensorflow.keras import layers\n'), ((5008, 5037), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['tf.nn.relu'], {}), '(tf.nn.relu)\n', (5025, 5037), False, 'from tensorflow.keras import layers\n'), ((5061, 5133), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['NUM_CHANNELS', 'KERNEL_SIZE'], {'padding': '"""same"""', 'strides': '(2, 1)'}), "(NUM_CHANNELS, KERNEL_SIZE, padding='same', strides=(2, 1))\n", (5074, 5133), False, 'from tensorflow.keras import layers\n'), ((5230, 5259), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['tf.nn.relu'], {}), '(tf.nn.relu)\n', (5247, 5259), False, 'from tensorflow.keras import layers\n'), ((5283, 5355), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['NUM_CHANNELS', 'KERNEL_SIZE'], {'padding': '"""same"""', 'strides': '(2, 2)'}), "(NUM_CHANNELS, KERNEL_SIZE, padding='same', strides=(2, 2))\n", (5296, 5355), False, 'from tensorflow.keras import layers\n'), ((5452, 5481), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['tf.nn.relu'], {}), '(tf.nn.relu)\n', (5469, 5481), False, 'from tensorflow.keras import layers\n'), ((5505, 5521), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (5519, 5521), False, 'from tensorflow.keras import layers\n'), ((5551, 5567), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (5565, 5567), False, 'from tensorflow.keras import layers\n'), ((5595, 5613), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1512)'], {}), '(1512)\n', (5607, 5613), False, 'from tensorflow.keras import layers\n'), ((5716, 5745), 'tensorflow.keras.layers.Activation', 
'layers.Activation', (['tf.nn.relu'], {}), '(tf.nn.relu)\n', (5733, 5745), False, 'from tensorflow.keras import layers\n'), ((5774, 5802), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['DROPOUT_RATE'], {}), '(DROPOUT_RATE)\n', (5788, 5802), False, 'from tensorflow.keras import layers\n'), ((5836, 5854), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1256)'], {}), '(1256)\n', (5848, 5854), False, 'from tensorflow.keras import layers\n'), ((5957, 5986), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['tf.nn.relu'], {}), '(tf.nn.relu)\n', (5974, 5986), False, 'from tensorflow.keras import layers\n'), ((6015, 6043), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['DROPOUT_RATE'], {}), '(DROPOUT_RATE)\n', (6029, 6043), False, 'from tensorflow.keras import layers\n'), ((6073, 6099), 'tensorflow.keras.layers.Concatenate', 'layers.Concatenate', ([], {'axis': '(1)'}), '(axis=1)\n', (6091, 6099), False, 'from tensorflow.keras import layers\n'), ((6139, 6157), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1024)'], {}), '(1024)\n', (6151, 6157), False, 'from tensorflow.keras import layers\n'), ((6181, 6209), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['DROPOUT_RATE'], {}), '(DROPOUT_RATE)\n', (6195, 6209), False, 'from tensorflow.keras import layers\n'), ((6307, 6336), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['tf.nn.relu'], {}), '(tf.nn.relu)\n', (6324, 6336), False, 'from tensorflow.keras import layers\n'), ((6360, 6377), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(512)'], {}), '(512)\n', (6372, 6377), False, 'from tensorflow.keras import layers\n'), ((6401, 6429), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['DROPOUT_RATE'], {}), '(DROPOUT_RATE)\n', (6415, 6429), False, 'from tensorflow.keras import layers\n'), ((6527, 6556), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['tf.nn.relu'], {}), '(tf.nn.relu)\n', (6544, 6556), False, 'from tensorflow.keras import layers\n'), 
((6575, 6619), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['pi_output_count'], {'name': '"""policy"""'}), "(pi_output_count, name='policy')\n", (6587, 6619), False, 'from tensorflow.keras import layers\n'), ((6638, 6653), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {}), '(1)\n', (6650, 6653), False, 'from tensorflow.keras import layers\n'), ((6671, 6716), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['tf.math.tanh'], {'name': '"""value"""'}), "(tf.math.tanh, name='value')\n", (6688, 6716), False, 'from tensorflow.keras import layers\n'), ((6814, 6844), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (6825, 6844), False, 'from tensorflow import keras\n'), ((7136, 7179), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inputs', 'outputs': '(pi, v)'}), '(inputs=inputs, outputs=(pi, v))\n', (7147, 7179), False, 'from tensorflow import keras\n'), ((8658, 8706), 'tensorflow.keras.metrics.TopKCategoricalAccuracy', 'keras.metrics.TopKCategoricalAccuracy', (['(2)', '"""top2"""'], {}), "(2, 'top2')\n", (8695, 8706), False, 'from tensorflow import keras\n'), ((8723, 8771), 'tensorflow.keras.metrics.TopKCategoricalAccuracy', 'keras.metrics.TopKCategoricalAccuracy', (['(3)', '"""top3"""'], {}), "(3, 'top3')\n", (8760, 8771), False, 'from tensorflow import keras\n'), ((8788, 8836), 'tensorflow.keras.metrics.TopKCategoricalAccuracy', 'keras.metrics.TopKCategoricalAccuracy', (['(4)', '"""top4"""'], {}), "(4, 'top4')\n", (8825, 8836), False, 'from tensorflow import keras\n'), ((8853, 8901), 'tensorflow.keras.metrics.TopKCategoricalAccuracy', 'keras.metrics.TopKCategoricalAccuracy', (['(5)', '"""top5"""'], {}), "(5, 'top5')\n", (8890, 8901), False, 'from tensorflow import keras\n'), ((10387, 10429), 'numpy.argmax', 'np.argmax', (['(probs * valids + valids * 1e-05)'], {}), '(probs * valids + valids * 1e-05)\n', (10396, 10429), True, 'import numpy as np\n'), ((10534, 10579), 
'gobang.GobangGame.GobangGame', 'GobangGame', ([], {'col': '(12)', 'row': '(4)', 'nir': '(7)', 'defender': '(-1)'}), '(col=12, row=4, nir=7, defender=-1)\n', (10544, 10579), False, 'from gobang.GobangGame import GobangGame, display\n'), ((10838, 10909), 'Arena.Arena', 'Arena.Arena', (['model_player', 'heuristic_player', 'self.game'], {'display': 'display'}), '(model_player, heuristic_player, self.game, display=display)\n', (10849, 10909), False, 'import Arena\n'), ((11205, 11230), 'inspect.getsourcefile', 'getsourcefile', (['(lambda : 0)'], {}), '(lambda : 0)\n', (11218, 11230), False, 'from inspect import getsourcefile\n'), ((1402, 1413), 'numpy.array', 'np.array', (['e'], {}), '(e)\n', (1410, 1413), True, 'import numpy as np\n'), ((2287, 2317), 'numpy.mean', 'np.mean', (["dict[s]['ps']"], {'axis': '(0)'}), "(dict[s]['ps'], axis=0)\n", (2294, 2317), True, 'import numpy as np\n'), ((2338, 2360), 'numpy.mean', 'np.mean', (["dict[s]['vs']"], {}), "(dict[s]['vs'])\n", (2345, 2360), True, 'import numpy as np\n'), ((3484, 3503), 'numpy.ones_like', 'np.ones_like', (['board'], {}), '(board)\n', (3496, 3503), True, 'import numpy as np\n'), ((4741, 4777), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (4775, 4777), True, 'import tensorflow as tf\n'), ((4948, 4984), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (4982, 4984), True, 'import tensorflow as tf\n'), ((5170, 5206), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (5204, 5206), True, 'import tensorflow as tf\n'), ((5392, 5428), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (5426, 5428), True, 'import tensorflow as tf\n'), ((5656, 5692), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (5690, 5692), True, 'import tensorflow as tf\n'), ((5897, 5933), 
'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (5931, 5933), True, 'import tensorflow as tf\n'), ((6247, 6283), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (6281, 6283), True, 'import tensorflow as tf\n'), ((6467, 6503), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (6501, 6503), True, 'import tensorflow as tf\n'), ((6854, 6908), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(1)', '(1, 1)'], {'padding': '"""same"""', 'name': '"""conv1"""'}), "(1, (1, 1), padding='same', name='conv1')\n", (6867, 6908), False, 'from tensorflow.keras import layers\n'), ((6925, 6954), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {'name': '"""policy"""'}), "(name='policy')\n", (6939, 6954), False, 'from tensorflow.keras import layers\n'), ((6967, 6983), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (6981, 6983), False, 'from tensorflow.keras import layers\n'), ((7048, 7063), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {}), '(1)\n', (7060, 7063), False, 'from tensorflow.keras import layers\n'), ((7075, 7120), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['tf.math.tanh'], {'name': '"""value"""'}), "(tf.math.tanh, name='value')\n", (7092, 7120), False, 'from tensorflow.keras import layers\n'), ((7218, 7248), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (7229, 7248), False, 'from tensorflow import keras\n'), ((7627, 7670), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inputs', 'outputs': '(pi, v)'}), '(inputs=inputs, outputs=(pi, v))\n', (7638, 7670), False, 'from tensorflow import keras\n'), ((9858, 9872), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (9865, 9872), True, 'import numpy as np\n'), ((7258, 7332), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(10)', '(1, 
1)'], {'padding': '"""same"""', 'name': '"""conv1"""', 'activation': '"""relu"""'}), "(10, (1, 1), padding='same', name='conv1', activation='relu')\n", (7271, 7332), False, 'from tensorflow.keras import layers\n'), ((7349, 7403), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(1)', '(1, 1)'], {'padding': '"""same"""', 'name': '"""conv2"""'}), "(1, (1, 1), padding='same', name='conv2')\n", (7362, 7403), False, 'from tensorflow.keras import layers\n'), ((7416, 7445), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {'name': '"""policy"""'}), "(name='policy')\n", (7430, 7445), False, 'from tensorflow.keras import layers\n'), ((7458, 7474), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (7472, 7474), False, 'from tensorflow.keras import layers\n'), ((7539, 7554), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {}), '(1)\n', (7551, 7554), False, 'from tensorflow.keras import layers\n'), ((7566, 7611), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['tf.math.tanh'], {'name': '"""value"""'}), "(tf.math.tanh, name='value')\n", (7583, 7611), False, 'from tensorflow.keras import layers\n'), ((7707, 7737), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (7718, 7737), False, 'from tensorflow import keras\n'), ((7961, 8004), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inputs', 'outputs': '(pi, v)'}), '(inputs=inputs, outputs=(pi, v))\n', (7972, 8004), False, 'from tensorflow import keras\n'), ((9719, 9734), 'numpy.shape', 'np.shape', (['board'], {}), '(board)\n', (9727, 9734), True, 'import numpy as np\n'), ((9909, 9939), 'numpy.reshape', 'np.reshape', (['white_board', 'shape'], {}), '(white_board, shape)\n', (9919, 9939), True, 'import numpy as np\n'), ((9939, 9969), 'numpy.reshape', 'np.reshape', (['black_board', 'shape'], {}), '(black_board, shape)\n', (9949, 9969), True, 'import numpy as np\n'), ((10006, 10028), 'numpy.reshape', 'np.reshape', 
(['mtx', 'shape'], {}), '(mtx, shape)\n', (10016, 10028), True, 'import numpy as np\n'), ((1113, 1125), 'pickle.Unpickler', 'Unpickler', (['f'], {}), '(f)\n', (1122, 1125), False, 'from pickle import Pickler, Unpickler\n'), ((7752, 7806), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(1)', '(3, 3)'], {'padding': '"""same"""', 'name': '"""conv1"""'}), "(1, (3, 3), padding='same', name='conv1')\n", (7765, 7806), False, 'from tensorflow.keras import layers\n'), ((7823, 7852), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {'name': '"""policy"""'}), "(name='policy')\n", (7837, 7852), False, 'from tensorflow.keras import layers\n'), ((7871, 7886), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {}), '(1)\n', (7883, 7886), False, 'from tensorflow.keras import layers\n'), ((7899, 7944), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['tf.math.tanh'], {'name': '"""value"""'}), "(tf.math.tanh, name='value')\n", (7916, 7944), False, 'from tensorflow.keras import layers\n')] |
# Graphical User Interface
from tkinter import *
from tkinter import messagebox
import tictactoe as ttt
import numpy as np
import mcts
import agent
class Game:
    """Graphical (tkinter) interface for playing tic-tac-toe against an AI.

    Attributes:
        size: side length of the quadratic game board.
        win_condition: number of consecutive pieces required to win.
        isAIstarting: if True the AI makes the first move.
        ai_mode: which AI generates moves ("random", "network" or "tree").
        number_of_rollouts: number of MCTS simulations per move (tree mode).
    """
    def __init__(self, size, win_condition, isAIstarting = False, ai_mode = "network", number_of_rollouts = 1000):
        self.game = ttt.Tictactoe(size, win_condition)
        # btnGrid[x][y] holds the Button widget for board cell (x, y)
        self.btnGrid = [[0 for i in range(size)] for i in range(size)]
        self.isAistarting = isAIstarting
        self.ai_mode = ai_mode
        self.number_of_rollouts = number_of_rollouts
        if self.ai_mode == "network":
            # load the trained policy/value network once up front
            self.Agent = agent.Agent('model1')
            print('collected parameters for model0')
        self.initGui()
    def initGui(self):
        '''Initialize the GUI: build a size x size grid of buttons, each
        wired to btnClick, then start the tkinter main loop.'''
        self.root = Tk()
        frame = Frame(self.root)
        self.root.title("TicTacToe")
        frame.grid(row=0, column=0)
        for i in range(self.game.size):
            for j in range(self.game.size):
                self.btnGrid[i][j] = Button(frame, text=" ")
                self.btnGrid[i][j].config(height=1, width=1)
                self.btnGrid[i][j].config(font=("Courier", 48, "bold"))
                # bind i, j as lambda defaults so each button keeps its own coordinates
                self.btnGrid[i][j].config(command= lambda x=i, y=j: self.btnClick(x,y))
                self.btnGrid[i][j].grid(column=i, row=j)
        if self.isAistarting:
            self.genmove()
        self.root.mainloop()
    def btnClick(self, x, y):
        '''Try to make a move at (x, y) for the current player; if the move
        was legal and the game continues, let the AI answer.'''
        if self.makeMove(x,y):
            self.genmove()
    def makeMove(self, x, y):
        '''Call setField on self.game at (x, y). On success update the GUI and
        check for a terminal state (win or full board); if terminal, show the
        finish dialog and return False so no further move is generated.'''
        valid = self.game.setField(x,y)
        if valid:
            self.updateGUI()
            winner = self.game.checkboard()
            if winner or self.game.move_number >= self.game.size**2:
                self.showFinishDialog(winner)
                return False
        return valid
    def updateGUI(self):
        '''Redraw every button from the current board state
        (2 -> "0", 1 -> "X", otherwise blank).'''
        for x in range(self.game.size):
            for y in range(self.game.size):
                value = self.game.board[x][y]
                text = "0" if value == 2 else "X" if value == 1 else " "
                self.btnGrid[x][y].config(text=text)
    def resetGame(self):
        '''Reset the GUI to an empty board and let the AI start if configured.'''
        for i in self.btnGrid:
            for j in i:
                j.config(text=" ")
        if self.isAistarting:
            self.genmove()
    def showFinishDialog(self, winner):
        '''Show a dialog announcing the result and offering a new game.'''
        title = ("Player " + str(winner) if winner != 0 else "Nobody") + " has won"
        result = messagebox.askquestion(title, "Start new game?")
        if result == "yes":
            self.game.reset()
            self.resetGame()
        else:
            self.root.destroy()
    def genmove(self):
        '''Generate an AI move for the chosen ai_mode and play it.'''
        flatgame = self.game.getFlatgame()
        policy = np.zeros(self.game.size**2)
        if (self.ai_mode == "network"):
            policy = self.Agent.policy_head(self.game.board).detach().numpy()
            highest = self.game.get_coords(np.argmax(policy))
            print("Network would like to have picked: x="+str(highest[0])+", y="+str(highest[1]))
            policy = policy * (flatgame == 0)
            print(np.round(policy, decimals=2))
        elif (self.ai_mode == "tree"):
            tree_ai = mcts.MCTS(self.game, self.number_of_rollouts)
            policy = tree_ai.perform_search()
            # Add small deviation to prevent ties
            policy = policy + (np.random.rand(self.game.size**2)*0.1)
            print(np.round(policy))
        elif (self.ai_mode == "random"):
            policy = np.random.rand(self.game.size**2)
        # mask occupied cells, then map the best flat index back to coordinates
        policy = policy * (flatgame == 0)
        x,y = np.unravel_index(np.argmax(policy), (self.game.size, self.game.size))
        print("AI choose: x = ", x, ", y = ", y)
        self.makeMove(x, y)
| [
"tictactoe.Tictactoe",
"numpy.random.rand",
"numpy.argmax",
"tkinter.messagebox.askquestion",
"agent.Agent",
"numpy.zeros",
"mcts.MCTS",
"numpy.round"
] | [((821, 855), 'tictactoe.Tictactoe', 'ttt.Tictactoe', (['size', 'win_condition'], {}), '(size, win_condition)\n', (834, 855), True, 'import tictactoe as ttt\n'), ((3532, 3580), 'tkinter.messagebox.askquestion', 'messagebox.askquestion', (['title', '"""Start new game?"""'], {}), "(title, 'Start new game?')\n", (3554, 3580), False, 'from tkinter import messagebox\n'), ((3849, 3878), 'numpy.zeros', 'np.zeros', (['(self.game.size ** 2)'], {}), '(self.game.size ** 2)\n', (3857, 3878), True, 'import numpy as np\n'), ((1116, 1137), 'agent.Agent', 'agent.Agent', (['"""model1"""'], {}), "('model1')\n", (1127, 1137), False, 'import agent\n'), ((4866, 4883), 'numpy.argmax', 'np.argmax', (['policy'], {}), '(policy)\n', (4875, 4883), True, 'import numpy as np\n'), ((4047, 4064), 'numpy.argmax', 'np.argmax', (['policy'], {}), '(policy)\n', (4056, 4064), True, 'import numpy as np\n'), ((4228, 4256), 'numpy.round', 'np.round', (['policy'], {'decimals': '(2)'}), '(policy, decimals=2)\n', (4236, 4256), True, 'import numpy as np\n'), ((4378, 4423), 'mcts.MCTS', 'mcts.MCTS', (['self.game', 'self.number_of_rollouts'], {}), '(self.game, self.number_of_rollouts)\n', (4387, 4423), False, 'import mcts\n'), ((4608, 4624), 'numpy.round', 'np.round', (['policy'], {}), '(policy)\n', (4616, 4624), True, 'import numpy as np\n'), ((4688, 4723), 'numpy.random.rand', 'np.random.rand', (['(self.game.size ** 2)'], {}), '(self.game.size ** 2)\n', (4702, 4723), True, 'import numpy as np\n'), ((4551, 4586), 'numpy.random.rand', 'np.random.rand', (['(self.game.size ** 2)'], {}), '(self.game.size ** 2)\n', (4565, 4586), True, 'import numpy as np\n')] |
# %%
import os
from PIL import Image
import numpy as np
# Set the directory you want to start from
rootDir = 'photos'
# Walk the whole photo tree and list every file found.
for dirName, subdirList, fileList in os.walk(rootDir):
    print('Found directory: %s' % dirName)
    for fname in fileList:
        print('\t%s' % fname)
# %%
# NOTE(review): dirName and fileList below are leftovers from the LAST
# iteration of the os.walk loop above, so only that directory's images are
# processed. The "\\" separator also assumes Windows-style paths — confirm.
main_list = []
im = Image.open(dirName+"\\"+fileList[0])
print("----------X------------X-----------------------X\n\n")
im.show()
original_array = np.array(im)
print(original_array.shape)
print(original_array.size)
print(original_array)
print("\n\n----------X------------X-----------------------X\n\n")
# Resize to a fixed 200x200 so all images stack into one homogeneous array.
new_image = im.resize((200, 200))
resized_array = np.array(new_image)
print(resized_array.size)
print(resized_array.shape)
print(resized_array)
main_list.append(resized_array)
# %%
main_list
# %%
# Repeat the same open/inspect/resize/append steps for the remaining files.
pic = 2
for i in range(1,len(fileList)):
    print("PIC NO : ",pic)
    im = Image.open(dirName+"\\"+fileList[i])
    print("----------X------------X-----------------------X\n\n")
    im.show()
    original_array = np.array(im)
    print(original_array.shape)
    print(original_array.size)
    print(original_array)
    print("\n\n----------X------------X-----------------------X\n\n")
    new_image = im.resize((200, 200))
    resized_array = np.array(new_image)
    print(resized_array.size)
    print(resized_array.shape)
    print(resized_array)
    print("APPENDING TIME")
    main_list.append(resized_array)
    pic = pic+1
# %%
len(main_list)
# %%
main_list
# %%
def main():
    """Stack the collected image arrays into one NumPy array and return it.

    Reads the module-level ``main_list`` accumulated by the cells above.
    """
    final_array = np.array(main_list)
    print("FINAL NUMPY ARRAY :\n\n ", final_array)
    return final_array
# %%
# main_arr = main()
# main_arr
# %%
# NOTE(review): main_arr only exists if the commented-out call above is run
# first (or survives from an earlier notebook session); running these cells
# as-is raises NameError.
main_arr.shape
# %%
main_arr.ndim
# %%
main_arr.size
# %%
| [
"numpy.array",
"PIL.Image.open",
"os.walk"
] | [((165, 181), 'os.walk', 'os.walk', (['rootDir'], {}), '(rootDir)\n', (172, 181), False, 'import os\n'), ((322, 362), 'PIL.Image.open', 'Image.open', (["(dirName + '\\\\' + fileList[0])"], {}), "(dirName + '\\\\' + fileList[0])\n", (332, 362), False, 'from PIL import Image\n'), ((451, 463), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (459, 463), True, 'import numpy as np\n'), ((663, 682), 'numpy.array', 'np.array', (['new_image'], {}), '(new_image)\n', (671, 682), True, 'import numpy as np\n'), ((901, 941), 'PIL.Image.open', 'Image.open', (["(dirName + '\\\\' + fileList[i])"], {}), "(dirName + '\\\\' + fileList[i])\n", (911, 941), False, 'from PIL import Image\n'), ((1053, 1065), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (1061, 1065), True, 'import numpy as np\n'), ((1289, 1308), 'numpy.array', 'np.array', (['new_image'], {}), '(new_image)\n', (1297, 1308), True, 'import numpy as np\n'), ((1566, 1585), 'numpy.array', 'np.array', (['main_list'], {}), '(main_list)\n', (1574, 1585), True, 'import numpy as np\n')] |
"""This module provides the basic functions about deep learning"""
# -*- coding: utf-8 -*-
# date: 2021
# author: AllChooseC
import numpy as np
import torch
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from tqdm import tqdm
# Per-class frequency in percent. NOTE(review): the first list is immediately
# overwritten by the 2017 figures below and therefore has no effect.
class_distribution = [59.68, 8.68, 28.55, 3.08]
# 2017
class_distribution = [59.22, 8.65, 28.80, 3.33]
def split_indices(n, vld_pct, labels, compensation_factor, random_state=None):
    """Split sample indices into 5 cross-validation folds with class rebalancing.

    The shuffled indices are cut into 5 slices; each fold trains on 4 slices
    and validates on the remaining one. The training indices of each fold are
    oversampled per class (tiled) according to ``compensation_factor`` and the
    module-level ``class_distribution`` percentages.

    Args:
        n: total number of training samples.
        vld_pct: fraction of samples per validation fold. When it rounds down
            to 0 samples, a single fold is returned that uses ALL indices for
            both training and validation.
        labels: (n, num_classes) one-hot label array used for rebalancing.
        compensation_factor: strength of the class-imbalance oversampling.
        random_state: optional seed for reproducibility. NOTE(review): a seed
            of 0 is falsy and would silently not be applied.

    Returns:
        Tuple (train_sets, vld_sets) of lists of index arrays, one per fold.
    """
    n_vld = int(vld_pct*n) # Size of each validation slice
    if random_state:
        np.random.seed(random_state) # Set the random seed (for reproducibility)
    idxs = np.random.permutation(n) # Random permutation of 0 to n-1
    # Five slices; the last one also absorbs the remainder.
    split_sets = [idxs[:n_vld], idxs[n_vld:2*n_vld], idxs[2*n_vld:3*n_vld], idxs[3*n_vld:4*n_vld], idxs[4*n_vld:]]
    train_sets = []
    vld_sets = []
    for k in range(5):
        # Train on four slices starting at k; slice k-1 becomes validation.
        train_set = np.concatenate((split_sets[k], split_sets[(k+1)%5], split_sets[(k+2)%5], split_sets[(k+3)%5]))
        # Per-class boolean masks over the fold's training indices.
        masks = [labels[train_set, i].astype(bool) for i in range(labels.shape[1])]
        sets = [train_set[mask] for mask in masks]
        lst = []
        for idx, set_ in enumerate(sets):
            # Tile rare classes more often to compensate class imbalance.
            scale = int(100 * compensation_factor / class_distribution[idx]) + 1
            set_ = np.tile(set_, scale)
            set_ = set_.reshape([-1, 1])
            lst.append(set_)
        train_set = np.vstack(lst)
        train_set = train_set.squeeze()
        np.random.shuffle(train_set)
        train_sets.append(train_set)
        vld_sets.append(split_sets[k-1])
    if n_vld == 0:
        # Degenerate case: no validation split requested. Build one fold that
        # trains (rebalanced) on every sample and validates on every sample.
        train_sets = []
        vld_sets = []
        train_set = idxs
        masks = [labels[:, i].astype(bool) for i in range(labels.shape[1])]
        sets = [train_set[mask] for mask in masks]
        lst = []
        for idx, set_ in enumerate(sets):
            scale = int(100 * compensation_factor / class_distribution[idx]) + 1
            set_ = np.tile(set_, scale)
            set_ = set_.reshape([-1, 1])
            lst.append(set_)
        train_set = np.vstack(lst)
        train_set = train_set.squeeze()
        np.random.shuffle(train_set)
        train_sets.append(train_set)
        vld_sets.append(idxs)
    return train_sets, vld_sets # One (train, validation) index pair per fold
def get_data_loader(train_dataset, vld_dataset, batch_size, onehot_labels, compensation_factor):
    """Build one (train, validation) DataLoader pair for each index fold.

    Folds come from ``split_indices`` with ``vld_pct=0`` and a fixed seed, so
    the split is reproducible across runs.
    """
    fold_train_idx, fold_vld_idx = split_indices(
        len(train_dataset), 0, onehot_labels, compensation_factor, random_state=2021)
    train_lds = [
        DataLoader(train_dataset, batch_size, sampler=SubsetRandomSampler(idx))
        for idx in fold_train_idx
    ]
    vld_lds = [
        DataLoader(vld_dataset, batch_size, sampler=idx)
        for idx in fold_vld_idx
    ]
    return train_lds, vld_lds
def get_default_device():
    """Return the CUDA device when one is available, otherwise the CPU."""
    name = 'cuda' if torch.cuda.is_available() else 'cpu'
    return torch.device(name)
def to_device(data, device):
    """Move a tensor (or a list/tuple of tensors) to *device*.

    Strings inside a list/tuple are passed through untouched; nested
    containers are handled recursively. Containers always come back as lists.
    """
    if isinstance(data, (list, tuple)):
        moved = []
        for item in data:
            moved.append(item if isinstance(item, str) else to_device(item, device))
        return moved
    return data.to(device, non_blocking=True)
class DeviceDataLoader:
    """Wrap a data loader so every batch it yields lands on a target device."""

    def __init__(self, dl, device):
        self.dl = dl
        self.device = device

    def __iter__(self):
        """Iterate the wrapped loader, transferring each batch via to_device."""
        return (to_device(batch, self.device) for batch in self.dl)

    def __len__(self):
        """Number of batches in the wrapped loader."""
        return len(self.dl)
@torch.no_grad()
def get_all_preds(model, loader):
    """Run *model* over every batch of *loader* and collect predictions.

    :param model: network mapping a batch of signals to class scores.
    :param loader: iterable yielding (signals, labels) batches; labels may be
        tensors or strings (string labels raise TypeError in torch.cat and
        are collected separately).
    :return: (predicted_class_indices, labels) both moved to CPU, or
        (predicted_class_indices, list_of_names) when labels were strings.
    """
    device = get_default_device()
    all_preds = to_device(torch.tensor([]), device)
    all_labels = to_device(torch.tensor([]), device)
    all_names = []
    for batch in tqdm(loader):
        signals, labels = batch
        preds = model(signals)
        try:
            # tensor labels concatenate; non-tensor labels fall through below
            all_labels = torch.cat((all_labels, labels), dim=0)
        except TypeError:
            all_names.extend(labels)
        all_preds = torch.cat(
            (all_preds, preds)
            , dim=0
        )
    # argmax over class dimension gives the predicted class per sample
    _, predicted = torch.max(all_preds, dim=1)
    if all_names:
        return predicted.cpu(), all_names
    return predicted.cpu(), all_labels.cpu()
class EarlyStopping:
    """Stop training once the monitored metric has failed to improve too often.

    The counter is cumulative: every call that does not strictly improve the
    best value seen so far increments it, and it is never reset.
    """

    def __init__(self, patience=100, mode='max'):
        """
        :param patience: total number of non-improving epochs tolerated
        :param mode: 'max' if larger metric values are better, else smaller
        """
        self.patience = patience
        self.mode = mode
        self.counter = 0
        self.best_metric = None
        self.early_stop = False

    def __call__(self, val_metric):
        """Record one epoch's metric value and update the stopping state."""
        if self.best_metric is None:
            # first observation becomes the baseline
            self.best_metric = val_metric
        elif val_metric < self.best_metric:
            if self.mode == 'max':
                self.counter += 1
            else:
                self.best_metric = val_metric
        elif val_metric > self.best_metric:
            if self.mode == 'max':
                self.best_metric = val_metric
            else:
                self.counter += 1
        else:
            # a tie never counts as an improvement
            self.counter += 1
        print(f'INFO: Early stopping counter {self.counter} of {self.patience}')
        if self.counter >= self.patience:
            print('INFO: Early stopping')
            self.early_stop = True
def load_model(model, path, evaluation=False):
    """Load saved weights from *path* into *model* on the default device.

    When *evaluation* is True every parameter is frozen
    (``requires_grad=False``) so the model is inference-only.
    """
    target_device = get_default_device()
    state_dict = torch.load(path, map_location=torch.device(target_device))
    model.load_state_dict(state_dict)
    if not evaluation:
        return model
    for parameter in model.parameters():
        parameter.requires_grad = False
    return model
# Module-level default device used by the tensor helpers defined below.
device = get_default_device()
def get_length(data):
    """Return the unpadded time length of each sample in a zero-padded batch.

    A time step counts as used if any channel/feature at that step is
    non-zero; the result is the largest used time index + 1, per sample.
    """
    # data shape [b, c, t, f]
    shape = list(data.shape)
    # collapse channels: max of |data| over dim 1
    maps, _ = torch.max(torch.abs(data), 1)
    # data shape [b, t, f]
    used = torch.sign(maps)
    used = used.int()
    # column vector of time indices 0..t-1, broadcast against `used`
    t_range = torch.arange(0, shape[2], device=device).unsqueeze(1)
    ranged = t_range * used
    length, _ = torch.max(ranged, 1)
    # data shape [b, f]
    length, _ = torch.max(length, 1)
    # data shape [b]
    length = length + 1
    return length
def set_zeros(data, length):
    """Zero out the padded tail of each sample beyond its valid length.

    :param data: batch tensor of shape [b, t, f] (per the index usage below).
    :param length: per-sample valid time lengths, shape [b].
    :return: tensor like *data* with entries at time >= length set to zero.
    """
    shape = list(data.shape)
    # generate data shape matrix with time range with padding
    r = torch.arange(0, shape[1], device=device)
    r = torch.unsqueeze(r, 0)
    r = torch.unsqueeze(r, 2)
    r = r.repeat(shape[0], 1, shape[2])
    # generate data shape matrix with time range without padding
    l = torch.unsqueeze(length, 1)
    l = torch.unsqueeze(l, 2)
    l = l.repeat(1, shape[1], shape[2])
    # mask entry is true while the time index is smaller than the length
    mask = torch.lt(r, l)
    # beyond the valid length, replace the input with zero
    output = torch.where(mask, data, torch.zeros_like(data))
    return output
def class_penalty(class_distribution, class_penalty=0.2):
    """Blend uniform class weights with inverse-frequency weights.

    ``class_penalty=0`` yields equal weights; ``class_penalty=1`` yields
    weights proportional to 100 / class frequency.
    """
    blend = class_penalty
    blended = [(1 - blend) + (100 / ratio) * blend for ratio in class_distribution]
    # shape (1, num_classes), as expected by weighted loss functions
    class_weights = torch.Tensor([blended])
    return class_weights.to(device)
| [
"torch.max",
"torch.cuda.is_available",
"torch.arange",
"torch.unsqueeze",
"torch.utils.data.dataloader.DataLoader",
"numpy.random.seed",
"numpy.concatenate",
"numpy.vstack",
"torch.zeros_like",
"numpy.random.permutation",
"torch.utils.data.sampler.SubsetRandomSampler",
"numpy.tile",
"torch.... | [((4223, 4238), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4236, 4238), False, 'import torch\n'), ((1004, 1028), 'numpy.random.permutation', 'np.random.permutation', (['n'], {}), '(n)\n', (1025, 1028), True, 'import numpy as np\n'), ((3458, 3483), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3481, 3483), False, 'import torch\n'), ((4555, 4567), 'tqdm.tqdm', 'tqdm', (['loader'], {}), '(loader)\n', (4559, 4567), False, 'from tqdm import tqdm\n'), ((4890, 4917), 'torch.max', 'torch.max', (['all_preds'], {'dim': '(1)'}), '(all_preds, dim=1)\n', (4899, 4917), False, 'import torch\n'), ((6773, 6789), 'torch.sign', 'torch.sign', (['maps'], {}), '(maps)\n', (6783, 6789), False, 'import torch\n'), ((6924, 6944), 'torch.max', 'torch.max', (['ranged', '(1)'], {}), '(ranged, 1)\n', (6933, 6944), False, 'import torch\n'), ((6985, 7005), 'torch.max', 'torch.max', (['length', '(1)'], {}), '(length, 1)\n', (6994, 7005), False, 'import torch\n'), ((7199, 7239), 'torch.arange', 'torch.arange', (['(0)', 'shape[1]'], {'device': 'device'}), '(0, shape[1], device=device)\n', (7211, 7239), False, 'import torch\n'), ((7248, 7269), 'torch.unsqueeze', 'torch.unsqueeze', (['r', '(0)'], {}), '(r, 0)\n', (7263, 7269), False, 'import torch\n'), ((7278, 7299), 'torch.unsqueeze', 'torch.unsqueeze', (['r', '(2)'], {}), '(r, 2)\n', (7293, 7299), False, 'import torch\n'), ((7413, 7439), 'torch.unsqueeze', 'torch.unsqueeze', (['length', '(1)'], {}), '(length, 1)\n', (7428, 7439), False, 'import torch\n'), ((7448, 7469), 'torch.unsqueeze', 'torch.unsqueeze', (['l', '(2)'], {}), '(l, 2)\n', (7463, 7469), False, 'import torch\n'), ((7577, 7591), 'torch.lt', 'torch.lt', (['r', 'l'], {}), '(r, l)\n', (7585, 7591), False, 'import torch\n'), ((7983, 8004), 'torch.Tensor', 'torch.Tensor', (['weights'], {}), '(weights)\n', (7995, 8004), False, 'import torch\n'), ((920, 948), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', 
(934, 948), True, 'import numpy as np\n'), ((1267, 1377), 'numpy.concatenate', 'np.concatenate', (['(split_sets[k], split_sets[(k + 1) % 5], split_sets[(k + 2) % 5],\n split_sets[(k + 3) % 5])'], {}), '((split_sets[k], split_sets[(k + 1) % 5], split_sets[(k + 2) %\n 5], split_sets[(k + 3) % 5]))\n', (1281, 1377), True, 'import numpy as np\n'), ((1767, 1781), 'numpy.vstack', 'np.vstack', (['lst'], {}), '(lst)\n', (1776, 1781), True, 'import numpy as np\n'), ((1830, 1858), 'numpy.random.shuffle', 'np.random.shuffle', (['train_set'], {}), '(train_set)\n', (1847, 1858), True, 'import numpy as np\n'), ((2438, 2452), 'numpy.vstack', 'np.vstack', (['lst'], {}), '(lst)\n', (2447, 2452), True, 'import numpy as np\n'), ((2501, 2529), 'numpy.random.shuffle', 'np.random.shuffle', (['train_set'], {}), '(train_set)\n', (2518, 2529), True, 'import numpy as np\n'), ((3102, 3132), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['train_idx'], {}), '(train_idx)\n', (3121, 3132), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((3152, 3212), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['train_dataset', 'batch_size'], {'sampler': 'train_sampler'}), '(train_dataset, batch_size, sampler=train_sampler)\n', (3162, 3212), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((3230, 3282), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['vld_dataset', 'batch_size'], {'sampler': 'vld_idx'}), '(vld_dataset, batch_size, sampler=vld_idx)\n', (3240, 3282), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((3500, 3520), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (3512, 3520), False, 'import torch\n'), ((3546, 3565), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3558, 3565), False, 'import torch\n'), ((4439, 4455), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (4451, 4455), False, 'import torch\n'), ((4492, 4508), 'torch.tensor', 'torch.tensor', 
(['[]'], {}), '([])\n', (4504, 4508), False, 'import torch\n'), ((4794, 4830), 'torch.cat', 'torch.cat', (['(all_preds, preds)'], {'dim': '(0)'}), '((all_preds, preds), dim=0)\n', (4803, 4830), False, 'import torch\n'), ((6715, 6730), 'torch.abs', 'torch.abs', (['data'], {}), '(data)\n', (6724, 6730), False, 'import torch\n'), ((7684, 7706), 'torch.zeros_like', 'torch.zeros_like', (['data'], {}), '(data)\n', (7700, 7706), False, 'import torch\n'), ((1656, 1676), 'numpy.tile', 'np.tile', (['set_', 'scale'], {}), '(set_, scale)\n', (1663, 1676), True, 'import numpy as np\n'), ((2327, 2347), 'numpy.tile', 'np.tile', (['set_', 'scale'], {}), '(set_, scale)\n', (2334, 2347), True, 'import numpy as np\n'), ((4671, 4709), 'torch.cat', 'torch.cat', (['(all_labels, labels)'], {'dim': '(0)'}), '((all_labels, labels), dim=0)\n', (4680, 4709), False, 'import torch\n'), ((6826, 6866), 'torch.arange', 'torch.arange', (['(0)', 'shape[2]'], {'device': 'device'}), '(0, shape[2], device=device)\n', (6838, 6866), False, 'import torch\n'), ((6343, 6363), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (6355, 6363), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 8 22:47:56 2019
Final Demo
@author: Stuart
"""
import numpy as np
def cosine_distance(v1, v2):  # cosine similarity
    """Cosine similarity between two 1-D vectors.

    Returns 0 when either vector has zero norm (similarity is undefined).
    Bug fix: the previous guard used ``v1.all()``, which wrongly returned 0
    for any vector containing a zero component (e.g. [1, 0, 1]).
    """
    norm1 = np.linalg.norm(v1)
    norm2 = np.linalg.norm(v2)
    if norm1 == 0 or norm2 == 0:
        return 0
    return np.dot(v1, v2) / (norm1 * norm2)
def manhattan_distance(v1, v2):  # Manhattan distance
    """L1 distance: sum of absolute component-wise differences."""
    return np.abs(v1 - v2).sum()
def euclidean_distance(v1, v2):  # Euclidean distance
    """L2 distance: square root of the summed squared differences."""
    squared_diff = np.square(v1 - v2)
    return np.sqrt(squared_diff.sum())
def euclidean_standardized_distance(v1, v2):  # standardized Euclidean distance
    """Euclidean distance with each component scaled by its sample variance.

    The variance is estimated (ddof=1) from the two vectors themselves; a
    tiny epsilon keeps the division well-defined for zero-variance components.
    """
    stacked = np.vstack([v1, v2])
    component_var = np.var(stacked, axis=0, ddof=1)
    eps = 0.000000001
    scaled = (v1 - v2) ** 2 / (component_var + eps * np.ones_like(component_var))
    return np.sqrt(scaled.sum())
def hamming_distance(v1, v2):
    """Number of differing bits between two binary strings (masked to 32 bits)."""
    xor_bits = int(v1, 2) ^ int(v2, 2)
    return bin(xor_bits & 0xffffffff).count('1')
def chebyshev_distance(v1, v2):  # Chebyshev distance
    """L-infinity distance: largest absolute component-wise difference."""
    return np.abs(v1 - v2).max()
def minkowski_distance(v1, v2, p=2):  # Minkowski distance
    """Minkowski distance of order *p* between two vectors.

    The original implementation hard-coded the Euclidean special case; the
    new ``p`` parameter generalizes it while ``p=2`` (the default) preserves
    the old behaviour exactly. ``p=1`` gives the Manhattan distance.
    """
    diff = np.abs(np.asarray(v1) - np.asarray(v2))
    return np.power(np.sum(np.power(diff, p)), 1.0 / p)
def mahalanobis_distance(v1, v2): # Mahalanobis distance
    """Sum of pairwise Mahalanobis distances between the stacked samples.

    v1 and v2 are treated as two variables observed n times: each row of
    X.T is a 2-D sample, and the distances between all n*(n-1)/2 sample
    pairs are accumulated.

    When the 2x2 covariance matrix is singular its inverse is replaced by a
    zero matrix, which makes every pairwise distance 0.
    """
    X = np.vstack([v1, v2])
    XT = X.T  # rows are the 2-D samples
    S = np.cov(X)  # 2x2 covariance matrix between the two variables
    try:
        SI = np.linalg.inv(S)
    except np.linalg.LinAlgError:
        # Bug fix: was a bare `except:`, which also swallowed unrelated
        # errors (even KeyboardInterrupt). Only a singular matrix should
        # trigger the zero-matrix fallback.
        SI = np.zeros_like(S)
    n = XT.shape[0]
    distance_all = []
    for i in range(0, n):
        for j in range(i + 1, n):
            delta = XT[i] - XT[j]
            distance_1 = np.sqrt(np.dot(np.dot(delta, SI), delta.T))
            distance_all.append(distance_1)
    return np.sum(np.abs(distance_all))
def bray_curtis_distance(v1, v2):  # Bray-Curtis dissimilarity (ecology)
    """Bray-Curtis dissimilarity between two abundance vectors.

    A tiny epsilon in the denominator guards against division by zero when
    both vectors sum to zero.
    """
    numerator = np.sum(np.abs(v2 - v1))
    denominator = np.sum(v1) + np.sum(v2)
    eps = 0.000000001
    return numerator / (denominator + eps)
def pearson_correlation_distance(v1, v2):  # Pearson correlation
    """Pearson correlation coefficient between the two vectors."""
    stacked = np.vstack([v1, v2])
    # off-diagonal entry of the 2x2 correlation matrix
    return np.corrcoef(stacked)[0][1]
def jaccard_similarity_coefficient_distance(v1, v2):  # Jaccard similarity coefficient
    """Jaccard-style dissimilarity: differing positions over non-zero positions.

    Positions where both vectors are zero are ignored; a tiny epsilon guards
    against an all-zero denominator.
    """
    v1 = np.asarray(v1)
    v2 = np.asarray(v2)
    nonzero_any = np.bitwise_or(v1 != 0, v2 != 0)
    eps = 0.000000001
    numerator = np.double(np.bitwise_and(v1 != v2, nonzero_any).sum())
    denominator = np.double(nonzero_any.sum() + eps)
    return numerator / denominator
def word_move_distance(model, sentence1_split, sentence2_split):  # WORD MOVER DISTANCE, it's the important one
    """Delegate to the embedding model's Word Mover's Distance.

    *model* must expose a ``wmdistance(tokens_a, tokens_b)`` method (e.g. a
    gensim KeyedVectors instance loaded by the caller); both sentences are
    pre-tokenised lists of words.
    """
    return model.wmdistance(sentence1_split, sentence2_split)
| [
"numpy.abs",
"numpy.ones_like",
"numpy.bitwise_or",
"numpy.corrcoef",
"numpy.asarray",
"numpy.square",
"numpy.sum",
"numpy.dot",
"numpy.linalg.inv",
"numpy.vstack",
"numpy.linalg.norm",
"numpy.cov",
"numpy.zeros_like",
"numpy.var"
] | [((587, 606), 'numpy.vstack', 'np.vstack', (['[v1, v2]'], {}), '([v1, v2])\n', (596, 606), True, 'import numpy as np\n'), ((622, 651), 'numpy.var', 'np.var', (['v1_v2'], {'axis': '(0)', 'ddof': '(1)'}), '(v1_v2, axis=0, ddof=1)\n', (628, 651), True, 'import numpy as np\n'), ((1164, 1183), 'numpy.vstack', 'np.vstack', (['[v1, v2]'], {}), '([v1, v2])\n', (1173, 1183), True, 'import numpy as np\n'), ((1223, 1232), 'numpy.cov', 'np.cov', (['X'], {}), '(X)\n', (1229, 1232), True, 'import numpy as np\n'), ((2158, 2177), 'numpy.vstack', 'np.vstack', (['[v1, v2]'], {}), '([v1, v2])\n', (2167, 2177), True, 'import numpy as np\n'), ((2333, 2347), 'numpy.asarray', 'np.asarray', (['v1'], {}), '(v1)\n', (2343, 2347), True, 'import numpy as np\n'), ((2357, 2371), 'numpy.asarray', 'np.asarray', (['v2'], {}), '(v2)\n', (2367, 2371), True, 'import numpy as np\n'), ((372, 387), 'numpy.abs', 'np.abs', (['(v1 - v2)'], {}), '(v1 - v2)\n', (378, 387), True, 'import numpy as np\n'), ((975, 990), 'numpy.abs', 'np.abs', (['(v1 - v2)'], {}), '(v1 - v2)\n', (981, 990), True, 'import numpy as np\n'), ((1299, 1315), 'numpy.linalg.inv', 'np.linalg.inv', (['S'], {}), '(S)\n', (1312, 1315), True, 'import numpy as np\n'), ((1812, 1832), 'numpy.abs', 'np.abs', (['distance_all'], {}), '(distance_all)\n', (1818, 1832), True, 'import numpy as np\n'), ((1948, 1963), 'numpy.abs', 'np.abs', (['(v2 - v1)'], {}), '(v2 - v1)\n', (1954, 1963), True, 'import numpy as np\n'), ((1982, 1992), 'numpy.sum', 'np.sum', (['v1'], {}), '(v1)\n', (1988, 1992), True, 'import numpy as np\n'), ((1995, 2005), 'numpy.sum', 'np.sum', (['v2'], {}), '(v2)\n', (2001, 2005), True, 'import numpy as np\n'), ((212, 226), 'numpy.dot', 'np.dot', (['v1', 'v2'], {}), '(v1, v2)\n', (218, 226), True, 'import numpy as np\n'), ((471, 489), 'numpy.square', 'np.square', (['(v1 - v2)'], {}), '(v1 - v2)\n', (480, 489), True, 'import numpy as np\n'), ((1074, 1092), 'numpy.square', 'np.square', (['(v1 - v2)'], {}), '(v1 - v2)\n', (1083, 
1092), True, 'import numpy as np\n'), ((1382, 1398), 'numpy.zeros_like', 'np.zeros_like', (['S'], {}), '(S)\n', (1395, 1398), True, 'import numpy as np\n'), ((2189, 2207), 'numpy.corrcoef', 'np.corrcoef', (['v1_v2'], {}), '(v1_v2)\n', (2200, 2207), True, 'import numpy as np\n'), ((230, 248), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (244, 248), True, 'import numpy as np\n'), ((251, 269), 'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (265, 269), True, 'import numpy as np\n'), ((1721, 1738), 'numpy.dot', 'np.dot', (['delta', 'SI'], {}), '(delta, SI)\n', (1727, 1738), True, 'import numpy as np\n'), ((2418, 2449), 'numpy.bitwise_or', 'np.bitwise_or', (['(v1 != 0)', '(v2 != 0)'], {}), '(v1 != 0, v2 != 0)\n', (2431, 2449), True, 'import numpy as np\n'), ((2506, 2537), 'numpy.bitwise_or', 'np.bitwise_or', (['(v1 != 0)', '(v2 != 0)'], {}), '(v1 != 0, v2 != 0)\n', (2519, 2537), True, 'import numpy as np\n'), ((743, 765), 'numpy.ones_like', 'np.ones_like', (['sk_v1_v2'], {}), '(sk_v1_v2)\n', (755, 765), True, 'import numpy as np\n')] |
from math import log10, exp
import numpy
def get_index_given_truth_values(variables, truth_values, cardinalities):
    """
    Convert a truth-table tuple into a flat array index.
    Row-major linearisation: the last variable varies fastest.
    :param variables: The variables in the factor
    :param truth_values: The values of the variables in the truth table (same order as variables)
    :param cardinalities: The cardinality of the variables (same order as variables)
    :return: The flat index into the factor's value array
    """
    index = 0
    for position in range(len(variables)):
        stride = numpy.prod(cardinalities[position + 1:])
        index += stride * truth_values[position]
    return int(index)
def get_truth_values_given_index(variables, index_value, cardinalities):
    """
    Recover the truth-table tuple corresponding to a flat array index.
    Inverse of ``get_index_given_truth_values`` (row-major layout).
    @param variables: list containing all the variables in the factor
    @param index_value: flat index whose tuple is to be reconstructed
    @param cardinalities: cardinalities of the given variables
    @return: list of truth values, one per variable
    """
    truth_values = []
    for position in range(len(variables)):
        stride = numpy.prod(cardinalities[position + 1:])
        truth_values.append(int(index_value // stride))
        index_value -= (index_value // stride) * stride
    return truth_values
def convert_to_log_space(distribution_array):
    """Map every probability in *distribution_array* to log10 space.

    Scalar entries are converted directly; list entries element-wise.
    Fix: the original test ``each is not list`` compared the element
    against the ``list`` type *object* and was therefore always true, so
    list entries hit ``log10(list)`` and raised TypeError; ``isinstance``
    restores the intended per-element branch.  Scalar behaviour is
    unchanged.
    """
    new_distribution_array = []
    for each in distribution_array:
        if not isinstance(each, list):
            each_new = log10(each)
        else:
            each_new = [log10(each_value) for each_value in each]
        new_distribution_array.append(each_new)
    return new_distribution_array
def convert_to_exponent_space(distribution_array):
    """Map every log10 value in *distribution_array* back to linear space.

    Inverse of ``convert_to_log_space``.  Fix: the original
    ``each is not list`` type-identity test was always true, leaving the
    list branch unreachable; ``isinstance`` fixes the dispatch so nested
    lists are converted element-wise.  Scalar behaviour is unchanged.
    """
    new_distribution_array = []
    for each in distribution_array:
        if not isinstance(each, list):
            each_new = 10 ** each
        else:
            each_new = [10 ** each_value for each_value in each]
        new_distribution_array.append(each_new)
    return new_distribution_array
def compute_ordering(num_of_var, var_in_clique, evidence):
    """
    Rank variables by degree for a min-degree elimination ordering.
    @param num_of_var: number of variables to order
    @param var_in_clique: list of cliques, each a list of variable ids
    @param evidence: the evidence provided as (variable, value) pairs
    @return: (degree per variable, variable ids sorted by ascending degree)
    """
    degrees = [0] * num_of_var
    # NOTE(review): the evidence variables are extracted but never used to
    # prune the cliques below — presumably an unfinished filtering step.
    evidence_var = [pair[0] for pair in evidence]
    for clique in var_in_clique:
        for variable in clique:
            # Each variable gains one degree per neighbour in the clique.
            degrees[variable] += len(clique) - 1
    sorted_variable = numpy.argsort(degrees)
    return degrees, sorted_variable
def instantiate(num_of_var, evidence, cardinalities, var_in_clique, distribution_array):
    """
    Instantiate the factors given the evidence: rows of each factor that
    contradict an evidence assignment are removed and the evidence variable
    is dropped from the factor's variable list.
    Note: ``num_of_var`` (per-clique variable counts) is mutated in place.
    @param num_of_var: The number of variables in each clique (mutated)
    @param evidence: The given evidence as (variable, value) pairs
    @param cardinalities: The cardinalities of the variables
    @param var_in_clique: Variables in each clique
    @param distribution_array: The probability distribution array per clique
    @return: The instantiated probability distribution table and updated variable list
    """
    for each in evidence:
        variable = each[0]
        value = each[1]
        # Re-wrap as numpy arrays so that fancy indexing with variable ids works.
        var_in_clique = numpy.array(var_in_clique)
        cardinalities = numpy.array(cardinalities)
        for each_clique in range(len(var_in_clique)):
            if variable in var_in_clique[each_clique]:
                for each_tuple in range(len(distribution_array[each_clique])):
                    # Truth value is calculated for each tuple and then compared if it is equal to the evidence variable
                    truth_value = get_truth_values_given_index(var_in_clique[each_clique], each_tuple,
                                                               cardinalities[var_in_clique[each_clique]])
                    if truth_value[(list(var_in_clique[each_clique])).index(variable)] != value:
                        index_to_delete = get_index_given_truth_values(var_in_clique[each_clique], truth_value,
                                                                      cardinalities[var_in_clique[each_clique]])
                        # If the tuple does not match with the evidence then mark it (-1) for removal below
                        distribution_array[each_clique][index_to_delete] = -1
                var_in_clique = list(var_in_clique)
                val = list(var_in_clique[each_clique]).index(variable)
                # deleting the evidence variable from the array
                var_in_clique[each_clique] = numpy.ndarray.tolist(numpy.delete(var_in_clique[each_clique], val))
                num_of_var[each_clique] -= 1
                # removing the values that do not correspond with the evidence
                distribution_array[each_clique] = list(filter(lambda a: a != -1, distribution_array[each_clique]))
    return var_in_clique, distribution_array
def product_of_factors(factor_1, factor_2, var_in_factor_1, var_in_factor_2, cardinalities):
    """
    Takes two factors and returns their product.
    Values are multiplied in log10 space (added) to reduce numeric underflow,
    then converted back to linear space before returning.
    @param factor_1: The first factor (flat value array)
    @param factor_2: The second factor (flat value array)
    @param var_in_factor_1: The variables in factor 1
    @param var_in_factor_2: The variables in factor 2
    @param cardinalities: The cardinalities of the variables in the factors
    @return: (product factor values, variables of the product factor)
    """
    var_in_output = []
    # Work in log space: products become sums.
    factor_1 = convert_to_log_space(factor_1)
    factor_2 = convert_to_log_space(factor_2)
    # Output scope: union of both variable lists, factor-1 variables first.
    for each_var in var_in_factor_1:
        var_in_output.append(each_var)
    for each_var in var_in_factor_2:
        if each_var not in var_in_output:
            var_in_output.append(each_var)
    common_var = list(set(var_in_factor_1).intersection(set(var_in_factor_2)))
    new_factor = []
    cardinalities = numpy.array(cardinalities)
    for each_index_value_of_factor_1 in range(numpy.product(cardinalities[var_in_factor_1])):
        # Get the truth values for both the tuples
        truth_value_1 = numpy.array(
            get_truth_values_given_index(var_in_factor_1, each_index_value_of_factor_1, cardinalities[var_in_factor_1]))
        for each_index_value_of_factor_2 in range(numpy.product(cardinalities[var_in_factor_2])):
            truth_value_2 = numpy.array(get_truth_values_given_index(var_in_factor_2, each_index_value_of_factor_2,
                                                                     cardinalities[var_in_factor_2]))
            # Check if both the tuples are compatible or not (agree on every
            # shared variable); if yes then take their product (log-sum).
            if (truth_value_1[numpy.where(numpy.isin(var_in_factor_1, common_var))] == truth_value_2[
                    numpy.where(numpy.isin(var_in_factor_2, common_var))]).all():
                new_factor.append(factor_1[each_index_value_of_factor_1] + factor_2[each_index_value_of_factor_2])
    # Back to linear space.
    new_factor = convert_to_exponent_space(new_factor)
    return new_factor, var_in_output
def sum_out(factor, set_of_variable_to_sum_out, var_in_factor, cardinalities):
    """
    Takes a factor and a variable to sum out and returns a factor over the
    remaining variables (marginalisation).
    @param factor: The factor in which the sum will take place (flat value array)
    @param set_of_variable_to_sum_out: the single variable id to sum out
        (wrapped into a one-element list internally)
    @param var_in_factor: The variables in the factor/clique
    @param cardinalities: The cardinalities of the variables in the factor
    @return: (marginalised factor values, remaining variables)
    """
    cardinalities = numpy.array(cardinalities)
    set_of_variable_to_sum_out = [set_of_variable_to_sum_out]
    var_in_final_factor = list(filter(lambda x: x not in set_of_variable_to_sum_out, var_in_factor))
    # Getting the size of the new factor: product of remaining cardinalities.
    num_tuple_new_factor = numpy.product(numpy.array(cardinalities)[var_in_final_factor])
    new_factor = [0] * num_tuple_new_factor
    for each_tuple in range(len(factor)):
        truth_value = numpy.array(
            get_truth_values_given_index(var_in_factor, each_tuple, cardinalities[var_in_factor]))
        # Project the tuple onto the remaining variables and accumulate.
        index_for_new_factor = get_index_given_truth_values(var_in_final_factor, truth_value[
            numpy.where(numpy.isin(var_in_factor, var_in_final_factor))], cardinalities[var_in_final_factor])
        # Adding value compatible with the variable
        new_factor[index_for_new_factor] = new_factor[index_for_new_factor] + factor[each_tuple]
return new_factor, var_in_final_factor | [
"numpy.product",
"numpy.prod",
"numpy.delete",
"numpy.isin",
"numpy.argsort",
"numpy.array",
"math.log10"
] | [((2942, 2980), 'numpy.argsort', 'numpy.argsort', (['min_degree_for_each_var'], {}), '(min_degree_for_each_var)\n', (2955, 2980), False, 'import numpy\n'), ((6303, 6329), 'numpy.array', 'numpy.array', (['cardinalities'], {}), '(cardinalities)\n', (6314, 6329), False, 'import numpy\n'), ((7957, 7983), 'numpy.array', 'numpy.array', (['cardinalities'], {}), '(cardinalities)\n', (7968, 7983), False, 'import numpy\n'), ((3677, 3703), 'numpy.array', 'numpy.array', (['var_in_clique'], {}), '(var_in_clique)\n', (3688, 3703), False, 'import numpy\n'), ((3729, 3755), 'numpy.array', 'numpy.array', (['cardinalities'], {}), '(cardinalities)\n', (3740, 3755), False, 'import numpy\n'), ((6377, 6422), 'numpy.product', 'numpy.product', (['cardinalities[var_in_factor_1]'], {}), '(cardinalities[var_in_factor_1])\n', (6390, 6422), False, 'import numpy\n'), ((587, 625), 'numpy.prod', 'numpy.prod', (['cardinalities[number + 1:]'], {}), '(cardinalities[number + 1:])\n', (597, 625), False, 'import numpy\n'), ((1755, 1766), 'math.log10', 'log10', (['each'], {}), '(each)\n', (1760, 1766), False, 'from math import log10, exp\n'), ((6688, 6733), 'numpy.product', 'numpy.product', (['cardinalities[var_in_factor_2]'], {}), '(cardinalities[var_in_factor_2])\n', (6701, 6733), False, 'import numpy\n'), ((8229, 8255), 'numpy.array', 'numpy.array', (['cardinalities'], {}), '(cardinalities)\n', (8240, 8255), False, 'import numpy\n'), ((1476, 1514), 'numpy.prod', 'numpy.prod', (['cardinalities[number + 1:]'], {}), '(cardinalities[number + 1:])\n', (1486, 1514), False, 'import numpy\n'), ((1808, 1825), 'math.log10', 'log10', (['each_value'], {}), '(each_value)\n', (1813, 1825), False, 'from math import log10, exp\n'), ((1340, 1378), 'numpy.prod', 'numpy.prod', (['cardinalities[number + 1:]'], {}), '(cardinalities[number + 1:])\n', (1350, 1378), False, 'import numpy\n'), ((1434, 1472), 'numpy.prod', 'numpy.prod', (['cardinalities[number + 1:]'], {}), '(cardinalities[number + 1:])\n', (1444, 1472), 
False, 'import numpy\n'), ((5033, 5078), 'numpy.delete', 'numpy.delete', (['var_in_clique[each_clique]', 'val'], {}), '(var_in_clique[each_clique], val)\n', (5045, 5078), False, 'import numpy\n'), ((8622, 8668), 'numpy.isin', 'numpy.isin', (['var_in_factor', 'var_in_final_factor'], {}), '(var_in_factor, var_in_final_factor)\n', (8632, 8668), False, 'import numpy\n'), ((7093, 7132), 'numpy.isin', 'numpy.isin', (['var_in_factor_1', 'common_var'], {}), '(var_in_factor_1, common_var)\n', (7103, 7132), False, 'import numpy\n'), ((7182, 7221), 'numpy.isin', 'numpy.isin', (['var_in_factor_2', 'common_var'], {}), '(var_in_factor_2, common_var)\n', (7192, 7221), False, 'import numpy\n')] |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
def nearest_euclidean(vector, k=3):
    """Classify *vector* by majority vote among its k nearest neighbours.

    The training set is the hard-coded table below; the last element of
    each row is the class label, the leading columns are the features
    compared against *vector* with the Euclidean distance.
    """
    data = [[5.5, 0.5, 4.5, 2],
            [7.4, 1.1, 3.6, 0],
            [5.9, 0.2, 3.4, 2],
            [9.9, 0.1, 0.8, 0],
            [6.9, -0.1, 0.6, 2],
            [6.8, -0.3, 5.1, 2],
            [4.1, 0.3, 5.1, 1],
            [1.3, -0.2, 1.8, 1],
            [4.5, 0.4, 2.0, 0],
            [0.5, 0.0, 2.3, 1],
            [5.9, -0.1, 4.4, 0],
            [9.3, -0.2, 3.2, 0],
            [1.0, 0.1, 2.8, 1],
            [0.4, 0.1, 4.3, 1],
            [2.7, -0.5, 4.2, 1]]
    distances = []
    for row in data:
        squared = sum((vector[dim] - row[dim]) ** 2 for dim in range(len(vector)))
        distances.append([np.sqrt(squared), row[-1]])
    if k == 1:
        return min(distances)[1]
    distances.sort(key=lambda pair: pair[0])
    votes = [distances[rank][1] for rank in range(0, k)]
    # Majority vote among the k nearest labels.
    return max(votes, key=votes.count)
def nearest_euclidean_regression(vector, k=3):
    """Predict a value for *vector* as the mean target of its k nearest rows.

    Bug fix: the original accumulated ``distances[i][0]`` (the distance
    itself) instead of the neighbour's target value ``distances[i][1]``,
    so for k > 1 it averaged distances rather than targets.  The k == 1
    branch already returned the target, confirming the intent.
    """
    distances = []
    data = [[5.5, 0.5, 4.5, 2],
            [7.4, 1.1, 3.6, 0],
            [5.9, 0.2, 3.4, 2],
            [9.9, 0.1, 0.8, 0],
            [6.9, -0.1, 0.6, 2],
            [6.8, -0.3, 5.1, 2],
            [4.1, 0.3, 5.1, 1],
            [1.3, -0.2, 1.8, 1],
            [4.5, 0.4, 2.0, 0],
            [0.5, 0.0, 2.3, 1],
            [5.9, -0.1, 4.4, 0],
            [9.3, -0.2, 3.2, 0],
            [1.0, 0.1, 2.8, 1],
            [0.4, 0.1, 4.3, 1],
            [2.7, -0.5, 4.2, 1]]
    for i in range(len(data)):
        distance = 0
        for j in range(len(vector)):
            distance += (vector[j] - data[i][j])**2
        # Store (distance, target) pairs; the target is the row's last column.
        distances.append([np.sqrt(distance), data[i][-1]])
    if k == 1:
        return min(distances)[1]
    distances.sort(key=lambda x: x[0])
    total = 0
    for i in range(k):
        total += distances[i][1]  # accumulate neighbour targets, not distances
    return total / k
# Demo: run classification and regression on two sample feature vectors.
first_sample = [4.1, -0.1, 2.2]
second_sample = [6.1, 0.4, 1.3]
print('The K=3 NN Classification for the first list is: ', nearest_euclidean(first_sample))
print('The K=3 NN Classification for the second list is: ', nearest_euclidean(second_sample))
print('The K=3 NN Regression for the first list is: ', nearest_euclidean_regression(first_sample))
print('The K=3 NN Regression for the second list is: ', nearest_euclidean_regression(second_sample))
| [
"numpy.sqrt"
] | [((799, 816), 'numpy.sqrt', 'np.sqrt', (['distance'], {}), '(distance)\n', (806, 816), True, 'import numpy as np\n'), ((1829, 1846), 'numpy.sqrt', 'np.sqrt', (['distance'], {}), '(distance)\n', (1836, 1846), True, 'import numpy as np\n')] |
from setuptools import setup, Extension
# Bypass import numpy before running install_requires
# https://stackoverflow.com/questions/54117786/add-numpy-get-include-argument-to-setuptools-without-preinstalled-numpy
class get_numpy_include:
    """Lazy proxy whose str() is numpy's include directory.

    Passing an instance (instead of calling ``numpy.get_include()`` at
    module import time) defers the numpy import until setuptools actually
    builds the extension, so ``install_requires`` can install numpy first.
    """

    def __str__(self):
        import numpy as np
        return np.get_include()
# Build configuration for the C++ extension wrapping the Azure Kinect SDK.
_include_dirs = [get_numpy_include(), '/usr/local/include/opencv4',
                 '/usr/include', '/usr/local/include']
_library_dirs = ['/usr/local/lib', '/usr/lib/x86_64-linux-gnu']
_libraries = ['k4a', 'k4abt', 'opencv_core', 'opencv_calib3d',
              'opencv_imgproc', 'turbojpeg']
module = Extension(
    'k4a_module',
    sources=['pyk4a/pyk4a.cpp'],
    include_dirs=_include_dirs,
    library_dirs=_library_dirs,
    libraries=_libraries
)
# Package metadata; the compiled k4a_module is attached via ext_modules.
_metadata = dict(
    name='pyk4a',
    version='0.6',
    description='Python wrapper for Azure Kinect SDK',
    license='GPL-3.0',
    author='<NAME>',
    install_requires=['numpy'],
    author_email='<EMAIL>',
    url='https://github.com/etiennedub/pyk4a/',
    packages=['pyk4a'],
    ext_modules=[module]
)
setup(**_metadata)
| [
"setuptools.setup",
"numpy.get_include"
] | [((664, 943), 'setuptools.setup', 'setup', ([], {'name': '"""pyk4a"""', 'version': '"""0.6"""', 'description': '"""Python wrapper for Azure Kinect SDK"""', 'license': '"""GPL-3.0"""', 'author': '"""<NAME>"""', 'install_requires': "['numpy']", 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/etiennedub/pyk4a/"""', 'packages': "['pyk4a']", 'ext_modules': '[module]'}), "(name='pyk4a', version='0.6', description=\n 'Python wrapper for Azure Kinect SDK', license='GPL-3.0', author=\n '<NAME>', install_requires=['numpy'], author_email='<EMAIL>', url=\n 'https://github.com/etiennedub/pyk4a/', packages=['pyk4a'], ext_modules\n =[module])\n", (669, 943), False, 'from setuptools import setup, Extension\n'), ((299, 318), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (316, 318), False, 'import numpy\n')] |
import numpy as np
def numpy_random4_test(s):
return np.random.rand(s)
| [
"numpy.random.rand"
] | [((56, 73), 'numpy.random.rand', 'np.random.rand', (['s'], {}), '(s)\n', (70, 73), True, 'import numpy as np\n')] |
# Computes the ssfr and mass of all target objects
'''
'''
import sys
import os
import string
import numpy as np
import pandas as pd
from astropy.io import ascii
# Read the redshift data (absolute path: this script is tied to one machine).
red_data_loc = '/Users/galaxies-air/COSMOS/COSMOSData/all_c_hasinger.txt'
red_data = ascii.read(red_data_loc).to_pandas()
# Read the muzzin data
muzzin_data_loc = '/Users/galaxies-air/COSMOS/Muzzin_Data/UVISTA_final_colors_sfrs_v4.1.dat'
muzzin_data = ascii.read(muzzin_data_loc).to_pandas()
# Preserving order and number of objects in red_data (our measurements):
# a left join keeps exactly one output row per red_data row, matching on id/ID.
merged_data = red_data.merge(
    muzzin_data, how='left', left_on='id', right_on='ID')
''' Duplicate Filtering '''
def filter_df(df):
    """Return a boolean numpy mask selecting rows with good measurements.

    A row is good when its Bad, Unsure and Star flags are all zero.
    """
    flags_clear = (df['Bad'] == 0, df['Unsure'] == 0, df['Star'] == 0)
    return np.logical_and.reduce(flags_clear)
# First we need to isolate the duplicates into a dataframe
# (keep=False marks *every* occurrence of a repeated id, not just later ones).
duplicate_indicies = merged_data.duplicated(subset='id', keep=False)
duplicate_df = merged_data[duplicate_indicies]
# Then, find all objects that have at least one good measurement, and remove any duplicates from that
duplicates_df_filt = duplicate_df[filter_df(duplicate_df)]
# This keeps only the first measurement of each object - possibly change to be the one with lower errors later
duplicates_df_filt = duplicates_df_filt[np.logical_not(duplicates_df_filt.duplicated(
    subset='id'))]
# Find which single objects have good measurements
single_objs_df = merged_data[np.logical_not(duplicate_indicies)]
single_objs_df_filt = single_objs_df[filter_df(single_objs_df)]
# We are now left with a df of object IDs that were duplicates, but now have at least one good measurement
# Concatenate this df with the objects that were NOT duplicates, to get a full count of objects that have good redshifts
good_objects_indices = pd.concat([single_objs_df_filt,
                                  duplicates_df_filt]).index
# Count the total number of objects (stars excluded, one row per unique id).
no_stars = merged_data[merged_data['Star'] == 0]
unique_objs = np.logical_not(no_stars.duplicated(subset='id'))
unique_indicies = no_stars[unique_objs].index
total_measured = len(good_objects_indices)
total_objects = len(unique_indicies)
print('Measured objects: ' + str(total_measured))
print('Total objects: ' + str(total_objects))
print('Fraction measured: ' + str(total_measured/total_objects))
# Stellar mass (log M*) straight from the merged Muzzin catalog.
muzzin_mass = merged_data['LMASS']
# Specific star-formation rate: log10(SFR / M*).
muzzin_ssfr = np.log10(merged_data['SFR_tot'] / (10**muzzin_mass))
# Putting only the relevant data into a frame
all_objs = pd.DataFrame(
    np.transpose([merged_data['OBJID'], muzzin_mass, muzzin_ssfr, np.zeros(len(merged_data))]), columns=['OBJID', 'LMASS', 'sSFR', 'Measured'])
# Flag measured objects with a single .loc call.  The original chained
# `all_objs['Measured'].iloc[...] = 1.` triggers pandas' SettingWithCopy
# hazard and is not guaranteed to write back to the frame.  all_objs has a
# fresh RangeIndex aligned with merged_data, so the row labels in
# good_objects_indices equal the positions the old .iloc call used.
all_objs.loc[good_objects_indices, 'Measured'] = 1.
# Selecting only the unique objects
unique_objs = all_objs.iloc[unique_indicies]
| [
"numpy.log10",
"numpy.logical_not",
"numpy.logical_and.reduce",
"pandas.concat",
"astropy.io.ascii.read"
] | [((2444, 2496), 'numpy.log10', 'np.log10', (["(merged_data['SFR_tot'] / 10 ** muzzin_mass)"], {}), "(merged_data['SFR_tot'] / 10 ** muzzin_mass)\n", (2452, 2496), True, 'import numpy as np\n'), ((799, 874), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (["(df['Bad'] == 0, df['Unsure'] == 0, df['Star'] == 0)"], {}), "((df['Bad'] == 0, df['Unsure'] == 0, df['Star'] == 0))\n", (820, 874), True, 'import numpy as np\n'), ((1511, 1545), 'numpy.logical_not', 'np.logical_not', (['duplicate_indicies'], {}), '(duplicate_indicies)\n', (1525, 1545), True, 'import numpy as np\n'), ((1864, 1916), 'pandas.concat', 'pd.concat', (['[single_objs_df_filt, duplicates_df_filt]'], {}), '([single_objs_df_filt, duplicates_df_filt])\n', (1873, 1916), True, 'import pandas as pd\n'), ((276, 300), 'astropy.io.ascii.read', 'ascii.read', (['red_data_loc'], {}), '(red_data_loc)\n', (286, 300), False, 'from astropy.io import ascii\n'), ((444, 471), 'astropy.io.ascii.read', 'ascii.read', (['muzzin_data_loc'], {}), '(muzzin_data_loc)\n', (454, 471), False, 'from astropy.io import ascii\n')] |
# Copyright (c) 2021 <NAME>
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
import os
from shutil import rmtree
import click
import epic
from epic.detection.detectors_factory import DetectorsFactory
from epic.preprocessing.preprocess import preprocess
from epic.utils.file_processing import (load_imgs, load_input_dirs,
load_motc_dets, save_imgs,
save_motc_dets, save_video)
from epic.utils.image_processing import draw_dets
import numpy as np
from torch import device, tensor
from torchvision.ops.boxes import batched_nms
import yaml
# Names of the artifacts written inside each image sequence's output folder.
DETECTIONS_DIR_NAME = 'Detections'
MOTC_DETS_FILENAME = 'motc_dets.txt'
VID_FILENAME = 'video'
@click.command('detection')
@click.argument('root-dir', type=click.Path(exists=True, file_okay=False))
@click.argument('yaml-config', type=click.Path(exists=True, dir_okay=False))
@click.option('--multi-sequence', is_flag=True, help='perform object '
              'detection in images located in root directory '
              'subfolders instead')
@click.option('--save-dets', is_flag=True, help='save detections in '
              'MOTChallenge CSV text-file format')
@click.option('--vis-dets', help='visualize detections in output images',
              is_flag=True)
@click.option('--num-frames', type=click.IntRange(1), help='number of frames '
              'to detect objects in')
@click.option('--motchallenge', is_flag=True, help='assume root directory is '
              'in MOTChallenge format')
@click.option('--num-workers', help='number of workers to utilize for '
              'parallel processing (default = CPU core count)',
              type=click.IntRange(1))
@click.option('--preprocess', 'pre_proc', is_flag=True,
              help='preprocess dataset')
@click.option('--always', is_flag=True,
              help='perform object detection for all image sequences, '
                   'even those with existing MOTChallenge CSV text-files')
def detect(root_dir, yaml_config, vis_dets=True, save_dets=False,
           multi_sequence=False, num_frames=None, motchallenge=False,
           pre_proc=False, num_workers=None, iterate=False, always=False):
    """ Detect objects in images using trained object detection model.
    Output files are stored in a folder created within an image directory.

    Because the body contains ``yield``, this function is a generator:
    statements only run while it is iterated.  ``iterate`` is not exposed
    as a click option — it is presumably set when the callback is invoked
    programmatically (TODO confirm against callers).

    ROOT_DIR:
        directory to search for images in
    YAML_CONFIG:
        path to EPIC configuration file in YAML format
    """
    # Load the EPIC YAML configuration and build the configured detector.
    with open(yaml_config) as f:
        config = yaml.safe_load(f)
    if pre_proc:
        root_dir = preprocess.callback(root_dir, yaml_config, num_workers)
    config = config['detection']
    det_fcty = DetectorsFactory()
    detector_name = config['detector_name']
    detector = det_fcty.get_detector(detector_name, **config[detector_name])
    epic.LOGGER.info(f'Processing root directory \'{root_dir}\'.')
    dirs = load_input_dirs(root_dir, multi_sequence)
    epic.LOGGER.info(f'Found {len(dirs)} potential image sequence(s).')
    for input_dir in dirs:
        prefix = f'(Image sequence: {os.path.basename(input_dir)})'
        epic.LOGGER.info(f'{prefix} Processing.')
        # MOTChallenge layouts keep images in a dedicated subdirectory.
        imgs = (load_imgs(input_dir) if not motchallenge else load_imgs(
            os.path.join(input_dir, epic.OFFL_MOTC_IMGS_DIRNAME)))
        if len(imgs) == 0:
            epic.LOGGER.error(f'{prefix} No images found, skipping...')
            continue
        if num_frames is not None:
            if len(imgs) < num_frames:
                epic.LOGGER.error(f'{prefix} Number of images found is '
                                  'less than specified --num-frames, '
                                  'skipping...')
                continue
            else:
                imgs = imgs[0:num_frames]
        motc_dets_path = os.path.join(input_dir, epic.DETECTIONS_DIR_NAME, (
            epic.MOTC_DETS_FILENAME)) if not motchallenge else (os.path.join(
            input_dir, epic.OFFL_MOTC_DETS_DIRNAME,
            epic.OFFL_MOTC_DETS_FILENAME))
        output_dir = os.path.join(input_dir, DETECTIONS_DIR_NAME)
        # Run detection unless a detections file already exists (or --always).
        if always or not os.path.isfile(motc_dets_path):
            epic.LOGGER.info(f'{prefix} Detecting objects.')
            dets = run(imgs, config, detector)
            if os.path.isdir(output_dir):
                rmtree(output_dir)  # catch?
            os.mkdir(output_dir)
        # Otherwise reuse the previously saved detections.
        else:
            dets = load_motc_dets(motc_dets_path)
        if save_dets:  # and not os.path.isfile(motc_dets_path) and not always:
            epic.LOGGER.info(f'{prefix} Saving detections.')
            if not os.path.isdir(output_dir):
                os.mkdir(output_dir)
            save_motc_dets(dets, MOTC_DETS_FILENAME, output_dir)
            if motchallenge:
                # Also mirror the detections into the MOTChallenge layout.
                dets_dir = os.path.join(input_dir,
                                        epic.OFFL_MOTC_DETS_DIRNAME)
                if not os.path.isdir(dets_dir):
                    os.mkdir(dets_dir)
                save_motc_dets(dets, epic.OFFL_MOTC_DETS_FILENAME, dets_dir)
        if vis_dets:
            epic.LOGGER.info(f'{prefix} Visualizing detections.')
            draw_dets(dets, imgs)
            save_imgs(imgs, output_dir)
            vid_path = os.path.join(output_dir, VID_FILENAME)
            save_video(imgs, vid_path)
        if iterate:
            yield input_dir
def run(imgs, config, detector):
    """Run sliding-window object detection over *imgs*.

    The window size from the config is clamped to the image size (or set to
    the full image when ``full_window`` is enabled) before the grid of
    window positions is computed.
    """
    img_w = imgs[0][1].shape[1]
    img_h = imgs[0][1].shape[0]
    if config['full_window']:
        win_wh = (img_w, img_h)
    else:
        win_wh = (min(config['window_width'], img_w),
                  min(config['window_height'], img_h))
    win_pos_wh = sliding_window_positions((img_w, img_h), win_wh,
                                         config['window_overlap'])
    return sliding_window_detection(imgs, detector, win_wh, win_pos_wh,
                                    config['nms_threshold'])
def sliding_window_positions(img_wh, win_wh, win_ovlp_pct):
    """Compute per-axis window origin positions for a sliding window.

    Consecutive windows are separated by the window size minus the
    requested overlap percentage; the final window is snapped so that it
    ends exactly at the image border.

    Returns a pair (x_positions, y_positions).
    """
    step_wh = [size - round(win_ovlp_pct / 100 * size) for size in win_wh]
    positions = ([0], [0])
    for axis in (0, 1):
        axis_positions = positions[axis]
        while axis_positions[-1] + win_wh[axis] != img_wh[axis]:
            candidate = axis_positions[-1] + step_wh[axis]
            if candidate + win_wh[axis] <= img_wh[axis]:
                axis_positions.append(candidate)
            else:
                # Snap the last window flush against the image edge.
                axis_positions.append(img_wh[axis] - win_wh[axis])
    return positions
def sliding_window_detection(imgs, detector, win_wh, win_pos_wh, nms_thresh):
    """Detect objects per image by running *detector* on sliding windows.

    For each image, the detector is applied to every window; boxes are
    shifted back into whole-image coordinates, degenerate (zero-area)
    boxes are dropped, and per-class NMS merges overlapping detections
    from adjacent windows.  Returns one list of surviving detections per
    image, each detection wrapped in its own single-element list.
    """
    dets = []
    for img in imgs:
        img_dets, bboxes, classes, scores = [], [], [], []
        for win_pos_h in win_pos_wh[1]:
            for win_pos_w in win_pos_wh[0]:
                # Offset that maps window-local box coords to image coords.
                offsets = np.array([win_pos_w, win_pos_h, win_pos_w,
                                    win_pos_h]).astype('float32')
                ds = detector.detect(img[1][win_pos_h: win_pos_h + win_wh[1],
                                     win_pos_w: win_pos_w + win_wh[0]])
                # Drop zero-width / zero-height boxes.
                ds = [d for d in ds if d['bbox'][3] - d['bbox'][1] != 0 and d['bbox'][2] - d['bbox'][0] != 0]
                for d in ds:
                    d['bbox'] = np.add(np.array(d['bbox']).astype('float32'),
                                        offsets)
                    d['label'] = 0  # TODO multiclass support
                    # Order matters: the ndarray goes into bboxes *before*
                    # d['bbox'] is rebound to a plain list below.
                    bboxes.append(d['bbox'])
                    classes.append(d['label'])
                    scores.append(d['score'])
                    d['bbox'] = d['bbox'].tolist()
                    img_dets.append(d)
        dev = device('cpu')  # torch?
        # Per-class NMS across all windows of this image; det_idxs are the
        # indices of the detections that survive.
        det_idxs = batched_nms(tensor(bboxes, device=dev), tensor(scores,
                               device=dev), tensor(classes, device=dev),
                               nms_thresh)
        dets.append([[img_dets[idx]] for idx in det_idxs])
    return dets
| [
"epic.utils.file_processing.save_motc_dets",
"epic.utils.file_processing.load_motc_dets",
"epic.utils.file_processing.load_input_dirs",
"epic.utils.file_processing.save_imgs",
"epic.LOGGER.error",
"numpy.array",
"epic.utils.file_processing.load_imgs",
"click.IntRange",
"click.option",
"os.path.isd... | [((764, 790), 'click.command', 'click.command', (['"""detection"""'], {}), "('detection')\n", (777, 790), False, 'import click\n'), ((944, 1088), 'click.option', 'click.option', (['"""--multi-sequence"""'], {'is_flag': '(True)', 'help': '"""perform object detection in images located in root directory subfolders instead"""'}), "('--multi-sequence', is_flag=True, help=\n 'perform object detection in images located in root directory subfolders instead'\n )\n", (956, 1088), False, 'import click\n'), ((1114, 1221), 'click.option', 'click.option', (['"""--save-dets"""'], {'is_flag': '(True)', 'help': '"""save detections in MOTChallenge CSV text-file format"""'}), "('--save-dets', is_flag=True, help=\n 'save detections in MOTChallenge CSV text-file format')\n", (1126, 1221), False, 'import click\n'), ((1235, 1325), 'click.option', 'click.option', (['"""--vis-dets"""'], {'help': '"""visualize detections in output images"""', 'is_flag': '(True)'}), "('--vis-dets', help='visualize detections in output images',\n is_flag=True)\n", (1247, 1325), False, 'import click\n'), ((1454, 1559), 'click.option', 'click.option', (['"""--motchallenge"""'], {'is_flag': '(True)', 'help': '"""assume root directory is in MOTChallenge format"""'}), "('--motchallenge', is_flag=True, help=\n 'assume root directory is in MOTChallenge format')\n", (1466, 1559), False, 'import click\n'), ((1747, 1833), 'click.option', 'click.option', (['"""--preprocess"""', '"""pre_proc"""'], {'is_flag': '(True)', 'help': '"""preprocess dataset"""'}), "('--preprocess', 'pre_proc', is_flag=True, help=\n 'preprocess dataset')\n", (1759, 1833), False, 'import click\n'), ((1844, 2003), 'click.option', 'click.option', (['"""--always"""'], {'is_flag': '(True)', 'help': '"""perform object detection for all image sequences, even those with existing MOTChallenge CSV text-files"""'}), "('--always', is_flag=True, help=\n 'perform object detection for all image sequences, even those with existing MOTChallenge 
CSV text-files'\n )\n", (1856, 2003), False, 'import click\n'), ((2742, 2760), 'epic.detection.detectors_factory.DetectorsFactory', 'DetectorsFactory', ([], {}), '()\n', (2758, 2760), False, 'from epic.detection.detectors_factory import DetectorsFactory\n'), ((2887, 2947), 'epic.LOGGER.info', 'epic.LOGGER.info', (['f"""Processing root directory \'{root_dir}\'."""'], {}), '(f"Processing root directory \'{root_dir}\'.")\n', (2903, 2947), False, 'import epic\n'), ((2961, 3002), 'epic.utils.file_processing.load_input_dirs', 'load_input_dirs', (['root_dir', 'multi_sequence'], {}), '(root_dir, multi_sequence)\n', (2976, 3002), False, 'from epic.utils.file_processing import load_imgs, load_input_dirs, load_motc_dets, save_imgs, save_motc_dets, save_video\n'), ((2582, 2599), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (2596, 2599), False, 'import yaml\n'), ((2637, 2692), 'epic.preprocessing.preprocess.preprocess.callback', 'preprocess.callback', (['root_dir', 'yaml_config', 'num_workers'], {}), '(root_dir, yaml_config, num_workers)\n', (2656, 2692), False, 'from epic.preprocessing.preprocess import preprocess\n'), ((3179, 3220), 'epic.LOGGER.info', 'epic.LOGGER.info', (['f"""{prefix} Processing."""'], {}), "(f'{prefix} Processing.')\n", (3195, 3220), False, 'import epic\n'), ((4118, 4162), 'os.path.join', 'os.path.join', (['input_dir', 'DETECTIONS_DIR_NAME'], {}), '(input_dir, DETECTIONS_DIR_NAME)\n', (4130, 4162), False, 'import os\n'), ((824, 864), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'file_okay': '(False)'}), '(exists=True, file_okay=False)\n', (834, 864), False, 'import click\n'), ((902, 941), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'dir_okay': '(False)'}), '(exists=True, dir_okay=False)\n', (912, 941), False, 'import click\n'), ((1371, 1388), 'click.IntRange', 'click.IntRange', (['(1)'], {}), '(1)\n', (1385, 1388), False, 'import click\n'), ((1727, 1744), 'click.IntRange', 'click.IntRange', (['(1)'], {}), '(1)\n', (1741, 
1744), False, 'import click\n'), ((7732, 7745), 'torch.device', 'device', (['"""cpu"""'], {}), "('cpu')\n", (7738, 7745), False, 'from torch import device, tensor\n'), ((3237, 3257), 'epic.utils.file_processing.load_imgs', 'load_imgs', (['input_dir'], {}), '(input_dir)\n', (3246, 3257), False, 'from epic.utils.file_processing import load_imgs, load_input_dirs, load_motc_dets, save_imgs, save_motc_dets, save_video\n'), ((3404, 3463), 'epic.LOGGER.error', 'epic.LOGGER.error', (['f"""{prefix} No images found, skipping..."""'], {}), "(f'{prefix} No images found, skipping...')\n", (3421, 3463), False, 'import epic\n'), ((3863, 3937), 'os.path.join', 'os.path.join', (['input_dir', 'epic.DETECTIONS_DIR_NAME', 'epic.MOTC_DETS_FILENAME'], {}), '(input_dir, epic.DETECTIONS_DIR_NAME, epic.MOTC_DETS_FILENAME)\n', (3875, 3937), False, 'import os\n'), ((3979, 4066), 'os.path.join', 'os.path.join', (['input_dir', 'epic.OFFL_MOTC_DETS_DIRNAME', 'epic.OFFL_MOTC_DETS_FILENAME'], {}), '(input_dir, epic.OFFL_MOTC_DETS_DIRNAME, epic.\n OFFL_MOTC_DETS_FILENAME)\n', (3991, 4066), False, 'import os\n'), ((4232, 4280), 'epic.LOGGER.info', 'epic.LOGGER.info', (['f"""{prefix} Detecting objects."""'], {}), "(f'{prefix} Detecting objects.')\n", (4248, 4280), False, 'import epic\n'), ((4343, 4368), 'os.path.isdir', 'os.path.isdir', (['output_dir'], {}), '(output_dir)\n', (4356, 4368), False, 'import os\n'), ((4427, 4447), 'os.mkdir', 'os.mkdir', (['output_dir'], {}), '(output_dir)\n', (4435, 4447), False, 'import os\n'), ((4496, 4526), 'epic.utils.file_processing.load_motc_dets', 'load_motc_dets', (['motc_dets_path'], {}), '(motc_dets_path)\n', (4510, 4526), False, 'from epic.utils.file_processing import load_imgs, load_input_dirs, load_motc_dets, save_imgs, save_motc_dets, save_video\n'), ((4620, 4668), 'epic.LOGGER.info', 'epic.LOGGER.info', (['f"""{prefix} Saving detections."""'], {}), "(f'{prefix} Saving detections.')\n", (4636, 4668), False, 'import epic\n'), ((4764, 4816), 
'epic.utils.file_processing.save_motc_dets', 'save_motc_dets', (['dets', 'MOTC_DETS_FILENAME', 'output_dir'], {}), '(dets, MOTC_DETS_FILENAME, output_dir)\n', (4778, 4816), False, 'from epic.utils.file_processing import load_imgs, load_input_dirs, load_motc_dets, save_imgs, save_motc_dets, save_video\n'), ((5164, 5217), 'epic.LOGGER.info', 'epic.LOGGER.info', (['f"""{prefix} Visualizing detections."""'], {}), "(f'{prefix} Visualizing detections.')\n", (5180, 5217), False, 'import epic\n'), ((5230, 5251), 'epic.utils.image_processing.draw_dets', 'draw_dets', (['dets', 'imgs'], {}), '(dets, imgs)\n', (5239, 5251), False, 'from epic.utils.image_processing import draw_dets\n'), ((5264, 5291), 'epic.utils.file_processing.save_imgs', 'save_imgs', (['imgs', 'output_dir'], {}), '(imgs, output_dir)\n', (5273, 5291), False, 'from epic.utils.file_processing import load_imgs, load_input_dirs, load_motc_dets, save_imgs, save_motc_dets, save_video\n'), ((5315, 5353), 'os.path.join', 'os.path.join', (['output_dir', 'VID_FILENAME'], {}), '(output_dir, VID_FILENAME)\n', (5327, 5353), False, 'import os\n'), ((5366, 5392), 'epic.utils.file_processing.save_video', 'save_video', (['imgs', 'vid_path'], {}), '(imgs, vid_path)\n', (5376, 5392), False, 'from epic.utils.file_processing import load_imgs, load_input_dirs, load_motc_dets, save_imgs, save_motc_dets, save_video\n'), ((7787, 7813), 'torch.tensor', 'tensor', (['bboxes'], {'device': 'dev'}), '(bboxes, device=dev)\n', (7793, 7813), False, 'from torch import device, tensor\n'), ((7815, 7841), 'torch.tensor', 'tensor', (['scores'], {'device': 'dev'}), '(scores, device=dev)\n', (7821, 7841), False, 'from torch import device, tensor\n'), ((7874, 7901), 'torch.tensor', 'tensor', (['classes'], {'device': 'dev'}), '(classes, device=dev)\n', (7880, 7901), False, 'from torch import device, tensor\n'), ((3140, 3167), 'os.path.basename', 'os.path.basename', (['input_dir'], {}), '(input_dir)\n', (3156, 3167), False, 'import os\n'), ((3310, 
3362), 'os.path.join', 'os.path.join', (['input_dir', 'epic.OFFL_MOTC_IMGS_DIRNAME'], {}), '(input_dir, epic.OFFL_MOTC_IMGS_DIRNAME)\n', (3322, 3362), False, 'import os\n'), ((3575, 3687), 'epic.LOGGER.error', 'epic.LOGGER.error', (['f"""{prefix} Number of images found is less than specified --num-frames, skipping..."""'], {}), "(\n f'{prefix} Number of images found is less than specified --num-frames, skipping...'\n )\n", (3592, 3687), False, 'import epic\n'), ((4188, 4218), 'os.path.isfile', 'os.path.isfile', (['motc_dets_path'], {}), '(motc_dets_path)\n', (4202, 4218), False, 'import os\n'), ((4386, 4404), 'shutil.rmtree', 'rmtree', (['output_dir'], {}), '(output_dir)\n', (4392, 4404), False, 'from shutil import rmtree\n'), ((4688, 4713), 'os.path.isdir', 'os.path.isdir', (['output_dir'], {}), '(output_dir)\n', (4701, 4713), False, 'import os\n'), ((4731, 4751), 'os.mkdir', 'os.mkdir', (['output_dir'], {}), '(output_dir)\n', (4739, 4751), False, 'import os\n'), ((4873, 4925), 'os.path.join', 'os.path.join', (['input_dir', 'epic.OFFL_MOTC_DETS_DIRNAME'], {}), '(input_dir, epic.OFFL_MOTC_DETS_DIRNAME)\n', (4885, 4925), False, 'import os\n'), ((5069, 5129), 'epic.utils.file_processing.save_motc_dets', 'save_motc_dets', (['dets', 'epic.OFFL_MOTC_DETS_FILENAME', 'dets_dir'], {}), '(dets, epic.OFFL_MOTC_DETS_FILENAME, dets_dir)\n', (5083, 5129), False, 'from epic.utils.file_processing import load_imgs, load_input_dirs, load_motc_dets, save_imgs, save_motc_dets, save_video\n'), ((4989, 5012), 'os.path.isdir', 'os.path.isdir', (['dets_dir'], {}), '(dets_dir)\n', (5002, 5012), False, 'import os\n'), ((5034, 5052), 'os.mkdir', 'os.mkdir', (['dets_dir'], {}), '(dets_dir)\n', (5042, 5052), False, 'import os\n'), ((6904, 6958), 'numpy.array', 'np.array', (['[win_pos_w, win_pos_h, win_pos_w, win_pos_h]'], {}), '([win_pos_w, win_pos_h, win_pos_w, win_pos_h])\n', (6912, 6958), True, 'import numpy as np\n'), ((7340, 7359), 'numpy.array', 'np.array', (["d['bbox']"], {}), 
"(d['bbox'])\n", (7348, 7359), True, 'import numpy as np\n')] |
import numpy as np
import random

# Deterministic alternative (22458 nodes per graph id):
#   graph_ids = [int(i / 22458) + 1 for i in range(44906)]
# Assign each of the 44906 nodes a uniformly random graph id in [1, 20].
graph_ids = [random.randint(1, 20) for _ in range(44906)]
print(">>> Check train_graph_id.npy")
graph_ids = np.asarray(graph_ids)
with open("train_graph_id.npy", "wb") as f:
    np.save(f, graph_ids)
| [
"numpy.asarray",
"random.randint",
"numpy.save"
] | [((222, 243), 'numpy.asarray', 'np.asarray', (['graph_ids'], {}), '(graph_ids)\n', (232, 243), True, 'import numpy as np\n'), ((293, 314), 'numpy.save', 'np.save', (['f', 'graph_ids'], {}), '(f, graph_ids)\n', (300, 314), True, 'import numpy as np\n'), ((149, 170), 'random.randint', 'random.randint', (['(1)', '(20)'], {}), '(1, 20)\n', (163, 170), False, 'import random\n')] |
import numpy as np
from scipy.stats import multivariate_normal as mvn
from scipy.stats import invgamma
from bingo.evaluation.fitness_function import FitnessFunction
from smcpy.mcmc.vector_mcmc import VectorMCMC
from smcpy.mcmc.vector_mcmc_kernel import VectorMCMCKernel
from smcpy.samplers import AdaptiveSampler #SMCSampler
from smcpy import ImproperUniform
class BayesFitnessFunction(FitnessFunction):
    """Fitness function scoring an individual by its normalized marginal
    log-likelihood (NMLL), estimated with adaptive sequential Monte Carlo.

    A deterministic local optimization seeds a multivariate-normal proposal
    distribution; SMC then integrates the likelihood over the parameter
    posterior.  Numerical failures yield ``np.nan`` rather than raising, so a
    single bad individual cannot abort an evolutionary run.
    """

    def __init__(self, continuous_local_opt, num_particles=150, phi_exponent=8,
                 smc_steps=15, mcmc_steps=12, ess_threshold=0.75, std=None,
                 return_nmll_only=True, num_multistarts=1,
                 uniformly_weighted_proposal=True):
        """
        :param continuous_local_opt: deterministic local optimizer; also
            supplies the training data.
        :param num_particles: number of SMC particles.
        :param phi_exponent: exponent shaping the phi (tempering) schedule;
            must be > 0.
        :param smc_steps: number of SMC steps; must be > 2.
        :param mcmc_steps: MCMC mutation steps per SMC step.
        :param ess_threshold: effective-sample-size resampling threshold.
        :param std: fixed noise standard deviation; when ``None`` the noise
            level is inferred (an extra ``'std_dev'`` parameter is sampled).
        :param return_nmll_only: when True ``__call__`` returns only the NMLL.
        :param num_multistarts: local-optimization restarts used to build the
            proposal distribution.
        :param uniformly_weighted_proposal: when True proposal samples get
            uniform weights.
        :raises ValueError: on invalid ``smc_steps`` or ``phi_exponent``.
        """
        if smc_steps <= 2:
            raise ValueError('smc_steps must be > 2')
        if phi_exponent <= 0:
            raise ValueError('phi_exponent must be > 0')
        self._num_particles = num_particles
        self._smc_steps = smc_steps
        self._mcmc_steps = mcmc_steps
        self._ess_threshold = ess_threshold
        self._std = std
        self._return_nmll_only = return_nmll_only
        self._num_multistarts = num_multistarts
        self._uniformly_weighted_proposal = uniformly_weighted_proposal
        self._num_observations = len(continuous_local_opt.training_data.x)
        self._cont_local_opt = continuous_local_opt
        self._fbf_phi_idx, self._phi_seq = self._calc_phi_sequence(phi_exponent)
        self._eval_count = 0
        self._norm_phi = 1 / \
            np.sqrt(self._cont_local_opt.training_data.y.shape[0])

    def __call__(self, individual):
        """Return the NMLL of ``individual`` (``np.nan`` on failure).

        When ``return_nmll_only`` is False, returns
        ``(nmll, smc step list, vector MCMC object)`` instead.
        """
        param_names = self.get_parameter_names(individual)
        individual = self.do_local_opt(individual)
        try:
            proposal = self.generate_proposal_samples(individual,
                                                      self._num_particles)
        except (ValueError, np.linalg.LinAlgError):
            return self._failure_value()

        priors = [ImproperUniform() for _ in range(len(param_names))]
        if self._std is None:
            # Unknown noise level: sample it as an extra positive parameter.
            priors.append(ImproperUniform(0, None))
            param_names.append('std_dev')

        vector_mcmc = VectorMCMC(lambda x: self.evaluate_model(x, individual),
                               self.training_data.y.flatten(), priors,
                               log_like_args=self._std)
        mcmc_kernel = VectorMCMCKernel(vector_mcmc, param_order=param_names)
        smc = AdaptiveSampler(mcmc_kernel)
        try:
            step_list, marginal_log_likes = smc.sample(self._num_particles,
                                                       self._mcmc_steps,
                                                       self._ess_threshold,
                                                       proposal=proposal,
                                                       required_phi=self._norm_phi)
        except (ValueError, np.linalg.LinAlgError, ZeroDivisionError) as e:
            print(f'error: {e}')
            return self._failure_value()

        # Use the maximum-a-posteriori particle as the individual's constants.
        max_idx = np.argmax(step_list[-1].log_likes)
        maps = step_list[-1].params[max_idx]
        if self._std is None:
            # Last column is the sampled noise std, not a model constant.
            individual.set_local_optimization_params(maps[:-1])
        else:
            # No 'std_dev' column was sampled, so every column is a constant.
            individual.set_local_optimization_params(maps)
        self.step_list = step_list
        try:
            nmll = -1 * (marginal_log_likes[-1] -
                         marginal_log_likes[smc.req_phi_index[0]])
        except (AttributeError, IndexError, TypeError):
            # Previously dropped into pdb here; fail gracefully instead so a
            # single bad evaluation cannot hang an optimization run.
            return self._failure_value()
        if self._return_nmll_only:
            return nmll
        return nmll, step_list, vector_mcmc

    def _failure_value(self):
        """Return the sentinel value(s) used when an evaluation fails."""
        if self._return_nmll_only:
            return np.nan
        return np.nan, None, None

    @staticmethod
    def get_parameter_names(individual):
        """Return generic names ('p0', 'p1', ...) for the individual's
        local-optimization parameters."""
        num_params = individual.get_number_local_optimization_params()
        return [f'p{i}' for i in range(num_params)]

    def do_local_opt(self, individual):
        """Run the deterministic local optimizer on ``individual`` in place
        and return it."""
        individual._notify_modification()
        individual._needs_opt = True
        _ = self._cont_local_opt(individual)
        return individual

    def estimate_covariance(self, individual):
        """Estimate the OLS parameter covariance after a local optimization.

        :return: (constants, covariance matrix, OLS variance, sum of squared
            errors).
        """
        self.do_local_opt(individual)
        num_params = individual.get_number_local_optimization_params()
        x = self.training_data.x
        f, f_deriv = individual.evaluate_equation_with_local_opt_gradient_at(x)
        ssqe = np.sum((self.training_data.y - f) ** 2)
        var_ols = ssqe / (len(f) - num_params)
        cov = var_ols * np.linalg.inv(f_deriv.T.dot(f_deriv))
        return individual.constants, cov, var_ols, ssqe

    def generate_proposal_samples(self, individual, num_samples):
        """Build the SMC proposal from multistart local optimizations.

        :return: (dict of samples keyed by parameter name, proposal pdf).
        :raises ValueError: if no multistart yields a valid normal proposal.
        """
        param_names = self.get_parameter_names(individual)
        num_multistarts = self._num_multistarts
        if not param_names:
            num_multistarts = 1
        param_dists = []
        max_count = 10 * num_multistarts
        count = 0
        while len(param_dists) < num_multistarts and count < max_count:
            mean, cov, _, _ = self.estimate_covariance(individual)
            try:
                # Jitter the diagonal to keep the covariance positive
                # semi-definite.
                param_dists.append(mvn(mean, cov + (np.eye(cov.shape[0]) * 1e-8),
                                       allow_singular=True))
            except (ValueError, np.linalg.LinAlgError):
                pass
            count += 1
        if len(param_dists) == 0:
            raise ValueError('no valid proposal distribution could be built')
        pdf, samples = self._get_samples_and_pdf(param_dists, num_samples)
        print(f'Number of Successful restarts: {len(param_dists)}')
        # NOTE(review): this reruns the local optimization another
        # num_multistarts times; reusing the estimates gathered above would be
        # cheaper but changes the stochastic stream, so it is left as-is.
        cov_estimates = [self.estimate_covariance(individual)
                         for _ in range(num_multistarts)]
        if self._std is None:
            len_data = len(self.training_data.x)
            noise_dists = [invgamma((0.01 + len_data) / 2,
                                    scale=(0.01 * var_ols + ssqe) / 2)
                           for _, _, var_ols, ssqe in cov_estimates]
            noise_pdf, noise_samples = self._get_samples_and_pdf(noise_dists,
                                                                 num_samples)
            param_names.append('std_dev')
            samples = np.concatenate((samples, noise_samples), axis=1)
            pdf *= noise_pdf
        if self._uniformly_weighted_proposal:
            pdf = np.ones_like(pdf)
        samples = dict(zip(param_names, samples.T))
        return samples, pdf

    @staticmethod
    def _get_samples_and_pdf(distributions, num_samples):
        """Draw ``num_samples`` samples from a mixture of ``distributions``
        and return (mixture pdf evaluated at the samples, samples)."""
        sub_samples = num_samples // len(distributions)
        samples = np.vstack([proposal.rvs(sub_samples).reshape(sub_samples, -1)
                           for proposal in distributions])
        if samples.shape[0] != num_samples:
            # Integer division left a remainder; top up from a random
            # component.
            missed_samples = num_samples - samples.shape[0]
            new_samples = distributions[np.random.choice(len(distributions))]\
                .rvs(missed_samples).reshape((missed_samples, -1))
            samples = np.vstack([samples, new_samples])
        pdf = np.zeros((samples.shape[0], 1))
        for dist in distributions:
            pdf += dist.pdf(samples).reshape(-1, 1)
        pdf /= len(distributions)
        return pdf, samples

    def evaluate_model(self, params, individual):
        """Evaluate ``individual`` at the training inputs with the given
        parameter matrix (one column per particle)."""
        self._eval_count += 1
        individual.set_local_optimization_params(params.T)
        return individual.evaluate_equation_at(self.training_data.x).T

    def _calc_phi_sequence(self, phi_exponent):
        """Build the exponential phi (tempering) schedule and insert the
        fractional-Bayes-factor phi ``1/sqrt(N)`` at its sorted position.

        :return: (index of the inserted fbf phi, phi sequence).
        """
        x = np.linspace(0, 1, self._smc_steps - 1)
        phi_seq = (np.exp(x * phi_exponent) - 1) / (np.exp(phi_exponent) - 1)
        fbf_phi = 1 / np.sqrt(self._num_observations)
        fbf_phi_index = np.searchsorted(phi_seq, [fbf_phi])
        phi_seq = np.insert(phi_seq, fbf_phi_index, fbf_phi)
        return int(fbf_phi_index), phi_seq

    @property
    def eval_count(self):
        """Total number of model evaluations (SMC plus local optimization)."""
        return self._eval_count + self._cont_local_opt.eval_count

    @eval_count.setter
    def eval_count(self, value):
        self._eval_count = value - self._cont_local_opt.eval_count

    @property
    def training_data(self):
        """Training data owned by the wrapped local optimizer."""
        return self._cont_local_opt.training_data

    @training_data.setter
    def training_data(self, training_data):
        self._cont_local_opt.training_data = training_data
| [
"numpy.insert",
"numpy.ones_like",
"numpy.eye",
"smcpy.ImproperUniform",
"numpy.sqrt",
"numpy.ones",
"numpy.searchsorted",
"numpy.argmax",
"smcpy.mcmc.vector_mcmc_kernel.VectorMCMCKernel",
"scipy.stats.invgamma",
"numpy.sum",
"numpy.zeros",
"numpy.linspace",
"numpy.exp",
"numpy.vstack",
... | [((2446, 2500), 'smcpy.mcmc.vector_mcmc_kernel.VectorMCMCKernel', 'VectorMCMCKernel', (['vector_mcmc'], {'param_order': 'param_names'}), '(vector_mcmc, param_order=param_names)\n', (2462, 2500), False, 'from smcpy.mcmc.vector_mcmc_kernel import VectorMCMCKernel\n'), ((2515, 2543), 'smcpy.samplers.AdaptiveSampler', 'AdaptiveSampler', (['mcmc_kernel'], {}), '(mcmc_kernel)\n', (2530, 2543), False, 'from smcpy.samplers import AdaptiveSampler\n'), ((3231, 3265), 'numpy.argmax', 'np.argmax', (['step_list[-1].log_likes'], {}), '(step_list[-1].log_likes)\n', (3240, 3265), True, 'import numpy as np\n'), ((4349, 4388), 'numpy.sum', 'np.sum', (['((self.training_data.y - f) ** 2)'], {}), '((self.training_data.y - f) ** 2)\n', (4355, 4388), True, 'import numpy as np\n'), ((4694, 4719), 'numpy.ones', 'np.ones', (['(num_samples, 1)'], {}), '((num_samples, 1))\n', (4701, 4719), True, 'import numpy as np\n'), ((7061, 7092), 'numpy.zeros', 'np.zeros', (['(samples.shape[0], 1)'], {}), '((samples.shape[0], 1))\n', (7069, 7092), True, 'import numpy as np\n'), ((7514, 7552), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(self._smc_steps - 1)'], {}), '(0, 1, self._smc_steps - 1)\n', (7525, 7552), True, 'import numpy as np\n'), ((7709, 7744), 'numpy.searchsorted', 'np.searchsorted', (['phi_seq', '[fbf_phi]'], {}), '(phi_seq, [fbf_phi])\n', (7724, 7744), True, 'import numpy as np\n'), ((7763, 7805), 'numpy.insert', 'np.insert', (['phi_seq', 'fbf_phi_index', 'fbf_phi'], {}), '(phi_seq, fbf_phi_index, fbf_phi)\n', (7772, 7805), True, 'import numpy as np\n'), ((1497, 1551), 'numpy.sqrt', 'np.sqrt', (['self._cont_local_opt.training_data.y.shape[0]'], {}), '(self._cont_local_opt.training_data.y.shape[0])\n', (1504, 1551), True, 'import numpy as np\n'), ((2036, 2053), 'smcpy.ImproperUniform', 'ImproperUniform', ([], {}), '()\n', (2051, 2053), False, 'from smcpy import ImproperUniform\n'), ((6201, 6249), 'numpy.concatenate', 'np.concatenate', (['(samples, noise_samples)'], {'axis': 
'(1)'}), '((samples, noise_samples), axis=1)\n', (6215, 6249), True, 'import numpy as np\n'), ((6344, 6361), 'numpy.ones_like', 'np.ones_like', (['pdf'], {}), '(pdf)\n', (6356, 6361), True, 'import numpy as np\n'), ((7003, 7036), 'numpy.vstack', 'np.vstack', (['[samples, new_samples]'], {}), '([samples, new_samples])\n', (7012, 7036), True, 'import numpy as np\n'), ((7653, 7684), 'numpy.sqrt', 'np.sqrt', (['self._num_observations'], {}), '(self._num_observations)\n', (7660, 7684), True, 'import numpy as np\n'), ((2144, 2168), 'smcpy.ImproperUniform', 'ImproperUniform', (['(0)', 'None'], {}), '(0, None)\n', (2159, 2168), False, 'from smcpy import ImproperUniform\n'), ((3571, 3586), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (3584, 3586), False, 'import pdb\n'), ((5808, 5874), 'scipy.stats.invgamma', 'invgamma', (['((0.01 + len_data) / 2)'], {'scale': '((0.01 * var_ols + ssqe) / 2)'}), '((0.01 + len_data) / 2, scale=(0.01 * var_ols + ssqe) / 2)\n', (5816, 5874), False, 'from scipy.stats import invgamma\n'), ((7572, 7596), 'numpy.exp', 'np.exp', (['(x * phi_exponent)'], {}), '(x * phi_exponent)\n', (7578, 7596), True, 'import numpy as np\n'), ((7605, 7625), 'numpy.exp', 'np.exp', (['phi_exponent'], {}), '(phi_exponent)\n', (7611, 7625), True, 'import numpy as np\n'), ((5204, 5224), 'numpy.eye', 'np.eye', (['cov.shape[0]'], {}), '(cov.shape[0])\n', (5210, 5224), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
import random
import param_gedi as param
import tensorflow_addons as tfa
class Parser:
    """Parse and preprocess image/label examples from TFRecord files."""

    def __init__(self, p):
        """
        Args:
            p: hyperparameter/config object (param_gedi.Param) describing
               image sizes, batch size, and augmentation settings.
        """
        self.p = p

    def tfrec_parse(self, row):
        """
        Parse a single item in a tfrecord. You most likely want
        tfrec_batch_parse.
        Args:
            row: A scalar string Tensor, a single serialized Example.
        Returns:
            (image tensor, one-hot label) tuple.
        """
        features = {
            # 'filename': tf.io.FixedLenFeature([], tf.string),
            'image': tf.io.FixedLenFeature([], tf.string),
            'label': tf.io.FixedLenFeature([], tf.int64)
        }
        parsed = tf.io.parse_single_example(row, features)
        # file = parsed['filename']
        # Decode the raw bytes exactly once.  The original code called
        # decode_raw a second time on the already-decoded float tensor, which
        # fails because decode_raw requires string input.
        img = tf.io.decode_raw(parsed['image'], tf.float32)
        lbl = tf.cast(parsed['label'], tf.int32)
        lbls = tf.one_hot(lbl, 2)  # test this in pytest
        img = tf.reshape(img, [-1, self.p.orig_width, self.p.orig_height, self.p.orig_channels])
        if self.p.orig_height > self.p.vgg_height:
            # Center crop down to the VGG input size.
            x0 = (self.p.orig_width - self.p.vgg_width) // 2
            y0 = (self.p.orig_height - self.p.vgg_height) // 2
            img = tf.image.crop_to_bounding_box(img, y0, x0, 224, 224)
        # img = tf.divide(img, 255.0)  # normalize here
        # img = tf.divide(img, self.p.max_gedi)  # normalize here
        return img, lbls

    def tfrec_batch_parse(self, row):
        """
        Parse a tfrecord by batch.
        Args:
            row: A scalar string Tensor, a single serialized Example.
        Returns:
            (image batch, one-hot label batch, filename batch) tuple.
        """
        features = {
            'filename': tf.io.FixedLenFeature([], tf.string),
            'image': tf.io.FixedLenFeature([], tf.string),
            'label': tf.io.FixedLenFeature([], tf.int64),
            'ratio': tf.io.FixedLenFeature([], tf.float32)
        }
        parsed = tf.io.parse_example(row, features)
        files = parsed['filename']
        img = tf.io.decode_raw(parsed['image'], tf.float32)
        lbl = tf.cast(parsed['label'], tf.int32)
        ratio = parsed['ratio']  # useful to have ratio, but model is a binary classifier with binary ground truth.
        lbls = tf.one_hot(lbl, 2)  # one hot, verify this in pytest
        img = tf.reshape(img, [-1, self.p.orig_size[0], self.p.orig_size[1], self.p.orig_size[2]])
        # if self.p.orig_size[0] > self.p.target_size[0]:
        #     x0 = (self.p.orig_size[1] - self.p.target_size[1]) // 2
        #     y0 = (self.p.orig_size[0] - self.p.target_size[0]) // 2
        #     img = tf.image.crop_to_bounding_box(img, y0, x0, 224, 224)
        # img = tf.divide(img, self.p.max_gedi)  # normalize here
        return img, lbls, files

    def reshape_ims(self, imgs, lbls, files):
        """
        Reshape images for the model: drop any alpha channel, then crop
        (randomly or centered) down to the target size.
        """
        if self.p.orig_size[-1] > self.p.target_size[-1]:
            # Remove alpha channel
            channels = tf.unstack(imgs, axis=-1)
            imgs = tf.stack([channels[0], channels[1], channels[2]], axis=-1)
        if self.p.randomcrop:
            if self.p.orig_size[0] > self.p.target_size[0]:
                imgs = tf.image.random_crop(imgs, size=[self.p.BATCH_SIZE, 224, 224, 1])
        else:
            if self.p.orig_size[0] > self.p.target_size[0]:
                y0 = (self.p.orig_size[0] - self.p.target_size[0]) // 2
                x0 = (self.p.orig_size[1] - self.p.target_size[1]) // 2
                imgs = tf.image.crop_to_bounding_box(imgs, y0, x0, self.p.target_size[1], self.p.target_size[0])
        return imgs, lbls, files

    @staticmethod
    def transformImg(imgIn, forward_transform):
        """
        Apply an affine transform to an image batch.
        NOTE(review): tf.contrib is TF1-only; under TF2 this would need
        tfa.image.transform — confirm against the project's TF version.
        https://stackoverflow.com/questions/52214953/tensorflow-is-there-a-way-to-implement-tensor-wise-image-shear-rotation-transl
        Args:
            imgIn: image batch tensor.
            forward_transform: 3x3 float transform matrix.
        Returns:
            Transformed image batch.
        """
        t = tf.contrib.image.matrices_to_flat_transforms(tf.linalg.inv(forward_transform))
        # please notice that forward_transform must be a float matrix,
        # e.g. [[2.0,0,0],[0,1.0,0],[0,0,1]] will work
        # but [[2,0,0],[0,1,0],[0,0,1]] will not
        imgOut = tf.contrib.image.transform(imgIn, t, interpolation="BILINEAR", name=None)
        return imgOut

    def use_binary_lbls(self, imgs, lbls, files):
        """Convert from one-hot encoded labels to single digit label 0 or 1"""
        newlbls = tf.argmax(lbls, axis=1)
        newlbls = tf.cast(newlbls, dtype=tf.float32)
        return imgs, newlbls, files

    def inception_scale(self, imgs, lbls, files):
        """Scaling for inception model.
        NOTE(review): for input in [0, 1] this maps to [-2, 0], not the usual
        inception range [-1, 1] (that would be (x - 0.5) * 2).  Left unchanged
        because trained models may depend on it — confirm before fixing.
        """
        assert_op = tf.Assert(tf.less_equal(tf.reduce_max(imgs), 1.0), [imgs])
        with tf.control_dependencies([assert_op]):
            images = tf.subtract(imgs, 1.0)
        images = tf.multiply(images, 2.0)
        return images, lbls, files

    def random_shear(self, img):
        """
        Shear the whole batch, then crop back to 224x224.
        Args:
            img: image batch tensor.
        Returns:
            Sheared and cropped image batch.
        """
        # shear .5 would shear 50% of width, so it would be 112 of 224px.
        shear_x = np.random.uniform(0, 0.1)
        shear_y = np.random.uniform(0, 0.1)
        forward_transform = [[1.0, shear_x, 0],
                             [shear_y, 1.0, 0],
                             [0, 0, 1.0]]
        shear = self.transformImg(img, forward_transform)
        # Fixed 80% central crop removes the black border the shear creates.
        scales = np.ones(self.p.batch_size) * .8
        boxes = np.zeros((len(scales), 4))
        for i, scale in enumerate(scales):
            x1 = y1 = 0.5 - (0.5 * scale)
            x2 = y2 = 0.5 + (0.5 * scale)
            boxes[i] = [x1, y1, x2, y2]
        idx = [i for i in range(self.p.batch_size)]
        crops = tf.image.crop_and_resize(shear, boxes=boxes, box_ind=idx, crop_size=[224, 224])
        return crops

    def zoom(self, img, be_random=True):
        """
        Zoom via central crop and resize back to 224x224.
        Args:
            img: image batch tensor.
            be_random: when True, per-image crop scale is drawn uniformly from
                [0.8, 1.0]; otherwise a fixed 50% crop is used.
        Returns:
            Cropped-and-resized image batch.
        """
        if be_random:
            scales = list(np.random.uniform(0.8, 1.0, self.p.batch_size))
        else:
            scales = np.ones(self.p.batch_size) * .5
        boxes = np.zeros((len(scales), 4))
        for i, scale in enumerate(scales):
            x1 = y1 = 0.5 - (0.5 * scale)
            x2 = y2 = 0.5 + (0.5 * scale)
            boxes[i] = [x1, y1, x2, y2]
        idx = [i for i in range(self.p.batch_size)]
        crops = tf.image.crop_and_resize(img, boxes=boxes, box_ind=idx, crop_size=[224, 224])
        return crops

    def augment(self, img, lbls, files):
        """
        Random flips, 90-degree rotations, brightness and contrast jitter.
        Assumes the image batch is already normalized to a max of one.
        Args:
            img: image batch tensor with values in [0, 1].
            lbls: label batch (passed through unchanged).
            files: filename batch (passed through unchanged).
        Returns:
            (augmented images, lbls, files).
        """
        assert_op = tf.Assert(tf.less_equal(tf.reduce_max(img), 1.0), [img])
        with tf.control_dependencies([assert_op]):
            img = tf.image.random_flip_up_down(img)
        img = tf.image.random_flip_left_right(img)
        # Randomly rotate the whole batch by 0, 90, 180 or 270 degrees.
        turns = tf.random.uniform(shape=[], minval=0, maxval=4, dtype=tf.int32)
        img = tf.cond(tf.equal(turns, tf.constant(1)), lambda: tf.image.rot90(img, 1), lambda: tf.identity(img))
        img = tf.cond(tf.equal(turns, tf.constant(2)), lambda: tf.image.rot90(img, 2), lambda: tf.identity(img))
        img = tf.cond(tf.equal(turns, tf.constant(3)), lambda: tf.image.rot90(img, 3), lambda: tf.identity(img))
        # Image normalized to 1; delta is the amount of brightness to
        # add/subtract.
        img = tf.image.random_brightness(img,
                                         max_delta=self.p.random_brightness)
        # (x - mean) * contrast_factor + mean
        img = tf.image.random_contrast(img, self.p.min_contrast,
                                       self.p.max_contrast)
        # Disabled extras: additive noise / gaussian blur
        # (tfa.image.gaussian_filter2d).
        return img, lbls, files

    def remove_negatives_in_img(self, img):
        """If there are negatives in image, subtract by min to make all
        values nonnegative."""
        _min = tf.reduce_min(img)
        subtr = tf.where(tf.less(_min, 0.), _min, 0.)
        img = img - subtr
        return img

    def divide_by_max_in_img(self, img):
        """Divide each image by its own maximum (per-image normalization)."""
        _maxs = tf.reduce_max(img, axis=0, keepdims=True)
        _maxs = tf.reduce_max(_maxs, axis=1, keepdims=True)
        _maxs = tf.reduce_max(_maxs, axis=2, keepdims=True)
        img = img / _maxs
        return img

    def cut_off_vals(self, imgs, lbls, files):
        """Clip pixel values into [0, 1]."""
        imgs = tf.clip_by_value(imgs, clip_value_min=0., clip_value_max=1.)
        return imgs, lbls, files

    def normalize_whitening(self, imgs, lbls):
        """Scales each image in batch to have mean 0 and variance 1."""
        imgs = tf.image.per_image_standardization(imgs)
        return imgs, lbls

    def format_example(self, imgs, lbls, files):
        """Map [0, 1] images to [-1, 1] and resize to the target size."""
        assert_op = tf.Assert(tf.less_equal(tf.reduce_max(imgs), 1.0), [imgs])
        with tf.control_dependencies([assert_op]):
            images = tf.cast(imgs, tf.float32)
        images = images * 255.0
        images = (images / 127.5) - 1.0
        images = tf.image.resize(images, (self.p.target_size[0], self.p.target_size[1]))
        return images, lbls, files

    def set_max_to_one_by_image(self, imgs, lbls, files):
        """Divide each image by its maximum"""
        imgs = tf.map_fn(lambda x: self.remove_negatives_in_img(x), imgs)
        imgs = tf.map_fn(lambda x: self.divide_by_max_in_img(x), imgs)
        return imgs, lbls, files

    def set_max_to_one_by_batch(self, imgs, lbls, files):
        """Divide each batch by its maximum"""
        imgs = tf.map_fn(lambda x: self.remove_negatives_in_img(x), imgs)
        imgs = imgs / tf.reduce_max(imgs, keepdims=True)
        return imgs, lbls, files

    def rescale_im_and_clip_16bit(self, imgs, lbls, files):
        """Min-max rescale with the dataset's original value range, then clip
        to [0, 1]."""
        imgs = tf.map_fn(lambda x: (x - self.p.orig_min_value) / (self.p.orig_max_value - self.p.orig_min_value), imgs)
        imgs = tf.clip_by_value(imgs, clip_value_min=0., clip_value_max=1.)
        return imgs, lbls, files

    def rescale_im_and_clip_renorm(self, imgs, lbls, files):
        """Min-max rescale with the training set's value range, then clip to
        [0, 1]."""
        imgs = tf.map_fn(
            lambda x: (x - self.p.training_min_value) / (self.p.training_max_value - self.p.training_min_value), imgs)
        imgs = tf.clip_by_value(imgs, clip_value_min=0., clip_value_max=1.)
        return imgs, lbls, files

    def normalize_resnet(self, img, lbls, files):
        """Stack grayscale input into an RGB tensor for resnet (no mean
        subtraction here)."""
        rgb = img
        if int(rgb.get_shape()[-1]) == 1:
            red, green, blue = rgb, rgb, rgb
        else:
            red, green, blue = tf.split(
                axis=3, num_or_size_splits=3, value=rgb)
        assert_op = tf.Assert(tf.reduce_all(tf.equal(red.get_shape()[1:], tf.constant([224, 224, 1]))), [red])
        with tf.control_dependencies([assert_op]):
            new_img = tf.concat(axis=3, values=[
                red, green, blue
            ], name='rgb')
        # normed = tf.image.per_image_standardization(new_img)
        return new_img, lbls, files

    def make_vgg(self, img, lbls, files):
        """
        Subtracts the vgg16 training mean by channel (and reorders channels
        to BGR, as VGG expects).
        Args:
            img: image batch in [0, 1].
            lbls: label batch (passed through unchanged).
            files: filename batch (passed through unchanged).
        Returns:
            (bgr image batch, lbls, files).
        """
        rgb = img * 255.0
        if int(img.get_shape()[-1]) == 1:
            red, green, blue = rgb, rgb, rgb
        else:
            red, green, blue = tf.split(
                axis=3, num_or_size_splits=3, value=rgb)
        assert_op = tf.Assert(tf.reduce_all(tf.equal(red.get_shape()[1:], tf.constant([224, 224, 1]))), [red])
        with tf.control_dependencies([assert_op]):
            assert red.get_shape().as_list()[1:] == [224, 224, 1]
            assert green.get_shape().as_list()[1:] == [224, 224, 1]
            assert blue.get_shape().as_list()[1:] == [224, 224, 1]
            bgr = tf.concat(axis=3, values=[
                blue - self.p.VGG_MEAN[0],
                green - self.p.VGG_MEAN[1],
                red - self.p.VGG_MEAN[2],
            ], name='bgr')
        assert bgr.get_shape().as_list()[1:] == [224, 224, 3]
        return bgr, lbls, files

    def make_resnet(self, img, lbls, files):
        """
        Subtracts the vgg16 training mean by channel (RGB order kept, as
        resnet expects).
        Args:
            img: image batch in [0, 1].
            lbls: label batch (passed through unchanged).
            files: filename batch (passed through unchanged).
        Returns:
            (mean-subtracted rgb image batch, lbls, files).
        """
        rgb = img * 255.0
        if int(img.get_shape()[-1]) == 1:
            red, green, blue = rgb, rgb, rgb
        else:
            red, green, blue = tf.split(
                axis=3, num_or_size_splits=3, value=rgb)
        assert_op = tf.Assert(tf.reduce_all(tf.equal(red.get_shape()[1:], tf.constant([224, 224, 1]))), [red])
        with tf.control_dependencies([assert_op]):
            assert red.get_shape().as_list()[1:] == [224, 224, 1]
            assert green.get_shape().as_list()[1:] == [224, 224, 1]
            assert blue.get_shape().as_list()[1:] == [224, 224, 1]
            res = tf.concat(axis=3, values=[
                red - self.p.VGG_MEAN[0],
                green - self.p.VGG_MEAN[1],
                blue - self.p.VGG_MEAN[2],
            ], name='res')
        assert res.get_shape().as_list()[1:] == [224, 224, 3]
        return res, lbls, files

    def chk_make_vgg(self, img, lbls, files):
        """
        Subtracts the vgg16 training mean by channel (duplicate of make_vgg
        kept for checking).
        Args:
            img: image batch in [0, 1].
            lbls: label batch (passed through unchanged).
            files: filename batch (passed through unchanged).
        Returns:
            (bgr image batch, lbls, files).
        """
        # Fix: the original referenced `rgb` before assignment (NameError);
        # define it exactly as make_vgg does.
        rgb = img * 255.0
        if int(img.get_shape()[-1]) == 1:
            red, green, blue = rgb, rgb, rgb
        else:
            red, green, blue = tf.split(
                axis=3, num_or_size_splits=3, value=rgb)
        assert_op = tf.Assert(tf.reduce_all(tf.equal(red.get_shape()[1:], tf.constant([224, 224, 1]))), [red])
        with tf.control_dependencies([assert_op]):
            assert red.get_shape().as_list()[1:] == [224, 224, 1]
            assert green.get_shape().as_list()[1:] == [224, 224, 1]
            assert blue.get_shape().as_list()[1:] == [224, 224, 1]
            bgr = tf.concat(axis=3, values=[
                blue - self.p.VGG_MEAN[0],
                green - self.p.VGG_MEAN[1],
                red - self.p.VGG_MEAN[2],
            ], name='bgr')
        assert bgr.get_shape().as_list()[1:] == [224, 224, 3]
        return bgr, lbls, files

    @staticmethod
    def tf_equalize_histogram(image):
        """
        Histogram-equalize a single (16-bit-range) image.
        https://stackoverflow.com/questions/42835247/how-to-implement-histogram-equalization-for-images-in-tensorflow
        Args:
            image: single image tensor with integer-valued pixels in
                [0, 65535].
        Returns:
            Equalized float32 image.
        """
        values_range = tf.constant([0., 65535.], dtype=tf.float32)
        histogram = tf.histogram_fixed_width(tf.cast(image, tf.float32), values_range, 65536)
        cdf = tf.cumsum(histogram)
        cdf_min = cdf[tf.reduce_min(tf.where(tf.greater(cdf, 0)))]
        img_shape = tf.shape(image)
        print('im shape', image.get_shape())
        pix_cnt = img_shape[0] * img_shape[1]
        px_map = tf.round(tf.cast(cdf - cdf_min, tf.float32) * 65536. / tf.cast(pix_cnt - 1, tf.float32))
        px_map = tf.cast(px_map, tf.uint16)
        print('px map shape', px_map.get_shape())
        eq_hist = tf.expand_dims(tf.gather_nd(px_map, tf.cast(image, tf.int32)), 2)
        print('eq hist shape', eq_hist.get_shape())
        eq_hist = tf.cast(eq_hist, tf.float32)
        return eq_hist

    def normalize_histeq(self, imgs, lbls, files):
        """Shift each image nonnegative, then histogram-equalize it."""
        imgs = tf.map_fn(lambda x: self.remove_negatives_in_img(x), imgs)
        imgs = tf.map_fn(lambda x: self.tf_equalize_histogram(x), imgs)
        return imgs, lbls, files
if __name__ == '__main__':
    p = param.Param()
    # Fix: Parser.__init__ requires the param object; Parser() raised a
    # TypeError.
    Parse = Parser(p)
    # get_tfrecord_length(p.train_rec, get_max=True)
    ds = tf.data.TFRecordDataset(p.data_test,
                                 num_parallel_reads=p.num_parallel_calls)  # possibly use multiple record files
    ds = ds.batch(p.BATCH_SIZE, drop_remainder=False)  # batch images, no skips
    ds = ds.map(Parse.tfrec_batch_parse,
                num_parallel_calls=p.num_parallel_calls)
    it = iter(ds)
    # Fix: tfrec_batch_parse returns three tensors (img, lbls, files); the
    # original unpacked four (including a nonexistent `ratio`) and crashed.
    img, lbls, files = next(it)
    print(files)
| [
"tensorflow.unstack",
"tensorflow.shape",
"tensorflow.split",
"tensorflow.multiply",
"tensorflow.linalg.inv",
"tensorflow.io.FixedLenFeature",
"tensorflow.control_dependencies",
"tensorflow.io.decode_raw",
"tensorflow.cast",
"tensorflow.reduce_min",
"tensorflow.image.crop_to_bounding_box",
"te... | [((17547, 17560), 'param_gedi.Param', 'param.Param', ([], {}), '()\n', (17558, 17560), True, 'import param_gedi as param\n'), ((17644, 17721), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['p.data_test'], {'num_parallel_reads': 'p.num_parallel_calls'}), '(p.data_test, num_parallel_reads=p.num_parallel_calls)\n', (17667, 17721), True, 'import tensorflow as tf\n'), ((674, 715), 'tensorflow.io.parse_single_example', 'tf.io.parse_single_example', (['row', 'features'], {}), '(row, features)\n', (700, 715), True, 'import tensorflow as tf\n'), ((766, 808), 'tensorflow.decode_raw', 'tf.decode_raw', (["parsed['image']", 'tf.float32'], {}), "(parsed['image'], tf.float32)\n", (779, 808), True, 'import tensorflow as tf\n'), ((823, 857), 'tensorflow.cast', 'tf.cast', (["parsed['label']", 'tf.int32'], {}), "(parsed['label'], tf.int32)\n", (830, 857), True, 'import tensorflow as tf\n'), ((873, 891), 'tensorflow.one_hot', 'tf.one_hot', (['lbl', '(2)'], {}), '(lbl, 2)\n', (883, 891), True, 'import tensorflow as tf\n'), ((929, 959), 'tensorflow.decode_raw', 'tf.decode_raw', (['img', 'tf.float32'], {}), '(img, tf.float32)\n', (942, 959), True, 'import tensorflow as tf\n'), ((974, 1061), 'tensorflow.reshape', 'tf.reshape', (['img', '[-1, self.p.orig_width, self.p.orig_height, self.p.orig_channels]'], {}), '(img, [-1, self.p.orig_width, self.p.orig_height, self.p.\n orig_channels])\n', (984, 1061), True, 'import tensorflow as tf\n'), ((1939, 1973), 'tensorflow.io.parse_example', 'tf.io.parse_example', (['row', 'features'], {}), '(row, features)\n', (1958, 1973), True, 'import tensorflow as tf\n'), ((2023, 2068), 'tensorflow.io.decode_raw', 'tf.io.decode_raw', (["parsed['image']", 'tf.float32'], {}), "(parsed['image'], tf.float32)\n", (2039, 2068), True, 'import tensorflow as tf\n'), ((2083, 2117), 'tensorflow.cast', 'tf.cast', (["parsed['label']", 'tf.int32'], {}), "(parsed['label'], tf.int32)\n", (2090, 2117), True, 'import tensorflow as tf\n'), ((2249, 
2267), 'tensorflow.one_hot', 'tf.one_hot', (['lbl', '(2)'], {}), '(lbl, 2)\n', (2259, 2267), True, 'import tensorflow as tf\n'), ((2316, 2405), 'tensorflow.reshape', 'tf.reshape', (['img', '[-1, self.p.orig_size[0], self.p.orig_size[1], self.p.orig_size[2]]'], {}), '(img, [-1, self.p.orig_size[0], self.p.orig_size[1], self.p.\n orig_size[2]])\n', (2326, 2405), True, 'import tensorflow as tf\n'), ((4228, 4301), 'tensorflow.contrib.image.transform', 'tf.contrib.image.transform', (['imgIn', 't'], {'interpolation': '"""BILINEAR"""', 'name': 'None'}), "(imgIn, t, interpolation='BILINEAR', name=None)\n", (4254, 4301), True, 'import tensorflow as tf\n'), ((4472, 4495), 'tensorflow.argmax', 'tf.argmax', (['lbls'], {'axis': '(1)'}), '(lbls, axis=1)\n', (4481, 4495), True, 'import tensorflow as tf\n'), ((4514, 4548), 'tensorflow.cast', 'tf.cast', (['newlbls'], {'dtype': 'tf.float32'}), '(newlbls, dtype=tf.float32)\n', (4521, 4548), True, 'import tensorflow as tf\n'), ((5175, 5200), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(0.1)'], {}), '(0, 0.1)\n', (5192, 5200), True, 'import numpy as np\n'), ((5219, 5244), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(0.1)'], {}), '(0, 0.1)\n', (5236, 5244), True, 'import numpy as np\n'), ((5770, 5849), 'tensorflow.image.crop_and_resize', 'tf.image.crop_and_resize', (['shear'], {'boxes': 'boxes', 'box_ind': 'idx', 'crop_size': '[224, 224]'}), '(shear, boxes=boxes, box_ind=idx, crop_size=[224, 224])\n', (5794, 5849), True, 'import tensorflow as tf\n'), ((6612, 6689), 'tensorflow.image.crop_and_resize', 'tf.image.crop_and_resize', (['img'], {'boxes': 'boxes', 'box_ind': 'idx', 'crop_size': '[224, 224]'}), '(img, boxes=boxes, box_ind=idx, crop_size=[224, 224])\n', (6636, 6689), True, 'import tensorflow as tf\n'), ((8456, 8523), 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['img'], {'max_delta': 'self.p.random_brightness'}), '(img, max_delta=self.p.random_brightness)\n', (8482, 8523), True, 
'import tensorflow as tf\n'), ((8651, 8722), 'tensorflow.image.random_contrast', 'tf.image.random_contrast', (['img', 'self.p.min_contrast', 'self.p.max_contrast'], {}), '(img, self.p.min_contrast, self.p.max_contrast)\n', (8675, 8722), True, 'import tensorflow as tf\n'), ((9483, 9501), 'tensorflow.reduce_min', 'tf.reduce_min', (['img'], {}), '(img)\n', (9496, 9501), True, 'import tensorflow as tf\n'), ((10024, 10065), 'tensorflow.reduce_max', 'tf.reduce_max', (['img'], {'axis': '(0)', 'keepdims': '(True)'}), '(img, axis=0, keepdims=True)\n', (10037, 10065), True, 'import tensorflow as tf\n'), ((10082, 10125), 'tensorflow.reduce_max', 'tf.reduce_max', (['_maxs'], {'axis': '(1)', 'keepdims': '(True)'}), '(_maxs, axis=1, keepdims=True)\n', (10095, 10125), True, 'import tensorflow as tf\n'), ((10142, 10185), 'tensorflow.reduce_max', 'tf.reduce_max', (['_maxs'], {'axis': '(2)', 'keepdims': '(True)'}), '(_maxs, axis=2, keepdims=True)\n', (10155, 10185), True, 'import tensorflow as tf\n'), ((10324, 10386), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['imgs'], {'clip_value_min': '(0.0)', 'clip_value_max': '(1.0)'}), '(imgs, clip_value_min=0.0, clip_value_max=1.0)\n', (10340, 10386), True, 'import tensorflow as tf\n'), ((10634, 10674), 'tensorflow.image.per_image_standardization', 'tf.image.per_image_standardization', (['imgs'], {}), '(imgs)\n', (10668, 10674), True, 'import tensorflow as tf\n'), ((11767, 11875), 'tensorflow.map_fn', 'tf.map_fn', (['(lambda x: (x - self.p.orig_min_value) / (self.p.orig_max_value - self.p.\n orig_min_value))', 'imgs'], {}), '(lambda x: (x - self.p.orig_min_value) / (self.p.orig_max_value -\n self.p.orig_min_value), imgs)\n', (11776, 11875), True, 'import tensorflow as tf\n'), ((11887, 11949), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['imgs'], {'clip_value_min': '(0.0)', 'clip_value_max': '(1.0)'}), '(imgs, clip_value_min=0.0, clip_value_max=1.0)\n', (11903, 11949), True, 'import tensorflow as tf\n'), ((12058, 12179), 
'tensorflow.map_fn', 'tf.map_fn', (['(lambda x: (x - self.p.training_min_value) / (self.p.training_max_value -\n self.p.training_min_value))', 'imgs'], {}), '(lambda x: (x - self.p.training_min_value) / (self.p.\n training_max_value - self.p.training_min_value), imgs)\n', (12067, 12179), True, 'import tensorflow as tf\n'), ((12203, 12265), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['imgs'], {'clip_value_min': '(0.0)', 'clip_value_max': '(1.0)'}), '(imgs, clip_value_min=0.0, clip_value_max=1.0)\n', (12219, 12265), True, 'import tensorflow as tf\n'), ((16504, 16549), 'tensorflow.constant', 'tf.constant', (['[0.0, 65535.0]'], {'dtype': 'tf.float32'}), '([0.0, 65535.0], dtype=tf.float32)\n', (16515, 16549), True, 'import tensorflow as tf\n'), ((16656, 16676), 'tensorflow.cumsum', 'tf.cumsum', (['histogram'], {}), '(histogram)\n', (16665, 16676), True, 'import tensorflow as tf\n'), ((16765, 16780), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (16773, 16780), True, 'import tensorflow as tf\n'), ((16995, 17021), 'tensorflow.cast', 'tf.cast', (['px_map', 'tf.uint16'], {}), '(px_map, tf.uint16)\n', (17002, 17021), True, 'import tensorflow as tf\n'), ((17227, 17255), 'tensorflow.cast', 'tf.cast', (['eq_hist', 'tf.float32'], {}), '(eq_hist, tf.float32)\n', (17234, 17255), True, 'import tensorflow as tf\n'), ((552, 588), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (573, 588), True, 'import tensorflow as tf\n'), ((611, 646), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (632, 646), True, 'import tensorflow as tf\n'), ((1250, 1302), 'tensorflow.image.crop_to_bounding_box', 'tf.image.crop_to_bounding_box', (['img', 'y0', 'x0', '(224)', '(224)'], {}), '(img, y0, x0, 224, 224)\n', (1279, 1302), True, 'import tensorflow as tf\n'), ((1697, 1733), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], 
tf.string)\n', (1718, 1733), True, 'import tensorflow as tf\n'), ((1756, 1792), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (1777, 1792), True, 'import tensorflow as tf\n'), ((1815, 1850), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (1836, 1850), True, 'import tensorflow as tf\n'), ((1873, 1910), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.float32'], {}), '([], tf.float32)\n', (1894, 1910), True, 'import tensorflow as tf\n'), ((2991, 3016), 'tensorflow.unstack', 'tf.unstack', (['imgs'], {'axis': '(-1)'}), '(imgs, axis=-1)\n', (3001, 3016), True, 'import tensorflow as tf\n'), ((3036, 3094), 'tensorflow.stack', 'tf.stack', (['[channels[0], channels[1], channels[2]]'], {'axis': '(-1)'}), '([channels[0], channels[1], channels[2]], axis=-1)\n', (3044, 3094), True, 'import tensorflow as tf\n'), ((4002, 4034), 'tensorflow.linalg.inv', 'tf.linalg.inv', (['forward_transform'], {}), '(forward_transform)\n', (4015, 4034), True, 'import tensorflow as tf\n'), ((4770, 4806), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[assert_op]'], {}), '([assert_op])\n', (4793, 4806), True, 'import tensorflow as tf\n'), ((4829, 4851), 'tensorflow.subtract', 'tf.subtract', (['imgs', '(1.0)'], {}), '(imgs, 1.0)\n', (4840, 4851), True, 'import tensorflow as tf\n'), ((4873, 4897), 'tensorflow.multiply', 'tf.multiply', (['images', '(2.0)'], {}), '(images, 2.0)\n', (4884, 4897), True, 'import tensorflow as tf\n'), ((5459, 5485), 'numpy.ones', 'np.ones', (['self.p.batch_size'], {}), '(self.p.batch_size)\n', (5466, 5485), True, 'import numpy as np\n'), ((7584, 7620), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[assert_op]'], {}), '([assert_op])\n', (7607, 7620), True, 'import tensorflow as tf\n'), ((7640, 7673), 'tensorflow.image.random_flip_up_down', 'tf.image.random_flip_up_down', (['img'], {}), '(img)\n', (7668, 
7673), True, 'import tensorflow as tf\n'), ((7692, 7728), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['img'], {}), '(img)\n', (7723, 7728), True, 'import tensorflow as tf\n'), ((7764, 7827), 'tensorflow.random.uniform', 'tf.random.uniform', ([], {'shape': '[]', 'minval': '(0)', 'maxval': '(4)', 'dtype': 'tf.int32'}), '(shape=[], minval=0, maxval=4, dtype=tf.int32)\n', (7781, 7827), True, 'import tensorflow as tf\n'), ((9845, 9863), 'tensorflow.less', 'tf.less', (['_min', '(0.0)'], {}), '(_min, 0.0)\n', (9852, 9863), True, 'import tensorflow as tf\n'), ((10844, 10880), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[assert_op]'], {}), '([assert_op])\n', (10867, 10880), True, 'import tensorflow as tf\n'), ((10903, 10928), 'tensorflow.cast', 'tf.cast', (['imgs', 'tf.float32'], {}), '(imgs, tf.float32)\n', (10910, 10928), True, 'import tensorflow as tf\n'), ((11030, 11101), 'tensorflow.image.resize', 'tf.image.resize', (['images', '(self.p.target_size[0], self.p.target_size[1])'], {}), '(images, (self.p.target_size[0], self.p.target_size[1]))\n', (11045, 11101), True, 'import tensorflow as tf\n'), ((11623, 11657), 'tensorflow.reduce_max', 'tf.reduce_max', (['imgs'], {'keepdims': '(True)'}), '(imgs, keepdims=True)\n', (11636, 11657), True, 'import tensorflow as tf\n'), ((12526, 12575), 'tensorflow.split', 'tf.split', ([], {'axis': '(3)', 'num_or_size_splits': '(3)', 'value': 'rgb'}), '(axis=3, num_or_size_splits=3, value=rgb)\n', (12534, 12575), True, 'import tensorflow as tf\n'), ((12718, 12754), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[assert_op]'], {}), '([assert_op])\n', (12741, 12754), True, 'import tensorflow as tf\n'), ((12778, 12834), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(3)', 'values': '[red, green, blue]', 'name': '"""rgb"""'}), "(axis=3, values=[red, green, blue], name='rgb')\n", (12787, 12834), True, 'import tensorflow as tf\n'), ((13336, 13385), 'tensorflow.split', 
'tf.split', ([], {'axis': '(3)', 'num_or_size_splits': '(3)', 'value': 'rgb'}), '(axis=3, num_or_size_splits=3, value=rgb)\n', (13344, 13385), True, 'import tensorflow as tf\n'), ((13528, 13564), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[assert_op]'], {}), '([assert_op])\n', (13551, 13564), True, 'import tensorflow as tf\n'), ((13785, 13909), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(3)', 'values': '[blue - self.p.VGG_MEAN[0], green - self.p.VGG_MEAN[1], red - self.p.\n VGG_MEAN[2]]', 'name': '"""bgr"""'}), "(axis=3, values=[blue - self.p.VGG_MEAN[0], green - self.p.\n VGG_MEAN[1], red - self.p.VGG_MEAN[2]], name='bgr')\n", (13794, 13909), True, 'import tensorflow as tf\n'), ((14432, 14481), 'tensorflow.split', 'tf.split', ([], {'axis': '(3)', 'num_or_size_splits': '(3)', 'value': 'rgb'}), '(axis=3, num_or_size_splits=3, value=rgb)\n', (14440, 14481), True, 'import tensorflow as tf\n'), ((14624, 14660), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[assert_op]'], {}), '([assert_op])\n', (14647, 14660), True, 'import tensorflow as tf\n'), ((14881, 15005), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(3)', 'values': '[red - self.p.VGG_MEAN[0], green - self.p.VGG_MEAN[1], blue - self.p.\n VGG_MEAN[2]]', 'name': '"""res"""'}), "(axis=3, values=[red - self.p.VGG_MEAN[0], green - self.p.VGG_MEAN\n [1], blue - self.p.VGG_MEAN[2]], name='res')\n", (14890, 15005), True, 'import tensorflow as tf\n'), ((15503, 15552), 'tensorflow.split', 'tf.split', ([], {'axis': '(3)', 'num_or_size_splits': '(3)', 'value': 'rgb'}), '(axis=3, num_or_size_splits=3, value=rgb)\n', (15511, 15552), True, 'import tensorflow as tf\n'), ((15695, 15731), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[assert_op]'], {}), '([assert_op])\n', (15718, 15731), True, 'import tensorflow as tf\n'), ((15952, 16076), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(3)', 'values': '[blue - self.p.VGG_MEAN[0], green - self.p.VGG_MEAN[1], 
red - self.p.\n VGG_MEAN[2]]', 'name': '"""bgr"""'}), "(axis=3, values=[blue - self.p.VGG_MEAN[0], green - self.p.\n VGG_MEAN[1], red - self.p.VGG_MEAN[2]], name='bgr')\n", (15961, 16076), True, 'import tensorflow as tf\n'), ((16593, 16619), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (16600, 16619), True, 'import tensorflow as tf\n'), ((3208, 3273), 'tensorflow.image.random_crop', 'tf.image.random_crop', (['imgs'], {'size': '[self.p.BATCH_SIZE, 224, 224, 1]'}), '(imgs, size=[self.p.BATCH_SIZE, 224, 224, 1])\n', (3228, 3273), True, 'import tensorflow as tf\n'), ((3516, 3610), 'tensorflow.image.crop_to_bounding_box', 'tf.image.crop_to_bounding_box', (['imgs', 'y0', 'x0', 'self.p.target_size[1]', 'self.p.target_size[0]'], {}), '(imgs, y0, x0, self.p.target_size[1], self.p.\n target_size[0])\n', (3545, 3610), True, 'import tensorflow as tf\n'), ((4722, 4741), 'tensorflow.reduce_max', 'tf.reduce_max', (['imgs'], {}), '(imgs)\n', (4735, 4741), True, 'import tensorflow as tf\n'), ((6218, 6264), 'numpy.random.uniform', 'np.random.uniform', (['(0.8)', '(1.0)', 'self.p.batch_size'], {}), '(0.8, 1.0, self.p.batch_size)\n', (6235, 6264), True, 'import numpy as np\n'), ((6301, 6327), 'numpy.ones', 'np.ones', (['self.p.batch_size'], {}), '(self.p.batch_size)\n', (6308, 6327), True, 'import numpy as np\n'), ((7538, 7556), 'tensorflow.reduce_max', 'tf.reduce_max', (['img'], {}), '(img)\n', (7551, 7556), True, 'import tensorflow as tf\n'), ((10796, 10815), 'tensorflow.reduce_max', 'tf.reduce_max', (['imgs'], {}), '(imgs)\n', (10809, 10815), True, 'import tensorflow as tf\n'), ((16944, 16976), 'tensorflow.cast', 'tf.cast', (['(pix_cnt - 1)', 'tf.float32'], {}), '(pix_cnt - 1, tf.float32)\n', (16951, 16976), True, 'import tensorflow as tf\n'), ((17127, 17151), 'tensorflow.cast', 'tf.cast', (['image', 'tf.int32'], {}), '(image, tf.int32)\n', (17134, 17151), True, 'import tensorflow as tf\n'), ((7871, 7885), 'tensorflow.constant', 
'tf.constant', (['(1)'], {}), '(1)\n', (7882, 7885), True, 'import tensorflow as tf\n'), ((7896, 7918), 'tensorflow.image.rot90', 'tf.image.rot90', (['img', '(1)'], {}), '(img, 1)\n', (7910, 7918), True, 'import tensorflow as tf\n'), ((7928, 7944), 'tensorflow.identity', 'tf.identity', (['img'], {}), '(img)\n', (7939, 7944), True, 'import tensorflow as tf\n'), ((7989, 8003), 'tensorflow.constant', 'tf.constant', (['(2)'], {}), '(2)\n', (8000, 8003), True, 'import tensorflow as tf\n'), ((8014, 8036), 'tensorflow.image.rot90', 'tf.image.rot90', (['img', '(2)'], {}), '(img, 2)\n', (8028, 8036), True, 'import tensorflow as tf\n'), ((8046, 8062), 'tensorflow.identity', 'tf.identity', (['img'], {}), '(img)\n', (8057, 8062), True, 'import tensorflow as tf\n'), ((8107, 8121), 'tensorflow.constant', 'tf.constant', (['(3)'], {}), '(3)\n', (8118, 8121), True, 'import tensorflow as tf\n'), ((8132, 8154), 'tensorflow.image.rot90', 'tf.image.rot90', (['img', '(3)'], {}), '(img, 3)\n', (8146, 8154), True, 'import tensorflow as tf\n'), ((8164, 8180), 'tensorflow.identity', 'tf.identity', (['img'], {}), '(img)\n', (8175, 8180), True, 'import tensorflow as tf\n'), ((12668, 12694), 'tensorflow.constant', 'tf.constant', (['[224, 224, 1]'], {}), '([224, 224, 1])\n', (12679, 12694), True, 'import tensorflow as tf\n'), ((13478, 13504), 'tensorflow.constant', 'tf.constant', (['[224, 224, 1]'], {}), '([224, 224, 1])\n', (13489, 13504), True, 'import tensorflow as tf\n'), ((14574, 14600), 'tensorflow.constant', 'tf.constant', (['[224, 224, 1]'], {}), '([224, 224, 1])\n', (14585, 14600), True, 'import tensorflow as tf\n'), ((15645, 15671), 'tensorflow.constant', 'tf.constant', (['[224, 224, 1]'], {}), '([224, 224, 1])\n', (15656, 15671), True, 'import tensorflow as tf\n'), ((16722, 16740), 'tensorflow.greater', 'tf.greater', (['cdf', '(0)'], {}), '(cdf, 0)\n', (16732, 16740), True, 'import tensorflow as tf\n'), ((16898, 16932), 'tensorflow.cast', 'tf.cast', (['(cdf - cdf_min)', 
'tf.float32'], {}), '(cdf - cdf_min, tf.float32)\n', (16905, 16932), True, 'import tensorflow as tf\n')] |
import numpy as np
import sklearn.metrics as metrics
import matplotlib.pyplot as plt
import logging
import glob
import os
import tmllc
# Unified log format: timestamp, level, logger name and function, message.
logging.basicConfig(
    format='%(asctime)s %(levelname)s: %(name)s(%(funcName)s): %(message)s',
    level=logging.INFO)

# Run to compare and the directory its diagnostic figures go into.
runName = "fakeWideParams"
saveDirDiag = os.path.join("runs", runName, "comparison")

# Figure output toggles: write to disk / open interactive windows.
figSave = True
figShow = True

# Each trained model in the run is identified by its definition file.
pattern = os.path.join("runs", runName, "*", "modelDefinition.json")
models = glob.glob(pattern)

# Load the validation split (features, labels, truth table) once up front.
dataset = "valid"
saveDirData = os.path.join("runs", runName, "data")
features = tmllc.io.loadDataset(f"{dataset}Features", saveDirData)
labels = tmllc.io.loadDataset(f"{dataset}Labels", saveDirData)
truthTable = tmllc.io.loadTable(f"{dataset}TruthTable", saveDirData)
# Per-model accumulators, filled by the evaluation loop below
# (one entry per model, in processing order).
modelNames = []
f1s = []
accs = []
precisions = []
recalls = []
ROCfprs = []
ROCtprs = []
ROCaucs = []
lossHistory = []
lossValHistory = []

# Make sure the figure directory exists before anything is written.
# os.makedirs(..., exist_ok=True) is race-free — no check-then-create
# (TOCTOU) window as with exists()+mkdir — and also creates any missing
# parent directories.
if figSave:
    os.makedirs(saveDirDiag, exist_ok=True)
for modelfname in models:
    # Layout: runs/<runName>/<modelName>/modelDefinition.json — the model
    # directory is the file's parent and the model name is its basename.
    # os.path.dirname/basename is portable, unlike splitting on "/"
    # (which breaks on Windows path separators).
    modelPath = os.path.dirname(modelfname)
    modelName = os.path.basename(modelPath)
    modelNames.append(modelName)
    logging.info("Working on model {}".format(modelName))

    model = tmllc.io.loadModel(modelPath)
    # The classification threshold was tuned at training time and stored
    # next to the model definition.
    threshold = tmllc.io.jsonRead(os.path.join(modelPath, "threshold.json"))
    threshold = threshold['threshold']

    # Keras-style history dict with per-epoch 'loss' and 'val_loss' lists.
    history = tmllc.io.loadHistory(modelPath)
    lossValHistory.append(history['val_loss'])
    lossHistory.append(history['loss'])

    # Flatten the (N, 1) prediction array to shape (N,) before scoring.
    preds = model.predict(features)
    preds = preds.reshape(np.size(preds))
    predClass = tmllc.utils.predictClass(preds, threshold)

    # Threshold-dependent classification scores.
    f1s.append(metrics.f1_score(labels, predClass))
    accs.append(metrics.accuracy_score(labels, predClass))
    precisions.append(metrics.precision_score(labels, predClass))
    recalls.append(metrics.recall_score(labels, predClass))

    # Threshold-independent ROC curve and its area under the curve.
    fpr, tpr, _ = metrics.roc_curve(labels, preds)
    ROCfprs.append(fpr)
    ROCtprs.append(tpr)
    ROCaucs.append(metrics.auc(fpr, tpr))
# One bar chart per classification score; the best-scoring model's bar is
# painted red.
ind = np.arange(len(modelNames))
metricsLabels = ["$F_1$ score", "Precision", "Recall", "Accuracy"]
fnames = ["scoreF1", "scorePrecision", "scoreRecall", "scoreAccuracy"]
for scores, chartTitle, outName in zip([f1s, precisions, recalls, accs],
                                       metricsLabels, fnames):
    fig, ax = plt.subplots()
    plt.grid(True)
    bars = plt.bar(ind, scores, zorder=9)
    # Highlight the winning model.
    bars[np.argmax(scores)].set_facecolor("r")
    ax.set_xticks(ind)
    ax.set_xticklabels(modelNames)
    plt.title(chartTitle)
    if figSave:
        tmllc.plots.figSave(os.path.join(saveDirDiag, outName), fig)
# Overlay every model's ROC curve on a single figure, AUC in the legend.
fig = plt.figure()
for falseRate, trueRate, aucVal, curveName in zip(ROCfprs, ROCtprs,
                                                  ROCaucs, modelNames):
    plt.plot(falseRate, trueRate,
             label='{} (AUC = {:0.4f})'.format(curveName, aucVal))
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.xlim([-0.01, 1.01])
plt.legend(loc='lower right')
plt.grid(True)
plt.tight_layout()
if figSave:
    tmllc.plots.figSave(os.path.join(saveDirDiag, "roc"), fig)
# Loss curves: the dashed training losses are drawn first while recording
# the colour matplotlib assigns each model; the solid validation losses are
# then drawn reusing those colours, so each model's pair of curves shares
# one colour. Only the validation curves carry legend labels.
fig = plt.figure()
for losses, name in zip([lossHistory, lossValHistory],
                        ["Training loss", "Validation loss"]):
    isTraining = (name == "Training loss")
    ls = '--' if isTraining else '-'
    if isTraining:
        trColors = []
    for ii, (loss, modelName) in enumerate(zip(losses, modelNames)):
        if isTraining:
            # Let matplotlib pick the colour, remember it for the
            # validation pass, and keep the curve out of the legend.
            line = plt.plot(loss, label=None, ls=ls, color=None)
            trColors.append(line[0].get_color())
        else:
            plt.plot(loss, label=modelName, ls=ls, color=trColors[ii])
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend(loc='best')
plt.grid(True)
if figSave:
    tmllc.plots.figSave(os.path.join(saveDirDiag, "loss"), fig)
if figShow:
    plt.show()
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.auc",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"sklearn.metrics.roc_curve",
"os.path.exists",
"tmllc.io.loadHistory",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.mkdir",
"glob.glob",
... | [((137, 259), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)s: %(name)s(%(funcName)s): %(message)s"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s %(levelname)s: %(name)s(%(funcName)s): %(message)s', level\n =logging.INFO)\n", (156, 259), False, 'import logging\n'), ((292, 335), 'os.path.join', 'os.path.join', (['"""runs"""', 'runName', '"""comparison"""'], {}), "('runs', runName, 'comparison')\n", (304, 335), False, 'import os\n'), ((377, 435), 'os.path.join', 'os.path.join', (['"""runs"""', 'runName', '"""*"""', '"""modelDefinition.json"""'], {}), "('runs', runName, '*', 'modelDefinition.json')\n", (389, 435), False, 'import os\n'), ((445, 463), 'glob.glob', 'glob.glob', (['pattern'], {}), '(pattern)\n', (454, 463), False, 'import glob\n'), ((497, 534), 'os.path.join', 'os.path.join', (['"""runs"""', 'runName', '"""data"""'], {}), "('runs', runName, 'data')\n", (509, 534), False, 'import os\n'), ((2707, 2719), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2717, 2719), True, 'import matplotlib.pyplot as plt\n'), ((3130, 3142), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3140, 3142), True, 'import matplotlib.pyplot as plt\n'), ((3744, 3763), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (3754, 3763), True, 'import matplotlib.pyplot as plt\n'), ((3764, 3782), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (3774, 3782), True, 'import matplotlib.pyplot as plt\n'), ((3783, 3805), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (3793, 3805), True, 'import matplotlib.pyplot as plt\n'), ((3806, 3820), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3814, 3820), True, 'import matplotlib.pyplot as plt\n'), ((952, 973), 'os.mkdir', 'os.mkdir', (['saveDirDiag'], {}), '(saveDirDiag)\n', (960, 973), False, 'import os\n'), ((1055, 1079), 'os.path.join', 
'os.path.join', (['*path[:-1]'], {}), '(*path[:-1])\n', (1067, 1079), False, 'import os\n'), ((1218, 1247), 'tmllc.io.loadModel', 'tmllc.io.loadModel', (['modelPath'], {}), '(modelPath)\n', (1236, 1247), False, 'import tmllc\n'), ((1388, 1419), 'tmllc.io.loadHistory', 'tmllc.io.loadHistory', (['modelPath'], {}), '(modelPath)\n', (1408, 1419), False, 'import tmllc\n'), ((1611, 1653), 'tmllc.utils.predictClass', 'tmllc.utils.predictClass', (['preds', 'threshold'], {}), '(preds, threshold)\n', (1635, 1653), False, 'import tmllc\n'), ((1668, 1703), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['labels', 'predClass'], {}), '(labels, predClass)\n', (1684, 1703), True, 'import sklearn.metrics as metrics\n'), ((1733, 1774), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['labels', 'predClass'], {}), '(labels, predClass)\n', (1755, 1774), True, 'import sklearn.metrics as metrics\n'), ((1812, 1854), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['labels', 'predClass'], {}), '(labels, predClass)\n', (1835, 1854), True, 'import sklearn.metrics as metrics\n'), ((1901, 1940), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['labels', 'predClass'], {}), '(labels, predClass)\n', (1921, 1940), True, 'import sklearn.metrics as metrics\n'), ((1991, 2023), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['labels', 'preds'], {}), '(labels, preds)\n', (2008, 2023), True, 'import sklearn.metrics as metrics\n'), ((2394, 2408), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2406, 2408), True, 'import matplotlib.pyplot as plt\n'), ((2413, 2427), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2421, 2427), True, 'import matplotlib.pyplot as plt\n'), ((2439, 2466), 'matplotlib.pyplot.bar', 'plt.bar', (['ind', 'arr'], {'zorder': '(9)'}), '(ind, arr, zorder=9)\n', (2446, 2466), True, 'import matplotlib.pyplot as plt\n'), ((2480, 2494), 'numpy.argmax', 'np.argmax', (['arr'], {}), '(arr)\n', (2489, 2494), True, 
'import numpy as np\n'), ((2593, 2615), 'matplotlib.pyplot.title', 'plt.title', (['metricLabel'], {}), '(metricLabel)\n', (2602, 2615), True, 'import matplotlib.pyplot as plt\n'), ((2874, 2907), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (2884, 2907), True, 'import matplotlib.pyplot as plt\n'), ((2912, 2944), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (2922, 2944), True, 'import matplotlib.pyplot as plt\n'), ((2949, 2972), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-0.01, 1.01]'], {}), '([-0.01, 1.01])\n', (2957, 2972), True, 'import matplotlib.pyplot as plt\n'), ((2976, 3005), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (2986, 3005), True, 'import matplotlib.pyplot as plt\n'), ((3010, 3024), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3018, 3024), True, 'import matplotlib.pyplot as plt\n'), ((3029, 3047), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3045, 3047), True, 'import matplotlib.pyplot as plt\n'), ((3906, 3916), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3914, 3916), True, 'import matplotlib.pyplot as plt\n'), ((907, 934), 'os.path.exists', 'os.path.exists', (['saveDirDiag'], {}), '(saveDirDiag)\n', (921, 934), False, 'import os\n'), ((1287, 1328), 'os.path.join', 'os.path.join', (['modelPath', '"""threshold.json"""'], {}), "(modelPath, 'threshold.json')\n", (1299, 1328), False, 'import os\n'), ((1574, 1588), 'numpy.size', 'np.size', (['preds'], {}), '(preds)\n', (1581, 1588), True, 'import numpy as np\n'), ((2091, 2112), 'sklearn.metrics.auc', 'metrics.auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (2102, 2112), True, 'import sklearn.metrics as metrics\n'), ((3631, 3678), 'matplotlib.pyplot.plot', 'plt.plot', (['loss'], {'label': 'label', 'ls': 'ls', 'color': 'color'}), '(loss, label=label, ls=ls, 
color=color)\n', (3639, 3678), True, 'import matplotlib.pyplot as plt\n'), ((3853, 3886), 'os.path.join', 'os.path.join', (['saveDirDiag', '"""loss"""'], {}), "(saveDirDiag, 'loss')\n", (3865, 3886), False, 'import os\n'), ((2657, 2689), 'os.path.join', 'os.path.join', (['saveDirDiag', 'fname'], {}), '(saveDirDiag, fname)\n', (2669, 2689), False, 'import os\n'), ((3084, 3116), 'os.path.join', 'os.path.join', (['saveDirDiag', '"""roc"""'], {}), "(saveDirDiag, 'roc')\n", (3096, 3116), False, 'import os\n')] |
"""
The main sampling script.
Takes pixel positions (x, y) and the number of components (npeaks)
from the command line (via sys.argv[]), sets the priors, the likelihood
function, preforms some sanity checks, and fires up MultiNest through
pymultinest.
"""
import os
import sys
import warnings
import mpi4py
import numpy as np
from astropy import log
from astropy.io import fits
# Those import warnings are annoying
with warnings.catch_warnings():
warnings.simplefilter('ignore')
import pymultinest
from pyspeckit.spectrum.models.ammonia import cold_ammonia_model
from pyspecnest.ammonia import get_nh3_model
from pyspecnest.chaincrunch import pars_xy, lnZ_xy, get_zero_evidence
# All the I/O functions now reside here
import opencube
# Configuration for line modelling setup (gets passed to pyspeckit)
from config import line_names, npars
# Path settings
from config import name_id, proj_dir, file_Zs
from config import default_yx, default_npeaks
# Kwargs for digesting and saving spectral cube (making it on the
# fly every time we need a spectrum is too slow!)
from config import cube_save_kwargs
# Finally, MultiNest settings and priors
from config import n_live_points, sampling_efficiency, get_priors_xoff_wrapped
# Compatibility with Python 2.7
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
def mpi_rank():
"""
Returns the rank of the calling process.
"""
comm = mpi4py.MPI.COMM_WORLD
rank = comm.Get_rank()
return rank
# pop culture references always deserve their own function
def i_am_root():
"""
Checks if the running subprocess is of rank 0
"""
try:
return True if mpi_rank() == 0 else False
except AttributeError:
# not running MPI
return True
try:
npeaks = int(sys.argv[1])
except IndexError:
npeaks = default_npeaks
log.info("npeaks not specified, setting to {}".format(npeaks))
try:
yx = int(sys.argv[2]), int(sys.argv[3])
except IndexError:
yx = default_yx
log.info("xy-pixel not specified, setting to {}".format(yx[::-1]))
try:
plotting = bool(int(sys.argv[4]))
except IndexError:
plotting = 0
if not plotting:
plot_fit, plot_corner, show_fit, show_corner = False, False, False, False
else: # defaults a for non-batch run
plot_fit = True
plot_corner = True
show_fit = True
show_corner = True
from chainconsumer import ChainConsumer # Optional if no plotting is done
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
y, x = yx
sp = opencube.get_spectrum(x, y, **cube_save_kwargs)
fittype_fmt = 'cold_ammonia_x{}'
fitmodel = cold_ammonia_model
sp.specfit.Registry.add_fitter(fittype_fmt.format(npeaks), npars=npars,
function=fitmodel(line_names=line_names))
# is this still needed?
opencube.update_model(sp, fittype_fmt.format(npeaks))
# needed because of this:
# https://github.com/pyspeckit/pyspeckit/issues/179
sp.specfit.fitter.npeaks = npeaks
# npeaks > 1 seems to break because of fitter.parnames is not mirrored
if len(sp.specfit.fitter.parnames) == npars and npeaks > 1:
sp.specfit.fitter.parnames *= npeaks
# TODO: make a wrapper function for pyspecnest instead!
priors = get_priors_xoff_wrapped(npeaks)
nh3_model = get_nh3_model(sp, line_names, sp.error,
priors=priors, npeaks=npeaks)
# Safeguard - check some common causes of failure before scheduling
# a job that would just throw tens of thousands of errors at us
no_valid_chans = not np.any(np.isfinite(nh3_model.ydata))
sanity_check = np.isfinite(
nh3_model.log_likelihood([15, 5, 15, 0.2, 7, 0.5] * npeaks,
nh3_model.npars, nh3_model.dof))
if no_valid_chans or not sanity_check:
# This should fail if, e.g., the errors are not finite
log.error("no valid pixels at x={}; y={}. Aborting.".format(*yx[::-1]))
sys.exit()
# The first process gets to make the directory structure!
output_dir = os.path.join(proj_dir, 'nested-sampling/')
fig_dir = os.path.join(output_dir, 'figs/')
suffix = 'x{1}y{0}'.format(*yx)
chains_dir = '{}chains/{}_{}'.format(output_dir, name_id, suffix)
if not os.path.exists(chains_dir):
try: # hacks around a race condition
os.makedirs(chains_dir)
except OSError as e:
if e.errno != 17:
raise
chains_dir = '{}/{}-'.format(chains_dir, npeaks)
# Run MultiNest on the model+priors specified
pymultinest.run(nh3_model.xoff_symmetric_log_likelihood,
nh3_model.prior_uniform, nh3_model.npars,
outputfiles_basename=chains_dir,
verbose=True, n_live_points=n_live_points,
sampling_efficiency=sampling_efficiency)
# The remainder of the script is not essential for sampling, and can be safely
# moved out into a script of its own.
if i_am_root() and plot_fit:
# parse the results as sensible output
from pyspecnest.chaincrunch import analyzer_xy
a = analyzer_xy(x, y, npeaks, output_dir=output_dir,
name_id=name_id, npars=npars)
a_lnZ = a.get_stats()['global evidence']
log.info('ln(Z) for model with {} line(s) = {:.1f}'.format(npeaks, a_lnZ))
try:
lnZ0 = fits.getdata(file_Zs)[0]
except (FileNotFoundError, OSError) as e:
cubes = opencube.make_cube_shh()
lnZ0 = get_zero_evidence(data=cubes.cube, rms=cubes.errorcube,
normalize=False)
Zs = lnZ_xy(list(np.arange(npeaks+1)), x=x, y=y, output_dir=output_dir,
name_id=name_id, silent=True, lnZ0=(lnZ0[y, x], 0))
log.info('ln(Z{}/Z{}) = {:.2f}'.format(npeaks, npeaks-1,
Zs[npeaks] - Zs[npeaks-1]))
if npeaks > 1:
log.info('ln(Z{}/Z{}) = {:.2f}'.format(npeaks, 0, Zs[npeaks] - Zs[0]))
if plot_fit and i_am_root():
sp.plotter(errstyle='fill')
mle_pars = pars_xy(x=x, y=y, npars=npars, npeaks=npeaks,
output_dir=output_dir, name_id=name_id)
mle_parinfo = sp.specfit.fitter._make_parinfo(mle_pars, npeaks=npeaks)[0]
try:
sp.specfit.plot_fit(xarr=sp.xarr, pars=mle_parinfo,
show_components=True)
except TypeError:
# eh? does it want pars or parinfo?
sp.specfit.plot_fit(xarr=sp.xarr, pars=mle_pars, show_components=True)
# annotate the Bayes factors
plt.annotate('ln(Z{}/Z{}) = {:.2f}'.format(npeaks, npeaks-1,
Zs[npeaks] - Zs[npeaks-1]), xy=(0.05, 0.90),
xycoords='axes fraction')
if npeaks > 1:
plt.annotate('ln(Z{}/Z{}) = {:.2f}'.format(npeaks, 0,
Zs[npeaks] - Zs[0]), xy=(0.05, 0.85),
xycoords='axes fraction')
if show_fit:
plt.show()
fig_name = "{}-fit-{}-x{}".format(name_id, suffix, npeaks)
plt.savefig(os.path.join(fig_dir, fig_name + ".pdf"))
if plot_corner and i_am_root():
mle_multinest = pars_xy(x=x, y=y, npars=npars, npeaks=npeaks,
output_dir=output_dir, name_id=name_id)
unfrozen_slice = nh3_model.get_nonfixed_slice(a.data.shape, axis=1)
c = ChainConsumer()
parameters = nh3_model.get_names(latex=True, no_fixed=True)
c.add_chain(a.data[:, 2:][unfrozen_slice], parameters=parameters)
c.configure(statistics="max", summary=True)
fig = c.plotter.plot(figsize="column")
fig.get_size_inches()
fig.set_size_inches(9, 7)
fig_name = "{}-corner-{}-x{}".format(name_id, suffix, npeaks)
plt.savefig(fig_dir + fig_name + ".pdf")
if show_corner:
plt.show()
| [
"opencube.make_cube_shh",
"numpy.isfinite",
"sys.exit",
"numpy.arange",
"os.path.exists",
"pyspecnest.chaincrunch.analyzer_xy",
"pymultinest.run",
"pyspecnest.chaincrunch.get_zero_evidence",
"warnings.simplefilter",
"chainconsumer.ChainConsumer",
"matplotlib.pyplot.savefig",
"opencube.get_spec... | [((2554, 2601), 'opencube.get_spectrum', 'opencube.get_spectrum', (['x', 'y'], {}), '(x, y, **cube_save_kwargs)\n', (2575, 2601), False, 'import opencube\n'), ((3240, 3271), 'config.get_priors_xoff_wrapped', 'get_priors_xoff_wrapped', (['npeaks'], {}), '(npeaks)\n', (3263, 3271), False, 'from config import n_live_points, sampling_efficiency, get_priors_xoff_wrapped\n'), ((3285, 3354), 'pyspecnest.ammonia.get_nh3_model', 'get_nh3_model', (['sp', 'line_names', 'sp.error'], {'priors': 'priors', 'npeaks': 'npeaks'}), '(sp, line_names, sp.error, priors=priors, npeaks=npeaks)\n', (3298, 3354), False, 'from pyspecnest.ammonia import get_nh3_model\n'), ((3987, 4029), 'os.path.join', 'os.path.join', (['proj_dir', '"""nested-sampling/"""'], {}), "(proj_dir, 'nested-sampling/')\n", (3999, 4029), False, 'import os\n'), ((4040, 4073), 'os.path.join', 'os.path.join', (['output_dir', '"""figs/"""'], {}), "(output_dir, 'figs/')\n", (4052, 4073), False, 'import os\n'), ((4446, 4675), 'pymultinest.run', 'pymultinest.run', (['nh3_model.xoff_symmetric_log_likelihood', 'nh3_model.prior_uniform', 'nh3_model.npars'], {'outputfiles_basename': 'chains_dir', 'verbose': '(True)', 'n_live_points': 'n_live_points', 'sampling_efficiency': 'sampling_efficiency'}), '(nh3_model.xoff_symmetric_log_likelihood, nh3_model.\n prior_uniform, nh3_model.npars, outputfiles_basename=chains_dir,\n verbose=True, n_live_points=n_live_points, sampling_efficiency=\n sampling_efficiency)\n', (4461, 4675), False, 'import pymultinest\n'), ((422, 447), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (445, 447), False, 'import warnings\n'), ((453, 484), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (474, 484), False, 'import warnings\n'), ((2510, 2537), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (2516, 2537), True, 'import matplotlib.pyplot as plt\n'), ((3904, 3914), 
'sys.exit', 'sys.exit', ([], {}), '()\n', (3912, 3914), False, 'import sys\n'), ((4179, 4205), 'os.path.exists', 'os.path.exists', (['chains_dir'], {}), '(chains_dir)\n', (4193, 4205), False, 'import os\n'), ((4975, 5053), 'pyspecnest.chaincrunch.analyzer_xy', 'analyzer_xy', (['x', 'y', 'npeaks'], {'output_dir': 'output_dir', 'name_id': 'name_id', 'npars': 'npars'}), '(x, y, npeaks, output_dir=output_dir, name_id=name_id, npars=npars)\n', (4986, 5053), False, 'from pyspecnest.chaincrunch import analyzer_xy\n'), ((5911, 6000), 'pyspecnest.chaincrunch.pars_xy', 'pars_xy', ([], {'x': 'x', 'y': 'y', 'npars': 'npars', 'npeaks': 'npeaks', 'output_dir': 'output_dir', 'name_id': 'name_id'}), '(x=x, y=y, npars=npars, npeaks=npeaks, output_dir=output_dir,\n name_id=name_id)\n', (5918, 6000), False, 'from pyspecnest.chaincrunch import pars_xy, lnZ_xy, get_zero_evidence\n'), ((6966, 7055), 'pyspecnest.chaincrunch.pars_xy', 'pars_xy', ([], {'x': 'x', 'y': 'y', 'npars': 'npars', 'npeaks': 'npeaks', 'output_dir': 'output_dir', 'name_id': 'name_id'}), '(x=x, y=y, npars=npars, npeaks=npeaks, output_dir=output_dir,\n name_id=name_id)\n', (6973, 7055), False, 'from pyspecnest.chaincrunch import pars_xy, lnZ_xy, get_zero_evidence\n'), ((7160, 7175), 'chainconsumer.ChainConsumer', 'ChainConsumer', ([], {}), '()\n', (7173, 7175), False, 'from chainconsumer import ChainConsumer\n'), ((7528, 7568), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + fig_name + '.pdf')"], {}), "(fig_dir + fig_name + '.pdf')\n", (7539, 7568), True, 'import matplotlib.pyplot as plt\n'), ((3542, 3570), 'numpy.isfinite', 'np.isfinite', (['nh3_model.ydata'], {}), '(nh3_model.ydata)\n', (3553, 3570), True, 'import numpy as np\n'), ((4256, 4279), 'os.makedirs', 'os.makedirs', (['chains_dir'], {}), '(chains_dir)\n', (4267, 4279), False, 'import os\n'), ((6780, 6790), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6788, 6790), True, 'import matplotlib.pyplot as plt\n'), ((6871, 6911), 
'os.path.join', 'os.path.join', (['fig_dir', "(fig_name + '.pdf')"], {}), "(fig_dir, fig_name + '.pdf')\n", (6883, 6911), False, 'import os\n'), ((7598, 7608), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7606, 7608), True, 'import matplotlib.pyplot as plt\n'), ((5224, 5245), 'astropy.io.fits.getdata', 'fits.getdata', (['file_Zs'], {}), '(file_Zs)\n', (5236, 5245), False, 'from astropy.io import fits\n'), ((5311, 5335), 'opencube.make_cube_shh', 'opencube.make_cube_shh', ([], {}), '()\n', (5333, 5335), False, 'import opencube\n'), ((5351, 5423), 'pyspecnest.chaincrunch.get_zero_evidence', 'get_zero_evidence', ([], {'data': 'cubes.cube', 'rms': 'cubes.errorcube', 'normalize': '(False)'}), '(data=cubes.cube, rms=cubes.errorcube, normalize=False)\n', (5368, 5423), False, 'from pyspecnest.chaincrunch import pars_xy, lnZ_xy, get_zero_evidence\n'), ((5479, 5500), 'numpy.arange', 'np.arange', (['(npeaks + 1)'], {}), '(npeaks + 1)\n', (5488, 5500), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import torch,time,os,pickle,random
from torch import nn as nn
from nnLayer import *
from metrics import *
from collections import Counter,Iterable
from sklearn.model_selection import StratifiedKFold,KFold
from torch.backends import cudnn
from tqdm import tqdm
from Others import *
class BaseClassifier:
    """Abstract base class bundling the training loop, k-fold cross validation,
    checkpoint save/load and evaluation helpers shared by the DTI models below.

    Subclasses are expected to set up in their ``__init__``:
    ``self.moduleList`` / ``self.embModuleList`` / ``self.finetunedEmbList``
    (``nn.ModuleList``), ``self.criterion`` (loss), ``self.sampleType`` and
    ``self.device``, and to implement :meth:`calculate_y_logit`.
    """
    def __init__(self):
        pass
    def calculate_y_logit(self, X, XLen):
        # Abstract hook: subclasses return a dict with at least a 'y_logit' tensor
        # (and optionally an extra 'loss' term, see calculate_loss below).
        pass
    def cv_train(self, dataClass, trainSize=256, batchSize=256, epoch=100, stopRounds=10, earlyStop=10, saveRounds=1,
                 optimType='Adam', preheat=5, lr1=0.001, lr2=0.00003, momentum=0.9, weightDecay=0, kFold=5, isHigherBetter=True, metrics="AUC", report=["ACC", "AUC"],
                 savePath='model', seed=9527, loc=-1):
        """k-fold cross validation stratified on the label column
        (``eSeqData[i][2]``).  Each fold re-initializes the parameters, trains
        via :meth:`train` and collects the fold's metric report; ``loc>0``
        restricts the run to that single (1-based) fold."""
        skf = StratifiedKFold(n_splits=kFold, random_state=seed, shuffle=True)
        validRes = []
        tvIdList = list(range(dataClass.trainSampleNum+dataClass.validSampleNum))#dataClass.trainIdList+dataClass.validIdList
        #self._save_emb('cache/_preEmbedding.pkl')
        for i,(trainIndices,validIndices) in enumerate(skf.split(tvIdList, [i[2] for i in dataClass.eSeqData])):
            print(f'CV_{i+1}:')
            if loc>0 and i+1!=loc:
                print(f'Pass CV_{i+1}')
                continue
            self.reset_parameters()
            #self._load_emb('cache/_preEmbedding.pkl')
            # The dataClass split is overwritten in place for each fold.
            dataClass.trainIdList,dataClass.validIdList = trainIndices,validIndices
            dataClass.trainSampleNum,dataClass.validSampleNum = len(trainIndices),len(validIndices)
            res = self.train(dataClass,trainSize,batchSize,epoch,stopRounds,earlyStop,saveRounds,optimType,preheat,lr1,lr2,momentum,weightDecay,
                             isHigherBetter,metrics,report,f"{savePath}_cv{i+1}")
            validRes.append(res)
        Metrictor.table_show(validRes, report)
    def cv_train_by_protein(self, dataClass, trainSize=256, batchSize=256, epoch=100, stopRounds=10, earlyStop=10, saveRounds=1,
                 optimType='Adam', preheat=5, lr1=0.001, lr2=0.00003, momentum=0.9, weightDecay=0, kFold=5, isHigherBetter=True, metrics="AUC", report=["ACC", "AUC"],
                 savePath='model', seed=9527, loc=-1):
        """Protein-wise (cold-protein) k-fold CV: folds are drawn over protein
        ids so no protein appears in both the train and validation split."""
        kf = KFold(n_splits=kFold, random_state=seed, shuffle=True)
        validRes = []
        proteins = list(range(len(dataClass.p2id)))#dataClass.trainIdList+dataClass.validIdList
        #self._save_emb('cache/_preEmbedding.pkl')
        for i,(trainProteins,validProteins) in enumerate(kf.split(proteins)):
            print(f'CV_{i+1}:')
            if loc>0 and i+1!=loc:
                print(f'Pass CV_{i+1}')
                continue
            self.reset_parameters()
            #self._load_emb('cache/_preEmbedding.pkl')
            # Select interaction records whose protein id falls in the fold.
            dataClass.trainIdList = [i for i in range(len(dataClass.eSeqData)) if dataClass.eSeqData[i,0] in trainProteins]
            dataClass.validIdList = [i for i in range(len(dataClass.eSeqData)) if dataClass.eSeqData[i,0] in validProteins]
            dataClass.trainSampleNum,dataClass.validSampleNum = len(dataClass.trainIdList),len(dataClass.validIdList)
            res = self.train(dataClass,trainSize,batchSize,epoch,stopRounds,earlyStop,saveRounds,optimType,preheat,lr1,lr2,momentum,weightDecay,
                             isHigherBetter,metrics,report,f"{savePath}_cv{i+1}")
            validRes.append(res)
        Metrictor.table_show(validRes, report)
    def get_optimizer(self, optimType, lr, weightDecay, momentum):
        """Build the optimizer named by ``optimType`` ('Adam'/'AdamW'/'SGD')
        over all parameters in ``self.moduleList``; returns None for any
        other value (no explicit error is raised)."""
        if optimType=='Adam':
            return torch.optim.Adam(self.moduleList.parameters(), lr=lr, weight_decay=weightDecay)
        elif optimType=='AdamW':
            return torch.optim.AdamW(self.moduleList.parameters(), lr=lr, weight_decay=weightDecay)
        elif optimType=='SGD':
            return torch.optim.SGD(self.moduleList.parameters(), lr=lr, momentum=momentum, weight_decay=weightDecay)
    def train(self, dataClass, trainSize=256, batchSize=256, epoch=100, stopRounds=10, earlyStop=10, saveRounds=1,
              optimType='Adam', preheat=5, lr1=0.001, lr2=0.00003, momentum=0.9, weightDecay=0, isHigherBetter=True, metrics="AUC", report=["ACC", "AUC"],
              savePath='model'):
        """Full training loop with two phases: a 'pre-heat' phase (frozen
        embeddings, Adam at lr1, ``preheat`` epochs) followed by normal
        training (all parameters, lr2).  Uses gradient accumulation
        (batchSize must be a multiple of trainSize), ReduceLROnPlateau on the
        validation metric, early stopping and best-checkpoint saving.
        Returns the final validation metric report (dict)."""
        dataClass.describe()
        assert batchSize%trainSize==0
        metrictor = Metrictor()
        self.stepCounter = 0
        # Number of micro-batches to accumulate before an optimizer step.
        self.stepUpdate = batchSize//trainSize
        self.preheat()
        optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, self.moduleList.parameters()), lr=lr1, weight_decay=weightDecay)
        schedulerRLR = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max' if isHigherBetter else 'min', factor=0.5, patience=4, verbose=True)
        trainStream = dataClass.random_batch_data_stream(batchSize=trainSize, type='train', sampleType=self.sampleType, device=self.device)
        itersPerEpoch = (dataClass.trainSampleNum+trainSize-1)//trainSize
        mtc,bestMtc,stopSteps = 0.0,0.0,0
        if dataClass.validSampleNum>0: validStream = dataClass.random_batch_data_stream(batchSize=trainSize, type='valid', sampleType=self.sampleType, device=self.device, log=True)
        st = time.time()
        print('Start pre-heat training:')
        for e in range(epoch):
            if e==preheat:
                # Switch from pre-heat to normal training: reload the best
                # pre-heat checkpoint, unfreeze embeddings, rebuild optimizer.
                if preheat>0:
                    self.load(savePath+'.pkl')
                self.normal()
                optimizer = self.get_optimizer(optimType=optimType, lr=lr2, weightDecay=weightDecay,momentum=momentum)
                #self.schedulerWU = ScheduledOptim(optimizer, lr2, 1000)
                schedulerRLR = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max' if isHigherBetter else 'min', factor=0.5, patience=30, verbose=True)
                print('Start normal training: ')
            for i in range(itersPerEpoch):
                self.to_train_mode()
                X,Y = next(trainStream)
                if X['res']:
                    loss = self._train_step(X,Y, optimizer)
                if stopRounds>0 and (e*itersPerEpoch+i+1)%stopRounds==0:
                    self.to_eval_mode()
                    print(f"After iters {e*itersPerEpoch+i+1}: [train] loss= {loss:.3f};", end='')
                    if dataClass.validSampleNum>0:
                        X,Y = next(validStream)
                        loss = self.calculate_loss(X,Y)
                        print(f' [valid] loss= {loss:.3f};', end='')
                    restNum = ((itersPerEpoch-i-1)+(epoch-e-1)*itersPerEpoch)*trainSize
                    speed = (e*itersPerEpoch+i+1)*trainSize/(time.time()-st)
                    print(" speed: %.3lf items/s; remaining time: %.3lfs;"%(speed, restNum/speed))
            if dataClass.validSampleNum>0 and (e+1)%saveRounds==0:
                self.to_eval_mode()
                print(f'========== Epoch:{e+1:5d} ==========')
                Y_pre,Y = self.calculate_y_prob_by_iterator(dataClass.one_epoch_batch_data_stream(trainSize, type='train', mode='predict', device=self.device))
                metrictor.set_data(Y_pre, Y)
                print(f'[Total Train]',end='')
                metrictor(report)
                print(f'[Total Valid]',end='')
                Y_pre,Y = self.calculate_y_prob_by_iterator(dataClass.one_epoch_batch_data_stream(trainSize, type='valid', mode='predict', device=self.device))
                metrictor.set_data(Y_pre, Y)
                res = metrictor(report)
                mtc = res[metrics]
                schedulerRLR.step(mtc)
                print('=================================')
                if (mtc>bestMtc and isHigherBetter) or (mtc<bestMtc and not isHigherBetter):
                    print(f'Bingo!!! Get a better Model with val {metrics}: {mtc:.3f}!!!')
                    bestMtc = mtc
                    self.save("%s.pkl"%savePath, e+1, bestMtc, dataClass)
                    stopSteps = 0
                else:
                    stopSteps += 1
                    if stopSteps>=earlyStop:
                        print(f'The val {metrics} has not improved for more than {earlyStop} steps in epoch {e+1}, stop training.')
                        break
        # Restore the best checkpoint and rename it with the best score.
        self.load("%s.pkl"%savePath)
        self.to_eval_mode()
        os.rename("%s.pkl"%savePath, "%s_%s.pkl"%(savePath, ("%.3lf"%bestMtc)[2:]))
        print(f'============ Result ============')
        print(f'[Total Train]',end='')
        Y_pre,Y = self.calculate_y_prob_by_iterator(dataClass.one_epoch_batch_data_stream(trainSize, type='train', mode='predict', device=self.device))
        metrictor.set_data(Y_pre, Y)
        metrictor(report)
        print(f'[Total Valid]',end='')
        Y_pre,Y = self.calculate_y_prob_by_iterator(dataClass.one_epoch_batch_data_stream(trainSize, type='valid', mode='predict', device=self.device))
        metrictor.set_data(Y_pre, Y)
        res = metrictor(report)
        if dataClass.testSampleNum>0:
            print(f'[Total Test]',end='')
            Y_pre,Y = self.calculate_y_prob_by_iterator(dataClass.one_epoch_batch_data_stream(trainSize, type='test', mode='predict', device=self.device))
            metrictor.set_data(Y_pre, Y)
            metrictor(report)
        #metrictor.each_class_indictor_show(dataClass.id2lab)
        print(f'================================')
        return res
    def reset_parameters(self):
        """Re-initialize every sub-module that exposes reset_parameters()."""
        for module in self.moduleList:
            for subModule in module.modules():
                if hasattr(subModule, "reset_parameters"):
                    subModule.reset_parameters()
    def save(self, path, epochs, bestMtc=None, dataClass=None):
        """Serialize every module's state_dict (plus bookkeeping) to ``path``."""
        stateDict = {'epochs':epochs, 'bestMtc':bestMtc}
        for module in self.moduleList:
            stateDict[module.name] = module.state_dict()
        if dataClass is not None:
            #stateDict['trainIdList'],stateDict['validIdList'],stateDict['testIdList'] = dataClass.trainIdList,dataClass.validIdList,dataClass.testIdList
            # NOTE(review): these conditions test membership in stateDict,
            # whose keys are module names — presumably they were meant to test
            # the dataClass vocabularies instead; confirm against load() below.
            if 'am2id' in stateDict:
                stateDict['am2id'],stateDict['id2am'] = dataClass.am2id,dataClass.id2am
            if 'go2id' in stateDict:
                stateDict['go2id'],stateDict['id2go'] = dataClass.go2id,dataClass.id2go
            if 'at2id' in stateDict:
                stateDict['at2id'],stateDict['id2at'] = dataClass.at2id,dataClass.id2at
        torch.save(stateDict, path)
        print('Model saved in "%s".'%path)
    def load(self, path, map_location=None, dataClass=None):
        """Restore module weights (and optionally dataClass vocabularies)
        from a checkpoint produced by :meth:`save`."""
        parameters = torch.load(path, map_location=map_location)
        for module in self.moduleList:
            module.load_state_dict(parameters[module.name])
        if dataClass is not None:
            # if "trainIdList" in parameters:
            #     dataClass.trainIdList = parameters['trainIdList']
            # if "validIdList" in parameters:
            #     dataClass.validIdList = parameters['validIdList']
            # if "testIdList" in parameters:
            #     dataClass.testIdList = parameters['testIdList']
            if 'am2id' in parameters:
                dataClass.am2id,dataClass.id2am = parameters['am2id'],parameters['id2am']
            if 'go2id' in parameters:
                dataClass.go2id,dataClass.id2go = parameters['go2id'],parameters['id2go']
            if 'at2id' in parameters:
                dataClass.at2id,dataClass.id2at = parameters['at2id'],parameters['id2at']
        print("%d epochs and %.3lf val Score 's model load finished."%(parameters['epochs'], parameters['bestMtc']))
    def _save_emb(self, path):
        # Persist only the embedding modules (used to warm-start CV folds).
        stateDict = {}
        for module in self.embModuleList:
            stateDict[module.name] = module.state_dict()
        torch.save(stateDict, path)
        print('Pre-trained Embedding saved in "%s".'%path)
    def _load_emb(self, path, map_location=None):
        # Counterpart of _save_emb: restore only embedding weights.
        parameters = torch.load(path, map_location=map_location)
        for module in self.embModuleList:
            module.load_state_dict(parameters[module.name])
        print('Pre-trained Embedding loaded in "%s".'%path)
    def preheat(self):
        # Freeze the fine-tunable embeddings (pre-heat phase).
        for param in self.finetunedEmbList.parameters():
            param.requires_grad = False
    def normal(self):
        # Unfreeze the fine-tunable embeddings (normal phase).
        for param in self.finetunedEmbList.parameters():
            param.requires_grad = True
    def calculate_y_prob(self, X, mode):
        """Sigmoid of the model logits (binary interaction probability)."""
        Y_pre = self.calculate_y_logit(X, mode)['y_logit']
        return torch.sigmoid(Y_pre)
    # def calculate_y(self, X):
    #     Y_pre = self.calculate_y_prob(X)
    #     return torch.argmax(Y_pre, dim=1)
    def calculate_loss(self, X, Y):
        """criterion(logits, Y) plus any auxiliary 'loss' the model returns."""
        out = self.calculate_y_logit(X, 'predict')
        Y_logit = out['y_logit']
        addLoss = 0.0
        if 'loss' in out: addLoss += out['loss']
        return self.criterion(Y_logit, Y) + addLoss
    def calculate_indicator_by_iterator(self, dataStream, classNum, report):
        """Run prediction over a stream and return the requested metrics."""
        metrictor = Metrictor(classNum)
        Y_prob_pre,Y = self.calculate_y_prob_by_iterator(dataStream)
        metrictor.set_data(Y_prob_pre, Y)
        return metrictor(report)
    def calculate_y_prob_by_iterator(self, dataStream):
        """Exhaust ``dataStream`` and return (Y_prob, Y) as stacked numpy arrays."""
        YArr,Y_preArr = [],[]
        while True:
            try:
                X,Y = next(dataStream)
            except:
                # NOTE(review): bare except also swallows KeyboardInterrupt and
                # real errors — StopIteration is presumably what is meant here.
                break
            Y_pre,Y = self.calculate_y_prob(X, mode='predict').cpu().data.numpy(),Y.cpu().data.numpy()
            YArr.append(Y)
            Y_preArr.append(Y_pre)
        YArr,Y_preArr = np.hstack(YArr).astype('int32'),np.hstack(Y_preArr).astype('float32')
        return Y_preArr, YArr
    # def calculate_y_by_iterator(self, dataStream):
    #     Y_preArr, YArr = self.calculate_y_prob_by_iterator(dataStream)
    #     return Y_preArr.argmax(axis=1), YArr
    def to_train_mode(self):
        # Put every sub-module in train mode (enables dropout/BN updates).
        for module in self.moduleList:
            module.train()
    def to_eval_mode(self):
        # Put every sub-module in eval mode.
        for module in self.moduleList:
            module.eval()
    def _train_step(self, X, Y, optimizer):
        """One micro-batch: scaled backward pass; the optimizer steps only
        every ``stepUpdate`` calls (gradient accumulation).  Returns the
        unscaled loss tensor."""
        self.stepCounter += 1
        if self.stepCounter<self.stepUpdate:
            p = False
        else:
            self.stepCounter = 0
            p = True
        loss = self.calculate_loss(X, Y)/self.stepUpdate
        loss.backward()
        if p:
            # Clip, step and reset only on accumulation boundaries.
            nn.utils.clip_grad_norm_(self.moduleList.parameters(), max_norm=20, norm_type=2)
            optimizer.step()
            optimizer.zero_grad()
        #self.schedulerWU.step_and_update_lr()
        #self.schedulerWU.zero_grad()
        return loss*self.stepUpdate
class DTI_E2E(BaseClassifier):
    """End-to-end DTI model: proteins are encoded from the amino-acid sequence
    (LSTM) concatenated with GO-term embeddings; drugs from their atom graph
    (GCN).  A bilinear cross-attention (matrix U) pools both sides before the
    final dot-product score.
    """
    def __init__(self, amEmbedding, goEmbedding, atEmbedding, seqMaxLen,
                 rnnHiddenSize=16, gcnHiddenSize=64, gcnSkip=3, fcHiddenSize=32, dropout=0.1, gama=0.0005,
                 embFreeze=False, sampleType='PWRL', device=torch.device('cuda')):
        # NOTE(review): the device default is evaluated once at class-definition
        # time and assumes CUDA — pass device explicitly on CPU-only machines.
        self.amEmbedding = TextEmbedding(torch.tensor(amEmbedding,dtype=torch.float32), dropout, freeze=embFreeze, name='amEmbedding').to(device)
        self.goEmbedding = TextEmbedding(torch.tensor(goEmbedding,dtype=torch.float32), dropout, freeze=False, name='goEmbedding').to(device)
        self.atEmbedding = TextEmbedding(torch.tensor(atEmbedding,dtype=torch.float32), dropout, freeze=embFreeze, name='atEmbedding').to(device)
        self.pBiLSTM = TextLSTM(amEmbedding.shape[1], rnnHiddenSize, bidirectional=False, name='pBiLSTM').to(device)
        self.dGCN = GCN(atEmbedding.shape[0], atEmbedding.shape[1], gcnHiddenSize, [gcnHiddenSize]*(gcnSkip-1), name='dGCN').to(device)
        self.U = MLP(gcnHiddenSize,rnnHiddenSize, dropout=0.0, name='U').to(device)
        self.pFcLinear = MLP(rnnHiddenSize, fcHiddenSize, [fcHiddenSize]*2, dropout=dropout, name='pFcLinear').to(device)
        self.dFcLinear = MLP(gcnHiddenSize, fcHiddenSize, [fcHiddenSize]*2, dropout=dropout, name='dFcLinear').to(device)
        # Pairwise ranking loss for 'PWRL' sampling, plain BCE otherwise.
        self.criterion = PairWiseRankingLoss(gama) if sampleType=='PWRL' else torch.nn.BCEWithLogitsLoss()
        self.embModuleList = nn.ModuleList([self.amEmbedding,self.goEmbedding,self.atEmbedding])
        self.finetunedEmbList = nn.ModuleList([self.amEmbedding,self.goEmbedding,self.atEmbedding])
        self.moduleList = nn.ModuleList([self.amEmbedding,self.goEmbedding,self.atEmbedding,
                                       self.pBiLSTM,self.dGCN,self.U,self.pFcLinear,self.dFcLinear])
        self.sampleType = sampleType
        self.device = device
    def calculate_y_logit(self, X, mode='train'):
        """In 'train' mode every protein is scored against every drug in the
        batch (batchSize1 × batchSize2 logit matrix); otherwise proteins and
        drugs are matched pairwise.  Shapes are tracked inline below."""
        Xam,Xgo,Xat = X['aminoSeq'],X['goSeq'],X['atomGra'] # => batchSize1 × amSeqLen, batchSize1 × goSeqLen, batchSize2 × nodeNum × nodeNum
        Xam = self.amEmbedding(Xam) # => batchSize1 × amSeqLen × amSize, batchSize1 × goSeqLen × goSize
        Xgo = self.goEmbedding(Xgo)
        Xam = self.pBiLSTM(Xam) # => batchSize1 × amSeqLen × rnnHiddenSize
        P = torch.cat([Xam, Xgo], dim=1) # => batchSize1 × (amSeqLen+goSeqLen) × rnnHiddenSize
        D = self.dGCN(self.atEmbedding.embedding.weight, Xat) # => batchSize2 × nodeNum × gcnHiddenSize
        if mode=='train':
            P = P.unsqueeze(dim=1) # => batchSize1 × 1 × (amSeqLen+goSeqLen) × rnnHiddenSize
            D = D.unsqueeze(dim=0) # => 1 × batchSize2 × nodeNum × gcnHiddenSize
        # Bilinear cross-attention scores between protein tokens and drug atoms.
        alpha = F.tanh( torch.matmul(torch.matmul(P,self.U.out.weight),D.transpose(-1,-2)) ) # => batchSize1 × batchSize2 × (amSeqLen+goSeqLen) × nodeNum
        pAlpha,_ = torch.max(alpha, dim=-1) # => batchSize1 × batchSize2 × (amSeqLen+goSeqLen)
        dAlpha,_ = torch.max(alpha, dim=-2) # => batchSize1 × batchSize2 × nodeNum
        pAlpha = F.softmax(pAlpha, dim=-1).unsqueeze(dim=-2) # => batchSize1 × batchSize2 × 1 × (amSeqLen+goSeqLen)
        dAlpha = F.softmax(dAlpha, dim=-1).unsqueeze(dim=-2) # => batchSize1 × batchSize2 × 1 × nodeNum
        # Attention-weighted pooling of each side.
        Xp,Xd = torch.matmul(pAlpha,P).squeeze(dim=-2),torch.matmul(dAlpha,D).squeeze(dim=-2) # => batchSize1 × batchSiz2 × rnnHiddenSize, batchSize1 × batchSize2 × gcnHiddenSize
        Xp,Xd = self.pFcLinear(Xp),self.dFcLinear(Xd) # => batchSize1 × batchSiz2 × fcHiddenSize, batchSize1 × batchSiz2 × fcHiddenSize
        return {"y_logit":torch.sum(Xp*Xd, dim=-1)} # => batchSize1 × batchSiz2
class DTI_E2E_nogo(BaseClassifier):
    """Ablation variant of DTI_E2E without GO-term features: the protein side
    uses only the amino-acid sequence (LSTM); everything else matches DTI_E2E.
    """
    def __init__(self, amEmbedding, atEmbedding, seqMaxLen,
                 rnnHiddenSize=16, gcnHiddenSize=64, gcnSkip=3, fcHiddenSize=32, dropout=0.1, gama=0.0005,
                 embFreeze=False, sampleType='PWRL', device=torch.device('cuda')):
        # NOTE(review): the device default is evaluated once at class-definition
        # time and assumes CUDA — pass device explicitly on CPU-only machines.
        self.amEmbedding = TextEmbedding(torch.tensor(amEmbedding,dtype=torch.float32), dropout, freeze=embFreeze, name='amEmbedding').to(device)
        self.atEmbedding = TextEmbedding(torch.tensor(atEmbedding,dtype=torch.float32), dropout, freeze=embFreeze, name='atEmbedding').to(device)
        self.pBiLSTM = TextLSTM(amEmbedding.shape[1], rnnHiddenSize, bidirectional=False, name='pBiLSTM').to(device)
        self.dGCN = GCN(atEmbedding.shape[0], atEmbedding.shape[1], gcnHiddenSize, [gcnHiddenSize]*(gcnSkip-1), name='dGCN').to(device)
        self.U = MLP(gcnHiddenSize,rnnHiddenSize, dropout=0.0, name='U').to(device)
        self.pFcLinear = MLP(rnnHiddenSize, fcHiddenSize, [fcHiddenSize]*2, dropout=dropout, name='pFcLinear').to(device)
        self.dFcLinear = MLP(gcnHiddenSize, fcHiddenSize, [fcHiddenSize]*2, dropout=dropout, name='dFcLinear').to(device)
        # Pairwise ranking loss for 'PWRL' sampling, plain BCE otherwise.
        self.criterion = PairWiseRankingLoss(gama) if sampleType=='PWRL' else torch.nn.BCEWithLogitsLoss()
        self.embModuleList = nn.ModuleList([self.amEmbedding,self.atEmbedding])
        self.finetunedEmbList = nn.ModuleList([self.amEmbedding,self.atEmbedding])
        self.moduleList = nn.ModuleList([self.amEmbedding,self.atEmbedding,
                                       self.pBiLSTM,self.dGCN,self.U,self.pFcLinear,self.dFcLinear])
        self.sampleType = sampleType
        self.device = device
    def calculate_y_logit(self, X, mode='train'):
        """Same cross-attention scoring as DTI_E2E but the protein token
        sequence P comes from the LSTM output alone (no GO terms)."""
        Xam,Xat = X['aminoSeq'],X['atomGra'] # => batchSize1 × amSeqLen, batchSize1 × goSeqLen, batchSize2 × nodeNum × nodeNum
        Xam = self.amEmbedding(Xam) # => batchSize1 × amSeqLen × amSize, batchSize1 × goSeqLen × goSize
        Xam = self.pBiLSTM(Xam) # => batchSize1 × amSeqLen × rnnHiddenSize
        P = Xam # => batchSize1 × (amSeqLen+goSeqLen) × rnnHiddenSize
        D = self.dGCN(self.atEmbedding.embedding.weight, Xat) # => batchSize2 × nodeNum × gcnHiddenSize
        if mode=='train':
            P = P.unsqueeze(dim=1) # => batchSize1 × 1 × (amSeqLen+goSeqLen) × rnnHiddenSize
            D = D.unsqueeze(dim=0) # => 1 × batchSize2 × nodeNum × gcnHiddenSize
        alpha = F.tanh( torch.matmul(torch.matmul(P,self.U.out.weight),D.transpose(-1,-2)) ) # => batchSize1 × batchSize2 × (amSeqLen+goSeqLen) × nodeNum
        pAlpha,_ = torch.max(alpha, dim=-1) # => batchSize1 × batchSize2 × (amSeqLen+goSeqLen)
        dAlpha,_ = torch.max(alpha, dim=-2) # => batchSize1 × batchSize2 × nodeNum
        pAlpha = F.softmax(pAlpha, dim=-1).unsqueeze(dim=-2) # => batchSize1 × batchSize2 × 1 × (amSeqLen+goSeqLen)
        dAlpha = F.softmax(dAlpha, dim=-1).unsqueeze(dim=-2) # => batchSize1 × batchSize2 × 1 × nodeNum
        Xp,Xd = torch.matmul(pAlpha,P).squeeze(dim=-2),torch.matmul(dAlpha,D).squeeze(dim=-2) # => batchSize1 × batchSiz2 × rnnHiddenSize, batchSize1 × batchSize2 × gcnHiddenSize
        Xp,Xd = self.pFcLinear(Xp),self.dFcLinear(Xd) # => batchSize1 × batchSiz2 × fcHiddenSize, batchSize1 × batchSiz2 × fcHiddenSize
        return {"y_logit":torch.sum(Xp*Xd, dim=-1)} # => batchSize1 × batchSiz2
class DTI_Bridge(BaseClassifier):
    """Bridge-node DTI model: protein encoders (k-mer MLP + CNN over one-hot
    amino acids) and drug encoders (fingerprint MLP + CNN over atom features)
    are joined with ``nodeNum`` learnable bridge nodes; one GCN pass over a
    cosine-similarity graph (symmetrically normalized) mixes protein, drug
    and bridge nodes, and a final MLP scores each protein-drug pair.
    """
    def __init__(self, outSize,
                 cHiddenSizeList,
                 fHiddenSizeList,
                 fSize=1024, cSize=8422,
                 gcnHiddenSizeList=None, fcHiddenSizeList=None, nodeNum=32, resnet=True,
                 hdnDropout=0.1, fcDropout=0.2, device=None, sampleType='CEL',
                 useFeatures=None,
                 maskDTI=False):
        """Build all sub-modules on ``device``.

        Backward-compatible fixes vs. the original signature: the mutable
        default arguments (``[]`` for the two hidden-size lists and a dict
        for ``useFeatures``) and the ``torch.device('cuda')`` default — which
        was constructed once at definition time — are replaced by ``None``
        sentinels resolved here; passing explicit values behaves as before.
        """
        if gcnHiddenSizeList is None:
            gcnHiddenSizeList = []
        if fcHiddenSizeList is None:
            fcHiddenSizeList = []
        if device is None:
            device = torch.device('cuda')
        if useFeatures is None:
            useFeatures = {"kmers":True,"pSeq":True,"FP":True,"dSeq":True}
        # Learnable bridge-node embeddings, N(0,1)-initialized (nodeNum × outSize).
        self.nodeEmbedding = TextEmbedding(torch.tensor(np.random.normal(size=(max(nodeNum,0),outSize)), dtype=torch.float32), dropout=hdnDropout, name='nodeEmbedding').to(device)
        # Protein branch: frozen one-hot (24 amino-acid tokens) -> CNN -> MLP.
        self.amEmbedding = TextEmbedding(torch.eye(24), dropout=hdnDropout, freeze=True, name='amEmbedding').to(device)
        self.pCNN = TextCNN(24, 64, [25], ln=True, name='pCNN').to(device)
        self.pFcLinear = MLP(64, outSize, dropout=hdnDropout, bnEveryLayer=True, dpEveryLayer=True, outBn=True, outAct=True, outDp=True, name='pFcLinear').to(device)
        # Drug branch: atom feature sequence (75-dim) -> CNN -> MLP.
        self.dCNN = TextCNN(75, 64, [7], ln=True, name='dCNN').to(device)
        self.dFcLinear = MLP(64, outSize, dropout=hdnDropout, bnEveryLayer=True, dpEveryLayer=True, outBn=True, outAct=True, outDp=True, name='dFcLinear').to(device)
        # Whole-molecule fingerprint (fSize) and protein k-mer counts (cSize).
        self.fFcLinear = MLP(fSize, outSize, fHiddenSizeList, outAct=True, name='fFcLinear', dropout=hdnDropout, dpEveryLayer=True, outDp=True, bnEveryLayer=True, outBn=True).to(device)
        self.cFcLinear = MLP(cSize, outSize, cHiddenSizeList, outAct=True, name='cFcLinear', dropout=hdnDropout, dpEveryLayer=True, outDp=True, bnEveryLayer=True, outBn=True).to(device)
        self.nodeGCN = GCN(outSize, outSize, gcnHiddenSizeList, name='nodeGCN', dropout=hdnDropout, dpEveryLayer=True, outDp=True, bnEveryLayer=True, outBn=True, resnet=resnet).to(device)
        self.fcLinear = MLP(outSize, 1, fcHiddenSizeList, dropout=fcDropout, bnEveryLayer=True, dpEveryLayer=True).to(device)
        self.criterion = nn.BCEWithLogitsLoss()
        self.embModuleList = nn.ModuleList([])
        self.finetunedEmbList = nn.ModuleList([])
        self.moduleList = nn.ModuleList([self.nodeEmbedding,self.cFcLinear,self.fFcLinear,self.nodeGCN,self.fcLinear,
                                       self.amEmbedding, self.pCNN, self.pFcLinear, self.dCNN, self.dFcLinear])
        self.sampleType = sampleType
        self.device = device
        self.resnet = resnet
        self.nodeNum = nodeNum
        self.hdnDropout = hdnDropout
        self.useFeatures = useFeatures
        self.maskDTI = maskDTI
    def calculate_y_logit(self, X, mode='train'):
        """Return {'y_logit': tensor of shape (batchSize,)} for the paired
        samples in X (keys: 'aminoCtr', 'aminoSeq', 'atomFin', 'atomFea');
        individual feature groups are toggled via self.useFeatures."""
        # Protein node: sum of the enabled protein features. => batchSize × 1 × outSize
        Xam = (self.cFcLinear(X['aminoCtr']).unsqueeze(1) if self.useFeatures['kmers'] else 0) + \
              (self.pFcLinear(self.pCNN(self.amEmbedding(X['aminoSeq']))).unsqueeze(1) if self.useFeatures['pSeq'] else 0)
        # Drug node: sum of the enabled drug features. => batchSize × 1 × outSize
        Xat = (self.fFcLinear(X['atomFin']).unsqueeze(1) if self.useFeatures['FP'] else 0) + \
              (self.dFcLinear(self.dCNN(X['atomFea'])).unsqueeze(1) if self.useFeatures['dSeq'] else 0)
        if self.nodeNum>0:
            node = self.nodeEmbedding.dropout2(self.nodeEmbedding.dropout1(self.nodeEmbedding.embedding.weight)).repeat(len(Xat), 1, 1)
            # Graph nodes = [protein, drug, bridge...]. => batchSize × nodeNum × outSize
            node = torch.cat([Xam, Xat, node], dim=1)
            # Cosine-similarity adjacency, clamped to >=0, unit self-loops.
            nodeDist = torch.sqrt(torch.sum(node**2,dim=2,keepdim=True)+1e-8) # => batchSize × nodeNum × 1
            cosNode = torch.matmul(node,node.transpose(1,2)) / (nodeDist*nodeDist.transpose(1,2)+1e-8) # => batchSize × nodeNum × nodeNum
            cosNode = F.relu(cosNode)
            cosNode[:,range(node.shape[1]),range(node.shape[1])] = 1
            # Optionally hide the direct protein-drug edge (nodes 0 and 1).
            if self.maskDTI: cosNode[:,0,1] = cosNode[:,1,0] = 0
            # Symmetric normalization pL = D^{-1/2} A D^{-1/2}.
            D = torch.eye(node.shape[1], dtype=torch.float32, device=self.device).repeat(len(Xam),1,1)
            D[:,range(node.shape[1]),range(node.shape[1])] = 1/(torch.sum(cosNode,dim=2)**0.5)
            pL = torch.matmul(torch.matmul(D,cosNode),D) # => batchSize × nodeNum × nodeNum
            node_gcned = self.nodeGCN(node, pL) # => batchSize × nodeNum × outSize
            # Pair embedding: elementwise product of the protein (0) and drug (1) nodes.
            node_embed = node_gcned[:,0,:]*node_gcned[:,1,:] # => batchSize × outSize
        else:
            # No bridge nodes: interact the two features directly.
            node_embed = (Xam*Xat).squeeze(dim=1) # => batchSize × outSize
        return {"y_logit":self.fcLinear(node_embed).squeeze(dim=1)}
def get_index(seqData, sP, sD):
    """Partition interaction records by whether their protein (``item[0]``)
    and drug (``item[1]``) are 'seen' (present in ``sP`` / ``sD``).

    :param seqData: iterable of records where ``item[0]`` is a protein id and
        ``item[1]`` a drug id (iterated four times, so pass a sequence).
    :param sP: collection of seen protein ids.
    :param sD: collection of seen drug ids.
    :return: four boolean lists, each parallel to ``seqData``:
        (seenP&seenD, seenP&unseenD, unseenP&seenD, unseenP&unseenD).
    """
    # Hoist membership tests to sets once: O(1) lookups instead of O(n)
    # scans when sP/sD are lists (behavior is unchanged).
    sP, sD = set(sP), set(sD)
    sPsD = [i[0] in sP and i[1] in sD for i in seqData]
    sPuD = [i[0] in sP and i[1] not in sD for i in seqData]
    uPsD = [i[0] not in sP and i[1] in sD for i in seqData]
    uPuD = [i[0] not in sP and i[1] not in sD for i in seqData]
    return sPsD,sPuD,uPsD,uPuD
return sPsD,sPuD,uPsD,uPuD | [
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"numpy.hstack",
"torch.nn.ModuleList",
"os.rename",
"torch.load",
"torch.sigmoid",
"torch.max",
"torch.eye",
"sklearn.model_selection.StratifiedKFold",
"torch.tensor",
"torch.sum",
"torch.matmul",
"torch.save",
"torch.nn.BCEWithLogitsLoss",
"... | [((791, 855), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'kFold', 'random_state': 'seed', 'shuffle': '(True)'}), '(n_splits=kFold, random_state=seed, shuffle=True)\n', (806, 855), False, 'from sklearn.model_selection import StratifiedKFold, KFold\n'), ((2248, 2302), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'kFold', 'random_state': 'seed', 'shuffle': '(True)'}), '(n_splits=kFold, random_state=seed, shuffle=True)\n', (2253, 2302), False, 'from sklearn.model_selection import StratifiedKFold, KFold\n'), ((4611, 4747), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {'mode': "('max' if isHigherBetter else 'min')", 'factor': '(0.5)', 'patience': '(4)', 'verbose': '(True)'}), "(optimizer, mode='max' if\n isHigherBetter else 'min', factor=0.5, patience=4, verbose=True)\n", (4653, 4747), False, 'import torch, time, os, pickle, random\n'), ((5194, 5205), 'time.time', 'time.time', ([], {}), '()\n', (5203, 5205), False, 'import torch, time, os, pickle, random\n'), ((8268, 8354), 'os.rename', 'os.rename', (["('%s.pkl' % savePath)", "('%s_%s.pkl' % (savePath, ('%.3lf' % bestMtc)[2:]))"], {}), "('%s.pkl' % savePath, '%s_%s.pkl' % (savePath, ('%.3lf' % bestMtc)\n [2:]))\n", (8277, 8354), False, 'import torch, time, os, pickle, random\n'), ((10362, 10389), 'torch.save', 'torch.save', (['stateDict', 'path'], {}), '(stateDict, path)\n', (10372, 10389), False, 'import torch, time, os, pickle, random\n'), ((10515, 10558), 'torch.load', 'torch.load', (['path'], {'map_location': 'map_location'}), '(path, map_location=map_location)\n', (10525, 10558), False, 'import torch, time, os, pickle, random\n'), ((11693, 11720), 'torch.save', 'torch.save', (['stateDict', 'path'], {}), '(stateDict, path)\n', (11703, 11720), False, 'import torch, time, os, pickle, random\n'), ((11851, 11894), 'torch.load', 'torch.load', (['path'], {'map_location': 'map_location'}), '(path, 
map_location=map_location)\n', (11861, 11894), False, 'import torch, time, os, pickle, random\n'), ((12410, 12430), 'torch.sigmoid', 'torch.sigmoid', (['Y_pre'], {}), '(Y_pre)\n', (12423, 12430), False, 'import torch, time, os, pickle, random\n'), ((14780, 14800), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (14792, 14800), False, 'import torch, time, os, pickle, random\n'), ((15954, 16023), 'torch.nn.ModuleList', 'nn.ModuleList', (['[self.amEmbedding, self.goEmbedding, self.atEmbedding]'], {}), '([self.amEmbedding, self.goEmbedding, self.atEmbedding])\n', (15967, 16023), True, 'from torch import nn as nn\n'), ((16054, 16123), 'torch.nn.ModuleList', 'nn.ModuleList', (['[self.amEmbedding, self.goEmbedding, self.atEmbedding]'], {}), '([self.amEmbedding, self.goEmbedding, self.atEmbedding])\n', (16067, 16123), True, 'from torch import nn as nn\n'), ((16148, 16287), 'torch.nn.ModuleList', 'nn.ModuleList', (['[self.amEmbedding, self.goEmbedding, self.atEmbedding, self.pBiLSTM, self.\n dGCN, self.U, self.pFcLinear, self.dFcLinear]'], {}), '([self.amEmbedding, self.goEmbedding, self.atEmbedding, self.\n pBiLSTM, self.dGCN, self.U, self.pFcLinear, self.dFcLinear])\n', (16161, 16287), True, 'from torch import nn as nn\n'), ((16804, 16832), 'torch.cat', 'torch.cat', (['[Xam, Xgo]'], {'dim': '(1)'}), '([Xam, Xgo], dim=1)\n', (16813, 16832), False, 'import torch, time, os, pickle, random\n'), ((17382, 17406), 'torch.max', 'torch.max', (['alpha'], {'dim': '(-1)'}), '(alpha, dim=-1)\n', (17391, 17406), False, 'import torch, time, os, pickle, random\n'), ((17477, 17501), 'torch.max', 'torch.max', (['alpha'], {'dim': '(-2)'}), '(alpha, dim=-2)\n', (17486, 17501), False, 'import torch, time, os, pickle, random\n'), ((18423, 18443), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (18435, 18443), False, 'import torch, time, os, pickle, random\n'), ((19455, 19506), 'torch.nn.ModuleList', 'nn.ModuleList', (['[self.amEmbedding, 
self.atEmbedding]'], {}), '([self.amEmbedding, self.atEmbedding])\n', (19468, 19506), True, 'from torch import nn as nn\n'), ((19538, 19589), 'torch.nn.ModuleList', 'nn.ModuleList', (['[self.amEmbedding, self.atEmbedding]'], {}), '([self.amEmbedding, self.atEmbedding])\n', (19551, 19589), True, 'from torch import nn as nn\n'), ((19615, 19735), 'torch.nn.ModuleList', 'nn.ModuleList', (['[self.amEmbedding, self.atEmbedding, self.pBiLSTM, self.dGCN, self.U, self.\n pFcLinear, self.dFcLinear]'], {}), '([self.amEmbedding, self.atEmbedding, self.pBiLSTM, self.dGCN,\n self.U, self.pFcLinear, self.dFcLinear])\n', (19628, 19735), True, 'from torch import nn as nn\n'), ((20756, 20780), 'torch.max', 'torch.max', (['alpha'], {'dim': '(-1)'}), '(alpha, dim=-1)\n', (20765, 20780), False, 'import torch, time, os, pickle, random\n'), ((20851, 20875), 'torch.max', 'torch.max', (['alpha'], {'dim': '(-2)'}), '(alpha, dim=-2)\n', (20860, 20875), False, 'import torch, time, os, pickle, random\n'), ((21851, 21871), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (21863, 21871), False, 'import torch, time, os, pickle, random\n'), ((23538, 23560), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (23558, 23560), True, 'from torch import nn as nn\n'), ((23599, 23616), 'torch.nn.ModuleList', 'nn.ModuleList', (['[]'], {}), '([])\n', (23612, 23616), True, 'from torch import nn as nn\n'), ((23649, 23666), 'torch.nn.ModuleList', 'nn.ModuleList', (['[]'], {}), '([])\n', (23662, 23666), True, 'from torch import nn as nn\n'), ((23693, 23870), 'torch.nn.ModuleList', 'nn.ModuleList', (['[self.nodeEmbedding, self.cFcLinear, self.fFcLinear, self.nodeGCN, self.\n fcLinear, self.amEmbedding, self.pCNN, self.pFcLinear, self.dCNN, self.\n dFcLinear]'], {}), '([self.nodeEmbedding, self.cFcLinear, self.fFcLinear, self.\n nodeGCN, self.fcLinear, self.amEmbedding, self.pCNN, self.pFcLinear,\n self.dCNN, self.dFcLinear])\n', (23706, 23870), True, 'from torch import nn 
as nn\n'), ((15896, 15924), 'torch.nn.BCEWithLogitsLoss', 'torch.nn.BCEWithLogitsLoss', ([], {}), '()\n', (15922, 15924), False, 'import torch, time, os, pickle, random\n'), ((18104, 18130), 'torch.sum', 'torch.sum', (['(Xp * Xd)'], {'dim': '(-1)'}), '(Xp * Xd, dim=-1)\n', (18113, 18130), False, 'import torch, time, os, pickle, random\n'), ((19397, 19425), 'torch.nn.BCEWithLogitsLoss', 'torch.nn.BCEWithLogitsLoss', ([], {}), '()\n', (19423, 19425), False, 'import torch, time, os, pickle, random\n'), ((21478, 21504), 'torch.sum', 'torch.sum', (['(Xp * Xd)'], {'dim': '(-1)'}), '(Xp * Xd, dim=-1)\n', (21487, 21504), False, 'import torch, time, os, pickle, random\n'), ((24845, 24879), 'torch.cat', 'torch.cat', (['[Xam, Xat, node]'], {'dim': '(1)'}), '([Xam, Xat, node], dim=1)\n', (24854, 24879), False, 'import torch, time, os, pickle, random\n'), ((5636, 5773), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {'mode': "('max' if isHigherBetter else 'min')", 'factor': '(0.5)', 'patience': '(30)', 'verbose': '(True)'}), "(optimizer, mode='max' if\n isHigherBetter else 'min', factor=0.5, patience=30, verbose=True)\n", (5678, 5773), False, 'import torch, time, os, pickle, random\n'), ((17246, 17280), 'torch.matmul', 'torch.matmul', (['P', 'self.U.out.weight'], {}), '(P, self.U.out.weight)\n', (17258, 17280), False, 'import torch, time, os, pickle, random\n'), ((20620, 20654), 'torch.matmul', 'torch.matmul', (['P', 'self.U.out.weight'], {}), '(P, self.U.out.weight)\n', (20632, 20654), False, 'import torch, time, os, pickle, random\n'), ((25707, 25731), 'torch.matmul', 'torch.matmul', (['D', 'cosNode'], {}), '(D, cosNode)\n', (25719, 25731), False, 'import torch, time, os, pickle, random\n'), ((13456, 13471), 'numpy.hstack', 'np.hstack', (['YArr'], {}), '(YArr)\n', (13465, 13471), True, 'import numpy as np\n'), ((13488, 13507), 'numpy.hstack', 'np.hstack', (['Y_preArr'], {}), '(Y_preArr)\n', (13497, 13507), True, 
'import numpy as np\n'), ((14844, 14890), 'torch.tensor', 'torch.tensor', (['amEmbedding'], {'dtype': 'torch.float32'}), '(amEmbedding, dtype=torch.float32)\n', (14856, 14890), False, 'import torch, time, os, pickle, random\n'), ((14990, 15036), 'torch.tensor', 'torch.tensor', (['goEmbedding'], {'dtype': 'torch.float32'}), '(goEmbedding, dtype=torch.float32)\n', (15002, 15036), False, 'import torch, time, os, pickle, random\n'), ((15132, 15178), 'torch.tensor', 'torch.tensor', (['atEmbedding'], {'dtype': 'torch.float32'}), '(atEmbedding, dtype=torch.float32)\n', (15144, 15178), False, 'import torch, time, os, pickle, random\n'), ((17779, 17802), 'torch.matmul', 'torch.matmul', (['pAlpha', 'P'], {}), '(pAlpha, P)\n', (17791, 17802), False, 'import torch, time, os, pickle, random\n'), ((17818, 17841), 'torch.matmul', 'torch.matmul', (['dAlpha', 'D'], {}), '(dAlpha, D)\n', (17830, 17841), False, 'import torch, time, os, pickle, random\n'), ((18487, 18533), 'torch.tensor', 'torch.tensor', (['amEmbedding'], {'dtype': 'torch.float32'}), '(amEmbedding, dtype=torch.float32)\n', (18499, 18533), False, 'import torch, time, os, pickle, random\n'), ((18633, 18679), 'torch.tensor', 'torch.tensor', (['atEmbedding'], {'dtype': 'torch.float32'}), '(atEmbedding, dtype=torch.float32)\n', (18645, 18679), False, 'import torch, time, os, pickle, random\n'), ((21153, 21176), 'torch.matmul', 'torch.matmul', (['pAlpha', 'P'], {}), '(pAlpha, P)\n', (21165, 21176), False, 'import torch, time, os, pickle, random\n'), ((21192, 21215), 'torch.matmul', 'torch.matmul', (['dAlpha', 'D'], {}), '(dAlpha, D)\n', (21204, 21215), False, 'import torch, time, os, pickle, random\n'), ((22246, 22259), 'torch.eye', 'torch.eye', (['(24)'], {}), '(24)\n', (22255, 22259), False, 'import torch, time, os, pickle, random\n'), ((24949, 24990), 'torch.sum', 'torch.sum', (['(node ** 2)'], {'dim': '(2)', 'keepdim': '(True)'}), '(node ** 2, dim=2, keepdim=True)\n', (24958, 24990), False, 'import torch, time, os, 
pickle, random\n'), ((25460, 25525), 'torch.eye', 'torch.eye', (['node.shape[1]'], {'dtype': 'torch.float32', 'device': 'self.device'}), '(node.shape[1], dtype=torch.float32, device=self.device)\n', (25469, 25525), False, 'import torch, time, os, pickle, random\n'), ((25646, 25671), 'torch.sum', 'torch.sum', (['cosNode'], {'dim': '(2)'}), '(cosNode, dim=2)\n', (25655, 25671), False, 'import torch, time, os, pickle, random\n'), ((6613, 6624), 'time.time', 'time.time', ([], {}), '()\n', (6622, 6624), False, 'import torch, time, os, pickle, random\n')] |
import os
import tempfile
import unittest
import shutil
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from trixi.logger.visdom import PytorchVisdomLogger
from trixi.logger.visdom.numpyvisdomlogger import start_visdom
class TestPytorchVisdomLogger(unittest.TestCase):
    """Integration tests for :class:`PytorchVisdomLogger`.

    The tests push torch tensors / numpy arrays to a live visdom server,
    exercising both the public torch-aware API and the underlying
    name-mangled numpy implementations (``_NumpyVisdomLogger__show_*``).
    """

    @classmethod
    def setUpClass(cls):
        super(TestPytorchVisdomLogger, cls).setUpClass()
        try:
            start_visdom()
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # still propagate; an already-running server is the expected
            # failure mode here.
            print("Could not start visdom, it might be already running.")

    def setUp(self):
        # Fresh logger per test so plot windows/state stay independent.
        self.visdomLogger = PytorchVisdomLogger()

    def test_show_image(self):
        image = np.random.random_sample((3, 128, 128))  # single CHW image
        tensor = torch.from_numpy(image)
        self.visdomLogger._NumpyVisdomLogger__show_image(tensor.numpy(), "image")

    def test_show_images(self):
        images = np.random.random_sample((4, 3, 128, 128))  # NCHW batch
        tensors = torch.from_numpy(images)
        self.visdomLogger._NumpyVisdomLogger__show_images(tensors.numpy(), "image")

    def test_show_image_grid(self):
        images = np.random.random_sample((4, 3, 128, 128))
        tensor = torch.from_numpy(images)
        self.visdomLogger._PytorchVisdomLogger__show_image_grid(tensor, "image_grid")

    def test_show_barplot(self):
        tensor = torch.from_numpy(np.random.random_sample(5))
        self.visdomLogger.show_barplot(tensor, name="barplot")
        self.visdomLogger._NumpyVisdomLogger__show_barplot(tensor.numpy(), name="barplot")

    def test_show_lineplot(self):
        x = [0, 1, 2, 3, 4, 5]
        y = np.random.random_sample(6)
        self.visdomLogger.show_lineplot(y, x, name="lineplot1")
        self.visdomLogger._NumpyVisdomLogger__show_lineplot(y, x, name="lineplot1")

    def test_show_piechart(self):
        array = torch.from_numpy(np.random.random_sample(5))
        self.visdomLogger.show_piechart(array, name="piechart")
        self.visdomLogger._NumpyVisdomLogger__show_piechart(array, name="piechart")

    def test_show_scatterplot(self):
        array = torch.from_numpy(np.random.random_sample((5, 2)))
        self.visdomLogger.show_scatterplot(array, name="scatterplot")
        self.visdomLogger._NumpyVisdomLogger__show_scatterplot(array.numpy(), name="scatterplot")

    def test_show_value(self):
        val = torch.from_numpy(np.random.random_sample(1))
        self.visdomLogger.show_value(val, "value")
        self.visdomLogger._NumpyVisdomLogger__show_value(val.numpy(), "value")
        # Repeated calls extend the same value plot; `counter` sets the x-index.
        val = torch.from_numpy(np.random.random_sample(1))
        self.visdomLogger.show_value(val, "value")
        val = torch.from_numpy(np.random.random_sample(1))
        self.visdomLogger.show_value(val, "value", counter=4)

    def test_show_text(self):
        text = "\nTest 4 fun: zD ;-D 0o"
        self.visdomLogger.show_text(text)
        self.visdomLogger._NumpyVisdomLogger__show_text(text)

    def test_get_roc_curve(self):
        array = np.random.random_sample(100)
        labels = np.random.choice((0, 1), 100)
        self.visdomLogger.show_roc_curve(array, labels, name="roc")

    def test_get_pr_curve(self):
        array = np.random.random_sample(100)
        labels = np.random.choice((0, 1), 100)
        self.visdomLogger.show_roc_curve(array, labels, name="pr")

    def test_get_classification_metric(self):
        array = np.random.random_sample(100)
        labels = np.random.choice((0, 1), 100)
        self.visdomLogger.show_classification_metrics(array, labels, metric=("roc-auc", "pr-score"),
                                                      name="classification-metrics")

    def test_show_image_gradient(self):
        # Gradient visualizations need a real model, an input, and an error fn.
        net = Net()
        random_input = torch.from_numpy(np.random.randn(28 * 28).reshape((1, 1, 28, 28))).float()
        fake_labels = torch.from_numpy(np.array([2])).long()
        criterion = torch.nn.CrossEntropyLoss()
        err_fn = lambda x: criterion(x, fake_labels)
        # Brief pause between the four gradient plots.
        self.visdomLogger.show_image_gradient(name="grads-vanilla", model=net, inpt=random_input, err_fn=err_fn,
                                              grad_type="vanilla")
        time.sleep(1)
        self.visdomLogger.show_image_gradient(name="grads-svanilla", model=net, inpt=random_input, err_fn=err_fn,
                                              grad_type="smooth-vanilla")
        time.sleep(1)
        self.visdomLogger.show_image_gradient(name="grads-guided", model=net, inpt=random_input, err_fn=err_fn,
                                              grad_type="guided")
        time.sleep(1)
        self.visdomLogger.show_image_gradient(name="grads-sguided", model=net, inpt=random_input, err_fn=err_fn,
                                              grad_type="smooth-guided")
        time.sleep(1)

    def test_plot_model_structure(self):
        net = Net()
        self.visdomLogger.plot_model_structure(net, (1, 1, 28, 28))

    def test_plot_model_statistics(self):
        net = Net()
        self.visdomLogger.plot_model_statistics(net, plot_grad=False)
        self.visdomLogger.plot_model_statistics(net, plot_grad=True)

    def test_show_embedding(self):
        array = torch.from_numpy(np.random.random_sample((100, 100)))
        self.visdomLogger.show_embedding(array, method="tsne")
        self.visdomLogger.show_embedding(array, method="umap")
class Net(nn.Module):
    """
    Small MNIST-sized convolutional network (input 1x28x28, 10 classes)
    used to exercise save/load and visualization functionality.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Two 5x5 conv layers; dropout applied after the second.
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        # Fully connected head: 20 channels x 4 x 4 spatial = 320 features.
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        # Conv -> 2x2 max-pool -> ReLU, twice (dropout on the second conv).
        hidden = F.relu(F.max_pool2d(self.conv1(x), 2))
        hidden = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(hidden)), 2))
        # Flatten to (batch, 320) for the fully connected layers.
        hidden = hidden.view(-1, 320)
        hidden = F.relu(self.fc1(hidden))
        hidden = F.dropout(hidden, training=self.training)
        logits = self.fc2(hidden)
        # Log-probabilities over the 10 classes.
        return F.log_softmax(logits, dim=1)
# Run the full test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"numpy.random.random_sample",
"torch.nn.CrossEntropyLoss",
"numpy.random.choice",
"torch.nn.Dropout2d",
"torch.from_numpy",
"time.sleep",
"torch.nn.Conv2d",
"trixi.logger.visdom.PytorchVisdomLogger",
"torch.nn.functional.dropout",
"numpy.array",
"torch.nn.Linear",
"torch.nn.functional.log_soft... | [((6079, 6094), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6092, 6094), False, 'import unittest\n'), ((603, 624), 'trixi.logger.visdom.PytorchVisdomLogger', 'PytorchVisdomLogger', ([], {}), '()\n', (622, 624), False, 'from trixi.logger.visdom import PytorchVisdomLogger\n'), ((673, 711), 'numpy.random.random_sample', 'np.random.random_sample', (['(3, 128, 128)'], {}), '((3, 128, 128))\n', (696, 711), True, 'import numpy as np\n'), ((729, 752), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (745, 752), False, 'import torch\n'), ((885, 926), 'numpy.random.random_sample', 'np.random.random_sample', (['(4, 3, 128, 128)'], {}), '((4, 3, 128, 128))\n', (908, 926), True, 'import numpy as np\n'), ((945, 969), 'torch.from_numpy', 'torch.from_numpy', (['images'], {}), '(images)\n', (961, 969), False, 'import torch\n'), ((1108, 1149), 'numpy.random.random_sample', 'np.random.random_sample', (['(4, 3, 128, 128)'], {}), '((4, 3, 128, 128))\n', (1131, 1149), True, 'import numpy as np\n'), ((1167, 1191), 'torch.from_numpy', 'torch.from_numpy', (['images'], {}), '(images)\n', (1183, 1191), False, 'import torch\n'), ((1606, 1632), 'numpy.random.random_sample', 'np.random.random_sample', (['(6)'], {}), '(6)\n', (1629, 1632), True, 'import numpy as np\n'), ((2978, 3006), 'numpy.random.random_sample', 'np.random.random_sample', (['(100)'], {}), '(100)\n', (3001, 3006), True, 'import numpy as np\n'), ((3024, 3053), 'numpy.random.choice', 'np.random.choice', (['(0, 1)', '(100)'], {}), '((0, 1), 100)\n', (3040, 3053), True, 'import numpy as np\n'), ((3173, 3201), 'numpy.random.random_sample', 'np.random.random_sample', (['(100)'], {}), '(100)\n', (3196, 3201), True, 'import numpy as np\n'), ((3219, 3248), 'numpy.random.choice', 'np.random.choice', (['(0, 1)', '(100)'], {}), '((0, 1), 100)\n', (3235, 3248), True, 'import numpy as np\n'), ((3380, 3408), 'numpy.random.random_sample', 'np.random.random_sample', (['(100)'], 
{}), '(100)\n', (3403, 3408), True, 'import numpy as np\n'), ((3426, 3455), 'numpy.random.choice', 'np.random.choice', (['(0, 1)', '(100)'], {}), '((0, 1), 100)\n', (3442, 3455), True, 'import numpy as np\n'), ((3883, 3910), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (3908, 3910), False, 'import torch\n'), ((4154, 4167), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4164, 4167), False, 'import time\n'), ((4365, 4378), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4375, 4378), False, 'import time\n'), ((4566, 4579), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4576, 4579), False, 'import time\n'), ((4775, 4788), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4785, 4788), False, 'import time\n'), ((5526, 5557), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(10)'], {'kernel_size': '(5)'}), '(1, 10, kernel_size=5)\n', (5535, 5557), True, 'import torch.nn as nn\n'), ((5579, 5611), 'torch.nn.Conv2d', 'nn.Conv2d', (['(10)', '(20)'], {'kernel_size': '(5)'}), '(10, 20, kernel_size=5)\n', (5588, 5611), True, 'import torch.nn as nn\n'), ((5638, 5652), 'torch.nn.Dropout2d', 'nn.Dropout2d', ([], {}), '()\n', (5650, 5652), True, 'import torch.nn as nn\n'), ((5672, 5690), 'torch.nn.Linear', 'nn.Linear', (['(320)', '(50)'], {}), '(320, 50)\n', (5681, 5690), True, 'import torch.nn as nn\n'), ((5710, 5727), 'torch.nn.Linear', 'nn.Linear', (['(50)', '(10)'], {}), '(50, 10)\n', (5719, 5727), True, 'import torch.nn as nn\n'), ((5946, 5982), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'training': 'self.training'}), '(x, training=self.training)\n', (5955, 5982), True, 'import torch.nn.functional as F\n'), ((6022, 6045), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (6035, 6045), True, 'import torch.nn.functional as F\n'), ((448, 462), 'trixi.logger.visdom.numpyvisdomlogger.start_visdom', 'start_visdom', ([], {}), '()\n', (460, 462), False, 'from 
trixi.logger.visdom.numpyvisdomlogger import start_visdom\n'), ((1346, 1372), 'numpy.random.random_sample', 'np.random.random_sample', (['(5)'], {}), '(5)\n', (1369, 1372), True, 'import numpy as np\n'), ((1849, 1875), 'numpy.random.random_sample', 'np.random.random_sample', (['(5)'], {}), '(5)\n', (1872, 1875), True, 'import numpy as np\n'), ((2096, 2127), 'numpy.random.random_sample', 'np.random.random_sample', (['(5, 2)'], {}), '((5, 2))\n', (2119, 2127), True, 'import numpy as np\n'), ((2360, 2386), 'numpy.random.random_sample', 'np.random.random_sample', (['(1)'], {}), '(1)\n', (2383, 2386), True, 'import numpy as np\n'), ((2550, 2576), 'numpy.random.random_sample', 'np.random.random_sample', (['(1)'], {}), '(1)\n', (2573, 2576), True, 'import numpy as np\n'), ((2661, 2687), 'numpy.random.random_sample', 'np.random.random_sample', (['(1)'], {}), '(1)\n', (2684, 2687), True, 'import numpy as np\n'), ((5191, 5226), 'numpy.random.random_sample', 'np.random.random_sample', (['(100, 100)'], {}), '((100, 100))\n', (5214, 5226), True, 'import numpy as np\n'), ((3841, 3854), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (3849, 3854), True, 'import numpy as np\n'), ((3744, 3768), 'numpy.random.randn', 'np.random.randn', (['(28 * 28)'], {}), '(28 * 28)\n', (3759, 3768), True, 'import numpy as np\n')] |
#%%
import jax
import jax.numpy as jnp
import haiku as hk
import distrax
import torch
from torch.distributions.categorical import Categorical
import gym
import numpy as np
from functools import partial
import time
#%%
# Reference environment shared by both the torch and jax benchmarks.
env = gym.make('CartPole-v0')
# NOTE: mid-file import kept as-is; only comments were added in this pass.
import torch.nn as nn
# Small torch policy: observation -> action probabilities
# (two hidden ReLU layers, softmax over the discrete actions).
policy = nn.Sequential(
    nn.Linear(env.observation_space.shape[0], 32),
    nn.ReLU(),
    nn.Linear(32, 32),
    nn.ReLU(),
    nn.Linear(32, env.action_space.n),
    nn.Softmax(dim=-1),
)
# Optimizer is created and zeroed but never stepped: the benchmark below
# only times forward + backward.
optim = torch.optim.SGD(policy.parameters(), lr=1e-3)
optim.zero_grad()
def _policy_fcn(obs):
    """Haiku policy network: two hidden ReLU layers, softmax over actions."""
    net = hk.Sequential([
        hk.Linear(32), jax.nn.relu,
        hk.Linear(32), jax.nn.relu,
        hk.Linear(env.action_space.n), jax.nn.softmax,
    ])
    return net(obs)
# Transform the haiku function into pure (init, apply) functions and jit
# the forward pass.
policy_fcn = hk.transform(_policy_fcn)
policy_fcn = hk.without_apply_rng(policy_fcn)
p_frwd = jax.jit(policy_fcn.apply)
seed = 0
rng = jax.random.PRNGKey(seed)
obs = env.reset() # dummy input
# Initialize the policy parameters from the dummy observation.
p_params = policy_fcn.init(rng, obs)
#%%
def torch_policy(obs):
    """Sample an action from the torch policy; return (action, log_prob)."""
    probs = policy(torch.from_numpy(obs).float())
    dist = Categorical(probs)
    action = dist.sample()
    return action, dist.log_prob(action)
def torch_rollout():
    """Run one seeded CartPole episode with the torch policy and return the
    REINFORCE-style loss, ready for .backward().

    The policy is sampled every step (that is the work being timed), but the
    environment is stepped with an action drawn from the seeded numpy RNG so
    the torch and jax benchmarks traverse comparable trajectories.
    """
    np.random.seed(seed)
    env.seed(seed)
    obs = env.reset()
    log_probs = []
    rewards = []
    while True:
        ## --
        a, log_prob = torch_policy(obs)
        a = a.numpy()
        ## --
        # Deliberately overwrite the sampled action (see docstring).
        a = np.random.choice(env.action_space.n)
        obs2, r, done, _ = env.step(a)
        if done: break
        obs = obs2
        log_probs.append(log_prob)
        rewards.append(r)
    ## --
    log_probs = torch.stack(log_probs)
    r = torch.tensor(rewards)
    loss = -(log_probs * r).sum()
    ## --
    return loss
@jax.jit
def jax_policy(p_params, obs, key):
    """Sample an action and its log-probability from the haiku policy."""
    dist = distrax.Categorical(probs=p_frwd(p_params, obs))
    action, logp = dist.sample_and_log_prob(seed=key)
    return action, logp
def jax_rollout(p_params, rng):
    """Seeded CartPole episode using the jitted jax policy; returns the
    REINFORCE-style loss. As in torch_rollout, the env is stepped with
    random actions from the seeded numpy RNG, not the sampled ones."""
    np.random.seed(seed)
    env.seed(seed)
    obs = env.reset()
    log_probs = []
    rewards = []
    while True:
        ## --
        # Split the PRNG key so each step's sample is independent.
        rng, key = jax.random.split(rng, 2)
        a, log_prob = jax_policy(p_params, obs, key)
        a = a.astype(int)
        ## --
        # Deliberately overwrite the sampled action (fair comparison).
        a = np.random.choice(env.action_space.n)
        obs2, r, done, _ = env.step(a)
        if done: break
        obs = obs2
        log_probs.append(log_prob)
        rewards.append(r)
    ## --
    log_prob = jnp.stack(log_probs)
    r = np.stack(rewards)
    loss = -(log_prob * r).sum()
    ## --
    return loss
## only sample policy (log_prob is computed in loss)
@jax.jit
def jax_policy2(p_params, obs, key):
    """Draw an action from the policy; the log-prob is recomputed later."""
    dist = distrax.Categorical(probs=p_frwd(p_params, obs))
    return dist.sample(seed=key)
def jax_rollout2(p_params, rng):
    """Sampling-only rollout: collect (obs, action, reward) arrays so that
    log-probs can be recomputed -- and differentiated -- inside the loss."""
    np.random.seed(seed)
    env.seed(seed)
    obs = env.reset()
    observ, action, rew = [], [], []
    while True:
        ## --
        rng, key = jax.random.split(rng, 2)
        a = jax_policy2(p_params, obs, key)
        a = a.astype(int)
        ## --
        # Deliberately overwrite the sampled action (fair comparison).
        a = np.random.choice(env.action_space.n)
        obs2, r, done, _ = env.step(a)
        observ.append(obs)
        action.append(a)
        rew.append(r)
        if done: break
        obs = obs2
    obs = jnp.stack(observ)
    a = jnp.stack(action)
    r = jnp.stack(rew)
    return obs, a, r
def jax_loss(p_params, obs, a, r):
    """Per-sample REINFORCE-style loss: -log pi(a | obs) * r."""
    dist = distrax.Categorical(probs=p_frwd(p_params, obs))
    logp = dist.log_prob(a.astype(int))
    return -(logp * r).sum()
def batch_jax_loss(params, obs, a, r):
    """Sum of per-sample losses over a batch, via vmap."""
    per_sample_loss = jax.vmap(partial(jax_loss, params))
    return per_sample_loss(obs, a, r).sum()
rng = jax.random.PRNGKey(seed)
#%%
#### PYTORCH
# Time 50 iterations of rollout + backward with the torch policy.
times = []
for _ in range(50):
    start = time.time()
    loss = torch_rollout()
    loss.backward()
    times.append(time.time() - start)
# 0.03423449039459228
print(f'PYTORCH TIME: {np.mean(times)}')
#%%
#### JAX
# rollout_loss fcn
# NOTE(review): jax_rollout has env.step side effects; jitting it means the
# env interaction happens at trace time -- confirm this measures the
# intended work.
jax_rollout_jitgrad = jax.jit(jax.value_and_grad(jax_rollout))
times = []
for _ in range(50):
    rng = jax.random.PRNGKey(seed)
    start = time.time()
    rng, key = jax.random.split(rng, 2)
    loss, grad = jax_rollout_jitgrad(p_params, key)
    # block_until_ready forces async dispatch to finish before timing stops.
    loss.block_until_ready()
    times.append(time.time() - start)
# 0.21324730396270752
print(f'JAX (rollout_loss) TIME: {np.mean(times)}')
#%%
#### JAX
# rollout fcn & loss fcn
# Variant: un-jitted rollout collecting arrays, then a jitted batched loss.
jit_jax_rollout2 = jax.jit(jax_rollout2)
jax_loss_jit = jax.jit(jax.value_and_grad(batch_jax_loss))
times = []
for _ in range(50):
    rng = jax.random.PRNGKey(seed)
    start = time.time()
    rng, key = jax.random.split(rng, 2)
    # batch = jit_jax_rollout2(p_params, key)
    batch = jax_rollout2(p_params, key)
    loss, grad = jax_loss_jit(p_params, *batch)
    loss.block_until_ready()
    times.append(time.time() - start)
# 0.10453275203704834 with jit_jax_rollout2
# 0.07715171337127685 with **no-jit**-rollout2
print(f'JAX (rollout -> loss) TIME: {np.mean(times)}')
#%% | [
"torch.nn.ReLU",
"haiku.transform",
"torch.from_numpy",
"jax.jit",
"gym.make",
"jax.random.split",
"numpy.mean",
"jax.random.PRNGKey",
"numpy.stack",
"numpy.random.seed",
"jax.value_and_grad",
"haiku.Linear",
"numpy.random.choice",
"distrax.Categorical",
"jax.numpy.stack",
"time.time",... | [((236, 259), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (244, 259), False, 'import gym\n'), ((778, 803), 'haiku.transform', 'hk.transform', (['_policy_fcn'], {}), '(_policy_fcn)\n', (790, 803), True, 'import haiku as hk\n'), ((817, 849), 'haiku.without_apply_rng', 'hk.without_apply_rng', (['policy_fcn'], {}), '(policy_fcn)\n', (837, 849), True, 'import haiku as hk\n'), ((859, 884), 'jax.jit', 'jax.jit', (['policy_fcn.apply'], {}), '(policy_fcn.apply)\n', (866, 884), False, 'import jax\n'), ((901, 925), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['seed'], {}), '(seed)\n', (919, 925), False, 'import jax\n'), ((3811, 3835), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['seed'], {}), '(seed)\n', (3829, 3835), False, 'import jax\n'), ((4562, 4583), 'jax.jit', 'jax.jit', (['jax_rollout2'], {}), '(jax_rollout2)\n', (4569, 4583), False, 'import jax\n'), ((312, 357), 'torch.nn.Linear', 'nn.Linear', (['env.observation_space.shape[0]', '(32)'], {}), '(env.observation_space.shape[0], 32)\n', (321, 357), True, 'import torch.nn as nn\n'), ((363, 372), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (370, 372), True, 'import torch.nn as nn\n'), ((379, 396), 'torch.nn.Linear', 'nn.Linear', (['(32)', '(32)'], {}), '(32, 32)\n', (388, 396), True, 'import torch.nn as nn\n'), ((403, 412), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (410, 412), True, 'import torch.nn as nn\n'), ((419, 452), 'torch.nn.Linear', 'nn.Linear', (['(32)', 'env.action_space.n'], {}), '(32, env.action_space.n)\n', (428, 452), True, 'import torch.nn as nn\n'), ((459, 477), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (469, 477), True, 'import torch.nn as nn\n'), ((1091, 1111), 'torch.distributions.categorical.Categorical', 'Categorical', (['a_space'], {}), '(a_space)\n', (1102, 1111), False, 'from torch.distributions.categorical import Categorical\n'), ((1221, 1241), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', 
(1235, 1241), True, 'import numpy as np\n'), ((1656, 1678), 'torch.stack', 'torch.stack', (['log_probs'], {}), '(log_probs)\n', (1667, 1678), False, 'import torch\n'), ((1687, 1708), 'torch.tensor', 'torch.tensor', (['rewards'], {}), '(rewards)\n', (1699, 1708), False, 'import torch\n'), ((1870, 1904), 'distrax.Categorical', 'distrax.Categorical', ([], {'probs': 'a_probs'}), '(probs=a_probs)\n', (1889, 1904), False, 'import distrax\n'), ((2029, 2049), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2043, 2049), True, 'import numpy as np\n'), ((2532, 2552), 'jax.numpy.stack', 'jnp.stack', (['log_probs'], {}), '(log_probs)\n', (2541, 2552), True, 'import jax.numpy as jnp\n'), ((2561, 2578), 'numpy.stack', 'np.stack', (['rewards'], {}), '(rewards)\n', (2569, 2578), True, 'import numpy as np\n'), ((2793, 2827), 'distrax.Categorical', 'distrax.Categorical', ([], {'probs': 'a_probs'}), '(probs=a_probs)\n', (2812, 2827), False, 'import distrax\n'), ((2912, 2932), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2926, 2932), True, 'import numpy as np\n'), ((3416, 3433), 'jax.numpy.stack', 'jnp.stack', (['observ'], {}), '(observ)\n', (3425, 3433), True, 'import jax.numpy as jnp\n'), ((3442, 3459), 'jax.numpy.stack', 'jnp.stack', (['action'], {}), '(action)\n', (3451, 3459), True, 'import jax.numpy as jnp\n'), ((3468, 3482), 'jax.numpy.stack', 'jnp.stack', (['rew'], {}), '(rew)\n', (3477, 3482), True, 'import jax.numpy as jnp\n'), ((3898, 3909), 'time.time', 'time.time', ([], {}), '()\n', (3907, 3909), False, 'import time\n'), ((4132, 4163), 'jax.value_and_grad', 'jax.value_and_grad', (['jax_rollout'], {}), '(jax_rollout)\n', (4150, 4163), False, 'import jax\n'), ((4207, 4231), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['seed'], {}), '(seed)\n', (4225, 4231), False, 'import jax\n'), ((4249, 4260), 'time.time', 'time.time', ([], {}), '()\n', (4258, 4260), False, 'import time\n'), ((4281, 4305), 'jax.random.split', 'jax.random.split', 
(['rng', '(2)'], {}), '(rng, 2)\n', (4297, 4305), False, 'import jax\n'), ((4607, 4641), 'jax.value_and_grad', 'jax.value_and_grad', (['batch_jax_loss'], {}), '(batch_jax_loss)\n', (4625, 4641), False, 'import jax\n'), ((4685, 4709), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['seed'], {}), '(seed)\n', (4703, 4709), False, 'import jax\n'), ((4722, 4733), 'time.time', 'time.time', ([], {}), '()\n', (4731, 4733), False, 'import time\n'), ((4754, 4778), 'jax.random.split', 'jax.random.split', (['rng', '(2)'], {}), '(rng, 2)\n', (4770, 4778), False, 'import jax\n'), ((1443, 1479), 'numpy.random.choice', 'np.random.choice', (['env.action_space.n'], {}), '(env.action_space.n)\n', (1459, 1479), True, 'import numpy as np\n'), ((2179, 2203), 'jax.random.split', 'jax.random.split', (['rng', '(2)'], {}), '(rng, 2)\n', (2195, 2203), False, 'import jax\n'), ((2320, 2356), 'numpy.random.choice', 'np.random.choice', (['env.action_space.n'], {}), '(env.action_space.n)\n', (2336, 2356), True, 'import numpy as np\n'), ((3062, 3086), 'jax.random.split', 'jax.random.split', (['rng', '(2)'], {}), '(rng, 2)\n', (3078, 3086), False, 'import jax\n'), ((3193, 3229), 'numpy.random.choice', 'np.random.choice', (['env.action_space.n'], {}), '(env.action_space.n)\n', (3209, 3229), True, 'import numpy as np\n'), ((3591, 3625), 'distrax.Categorical', 'distrax.Categorical', ([], {'probs': 'a_probs'}), '(probs=a_probs)\n', (3610, 3625), False, 'import distrax\n'), ((3982, 3993), 'time.time', 'time.time', ([], {}), '()\n', (3991, 3993), False, 'import time\n'), ((4049, 4063), 'numpy.mean', 'np.mean', (['times'], {}), '(times)\n', (4056, 4063), True, 'import numpy as np\n'), ((4407, 4418), 'time.time', 'time.time', ([], {}), '()\n', (4416, 4418), False, 'import time\n'), ((4485, 4499), 'numpy.mean', 'np.mean', (['times'], {}), '(times)\n', (4492, 4499), True, 'import numpy as np\n'), ((4963, 4974), 'time.time', 'time.time', ([], {}), '()\n', (4972, 4974), False, 'import time\n'), ((5113, 5127), 
'numpy.mean', 'np.mean', (['times'], {}), '(times)\n', (5120, 5127), True, 'import numpy as np\n'), ((614, 627), 'haiku.Linear', 'hk.Linear', (['(32)'], {}), '(32)\n', (623, 627), True, 'import haiku as hk\n'), ((650, 663), 'haiku.Linear', 'hk.Linear', (['(32)'], {}), '(32)\n', (659, 663), True, 'import haiku as hk\n'), ((686, 715), 'haiku.Linear', 'hk.Linear', (['env.action_space.n'], {}), '(env.action_space.n)\n', (695, 715), True, 'import haiku as hk\n'), ((1046, 1067), 'torch.from_numpy', 'torch.from_numpy', (['obs'], {}), '(obs)\n', (1062, 1067), False, 'import torch\n'), ((3760, 3785), 'functools.partial', 'partial', (['jax_loss', 'params'], {}), '(jax_loss, params)\n', (3767, 3785), False, 'from functools import partial\n')] |
import numpy as np
from pydex.core.designer import Designer
from examples.non_cubic_spaces.experimental_spaces import triangle, heart, circle, folium
"""
Setting: a non-dynamic experimental system with 2 time-invariant control variables and
1 response.
Problem: design an optimal experiment for an order-2 polynomial with complete interaction
Solution: a full 3^2 factorial design (3 levels per factor)
"""
def simulate(ti_controls, model_parameters):
    """Evaluate the full-quadratic two-factor response surface.

    Returns a single-element numpy array containing
    p0 + p1*c1 + p2*c2 + p3*c1*c2 + p4*c1**2 + p5*c2**2.
    """
    c1, c2 = ti_controls[0], ti_controls[1]
    p = model_parameters
    response = (
        p[0]                      # constant term
        + p[1] * c1 + p[2] * c2   # linear terms
        + p[3] * c1 * c2          # interaction term
        + p[4] * c1 ** 2 + p[5] * c2 ** 2  # squared terms
    )
    return np.array([response])
designer_1 = Designer()
designer_1.simulate = simulate
# Parameter values do not change the optimal design of a model that is
# linear in its parameters, but the designer still needs nominal values.
designer_1.model_parameters = np.ones(6)  # values won't affect design, but still needed

""" initializing initial grid """
reso = 41j
tic_1, tic_2 = np.mgrid[-1:1:reso, -1:1:reso]
tic_1 = tic_1.flatten()
tic_2 = tic_2.flatten()
tic = np.array([tic_1, tic_2]).T

package, optimizer = ("cvxpy", "MOSEK")
criterion = designer_1.a_opt_criterion

# Each experimental space goes through the identical
# filter -> initialize -> design -> visualize pipeline, so iterate over the
# space-filtering functions instead of repeating the block four times
# (previously duplicated verbatim for FOLIUM, TRIANGLE, CIRCLE and HEART).
for space_filter in (folium, triangle, circle, heart):
    # filtering initial grid down to the candidates inside the space
    designer_1.ti_controls_candidates = space_filter(tic)
    designer_1.initialize(verbose=2)  # 0: silent, 1: overview, 2: detailed, 3: very detailed
    # designing experiment
    designer_1.design_experiment(criterion=criterion, package=package,
                                 optimizer=optimizer, write=False)
    # visualize results
    designer_1.print_optimal_candidates()
    designer_1.plot_optimal_efforts()
    designer_1.plot_optimal_controls(non_opt_candidates=True)

designer_1.show_plots()
| [
"examples.non_cubic_spaces.experimental_spaces.heart",
"numpy.ones",
"examples.non_cubic_spaces.experimental_spaces.triangle",
"numpy.array",
"pydex.core.designer.Designer",
"examples.non_cubic_spaces.experimental_spaces.circle",
"examples.non_cubic_spaces.experimental_spaces.folium"
] | [((874, 884), 'pydex.core.designer.Designer', 'Designer', ([], {}), '()\n', (882, 884), False, 'from pydex.core.designer import Designer\n'), ((947, 957), 'numpy.ones', 'np.ones', (['(6)'], {}), '(6)\n', (954, 957), True, 'import numpy as np\n'), ((1313, 1324), 'examples.non_cubic_spaces.experimental_spaces.folium', 'folium', (['tic'], {}), '(tic)\n', (1319, 1324), False, 'from examples.non_cubic_spaces.experimental_spaces import triangle, heart, circle, folium\n'), ((1825, 1838), 'examples.non_cubic_spaces.experimental_spaces.triangle', 'triangle', (['tic'], {}), '(tic)\n', (1833, 1838), False, 'from examples.non_cubic_spaces.experimental_spaces import triangle, heart, circle, folium\n'), ((2337, 2348), 'examples.non_cubic_spaces.experimental_spaces.circle', 'circle', (['tic'], {}), '(tic)\n', (2343, 2348), False, 'from examples.non_cubic_spaces.experimental_spaces import triangle, heart, circle, folium\n'), ((2843, 2853), 'examples.non_cubic_spaces.experimental_spaces.heart', 'heart', (['tic'], {}), '(tic)\n', (2848, 2853), False, 'from examples.non_cubic_spaces.experimental_spaces import triangle, heart, circle, folium\n'), ((454, 720), 'numpy.array', 'np.array', (['[model_parameters[0] + model_parameters[1] * ti_controls[0] + \n model_parameters[2] * ti_controls[1] + model_parameters[3] *\n ti_controls[0] * ti_controls[1] + model_parameters[4] * ti_controls[0] **\n 2 + model_parameters[5] * ti_controls[1] ** 2]'], {}), '([model_parameters[0] + model_parameters[1] * ti_controls[0] + \n model_parameters[2] * ti_controls[1] + model_parameters[3] *\n ti_controls[0] * ti_controls[1] + model_parameters[4] * ti_controls[0] **\n 2 + model_parameters[5] * ti_controls[1] ** 2])\n', (462, 720), True, 'import numpy as np\n'), ((1152, 1176), 'numpy.array', 'np.array', (['[tic_1, tic_2]'], {}), '([tic_1, tic_2])\n', (1160, 1176), True, 'import numpy as np\n')] |
import numpy as np
# Build a one-dimensional boolean array of length 2; np.ones with a bool
# dtype yields an array filled with True.
arr1 = np.ones(2, dtype=bool)
print("1D Array with ones ")
print(arr1)
# expected output: [ True  True]
"numpy.ones"
] | [((27, 49), 'numpy.ones', 'np.ones', (['(2)'], {'dtype': 'bool'}), '(2, dtype=bool)\n', (34, 49), True, 'import numpy as np\n')] |
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import seaborn
import pandas as pd
fig, ax = plt.subplots()
fig.set_tight_layout(True)
# Initialize the figure
plt.style.use('seaborn-darkgrid')
# Query the figure's on-screen size and DPI. Note that when saving the figure to
# a file, we need to provide a DPI for that separately.
print('fig size: {0} DPI, size in inches {1}'.format(
    fig.get_dpi(), fig.get_size_inches()))
# Reference data (hard-coded absolute paths): per-plant and per-field outputs
# for three model variants -- default ('_20'), 'clm' ('_20') and 'gday' ('_14').
df1 = pd.read_csv('/home/renato/groimp_efficient/beta_sensitivity/plant_20.txt',sep='\t', names=["time", "tt", "plant", "strip", "row", "pos", "species", "weed",
                            "age","nrbranches","leafArea","fpar","rfr","biom","yields","leafmass",
                            "stemmass", "rootmass","shootrootratio","abovebiom","transpiration"])
df2 = pd.read_csv('/home/renato/groimp_efficient/beta_sensitivity/field_20.txt',sep='\t', names=["time", "species", "LAI", "nrShoots", "fAbs", "assCO2", "biomAbove", "yield", "harvestIndex","leafArea","fieldRFR"])
df3 = pd.read_csv('/home/renato/groimp_efficient/beta_sensitivity/plant_clm_20.txt',sep='\t', names=["time", "tt", "plant", "strip", "row", "pos", "species", "weed",
                            "age","nrbranches","leafArea","fpar","rfr","biom","yields","leafmass",
                            "stemmass", "rootmass","shootrootratio","abovebiom","transpiration"])
df4 = pd.read_csv('/home/renato/groimp_efficient/beta_sensitivity/field_clm_20.txt',sep='\t', names=["time", "species", "LAI", "nrShoots", "fAbs", "assCO2", "biomAbove", "yield", "harvestIndex","leafArea","fieldRFR"])
df5 = pd.read_csv('/home/renato/groimp_efficient/beta_sensitivity/plant_gday_14.txt',sep='\t', names=["time", "tt", "plant", "strip", "row", "pos", "species", "weed",
                            "age","nrbranches","leafArea","fpar","rfr","biom","yields","leafmass",
                            "stemmass", "rootmass","shootrootratio","abovebiom","transpiration"])
df6 = pd.read_csv('/home/renato/groimp_efficient/beta_sensitivity/field_gday_14.txt',sep='\t', names=["time", "species", "LAI", "nrShoots", "fAbs", "assCO2", "biomAbove", "yield", "harvestIndex","leafArea","fieldRFR"])
time = df1.time.values
time = np.array(time)
# NOTE(review): 17.02 presumably converts the time column to days -- confirm
# this scale factor against the simulation output step.
x = time/17.02
areaplant1 = df1.leafArea.values
areaplant1 = np.array(areaplant1)
areaplant3 = df3.leafArea.values
areaplant3 = np.array(areaplant3)
areaplant5 = df5.leafArea.values
areaplant5 = np.array(areaplant5)
areafield = 2*2
# NOTE(review): y is assigned three times below; only the final assignment
# (root biomass) is plotted. The transpiration- and fAbs-based computations
# are effectively dead code kept for quick switching between quantities.
shootrootratio = df1.transpiration.values
shootrootratio = np.array(shootrootratio)
y = (shootrootratio*10e2*60*60*24*areaplant1/2.54)/areafield
fAbs = df2.fAbs.values
fAbs = np.array(fAbs)
y = (1. - fAbs)/(1.-0.1)
assCO2 = df1.rootmass.values
assCO2 = np.array(assCO2)
#y = (assCO2*10e2*60*60*24*areaplant1/2.54)/areafield
y = assCO2
assCO2 = df3.rootmass.values
assCO2 = np.array(assCO2)
#y1 = (assCO2*10e2*60*60*24*areaplant3/2.54)/areafield
y1 = assCO2
assCO2 = df5.rootmass.values
assCO2 = np.array(assCO2)
#y2 = (assCO2*10e2*60*60*24*areaplant5/2.54)/areafield
y2 = assCO2
#beta = df3.beta.values
#beta = np.array(beta)
#y = beta
# Plot a scatter that persists (isn't redrawn) and the initial line.
#x = np.arange(0, 20, 0.1)
ax.set_xlabel('Time (days)')
ax.set_ylabel(r'Root biomass (mg)')
#ax.set_ylabel(r'CO$_{2}$ Assimilation (mol.CO2.m$^{-2}$.s$^{-1}$)')
#ax.set_ylabel(r'Transpiration (inches.day$^{-1}$)')
#ax.set_ylim(0,1e-6*10e2*60*60*24*0.04/2.54/4)
# Static reference curve (never updated by the animation) ...
ax.plot(x, y, 'k-.', linewidth=2,label =r'REFERENCE')
#ax.scatter(x, y1)
# ... and the three animated curves; `line`, `line1` and `line2` are the
# module-level artists that update() mutates each frame.
line, = ax.plot(x, y, 'r-', linewidth=2, label =r'$\beta$.A')
line1, = ax.plot(x, y1, 'k-', linewidth=2, label = r'$\beta$.V$_{cmax}$')
line2, = ax.plot(x, y2, 'b-', linewidth=2, label = r'$\beta$.V$_{cmax}$ & $\beta$.J$_{max}$')
ax.legend()
def update(i):
    """Animation callback: reload the frame-``i`` data files and refresh the
    three animated curves (default, 'clm' and 'gday' model variants).

    ``i`` selects the beta index (label shows beta = i/20); the files
    ``plant_<i>.txt`` etc. must exist on disk. Mutates the module-level
    artists ``line``/``line1``/``line2`` and ``ax``, and returns them so
    FuncAnimation knows what to redraw.
    """
    #label = 'timestep {0}'.format(i)
    label = 'Beta {0}'.format(i/20.)
    print(label)
    df1 = pd.read_csv('/home/renato/groimp_efficient/beta_sensitivity/plant_%s.txt'%i,sep='\t', names=["time", "tt", "plant", "strip", "row", "pos", "species", "weed",
                            "age","nrbranches","leafArea","fpar","rfr","biom","yields","leafmass",
                            "stemmass", "rootmass","shootrootratio","abovebiom","transpiration"])
    df2 = pd.read_csv('/home/renato/groimp_efficient/beta_sensitivity/field_%s.txt'%i,sep='\t', names=["time", "species", "LAI", "nrShoots", "fAbs", "assCO2", "biomAbove", "yield", "harvestIndex","leafArea","fieldRFR"])
    df3 = pd.read_csv('/home/renato/groimp_efficient/beta_sensitivity/plant_clm_%s.txt'%i,sep='\t', names=["time", "tt", "plant", "strip", "row", "pos", "species", "weed",
                            "age","nrbranches","leafArea","fpar","rfr","biom","yields","leafmass",
                            "stemmass", "rootmass","shootrootratio","abovebiom","transpiration"])
    df4 = pd.read_csv('/home/renato/groimp_efficient/beta_sensitivity/field_clm_%s.txt'%i,sep='\t', names=["time", "species", "LAI", "nrShoots", "fAbs", "assCO2", "biomAbove", "yield", "harvestIndex","leafArea","fieldRFR"])
    df5 = pd.read_csv('/home/renato/groimp_efficient/beta_sensitivity/plant_gday_%s.txt'%i,sep='\t', names=["time", "tt", "plant", "strip", "row", "pos", "species", "weed",
                            "age","nrbranches","leafArea","fpar","rfr","biom","yields","leafmass",
                            "stemmass", "rootmass","shootrootratio","abovebiom","transpiration"])
    df6 = pd.read_csv('/home/renato/groimp_efficient/beta_sensitivity/field_gday_%s.txt'%i,sep='\t', names=["time", "species", "LAI", "nrShoots", "fAbs", "assCO2", "biomAbove", "yield", "harvestIndex","leafArea","fieldRFR"])
    time = df1.time.values
    time = np.array(time)
    x = time
    areaplant1 = df1.leafArea.values
    areaplant1 = np.array(areaplant1)
    # NOTE(review): areaplant3/areaplant5 read df1 here, whereas the module
    # level uses df3/df5 -- looks like a copy-paste slip, but both are unused
    # below, so it has no effect on the plotted values.
    areaplant3 = df1.leafArea.values
    areaplant3 = np.array(areaplant3)
    areaplant5 = df1.leafArea.values
    areaplant5 = np.array(areaplant5)
    areafield = 2*2
    # NOTE(review): as at module level, y is overwritten twice below; only the
    # final rootmass-based values of y/y1/y2 reach the plot.
    biomAbove = df1.transpiration.values
    biomAbove = np.array(biomAbove)
    y = (biomAbove*10e2*60*60*24*areaplant1/2.54)/areafield
    assCO2 = df2.fAbs.values
    assCO2 = np.array(assCO2)
    y = (1. - assCO2)/(1.-0.1)
    assCO2 = df1.rootmass.values
    assCO2 = np.array(assCO2)
    #y = (assCO2*10e2*60*60*24*areaplant1/2.54)/areafield
    y = assCO2
    assCO2 = df3.rootmass.values
    assCO2 = np.array(assCO2)
    #y1 = (assCO2*10e2*60*60*24*areaplant3/2.54)/areafield
    y1 = assCO2
    assCO2 = df5.rootmass.values
    assCO2 = np.array(assCO2)
    #y2 = (assCO2*10e2*60*60*24*areaplant5/2.54)/areafield
    y2 = assCO2
    #beta = df3.beta.values
    #beta = np.array(beta)
    #y = beta
    # Update the line and the axes (with a new xlabel). Return a tuple of
    # "artists" that have to be redrawn for this frame.
    line.set_ydata(y)
    line1.set_ydata(y1)
    line2.set_ydata(y2)
    #ax.set_xlabel(label)
    ax.set_title(label)
    ax.legend()
    return line, line1, line2, ax
if __name__ == '__main__':
    # Animate beta indices 1..20 (one frame each), 500 ms between frames;
    # FuncAnimation calls update(i) once per frame.
    frame_ids = np.arange(1, 21)
    anim = FuncAnimation(fig, update, frames=frame_ids, interval=500)
    wants_save = len(sys.argv) > 1 and sys.argv[1] == 'save'
    if wants_save:
        anim.save('rootmass_3_methods.gif', dpi=80, writer='imagemagick')
    else:
        # Without the 'save' argument, display the animation; it loops
        # until the window is closed.
        plt.show()
| [
"pandas.read_csv",
"matplotlib.pyplot.style.use",
"numpy.array",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((155, 169), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (167, 169), True, 'import matplotlib.pyplot as plt\n'), ((222, 255), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-darkgrid"""'], {}), "('seaborn-darkgrid')\n", (235, 255), True, 'import matplotlib.pyplot as plt\n'), ((498, 821), 'pandas.read_csv', 'pd.read_csv', (['"""/home/renato/groimp_efficient/beta_sensitivity/plant_20.txt"""'], {'sep': '"""\t"""', 'names': "['time', 'tt', 'plant', 'strip', 'row', 'pos', 'species', 'weed', 'age',\n 'nrbranches', 'leafArea', 'fpar', 'rfr', 'biom', 'yields', 'leafmass',\n 'stemmass', 'rootmass', 'shootrootratio', 'abovebiom', 'transpiration']"}), "('/home/renato/groimp_efficient/beta_sensitivity/plant_20.txt',\n sep='\\t', names=['time', 'tt', 'plant', 'strip', 'row', 'pos',\n 'species', 'weed', 'age', 'nrbranches', 'leafArea', 'fpar', 'rfr',\n 'biom', 'yields', 'leafmass', 'stemmass', 'rootmass', 'shootrootratio',\n 'abovebiom', 'transpiration'])\n", (509, 821), True, 'import pandas as pd\n'), ((831, 1049), 'pandas.read_csv', 'pd.read_csv', (['"""/home/renato/groimp_efficient/beta_sensitivity/field_20.txt"""'], {'sep': '"""\t"""', 'names': "['time', 'species', 'LAI', 'nrShoots', 'fAbs', 'assCO2', 'biomAbove',\n 'yield', 'harvestIndex', 'leafArea', 'fieldRFR']"}), "('/home/renato/groimp_efficient/beta_sensitivity/field_20.txt',\n sep='\\t', names=['time', 'species', 'LAI', 'nrShoots', 'fAbs', 'assCO2',\n 'biomAbove', 'yield', 'harvestIndex', 'leafArea', 'fieldRFR'])\n", (842, 1049), True, 'import pandas as pd\n'), ((1047, 1374), 'pandas.read_csv', 'pd.read_csv', (['"""/home/renato/groimp_efficient/beta_sensitivity/plant_clm_20.txt"""'], {'sep': '"""\t"""', 'names': "['time', 'tt', 'plant', 'strip', 'row', 'pos', 'species', 'weed', 'age',\n 'nrbranches', 'leafArea', 'fpar', 'rfr', 'biom', 'yields', 'leafmass',\n 'stemmass', 'rootmass', 'shootrootratio', 'abovebiom', 'transpiration']"}), 
"('/home/renato/groimp_efficient/beta_sensitivity/plant_clm_20.txt',\n sep='\\t', names=['time', 'tt', 'plant', 'strip', 'row', 'pos',\n 'species', 'weed', 'age', 'nrbranches', 'leafArea', 'fpar', 'rfr',\n 'biom', 'yields', 'leafmass', 'stemmass', 'rootmass', 'shootrootratio',\n 'abovebiom', 'transpiration'])\n", (1058, 1374), True, 'import pandas as pd\n'), ((1385, 1607), 'pandas.read_csv', 'pd.read_csv', (['"""/home/renato/groimp_efficient/beta_sensitivity/field_clm_20.txt"""'], {'sep': '"""\t"""', 'names': "['time', 'species', 'LAI', 'nrShoots', 'fAbs', 'assCO2', 'biomAbove',\n 'yield', 'harvestIndex', 'leafArea', 'fieldRFR']"}), "('/home/renato/groimp_efficient/beta_sensitivity/field_clm_20.txt',\n sep='\\t', names=['time', 'species', 'LAI', 'nrShoots', 'fAbs', 'assCO2',\n 'biomAbove', 'yield', 'harvestIndex', 'leafArea', 'fieldRFR'])\n", (1396, 1607), True, 'import pandas as pd\n'), ((1605, 1933), 'pandas.read_csv', 'pd.read_csv', (['"""/home/renato/groimp_efficient/beta_sensitivity/plant_gday_14.txt"""'], {'sep': '"""\t"""', 'names': "['time', 'tt', 'plant', 'strip', 'row', 'pos', 'species', 'weed', 'age',\n 'nrbranches', 'leafArea', 'fpar', 'rfr', 'biom', 'yields', 'leafmass',\n 'stemmass', 'rootmass', 'shootrootratio', 'abovebiom', 'transpiration']"}), "('/home/renato/groimp_efficient/beta_sensitivity/plant_gday_14.txt',\n sep='\\t', names=['time', 'tt', 'plant', 'strip', 'row', 'pos',\n 'species', 'weed', 'age', 'nrbranches', 'leafArea', 'fpar', 'rfr',\n 'biom', 'yields', 'leafmass', 'stemmass', 'rootmass', 'shootrootratio',\n 'abovebiom', 'transpiration'])\n", (1616, 1933), True, 'import pandas as pd\n'), ((1944, 2167), 'pandas.read_csv', 'pd.read_csv', (['"""/home/renato/groimp_efficient/beta_sensitivity/field_gday_14.txt"""'], {'sep': '"""\t"""', 'names': "['time', 'species', 'LAI', 'nrShoots', 'fAbs', 'assCO2', 'biomAbove',\n 'yield', 'harvestIndex', 'leafArea', 'fieldRFR']"}), "('/home/renato/groimp_efficient/beta_sensitivity/field_gday_14.txt',\n 
sep='\\t', names=['time', 'species', 'LAI', 'nrShoots', 'fAbs', 'assCO2',\n 'biomAbove', 'yield', 'harvestIndex', 'leafArea', 'fieldRFR'])\n", (1955, 2167), True, 'import pandas as pd\n'), ((2190, 2204), 'numpy.array', 'np.array', (['time'], {}), '(time)\n', (2198, 2204), True, 'import numpy as np\n'), ((2267, 2287), 'numpy.array', 'np.array', (['areaplant1'], {}), '(areaplant1)\n', (2275, 2287), True, 'import numpy as np\n'), ((2335, 2355), 'numpy.array', 'np.array', (['areaplant3'], {}), '(areaplant3)\n', (2343, 2355), True, 'import numpy as np\n'), ((2403, 2423), 'numpy.array', 'np.array', (['areaplant5'], {}), '(areaplant5)\n', (2411, 2423), True, 'import numpy as np\n'), ((2501, 2525), 'numpy.array', 'np.array', (['shootrootratio'], {}), '(shootrootratio)\n', (2509, 2525), True, 'import numpy as np\n'), ((2618, 2632), 'numpy.array', 'np.array', (['fAbs'], {}), '(fAbs)\n', (2626, 2632), True, 'import numpy as np\n'), ((2697, 2713), 'numpy.array', 'np.array', (['assCO2'], {}), '(assCO2)\n', (2705, 2713), True, 'import numpy as np\n'), ((2818, 2834), 'numpy.array', 'np.array', (['assCO2'], {}), '(assCO2)\n', (2826, 2834), True, 'import numpy as np\n'), ((2941, 2957), 'numpy.array', 'np.array', (['assCO2'], {}), '(assCO2)\n', (2949, 2957), True, 'import numpy as np\n'), ((3853, 4180), 'pandas.read_csv', 'pd.read_csv', (["('/home/renato/groimp_efficient/beta_sensitivity/plant_%s.txt' % i)"], {'sep': '"""\t"""', 'names': "['time', 'tt', 'plant', 'strip', 'row', 'pos', 'species', 'weed', 'age',\n 'nrbranches', 'leafArea', 'fpar', 'rfr', 'biom', 'yields', 'leafmass',\n 'stemmass', 'rootmass', 'shootrootratio', 'abovebiom', 'transpiration']"}), "('/home/renato/groimp_efficient/beta_sensitivity/plant_%s.txt' %\n i, sep='\\t', names=['time', 'tt', 'plant', 'strip', 'row', 'pos',\n 'species', 'weed', 'age', 'nrbranches', 'leafArea', 'fpar', 'rfr',\n 'biom', 'yields', 'leafmass', 'stemmass', 'rootmass', 'shootrootratio',\n 'abovebiom', 'transpiration'])\n", (3864, 4180), 
True, 'import pandas as pd\n'), ((4192, 4414), 'pandas.read_csv', 'pd.read_csv', (["('/home/renato/groimp_efficient/beta_sensitivity/field_%s.txt' % i)"], {'sep': '"""\t"""', 'names': "['time', 'species', 'LAI', 'nrShoots', 'fAbs', 'assCO2', 'biomAbove',\n 'yield', 'harvestIndex', 'leafArea', 'fieldRFR']"}), "('/home/renato/groimp_efficient/beta_sensitivity/field_%s.txt' %\n i, sep='\\t', names=['time', 'species', 'LAI', 'nrShoots', 'fAbs',\n 'assCO2', 'biomAbove', 'yield', 'harvestIndex', 'leafArea', 'fieldRFR'])\n", (4203, 4414), True, 'import pandas as pd\n'), ((4414, 4750), 'pandas.read_csv', 'pd.read_csv', (["('/home/renato/groimp_efficient/beta_sensitivity/plant_clm_%s.txt' % i)"], {'sep': '"""\t"""', 'names': "['time', 'tt', 'plant', 'strip', 'row', 'pos', 'species', 'weed', 'age',\n 'nrbranches', 'leafArea', 'fpar', 'rfr', 'biom', 'yields', 'leafmass',\n 'stemmass', 'rootmass', 'shootrootratio', 'abovebiom', 'transpiration']"}), "(\n '/home/renato/groimp_efficient/beta_sensitivity/plant_clm_%s.txt' % i,\n sep='\\t', names=['time', 'tt', 'plant', 'strip', 'row', 'pos',\n 'species', 'weed', 'age', 'nrbranches', 'leafArea', 'fpar', 'rfr',\n 'biom', 'yields', 'leafmass', 'stemmass', 'rootmass', 'shootrootratio',\n 'abovebiom', 'transpiration'])\n", (4425, 4750), True, 'import pandas as pd\n'), ((4757, 4988), 'pandas.read_csv', 'pd.read_csv', (["('/home/renato/groimp_efficient/beta_sensitivity/field_clm_%s.txt' % i)"], {'sep': '"""\t"""', 'names': "['time', 'species', 'LAI', 'nrShoots', 'fAbs', 'assCO2', 'biomAbove',\n 'yield', 'harvestIndex', 'leafArea', 'fieldRFR']"}), "(\n '/home/renato/groimp_efficient/beta_sensitivity/field_clm_%s.txt' % i,\n sep='\\t', names=['time', 'species', 'LAI', 'nrShoots', 'fAbs', 'assCO2',\n 'biomAbove', 'yield', 'harvestIndex', 'leafArea', 'fieldRFR'])\n", (4768, 4988), True, 'import pandas as pd\n'), ((4982, 5319), 'pandas.read_csv', 'pd.read_csv', (["('/home/renato/groimp_efficient/beta_sensitivity/plant_gday_%s.txt' % i)"], 
{'sep': '"""\t"""', 'names': "['time', 'tt', 'plant', 'strip', 'row', 'pos', 'species', 'weed', 'age',\n 'nrbranches', 'leafArea', 'fpar', 'rfr', 'biom', 'yields', 'leafmass',\n 'stemmass', 'rootmass', 'shootrootratio', 'abovebiom', 'transpiration']"}), "(\n '/home/renato/groimp_efficient/beta_sensitivity/plant_gday_%s.txt' % i,\n sep='\\t', names=['time', 'tt', 'plant', 'strip', 'row', 'pos',\n 'species', 'weed', 'age', 'nrbranches', 'leafArea', 'fpar', 'rfr',\n 'biom', 'yields', 'leafmass', 'stemmass', 'rootmass', 'shootrootratio',\n 'abovebiom', 'transpiration'])\n", (4993, 5319), True, 'import pandas as pd\n'), ((5326, 5558), 'pandas.read_csv', 'pd.read_csv', (["('/home/renato/groimp_efficient/beta_sensitivity/field_gday_%s.txt' % i)"], {'sep': '"""\t"""', 'names': "['time', 'species', 'LAI', 'nrShoots', 'fAbs', 'assCO2', 'biomAbove',\n 'yield', 'harvestIndex', 'leafArea', 'fieldRFR']"}), "(\n '/home/renato/groimp_efficient/beta_sensitivity/field_gday_%s.txt' % i,\n sep='\\t', names=['time', 'species', 'LAI', 'nrShoots', 'fAbs', 'assCO2',\n 'biomAbove', 'yield', 'harvestIndex', 'leafArea', 'fieldRFR'])\n", (5337, 5558), True, 'import pandas as pd\n'), ((5583, 5597), 'numpy.array', 'np.array', (['time'], {}), '(time)\n', (5591, 5597), True, 'import numpy as np\n'), ((5671, 5691), 'numpy.array', 'np.array', (['areaplant1'], {}), '(areaplant1)\n', (5679, 5691), True, 'import numpy as np\n'), ((5747, 5767), 'numpy.array', 'np.array', (['areaplant3'], {}), '(areaplant3)\n', (5755, 5767), True, 'import numpy as np\n'), ((5823, 5843), 'numpy.array', 'np.array', (['areaplant5'], {}), '(areaplant5)\n', (5831, 5843), True, 'import numpy as np\n'), ((5924, 5943), 'numpy.array', 'np.array', (['biomAbove'], {}), '(biomAbove)\n', (5932, 5943), True, 'import numpy as np\n'), ((6047, 6063), 'numpy.array', 'np.array', (['assCO2'], {}), '(assCO2)\n', (6055, 6063), True, 'import numpy as np\n'), ((6144, 6160), 'numpy.array', 'np.array', (['assCO2'], {}), '(assCO2)\n', (6152, 
6160), True, 'import numpy as np\n'), ((6283, 6299), 'numpy.array', 'np.array', (['assCO2'], {}), '(assCO2)\n', (6291, 6299), True, 'import numpy as np\n'), ((6422, 6438), 'numpy.array', 'np.array', (['assCO2'], {}), '(assCO2)\n', (6430, 6438), True, 'import numpy as np\n'), ((7340, 7350), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7348, 7350), True, 'import matplotlib.pyplot as plt\n'), ((7105, 7121), 'numpy.arange', 'np.arange', (['(1)', '(21)'], {}), '(1, 21)\n', (7114, 7121), True, 'import numpy as np\n')] |
import argparse
import matplotlib.pyplot as plt
import numpy as np
from dungeon_game import Dungeon
from agents import RandomAgent, AccountantAgent, QLearningAgent, DeepQLearningAgent
if __name__ == "__main__":
    # Command-line options; only --iterations and --plot are consumed below.
    parser = argparse.ArgumentParser()
    parser.add_argument('--agent', type=str, default='RANDOM')
    parser.add_argument('--learning_rate', type=float, default=0.1)
    parser.add_argument('--discount', type=float, default=0.95)
    parser.add_argument('--iterations', type=int, default=5000)
    parser.add_argument('--plot', action='store_true')
    FLAGS, unparsed = parser.parse_known_args()

    # One agent of each kind, each playing its own independent dungeon and
    # accumulating its own reward history.
    agent_list = [
        RandomAgent(),
        AccountantAgent(),
        QLearningAgent(iterations=FLAGS.iterations),
        DeepQLearningAgent(iterations=FLAGS.iterations),
    ]
    rewards = [[] for _ in agent_list]
    dungeon_list = [Dungeon() for _ in agent_list]

    for agent_number, agent in enumerate(agent_list):
        dungeon = dungeon_list[agent_number]
        history = rewards[agent_number]
        for step in range(FLAGS.iterations):
            # Standard RL loop: observe, act, learn from the transition.
            old_state = dungeon.state
            action = agent.get_next_action(old_state)
            new_state, reward = dungeon.perform_action(action)
            agent.update(old_state, new_state, action, reward)
            history.append(reward)
            if step % 100 == 0:
                print("Agent: %s Step: %i Reward: %i" % (str(agent), step, sum(history)))

    if FLAGS.plot:
        # Cumulative reward curve per agent, labelled by the agent's str().
        plots = []
        for i, agent in enumerate(agent_list):
            handle, = plt.plot(np.cumsum(rewards[i]))
            plots.append(handle)
        plt.legend(plots, [str(agent) for agent in agent_list])
        plt.xlabel("# Iterations")
        plt.ylabel("Total reward")
        plt.show()
| [
"agents.QLearningAgent",
"agents.AccountantAgent",
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.xlabel",
"dungeon_game.Dungeon",
"agents.DeepQLearningAgent",
"numpy.cumsum",
"agents.RandomAgent"
] | [((227, 252), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (250, 252), False, 'import argparse\n'), ((634, 647), 'agents.RandomAgent', 'RandomAgent', ([], {}), '()\n', (645, 647), False, 'from agents import RandomAgent, AccountantAgent, QLearningAgent, DeepQLearningAgent\n'), ((670, 687), 'agents.AccountantAgent', 'AccountantAgent', ([], {}), '()\n', (685, 687), False, 'from agents import RandomAgent, AccountantAgent, QLearningAgent, DeepQLearningAgent\n'), ((709, 752), 'agents.QLearningAgent', 'QLearningAgent', ([], {'iterations': 'FLAGS.iterations'}), '(iterations=FLAGS.iterations)\n', (723, 752), False, 'from agents import RandomAgent, AccountantAgent, QLearningAgent, DeepQLearningAgent\n'), ((778, 825), 'agents.DeepQLearningAgent', 'DeepQLearningAgent', ([], {'iterations': 'FLAGS.iterations'}), '(iterations=FLAGS.iterations)\n', (796, 825), False, 'from agents import RandomAgent, AccountantAgent, QLearningAgent, DeepQLearningAgent\n'), ((986, 995), 'dungeon_game.Dungeon', 'Dungeon', ([], {}), '()\n', (993, 995), False, 'from dungeon_game import Dungeon\n'), ((1820, 1846), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""# Iterations"""'], {}), "('# Iterations')\n", (1830, 1846), True, 'import matplotlib.pyplot as plt\n'), ((1855, 1881), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Total reward"""'], {}), "('Total reward')\n", (1865, 1881), True, 'import matplotlib.pyplot as plt\n'), ((1890, 1900), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1898, 1900), True, 'import matplotlib.pyplot as plt\n'), ((1693, 1714), 'numpy.cumsum', 'np.cumsum', (['rewards[i]'], {}), '(rewards[i])\n', (1702, 1714), True, 'import numpy as np\n')] |
import os
import sys
import warnings
from inspect import getmembers, isfunction
import inspect
import numpy as np
from ase.io import read
import scipy.sparse as sp
from Utilities import Initial
no_dir_template = "\nThere does not exist a suitable directory in which to place these" \
"quantities.\n\nInstead, we shall generate one at '%s'.\n"
no_file_template = "\nThere does not exist a file in which to write the quantity %s.\n" \
"\nInstead, we shall create the file '%s' at location '%s'."
AttrErr = "Unable to find a write object for {0}:\n"\
"\nException traceback:\n{1}.\n"
class Writer():
"""
Robert:
This class object has been written with the purpose of handling the
creation and distribution of Sapphire Output.
In version 0.10.1, the pickle function is inadequate to facilitate
the entirity of the metadata.
In principle, all of the handling of output should be handled out of
sight of the user.
"""
    def __init__(self, System, Metadata):
        """
        Prepare the output streams for a Sapphire run.

        Parameters
        ----------
        System : dict
            I/O information; must contain 'base_dir' (and, for the
            homo-species writers, 'Homo').
        Metadata : dict
            Post-processed quantities produced by Sapphire, keyed by
            quantity name.

        Side effects: creates/truncates 'Output_Info.txt' and
        'Output_Errors.txt' under System['base_dir'] and writes the
        Sapphire logo header into both.
        """
        self.output_info_file = System['base_dir']+'Output_Info.txt'
        self.output_error_file = System['base_dir']+'Output_Errors.txt'
        # Default settings describing how a single quantity is written out.
        self.Quants = {
            'Dir': 'Time_Dependent/', 'File': 'R_Cut', 'Iterate': False, 'Bool': False,
            'Skip': True, 'Energy': False, 'Homo': False, 'Hetero': False, 'xyz': False
        }
        self.Metadata = Metadata  # This is the data provided to the user by Sapphire after post processing
        self.System = System  # Significant system information regarding I/O streams
        self.Logo = Initial.Logo().Logo()
        # 'w' here is deliberate: each new Writer starts both log files afresh.
        with open(self.output_info_file, 'w') as outfile:
            outfile.write(self.Logo)
            outfile.write('\n')
        with open(self.output_error_file, 'w') as outfile:
            outfile.write(self.Logo)
            outfile.write('\n')
        """
        This provides a dictionary with the function names as keys and the
        function itself.
        This allows us to have 1-1-1 mapping between the output p
        """
        self.functions_list = [o for o in getmembers(Writer) if isfunction(o[1])]
        self.Functions = {}
        # NOTE(review): each x is a (name, function) tuple from getmembers,
        # while self.Quants has string keys, so this membership test never
        # succeeds and self.Functions always stays empty -- confirm intent.
        for x in self.functions_list:
            if x in self.Quants.keys():
                self.Functions[x[0]] = inspect.getfullargspec(x[1])[0][1:]
def ensure_dir(self, base_dir='', file_path=''):
"""
Robert:
A simple script to verify the existence of a directory
given the path to it. If it does not exist, will create it.
"""
directory = base_dir + file_path
if not os.path.exists(directory):
os.makedirs(directory)
with open(self.output_info_file, 'w') as outfile:
outfile.write(no_dir_template % (base_dir+file_path))
def MakeFile(self, Attributes):
self.out = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']
if not os.path.isfile(self.out):
with open(self.System['base_dir'] + Attributes['Dir'] + Attributes['File'], 'w') as out:
out.close()
else:
pass
def Masterkey(self, Quantity):
try:
with open(self.out, 'w') as f:
for item in self.Metadata[self.x]:
f.write(str(item)+'\n')
except Exception as e:
with open(self.output_error_file, 'a') as outfile:
outfile.write(AttrErr % (self.x, e))
def Adj(self, Quantity):
self.out = self.System['base_dir'] + Quantity['Dir'] + Quantity['File']
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Quantity['Dir'])
for i, t in enumerate(self.Metadata[self.x]):
try:
self.filename = self.System['base_dir'] + Quantity['Dir'] + 'File%s' % i
self.Mat = sp.csr_matrix.todense(t)
with open(self.filename, 'w') as f:
for line in self.Mat:
np.savetxt(f, line, fmt='%d')
except Exception as e:
with open(self.output_error_file, 'a') as outfile:
outfile.write(AttrErr % (self.x, e))
def Ele(self, Quantity):
self.out = self.System['base_dir'] + Quantity['Dir'] + Quantity['File']
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Quantity['Dir'])
with open(self.out, 'w') as file:
for i, t in enumerate(self.Metadata[self.x]):
try:
self.filename = self.System['base_dir'] + Quantity['Dir'] + 'File%s' % i
file.write('\t|\t'.join(str(item) for item in t[0])+'\n')
except Exception as e:
with open(self.output_error_file, 'a') as outfile:
outfile.write(AttrErr % (self.x, e))
    def HeAdj(self, Quantity):
        """
        Write hetero-adjacency data to one output file per homo element.

        ``self.Metadata[self.x]`` is indexed first by frame and then by the
        element's position in ``self.System['Homo']`` -- TODO confirm this
        layout against the post-processing code. With multiple frames the
        per-frame vectors are column-stacked and the transpose is saved
        (one row per frame); a single frame is saved directly.

        NOTE(review): unlike Adj/Ele, this writer does not call ensure_dir
        first, so the target directory must already exist.
        """
        self.Homo = self.System['Homo']
        for Ele in self.Homo:
            if len(self.Metadata[self.x]) > 1:
                # Seed the stack with frames 0 and 1 ...
                Temp = np.column_stack((
                    self.Metadata[self.x][0][self.Homo.index(Ele)],
                    self.Metadata[self.x][1][self.Homo.index(Ele)]
                ))
                # ... then append each remaining frame as an integer column.
                for t in range(2, len(self.Metadata[self.x])):
                    Temp = np.column_stack((
                        Temp, np.array(self.Metadata[self.x][t][self.Homo.index(Ele)], int)
                    ))
                np.savetxt(
                    self.System['base_dir'] + Quantity['Dir'] + Quantity['File']+Ele,
                    Temp.transpose(), fmt='%d')
            else:
                # Single frame: no stacking required.
                np.savetxt(
                    self.System['base_dir'] + Quantity['Dir'] + Quantity['File']+Ele,
                    np.array(self.Metadata[self.x][0][self.Homo.index(Ele)]).transpose(),
                    fmt='%d')
def Write_Homo(self, Quantity):
# self.MakeFile(Quantity) #See if the file already exists
for Ele in self.System['Homo']:
File = str(self.x)[:-2]+Ele
self.out = self.System['base_dir'] + Quantity['Dir'] + Quantity['File']+Ele
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Quantity['Dir'])
try:
if not Quantity['Iterate'] and not Quantity['Bool'] and not Quantity['array']:
try:
np.savetxt(self.out, self.Metadata[File], fmt='%s')
except Exception as e:
with open(self.output_error_file, 'a') as error:
error.write(AttrErr.format(str(File), str(e)))
try:
with open(self.out, 'a') as CurrentOut:
CurrentOut.write(str(File)+str(self.Metadata[File]))
CurrentOut.write('\n')
except Exception as e:
with open(self.output_error_file, 'a') as outfile:
outfile.write(AttrErr % (File, e))
elif Quantity['Iterate'] and Quantity['array']:
try:
if len(self.Metadata[File]) > 1:
Temp = np.column_stack((self.Metadata[File][0], self.Metadata[File][1]))
for t in range(2, len(self.Metadata[File])):
Temp = np.column_stack((Temp, self.Metadata[File][t]))
np.savetxt(self.out, Temp.transpose(), fmt='%f')
else:
np.savetxt(
self.out,
np.array(self.Metadata[File][0]).transpose(),
fmt='%f')
except Exception as e:
with open(self.output_error_file, 'a') as outfile:
outfile.write(AttrErr % (File, e))
elif Quantity['Iterate'] and not Quantity['array']:
try:
np.savetxt(self.out, np.array(self.Metadata[File], dtype=float).transpose(), fmt='%f')
except Exception as e:
with open(self.output_error_file, 'a') as outfile:
outfile.write(AttrErr % (File, e))
except Exception as e:
with open(self.output_error_file, 'a') as error:
error.write(AttrErr.format(str(File), str(e)))
def Write(self, Quantity):
        """Write the metadata entry for the current key ``self.x`` to disk.

        The output path is ``<base_dir><Quantity['Dir']><Quantity['File']>``.
        ``Quantity`` is an output-descriptor dict; the flags select the path:

        * Exec or Bool -- append a ``<key>\\t|\\t<value>`` text line;
        * not Iterate, not Bool, not array -- ``np.savetxt`` dump plus an
          appended ``<key><value>`` line;
        * Iterate and array -- per-frame arrays column-stacked, saved
          transposed as floats;
        * Iterate, not array -- per-frame values saved as one transposed
          float array.

        All failures are swallowed and appended to ``self.output_error_file``.
        NOTE(review): ``AttrErr`` is a module-level template defined elsewhere,
        used with both ``.format`` and ``%`` -- assumed compatible with both.
        """
        self.out = self.System['base_dir'] + Quantity['Dir'] + Quantity['File']
        self.ensure_dir(base_dir=self.System['base_dir'], file_path=Quantity['Dir']) # See if the directory already exists
        # self.MakeFile(Quantity) #See if the file already exists
        if Quantity['Exec']:
            # 'Exec' quantities are always appended as readable text lines.
            try:
                with open(self.out, 'a') as CurrentOut:
                    CurrentOut.write(str(self.x)+'\t|\t'+str(self.Metadata[self.x]))
                    CurrentOut.write('\n')
            except Exception as e:
                with open(self.output_error_file, 'a') as outfile:
                    outfile.write(AttrErr % (self.x, e))
        else:
            try:
                if Quantity['Bool']:
                    try:
                        with open(self.out, 'a') as CurrentOut:
                            CurrentOut.write(str(self.x) + '\t|\t' + str(self.Metadata[self.x]))
                            CurrentOut.write('\n')
                    except Exception as e:
                        with open(self.output_error_file, 'a') as outfile:
                            outfile.write(AttrErr % (self.x, e))
                elif not Quantity['Iterate'] and not Quantity['Bool'] and not Quantity['array']:
                    # Static quantity: savetxt dump plus a plain-text summary line.
                    try:
                        np.savetxt(self.out, self.Metadata[self.x], fmt='%s')
                    except Exception as e:
                        with open(self.output_error_file, 'a') as error:
                            error.write(AttrErr.format(str(self.x), str(e)))
                    try:
                        with open(self.out, 'a') as CurrentOut:
                            CurrentOut.write(str(self.x)+str(self.Metadata[self.x]))
                            CurrentOut.write('\n')
                    except Exception as e:
                        with open(self.output_error_file, 'a') as outfile:
                            outfile.write(AttrErr % (self.x, e))
                elif Quantity['Iterate'] and Quantity['array']:
                    # Per-frame arrays: stack the frames as columns, then write transposed.
                    try:
                        if len(self.Metadata[self.x]) > 1:
                            Temp = np.column_stack((self.Metadata[self.x][0], self.Metadata[self.x][1]))
                            for t in range(2, len(self.Metadata[self.x])):
                                Temp = np.column_stack((Temp, self.Metadata[self.x][t]))
                            np.savetxt(self.out, Temp.transpose(), fmt='%f')
                        else:
                            # Single frame: no stacking needed.
                            np.savetxt(
                                self.out,
                                np.array(self.Metadata[self.x][0]).transpose(),
                                fmt='%f')
                    except Exception as e:
                        with open(self.output_error_file, 'a') as outfile:
                            outfile.write(AttrErr % (self.x, e))
                elif Quantity['Iterate'] and not Quantity['array']:
                    # Per-frame scalars: write as one transposed float column.
                    try:
                        np.savetxt(self.out, np.array(self.Metadata[self.x], dtype=float).transpose(), fmt='%f')
                    except Exception as e:
                        with open(self.output_error_file, 'a') as outfile:
                            outfile.write(AttrErr % (self.x, e))
            except Exception as e:
                with open(self.output_error_file, 'a') as error:
                    error.write(AttrErr.format(str(self.x), str(e)))
def Run(self, Output_Type):
        """
        Robert.
        This will need to be handled internally delicately so as to not confuse
        the user.
        I would like to be able to determine whether or not to call a given
        output file type based on it being part of the Full, Homo, or Hetero
        sub-systems.
        In principle, the User is at liberty (not now, but soon) to pre-select their
        own output parameters. Though deviating from the defaults could be dangerous.
        At present, one of three string-types can be assigned to the 'Output_Type'
        free variable:
        Full - Loads in the OutputInfoFull.py file for its attributes to be read.
        Homo - Loads in the OutputInfoHomo.py file for its attributes to be read.
        Hetero - Loads in the OutputInfoHetero.py file for its attributes to be read.
        """
        # NOTE(review): any Output_Type other than the three below leaves `Out`
        # unbound; the resulting NameError per metadata key is swallowed by the
        # broad except in the loop and logged to the error file.
        if Output_Type == 'Full':
            from Utilities import OutputInfoFull as Out  # Case 1
        elif Output_Type == 'Homo':
            from Utilities import OutputInfoHomo as Out  # Case 2
        elif Output_Type == 'Hetero':
            from Utilities import OutputInfoHetero as Out  # Case 3
        self.Write_List = []
        for self.x in self.Metadata.keys():  # Things such as: 'pdf', 'R_Cut', ...
            try:
                if Output_Type == 'Homo' and self.x.startswith('ho'):
                    # Homo quantities carry a two-character element suffix that is
                    # stripped before looking up their descriptor in Out.
                    Attributes = getattr(Out, str(self.x[:-2]))  # Pulls dictionaries with names corresponding to x as above
                    with open(self.output_info_file, 'a') as outfile:
                        outfile.write('Working now with %s and placing it in %s with file name %s.\n' % (self.x, Attributes['Dir'], Attributes['File']))
                    try:
                        self.Write_Homo(Attributes)
                    except Exception as e:
                        with open(self.output_error_file, 'a') as error:
                            error.write(AttrErr.format(str(self.x), str(e)))
                else:
                    Attributes = getattr(Out, str(self.x))  # Pulls dictionaries with names corresponding to x as above
                    # A handful of keys have dedicated writers; everything else
                    # goes through the generic Write().
                    if self.x == 'adj':
                        try:
                            self.Adj(Attributes)
                        except Exception as e:
                            with open(self.output_error_file, 'a') as error:
                                error.write(AttrErr.format(str(self.x), str(e)))
                    elif self.x == 'Elements':
                        try:
                            self.Ele(Attributes)
                        except Exception as e:
                            with open(self.output_error_file, 'a') as error:
                                error.write(AttrErr.format(str(self.x), str(e)))
                    elif self.x == 'headj':
                        try:
                            self.HeAdj(Attributes)
                        except Exception as e:
                            with open(self.output_error_file, 'a') as error:
                                error.write(AttrErr.format(str(self.x), str(e)))
                    elif self.x == 'master':
                        try:
                            self.Masterkey(Attributes)
                        except Exception as e:
                            with open(self.output_error_file, 'a') as error:
                                error.write(AttrErr.format(str(self.x), str(e)))
                    else:
                        self.Write(Attributes)
                    # Logged for every non-homo key, regardless of which writer ran.
                    with open(self.output_info_file, 'a') as outfile:
                        outfile.write('Working now with %s and placing it in %s with file name %s.\n' % (self.x, Attributes['Dir'], Attributes['File']))
            except Exception as e:
                with open(self.output_error_file, 'a') as error:
                    error.write(AttrErr.format(str(self.x), str(e)))
        # Dump the CNA pattern key as a human-readable reference file.
        try:
            from CNA.Utilities import Pattern_Key as PK
            self.pattern_key = PK().Key()
            with open(self.System['base_dir'] + 'RecognisedPatterns.txt', 'w') as outfile:
                for i, thing in enumerate(self.pattern_key.keys()):
                    outfile.write(str(i) + ')\t' + str(thing)+':\t')
                    for item in self.pattern_key[thing]:
                        outfile.write(str(item) + ':\t' + str(self.pattern_key[thing][item])+'\t|\t')
                    outfile.write('\n\n')
        except Exception as e:
            with open(self.output_error_file, 'a') as error:
                error.write(AttrErr.format('CNA_Patterns', e))
| [
"os.path.exists",
"inspect.getmembers",
"os.makedirs",
"scipy.sparse.csr_matrix.todense",
"numpy.column_stack",
"inspect.getfullargspec",
"CNA.Utilities.Pattern_Key",
"os.path.isfile",
"numpy.array",
"numpy.savetxt",
"inspect.isfunction",
"Utilities.Initial.Logo"
] | [((2651, 2676), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (2665, 2676), False, 'import os\n'), ((2691, 2713), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (2702, 2713), False, 'import os\n'), ((2983, 3007), 'os.path.isfile', 'os.path.isfile', (['self.out'], {}), '(self.out)\n', (2997, 3007), False, 'import os\n'), ((1604, 1618), 'Utilities.Initial.Logo', 'Initial.Logo', ([], {}), '()\n', (1616, 1618), False, 'from Utilities import Initial\n'), ((2136, 2154), 'inspect.getmembers', 'getmembers', (['Writer'], {}), '(Writer)\n', (2146, 2154), False, 'from inspect import getmembers, isfunction\n'), ((2158, 2174), 'inspect.isfunction', 'isfunction', (['o[1]'], {}), '(o[1])\n', (2168, 2174), False, 'from inspect import getmembers, isfunction\n'), ((3885, 3909), 'scipy.sparse.csr_matrix.todense', 'sp.csr_matrix.todense', (['t'], {}), '(t)\n', (3906, 3909), True, 'import scipy.sparse as sp\n'), ((16083, 16087), 'CNA.Utilities.Pattern_Key', 'PK', ([], {}), '()\n', (16085, 16087), True, 'from CNA.Utilities import Pattern_Key as PK\n'), ((2322, 2350), 'inspect.getfullargspec', 'inspect.getfullargspec', (['x[1]'], {}), '(x[1])\n', (2344, 2350), False, 'import inspect\n'), ((4028, 4057), 'numpy.savetxt', 'np.savetxt', (['f', 'line'], {'fmt': '"""%d"""'}), "(f, line, fmt='%d')\n", (4038, 4057), True, 'import numpy as np\n'), ((6378, 6429), 'numpy.savetxt', 'np.savetxt', (['self.out', 'self.Metadata[File]'], {'fmt': '"""%s"""'}), "(self.out, self.Metadata[File], fmt='%s')\n", (6388, 6429), True, 'import numpy as np\n'), ((9831, 9884), 'numpy.savetxt', 'np.savetxt', (['self.out', 'self.Metadata[self.x]'], {'fmt': '"""%s"""'}), "(self.out, self.Metadata[self.x], fmt='%s')\n", (9841, 9884), True, 'import numpy as np\n'), ((7233, 7298), 'numpy.column_stack', 'np.column_stack', (['(self.Metadata[File][0], self.Metadata[File][1])'], {}), '((self.Metadata[File][0], self.Metadata[File][1]))\n', (7248, 7298), True, 'import numpy as 
np\n'), ((7411, 7458), 'numpy.column_stack', 'np.column_stack', (['(Temp, self.Metadata[File][t])'], {}), '((Temp, self.Metadata[File][t]))\n', (7426, 7458), True, 'import numpy as np\n'), ((10698, 10767), 'numpy.column_stack', 'np.column_stack', (['(self.Metadata[self.x][0], self.Metadata[self.x][1])'], {}), '((self.Metadata[self.x][0], self.Metadata[self.x][1]))\n', (10713, 10767), True, 'import numpy as np\n'), ((10882, 10931), 'numpy.column_stack', 'np.column_stack', (['(Temp, self.Metadata[self.x][t])'], {}), '((Temp, self.Metadata[self.x][t]))\n', (10897, 10931), True, 'import numpy as np\n'), ((7680, 7712), 'numpy.array', 'np.array', (['self.Metadata[File][0]'], {}), '(self.Metadata[File][0])\n', (7688, 7712), True, 'import numpy as np\n'), ((8088, 8130), 'numpy.array', 'np.array', (['self.Metadata[File]'], {'dtype': 'float'}), '(self.Metadata[File], dtype=float)\n', (8096, 8130), True, 'import numpy as np\n'), ((11153, 11187), 'numpy.array', 'np.array', (['self.Metadata[self.x][0]'], {}), '(self.Metadata[self.x][0])\n', (11161, 11187), True, 'import numpy as np\n'), ((11565, 11609), 'numpy.array', 'np.array', (['self.Metadata[self.x]'], {'dtype': 'float'}), '(self.Metadata[self.x], dtype=float)\n', (11573, 11609), True, 'import numpy as np\n')] |
# code to calculate Rouge precision and recall for various texts, by taking the two text files
# that have the gold summaries and the predicted ones.
# Inputs:
# goldfile) File containing only gold summaries
# predfile) File containing predicted summaries
# ngram) n-gram model to use (1, 2, 3 ...) (Should be less than the total number of words in any paragraph)
# Output
# Baseline_n_gram.txt containing tab delimited Precision and Recall values for each pair.
import argparse
import numpy as np
# Command-line interface: the two summary files (one summary per line) and
# the n-gram order used for the ROUGE-n computation.
parser = argparse.ArgumentParser()
parser.add_argument('--goldfile', type=str, required=True)  # file with gold summaries, one per line
parser.add_argument('--predfile', type=str, required=True)  # file with predicted summaries, one per line
parser.add_argument('--ngram', type=int, required=True)  # n-gram order (1, 2, 3, ...)
args = parser.parse_args()
def rouge_metrics(system_list,reference_list):
    """Compute ROUGE recall and precision for two n-gram lists.

    The overlap counts every entry of ``system_list`` that also occurs in
    ``reference_list`` (duplicates in the system list are counted each time
    they appear).  Recall divides the overlap by the reference length and
    precision by the system length; both are 0 when either list is empty.
    """
    reference_count = len(reference_list)
    system_count = len(system_list)
    if system_count == 0 or reference_count == 0:
        return 0, 0
    overlap = sum(1 for gram in system_list if gram in reference_list)
    return overlap / reference_count, overlap / system_count
def intersection(system_lst, ref_lst):
    """Return the items of ``system_lst`` that also occur in ``ref_lst``.

    Order and duplicates from ``system_lst`` are preserved.
    """
    return [item for item in system_lst if item in ref_lst]
def create_ngrams(text_list,n=2):
    """Return all contiguous n-grams of ``text_list`` as sublists.

    An empty list is returned when ``n`` exceeds ``len(text_list)``.
    """
    return [text_list[start:start + n] for start in range(len(text_list) - n + 1)]
# Score every gold/predicted line pair, write per-line recall/precision to
# baseline_<n>_gram.txt, then report the corpus-level averages and F-score.
with open(args.goldfile, "r") as f:
    original = f.read()
with open(args.predfile, "r") as f:
    new = f.read()
rrecall = []
rprecision = []
with open("baseline_" + str(args.ngram) + "_gram.txt", "w") as f:
    for row_original, row_new in zip(original.split("\n"), new.split("\n")):
        system_list = row_new.split(" ")
        reference_list = row_original.split(" ")
        system_2grams = create_ngrams(system_list, args.ngram)
        reference_2grams = create_ngrams(reference_list, args.ngram)
        rouge_2_recall, rouge_2_precision = rouge_metrics(system_2grams, reference_2grams)
        rrecall.append(rouge_2_recall)
        rprecision.append(rouge_2_precision)
        f.write(str(rouge_2_recall) + "\t" + str(rouge_2_precision) + "\n")
rrecall = np.mean(rrecall)
rprecision = np.mean(rprecision)
# BUG FIX: guard the harmonic mean against ZeroDivisionError, which occurred
# whenever there was no n-gram overlap at all (both averages zero).
if rprecision + rrecall > 0:
    f_score = (2 * rprecision * rrecall) / (rprecision + rrecall)
else:
    f_score = 0.0
print("Recall:", rrecall)
print("Precision:", rprecision)
print("FScore:", f_score)
| [
"numpy.mean",
"argparse.ArgumentParser"
] | [((532, 557), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (555, 557), False, 'import argparse\n'), ((2567, 2583), 'numpy.mean', 'np.mean', (['rrecall'], {}), '(rrecall)\n', (2574, 2583), True, 'import numpy as np\n'), ((2598, 2617), 'numpy.mean', 'np.mean', (['rprecision'], {}), '(rprecision)\n', (2605, 2617), True, 'import numpy as np\n')] |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Commonly used static datasets. Several can be used in both `density estimation` as well as classification
"""
import math
import numpy as np
import torch
from torch import sqrt, pow, cat, zeros, Tensor
from scipy.integrate import solve_ivp
from torchdyn import TTuple, Tuple
from sklearn.neighbors import KernelDensity
from torch.distributions import Normal
def randnsphere(dim:int, radius:float) -> Tensor:
    """Uniform sampling on a sphere of `dim` and `radius`

    Draws a standard-normal vector and rescales it to length `radius`,
    which gives a uniformly distributed direction on the sphere.

    :param dim: dimension of the sphere
    :type dim: int
    :param radius: radius of the sphere
    :type radius: float
    """
    direction = torch.randn(dim)
    return direction * (radius / sqrt(pow(direction, 2).sum()))
def generate_concentric_spheres(n_samples:int=100, noise:float=1e-4, dim:int=3,
                                inner_radius:float=0.5, outer_radius:int=1) -> TTuple:
    """Creates a *concentric spheres* dataset of `n_samples` datasets points.

    The first ``n_samples // 2`` points (label 1) are sampled uniformly on the
    inner sphere, the remaining points (label 0) on the outer sphere; Gaussian
    noise with standard deviation `noise` is added to every point.

    :param n_samples: number of datasets points in the generated dataset
    :type n_samples: int
    :param noise: standard deviation of noise magnitude added to each datasets point
    :type noise: float
    :param dim: dimension of the spheres
    :type dim: float
    :param inner_radius: radius of the inner sphere
    :type inner_radius: float
    :param outer_radius: radius of the outer sphere
    :type outer_radius: float
    """
    n_inner = n_samples // 2
    # BUG FIX: the outer shell previously also drew n_samples // 2 points,
    # which broke the slice assignment below whenever n_samples was odd.
    n_outer = n_samples - n_inner
    X, y = zeros((n_samples, dim)), torch.zeros(n_samples)
    y[:n_inner] = 1
    if n_inner > 0:
        X[:n_inner] = cat([randnsphere(dim, inner_radius)[None, :] for _ in range(n_inner)])
        X[:n_inner] += zeros((n_inner, dim)).normal_(0, std=noise)
    X[n_inner:] = cat([randnsphere(dim, outer_radius)[None, :] for _ in range(n_outer)])
    X[n_inner:] += zeros((n_outer, dim)).normal_(0, std=noise)
    return X, y
def generate_moons(n_samples:int=100, noise:float=1e-4, **kwargs) -> TTuple:
    """Creates a *moons* dataset of `n_samples` datasets points.

    The first ``n_samples // 2`` points (label 0) lie on an upper half-circle,
    the remaining points (label 1) on a mirrored, shifted lower half-circle.

    :param n_samples: number of datasets points in the generated dataset
    :type n_samples: int
    :param noise: standard deviation of noise magnitude added to each datasets point
    :type noise: float
    """
    n_outer = n_samples // 2
    n_inner = n_samples - n_outer
    theta_outer = np.linspace(0, np.pi, n_outer)
    theta_inner = np.linspace(0, np.pi, n_inner)
    # Upper half-circle followed by the mirrored, shifted lower half-circle.
    xs = np.append(np.cos(theta_outer), 1 - np.cos(theta_inner))
    ys = np.append(np.sin(theta_outer), 1 - np.sin(theta_inner) - .5)
    X = np.vstack([xs, ys]).T
    y = np.hstack([np.zeros(n_outer, dtype=np.intp),
                   np.ones(n_inner, dtype=np.intp)])
    if noise is not None:
        # The same uniform perturbation is applied to both coordinates of a point.
        X += np.random.rand(n_samples, 1) * noise
    return Tensor(X), Tensor(y).long()
def generate_spirals(n_samples=100, noise=1e-4, **kwargs) -> TTuple:
    """Creates a *spirals* dataset of `n_samples` datasets points per arm.

    Two point-symmetric spiral arms are generated; the second arm is the exact
    negation of the first, so the returned tensors hold ``2 * n_samples``
    points with labels 0 (first arm) and 1 (second arm).

    :param n_samples: number of datasets points in the generated dataset
    :type n_samples: int
    :param noise: standard deviation of noise magnitude added to each datasets point
    :type noise: float
    """
    angles = np.sqrt(np.random.rand(n_samples, 1)) * 780 * (2 * np.pi) / 360
    arm_x = -np.cos(angles) * angles + np.random.rand(n_samples, 1) * noise
    arm_y = np.sin(angles) * angles + np.random.rand(n_samples, 1) * noise
    arm = np.hstack((arm_x, arm_y))
    X = torch.Tensor(np.vstack((arm, -arm)))
    y = torch.Tensor(np.hstack((np.zeros(n_samples), np.ones(n_samples)))).long()
    return X, y
def generate_gaussians(n_samples=100, n_gaussians=7, dim=2,
                       radius=0.5, std_gaussians=0.1, noise=1e-3) -> TTuple:
    """Creates `dim`-dimensional `n_gaussians` on a ring of radius `radius`.

    :param n_samples: number of datasets points in the generated dataset
    :type n_samples: int
    :param n_gaussians: number of gaussians distributions placed on the circle of radius `radius`
    :type n_gaussians: int
    :param dim: dimension of the dataset. The distributions are placed on the hyperplane (x1, x2, 0, 0..) if dim > 2
    :type dim: int
    :param radius: radius of the circle on which the distributions lie
    :type radius: int
    :param std_gaussians: standard deviation of the gaussians.
    :type std_gaussians: int
    :param noise: standard deviation of noise magnitude added to each datasets point
    :type noise: float
    """
    X = torch.zeros(n_samples * n_gaussians, dim)
    y = torch.zeros(n_samples * n_gaussians).long()
    angle = torch.zeros(1)

    def _loc(a):
        # Center on the circle of `radius` in the (x1, x2)-plane; extra dims are zero.
        base = [radius * torch.cos(a), radius * torch.sin(a)]
        if dim > 2:
            base.append(torch.zeros(dim - 2))
        return torch.cat(base)

    dist = Normal(_loc(angle), scale=std_gaussians)
    for i in range(n_gaussians):
        angle += 2 * math.pi / n_gaussians
        # BUG FIX: the old dim > 2 branch dropped `radius` on sin(angle) and built
        # the loc with torch.Tensor([...]) on a list of tensors, which fails for
        # dim > 3; build the loc with torch.cat, consistent with the initial loc.
        dist.loc = _loc(angle)
        # NOTE(review): torch.randn(dim) broadcasts the SAME noise offset over all
        # samples of one gaussian (as in the original) -- confirm this is intended.
        X[i*n_samples:(i+1)*n_samples] = dist.sample(sample_shape=(n_samples,)) + torch.randn(dim)*noise
        y[i*n_samples:(i+1)*n_samples] = i
    return X, y
def generate_gaussians_spiral(n_samples=100, n_gaussians=7, n_gaussians_per_loop=4, dim=2,
                              radius_start=1, radius_end=0.2, std_gaussians_start=0.3,
                              std_gaussians_end=0.1, noise=1e-3) -> TTuple:
    """Creates `dim`-dimensional `n_gaussians` on a spiral.

    :param n_samples: number of datasets points in the generated dataset
    :type n_samples: int
    :param n_gaussians: number of total gaussians distributions placed on the spirals
    :type n_gaussians: int
    :param n_gaussians_per_loop: number of gaussians distributions per loop of the spiral
    :type n_gaussians_per_loop: int
    :param dim: dimension of the dataset. The distributions are placed on the hyperplane (x1, x2, 0, 0..) if dim > 2
    :type dim: int
    :param radius_start: starting radius of the spiral
    :type radius_start: int
    :param radius_end: end radius of the spiral
    :type radius_end: int
    :param std_gaussians_start: standard deviation of the gaussians at the start of the spiral. Linear interpolation (start, end, num_gaussians)
    :type std_gaussians_start: int
    :param std_gaussians_end: standard deviation of the gaussians at the end of the spiral
    :type std_gaussians_end: int
    :param noise: standard deviation of noise magnitude added to each datasets point
    :type noise: float
    """
    X = torch.zeros(n_samples * n_gaussians, dim)
    y = torch.zeros(n_samples * n_gaussians).long()
    angle = torch.zeros(1)
    radiuses = torch.linspace(radius_start, radius_end, n_gaussians)
    std_devs = torch.linspace(std_gaussians_start, std_gaussians_end, n_gaussians)

    def _loc(r, a):
        # Center at radius r, angle a in the (x1, x2)-plane; extra dims are zero.
        base = [r * torch.cos(a), r * torch.sin(a)]
        if dim > 2:
            base.append(torch.zeros(dim - 2))
        return torch.cat(base)

    dist = Normal(_loc(radiuses[0], angle), scale=std_devs[0])
    for i in range(n_gaussians):
        angle += 2 * math.pi / n_gaussians_per_loop
        # BUG FIX: the old dim > 2 branch dropped the radius on sin(angle) and
        # built the loc with torch.Tensor([...]) on a list of tensors, which
        # fails for dim > 3; build the loc with torch.cat instead.
        dist.loc = _loc(radiuses[i], angle)
        dist.scale = std_devs[i]
        # NOTE(review): torch.randn(dim) broadcasts the SAME noise offset over all
        # samples of one gaussian (as in the original) -- confirm this is intended.
        X[i*n_samples:(i+1)*n_samples] = dist.sample(sample_shape=(n_samples,)) + torch.randn(dim)*noise
        y[i*n_samples:(i+1)*n_samples] = i
    return X, y
def generate_diffeqml(n_samples=100, noise=1e-3) -> Tuple[Tensor, None]:
    """Samples `n_samples` 2-dim points from the DiffEqML logo.

    Three trajectories of a limit-cycle ODE plus a circle of radius 2 trace
    the logo; a Gaussian KDE is fitted on those points and then sampled.

    :param n_samples: number of datasets points in the generated dataset
    :type n_samples: int
    :param noise: standard deviation of noise magnitude added to each datasets point
    :type noise: float
    """
    mu = 1
    starts = [[0, 2], [-1.6, -1.2], [1.6, -1.2], ]
    t0, t1 = 0., 3.2
    t_eval = np.linspace(t0, t1, 500)

    def vector_field(t, state):
        # Limit-cycle dynamics that trace the logo's three curved strokes.
        r = 1 - state[0]**2 - state[1]**2
        return np.array([-state[1] + mu * state[0] * r,
                         state[0] + mu * state[1] * r]).T

    # Integrate each trajectory and collect the resulting point clouds.
    curves = [torch.tensor(solve_ivp(vector_field, [t0, t1], s0, method='LSODA', t_eval=t_eval).y.T).float()
              for s0 in starts]
    theta = torch.linspace(0, 2 * np.pi, 1000)
    curves.append(torch.cat([2 * torch.cos(theta)[:, None], 2 * torch.sin(theta)[:, None]], 1))
    points = torch.cat(curves)
    kde = KernelDensity(kernel='gaussian', bandwidth=.01)
    kde.fit(points)
    X = torch.tensor(kde.sample(n_samples) + noise * np.random.randn(n_samples, 2)).float()
    return X, None
class ToyDataset:
    """Handles the generation of classification toy datasets"""

    def generate(self, n_samples:int, dataset_type:str, **kwargs) -> TTuple:
        """Handles the generation of classification toy datasets

        Dispatches to the module-level ``generate_*`` function selected by
        ``dataset_type`` and forwards ``n_samples`` plus any extra keyword
        arguments to it.

        :param n_samples: number of datasets points in the generated dataset
        :type n_samples: int
        :param dataset_type: {'moons', 'spirals', 'spheres', 'gaussians', 'gaussians_spiral', diffeqml'}
        :type dataset_type: str
        :param dim: if 'spheres': dimension of the spheres
        :type dim: float
        :param inner_radius: if 'spheres': radius of the inner sphere
        :type inner_radius: float
        :param outer_radius: if 'spheres': radius of the outer sphere
        :type outer_radius: float
        """
        dispatch = {
            'moons': generate_moons,
            'spirals': generate_spirals,
            'spheres': generate_concentric_spheres,
            'gaussians': generate_gaussians,
            'gaussians_spiral': generate_gaussians_spiral,
            'diffeqml': generate_diffeqml,
        }
        generator = dispatch.get(dataset_type)
        if generator is None:
            # Unknown type: the original elif chain fell through and returned
            # None implicitly; preserve that.
            return None
        return generator(n_samples=n_samples, **kwargs)
| [
"numpy.random.rand",
"numpy.hstack",
"torch.sin",
"torch.pow",
"numpy.array",
"torch.cos",
"numpy.sin",
"sklearn.neighbors.KernelDensity",
"numpy.linspace",
"torch.randn",
"torch.distributions.Normal",
"numpy.ones",
"torch.Tensor",
"numpy.cos",
"numpy.random.randn",
"torch.cat",
"sci... | [((1160, 1176), 'torch.randn', 'torch.randn', (['dim'], {}), '(dim)\n', (1171, 1176), False, 'import torch\n'), ((2161, 2173), 'torch.cat', 'cat', (['samples'], {}), '(samples)\n', (2164, 2173), False, 'from torch import sqrt, pow, cat, zeros, Tensor\n'), ((2393, 2405), 'torch.cat', 'cat', (['samples'], {}), '(samples)\n', (2396, 2405), False, 'from torch import sqrt, pow, cat, zeros, Tensor\n'), ((5218, 5259), 'torch.zeros', 'torch.zeros', (['(n_samples * n_gaussians)', 'dim'], {}), '(n_samples * n_gaussians, dim)\n', (5229, 5259), False, 'import torch\n'), ((5322, 5336), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (5333, 5336), False, 'import torch\n'), ((5530, 5562), 'torch.distributions.Normal', 'Normal', (['loc'], {'scale': 'std_gaussians'}), '(loc, scale=std_gaussians)\n', (5536, 5562), False, 'from torch.distributions import Normal\n'), ((7383, 7424), 'torch.zeros', 'torch.zeros', (['(n_samples * n_gaussians)', 'dim'], {}), '(n_samples * n_gaussians, dim)\n', (7394, 7424), False, 'import torch\n'), ((7487, 7501), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (7498, 7501), False, 'import torch\n'), ((7517, 7570), 'torch.linspace', 'torch.linspace', (['radius_start', 'radius_end', 'n_gaussians'], {}), '(radius_start, radius_end, n_gaussians)\n', (7531, 7570), False, 'import torch\n'), ((7586, 7653), 'torch.linspace', 'torch.linspace', (['std_gaussians_start', 'std_gaussians_end', 'n_gaussians'], {}), '(std_gaussians_start, std_gaussians_end, n_gaussians)\n', (7600, 7653), False, 'import torch\n'), ((7868, 7898), 'torch.distributions.Normal', 'Normal', (['loc'], {'scale': 'std_devs[0]'}), '(loc, scale=std_devs[0])\n', (7874, 7898), False, 'from torch.distributions import Normal\n'), ((8844, 8868), 'numpy.linspace', 'np.linspace', (['ti', 'tf', '(500)'], {}), '(ti, tf, 500)\n', (8855, 8868), True, 'import numpy as np\n'), ((9246, 9280), 'torch.linspace', 'torch.linspace', (['(0)', '(2 * np.pi)', '(1000)'], {}), '(0, 2 * np.pi, 
1000)\n', (9260, 9280), False, 'import torch\n'), ((9370, 9382), 'torch.cat', 'torch.cat', (['X'], {}), '(X)\n', (9379, 9382), False, 'import torch\n'), ((9391, 9439), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'kernel': '"""gaussian"""', 'bandwidth': '(0.01)'}), "(kernel='gaussian', bandwidth=0.01)\n", (9404, 9439), False, 'from sklearn.neighbors import KernelDensity\n'), ((1944, 1967), 'torch.zeros', 'zeros', (['(n_samples, dim)'], {}), '((n_samples, dim))\n', (1949, 1967), False, 'from torch import sqrt, pow, cat, zeros, Tensor\n'), ((1969, 1991), 'torch.zeros', 'torch.zeros', (['n_samples'], {}), '(n_samples)\n', (1980, 1991), False, 'import torch\n'), ((2964, 3000), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', 'n_samples_out'], {}), '(0, np.pi, n_samples_out)\n', (2975, 3000), True, 'import numpy as np\n'), ((3028, 3064), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', 'n_samples_out'], {}), '(0, np.pi, n_samples_out)\n', (3039, 3064), True, 'import numpy as np\n'), ((3531, 3540), 'torch.Tensor', 'Tensor', (['X'], {}), '(X)\n', (3537, 3540), False, 'from torch import sqrt, pow, cat, zeros, Tensor\n'), ((4277, 4292), 'torch.Tensor', 'torch.Tensor', (['X'], {}), '(X)\n', (4289, 4292), False, 'import torch\n'), ((9126, 9184), 'scipy.integrate.solve_ivp', 'solve_ivp', (['odefunc', '[ti, tf]', 'x0'], {'method': '"""LSODA"""', 't_eval': 't'}), "(odefunc, [ti, tf], x0, method='LSODA', t_eval=t)\n", (9135, 9184), False, 'from scipy.integrate import solve_ivp\n'), ((2200, 2228), 'torch.zeros', 'zeros', (['(n_samples // 2, dim)'], {}), '((n_samples // 2, dim))\n', (2205, 2228), False, 'from torch import sqrt, pow, cat, zeros, Tensor\n'), ((2432, 2460), 'torch.zeros', 'zeros', (['(n_samples // 2, dim)'], {}), '((n_samples // 2, dim))\n', (2437, 2460), False, 'from torch import sqrt, pow, cat, zeros, Tensor\n'), ((3096, 3131), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', 'n_samples_in'], {}), '(0, np.pi, n_samples_in)\n', (3107, 3131), True, 
'import numpy as np\n'), ((3344, 3382), 'numpy.zeros', 'np.zeros', (['n_samples_out'], {'dtype': 'np.intp'}), '(n_samples_out, dtype=np.intp)\n', (3352, 3382), True, 'import numpy as np\n'), ((3403, 3439), 'numpy.ones', 'np.ones', (['n_samples_in'], {'dtype': 'np.intp'}), '(n_samples_in, dtype=np.intp)\n', (3410, 3439), True, 'import numpy as np\n'), ((3482, 3510), 'numpy.random.rand', 'np.random.rand', (['n_samples', '(1)'], {}), '(n_samples, 1)\n', (3496, 3510), True, 'import numpy as np\n'), ((4027, 4055), 'numpy.random.rand', 'np.random.rand', (['n_samples', '(1)'], {}), '(n_samples, 1)\n', (4041, 4055), True, 'import numpy as np\n'), ((4074, 4083), 'numpy.sin', 'np.sin', (['n'], {}), '(n)\n', (4080, 4083), True, 'import numpy as np\n'), ((4090, 4118), 'numpy.random.rand', 'np.random.rand', (['n_samples', '(1)'], {}), '(n_samples, 1)\n', (4104, 4118), True, 'import numpy as np\n'), ((5266, 5302), 'torch.zeros', 'torch.zeros', (['(n_samples * n_gaussians)'], {}), '(n_samples * n_gaussians)\n', (5277, 5302), False, 'import torch\n'), ((7431, 7467), 'torch.zeros', 'torch.zeros', (['(n_samples * n_gaussians)'], {}), '(n_samples * n_gaussians)\n', (7442, 7467), False, 'import torch\n'), ((9039, 9061), 'numpy.array', 'np.array', (['[dxdt, dydt]'], {}), '([dxdt, dydt])\n', (9047, 9061), True, 'import numpy as np\n'), ((3163, 3198), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', 'n_samples_in'], {}), '(0, np.pi, n_samples_in)\n', (3174, 3198), True, 'import numpy as np\n'), ((3225, 3262), 'numpy.append', 'np.append', (['outer_circ_x', 'inner_circ_x'], {}), '(outer_circ_x, inner_circ_x)\n', (3234, 3262), True, 'import numpy as np\n'), ((3283, 3320), 'numpy.append', 'np.append', (['outer_circ_y', 'inner_circ_y'], {}), '(outer_circ_y, inner_circ_y)\n', (3292, 3320), True, 'import numpy as np\n'), ((3542, 3551), 'torch.Tensor', 'Tensor', (['y'], {}), '(y)\n', (3548, 3551), False, 'from torch import sqrt, pow, cat, zeros, Tensor\n'), ((4011, 4020), 'numpy.cos', 
'np.cos', (['n'], {}), '(n)\n', (4017, 4020), True, 'import numpy as np\n'), ((4150, 4171), 'numpy.hstack', 'np.hstack', (['(d1x, d1y)'], {}), '((d1x, d1y))\n', (4159, 4171), True, 'import numpy as np\n'), ((4173, 4196), 'numpy.hstack', 'np.hstack', (['(-d1x, -d1y)'], {}), '((-d1x, -d1y))\n', (4182, 4196), True, 'import numpy as np\n'), ((4223, 4242), 'numpy.zeros', 'np.zeros', (['n_samples'], {}), '(n_samples)\n', (4231, 4242), True, 'import numpy as np\n'), ((4244, 4262), 'numpy.ones', 'np.ones', (['n_samples'], {}), '(n_samples)\n', (4251, 4262), True, 'import numpy as np\n'), ((4294, 4309), 'torch.Tensor', 'torch.Tensor', (['y'], {}), '(y)\n', (4306, 4309), False, 'import torch\n'), ((5420, 5440), 'torch.zeros', 'torch.zeros', (['(dim - 2)'], {}), '(dim - 2)\n', (5431, 5440), False, 'import torch\n'), ((5926, 5942), 'torch.randn', 'torch.randn', (['dim'], {}), '(dim)\n', (5937, 5942), False, 'import torch\n'), ((7748, 7768), 'torch.zeros', 'torch.zeros', (['(dim - 2)'], {}), '(dim - 2)\n', (7759, 7768), False, 'import torch\n'), ((8325, 8341), 'torch.randn', 'torch.randn', (['dim'], {}), '(dim)\n', (8336, 8341), False, 'import torch\n'), ((1205, 1214), 'torch.pow', 'pow', (['v', '(2)'], {}), '(v, 2)\n', (1208, 1214), False, 'from torch import sqrt, pow, cat, zeros, Tensor\n'), ((3944, 3972), 'numpy.random.rand', 'np.random.rand', (['n_samples', '(1)'], {}), '(n_samples, 1)\n', (3958, 3972), True, 'import numpy as np\n'), ((5377, 5393), 'torch.cos', 'torch.cos', (['angle'], {}), '(angle)\n', (5386, 5393), False, 'import torch\n'), ((5402, 5418), 'torch.sin', 'torch.sin', (['angle'], {}), '(angle)\n', (5411, 5418), False, 'import torch\n'), ((5475, 5491), 'torch.cos', 'torch.cos', (['angle'], {}), '(angle)\n', (5484, 5491), False, 'import torch\n'), ((5500, 5516), 'torch.sin', 'torch.sin', (['angle'], {}), '(angle)\n', (5509, 5516), False, 'import torch\n'), ((5708, 5724), 'torch.sin', 'torch.sin', (['angle'], {}), '(angle)\n', (5717, 5724), False, 'import 
torch\n'), ((7700, 7716), 'torch.cos', 'torch.cos', (['angle'], {}), '(angle)\n', (7709, 7716), False, 'import torch\n'), ((7730, 7746), 'torch.sin', 'torch.sin', (['angle'], {}), '(angle)\n', (7739, 7746), False, 'import torch\n'), ((7808, 7824), 'torch.cos', 'torch.cos', (['angle'], {}), '(angle)\n', (7817, 7824), False, 'import torch\n'), ((7838, 7854), 'torch.sin', 'torch.sin', (['angle'], {}), '(angle)\n', (7847, 7854), False, 'import torch\n'), ((8058, 8074), 'torch.sin', 'torch.sin', (['angle'], {}), '(angle)\n', (8067, 8074), False, 'import torch\n'), ((9202, 9223), 'torch.tensor', 'torch.tensor', (['sol.y.T'], {}), '(sol.y.T)\n', (9214, 9223), False, 'import torch\n'), ((5690, 5706), 'torch.cos', 'torch.cos', (['angle'], {}), '(angle)\n', (5699, 5706), False, 'import torch\n'), ((5733, 5753), 'torch.zeros', 'torch.zeros', (['(dim - 2)'], {}), '(dim - 2)\n', (5744, 5753), False, 'import torch\n'), ((5800, 5816), 'torch.cos', 'torch.cos', (['angle'], {}), '(angle)\n', (5809, 5816), False, 'import torch\n'), ((5825, 5841), 'torch.sin', 'torch.sin', (['angle'], {}), '(angle)\n', (5834, 5841), False, 'import torch\n'), ((8040, 8056), 'torch.cos', 'torch.cos', (['angle'], {}), '(angle)\n', (8049, 8056), False, 'import torch\n'), ((8088, 8108), 'torch.zeros', 'torch.zeros', (['(dim - 2)'], {}), '(dim - 2)\n', (8099, 8108), False, 'import torch\n'), ((8160, 8176), 'torch.cos', 'torch.cos', (['angle'], {}), '(angle)\n', (8169, 8176), False, 'import torch\n'), ((8190, 8206), 'torch.sin', 'torch.sin', (['angle'], {}), '(angle)\n', (8199, 8206), False, 'import torch\n'), ((9304, 9320), 'torch.cos', 'torch.cos', (['theta'], {}), '(theta)\n', (9313, 9320), False, 'import torch\n'), ((9332, 9348), 'torch.sin', 'torch.sin', (['theta'], {}), '(theta)\n', (9341, 9348), False, 'import torch\n'), ((9501, 9530), 'numpy.random.randn', 'np.random.randn', (['n_samples', '(2)'], {}), '(n_samples, 2)\n', (9516, 9530), True, 'import numpy as np\n')] |
#!/usr/local/bin/python
# Script to read and plot the mp3 file waveforms
import os
import wave
import matplotlib.pyplot as plt
import numpy as np
def main():
source_dir = "./WAVFiles/"
plt.figure(figsize=(12, 12))
plot_index = 1
for filename in os.listdir(source_dir):
if filename == ".DS_Store":
continue
with wave.open(source_dir + filename, "rb") as spf:
# Extract Raw Audio from File
signal = spf.readframes(-1)
signal = np.fromstring(signal, "Int16")
# Split the data into channels [COMMENTED OUT]
# channels = [[] for channel in range(spf.getnchannels())]
# for index, datum in enumerate(signal):
# channels[index % len(channels)].append(datum)
# Get time from indices
fs = spf.getframerate() # sampling frequency
time = np.linspace(0, len(signal) / fs, num=len(signal))
# Channel time calculation
# time = np.linspace(0, len(signal) / len(channels) / fs,
# num=len(signal) / len(channels))
plt.subplot(3, 3, plot_index)
plt.tight_layout()
plt.title("Signal: {}".format(filename[2:-4]))
plt.axis(ymin=-9000, ymax=9000)
plt.plot(time, signal)
# for channel in channels:
# plt.plot(time, channel)
plot_index += 1
plt.savefig("SignalWavePlots.pdf")
plt.show()
plt.clf()
if __name__ == '__main__':
main()
| [
"wave.open",
"os.listdir",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.axis",
"numpy.fromstring",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((197, 225), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (207, 225), True, 'import matplotlib.pyplot as plt\n'), ((265, 287), 'os.listdir', 'os.listdir', (['source_dir'], {}), '(source_dir)\n', (275, 287), False, 'import os\n'), ((1422, 1456), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""SignalWavePlots.pdf"""'], {}), "('SignalWavePlots.pdf')\n", (1433, 1456), True, 'import matplotlib.pyplot as plt\n'), ((1461, 1471), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1469, 1471), True, 'import matplotlib.pyplot as plt\n'), ((1476, 1485), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1483, 1485), True, 'import matplotlib.pyplot as plt\n'), ((1137, 1166), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', 'plot_index'], {}), '(3, 3, plot_index)\n', (1148, 1166), True, 'import matplotlib.pyplot as plt\n'), ((1175, 1193), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1191, 1193), True, 'import matplotlib.pyplot as plt\n'), ((1257, 1288), 'matplotlib.pyplot.axis', 'plt.axis', ([], {'ymin': '(-9000)', 'ymax': '(9000)'}), '(ymin=-9000, ymax=9000)\n', (1265, 1288), True, 'import matplotlib.pyplot as plt\n'), ((1297, 1319), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'signal'], {}), '(time, signal)\n', (1305, 1319), True, 'import matplotlib.pyplot as plt\n'), ((360, 398), 'wave.open', 'wave.open', (['(source_dir + filename)', '"""rb"""'], {}), "(source_dir + filename, 'rb')\n", (369, 398), False, 'import wave\n'), ((510, 540), 'numpy.fromstring', 'np.fromstring', (['signal', '"""Int16"""'], {}), "(signal, 'Int16')\n", (523, 540), True, 'import numpy as np\n')] |
import cv2
import numpy as np
def recognize_from_video(model, labels):
cap = cv2.VideoCapture(0)
once = True
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
# Our operations on the frame come here
img_to_show = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
gray = cv2.resize(img_to_show, dsize=(200, 200), interpolation=cv2.INTER_CUBIC)
frame = np.array(gray) * (1/255.0)
# print(frame.shape)
frame = np.expand_dims(frame, axis=0)
# print(frame.shape)
images = np.vstack([frame])
classes = model.predict(images)
if classes[0] > 0.5:
obj = labels[1]
else:
obj = labels[0]
# results = dict()
# for i in range(len(labels)):
# results[labels[i]] = round(classes[0][i] * 100, 2)
# # print(file, results)
#
# results = sorted(results.items(), key=lambda x: x[1], reverse=True)
# obj = ''
# for item in results:
# obj += '{}: ({}%) '.format(item[0], item[1])
font = cv2.FONT_HERSHEY_SIMPLEX
bottomLeftCornerOfText = (20, 40)
fontScale = 0.5
fontColor = (0, 0, 0)
lineType = 2
cv2.putText(img_to_show, obj,
bottomLeftCornerOfText,
font,
fontScale,
fontColor,
lineType)
# Display the resulting frame
cv2.imshow('Video', img_to_show)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows() | [
"cv2.putText",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"numpy.expand_dims",
"cv2.cvtColor",
"numpy.vstack",
"cv2.resize",
"cv2.waitKey"
] | [((82, 101), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (98, 101), False, 'import cv2\n'), ((1658, 1681), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1679, 1681), False, 'import cv2\n'), ((271, 309), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_RGB2BGR'], {}), '(frame, cv2.COLOR_RGB2BGR)\n', (283, 309), False, 'import cv2\n'), ((325, 397), 'cv2.resize', 'cv2.resize', (['img_to_show'], {'dsize': '(200, 200)', 'interpolation': 'cv2.INTER_CUBIC'}), '(img_to_show, dsize=(200, 200), interpolation=cv2.INTER_CUBIC)\n', (335, 397), False, 'import cv2\n'), ((487, 516), 'numpy.expand_dims', 'np.expand_dims', (['frame'], {'axis': '(0)'}), '(frame, axis=0)\n', (501, 516), True, 'import numpy as np\n'), ((564, 582), 'numpy.vstack', 'np.vstack', (['[frame]'], {}), '([frame])\n', (573, 582), True, 'import numpy as np\n'), ((1251, 1346), 'cv2.putText', 'cv2.putText', (['img_to_show', 'obj', 'bottomLeftCornerOfText', 'font', 'fontScale', 'fontColor', 'lineType'], {}), '(img_to_show, obj, bottomLeftCornerOfText, font, fontScale,\n fontColor, lineType)\n', (1262, 1346), False, 'import cv2\n'), ((1490, 1522), 'cv2.imshow', 'cv2.imshow', (['"""Video"""', 'img_to_show'], {}), "('Video', img_to_show)\n", (1500, 1522), False, 'import cv2\n'), ((415, 429), 'numpy.array', 'np.array', (['gray'], {}), '(gray)\n', (423, 429), True, 'import numpy as np\n'), ((1534, 1548), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1545, 1548), False, 'import cv2\n')] |
"""Radius-based distance."""
import numpy as np
from sklearn.metrics import pairwise_distances
def r_subloop(X,
Y,
radius=1,
function="cosine"):
"""Calculate distance from each word in word_a to each word in word_b."""
dist_mtr = pairwise_distances(X, Y, metric=function)
results = []
for x in radius:
results.append(np.sum(dist_mtr < x, 1))
return np.stack(results)
| [
"numpy.sum",
"numpy.stack",
"sklearn.metrics.pairwise_distances"
] | [((282, 323), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['X', 'Y'], {'metric': 'function'}), '(X, Y, metric=function)\n', (300, 323), False, 'from sklearn.metrics import pairwise_distances\n'), ((422, 439), 'numpy.stack', 'np.stack', (['results'], {}), '(results)\n', (430, 439), True, 'import numpy as np\n'), ((385, 408), 'numpy.sum', 'np.sum', (['(dist_mtr < x)', '(1)'], {}), '(dist_mtr < x, 1)\n', (391, 408), True, 'import numpy as np\n')] |
import os
import numpy as np
import pickle
def load_dict(dict_path):
"""Loads a dictionary in pickle format."""
with open(dict_path,'rb') as fh:
d = pickle.load(fh)
return d
def keywords2numbers(workspace,corpus_name,keywords,lowercase):
# load dictionaries for encoding words to numbers
if lowercase == True:
dict_words = load_dict(workspace+corpus_name+'/data/idx/wordslc.pickle') # lowercase
else:
dict_words = load_dict(workspace+corpus_name+'/data/idx/words.pickle')
dict_words = {v: k for k, v in dict_words.items()}
# new keywords dict
encoded_keywords = dict()
for w,keyness in keywords.items():
k = tuple(dict_words[w])
encoded_keywords[k] = (w,keyness)
return encoded_keywords
def search_node(npy_path,encoded_keywords,encoded_pos):
"""Searches the node in every text file and gets positions."""
files = os.listdir(npy_path)
data = []
dict_positions = dict()
# get positions
for filename in files:
# load arr
arr = np.load(npy_path + filename)
# get matching indexes
for k,v in encoded_keywords.items(): # k = encoded_word - v = (word,keyness)
ix = arr_search_indexes(arr,k,encoded_pos)
positions = []
for i in ix.tolist():
p = round((i / arr.shape[0])*100,2)
positions.append(p)
if k in dict_positions:
dict_positions[k]+=positions
else:
dict_positions[k]=positions
# make data
for k,v in encoded_keywords.items():
data.append( (v[0],len(dict_positions[k]),dict_positions[k],v[1]))
return data
def arr_search_indexes(arr,search_w,search_t):
"""Searches the node in arr and returns the matching indexes. (Max. 3 words)"""
ix = np.isin(arr[:,[0]],search_w)
ix = np.where(ix)[0]
return ix
def translate(positions):
"""Translate numbers back to words in a list format for kitconc KIWC object."""
dispersion = []
dpts = dict()
i = 0
total_s1 = 0
total_s2 = 0
total_s3 = 0
total_s4 = 0
total_s5 = 0
for d in positions:
i+=1
s1 = 0
s2 = 0
s3 = 0
s4 = 0
s5 = 0
dpts[d[0]] = d[2]
for point in d[2]:
p = round(point)
if p >= 0 and p <= 19:
s1+=1
elif p >= 20 and p <= 39:
s2+=1
elif p >= 40 and p <= 59:
s3+=1
elif p >= 60 and p <= 79:
s4+=1
elif p >= 80 and p <= 100:
s5+=1
total_s1 += s1
total_s2 += s2
total_s3 += s3
total_s4 += s4
total_s5 += s5
dispersion.append((i ,d[0],d[3],d[1],s1,s2,s3,s4,s5))
return ((total_s1,total_s2,total_s3,total_s4,total_s5),dpts,dispersion)
def make_keywords_dispersion(workspace,corpus_name,keywords,lowercase):
dispersion=[]
dpts = dict()
totals = tuple()
encoded_keywords = keywords2numbers(workspace, corpus_name, keywords, lowercase)
if encoded_keywords != None: # search node is OK
positions = search_node(workspace+corpus_name + '/data/npy/',encoded_keywords,None)
if len(positions)!=0:
totals,dpts,dispersion = translate(positions)
return (totals,dpts,dispersion)
| [
"os.listdir",
"numpy.where",
"pickle.load",
"numpy.isin",
"numpy.load"
] | [((927, 947), 'os.listdir', 'os.listdir', (['npy_path'], {}), '(npy_path)\n', (937, 947), False, 'import os\n'), ((1854, 1884), 'numpy.isin', 'np.isin', (['arr[:, [0]]', 'search_w'], {}), '(arr[:, [0]], search_w)\n', (1861, 1884), True, 'import numpy as np\n'), ((170, 185), 'pickle.load', 'pickle.load', (['fh'], {}), '(fh)\n', (181, 185), False, 'import pickle\n'), ((1071, 1099), 'numpy.load', 'np.load', (['(npy_path + filename)'], {}), '(npy_path + filename)\n', (1078, 1099), True, 'import numpy as np\n'), ((1892, 1904), 'numpy.where', 'np.where', (['ix'], {}), '(ix)\n', (1900, 1904), True, 'import numpy as np\n')] |
# Copyright (c) 2020 Carnegie Mellon University, <NAME> <<EMAIL>>
# For License information please see the LICENSE file in the root directory.
import numpy as np
from contextlib import suppress
from evaluator_base import ATEEvaluator, RPEEvaluator, KittiEvaluator, quats2SEs, transform_trajs
# from trajectory_transform import timestamp_associate
class TartanAirEvaluator:
@staticmethod
def evaluate_one_trajectory(gt_traj, est_traj, scale=False, kittitype=True):
"""
scale = True: calculate a global scale
"""
# load trajectories
with suppress(TypeError):
gt_traj = np.loadtxt(gt_traj)
est_traj = np.loadtxt(est_traj)
if gt_traj.shape[0] != est_traj.shape[0]:
raise Exception("POSEFILE_LENGTH_ILLEGAL")
if gt_traj.shape[1] != 7 or est_traj.shape[1] != 7:
raise Exception("POSEFILE_FORMAT_ILLEGAL")
# transform and scale
gt_traj_trans, est_traj_trans, s = transform_trajs(gt_traj, est_traj, scale)
# print(" Scale, {}".format(s))
gt_SEs, est_SEs = quats2SEs(gt_traj_trans, est_traj_trans)
(
ate_score,
gt_ate_aligned,
est_ate_aligned,
ate_rot,
ate_trans,
ate_scale,
ate_T,
) = ATEEvaluator.evaluate(gt_traj, est_traj, scale)
rpe_score = RPEEvaluator.evaluate(gt_SEs, est_SEs)
kitti_score = KittiEvaluator.evaluate(gt_SEs, est_SEs, kittitype=kittitype)
return {
"ate_score": ate_score,
"rpe_score": rpe_score,
"kitti_score": kitti_score,
"gt_aligned": gt_ate_aligned,
"est_aligned": est_ate_aligned,
"scale": s,
"ate_scale": ate_scale,
"ate_rot": ate_rot,
"ate_trans": ate_trans,
"ate_T": ate_T,
}
if __name__ == "__main__":
# scale = True for monocular track, scale = False for stereo track
result = TartanAirEvaluator.evaluate_one_trajectory("pose_gt.txt", "pose_est.txt", scale=True)
print(result)
| [
"evaluator_base.KittiEvaluator.evaluate",
"evaluator_base.ATEEvaluator.evaluate",
"numpy.loadtxt",
"evaluator_base.quats2SEs",
"contextlib.suppress",
"evaluator_base.transform_trajs",
"evaluator_base.RPEEvaluator.evaluate"
] | [((989, 1030), 'evaluator_base.transform_trajs', 'transform_trajs', (['gt_traj', 'est_traj', 'scale'], {}), '(gt_traj, est_traj, scale)\n', (1004, 1030), False, 'from evaluator_base import ATEEvaluator, RPEEvaluator, KittiEvaluator, quats2SEs, transform_trajs\n'), ((1099, 1139), 'evaluator_base.quats2SEs', 'quats2SEs', (['gt_traj_trans', 'est_traj_trans'], {}), '(gt_traj_trans, est_traj_trans)\n', (1108, 1139), False, 'from evaluator_base import ATEEvaluator, RPEEvaluator, KittiEvaluator, quats2SEs, transform_trajs\n'), ((1329, 1376), 'evaluator_base.ATEEvaluator.evaluate', 'ATEEvaluator.evaluate', (['gt_traj', 'est_traj', 'scale'], {}), '(gt_traj, est_traj, scale)\n', (1350, 1376), False, 'from evaluator_base import ATEEvaluator, RPEEvaluator, KittiEvaluator, quats2SEs, transform_trajs\n'), ((1397, 1435), 'evaluator_base.RPEEvaluator.evaluate', 'RPEEvaluator.evaluate', (['gt_SEs', 'est_SEs'], {}), '(gt_SEs, est_SEs)\n', (1418, 1435), False, 'from evaluator_base import ATEEvaluator, RPEEvaluator, KittiEvaluator, quats2SEs, transform_trajs\n'), ((1458, 1519), 'evaluator_base.KittiEvaluator.evaluate', 'KittiEvaluator.evaluate', (['gt_SEs', 'est_SEs'], {'kittitype': 'kittitype'}), '(gt_SEs, est_SEs, kittitype=kittitype)\n', (1481, 1519), False, 'from evaluator_base import ATEEvaluator, RPEEvaluator, KittiEvaluator, quats2SEs, transform_trajs\n'), ((587, 606), 'contextlib.suppress', 'suppress', (['TypeError'], {}), '(TypeError)\n', (595, 606), False, 'from contextlib import suppress\n'), ((630, 649), 'numpy.loadtxt', 'np.loadtxt', (['gt_traj'], {}), '(gt_traj)\n', (640, 649), True, 'import numpy as np\n'), ((673, 693), 'numpy.loadtxt', 'np.loadtxt', (['est_traj'], {}), '(est_traj)\n', (683, 693), True, 'import numpy as np\n')] |
import logging
import mlflow
import numpy as np
import GPyOpt
import GPy
#from numpy.random import seed
from GPyOpt.models.gpmodel import GPModel
from GPyOpt.acquisitions import AcquisitionLCB
import networkx as nx
import collections
from myBOModular import MyBOModular
from myGPModel import MyGPModel
from GPyOpt.core.task.space import Design_space
from common import Config
import random
import os
import pickle
# CLEAN UP?
from function_optimizer import GraphOverlap, GraphNonOverlap, Tree, GraphFunction, OptimalGraphFunction
from exceptions import EarlyTerminationException
def normalize(v):
norm=np.linalg.norm(v, ord=1)
if norm==0:
norm=np.finfo(v.dtype).eps
return v/norm
from datasets import ComponentFunction, SyntheticComponentFunction
import function_optimizer
class MetaLoader(type):
registry = {}
loader_ids = []
def __new__(cls, cls_name, bases, attrs):
new_class = super(cls, MetaLoader).__new__(cls, cls_name, bases, attrs)
MetaLoader.registry[cls_name] = new_class
MetaLoader.loader_ids.append(cls_name)
return new_class
@staticmethod
def get_loader_constructor(loader_id):
logging.info("Load algorithm loader[%s].", loader_id)
return MetaLoader.registry[loader_id]
class Algorithm(type, metaclass=MetaLoader):
registry = {}
algorithm_ids = []
def __new__(cls, cls_name, bases, attrs):
new_class = super(cls, Algorithm).__new__(cls, cls_name, bases, attrs)
Algorithm.registry[cls_name] = new_class
Algorithm.algorithm_ids.append(cls_name)
return new_class
@staticmethod
def get_constructor(algorithm_id):
logging.info("Using algorithm with algorithm_id[%s].", algorithm_id)
return Algorithm.registry[algorithm_id]
from febo.models.gp import GPConfig
from febo.controller.simple import SimpleControllerConfig
from febo.environment.benchmarks import BenchmarkEnvironmentConfig
from febo.solvers.candidate import GridSolverConfig
from febo.algorithms.rembo import RemboConfig
from febo.models.model import ModelConfig
from febo.environment.benchmarks import BenchmarkEnvironment
from febo.environment import DiscreteDomain, ContinuousDomain
from febo.controller import SimpleController
import febo
class AdaptorBenchmark(BenchmarkEnvironment):
def __init__(self, fn):
super().__init__(path=None)
self.fn = fn
self.mlflow_logging = self.fn.mlflow_logging
dim = self.fn.domain.dimension
L = []
U = []
# Number of points per dimension
n_points = []
# Go through each domain of the dimension and find the l and u
for d in self.fn.domain.combined_domain:
L.append(np.min(d))
U.append(np.max(d))
n_points.append(len(d))
self._domain = ContinuousDomain(np.array(L), np.array(U))
#GridSolverConfig.points_per_dimension = np.max(n_points)
RemboConfig.emb_d = self.fn.get_emb_dim()
# ??
#self._domain = DiscreteDomain(np.array([[0.0, 0.0], [1.0, 1.0]]))
self._max_value = -self.mlflow_logging.y_opt
def f(self, x):
return np.float64(-self.fn(np.array([x])))
class GADDUCBAlgorithm(object):
def __init__(self, n_iter, algorithm_random_seed, n_rand, algoID="", fn=None, **kwargs):
self.algoID = algoID
self.n_iter = n_iter
self.domain = fn.domain
self.fn = fn
self.algorithm_random_seed = algorithm_random_seed
self.n_rand = n_rand
# Use the same Random Seed everywhere
# generate init design depends on the random seed setting.
np.random.seed(algorithm_random_seed)
random.seed(algorithm_random_seed)
self.rs = np.random.RandomState(algorithm_random_seed)
self.initial_design = self.domain.random_X(self.rs, n_rand)
def get_algorithm_id(self):
return self.__class__.__name__ + self.algoID
def run(self):
raise NotImplementedError
from boattack.utilities.utilities import get_init_data
from boattack.bayesopt import Bayes_opt
from boattack.utilities.upsampler import upsample_projection
class BattackAlgorithm(GADDUCBAlgorithm):
def __init__(self, fn, model_type, acq_type, sparse='None', nsubspaces=1, batch_size=None, update_freq=None, noise_var=None, exploration_weight=None,
grid_size=None, **kwargs):
GADDUCBAlgorithm.__init__(self, fn=fn, **kwargs)
#x_init, y_init = get_init_data(obj_func=fn.f_adapted, n_init=self.n_rand, bounds=fn.x_bounds_adapted)
beta = exploration_weight
obj_func = self.fn.obj_func
nchannel = self.fn.nchannel
high_dim = self.fn.high_dim
low_dim = self.fn.low_dim
dim_reduction = self.fn.dim_reduction
results_file_name = fn.results_file_name
failed_file_name = fn.failed_file_name
logging.info(f"Results file={results_file_name}")
logging.info(f"Failed file={failed_file_name}")
X_opt_all_slices = []
Y_opt_all_slices = []
X_query_all_slices = []
Y_query_all_slices = []
X_reduced_opt_all_slices = []
X_reduced_query_all_slices = []
# Generate initial observation data for BO
if os.path.exists(results_file_name) and 'LDR' not in model_type:
logging.info('load old init data')
with open(results_file_name, 'rb') as pre_file:
previous_bo_results = pickle.load(pre_file)
x_init = previous_bo_results['X_reduced_query'][0]
y_init = previous_bo_results['Y_query'][0]
else:
logging.info('generate new init data')
# There are some significant problems with a discrete domain.
try:
#x_init, y_init = get_init_data(obj_func=fn, n_init=self.n_rand, bounds=fn.x_bounds_adapted)
# There is some strange sampling that they are doing...
x_init = self.initial_design
y_init = self.fn(x_init)
except EarlyTerminationException as e:
# Failed on init, so we fix the init problem
fn.mlflow_logging.log_battack(int(True), fn.cnn.target_label[0])
fn.mlflow_logging.log_init_y(np.min(e.metrics['y']))
while fn.mlflow_logging.t_y < self.n_iter:
fn.mlflow_logging.log_cost_ba()
fn.mlflow_logging.log_battack(int(True), fn.cnn.target_label[0])
fn.mlflow_logging.log_y(e.metrics['y'])
return
#x_init, y_init = get_init_data(obj_func=f, n_init=n_init, bounds=x_bounds)
#x_init, y_init = get_init_data(obj_func=fn.f_adapted, n_init=self.n_rand, bounds=fn.x_bounds_adapted)
logging.info(f'X init shape {x_init.shape}')
# Initialise BO
#bayes_opt = Bayes_opt(func=f, bounds=x_bounds, saving_path=failed_file_name)
bayes_opt = Bayes_opt(func=fn, bounds=fn.x_bounds_adapted, saving_path=failed_file_name, noise_var=noise_var)
bayes_opt.initialise(X_init=x_init, Y_init=y_init, model_type=model_type, acq_type=acq_type,
sparse=sparse, nsubspaces=nsubspaces, batch_size=batch_size, update_freq=update_freq,
nchannel=nchannel, high_dim=high_dim, dim_reduction=dim_reduction,
cost_metric=None, seed=self.algorithm_random_seed, beta=beta, gridSize=grid_size)
# Run BO
logging.info("Run bayes_opt")
X_query_full, Y_query, X_opt_full, Y_opt, time_record = bayes_opt.run(total_iterations=self.n_iter)
# Reduce the memory needed for storing results
if 'LDR' in model_type:
X_query = X_query_full[-2:]
X_opt = X_opt_full[-2:]
else:
X_query = X_query_full
X_opt = X_opt_full[-2:]
# Store the results
Y_opt_all_slices.append(Y_opt)
Y_query_all_slices.append(Y_query)
opt_dr_list = bayes_opt.opt_dr_list
if dim_reduction == 'NONE':
X_reduced_opt_all_slices.append(X_opt.astype(np.float16))
X_reduced_query_all_slices.append(X_query.astype(np.float16))
X_query_all_slices.append(X_query)
X_opt_all_slices.append(X_opt)
logging.info(f'Y_opt={Y_opt[-1]}, X_opt shape{X_opt.shape}, X_h_opt shape{X_opt.shape}, '
f'X_query shape{X_query.shape}, X_h_query shape{X_query.shape}, opt_dr={opt_dr_list[-1]}')
else:
X_reduced_opt_all_slices.append(X_opt.astype(np.float16))
X_reduced_query_all_slices.append(X_query.astype(np.float16))
# Transform data from reduced search space to original high-dimensional input space
X_h_query = upsample_projection(dim_reduction, X_query, low_dim=low_dim, high_dim=high_dim,
nchannel=nchannel)
X_query_all_slices.append(X_h_query)
X_h_opt = upsample_projection(dim_reduction, X_opt, low_dim=low_dim, high_dim=high_dim,
nchannel=nchannel)
X_opt_all_slices.append(X_h_opt)
logging.info(f'Y_opt={Y_opt[-1]}, X_opt shape{X_opt.shape}, X_h_opt shape{X_h_opt.shape}, '
f'X_query shape{X_query.shape}, X_h_query shape{X_h_query.shape}')
# For ImageNet images, save only the L_inf norm and L2 norm instead of the adversarial image
if 'imagenet' in obj_func:
l_inf_sum = np.abs(X_h_opt[-1, :]).sum()
l_2_norm = np.sqrt(np.sum((epsilon * X_h_opt[-1, :].ravel()) ** 2))
X_opt_all_slices = [l_inf_sum]
X_query_all_slices = [l_2_norm]
# Save the results locally
results = {'X_opt': X_opt_all_slices,
'Y_opt': Y_opt_all_slices,
'X_query': X_query_all_slices,
'Y_query': Y_query_all_slices,
'X_reduced_opt': X_reduced_opt_all_slices,
'X_reduced_query': X_reduced_query_all_slices,
'dr_opt_list': opt_dr_list,
'runtime': time_record}
with open(results_file_name, 'wb') as file:
pickle.dump(results, file)
def run(self):
logging.info("RUN")
def FEBO_Algorithm_Cls(self):
raise NotImplementedError
class BoAttack(BattackAlgorithm, metaclass=Algorithm):
def FEBO_Algorithm_Cls(self):
return febo.algorithms.Random
class FEBOAlgorithm(GADDUCBAlgorithm):
def __init__(self, initial_kernel_params=None, noise_var=None, **kwargs):
GADDUCBAlgorithm.__init__(self, **kwargs)
# Config the FEBO domains
GPConfig.noise_var = noise_var
# Default is RBF
if not 'gpy_kernel' in initial_kernel_params:
initial_kernel_params['gpy_kernel'] = 'GPy.kern.RBF'
GPConfig.kernels = [(initial_kernel_params['gpy_kernel'], {'variance': initial_kernel_params['variance'], 'lengthscale': initial_kernel_params['lengthscale'] , 'ARD': True})]
SimpleControllerConfig.T = self.n_iter
SimpleControllerConfig.best_predicted_every = 1
self.linebo_env = AdaptorBenchmark(self.fn)
_data = []
for x in self.initial_design:
y = self.fn(np.array([x]))
evaluation = np.empty(shape=(), dtype=self.linebo_env.dtype)
evaluation["x"] = x
evaluation["y"] = -y
evaluation["y_exact"] = -y
evaluation["y_max"] = self.linebo_env._max_value
_data.append(evaluation)
self.initial_data = _data
# Attempt to return f instead of y if that exist
self.fn.mlflow_logging.log_init_y(np.min(self.fn.history_y))
def run(self):
# Setup
s = None
try:
FEBO_Algo = self.FEBO_Algorithm_Cls()
s = AdaptorController(fn=self.fn, algorithm=FEBO_Algo(), environment=self.linebo_env)
s.initialize(algo_kwargs = dict(initial_data=self.initial_data))
s.run()
except Exception as e:
logging.exception("Exception")
finally:
if s:
s.finalize()
def FEBO_Algorithm_Cls(self):
raise NotImplementedError
class FEBO_Random(FEBOAlgorithm, metaclass=Algorithm):
def FEBO_Algorithm_Cls(self):
return febo.algorithms.Random
class NelderMead(FEBOAlgorithm, metaclass=Algorithm):
def FEBO_Algorithm_Cls(self):
return febo.algorithms.NelderMead
class RandomLineBO(FEBOAlgorithm, metaclass=Algorithm):
def FEBO_Algorithm_Cls(self):
return febo.algorithms.RandomLineBO
class CoordinateLineBO(FEBOAlgorithm, metaclass=Algorithm):
def FEBO_Algorithm_Cls(self):
return febo.algorithms.CoordinateLineBO
class AscentLineBO(FEBOAlgorithm, metaclass=Algorithm):
def FEBO_Algorithm_Cls(self):
return febo.algorithms.AscentLineBO
class UCB(FEBOAlgorithm, metaclass=Algorithm):
def FEBO_Algorithm_Cls(self):
return febo.algorithms.UCB
class Rembo(FEBOAlgorithm, metaclass=Algorithm):
def FEBO_Algorithm_Cls(self):
from febo.algorithms.rembo import Rembo
return Rembo
class InterleavedRembo(FEBOAlgorithm, metaclass=Algorithm):
def FEBO_Algorithm_Cls(self):
from febo.algorithms.rembo import InterleavedRembo
return InterleavedRembo
class AdaptorController(SimpleController):
def __init__(self, fn, *args, **kwargs):
super(AdaptorController, self).__init__(*args, **kwargs)
self.fn = fn
def run(self):
logging.info(f"Starting optimization: {self.algorithm.name}")
# interaction loop
while not self._exit:
self._run_step()
evaluation = self._data[-1]
self.fn.mlflow_logging.log_y(np.min(self.fn.history_y[-1]))
# Random algorithm
class Random(GADDUCBAlgorithm, metaclass=Algorithm):
def __init__(self, **kwargs):
GADDUCBAlgorithm.__init__(self, **kwargs)
self.mlflow_logging = self.fn.mlflow_logging
def run(self):
f = self.fn
initial_design = self.initial_design
n_iter = self.n_iter
initial_design_iter = self.domain.random_X(self.rs, n_iter)
Y = []
Y_best = []
X_rand = []
y_best = np.inf
for x in initial_design:
y = f(np.array([x]))
Y.append(y)
if y < y_best:
y_best = y
Y_best.append(y_best)
X_rand.append(x)
self.mlflow_logging.log_init_y(np.min(self.fn.history_y))
for x in initial_design_iter:
y = f(np.array([x]))
self.mlflow_logging.log_y(np.min(self.fn.history_y[-1]))
Y.append(y)
if y < y_best:
y_best = y
Y_best.append(y_best)
X_rand.append(x)
return Y_best, Y, X_rand
# BayesianOptimization algorithm
class BayesianOptimization(GADDUCBAlgorithm):
def __init__(self, algorithm_random_seed, lengthscaleNumIter, n_iter, initial_graph=None, initial_kernel_params=None, learnDependencyStructureRate=50,
learnParameterRate=None, graphSamplingNumIter=100, fully_optimize_lengthscales=False, exploration_weight=2,
normalize_Y=False, eps=-1, noise_var=0., max_eval=-1, p = 0.5, M=0, max_group_size=0, opt_restart=None, param_exploration=0.1,
acq_opt_restarts=1, **kwargs):
GADDUCBAlgorithm.__init__(self, n_iter, algorithm_random_seed, **kwargs)
self.learnDependencyStructureRate=learnDependencyStructureRate
self.learnParameterRate = learnParameterRate
self.graphSamplingNumIter=graphSamplingNumIter
self.lengthscaleNumIter=lengthscaleNumIter
self.fully_optimize_lengthscales=fully_optimize_lengthscales
self.exploration_weight=exploration_weight
self.normalize_Y=normalize_Y
self.eps=eps
self.noise_var=noise_var
self.max_eval=max_eval
self.p=p
self.acq_opt_restarts = acq_opt_restarts
self.result_path=Config().base_path
# Additional Param
self.exact_feval = False
# GF should be init here
# TODO
dim = self.fn.domain.dimension
if initial_graph is None:
initial_graph = nx.empty_graph(dim)
self.initial_graph = initial_graph
self.graph_function = self.get_GraphFunction()(self.initial_graph, initial_kernel_params)
assert(dim == self.graph_function.dimension())
dim = self.graph_function.dimension()
if M == 0:
M = dim
if max_group_size == 0:
max_group_size = dim
self.M=M
self.max_group_size=max_group_size
self.opt_restart = opt_restart
self.param_exploration = param_exploration
def run(self):
try:
mybo = MyBOModular(self.domain, self.initial_design, self.graph_function,
max_eval=self.max_eval, fn=self.fn, fn_optimizer=self.make_fn_optimizer(),
noise_var=self.noise_var, exact_feval=self.exact_feval,
exploration_weight_function=self.exploration_weight,
learnDependencyStructureRate=self.learnDependencyStructureRate,
learnParameterRate=self.learnParameterRate,
normalize_Y=self.normalize_Y,
acq_opt_restarts=self.acq_opt_restarts)
except EarlyTerminationException as e:
self.fn.mlflow_logging.log_cost_ba()
self.fn.mlflow_logging.log_y(e.metrics['y'])
while self.fn.mlflow_logging.t_y < self.n_iter:
self.fn.mlflow_logging.log_cost_ba()
self.fn.mlflow_logging.log_battack(int(True), self.fn.cnn.target_label[0])
self.fn.mlflow_logging.log_y(e.metrics['y'])
return None, None
if self.n_iter > 0:
try:
mybo.run_optimization(self.n_iter, eps=self.eps)
except EarlyTerminationException as e:
cost_metrics = self.fn.mlflow_logging.cost_metrics
ba_metrics = self.fn.mlflow_logging.ba_metrics
self.fn.mlflow_logging.log_y(e.metrics['y'])
while self.fn.mlflow_logging.t_y < self.n_iter:
self.fn.mlflow_logging.log_cost(cost_metrics['acq_cost'])
self.fn.mlflow_logging.log_battack(**ba_metrics)
self.fn.mlflow_logging.log_y(e.metrics['y'])
# np.save(os.path.join(self.result_path,'all_graphs.npy'), mybo.all_graphs)
return mybo.Y.flatten(), mybo
def FnOptimizer(self):
raise NotImplementedError
def make_fn_optimizer(self):
FnOptimizer = self.FnOptimizer()
# Update M and max_group_size just in case its not specified
return FnOptimizer(graphSamplingNumIter=self.graphSamplingNumIter, lengthscaleNumIter=self.lengthscaleNumIter, cycles=self.cycles,
fully_optimize_lengthscales=self.fully_optimize_lengthscales, p=self.p, M=self.M, max_group_size=self.max_group_size, sigma2=self.noise_var,
opt_restart=self.opt_restart, param_exploration=self.param_exploration)
def get_GraphFunction(self):
return GraphFunction
class GraphOverlap(BayesianOptimization, metaclass=Algorithm):
__metaclass__ = Algorithm
def __init__(self, **kwargs):
BayesianOptimization.__init__(self, **kwargs)
def FnOptimizer(self):
self.cycles = True
return function_optimizer.GraphOverlap
class GraphNonOverlap(BayesianOptimization, metaclass=Algorithm):
__metaclass__ = Algorithm
def __init__(self, **kwargs):
BayesianOptimization.__init__(self, **kwargs)
def FnOptimizer(self):
self.cycles = True
return function_optimizer.GraphNonOverlap
class Tree(BayesianOptimization, metaclass=Algorithm):
__metaclass__ = Algorithm
def __init__(self, **kwargs):
BayesianOptimization.__init__(self, **kwargs)
def FnOptimizer(self):
self.cycles = False
return function_optimizer.Tree
class Optimal(BayesianOptimization, metaclass=Algorithm):
    """Oracle baseline: Bayesian optimization given the true graph, the true
    kernel parameters and zero observation noise, with dependency-structure
    learning disabled (learnDependencyStructureRate is forced to -1).
    The `initial_kernel_params` and `learnDependencyStructureRate` arguments
    are accepted for interface compatibility but deliberately ignored.
    """
    __metaclass__ = Algorithm
    def __init__(self, n_iter, initial_kernel_params, learnDependencyStructureRate, fn, **kwargs):
        self.fn = fn
        # Make sure the fn that it accesses is the true fn without noise
        self.fn.__call__ = self.fn.eval
        logging.info("Ignoring intial_kernel_params and noise_var.")
        # Redefine the inital kernel params to the true kernel
        initial_kernel_params = self.fn.kernel_params
        # n_iter + kwargs['n_rand'] + 10
        # TODO Should take lengthscale from function
        BayesianOptimization.__init__(self, n_iter=n_iter, initial_graph=fn.graph, initial_kernel_params=initial_kernel_params,
                                      learnDependencyStructureRate=-1, fn = fn, **kwargs)
        # We also use the optimal lengthscale
        # We also tweak the exportation to be 0
        # Evaluations are treated as exact: no modeled nor injected noise.
        self.noise_var = 0
        self.exact_feval = True
        self.fn.fn_noise_sd = 0
        '''
        self.exploration_weight = 0
        self.noise_var = 0
        self.exact_feval = True
        self.fn.fn_noise_var = 0
        '''
        # The following is a new field
        logging.info("Using True Graph = {}".format(self.initial_graph.edges()))
        logging.info("exploration_weight = {}".format(self.exploration_weight))
        logging.info("noise_var = {}".format(self.noise_var))
        logging.info("exact_feval = {}".format(self.exact_feval))
    def make_fn_optimizer(self):
        # No structure optimizer is needed: the true graph is fixed.
        return None
    def get_GraphFunction(self):
        return OptimalGraphFunction
| [
"networkx.empty_graph",
"logging.exception",
"numpy.array",
"numpy.linalg.norm",
"logging.info",
"numpy.random.RandomState",
"os.path.exists",
"boattack.bayesopt.Bayes_opt",
"boattack.utilities.upsampler.upsample_projection",
"numpy.max",
"numpy.empty",
"numpy.random.seed",
"numpy.min",
"n... | [((611, 635), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {'ord': '(1)'}), '(v, ord=1)\n', (625, 635), True, 'import numpy as np\n'), ((1179, 1232), 'logging.info', 'logging.info', (['"""Load algorithm loader[%s]."""', 'loader_id'], {}), "('Load algorithm loader[%s].', loader_id)\n", (1191, 1232), False, 'import logging\n'), ((1679, 1747), 'logging.info', 'logging.info', (['"""Using algorithm with algorithm_id[%s]."""', 'algorithm_id'], {}), "('Using algorithm with algorithm_id[%s].', algorithm_id)\n", (1691, 1747), False, 'import logging\n'), ((3696, 3733), 'numpy.random.seed', 'np.random.seed', (['algorithm_random_seed'], {}), '(algorithm_random_seed)\n', (3710, 3733), True, 'import numpy as np\n'), ((3742, 3776), 'random.seed', 'random.seed', (['algorithm_random_seed'], {}), '(algorithm_random_seed)\n', (3753, 3776), False, 'import random\n'), ((3795, 3839), 'numpy.random.RandomState', 'np.random.RandomState', (['algorithm_random_seed'], {}), '(algorithm_random_seed)\n', (3816, 3839), True, 'import numpy as np\n'), ((4940, 4989), 'logging.info', 'logging.info', (['f"""Results file={results_file_name}"""'], {}), "(f'Results file={results_file_name}')\n", (4952, 4989), False, 'import logging\n'), ((4998, 5045), 'logging.info', 'logging.info', (['f"""Failed file={failed_file_name}"""'], {}), "(f'Failed file={failed_file_name}')\n", (5010, 5045), False, 'import logging\n'), ((6869, 6913), 'logging.info', 'logging.info', (['f"""X init shape {x_init.shape}"""'], {}), "(f'X init shape {x_init.shape}')\n", (6881, 6913), False, 'import logging\n'), ((7045, 7146), 'boattack.bayesopt.Bayes_opt', 'Bayes_opt', ([], {'func': 'fn', 'bounds': 'fn.x_bounds_adapted', 'saving_path': 'failed_file_name', 'noise_var': 'noise_var'}), '(func=fn, bounds=fn.x_bounds_adapted, saving_path=failed_file_name,\n noise_var=noise_var)\n', (7054, 7146), False, 'from boattack.bayesopt import Bayes_opt\n'), ((7601, 7630), 'logging.info', 'logging.info', (['"""Run bayes_opt"""'], {}), 
"('Run bayes_opt')\n", (7613, 7630), False, 'import logging\n'), ((10426, 10445), 'logging.info', 'logging.info', (['"""RUN"""'], {}), "('RUN')\n", (10438, 10445), False, 'import logging\n'), ((13795, 13856), 'logging.info', 'logging.info', (['f"""Starting optimization: {self.algorithm.name}"""'], {}), "(f'Starting optimization: {self.algorithm.name}')\n", (13807, 13856), False, 'import logging\n'), ((20817, 20877), 'logging.info', 'logging.info', (['"""Ignoring intial_kernel_params and noise_var."""'], {}), "('Ignoring intial_kernel_params and noise_var.')\n", (20829, 20877), False, 'import logging\n'), ((665, 682), 'numpy.finfo', 'np.finfo', (['v.dtype'], {}), '(v.dtype)\n', (673, 682), True, 'import numpy as np\n'), ((2869, 2880), 'numpy.array', 'np.array', (['L'], {}), '(L)\n', (2877, 2880), True, 'import numpy as np\n'), ((2882, 2893), 'numpy.array', 'np.array', (['U'], {}), '(U)\n', (2890, 2893), True, 'import numpy as np\n'), ((5313, 5346), 'os.path.exists', 'os.path.exists', (['results_file_name'], {}), '(results_file_name)\n', (5327, 5346), False, 'import os\n'), ((5388, 5422), 'logging.info', 'logging.info', (['"""load old init data"""'], {}), "('load old init data')\n", (5400, 5422), False, 'import logging\n'), ((5687, 5725), 'logging.info', 'logging.info', (['"""generate new init data"""'], {}), "('generate new init data')\n", (5699, 5725), False, 'import logging\n'), ((8426, 8612), 'logging.info', 'logging.info', (['f"""Y_opt={Y_opt[-1]}, X_opt shape{X_opt.shape}, X_h_opt shape{X_opt.shape}, X_query shape{X_query.shape}, X_h_query shape{X_query.shape}, opt_dr={opt_dr_list[-1]}"""'], {}), "(\n f'Y_opt={Y_opt[-1]}, X_opt shape{X_opt.shape}, X_h_opt shape{X_opt.shape}, X_query shape{X_query.shape}, X_h_query shape{X_query.shape}, opt_dr={opt_dr_list[-1]}'\n )\n", (8438, 8612), False, 'import logging\n'), ((8906, 9009), 'boattack.utilities.upsampler.upsample_projection', 'upsample_projection', (['dim_reduction', 'X_query'], {'low_dim': 'low_dim', 
'high_dim': 'high_dim', 'nchannel': 'nchannel'}), '(dim_reduction, X_query, low_dim=low_dim, high_dim=\n high_dim, nchannel=nchannel)\n', (8925, 9009), False, 'from boattack.utilities.upsampler import upsample_projection\n'), ((9120, 9221), 'boattack.utilities.upsampler.upsample_projection', 'upsample_projection', (['dim_reduction', 'X_opt'], {'low_dim': 'low_dim', 'high_dim': 'high_dim', 'nchannel': 'nchannel'}), '(dim_reduction, X_opt, low_dim=low_dim, high_dim=\n high_dim, nchannel=nchannel)\n', (9139, 9221), False, 'from boattack.utilities.upsampler import upsample_projection\n'), ((9318, 9482), 'logging.info', 'logging.info', (['f"""Y_opt={Y_opt[-1]}, X_opt shape{X_opt.shape}, X_h_opt shape{X_h_opt.shape}, X_query shape{X_query.shape}, X_h_query shape{X_h_query.shape}"""'], {}), "(\n f'Y_opt={Y_opt[-1]}, X_opt shape{X_opt.shape}, X_h_opt shape{X_h_opt.shape}, X_query shape{X_query.shape}, X_h_query shape{X_h_query.shape}'\n )\n", (9330, 9482), False, 'import logging\n'), ((10371, 10397), 'pickle.dump', 'pickle.dump', (['results', 'file'], {}), '(results, file)\n', (10382, 10397), False, 'import pickle\n'), ((11506, 11553), 'numpy.empty', 'np.empty', ([], {'shape': '()', 'dtype': 'self.linebo_env.dtype'}), '(shape=(), dtype=self.linebo_env.dtype)\n', (11514, 11553), True, 'import numpy as np\n'), ((11912, 11937), 'numpy.min', 'np.min', (['self.fn.history_y'], {}), '(self.fn.history_y)\n', (11918, 11937), True, 'import numpy as np\n'), ((14795, 14820), 'numpy.min', 'np.min', (['self.fn.history_y'], {}), '(self.fn.history_y)\n', (14801, 14820), True, 'import numpy as np\n'), ((16308, 16316), 'common.Config', 'Config', ([], {}), '()\n', (16314, 16316), False, 'from common import Config\n'), ((16545, 16564), 'networkx.empty_graph', 'nx.empty_graph', (['dim'], {}), '(dim)\n', (16559, 16564), True, 'import networkx as nx\n'), ((2737, 2746), 'numpy.min', 'np.min', (['d'], {}), '(d)\n', (2743, 2746), True, 'import numpy as np\n'), ((2769, 2778), 'numpy.max', 'np.max', 
(['d'], {}), '(d)\n', (2775, 2778), True, 'import numpy as np\n'), ((5521, 5542), 'pickle.load', 'pickle.load', (['pre_file'], {}), '(pre_file)\n', (5532, 5542), False, 'import pickle\n'), ((11466, 11479), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (11474, 11479), True, 'import numpy as np\n'), ((12302, 12332), 'logging.exception', 'logging.exception', (['"""Exception"""'], {}), "('Exception')\n", (12319, 12332), False, 'import logging\n'), ((14037, 14066), 'numpy.min', 'np.min', (['self.fn.history_y[-1]'], {}), '(self.fn.history_y[-1])\n', (14043, 14066), True, 'import numpy as np\n'), ((14599, 14612), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (14607, 14612), True, 'import numpy as np\n'), ((14879, 14892), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (14887, 14892), True, 'import numpy as np\n'), ((14932, 14961), 'numpy.min', 'np.min', (['self.fn.history_y[-1]'], {}), '(self.fn.history_y[-1])\n', (14938, 14961), True, 'import numpy as np\n'), ((3234, 3247), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (3242, 3247), True, 'import numpy as np\n'), ((9658, 9680), 'numpy.abs', 'np.abs', (['X_h_opt[-1, :]'], {}), '(X_h_opt[-1, :])\n', (9664, 9680), True, 'import numpy as np\n'), ((6353, 6375), 'numpy.min', 'np.min', (["e.metrics['y']"], {}), "(e.metrics['y'])\n", (6359, 6375), True, 'import numpy as np\n')] |
"""
State-Action-Reward-State-Action (sars'a') learning algorithm implementation
"""
from __future__ import division, print_function, absolute_import
import theano.tensor as T
import theano
import numpy as np
from lasagne.objectives import squared_error
from .helpers import get_end_indicator, get_action_Qvalues
from ..utils.grad import consider_constant
from ..utils import create_shared
# Default reward discount (gamma = 0.99), built with the project's
# create_shared helper — presumably a theano shared variable so it can be
# swapped at runtime without rebuilding the graph (see ..utils.create_shared).
default_gamma = create_shared('sarsa_gamma_default', np.float32(0.99), theano.config.floatX)
def get_reference_Qvalues(Qvalues,
                          actions,
                          rewards,
                          gamma_or_gammas=default_gamma,
                          qvalues_after_end="zeros"
                          ):
    """
    Returns reference Qvalues according to State-Action-Reward-State-Action (SARSA) algorithm
    :param Qvalues: [batch,tick,action_id] - predicted Q-values
    :param actions: [batch,tick] - committed actions
    :param rewards: [batch,tick] - immediate rewards for taking actions at given time ticks
    :param gamma_or_gammas: a single value or array[batch,tick](can broadcast dimensions) of delayed reward discounts
    :param qvalues_after_end: symbolic expression for "future rewards" term for last tick used for reference only.
                            Defaults at  T.zeros_like(rewards[:,0,None])
                            If you wish to simply ignore the last tick, use defaults and crop output's last tick ( qref[:,:-1] )
    :return: Qreference - reference Q-values at [batch,tick] using formula
            Q reference [batch,action_at_tick] = rewards[t] + gamma_or_gammas * Qs(t+1,action[t+1])
            Where action[t+1] is simply action that agent took at next time tick [padded with qvalues_after_end]
    """
    if qvalues_after_end == "zeros":
        # Default "reward after the final tick" is zero for every sample.
        qvalues_after_end = T.zeros_like(rewards[:, 0, None])
    # Q-values for "next" states (missing last tick): float[batch,tick-1,action]
    next_Qvalues_predicted = Qvalues[:, 1:]
    # actions committed at next ticks (missing last tick): int[batch,tick-1]
    next_actions = actions[:, 1:]
    # Q(s_{t+1}, a_{t+1}) for every tick except the last one.
    future_rewards_estimate = get_action_Qvalues(next_Qvalues_predicted, next_actions)
    # adding the last tick
    future_rewards_estimate = T.concatenate(
        [
            future_rewards_estimate,
            qvalues_after_end,
        ],
        axis=1
    )
    # full Q-value formula (SARSA algorithm)
    reference_Qvalues = rewards + gamma_or_gammas * future_rewards_estimate
    return reference_Qvalues
def get_elementwise_objective(Qvalues,
                              actions,
                              rewards,
                              is_alive="always",
                              Qvalues_target=None,
                              gamma_or_gammas=0.95,
                              crop_last=True,
                              force_qvalues_after_end=True,
                              qvalues_after_end="zeros",
                              consider_reference_constant=True, ):
    """
    Returns squared error between predicted and reference Qvalues according to Q-learning algorithm
        Qreference(state,action) = reward(state,action) + gamma* Q(next_state,next_action)
        loss = mean over (Qvalues - Qreference)**2
    :param Qvalues: [batch,tick,action_id] - predicted qvalues
    :param actions: [batch,tick] - committed actions
    :param rewards: [batch,tick] - immediate rewards for taking actions at given time ticks
    :param is_alive: [batch,tick] - whether given session is still active at given tick. Defaults to always active.
                            Default value of is_alive implies a simplified computation algorithm for Qlearning loss
    :param Qvalues_target: Older snapshot Qvalues (e.g. from a target network). If None, uses current Qvalues
    :param gamma_or_gammas: a single value or array[batch,tick](can broadcast dimensions) of delayed reward discounts
    :param crop_last: if True, zeros-out loss at final tick, if False - computes loss VS Qvalues_after_end
    :param force_qvalues_after_end: if true, sets reference Qvalues at session end to rewards[end] + qvalues_after_end
    :param qvalues_after_end: [batch,1,n_actions] - symbolic expression for "next state q-values" for last tick used for reference only.
                            Defaults at  T.zeros_like(Qvalues[:,0,None,:])
                            If you wish to simply ignore the last tick, use defaults and crop output's last tick ( qref[:,:-1] )
    :param consider_reference_constant: whether or not zero-out gradient flow through reference_Qvalues
            (True is highly recommended unless you know what you're doing)
    :return: tensor [batch, tick] of squared errors over Qvalues (using formula above for loss)
    """
    if Qvalues_target is None:
        Qvalues_target = Qvalues
    # Shape sanity checks: Q-values are 3-D, actions/rewards (and is_alive) 2-D.
    assert Qvalues.ndim == Qvalues_target.ndim == 3
    assert actions.ndim == rewards.ndim ==2
    if is_alive != 'always': assert is_alive.ndim==2
    # get reference Qvalues via Q-learning algorithm
    reference_Qvalues = get_reference_Qvalues(Qvalues_target, actions, rewards,
                                              gamma_or_gammas=gamma_or_gammas,
                                              qvalues_after_end=qvalues_after_end,
                                              )
    if consider_reference_constant:
        # do not pass gradient through reference Q-values (since they DO depend on Q-values by default)
        reference_Qvalues = consider_constant(reference_Qvalues)
    # get predicted qvalues for committed actions (to compare with reference Q-values)
    action_Qvalues = get_action_Qvalues(Qvalues, actions)
    # if agent is always alive, return the simplified loss
    if is_alive == "always":
        # tensor of element-wise squared errors
        elwise_squared_error = squared_error(reference_Qvalues, action_Qvalues)
    else:
        # we are given an is_alive matrix : uint8[batch,tick]
        # if asked to force reference_Q[end_tick+1,a] = 0, do it
        # note: if agent is always alive, this is meaningless
        if force_qvalues_after_end:
            # set future rewards at session end to rewards + qvalues_after_end
            end_ids = get_end_indicator(is_alive, force_end_at_t_max=True).nonzero()
            if qvalues_after_end == "zeros":
                # "set reference Q-values at end action ids to just the immediate rewards"
                reference_Qvalues = T.set_subtensor(reference_Qvalues[end_ids], rewards[end_ids])
            else:
                last_optimal_rewards = T.zeros_like(rewards[:, 0])
                # "set reference Q-values at end action ids to the immediate rewards + qvalues after end"
                reference_Qvalues = T.set_subtensor(reference_Qvalues[end_ids],
                                                    rewards[end_ids] + gamma_or_gammas * last_optimal_rewards[
                                                        end_ids[0], 0]
                                                    )
        # tensor of element-wise squared errors
        elwise_squared_error = squared_error(reference_Qvalues, action_Qvalues)
        # zero-out loss after session ended
        elwise_squared_error = elwise_squared_error * is_alive
    if crop_last:
        # Zero out the final tick: its reference was built from the padded
        # "after end" term rather than a real next transition.
        elwise_squared_error = T.set_subtensor(elwise_squared_error[:,-1],0)
    return elwise_squared_error
| [
"theano.tensor.zeros_like",
"theano.tensor.set_subtensor",
"lasagne.objectives.squared_error",
"theano.tensor.concatenate",
"numpy.float32"
] | [((447, 463), 'numpy.float32', 'np.float32', (['(0.99)'], {}), '(0.99)\n', (457, 463), True, 'import numpy as np\n'), ((2243, 2310), 'theano.tensor.concatenate', 'T.concatenate', (['[future_rewards_estimate, qvalues_after_end]'], {'axis': '(1)'}), '([future_rewards_estimate, qvalues_after_end], axis=1)\n', (2256, 2310), True, 'import theano.tensor as T\n'), ((1825, 1858), 'theano.tensor.zeros_like', 'T.zeros_like', (['rewards[:, 0, None]'], {}), '(rewards[:, 0, None])\n', (1837, 1858), True, 'import theano.tensor as T\n'), ((5869, 5917), 'lasagne.objectives.squared_error', 'squared_error', (['reference_Qvalues', 'action_Qvalues'], {}), '(reference_Qvalues, action_Qvalues)\n', (5882, 5917), False, 'from lasagne.objectives import squared_error\n'), ((7143, 7191), 'lasagne.objectives.squared_error', 'squared_error', (['reference_Qvalues', 'action_Qvalues'], {}), '(reference_Qvalues, action_Qvalues)\n', (7156, 7191), False, 'from lasagne.objectives import squared_error\n'), ((7355, 7402), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['elwise_squared_error[:, -1]', '(0)'], {}), '(elwise_squared_error[:, -1], 0)\n', (7370, 7402), True, 'import theano.tensor as T\n'), ((6493, 6554), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['reference_Qvalues[end_ids]', 'rewards[end_ids]'], {}), '(reference_Qvalues[end_ids], rewards[end_ids])\n', (6508, 6554), True, 'import theano.tensor as T\n'), ((6612, 6639), 'theano.tensor.zeros_like', 'T.zeros_like', (['rewards[:, 0]'], {}), '(rewards[:, 0])\n', (6624, 6639), True, 'import theano.tensor as T\n'), ((6783, 6905), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['reference_Qvalues[end_ids]', '(rewards[end_ids] + gamma_or_gammas * last_optimal_rewards[end_ids[0], 0])'], {}), '(reference_Qvalues[end_ids], rewards[end_ids] + \n gamma_or_gammas * last_optimal_rewards[end_ids[0], 0])\n', (6798, 6905), True, 'import theano.tensor as T\n')] |
import argparse
import logging
from pathlib import Path
import numpy as np
import pandas as pd
# Module-level logging setup: INFO verbosity, dedicated "Filter" logger.
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("Filter")
def filter_dataset(
    smiles: pd.DataFrame,
    activity: pd.DataFrame,
    selected_molregno: set,
    affinity_threshold: float,
    keep_null: bool,
) -> pd.DataFrame:
    """Filters raw dataset and binarize affinity value.

    For each selected molecule, the mean of its known pchembl affinities
    (NaNs ignored) is compared against ``affinity_threshold``; for a single
    measurement this reduces to the value itself, so both cases share one
    code path. A molecule whose measurements are all null (or that has no
    measurements at all) is dropped when ``keep_null`` is False, otherwise
    kept as inactive. This fixes the previous behavior where a molecule with
    several all-null measurements was kept as inactive even when
    ``keep_null`` was False, and where a molecule missing from ``activity``
    raised an IndexError.

    Args:
        smiles (pd.DataFrame): Smiles dataset with ``molregno`` and
            ``canonical_smiles`` columns.
        activity (pd.DataFrame): Activity dataset with ``molregno`` and
            ``pchembl_value`` columns.
        selected_molregno (set): molregno of molecules to save.
        affinity_threshold (float): Minimal value to consider a ligand to be active.
        keep_null (bool): Keep molecules with null affinity value. Consider null
            molecules as not active, if true.

    Returns:
        pd.DataFrame: filtered dataset with ``smiles`` and binary
        ``filtered_affinity`` columns, ordered by molregno (deterministic,
        unlike the arbitrary set-iteration order used before).
    """
    filtered_smiles = []
    filtered_affinity = []
    # sorted() makes the output row order reproducible across runs.
    for molregno in sorted(selected_molregno):
        all_affinities = activity.loc[activity["molregno"] == molregno, "pchembl_value"]
        # NaN-safe mean: NaN iff there are no non-null measurements.
        mean_affinity = all_affinities.mean(skipna=True)
        if np.isnan(mean_affinity):
            # All measurements are null (or none exist): honor keep_null
            # consistently regardless of how many null rows there are.
            if not keep_null:
                continue
            is_active = False
        else:
            is_active = mean_affinity > affinity_threshold
        filtered_affinity.append(int(is_active))
        new_smiles = smiles.loc[smiles["molregno"] == molregno, "canonical_smiles"].iloc[0]
        filtered_smiles.append(new_smiles)
    return pd.DataFrame(
        {"smiles": filtered_smiles, "filtered_affinity": filtered_affinity}
    )
def get_parser():
    """Build the command-line parser for the filtering script.

    Positional arguments: paths to the smiles/activity inputs and the
    output file. Optional flags: the activity threshold and whether to
    keep molecules with entirely null affinity.
    """
    arg_parser = argparse.ArgumentParser(description="Filter trash out of raw data")
    # The three positional path arguments share the same shape.
    positionals = (
        ("smiles_dataset", "Path to the smiles.tsv.gz file"),
        ("activity_dataset", "Path to the activities.tsv.gz file"),
        ("output", "Path to save result file"),
    )
    for arg_name, arg_help in positionals:
        arg_parser.add_argument(arg_name, type=str, help=arg_help)
    arg_parser.add_argument(
        "--threshold",
        type=float,
        default=8.0,
        help="Minimal value to consider a ligand to be active",
    )
    arg_parser.add_argument(
        "--keep_null",
        type=str,
        choices=["true", "false"],
        default="true",
        help="Keep molecules with null affinity value. Consider null molecules as not active, if true.",
    )
    return arg_parser
if __name__ == "__main__":
    parser = get_parser()
    args = parser.parse_args()
    # Both inputs are gzipped TSV files.
    smiles = pd.read_csv(args.smiles_dataset, compression="gzip", sep="\t")
    activity = pd.read_csv(args.activity_dataset, compression="gzip", sep="\t")
    # Keep only molecules that appear in both tables.
    id_with_affinity = set(activity["molregno"].to_list())
    all_id = set(smiles["molregno"].to_list())
    smiles_with_affinity = all_id.intersection(id_with_affinity)
    log.info(f"Total pairs smiles-known affinity: {len(smiles_with_affinity)}")
    log.info(f"Creating filtered data table...")
    # "--keep_null" arrives as the string "true"/"false"; convert to bool here.
    filtered_data = filter_dataset(
        smiles, activity, smiles_with_affinity, args.threshold, args.keep_null == "true"
    )
    # Create folder if there is none
    output_dir = Path(args.output).parent
    output_dir.mkdir(parents=True, exist_ok=True)
    filtered_data.to_csv(args.output)
    log.info(f"Done. Result is saved to {args.output}")
| [
"logging.basicConfig",
"logging.getLogger",
"argparse.ArgumentParser",
"pandas.read_csv",
"pathlib.Path",
"numpy.isnan",
"pandas.DataFrame"
] | [((97, 136), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (116, 136), False, 'import logging\n'), ((143, 170), 'logging.getLogger', 'logging.getLogger', (['"""Filter"""'], {}), "('Filter')\n", (160, 170), False, 'import logging\n'), ((1691, 1776), 'pandas.DataFrame', 'pd.DataFrame', (["{'smiles': filtered_smiles, 'filtered_affinity': filtered_affinity}"], {}), "({'smiles': filtered_smiles, 'filtered_affinity':\n filtered_affinity})\n", (1703, 1776), True, 'import pandas as pd\n'), ((1846, 1913), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Filter trash out of raw data"""'}), "(description='Filter trash out of raw data')\n", (1869, 1913), False, 'import argparse\n'), ((2720, 2782), 'pandas.read_csv', 'pd.read_csv', (['args.smiles_dataset'], {'compression': '"""gzip"""', 'sep': '"""\t"""'}), "(args.smiles_dataset, compression='gzip', sep='\\t')\n", (2731, 2782), True, 'import pandas as pd\n'), ((2798, 2862), 'pandas.read_csv', 'pd.read_csv', (['args.activity_dataset'], {'compression': '"""gzip"""', 'sep': '"""\t"""'}), "(args.activity_dataset, compression='gzip', sep='\\t')\n", (2809, 2862), True, 'import pandas as pd\n'), ((3352, 3369), 'pathlib.Path', 'Path', (['args.output'], {}), '(args.output)\n', (3356, 3369), False, 'from pathlib import Path\n'), ((1274, 1292), 'numpy.isnan', 'np.isnan', (['affinity'], {}), '(affinity)\n', (1282, 1292), True, 'import numpy as np\n')] |
import numpy as np
import pathlib, sys
def doFom(fom, dt, nsteps, saveFreq):
    """Integrate the full-order model with explicit (forward) Euler.

    :param fom: model exposing ``u0``, ``createVelocity()`` and
        ``velocity(u, t, f)``; velocity is expected to fill ``f`` in place
        with the RHS evaluated at state ``u`` and time ``t``.
    :param dt: time-step size.
    :param nsteps: number of Euler steps to take.
    :param saveFreq: store a snapshot every ``saveFreq`` steps
        (the initial state is always stored).
    :return: ``[final_state, snapshots]`` where ``snapshots`` has one
        column per stored state (initial state first).
    """
    state = fom.u0.copy()
    snapshots = [state]
    rhs = fom.createVelocity()
    for step in range(1, nsteps + 1):
        # Evaluate the discretized RHS, then take one explicit Euler step.
        fom.velocity(state, step * dt, rhs)
        state = state + dt * rhs
        if step % saveFreq == 0:
            snapshots.append(state)
    return [state, np.array(snapshots).T]
| [
"numpy.array"
] | [((332, 343), 'numpy.array', 'np.array', (['U'], {}), '(U)\n', (340, 343), True, 'import numpy as np\n')] |
from service.visualization.maps.GenericMap import GenericMap
import numpy as np
class Log2Map(GenericMap):
    """Map that replaces one column of the data with its base-2 logarithm.

    The target column name is read from the ``"variable"`` attribute.
    """
    name = "Log2"

    def __init__(self, attrs):
        super().__init__(attrs)

    def apply(self, data):
        """Apply log2 in place to the configured column; returns ``data``."""
        column = self.getAttrs()["variable"]
        data[column] = np.log2(data[column])
        return data
| [
"numpy.log2"
] | [((293, 316), 'numpy.log2', 'np.log2', (['data[variable]'], {}), '(data[variable])\n', (300, 316), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ray
from collections import namedtuple
import numpy as np
import random
from ray.rllib.agents.trainer import Trainer
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.policy.policy import Policy
from ray.rllib.utils import try_import_tf
from ray.rllib.utils.annotations import override
from ray.tune.logger import pretty_print
from mprl.rl.envs.opnspl.poker_br_policy import PokerOracleBestResponsePolicy
from mprl.rl.common.stratego_model import STRATEGO_MODEL
from mprl.rl.common.stratego_preprocessor import STRATEGO_PREPROCESSOR
from mprl.rl.ppo.ppo_custom_eval_trainer import PPOCustomEvalTrainer
from mprl.rl.ppo.ppo_stratego_model_policy import PPOStrategoModelTFPolicy
from mprl.rl.common.util import numpy_unpack_obs
from mprl.rl.envs.opnspl.poker_multiagent_env import POKER_ENV
from mprl.rl.envs.opnspl.measure_exploitability_eval_callback import openspiel_policy_from_nonlstm_rllib_policy
from mprl.rl.envs.opnspl.util import policy_to_dict_but_we_can_actually_use_it
from mprl.rl.envs.opnspl.poker_multiagent_env import PokerMultiAgentEnv
from open_spiel.python.policy import tabular_policy_from_policy
from open_spiel.python import policy
import pyspiel
# TensorFlow handle obtained through RLlib's guarded import helper.
tf = try_import_tf()
# Keys identifying the three policies in the RLlib multiagent policy map.
RL_BR_POLICY = "rl_br_policy"
ORACLE_BR_POLICY = "oracle_br_policy"
EXPLOIT_POLICY = "exploit_policy"
# Used to return tuple actions as a list of batches per tuple element
TupleActions = namedtuple("TupleActions", ["batches"])
# Key for the extra action-distribution output emitted by compute_actions.
POLICY_TARGETS = "policy_targets"
# Keys of the structured observation components.
OBSERVATION = 'observation'
VALID_ACTIONS_MASK = 'valid_actions_mask'
def softmax(x):
    """
    Compute softmax values for each sets of scores in x.
    https://stackoverflow.com/questions/34968722/how-to-implement-the-softmax-function-in-python
    """
    # Shift by the global maximum before exponentiating for numerical
    # stability; the shift cancels out in the normalized result.
    shifted_exp = np.exp(x - np.max(x))
    return shifted_exp / shifted_exp.sum()
class PokerOpenSpeilPolicy(Policy):
    """RLlib Policy wrapper around a fixed OpenSpiel tabular policy.

    The policy probabilities are injected after construction via
    set_policy_dict() (a mapping from the stringified info-state vector to
    (action, prob) pairs). Until a dict is set, compute_actions plays
    uniformly over the valid actions. The policy is never trained: all
    gradient/weight methods are no-ops.
    """
    @override(Policy)
    def __init__(self, observation_space, action_space, config):
        Policy.__init__(self, observation_space=observation_space, action_space=action_space, config=config)
        if config["custom_preprocessor"]:
            # A preprocessor is required to unpack flattened observation
            # batches back into their structured form in compute_actions.
            self.preprocessor = ModelCatalog.get_preprocessor_for_space(
                observation_space=self.observation_space.original_space,
                options={"custom_preprocessor": config["custom_preprocessor"]})
        else:
            raise ValueError("Custom preprocessor for PokerCFRPolicy needs to be specified on its passed config.")
        env_id = config['env']
        assert env_id == POKER_ENV
        # str(info_state) -> iterable of (action, prob); injected later via
        # set_policy_dict (config pickling prevents passing it in directly).
        self.policy_dict = None
    def set_policy_dict(self, policy_dict):
        self.policy_dict = policy_dict
    def _get_action_probs_for_infoset(self, infoset):
        # Build a dense probability vector over all actions from the sparse
        # (action, prob) entries stored for this info-state.
        action_probs = np.zeros(shape=(self.action_space.n,), dtype=np.float32)
        policy_lookup_val = self.policy_dict[str(np.asarray(infoset, dtype=np.float32).tolist())]
        for action, prob in policy_lookup_val:
            action_probs[action] = prob
        return action_probs
    @override(Policy)
    def compute_actions(self,
                        obs_batch,
                        state_batches,
                        prev_action_batch=None,
                        prev_reward_batch=None,
                        info_batch=None,
                        episodes=None,
                        **kwargs):
        """Compute actions for the current policy.
        Arguments:
            obs_batch (np.ndarray): batch of observations
            state_batches (list): list of RNN state input batches, if any
            prev_action_batch (np.ndarray): batch of previous action values
            prev_reward_batch (np.ndarray): batch of previous rewards
            info_batch (info): batch of info objects
            episodes (list): MultiAgentEpisode for each obs in obs_batch.
                This provides access to all of the internal episode state,
                which may be useful for model-based or multiagent algorithms.
            kwargs: forward compatibility placeholder
        Returns:
            actions (np.ndarray): batch of output actions, with shape like
                [BATCH_SIZE, ACTION_SHAPE].
            state_outs (list): list of RNN state output batches, if any, with
                shape like [STATE_SIZE, BATCH_SIZE].
            info (dict): dictionary of extra feature batches, if any, with
                shape like {"f1": [BATCH_SIZE, ...], "f2": [BATCH_SIZE, ...]}.
        """
        # Recover the structured observation (info-state + valid-action mask)
        # from the flattened batch.
        obs_batch = numpy_unpack_obs(obs=np.asarray(obs_batch), space=self.observation_space.original_space,
                                     preprocessor=self.preprocessor)
        info_states = obs_batch["partial_observation"]
        valid_actions = obs_batch['valid_actions_mask']
        actions = []
        policy_probs = []
        for info_state, valid_mask in zip(info_states, valid_actions):
            if self.policy_dict is None:
                # No policy injected yet: play uniformly over valid actions.
                action_probs = valid_mask.copy() / sum(valid_mask)
            else:
                action_probs = self._get_action_probs_for_infoset(info_state)
            action = np.random.choice(range(self.action_space.n), p=action_probs)
            # The sampled action must be legal for this info-state.
            assert valid_mask[action] == 1.0
            actions.append(action)
            policy_probs.append(action_probs)
        # Expose the full action distribution as an extra output batch.
        return actions, [], {POLICY_TARGETS: np.asarray(policy_probs)}
    def compute_gradients(self, postprocessed_batch):
        """Computes gradients against a batch of experiences.
        Either this or learn_on_batch() must be implemented by subclasses.
        Returns:
            grads (list): List of gradient output values
            info (dict): Extra policy-specific values
        """
        # No-op: this policy is fixed and never trained.
        pass
    def apply_gradients(self, gradients):
        """Applies previously computed gradients.
        Either this or learn_on_batch() must be implemented by subclasses.
        """
        # No-op: this policy is fixed and never trained.
        pass
    def get_weights(self):
        """Returns model weights.
        Returns:
            weights (obj): Serializable copy or view of model weights
        """
        # No underlying model, hence no weights.
        return None
    def set_weights(self, weights):
        """Sets model weights.
        Arguments:
            weights (obj): Serializable copy or view of model weights
        """
        pass
    def get_initial_state(self):
        """Returns initial RNN state for the current policy."""
        return []
    def get_state(self):
        """Saves all local state.
        Returns:
            state (obj): Serialized local state.
        """
        return self.get_weights()
    def set_state(self, state):
        """Restores all local state.
        Arguments:
            state (obj): Serialized local state.
        """
        self.set_weights(state)
    def on_global_var_update(self, global_vars):
        """Called on an update to global vars.
        Arguments:
            global_vars (dict): Global variables broadcast from the driver.
        """
        pass
    def export_model(self, export_dir):
        """Export Policy to local directory for serving.
        Arguments:
            export_dir (str): Local writable directory.
        """
        raise NotImplementedError
    def export_checkpoint(self, export_dir):
        """Export Policy checkpoint to local directory.
        Argument:
            export_dir (str): Local writable directory.
        """
        raise NotImplementedError
def get_openspeil_format_rl_br_policy(game_name, br_player_id, policy_to_exploit, policy_to_exploit_player_id):
    """Train an RL (PPO) best response to a fixed OpenSpiel policy and
    return it as an OpenSpiel tabular policy.

    :param game_name: OpenSpiel game name (e.g. "kuhn_poker").
    :param br_player_id: player seat the best response plays.
    :param policy_to_exploit: fixed OpenSpiel policy being exploited.
    :param policy_to_exploit_player_id: seat the exploited policy plays.
    :return: tabular policy built from the trained RL best response.
    """
    ray.init(local_mode=True, ignore_reinit_error=True)
    poker_game_version = game_name
    observation_mode = "partially_observable"
    poker_env_config = {
        'version': poker_game_version,
        'fixed_players': True
    }
    # Instantiate a throwaway env only to read its observation/action spaces.
    make_env_fn = lambda env_config: PokerMultiAgentEnv(env_config)
    temp_env = make_env_fn(poker_env_config)
    obs_space = temp_env.observation_space
    act_space = temp_env.action_space
    model_config = {
        # === Options for custom models ===
        # Name of a custom preprocessor to use
        "custom_preprocessor": STRATEGO_PREPROCESSOR,
        # Name of a custom model to use
        "custom_model": STRATEGO_MODEL,
        "custom_options": {
            "mask_invalid_actions": True,
            "observation_mode": observation_mode,
            "q_fn": False
        },
    }
    def train_policy_mapping_fn(agent_id):
        # Route each env player to the right RLlib policy by seat id.
        if agent_id == br_player_id:
            # this is just to quickly check that we're matching the Oracle BR by having it
            # also play some games and report win stats too
            # TODO: you can remove this if-statement if you dont care about verifying against the oracle BR
            if random.random() < 0.1:
                return ORACLE_BR_POLICY
            return RL_BR_POLICY
        elif agent_id == policy_to_exploit_player_id:
            return EXPLOIT_POLICY
        else:
            raise ValueError(f"The env requested a policy for a player ID of {agent_id} "
                             f"but the BR policy has a player ID of {br_player_id} "
                             f"and the exploit policy has player ID of {policy_to_exploit_player_id}")
    trainer_config = {
        "log_level": "INFO",
        "num_workers": 0, # 0 means a single worker instance in the same process as the optimizer
        "memory_per_worker": 1419430400,
        "num_gpus": 0, # (GPUs for training) not using gpus for anything by default
        "num_gpus_per_worker": 0, # (GPUs per experience gathering worker process, can be a fraction)
        "num_envs_per_worker": 1,
        "env": POKER_ENV,
        "env_config": poker_env_config,
        "multiagent": {
            "policies": {
                RL_BR_POLICY: (PPOStrategoModelTFPolicy, obs_space, act_space, {
                    # the config dicts in these "policies" override any non-policy-specific params
                    'model': model_config,
                    "lr": 0.001,
                }),
                # TODO: you can remove the ORACLE BR policy here if you dont want to verify against it
                # (there are two other TODO's in this file with Oracle BR stuff you can remove)
                ORACLE_BR_POLICY: (PokerOracleBestResponsePolicy, obs_space, act_space, {
                    'custom_preprocessor': STRATEGO_PREPROCESSOR,
                }),
                EXPLOIT_POLICY: (PokerOpenSpeilPolicy, obs_space, act_space, {
                    'custom_preprocessor': STRATEGO_PREPROCESSOR,
                }),
            },
            "policy_mapping_fn": train_policy_mapping_fn,
            "policies_to_train": [RL_BR_POLICY],
        },
        "metrics_smoothing_episodes": 1000, # all reported RLLib metrics are averaged over this size episode window
        "gamma": 1.0, # discount
        "num_sgd_iter": 10, # train over train batch this many times each train() call
        "sgd_minibatch_size": 128, #break train batch in to this size minibatches
        "train_batch_size": 500,
        "sample_batch_size": 10, # each worker returns chunks of this size (PPO continues gathering exp until train_batch_size is gathered in total among all policies)
        "simple_optimizer": True, # non-simple optimizer does multi-gpu/preloading fancy stuff
        "model": {
            "conv_filters": [],
            "fcnet_hiddens": [40, 40, 40], # poker network size here
        },
    }
    trainer_class = PPOCustomEvalTrainer
    trainer: Trainer = trainer_class(config=trainer_config)
    # For technical reasons (can't pickle certain things),
    # I have to set the policy probs for openspiel-based exploit policy here
    def set_openspeil_exploit_policy_probs(worker):
        game = pyspiel.load_game(game_name)
        worker.policy_map[EXPLOIT_POLICY].set_policy_dict(
            policy_to_dict_but_we_can_actually_use_it(player_policy=policy_to_exploit,
                                                      game=game,
                                                      player_id=policy_to_exploit_player_id))
    trainer.workers.foreach_worker(set_openspeil_exploit_policy_probs)
    ###################
    # For technical reasons (can't pickle certain things),
    # I have to set the policy probs for openspiel-based BR policy here
    # TODO: you can remove this chunk of logic if you remove the other two bits of Oracle BR code in earlier lines
    local_br_policy = trainer.workers.local_worker().policy_map[ORACLE_BR_POLICY]
    local_exploit_rllib_policy = trainer.workers.local_worker().policy_map[EXPLOIT_POLICY]
    br_policy_probs_dict = local_br_policy.compute_best_response(policy_to_exploit=local_exploit_rllib_policy,
                                                                 br_only_as_player_id=br_player_id)
    def set_openspeil_oracle_br_policy_probs(worker):
        worker.policy_map[ORACLE_BR_POLICY].set_policy_dict(br_policy_probs_dict)
    trainer.workers.foreach_worker(set_openspeil_oracle_br_policy_probs)
    ####################
    # Main training loop: fixed number of PPO iterations.
    iterations = 100
    for it in range(1, iterations + 1):
        result = trainer.train()
        print(f"Iteration {it} out of {iterations}")
        print(pretty_print(result))
    # Convert the trained RL best response into an OpenSpiel tabular policy.
    game = pyspiel.load_game(game_name)
    open_spiel_policy_from_callable = openspiel_policy_from_nonlstm_rllib_policy(
        openspiel_game=game, poker_game_version=poker_game_version,
        rllib_policy=trainer.workers.local_worker().policy_map[RL_BR_POLICY])
    return tabular_policy_from_policy(game=game, policy=open_spiel_policy_from_callable)
if __name__ == '__main__':
game_name = "kuhn_poker"
game = pyspiel.load_game(game_name)
tabular_policy = policy.TabularPolicy(game)
# rl_br_policy should have the same interface as a openspeil br policy from something like
# open_spiel.python.algorithms.best_response.BestResponsePolicy
rl_br_policy = get_openspeil_format_rl_br_policy(game_name=game_name,
br_player_id=0,
policy_to_exploit_player_id=1,
policy_to_exploit=tabular_policy)
| [
"open_spiel.python.policy.tabular_policy_from_policy",
"collections.namedtuple",
"mprl.rl.envs.opnspl.util.policy_to_dict_but_we_can_actually_use_it",
"pyspiel.load_game",
"open_spiel.python.policy.TabularPolicy",
"ray.rllib.models.catalog.ModelCatalog.get_preprocessor_for_space",
"numpy.asarray",
"ra... | [((1310, 1325), 'ray.rllib.utils.try_import_tf', 'try_import_tf', ([], {}), '()\n', (1323, 1325), False, 'from ray.rllib.utils import try_import_tf\n'), ((1515, 1554), 'collections.namedtuple', 'namedtuple', (['"""TupleActions"""', "['batches']"], {}), "('TupleActions', ['batches'])\n", (1525, 1554), False, 'from collections import namedtuple\n'), ((1951, 1967), 'ray.rllib.utils.annotations.override', 'override', (['Policy'], {}), '(Policy)\n', (1959, 1967), False, 'from ray.rllib.utils.annotations import override\n'), ((3078, 3094), 'ray.rllib.utils.annotations.override', 'override', (['Policy'], {}), '(Policy)\n', (3086, 3094), False, 'from ray.rllib.utils.annotations import override\n'), ((7579, 7630), 'ray.init', 'ray.init', ([], {'local_mode': '(True)', 'ignore_reinit_error': '(True)'}), '(local_mode=True, ignore_reinit_error=True)\n', (7587, 7630), False, 'import ray\n'), ((13293, 13321), 'pyspiel.load_game', 'pyspiel.load_game', (['game_name'], {}), '(game_name)\n', (13310, 13321), False, 'import pyspiel\n'), ((13562, 13639), 'open_spiel.python.policy.tabular_policy_from_policy', 'tabular_policy_from_policy', ([], {'game': 'game', 'policy': 'open_spiel_policy_from_callable'}), '(game=game, policy=open_spiel_policy_from_callable)\n', (13588, 13639), False, 'from open_spiel.python.policy import tabular_policy_from_policy\n'), ((13709, 13737), 'pyspiel.load_game', 'pyspiel.load_game', (['game_name'], {}), '(game_name)\n', (13726, 13737), False, 'import pyspiel\n'), ((13759, 13785), 'open_spiel.python.policy.TabularPolicy', 'policy.TabularPolicy', (['game'], {}), '(game)\n', (13779, 13785), False, 'from open_spiel.python import policy\n'), ((2041, 2146), 'ray.rllib.policy.policy.Policy.__init__', 'Policy.__init__', (['self'], {'observation_space': 'observation_space', 'action_space': 'action_space', 'config': 'config'}), '(self, observation_space=observation_space, action_space=\n action_space, config=config)\n', (2056, 2146), False, 'from 
ray.rllib.policy.policy import Policy\n'), ((2801, 2857), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.action_space.n,)', 'dtype': 'np.float32'}), '(shape=(self.action_space.n,), dtype=np.float32)\n', (2809, 2857), True, 'import numpy as np\n'), ((7852, 7882), 'mprl.rl.envs.opnspl.poker_multiagent_env.PokerMultiAgentEnv', 'PokerMultiAgentEnv', (['env_config'], {}), '(env_config)\n', (7870, 7882), False, 'from mprl.rl.envs.opnspl.poker_multiagent_env import PokerMultiAgentEnv\n'), ((11802, 11830), 'pyspiel.load_game', 'pyspiel.load_game', (['game_name'], {}), '(game_name)\n', (11819, 11830), False, 'import pyspiel\n'), ((1870, 1879), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (1876, 1879), True, 'import numpy as np\n'), ((2217, 2386), 'ray.rllib.models.catalog.ModelCatalog.get_preprocessor_for_space', 'ModelCatalog.get_preprocessor_for_space', ([], {'observation_space': 'self.observation_space.original_space', 'options': "{'custom_preprocessor': config['custom_preprocessor']}"}), "(observation_space=self.\n observation_space.original_space, options={'custom_preprocessor':\n config['custom_preprocessor']})\n", (2256, 2386), False, 'from ray.rllib.models.catalog import ModelCatalog\n'), ((11902, 12030), 'mprl.rl.envs.opnspl.util.policy_to_dict_but_we_can_actually_use_it', 'policy_to_dict_but_we_can_actually_use_it', ([], {'player_policy': 'policy_to_exploit', 'game': 'game', 'player_id': 'policy_to_exploit_player_id'}), '(player_policy=policy_to_exploit,\n game=game, player_id=policy_to_exploit_player_id)\n', (11943, 12030), False, 'from mprl.rl.envs.opnspl.util import policy_to_dict_but_we_can_actually_use_it\n'), ((13259, 13279), 'ray.tune.logger.pretty_print', 'pretty_print', (['result'], {}), '(result)\n', (13271, 13279), False, 'from ray.tune.logger import pretty_print\n'), ((4569, 4590), 'numpy.asarray', 'np.asarray', (['obs_batch'], {}), '(obs_batch)\n', (4579, 4590), True, 'import numpy as np\n'), ((5396, 5420), 'numpy.asarray', 'np.asarray', 
(['policy_probs'], {}), '(policy_probs)\n', (5406, 5420), True, 'import numpy as np\n'), ((8776, 8791), 'random.random', 'random.random', ([], {}), '()\n', (8789, 8791), False, 'import random\n'), ((2907, 2944), 'numpy.asarray', 'np.asarray', (['infoset'], {'dtype': 'np.float32'}), '(infoset, dtype=np.float32)\n', (2917, 2944), True, 'import numpy as np\n')] |
from netpyne import sim, specs
from neuron import gui
import matplotlib.pyplot as plt
import numpy as np
netParams = specs.NetParams()
#############################################
#### POPULATION PARAMETERS #####
#############################################
#I tried this because the plotshape plotting is giving me an error (populations' ranges are invalid), but it doesn't disappear with this either
#Anyway, the plot is shown correctly
netParams.sizeX=2700
netParams.sizeY=100
netParams.sizeZ=100
PATTi=0 #pattern to be read
#Amount of cells in the network
nPyramidal=100
nCA3=100
nEC=20 #must be equal to the active pyr cells in the pattern
nSEP=10
nOLM=1
nBS=1
nB=2
nAA=1
STARTDEL = 50. # msecs
THETA = 250. # msecs (4 Hz)
GAMMA = 25. # msecs (40 Hz)
ECCA3DEL = 9. # msecs
# Population parameters
netParams.popParams['Pyramidal'] = {'cellType': 'Pyramidalcell',
'numCells': nPyramidal,
'cellModel': 'Pyramidal_model',
'xRange':[2100, 2100],
'yRange':[0, 100],
'zRange':[0, 100]}#100cells
netParams.popParams['OLM'] = {'cellType':
'OLMcell', 'numCells': nOLM,
'cellModel': 'OLM_model',
'xRange':[2700, 2700],
'yRange':[0, 100],
'zRange':[0, 100]}
netParams.popParams['BS'] = {'cellType':
'BScell', 'numCells': nBS,
'cellModel': 'BS_model',
'xRange':[1600, 1600],
'yRange':[0, 100],
'zRange':[0, 100]}
netParams.popParams['Basket'] = {'cellType':
'Basketcell', 'numCells': nB,
'cellModel': 'B_model',
'xRange':[900, 900],
'yRange':[0, 100],
'zRange':[0, 100]}
netParams.popParams['AA'] = {'cellType':
'AAcell', 'numCells': nAA,
'cellModel': 'AA_model',
'xRange':[0, 0],
'yRange':[0, 100],
'zRange':[0, 100]}
#'cellModel': 'RegnStim' ##they use this other model (not NetStim) in their EC and CA3 cells.
netParams.popParams['EC']={'cellModel':
'RegnStim', 'numCells': nEC, 'number': 1000, 'interval': GAMMA,'start': STARTDEL, 'noise': 0.2,\
'xRange':[0, 500], 'yRange':[100, 150], 'zRange':[0, 100] }
netParams.popParams['CA3']={'cellModel': 'RegnStim', 'numCells': nCA3, 'number': 1000, 'interval': GAMMA,'start': STARTDEL+ECCA3DEL, 'noise': 0.2,\
'xRange':[500, 1000], 'yRange':[100, 150], 'zRange':[0, 100]}
netParams.popParams['SEP']={'cellModel': 'BurstStim2', 'numCells': nSEP, 'interval':20, 'number': 1000, 'start': STARTDEL+(THETA/12.), 'noise': 0.4,\
'burstint':2.*THETA/3.,'burstlen':THETA/3.,'xRange':[1000, 1500], 'yRange':[100, 150], 'zRange':[0, 100]}
#Due to 40% noise in the interspike intervals,
#the 10 spike trains in the septal population are asynchronous.
#############################################
#### IMPORT CELL PARAMETERS #####
#############################################
netParams.importCellParams(label='Pyramidalcell', conds={'cellType': 'Pyramidalcell', 'cellModel': 'Pyramidal_model'}, \
fileName='pyramidal_cell_14Vb.hoc', cellName='PyramidalCell', importSynMechs=False)
netParams.importCellParams(label='OLMcell', conds={'cellType': 'OLMcell', 'cellModel': 'OLM_model'}, \
fileName='olm_cell2.hoc', cellName='OLMCell', importSynMechs=False)
#netParams.cellParams['OLMcell'].globals.Rm=20000.
netParams.importCellParams(label='BScell', conds={'cellType': 'BScell', 'cellModel': 'BS_model'}, \
fileName='bistratified_cell13S.hoc', cellName='BistratifiedCell', importSynMechs=False)
netParams.importCellParams(label='Basketcell', conds={'cellType': 'Basketcell', 'cellModel': 'B_model'}, \
fileName='basket_cell17S.hoc', cellName='BasketCell', importSynMechs=False)
netParams.importCellParams(label='AAcell', conds={'cellType': 'AAcell', 'cellModel': 'AA_model'}, \
fileName='axoaxonic_cell17S.hoc', cellName='AACell', importSynMechs=False)
##Setting thresholds
cells=['Pyramidalcell','OLMcell','BScell','Basketcell','AAcell']
for i in cells:
for sec in netParams.cellParams[i].secs:
netParams.cellParams[i].secs[sec].threshold = -10.0
#############################################
#### NETWORK CONNECTIONS #####
#############################################
weights={'Pyramidalcell2Pyramidalcell': 0.001, 'Pyramidalcell2AAcell':0.0005, 'Pyramidalcell2Basketcell':0.0005, 'Pyramidalcell2BScell':0.0005,'Pyramidalcell2OLMcell': 0.00005, \
'AAcell2Pyramidalcell': 0.04,\
'Basketcell2Pyramidalcell': 0.02, 'Basketcell2Basketcell': 0.001, 'Basketcell2BScell': 0.02,\
'BScell2Pyramidalcell': 0.002, 'BScell2Pyramidal_GABABasketcell': 0.0004, 'BScell2Basketcell': 0.01, \
'OLMcell2Pyramidalcell': 0.04, 'OLMcell2Pyramidal_GABABasketcell': 0.0004,'OLMcell2Basketcell': 0.01, }
delays={'Pyramidalcell2Pyramidalcell': 1., 'Pyramidalcell2AAcell':1., 'Pyramidalcell2Basketcell':1., 'Pyramidalcell2BScell':1.,'Pyramidalcell2OLMcell': 1., \
'AAcell2Pyramidalcell': 1., \
'Basketcell2Pyramidalcell': 1., 'Basketcell2Basketcell': 1., 'Basketcell2BScell': 1., \
'BScell2Pyramidalcell': 1., 'BScell2Pyramidal_GABABasketcell': 1., 'BScell2Basketcell': 1., \
'OLMcell2Pyramidalcell': 1., 'OLMcell2Pyramidal_GABABasketcell': 1.,'OLMcell2Basketcell': 1. }
# Cue (CA3) excitation
CHWGT = 0.0015 #// cue weight
CLWGT = 0.0005 #// unlearnt weight (usually 0)
CNWGT = 0.0005 #// excitatory weights (NMDA)
CDEL = 1. #// cue delay
#EC excitation
ECWGT = 0.0 # EC weight to PCs
#ECWGT = 0.001 # EC weight to PCs
ECDEL = 1. # EC delay
EIWGT = 0.00015 # excitatory weights to INs
EIDEL = 1. # delay (msecs)
# Septal inhibition
SEPWGT = 0.02 # SEP weight to BCs and AACs
SEPWGTL = 0.0002 # SEP weight to BSCs and OLMs
SEPDEL = 1. # SEP delay
#############################################
#### DESCRIPTION OF SYNAPTIC MECHANISMS #####
#############################################
###STDP configuration
STDPDFAC = 0. # depression factor
STDPPFAC = 0. # potentiation factor
#STDPDFAC = 0.2 # depression factor
#STDPPFAC = 1.0 # potentiation factor
AMPASUPP = 0.4 # fraction of AMPA during storage
STDPTHRESH = -55. # voltage threshold for STDP
STDPSTART = STARTDEL+(THETA/2.) # STDP starts at same time as EC input
STDPINT = THETA/2. # STDP interburst (recall) interval
STDPLEN = THETA/2. # STDP burst (storage) length
netParams.synMechParams['STDP']={'mod':'STDPE2', 'wmax': CHWGT, 'wmin':CLWGT,'d': STDPDFAC, 'p' : STDPPFAC, 'gscale': AMPASUPP, 'thresh': STDPTHRESH, \
'gbdel': STDPSTART, 'gbint': STDPINT, 'gblen': STDPLEN}
netParams.synMechParams['GABAA']={'mod':'MyExp2Syn', 'tau1':1.0, 'tau2':8.0, 'e':-75.0}
netParams.synMechParams['GABAB']={'mod':'MyExp2Syn', 'tau1':35.0, 'tau2':100.0, 'e':-75.0}
netParams.synMechParams['AMPA']={'mod':'MyExp2Syn', 'tau1':0.5, 'tau2':3.0, 'e':0.0}
netParams.synMechParams['NMDA']={'mod':'NMDA', 'tcon': 2.3, 'tcoff': 100.0, 'enmda': 0.0, 'gNMDAmax': 1.0, 'tauD': 800.0, 'tauF': 800.0, 'util': 0.3}
netParams.synMechParams['OLM_GABAA']={'mod':'Exp2Syn', 'tau1':1.0, 'tau2':8.0, 'e':-75.0}
netParams.synMechParams['OLM_GABAB']={'mod':'Exp2Syn', 'tau1':35.0, 'tau2':100.0, 'e':-75.0}
netParams.synMechParams['OLM_AMPA']={'mod':'Exp2Syn', 'tau1':0.5, 'tau2':3.0, 'e':0.0}
#netParams.synMechParams
####MyExp2Syn_0 == GABA-A ==? Exp2Syn_1 == {tau2: 8.0, tau1: 1.0, e: -75.0}
####MyExp2Syn_1 == AMPA ==? Exp2Syn_2 == {tau2: 3.0, tau1: 0.5, e: 0.0}
####MyExp2Syn_2 == GABA-B ==? Exp2Syn_0 == {tau2: 100.0, tau1: 35.0, e: -75.0}
####NMDA_3 == NMDA
###THE EXP2SYN MECHS ARE USED FOR OLM CONNECTIONS ONLY
#######################
##presyn = Pyramidal CHECKED
#######################
postsynList=['Pyramidal','AA','Basket','BS','OLM']
postsynDict={'Pyramidal':['radTprox'], 'AA': ['oriT1','oriT2'], 'Basket':['oriT1','oriT2'], 'BS':['oriT1','oriT2'], 'OLM':['dend1','dend2']}
for i in range(len(postsynList)):
k='Pyramidalcell2'+postsynList[i]+'cell'
netParams.connParams['Pyramidal->'+postsynList[i]] = {
'preConds': {'pop': 'Pyramidal'},
'postConds': {'pop': postsynList[i]},
'sec': postsynDict[postsynList[i]],
'synsPerConn':len(postsynDict[postsynList[i]]),
'synMech': 'AMPA',
'weight': weights[k],
'delay': delays[k]
#'threshold': -10.0
}
if postsynList[i]=='Pyramidal':
netParams.connParams['Pyramidal->Pyramidal']['convergence'] = 1. # PC_PC = 1 // # of connections received by each PC from other PCs (excit)
if postsynList[i]=='OLM':
netParams.connParams['Pyramidal->OLM']['synMech'] = 'OLM_AMPA'
#FOR THE CONNECTIONS TO OLM CELLS THEY USE A DIFFERENT SYNAPSE MODEL
#######################
##presyn == AA CHECKED
#######################
netParams.connParams['AA->Pyramidal'] = {
'preConds': {'pop': 'AA'},
'postConds': {'pop': 'Pyramidal'},
'sec': 'axon',
'loc': 0.1,
'synMech': 'GABAA',
'weight': weights['AAcell2Pyramidalcell'],
'delay': delays['AAcell2Pyramidalcell']
#'threshold': -10.0
}
#######################
##presyn == B CHECKED
#######################
postsynList=['Pyramidal','Basket','BS'] ##B->AA not connected
for i in range(len(postsynList)):
k='Basketcell2'+postsynList[i]+'cell'
netParams.connParams['B->'+postsynList[i]] = {
'preConds': {'pop': 'Basket'},
'postConds': {'pop': postsynList[i]},
'sec': 'soma',
'synMech': 'GABAA', #GABA-A
'weight': weights[k],
'delay': delays[k]
# 'threshold': -10.0
}
if postsynList[i]=='BS': netParams.connParams['B->BS']['loc'] = 0.6
##WITH THIS LINE IT DOESNT CREATE THE B->B CONNECTION
## elif postsynList[i]=='Basket': netParams.connParams['B->B']['convergence'] = 1. # BC_BC = 1 // # of connections received by each BC from other BCs (inhib)
#######################
##presyn == BS CHECKED
#######################
##BS->AA & BS->BS not connected
netParams.connParams['BS->B'] = {
'preConds': {'pop': 'BS'},
'postConds': {'pop': 'Basket'},
'sec': 'soma',
'synMech': 'GABAA',
'loc':0.6,
'weight': weights['BScell2Basketcell'],
'delay': delays['BScell2Basketcell']
#'threshold': -10.0
}
netParams.connParams['BS->Pyramidal'] = {
'preConds': {'pop': 'BS'},
'postConds': {'pop': 'Pyramidal'},
'sec': 'radTmed',
'synsPerConn':7,
'loc':[[0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2],[0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2]],
'synMech': ['GABAA','GABAB'],
'weight': [weights['BScell2Pyramidalcell'], weights['BScell2Pyramidal_GABABasketcell']],
'delay': [delays['BScell2Pyramidalcell'],delays['BScell2Pyramidal_GABABasketcell']]
# 'threshold': -10.0
}
#######################
##presyn == OLM CHECKED
#######################
netParams.connParams['OLM->Pyramidal'] = {
'preConds': {'pop': 'OLM'},
'postConds': {'pop': 'Pyramidal'},
'sec': ['lm_thick1','lm_thick2'],
'synMech': ['GABAA','GABAB'], #GABA-A,GABA-B
'weight': [weights['OLMcell2Pyramidalcell'], weights['OLMcell2Pyramidal_GABABasketcell']],
'delay': [delays['OLMcell2Pyramidalcell'],delays['OLMcell2Pyramidal_GABABasketcell']],
'synsPerConn':21
#'threshold': -10.0
}
#############################################
#### STIMULATION - INPUTS #####
#############################################
#####################################
#####EC input to active pyramidal cells
#####################################
FPATT = "Weights/pattsN100S20P5.dat" #already stored patterns: each column is a pattern. Each line is a CA1 pyramidal cell
PATTS = np.transpose(np.loadtxt(fname=FPATT, dtype='int16')) #each column is a pattern - 100 lines (one per pyramidal cell)
lista_EC2Pyramidal=[] #to check which pyr cells are active in the pattern
##each EC cell will stimulate every active pyr cell in the pattern
for i in range(nEC):
for j in range(nPyramidal):
if PATTS[PATTi][j]:
lista_EC2Pyramidal.append([i,j])
netParams.connParams['EC->Pyramidal'] = {
'preConds': {'pop': 'EC'},
'postConds': {'pop': 'Pyramidal'},
'connList':lista_EC2Pyramidal,
'sec': ['lm_thick1','lm_thick2'],
'synMech': 'AMPA',
'loc':0.5,
'weight': ECWGT,
'delay': ECDEL,
'synsPerConn':2
#'threshold': 10.0
}
##############################
### EC to inhibitory neurons --
##############################
netParams.connParams['EC->IN'] = {
'preConds': {'pop': 'EC'},
'postConds': {'pop': ['Basket','AA']},
'sec': ['lmM1','lmM2'],
'synMech': 'AMPA',
'weight': EIWGT,
'delay': EIDEL,
'synsPerConn':2
#'threshold': -10.0
}
######################
####CA3 EXCITATION
#####################
FCONN = "Weights/wgtsN100S20P5.dat" #weights matrix generated with matlab file
#WGTCONN = np.transpose(np.loadtxt(fname=FCONN, dtype='int16')) #each column has the weights for one pyramidal cell
WGTCONN = (np.loadtxt(fname=FCONN, dtype='int16')) #each column has the weights for one pyramidal cell
#############################
####CA3 -> INHIBITORY CELLS
############################
lista_CA3active=[]
###connect CA3 input to all pyramidal cells but with different weights according to the WGTCONN[i][j] value
lista_CA3highW=[]
lista_CA3lowW=[]
for i in range(nCA3):
if PATTS[PATTi][i]: ##ONLY CONNECTIONS FROM ACTIVE CA3 CELLS IN THE PATTERN
lista_CA3active.append(i)
for i in lista_CA3active: ##ONLY CONNECTIONS FROM ACTIVE CA3 CELLS IN THE PATTERN
for j in range(nPyramidal):
if WGTCONN[i][j]:
lista_CA3highW.append([i,j])
else: lista_CA3lowW.append([i,j])
postsynList=['AA','Basket','BS']
postsynDict={'AA': ['radM1','radM2','radT1', 'radT2'], 'Basket':['radM1','radM2','radT1', 'radT2'], 'BS':['radM1','radM2','radT1', 'radT2']}
cellnums={'Basket':nB, 'AA': nAA, 'BS': nBS, 'OLM': nOLM}
list=[]
connections={}
for j in postsynList:
num=0
list=[]
connections[j]= list
for i in range(cellnums[j]):
for k in lista_CA3active: list.append([k,i])
for i in range(len(postsynList)):
k='CA3cell2'+postsynList[i]+'cell'
netParams.connParams['CA3->'+postsynList[i]] = {
'preConds': {'pop': 'CA3'},
'postConds': {'pop': postsynList[i]},
'connList':connections[postsynList[i]],
'sec': postsynDict[postsynList[i]],
'synsPerConn':len(postsynDict[postsynList[i]]),
'synMech': 'AMPA',
'weight': EIWGT,
'delay': EIDEL,
'loc':0.5
#'threshold': -10.0
}
netParams.connParams['CA3_highW->Pyramidal'] = {
'preConds': {'pop': 'CA3'},
'postConds': {'pop': 'Pyramidal'},
'connList':lista_CA3highW,
'sec': 'radTmed',
# 'synMech': 'AMPA',
'synMech': 'STDP',
'loc':0.5,
'weight': CHWGT,
'delay': CDEL
#'threshold': 10.0
}
netParams.connParams['CA3_lowW->Pyramidal'] = {
'preConds': {'pop': 'CA3'},
'postConds': {'pop': 'Pyramidal'},
'connList':lista_CA3lowW,
'sec': 'radTmed',
'synMech': 'STDP',
'loc':0.5,
'weight': CLWGT,
'delay': CDEL
#'threshold': 10.0
}
netParams.connParams['CA3_NMDA->Pyramidal'] = {
'preConds': {'pop': 'CA3'},
'postConds': {'pop': 'Pyramidal'},
'sec': 'radTmed',
'connList':lista_CA3highW+lista_CA3lowW,
'synMech': 'NMDA',
'loc':0.5,
'weight': CNWGT,
'delay': CDEL
#'threshold': 10.0
}
#######################
## Septal inhibition
#######################
postsynList=['AA','Basket','BS','OLM']
postsynDict={'AA': ['oriT1','oriT2'], 'Basket':['oriT1','oriT2'], 'BS':['oriT1','oriT2'], 'OLM':['soma']}
w_SEP={'AA': SEPWGT, 'Basket':SEPWGT, 'BS':SEPWGTL, 'OLM':SEPWGTL}
## CHECKED
for i in range(len(postsynList)):
netParams.connParams['SEP->'+postsynList[i]] = {
'preConds': {'pop': 'SEP'},
'postConds': {'pop': postsynList[i]},
'sec': postsynDict[postsynList[i]],
'loc':0.6,
'synMech': ['GABAA'], #,'MyExp2Syn_2'], #GABA-A, GABA-B
'synsPerConn':len(postsynDict[postsynList[i]]),
'weight': w_SEP[postsynList[i]],
'delay': SEPDEL
#'threshold': -10.0
}
if postsynList[i]=='OLM':
netParams.connParams['SEP->OLM']['loc'] = 0.5
netParams.connParams['SEP->OLM']['synMech'] = ['OLM_GABAA'] # connections to OLM use a differen synapse mode - check what are differences
| [
"numpy.loadtxt",
"netpyne.specs.NetParams"
] | [((123, 140), 'netpyne.specs.NetParams', 'specs.NetParams', ([], {}), '()\n', (138, 140), False, 'from netpyne import sim, specs\n'), ((12674, 12712), 'numpy.loadtxt', 'np.loadtxt', ([], {'fname': 'FCONN', 'dtype': '"""int16"""'}), "(fname=FCONN, dtype='int16')\n", (12684, 12712), True, 'import numpy as np\n'), ((11380, 11418), 'numpy.loadtxt', 'np.loadtxt', ([], {'fname': 'FPATT', 'dtype': '"""int16"""'}), "(fname=FPATT, dtype='int16')\n", (11390, 11418), True, 'import numpy as np\n')] |
"""
"""
import logging
import json
import os
import pickle
import scipy.spatial as sp
from filelock import FileLock
import numpy as np
import torch
from .base import BaseModule, create_trainer
logger = logging.getLogger(__name__)
class XSentRetrieval(BaseModule):
mode = 'base'
output_mode = 'classification'
example_type = 'text'
def __init__(self, hparams):
self.test_results_fpath = 'test_results'
if os.path.exists(self.test_results_fpath):
os.remove(self.test_results_fpath)
super().__init__(hparams)
def forward(self, **inputs):
outputs = self.model(**inputs)
last_hidden = outputs[0]
mean_pooled = torch.mean(last_hidden, 1)
return mean_pooled
def test_dataloader_en(self):
test_features = self.load_features('en')
dataloader = self.make_loader(test_features, self.hparams['eval_batch_size'])
return dataloader
def test_dataloader_in(self):
test_features = self.load_features('in')
dataloader = self.make_loader(test_features, self.hparams['eval_batch_size'])
return dataloader
def test_step(self, batch, batch_idx):
inputs = {'input_ids': batch[0], 'token_type_ids': batch[2],
'attention_mask': batch[1]}
labels = batch[3].detach().cpu().numpy()
sentvecs = self(**inputs)
sentvecs = sentvecs.detach().cpu().numpy()
sentvecs = np.hstack([labels[:, None], sentvecs])
return {'sentvecs': sentvecs}
def test_epoch_end(self, outputs):
all_sentvecs = np.vstack([x['sentvecs'] for x in outputs])
with FileLock(self.test_results_fpath + '.lock'):
if os.path.exists(self.test_results_fpath):
with open(self.test_results_fpath, 'rb') as fp:
data = pickle.load(fp)
data = np.vstack([data, all_sentvecs])
else:
data = all_sentvecs
with open(self.test_results_fpath, 'wb') as fp:
pickle.dump(data, fp)
return {'sentvecs': all_sentvecs}
@staticmethod
def add_model_specific_args(parser, root_dir):
return parser
def run_module(self):
self.eval()
self.freeze()
trainer = create_trainer(self, self.hparams)
trainer.test(self, self.test_dataloader_en())
sentvecs1 = pickle.load(open(self.test_results_fpath, 'rb'))
os.remove(self.test_results_fpath)
trainer.test(self, self.test_dataloader_in())
sentvecs2 = pickle.load(open(self.test_results_fpath, 'rb'))
os.remove(self.test_results_fpath)
sentvecs1 = sentvecs1[sentvecs1[:, 0].argsort()]
sentvecs2 = sentvecs2[sentvecs2[:, 0].argsort()]
result_path = os.path.join(self.hparams['output_dir'], 'test_results.txt')
with open(result_path, 'w') as fp:
metrics = {'test_acc': precision_at_10(sentvecs1, sentvecs2)}
json.dump(metrics, fp)
def precision_at_10(sentvecs1, sentvecs2):
n = sentvecs1.shape[0]
# mean centering
sentvecs1 = sentvecs1 - np.mean(sentvecs1, axis=0)
sentvecs2 = sentvecs2 - np.mean(sentvecs2, axis=0)
sim = sp.distance.cdist(sentvecs1, sentvecs2, 'cosine')
actual = np.array(range(n))
preds = sim.argsort(axis=1)[:, :10]
matches = np.any(preds == actual[:, None], axis=1)
return matches.mean()
| [
"logging.getLogger",
"os.path.exists",
"numpy.mean",
"pickle.dump",
"numpy.hstack",
"torch.mean",
"scipy.spatial.distance.cdist",
"filelock.FileLock",
"os.path.join",
"pickle.load",
"numpy.any",
"numpy.vstack",
"json.dump",
"os.remove"
] | [((206, 233), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (223, 233), False, 'import logging\n'), ((3225, 3274), 'scipy.spatial.distance.cdist', 'sp.distance.cdist', (['sentvecs1', 'sentvecs2', '"""cosine"""'], {}), "(sentvecs1, sentvecs2, 'cosine')\n", (3242, 3274), True, 'import scipy.spatial as sp\n'), ((3361, 3401), 'numpy.any', 'np.any', (['(preds == actual[:, None])'], {'axis': '(1)'}), '(preds == actual[:, None], axis=1)\n', (3367, 3401), True, 'import numpy as np\n'), ((444, 483), 'os.path.exists', 'os.path.exists', (['self.test_results_fpath'], {}), '(self.test_results_fpath)\n', (458, 483), False, 'import os\n'), ((695, 721), 'torch.mean', 'torch.mean', (['last_hidden', '(1)'], {}), '(last_hidden, 1)\n', (705, 721), False, 'import torch\n'), ((1453, 1491), 'numpy.hstack', 'np.hstack', (['[labels[:, None], sentvecs]'], {}), '([labels[:, None], sentvecs])\n', (1462, 1491), True, 'import numpy as np\n'), ((1594, 1637), 'numpy.vstack', 'np.vstack', (["[x['sentvecs'] for x in outputs]"], {}), "([x['sentvecs'] for x in outputs])\n", (1603, 1637), True, 'import numpy as np\n'), ((2457, 2491), 'os.remove', 'os.remove', (['self.test_results_fpath'], {}), '(self.test_results_fpath)\n', (2466, 2491), False, 'import os\n'), ((2624, 2658), 'os.remove', 'os.remove', (['self.test_results_fpath'], {}), '(self.test_results_fpath)\n', (2633, 2658), False, 'import os\n'), ((2797, 2857), 'os.path.join', 'os.path.join', (["self.hparams['output_dir']", '"""test_results.txt"""'], {}), "(self.hparams['output_dir'], 'test_results.txt')\n", (2809, 2857), False, 'import os\n'), ((3132, 3158), 'numpy.mean', 'np.mean', (['sentvecs1'], {'axis': '(0)'}), '(sentvecs1, axis=0)\n', (3139, 3158), True, 'import numpy as np\n'), ((3187, 3213), 'numpy.mean', 'np.mean', (['sentvecs2'], {'axis': '(0)'}), '(sentvecs2, axis=0)\n', (3194, 3213), True, 'import numpy as np\n'), ((497, 531), 'os.remove', 'os.remove', (['self.test_results_fpath'], {}), 
'(self.test_results_fpath)\n', (506, 531), False, 'import os\n'), ((1652, 1695), 'filelock.FileLock', 'FileLock', (["(self.test_results_fpath + '.lock')"], {}), "(self.test_results_fpath + '.lock')\n", (1660, 1695), False, 'from filelock import FileLock\n'), ((1712, 1751), 'os.path.exists', 'os.path.exists', (['self.test_results_fpath'], {}), '(self.test_results_fpath)\n', (1726, 1751), False, 'import os\n'), ((2987, 3009), 'json.dump', 'json.dump', (['metrics', 'fp'], {}), '(metrics, fp)\n', (2996, 3009), False, 'import json\n'), ((1883, 1914), 'numpy.vstack', 'np.vstack', (['[data, all_sentvecs]'], {}), '([data, all_sentvecs])\n', (1892, 1914), True, 'import numpy as np\n'), ((2045, 2066), 'pickle.dump', 'pickle.dump', (['data', 'fp'], {}), '(data, fp)\n', (2056, 2066), False, 'import pickle\n'), ((1844, 1859), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (1855, 1859), False, 'import pickle\n')] |
from __future__ import division
from __future__ import print_function
from builtins import str
from builtins import range
from past.utils import old_div
import numpy as np
import matplotlib.pyplot as plt
import os
from scipy.interpolate import RegularGridInterpolator
from scipy.interpolate import LinearNDInterpolator
from scipy.interpolate import NearestNDInterpolator
import math
from tqdm import trange
import tqdm
import time
import matplotlib
from matplotlib import ticker
from matplotlib.widgets import Slider, Button, RadioButtons
from scipy.optimize import curve_fit
import datetime
colorbar = True
#matplotlib.rc('axes', color_cycle=['r', 'g', 'b', '#004060'])
mainlabel = ""
units = "$\AA^{-1}$"
xunits = units
yunits = units
zunits = units
contours = 200
DPI = 300
format = ".png"
text = "Structure Factor"
PLOT_EWALDS = True # enable ewald-corrected SF plots
savelog = True
savelin = True
NBINSRAD = 0
normplot = 1
FP_THRESHOLD = 1.0E-12
theta = np.pi / 2
title_fontsize = 9
path = ""
def make_flat_plot(D, xr, yr, zr):
if len(xr) != 1 and len(yr) != 1 and len(zr) != 1:
print("error in make_flat_plot! one of these lengths must be 1")
exit()
for ix in xr:
for iy in yr:
for iz in zr:
r = D[ix, iy, iz, :4]
pts.append((r))
def pl(title, obj):
delim = "="*20
print(delim, title, delim)
print(obj)
def pli(obj, title=""):
pl(title, obj)
buf = input("enter q to quit, anything else to continue") # raw_input renamed to input() in python3
if buf == 'q':
exit()
def ple(title, obj):
pl(title, obj)
exit()
def csplot_wlog(X, Y, Z, contours, lab, xlab, ylab, **kwargs):
csplot(X, Y, Z, contours, lab, xlab, ylab, **kwargs)
csplot(X, Y, np.log(Z), contours, "log_"+lab, xlab, ylab, **kwargs)
def csplot(X, Y, Z, contours, lab, xlab, ylab,**kwargs):
title = lab+" S("+xlab+","+ylab+")"
fname = lab+"_"+xlab+"_"+ylab
fig, ax = plt.subplots()
plt.suptitle(title)
plt.xlabel(xlab)
plt.ylabel(ylab)
if normplot == 1:
cax = plt.contourf(X, Y, Z / np.amax(Z), contours, vmin=0.0, vmax=0.05, **kwargs)
else:
cax = plt.contourf(X, Y, Z, contours, vmax=0.01*np.amax(Z), **kwargs)
# ax.set_aspect((np.amax(Y)-np.amin(Y))/(np.amax(X)-np.amin(X)))
# ax.set_aspect('auto')
cbar = fig.colorbar(cax)
plt.savefig(path+fname+format, dpi=DPI)
plt.clf()
def sfplot(data, lcscale, **kwargs):
""" data: plot slice through structure factor"""
if not os.path.exists(path):
os.makedirs(path)
cspos = 0.0
la = []
lb = 0
an = ['x', 'y', 'z'] # axes names
for i in range(data.shape[2] - 1):
if np.unique(data[..., i]).size > 1:
la.append(i)
else:
lb = i
cspos = data[0, 0, i]
title = mainlabel + "\n" + text + "\n" + an[lb] + "=" + str(round(cspos, 2)) + zunits
ltitle = mainlabel + "\n" + "log " + text + "\n" + an[lb] + "=" + str(round(cspos, 2)) + zunits
xlab = an[la[0]]
ylab = an[la[1]]
filename = path + an[lb] + "=" + str(round(cspos, 2))
xlab += "(" + xunits + ")"
ylab += "(" + yunits + ")"
if savelog:
plt.suptitle(ltitle, fontsize=title_fontsize)
plt.xlabel(xlab)
plt.ylabel(ylab)
max_log = np.amax(np.log(data[..., 3]))
plt.contourf(data[..., la[0]], data[..., la[1]], np.log(data[..., 3]), contours, vmax=lcscale*max_log, **kwargs)
plt.savefig(filename+"_log"+format, dpi=DPI)
plt.clf()
if savelin:
plt.suptitle(title, fontsize=title_fontsize)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.contourf(data[..., la[0]], data[..., la[1]], data[..., 3], contours, **kwargs)
plt.savefig(filename+format, dpi=DPI)
plt.clf()
def radial_integrate(D, Nbins, outputname):
SF = D[:, :, :, 3]
R = (D[:, :, :, 0]**2).astype(np.float16) + (D[:, :, :, 1]**2).astype(np.float16) + (D[:, :, :, 2]**2).astype(np.float16)
H, E = np.histogram(R, bins=Nbins, weights=SF)
Hc, E = np.histogram(R, bins=Nbins)
Hc = np.where(Hc != 0, Hc, 1.0)
H /= Hc
H[:1] = 0.0
H /= np.amax(H)
plt.plot(E[:-1], H)
plt.xlim(0, 5)
plt.savefig(outputname, dpi=DPI)
def spherical_integrate(D):
exit()
def Plot_Ewald_Sphere_Correction_old(D, wavelength_angstroms):
""" pass full 3d data,SF,wavelength in angstroms """
X = D[:, 0, 0, 0]
Y = D[0, :, 0, 1]
Z = D[0, 0, :, 2]
SF = D[:, :, :, 3]
K_ES = 2.0*math.pi/wavelength_angstroms # calculate k for incident xrays in inverse angstroms
ES = RegularGridInterpolator((X, Y, Z), SF)
pts = []
for ix in range(D.shape[0]):
xsq = X[ix]**2.0
for iy in range(D.shape[1]):
R = np.sqrt(xsq+Y[iy]**2.0)
theta = np.arctan(old_div(R,K_ES))
xnew = X[ix]*np.cos(theta)
ynew = Y[iy]*np.cos(theta)
znew = K_ES*(1.0-np.cos(theta))
pts.append((X[ix], Y[iy], xnew, ynew, znew))
pts = np.asarray(pts)
EWD = ES(pts[:, 2:])
EWD = EWD.reshape(D.shape[0], D.shape[1])
plt.contourf(D[:, :, 0, 0], D[:, :, 0, 1], EWD, 200, interpolation=interp)
plt.savefig("EWxy.png",dpi=300)
plt.clf()
plt.contourf(D[:, :, 0, 0], D[:, :, 0, 1], np.log(EWD), 200, interpolation=interp)
plt.savefig("EWxylog.png", dpi=300)
plt.clf()
def Plot_Ewald_Sphere_Correction(D, wavelength_angstroms, ucell=None, cscale=1, lcscale=1, **kwargs):
    """Ewald-sphere-correct the xy, xz and yz planes of a 3D structure factor and save six plots.

    Fix: the default for ``ucell`` was a mutable ``[]`` (a classic Python pitfall);
    it is never read inside this function, so the default is now ``None`` with
    identical behavior for all callers.

    :param D: (nx, ny, nz, 4) grid of (qx, qy, qz, SF)
    :param wavelength_angstroms: X-ray wavelength in angstroms
    :param ucell: unused; kept for interface compatibility
    :param cscale: factor by which to scale the maximum value of the linear colorbar
    :param lcscale: factor by which to scale the maximum value of the log colorbar
    """
    if not os.path.exists(path):
        os.makedirs(path)
    X = D[:, 0, 0, 0]
    Y = D[0, :, 0, 1]
    Z = D[0, 0, :, 2]
    SF = D[:, :, :, 3]
    K_ES = 2.0*math.pi/wavelength_angstroms  # calculate k for incident xrays in inverse angstroms
    ES = RegularGridInterpolator((X, Y, Z), SF, bounds_error=False)
    # project each in-plane sample point onto the Ewald sphere (one set per plane)
    xypts = []
    for ix in range(D.shape[0]):
        xsq = X[ix]**2.0
        for iy in range(D.shape[1]):
            theta = np.arctan(old_div(np.sqrt(xsq + Y[iy]**2.0), K_ES))
            xypts.append((X[ix]*np.cos(theta), Y[iy]*np.cos(theta), K_ES*(1.0 - np.cos(theta))))
    xzpts = []
    for ix in range(D.shape[0]):
        xsq = X[ix]**2.0
        for iz in range(D.shape[2]):
            theta = np.arctan(old_div(np.sqrt(xsq + Z[iz]**2.0), K_ES))
            xzpts.append((X[ix]*np.cos(theta), K_ES*(1.0-np.cos(theta)), Z[iz]*np.cos(theta)))
    yzpts = []
    for iy in range(D.shape[1]):
        ysq = Y[iy]**2.0
        for iz in range(D.shape[2]):
            theta = np.arctan(old_div(np.sqrt(ysq+Z[iz]**2.0), K_ES))
            yzpts.append((K_ES*(1.0-np.cos(theta)), Y[iy]*np.cos(theta), Z[iz]*np.cos(theta)))
    xypts = np.asarray(xypts)
    xzpts = np.asarray(xzpts)
    yzpts = np.asarray(yzpts)
    EWDxy = ES(xypts)
    EWDxz = ES(xzpts)
    EWDyz = ES(yzpts)
    EWDxy = EWDxy.reshape(D.shape[0], D.shape[1])
    EWDxz = EWDxz.reshape(D.shape[0], D.shape[2])
    EWDyz = EWDyz.reshape(D.shape[1], D.shape[2])
    title = "Ewald Corrected Structure Factor \n $\lambda=$"+str(wavelength_angstroms)+" $\AA$ $k_{ew}=$"+str(round(K_ES,2))+" $\AA^{-1}$"
    ltitle = 'log ' + title
    xlab = 'x (' + units + ")"
    ylab = 'y (' + units + ")"
    zlab = 'z (' + units + ")"
    fname = "Ewald_"
    # figure 1/2: xy plane, linear then log scale
    plt.figure(1)
    plt.suptitle(title)
    plt.xlabel(xlab)
    plt.ylabel(ylab)
    EWDmax_xy = np.amax(EWDxy)
    plt.contourf(D[:, :, 0, 0], D[:, :, 0, 1], EWDxy, contours, vmax=cscale*EWDmax_xy, **kwargs)
    plt.savefig(path + fname + "xy" + format, dpi=DPI)
    plt.clf()
    plt.figure(2)
    plt.suptitle(ltitle)
    plt.xlabel(xlab)
    plt.ylabel(ylab)
    EWDmax_xylog = np.amax(np.log(EWDxy))
    plt.contourf(D[:, :, 0, 0], D[:, :, 0, 1], np.log(EWDxy), contours, vmax=lcscale*EWDmax_xylog, **kwargs)
    plt.savefig(path + fname + "xylog" + format, dpi=DPI)
    plt.clf()
    # figure 3/4: xz plane, linear then log scale (log plot is clipped to a square window)
    plt.figure(3)
    plt.suptitle(title)
    plt.xlabel(xlab)
    plt.ylabel(zlab)
    EWDmax_xz = np.amax(EWDxz)
    plt.contourf(D[:, 0, :, 0], D[:, 0, :, 2], EWDxz, contours, vmax=cscale*EWDmax_xz, **kwargs)
    plt.savefig(path + fname + "xz" + format, dpi=DPI)
    plt.clf()
    plt.figure(4)
    plt.suptitle(ltitle)
    plt.xlabel(xlab)
    plt.ylabel(zlab)
    EWDmax_xzlog = np.amax(np.log(EWDxz))
    plt.contourf(D[:, 0, :, 0], D[:, 0, :, 2], np.log(EWDxz), contours, vmax=lcscale*EWDmax_xzlog, **kwargs)
    lims = [np.amax(D[:, 0, :, 0]), np.amax(D[:, 0, :, 2])]
    qmax = min(lims)
    plt.xlim([-qmax, qmax])
    plt.ylim([-qmax, qmax])
    plt.savefig(path + fname + "xzlog" + format, dpi=DPI)
    plt.clf()
    # figure 5/6: yz plane, linear then log scale
    plt.figure(5)
    plt.suptitle(title)
    plt.xlabel(ylab)
    plt.ylabel(zlab)
    EWDmax_yz = np.amax(EWDyz)
    plt.contourf(D[0, :, :, 1], D[0, :, :, 2], EWDyz, contours, vmax=cscale*EWDmax_yz, **kwargs)
    plt.savefig(path + fname + "yz" + format, dpi=DPI)
    plt.clf()
    plt.figure(6)
    plt.suptitle(ltitle)
    plt.xlabel(ylab)
    plt.ylabel(zlab)
    EWDmax_yzlog = np.amax(np.log(EWDyz))
    plt.contourf(D[0, :, :, 1], D[0, :, :, 2], np.log(EWDyz), contours, vmax=lcscale*EWDmax_yzlog, **kwargs)
    plt.savefig(path + fname + "yzlog" + format, dpi=DPI)
    plt.clf()
def lorentz(points, a, b):
    """Unit-height Lorentzian centred at ``b``.

    :param points: position(s) at which to evaluate
    :param a: width parameter; the half width at half maximum is pi / (2 * a)
    :param b: position of the maximum
    :return: values in (0, 1], equal to 1 at ``points == b``

    NOTE(review): this definition is shadowed by the 3-parameter ``lorentz``
    defined later in this module.
    """
    half_width = np.pi / a / 2
    u = (b - points) / half_width
    return 1 / (1 + u ** 2)
def inverse_ft(D, ucell):
    """Inverse-FFT the structure factor back to real space and show an angle-averaged r-z map.

    Diagnostic routine: it ends with plt.show() followed by exit(), so the whole
    program terminates after the plot window is closed.
    """
    X = D[:, 0, 0, 0]
    Y = D[0, :, 0, 1]
    Z = D[0, 0, :, 2]
    # NOTE(review): Z is a view into D, so this in-place add mutates D itself.
    # It shifts Z by the sample closest to zero -- presumably to centre the axis; confirm.
    Z += Z[np.argmin(abs(Z))]
    SF = D[..., 3]
    fbin_x = X[1] - X[0]  # size of x bins in fourier space
    fbin_y = Y[1] - Y[0]  # size of y bins in fourier space
    fbin_z = Z[1] - Z[0]  # size of z bins in fourier space
    real_x = 2 * np.pi / fbin_x  # largest x dimension in real space
    real_y = 2 * np.pi / fbin_y  # largest y dimension in real space
    real_z = 2 * np.pi / fbin_z  # largest z dimension in real space
    # NOTE(review): rbin_x/y/z are computed but never used below.
    rbin_x = real_x / X.shape[0]
    rbin_y = real_y / Y.shape[0]
    rbin_z = real_z / Z.shape[0]
    X_real = np.linspace(-real_x / 2, real_x / 2, X.shape[0])
    Y_real = np.linspace(-real_y / 2, real_y / 2, Y.shape[0])
    Z_real = np.linspace(-real_z / 2, real_z / 2, Z.shape[0])
    # reorder lists so they conform to numpy (https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.fft.ifftn.html)
    # NOTE(review): list(X).index(0) requires an exact 0.0 sample on each axis; otherwise this raises ValueError.
    start = list(X).index(0)
    X_reordered = np.concatenate((X[start:], X[:start]))
    ndx_x = [list(X).index(i) for i in X_reordered]
    start = list(Y).index(0)
    Y_reordered = np.concatenate((Y[start:], Y[:start]))
    ndx_y = [list(Y).index(i) for i in Y_reordered]
    start = list(Z).index(0)
    Z_reordered = np.concatenate((Z[start:], Z[:start]))
    ndx_z = [list(Z).index(i) for i in Z_reordered]
    SF_reordered = SF[ndx_x, :, :]
    SF_reordered = SF_reordered[:, ndx_y, :]
    SF_reordered = SF_reordered[:, :, ndx_z]
    # inverse fourier transform
    inverse_fft = np.fft.ifftn(SF_reordered)
    # reorder again
    inverse_fft = inverse_fft[ndx_x, :, :]
    inverse_fft = inverse_fft[:, ndx_y, :]
    inverse_fft = inverse_fft[:, :, ndx_z]
    # fourier transform of inversion as a test
    # ft = np.abs(np.fft.fftn(inverse_fft))**2
    # ft = ft[ndx_x, :]
    # ft = ft[:, ndx_y]
    # plt.imshow(ft)
    # plt.show()
    # keep the real part, normalised to its maximum
    inverse_fft = inverse_fft.real / np.amax(inverse_fft.real)
    final, rfin, zfin = angle_average(X_real, Y_real, Z_real, inverse_fft, ucell=ucell)
    # crop the plotted window to roughly -15..15 in both r and z
    rbound1 = 0
    rbound2 = 0
    while rfin[rbound1] < -15:
        rbound1 += 1
    while rfin[rbound2] < 15:
        rbound2 += 1
    zbound1 = 0
    zbound2 = 0
    while zfin[0][zbound1] < -15:
        zbound1 += 1
    while zfin[0][zbound2] < 15:
        zbound2 += 1
    levels = np.linspace(np.amin(final), 0.001 * np.amax(final), 200)
    plt.contourf(rfin[rbound1:rbound2], zfin[0][zbound1:zbound2], final[rbound1:rbound2, zbound1:zbound2].T,
                 levels=levels, cmap='seismic', extend='max')
    plt.colorbar()
    plt.xlabel('r ($\AA$)')
    plt.ylabel('z ($\AA$)')
    plt.show()
    exit()
def angle_average(X, Y, Z, SF, ucell=None):
    """Azimuthally average a 3D structure factor onto an (r, z) plane.

    SF is sampled (linear interpolation) on rings of azimuthal points at 100
    radii for every z level, averaged over each ring, normalised by the global
    mean, and mirrored about r = 0 so the result is ready for contourf.

    :param X: 1D grid axis in x on which SF is sampled
    :param Y: 1D grid axis in y
    :param Z: 1D grid axis in z
    :param SF: 3D structure-factor values on the (X, Y, Z) grid
    :param ucell: optional 3 unit-cell vectors; when given, the cylindrical
        sample points are mapped through the inverse reciprocal-lattice basis
    :return: (final, rfin, zfin) -- mirrored r-z intensity map and its axes
    """
    interpolator = RegularGridInterpolator((X, Y, Z), SF, bounds_error=False)
    THETA_BINS_PER_INV_ANG = 20.
    MIN_THETA_BINS = 10  # minimum allowed bins
    RBINS = 100
    if ucell is not None:
        a1, a2, a3 = ucell[0], ucell[1], ucell[2]
        b1 = (np.cross(a2, a3)) / (np.dot(a1, np.cross(a2, a3)))
        b2 = (np.cross(a3, a1)) / (np.dot(a2, np.cross(a3, a1)))
        b3 = (np.cross(a1, a2)) / (np.dot(a3, np.cross(a1, a2)))
        b_inv = np.linalg.inv(np.vstack((b1, b2, b3)))
    ZBINS = Z.shape[0]  # 400
    XR = (X[-1] - X[0])
    YR = (Y[-1] - Y[0])
    Rmax = min(XR, YR) / 2.0
    Rmax *= 0.95  # stay safely inside the interpolation domain
    rarr, rspace = np.linspace(0.0, Rmax, RBINS, retstep=True)
    zar = np.linspace(Z[0], Z[-1], ZBINS)
    averaged = np.zeros((rarr.shape[0], zar.shape[0]))
    circumference = 2.*np.pi*rarr
    for ir in range(rarr.shape[0]):
        # more azimuthal samples on larger rings, but never fewer than the floor
        ntheta = max(int(THETA_BINS_PER_INV_ANG*circumference[ir]), MIN_THETA_BINS)
        thetas = np.linspace(0.0, np.pi*2.0, ntheta, endpoint=False)
        t, r, z = np.meshgrid(thetas, rarr[ir], zar)  # cylindrical sample grid
        xar = r*np.cos(t)
        yar = r*np.sin(t)
        pts = np.vstack((xar.ravel(), yar.ravel(), z.ravel())).T
        if ucell is not None:
            # pts = mc_inv(pts, ucell)
            pts = np.matmul(pts, b_inv)
        averaged[ir, :] = np.average(interpolator(pts).reshape(r.shape), axis=1)
    lowest = np.nanmin(averaged)
    averaged = np.where(np.isnan(averaged), lowest, averaged)
    averaged /= np.average(averaged)  # normalize
    # mirror about r = 0 to make the plot symmetrical
    final = np.append(averaged[::-1, :], averaged[1:], axis=0)  # SF
    rfin = np.append(-rarr[::-1], rarr[1:])  # R
    zfin = np.append(z[:, 0, :], z[1:, 0, :], axis=0)  # Z
    return final, rfin, zfin
def Rspots(R, Z, waxs, theta=37, theta_sigma=(7, 5), bounds=(1.256, 1.57), cmap='jet'):
    """ Measure intensity of R-spots in specified region """
    # Collect intensities whose (R, Z) radius lies in the annulus [inner, outer]
    # and whose polar angle falls within the asymmetric window around +theta or
    # -theta; mark the selected pixels on a copy of the pattern for display.
    marked = np.copy(waxs.T)
    inner = bounds[0]
    outer = bounds[1]
    picked = []
    for i in range(R.shape[0]):
        for j in range(Z.shape[0]):
            if not (inner < np.linalg.norm([R[i], Z[j]]) < outer):
                continue
            angle = (180 / np.pi) * np.arctan(Z[j] / R[i])
            lo = theta - theta_sigma[0]
            hi = theta + theta_sigma[1]
            if lo < angle < hi or lo < -angle < hi:
                marked[i, j] = 100
                picked.append(waxs[j, i])
    average_intensity = np.mean(picked)
    plt.figure()
    levels = np.linspace(0, 3.1, 200)
    plt.contourf(R, Z, marked.T, cmap=cmap, levels=levels, extend='max')
    plt.xlim(-2.5, 2.5)
    plt.ylim(-2.5, 2.5)
    plt.figure()
    plt.hist(picked, bins=25)
    plt.title('Average intensity of R-spots: %.2f' % average_intensity)
    # plt.show()
    return average_intensity
def gaussian(points, mean, sigma, amplitude, yshift):
    """Vertically shifted Gaussian: yshift + amplitude * N(mean, sigma) density at ``points``."""
    norm = amplitude / np.sqrt(2 * np.pi * sigma ** 2)
    exponent = -(points - mean) ** 2 / (2 * sigma ** 2)
    return yshift + norm * np.exp(exponent)
def lorentz(points, a, b, c):
    """Lorentzian line shape.

    :param points: position(s) at which to evaluate
    :param a: full width at half maximum (FWHM)
    :param b: position of the maximum
    :param c: scale factor; the peak height is c / (pi * a / 2)
    :return: Lorentzian evaluated at ``points``
    """
    half_width = a / 2
    u = (b - points) / half_width
    return (c / (np.pi * half_width)) / (1 + u ** 2)
def triple_lorentz(x, a0, a1, a2, b0, b1, b2, c0, c1, c2):
    """Sum of three Lorentzians with widths a0..a2, centres b0..b2 and scales c0..c2."""
    first = lorentz(x, a0, b0, c0)
    second = lorentz(x, a1, b1, c1)
    third = lorentz(x, a2, b2, c2)
    return first + second + third
def PLOT_RAD_NEW(D, wavelength_angstroms, ucell, format=False, factor=3.1, **kwargs):
    """
    :param D: raw structure factor
    :param wavelength_angstroms: wavelength of X-ray (angstroms)
    :param ucell: 3 x 3 unitcell vectors
    :param factor: maximum colorbar value if using formatting from Coscia et al. manuscript
    :param format: plot simulated XRD patterns as they appear in Coscai et al. manuscript
    :return:
    """
    if not os.path.exists(path):
        os.makedirs(path)
    # inverse_ft(D, ucell)
    X = D[:, 0, 0, 0]
    Y = D[0, :, 0, 1]
    Z = D[0, 0, :, 2]
    SF = D[..., 3]
    ############## Plot z-slice down the middle of the raw structure factor ###################
    # plt.plot(Z, SF[len(X)//2, len(Y)//2, :])
    # plt.xlabel('q$_z$ ($\AA^{-1}$)')
    # plt.ylabel('Intensity')
    # plt.savefig('z_section.png')
    # plt.show()
    # exit()
    ES = RegularGridInterpolator((X, Y, Z), SF, bounds_error=False)
    THETA_BINS_PER_INV_ANG = 20.
    MIN_THETA_BINS = 1  # minimum allowed bins
    RBINS = 400
    NLEVELS = 200  # number of levels for contour plots
    a1 = ucell[0]
    a2 = ucell[1]
    a3 = ucell[2]
    # reciprocal-lattice basis vectors (without the 2*pi factor) and their inverse
    b1 = (np.cross(a2, a3)) / (np.dot(a1, np.cross(a2, a3)))
    b2 = (np.cross(a3, a1)) / (np.dot(a2, np.cross(a3, a1)))
    b3 = (np.cross(a1, a2)) / (np.dot(a3, np.cross(a1, a2)))
    b_inv = np.linalg.inv(np.vstack((b1, b2, b3)))
    ZBINS = Z.shape[0]  # 400
    XR = (X[-1] - X[0])*ucell[0][0]
    YR = (Y[-1] - Y[0])*ucell[1][1]
    Rmax = min(XR, YR) / 2.0
    Rmax *= 0.95
    rarr, rspace = np.linspace(0.0, Rmax, RBINS, retstep=True)
    zar = np.linspace(Z[0], Z[-1], ZBINS)
    oa = np.zeros((rarr.shape[0], zar.shape[0]))
    circ = 2.*np.pi*rarr  # circumference
    # azimuthally average the interpolated structure factor, ring by ring
    for ir in trange(rarr.shape[0]):
        NTHETABINS = max(int(THETA_BINS_PER_INV_ANG*circ[ir]), MIN_THETA_BINS)  # calculate number of bins at this r
        thetas = np.linspace(0.0, np.pi*2.0, NTHETABINS, endpoint=False)  # generate theta array
        t, r, z = np.meshgrid(thetas, rarr[ir], zar)  # generate grid of cylindrical points
        xar = r*np.cos(t)  # set up x,y coords
        yar = r*np.sin(t)
        pts = np.vstack((xar.ravel(), yar.ravel(), z.ravel())).T  # reshape for interpolation
        MCpts = np.matmul(pts, b_inv)  # slower: MCpts = mc_inv(pts,ucell)
        oa[ir, :] = np.average(ES(MCpts).reshape(r.shape), axis=1)  # store average values in final array
    # replace NaNs (out-of-domain interpolation samples) with the minimum value
    mn = np.nanmin(oa)
    oa = np.where(np.isnan(oa), mn, oa)
    if not format:
        rad_avg = np.average(oa)
        oa /= rad_avg  # normalize
    # set up data for contourf plot by making it symmetrical
    final = np.append(oa[::-1, :], oa[1:], axis=0)  # SF
    rfin = np.append(-rarr[::-1], rarr[1:])  # R
    zfin = np.append(z[:, 0, :], z[1:, 0, :], axis=0)  # Z
    unitlab = '($\AA^{-1}$)'  # Angstroms
    logfinal = np.log(final)
    MIN = np.amin(final)  # MIN = np.amin(np.ma.masked_invalid(final))
    MAX = np.amax(final)  # MAX = np.amax(np.ma.masked_invalid(final))
    lvls = np.linspace(MIN, MAX, NLEVELS)
    if format:
        alkane_intensity = normalize_alkanes(rfin, zfin[0], final, 1.4, 1.57, 120)  # 1.4, 1.57
        final /= alkane_intensity  # normalize according to R-alkanes
        # lvls = np.linspace(0, factor, NLEVELS)  # contour levels
        rlimits = [np.argmin(np.abs(rfin + 2.5)), np.argmin(np.abs(rfin - 2.5))]
        zlimits = [np.argmin(np.abs(zfin[0] + 2.5)), np.argmin(np.abs(zfin[0] - 2.5))]
        MIN = np.amin(final[rlimits[0]:rlimits[1], zlimits[0]:zlimits[1]])
        # NOTE(review): the windowed minimum computed on the previous line is
        # immediately overridden by this hard-coded value.
        MIN = 0.4
        # MAX = 7.67
        #lvls = np.linspace(np.log10(MIN), np.log10(MAX), NLEVELS)
    if format:
        cmap = 'jet'
        print(factor)
        lvls = np.linspace(0, factor, NLEVELS)
        lvls_log = np.linspace(np.log10(final[-1, -1]), np.log10(np.amax(final)), NLEVELS)
        # plot 1D SAXS
        plt.figure()
        plt.plot(rfin, final[:, zfin[0].shape[0]//2], linewidth=2)
        plt.xlabel('$q_r\ (\AA^{-1})$', fontsize=14)
        plt.ylabel('Intensity', fontsize=14)
        plt.tight_layout()
        plt.figure()
        heatmap = plt.contourf(rfin, zfin[0], final.T, levels=lvls, cmap=cmap, extend='max')
        cbar = plt.colorbar(heatmap)
        plt.xlabel('$q_r\ (\AA^{-1}$)', fontsize=18)
        plt.ylabel('$q_z\ (\AA^{-1}$)', fontsize=18)
        plt.gcf().get_axes()[0].set_ylim(-2.5, 2.5)
        plt.gcf().get_axes()[0].set_xlim(-2.5, 2.5)
        plt.gcf().get_axes()[0].tick_params(labelsize=14)
        plt.gcf().get_axes()[0].set_aspect('equal')
        plt.tight_layout()
        plt.savefig('rzplot.png')
        print('rzplot.png saved')
        ################# Q_R and Q_Z CROSS_SECTIONS OF R_PI WITH GAUSSIAN AND LORENTZIAN FITS ##################
        ############################### FIT TO QR CROSS-SECTION OF R-PI #########################
        plt.figure()
        # z index of the row containing the R-pi maximum along the r = 0 column
        rpi_ndx = np.argmin(np.abs(zfin[0] - zfin[0][np.argmax(final[rfin.size // 2, :])]))
        plt.plot(rfin, final[:, rpi_ndx], linewidth=2, color='blue')  # its xkcd:blue in paper
        p = np.array([0, 0.3, 4, 1])
        solp, cov_x = curve_fit(gaussian, rfin, final[:, rpi_ndx], p,
                                bounds=((-np.inf, 0, 0, 0), (np.inf, np.inf, np.inf, np.inf)))
        plt.plot(rfin, gaussian(rfin, solp[0], solp[1], solp[2], solp[3]), '--', color='blue', label='Gaussian Fit',
                 linewidth=2)
        print("Gaussian FWHM = %.3f +/- %.3f A^-1" % (2*np.sqrt(2*np.log(2))*solp[1],
                                                      2 * np.sqrt(2 * np.log(2)) * cov_x[1, 1] ** 0.5))
        p = np.array([0.1, 0, 4])
        solp_lorentz, cov_x = curve_fit(lorentz, rfin, final[:, rpi_ndx], p,
                                        bounds=[[0, -np.inf, 0], [np.inf, np.inf, np.inf]])
        plt.plot(rfin, lorentz(rfin, solp_lorentz[0], solp_lorentz[1], solp_lorentz[2]), '--', label='Lorentzian Fit',
                 linewidth=2, color='orange')  # its xkcd:orange in the paper
        print("Lorentzian FWHM = %.3f +/- %.3f A^-1" % (solp_lorentz[0], cov_x[0, 0] ** 0.5))
        plt.legend(fontsize=16)
        plt.xlabel('$q_r\ (\AA^{-1})$', fontsize=18)
        plt.ylabel('Intensity', fontsize=18)
        plt.gcf().get_axes()[0].tick_params(labelsize=18)
        plt.tight_layout()
        #plt.savefig('/home/bcoscia/PycharmProjects/LLC_Membranes/Ben_Manuscripts/structure_paper/figures/sim_rsection_fit.pdf')
        # ######################## FIT TO QZ CROSS-SECTION OF R-PI #########################
        # plt.figure()
        #
        # rndx = rfin.size // 2
        # zstart = zfin[0].size // 2
        # plt.plot(zfin[0][zstart:], final[rndx, zstart:], linewidth=2, color='blue')
        #
        # p = np.array([1.4, 0.1, 7, 0])
        # solp, cov_x = curve_fit(gaussian, zfin[0][zstart:], final[rndx, zstart:], p,
        #                         bounds=([-np.inf, 0, 0, 0], [np.inf, np.inf, np.inf, np.inf]))
        #
        # fine_grid = np.linspace(zfin[0][zstart], zfin[0][-1], 1000)
        # plt.plot(fine_grid, gaussian(fine_grid, solp[0], solp[1], solp[2], solp[3]), '--', color='blue', label='Gaussian Fit',
        #          linewidth=2)
        #
        # print("Gaussian FWHM = %.3f +/- %.3f A^-1" % (2*np.sqrt(2*np.log(2))*solp[1],
        #                                               2 * np.sqrt(2 * np.log(2)) * cov_x[1, 1] ** 0.5))
        #
        # p = np.array([0.1, 0, 4])
        # solp_lorentz, cov_x = curve_fit(lorentz, zfin[0][zstart:], final[rndx, zstart:], p,
        #                                 bounds=[[0, -np.inf, 0], [np.inf, np.inf, np.inf]])
        #
        # plt.plot(fine_grid, lorentz(fine_grid, solp_lorentz[0], solp_lorentz[1], solp_lorentz[2]), '--',
        #          label='Lorentzian Fit', linewidth=2, color='orange')
        #
        # print("Lorentzian FWHM = %.3f +/- %.3f A^-1" % (solp_lorentz[0], cov_x[0, 0] ** 0.5))
        #
        # plt.legend(fontsize=17)
        # plt.xlabel('$q_z\ (\AA^{-1})$', fontsize=18)
        # plt.ylabel('Intensity', fontsize=18)
        # plt.gcf().get_axes()[0].tick_params(labelsize=18)
        # plt.tight_layout()
        # #plt.savefig('/home/bcoscia/PycharmProjects/LLC_Membranes/Ben_Manuscripts/structure_paper/figures/sim_zsection_fit.pdf')
        #
        # print('Average R-pi intensity: %.2f' % np.amax(final[rfin.size // 2, :]))
        #print('Average R-spots intensity : %.2f' % Rspots(rfin, zfin[0], final.T, theta=30, theta_sigma=(1, 1),
        #bounds=(1.39, 1.49), cmap=cmap))
    else:
        plt.figure()
        plt.contourf(rfin, zfin[0], final.T, levels=lvls, cmap='jet')
        plt.colorbar()
        plt.title('S(r,z)')
        plt.xlabel('r ' + unitlab)
        plt.ylabel('z ' + unitlab)
        plt.savefig('new_rzplot.png')
        plt.figure()
        cs = plt.contourf(rfin, zfin[0], final.T, levels=lvls, cmap='jet', extend='both')
        cs.cmap.set_under('k')
        cs.set_clim(MIN, 0.1 * MAX)
        plt.title('S(r,z)')
        plt.xlabel('r ' + unitlab)
        plt.ylabel('z ' + unitlab)
        plt.colorbar()
        plt.savefig('cs.png')
        plt.figure()
        plt.contourf(rfin, zfin[0], final.T, levels=lvls, cmap='jet')
        plt.colorbar()
        plt.title('S(r,z)')
        plt.xlabel('r ' + unitlab)
        plt.ylabel('z ' + unitlab)
        plt.savefig('new_rzplot2.png')
        plt.figure()
        lglvls = np.linspace(np.amin(logfinal), np.amax(logfinal), NLEVELS)
        plt.contourf(rfin, zfin[0], logfinal.T, levels=lglvls, cmap='jet')
        plt.colorbar()
        plt.title('ln(S(r,z))')
        plt.xlabel('r ' + unitlab)
        plt.ylabel('z ' + unitlab)
        plt.savefig('new_log_rzplot.png')
        # S(x,z) slice at y = 0, normalised by the same radial average
        plt.figure()
        x2 = np.linspace(-Rmax, Rmax, RBINS * 2 - 1)
        z2 = np.linspace(Z[0], Z[-1], RBINS)
        xg2, yg2, zg2 = np.meshgrid(x2, np.asarray(0), z2)
        pts = np.vstack((xg2.ravel(), yg2.ravel(), zg2.ravel())).T
        out2 = ES(pts).reshape(xg2.shape[1], xg2.shape[2])
        o2n = out2[:, :] / rad_avg
        plt.contourf(xg2[0, :, :], zg2[0, :, :], o2n, levels=lvls, cmap='jet')
        plt.xlabel('x ' + unitlab)
        plt.ylabel('z ' + unitlab)
        plt.title('S(x,z)|$_{y=0}$')
        plt.colorbar()
        plt.savefig('new_xzplot.png')
        # S(x,y) slice at z = 0
        plt.figure()
        x2 = np.linspace(-Rmax, Rmax, RBINS * 2 - 1)
        y2 = np.linspace(-Rmax, Rmax, RBINS * 2 - 1)
        xg2, yg2, zg2 = np.meshgrid(x2, y2, np.asarray(0))
        pts = np.vstack((xg2.ravel(), yg2.ravel(), zg2.ravel())).T
        out2 = ES(pts).reshape(xg2.shape[0], xg2.shape[1])
        o2n = out2[:, :] / np.average(out2)
        lvlsxy = np.linspace(np.amin(o2n), np.amax(o2n), NLEVELS)  # contour levels
        plt.contourf(xg2[:, :, 0], yg2[:, :, 0], o2n, levels=lvlsxy, cmap='jet')
        plt.xlabel('x ' + unitlab)
        plt.ylabel('y ' + unitlab)
        plt.title('S(x,y)')  # |$_{y=0}$')
        plt.colorbar()
        plt.savefig('new_xyplot.png')
        # disabled debug plot: difference between the r-z average and the x-z slice
        if False:
            plt.figure()
            dif = o2n - final
            lvls2 = np.linspace(-0.4, 0.4, 100)
            plt.contourf(xg2[0, :, :], zg2[0, :, :], dif, levels=lvls2, cmap='seismic')
            plt.xlabel('x,r ' + unitlab)
            plt.ylabel('z ' + unitlab)
            plt.title('S(r,z)-S(x,z)|$_{y=0}$')
            plt.colorbar()
            plt.savefig('difference.png')
        plt.show()
def normalize_alkanes(R, Z, Raw_Intensity, inner, outer, angle):
    """
    Plot angular integration of 2D WAXS data bounded by a circle defined by radii 'inner' and 'outer'
    :param R: points in r direction
    :param Z: points in z direction
    :param Raw_Intensity: values at all (R, Z) points on grid
    :param inner: inside radius of region bounding alkane reflections
    :param outer: outside radius of region bounding alkane reflections
    :param angle: total angular width (degrees) of the slice excluded around the top and bottom
    :return: average intensity inside the alkane region (excluding the slice)
    """
    nbins = 90
    edges = np.linspace(-90, 90, nbins)
    ring_angles = []
    ring_intensity = []
    # keep every grid point whose radius lies strictly inside the annulus
    for i in range(R.shape[0]):
        for j in range(Z.shape[0]):
            if inner < np.linalg.norm([R[i], Z[j]]) < outer:
                ring_angles.append((180 / np.pi) * np.arctan(Z[j] / R[i]))
                ring_intensity.append(Raw_Intensity[i, j])
    which_bin = np.digitize(ring_angles, edges)
    binned = np.zeros([nbins])
    counts = np.zeros([nbins])
    for k in range(len(which_bin)):
        binned[which_bin[k] - 1] += ring_intensity[k]
        counts[which_bin[k] - 1] += 1
    # Get average intensity in ring excluding `angle` degrees around top and bottom
    deg_per_bin = 180 / nbins  # degrees which a single bin covers
    first = int((angle / 2) / deg_per_bin)  # first bin past the excluded slice
    last = nbins - first  # because of symmetry
    total_intensity = np.sum(binned[first:last])
    avg_intensity = total_intensity / np.sum(counts[first:last])
    print('Average Intensity in alkane chain region : %s' % avg_intensity)
    return avg_intensity
def tm2(D, ucell):
    """Map the first three columns of D through the reciprocal basis of ``ucell``.

    NOTE(review): assumes D is a 2D (N, 4) array -- all three original loops run
    over D.shape[0] -- and column 3 (intensity) of the output is left at zero.
    """
    a1, a2, a3 = ucell[0], ucell[1], ucell[2]
    volume = np.dot(a1, np.cross(a2, a3))
    b1 = np.cross(a2, a3) / volume
    b2 = np.cross(a3, a1) / volume  # *2.0*math.pi
    b3 = np.cross(a1, a2) / volume  # *2.0*math.pi
    Dnew = np.zeros_like(D)
    xcol = D[..., 0]
    ycol = D[..., 1]
    zcol = D[..., 2]
    # each row transforms independently, so the three original loops fuse into one
    for row in range(D.shape[0]):
        Dnew[row, 0:3] += xcol[row] * b1
        Dnew[row, 0:3] += ycol[row] * b2
        Dnew[row, 0:3] += zcol[row] * b3
    return Dnew
def to_monoclinic(D, ucell):  # monoclinic for now
    """Map the xyz columns of D through the reciprocal basis of a (monoclinic) unit cell.

    NOTE(review): assumes D is a 2D (N, 4) array -- all three original loops run
    over D.shape[0] -- and column 3 of the result stays zero.
    """
    a1, a2, a3 = ucell[0], ucell[1], ucell[2]
    b1 = (np.cross(a2, a3)) / (np.dot(a1, np.cross(a2, a3)))
    b2 = (np.cross(a3, a1)) / (np.dot(a2, np.cross(a3, a1)))  # *2.0*math.pi
    b3 = (np.cross(a1, a2)) / (np.dot(a3, np.cross(a1, a2)))  # *2.0*math.pi
    Dnew = np.zeros_like(D)
    xcol = D[..., 0]
    ycol = D[..., 1]
    zcol = D[..., 2]
    # rows are independent, so the three original loops fuse into one
    for row in range(D.shape[0]):
        Dnew[row, 0:3] += xcol[row] * b1
        Dnew[row, 0:3] += ycol[row] * b2
        Dnew[row, 0:3] += zcol[row] * b3
    return Dnew
def mc_inv(D, ucell):
    """Apply the inverse reciprocal-basis transform to the xyz columns of D.

    Rows of ``b_inv`` are combined weighted by the x, y, z columns of D.
    NOTE(review): assumes D is a 2D (N, 4) array -- all three original loops run
    over D.shape[0] -- and column 3 of the result stays zero.
    """
    a1, a2, a3 = ucell[0], ucell[1], ucell[2]
    b1 = (np.cross(a2, a3))/(np.dot(a1, np.cross(a2, a3)))
    b2 = (np.cross(a3, a1))/(np.dot(a2, np.cross(a3, a1)))
    b3 = (np.cross(a1, a2))/(np.dot(a3, np.cross(a1, a2)))
    b_inv = np.linalg.inv(np.vstack((b1, b2, b3)))
    Dnew = np.zeros_like(D)
    xcol = D[..., 0]
    ycol = D[..., 1]
    zcol = D[..., 2]
    # rows are independent, so the three original loops fuse into one
    for row in range(D.shape[0]):
        Dnew[row, 0:3] += xcol[row] * b_inv[0]
        Dnew[row, 0:3] += ycol[row] * b_inv[1]
        Dnew[row, 0:3] += zcol[row] * b_inv[2]
    return Dnew
def Plot_Ewald_triclinic(D, wavelength_angstroms, ucell, factor=3.1, format=True, **kwargs):  # pass full 3d data,SF,wavelength in angstroms
    """Entry point: delegate to PLOT_RAD_NEW and terminate the program."""
    PLOT_RAD_NEW(D, wavelength_angstroms, ucell, factor=factor, format=format, **kwargs)
    exit()
    # NOTE(review): everything below this point is unreachable because of the
    # exit() above.  The dead code is retained as-is; several lines in it would
    # fail under Python 3 if re-enabled (flagged below).
    if not os.path.exists(path):
        os.makedirs(path)
    X = D[:, 0, 0, 0].copy()
    Y = D[0, :, 0, 1].copy()
    Z = D[0, 0, :, 2].copy()
    NBINSZ = 1 * D[0, 0, :, 2].size
    ZBNS = np.linspace(Z[0], Z[-1], NBINSZ)
    if NBINSRAD > 0:
        # non-uniform (sqrt-spaced) radial bins rescaled to the X range
        XBNSRD = np.linspace(-NBINSRAD, NBINSRAD, num=NBINSRAD*2)
        XBNSRD = np.sqrt(np.abs(XBNSRD))*np.sign(XBNSRD)
        XBNSRD *= (X[-1]/XBNSRD[-1])
    else:
        XBNSRD = X
        print("setting XBNSRD=", X)
    dx1 = X[1 + int(X.shape[0]/2)] - X[int(X.shape[0]/2)]
    SF = D[:, :, :, 3]
    a1 = ucell[0]
    a2 = ucell[1]
    a3 = ucell[2]
    b1 = old_div((np.cross(a2, a3)), (np.dot(a1, np.cross(a2, a3))))
    b2 = old_div((np.cross(a3, a1)), (np.dot(a2, np.cross(a3, a1))))
    b3 = old_div((np.cross(a1, a2)), (np.dot(a3, np.cross(a1, a2))))
    # rewrite the spatial columns of D in the reciprocal basis
    Dnew = np.zeros_like(D)
    for ix in trange(D.shape[0]):
        Dnew[ix, :, :, 0:3] += X[ix]*b1
    for iy in trange(D.shape[1]):
        Dnew[:, iy, :, 0:3] += Y[iy]*b2
    for iz in trange(D.shape[2]):
        Dnew[:, :, iz, 0:3] += Z[iz]*b3
    D[..., :3] = Dnew[..., :3]
    K_ES = 2.0*math.pi/wavelength_angstroms  # calculate k for incident xrays in inverse angstroms
    # https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.RegularGridInterpolator.html#scipy.interpolate.RegularGridInterpolator
    # Notes
    # Contrary to LinearNDInterpolator and NearestNDInterpolator, RegularGridInterpolator class avoids expensive triangulation of the input data by taking advantage of the regular grid structure.
    # this is why this style of interpolation is so slow
    XGD = D[:, :, :, 0]  # X spatial grid view
    YGD = D[:, :, :, 1]
    ZGD = D[:, :, :, 2]
    VGD = D[:, :, :, 3]
    DC = D[:, :, :, 0:3]
    # NOTE(review): DC.size/3 is a float under Python 3; reshape would raise
    # TypeError here (DR is never used afterwards).
    DR = DC.reshape(DC.size/3, 3)
    # check if fast interpolation can be used
    lbuf = True
    for i in range(3):
        for j in range(i + 1, 3):
            if ucell[i, j] != 0 or ucell[j, i] != 0:
                lbuf = False
    print("Interpolating grid...")
    if ucell[0, 0] == ucell[1, 1] and ucell[0, 0] == ucell[2, 2] and lbuf:
        print("using fast interpolation for orthorhombic cell")
        ES = RegularGridInterpolator((X, Y, Z), SF, bounds_error=False)
    else:
        print("Interpolating non-orthorhombic cell")
        dtime = 480.0 * XGD.size / (98 * 98 * 99)  # empirical time estimate
        print("interpolation time estimate: ", round(dtime / 60, 1), " minutes, finishing around ", (
            datetime.datetime.now() + datetime.timedelta(seconds=dtime)).strftime('%I:%M %p'))
        start = time.time()
        coords = list(zip(XGD.ravel(), YGD.ravel(), ZGD.ravel()))
        if False:
            ES = LinearNDInterpolator(coords, VGD.ravel())
        else:
            ES = NearestNDInterpolator(coords, VGD.ravel())
        end = time.time()
        print("interpolation finished, taking %4.2f seconds" % (end-start))
    xyzpts = np.asarray([])
    print("setting up points for radial integration")
    Scale=1
    if False:
        for ix in trange(D.shape[0]):
            for iy in range(D.shape[1]):
                for iz in range(D.shape[2]):
                    xyzpts.append((D[ix, iy, iz, 0], D[ix, iy, iz, 1], D[ix, iy, iz, 2]))
    else:
        XPTS = np.linspace(D[0, 0, 0, 0], D[-1, 0, 0, 0], Scale * D.shape[0], dtype=np.float16)
        YPTS = np.linspace(D[0, 0, 0, 1], D[0, -1, 0, 1], Scale * D.shape[1], dtype=np.float16)
        ZPTS = np.linspace(D[0, 0, 0, 2], D[0, 0, -1, 2], Scale * D.shape[2], dtype=np.float16)
        print("mesh")
        xyzpts = np.meshgrid(XPTS, YPTS, ZPTS)
        print("stack")
        xyzpts = np.stack(xyzpts, -1).reshape(-1, 3)
        print("done")
    # NOTE(review): this overwrites the xyzpts built above, making both branches moot.
    xyzpts = np.reshape(D[:, :, :, :3], (D.shape[0]*D.shape[1]*D.shape[2], 3))  # 5000x faster than above loop
    NSP = 20
    NSP = np.minimum(NSP, xyzpts.shape[0])  # split into at most 20 chunks before processing to limit memory usage
    xyzpieces = np.array_split(xyzpts, NSP)
    EWDxyz = np.asarray([])
    print("interpolating")
    for i in tqdm.tqdm(xyzpieces):
        buf = ES(i)
        EWDxyz = np.append(EWDxyz, buf, axis=0)
    print("EWD done")
    # 2D histogram of intensity over (r, z)
    rpts = np.sqrt(xyzpts[:, 0]**2.0 + xyzpts[:, 1]**2.0)
    Hcount, XEC, YEC = np.histogram2d(rpts, xyzpts[:, 2], bins=(XBNSRD, ZBNS))
    Hval, XEV, YEV = np.histogram2d(rpts, xyzpts[:, 2], weights=EWDxyz, normed=False, bins=(XBNSRD, ZBNS))
    switch1 = True
    if switch1:
        Hcount = np.where(Hcount == 0, 1, Hcount)
    Hrz = Hval / Hcount
    if not switch1:
        Hrz = np.ma.masked_invalid(Hrz)
    S1 = np.sum(Hrz)
    # NOTE(review): Hrz.shape[0]/2 is a float index under Python 3; this line and
    # the two mirroring loops below would raise TypeError if ever executed.
    S3 = np.sum(Hrz[Hrz.shape[0]/2, :])
    Condition1 = False  # Need to figure this out-when should this be true?
    if Condition1:
        for ir in range(1, Hrz.shape[0] / 2 - 1):
            Hrz[-ir + Hrz.shape[0] / 2, :] = Hrz[ir + Hrz.shape[0] / 2, :]  # this needs to be tested for both even and odd numbers of bins
    else:
        for ir in range(1, Hrz.shape[0] / 2 - 1):
            Hrz[-ir + 2 + Hrz.shape[0] / 2, :] = Hrz[ir + Hrz.shape[0] / 2, :]  # this needs to be tested for both even and odd numbers of bins
    S2 = np.sum(Hrz)
    XMG, YMG = np.meshgrid(XEV, YEV)
    plt.pcolormesh(XMG[:-1, :], YMG[:-1, :], np.log10(Hrz.T), vmin=np.amin(np.log10(Hrz)), vmax=np.amax(np.log10(Hrz)))
    plt.savefig(path+"_log_rzplot"+format, dpi=DPI)
    plt.clf()
    print("_log_rzplot saved")
    # replace non-positive bins with the smallest positive value before taking log10
    mn = np.amin(Hrz[np.nonzero(Hrz)])
    Hbuf = np.where(Hrz > 0.0, Hrz, mn)
    Log_HRZ = np.log10(Hbuf)
    plt.pcolormesh(XMG[:-1, :] - dx1 / 2.0, YMG[:-1, :], Log_HRZ.T, vmin=np.amin(Log_HRZ), vmax=np.amax(Log_HRZ),
                   cmap='nipy_spectral')
    plt.colorbar()
    plt.savefig(path + "_log_rzplot" + format, dpi=DPI)
    plt.clf()
    Nx = D.shape[0]
    Ny = D.shape[1]
    Nz = D.shape[2]
    #==============flat and Ewald-corrected plots=================
    xypts = []
    xyflat = []
    for ix in range(D.shape[0]):
        for iy in range(D.shape[1]):
            xp = D[ix, iy, int(Nz/2), 0]
            yp = D[ix, iy, int(Nz/2), 1]
            theta = np.arctan(np.sqrt(xp**2.0 + yp**2.0)/K_ES)
            xypts.append((xp*np.cos(theta), yp*np.cos(theta), K_ES*(1.0 - np.cos(theta))))
            xyflat.append((xp, yp, 0.0))
    xzpts = []
    xzflat = []
    for ix in range(D.shape[0]):
        for iz in range(D.shape[2]):
            xp = D[ix, int(Ny/2), iz, 0]
            zp = D[ix, int(Ny/2), iz, 2]
            # NOTE(review): yp here is left over from the xy loop above;
            # zp was almost certainly intended in this expression.
            theta = np.arctan(np.sqrt(xp**2.0 + yp**2.0)/K_ES)
            xzpts.append((xp*np.cos(theta), K_ES*(1.0-np.cos(theta)), zp*np.cos(theta)))
            xzflat.append((xp, 0.0, zp))
    yzpts = []
    yzflat = []
    for iy in range(D.shape[1]):
        for iz in range(D.shape[2]):
            yp = D[int(Nz/2), iy, iz, 1]
            zp = D[int(Nz/2), iy, iz, 2]
            theta = np.arctan(np.sqrt(yp**2.0 + zp**2.0)/K_ES)
            yzpts.append((K_ES*(1.0-np.cos(theta)), yp*np.cos(theta), zp*np.cos(theta)))
            yzflat.append((0.0, yp, zp))
    xypts = np.asarray(xypts)
    xzpts = np.asarray(xzpts)
    yzpts = np.asarray(yzpts)
    xyflat = np.asarray(xyflat)
    xzflat = np.asarray(xzflat)
    yzflat = np.asarray(yzflat)
    EWDxy = ES(xypts)
    EWDxz = ES(xzpts)
    EWDyz = ES(yzpts)
    EWDxyflat = ES(xyflat)
    EWDxzflat = ES(xzflat)
    EWDyzflat = ES(yzflat)
    EWDxy = EWDxy.reshape(D.shape[0], D.shape[1])
    EWDxz = EWDxz.reshape(D.shape[0], D.shape[2])
    EWDyz = EWDyz.reshape(D.shape[1], D.shape[2])
    EWDxyflat = EWDxyflat.reshape(D.shape[0], D.shape[1])
    EWDxzflat = EWDxzflat.reshape(D.shape[0], D.shape[2])
    EWDyzflat = EWDyzflat.reshape(D.shape[1], D.shape[2])
    title = "Ewald Corrected Structure Factor \n $\lambda=$"+str(wavelength_angstroms)+" $\AA$ $k_{ew}=$"+str(round(K_ES,2))+" $\AA^{-1}$"
    ltitle = 'log ' + title
    xlab = 'x ('+units + ")"
    ylab = 'y ('+units + ")"
    zlab = 'z ('+units + ")"
    fname = "Ewald_"
    iz = 0
    plt.suptitle(title)
    plt.xlabel(xlab)
    plt.ylabel(ylab)
    plt.contourf(D[:, :, iz, 0], D[:, :, iz, 1], EWDxy, contours, **kwargs)
    plt.savefig(path+fname+"xy"+str(iz)+format,dpi=DPI)
    plt.clf()
    lax = ['x', 'y', 'z']
    ewlab = "Ewald"
    flab = "Flat"
    iax1 = 0
    iax2 = 1
    EWDxy = np.ma.masked_invalid(EWDxy)
    EWDxyflat = np.ma.masked_invalid(EWDxyflat)
    EWDxz = np.ma.masked_invalid(EWDxz)
    EWDxzflat = np.ma.masked_invalid(EWDxzflat)
    EWDyz = np.ma.masked_invalid(EWDyz)
    EWDyzflat = np.ma.masked_invalid(EWDyzflat)
    if PLOT_EWALDS:
        csplot_wlog(D[:, :, int(Nz / 2) + 1, iax1], D[:, :, int(Nz / 2) + 1, iax2], EWDxy, contours, ewlab, lax[iax1],
                    lax[iax2], **kwargs)
        csplot_wlog(D[:,:,int(Nz/2)+1,iax1],D[:,:,int(Nz/2)+1,iax2],EWDxyflat,contours,flab ,lax[iax1],lax[iax2],**kwargs)
    iax1 = 0
    iax2 = 2
    if PLOT_EWALDS:
        csplot_wlog(D[:, int(Ny / 2), :, iax1], D[:, int(Ny / 2), :, iax2], EWDxz, contours, ewlab, lax[iax1],
                    lax[iax2], **kwargs)
        csplot_wlog(D[:,int(Ny/2),:,iax1],D[:,int(Ny/2),:,iax2],EWDxzflat,contours,flab ,lax[iax1],lax[iax2],**kwargs)
    iax1 = 1
    iax2 = 2
    if PLOT_EWALDS:
        csplot_wlog(D[int(Nx / 2), :, :, iax1], D[int(Nx / 2), :, :, iax2], EWDyz, contours, ewlab, lax[iax1],
                    lax[iax2], **kwargs)
        csplot_wlog(D[int(Nx/2),:,:,iax1],D[int(Nx/2),:,:,iax2],EWDyzflat,contours,flab ,lax[iax1],lax[iax2],**kwargs)
| [
"numpy.log10",
"matplotlib.pyplot.hist",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.log",
"builtins.str",
"past.utils.old_div",
"numpy.array_split",
"numpy.array",
"builtins.range",
"numpy.linalg.norm",
"numpy.nanmin",
"numpy.sin",
"datetime.timedelta",
"matplotlib.pyplot.contourf"... | [((1995, 2009), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2007, 2009), True, 'import matplotlib.pyplot as plt\n'), ((2014, 2033), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {}), '(title)\n', (2026, 2033), True, 'import matplotlib.pyplot as plt\n'), ((2038, 2054), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlab'], {}), '(xlab)\n', (2048, 2054), True, 'import matplotlib.pyplot as plt\n'), ((2059, 2075), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylab'], {}), '(ylab)\n', (2069, 2075), True, 'import matplotlib.pyplot as plt\n'), ((2409, 2452), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(path + fname + format)'], {'dpi': 'DPI'}), '(path + fname + format, dpi=DPI)\n', (2420, 2452), True, 'import matplotlib.pyplot as plt\n'), ((2453, 2462), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2460, 2462), True, 'import matplotlib.pyplot as plt\n'), ((2708, 2732), 'builtins.range', 'range', (['(data.shape[2] - 1)'], {}), '(data.shape[2] - 1)\n', (2713, 2732), False, 'from builtins import range\n'), ((4072, 4111), 'numpy.histogram', 'np.histogram', (['R'], {'bins': 'Nbins', 'weights': 'SF'}), '(R, bins=Nbins, weights=SF)\n', (4084, 4111), True, 'import numpy as np\n'), ((4124, 4151), 'numpy.histogram', 'np.histogram', (['R'], {'bins': 'Nbins'}), '(R, bins=Nbins)\n', (4136, 4151), True, 'import numpy as np\n'), ((4161, 4187), 'numpy.where', 'np.where', (['(Hc != 0)', 'Hc', '(1.0)'], {}), '(Hc != 0, Hc, 1.0)\n', (4169, 4187), True, 'import numpy as np\n'), ((4225, 4235), 'numpy.amax', 'np.amax', (['H'], {}), '(H)\n', (4232, 4235), True, 'import numpy as np\n'), ((4240, 4259), 'matplotlib.pyplot.plot', 'plt.plot', (['E[:-1]', 'H'], {}), '(E[:-1], H)\n', (4248, 4259), True, 'import matplotlib.pyplot as plt\n'), ((4264, 4278), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(5)'], {}), '(0, 5)\n', (4272, 4278), True, 'import matplotlib.pyplot as plt\n'), ((4283, 4315), 'matplotlib.pyplot.savefig', 
'plt.savefig', (['outputname'], {'dpi': 'DPI'}), '(outputname, dpi=DPI)\n', (4294, 4315), True, 'import matplotlib.pyplot as plt\n'), ((4679, 4717), 'scipy.interpolate.RegularGridInterpolator', 'RegularGridInterpolator', (['(X, Y, Z)', 'SF'], {}), '((X, Y, Z), SF)\n', (4702, 4717), False, 'from scipy.interpolate import RegularGridInterpolator\n'), ((4746, 4763), 'builtins.range', 'range', (['D.shape[0]'], {}), '(D.shape[0])\n', (4751, 4763), False, 'from builtins import range\n'), ((5104, 5119), 'numpy.asarray', 'np.asarray', (['pts'], {}), '(pts)\n', (5114, 5119), True, 'import numpy as np\n'), ((5196, 5270), 'matplotlib.pyplot.contourf', 'plt.contourf', (['D[:, :, 0, 0]', 'D[:, :, 0, 1]', 'EWD', '(200)'], {'interpolation': 'interp'}), '(D[:, :, 0, 0], D[:, :, 0, 1], EWD, 200, interpolation=interp)\n', (5208, 5270), True, 'import matplotlib.pyplot as plt\n'), ((5276, 5308), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""EWxy.png"""'], {'dpi': '(300)'}), "('EWxy.png', dpi=300)\n", (5287, 5308), True, 'import matplotlib.pyplot as plt\n'), ((5312, 5321), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5319, 5321), True, 'import matplotlib.pyplot as plt\n'), ((5415, 5450), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""EWxylog.png"""'], {'dpi': '(300)'}), "('EWxylog.png', dpi=300)\n", (5426, 5450), True, 'import matplotlib.pyplot as plt\n'), ((5455, 5464), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5462, 5464), True, 'import matplotlib.pyplot as plt\n'), ((6034, 6092), 'scipy.interpolate.RegularGridInterpolator', 'RegularGridInterpolator', (['(X, Y, Z)', 'SF'], {'bounds_error': '(False)'}), '((X, Y, Z), SF, bounds_error=False)\n', (6057, 6092), False, 'from scipy.interpolate import RegularGridInterpolator\n'), ((6123, 6140), 'builtins.range', 'range', (['D.shape[0]'], {}), '(D.shape[0])\n', (6128, 6140), False, 'from builtins import range\n'), ((6402, 6419), 'builtins.range', 'range', (['D.shape[0]'], {}), '(D.shape[0])\n', (6407, 6419), 
False, 'from builtins import range\n'), ((6679, 6696), 'builtins.range', 'range', (['D.shape[1]'], {}), '(D.shape[1])\n', (6684, 6696), False, 'from builtins import range\n'), ((6937, 6954), 'numpy.asarray', 'np.asarray', (['xypts'], {}), '(xypts)\n', (6947, 6954), True, 'import numpy as np\n'), ((6967, 6984), 'numpy.asarray', 'np.asarray', (['xzpts'], {}), '(xzpts)\n', (6977, 6984), True, 'import numpy as np\n'), ((6997, 7014), 'numpy.asarray', 'np.asarray', (['yzpts'], {}), '(yzpts)\n', (7007, 7014), True, 'import numpy as np\n'), ((7524, 7537), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (7534, 7537), True, 'import matplotlib.pyplot as plt\n'), ((7542, 7561), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {}), '(title)\n', (7554, 7561), True, 'import matplotlib.pyplot as plt\n'), ((7566, 7582), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlab'], {}), '(xlab)\n', (7576, 7582), True, 'import matplotlib.pyplot as plt\n'), ((7587, 7603), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylab'], {}), '(ylab)\n', (7597, 7603), True, 'import matplotlib.pyplot as plt\n'), ((7620, 7634), 'numpy.amax', 'np.amax', (['EWDxy'], {}), '(EWDxy)\n', (7627, 7634), True, 'import numpy as np\n'), ((7639, 7737), 'matplotlib.pyplot.contourf', 'plt.contourf', (['D[:, :, 0, 0]', 'D[:, :, 0, 1]', 'EWDxy', 'contours'], {'vmax': '(cscale * EWDmax_xy)'}), '(D[:, :, 0, 0], D[:, :, 0, 1], EWDxy, contours, vmax=cscale *\n EWDmax_xy, **kwargs)\n', (7651, 7737), True, 'import matplotlib.pyplot as plt\n'), ((7736, 7786), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path + fname + 'xy' + format)"], {'dpi': 'DPI'}), "(path + fname + 'xy' + format, dpi=DPI)\n", (7747, 7786), True, 'import matplotlib.pyplot as plt\n'), ((7791, 7800), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7798, 7800), True, 'import matplotlib.pyplot as plt\n'), ((7806, 7819), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (7816, 7819), True, 'import matplotlib.pyplot 
as plt\n'), ((7824, 7844), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['ltitle'], {}), '(ltitle)\n', (7836, 7844), True, 'import matplotlib.pyplot as plt\n'), ((7849, 7865), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlab'], {}), '(xlab)\n', (7859, 7865), True, 'import matplotlib.pyplot as plt\n'), ((7870, 7886), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylab'], {}), '(ylab)\n', (7880, 7886), True, 'import matplotlib.pyplot as plt\n'), ((8042, 8095), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path + fname + 'xylog' + format)"], {'dpi': 'DPI'}), "(path + fname + 'xylog' + format, dpi=DPI)\n", (8053, 8095), True, 'import matplotlib.pyplot as plt\n'), ((8100, 8109), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8107, 8109), True, 'import matplotlib.pyplot as plt\n'), ((8115, 8128), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (8125, 8128), True, 'import matplotlib.pyplot as plt\n'), ((8133, 8152), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {}), '(title)\n', (8145, 8152), True, 'import matplotlib.pyplot as plt\n'), ((8157, 8173), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlab'], {}), '(xlab)\n', (8167, 8173), True, 'import matplotlib.pyplot as plt\n'), ((8178, 8194), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['zlab'], {}), '(zlab)\n', (8188, 8194), True, 'import matplotlib.pyplot as plt\n'), ((8211, 8225), 'numpy.amax', 'np.amax', (['EWDxz'], {}), '(EWDxz)\n', (8218, 8225), True, 'import numpy as np\n'), ((8230, 8328), 'matplotlib.pyplot.contourf', 'plt.contourf', (['D[:, 0, :, 0]', 'D[:, 0, :, 2]', 'EWDxz', 'contours'], {'vmax': '(cscale * EWDmax_xz)'}), '(D[:, 0, :, 0], D[:, 0, :, 2], EWDxz, contours, vmax=cscale *\n EWDmax_xz, **kwargs)\n', (8242, 8328), True, 'import matplotlib.pyplot as plt\n'), ((8327, 8377), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path + fname + 'xz' + format)"], {'dpi': 'DPI'}), "(path + fname + 'xz' + format, dpi=DPI)\n", (8338, 8377), True, 'import matplotlib.pyplot as 
plt\n'), ((8382, 8391), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8389, 8391), True, 'import matplotlib.pyplot as plt\n'), ((8397, 8410), 'matplotlib.pyplot.figure', 'plt.figure', (['(4)'], {}), '(4)\n', (8407, 8410), True, 'import matplotlib.pyplot as plt\n'), ((8415, 8435), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['ltitle'], {}), '(ltitle)\n', (8427, 8435), True, 'import matplotlib.pyplot as plt\n'), ((8440, 8456), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlab'], {}), '(xlab)\n', (8450, 8456), True, 'import matplotlib.pyplot as plt\n'), ((8461, 8477), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['zlab'], {}), '(zlab)\n', (8471, 8477), True, 'import matplotlib.pyplot as plt\n'), ((8714, 8737), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-qmax, qmax]'], {}), '([-qmax, qmax])\n', (8722, 8737), True, 'import matplotlib.pyplot as plt\n'), ((8742, 8765), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-qmax, qmax]'], {}), '([-qmax, qmax])\n', (8750, 8765), True, 'import matplotlib.pyplot as plt\n'), ((8770, 8823), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path + fname + 'xzlog' + format)"], {'dpi': 'DPI'}), "(path + fname + 'xzlog' + format, dpi=DPI)\n", (8781, 8823), True, 'import matplotlib.pyplot as plt\n'), ((8828, 8837), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8835, 8837), True, 'import matplotlib.pyplot as plt\n'), ((8843, 8856), 'matplotlib.pyplot.figure', 'plt.figure', (['(5)'], {}), '(5)\n', (8853, 8856), True, 'import matplotlib.pyplot as plt\n'), ((8861, 8880), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {}), '(title)\n', (8873, 8880), True, 'import matplotlib.pyplot as plt\n'), ((8885, 8901), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['ylab'], {}), '(ylab)\n', (8895, 8901), True, 'import matplotlib.pyplot as plt\n'), ((8906, 8922), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['zlab'], {}), '(zlab)\n', (8916, 8922), True, 'import matplotlib.pyplot as plt\n'), ((8939, 8953), 'numpy.amax', 'np.amax', 
(['EWDyz'], {}), '(EWDyz)\n', (8946, 8953), True, 'import numpy as np\n'), ((8958, 9056), 'matplotlib.pyplot.contourf', 'plt.contourf', (['D[0, :, :, 1]', 'D[0, :, :, 2]', 'EWDyz', 'contours'], {'vmax': '(cscale * EWDmax_yz)'}), '(D[0, :, :, 1], D[0, :, :, 2], EWDyz, contours, vmax=cscale *\n EWDmax_yz, **kwargs)\n', (8970, 9056), True, 'import matplotlib.pyplot as plt\n'), ((9055, 9105), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path + fname + 'yz' + format)"], {'dpi': 'DPI'}), "(path + fname + 'yz' + format, dpi=DPI)\n", (9066, 9105), True, 'import matplotlib.pyplot as plt\n'), ((9110, 9119), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9117, 9119), True, 'import matplotlib.pyplot as plt\n'), ((9125, 9138), 'matplotlib.pyplot.figure', 'plt.figure', (['(6)'], {}), '(6)\n', (9135, 9138), True, 'import matplotlib.pyplot as plt\n'), ((9143, 9163), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['ltitle'], {}), '(ltitle)\n', (9155, 9163), True, 'import matplotlib.pyplot as plt\n'), ((9168, 9184), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['ylab'], {}), '(ylab)\n', (9178, 9184), True, 'import matplotlib.pyplot as plt\n'), ((9189, 9205), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['zlab'], {}), '(zlab)\n', (9199, 9205), True, 'import matplotlib.pyplot as plt\n'), ((9361, 9414), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path + fname + 'yzlog' + format)"], {'dpi': 'DPI'}), "(path + fname + 'yzlog' + format, dpi=DPI)\n", (9372, 9414), True, 'import matplotlib.pyplot as plt\n'), ((9419, 9428), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9426, 9428), True, 'import matplotlib.pyplot as plt\n'), ((10322, 10370), 'numpy.linspace', 'np.linspace', (['(-real_x / 2)', '(real_x / 2)', 'X.shape[0]'], {}), '(-real_x / 2, real_x / 2, X.shape[0])\n', (10333, 10370), True, 'import numpy as np\n'), ((10384, 10432), 'numpy.linspace', 'np.linspace', (['(-real_y / 2)', '(real_y / 2)', 'Y.shape[0]'], {}), '(-real_y / 2, real_y / 2, Y.shape[0])\n', (10395, 
10432), True, 'import numpy as np\n'), ((10446, 10494), 'numpy.linspace', 'np.linspace', (['(-real_z / 2)', '(real_z / 2)', 'Z.shape[0]'], {}), '(-real_z / 2, real_z / 2, Z.shape[0])\n', (10457, 10494), True, 'import numpy as np\n'), ((10671, 10709), 'numpy.concatenate', 'np.concatenate', (['(X[start:], X[:start])'], {}), '((X[start:], X[:start]))\n', (10685, 10709), True, 'import numpy as np\n'), ((10810, 10848), 'numpy.concatenate', 'np.concatenate', (['(Y[start:], Y[:start])'], {}), '((Y[start:], Y[:start]))\n', (10824, 10848), True, 'import numpy as np\n'), ((10949, 10987), 'numpy.concatenate', 'np.concatenate', (['(Z[start:], Z[:start])'], {}), '((Z[start:], Z[:start]))\n', (10963, 10987), True, 'import numpy as np\n'), ((11217, 11243), 'numpy.fft.ifftn', 'np.fft.ifftn', (['SF_reordered'], {}), '(SF_reordered)\n', (11229, 11243), True, 'import numpy as np\n'), ((12081, 12235), 'matplotlib.pyplot.contourf', 'plt.contourf', (['rfin[rbound1:rbound2]', 'zfin[0][zbound1:zbound2]', 'final[rbound1:rbound2, zbound1:zbound2].T'], {'levels': 'levels', 'cmap': '"""seismic"""', 'extend': '"""max"""'}), "(rfin[rbound1:rbound2], zfin[0][zbound1:zbound2], final[rbound1\n :rbound2, zbound1:zbound2].T, levels=levels, cmap='seismic', extend='max')\n", (12093, 12235), True, 'import matplotlib.pyplot as plt\n'), ((12252, 12266), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (12264, 12266), True, 'import matplotlib.pyplot as plt\n'), ((12271, 12295), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""r ($\\\\AA$)"""'], {}), "('r ($\\\\AA$)')\n", (12281, 12295), True, 'import matplotlib.pyplot as plt\n'), ((12299, 12323), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""z ($\\\\AA$)"""'], {}), "('z ($\\\\AA$)')\n", (12309, 12323), True, 'import matplotlib.pyplot as plt\n'), ((12327, 12337), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12335, 12337), True, 'import matplotlib.pyplot as plt\n'), ((12405, 12463), 'scipy.interpolate.RegularGridInterpolator', 
'RegularGridInterpolator', (['(X, Y, Z)', 'SF'], {'bounds_error': '(False)'}), '((X, Y, Z), SF, bounds_error=False)\n', (12428, 12463), False, 'from scipy.interpolate import RegularGridInterpolator\n'), ((13055, 13098), 'numpy.linspace', 'np.linspace', (['(0.0)', 'Rmax', 'RBINS'], {'retstep': '(True)'}), '(0.0, Rmax, RBINS, retstep=True)\n', (13066, 13098), True, 'import numpy as np\n'), ((13109, 13140), 'numpy.linspace', 'np.linspace', (['Z[0]', 'Z[-1]', 'ZBINS'], {}), '(Z[0], Z[-1], ZBINS)\n', (13120, 13140), True, 'import numpy as np\n'), ((13151, 13190), 'numpy.zeros', 'np.zeros', (['(rarr.shape[0], zar.shape[0])'], {}), '((rarr.shape[0], zar.shape[0]))\n', (13159, 13190), True, 'import numpy as np\n'), ((13248, 13268), 'builtins.range', 'range', (['rarr.shape[0]'], {}), '(rarr.shape[0])\n', (13253, 13268), False, 'from builtins import range\n'), ((13971, 13984), 'numpy.nanmin', 'np.nanmin', (['oa'], {}), '(oa)\n', (13980, 13984), True, 'import numpy as np\n'), ((14040, 14054), 'numpy.average', 'np.average', (['oa'], {}), '(oa)\n', (14050, 14054), True, 'import numpy as np\n'), ((14167, 14205), 'numpy.append', 'np.append', (['oa[::-1, :]', 'oa[1:]'], {'axis': '(0)'}), '(oa[::-1, :], oa[1:], axis=0)\n', (14176, 14205), True, 'import numpy as np\n'), ((14223, 14255), 'numpy.append', 'np.append', (['(-rarr[::-1])', 'rarr[1:]'], {}), '(-rarr[::-1], rarr[1:])\n', (14232, 14255), True, 'import numpy as np\n'), ((14272, 14314), 'numpy.append', 'np.append', (['z[:, 0, :]', 'z[1:, 0, :]'], {'axis': '(0)'}), '(z[:, 0, :], z[1:, 0, :], axis=0)\n', (14281, 14314), True, 'import numpy as np\n'), ((14515, 14530), 'numpy.copy', 'np.copy', (['waxs.T'], {}), '(waxs.T)\n', (14522, 14530), True, 'import numpy as np\n'), ((14600, 14617), 'builtins.range', 'range', (['R.shape[0]'], {}), '(R.shape[0])\n', (14605, 14617), False, 'from builtins import range\n'), ((15064, 15074), 'numpy.mean', 'np.mean', (['I'], {}), '(I)\n', (15071, 15074), True, 'import numpy as np\n'), ((15080, 
15092), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (15090, 15092), True, 'import matplotlib.pyplot as plt\n'), ((15106, 15130), 'numpy.linspace', 'np.linspace', (['(0)', '(3.1)', '(200)'], {}), '(0, 3.1, 200)\n', (15117, 15130), True, 'import numpy as np\n'), ((15136, 15203), 'matplotlib.pyplot.contourf', 'plt.contourf', (['R', 'Z', 'spots.T'], {'cmap': 'cmap', 'levels': 'levels', 'extend': '"""max"""'}), "(R, Z, spots.T, cmap=cmap, levels=levels, extend='max')\n", (15148, 15203), True, 'import matplotlib.pyplot as plt\n'), ((15208, 15227), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-2.5)', '(2.5)'], {}), '(-2.5, 2.5)\n', (15216, 15227), True, 'import matplotlib.pyplot as plt\n'), ((15232, 15251), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-2.5)', '(2.5)'], {}), '(-2.5, 2.5)\n', (15240, 15251), True, 'import matplotlib.pyplot as plt\n'), ((15256, 15268), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (15266, 15268), True, 'import matplotlib.pyplot as plt\n'), ((15273, 15293), 'matplotlib.pyplot.hist', 'plt.hist', (['I'], {'bins': '(25)'}), '(I, bins=25)\n', (15281, 15293), True, 'import matplotlib.pyplot as plt\n'), ((15298, 15365), 'matplotlib.pyplot.title', 'plt.title', (["('Average intensity of R-spots: %.2f' % average_intensity)"], {}), "('Average intensity of R-spots: %.2f' % average_intensity)\n", (15307, 15365), True, 'import matplotlib.pyplot as plt\n'), ((16917, 16975), 'scipy.interpolate.RegularGridInterpolator', 'RegularGridInterpolator', (['(X, Y, Z)', 'SF'], {'bounds_error': '(False)'}), '((X, Y, Z), SF, bounds_error=False)\n', (16940, 16975), False, 'from scipy.interpolate import RegularGridInterpolator\n'), ((17590, 17633), 'numpy.linspace', 'np.linspace', (['(0.0)', 'Rmax', 'RBINS'], {'retstep': '(True)'}), '(0.0, Rmax, RBINS, retstep=True)\n', (17601, 17633), True, 'import numpy as np\n'), ((17644, 17675), 'numpy.linspace', 'np.linspace', (['Z[0]', 'Z[-1]', 'ZBINS'], {}), '(Z[0], Z[-1], ZBINS)\n', (17655, 17675), True, 
'import numpy as np\n'), ((17686, 17725), 'numpy.zeros', 'np.zeros', (['(rarr.shape[0], zar.shape[0])'], {}), '((rarr.shape[0], zar.shape[0]))\n', (17694, 17725), True, 'import numpy as np\n'), ((17783, 17804), 'tqdm.trange', 'trange', (['rarr.shape[0]'], {}), '(rarr.shape[0])\n', (17789, 17804), False, 'from tqdm import trange\n'), ((18475, 18488), 'numpy.nanmin', 'np.nanmin', (['oa'], {}), '(oa)\n', (18484, 18488), True, 'import numpy as np\n'), ((18691, 18729), 'numpy.append', 'np.append', (['oa[::-1, :]', 'oa[1:]'], {'axis': '(0)'}), '(oa[::-1, :], oa[1:], axis=0)\n', (18700, 18729), True, 'import numpy as np\n'), ((18747, 18779), 'numpy.append', 'np.append', (['(-rarr[::-1])', 'rarr[1:]'], {}), '(-rarr[::-1], rarr[1:])\n', (18756, 18779), True, 'import numpy as np\n'), ((18796, 18838), 'numpy.append', 'np.append', (['z[:, 0, :]', 'z[1:, 0, :]'], {'axis': '(0)'}), '(z[:, 0, :], z[1:, 0, :], axis=0)\n', (18805, 18838), True, 'import numpy as np\n'), ((18903, 18916), 'numpy.log', 'np.log', (['final'], {}), '(final)\n', (18909, 18916), True, 'import numpy as np\n'), ((18928, 18942), 'numpy.amin', 'np.amin', (['final'], {}), '(final)\n', (18935, 18942), True, 'import numpy as np\n'), ((18999, 19013), 'numpy.amax', 'np.amax', (['final'], {}), '(final)\n', (19006, 19013), True, 'import numpy as np\n'), ((19072, 19102), 'numpy.linspace', 'np.linspace', (['MIN', 'MAX', 'NLEVELS'], {}), '(MIN, MAX, NLEVELS)\n', (19083, 19102), True, 'import numpy as np\n'), ((19520, 19580), 'numpy.amin', 'np.amin', (['final[rlimits[0]:rlimits[1], zlimits[0]:zlimits[1]]'], {}), '(final[rlimits[0]:rlimits[1], zlimits[0]:zlimits[1]])\n', (19527, 19580), True, 'import numpy as np\n'), ((27516, 27526), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27524, 27526), True, 'import matplotlib.pyplot as plt\n'), ((28097, 28124), 'numpy.linspace', 'np.linspace', (['(-90)', '(90)', 'nbins'], {}), '(-90, 90, nbins)\n', (28108, 28124), True, 'import numpy as np\n'), ((28202, 28219), 
'builtins.range', 'range', (['R.shape[0]'], {}), '(R.shape[0])\n', (28207, 28219), False, 'from builtins import range\n'), ((28448, 28473), 'numpy.digitize', 'np.digitize', (['angles', 'bins'], {}), '(angles, bins)\n', (28459, 28473), True, 'import numpy as np\n'), ((28483, 28500), 'numpy.zeros', 'np.zeros', (['[nbins]'], {}), '([nbins])\n', (28491, 28500), True, 'import numpy as np\n'), ((28514, 28531), 'numpy.zeros', 'np.zeros', (['[nbins]'], {}), '([nbins])\n', (28522, 28531), True, 'import numpy as np\n'), ((28958, 28978), 'numpy.sum', 'np.sum', (['I[start:end]'], {}), '(I[start:end])\n', (28964, 28978), True, 'import numpy as np\n'), ((29393, 29409), 'numpy.zeros_like', 'np.zeros_like', (['D'], {}), '(D)\n', (29406, 29409), True, 'import numpy as np\n'), ((29480, 29497), 'builtins.range', 'range', (['D.shape[0]'], {}), '(D.shape[0])\n', (29485, 29497), False, 'from builtins import range\n'), ((29577, 29594), 'builtins.range', 'range', (['D.shape[0]'], {}), '(D.shape[0])\n', (29582, 29594), False, 'from builtins import range\n'), ((29674, 29691), 'builtins.range', 'range', (['D.shape[0]'], {}), '(D.shape[0])\n', (29679, 29691), False, 'from builtins import range\n'), ((30102, 30118), 'numpy.zeros_like', 'np.zeros_like', (['D'], {}), '(D)\n', (30115, 30118), True, 'import numpy as np\n'), ((30189, 30206), 'builtins.range', 'range', (['D.shape[0]'], {}), '(D.shape[0])\n', (30194, 30206), False, 'from builtins import range\n'), ((30286, 30303), 'builtins.range', 'range', (['D.shape[0]'], {}), '(D.shape[0])\n', (30291, 30303), False, 'from builtins import range\n'), ((30383, 30400), 'builtins.range', 'range', (['D.shape[0]'], {}), '(D.shape[0])\n', (30388, 30400), False, 'from builtins import range\n'), ((30802, 30818), 'numpy.zeros_like', 'np.zeros_like', (['D'], {}), '(D)\n', (30815, 30818), True, 'import numpy as np\n'), ((30889, 30906), 'builtins.range', 'range', (['D.shape[0]'], {}), '(D.shape[0])\n', (30894, 30906), False, 'from builtins import range\n'), 
((30963, 30980), 'builtins.range', 'range', (['D.shape[0]'], {}), '(D.shape[0])\n', (30968, 30980), False, 'from builtins import range\n'), ((31037, 31054), 'builtins.range', 'range', (['D.shape[0]'], {}), '(D.shape[0])\n', (31042, 31054), False, 'from builtins import range\n'), ((31553, 31585), 'numpy.linspace', 'np.linspace', (['Z[0]', 'Z[-1]', 'NBINSZ'], {}), '(Z[0], Z[-1], NBINSZ)\n', (31564, 31585), True, 'import numpy as np\n'), ((32191, 32207), 'numpy.zeros_like', 'np.zeros_like', (['D'], {}), '(D)\n', (32204, 32207), True, 'import numpy as np\n'), ((32223, 32241), 'tqdm.trange', 'trange', (['D.shape[0]'], {}), '(D.shape[0])\n', (32229, 32241), False, 'from tqdm import trange\n'), ((32297, 32315), 'tqdm.trange', 'trange', (['D.shape[1]'], {}), '(D.shape[1])\n', (32303, 32315), False, 'from tqdm import trange\n'), ((32371, 32389), 'tqdm.trange', 'trange', (['D.shape[2]'], {}), '(D.shape[2])\n', (32377, 32389), False, 'from tqdm import trange\n'), ((33234, 33242), 'builtins.range', 'range', (['(3)'], {}), '(3)\n', (33239, 33242), False, 'from builtins import range\n'), ((34321, 34335), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (34331, 34335), True, 'import numpy as np\n'), ((35110, 35179), 'numpy.reshape', 'np.reshape', (['D[:, :, :, :3]', '(D.shape[0] * D.shape[1] * D.shape[2], 3)'], {}), '(D[:, :, :, :3], (D.shape[0] * D.shape[1] * D.shape[2], 3))\n', (35120, 35179), True, 'import numpy as np\n'), ((35232, 35264), 'numpy.minimum', 'np.minimum', (['NSP', 'xyzpts.shape[0]'], {}), '(NSP, xyzpts.shape[0])\n', (35242, 35264), True, 'import numpy as np\n'), ((35354, 35381), 'numpy.array_split', 'np.array_split', (['xyzpts', 'NSP'], {}), '(xyzpts, NSP)\n', (35368, 35381), True, 'import numpy as np\n'), ((35395, 35409), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (35405, 35409), True, 'import numpy as np\n'), ((35450, 35470), 'tqdm.tqdm', 'tqdm.tqdm', (['xyzpieces'], {}), '(xyzpieces)\n', (35459, 35470), False, 'import tqdm\n'), ((35575, 
35625), 'numpy.sqrt', 'np.sqrt', (['(xyzpts[:, 0] ** 2.0 + xyzpts[:, 1] ** 2.0)'], {}), '(xyzpts[:, 0] ** 2.0 + xyzpts[:, 1] ** 2.0)\n', (35582, 35625), True, 'import numpy as np\n'), ((35646, 35701), 'numpy.histogram2d', 'np.histogram2d', (['rpts', 'xyzpts[:, 2]'], {'bins': '(XBNSRD, ZBNS)'}), '(rpts, xyzpts[:, 2], bins=(XBNSRD, ZBNS))\n', (35660, 35701), True, 'import numpy as np\n'), ((35724, 35814), 'numpy.histogram2d', 'np.histogram2d', (['rpts', 'xyzpts[:, 2]'], {'weights': 'EWDxyz', 'normed': '(False)', 'bins': '(XBNSRD, ZBNS)'}), '(rpts, xyzpts[:, 2], weights=EWDxyz, normed=False, bins=(\n XBNSRD, ZBNS))\n', (35738, 35814), True, 'import numpy as np\n'), ((35993, 36004), 'numpy.sum', 'np.sum', (['Hrz'], {}), '(Hrz)\n', (35999, 36004), True, 'import numpy as np\n'), ((36014, 36046), 'numpy.sum', 'np.sum', (['Hrz[Hrz.shape[0] / 2, :]'], {}), '(Hrz[Hrz.shape[0] / 2, :])\n', (36020, 36046), True, 'import numpy as np\n'), ((36640, 36651), 'numpy.sum', 'np.sum', (['Hrz'], {}), '(Hrz)\n', (36646, 36651), True, 'import numpy as np\n'), ((36668, 36689), 'numpy.meshgrid', 'np.meshgrid', (['XEV', 'YEV'], {}), '(XEV, YEV)\n', (36679, 36689), True, 'import numpy as np\n'), ((36815, 36866), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path + '_log_rzplot' + format)"], {'dpi': 'DPI'}), "(path + '_log_rzplot' + format, dpi=DPI)\n", (36826, 36866), True, 'import matplotlib.pyplot as plt\n'), ((36867, 36876), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (36874, 36876), True, 'import matplotlib.pyplot as plt\n'), ((36959, 36987), 'numpy.where', 'np.where', (['(Hrz > 0.0)', 'Hrz', 'mn'], {}), '(Hrz > 0.0, Hrz, mn)\n', (36967, 36987), True, 'import numpy as np\n'), ((37002, 37016), 'numpy.log10', 'np.log10', (['Hbuf'], {}), '(Hbuf)\n', (37010, 37016), True, 'import numpy as np\n'), ((37177, 37191), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (37189, 37191), True, 'import matplotlib.pyplot as plt\n'), ((37196, 37247), 'matplotlib.pyplot.savefig', 
'plt.savefig', (["(path + '_log_rzplot' + format)"], {'dpi': 'DPI'}), "(path + '_log_rzplot' + format, dpi=DPI)\n", (37207, 37247), True, 'import matplotlib.pyplot as plt\n'), ((37252, 37261), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (37259, 37261), True, 'import matplotlib.pyplot as plt\n'), ((37437, 37454), 'builtins.range', 'range', (['D.shape[0]'], {}), '(D.shape[0])\n', (37442, 37454), False, 'from builtins import range\n'), ((37818, 37835), 'builtins.range', 'range', (['D.shape[0]'], {}), '(D.shape[0])\n', (37823, 37835), False, 'from builtins import range\n'), ((38195, 38212), 'builtins.range', 'range', (['D.shape[1]'], {}), '(D.shape[1])\n', (38200, 38212), False, 'from builtins import range\n'), ((38539, 38556), 'numpy.asarray', 'np.asarray', (['xypts'], {}), '(xypts)\n', (38549, 38556), True, 'import numpy as np\n'), ((38569, 38586), 'numpy.asarray', 'np.asarray', (['xzpts'], {}), '(xzpts)\n', (38579, 38586), True, 'import numpy as np\n'), ((38599, 38616), 'numpy.asarray', 'np.asarray', (['yzpts'], {}), '(yzpts)\n', (38609, 38616), True, 'import numpy as np\n'), ((38631, 38649), 'numpy.asarray', 'np.asarray', (['xyflat'], {}), '(xyflat)\n', (38641, 38649), True, 'import numpy as np\n'), ((38663, 38681), 'numpy.asarray', 'np.asarray', (['xzflat'], {}), '(xzflat)\n', (38673, 38681), True, 'import numpy as np\n'), ((38695, 38713), 'numpy.asarray', 'np.asarray', (['yzflat'], {}), '(yzflat)\n', (38705, 38713), True, 'import numpy as np\n'), ((39485, 39504), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {}), '(title)\n', (39497, 39504), True, 'import matplotlib.pyplot as plt\n'), ((39509, 39525), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlab'], {}), '(xlab)\n', (39519, 39525), True, 'import matplotlib.pyplot as plt\n'), ((39530, 39546), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylab'], {}), '(ylab)\n', (39540, 39546), True, 'import matplotlib.pyplot as plt\n'), ((39551, 39622), 'matplotlib.pyplot.contourf', 'plt.contourf', 
(['D[:, :, iz, 0]', 'D[:, :, iz, 1]', 'EWDxy', 'contours'], {}), '(D[:, :, iz, 0], D[:, :, iz, 1], EWDxy, contours, **kwargs)\n', (39563, 39622), True, 'import matplotlib.pyplot as plt\n'), ((39683, 39692), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (39690, 39692), True, 'import matplotlib.pyplot as plt\n'), ((39799, 39826), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['EWDxy'], {}), '(EWDxy)\n', (39819, 39826), True, 'import numpy as np\n'), ((39843, 39874), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['EWDxyflat'], {}), '(EWDxyflat)\n', (39863, 39874), True, 'import numpy as np\n'), ((39888, 39915), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['EWDxz'], {}), '(EWDxz)\n', (39908, 39915), True, 'import numpy as np\n'), ((39932, 39963), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['EWDxzflat'], {}), '(EWDxzflat)\n', (39952, 39963), True, 'import numpy as np\n'), ((39977, 40004), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['EWDyz'], {}), '(EWDyz)\n', (39997, 40004), True, 'import numpy as np\n'), ((40021, 40052), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['EWDyzflat'], {}), '(EWDyzflat)\n', (40041, 40052), True, 'import numpy as np\n'), ((1792, 1801), 'numpy.log', 'np.log', (['Z'], {}), '(Z)\n', (1798, 1801), True, 'import numpy as np\n'), ((2567, 2587), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2581, 2587), False, 'import os\n'), ((2597, 2614), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (2608, 2614), False, 'import os\n'), ((3252, 3297), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['ltitle'], {'fontsize': 'title_fontsize'}), '(ltitle, fontsize=title_fontsize)\n', (3264, 3297), True, 'import matplotlib.pyplot as plt\n'), ((3306, 3322), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlab'], {}), '(xlab)\n', (3316, 3322), True, 'import matplotlib.pyplot as plt\n'), ((3331, 3347), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylab'], {}), '(ylab)\n', (3341, 3347), True, 
'import matplotlib.pyplot as plt\n'), ((3526, 3574), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(filename + '_log' + format)"], {'dpi': 'DPI'}), "(filename + '_log' + format, dpi=DPI)\n", (3537, 3574), True, 'import matplotlib.pyplot as plt\n'), ((3579, 3588), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3586, 3588), True, 'import matplotlib.pyplot as plt\n'), ((3614, 3658), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {'fontsize': 'title_fontsize'}), '(title, fontsize=title_fontsize)\n', (3626, 3658), True, 'import matplotlib.pyplot as plt\n'), ((3667, 3683), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlab'], {}), '(xlab)\n', (3677, 3683), True, 'import matplotlib.pyplot as plt\n'), ((3692, 3708), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylab'], {}), '(ylab)\n', (3702, 3708), True, 'import matplotlib.pyplot as plt\n'), ((3717, 3804), 'matplotlib.pyplot.contourf', 'plt.contourf', (['data[..., la[0]]', 'data[..., la[1]]', 'data[..., 3]', 'contours'], {}), '(data[..., la[0]], data[..., la[1]], data[..., 3], contours, **\n kwargs)\n', (3729, 3804), True, 'import matplotlib.pyplot as plt\n'), ((3808, 3847), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(filename + format)'], {'dpi': 'DPI'}), '(filename + format, dpi=DPI)\n', (3819, 3847), True, 'import matplotlib.pyplot as plt\n'), ((3854, 3863), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3861, 3863), True, 'import matplotlib.pyplot as plt\n'), ((4808, 4825), 'builtins.range', 'range', (['D.shape[1]'], {}), '(D.shape[1])\n', (4813, 4825), False, 'from builtins import range\n'), ((5370, 5381), 'numpy.log', 'np.log', (['EWD'], {}), '(EWD)\n', (5376, 5381), True, 'import numpy as np\n'), ((5786, 5806), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (5800, 5806), False, 'import os\n'), ((5816, 5833), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (5827, 5833), False, 'import os\n'), ((6185, 6202), 'builtins.range', 'range', (['D.shape[1]'], {}), 
'(D.shape[1])\n', (6190, 6202), False, 'from builtins import range\n'), ((6464, 6481), 'builtins.range', 'range', (['D.shape[2]'], {}), '(D.shape[2])\n', (6469, 6481), False, 'from builtins import range\n'), ((6741, 6758), 'builtins.range', 'range', (['D.shape[2]'], {}), '(D.shape[2])\n', (6746, 6758), False, 'from builtins import range\n'), ((7914, 7927), 'numpy.log', 'np.log', (['EWDxy'], {}), '(EWDxy)\n', (7920, 7927), True, 'import numpy as np\n'), ((7976, 7989), 'numpy.log', 'np.log', (['EWDxy'], {}), '(EWDxy)\n', (7982, 7989), True, 'import numpy as np\n'), ((8505, 8518), 'numpy.log', 'np.log', (['EWDxz'], {}), '(EWDxz)\n', (8511, 8518), True, 'import numpy as np\n'), ((8567, 8580), 'numpy.log', 'np.log', (['EWDxz'], {}), '(EWDxz)\n', (8573, 8580), True, 'import numpy as np\n'), ((8641, 8663), 'numpy.amax', 'np.amax', (['D[:, 0, :, 0]'], {}), '(D[:, 0, :, 0])\n', (8648, 8663), True, 'import numpy as np\n'), ((8665, 8687), 'numpy.amax', 'np.amax', (['D[:, 0, :, 2]'], {}), '(D[:, 0, :, 2])\n', (8672, 8687), True, 'import numpy as np\n'), ((9233, 9246), 'numpy.log', 'np.log', (['EWDyz'], {}), '(EWDyz)\n', (9239, 9246), True, 'import numpy as np\n'), ((9295, 9308), 'numpy.log', 'np.log', (['EWDyz'], {}), '(EWDyz)\n', (9301, 9308), True, 'import numpy as np\n'), ((11613, 11638), 'numpy.amax', 'np.amax', (['inverse_fft.real'], {}), '(inverse_fft.real)\n', (11620, 11638), True, 'import numpy as np\n'), ((12032, 12046), 'numpy.amin', 'np.amin', (['final'], {}), '(final)\n', (12039, 12046), True, 'import numpy as np\n'), ((13404, 13461), 'numpy.linspace', 'np.linspace', (['(0.0)', '(np.pi * 2.0)', 'NTHETABINS'], {'endpoint': '(False)'}), '(0.0, np.pi * 2.0, NTHETABINS, endpoint=False)\n', (13415, 13461), True, 'import numpy as np\n'), ((13503, 13537), 'numpy.meshgrid', 'np.meshgrid', (['thetas', 'rarr[ir]', 'zar'], {}), '(thetas, rarr[ir], zar)\n', (13514, 13537), True, 'import numpy as np\n'), ((14003, 14015), 'numpy.isnan', 'np.isnan', (['oa'], {}), '(oa)\n', 
(14011, 14015), True, 'import numpy as np\n'), ((14636, 14653), 'builtins.range', 'range', (['Z.shape[0]'], {}), '(Z.shape[0])\n', (14641, 14653), False, 'from builtins import range\n'), ((16467, 16487), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (16481, 16487), False, 'import os\n'), ((16497, 16514), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (16508, 16514), False, 'import os\n'), ((17195, 17211), 'numpy.cross', 'np.cross', (['a2', 'a3'], {}), '(a2, a3)\n', (17203, 17211), True, 'import numpy as np\n'), ((17256, 17272), 'numpy.cross', 'np.cross', (['a3', 'a1'], {}), '(a3, a1)\n', (17264, 17272), True, 'import numpy as np\n'), ((17317, 17333), 'numpy.cross', 'np.cross', (['a1', 'a2'], {}), '(a1, a2)\n', (17325, 17333), True, 'import numpy as np\n'), ((17395, 17418), 'numpy.vstack', 'np.vstack', (['(b1, b2, b3)'], {}), '((b1, b2, b3))\n', (17404, 17418), True, 'import numpy as np\n'), ((17940, 17997), 'numpy.linspace', 'np.linspace', (['(0.0)', '(np.pi * 2.0)', 'NTHETABINS'], {'endpoint': '(False)'}), '(0.0, np.pi * 2.0, NTHETABINS, endpoint=False)\n', (17951, 17997), True, 'import numpy as np\n'), ((18039, 18073), 'numpy.meshgrid', 'np.meshgrid', (['thetas', 'rarr[ir]', 'zar'], {}), '(thetas, rarr[ir], zar)\n', (18050, 18073), True, 'import numpy as np\n'), ((18299, 18320), 'numpy.matmul', 'np.matmul', (['pts', 'b_inv'], {}), '(pts, b_inv)\n', (18308, 18320), True, 'import numpy as np\n'), ((18507, 18519), 'numpy.isnan', 'np.isnan', (['oa'], {}), '(oa)\n', (18515, 18519), True, 'import numpy as np\n'), ((18567, 18581), 'numpy.average', 'np.average', (['oa'], {}), '(oa)\n', (18577, 18581), True, 'import numpy as np\n'), ((19751, 19782), 'numpy.linspace', 'np.linspace', (['(0)', 'factor', 'NLEVELS'], {}), '(0, factor, NLEVELS)\n', (19762, 19782), True, 'import numpy as np\n'), ((19906, 19918), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (19916, 19918), True, 'import matplotlib.pyplot as plt\n'), ((19927, 19987), 
'matplotlib.pyplot.plot', 'plt.plot', (['rfin', 'final[:, zfin[0].shape[0] // 2]'], {'linewidth': '(2)'}), '(rfin, final[:, zfin[0].shape[0] // 2], linewidth=2)\n', (19935, 19987), True, 'import matplotlib.pyplot as plt\n'), ((19994, 20040), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$q_r\\\\ (\\\\AA^{-1})$"""'], {'fontsize': '(14)'}), "('$q_r\\\\ (\\\\AA^{-1})$', fontsize=14)\n", (20004, 20040), True, 'import matplotlib.pyplot as plt\n'), ((20047, 20083), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Intensity"""'], {'fontsize': '(14)'}), "('Intensity', fontsize=14)\n", (20057, 20083), True, 'import matplotlib.pyplot as plt\n'), ((20092, 20110), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (20108, 20110), True, 'import matplotlib.pyplot as plt\n'), ((20120, 20132), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (20130, 20132), True, 'import matplotlib.pyplot as plt\n'), ((20152, 20226), 'matplotlib.pyplot.contourf', 'plt.contourf', (['rfin', 'zfin[0]', 'final.T'], {'levels': 'lvls', 'cmap': 'cmap', 'extend': '"""max"""'}), "(rfin, zfin[0], final.T, levels=lvls, cmap=cmap, extend='max')\n", (20164, 20226), True, 'import matplotlib.pyplot as plt\n'), ((20242, 20263), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['heatmap'], {}), '(heatmap)\n', (20254, 20263), True, 'import matplotlib.pyplot as plt\n'), ((20272, 20318), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$q_r\\\\ (\\\\AA^{-1}$)"""'], {'fontsize': '(18)'}), "('$q_r\\\\ (\\\\AA^{-1}$)', fontsize=18)\n", (20282, 20318), True, 'import matplotlib.pyplot as plt\n'), ((20325, 20371), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$q_z\\\\ (\\\\AA^{-1}$)"""'], {'fontsize': '(18)'}), "('$q_z\\\\ (\\\\AA^{-1}$)', fontsize=18)\n", (20335, 20371), True, 'import matplotlib.pyplot as plt\n'), ((20592, 20610), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (20608, 20610), True, 'import matplotlib.pyplot as plt\n'), ((20619, 20644), 
'matplotlib.pyplot.savefig', 'plt.savefig', (['"""rzplot.png"""'], {}), "('rzplot.png')\n", (20630, 20644), True, 'import matplotlib.pyplot as plt\n'), ((20901, 20913), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (20911, 20913), True, 'import matplotlib.pyplot as plt\n'), ((21016, 21076), 'matplotlib.pyplot.plot', 'plt.plot', (['rfin', 'final[:, rpi_ndx]'], {'linewidth': '(2)', 'color': '"""blue"""'}), "(rfin, final[:, rpi_ndx], linewidth=2, color='blue')\n", (21024, 21076), True, 'import matplotlib.pyplot as plt\n'), ((21116, 21140), 'numpy.array', 'np.array', (['[0, 0.3, 4, 1]'], {}), '([0, 0.3, 4, 1])\n', (21124, 21140), True, 'import numpy as np\n'), ((21163, 21277), 'scipy.optimize.curve_fit', 'curve_fit', (['gaussian', 'rfin', 'final[:, rpi_ndx]', 'p'], {'bounds': '((-np.inf, 0, 0, 0), (np.inf, np.inf, np.inf, np.inf))'}), '(gaussian, rfin, final[:, rpi_ndx], p, bounds=((-np.inf, 0, 0, 0),\n (np.inf, np.inf, np.inf, np.inf)))\n', (21172, 21277), False, 'from scipy.optimize import curve_fit\n'), ((21651, 21672), 'numpy.array', 'np.array', (['[0.1, 0, 4]'], {}), '([0.1, 0, 4])\n', (21659, 21672), True, 'import numpy as np\n'), ((21703, 21806), 'scipy.optimize.curve_fit', 'curve_fit', (['lorentz', 'rfin', 'final[:, rpi_ndx]', 'p'], {'bounds': '[[0, -np.inf, 0], [np.inf, np.inf, np.inf]]'}), '(lorentz, rfin, final[:, rpi_ndx], p, bounds=[[0, -np.inf, 0], [np\n .inf, np.inf, np.inf]])\n', (21712, 21806), False, 'from scipy.optimize import curve_fit\n'), ((22136, 22159), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(16)'}), '(fontsize=16)\n', (22146, 22159), True, 'import matplotlib.pyplot as plt\n'), ((22168, 22214), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$q_r\\\\ (\\\\AA^{-1})$"""'], {'fontsize': '(18)'}), "('$q_r\\\\ (\\\\AA^{-1})$', fontsize=18)\n", (22178, 22214), True, 'import matplotlib.pyplot as plt\n'), ((22221, 22257), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Intensity"""'], {'fontsize': '(18)'}), 
"('Intensity', fontsize=18)\n", (22231, 22257), True, 'import matplotlib.pyplot as plt\n'), ((22324, 22342), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (22340, 22342), True, 'import matplotlib.pyplot as plt\n'), ((24633, 24645), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (24643, 24645), True, 'import matplotlib.pyplot as plt\n'), ((24654, 24715), 'matplotlib.pyplot.contourf', 'plt.contourf', (['rfin', 'zfin[0]', 'final.T'], {'levels': 'lvls', 'cmap': '"""jet"""'}), "(rfin, zfin[0], final.T, levels=lvls, cmap='jet')\n", (24666, 24715), True, 'import matplotlib.pyplot as plt\n'), ((24724, 24738), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (24736, 24738), True, 'import matplotlib.pyplot as plt\n'), ((24748, 24767), 'matplotlib.pyplot.title', 'plt.title', (['"""S(r,z)"""'], {}), "('S(r,z)')\n", (24757, 24767), True, 'import matplotlib.pyplot as plt\n'), ((24776, 24802), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('r ' + unitlab)"], {}), "('r ' + unitlab)\n", (24786, 24802), True, 'import matplotlib.pyplot as plt\n'), ((24811, 24837), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('z ' + unitlab)"], {}), "('z ' + unitlab)\n", (24821, 24837), True, 'import matplotlib.pyplot as plt\n'), ((24847, 24876), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""new_rzplot.png"""'], {}), "('new_rzplot.png')\n", (24858, 24876), True, 'import matplotlib.pyplot as plt\n'), ((24886, 24898), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (24896, 24898), True, 'import matplotlib.pyplot as plt\n'), ((24913, 24989), 'matplotlib.pyplot.contourf', 'plt.contourf', (['rfin', 'zfin[0]', 'final.T'], {'levels': 'lvls', 'cmap': '"""jet"""', 'extend': '"""both"""'}), "(rfin, zfin[0], final.T, levels=lvls, cmap='jet', extend='both')\n", (24925, 24989), True, 'import matplotlib.pyplot as plt\n'), ((25065, 25084), 'matplotlib.pyplot.title', 'plt.title', (['"""S(r,z)"""'], {}), "('S(r,z)')\n", (25074, 25084), True, 'import 
matplotlib.pyplot as plt\n'), ((25093, 25119), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('r ' + unitlab)"], {}), "('r ' + unitlab)\n", (25103, 25119), True, 'import matplotlib.pyplot as plt\n'), ((25128, 25154), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('z ' + unitlab)"], {}), "('z ' + unitlab)\n", (25138, 25154), True, 'import matplotlib.pyplot as plt\n'), ((25163, 25177), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (25175, 25177), True, 'import matplotlib.pyplot as plt\n'), ((25186, 25207), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""cs.png"""'], {}), "('cs.png')\n", (25197, 25207), True, 'import matplotlib.pyplot as plt\n'), ((25217, 25229), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (25227, 25229), True, 'import matplotlib.pyplot as plt\n'), ((25239, 25300), 'matplotlib.pyplot.contourf', 'plt.contourf', (['rfin', 'zfin[0]', 'final.T'], {'levels': 'lvls', 'cmap': '"""jet"""'}), "(rfin, zfin[0], final.T, levels=lvls, cmap='jet')\n", (25251, 25300), True, 'import matplotlib.pyplot as plt\n'), ((25309, 25323), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (25321, 25323), True, 'import matplotlib.pyplot as plt\n'), ((25333, 25352), 'matplotlib.pyplot.title', 'plt.title', (['"""S(r,z)"""'], {}), "('S(r,z)')\n", (25342, 25352), True, 'import matplotlib.pyplot as plt\n'), ((25361, 25387), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('r ' + unitlab)"], {}), "('r ' + unitlab)\n", (25371, 25387), True, 'import matplotlib.pyplot as plt\n'), ((25396, 25422), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('z ' + unitlab)"], {}), "('z ' + unitlab)\n", (25406, 25422), True, 'import matplotlib.pyplot as plt\n'), ((25431, 25461), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""new_rzplot2.png"""'], {}), "('new_rzplot2.png')\n", (25442, 25461), True, 'import matplotlib.pyplot as plt\n'), ((25471, 25483), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (25481, 25483), True, 'import matplotlib.pyplot 
as plt\n'), ((25569, 25635), 'matplotlib.pyplot.contourf', 'plt.contourf', (['rfin', 'zfin[0]', 'logfinal.T'], {'levels': 'lglvls', 'cmap': '"""jet"""'}), "(rfin, zfin[0], logfinal.T, levels=lglvls, cmap='jet')\n", (25581, 25635), True, 'import matplotlib.pyplot as plt\n'), ((25644, 25658), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (25656, 25658), True, 'import matplotlib.pyplot as plt\n'), ((25668, 25691), 'matplotlib.pyplot.title', 'plt.title', (['"""ln(S(r,z))"""'], {}), "('ln(S(r,z))')\n", (25677, 25691), True, 'import matplotlib.pyplot as plt\n'), ((25700, 25726), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('r ' + unitlab)"], {}), "('r ' + unitlab)\n", (25710, 25726), True, 'import matplotlib.pyplot as plt\n'), ((25735, 25761), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('z ' + unitlab)"], {}), "('z ' + unitlab)\n", (25745, 25761), True, 'import matplotlib.pyplot as plt\n'), ((25770, 25803), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""new_log_rzplot.png"""'], {}), "('new_log_rzplot.png')\n", (25781, 25803), True, 'import matplotlib.pyplot as plt\n'), ((25813, 25825), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (25823, 25825), True, 'import matplotlib.pyplot as plt\n'), ((25840, 25879), 'numpy.linspace', 'np.linspace', (['(-Rmax)', 'Rmax', '(RBINS * 2 - 1)'], {}), '(-Rmax, Rmax, RBINS * 2 - 1)\n', (25851, 25879), True, 'import numpy as np\n'), ((25893, 25924), 'numpy.linspace', 'np.linspace', (['Z[0]', 'Z[-1]', 'RBINS'], {}), '(Z[0], Z[-1], RBINS)\n', (25904, 25924), True, 'import numpy as np\n'), ((26156, 26226), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xg2[0, :, :]', 'zg2[0, :, :]', 'o2n'], {'levels': 'lvls', 'cmap': '"""jet"""'}), "(xg2[0, :, :], zg2[0, :, :], o2n, levels=lvls, cmap='jet')\n", (26168, 26226), True, 'import matplotlib.pyplot as plt\n'), ((26236, 26262), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('x ' + unitlab)"], {}), "('x ' + unitlab)\n", (26246, 26262), True, 'import 
matplotlib.pyplot as plt\n'), ((26271, 26297), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('z ' + unitlab)"], {}), "('z ' + unitlab)\n", (26281, 26297), True, 'import matplotlib.pyplot as plt\n'), ((26306, 26334), 'matplotlib.pyplot.title', 'plt.title', (['"""S(x,z)|$_{y=0}$"""'], {}), "('S(x,z)|$_{y=0}$')\n", (26315, 26334), True, 'import matplotlib.pyplot as plt\n'), ((26344, 26358), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (26356, 26358), True, 'import matplotlib.pyplot as plt\n'), ((26367, 26396), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""new_xzplot.png"""'], {}), "('new_xzplot.png')\n", (26378, 26396), True, 'import matplotlib.pyplot as plt\n'), ((26406, 26418), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (26416, 26418), True, 'import matplotlib.pyplot as plt\n'), ((26433, 26472), 'numpy.linspace', 'np.linspace', (['(-Rmax)', 'Rmax', '(RBINS * 2 - 1)'], {}), '(-Rmax, Rmax, RBINS * 2 - 1)\n', (26444, 26472), True, 'import numpy as np\n'), ((26486, 26525), 'numpy.linspace', 'np.linspace', (['(-Rmax)', 'Rmax', '(RBINS * 2 - 1)'], {}), '(-Rmax, Rmax, RBINS * 2 - 1)\n', (26497, 26525), True, 'import numpy as np\n'), ((26851, 26923), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xg2[:, :, 0]', 'yg2[:, :, 0]', 'o2n'], {'levels': 'lvlsxy', 'cmap': '"""jet"""'}), "(xg2[:, :, 0], yg2[:, :, 0], o2n, levels=lvlsxy, cmap='jet')\n", (26863, 26923), True, 'import matplotlib.pyplot as plt\n'), ((26933, 26959), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('x ' + unitlab)"], {}), "('x ' + unitlab)\n", (26943, 26959), True, 'import matplotlib.pyplot as plt\n'), ((26968, 26994), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('y ' + unitlab)"], {}), "('y ' + unitlab)\n", (26978, 26994), True, 'import matplotlib.pyplot as plt\n'), ((27003, 27022), 'matplotlib.pyplot.title', 'plt.title', (['"""S(x,y)"""'], {}), "('S(x,y)')\n", (27012, 27022), True, 'import matplotlib.pyplot as plt\n'), ((27047, 27061), 'matplotlib.pyplot.colorbar', 
'plt.colorbar', ([], {}), '()\n', (27059, 27061), True, 'import matplotlib.pyplot as plt\n'), ((27070, 27099), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""new_xyplot.png"""'], {}), "('new_xyplot.png')\n", (27081, 27099), True, 'import matplotlib.pyplot as plt\n'), ((28238, 28255), 'builtins.range', 'range', (['Z.shape[0]'], {}), '(Z.shape[0])\n', (28243, 28255), False, 'from builtins import range\n'), ((29017, 29042), 'numpy.sum', 'np.sum', (['counts[start:end]'], {}), '(counts[start:end])\n', (29023, 29042), True, 'import numpy as np\n'), ((29241, 29257), 'numpy.cross', 'np.cross', (['a2', 'a3'], {}), '(a2, a3)\n', (29249, 29257), True, 'import numpy as np\n'), ((29269, 29285), 'numpy.cross', 'np.cross', (['a2', 'a3'], {}), '(a2, a3)\n', (29277, 29285), True, 'import numpy as np\n'), ((29299, 29315), 'numpy.cross', 'np.cross', (['a3', 'a1'], {}), '(a3, a1)\n', (29307, 29315), True, 'import numpy as np\n'), ((29345, 29361), 'numpy.cross', 'np.cross', (['a1', 'a2'], {}), '(a1, a2)\n', (29353, 29361), True, 'import numpy as np\n'), ((29891, 29907), 'numpy.cross', 'np.cross', (['a2', 'a3'], {}), '(a2, a3)\n', (29899, 29907), True, 'import numpy as np\n'), ((29952, 29968), 'numpy.cross', 'np.cross', (['a3', 'a1'], {}), '(a3, a1)\n', (29960, 29968), True, 'import numpy as np\n'), ((30026, 30042), 'numpy.cross', 'np.cross', (['a1', 'a2'], {}), '(a1, a2)\n', (30034, 30042), True, 'import numpy as np\n'), ((30572, 30588), 'numpy.cross', 'np.cross', (['a2', 'a3'], {}), '(a2, a3)\n', (30580, 30588), True, 'import numpy as np\n'), ((30631, 30647), 'numpy.cross', 'np.cross', (['a3', 'a1'], {}), '(a3, a1)\n', (30639, 30647), True, 'import numpy as np\n'), ((30690, 30706), 'numpy.cross', 'np.cross', (['a1', 'a2'], {}), '(a1, a2)\n', (30698, 30706), True, 'import numpy as np\n'), ((30766, 30789), 'numpy.vstack', 'np.vstack', (['(b1, b2, b3)'], {}), '((b1, b2, b3))\n', (30775, 30789), True, 'import numpy as np\n'), ((31369, 31389), 'os.path.exists', 'os.path.exists', 
(['path'], {}), '(path)\n', (31383, 31389), False, 'import os\n'), ((31399, 31416), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (31410, 31416), False, 'import os\n'), ((31625, 31675), 'numpy.linspace', 'np.linspace', (['(-NBINSRAD)', 'NBINSRAD'], {'num': '(NBINSRAD * 2)'}), '(-NBINSRAD, NBINSRAD, num=NBINSRAD * 2)\n', (31636, 31675), True, 'import numpy as np\n'), ((31990, 32006), 'numpy.cross', 'np.cross', (['a2', 'a3'], {}), '(a2, a3)\n', (31998, 32006), True, 'import numpy as np\n'), ((32059, 32075), 'numpy.cross', 'np.cross', (['a3', 'a1'], {}), '(a3, a1)\n', (32067, 32075), True, 'import numpy as np\n'), ((32128, 32144), 'numpy.cross', 'np.cross', (['a1', 'a2'], {}), '(a1, a2)\n', (32136, 32144), True, 'import numpy as np\n'), ((33261, 33276), 'builtins.range', 'range', (['(i + 1)', '(3)'], {}), '(i + 1, 3)\n', (33266, 33276), False, 'from builtins import range\n'), ((33550, 33608), 'scipy.interpolate.RegularGridInterpolator', 'RegularGridInterpolator', (['(X, Y, Z)', 'SF'], {'bounds_error': '(False)'}), '((X, Y, Z), SF, bounds_error=False)\n', (33573, 33608), False, 'from scipy.interpolate import RegularGridInterpolator\n'), ((33974, 33985), 'time.time', 'time.time', ([], {}), '()\n', (33983, 33985), False, 'import time\n'), ((34218, 34229), 'time.time', 'time.time', ([], {}), '()\n', (34227, 34229), False, 'import time\n'), ((34435, 34453), 'tqdm.trange', 'trange', (['D.shape[0]'], {}), '(D.shape[0])\n', (34441, 34453), False, 'from tqdm import trange\n'), ((34656, 34741), 'numpy.linspace', 'np.linspace', (['D[0, 0, 0, 0]', 'D[-1, 0, 0, 0]', '(Scale * D.shape[0])'], {'dtype': 'np.float16'}), '(D[0, 0, 0, 0], D[-1, 0, 0, 0], Scale * D.shape[0], dtype=np.float16\n )\n', (34667, 34741), True, 'import numpy as np\n'), ((34752, 34837), 'numpy.linspace', 'np.linspace', (['D[0, 0, 0, 1]', 'D[0, -1, 0, 1]', '(Scale * D.shape[1])'], {'dtype': 'np.float16'}), '(D[0, 0, 0, 1], D[0, -1, 0, 1], Scale * D.shape[1], dtype=np.float16\n )\n', (34763, 34837), 
True, 'import numpy as np\n'), ((34848, 34933), 'numpy.linspace', 'np.linspace', (['D[0, 0, 0, 2]', 'D[0, 0, -1, 2]', '(Scale * D.shape[2])'], {'dtype': 'np.float16'}), '(D[0, 0, 0, 2], D[0, 0, -1, 2], Scale * D.shape[2], dtype=np.float16\n )\n', (34859, 34933), True, 'import numpy as np\n'), ((34968, 34997), 'numpy.meshgrid', 'np.meshgrid', (['XPTS', 'YPTS', 'ZPTS'], {}), '(XPTS, YPTS, ZPTS)\n', (34979, 34997), True, 'import numpy as np\n'), ((35509, 35539), 'numpy.append', 'np.append', (['EWDxyz', 'buf'], {'axis': '(0)'}), '(EWDxyz, buf, axis=0)\n', (35518, 35539), True, 'import numpy as np\n'), ((35864, 35896), 'numpy.where', 'np.where', (['(Hcount == 0)', '(1)', 'Hcount'], {}), '(Hcount == 0, 1, Hcount)\n', (35872, 35896), True, 'import numpy as np\n'), ((35957, 35982), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['Hrz'], {}), '(Hrz)\n', (35977, 35982), True, 'import numpy as np\n'), ((36159, 36189), 'builtins.range', 'range', (['(1)', '(Hrz.shape[0] / 2 - 1)'], {}), '(1, Hrz.shape[0] / 2 - 1)\n', (36164, 36189), False, 'from builtins import range\n'), ((36404, 36434), 'builtins.range', 'range', (['(1)', '(Hrz.shape[0] / 2 - 1)'], {}), '(1, Hrz.shape[0] / 2 - 1)\n', (36409, 36434), False, 'from builtins import range\n'), ((36736, 36751), 'numpy.log10', 'np.log10', (['Hrz.T'], {}), '(Hrz.T)\n', (36744, 36751), True, 'import numpy as np\n'), ((37474, 37491), 'builtins.range', 'range', (['D.shape[1]'], {}), '(D.shape[1])\n', (37479, 37491), False, 'from builtins import range\n'), ((37855, 37872), 'builtins.range', 'range', (['D.shape[2]'], {}), '(D.shape[2])\n', (37860, 37872), False, 'from builtins import range\n'), ((38232, 38249), 'builtins.range', 'range', (['D.shape[2]'], {}), '(D.shape[2])\n', (38237, 38249), False, 'from builtins import range\n'), ((3374, 3394), 'numpy.log', 'np.log', (['data[..., 3]'], {}), '(data[..., 3])\n', (3380, 3394), True, 'import numpy as np\n'), ((3453, 3473), 'numpy.log', 'np.log', (['data[..., 3]'], {}), '(data[..., 
3])\n', (3459, 3473), True, 'import numpy as np\n'), ((4843, 4870), 'numpy.sqrt', 'np.sqrt', (['(xsq + Y[iy] ** 2.0)'], {}), '(xsq + Y[iy] ** 2.0)\n', (4850, 4870), True, 'import numpy as np\n'), ((12056, 12070), 'numpy.amax', 'np.amax', (['final'], {}), '(final)\n', (12063, 12070), True, 'import numpy as np\n'), ((12671, 12687), 'numpy.cross', 'np.cross', (['a2', 'a3'], {}), '(a2, a3)\n', (12679, 12687), True, 'import numpy as np\n'), ((12736, 12752), 'numpy.cross', 'np.cross', (['a3', 'a1'], {}), '(a3, a1)\n', (12744, 12752), True, 'import numpy as np\n'), ((12801, 12817), 'numpy.cross', 'np.cross', (['a1', 'a2'], {}), '(a1, a2)\n', (12809, 12817), True, 'import numpy as np\n'), ((12883, 12906), 'numpy.vstack', 'np.vstack', (['(b1, b2, b3)'], {}), '((b1, b2, b3))\n', (12892, 12906), True, 'import numpy as np\n'), ((13594, 13603), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (13600, 13603), True, 'import numpy as np\n'), ((13641, 13650), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (13647, 13650), True, 'import numpy as np\n'), ((13834, 13855), 'numpy.matmul', 'np.matmul', (['pts', 'b_inv'], {}), '(pts, b_inv)\n', (13843, 13855), True, 'import numpy as np\n'), ((15537, 15585), 'numpy.exp', 'np.exp', (['(-(points - mean) ** 2 / (2 * sigma ** 2))'], {}), '(-(points - mean) ** 2 / (2 * sigma ** 2))\n', (15543, 15585), True, 'import numpy as np\n'), ((17227, 17243), 'numpy.cross', 'np.cross', (['a2', 'a3'], {}), '(a2, a3)\n', (17235, 17243), True, 'import numpy as np\n'), ((17288, 17304), 'numpy.cross', 'np.cross', (['a3', 'a1'], {}), '(a3, a1)\n', (17296, 17304), True, 'import numpy as np\n'), ((17349, 17365), 'numpy.cross', 'np.cross', (['a1', 'a2'], {}), '(a1, a2)\n', (17357, 17365), True, 'import numpy as np\n'), ((18130, 18139), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (18136, 18139), True, 'import numpy as np\n'), ((18177, 18186), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (18183, 18186), True, 'import numpy as np\n'), ((19374, 19392), 'numpy.abs', 
'np.abs', (['(rfin + 2.5)'], {}), '(rfin + 2.5)\n', (19380, 19392), True, 'import numpy as np\n'), ((19405, 19423), 'numpy.abs', 'np.abs', (['(rfin - 2.5)'], {}), '(rfin - 2.5)\n', (19411, 19423), True, 'import numpy as np\n'), ((19451, 19472), 'numpy.abs', 'np.abs', (['(zfin[0] + 2.5)'], {}), '(zfin[0] + 2.5)\n', (19457, 19472), True, 'import numpy as np\n'), ((19485, 19506), 'numpy.abs', 'np.abs', (['(zfin[0] - 2.5)'], {}), '(zfin[0] - 2.5)\n', (19491, 19506), True, 'import numpy as np\n'), ((19814, 19837), 'numpy.log10', 'np.log10', (['final[-1, -1]'], {}), '(final[-1, -1])\n', (19822, 19837), True, 'import numpy as np\n'), ((25513, 25530), 'numpy.amin', 'np.amin', (['logfinal'], {}), '(logfinal)\n', (25520, 25530), True, 'import numpy as np\n'), ((25532, 25549), 'numpy.amax', 'np.amax', (['logfinal'], {}), '(logfinal)\n', (25539, 25549), True, 'import numpy as np\n'), ((25966, 25979), 'numpy.asarray', 'np.asarray', (['(0)'], {}), '(0)\n', (25976, 25979), True, 'import numpy as np\n'), ((26571, 26584), 'numpy.asarray', 'np.asarray', (['(0)'], {}), '(0)\n', (26581, 26584), True, 'import numpy as np\n'), ((26740, 26756), 'numpy.average', 'np.average', (['out2'], {}), '(out2)\n', (26750, 26756), True, 'import numpy as np\n'), ((26787, 26799), 'numpy.amin', 'np.amin', (['o2n'], {}), '(o2n)\n', (26794, 26799), True, 'import numpy as np\n'), ((26801, 26813), 'numpy.amax', 'np.amax', (['o2n'], {}), '(o2n)\n', (26808, 26813), True, 'import numpy as np\n'), ((27132, 27144), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (27142, 27144), True, 'import matplotlib.pyplot as plt\n'), ((27196, 27223), 'numpy.linspace', 'np.linspace', (['(-0.4)', '(0.4)', '(100)'], {}), '(-0.4, 0.4, 100)\n', (27207, 27223), True, 'import numpy as np\n'), ((27237, 27312), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xg2[0, :, :]', 'zg2[0, :, :]', 'dif'], {'levels': 'lvls2', 'cmap': '"""seismic"""'}), "(xg2[0, :, :], zg2[0, :, :], dif, levels=lvls2, cmap='seismic')\n", (27249, 
27312), True, 'import matplotlib.pyplot as plt\n'), ((27325, 27353), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('x,r ' + unitlab)"], {}), "('x,r ' + unitlab)\n", (27335, 27353), True, 'import matplotlib.pyplot as plt\n'), ((27366, 27392), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('z ' + unitlab)"], {}), "('z ' + unitlab)\n", (27376, 27392), True, 'import matplotlib.pyplot as plt\n'), ((27405, 27440), 'matplotlib.pyplot.title', 'plt.title', (['"""S(r,z)-S(x,z)|$_{y=0}$"""'], {}), "('S(r,z)-S(x,z)|$_{y=0}$')\n", (27414, 27440), True, 'import matplotlib.pyplot as plt\n'), ((27454, 27468), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (27466, 27468), True, 'import matplotlib.pyplot as plt\n'), ((27481, 27510), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""difference.png"""'], {}), "('difference.png')\n", (27492, 27510), True, 'import matplotlib.pyplot as plt\n'), ((29923, 29939), 'numpy.cross', 'np.cross', (['a2', 'a3'], {}), '(a2, a3)\n', (29931, 29939), True, 'import numpy as np\n'), ((29984, 30000), 'numpy.cross', 'np.cross', (['a3', 'a1'], {}), '(a3, a1)\n', (29992, 30000), True, 'import numpy as np\n'), ((30058, 30074), 'numpy.cross', 'np.cross', (['a1', 'a2'], {}), '(a1, a2)\n', (30066, 30074), True, 'import numpy as np\n'), ((30602, 30618), 'numpy.cross', 'np.cross', (['a2', 'a3'], {}), '(a2, a3)\n', (30610, 30618), True, 'import numpy as np\n'), ((30661, 30677), 'numpy.cross', 'np.cross', (['a3', 'a1'], {}), '(a3, a1)\n', (30669, 30677), True, 'import numpy as np\n'), ((30720, 30736), 'numpy.cross', 'np.cross', (['a1', 'a2'], {}), '(a1, a2)\n', (30728, 30736), True, 'import numpy as np\n'), ((31715, 31730), 'numpy.sign', 'np.sign', (['XBNSRD'], {}), '(XBNSRD)\n', (31722, 31730), True, 'import numpy as np\n'), ((32021, 32037), 'numpy.cross', 'np.cross', (['a2', 'a3'], {}), '(a2, a3)\n', (32029, 32037), True, 'import numpy as np\n'), ((32090, 32106), 'numpy.cross', 'np.cross', (['a3', 'a1'], {}), '(a3, a1)\n', (32098, 32106), True, 
'import numpy as np\n'), ((32159, 32175), 'numpy.cross', 'np.cross', (['a1', 'a2'], {}), '(a1, a2)\n', (32167, 32175), True, 'import numpy as np\n'), ((34477, 34494), 'builtins.range', 'range', (['D.shape[1]'], {}), '(D.shape[1])\n', (34482, 34494), False, 'from builtins import range\n'), ((36930, 36945), 'numpy.nonzero', 'np.nonzero', (['Hrz'], {}), '(Hrz)\n', (36940, 36945), True, 'import numpy as np\n'), ((37091, 37107), 'numpy.amin', 'np.amin', (['Log_HRZ'], {}), '(Log_HRZ)\n', (37098, 37107), True, 'import numpy as np\n'), ((37114, 37130), 'numpy.amax', 'np.amax', (['Log_HRZ'], {}), '(Log_HRZ)\n', (37121, 37130), True, 'import numpy as np\n'), ((2136, 2146), 'numpy.amax', 'np.amax', (['Z'], {}), '(Z)\n', (2143, 2146), True, 'import numpy as np\n'), ((2745, 2768), 'numpy.unique', 'np.unique', (['data[..., i]'], {}), '(data[..., i])\n', (2754, 2768), True, 'import numpy as np\n'), ((4897, 4913), 'past.utils.old_div', 'old_div', (['R', 'K_ES'], {}), '(R, K_ES)\n', (4904, 4913), False, 'from past.utils import old_div\n'), ((4939, 4952), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4945, 4952), True, 'import numpy as np\n'), ((4978, 4991), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4984, 4991), True, 'import numpy as np\n'), ((12703, 12719), 'numpy.cross', 'np.cross', (['a2', 'a3'], {}), '(a2, a3)\n', (12711, 12719), True, 'import numpy as np\n'), ((12768, 12784), 'numpy.cross', 'np.cross', (['a3', 'a1'], {}), '(a3, a1)\n', (12776, 12784), True, 'import numpy as np\n'), ((12833, 12849), 'numpy.cross', 'np.cross', (['a1', 'a2'], {}), '(a1, a2)\n', (12841, 12849), True, 'import numpy as np\n'), ((14678, 14706), 'numpy.linalg.norm', 'np.linalg.norm', (['[R[i], Z[j]]'], {}), '([R[i], Z[j]])\n', (14692, 14706), True, 'import numpy as np\n'), ((15502, 15533), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * sigma ** 2)'], {}), '(2 * np.pi * sigma ** 2)\n', (15509, 15533), True, 'import numpy as np\n'), ((19848, 19862), 'numpy.amax', 'np.amax', (['final'], 
{}), '(final)\n', (19855, 19862), True, 'import numpy as np\n'), ((28280, 28308), 'numpy.linalg.norm', 'np.linalg.norm', (['[R[i], Z[j]]'], {}), '([R[i], Z[j]])\n', (28294, 28308), True, 'import numpy as np\n'), ((31699, 31713), 'numpy.abs', 'np.abs', (['XBNSRD'], {}), '(XBNSRD)\n', (31705, 31713), True, 'import numpy as np\n'), ((34522, 34539), 'builtins.range', 'range', (['D.shape[2]'], {}), '(D.shape[2])\n', (34527, 34539), False, 'from builtins import range\n'), ((35038, 35058), 'numpy.stack', 'np.stack', (['xyzpts', '(-1)'], {}), '(xyzpts, -1)\n', (35046, 35058), True, 'import numpy as np\n'), ((36766, 36779), 'numpy.log10', 'np.log10', (['Hrz'], {}), '(Hrz)\n', (36774, 36779), True, 'import numpy as np\n'), ((36795, 36808), 'numpy.log10', 'np.log10', (['Hrz'], {}), '(Hrz)\n', (36803, 36808), True, 'import numpy as np\n'), ((39655, 39662), 'builtins.str', 'str', (['iz'], {}), '(iz)\n', (39658, 39662), False, 'from builtins import str\n'), ((2255, 2265), 'numpy.amax', 'np.amax', (['Z'], {}), '(Z)\n', (2262, 2265), True, 'import numpy as np\n'), ((5021, 5034), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5027, 5034), True, 'import numpy as np\n'), ((6242, 6269), 'numpy.sqrt', 'np.sqrt', (['(xsq + Y[iy] ** 2.0)'], {}), '(xsq + Y[iy] ** 2.0)\n', (6249, 6269), True, 'import numpy as np\n'), ((6521, 6548), 'numpy.sqrt', 'np.sqrt', (['(xsq + Z[iz] ** 2.0)'], {}), '(xsq + Z[iz] ** 2.0)\n', (6528, 6548), True, 'import numpy as np\n'), ((6798, 6825), 'numpy.sqrt', 'np.sqrt', (['(ysq + Z[iz] ** 2.0)'], {}), '(ysq + Z[iz] ** 2.0)\n', (6805, 6825), True, 'import numpy as np\n'), ((7295, 7320), 'builtins.str', 'str', (['wavelength_angstroms'], {}), '(wavelength_angstroms)\n', (7298, 7320), False, 'from builtins import str\n'), ((14756, 14778), 'numpy.arctan', 'np.arctan', (['(Z[j] / R[i])'], {}), '(Z[j] / R[i])\n', (14765, 14778), True, 'import numpy as np\n'), ((37606, 37636), 'numpy.sqrt', 'np.sqrt', (['(xp ** 2.0 + yp ** 2.0)'], {}), '(xp ** 2.0 + yp ** 2.0)\n', 
(37613, 37636), True, 'import numpy as np\n'), ((37986, 38016), 'numpy.sqrt', 'np.sqrt', (['(xp ** 2.0 + yp ** 2.0)'], {}), '(xp ** 2.0 + yp ** 2.0)\n', (37993, 38016), True, 'import numpy as np\n'), ((38363, 38393), 'numpy.sqrt', 'np.sqrt', (['(yp ** 2.0 + zp ** 2.0)'], {}), '(yp ** 2.0 + zp ** 2.0)\n', (38370, 38393), True, 'import numpy as np\n'), ((39251, 39276), 'builtins.str', 'str', (['wavelength_angstroms'], {}), '(wavelength_angstroms)\n', (39254, 39276), False, 'from builtins import str\n'), ((6307, 6320), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6313, 6320), True, 'import numpy as np\n'), ((6328, 6341), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6334, 6341), True, 'import numpy as np\n'), ((6586, 6599), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6592, 6599), True, 'import numpy as np\n'), ((6633, 6646), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6639, 6646), True, 'import numpy as np\n'), ((6887, 6900), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6893, 6900), True, 'import numpy as np\n'), ((6908, 6921), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6914, 6921), True, 'import numpy as np\n'), ((20968, 21003), 'numpy.argmax', 'np.argmax', (['final[rfin.size // 2, :]'], {}), '(final[rfin.size // 2, :])\n', (20977, 21003), True, 'import numpy as np\n'), ((28360, 28382), 'numpy.arctan', 'np.arctan', (['(Z[j] / R[i])'], {}), '(Z[j] / R[i])\n', (28369, 28382), True, 'import numpy as np\n'), ((33874, 33897), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (33895, 33897), False, 'import datetime\n'), ((33900, 33933), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'dtime'}), '(seconds=dtime)\n', (33918, 33933), False, 'import datetime\n'), ((37668, 37681), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (37674, 37681), True, 'import numpy as np\n'), ((37686, 37699), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (37692, 37699), True, 'import numpy as 
np\n'), ((38048, 38061), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (38054, 38061), True, 'import numpy as np\n'), ((38092, 38105), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (38098, 38105), True, 'import numpy as np\n'), ((38451, 38464), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (38457, 38464), True, 'import numpy as np\n'), ((38469, 38482), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (38475, 38482), True, 'import numpy as np\n'), ((6355, 6368), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6361, 6368), True, 'import numpy as np\n'), ((6611, 6624), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6617, 6624), True, 'import numpy as np\n'), ((6865, 6878), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6871, 6878), True, 'import numpy as np\n'), ((20378, 20387), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (20385, 20387), True, 'import matplotlib.pyplot as plt\n'), ((20430, 20439), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (20437, 20439), True, 'import matplotlib.pyplot as plt\n'), ((20482, 20491), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (20489, 20491), True, 'import matplotlib.pyplot as plt\n'), ((20540, 20549), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (20547, 20549), True, 'import matplotlib.pyplot as plt\n'), ((22266, 22275), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (22273, 22275), True, 'import matplotlib.pyplot as plt\n'), ((37713, 37726), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (37719, 37726), True, 'import numpy as np\n'), ((38073, 38086), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (38079, 38086), True, 'import numpy as np\n'), ((38432, 38445), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (38438, 38445), True, 'import numpy as np\n'), ((21521, 21530), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (21527, 21530), True, 'import numpy as np\n'), ((21604, 21613), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', 
(21610, 21613), True, 'import numpy as np\n')] |
# Copyright 2018 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
from warnings import warn
import math
import numpy as np
from scipy.interpolate import interp1d
from scipy.interpolate import griddata
class Mesh():
    """
    Mesh is a model object representing a 3D computational grid, optionally
    stretched in the y direction.

    Attributes set during initialisation:
        Nx, Ny, Nz -- number of grid nodes in each direction.
        Lx, Ly, Lz -- domain extents in each direction.
        BCx, BCy, BCz -- boundary-condition codes (0 is treated as periodic
            when computing grid spacings; -1 means "not yet set").
        stretched -- truthy when y-direction grid stretching is enabled.
        beta -- stretching parameter (only meaningful when stretched).
        yp -- stretched y coordinates (numpy array) or None for uniform grids.
    """
    def __init__(self, *arg, **kwargs):
        """Initialise the mesh.

        Two calling conventions are supported:
        - new style: keyword arguments ``n=(Nx,Ny,Nz)``, ``l=(Lx,Ly,Lz)``,
          ``bc=(BCx,BCy,BCz)`` and optionally ``beta``, ``stretched``, ``yp``;
        - old style (deprecated): a single JSON-style dictionary.

        :raises RuntimeError: if Nx/Ny/Nz, Lx/Ly/Lz or the boundary
            conditions were not all provided.
        """
        super().__init__()
        # Set default values
        self.Nx = 0; self.Ny = 0; self.Nz = 0
        self.Lx = 0; self.Ly = 0; self.Lz = 0
        self.BCx = -1; self.BCy = -1; self.BCz = -1
        self.stretched = False
        self.beta = 0
        self.yp = None
        # Figure out how to do initialisation
        if len(arg) == 1:
            warn("You are using an old-style initialisation, the future is dynamic!", DeprecationWarning)
            self._init_fromjson(arg[0])
        else:
            self._init_new(*arg, **kwargs)
        # Finish off initialisation
        if self.Nx * self.Ny * self.Nz == 0:
            raise RuntimeError("Need to set Nx, Ny and Nz")
        elif self.Lx * self.Ly * self.Lz == 0:
            raise RuntimeError("Need to set Lx, Ly and Lz")
        elif (self.BCx == -1) or (self.BCy == -1) or (self.BCz == -1):
            raise RuntimeError("Need to set boundary conditions!")
        # BUGFIX: the keyword-argument path never computed the derived grid
        # variables (dx/dy/dz, Nxm/Nym/Nzm), so e.g. get_grid() raised
        # AttributeError.  compute_derivvars() is idempotent, so repeating it
        # here after the old-style path is harmless.
        self.compute_derivvars()

    def _init_new(self, *args, **kwargs):
        """Populate mesh attributes from keyword arguments (new style)."""
        for arg, val in kwargs.items():
            if arg == "n":
                self.Nx = val[0]
                self.Ny = val[1]
                self.Nz = val[2]
            elif arg == "l":
                self.Lx = val[0]
                self.Ly = val[1]
                self.Lz = val[2]
            elif arg == "bc":
                self.BCx = val[0]
                self.BCy = val[1]
                self.BCz = val[2]
            elif arg == "beta":
                self.beta = val
            elif arg == "stretched":
                self.stretched = val
            elif arg == "yp":
                self.yp = val
            else:
                warn("Unrecognised input to mesh: %s" % arg)

    def _init_fromjson(self, instance_dictionary):
        """Populate mesh attributes from an old-style JSON dictionary."""
        self.description = instance_dictionary["description"]
        properties = instance_dictionary["properties"]
        self.Nx = properties["Nx"]
        self.Ny = properties["Ny"]
        self.Nz = properties["Nz"]
        self.Lx = properties["Lx"]
        self.Ly = properties["Ly"]
        self.Lz = properties["Lz"]
        self.BCx = properties["BCx"]
        self.BCy = properties["BCy"]
        self.BCz = properties["BCz"]
        try:
            self.stretched = properties["stretched"]
            self.beta = properties["beta"]
        except KeyError:
            # Stretching information is optional; default to a uniform grid.
            self.stretched = False
            self.beta = 0
        # Once we know the mesh layout we can set the derivative variables
        self.compute_derivvars()
        if self.stretched:
            try:
                self.yp = properties["yp"]
                with open(self.yp, "r") as ypfile:
                    j = 0
                    self.yp = np.zeros(self.Ny)
                    for row in ypfile:
                        self.yp[j] = float(row)
                        j += 1
                # calc_yp() is still needed for yeta (used by calc_ppy below);
                # the file-provided self.yp is kept.
                yp, yeta = self.calc_yp()
            except Exception:
                # No (readable) yp file given: fall back to analytic values.
                self.yp, yeta = self.calc_yp()
            self.ppy = self.calc_ppy(yeta)
        else:
            self.yp = None

    def compute_derivvars(self):
        """ Compute variables required by derivative functions.

        For a periodic direction (BC == 0) the spacing divides by N,
        otherwise by N - 1.  Also sets the compact finite-difference
        coefficients alpha, a, b.
        """
        if (self.BCx == 0):
            self.dx = self.Lx / float(self.Nx)
            self.Nxm = self.Nx
        else:
            self.dx = self.Lx / float(self.Nx - 1)
            self.Nxm = self.Nx - 1
        if (self.BCy == 0):
            self.dy = self.Ly / float(self.Ny)  # XXX This will not be correct for stretched grids
            self.Nym = self.Ny
        else:
            self.dy = self.Ly / float(self.Ny - 1)  # XXX This will not be correct for stretched grids
            self.Nym = self.Ny - 1
        if (self.BCz == 0):
            self.dz = self.Lz / float(self.Nz)
            self.Nzm = self.Nz
        else:
            self.dz = self.Lz / float(self.Nz - 1)
            self.Nzm = self.Nz - 1
        self.alpha = 1.0 / 3.0
        self.a = 14.0 / 9.0
        self.b = 1.0 / 9.0

    def calc_yp(self):
        """Compute the stretched y coordinates ``yp`` and the mapped
        coordinate ``yeta``.

        :return: tuple (yp, yeta) of numpy arrays of length Ny.
        """
        self.compute_derivvars()
        yinf = -self.Ly / 2.0
        den = 2.0 * self.beta * yinf
        xnum = -(yinf + math.sqrt((math.pi * self.beta)**2 + yinf**2))
        alpha = abs(xnum / den)
        yp = np.zeros(self.Ny)
        yeta = np.zeros(self.Ny)
        if (alpha != 0.0):
            yp[0] = 0.0
            yeta[0] = -0.5
            for j in range(1, self.Ny):
                # Mapped coordinate depends on the stretching variant.
                if (self.stretched == 1):
                    yeta[j] = (j - 1.0) / self.Nym
                elif (self.stretched == 2):
                    yeta[j] = (j - 1.0) / self.Nym - 0.5
                else:
                    yeta[j] = (j - 1.0) * 0.5 / self.Ny - 0.5
                den1 = math.sqrt(alpha * self.beta + 1)
                xnum = den1 / math.sqrt(alpha / math.pi) / math.sqrt(self.beta) \
                       / math.sqrt(math.pi)
                den = 2.0 * math.sqrt(alpha / math.pi) * math.sqrt(self.beta) \
                      * math.pi**1.5
                den3 = (math.sin(math.pi * yeta[j]))**2 / self.beta / math.pi \
                       + alpha / math.pi
                den4 = 2.0 * alpha * self.beta - math.cos(2.0 * math.pi * yeta[j]) + 1.0
                xnum1 = math.atan(xnum * math.tan(math.pi * yeta[j])) \
                        * den4 / den1 / den3 / den
                cst = math.sqrt(self.beta) * math.pi \
                      / (2.0 * math.sqrt(alpha) * math.sqrt(alpha * self.beta + 1.0))
                if (yeta[j] < 0.5):
                    if (self.stretched == 1):
                        yp[j] = xnum1 - cst - yinf
                    elif (self.stretched == 2):
                        yp[j] = xnum1 - cst + self.Ly
                    else:
                        yp[j] = 2 * (xnum1 - cst + self.Ly)
                elif (yeta[j] > 0.5):
                    if (self.stretched == 1):
                        yp[j] = xnum1 + cst - yinf
                    elif (self.stretched == 2):
                        yp[j] = xnum1 + cst + self.Ly
                    else:
                        yp[j] = 2 * (xnum1 - cst + self.Ly)
                else:
                    if (self.stretched == 1):
                        yp[j] = -yinf
                    elif (self.stretched == 2):
                        yp[j] = self.Ly
                    else:
                        yp[j] = 2 * self.Ly
        else:
            yp[0] = -1e10
            for j in range(1, self.Ny):
                yeta[j] = (j - 1.0) / float(self.Ny)
                yp[j] = -self.beta * math.cos(math.pi * yeta[j]) / math.sin(yeta[j] * math.pi)
        return yp, yeta

    def calc_ppy(self, yeta):
        """Compute the metric term ppy of the stretched grid.

        :param yeta: mapped coordinate array as returned by calc_yp().
        :return: numpy array of length Ny.
        """
        ppy = np.zeros(self.Ny)
        alpha = self.calc_alpha()
        if (self.stretched == 3):
            # Variant 3 evaluates the metric at the staggered locations.
            yetai = self.calc_yetai(alpha)
            for j in range(self.Ny):
                ppy[j] = self.Ly * (alpha / math.pi
                                    + (1.0 / math.pi / self.beta) * (math.sin(math.pi * yetai[j]))**2)
        else:
            for j in range(self.Ny):
                ppy[j] = self.Ly * (alpha / math.pi
                                    + (1.0 / math.pi / self.beta) * (math.sin(math.pi * yeta[j]))**2)
        return ppy

    def calc_alpha(self):
        """Return the stretching coefficient alpha derived from beta and Ly."""
        yinf = -self.Ly / 2.0
        den = 2.0 * self.beta * yinf
        xnum = -(yinf + math.sqrt(math.pi**2 * self.beta**2 + yinf**2))
        return abs(xnum / den)

    def calc_yetai(self, alpha):
        """Return the staggered mapped coordinate array used by calc_ppy.

        :param alpha: stretching coefficient from calc_alpha().
        """
        yetai = np.zeros(self.Ny)
        if (alpha != 0.0):
            for j in range(self.Ny):
                yetai[j] = (j - 0.5) * (0.5 / self.Nym) - 0.5
        else:
            for j in range(self.Ny):
                yetai[j] = (j - 1.0) / self.Ny
        return yetai

    def get_grid(self):
        """ Return the x,y,z arrays that describe the mesh.

        y comes from the stretched coordinates ``yp`` when available,
        otherwise from the uniform spacing ``dy``.
        """
        x, y, z = np.zeros(self.Nx), np.zeros(self.Ny), np.zeros(self.Nz)
        for i in range(self.Nx):
            x[i] = i * self.dx
        use_yp = True
        if self.yp is None:
            use_yp = False
        elif isinstance(self.yp, np.ndarray):
            if not self.yp.any():
                use_yp = False
        if use_yp:
            for j in range(self.Ny):
                y[j] = self.yp[j]
        else:
            for j in range(self.Ny):
                y[j] = j * self.dy
        for k in range(self.Nz):
            z[k] = k * self.dz
        return x, y, z
| [
"math.tan",
"math.sqrt",
"math.cos",
"numpy.zeros",
"warnings.warn",
"math.sin"
] | [((3863, 3880), 'numpy.zeros', 'np.zeros', (['self.Nx'], {}), '(self.Nx)\n', (3871, 3880), True, 'import numpy as np\n'), ((3893, 3910), 'numpy.zeros', 'np.zeros', (['self.Ny'], {}), '(self.Ny)\n', (3901, 3910), True, 'import numpy as np\n'), ((3923, 3940), 'numpy.zeros', 'np.zeros', (['self.Nz'], {}), '(self.Nz)\n', (3931, 3940), True, 'import numpy as np\n'), ((5366, 5383), 'numpy.zeros', 'np.zeros', (['self.Ny'], {}), '(self.Ny)\n', (5374, 5383), True, 'import numpy as np\n'), ((5399, 5416), 'numpy.zeros', 'np.zeros', (['self.Ny'], {}), '(self.Ny)\n', (5407, 5416), True, 'import numpy as np\n'), ((7805, 7822), 'numpy.zeros', 'np.zeros', (['self.Ny'], {}), '(self.Ny)\n', (7813, 7822), True, 'import numpy as np\n'), ((8614, 8631), 'numpy.zeros', 'np.zeros', (['self.Ny'], {}), '(self.Ny)\n', (8622, 8631), True, 'import numpy as np\n'), ((1173, 1270), 'warnings.warn', 'warn', (['"""You are using an old-style initialisation, the future is dynamic!"""', 'DeprecationWarning'], {}), "('You are using an old-style initialisation, the future is dynamic!',\n DeprecationWarning)\n", (1177, 1270), False, 'from warnings import warn\n'), ((8996, 9013), 'numpy.zeros', 'np.zeros', (['self.Nx'], {}), '(self.Nx)\n', (9004, 9013), True, 'import numpy as np\n'), ((9015, 9032), 'numpy.zeros', 'np.zeros', (['self.Ny'], {}), '(self.Ny)\n', (9023, 9032), True, 'import numpy as np\n'), ((9034, 9051), 'numpy.zeros', 'np.zeros', (['self.Nz'], {}), '(self.Nz)\n', (9042, 9051), True, 'import numpy as np\n'), ((5235, 5284), 'math.sqrt', 'math.sqrt', (['((math.pi * self.beta) ** 2 + yinf ** 2)'], {}), '((math.pi * self.beta) ** 2 + yinf ** 2)\n', (5244, 5284), False, 'import math\n'), ((5846, 5878), 'math.sqrt', 'math.sqrt', (['(alpha * self.beta + 1)'], {}), '(alpha * self.beta + 1)\n', (5855, 5878), False, 'import math\n'), ((8483, 8535), 'math.sqrt', 'math.sqrt', (['(math.pi ** 2 * self.beta ** 2 + yinf ** 2)'], {}), '(math.pi ** 2 * self.beta ** 2 + yinf ** 2)\n', (8492, 8535), False, 
'import math\n'), ((3486, 3503), 'numpy.zeros', 'np.zeros', (['self.Ny'], {}), '(self.Ny)\n', (3494, 3503), True, 'import numpy as np\n'), ((5986, 6004), 'math.sqrt', 'math.sqrt', (['math.pi'], {}), '(math.pi)\n', (5995, 6004), False, 'import math\n'), ((7706, 7733), 'math.sin', 'math.sin', (['(yeta[j] * math.pi)'], {}), '(yeta[j] * math.pi)\n', (7714, 7733), False, 'import math\n'), ((5938, 5958), 'math.sqrt', 'math.sqrt', (['self.beta'], {}), '(self.beta)\n', (5947, 5958), False, 'import math\n'), ((6062, 6082), 'math.sqrt', 'math.sqrt', (['self.beta'], {}), '(self.beta)\n', (6071, 6082), False, 'import math\n'), ((6292, 6325), 'math.cos', 'math.cos', (['(2.0 * math.pi * yeta[j])'], {}), '(2.0 * math.pi * yeta[j])\n', (6300, 6325), False, 'import math\n'), ((6478, 6498), 'math.sqrt', 'math.sqrt', (['self.beta'], {}), '(self.beta)\n', (6487, 6498), False, 'import math\n'), ((6561, 6595), 'math.sqrt', 'math.sqrt', (['(alpha * self.beta + 1.0)'], {}), '(alpha * self.beta + 1.0)\n', (6570, 6595), False, 'import math\n'), ((7676, 7703), 'math.cos', 'math.cos', (['(math.pi * yeta[j])'], {}), '(math.pi * yeta[j])\n', (7684, 7703), False, 'import math\n'), ((5909, 5935), 'math.sqrt', 'math.sqrt', (['(alpha / math.pi)'], {}), '(alpha / math.pi)\n', (5918, 5935), False, 'import math\n'), ((6033, 6059), 'math.sqrt', 'math.sqrt', (['(alpha / math.pi)'], {}), '(alpha / math.pi)\n', (6042, 6059), False, 'import math\n'), ((6542, 6558), 'math.sqrt', 'math.sqrt', (['alpha'], {}), '(alpha)\n', (6551, 6558), False, 'import math\n'), ((6146, 6173), 'math.sin', 'math.sin', (['(math.pi * yeta[j])'], {}), '(math.pi * yeta[j])\n', (6154, 6173), False, 'import math\n'), ((8103, 8131), 'math.sin', 'math.sin', (['(math.pi * yetai[j])'], {}), '(math.pi * yetai[j])\n', (8111, 8131), False, 'import math\n'), ((8311, 8338), 'math.sin', 'math.sin', (['(math.pi * yeta[j])'], {}), '(math.pi * yeta[j])\n', (8319, 8338), False, 'import math\n'), ((2453, 2497), 'warnings.warn', 'warn', 
(["('Unrecognised input to mesh: %s' % arg)"], {}), "('Unrecognised input to mesh: %s' % arg)\n", (2457, 2497), False, 'from warnings import warn\n'), ((6373, 6400), 'math.tan', 'math.tan', (['(math.pi * yeta[j])'], {}), '(math.pi * yeta[j])\n', (6381, 6400), False, 'import math\n')] |
import numpy as np
from itertools import product
import depthai as dai
from math import gcd
from pathlib import Path
from FPS import FPS, now
import cv2
import os, sys, re
# Directory containing this script; used to locate the bundled model blobs.
SCRIPT_DIR = Path(__file__).resolve().parent
# Default YuNet model blob.  The "180x320" part of the file name follows the
# HxW naming convention that YuNet.__init__ parses to infer the input size.
DEFAULT_YUNET_MODEL = str(SCRIPT_DIR / "models/face_detection_yunet_180x320_sh4.blob")
def find_isp_scale_params(size, is_height=True):
    """
    Find the closest valid size to 'size' and the corresponding parameters to setIspScale().
    This function is useful to work around a bug in depthai where ImageManip is scrambling
    images that have an invalid size.
    size : desired dimension in pixels (clamped to a minimum of 288)
    is_height : boolean that indicates if the value is the height or the width of the image
    Returns: valid size, (numerator, denominator)
    """
    # We want size >= 288
    size = max(size, 288)
    # The reference dimension is the full sensor dimension being scaled; the
    # 'other' dimension must stay even after applying the same n/d ratio.
    reference, other = (1080, 1920) if is_height else (1920, 1080)
    # Valid sizes are multiples of 16 expressible as reference * n / d
    # with n <= 16 and d <= 63.
    candidates = {}
    for s in range(16, reference, 16):
        divisor = gcd(reference, s)
        num = s // divisor
        den = reference // divisor
        if num <= 16 and den <= 63 and round(other * num / den) % 2 == 0:
            candidates[s] = (num, den)
    # Walk the candidates in ascending order, keeping the one closest to
    # 'size' (later candidates win ties); stop as soon as distance grows.
    best = None
    best_dist = -1
    for s in candidates:
        dist = abs(size - s)
        if best_dist == -1 or dist <= best_dist:
            best = s
            best_dist = dist
        else:
            break
    return best, candidates[best]
class YuNet:
    """
    YuNet Face Detector : https://github.com/opencv/opencv_zoo/tree/dev/models/face_detection_yunet
    Arguments:
    - model: path to Yunet blob
    - model_resolution: None or string "HxW" where H and W are the Yunet input resolution (Height, Width)
                    If None, the resolution is inferred from the model path "face_detection_yunet_HxW.blob"
    - input_src: frame source,
                - "rgb" or None: OAK* internal color camera,
                - "rgb_laconic": same as "rgb" but without sending the frames to the host,
                - a file path of an image or a video,
                - an integer (eg 0) for a webcam id,
    - conf_threshold: detection score threshold [0..1],
    - nms_threshold: Non Maximal Suppression threshold [0..1],
    - internal_fps : when using the internal color camera as input source, set its FPS to this value (calling setFps()).
    - internal_frame_height : when using the internal color camera, set the frame height (calling setIspScale()).
                    The width is calculated accordingly to height and depends on value of 'crop'
    - stats : boolean, when True, display some statistics when exiting.
    - trace: boolean, when True print some debug messages
    """
    def __init__(self,
                model = str(DEFAULT_YUNET_MODEL),
                model_resolution=None,
                input_src=None,
                conf_threshold=0.6,
                nms_threshold=0.3,
                top_k = 50,
                internal_fps=50,
                internal_frame_height=640,
                stats=False,
                trace=False,
                ):
        self.model = model
        if not os.path.isfile(model):
            print(f"Model path '{model}' does not exist !!!")
            sys.exit()
        if model_resolution is None: model_resolution = model
        # Try to infer from the model path
        match = re.search(r'.*?(\d+)x(\d+).*', model)
        if not match:
            print(f"Impossible to infer the model input resolution from model name '{model}' does not exist !!!")
            sys.exit()
        # Group 1 is the height, group 2 the width (file names are "...HxW...").
        self.nn_input_w = int(match.group(2))
        self.nn_input_h = int(match.group(1))
        print(f"Model : {self.model} - Input resolution: {self.nn_input_h}x{self.nn_input_w}")
        self.internal_fps = internal_fps
        self.conf_threshold = conf_threshold
        self.nms_threshold = nms_threshold
        self.top_k = top_k
        self.stats = stats
        self.trace = trace
        # SSD-style anchor configuration, one entry per feature-map level,
        # consumed by prior_gen() below.
        self.min_sizes = [[10, 16, 24], [32, 48], [64, 96], [128, 192, 256]]
        self.steps = [8, 16, 32, 64]
        self.variance = [0.1, 0.2]
        # Generate priors
        self.prior_gen()
        self.device = dai.Device()
        if input_src is None or input_src == "rgb" or input_src == "rgb_laconic":
            self.input_type = "rgb" # OAK* internal color camera
            self.laconic = input_src == "rgb_laconic" # Camera frames are not sent to the host
            self.video_fps = self.internal_fps # Used when saving the output in a video file. Should be close to the real fps
            width, self.scale_nd = find_isp_scale_params(internal_frame_height * 1920 / 1080, is_height=False)
            self.img_h = int(round(1080 * self.scale_nd[0] / self.scale_nd[1]))
            self.img_w = int(round(1920 * self.scale_nd[0] / self.scale_nd[1]))
            print(f"Internal camera image size: {self.img_w} x {self.img_h}")
        elif input_src.endswith('.jpg') or input_src.endswith('.png') :
            self.input_type= "image"
            self.img = cv2.imread(input_src)
            self.video_fps = 25
            self.img_h, self.img_w = self.img.shape[:2]
        else:
            self.input_type = "video"
            if input_src.isdigit():
                # NOTE(review): this assigns a throwaway local; probably
                # 'self.input_type = "webcam"' was intended.  Harmless today
                # because "webcam" would follow the same code paths as "video".
                input_type = "webcam"
                input_src = int(input_src)
            self.cap = cv2.VideoCapture(input_src)
            self.video_fps = int(self.cap.get(cv2.CAP_PROP_FPS))
            self.img_w = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            self.img_h = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            print("Video FPS:", self.video_fps)
        # We want to keep aspect ratio of the input images
        # So we may need to pad the images before feeding them to the model
        # 'padded_size' is the size of the image once padded.
        # Note that the padding when used is not applied on both sides (top and bottom,
        # or left and right) but only on the side opposite to the origin (top or left).
        # It makes calculations easier.
        self.iwnh_ihnw = self.img_w * self.nn_input_h / (self.img_h * self.nn_input_w)
        if self.iwnh_ihnw >= 1:
            self.padded_size = np.array((self.img_w, self.img_h * self.iwnh_ihnw)).astype(int)
        else:
            self.padded_size = np.array((self.img_w / self.iwnh_ihnw, self.img_h)).astype(int)
        print(f"Source image size: {self.img_w} x {self.img_h}")
        print(f"Padded image size: {self.padded_size[0]} x {self.padded_size[1]}")
        # Define and start pipeline
        usb_speed = self.device.getUsbSpeed()
        self.device.startPipeline(self.create_pipeline())
        print(f"Pipeline started - USB speed: {str(usb_speed).split('.')[-1]}")
        # Define data queues
        if self.input_type == "rgb":
            if not self.laconic:
                self.q_video = self.device.getOutputQueue(name="cam_out", maxSize=1, blocking=False)
            if self.trace:
                self.q_manip_out = self.device.getOutputQueue(name="manip_out", maxSize=1, blocking=False)
        else:
            self.q_nn_in = self.device.getInputQueue(name="nn_in")
        self.q_nn_out = self.device.getOutputQueue(name="nn_out", maxSize=4, blocking=False)
        self.fps = FPS()
        # Accumulated timings for the stats printed by exit().
        self.glob_rtrip_time = 0
        self.glob_posprocessing_time = 0
    def create_pipeline(self):
        """Build and return the DepthAI pipeline: optional ColorCamera +
        ImageManip (pad and resize) feeding the YuNet network node, with
        XLink queues matching the configured input source."""
        print("Creating pipeline...")
        # Start defining a pipeline
        pipeline = dai.Pipeline()
        pipeline.setOpenVINOVersion(version = dai.OpenVINO.Version.VERSION_2021_4)
        if self.input_type == "rgb":
            # ColorCamera
            print("Creating Color Camera...")
            cam = pipeline.createColorCamera()
            cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
            cam.setBoardSocket(dai.CameraBoardSocket.RGB)
            cam.setInterleaved(False)
            cam.setIspScale(self.scale_nd[0], self.scale_nd[1])
            cam.setFps(self.internal_fps)
            cam.setPreviewSize(self.img_w, self.img_h)
            if not self.laconic:
                cam_out = pipeline.createXLinkOut()
                cam_out.setStreamName("cam_out")
                cam_out.input.setQueueSize(1)
                cam_out.input.setBlocking(False)
                cam.video.link(cam_out.input)
            # The frame is padded to have the same ratio width/height
            # as the model input, and resized to the model input resolution
            print("Creating Image Manip node...")
            manip = pipeline.createImageManip()
            manip.setMaxOutputFrameSize(self.nn_input_w*self.nn_input_h*3)
            manip.inputImage.setQueueSize(1)
            manip.inputImage.setBlocking(False)
            # Warp source corners: only right/bottom padding, origin unchanged.
            points = [
                [0, 0],
                [self.padded_size[0], 0],
                [self.padded_size[0], self.padded_size[1]],
                [0, self.padded_size[1]]]
            point2fList = []
            for p in points:
                pt = dai.Point2f()
                pt.x, pt.y = p[0], p[1]
                point2fList.append(pt)
            manip.initialConfig.setWarpTransformFourPoints(point2fList, False)
            manip.initialConfig.setResize(self.nn_input_w, self.nn_input_h)
            cam.preview.link(manip.inputImage)
            # For debugging
            if self.trace:
                manip_out = pipeline.createXLinkOut()
                manip_out.setStreamName("manip_out")
                manip.out.link(manip_out.input)
        # Define YUNET model
        print("Creating YUNET Neural Network...")
        nn = pipeline.createNeuralNetwork()
        nn.setBlobPath(self.model)
        if self.input_type == "rgb":
            manip.out.link(nn.input)
        else:
            nn_in = pipeline.createXLinkIn()
            nn_in.setStreamName("nn_in")
            nn_in.out.link(nn.input)
        # YUNET output
        nn_out = pipeline.createXLinkOut()
        nn_out.setStreamName("nn_out")
        nn.out.link(nn_out.input)
        print("Pipeline created.")
        return pipeline
    def prior_gen(self):
        """Precompute the SSD-style prior (anchor) boxes for the model input
        resolution and store them in ``self.priors`` as an (N, 4) float32
        array of normalized (cx, cy, w, h)."""
        w, h = self.nn_input_w, self.nn_input_h
        # Successive /2 feature-map sizes; levels 3..6 carry the anchors.
        feature_map_2th = [int(int((h + 1) / 2) / 2),
                        int(int((w + 1) / 2) / 2)]
        feature_map_3th = [int(feature_map_2th[0] / 2),
                        int(feature_map_2th[1] / 2)]
        feature_map_4th = [int(feature_map_3th[0] / 2),
                        int(feature_map_3th[1] / 2)]
        feature_map_5th = [int(feature_map_4th[0] / 2),
                        int(feature_map_4th[1] / 2)]
        feature_map_6th = [int(feature_map_5th[0] / 2),
                        int(feature_map_5th[1] / 2)]
        feature_maps = [feature_map_3th, feature_map_4th,
                        feature_map_5th, feature_map_6th]
        priors = []
        for k, f in enumerate(feature_maps):
            min_sizes = self.min_sizes[k]
            for i, j in product(range(f[0]), range(f[1])): # i->h, j->w
                for min_size in min_sizes:
                    s_kx = min_size / w
                    s_ky = min_size / h
                    cx = (j + 0.5) * self.steps[k] / w
                    cy = (i + 0.5) * self.steps[k] / h
                    priors.append([cx, cy, s_kx, s_ky])
        print("Priors length =", len(priors))
        self.priors = np.array(priors, dtype=np.float32)
    def decode(self, inference):
        """Decode the raw 'loc'/'conf'/'iou' network outputs into an (N, 15)
        array of candidate detections: columns 0:4 are the bounding box
        (x1, y1, w, h), 4:14 the five landmark (x, y) pairs, 14 the score,
        all in padded-image pixel coordinates."""
        # print(inference.getAllLayerNames())
        loc = np.array(inference.getLayerFp16("loc"), dtype=np.float32).reshape(-1, 14)
        conf = np.array(inference.getLayerFp16("conf"), dtype=np.float32).reshape(-1, 2)
        iou_scores = np.array(inference.getLayerFp16("iou"), dtype=np.float32)
        # get score
        cls_scores = conf[:, 1]
        # clamp
        idx = np.where(iou_scores < 0.)
        iou_scores[idx] = 0.
        idx = np.where(iou_scores > 1.)
        iou_scores[idx] = 1.
        # Final score is the geometric mean of classification and IoU scores.
        scores = np.sqrt(cls_scores * iou_scores)
        scores = scores[:, np.newaxis]
        # get bboxes
        bboxes = np.hstack((
            (self.priors[:, 0:2] + loc[:, 0:2] * self.variance[0] * self.priors[:, 2:4]) * self.padded_size,
            (self.priors[:, 2:4] * np.exp(loc[:, 2:4] * self.variance)) * self.padded_size
        ))
        # (x_c, y_c, w, h) -> (x1, y1, w, h)
        bboxes[:, 0:2] -= bboxes[:, 2:4] / 2
        # get landmarks
        landmarks = np.hstack((
            (self.priors[:, 0:2] + loc[:, 4: 6] * self.variance[0] * self.priors[:, 2:4]) * self.padded_size,
            (self.priors[:, 0:2] + loc[:, 6: 8] * self.variance[0] * self.priors[:, 2:4]) * self.padded_size,
            (self.priors[:, 0:2] + loc[:, 8:10] * self.variance[0] * self.priors[:, 2:4]) * self.padded_size,
            (self.priors[:, 0:2] + loc[:, 10:12] * self.variance[0] * self.priors[:, 2:4]) * self.padded_size,
            (self.priors[:, 0:2] + loc[:, 12:14] * self.variance[0] * self.priors[:, 2:4]) * self.padded_size
        ))
        dets = np.hstack((bboxes, landmarks, scores))
        return dets
    def save_inference_to_npz(self, inference):
        """Debug helper: dump the raw network outputs to an .npz file."""
        loc = np.array(inference.getLayerFp16("loc"), dtype=np.float32).reshape(-1, 14)
        conf = np.array(inference.getLayerFp16("conf"), dtype=np.float32).reshape(-1, 2)
        iou = np.array(inference.getLayerFp16("iou"), dtype=np.float32)
        np.savez("models/build/yunet_output.npz", loc=loc, conf=conf, iou=iou, w=self.nn_input_w, h=self.nn_input_h)
    def postprocess(self, inference):
        """Decode the network outputs and apply Non Maximal Suppression.

        Returns an (M, 15) array of kept detections (possibly empty)."""
        # Decode
        dets = self.decode(inference)
        # NMS
        keep_idx = cv2.dnn.NMSBoxes(
            bboxes=dets[:, 0:4].tolist(),
            scores=dets[:, -1].tolist(),
            score_threshold=self.conf_threshold,
            nms_threshold=self.nms_threshold,
            top_k=self.top_k
        ) # box_num x class_num
        if len(keep_idx) > 0:
            dets = dets[keep_idx]
            # If opencv >= 4.5.4.58, NMSBoxes returns Nx1x15
            # Else, NMSBoxes returns 1x15
            if len(dets.shape) > 2:
                dets = np.squeeze(dets, axis=1)
            return dets # [:self.keep_top_k]
        else:
            return np.empty(shape=(0, 15))
    def next_frame(self):
        """
        Return:
        - frame: source input frame,
        - faces: detected faces as a 2D numpy arrays of dim (N, 15) with N = number of faces:
            - faces[:,0:4] represents the bounding box (x,y,width,height),
            - faces[:,4:14] represents the 5 facial landmarks coordinates (x,y),
            - faces[:,14] is the detection score.
        """
        self.fps.update()
        if self.input_type == "rgb":
            if self.laconic:
                # Laconic mode: frames stay on the device, return a black image.
                frame = np.zeros((self.img_h, self.img_w, 3), dtype=np.uint8)
            else:
                # Read color frame from the device
                in_video = self.q_video.get()
                frame = in_video.getCvFrame()
        else:
            if self.input_type == "image":
                frame = self.img.copy()
            else:
                ok, frame = self.cap.read()
                if not ok:
                    # End of video stream.
                    return None, None
            # Send color frame to the device
            # The frame is padded to have the same ratio width/height
            # as the model input, and resized to the model input resolution
            padded = cv2.copyMakeBorder(frame,
                                0,
                                self.padded_size[1] - self.img_h,
                                0,
                                self.padded_size[0] - self.img_w,
                                cv2.BORDER_CONSTANT)
            padded = cv2.resize(padded, (self.nn_input_w, self.nn_input_h), interpolation=cv2.INTER_AREA)
            if self.trace:
                cv2.imshow("NN input", padded)
            frame_nn = dai.ImgFrame()
            frame_nn.setTimestamp(now())
            frame_nn.setWidth(self.nn_input_w)
            frame_nn.setHeight(self.nn_input_h)
            frame_nn.setData(padded.transpose(2, 0, 1))
            self.q_nn_in.send(frame_nn)
        rtrip_time = now()
        # Get model inference
        inference = self.q_nn_out.get()
        _now = now()
        if self.input_type != "rgb":
            self.glob_rtrip_time += _now - rtrip_time
        faces = self.postprocess(inference)
        self.glob_posprocessing_time = now() - _now
        # For debugging
        if self.trace and self.input_type == "rgb":
            manip = self.q_manip_out.get()
            manip = manip.getCvFrame()
            cv2.imshow("NN input", manip)
        return frame, faces
    def exit(self):
        """Close the DepthAI device and, when stats is set, print the
        accumulated FPS/timing statistics."""
        self.device.close()
        # Print some stats
        if self.stats:
            nb_frames = self.fps.nb_frames()
            print(f"FPS : {self.fps.get_global():.1f} f/s (# frames = {nb_frames})")
            if self.input_type != "rgb":
                # Round-trip time is only accumulated for non-camera sources.
                print(f"Round trip (send frame + get back inference result) : {self.glob_rtrip_time/nb_frames*1000:.1f} ms")
            print(f"Post processing time (on the host) : {self.glob_posprocessing_time/nb_frames*1000:.1f} ms")
"numpy.sqrt",
"numpy.hstack",
"depthai.Point2f",
"depthai.ImgFrame",
"cv2.imshow",
"numpy.array",
"sys.exit",
"re.search",
"FPS.now",
"numpy.savez",
"pathlib.Path",
"math.gcd",
"numpy.where",
"numpy.exp",
"numpy.empty",
"depthai.Pipeline",
"numpy.squeeze",
"os.path.isfile",
"dept... | [((1139, 1156), 'math.gcd', 'gcd', (['reference', 's'], {}), '(reference, s)\n', (1142, 1156), False, 'from math import gcd\n'), ((3679, 3717), 're.search', 're.search', (['""".*?(\\\\d+)x(\\\\d+).*"""', 'model'], {}), "('.*?(\\\\d+)x(\\\\d+).*', model)\n", (3688, 3717), False, 'import os, sys, re\n'), ((4514, 4526), 'depthai.Device', 'dai.Device', ([], {}), '()\n', (4524, 4526), True, 'import depthai as dai\n'), ((7611, 7616), 'FPS.FPS', 'FPS', ([], {}), '()\n', (7614, 7616), False, 'from FPS import FPS, now\n'), ((7820, 7834), 'depthai.Pipeline', 'dai.Pipeline', ([], {}), '()\n', (7832, 7834), True, 'import depthai as dai\n'), ((11766, 11800), 'numpy.array', 'np.array', (['priors'], {'dtype': 'np.float32'}), '(priors, dtype=np.float32)\n', (11774, 11800), True, 'import numpy as np\n'), ((12220, 12246), 'numpy.where', 'np.where', (['(iou_scores < 0.0)'], {}), '(iou_scores < 0.0)\n', (12228, 12246), True, 'import numpy as np\n'), ((12289, 12315), 'numpy.where', 'np.where', (['(iou_scores > 1.0)'], {}), '(iou_scores > 1.0)\n', (12297, 12315), True, 'import numpy as np\n'), ((12361, 12393), 'numpy.sqrt', 'np.sqrt', (['(cls_scores * iou_scores)'], {}), '(cls_scores * iou_scores)\n', (12368, 12393), True, 'import numpy as np\n'), ((12830, 13364), 'numpy.hstack', 'np.hstack', (['((self.priors[:, 0:2] + loc[:, 4:6] * self.variance[0] * self.priors[:, 2:4\n ]) * self.padded_size, (self.priors[:, 0:2] + loc[:, 6:8] * self.\n variance[0] * self.priors[:, 2:4]) * self.padded_size, (self.priors[:, \n 0:2] + loc[:, 8:10] * self.variance[0] * self.priors[:, 2:4]) * self.\n padded_size, (self.priors[:, 0:2] + loc[:, 10:12] * self.variance[0] *\n self.priors[:, 2:4]) * self.padded_size, (self.priors[:, 0:2] + loc[:, \n 12:14] * self.variance[0] * self.priors[:, 2:4]) * self.padded_size)'], {}), '(((self.priors[:, 0:2] + loc[:, 4:6] * self.variance[0] * self.\n priors[:, 2:4]) * self.padded_size, (self.priors[:, 0:2] + loc[:, 6:8] *\n self.variance[0] * self.priors[:, 
2:4]) * self.padded_size, (self.\n priors[:, 0:2] + loc[:, 8:10] * self.variance[0] * self.priors[:, 2:4]) *\n self.padded_size, (self.priors[:, 0:2] + loc[:, 10:12] * self.variance[\n 0] * self.priors[:, 2:4]) * self.padded_size, (self.priors[:, 0:2] + \n loc[:, 12:14] * self.variance[0] * self.priors[:, 2:4]) * self.padded_size)\n )\n', (12839, 13364), True, 'import numpy as np\n'), ((13423, 13461), 'numpy.hstack', 'np.hstack', (['(bboxes, landmarks, scores)'], {}), '((bboxes, landmarks, scores))\n', (13432, 13461), True, 'import numpy as np\n'), ((13789, 13902), 'numpy.savez', 'np.savez', (['"""models/build/yunet_output.npz"""'], {'loc': 'loc', 'conf': 'conf', 'iou': 'iou', 'w': 'self.nn_input_w', 'h': 'self.nn_input_h'}), "('models/build/yunet_output.npz', loc=loc, conf=conf, iou=iou, w=\n self.nn_input_w, h=self.nn_input_h)\n", (13797, 13902), True, 'import numpy as np\n'), ((16700, 16705), 'FPS.now', 'now', ([], {}), '()\n', (16703, 16705), False, 'from FPS import FPS, now\n'), ((187, 201), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (191, 201), False, 'from pathlib import Path\n'), ((3439, 3460), 'os.path.isfile', 'os.path.isfile', (['model'], {}), '(model)\n', (3453, 3460), False, 'import os, sys, re\n'), ((3536, 3546), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3544, 3546), False, 'import os, sys, re\n'), ((3871, 3881), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3879, 3881), False, 'import os, sys, re\n'), ((14613, 14636), 'numpy.empty', 'np.empty', ([], {'shape': '(0, 15)'}), '(shape=(0, 15))\n', (14621, 14636), True, 'import numpy as np\n'), ((15812, 15937), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['frame', '(0)', '(self.padded_size[1] - self.img_h)', '(0)', '(self.padded_size[0] - self.img_w)', 'cv2.BORDER_CONSTANT'], {}), '(frame, 0, self.padded_size[1] - self.img_h, 0, self.\n padded_size[0] - self.img_w, cv2.BORDER_CONSTANT)\n', (15830, 15937), False, 'import cv2\n'), ((16154, 16243), 'cv2.resize', 'cv2.resize', (['padded', 
'(self.nn_input_w, self.nn_input_h)'], {'interpolation': 'cv2.INTER_AREA'}), '(padded, (self.nn_input_w, self.nn_input_h), interpolation=cv2.\n INTER_AREA)\n', (16164, 16243), False, 'import cv2\n'), ((16336, 16350), 'depthai.ImgFrame', 'dai.ImgFrame', ([], {}), '()\n', (16348, 16350), True, 'import depthai as dai\n'), ((16608, 16613), 'FPS.now', 'now', ([], {}), '()\n', (16611, 16613), False, 'from FPS import FPS, now\n'), ((16880, 16885), 'FPS.now', 'now', ([], {}), '()\n', (16883, 16885), False, 'from FPS import FPS, now\n'), ((17064, 17093), 'cv2.imshow', 'cv2.imshow', (['"""NN input"""', 'manip'], {}), "('NN input', manip)\n", (17074, 17093), False, 'import cv2\n'), ((5379, 5400), 'cv2.imread', 'cv2.imread', (['input_src'], {}), '(input_src)\n', (5389, 5400), False, 'import cv2\n'), ((5681, 5708), 'cv2.VideoCapture', 'cv2.VideoCapture', (['input_src'], {}), '(input_src)\n', (5697, 5708), False, 'import cv2\n'), ((9393, 9406), 'depthai.Point2f', 'dai.Point2f', ([], {}), '()\n', (9404, 9406), True, 'import depthai as dai\n'), ((14510, 14534), 'numpy.squeeze', 'np.squeeze', (['dets'], {'axis': '(1)'}), '(dets, axis=1)\n', (14520, 14534), True, 'import numpy as np\n'), ((15157, 15210), 'numpy.zeros', 'np.zeros', (['(self.img_h, self.img_w, 3)'], {'dtype': 'np.uint8'}), '((self.img_h, self.img_w, 3), dtype=np.uint8)\n', (15165, 15210), True, 'import numpy as np\n'), ((16282, 16312), 'cv2.imshow', 'cv2.imshow', (['"""NN input"""', 'padded'], {}), "('NN input', padded)\n", (16292, 16312), False, 'import cv2\n'), ((16385, 16390), 'FPS.now', 'now', ([], {}), '()\n', (16388, 16390), False, 'from FPS import FPS, now\n'), ((6539, 6590), 'numpy.array', 'np.array', (['(self.img_w, self.img_h * self.iwnh_ihnw)'], {}), '((self.img_w, self.img_h * self.iwnh_ihnw))\n', (6547, 6590), True, 'import numpy as np\n'), ((6648, 6699), 'numpy.array', 'np.array', (['(self.img_w / self.iwnh_ihnw, self.img_h)'], {}), '((self.img_w / self.iwnh_ihnw, self.img_h))\n', (6656, 6699), True, 
'import numpy as np\n'), ((12628, 12663), 'numpy.exp', 'np.exp', (['(loc[:, 2:4] * self.variance)'], {}), '(loc[:, 2:4] * self.variance)\n', (12634, 12663), True, 'import numpy as np\n')] |
from __future__ import division, print_function
from builtins import chr, range, object, zip, bytes
import io
import itertools
import time
from Bio.UniProt import GOA
from bisect import bisect_left
import dateutil
import pandas as pd
import pyopa
import tables
import threading
import numpy
import numpy.lib.recfunctions
import re
import json
import os
import collections
import logging
from .KmerEncoder import KmerEncoder
from .models import LazyProperty, KeyWrapper, ProteinEntry, Genome
from .geneontology import GeneOntology, OntologyParser, AnnotationParser, GOAspect
from xml.etree import ElementTree as et
# Module-level logger for this package.
logger = logging.getLogger(__name__)
# Raise stack limit for PyOPA ~400MB
threading.stack_size(4096*100000)
# Global initialisations
# NOTE(review): presumably the GO Annotation File (GAF) format version used
# when reading/writing annotation files — confirm at the usage sites.
GAF_VERSION = '2.1'
def count_elements(iterable):
    """Return the number of elements in an iterator in the most efficient way.

    Be aware that for unbound iterators, this method won't terminate!

    :param iterable: an iterable object.
    """
    tally = itertools.count()
    # Pair each element with the running counter and drain the pairs into a
    # zero-length deque: the whole iterable is consumed at C speed while
    # nothing is ever stored.
    collections.deque(zip(iterable, tally), maxlen=0)
    # The counter now yields the number of elements that were consumed.
    return next(tally)
# First pass: put an underscore before each capitalised word ("FooBar" -> "Foo_Bar").
_first_cap_re = re.compile('(.)([A-Z][a-z]+)')
# Second pass: split any remaining lower/digit-to-upper transition ("aB" -> "a_B").
_all_cap_re = re.compile('([a-z0-9])([A-Z])')
def to_snail_case(name):
    """function to convert from CamelCase to snail_case"""
    with_word_breaks = _first_cap_re.sub(r'\1_\2', name)
    return _all_cap_re.sub(r'\1_\2', with_word_breaks).lower()
class Database(object):
"""This is the main interface to the oma database. Queries
will typically be issued by methods of this object. Typically
the result of queries will be :py:class:`numpy.recarray` objects."""
EXPECTED_DB_SCHEMA = "3.2"
    def __init__(self, db):
        """Open an OMA hdf5 database.

        :param db: either a path to the hdf5 file (opened read-only) or an
            already opened :class:`tables.File` handle.
        :raises ValueError: if `db` is neither a str nor a `tables.File`.
        :raises DBVersionError: if the major schema version of the file
            differs from :attr:`EXPECTED_DB_SCHEMA`.
        """
        if isinstance(db, str):
            logger.info('opening {} for read-only'.format(db))
            self.db = tables.open_file(db, 'r')
        elif isinstance(db, tables.File):
            self.db = db
        else:
            raise ValueError(str(db) + ' is not a valid database type')
        try:
            db_version = self.db.get_node_attr('/', 'db_schema_version')
        except AttributeError:
            # files written before the attribute existed count as version 1.0
            db_version = "1.0"
        logger.info('database version: {}'.format(db_version))
        if db_version != self.EXPECTED_DB_SCHEMA:
            exp_tup = self.EXPECTED_DB_SCHEMA.split('.')
            db_tup = db_version.split('.')
            if db_tup[0] != exp_tup[0]:
                # major version mismatch -> refuse to open this file
                raise DBVersionError('Unsupported database version: {} != {} ({})'
                                     .format(db_version, self.EXPECTED_DB_SCHEMA, self.db.filename))
            else:
                # minor mismatch is tolerated, but warn that some calls may fail
                logger.warning("outdated database version, but only minor version change: "
                               "{} != {}. Some functions might fail"
                               .format(db_version, self.EXPECTED_DB_SCHEMA))
        self.db_schema_version = tuple(int(z) for z in db_version.split("."))
        try:
            self.seq_search = SequenceSearch(self)
        except DBConsistencyError as e:
            logger.exception("Cannot load SequenceSearch. Any future call to seq_search will fail!")
            # placeholder: attribute access on this object will raise later
            self.seq_search = object()
        self.id_resolver = IDResolver(self)
        self.id_mapper = IdMapperFactory(self)
        genomes = [Genome(self, g) for g in self.db.root.Genome.read()]
        self.tax = Taxonomy(self.db.root.Taxonomy.read(),
                            genomes={g.ncbi_taxon_id: g for g in genomes})
        self._re_fam = None
        self.format_hogid = None
        # inspect the stored HOG ids to configure _re_fam / format_hogid
        self._set_hogid_schema()
@LazyProperty
def gene_ontology(self):
"""returns GeneOntology object containing hierarchy
of terms using the is_a and part_of relations. See
:meth:`load_gene_ontology` to parametrize the
creation of GeneOntology object."""
return self.load_gene_ontology(GeneOntology)
def load_gene_ontology(self, factory=None, rels=None):
"""Instantiate GeneOntology object
By default, a GeneOntology object is returned based on
the default relations (which are defined in :mod:`.gene_ontology`)
The factory parameter allows to specify an subtype of
GeneOntology, e.g. :class:`.gene_ontology.FreqAwareGeneOntology`,
The rels parameter should be a list of relation strings that
should be used as parents relations.
:param factory: GeneOntology factory
:param rels: list of rels for parent relations
:returns: GeneOntology object"""
try:
fp = io.StringIO(self.db.root.Ontologies.GO.read().tobytes().decode('utf-8'))
except tables.NoSuchNodeError:
p = os.path.join(os.path.dirname(self.db.filename), 'go-basic.obo')
fp = open(p, 'rt')
if factory is None:
factory = GeneOntology
go = factory(OntologyParser(fp), rels=rels)
go.parse()
fp.close()
return go
def get_hdf5_handle(self):
"""return the handle to the database hdf5 file"""
return self.db
def get_conversion_date(self):
"""return the conversion end date from the DB attributes"""
return dateutil.parser.parse(self.db.root._v_attrs['conversion_end'])
def ensure_entry(self, entry):
"""This method allows to use an entry or an entry_nr.
If necessary it will load the entry from the entry_nr,
otherwise returning the same object again.
:param entry: the entry_nr of a protein to be loaded or a
protein entry."""
try:
t = entry['AltSpliceVariant']
return entry
except (TypeError, AttributeError, IndexError):
if isinstance(entry, (int, numpy.number)):
return self.entry_by_entry_nr(entry)
raise TypeError('Invalid type to retrieve an Entry')
except Exception:
raise TypeError('Invalid type to retrieve an Entry')
    def entry_by_entry_nr(self, entry_nr):
        """Returns the entry from the /Protein/Entries table
        corresponding to entry_nr.

        :param int entry_nr: a numeric identifier for the protein
            entry
        :raises ValueError: if the fallback search does not find exactly
            one row with the requested entry number"""
        # fast path: rows are normally stored in EntryNr order, so the row
        # for entry_nr sits at index entry_nr - 1
        entry = self.db.root.Protein.Entries[entry_nr - 1]
        if entry['EntryNr'] != entry_nr:
            # positional assumption violated; fall back to an explicit
            # (slower) search by value
            logger.warning('EntryNr {} not at position {}. Using index instead'.format(entry_nr, entry_nr - 1))
            entry = self.db.root.Protein.Entries.read_where(
                'EntryNr == {:d}'.format(entry_nr))
            if len(entry) != 1:
                raise ValueError("there are {} entries with entry_nr {}".format(len(entry), entry_nr))
            entry = entry[0]
        return entry
def _set_hogid_schema(self):
"""Determines the used HOG ID schema
Some versions of the database have HOG IDs of the form
"HOG:0000001" and others without the prefix (e.g. standalone)
or with the prefix, but without padding. This method checks
which schema is used and sets the appropriate member vars
"""
re_id = re.compile(b'(?P<prefix>HOG:)(?P<nr>\d+)')
for entry in self.db.root.Protein.Entries:
m = re_id.match(entry['OmaHOG'])
if m is None:
continue
nr = m.group('nr')
if len(nr) >= 7 and not nr.startswith(b'0'):
continue # a case where we cannot determine if padded nr
is_padded = nr.startswith(b'0')
prefix = m.group('prefix').decode()
if prefix is None:
prefix = ''
fmt = "{}{{:{}d}}".format(prefix, "07" if is_padded else "")
self._re_fam = re.compile('{}(?P<fam>\d{})'
.format(prefix, "{7,}" if is_padded else "+")
.encode('ascii'))
self.format_hogid = lambda fam: fmt.format(fam)
logger.info("setting HOG ID schema: re_fam: {}, hog_fmt: {}"
.format(self._re_fam, fmt))
return
raise DBConsistencyError('no protein in a hog')
def all_proteins_of_genome(self, genome):
"""return all protein entries of a genome"""
rng = self.id_mapper['OMA'].genome_range(genome)
prot_tab = self.get_hdf5_handle().get_node('/Protein/Entries')
return prot_tab.read_where('(EntryNr >= {}) & (EntryNr <= {})'.format(rng[0], rng[1]))
def main_isoforms(self, genome):
"""returns the proteins that are the main isoforms of a genome.
The main isoform is in this context the isoform that we used in OMA to
infer the orthologs. It is the one variant that has the most alignment
matches to all other gnomes.
The genome parameter should be the UniProtSpeciesCode of the species of
interest. If it is a numeric value, the genome parameter is interpreted
as the protein entrynr. The method returns then the main isoforms for
the species to which this protein belongs.
:Note: OMA only predicts orthologs for the main isoform, so there is no
difference if you work with only the main isoforms or all proteins of
a genome in terms of orthologs.
:param genome: UniProtSpeciesCode of the genome of interest, or a gene
number (EntryNr) from the genome of interest.
"""
rng = self.id_mapper['OMA'].genome_range(genome)
prot_tab = self.get_hdf5_handle().get_node('/Protein/Entries')
return prot_tab.read_where(
'(EntryNr >= {}) & (EntryNr <= {}) & ((AltSpliceVariant == EntryNr) | (AltSpliceVariant == 0))'
.format(rng[0], rng[1]))
def get_splicing_variants(self, entry):
e = self.ensure_entry(entry)
if e['AltSpliceVariant'] == 0:
return numpy.array([e], dtype=e.dtype)
# TODO: create index on AltSpliceVariant column?!
return self.get_hdf5_handle().get_node('/Protein/Entries').read_where(
'(EntryNr >= {:d}) & (EntryNr < {:d}) & (AltSpliceVariant == {:d})'
.format(e['EntryNr']-100, e['EntryNr']+100, e['AltSpliceVariant']))
def _get_vptab(self, entry_nr):
return self._get_pw_tab(entry_nr, 'VPairs')
def _get_pw_tab(self, entry_nr, subtab):
genome = self.id_mapper['OMA'].genome_of_entry_nr(entry_nr)['UniProtSpeciesCode'].decode()
return self.db.get_node('/PairwiseRelation/{}/{}'.format(genome, subtab))
def count_vpairs(self, entry_nr):
vptab = self._get_vptab(entry_nr)
try:
cnt = count_elements(vptab.where('(EntryNr1=={:d})'.format(entry_nr)))
except (TypeError, ValueError):
cnt = 0
return cnt
def count_homoeologs(self, entry_nr):
pwtab = self._get_pw_tab(entry_nr, 'within')
homolog_typ_nr = pwtab.get_enum('RelType')['homeolog']
try:
cnt = count_elements(pwtab.where('(EntryNr1=={:d}) & (RelType == {:d})'.format(entry_nr, homolog_typ_nr)))
except (TypeError, ValueError):
cnt = 0
return cnt
def _get_pw_data(self, entry_nr, tab, typ_filter=None, extra_cols=None):
query = "(EntryNr1 == {:d})".format(entry_nr)
if typ_filter is not None:
query += " & (RelType == {:d})".format(typ_filter)
dat = tab.read_where(query)
typ = tab.get_enum('RelType')
cols = ['EntryNr1', 'EntryNr2', 'Score', 'Distance']
if extra_cols is not None:
cols.extend(extra_cols)
res = numpy.lib.recfunctions.append_fields(
dat[cols],
names='RelType',
data=[typ(x) for x in dat['RelType']],
usemask=False)
return res
def get_vpairs(self, entry_nr):
"""returns the verified pairs of a query protein.
This method returns an instance of a :class:`numpy.recarray` class
containing the verified pairs of a query protein entry.
The returned array contains columns with EntryNr1 and EntryNr2 to
identify the pair together with RelType (indicating the subtype of
orthology), the alignment score and the distance. The score and
distance will be set to -1 if unknown.
:param int entry_nr: the numeric entry_nr of the query protein."""
vp_tab = self._get_vptab(entry_nr)
return self._get_pw_data(entry_nr, vp_tab)
def get_within_species_paralogs(self, entry_nr):
"""returns the within species paralogs of a given entry
This method returns a :class:`numpy.recarray` instance
containing the close paralogs. Close paralogs are within
species paralogs that are inparalogs to at least one
ortholog of the query gene in OMA.
The returned array contains columns with EntryNr1 and EntryNr2 to
identify the pair together with RelType (indicating the subtype of
paralogy), the alignment score and the distance. The score and
distance will be set to -1 if unknown.
:param int entry_nr: the numeric entry_id of the query protein"""
within_species_paralogs = self._get_pw_tab(entry_nr, 'within')
return self._get_pw_data(entry_nr, within_species_paralogs)
def get_homoeologs(self, entry_nr):
within_species = self._get_pw_tab(entry_nr, 'within')
homolog_typ_nr = within_species.get_enum('RelType')['homeolog']
return self._get_pw_data(entry_nr, within_species,
typ_filter=homolog_typ_nr,
extra_cols=['SyntenyConservationLocal', 'Confidence'])
def neighbour_genes(self, entry_nr, window=1):
"""Returns neighbor genes around a query gene.
This method returns a tuple containing a numpy recarray with
gene entries located around the query gene, and an index
pointing to the query gene. The genes are sorted according to
their position on the chromosome.
The *windows* parameter specifies the number of genes up- and
downstream of the query gene that should be reported. Note
that the actual number can be smaller if the query gene is close
to a chromosome start or end.
:param entry_nr: the entry number of the query gene
:param window: the number of neighboring genes on each
side to return"""
if window <= 0 or not isinstance(window, int):
raise ValueError('windows parameters must be a positive integer value')
dat = self.entry_by_entry_nr(entry_nr)
target_chr = dat['Chromosome']
genome_range = self.id_mapper['OMA'].genome_range(entry_nr)
f = 5
data = self.db.root.Protein.Entries.read_where(
'(EntryNr >= {:d}) & (EntryNr <= {:d}) & '
'(Chromosome == {!r}) & '
'((AltSpliceVariant == 0) |'
' (AltSpliceVariant == EntryNr))'.format(
max(genome_range[0], entry_nr - f * window),
min(genome_range[1], entry_nr + f * window),
target_chr))
data.sort(order=['EntryNr'])
idx = data['EntryNr'].searchsorted(entry_nr)
res = data[max(0, idx - window):min(len(data), idx + window + 1)]
idx = res['EntryNr'].searchsorted(entry_nr)
return res, idx
def parse_hog_id(self, hog_id):
hog_id = hog_id if isinstance(hog_id, bytes) else hog_id.encode('ascii')
m = self._re_fam.match(hog_id)
if m is not None:
return int(m.group('fam'))
else:
raise ValueError('invalid hog id format')
def hog_family(self, entry):
entry = self.ensure_entry(entry)
m = self._re_fam.match(entry['OmaHOG'])
if m is None:
raise Singleton(entry)
return int(m.group('fam'))
def hog_levels_of_fam(self, fam_nr):
"""get all taxonomic levels covered by a family.
The family coresponds to the toplevel numeric id of a HOG,
i.e. for HOG:002421 the fam_nr should be 2421. If a HOG
covers a certain level more than once, it will be returned
several times.
:param fam_nr: the numeric id of the family (== Toplevel HOG)
"""
return self.db.root.HogLevel.read_where(
'(Fam=={})'.format(fam_nr))['Level']
def get_subhogids_at_level(self, fam_nr, level):
"""get all the hog ids within a given family at a given taxonomic
level of interest.
After a duplication in an ancestor lineage, there exists multiple
sub-hogs for any taxonomic level after the duplication. This method
allows to get the list of hogids at the requested taxonomic level.
E.g. assume in family 1 (HOG:0000001) there has been a duplication
between Eukaryota and Metazoa. this method would return for
get_subhogids_at_level(1, 'Eukaryota') --> ['HOG:0000001']
and for
get_subhogids_at_level(1, 'Metazoa') --> ['HOG:0000001.1a', 'HOG:0000001.1b']
:param fam_nr: the numeric family id
:param level: the taxonomic level of interest"""
lev = level if isinstance(level, bytes) else level.encode('ascii')
return self.db.root.HogLevel.read_where(
'(Fam=={}) & (Level=={!r})'.format(fam_nr, lev))['ID']
def member_of_hog_id(self, hog_id, level=None):
"""return an array of protein entries which belong to a given hog_id.
E.g. if hog_id = 'HOG122.1a', the method returns all the proteins that
have either exactly this hog id or an inparalogous id such a HOG122.1a.4b.2a
If you are only interested in the members of a specific lineage (identified
through its taxonomic range), you can pass the taxonomic range as an
additional argument. Only the proteins of genomes belonging to this clade
will be returned. Otherwise, all proteins with having this specific hog_id
will be returned.
:param str hog_id: the requested hog_id.
:param level: the taxonomic level of interest
:type level: str or None
:return: a numpy.array with the protein entries belonging to the requested hog.
:rtype: :class:`numpy.ndarray`
:Note: Even if you obtained a certain hog_id using
:py:meth:`get_subhogids_at_level`
using a certain level, if you do not specify the level in
:meth:`member_of_hog_id` again, you will likely get proteins from other
clades. Only if it happens that the deepest level of the hog_id
coincides with the taxonomic range of interest, the two will be identical.
"""
hog_range = self._hog_lex_range(hog_id)
# get the proteins which have that HOG number
memb = self.db.root.Protein.Entries.read_where(
'({!r} <= OmaHOG) & (OmaHOG < {!r})'.format(*hog_range))
if level is not None:
memb = [x for x in memb if level.encode('ascii') in self.tax.get_parent_taxa(
self.id_mapper['OMA'].genome_of_entry_nr(x['EntryNr'])['NCBITaxonId'])['Name']]
return memb
def iter_members_of_hog_id(self, hog_id):
"""iterates over all proteins that belong to a specific hog_id.
A hog_id might be an ID of the following form: HOG:0000212.1a
This method will yield all proteins in the form of
:class:`ProteinEntry` instances that are part of this hog_id.
:param str hog_id: the requested HOG ID.
:return: :py:class:`ProteinEntry` objects
:rtype: iter(:class:`ProteinEntry`)"""
hog_range = self._hog_lex_range(hog_id)
it = self.db.root.Protein.Entries.where(
'({!r} <= OmaHOG) & (OmaHOG < {!r})'.format(*hog_range))
for row in it:
yield ProteinEntry(self, row.fetch_all_fields())
def member_of_fam(self, fam):
"""returns an array of protein entries which belong to a given fam"""
if not isinstance(fam, (int, numpy.number)):
raise ValueError('expect a numeric family id')
return self.member_of_hog_id(self.format_hogid(fam))
def hog_members(self, entry, level):
"""get hog members with respect to a given taxonomic level.
The method will return a list of protein entries that are all
member of the same hog with respect to the taxonomic range
of interest.
:param entry: an entry or entry_nr of a query protein
:param level: the taxonomic level of interest"""
query = self.ensure_entry(entry)
members = self.hog_members_from_hog_id(query['OmaHOG'], level)
if query not in members:
raise ValueError(u"Level '{0:s}' undefined for query gene".format(level))
return members
def hog_members_from_hog_id(self, hog_id, level):
"""get hog members with respect to a given taxonomic level.
The method will return a list of protein entries that are all
member of the same hog with respect to the taxonomic range
of interest.
:param bytes hog_id: the query hog id
:param str level: the taxonomic level of interest"""
if isinstance(hog_id, str):
hog_id = hog_id.encode('ascii')
query_fam = self.parse_hog_id(hog_id)
hoglev = None
for hog_candidate in self.db.root.HogLevel.where(
'(Fam == {:d}) & (Level == {!r})'.format(query_fam, level.encode('ascii'))):
if hog_id.startswith(hog_candidate['ID']):
hoglev = hog_candidate
break
if hoglev is None:
raise ValueError(u'Level "{0:s}" undefined for query gene'.format(level))
# get the entries which have this hogid (or a sub-hog)
members = self.member_of_hog_id(hoglev['ID'])
if level != 'LUCA':
# last, we need to filter the proteins to the tax range of interest
members = [x for x in members if level.encode('ascii') in self.tax.get_parent_taxa(
self.id_mapper['OMA'].genome_of_entry_nr(x['EntryNr'])['NCBITaxonId'])['Name']]
return members
def get_orthoxml(self, fam):
"""returns the orthoxml of a given toplevel HOG family
:param fam: numeric id of requested toplevel hog"""
idx = self.db.root.OrthoXML.Index.read_where('Fam == {:d}'.format(fam))
if len(idx) < 1:
raise ValueError('cannot retrieve orthoxml for {}'.format(fam))
idx = idx[0]
return self.db.root.OrthoXML.Buffer[
idx['HogBufferOffset']:idx['HogBufferOffset'] + idx['HogBufferLength']].tostring()
def _hog_lex_range(self, hog):
"""return the lexographic range of a hog.
This can be used to search of sub-hogs which are nested in
the query hog. The semantics is such that
_hog_lex_range[0] <= hog < _hog_lex_range[1].
This is equivalent to say that a sub-hog starts with the
query hog."""
hog_str = hog.decode() if isinstance(hog, bytes) else hog
return hog_str.encode('ascii'), (hog_str[0:-1] + chr(1 + ord(hog_str[-1]))).encode('ascii')
def oma_group_members(self, group_id):
"""get the member entries of an oma group.
This method returns a numpy array of protein entries that form
an oma group. If the group id is invalid (not positive
integer value or a valid Fingerprint), an `InvalidId` Exception
is raised.
:param group_id: numeric oma group id or Fingerprint"""
group_nr = self.resolve_oma_group(group_id)
members = self.db.root.Protein.Entries.read_where('OmaGroup=={:d}'.format(group_nr))
return members
def resolve_oma_group(self, group_id):
if isinstance(group_id, int) and 0 < group_id <= self.get_nr_oma_groups():
return group_id
elif isinstance(group_id, numpy.integer):
return self.resolve_oma_group(int(group_id))
elif isinstance(group_id, (bytes, str)):
if group_id.isdigit():
return self.resolve_oma_group(int(group_id))
if isinstance(group_id, str):
group_id = group_id.encode('utf-8')
if group_id == b'n/a':
raise InvalidId('Invalid ID (n/a) for an OMA Group')
if not self.seq_search.contains_only_valid_chars(group_id):
raise InvalidId("Invalid ID: non-amino-accids characters in Fingerprint or sequence pattern")
if len(group_id) == 7:
# most likely a fingerprint. let's check that first
group_meta_tab = self.db.get_node('/OmaGroups/MetaData')
try:
e = next(group_meta_tab.where('(Fingerprint == {!r})'
.format(group_id)))
return int(e['GroupNr'])
except StopIteration:
pass
# search in suffix array
entry_nrs = self.seq_search.exact_search(
group_id.decode(), only_full_length=False)
if len(entry_nrs) == 0:
raise InvalidId('No sequence contains search pattern')
group_nrs = {self.entry_by_entry_nr(nr)['OmaGroup'] for nr in entry_nrs}
group_nrs.discard(0)
if len(group_nrs) == 1:
return int(group_nrs.pop())
elif len(group_nrs) == 0:
raise InvalidId("Sequence with pattern '{}' does not belong to any group"
.format(group_id.decode()))
else:
raise AmbiguousID("sequence pattern matches several oma groups", candidates=group_nrs)
raise InvalidId('Invalid type to determine OMA Group: {} (type: {})'.format(group_id, type(group_id)))
def oma_group_metadata(self, group_nr):
"""get the meta data associated with a OMA Group
The meta data contains the fingerprint and the keywords infered for this group.
The method retuns this information as a dictionary. The parameter must be
the numeric oma group nr.
:param int group_nr: a numeric oma group id."""
if not isinstance(group_nr, (int, numpy.integer)) or group_nr < 0:
raise InvalidId('Invalid group nr: {} (type: {})'.format(group_nr, type(group_nr)))
meta_tab = self.db.get_node('/OmaGroups/MetaData')
try:
e = next(meta_tab.where('GroupNr == {:d}'.format(group_nr)))
kw_buf = self.db.get_node('/OmaGroups/KeywordBuffer')
res = {'fingerprint': e['Fingerprint'].decode(),
'group_nr': int(e['GroupNr']),
'keywords': kw_buf[e['KeywordOffset']:e['KeywordOffset']+e['KeywordLength']].tostring().decode(),
'size': int(e['NrMembers'])}
return res
except StopIteration:
raise InvalidId('invalid group nr')
def get_nr_oma_groups(self):
"""returns the number of OMA Groups in the database"""
tab = self.db.get_node('/Protein/Entries')
try:
idx = tab.colindexes['OmaGroup'][-1]
return int(tab[idx]['OmaGroup'])
except KeyError:
hist = self.group_size_histogram('oma')
return int(hist['Count'].sum())
def get_nr_toplevel_hogs(self):
"""returns the number of toplevel hogs, i.e. roothogs"""
hist = self.group_size_histogram('hog')
return int(hist['Count'].sum())
def group_size_histogram(self, typ=None):
"""returns a table with two columns, e.g. Size and Count.
if typ is set to 'oma' or not set, then the data for the
oma groups is returned. if it is set to 'hog', the data for
the rootlevel hogs is returned.
:param typ: either 'oma' or 'hog', defaults to 'oma'"""
if typ is None or typ.lower() == 'oma':
tabname = 'OmaGroup'
elif typ.lower() == 'hog':
tabname = 'OmaHOG'
else:
raise ValueError('{} is not a valid group typ'.format(typ))
tab = self.db.get_node('/Summary/{}_size_hist'.format(tabname))
return tab.read()
def get_sequence(self, entry):
"""get the protein sequence of a given entry as a string
:param entry: the entry or entry_nr for which the sequence is requested"""
entry = self.ensure_entry(entry)
seqArr = self.db.get_node('/Protein/SequenceBuffer')
seq = seqArr[entry['SeqBufferOffset']:entry['SeqBufferOffset'] + entry['SeqBufferLength'] - 1]
return seq.tostring()
def get_cdna(self, entry):
"""get the protein sequence of a given entry as a string"""
entry = self.ensure_entry(entry)
seqArr = self.db.get_node('/Protein/CDNABuffer')
seq = seqArr[entry['CDNABufferOffset']:entry['CDNABufferOffset'] + entry['CDNABufferLength'] - 1]
return seq.tostring()
def get_description(self, entry):
entry = self.ensure_entry(entry)
descArr = self.db.get_node('/Protein/DescriptionBuffer')
desc = descArr[entry['DescriptionOffset']:entry['DescriptionOffset'] + entry['DescriptionLength']]
return desc.tostring()
def get_release_name(self):
return str(self.db.get_node_attr('/', 'oma_version'))
def get_exons(self, entry_nr):
genome = self.id_mapper['OMA'].genome_of_entry_nr(entry_nr)['UniProtSpeciesCode'].decode()
locus_tab = self.db.get_node('/Protein/Locus/{}'.format(genome))
return locus_tab.read_where('EntryNr == {}'.format(entry_nr))
def get_domains(self, entry_nr):
try:
return self.db.root.Annotations.Domains.read_where('EntryNr == {:d}'.format(entry_nr))
except ValueError as e:
raise InvalidId('require a numeric entry id, got {}'.format(entry_nr))
def get_representative_entry_of_hog(self, fam):
"""Get the information of the representative entry for a given family (roothog).
For each family we select a represenative entry that has the most prevalent
domain architecture. This method returns the entry_nr that we selected, together
with the domain architecture and its prevalence. In case no representative entry
has been found, the method raises an :class:`NoReprEntry` Exception.
:param int fam: The numeric family number."""
domprev_tab = self.db.get_node('/HOGAnnotations/DomainArchPrevalence')
try:
row = next(domprev_tab.where('Fam == {:d}'.format(fam)))
fields = (to_snail_case(z) for z in domprev_tab.dtype.names)
res = dict(zip(fields, row.fetch_all_fields()))
res['domains'] = self.get_domains(int(row['ReprEntryNr']))
res['prevalence'] = 100.0 * res['prev_count'] / res['fam_size']
return res
except StopIteration:
raise NoReprEntry()
def get_prevalent_domains(self, fam):
# Gets the prevalent domains for a particular top level HOG / family.
# returns: (family_row, similar_families)
# family_row contains: family ID, representative entry, DA prevalence.
# similar_families contains: same, with similarity score. Ordered.
domprev_tab = self.db.get_node('/HOGAnnotations/DomainArchPrevalence')
dom2hog_tab = self.db.get_node('/HOGAnnotations/Domains')
try:
fam_row = self.get_representative_entry_of_hog(fam)
except NoReprEntry:
return None, None
# Get the family's consensus DA and count them...
fam_da = collections.Counter(fam_row['domains']['DomainId'])
# Retrieve the relevant other families...
sim_fams = collections.defaultdict(collections.Counter)
for d in fam_da:
for hog_with_domain in dom2hog_tab.where('DomainId == {}'.format(d)):
sim_fams[hog_with_domain['Offset']][d] += 1
if len(sim_fams) == 0:
return fam_row, None
# Now get similar families and order them by similarity
sim_fams_df = pd.DataFrame(domprev_tab[list(sim_fams.keys())])
sim_fams_df['sim'] = list(map(lambda i: sum((sim_fams[i] & fam_da).values()),
sim_fams.keys()))
# Sort by similarity & family size
sim_fams_df.sort_values(['sim', 'FamSize'], inplace=True, ascending=False)
sim_fams_df.reset_index(drop=True, inplace=True)
# Prevalence
sim_fams_df['Prev'] = 100.0 * (sim_fams_df['PrevCount'] / sim_fams_df['FamSize'])
return fam_row, sim_fams_df
def get_gene_ontology_annotations(self, entry_nr, stop=None, as_dataframe=False, as_gaf=False):
"""Retrieve the gene ontology annotations for an entry or entry_range
The method returns the gene ontology annotations stored in the database
for a given entry (if `stop` parameter is not provided) or for all the
entries between [entry_nr, stop). Like in slices, the stop entry_nr is
not inclusive, where as the entry_nr - the start of the slice - is.
By default the result are returned as numpy arrays of type
:class:`tablefmt.GeneOntologyTable`. If as_dataframe is set to true, the
result will be a pandas dataframe, and if as_gaf is set to true, a gaf
formatted text file with the annotations is returned.
:param int entry_nr: numeric protein entry
"""
# function to check if an annotation term is obsolete
def filter_obsolete_terms(term):
try:
self.gene_ontology.term_by_id(term)
return True
except (KeyError, ValueError):
return False
try:
if stop is None:
query = 'EntryNr == {:d}'.format(entry_nr)
else:
if not isinstance(stop, int) or stop < entry_nr:
raise TypeError("stop argument needs to be a entry number that is larger than 'entry_nr'")
query = '(EntryNr >= {:d}) & (EntryNr < {:d})'.format(entry_nr, stop)
annots = self.db.root.Annotations.GeneOntology.read_where(query)
# for test database we also have some obsolete terms. we need to filter those
if len(annots) > 0:
not_obsolete = numpy.vectorize(filter_obsolete_terms)(annots['TermNr'])
annots = annots[not_obsolete]
except ValueError as e:
raise InvalidId('require a numeric entry id, got {}'.format(entry_nr))
if not as_dataframe and not as_gaf:
return annots
# early return if no annotations available
if len(annots) == 0:
return '!gaf-version: {}\n'.format(GAF_VERSION) if as_gaf else None
df = pd.DataFrame(annots)
# 1R DB
df['DB'] = 'OMA'
# 2R DB Object ID
df['DB_Object_ID'] = df['EntryNr'].apply(self.id_mapper['Oma'].map_entry_nr)
# 3R DB Object Symbol
df['DB_Object_Symbol'] = df['DB_Object_ID']
# 4O Qualifier
df['Qualifier'] = ''
# 5R GO ID
df['GO_ID'] = df['TermNr'].apply(lambda t: 'GO:{:07d}'.format(t))
# 6R DB:Reference
df['DB:Reference'] = df['Reference'].apply(lambda x: x.decode('ascii'))
# 7R Evidence code
df['Evidence'] = df['Evidence'].apply(lambda x: x.decode('ascii'))
# 8O With (or) From
df['With'] = ''
# 9R Aspect
df['Aspect'] = df['GO_ID'].apply(lambda t: GOAspect.to_char(self.gene_ontology.term_by_id(t).aspect))
# 10O DB Object Name
df['DB_Object_Name'] = ''
# 11O DB Object Synonym (|Synonym)
df['Synonym'] = ''
# 12R DB Object Type
df['DB_Object_Type'] = 'protein'
# 13R Taxon (|taxon)
df['Taxon_ID'] = df['EntryNr'].apply(lambda e: 'taxon:{:d}'
.format(self.id_mapper['Oma'].genome_of_entry_nr(e)['NCBITaxonId']))
# 14R Date
df['Date'] = self.get_conversion_date().strftime('%Y%m%d')
# 15R Assigned by - TODO: FIX FOR NON OMA!!!
df['Assigned_By'] = df['DB']
# 16O Annotation Extension
df['Annotation_Extension'] = ''
# 17O Gene Product Form ID
df['Gene_Product_Form_ID'] = ''
df = df[GOA.GAF20FIELDS]
return (df if not as_gaf else
('!gaf-version: {}\n'.format(GAF_VERSION) +
'\n'.join(df.apply(lambda e: '\t'.join(map(str, e)), axis=1)) +
'\n'))
class SuffixSearcher(object):
    """Exact substring search backed by a suffix-array index stored in hdf5.

    The index consists of three arrays: the text ``buffer``, the sorted
    ``suffix`` array of start offsets into the buffer, and an ``offset``
    lookup array used to map buffer positions back to item numbers."""
    def __init__(self, suffix_index_node, buffer=None, lookup=None):
        # suffix_index_node may be the hdf5 group holding all three arrays,
        # or the suffix array itself with buffer/lookup passed explicitly
        if isinstance(suffix_index_node, tables.Group):
            self.buffer_arr = buffer if buffer else suffix_index_node._f_get_child('buffer')
            self.suffix_arr = suffix_index_node._f_get_child('suffix')
            self.lookup_arr = lookup if lookup else suffix_index_node._f_get_child('offset')
        else:
            self.buffer_arr = buffer
            self.suffix_arr = suffix_index_node
            self.lookup_arr = lookup
        # materialize the lookup array in memory for fast searchsorted calls
        self.lookup_arr = self.lookup_arr[:]
    def find(self, query):
        """Return the indices of the items whose text contains `query`.

        :param bytes query: pattern to search for
        :return: array of matching indices, or an empty list if `query`
            is empty or not found"""
        n = len(query)
        if n > 0:
            # wrap the suffix array so bisect compares the n-byte prefix of
            # each suffix with the query
            slicer = KeyWrapper(self.suffix_arr,
                                key=lambda i:
                                self.buffer_arr[i:(i + n)].tobytes())
            ii = bisect_left(slicer, query)
            if ii and (slicer[ii] == query):
                # Left most found.
                jj = ii + 1
                while (jj < len(slicer)) and (slicer[jj] == query):
                    # zoom to end -> -> ->
                    jj += 1
                # Find entry numbers and filter to remove incorrect entries
                return numpy.searchsorted(self.lookup_arr, self.suffix_arr[ii:jj]+1) - 1
        return []
class SequenceSearch(object):
'''
Contains all the methods for searching the sequence
TODO: implement taxonomic filtering.
'''
from .KmerEncoder import DIGITS_AA
PROTEIN_CHARS = frozenset(map(lambda x: x.decode(), DIGITS_AA))
PAM100 = pyopa.generate_env(pyopa.load_default_environments()['log_pam1'],
100)
def __init__(self, db):
# Backup reference to used DB method.
self.get_sequence = db.get_sequence
# Assume the index is stored in the main DB if there is no .idx file
self.db = db.get_hdf5_handle()
self.db_idx = (self.db if not os.path.isfile(self.db.filename + '.idx') else
tables.open_file(self.db.filename + '.idx', 'r'))
# Protein search arrays.
try:
self.seq_idx = self.db_idx.root.Protein.SequenceIndex
if isinstance(self.seq_idx, tables.link.ExternalLink):
self.seq_idx = self.seq_idx()
self.kmer_lookup = self.db_idx.root.Protein.KmerLookup
if isinstance(self.kmer_lookup, tables.link.ExternalLink):
self.kmer_lookup = self.kmer_lookup()
except (AttributeError, OSError) as e:
raise DBConsistencyError("Suffix index for protein sequences is not available: "+str(e))
self.seq_buff = self.db.root.Protein.SequenceBuffer
self.n_entries = len(self.db.root.Protein.Entries)
# Kmer lookup arrays / kmer setup
self.k = self.kmer_lookup._f_getattr('k')
self.encoder = KmerEncoder(self.k)
logger.info('KmerLookup of size k={} loaded'.format(self.k))
def get_entry_length(self, ii):
"""Get length of a particular entry."""
return self.db.root.Protein.Entries[ii - 1]['SeqBufferLength'] - 1
@LazyProperty
def entry_idx(self):
'''
Caches the index lookup part of the SA.
'''
return self.seq_idx[:self.n_entries]
def get_entrynr(self, ii):
'''
Get the entry number(s) corresponding to a location in the sequence
buffer.
'''
return (numpy.searchsorted(self.entry_idx, ii) + 1)
def contains_only_valid_chars(self, seq):
"""returns true iff `seq` contains only valid AA chars.
The method ignores the case of the seq, i.e. upper
or lower case chars both match.
:param (bytes, str) seq: sequence to be checked
:returns bool
"""
if isinstance(seq, bytes):
seq = seq.decode()
return all(map(lambda c: c in self.PROTEIN_CHARS, seq.upper()))
def _sanitise_seq(self, seq):
'''
Sanitise a string protein sequence. Deletes "invalid" characters.
TODO: add functionality for biopython sequence / skbio sequence.
'''
assert type(seq) == str
return ''.join(filter(lambda c: c in self.PROTEIN_CHARS,
seq.upper())).encode('ascii')
def search(self, seq, n=None, coverage=None, is_sanitised=None):
'''
Searches the database for entries that match. If can't find an exact
match performs a kmer + local alignment approach to approximate
search.
'''
seq = (self._sanitise_seq(seq) if not is_sanitised else seq)
m = self.exact_search(seq, is_sanitised=True)
# TODO: taxonomic filtering.
if len(m) == 0:
# Do approximate search
m = self.approx_search(seq, n=n, coverage=coverage, is_sanitised=True)
# TODO: taxonomic filtering.
return ('approx', m) if m is not [] else None
else:
return 'exact', m
    def exact_search(self, seq, only_full_length=True, is_sanitised=None):
        '''
        Performs an exact match search using the suffix array.

        :param seq: protein sequence to look up
        :param only_full_length: if True, only return entries whose entire
            sequence equals *seq* (not merely entries containing it as a
            prefix at some suffix position).
        :param is_sanitised: truthy if *seq* is already sanitised; otherwise
            it is sanitised here first.
        :returns: list of matching entry numbers (possibly empty).
        '''
        # TODO: work out whether to just use the approximate search and then
        # check if any are actually exact matches. Do the counting and then
        # do an equality checking on any of the sequences that have the correct
        # number of kmer matches.
        seq = (seq if is_sanitised else self._sanitise_seq(seq))
        nn = len(seq)
        if nn > 0:
            # Wrap the suffix array so that bisect compares against the nn
            # bytes of buffer content starting at each suffix position.
            z = KeyWrapper(self.seq_idx,
                           key=lambda i:
                           self.seq_buff[i:(i + nn)].tobytes())
            ii = bisect_left(z, seq, lo=self.n_entries)
            if ii and (z[ii] == seq):
                # Left most found.
                jj = ii + 1
                while (jj < len(z)) and (z[jj] == seq):
                    # zoom to end -> -> ->
                    jj += 1
                # Find entry numbers and filter to remove incorrect entries
                return list(filter(lambda e: (not only_full_length) or self.get_entry_length(e) == nn,
                                   self.get_entrynr(self.seq_idx[ii:jj])))
        # Nothing found.
        return []
    def approx_search(self, seq, n=None, is_sanitised=None, coverage=None):
        '''
        Performs an approximate search: kmer counting against the kmer
        lookup table, followed by local alignment of the best candidates.
        (The previous docstring incorrectly described this as the exact
        suffix-array search.)

        :param seq: protein sequence to look up
        :param n: maximum number of candidates to align (default 50; a
            value <= 0 aligns all candidates passing the coverage cut-off).
        :param is_sanitised: truthy if *seq* is already sanitised.
        :param coverage: minimum fraction of shared kmers (default 0.0).
        :returns: list of ``(entry_nr, {'kmer_coverage', 'score',
            'alignment'})`` tuples sorted by decreasing alignment score.
        '''
        seq = (seq if is_sanitised else self._sanitise_seq(seq))
        n = (n if n is not None else 50)
        coverage = (0.0 if coverage is None else coverage)
        # 1. Do kmer counting vs entry numbers TODO: switch to np.unique?
        c = collections.Counter()
        for z in map(lambda kmer: numpy.unique(self.kmer_lookup[int(kmer)],
                                               return_counts=True),
                     self.encoder.decompose(seq)):
            c.update(dict(zip(*z)))
        # 2. Filter to top n if necessary
        # z is the number of kmers in seq; x[1] / z is the kmer coverage.
        z = len(seq) - self.k + 1
        cut_off = coverage * z
        c = [(x[0], (x[1] / z)) for x in c.items() if x[1] >= cut_off]
        c = (sorted(c,
                    reverse=True,
                    key=lambda x: x[1])[:n] if n > 0 else c)
        # 3. Do local alignments and return count / score / alignment
        if len(c) > 0:
            return sorted([(m[0], {'kmer_coverage': m[1],
                                   'score': a[0],
                                   'alignment': a[1]})
                           for (m, a) in self._align_entries(seq, c)],
                          key=lambda z: z[1]['score'],
                          reverse=True)
        return []
    def _align_entries(self, seq, matches):
        """Locally align *seq* against each candidate in *matches*.

        The alignment runs in a separate worker thread (the module
        configures a larger thread stack size at import time, presumably
        for pyopa's benefit -- see module top).

        :param seq: sanitised (bytes) query sequence
        :param matches: iterable of (entry_nr, coverage) candidates
        :returns: zip of (match, (score, alignment-pair)) tuples.
        """
        # Does the alignment for the approximate search
        def align(s1, s2s, env, aligned):
            for s2 in s2s:
                z = pyopa.align_double(s1, s2, env, False, False, True)
                a = pyopa.align_strings(s1, s2, env, False, z)
                aligned.append((z[0], ((a[0].convert_readable(),
                                        (z[3], z[1])),
                                       (a[1].convert_readable(),
                                        (z[4], z[2])))))
        aligned = []
        query = pyopa.Sequence(seq.decode('ascii'))
        entries = list(map(lambda m:
                           pyopa.Sequence(self.get_sequence(int(m[0])).decode('ascii')),
                           matches))
        t = threading.Thread(target=align,
                             args=(query, entries, self.PAM100, aligned))
        t.start()
        t.join()
        assert (len(aligned) > 0), 'Alignment thread crashed.'
        return zip(matches, aligned)
class OmaIdMapper(object):
    """Maps between numeric protein entry numbers and OMA ids
    (5-letter UniProtSpeciesCode + per-genome number)."""
    def __init__(self, db):
        self.genome_table = db.get_hdf5_handle().root.Genome.read()
        # pre-computed sort orders for binary searches on the genome table
        self._entry_off_keys = self.genome_table.argsort(order=('EntryOff'))
        self._genome_keys = self.genome_table.argsort(
            order=('UniProtSpeciesCode'))
        self._taxid_keys = self.genome_table.argsort(order=('NCBITaxonId'))
        self._omaid_re = re.compile(r'(?P<genome>[A-Z][A-Z0-9]{4})(?P<nr>\d+)')
        self._db = db
    def genome_of_entry_nr(self, e_nr):
        """returns the genome code belonging to a given entry_nr"""
        # find the genome whose EntryOff range contains e_nr (1-based)
        idx = self.genome_table['EntryOff'].searchsorted(
            e_nr - 1, side='right',
            sorter=self._entry_off_keys)
        return self.genome_table[self._entry_off_keys[idx - 1]]
    def map_entry_nr(self, entry_nr):
        """Convert a numeric entry number into its OMA id string,
        e.g. ``'HUMAN00012'``."""
        genome = self.genome_of_entry_nr(entry_nr)
        return "{0:s}{1:05d}".format(genome['UniProtSpeciesCode'].decode(),
                                     entry_nr - genome['EntryOff'])
    def genome_from_UniProtCode(self, code):
        """Return the genome table row for a UniProt species code.

        :raises UnknownSpecies: if *code* is not in the genome table."""
        code = code.encode('ascii')
        idx = self.genome_table['UniProtSpeciesCode'].searchsorted(
            code, sorter=self._genome_keys)
        try:
            genome = self.genome_table[self._genome_keys[idx]]
        except IndexError:
            # code sorts after all known codes
            raise UnknownSpecies('{} is unknown'.format(code))
        if genome['UniProtSpeciesCode'] != code:
            raise UnknownSpecies('{} is unknown'.format(code))
        return genome
    def genome_from_taxid(self, taxid):
        """Return the genome table row for an NCBI taxonomy id.

        :raises UnknownSpecies: if *taxid* is not in the genome table."""
        try:
            taxid = int(taxid)
            idx = self.genome_table['NCBITaxonId'].searchsorted(
                taxid, sorter=self._taxid_keys)
            genome = self.genome_table[self._taxid_keys[idx]]
        except (IndexError, ValueError):
            raise UnknownSpecies('TaxonId "{}" is unknown'.format(taxid))
        if genome['NCBITaxonId'] != taxid:
            raise UnknownSpecies('TaxonId "{}" is unknown'.format(taxid))
        return genome
    def identify_genome(self, code):
        """identify genome based on either a UniProtSpeciesCode or an
        NCBI Taxonomy Id"""
        if isinstance(code, int) or code.isdigit():
            return self.genome_from_taxid(code)
        else:
            return self.genome_from_UniProtCode(code)
    def omaid_to_entry_nr(self, omaid):
        """returns the internal numeric entrynr from a
        UniProtSpeciesCode+nr id. this is the inverse
        function of 'map_entry_nr'.

        :raises InvalidOmaId: if the id does not parse or the number is
            outside the genome's entry range."""
        match = self._omaid_re.match(omaid)
        if match is None:
            raise InvalidOmaId(omaid)
        code, nr = match.group('genome'), int(match.group('nr'))
        genome = self.genome_from_UniProtCode(code)
        if nr <= 0 or nr > genome['TotEntries']:
            raise InvalidOmaId(omaid)
        return genome['EntryOff'] + int(match.group('nr'))
    def genome_range(self, query):
        """returns the internal range of EntryNr associated with
        'query'. 'query' can be either a numeric id of a protein
        or a UniProtSpeciesCode of a genome. If 'query' is unknown
        by the database, an InvalidOmaId exception is raised.
        The return range is a tuple of length two, and the numbers
        indicated the *inclusive* boundaries, e.g. (1,5) indicates
        that the entries 1,2,3,4 and 5 belong to the query species"""
        if isinstance(query, (int, numpy.integer)):
            genome_row = self.genome_of_entry_nr(query)
            if query <= 0 or query > genome_row['EntryOff'] + genome_row['TotEntries']:
                raise InvalidOmaId(query)
        else:
            genome_row = self.genome_from_UniProtCode(query)
        return (genome_row['EntryOff'] + 1,
                genome_row['EntryOff'] + genome_row['TotEntries'],)
    def species_ordering(self, root=None):
        """get ordering of the genomes with respect to taxonomy.
        This method returns a linear ordering of all the genomes with
        respect to their lineage, i.e. genomes that are evolutionary
        "close" to each other appear close in the ordering.
        Optionally, one can give a root genome, that will be the species
        the ordering is going to start with.

        :param root: UniProtSpeciesCode of the root genome.
        :returns: a dict mapping species code (str) to its position in
            the ordering."""
        if root is None:
            root = self.genome_table[0]['UniProtSpeciesCode']
        root_genome = self.genome_from_UniProtCode(root)
        # root-first lineage (list of level names) for every genome
        lins = {g['UniProtSpeciesCode']: [lev['Name'] for lev in self._db.tax.get_parent_taxa(g['NCBITaxonId'])][::-1]
                for g in self.genome_table}
        root_lin = lins[root_genome['UniProtSpeciesCode']]
        sort_key = {}
        for g, lin_g in lins.items():
            # find the index of the last taxonomic level shared with root
            for k in range(min(len(root_lin), len(lin_g))):
                if root_lin[k] != lin_g[k]:
                    k -= 1
                    break
            # NOTE(review): if one lineage is a prefix of the other, the
            # loop finishes without the decrement -- confirm this is the
            # intended tie handling.
            sort_key[g] = (-k, lin_g)
        sorted_genomes = sorted(list(sort_key.keys()), key=lambda g: sort_key[g])
        return {g.decode(): v for v, g in enumerate(sorted_genomes)}
class AmbiguousID(Exception):
    """Raised when an identifier resolves to several candidate entries."""

    def __init__(self, message, candidates):
        super(AmbiguousID, self).__init__(message, candidates)
        # keep the candidate set accessible to callers
        self.candidates = candidates
class IDResolver(object):
    """Resolve protein ids (numeric, OMA id or cross-reference) to the
    numeric entry number of the current OMA release."""

    def __init__(self, db):
        col = db.get_hdf5_handle().root.Protein.Entries.cols.EntryNr
        self.max_entry_nr = col[int(col.index[-1])]
        self._db = db

    def _from_numeric(self, e_id):
        """Interpret *e_id* as a numeric entry number, validating its range."""
        nr = int(e_id)
        if 0 < nr <= self.max_entry_nr:
            return nr
        raise InvalidId('{0:d} out of protein range: {1:}'.format(nr, e_id))

    def _from_omaid(self, e_id):
        """Interpret *e_id* as an OMA id (species code + number)."""
        mapper = self._db.id_mapper['OMA']
        return int(mapper.omaid_to_entry_nr(e_id))

    def search_xrefs(self, e_id):
        """search for all xrefs. TODO: what happens if xref is ambiguous?"""
        xref_mapper = self._db.id_mapper['XRef']
        res = {x['EntryNr'] for x in xref_mapper.search_xref(e_id)}
        if len(res) == 0:
            # let's try to mach as substring using suffix array case insensitive
            res = {x['EntryNr'] for x in xref_mapper.search_xref(e_id, match_any_substring=True)}
            if len(res) == 0:
                raise InvalidId(e_id)
        if len(res) > 1:
            # check whether its only different isoforms, then return canonical isoform
            splice_variants = {x['AltSpliceVariant']
                               for x in (self._db.entry_by_entry_nr(eNr) for eNr in res)}
            logger.info('xref {} has {} entries, {} splice variants'.format(
                e_id, len(res), len(splice_variants)))
            if len(splice_variants) > 1 or 0 in splice_variants:
                raise AmbiguousID('Cross-ref "{}" is ambiguous'.format(e_id), res)
            else:
                res = splice_variants
        return int(res.pop())

    def resolve(self, e_id):
        """maps an id to the entry_nr of the current OMA release."""
        try:
            return self._from_numeric(e_id)
        except ValueError:
            pass
        try:
            return self._from_omaid(e_id)
        except (InvalidOmaId, UnknownSpecies):
            return self.search_xrefs(e_id)
class Taxonomy(object):
    """Taxonomy provides an interface to navigate the taxonomy data.
    The input data is the same as what is stored in the Database in
    table "/Taxonomy"."""

    def __init__(self, data, genomes=None, _valid_levels=None):
        """
        :param data: numpy structured array with NCBITaxonId, ParentTaxonId
            and Name columns (content of the /Taxonomy table).
        :param genomes: optional dict keyed by NCBITaxonId.
        :param _valid_levels: internal; pre-computed set of valid hog levels.
        """
        if not isinstance(data, numpy.ndarray):
            raise ValueError('Taxonomy expects a numpy table.')
        self.genomes = genomes if genomes is not None else {}
        self.tax_table = data
        # sort orders used for binary search by taxid resp. parent taxid
        self.taxid_key = self.tax_table.argsort(order=('NCBITaxonId'))
        self.parent_key = self.tax_table.argsort(order=('ParentTaxonId'))
        self.all_hog_levels = _valid_levels
        if _valid_levels is None:
            self._load_valid_taxlevels()

    def _load_valid_taxlevels(self):
        """Load the set of valid hog levels, either from the TaxLevels.drw
        file (DARWIN_BROWSERDATA_PATH) or, as a fallback, from the clean
        names in the taxonomy table itself."""
        forbidden_chars = re.compile(r'[^A-Za-z. -]')
        try:
            with open(os.environ['DARWIN_BROWSERDATA_PATH'] + '/TaxLevels.drw') as f:
                taxStr = f.read()
            tax_json = json.loads(("[" + taxStr[14:-3] + "]").replace("'", '"'))
            self.all_hog_levels = frozenset([t.encode('ascii') for t in
                                             tax_json if forbidden_chars.search(t) is None])
        except (IOError, KeyError):
            self.all_hog_levels = frozenset([name for name in self.tax_table['Name']
                                             if forbidden_chars.search(name.decode()) is None])

    def _table_idx_from_numeric(self, tid):
        """Return the row index in tax_table for numeric taxid *tid*.

        :raises InvalidTaxonId: if *tid* is not in the table."""
        i = self.tax_table['NCBITaxonId'].searchsorted(
            tid, sorter=self.taxid_key)
        # NOTE(review): a tid greater than all known ids makes searchsorted
        # return len(taxid_key) and raises IndexError below instead of
        # InvalidTaxonId -- confirm whether that case can occur.
        idx = self.taxid_key[i]
        if self.tax_table[idx]['NCBITaxonId'] != tid:
            raise InvalidTaxonId(u"{0:d} is an invalid/unknown taxonomy id".format(tid))
        return idx

    def _get_root_taxon(self):
        """Return the root row of the taxonomy.

        A single root is returned as-is; multiple disjoint roots are joined
        under a synthetic 'LUCA' row."""
        i1 = self.tax_table['ParentTaxonId'].searchsorted(0, sorter=self.parent_key)
        i2 = self.tax_table['ParentTaxonId'].searchsorted(0, sorter=self.parent_key, side='right')
        if i2 - i1 == 0:
            raise DBConsistencyError('Not a single root in Taxonomy: {}'
                                     .format(self.tax_table[self.parent_key[i1]]))
        elif i2 - i1 == 1:
            res = self.tax_table[self.parent_key[i1]]
        else:
            res = numpy.array([(0, -1, b'LUCA')], dtype=self.tax_table.dtype)[0]
        return res

    def _taxon_from_numeric(self, tid):
        """Return the taxonomy row for numeric taxid *tid*."""
        idx = self._table_idx_from_numeric(tid)
        return self.tax_table[idx]

    def _direct_children_taxa(self, tid):
        """Return the rows of all direct children of taxid *tid*."""
        i = self.tax_table['ParentTaxonId'].searchsorted(tid, sorter=self.parent_key)
        idx = []
        while i < len(self.parent_key) and self.tax_table[self.parent_key[i]]['ParentTaxonId'] == tid:
            idx.append(self.parent_key[i])
            i += 1
        return self.tax_table.take(idx)

    def get_parent_taxa(self, query):
        """Get array of taxonomy entries leading towards the
        root of the taxonomy.

        :param query: the starting taxonomy level
        :raises InvalidTaxonId: for unknown ids, self-parenting cycles or
            lineages deeper than 100 levels."""
        idx = []
        parent = query
        count = 0
        while parent != 0:
            i = self._table_idx_from_numeric(parent)
            idx.append(i)
            tmp = self.tax_table[i]['ParentTaxonId']
            if tmp == parent:
                raise InvalidTaxonId(u"{0:d} has itself as parent".format(tmp))
            parent = tmp
            count += 1
            if count > 100:
                raise InvalidTaxonId(u"{0:d} exceeds max depth of 100. Infinite recursion?".format(query))
        return self.tax_table.take(idx)

    def _get_taxids_from_any(self, it, skip_missing=True):
        """Convert an iterable of taxids or scientific names to numeric ids.

        :param it: iterable of numeric taxids or (bytes) scientific names.
        :param skip_missing: if False, raise KeyError when a name is
            unknown; otherwise unknown names are silently dropped."""
        if not isinstance(it, numpy.ndarray):
            try:
                it = numpy.fromiter(it, dtype='i4')
            except ValueError:
                it = numpy.fromiter(it, dtype='S255')
        # numpy.bytes_ replaces the alias numpy.string_ (removed in NumPy 2.0)
        if it.dtype.type is numpy.bytes_:
            try:
                ns = self.name_key
            except AttributeError:
                # lazily build and cache the name sort order
                ns = self.name_key = self.tax_table.argsort(order='Name')
            idxs = self.tax_table['Name'].searchsorted(it, sorter=ns)
            idxs = numpy.clip(idxs, 0, len(ns) - 1)
            taxs = self.tax_table[ns[idxs]]
            keep = taxs['Name'] == it
            if not skip_missing and not keep.all():
                raise KeyError('not all taxonomy names could be found')
            res = taxs['NCBITaxonId'][keep]
        else:
            res = it
        return res

    def get_induced_taxonomy(self, members, collapse=True, augment_parents=False):
        """Extract the taxonomy induced by a given set of `members`.

        This method allows to extract the part which is induced by a
        given set of levels and leaves that should be part of the
        new taxonomy. `members` must be an iterable, the levels
        must be either numeric taxids or scientific names.

        Unless `augment_parents` is set to true, the resulting sub-taxonomy
        will only contain levels that are specified in `members`. If
        `augment_parents` is set to True, also all parent nodes of the
        levels passed in members are considered for the sub-taxonomy.

        :param iter members: an iterable containing the levels
            and leaves that should remain in the new taxonomy. can be
            either axonomic ids or scientific names.
        :param bool collapse: whether or not levels with only one child
            should be skipped or not. This defaults to True
        :param bool augment_parents: whether or not to consider parent
            levels of members for the resulting taxonomy."""
        taxids_to_keep = numpy.sort(self._get_taxids_from_any(members))
        if augment_parents:
            # find all the parents of all the members, add them to taxids_to_keep
            additional_levels = set([])
            for cur_tax in taxids_to_keep:
                try:
                    additional_levels.update(set(self.get_parent_taxa(cur_tax)['NCBITaxonId']))
                except KeyError:
                    logger.info("{} seems not to exist in Taxonomy".format(cur_tax))
                    pass
            # add and remove duplicates
            all_levels = numpy.append(taxids_to_keep, list(additional_levels))
            taxids_to_keep = numpy.unique(all_levels)
        idxs = numpy.searchsorted(self.tax_table['NCBITaxonId'], taxids_to_keep, sorter=self.taxid_key)
        idxs = numpy.clip(idxs, 0, len(self.taxid_key) - 1)
        subtaxdata = self.tax_table[self.taxid_key[idxs]]
        # numpy.all replaces numpy.alltrue, which was removed in NumPy 2.0
        if not numpy.all(subtaxdata['NCBITaxonId'] == taxids_to_keep):
            raise KeyError('not all levels in members exists in this taxonomy')
        updated_parent = numpy.zeros(len(subtaxdata), 'bool')
        for i, cur_tax in enumerate(taxids_to_keep):
            if updated_parent[i]:
                continue
            # get all the parents and check which ones we keep in the new taxonomy.
            parents = self.get_parent_taxa(cur_tax)['NCBITaxonId']
            # numpy.isin supersedes the deprecated numpy.in1d
            mask = numpy.isin(parents, taxids_to_keep)
            # find the position of them in subtaxdata (note: subtaxdata and
            # taxids_to_keep have the same ordering).
            new_idx = taxids_to_keep.searchsorted(parents[mask])
            taxids = taxids_to_keep[new_idx]
            # parent taxid are ncbitaxonids shifted by one position!
            parents = numpy.roll(taxids, -1)
            parents[-1] = 0
            subtaxdata['ParentTaxonId'][new_idx] = parents
            updated_parent[new_idx] = True
        if collapse:
            nr_children = collections.defaultdict(int)
            for p in subtaxdata['ParentTaxonId']:
                nr_children[p] += 1
            rem = [p for (p, cnt) in nr_children.items() if cnt == 1 and p != 0]
            if len(rem) > 0:
                # drop pass-through levels and recompute the induced taxonomy
                idx = taxids_to_keep.searchsorted(rem)
                return self.get_induced_taxonomy(numpy.delete(taxids_to_keep, idx))
        return Taxonomy(subtaxdata, genomes=self.genomes, _valid_levels=self.all_hog_levels)

    def newick(self):
        """Get a Newick representation of the Taxonomy

        Note: as many newick parsers do not support quoted labels,
        the method instead replaces spaces with underscores."""
        def newick_enc(s):
            # spaces and parentheses would break most newick parsers
            return s.translate({ord(' '): u'_', ord('('): u'[', ord(')'): u']'})

        def _rec_newick(node):
            children = []
            for child in self._direct_children_taxa(node['NCBITaxonId']):
                children.append(_rec_newick(child))
            if len(children) == 0:
                return newick_enc(node['Name'].decode())
            else:
                t = ",".join(children)
                return '(' + t + ')' + newick_enc(node['Name'].decode())
        return _rec_newick(self._get_root_taxon()) + ';'

    def as_dict(self):
        """Encode the Taxonomy as a nested dict.

        This representation can for example be used to serialize
        a Taxonomy in json format."""
        def _rec_phylogeny(node):
            res = {'name': node['Name'].decode(), 'id': int(node['NCBITaxonId'])}
            children = []
            for child in self._direct_children_taxa(node['NCBITaxonId']):
                children.append(_rec_phylogeny(child))
            if len(children) > 0:
                res['children'] = children
            else:
                # leaves carry the species code if a genome is known for them
                try:
                    g = self.genomes[res['id']]
                    res['code'] = g.uniprot_species_code
                except KeyError:
                    pass
            return res
        return _rec_phylogeny(self._get_root_taxon())

    def as_phyloxml(self):
        """Encode the Taxonomy as phyloxml output"""
        def _rec_phyloxml(node):
            n = et.Element("clade")
            tax = et.SubElement(n, "taxonomy")
            id_ = et.SubElement(tax, "id", provider="uniprot")
            id_.text = str(node['NCBITaxonId'])
            children = []
            for child in self._direct_children_taxa(node['NCBITaxonId']):
                children.append(_rec_phyloxml(child))
            if len(children) == 0:
                # leaves carry the species code if a genome is known for them
                try:
                    g = self.genomes[int(node['NCBITaxonId'])]
                    code = et.SubElement(tax, 'code')
                    code.text = g.uniprot_species_code
                except ValueError:
                    pass
            sci = et.SubElement(tax, 'scientific_name')
            sci.text = node['Name'].decode()
            n.extend(children)
            return n
        root = et.Element('phyloxml', xmlns="http://www.phyloxml.org")
        phylo = et.SubElement(root, "phylogeny", rooted="true", rerootable="false")
        name = et.SubElement(phylo, "name")
        name.text = "(Partial) species phylogeny from OMA Browser"
        phylo.append(_rec_phyloxml(self._get_root_taxon()))
        return et.tostring(root, encoding='utf-8')
class InvalidTaxonId(Exception):
    """Raised for invalid or unknown taxonomy ids."""
class DBVersionError(Exception):
    """Signals a problem with the database version."""
class DBConsistencyError(Exception):
    """Raised when the database lacks expected data or is inconsistent."""
class InvalidId(Exception):
    """Raised when a protein identifier cannot be resolved."""
class InvalidOmaId(InvalidId):
    """Raised when an OMA id does not parse or is out of range."""
class UnknownIdType(Exception):
    """Raised when no id mapper exists for the requested id type."""
class UnknownSpecies(Exception):
    """Raised when a species code or taxon id is not in the genome table."""
class Singleton(Exception):
    """Exception carrying the entry it was raised for."""

    def __init__(self, entry, msg=None):
        super(Singleton, self).__init__(msg)
        # keep the offending entry accessible to callers
        self.entry = entry
class NoReprEntry(Exception):
    """Module-specific marker exception (semantics defined by call sites)."""
class IdMapperFactory(object):
    """Lazily instantiate and cache id-mapper objects per id type."""

    def __init__(self, db_obj):
        self.db = db_obj
        self.mappers = {}

    def __getitem__(self, idtype):
        return self.get_mapper(idtype)

    def get_mapper(self, idtype):
        """Return the (cached) mapper instance for *idtype*.

        The mapper class is resolved by naming convention, e.g. the id
        type 'XRef' resolves to the class ``XrefIdMapper``.
        """
        if idtype in self.mappers:
            return self.mappers[idtype]
        cls_name = str(idtype).title() + 'IdMapper'
        try:
            mapper = globals()[cls_name](self.db)
        except KeyError:
            raise UnknownIdType('{} is unknown'.format(str(idtype)))
        self.mappers[idtype] = mapper
        return mapper
class XrefIdMapper(object):
    """Map between protein entry numbers and external cross-references.

    The base class treats every xref source as valid; subclasses restrict
    :attr:`idtype` to a subset of sources.
    """

    def __init__(self, db):
        self._db = db
        self.xref_tab = db.get_hdf5_handle().get_node('/XRef')
        self.xrefEnum = self.xref_tab.get_enum('XRefSource')
        # in the base class all known xref source values are valid
        self.idtype = frozenset(list(self.xrefEnum._values.keys()))
        self.xref_index = SuffixSearcher(db.get_hdf5_handle().get_node('/XRef_Index'))

    def map_entry_nr(self, entry_nr):
        """returns the XRef entries associated with the query protein.

        The types of XRefs that are returned depends on the idtype
        class member variable. In the base-class, idtype contains
        all valid xref types. Typically, subclasses of XrefIdMapper
        will change this set.

        :param entry_nr: the numeric id of the query protein.
        :returns: list of dicts with 'source' and 'xref' keys."""
        res = [{'source': self.xrefEnum._values[row['XRefSource']],
                'xref': row['XRefId'].decode()}
               for row in self.xref_tab.where('EntryNr=={:d}'.format(entry_nr))
               if row['XRefSource'] in self.idtype]
        return res

    def canonical_source_order(self):
        """returns the list of xref sources in order of their importance.

        Most important source - in the base class for example UniProtKB/SwissProt
        are first. The canonical order is defined in the enum definition.

        :returns: list of source strings"""
        return [self.xrefEnum(z) for z in sorted(self.idtype)]

    def iter_xrefs_for_entry_nr(self, entry_nr):
        """Iterate over the xrefs of a given entry number.

        This method returns a dict with 'source' and 'xref' fields
        (both str) holding the information of the xref record.

        :param entry_nr: the numeric id of the query protein"""
        for row in self.xref_tab.where('EntryNr=={:d}'.format(entry_nr)):
            if row['XRefSource'] in self.idtype:
                yield {'source': self.xrefEnum._values[row['XRefSource']],
                       'xref': row['XRefId'].decode()}

    def _combine_query_values(self, field, values):
        # build a pytables condition of the form (field==v1)|(field==v2)|...
        parts = ['({}=={})'.format(field, z) for z in values]
        return '|'.join(parts)

    def map_many_entry_nrs(self, entry_nrs):
        """map several entry_nrs with as few db queries as possible
        to their cross-references. The function returns a
        :class:`numpy.recarray` containing all fields as defined in
        the table.

        :param entry_nrs: a list with numeric protein entry ids"""
        mapped_junks = []
        junk_size = 32 - len(self.idtype)  # respect max number of condition variables.
        source_condition = self._combine_query_values('XRefSource', self.idtype)
        for start in range(0, len(entry_nrs), junk_size):
            condition = "({}) & ({})".format(
                self._combine_query_values('EntryNr',
                                           entry_nrs[start:start + junk_size]),
                source_condition)
            mapped_junks.append(self.xref_tab.read_where(condition))
        return numpy.lib.recfunctions.stack_arrays(
            mapped_junks,
            usemask=False)

    def search_xref(self, xref, is_prefix=False, match_any_substring=False):
        """identify proteins associated with `xref`.

        The crossreferences are limited to the types in the class
        member `idtype`. In the base class, all types are valid
        xrefs. The method returns a :class:`numpy.recarry` defined
        for the XRef table with all entries pointing to `xref`.

        The method by default returns only exact matches. By setting
        `is_prefix` to True, one can indicated that the requested xref
        should be interpreted as a prefix and all entries matching this
        prefix should be returned.

        :param str xref: an xref to be located
        :param bool is_prefix: treat xref as a prefix and return
            potentially several matching xrefs"""
        if match_any_substring:
            query = xref.encode('utf-8').lower()
            res = self.xref_tab[self.xref_index.find(query)]
        else:
            if is_prefix:
                # smallest string sorting after every string with prefix xref
                up = xref[:-1] + chr(ord(xref[-1]) + 1)
                cond = '(XRefId >= {!r}) & (XRefId < {!r})'.format(
                    xref.encode('utf-8'), up.encode('utf-8'))
            else:
                cond = 'XRefId=={!r}'.format(xref.encode('utf-8'))
            res = self.xref_tab.read_where(cond)
        if len(res) > 0 and len(self.idtype) < len(self.xrefEnum):
            # numpy.isin supersedes the deprecated numpy.in1d
            res = res[numpy.isin(res['XRefSource'], list(self.idtype))]
        return res

    def source_as_string(self, source):
        """string representation of xref source enum value

        this auxiliary method converts the numeric value of
        a xref source into a string representation.

        :param int source: numeric value of xref source"""
        try:
            return self.xrefEnum._values[source]
        except KeyError:
            raise ValueError("'{}' is not a valid xref source value".format(source))

    def xreftab_to_dict(self, tab):
        """convert a xreftable to a dictionary per entry_nr.

        All rows in `tab` are converted into a nested dictionary
        where the outer key is a protein entry number and the
        inner key the xref source type.

        :param tab: a :class:`numpy.recarray` corresponding to XRef
            table definition to be converted"""
        xrefdict = collections.defaultdict(dict)
        for row in tab:
            try:
                typ = self.xrefEnum._values[row['XRefSource']]
            except (IndexError, KeyError):
                # Bug fix: _values is a dict-like mapping, so an unknown
                # source raises KeyError (source_as_string above catches
                # KeyError for the very same lookup); IndexError is kept
                # for backwards compatibility.
                logger.warning('invalid XRefSource value in {}'.format(row))
                continue
            if typ not in xrefdict[row['EntryNr']]:
                xrefdict[row['EntryNr']][typ] = {'id': row['XRefId']}
        return xrefdict
class UniProtIdMapper(XrefIdMapper):
    """XrefIdMapper restricted to UniProtKB cross-references."""

    def __init__(self, db):
        super(UniProtIdMapper, self).__init__(db)
        sources = ('UniProtKB/SwissProt', 'UniProtKB/TrEMBL')
        self.idtype = frozenset(self.xrefEnum[z] for z in sources)
class LinkoutIdMapper(XrefIdMapper):
    """XrefIdMapper restricted to sources for which linkout urls exist."""

    def __init__(self, db):
        super(LinkoutIdMapper, self).__init__(db)
        sources = ('UniProtKB/SwissProt', 'UniProtKB/TrEMBL',
                   'Ensembl Protein', 'Ensembl Gene', 'EntrezGene')
        self.idtype = frozenset(self.xrefEnum[z] for z in sources)

    def url(self, typ, id_):
        # TODO: improve url generator in external module with all xrefs
        try:
            id_ = id_.decode()
        except AttributeError:
            pass
        if typ.startswith('UniProtKB'):
            return 'http://uniprot.org/uniprot/{}'.format(id_)
        if typ == 'EntrezGene':
            return 'http://www.ncbi.nlm.nih.gov/gene/{}'.format(id_)
        if typ.startswith('Ensembl'):
            return 'http://ensembl.org/id/{}'.format(id_)
        return None

    def xreftab_to_dict(self, tab):
        """Same as the base class, but adds a 'url' field to every xref."""
        xref = super(LinkoutIdMapper, self).xreftab_to_dict(tab)
        for per_entry in list(xref.values()):
            for typ, elem in list(per_entry.items()):
                elem['url'] = self.url(typ, elem['id'])
        return xref

    def iter_xrefs_for_entry_nr(self, entry_nr):
        """same as base clase but includes also the url as a field"""
        for xref in super(LinkoutIdMapper, self).iter_xrefs_for_entry_nr(entry_nr):
            xref['url'] = self.url(xref['source'], xref['xref'])
            yield xref
class DomainNameIdMapper(object):
    """Map domain ids to their description and source annotation."""

    def __init__(self, db):
        self.domain_src = db.get_hdf5_handle().root.Annotations.DomainDescription.read()
        # keep sorted by DomainId so lookups can use binary search
        self.domain_src.sort(order='DomainId')

    def _get_dominfo(self, domain_id):
        """Return the annotation row for *domain_id*.

        :raises KeyError: if no info is available for this domain id."""
        idx = self.domain_src['DomainId'].searchsorted(domain_id)
        # Bug fix: searchsorted returns len(domain_src) when domain_id sorts
        # after every known id; the unguarded row access then raised
        # IndexError instead of the documented KeyError.
        if idx >= len(self.domain_src) or self.domain_src[idx]['DomainId'] != domain_id:
            raise KeyError("no domain info available for {}".format(domain_id))
        return self.domain_src[idx]

    def get_info_dict_from_domainid(self, domain_id):
        """Return a dict with name/source/domainid for a (bytes) domain id."""
        info = self._get_dominfo(domain_id)
        return {'name': info['Description'].decode(), 'source': info['Source'].decode(),
                'domainid': domain_id.decode()}
class FastMapper(object):
    """GO Function projection to sequences from OMA hdf5 file"""
    def __init__(self, db):
        self.db = db
    def iter_projected_goannotations(self, records):
        """Yield projected GO annotation rows (GAF-style dicts) for *records*.

        Each record's sequence is searched in the database; exact matches
        inherit the annotations of all matched entries, otherwise the best
        approximate match is used.
        """
        # gene ontology fast mapping, uses exact / approximate search.
        # todo: implement taxonomic restriction.
        # Input: iterable of biopython SeqRecords
        for rec in records:
            logger.debug('projecting function to {}'.format(rec))
            r = self.db.seq_search.search(str(rec.seq))
            if r is not None:
                logger.debug(str(r))
                if r[0] == 'exact':
                    tdfs1 = []
                    for enum in r[1]:
                        df = self.db.get_gene_ontology_annotations(enum, as_dataframe=True)
                        if df is not None:
                            df['With'] = 'Exact:{}'.format(self.db.id_mapper['Oma'].map_entry_nr(enum))
                            tdfs1.append(df)
                    # NOTE(review): pd.concat raises ValueError when tdfs1 is
                    # empty (no annotated exact match) -- confirm intended.
                    go_df = pd.concat(tdfs1, ignore_index=True)
                else:
                    # Take best match. TODO: remove those below some level of match.
                    match_enum = r[1][0][0]
                    match_score = r[1][0][1]['score']
                    logger.debug('match: enum: {}, score:{}'.format(match_enum, match_score))
                    go_df = self.db.get_gene_ontology_annotations(match_enum, as_dataframe=True)
                    if go_df is not None:
                        go_df['With'] = 'Approx:{}:{}'.format(self.db.id_mapper['Oma'].map_entry_nr(match_enum),
                                                              match_score)
                if go_df is not None:
                    # fill the constant GAF columns for projected annotations
                    go_df['DB'] = 'OMA_FastMap'
                    go_df['Assigned_By'] = go_df['DB']
                    go_df['DB_Object_ID'] = rec.id
                    go_df['DB_Object_Symbol'] = go_df['DB_Object_ID']
                    go_df['Evidence'] = 'IEA'
                    go_df['DB:Reference'] = 'OMA_Fun:002'
                    go_df['Taxon_ID'] = 'taxon:-1'
                    len_with_dupl = len(go_df)
                    go_df.drop_duplicates(inplace=True)
                    logger.debug('cleaning duplicates: from {} to {} annotations'.format(len_with_dupl, len(go_df)))
                    for row in go_df.to_dict('records'):
                        yield row
    def write_annotations(self, file, seqrecords):
        """Project annotations and write them to file

        This method takes a filehandle and an iterable of BioPython
        SeqRecords objects as input. The function computes the
        projected annotations and writes them to the file in gaf
        format.

        :param file: filehandle to write annotations to
        :param seqrecords: input sequences to project functions to
        """
        file.write('!gaf-version: {}\n'.format(GAF_VERSION))
        file.write('!Project Name: OMA Fast Function Projection\n')
        file.write('!Date created: {}\n'.format(time.strftime("%c")))
        file.write('!Contact Email: <EMAIL>\n')
        for anno in self.iter_projected_goannotations(seqrecords):
            GOA.writerec(anno, file, GOA.GAF20FIELDS)
| [
"logging.getLogger",
"numpy.alltrue",
"builtins.object",
"re.compile",
"numpy.array",
"numpy.fromiter",
"numpy.searchsorted",
"numpy.delete",
"pandas.DataFrame",
"dateutil.parser.parse",
"pyopa.load_default_environments",
"numpy.in1d",
"tables.open_file",
"pyopa.align_double",
"builtins.... | [((624, 651), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (641, 651), False, 'import logging\n'), ((690, 725), 'threading.stack_size', 'threading.stack_size', (['(4096 * 100000)'], {}), '(4096 * 100000)\n', (710, 725), False, 'import threading\n'), ((1156, 1186), 're.compile', 're.compile', (['"""(.)([A-Z][a-z]+)"""'], {}), "('(.)([A-Z][a-z]+)')\n", (1166, 1186), False, 'import re\n'), ((1201, 1232), 're.compile', 're.compile', (['"""([a-z0-9])([A-Z])"""'], {}), "('([a-z0-9])([A-Z])')\n", (1211, 1232), False, 'import re\n'), ((1015, 1032), 'itertools.count', 'itertools.count', ([], {}), '()\n', (1030, 1032), False, 'import itertools\n'), ((1055, 1077), 'builtins.zip', 'zip', (['iterable', 'counter'], {}), '(iterable, counter)\n', (1058, 1077), False, 'from builtins import chr, range, object, zip, bytes\n'), ((5160, 5222), 'dateutil.parser.parse', 'dateutil.parser.parse', (["self.db.root._v_attrs['conversion_end']"], {}), "(self.db.root._v_attrs['conversion_end'])\n", (5181, 5222), False, 'import dateutil\n'), ((7046, 7089), 're.compile', 're.compile', (["b'(?P<prefix>HOG:)(?P<nr>\\\\d+)'"], {}), "(b'(?P<prefix>HOG:)(?P<nr>\\\\d+)')\n", (7056, 7089), False, 'import re\n'), ((31626, 31677), 'collections.Counter', 'collections.Counter', (["fam_row['domains']['DomainId']"], {}), "(fam_row['domains']['DomainId'])\n", (31645, 31677), False, 'import collections\n'), ((31748, 31792), 'collections.defaultdict', 'collections.defaultdict', (['collections.Counter'], {}), '(collections.Counter)\n', (31771, 31792), False, 'import collections\n'), ((34831, 34851), 'pandas.DataFrame', 'pd.DataFrame', (['annots'], {}), '(annots)\n', (34843, 34851), True, 'import pandas as pd\n'), ((43323, 43344), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (43342, 43344), False, 'import collections\n'), ((45110, 45185), 'threading.Thread', 'threading.Thread', ([], {'target': 'align', 'args': '(query, entries, self.PAM100, 
aligned)'}), '(target=align, args=(query, entries, self.PAM100, aligned))\n', (45126, 45185), False, 'import threading\n'), ((45328, 45349), 'builtins.zip', 'zip', (['matches', 'aligned'], {}), '(matches, aligned)\n', (45331, 45349), False, 'from builtins import chr, range, object, zip, bytes\n'), ((45750, 45804), 're.compile', 're.compile', (['"""(?P<genome>[A-Z][A-Z0-9]{4})(?P<nr>\\\\d+)"""'], {}), "('(?P<genome>[A-Z][A-Z0-9]{4})(?P<nr>\\\\d+)')\n", (45760, 45804), False, 'import re\n'), ((53422, 53448), 're.compile', 're.compile', (['"""[^A-Za-z. -]"""'], {}), "('[^A-Za-z. -]')\n", (53432, 53448), False, 'import re\n'), ((58930, 59023), 'numpy.searchsorted', 'numpy.searchsorted', (["self.tax_table['NCBITaxonId']", 'taxids_to_keep'], {'sorter': 'self.taxid_key'}), "(self.tax_table['NCBITaxonId'], taxids_to_keep, sorter=\n self.taxid_key)\n", (58948, 59023), False, 'import numpy\n'), ((63174, 63229), 'xml.etree.ElementTree.Element', 'et.Element', (['"""phyloxml"""'], {'xmlns': '"""http://www.phyloxml.org"""'}), "('phyloxml', xmlns='http://www.phyloxml.org')\n", (63184, 63229), True, 'from xml.etree import ElementTree as et\n'), ((63246, 63313), 'xml.etree.ElementTree.SubElement', 'et.SubElement', (['root', '"""phylogeny"""'], {'rooted': '"""true"""', 'rerootable': '"""false"""'}), "(root, 'phylogeny', rooted='true', rerootable='false')\n", (63259, 63313), True, 'from xml.etree import ElementTree as et\n'), ((63329, 63357), 'xml.etree.ElementTree.SubElement', 'et.SubElement', (['phylo', '"""name"""'], {}), "(phylo, 'name')\n", (63342, 63357), True, 'from xml.etree import ElementTree as et\n'), ((63501, 63536), 'xml.etree.ElementTree.tostring', 'et.tostring', (['root'], {'encoding': '"""utf-8"""'}), "(root, encoding='utf-8')\n", (63512, 63536), True, 'from xml.etree import ElementTree as et\n'), ((67655, 67719), 'numpy.lib.recfunctions.stack_arrays', 'numpy.lib.recfunctions.stack_arrays', (['mapped_junks'], {'usemask': '(False)'}), '(mapped_junks, usemask=False)\n', 
(67690, 67719), False, 'import numpy\n'), ((70064, 70093), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (70087, 70093), False, 'import collections\n'), ((1814, 1839), 'tables.open_file', 'tables.open_file', (['db', '"""r"""'], {}), "(db, 'r')\n", (1830, 1839), False, 'import tables\n'), ((9814, 9845), 'numpy.array', 'numpy.array', (['[e]'], {'dtype': 'e.dtype'}), '([e], dtype=e.dtype)\n', (9825, 9845), False, 'import numpy\n'), ((37445, 37471), 'bisect.bisect_left', 'bisect_left', (['slicer', 'query'], {}), '(slicer, query)\n', (37456, 37471), False, 'from bisect import bisect_left\n'), ((38196, 38229), 'pyopa.load_default_environments', 'pyopa.load_default_environments', ([], {}), '()\n', (38227, 38229), False, 'import pyopa\n'), ((38624, 38672), 'tables.open_file', 'tables.open_file', (["(self.db.filename + '.idx')", '"""r"""'], {}), "(self.db.filename + '.idx', 'r')\n", (38640, 38672), False, 'import tables\n'), ((40061, 40099), 'numpy.searchsorted', 'numpy.searchsorted', (['self.entry_idx', 'ii'], {}), '(self.entry_idx, ii)\n', (40079, 40099), False, 'import numpy\n'), ((42336, 42374), 'bisect.bisect_left', 'bisect_left', (['z', 'seq'], {'lo': 'self.n_entries'}), '(z, seq, lo=self.n_entries)\n', (42347, 42374), False, 'from bisect import bisect_left\n'), ((58889, 58913), 'numpy.unique', 'numpy.unique', (['all_levels'], {}), '(all_levels)\n', (58901, 58913), False, 'import numpy\n'), ((59152, 59210), 'numpy.alltrue', 'numpy.alltrue', (["(subtaxdata['NCBITaxonId'] == taxids_to_keep)"], {}), "(subtaxdata['NCBITaxonId'] == taxids_to_keep)\n", (59165, 59210), False, 'import numpy\n'), ((59637, 59672), 'numpy.in1d', 'numpy.in1d', (['parents', 'taxids_to_keep'], {}), '(parents, taxids_to_keep)\n', (59647, 59672), False, 'import numpy\n'), ((60004, 60026), 'numpy.roll', 'numpy.roll', (['taxids', '(-1)'], {}), '(taxids, -1)\n', (60014, 60026), False, 'import numpy\n'), ((60205, 60233), 'collections.defaultdict', 
'collections.defaultdict', (['int'], {}), '(int)\n', (60228, 60233), False, 'import collections\n'), ((62384, 62403), 'xml.etree.ElementTree.Element', 'et.Element', (['"""clade"""'], {}), "('clade')\n", (62394, 62403), True, 'from xml.etree import ElementTree as et\n'), ((62422, 62450), 'xml.etree.ElementTree.SubElement', 'et.SubElement', (['n', '"""taxonomy"""'], {}), "(n, 'taxonomy')\n", (62435, 62450), True, 'from xml.etree import ElementTree as et\n'), ((62469, 62513), 'xml.etree.ElementTree.SubElement', 'et.SubElement', (['tax', '"""id"""'], {'provider': '"""uniprot"""'}), "(tax, 'id', provider='uniprot')\n", (62482, 62513), True, 'from xml.etree import ElementTree as et\n'), ((63023, 63060), 'xml.etree.ElementTree.SubElement', 'et.SubElement', (['tax', '"""scientific_name"""'], {}), "(tax, 'scientific_name')\n", (63036, 63060), True, 'from xml.etree import ElementTree as et\n'), ((76094, 76135), 'Bio.UniProt.GOA.writerec', 'GOA.writerec', (['anno', 'file', 'GOA.GAF20FIELDS'], {}), '(anno, file, GOA.GAF20FIELDS)\n', (76106, 76135), False, 'from Bio.UniProt import GOA\n'), ((3150, 3158), 'builtins.object', 'object', ([], {}), '()\n', (3156, 3158), False, 'from builtins import chr, range, object, zip, bytes\n'), ((38554, 38595), 'os.path.isfile', 'os.path.isfile', (["(self.db.filename + '.idx')"], {}), "(self.db.filename + '.idx')\n", (38568, 38595), False, 'import os\n'), ((44504, 44555), 'pyopa.align_double', 'pyopa.align_double', (['s1', 's2', 'env', '(False)', '(False)', '(True)'], {}), '(s1, s2, env, False, False, True)\n', (44522, 44555), False, 'import pyopa\n'), ((44576, 44618), 'pyopa.align_strings', 'pyopa.align_strings', (['s1', 's2', 'env', '(False)', 'z'], {}), '(s1, s2, env, False, z)\n', (44595, 44618), False, 'import pyopa\n'), ((56317, 56347), 'numpy.fromiter', 'numpy.fromiter', (['it'], {'dtype': '"""i4"""'}), "(it, dtype='i4')\n", (56331, 56347), False, 'import numpy\n'), ((75945, 75964), 'time.strftime', 'time.strftime', (['"""%c"""'], {}), 
"('%c')\n", (75958, 75964), False, 'import time\n'), ((4675, 4708), 'os.path.dirname', 'os.path.dirname', (['self.db.filename'], {}), '(self.db.filename)\n', (4690, 4708), False, 'import os\n'), ((34368, 34406), 'numpy.vectorize', 'numpy.vectorize', (['filter_obsolete_terms'], {}), '(filter_obsolete_terms)\n', (34383, 34406), False, 'import numpy\n'), ((37819, 37882), 'numpy.searchsorted', 'numpy.searchsorted', (['self.lookup_arr', '(self.suffix_arr[ii:jj] + 1)'], {}), '(self.lookup_arr, self.suffix_arr[ii:jj] + 1)\n', (37837, 37882), False, 'import numpy\n'), ((43570, 43577), 'builtins.zip', 'zip', (['*z'], {}), '(*z)\n', (43573, 43577), False, 'from builtins import chr, range, object, zip, bytes\n'), ((54882, 54941), 'numpy.array', 'numpy.array', (["[(0, -1, b'LUCA')]"], {'dtype': 'self.tax_table.dtype'}), "([(0, -1, b'LUCA')], dtype=self.tax_table.dtype)\n", (54893, 54941), False, 'import numpy\n'), ((56400, 56432), 'numpy.fromiter', 'numpy.fromiter', (['it'], {'dtype': '"""S255"""'}), "(it, dtype='S255')\n", (56414, 56432), False, 'import numpy\n'), ((60534, 60567), 'numpy.delete', 'numpy.delete', (['taxids_to_keep', 'idx'], {}), '(taxids_to_keep, idx)\n', (60546, 60567), False, 'import numpy\n'), ((62863, 62889), 'xml.etree.ElementTree.SubElement', 'et.SubElement', (['tax', '"""code"""'], {}), "(tax, 'code')\n", (62876, 62889), True, 'from xml.etree import ElementTree as et\n'), ((73928, 73963), 'pandas.concat', 'pd.concat', (['tdfs1'], {'ignore_index': '(True)'}), '(tdfs1, ignore_index=True)\n', (73937, 73963), True, 'import pandas as pd\n')] |
# --------------
# PageRank exercise: rank the 8 pages of a small web graph three ways:
# (1) dominant eigenvector, (2) power iteration, (3) damped power iteration.
import numpy as np

# Adjacency (column-stochastic link) matrix: entry [i][j] is the probability
# of moving from page j+1 to page i+1.  Every column sums to 1.
adj_mat = np.array([[0, 0, 0, 0, 0, 0, 1/3, 0],
                    [1/2, 0, 1/2, 1/3, 0, 0, 0, 0],
                    [1/2, 0, 0, 0, 0, 0, 0, 0],
                    [0, 1, 0, 0, 0, 0, 0, 0],
                    [0, 0, 1/2, 1/3, 0, 0, 1/3, 0],
                    [0, 0, 0, 1/3, 1/3, 0, 0, 1/2],
                    [0, 0, 0, 0, 1/3, 0, 0, 1/2],
                    [0, 0, 0, 0, 1/3, 1, 1/3, 0]])

# --- Method 1: dominant eigenvector --------------------------------------
eigenvalues, eigenvectors = np.linalg.eig(adj_mat)
print('Eigen values are', eigenvalues)
print('*'*25)
# BUG FIX: select the eigenvector of the largest-magnitude eigenvalue
# (= 1 for a column-stochastic matrix) instead of blindly taking column 0 --
# np.linalg.eig does not guarantee any ordering of the eigenvalues.
dominant = np.argmax(np.abs(eigenvalues))
print('Eigen Vector are', abs(eigenvectors[:, dominant]))
print('*'*25)
# L1-normalise so the entries form a probability distribution over pages.
eigen_1 = abs(eigenvectors[:, dominant]) / np.linalg.norm(eigenvectors[:, dominant], 1)
print('Normalize eigen vector are', eigen_1)
print('*'*50)
# argmax gives the index of the largest rank score; +1 converts to page number.
page = np.argmax(eigen_1) + 1
print('The most important page is', page)
# Code ends here

# --------------
# --- Method 2: power iteration on adj_mat --------------------------------
# Initialize stationary vector I (all mass on page 1).
init_I = np.array([1, 0, 0, 0, 0, 0, 0, 0])
# Perform iterations for power method, renormalising (L1) each step.
for i in range(10):
    init_I = np.dot(adj_mat, init_I)
    init_I /= np.linalg.norm(init_I, 1)
print(init_I)
power_page = np.argmax(init_I) + 1
print(power_page)
# Code ends here
print('*'*100)
print('*'*100)
print('*'*100)
print('*'*100)
print('*'*100)

# --------------
# New adjacency matrix: page 1 now has no incoming links (first row is zero).
new_adj_mat = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
                        [1/2, 0, 1/2, 1/3, 0, 0, 0, 0],
                        [1/2, 0, 0, 0, 0, 0, 0, 0],
                        [0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 0, 1/2, 1/3, 0, 0, 1/2, 0],
                        [0, 0, 0, 1/3, 1/3, 0, 0, 1/2],
                        [0, 0, 0, 0, 1/3, 0, 0, 1/2],
                        [0, 0, 0, 0, 1/3, 1, 1/2, 0]])
# Power method on the new graph.
new_init_I = np.array([1, 0, 0, 0, 0, 0, 0, 0])
for i in range(10):
    new_init_I = np.dot(new_adj_mat, new_init_I)
    new_init_I /= np.linalg.norm(new_init_I, 1)
print(new_init_I)
# Code ends here

# --------------
# --- Method 3: PageRank with damping (Google matrix) ----------------------
alpha = 0.85
# G = alpha*A + (1-alpha)*(1/N)*ones: mixing in a uniform jump makes the
# chain irreducible so the power iteration converges to a unique vector.
G = (alpha*new_adj_mat) + (1-alpha)*(1/len(new_adj_mat))*np.ones(new_adj_mat.shape)
print(G)
final_init_I = np.array([1, 0, 0, 0, 0, 0, 0, 0])
for i in range(1000):
    # BUG FIX: iterate with the damped matrix G -- the original iterated
    # with new_adj_mat, leaving the computed G unused.
    final_init_I = np.dot(G, final_init_I)
    final_init_I /= np.linalg.norm(final_init_I, 1)
print(final_init_I)
# Code ends here
| [
"numpy.ones",
"numpy.linalg.eig",
"numpy.max",
"numpy.array",
"numpy.dot",
"numpy.linalg.norm"
] | [((106, 397), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 1 / 3, 0], [1 / 2, 0, 1 / 2, 1 / 3, 0, 0, 0, 0], [1 / 2,\n 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1 / 2, 1 / 3, 0,\n 0, 1 / 3, 0], [0, 0, 0, 1 / 3, 1 / 3, 0, 0, 1 / 2], [0, 0, 0, 0, 1 / 3,\n 0, 0, 1 / 2], [0, 0, 0, 0, 1 / 3, 1, 1 / 3, 0]]'], {}), '([[0, 0, 0, 0, 0, 0, 1 / 3, 0], [1 / 2, 0, 1 / 2, 1 / 3, 0, 0, 0, 0\n ], [1 / 2, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1 / 2,\n 1 / 3, 0, 0, 1 / 3, 0], [0, 0, 0, 1 / 3, 1 / 3, 0, 0, 1 / 2], [0, 0, 0,\n 0, 1 / 3, 0, 0, 1 / 2], [0, 0, 0, 0, 1 / 3, 1, 1 / 3, 0]])\n', (114, 397), True, 'import numpy as np\n'), ((500, 522), 'numpy.linalg.eig', 'np.linalg.eig', (['adj_mat'], {}), '(adj_mat)\n', (513, 522), True, 'import numpy as np\n'), ((1102, 1136), 'numpy.array', 'np.array', (['[1, 0, 0, 0, 0, 0, 0, 0]'], {}), '([1, 0, 0, 0, 0, 0, 0, 0])\n', (1110, 1136), True, 'import numpy as np\n'), ((1573, 1861), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0, 0], [1 / 2, 0, 1 / 2, 1 / 3, 0, 0, 0, 0], [1 / 2, 0,\n 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1 / 2, 1 / 3, 0, 0,\n 1 / 2, 0], [0, 0, 0, 1 / 3, 1 / 3, 0, 0, 1 / 2], [0, 0, 0, 0, 1 / 3, 0,\n 0, 1 / 2], [0, 0, 0, 0, 1 / 3, 1, 1 / 2, 0]]'], {}), '([[0, 0, 0, 0, 0, 0, 0, 0], [1 / 2, 0, 1 / 2, 1 / 3, 0, 0, 0, 0], [\n 1 / 2, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1 / 2, 1 /\n 3, 0, 0, 1 / 2, 0], [0, 0, 0, 1 / 3, 1 / 3, 0, 0, 1 / 2], [0, 0, 0, 0, \n 1 / 3, 0, 0, 1 / 2], [0, 0, 0, 0, 1 / 3, 1, 1 / 2, 0]])\n', (1581, 1861), True, 'import numpy as np\n'), ((1943, 1977), 'numpy.array', 'np.array', (['[1, 0, 0, 0, 0, 0, 0, 0]'], {}), '([1, 0, 0, 0, 0, 0, 0, 0])\n', (1951, 1977), True, 'import numpy as np\n'), ((2496, 2530), 'numpy.array', 'np.array', (['[1, 0, 0, 0, 0, 0, 0, 0]'], {}), '([1, 0, 0, 0, 0, 0, 0, 0])\n', (2504, 2530), True, 'import numpy as np\n'), ((696, 733), 'numpy.linalg.norm', 'np.linalg.norm', (['eigenvectors[:, 0]', '(1)'], {}), 
'(eigenvectors[:, 0], 1)\n', (710, 733), True, 'import numpy as np\n'), ((1202, 1225), 'numpy.dot', 'np.dot', (['adj_mat', 'init_I'], {}), '(adj_mat, init_I)\n', (1208, 1225), True, 'import numpy as np\n'), ((1240, 1265), 'numpy.linalg.norm', 'np.linalg.norm', (['init_I', '(1)'], {}), '(init_I, 1)\n', (1254, 1265), True, 'import numpy as np\n'), ((2069, 2100), 'numpy.dot', 'np.dot', (['new_adj_mat', 'new_init_I'], {}), '(new_adj_mat, new_init_I)\n', (2075, 2100), True, 'import numpy as np\n'), ((2120, 2149), 'numpy.linalg.norm', 'np.linalg.norm', (['new_init_I', '(1)'], {}), '(new_init_I, 1)\n', (2134, 2149), True, 'import numpy as np\n'), ((2605, 2638), 'numpy.dot', 'np.dot', (['new_adj_mat', 'final_init_I'], {}), '(new_adj_mat, final_init_I)\n', (2611, 2638), True, 'import numpy as np\n'), ((2659, 2690), 'numpy.linalg.norm', 'np.linalg.norm', (['final_init_I', '(1)'], {}), '(final_init_I, 1)\n', (2673, 2690), True, 'import numpy as np\n'), ((2343, 2369), 'numpy.ones', 'np.ones', (['new_adj_mat.shape'], {}), '(new_adj_mat.shape)\n', (2350, 2369), True, 'import numpy as np\n'), ((808, 823), 'numpy.max', 'np.max', (['eigen_1'], {}), '(eigen_1)\n', (814, 823), True, 'import numpy as np\n'), ((1304, 1318), 'numpy.max', 'np.max', (['init_I'], {}), '(init_I)\n', (1310, 1318), True, 'import numpy as np\n')] |
# wordrl imports
import wordrl as wdl
# torch imports
import torch
# misc imports
import gym
import os
import numpy as np
def get_freer_gpu():
    """Return the index of the GPU with the most free memory.

    Parses ``nvidia-smi -q -d Memory`` output.  Returns 0 when no GPU
    information is available (no NVIDIA driver, command fails, or the
    output contains no "Free" lines), matching the original fallback.
    """
    import subprocess
    try:
        # Replaces os.system + a throwaway 'tmp' file in the CWD (which was
        # never cleaned up and whose file handle was never closed).
        out = subprocess.run(
            'nvidia-smi -q -d Memory | grep -A4 GPU | grep Free',
            shell=True, capture_output=True, text=True, check=False,
        ).stdout
    except OSError:
        return 0
    # Lines look like "        FB Memory ... Free : 12345 MiB" -> token [2].
    memory_available = [int(line.split()[2])
                        for line in out.splitlines() if line.split()]
    if len(memory_available) == 0:
        return 0
    else:
        return np.argmax(memory_available)
@torch.no_grad()
def play_one_game(agent, state, env, dataset, epsilon, device):
    """Play one full episode with epsilon-greedy actions and record it.

    The completed episode is appended to ``dataset.winners`` when the final
    reward is positive, otherwise to ``dataset.losers``.  Returns the reset
    environment state, the last reward, and the number of winning steps.
    """
    episode = []
    reward = 0
    game_over = False
    while not game_over:
        action = agent.get_action(state, epsilon, device)
        # Advance the environment by one guess.
        next_state, reward, game_over, _ = env.step(action)
        experience = wdl.experience.Experience(
            state.copy(), action, reward, game_over, next_state.copy(),
            env.goal_word)
        state = next_state
        game_over = experience.done
        reward = experience.reward
        episode.append(experience)
    # state[0] holds the remaining-turn counter after the game ends.
    winning_steps = env.max_turns - state[0]
    print(winning_steps)
    if reward > 0:
        dataset.winners.append(episode)
    else:
        dataset.losers.append(episode)
    return env.reset(), reward, winning_steps
def run_dqn_experiment(config):
    """Train a DQN agent on the visualized Wordle environment.

    Parameters
    ----------
    config : dict
        Nested configuration with "experiment", "agent", "dataset" and
        "training" sections (episode count, warmup length, epsilon
        schedule, learning rate, batch size, target-network sync rate, ...).
    """
    env = gym.make('Wordle-v2-10-visualized')
    # ndarray
    state = env.reset()
    obs_size = env.observation_space.shape[0]
    n_actions = env.action_space.n
    num_eps = config["experiment"]["num_episodes"]
    if config["experiment"]["use_gpu"]:
        device = get_freer_gpu()  # index of the GPU with the most free memory
    else:
        device = torch.device("cpu")
    # Online network and its slowly-synced target network (same architecture).
    net = wdl.agent.get_net(obs_size, n_actions, config["agent"])
    target_net = wdl.agent.get_net(obs_size, n_actions, config["agent"])
    agent = wdl.agent.Agent(net, env.action_space)
    # Replay memory keeps winning and losing episodes in separate buffers.
    dataset = wdl.experience.RLDataset(winners=wdl.experience.SequenceReplay(config["dataset"]),
                                 losers=wdl.experience.SequenceReplay(config["dataset"]),
                                 sample_size=config["dataset"]["eps_length"])
    dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=config["training"]["batch_size"])
    optimizer = torch.optim.Adam(net.parameters(), lr=config["training"]["lr"], weight_decay=config["training"]["weight_decay"])
    # high level statistics
    # NOTE(review): total_reward and episode_reward are never updated below;
    # they stay 0 for the whole run -- confirm whether logging was removed.
    total_reward = 0
    episode_reward = 0
    total_games_played = 0
    # global step tracks number of optimizer steps
    global_step = 0
    # low level statistics
    wins = 0
    losses = 0
    winning_steps = 0
    rewards = 0
    #
    # Kind of tricky scheme: Episode = Game
    #
    # Looping to play multiple games of Wordrl
    for i in range(num_eps + config["training"]["warmup"]):
        # provide small warmup period: pure exploration (epsilon = 1),
        # no gradient updates, just filling the replay buffers
        if i < config["training"]["warmup"]:
            epsilon = 1
            state, _, _ = play_one_game(agent, state, env, dataset, epsilon, device)
        else:
            # Training step
            for batch in dataloader:
                # Linearly annealed epsilon, floored at eps_end.
                epsilon = max(config["training"]["eps_end"],
                              config["training"]["eps_state"] - total_games_played /
                              config["training"]["eps_last_frame"])
                # step through environment with agent
                with torch.no_grad():
                    state, reward, winning_steps = play_one_game(agent, state, env, dataset, epsilon, device)
                total_games_played += 1
                if reward > 0:
                    wins += 1
                    # NOTE(review): this doubles winning_steps rather than
                    # accumulating per-game step counts into a separate
                    # running total -- confirm the intended statistic.
                    winning_steps += winning_steps
                else:
                    losses += 1
                rewards += reward
                # standard pytorch training loop
                optimizer.zero_grad()
                loss = wdl.losses.dqn_mse_loss(batch, config["training"]["gamma"], net, target_net)
                loss.backward()
                optimizer.step()
                global_step += 1
                # Soft update of target network
                if global_step % config["experiment"]["sync_rate"] == 0:
                    target_net.load_state_dict(net.state_dict())
                # NOTE(review): log and status are assembled every step but
                # never consumed (no logger call) -- dead code or missing hook?
                log = {
                    "total_reward": torch.tensor(total_reward).to(device),
                    "reward": torch.tensor(reward).to(device),
                    "train_loss": loss.detach(),
                }
                status = {
                    "steps": torch.tensor(global_step).to(device),
                    "total_reward": torch.tensor(total_reward).to(device),
                }
                if global_step % config["experiment"]["steps_per_update"] == 0:
                    # Render the latest recorded winner/loser as text.
                    # NOTE(review): `game` is built but never printed/logged,
                    # and the inner loops reuse the outer loop variable `i`.
                    if len(dataset.winners) > 0:
                        winner = dataset.winners.buffer[-1]
                        game = f"goal: {winner[0].goal_id}\n"
                        for i, xp in enumerate(winner):
                            game += f"{i}: {env.words[xp.action]}\n"
                    if len(dataset.losers) > 0:
                        loser = dataset.losers.buffer[-1]
                        game = f"goal: {loser[0].goal_id}\n"
                        for i, xp in enumerate(loser):
                            game += f"{i}: {env.words[xp.action]}\n"
                # NOTE(review): these counters are zeroed after every batch,
                # so wins/losses/rewards never accumulate beyond one
                # optimizer step -- confirm this is intentional.
                winning_steps = 0
                wins = 0
                losses = 0
                rewards = 0
| [
"wordrl.experience.SequenceReplay",
"wordrl.losses.dqn_mse_loss",
"wordrl.agent.get_net",
"numpy.argmax",
"wordrl.agent.Agent",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.no_grad",
"os.system",
"gym.make",
"torch.device"
] | [((429, 444), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (442, 444), False, 'import torch\n'), ((151, 216), 'os.system', 'os.system', (['"""nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp"""'], {}), "('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')\n", (160, 216), False, 'import os\n'), ((1244, 1279), 'gym.make', 'gym.make', (['"""Wordle-v2-10-visualized"""'], {}), "('Wordle-v2-10-visualized')\n", (1252, 1279), False, 'import gym\n'), ((1583, 1638), 'wordrl.agent.get_net', 'wdl.agent.get_net', (['obs_size', 'n_actions', "config['agent']"], {}), "(obs_size, n_actions, config['agent'])\n", (1600, 1638), True, 'import wordrl as wdl\n'), ((1656, 1711), 'wordrl.agent.get_net', 'wdl.agent.get_net', (['obs_size', 'n_actions', "config['agent']"], {}), "(obs_size, n_actions, config['agent'])\n", (1673, 1711), True, 'import wordrl as wdl\n'), ((1724, 1762), 'wordrl.agent.Agent', 'wdl.agent.Agent', (['net', 'env.action_space'], {}), '(net, env.action_space)\n', (1739, 1762), True, 'import wordrl as wdl\n'), ((2060, 2154), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'dataset', 'batch_size': "config['training']['batch_size']"}), "(dataset=dataset, batch_size=config['training'][\n 'batch_size'])\n", (2087, 2154), False, 'import torch\n'), ((399, 426), 'numpy.argmax', 'np.argmax', (['memory_available'], {}), '(memory_available)\n', (408, 426), True, 'import numpy as np\n'), ((1552, 1571), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1564, 1571), False, 'import torch\n'), ((1811, 1859), 'wordrl.experience.SequenceReplay', 'wdl.experience.SequenceReplay', (["config['dataset']"], {}), "(config['dataset'])\n", (1840, 1859), True, 'import wordrl as wdl\n'), ((1908, 1956), 'wordrl.experience.SequenceReplay', 'wdl.experience.SequenceReplay', (["config['dataset']"], {}), "(config['dataset'])\n", (1937, 1956), True, 'import wordrl as wdl\n'), ((3771, 3847), 'wordrl.losses.dqn_mse_loss', 
'wdl.losses.dqn_mse_loss', (['batch', "config['training']['gamma']", 'net', 'target_net'], {}), "(batch, config['training']['gamma'], net, target_net)\n", (3794, 3847), True, 'import wordrl as wdl\n'), ((3270, 3285), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3283, 3285), False, 'import torch\n'), ((4203, 4229), 'torch.tensor', 'torch.tensor', (['total_reward'], {}), '(total_reward)\n', (4215, 4229), False, 'import torch\n'), ((4272, 4292), 'torch.tensor', 'torch.tensor', (['reward'], {}), '(reward)\n', (4284, 4292), False, 'import torch\n'), ((4428, 4453), 'torch.tensor', 'torch.tensor', (['global_step'], {}), '(global_step)\n', (4440, 4453), False, 'import torch\n'), ((4502, 4528), 'torch.tensor', 'torch.tensor', (['total_reward'], {}), '(total_reward)\n', (4514, 4528), False, 'import torch\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 18 10:57:19 2018
@author: edanrein
v14-May-2019
-Added to pyPplusS
v26-jun-2018
-New Code
"""
if __name__ == "__main__":
from pyppluss.segment_models import LC_ringed as LC
import numpy as np
import matplotlib.pyplot as plt
#number of points
n=1000
#Parameters for the test
x_planet = np.linspace(-1.1,1.1,2*n)
y_planet = np.zeros_like(x_planet)+0.0
radius_planet = np.ones_like(x_planet)*0.11
radius_in = np.ones_like(x_planet)*10000.44
radius_out = np.ones_like(x_planet)*10000.7
ring_inclination = 0
ring_rotation = 0
opacity = 1.0
#Range of orders for which the errors will be calculated
orders = np.arange(3,15)
#"True" order - this will be considered the true value
trueorder = 20
err = np.empty((len(orders),len(x_planet)))
valstrue = vals = LC(radius_planet, radius_in, radius_out, x_planet, y_planet, ring_inclination, ring_rotation, opacity, 0.0, 0.448667, 0.0 ,0.313276,n_center=trueorder,n_gress=trueorder)
plt.figure()
for k in range(len(orders)):
vals = LC(radius_planet, radius_in, radius_out, x_planet, y_planet, ring_inclination, ring_rotation, opacity, 0.0, 0.448667, 0.0 ,0.313276,n_center=orders[k],n_gress=orders[k])
err[k] = abs(vals-valstrue)
#Plotting
plt.semilogy(x_planet,err[k],'o-')
plt.text(x_planet[n],err[k][n],"n="+orders[k].__str__())
plt.grid()
| [
"numpy.ones_like",
"matplotlib.pyplot.semilogy",
"matplotlib.pyplot.grid",
"pyppluss.segment_models.LC_ringed",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.zeros_like",
"numpy.arange"
] | [((387, 416), 'numpy.linspace', 'np.linspace', (['(-1.1)', '(1.1)', '(2 * n)'], {}), '(-1.1, 1.1, 2 * n)\n', (398, 416), True, 'import numpy as np\n'), ((740, 756), 'numpy.arange', 'np.arange', (['(3)', '(15)'], {}), '(3, 15)\n', (749, 756), True, 'import numpy as np\n'), ((904, 1083), 'pyppluss.segment_models.LC_ringed', 'LC', (['radius_planet', 'radius_in', 'radius_out', 'x_planet', 'y_planet', 'ring_inclination', 'ring_rotation', 'opacity', '(0.0)', '(0.448667)', '(0.0)', '(0.313276)'], {'n_center': 'trueorder', 'n_gress': 'trueorder'}), '(radius_planet, radius_in, radius_out, x_planet, y_planet,\n ring_inclination, ring_rotation, opacity, 0.0, 0.448667, 0.0, 0.313276,\n n_center=trueorder, n_gress=trueorder)\n', (906, 1083), True, 'from pyppluss.segment_models import LC_ringed as LC\n'), ((1078, 1090), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1088, 1090), True, 'import matplotlib.pyplot as plt\n'), ((1484, 1494), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1492, 1494), True, 'import matplotlib.pyplot as plt\n'), ((428, 451), 'numpy.zeros_like', 'np.zeros_like', (['x_planet'], {}), '(x_planet)\n', (441, 451), True, 'import numpy as np\n'), ((476, 498), 'numpy.ones_like', 'np.ones_like', (['x_planet'], {}), '(x_planet)\n', (488, 498), True, 'import numpy as np\n'), ((520, 542), 'numpy.ones_like', 'np.ones_like', (['x_planet'], {}), '(x_planet)\n', (532, 542), True, 'import numpy as np\n'), ((569, 591), 'numpy.ones_like', 'np.ones_like', (['x_planet'], {}), '(x_planet)\n', (581, 591), True, 'import numpy as np\n'), ((1139, 1318), 'pyppluss.segment_models.LC_ringed', 'LC', (['radius_planet', 'radius_in', 'radius_out', 'x_planet', 'y_planet', 'ring_inclination', 'ring_rotation', 'opacity', '(0.0)', '(0.448667)', '(0.0)', '(0.313276)'], {'n_center': 'orders[k]', 'n_gress': 'orders[k]'}), '(radius_planet, radius_in, radius_out, x_planet, y_planet,\n ring_inclination, ring_rotation, opacity, 0.0, 0.448667, 0.0, 0.313276,\n 
n_center=orders[k], n_gress=orders[k])\n', (1141, 1318), True, 'from pyppluss.segment_models import LC_ringed as LC\n'), ((1371, 1407), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['x_planet', 'err[k]', '"""o-"""'], {}), "(x_planet, err[k], 'o-')\n", (1383, 1407), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from builtins import range
from past.utils import old_div
import numpy
import cosmopy
import sys
Ho = 70.0
Om = 0.3
OL = 0.7
dz = 0.2
z = numpy.arange(0.0, 2.1, dz) # .astype('Float')
# Set the morphological parameters
flat = (Om, OL, old_div(Ho, 100.))
c = cosmopy.set(flat)
dl = c.dlum(z)
da = c.dang(z)
age = old_div(c.age(z), 1.e9)
lbt = c.lookback(z)
print(" --------------------------------------------------------------------")
print(" cosmology: Om=%.2f, OL=%.2f, h0=%.2f" % flat)
print("%6s %12s %12s %12s %12s" % ('z', 'dl(z)', 'da(z)', 'age(z)[Gyr]',
'lookbacktime(z)'))
print(" --------------------------------------------------------------------")
for i in range(len(z)):
print("%6.2f %12.5f %12.5f %.6e %.6e" % (z[i], dl[i], da[i], age[i],
lbt[i]))
open = (1., 0., old_div(Ho, 100.))
o = cosmopy.set(open)
dl = o.dlum(z)
da = o.dang(z)
age = old_div(o.age(z), 1.e9)
lbt = o.lookback(z)
print(
" ----------------------------------------------------------------------")
print(" cosmology: Om=%.2f, OL=%.2f, h0=%.2f" % open)
print("%6s %12s %12s %12s %12s" % ('z', 'dl(z)', 'da(z)', 'age(z)[Gyr]',
'lookbacktime(z)'))
print(
" ----------------------------------------------------------------------")
for i in range(len(z)):
print("%6.2f %12.5f %12.5f %.6e %.6e" % (z[i], dl[i], da[i], age[i],
lbt[i]))
| [
"cosmopy.set",
"numpy.arange",
"past.utils.old_div"
] | [((272, 298), 'numpy.arange', 'numpy.arange', (['(0.0)', '(2.1)', 'dz'], {}), '(0.0, 2.1, dz)\n', (284, 298), False, 'import numpy\n'), ((394, 411), 'cosmopy.set', 'cosmopy.set', (['flat'], {}), '(flat)\n', (405, 411), False, 'import cosmopy\n'), ((1026, 1043), 'cosmopy.set', 'cosmopy.set', (['open'], {}), '(open)\n', (1037, 1043), False, 'import cosmopy\n'), ((371, 389), 'past.utils.old_div', 'old_div', (['Ho', '(100.0)'], {}), '(Ho, 100.0)\n', (378, 389), False, 'from past.utils import old_div\n'), ((1003, 1021), 'past.utils.old_div', 'old_div', (['Ho', '(100.0)'], {}), '(Ho, 100.0)\n', (1010, 1021), False, 'from past.utils import old_div\n')] |
import numpy as np
from bumperboats.contact import Contact
class SimplePositionSensor:
    """Samples noisy boat positions from the engine at a fixed period.

    Every ``period`` time units, builds a Contact per boat whose true
    position lies inside the square [min_value, max_value]^2, adding
    Gaussian noise (std ``std``) to the measured position, and forwards
    the contact list to every registered destination.
    """

    def __init__(self, engine, std, period, min_value=0, max_value=600):
        self.engine = engine
        self.std = std
        self.period = period
        self.min_value = min_value
        self.max_value = max_value
        self.destinations = []
        self.elapsed = 0
        self.since = 0
        self.contacts = []

    def add_destination(self, destination):
        """Register an object whose on_data() receives each contact batch."""
        self.destinations.append(destination)

    def noise(self):
        """Return a 2-vector of Gaussian measurement noise."""
        return np.array(np.random.normal(0, self.std, 2))

    def tick(self, dt):
        """Advance time by dt; emit contacts when a full period has passed."""
        self.elapsed += dt
        self.since += dt
        if self.since < self.period:
            return
        lo, hi = self.min_value, self.max_value
        new_contacts = []
        for boat, controller in self.engine.boats:
            px, py = boat.position[0], boat.position[1]
            # Only boats strictly inside the sensed square are reported.
            if not (lo < px < hi and lo < py < hi):
                continue
            new_contacts.append(Contact(
                measurement=np.array([px, py]) + self.noise(),
                actual_position=np.array([px, py]),
                actual_state=np.array([boat.position[0], boat.velocity[0],
                                       boat.acceleration[0], boat.position[1],
                                       boat.velocity[1], boat.acceleration[1]]),
                actual_id=boat.id,
                elapsed=self.elapsed))
        self.contacts = new_contacts
        for destination in self.destinations:
            destination.on_data(contacts=self.contacts, dt=self.since)
        self.since = 0
| [
"numpy.random.normal",
"numpy.array"
] | [((557, 589), 'numpy.random.normal', 'np.random.normal', (['(0)', 'self.std', '(2)'], {}), '(0, self.std, 2)\n', (573, 589), True, 'import numpy as np\n'), ((875, 921), 'numpy.array', 'np.array', (['[boat.position[0], boat.position[1]]'], {}), '([boat.position[0], boat.position[1]])\n', (883, 921), True, 'import numpy as np\n'), ((960, 1091), 'numpy.array', 'np.array', (['[boat.position[0], boat.velocity[0], boat.acceleration[0], boat.position[1],\n boat.velocity[1], boat.acceleration[1]]'], {}), '([boat.position[0], boat.velocity[0], boat.acceleration[0], boat.\n position[1], boat.velocity[1], boat.acceleration[1]])\n', (968, 1091), True, 'import numpy as np\n'), ((772, 818), 'numpy.array', 'np.array', (['[boat.position[0], boat.position[1]]'], {}), '([boat.position[0], boat.position[1]])\n', (780, 818), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
import numpy as np
from keras.utils.generic_utils import Progbar
from six.moves import xrange
class Agent(object):
    """Common base for learning agents.

    Parameters
    ----------
    model : :obj:`Model`
        A learning model, e.g. a neural network or a lookup table.
    memory : :obj:`Memory`
        Storage for the agent's experiences (replay and such).
    """

    def __init__(self, model, memory):
        self.memory = memory
        self.model = model
class DiscreteAgent(Agent):
    """Agent acting in a single discrete action space.

    Parameters
    ----------
    model : :obj:`Model`
        A learning model. Ex: neural network or table
    memory : :obj:`Memory`
        Model's memory for storing experiences for replay and such.
    epsilon : callable
        A rule deciding whether the agent explores or exploits; called with
        no meaningful arguments and expected to return the exploration rate.
        TODO: generalize this to a class that controls if it should explore
        and define custom exploration rules.
    """
    def __init__(self, model, memory, epsilon=None):
        super(DiscreteAgent, self).__init__(model, memory)
        if epsilon is None:
            # Default: a constant 10% exploration rate.
            epsilon = lambda *args: .1
        self.epsilon = epsilon

    def compile(self, *args, **kwargs):
        """Compile the underlying model and reset the replay memory.

        If an ``experience`` keyword is present it seeds the memory's
        consolidated experience store.
        """
        self.model.compile(*args, **kwargs)
        if 'experience' in kwargs:
            experience = kwargs['experience']
        else:
            experience = None
        self.memory.reset(experience)

    # def compile(self, optimizer="sgd", loss="mse", policy_rule="max",
    #             experience=None):
    #     self.model.compile(optimizer, loss, policy_rule)
    #     self.memory.reset(experience)

    def values(self, observation, train=False):
        """Return the model's action-value estimates for ``observation``."""
        return self.model.values(observation, train)

    def max_values(self, observation, train=False):
        """Return the maximum action-value for ``observation``."""
        return self.model.max_values(observation, train)

    def policy(self, observation, train=False):
        """Choose an action: epsilon-greedy when training, greedy otherwise."""
        if train and np.random.rand() <= self.epsilon():
            # Explore: uniformly random action, wrapped in a list to match
            # the model.policy() return shape.
            return [np.random.randint(0, self.num_actions)]
        else:
            return self.model.policy(observation, train)

    def update(self, batch_size=1, exp_batch_size=0, gamma=0.9, callback=None):
        """Sample a batch from memory and perform one learning step.

        Returns the training loss reported by the model.
        """
        inputs, targets, actions = self.get_batch(
            self.model, batch_size=batch_size, exp_batch_size=exp_batch_size,
            gamma=gamma, callback=callback)
        loss = self.model.update(inputs, targets, actions)
        return loss

    @property
    def num_actions(self):
        """Number of discrete actions exposed by the model."""
        return self.model.num_actions

    @property
    def input_shape(self):
        """Observation shape expected by the model."""
        return self.model.input_shape

    def reset(self):
        """Clear the replay memory."""
        self.memory.reset()

    def remember(self, prev_state, action, reward, next_state, game_over):
        """Store one transition in the replay memory."""
        self.memory.remember(prev_state, action, reward,
                             next_state, game_over)

    def get_batch(self, model, batch_size=1, exp_batch_size=0,
                  gamma=0.9, callback=None):
        """Sample a training batch (inputs, targets, actions) from memory."""
        return self.memory.get_batch(model, batch_size, exp_batch_size,
                                     gamma, callback)

    def learn(self, env, epoch=1, batch_size=1, exp_batch_size=0,
              gamma=0.9, reset_memory=False, verbose=1, callbacks=None):
        """Train Agent to play Environment env

        Parameters
        ----------
        env : :obj:`Enviroment`
            The environment the agent learns to play
        epoch : int
            number of complete episodes to play
        batch_size : int
            number of experiences to replay per step
        exp_batch_size : int
            number of experiences to replay from the consolidated
            :attr:`ExperienceReplayexperience.experience`.
        gamma : float
            discount factor
        reset_memory : bool
            if we should restart :attr:`ExperienceReplay.memory` before
            starting the game.
        verbose : int
            controls how much should we print
        callbacks : list of callables
            TODO: Add callback support
            NOTE(review): currently accepted but never invoked.
        """
        print("Learning started!")
        print("[Environment]: {}".format(env.description))
        print("[Model]: {}".format(self.model.description))
        print("[Memory]: {}".format(self.memory.description))
        if reset_memory:
            self.reset()
        progbar = Progbar(epoch)
        for e in xrange(epoch):
            # reset environment on each epoch
            env.reset()
            game_over = False
            loss = 0
            rewards = 0
            # get initial observation, start game
            obs_t = env.observe()
            # Run one full episode
            while not game_over:
                obs_tm1 = obs_t
                action = self.policy(obs_tm1, train=True)
                # apply action, get rewards and new state
                obs_t, reward, game_over = env.update(action)
                rewards += reward
                # store experience
                self.remember(obs_tm1, action, reward, obs_t, game_over)
                # adapt model (one replay/learning step per environment step)
                loss += self.update(batch_size=batch_size,
                                    exp_batch_size=exp_batch_size,
                                    gamma=gamma)
            if verbose == 1:
                progbar.add(1, values=[("loss", loss), ("rewards", rewards)])

    def play(self, env, epoch=1, batch_size=1, visualize=None, verbose=1):
        """Let the trained agent play greedily and optionally record a gif.

        NOTE(review): ``batch_size`` is accepted but unused here, and
        ``loss`` stays 0 (no learning happens during play) -- the progbar's
        "loss" column is therefore always zero.
        """
        print("Free play started!")
        # Accumulate rendered frames as (N, H, W, C) for the optional gif.
        frames = np.zeros((0, ) + env.observe_image().shape[1:])
        frames = frames.transpose(0, 2, 3, 1)
        progbar = Progbar(epoch)
        for e in xrange(epoch):
            # reset environment on each epoch
            env.reset()
            game_over = False
            loss = 0
            rewards = 0
            # get initial observation, start game
            obs_t = env.observe()
            while not game_over:
                obs_tm1 = obs_t
                # get next action (greedy: train=False disables exploration)
                action = self.policy(obs_tm1, train=False)
                # apply action, get rewards and new state
                obs_t, reward, game_over = env.update(action)
                rewards += reward
                frame_t = env.observe_image().transpose(0, 2, 3, 1)
                frames = np.concatenate([frames, frame_t], axis=0)
            if verbose == 1:
                progbar.add(1, values=[("loss", loss), ("rewards", rewards)])
        if visualize:
            from agnez.video import make_gif
            print("Making gif!")
            # Replicate the channel so single-channel frames render as RGB.
            frames = np.repeat(frames, 3, axis=-1)
            make_gif(frames[:visualize['n_frames']],
                     filepath=visualize['filepath'], gray=visualize['gray'], interpolation='none')
            print("See your gif at {}".format(visualize['filepath']))
"numpy.repeat",
"numpy.random.rand",
"agnez.video.make_gif",
"numpy.random.randint",
"six.moves.xrange",
"numpy.concatenate",
"keras.utils.generic_utils.Progbar"
] | [((4258, 4272), 'keras.utils.generic_utils.Progbar', 'Progbar', (['epoch'], {}), '(epoch)\n', (4265, 4272), False, 'from keras.utils.generic_utils import Progbar\n'), ((4291, 4304), 'six.moves.xrange', 'xrange', (['epoch'], {}), '(epoch)\n', (4297, 4304), False, 'from six.moves import xrange\n'), ((5508, 5522), 'keras.utils.generic_utils.Progbar', 'Progbar', (['epoch'], {}), '(epoch)\n', (5515, 5522), False, 'from keras.utils.generic_utils import Progbar\n'), ((5541, 5554), 'six.moves.xrange', 'xrange', (['epoch'], {}), '(epoch)\n', (5547, 5554), False, 'from six.moves import xrange\n'), ((6468, 6497), 'numpy.repeat', 'np.repeat', (['frames', '(3)'], {'axis': '(-1)'}), '(frames, 3, axis=-1)\n', (6477, 6497), True, 'import numpy as np\n'), ((6510, 6632), 'agnez.video.make_gif', 'make_gif', (["frames[:visualize['n_frames']]"], {'filepath': "visualize['filepath']", 'gray': "visualize['gray']", 'interpolation': '"""none"""'}), "(frames[:visualize['n_frames']], filepath=visualize['filepath'],\n gray=visualize['gray'], interpolation='none')\n", (6518, 6632), False, 'from agnez.video import make_gif\n'), ((1897, 1913), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1911, 1913), True, 'import numpy as np\n'), ((1953, 1991), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.num_actions'], {}), '(0, self.num_actions)\n', (1970, 1991), True, 'import numpy as np\n'), ((6195, 6236), 'numpy.concatenate', 'np.concatenate', (['[frames, frame_t]'], {'axis': '(0)'}), '([frames, frame_t], axis=0)\n', (6209, 6236), True, 'import numpy as np\n')] |
"""Unit tests for the dense assembler."""
import numpy as np
import pytest
import bempp.api
from bempp.api import function_space
from bempp.api.operators.boundary import laplace, helmholtz
def test_laplace_single_layer():
    """Check that the dense and FMM assemblers agree for the Laplace
    single-layer operator applied to a random grid function."""
    sphere = bempp.api.shapes.regular_sphere(2)
    dp0_space = function_space(sphere, "DP", 0)
    dense_op = laplace.single_layer(dp0_space, dp0_space, dp0_space, assembler="dense")
    fmm_op = laplace.single_layer(dp0_space, dp0_space, dp0_space, assembler="fmm")
    coeffs = np.random.rand(dp0_space.global_dof_count)
    grid_fun = bempp.api.GridFunction(dp0_space, coefficients=coeffs)
    dense_result = (dense_op * grid_fun).coefficients
    fmm_result = (fmm_op * grid_fun).coefficients
    assert np.allclose(dense_result, fmm_result)
    bempp.api.clear_fmm_cache()
@pytest.mark.parametrize("wavenumber", [2.5])  # , 2.5 + 1j])
def test_helmholtz_single_layer(wavenumber):
    """Check that the dense and FMM assemblers agree for the Helmholtz
    single-layer operator at the given wavenumber."""
    sphere = bempp.api.shapes.regular_sphere(2)
    dp0_space = function_space(sphere, "DP", 0)
    dense_op = helmholtz.single_layer(dp0_space, dp0_space, dp0_space, wavenumber, assembler="dense")
    fmm_op = helmholtz.single_layer(dp0_space, dp0_space, dp0_space, wavenumber, assembler="fmm")
    coeffs = np.random.rand(dp0_space.global_dof_count)
    grid_fun = bempp.api.GridFunction(dp0_space, coefficients=coeffs)
    dense_result = (dense_op * grid_fun).coefficients
    fmm_result = (fmm_op * grid_fun).coefficients
    assert np.allclose(dense_result, fmm_result)
    bempp.api.clear_fmm_cache()
| [
"numpy.allclose",
"bempp.api.operators.boundary.laplace.single_layer",
"numpy.random.rand",
"bempp.api.operators.boundary.helmholtz.single_layer",
"pytest.mark.parametrize",
"bempp.api.function_space"
] | [((732, 776), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""wavenumber"""', '[2.5]'], {}), "('wavenumber', [2.5])\n", (755, 776), False, 'import pytest\n'), ((341, 370), 'bempp.api.function_space', 'function_space', (['grid', '"""DP"""', '(0)'], {}), "(grid, 'DP', 0)\n", (355, 370), False, 'from bempp.api import function_space\n'), ((382, 442), 'bempp.api.operators.boundary.laplace.single_layer', 'laplace.single_layer', (['space', 'space', 'space'], {'assembler': '"""dense"""'}), "(space, space, space, assembler='dense')\n", (402, 442), False, 'from bempp.api.operators.boundary import laplace, helmholtz\n'), ((453, 511), 'bempp.api.operators.boundary.laplace.single_layer', 'laplace.single_layer', (['space', 'space', 'space'], {'assembler': '"""fmm"""'}), "(space, space, space, assembler='fmm')\n", (473, 511), False, 'from bempp.api.operators.boundary import laplace, helmholtz\n'), ((632, 695), 'numpy.allclose', 'np.allclose', (['(op1 * fun).coefficients', '(op2 * fun).coefficients'], {}), '((op1 * fun).coefficients, (op2 * fun).coefficients)\n', (643, 695), True, 'import numpy as np\n'), ((954, 983), 'bempp.api.function_space', 'function_space', (['grid', '"""DP"""', '(0)'], {}), "(grid, 'DP', 0)\n", (968, 983), False, 'from bempp.api import function_space\n'), ((995, 1069), 'bempp.api.operators.boundary.helmholtz.single_layer', 'helmholtz.single_layer', (['space', 'space', 'space', 'wavenumber'], {'assembler': '"""dense"""'}), "(space, space, space, wavenumber, assembler='dense')\n", (1017, 1069), False, 'from bempp.api.operators.boundary import laplace, helmholtz\n'), ((1080, 1152), 'bempp.api.operators.boundary.helmholtz.single_layer', 'helmholtz.single_layer', (['space', 'space', 'space', 'wavenumber'], {'assembler': '"""fmm"""'}), "(space, space, space, wavenumber, assembler='fmm')\n", (1102, 1152), False, 'from bempp.api.operators.boundary import laplace, helmholtz\n'), ((1273, 1336), 'numpy.allclose', 'np.allclose', (['(op1 * 
fun).coefficients', '(op2 * fun).coefficients'], {}), '((op1 * fun).coefficients, (op2 * fun).coefficients)\n', (1284, 1336), True, 'import numpy as np\n'), ((575, 613), 'numpy.random.rand', 'np.random.rand', (['space.global_dof_count'], {}), '(space.global_dof_count)\n', (589, 613), True, 'import numpy as np\n'), ((1216, 1254), 'numpy.random.rand', 'np.random.rand', (['space.global_dof_count'], {}), '(space.global_dof_count)\n', (1230, 1254), True, 'import numpy as np\n')] |
# Plot "requests per second" benchmark results for several OPeNDAP servers.
# NOTE(review): os, sys and matplotlib.ticker appear unused in this script.
import os, sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import mplcyberpunk
# Global plot styling (cyberpunk theme + font setup).
plt.style.use('cyberpunk')
plt.rcParams['font.family'] = 'Roboto'
plt.rcParams['font.weight'] = 'regular'
plt.rcParams['font.size'] = '13'
# Benchmark result files: one float per line (requests/sec per server),
# one file per request type (DAS, DDS, and three DODS payload sizes).
rpsf = ['out/das_1.rps', 'out/dds_1.rps', 'out/dods_1_small.rps', 'out/dods_1_big.rps', 'out/dods_2.rps' ]
data = []
for f in rpsf:
    print("reading ", f, "..")
    with open(f) as fd:
        d = [float(dd) for dd in fd.readlines()]
        data.append(d)
# Shape: (n_request_types, n_servers) — rows follow the order of `rpsf`.
data = np.array(data)
# Bar labels, one per server; column order must match the .rps files.
xlabels = ['Dars', 'Thredds', 'Hyrax' ]
# Color cycle of the active style, reused so each server keeps its color.
cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
# from: https://matplotlib.org/3.2.1/gallery/lines_bars_and_markers/barchart.html#sphx-glr-gallery-lines-bars-and-markers-barchart-py
def autolabel(ax, rects):
    """Attach a text label above each bar in *rects*, displaying its height."""
    for bar in rects:
        h = bar.get_height()
        x_center = bar.get_x() + bar.get_width() / 2
        # Place the label centered above the bar, 3 points up.
        ax.annotate(f'{h}',
                    xy=(x_center, h),
                    xytext=(0, 3),
                    textcoords="offset points",
                    ha='center', va='bottom')
# Common bar transparency for all subplots.
aph = .8
# Figure 1: metadata requests (DAS/DDS); Figure 2: data requests (DODS).
fig, (ax1, ax2) = plt.subplots(1, 2, figsize = (12,8))
fig2, (ax3, ax4, ax5) = plt.subplots(1, 3, figsize = (12,8))
## DAS — row 0 of `data`
r1 = ax1.bar(xlabels, data[0,:], color = cycle, alpha = aph)
ax1.set_title('Metadata (DAS)')
ax1.set_ylabel('Requests / Sec')
autolabel(ax1, r1)
## DDS — row 1
r2 = ax2.bar(xlabels, data[1,:], color = cycle, alpha = aph)
ax2.set_title('Metadata (DDS)')
ax2.set_ylabel('Requests / Sec')
autolabel(ax2, r2)
## DODS1 small — row 2
r3 = ax3.bar(xlabels, data[2,:], color = cycle, alpha = aph)
ax3.set_title('Data (40kb, slicing large dataset)')
ax3.set_ylabel('Requests / Sec')
autolabel(ax3, r3)
## DODS1 big — row 3
r4 = ax4.bar(xlabels, data[3,:], color = cycle, alpha = aph)
ax4.set_title('Data (464mb, entire large dataset)')
ax4.set_ylabel('Requests / Sec')
autolabel(ax4, r4)
## DODS2 big — row 4
r5 = ax5.bar(xlabels, data[4,:], color = cycle, alpha = aph)
ax5.set_title('Data (759kb, entire small dataset)')
ax5.set_ylabel('Requests / Sec')
autolabel(ax5, r5)
# NOTE(review): add_glow_effects() operates on the current figure only,
# so the glow presumably applies to fig2 here — confirm if both need it.
mplcyberpunk.add_glow_effects()
fig.suptitle('Requests per second (2 threads with 10 concurrent connections)')
fig2.suptitle('Requests per second (2 threads with 10 concurrent connections)')
# Save both figures, then show them interactively.
fig.savefig('rps_meta.png')
fig2.savefig('rps_data.png')
plt.show()
| [
"matplotlib.pyplot.style.use",
"numpy.array",
"mplcyberpunk.add_glow_effects",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((123, 149), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""cyberpunk"""'], {}), "('cyberpunk')\n", (136, 149), True, 'import matplotlib.pyplot as plt\n'), ((519, 533), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (527, 533), True, 'import numpy as np\n'), ((1169, 1204), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(12, 8)'}), '(1, 2, figsize=(12, 8))\n', (1181, 1204), True, 'import matplotlib.pyplot as plt\n'), ((1230, 1265), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(12, 8)'}), '(1, 3, figsize=(12, 8))\n', (1242, 1265), True, 'import matplotlib.pyplot as plt\n'), ((2114, 2145), 'mplcyberpunk.add_glow_effects', 'mplcyberpunk.add_glow_effects', ([], {}), '()\n', (2143, 2145), False, 'import mplcyberpunk\n'), ((2363, 2373), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2371, 2373), True, 'import matplotlib.pyplot as plt\n')] |
import argparse
import torch.distributed as dist
import torch.nn.functional as F
from solvers.solver import Solver
import torch.utils.data
from torch.utils.tensorboard import SummaryWriter
import os
import glob
import yaml
import numpy as np
import random
import time
import tqdm
import math
import test # import test.py to get mAP after each epoch
from models.yolo import Model
from utils import utils
from utils import torch_utils
from utils import model_utils
from utils import anchor_utils
from utils import visual_utils
from datasets.dataset_reader import create_dataloader
from preprocess.data_preprocess import TrainAugmentation, TestTransform
from models import losses
from utils.ParamList import ParamList
wdir = 'weights' + os.sep # weights dir
os.makedirs(wdir, exist_ok=True)
last = 'model3d_5m_last_transconv_11_25'
best = 'model3d_5m_best_transconv_11_25'
config_path = './configs/train_config.yaml'
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
mixed_precision = True
try: # Mixed precision training https://github.com/NVIDIA/apex
from apex import amp
except:
print('Apex recommended for faster mixed precision training: https://github.com/NVIDIA/apex')
mixed_precision = False # not installed
def train(config):
    """Run the full training loop for the YOLO-3D model.

    Builds the train/test dataloaders, model, solver and loss from ``config``,
    optionally restores a checkpoint, then trains for ``config['epochs']``
    epochs with periodic evaluation, TensorBoard logging and checkpointing.

    Relies on the module-level globals ``device``, ``tb_writer``, ``amp``,
    ``last`` and ``best`` being set up by the ``__main__`` block.

    :param config: merged configuration dict (CLI options + yaml configs).
    :return: tuple of the last evaluation results
        (P, R, mAP, F1, val GIoU, val Objectness, val Classification).
    """
    utils.init_seeds(1)
    results_file = os.path.join(config['logdir'], 'results.txt')
    # Remove previous results
    for f in glob.glob(os.path.join(config['logdir'], 'train_batch*.jpg')) + glob.glob(results_file):
        os.remove(f)
    epochs = config['epochs']  # 300
    batch_size = config['batch_size']  # 64
    weights = config['weights']  # initial training weights
    imgsz, imgsz_test = config['img_size']
    strides = config['detect_strides']
    num_classes = config['num_classes']
    # Restrict trainable scopes and zero out unrelated loss terms when
    # training only the 3D (or only the 2D) heads.
    if config['only_3d']:
        config['notest'] = True
        config['include_scopes'] = ['model.24.bbox3d_headers']
        config['giou'] = 0.
        config['obj'] = 0.
        config['cls'] = 0.
    elif config['only_2d']:
        config['exclude_scopes'] = ['model.24.bbox3d_headers']
        config['conf'] = 0.
        config['orient'] = 0.
        config['dim'] = 0.
    config['cls'] *= num_classes / 80.  # scale coco-tuned config['cls'] to current dataset
    gs = int(max(strides))  # grid size: image dims must be multiples of this
    # dataset
    with open(config['data']) as f:
        data_dict = yaml.load(f, Loader=yaml.FullLoader)  # model dict
    dataset_path = data_dict['dataset_path']
    # Trainloader
    test_cfg = {}
    test_cfg.update(config)
    # BUG FIX: the original read the module-global `cfg` here instead of the
    # `config` parameter, silently coupling this function to the global state.
    dataloader, dataset = create_dataloader(dataset_path,
                                            config,
                                            transform=TrainAugmentation(config['img_size'][0], mean=config['brg_mean']),
                                            is_training=True)
    mlc = np.concatenate(dataset.labels, 0)[:, 0].max()  # max label class
    assert mlc < num_classes, \
        'Label class %g exceeds nc=%g in %s. Correct your labels or your model.' % (mlc, num_classes, config['cfg'])
    # Testloader: rectangular resize, no mosaic augmentation.
    test_cfg['is_rect'] = True
    test_cfg['is_mosaic'] = False
    testloader = create_dataloader(dataset_path,
                                   test_cfg,
                                   transform=TestTransform(config['img_size'][0], mean=config['brg_mean']),
                                   is_training=False,
                                   split='test')[0]
    # Create model
    model = Model(config).to(device)
    nb = len(dataloader)  # number of batches
    max_step_burn_in = max(3 * nb, 1e3)  # burn-in iterations, max(3 epochs, 1k iterations)
    solver = Solver(model, config, max_steps_burn_in=max_step_burn_in, apex=amp)
    losser = losses.YoloLoss(model)
    # Load Model
    start_epoch, best_fitness = 0, 0.0
    checkpointer = model_utils.CheckPointer(model, solver, save_dir='./weights', save_to_disk=True, device=device)
    if weights.endswith('.pt'):  # pytorch format
        ckpt = checkpointer.load(weights, use_latest=False, load_solver=(not config['resume']))
        # load results
        if ckpt.get('training_results') is not None:
            with open(results_file, 'w') as file:
                file.write(ckpt['training_results'])  # write results.txt
        if not config['resume']:
            start_epoch = ckpt['epoch'] + 1
            best_fitness = ckpt['best_fitness']
        del ckpt
    else:
        solver.build_optim_and_scheduler()
    if tb_writer:
        # Class frequency
        labels = np.concatenate(dataset.labels, 0)
        c = torch.tensor(labels[:, 0])  # classes
        visual_utils.plot_labels(labels, config['logdir'])
        tb_writer.add_histogram('classes', c, 0)
    # Check anchors
    if not config['noautoanchor']:
        anchor_utils.check_anchors(dataset, model=model, thr=config['anchor_t'], imgsz=imgsz)
    # Start training
    t0 = time.time()
    results = (0, 0, 0, 0, 0, 0, 0)  # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
    print('Image sizes %g train, %g test' % (imgsz, imgsz_test))
    print('Using %g dataloader workers' % dataloader.num_workers)
    print('Starting training for %g epochs...' % epochs)
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        model.train()
        mloss = torch.zeros(7, device=device)  # mean losses
        print(('\n' + '%10s' * 12) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'conf', 'orient', 'dim', 'total',
                                      'targets', 'img_size', 'lr'))
        pbar = tqdm.tqdm(enumerate(dataloader), total=nb)  # progress bar
        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
            targets.delete_by_mask()
            targets.to_float32()
            # Keep a full copy for the loss; flatten a (img_id, class, bbox)
            # tensor for plotting/logging below.
            targ = ParamList(targets.size, True)
            targ.copy_from(targets)
            img_id = targets.get_field('img_id')
            classes = targets.get_field('class')
            bboxes = targets.get_field('bbox')
            targets = torch.cat([img_id.unsqueeze(-1), classes.unsqueeze(-1), bboxes], dim=-1)
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device).float() / 1.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0
            solver.update(epoch)
            # Multi-scale: randomly rescale the batch to a gs-multiple size.
            if config['multi_scale']:
                sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs  # size
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
            # Forward
            pred = model(imgs)
            # Loss
            loss, loss_items = losser(pred, targ)
            if not torch.isfinite(loss):
                # Abort on NaN/Inf loss rather than corrupting the weights.
                print('WARNING: non-finite loss, ending training ', loss_items)
                return results
            solver.optimizer_step(loss)
            # Print: running mean of the loss components for the progress bar.
            mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
            mem = '%.3gG' % (torch.cuda.memory_cached() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
            s = ('%10s' * 2 + '%10.4g' * 10) % (
                '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1], solver.learn_rate)
            pbar.set_description(s)
            # Plot the first few batches for visual sanity checking.
            if ni < 3:
                f = os.path.join(config['logdir'], 'train_batch%g.jpg' % ni)  # filename
                result = visual_utils.plot_images(images=imgs, targets=targets, paths=paths, fname=f)
                if tb_writer and result is not None:
                    tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
            # end batch ============================================================================================
        solver.scheduler_step()
        # mAP
        solver.ema.update_attr(model)
        final_epoch = epoch + 1 == epochs
        if not config['notest'] or final_epoch:  # Calculate mAP
            results, maps, times = test.test(config['data'],
                                             batch_size=batch_size,
                                             imgsz=imgsz_test,
                                             save_json=final_epoch and config['data'].endswith(os.sep + 'kitti.yaml'),
                                             model=solver.ema.model,
                                             logdir=config['logdir'],
                                             dataloader=testloader)
        # Write (fix: results_file is already a full path; no join needed)
        with open(results_file, 'a') as f:
            f.write(s + '%10.4g' * 7 % results + '\n')  # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
        # Tensorboard
        if tb_writer:
            tags = ['train/giou_loss', 'train/obj_loss', 'train/cls_loss',
                    'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/F1',
                    'val/giou_loss', 'val/obj_loss', 'val/cls_loss']
            for x, tag in zip(list(mloss[:-1]) + list(results), tags):
                tb_writer.add_scalar(tag, x, epoch)
        # Update best mAP
        fi = utils.fitness(np.array(results).reshape(1, -1))  # fitness_i = weighted combination of [P, R, mAP, F1]
        if fi > best_fitness:
            best_fitness = fi
        # Save model
        save = (not config['nosave']) or final_epoch
        if save:
            with open(results_file, 'r') as f:  # create checkpoint
                ckpt = {'epoch': epoch,
                        'best_fitness': best_fitness,
                        'training_results': f.read()}
            # Save last, best and delete
            checkpointer.save(last, **ckpt)
            if (best_fitness == fi) and not final_epoch:
                checkpointer.save(best, **ckpt)
            del ckpt
        # end epoch =================================================================================================
    # end training
    print('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
    torch.cuda.empty_cache()
    return results
if __name__ == '__main__':
    # Command-line interface: every flag ends up in the merged `cfg` dict.
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', type=int, default=300)
    parser.add_argument('--batch-size', type=int, default=8)
    parser.add_argument('--cfg', type=str, default='models/configs/yolo3d_5s.yaml', help='*.yaml path')
    parser.add_argument('--data', type=str, default='data/coco_tl.yaml', help='*.data path')
    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='train,test sizes')
    parser.add_argument('--rect', action='store_true', help='rectangular training')
    parser.add_argument('--resume', action='store_true', help='resume training from last.pt')
    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
    parser.add_argument('--notest', action='store_true', help='only test final epoch')
    parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
    parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
    parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
    parser.add_argument('--weights', type=str, default='', help='initial weights path')
    parser.add_argument('--name', default='', help='renames results.txt to results_name.txt if supplied')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--adam', action='store_true', help='use adam optimizer')
    parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%')
    parser.add_argument('--local-rank', type=int, default=0, help='local rank')
    parser.add_argument('--exclude-scopes', nargs='+', type=str, default=[],
                        help='do not train the params in exclude_scopes')
    parser.add_argument('--include-scopes', nargs='+', type=str, default=[],
                        help='only train the params in include_scopes')
    # NOTE(review): this help string looks copy-pasted from --exclude-scopes.
    parser.add_argument('--logdir', type=str, default='./runs', help='do not train the params in exclude_scopes')
    parser.add_argument('--is-mosaic', action='store_true', help='load image by applying mosaic')
    parser.add_argument('--is-rect', action='store_true', help='resize image apply rect mode not square mode')
    parser.add_argument('--only-3d', action='store_true', help='only train 3d')
    parser.add_argument('--only-2d', action='store_true', help='only train 2d, that is, excluding 3d')
    opt = parser.parse_args()
    # opt.weights = last if opt.resume else opt.weights
    opt.cfg = utils.check_file(opt.cfg)  # check file
    opt.data = utils.check_file(opt.data)  # check file
    print(opt)
    # Build the merged configuration: base yaml < CLI options < model yaml < data yaml.
    cfg = None
    with open(config_path) as f:
        cfg = yaml.load(f, Loader=yaml.FullLoader)
    cfg.update(opt.__dict__)
    with open(opt.cfg) as f:
        model_cfg = yaml.load(f, Loader=yaml.FullLoader)  # model config
    cfg.update(model_cfg)
    # dataset
    with open(cfg['data']) as f:
        data_cfg = yaml.load(f, Loader=yaml.FullLoader)  # data config
    cfg.update(data_cfg)
    # Module-level globals consumed by train(): device, mixed_precision, tb_writer.
    device = torch_utils.select_device(opt.device, apex=mixed_precision, batch_size=opt.batch_size)
    if device.type == 'cpu':
        mixed_precision = False
    # Train
    tb_writer = SummaryWriter(comment=opt.name)
    print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/')
    # assert cfg is None, 'Please check config for training!'
    train(cfg)
| [
"utils.visual_utils.plot_images",
"yaml.load",
"models.yolo.Model",
"numpy.array",
"utils.ParamList.ParamList",
"torch.nn.functional.interpolate",
"utils.visual_utils.plot_labels",
"os.remove",
"torch.utils.tensorboard.SummaryWriter",
"argparse.ArgumentParser",
"preprocess.data_preprocess.TrainA... | [((758, 790), 'os.makedirs', 'os.makedirs', (['wdir'], {'exist_ok': '(True)'}), '(wdir, exist_ok=True)\n', (769, 790), False, 'import os\n'), ((1248, 1267), 'utils.utils.init_seeds', 'utils.init_seeds', (['(1)'], {}), '(1)\n', (1264, 1267), False, 'from utils import utils\n'), ((1287, 1332), 'os.path.join', 'os.path.join', (["config['logdir']", '"""results.txt"""'], {}), "(config['logdir'], 'results.txt')\n", (1299, 1332), False, 'import os\n'), ((3591, 3658), 'solvers.solver.Solver', 'Solver', (['model', 'config'], {'max_steps_burn_in': 'max_step_burn_in', 'apex': 'amp'}), '(model, config, max_steps_burn_in=max_step_burn_in, apex=amp)\n', (3597, 3658), False, 'from solvers.solver import Solver\n'), ((3672, 3694), 'models.losses.YoloLoss', 'losses.YoloLoss', (['model'], {}), '(model)\n', (3687, 3694), False, 'from models import losses\n'), ((3770, 3870), 'utils.model_utils.CheckPointer', 'model_utils.CheckPointer', (['model', 'solver'], {'save_dir': '"""./weights"""', 'save_to_disk': '(True)', 'device': 'device'}), "(model, solver, save_dir='./weights', save_to_disk=\n True, device=device)\n", (3794, 3870), False, 'from utils import model_utils\n'), ((4838, 4849), 'time.time', 'time.time', ([], {}), '()\n', (4847, 4849), False, 'import time\n'), ((10546, 10571), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10569, 10571), False, 'import argparse\n'), ((13078, 13103), 'utils.utils.check_file', 'utils.check_file', (['opt.cfg'], {}), '(opt.cfg)\n', (13094, 13103), False, 'from utils import utils\n'), ((13133, 13159), 'utils.utils.check_file', 'utils.check_file', (['opt.data'], {}), '(opt.data)\n', (13149, 13159), False, 'from utils import utils\n'), ((13615, 13706), 'utils.torch_utils.select_device', 'torch_utils.select_device', (['opt.device'], {'apex': 'mixed_precision', 'batch_size': 'opt.batch_size'}), '(opt.device, apex=mixed_precision, batch_size=opt.\n batch_size)\n', (13640, 13706), False, 
'from utils import torch_utils\n'), ((13792, 13823), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'comment': 'opt.name'}), '(comment=opt.name)\n', (13805, 13823), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((1440, 1463), 'glob.glob', 'glob.glob', (['results_file'], {}), '(results_file)\n', (1449, 1463), False, 'import glob\n'), ((1473, 1485), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (1482, 1485), False, 'import os\n'), ((2320, 2356), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (2329, 2356), False, 'import yaml\n'), ((4465, 4498), 'numpy.concatenate', 'np.concatenate', (['dataset.labels', '(0)'], {}), '(dataset.labels, 0)\n', (4479, 4498), True, 'import numpy as np\n'), ((4557, 4607), 'utils.visual_utils.plot_labels', 'visual_utils.plot_labels', (['labels', "config['logdir']"], {}), "(labels, config['logdir'])\n", (4581, 4607), False, 'from utils import visual_utils\n'), ((4721, 4810), 'utils.anchor_utils.check_anchors', 'anchor_utils.check_anchors', (['dataset'], {'model': 'model', 'thr': "config['anchor_t']", 'imgsz': 'imgsz'}), "(dataset, model=model, thr=config['anchor_t'],\n imgsz=imgsz)\n", (4747, 4810), False, 'from utils import anchor_utils\n'), ((13251, 13287), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (13260, 13287), False, 'import yaml\n'), ((13371, 13407), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (13380, 13407), False, 'import yaml\n'), ((13521, 13557), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (13530, 13557), False, 'import yaml\n'), ((1386, 1436), 'os.path.join', 'os.path.join', (["config['logdir']", '"""train_batch*.jpg"""'], {}), "(config['logdir'], 'train_batch*.jpg')\n", (1398, 1436), False, 'import os\n'), ((2645, 2707), 'preprocess.data_preprocess.TrainAugmentation', 
'TrainAugmentation', (["cfg['img_size'][0]"], {'mean': "config['brg_mean']"}), "(cfg['img_size'][0], mean=config['brg_mean'])\n", (2662, 2707), False, 'from preprocess.data_preprocess import TrainAugmentation, TestTransform\n'), ((3415, 3428), 'models.yolo.Model', 'Model', (['config'], {}), '(config)\n', (3420, 3428), False, 'from models.yolo import Model\n'), ((5821, 5850), 'utils.ParamList.ParamList', 'ParamList', (['targets.size', '(True)'], {}), '(targets.size, True)\n', (5830, 5850), False, 'from utils.ParamList import ParamList\n'), ((2781, 2814), 'numpy.concatenate', 'np.concatenate', (['dataset.labels', '(0)'], {}), '(dataset.labels, 0)\n', (2795, 2814), True, 'import numpy as np\n'), ((3217, 3275), 'preprocess.data_preprocess.TestTransform', 'TestTransform', (["cfg['img_size'][0]"], {'mean': "config['brg_mean']"}), "(cfg['img_size'][0], mean=config['brg_mean'])\n", (3230, 3275), False, 'from preprocess.data_preprocess import TrainAugmentation, TestTransform\n'), ((7686, 7742), 'os.path.join', 'os.path.join', (["config['logdir']", "('train_batch%g.jpg' % ni)"], {}), "(config['logdir'], 'train_batch%g.jpg' % ni)\n", (7698, 7742), False, 'import os\n'), ((7780, 7856), 'utils.visual_utils.plot_images', 'visual_utils.plot_images', ([], {'images': 'imgs', 'targets': 'targets', 'paths': 'paths', 'fname': 'f'}), '(images=imgs, targets=targets, paths=paths, fname=f)\n', (7804, 7856), False, 'from utils import visual_utils\n'), ((8944, 8970), 'os.path.join', 'os.path.join', (['results_file'], {}), '(results_file)\n', (8956, 8970), False, 'import os\n'), ((6721, 6787), 'torch.nn.functional.interpolate', 'F.interpolate', (['imgs'], {'size': 'ns', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(imgs, size=ns, mode='bilinear', align_corners=False)\n", (6734, 6787), True, 'import torch.nn.functional as F\n'), ((9543, 9560), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (9551, 9560), True, 'import numpy as np\n'), ((6418, 6465), 
'random.randrange', 'random.randrange', (['(imgsz * 0.5)', '(imgsz * 1.5 + gs)'], {}), '(imgsz * 0.5, imgsz * 1.5 + gs)\n', (6434, 6465), False, 'import random\n'), ((10428, 10439), 'time.time', 'time.time', ([], {}), '()\n', (10437, 10439), False, 'import time\n'), ((6601, 6623), 'math.ceil', 'math.ceil', (['(x * sf / gs)'], {}), '(x * sf / gs)\n', (6610, 6623), False, 'import math\n')] |
# -*- coding: utf-8 -*-
"""
This module contains the core of the optimization model, containing
the definiton of problem (variables,constraints,objective function,...)
in CVXPY for planning and operation modes.
"""
import pandas as pd
import cvxpy as cp
import numpy as np
from collections import namedtuple
from hypatia.utility.utility import (
invcosts,
invcosts_annuity,
salvage_factor,
newcap_accumulated,
line_newcap_accumulated,
_calc_variable_overall,
_calc_production_overall,
fixcosts,
line_varcost,
decomcap,
line_decomcap,
available_resource_prod,
annual_activity,
storage_state_of_charge,
get_regions_with_storage,
storage_max_flow,
)
import logging
# Module-level logger for this optimization-model module.
logger = logging.getLogger(__name__)
# Names of the BuildModel attributes collected into the result namedtuple
# after a successful solve, in both Planning and Operation modes.
RESULTS = [
    "variables",
    "cost_fix",
    "cost_variable",
    "totalcapacity",
    "cost_fix_tax",
    "cost_fix_sub",
    "emission_cost",
    "CO2_equivalent",
    "demand",
]
# Additional result attributes that only exist in Planning mode
# (investment/decommissioning related quantities).
PLANNING_RESULTS = [
    "cost_decom",
    "decommissioned_capacity",
    "cost_inv",
    "salvage_inv",
    "cost_inv_tax",
    "cost_inv_sub",
]
class BuildModel:
"""Class that builds the variables and equations of the model
Attributes
-----------
sets:
The instance of the Readsets class for delivering the structural inputs
including regions, technologies, years, timesteps, mapping tables
variables: dict
a nested dictionary of all the decision variables including the new capacity, production
by each technology, use (consumption) by each technology, imports and exports
"""
    def __init__(self, sets):
        """Build the full CVXPY model for the given structural inputs.

        Creates all decision variables, then — depending on ``sets.mode``
        ("Planning" or "Operation") and the number of regions — calls the
        variable-calculation, constraint and objective methods in order.
        The call order matters: constraints and objectives reference the
        intermediate quantities computed by the ``_calc_variable_*`` methods.

        :param sets: instance of the Readsets class delivering regions,
            technologies, years, timesteps and mapping tables.
        """
        self.sets = sets
        # Accumulator for all CVXPY constraints; filled by the _constr_* methods.
        self.constr = []
        timeslice_fraction = self.sets.timeslice_fraction
        # Reshape to a column vector so it broadcasts over timestep rows
        # (an int means a single-timeslice model and needs no reshaping).
        if not isinstance(timeslice_fraction, int):
            timeslice_fraction.shape = (len(self.sets.time_steps), 1)
        self.timeslice_fraction = timeslice_fraction
        self._set_variables()
        # calling the methods based on the defined mode by the user
        if self.sets.mode == "Planning":
            self._calc_variable_planning()
            self._balance_()
            self._constr_totalcapacity_regional()
            self._constr_newcapacity_regional()
            self._constr_balance()
            self._constr_resource_tech_availability()
            self._constr_tech_efficiency()
            self._constr_prod_annual()
            self._constr_emission_cap()
            self._calc_variable_storage_SOC()
            self._constr_storage_max_min_charge()
            self._constr_storage_max_flow_in_out()
            self._set_regional_objective_planning()
            # Multi-node models additionally need inter-regional line
            # capacities, trade constraints and a combined objective.
            if len(self.sets.regions) == 1:
                self._set_final_objective_singlenode()
            elif len(self.sets.regions) > 1:
                self._calc_variable_planning_line()
                self._constr_totalcapacity_line()
                self._constr_totalcapacity_overall()
                self._constr_newcapacity_overall()
                self._constr_line_availability()
                self._constr_trade_balance()
                self._constr_prod_annual_overall()
                self._set_lines_objective_planning()
                self._set_final_objective_multinode()
        elif self.sets.mode == "Operation":
            # Operation mode: capacities are fixed, so no new-capacity or
            # decommissioning constraints are built.
            self._calc_variable_operation()
            self._balance_()
            self._constr_balance()
            self._constr_resource_tech_availability()
            self._constr_tech_efficiency()
            self._constr_prod_annual()
            self._constr_emission_cap()
            self._calc_variable_storage_SOC()
            self._constr_storage_max_min_charge()
            self._constr_storage_max_flow_in_out()
            self._set_regional_objective_operation()
            if len(self.sets.regions) == 1:
                self._set_final_objective_singlenode()
            elif len(self.sets.regions) > 1:
                self._calc_variable_operation_line()
                self._constr_line_availability()
                self._constr_trade_balance()
                self._constr_prod_annual_overall()
                self._set_lines_objective_operation()
                self._set_final_objective_multinode()
def _solve(self, verbosity, solver, **kwargs):
"""
Creates a CVXPY problem instance, if the output status is optimal,
returns the results to the interface
"""
objective = cp.Minimize(self.global_objective)
problem = cp.Problem(objective, self.constr)
problem.solve(solver=solver, verbose=verbosity, **kwargs)
if problem.status == "optimal":
# Reshape the demand
self.demand = {
reg: self.sets.data[reg]["demand"] for reg in self.sets.regions
}
res = RESULTS.copy()
to_add = []
if self.sets.multi_node:
if self.sets.mode == "Planning":
to_add = [
"line_totalcapacity",
"line_decommissioned_capacity",
"cost_inv_line",
"cost_fix_line",
"cost_decom_line",
"cost_variable_line",
]
else:
to_add = [
"line_totalcapacity",
"cost_fix_line",
"cost_variable_line",
]
if self.sets.mode == "Planning":
to_add.extend(PLANNING_RESULTS)
res.extend(to_add)
result_collector = namedtuple("result", res)
results = result_collector(
**{result: getattr(self, result) for result in res}
)
return results
else:
print(
"No solution found and no result will be uploaded to the model",
"critical",
)
    def _set_variables(self):
        """
        Creates the matrixed-based variables of the problem
        in a nested dict format for each region and each technology category.

        Populates ``self.variables`` with nonnegative CVXPY matrix variables:

        * ``productionbyTechnology`` / ``usebyTechnology`` — per region, per
          technology category; rows are (year, timestep) pairs, columns are
          the technologies of that category.
        * ``line_export`` / ``line_import`` — only for multi-node models;
          per region pair, columns are the global carriers.
        * ``newcapacity`` (and ``line_newcapacity`` for multi-node) — only in
          Planning mode; rows are years.
        """
        technology_prod = {}
        technology_use = {}
        new_capacity = {}
        line_newcapacity = {}
        line_import = {}
        line_export = {}
        for reg in self.sets.regions:
            regional_prod = {}
            regional_use = {}
            for key in self.sets.Technologies[reg].keys():
                # Demand technologies neither produce nor consume carriers;
                # Supply technologies produce but do not consume.
                if key != "Demand":
                    regional_prod[key] = cp.Variable(
                        shape=(
                            len(self.sets.main_years) * len(self.sets.time_steps),
                            len(self.sets.Technologies[reg][key]),
                        ),
                        nonneg=True,
                    )
                if key != "Demand" and key != "Supply":
                    regional_use[key] = cp.Variable(
                        shape=(
                            len(self.sets.main_years) * len(self.sets.time_steps),
                            len(self.sets.Technologies[reg][key]),
                        ),
                        nonneg=True,
                    )
            technology_prod[reg] = regional_prod
            technology_use[reg] = regional_use
            # One import and one export variable per (region, other-region)
            # pair, covering all global carriers.
            export_ = {}
            import_ = {}
            for reg_ in self.sets.regions:
                if reg_ != reg:
                    export_[reg_] = cp.Variable(
                        shape=(
                            len(self.sets.main_years) * len(self.sets.time_steps),
                            len(self.sets.glob_mapping["Carriers_glob"].index),
                        ),
                        nonneg=True,
                    )
                    import_[reg_] = cp.Variable(
                        shape=(
                            len(self.sets.main_years) * len(self.sets.time_steps),
                            len(self.sets.glob_mapping["Carriers_glob"].index),
                        ),
                        nonneg=True,
                    )
            line_export[reg] = export_
            line_import[reg] = import_
        self.variables = {
            "productionbyTechnology": technology_prod,
            "usebyTechnology": technology_use,
        }
        # Trade variables only exist in multi-node models.
        if len(self.sets.regions) > 1:
            self.variables.update(
                {"line_export": line_export, "line_import": line_import,}
            )
        # New-capacity variables only exist in Planning mode
        # (capacities are fixed inputs in Operation mode).
        if self.sets.mode == "Planning":
            for reg in self.sets.regions:
                regional_newcap = {}
                for key in self.sets.Technologies[reg].keys():
                    if key != "Demand":
                        regional_newcap[key] = cp.Variable(
                            shape=(
                                len(self.sets.main_years),
                                len(self.sets.Technologies[reg][key]),
                            ),
                            nonneg=True,
                        )
                new_capacity[reg] = regional_newcap
                self.variables.update({"newcapacity": new_capacity})
                if len(self.sets.regions) > 1:
                    for line in self.sets.lines_list:
                        line_newcapacity[line] = cp.Variable(
                            shape=(
                                len(self.sets.main_years),
                                len(self.sets.glob_mapping["Carriers_glob"].index),
                            ),
                            nonneg=True,
                        )
                    self.variables.update({"line_newcapacity": line_newcapacity})
    def _calc_variable_planning(self):
        """
        Calculates all the cost components of the objective function and the
        intermediate variables in the planning mode, for each region.

        For every region and technology category (except "Demand", which has no
        new-capacity variable) this builds, in order: investment costs with
        taxes/subsidies, salvage value, accumulated new capacity, total
        capacity, fixed costs with taxes/subsidies, decommissioned capacity and
        its cost, annual production, variable costs, CO2 emissions and their
        cost (non-transmission, non-storage only), and the annuity of the
        investment cost.  Results are stored per region in the corresponding
        ``self.*`` dictionaries.
        """
        self.cost_inv = {}
        self.cost_inv_tax = {}
        self.cost_inv_sub = {}
        self.cost_inv_fvalue = {}
        self.salvage_inv = {}
        # NOTE(review): self.accumulated_newcapacity is initialized here but is
        # never populated per region below (only the local
        # accumulated_newcapacity_regional is filled) — confirm whether any
        # caller relies on this attribute.
        self.accumulated_newcapacity = {}
        self.totalcapacity = {}
        self.cost_fix = {}
        self.cost_fix_tax = {}
        self.cost_fix_sub = {}
        self.decommissioned_capacity = {}
        self.cost_decom = {}
        self.cost_variable = {}
        self.CO2_equivalent = {}
        self.emission_cost = {}
        self.production_annual = {}
        for reg in self.sets.regions:
            cost_inv_regional = {}
            cost_inv_tax_regional = {}
            cost_inv_sub_regional = {}
            cost_fvalue_regional = {}
            salvage_inv_regional = {}
            accumulated_newcapacity_regional = {}
            totalcapacity_regional = {}
            cost_fix_regional = {}
            cost_fix_tax_regional = {}
            cost_fix_Sub_regional = {}
            decomcapacity_regional = {}
            cost_decom_regional = {}
            cost_variable_regional = {}
            CO2_equivalent_regional = {}
            emission_cost_regional = {}
            production_annual_regional = {}
            # "newcapacity" holds one variable per non-Demand category.
            for key in self.variables["newcapacity"][reg].keys():
                # Investment cost plus its tax and subsidy components.
                (
                    cost_inv_regional[key],
                    cost_inv_tax_regional[key],
                    cost_inv_sub_regional[key],
                ) = invcosts(
                    self.sets.data[reg]["tech_inv"][key],
                    self.variables["newcapacity"][reg][key],
                    self.sets.data[reg]["inv_taxsub"]["Tax"][key],
                    self.sets.data[reg]["inv_taxsub"]["Sub"][key],
                )
                # Salvage value of investments still alive at the horizon end.
                salvage_inv_regional[key] = cp.multiply(
                    salvage_factor(
                        self.sets.main_years,
                        self.sets.Technologies[reg][key],
                        self.sets.data[reg]["tech_lifetime"].loc[:, key],
                        self.sets.data[reg]["interest_rate"].loc[:, key],
                        self.sets.data[reg]["discount_rate"],
                        self.sets.data[reg]["economic_lifetime"].loc[:, key],
                    ),
                    cost_inv_regional[key],
                )
                # New capacity still operational in each year, given lifetimes.
                accumulated_newcapacity_regional[key] = newcap_accumulated(
                    self.variables["newcapacity"][reg][key],
                    self.sets.Technologies[reg][key],
                    self.sets.main_years,
                    self.sets.data[reg]["tech_lifetime"].loc[:, key],
                )
                # Total capacity = accumulated new + residual (pre-existing).
                totalcapacity_regional[key] = (
                    accumulated_newcapacity_regional[key]
                    + self.sets.data[reg]["tech_residual_cap"].loc[:, key]
                )
                # Fixed O&M cost plus its tax and subsidy components.
                (
                    cost_fix_regional[key],
                    cost_fix_tax_regional[key],
                    cost_fix_Sub_regional[key],
                ) = fixcosts(
                    self.sets.data[reg]["tech_fixed_cost"][key],
                    totalcapacity_regional[key],
                    self.sets.data[reg]["fix_taxsub"]["Tax"][key],
                    self.sets.data[reg]["fix_taxsub"]["Sub"][key],
                )
                # Capacity retired in each year and its decommissioning cost.
                decomcapacity_regional[key] = decomcap(
                    self.variables["newcapacity"][reg][key],
                    self.sets.Technologies[reg][key],
                    self.sets.main_years,
                    self.sets.data[reg]["tech_lifetime"].loc[:, key],
                )
                cost_decom_regional[key] = cp.multiply(
                    self.sets.data[reg]["tech_decom_cost"].loc[:, key].values,
                    decomcapacity_regional[key],
                )
                # Hourly production aggregated to annual values.
                production_annual_regional[key] = annual_activity(
                    self.variables["productionbyTechnology"][reg][key],
                    self.sets.main_years,
                    self.sets.time_steps,
                )
                cost_variable_regional[key] = cp.multiply(
                    production_annual_regional[key],
                    self.sets.data[reg]["tech_var_cost"].loc[:, key],
                )
                # Transmission and storage carry no direct emissions.
                if key != "Transmission" and key != "Storage":
                    CO2_equivalent_regional[key] = cp.multiply(
                        production_annual_regional[key],
                        self.sets.data[reg]["specific_emission"].loc[:, key],
                    )
                    emission_cost_regional[key] = cp.multiply(
                        CO2_equivalent_regional[key],
                        self.sets.data[reg]["carbon_tax"].loc[:, key],
                    )
                # Annuity (future value) of the investment cost.
                cost_fvalue_regional[key] = invcosts_annuity(
                    cost_inv_regional[key],
                    self.sets.data[reg]["interest_rate"].loc[:, key],
                    self.sets.data[reg]["economic_lifetime"].loc[:, key],
                    self.sets.Technologies[reg][key],
                    self.sets.main_years,
                    self.sets.data[reg]["discount_rate"],
                )
            self.cost_inv[reg] = cost_inv_regional
            self.cost_inv_tax[reg] = cost_inv_tax_regional
            self.cost_inv_sub[reg] = cost_inv_sub_regional
            self.salvage_inv[reg] = salvage_inv_regional
            self.totalcapacity[reg] = totalcapacity_regional
            self.cost_fix[reg] = cost_fix_regional
            self.cost_fix_tax[reg] = cost_fix_tax_regional
            self.cost_fix_sub[reg] = cost_fix_Sub_regional
            self.decommissioned_capacity[reg] = decomcapacity_regional
            self.cost_decom[reg] = cost_decom_regional
            self.cost_variable[reg] = cost_variable_regional
            self.CO2_equivalent[reg] = CO2_equivalent_regional
            self.emission_cost[reg] = emission_cost_regional
            self.cost_inv_fvalue[reg] = cost_fvalue_regional
            self.production_annual[reg] = production_annual_regional
    def _calc_variable_planning_line(self):
        """
        Calculates all the cost and intermediate variables related to the
        inter-regional links in the planning mode.

        For every link this builds the investment cost, the accumulated new
        capacity, the total capacity (accumulated + residual), the fixed cost,
        the decommissioned capacity and its cost, and finally the variable cost
        of trade over all links.
        """
        self.cost_inv_line = {}
        self.line_accumulated_newcapacity = {}
        self.line_totalcapacity = {}
        self.cost_fix_line = {}
        self.line_decommissioned_capacity = {}
        self.cost_decom_line = {}
        for key in self.variables["line_newcapacity"].keys():
            # Investment cost of the new link capacity.
            self.cost_inv_line[key] = cp.multiply(
                self.sets.trade_data["line_inv"].loc[:, key].values,
                self.variables["line_newcapacity"][key],
            )
            # New link capacity still operational in each year.
            self.line_accumulated_newcapacity[key] = line_newcap_accumulated(
                self.variables["line_newcapacity"][key],
                self.sets.glob_mapping["Carriers_glob"]["Carrier"],
                self.sets.main_years,
                self.sets.trade_data["line_lifetime"].loc[:, key],
            )
            # Total link capacity = accumulated new + residual.
            self.line_totalcapacity[key] = (
                self.line_accumulated_newcapacity[key]
                + self.sets.trade_data["line_residual_cap"].loc[:, key].values
            )
            self.cost_fix_line[key] = cp.multiply(
                self.sets.trade_data["line_fixed_cost"].loc[:, key].values,
                self.line_totalcapacity[key],
            )
            # Link capacity retired each year and its decommissioning cost.
            self.line_decommissioned_capacity[key] = line_decomcap(
                self.variables["line_newcapacity"][key],
                self.sets.glob_mapping["Carriers_glob"]["Carrier"],
                self.sets.main_years,
                self.sets.trade_data["line_lifetime"].loc[:, key],
            )
            self.cost_decom_line[key] = cp.multiply(
                self.sets.trade_data["line_decom_cost"].loc[:, key].values,
                self.line_decommissioned_capacity[key],
            )
        # Variable cost of trade flows, over all regions and links.
        self.cost_variable_line = line_varcost(
            self.sets.trade_data["line_var_cost"],
            self.variables["line_import"],
            self.sets.regions,
            self.sets.main_years,
            self.sets.time_steps,
            self.sets.lines_list,
        )
    def _calc_variable_operation(self):
        """
        Calculates all the cost components of the objective function and the
        intermediate variables in the operation mode, for each region.

        In operation mode capacity is fixed to the residual capacity, so only
        the fixed costs (with taxes/subsidies), annual production, variable
        costs, CO2 emissions and emission costs are built; no investment,
        salvage or decommissioning terms exist.
        """
        self.totalcapacity = {}
        self.cost_fix = {}
        self.cost_fix_tax = {}
        self.cost_fix_sub = {}
        self.cost_variable = {}
        self.CO2_equivalent = {}
        self.emission_cost = {}
        self.production_annual = {}
        for reg in self.sets.regions:
            totalcapacity_regional = {}
            cost_fix_regional = {}
            cost_fix_tax_regional = {}
            cost_fix_Sub_regional = {}
            cost_variable_regional = {}
            CO2_equivalent_regional = {}
            emission_cost_regional = {}
            production_annual_regional = {}
            for key in self.sets.Technologies[reg].keys():
                # Demand technologies have no capacity or cost terms.
                if key != "Demand":
                    # Capacity is exogenous in operation mode.
                    totalcapacity_regional[key] = (
                        self.sets.data[reg]["tech_residual_cap"].loc[:, key].values
                    )
                    (
                        cost_fix_regional[key],
                        cost_fix_tax_regional[key],
                        cost_fix_Sub_regional[key],
                    ) = fixcosts(
                        self.sets.data[reg]["tech_fixed_cost"][key],
                        totalcapacity_regional[key],
                        self.sets.data[reg]["fix_taxsub"]["Tax"][key],
                        self.sets.data[reg]["fix_taxsub"]["Sub"][key],
                    )
                    # Hourly production aggregated to annual values.
                    production_annual_regional[key] = annual_activity(
                        self.variables["productionbyTechnology"][reg][key],
                        self.sets.main_years,
                        self.sets.time_steps,
                    )
                    cost_variable_regional[key] = cp.multiply(
                        production_annual_regional[key],
                        self.sets.data[reg]["tech_var_cost"].loc[:, key],
                    )
                    # Transmission and storage carry no direct emissions.
                    if key != "Transmission" and key != "Storage":
                        CO2_equivalent_regional[key] = cp.multiply(
                            production_annual_regional[key],
                            self.sets.data[reg]["specific_emission"].loc[:, key],
                        )
                        emission_cost_regional[key] = cp.multiply(
                            CO2_equivalent_regional[key],
                            self.sets.data[reg]["carbon_tax"].loc[:, key],
                        )
            self.totalcapacity[reg] = totalcapacity_regional
            self.cost_fix[reg] = cost_fix_regional
            self.cost_fix_tax[reg] = cost_fix_tax_regional
            self.cost_fix_sub[reg] = cost_fix_Sub_regional
            self.cost_variable[reg] = cost_variable_regional
            self.CO2_equivalent[reg] = CO2_equivalent_regional
            self.emission_cost[reg] = emission_cost_regional
            self.production_annual[reg] = production_annual_regional
def _calc_variable_operation_line(self):
"""
Calculates all the cost and intermediate variables related to the inter-
regional links in the operation mode
"""
self.line_totalcapacity = {}
self.cost_fix_line = {}
for key in self.sets.lines_list:
self.line_totalcapacity[key] = (
self.sets.trade_data["line_residual_cap"].loc[:, key].values
)
self.cost_fix_line[key] = cp.multiply(
self.sets.trade_data["line_fixed_cost"].loc[:, key].values,
self.line_totalcapacity[key],
)
self.cost_variable_line = line_varcost(
self.sets.trade_data["line_var_cost"],
self.variables["line_import"],
self.sets.regions,
self.sets.main_years,
self.sets.time_steps,
self.sets.lines_list,
)
def _calc_variable_storage_SOC(self):
"""
Calculates the annual state of charge of the on grid storage technologies,
in the models with hourly temporal resolution
"""
self.storage_SOC = {}
for reg in get_regions_with_storage(self.sets):
self.storage_SOC[reg] = storage_state_of_charge(
self.sets.data[reg]["storage_initial_SOC"],
self.variables["usebyTechnology"][reg]["Storage"],
self.variables["productionbyTechnology"][reg]["Storage"],
self.sets.main_years,
self.sets.time_steps,
)
    def _balance_(self):
        """
        Creates the dictionaries for the annual total production by each technology,
        total consumption by each technology, total import, total exports and total
        final demand of each energy carrier within each region.

        For every region and carrier, zero-initialized hourly accumulators are
        filled by scanning each technology's input/output carrier mappings, and
        (in multi-region models) the import/export flows over the lines.
        """
        self.totalusebycarrier = {}
        self.totalprodbycarrier = {}
        self.totalimportbycarrier = {}
        self.totalexportbycarrier = {}
        self.totaldemandbycarrier = {}
        for reg in self.sets.regions:
            totalusebycarrier_regional = {}
            totalprodbycarrier_regional = {}
            totalimportbycarrier_regional = {}
            totalexportbycarrier_regional = {}
            totaldemandbycarrier_regional = {}
            for carr in self.sets.glob_mapping["Carriers_glob"]["Carrier"]:
                # One hourly accumulator per carrier (years x time steps).
                totalusebycarrier_regional[carr] = np.zeros(
                    (len(self.sets.main_years) * len(self.sets.time_steps),)
                )
                totalprodbycarrier_regional[carr] = np.zeros(
                    (len(self.sets.main_years) * len(self.sets.time_steps),)
                )
                totalimportbycarrier_regional[carr] = np.zeros(
                    (len(self.sets.main_years) * len(self.sets.time_steps),)
                )
                totalexportbycarrier_regional[carr] = np.zeros(
                    (len(self.sets.main_years) * len(self.sets.time_steps),)
                )
                totaldemandbycarrier_regional[carr] = np.zeros(
                    (len(self.sets.main_years) * len(self.sets.time_steps),)
                )
                for key in self.sets.Technologies[reg].keys():
                    for indx, tech in enumerate(self.sets.Technologies[reg][key]):
                        # Does this technology consume the carrier?
                        if (
                            carr
                            in self.sets.mapping[reg]["Carrier_input"]
                            .loc[
                                self.sets.mapping[reg]["Carrier_input"]["Technology"]
                                == tech
                            ]["Carrier_in"]
                            .values
                        ):
                            if key == "Conversion_plus":
                                # Multi-carrier technologies: weight the use by
                                # the input carrier ratio.
                                totalusebycarrier_regional[carr] += cp.multiply(
                                    self.variables["usebyTechnology"][reg][key][
                                        :, indx
                                    ],
                                    self.sets.data[reg]["carrier_ratio_in"][
                                        (tech, carr)
                                    ].values,
                                )
                            elif key == "Demand":
                                # Demand technologies consume as exogenous demand.
                                totaldemandbycarrier_regional[carr] += self.sets.data[
                                    reg
                                ]["demand"][tech].values
                            elif key != "Supply":
                                totalusebycarrier_regional[carr] += self.variables[
                                    "usebyTechnology"
                                ][reg][key][:, indx]
                        # Does this technology produce the carrier?
                        if (
                            carr
                            in self.sets.mapping[reg]["Carrier_output"]
                            .loc[
                                self.sets.mapping[reg]["Carrier_output"]["Technology"]
                                == tech
                            ]["Carrier_out"]
                            .values
                        ):
                            if key == "Conversion_plus":
                                # Weight production by the output carrier ratio.
                                totalprodbycarrier_regional[carr] += cp.multiply(
                                    self.variables["productionbyTechnology"][reg][key][
                                        :, indx
                                    ],
                                    self.sets.data[reg]["carrier_ratio_out"][
                                        (tech, carr)
                                    ].values,
                                )
                            else:
                                totalprodbycarrier_regional[carr] += self.variables[
                                    "productionbyTechnology"
                                ][reg][key][:, indx]
                if len(self.sets.regions) > 1:
                    for key in self.variables["line_import"][reg].keys():
                        # Line efficiency is annual data replicated to hourly
                        # resolution; the line name may be stored in either
                        # direction.
                        # NOTE(review): if neither "reg-key" nor "key-reg" is in
                        # lines_list, line_eff is unbound here and the cp.multiply
                        # below raises NameError (or silently reuses the previous
                        # iteration's value) — confirm lines_list always covers
                        # every region pair with an import variable.
                        if "{}-{}".format(reg, key) in self.sets.lines_list:
                            line_eff = (
                                pd.concat(
                                    [
                                        self.sets.trade_data["line_eff"][
                                            ("{}-{}".format(reg, key), carr)
                                        ]
                                    ]
                                    * len(self.sets.time_steps)
                                )
                                .sort_index()
                                .values
                            )
                        elif "{}-{}".format(key, reg) in self.sets.lines_list:
                            line_eff = (
                                pd.concat(
                                    [
                                        self.sets.trade_data["line_eff"][
                                            ("{}-{}".format(key, reg), carr)
                                        ]
                                    ]
                                    * len(self.sets.time_steps)
                                )
                                .sort_index()
                                .values
                            )
                        # Imports are reduced by the transmission efficiency;
                        # exports are counted before losses.
                        totalimportbycarrier_regional[carr] += cp.multiply(
                            self.variables["line_import"][reg][key][
                                :,
                                list(
                                    self.sets.glob_mapping["Carriers_glob"]["Carrier"]
                                ).index(carr),
                            ],
                            line_eff,
                        )
                        totalexportbycarrier_regional[carr] += self.variables[
                            "line_export"
                        ][reg][key][
                            :,
                            list(
                                self.sets.glob_mapping["Carriers_glob"]["Carrier"]
                            ).index(carr),
                        ]
            self.totalusebycarrier[reg] = totalusebycarrier_regional
            self.totalprodbycarrier[reg] = totalprodbycarrier_regional
            self.totalimportbycarrier[reg] = totalimportbycarrier_regional
            self.totalexportbycarrier[reg] = totalexportbycarrier_regional
            self.totaldemandbycarrier[reg] = totaldemandbycarrier_regional
def _constr_balance(self):
"""
Ensures the energy balance of each carrier within each region
"""
for reg in self.sets.regions:
for carr in self.sets.glob_mapping["Carriers_glob"]["Carrier"]:
self.totalusebycarrier[reg][carr] = cp.reshape(
self.totalusebycarrier[reg][carr],
self.totalprodbycarrier[reg][carr].shape,
)
self.constr.append(
self.totalprodbycarrier[reg][carr]
+ self.totalimportbycarrier[reg][carr]
- self.totalusebycarrier[reg][carr]
- self.totalexportbycarrier[reg][carr]
- self.totaldemandbycarrier[reg][carr]
== 0
)
def _constr_trade_balance(self):
"""
Ensure sthe trade balance among any pairs of regions before the transmission
loss
"""
for reg in self.sets.regions:
for key in self.variables["line_import"][reg].keys():
self.constr.append(
self.variables["line_import"][reg][key]
- self.variables["line_export"][key][reg]
== 0
)
    def _constr_resource_tech_availability(self):
        """
        Guarantees the adequacy of the total capacity of each technology based
        on the technology capacity factor and resource availability.

        For every region, non-storage technology category and year, two
        constraints are added: the hourly production may not exceed the
        resource-limited available production, and the annual production may
        not exceed the available production scaled by the technology capacity
        factor.
        """
        for reg in self.sets.regions:
            for key in self.variables["productionbyTechnology"][reg].keys():
                if key != "Storage":
                    for indx, year in enumerate(self.sets.main_years):
                        # Maximum producible energy per time slice, from the
                        # year's capacity, resource capacity factor and the
                        # capacity-to-production conversion.
                        self.available_prod = available_resource_prod(
                            self.totalcapacity[reg][key][indx : indx + 1, :],
                            self.sets.data[reg]["res_capacity_factor"]
                            .loc[(year, slice(None)), (key, slice(None))]
                            .values,
                            self.timeslice_fraction,
                            self.sets.data[reg]["annualprod_per_unitcapacity"]
                            .loc[:, (key, slice(None))]
                            .values,
                        )
                        # Hourly production <= hourly available production.
                        self.constr.append(
                            self.available_prod
                            - self.variables["productionbyTechnology"][reg][key][
                                indx
                                * len(self.sets.time_steps) : (indx + 1)
                                * len(self.sets.time_steps),
                                :,
                            ]
                            >= 0
                        )
                        # Annual production <= annual available production
                        # scaled by the technology capacity factor.
                        self.constr.append(
                            cp.multiply(
                                cp.sum(self.available_prod, axis=0),
                                self.sets.data[reg]["tech_capacity_factor"].loc[
                                    year, (key, slice(None))
                                ],
                            )
                            - cp.sum(
                                self.variables["productionbyTechnology"][reg][key][
                                    indx
                                    * len(self.sets.time_steps) : (indx + 1)
                                    * len(self.sets.time_steps),
                                    :,
                                ],
                                axis=0,
                            )
                            >= 0
                        )
    def _constr_line_availability(self):
        """
        Guarantees the adequacy of the inter-regional link capacities based on
        their capacity factor.

        For every region, trading partner and year, two constraints are added:
        the hourly import may not exceed the link's hourly deliverable energy,
        and the annual import may not exceed the link's annual deliverable
        energy scaled by the line capacity factor.
        """
        for reg in self.sets.regions:
            for key, value in self.variables["line_import"][reg].items():
                for indx, year in enumerate(self.sets.main_years):
                    # The line name may be stored in either direction.
                    # NOTE(review): if neither "reg-key" nor "key-reg" is in
                    # lines_list, capacity_factor / capacity_to_production /
                    # capacity stay unbound (or keep the previous iteration's
                    # values) — confirm lines_list covers every import pair.
                    if "{}-{}".format(reg, key) in self.sets.lines_list:
                        capacity_factor = (
                            self.sets.trade_data["line_capacity_factor"]
                            .loc[year, ("{}-{}".format(reg, key), slice(None))]
                            .values
                        )
                        capacity_to_production = (
                            self.sets.trade_data["annualprod_per_unitcapacity"]
                            .loc[:, ("{}-{}".format(reg, key), slice(None))]
                            .values
                        )
                        capacity = self.line_totalcapacity["{}-{}".format(reg, key)][
                            indx : indx + 1, :
                        ]
                    elif "{}-{}".format(key, reg) in self.sets.lines_list:
                        capacity_factor = (
                            self.sets.trade_data["line_capacity_factor"]
                            .loc[year, ("{}-{}".format(key, reg), slice(None))]
                            .values
                        )
                        capacity_to_production = (
                            self.sets.trade_data["annualprod_per_unitcapacity"]
                            .loc[:, ("{}-{}".format(key, reg), slice(None))]
                            .values
                        )
                        capacity = self.line_totalcapacity["{}-{}".format(key, reg)][
                            indx : indx + 1, :
                        ]
                    # Total import over the year's time steps.
                    line_import = cp.sum(
                        value[
                            indx
                            * len(self.sets.time_steps) : (indx + 1)
                            * len(self.sets.time_steps),
                            :,
                        ],
                        axis=0,
                    )
                    line_import = cp.reshape(line_import, capacity_to_production.shape)
                    # In-place shape change on the extracted numpy array so it
                    # broadcasts against capacity_to_production.
                    capacity_factor.shape = capacity_to_production.shape
                    # Hourly import <= hourly deliverable energy of the line.
                    self.constr.append(
                        cp.multiply(
                            cp.multiply(capacity, capacity_to_production),
                            self.timeslice_fraction,
                        )
                        - value[
                            indx
                            * len(self.sets.time_steps) : (indx + 1)
                            * len(self.sets.time_steps),
                            :,
                        ]
                        >= 0
                    )
                    # Annual import <= annual deliverable energy scaled by the
                    # line capacity factor.
                    self.constr.append(
                        cp.multiply(
                            cp.multiply(capacity, capacity_factor),
                            capacity_to_production,
                        )
                        - line_import
                        >= 0
                    )
def _constr_totalcapacity_regional(self):
"""
Defines the annual upper and lower limit on the total capacity
of each technology within each region
"""
for reg in self.sets.regions:
for key, value in self.totalcapacity[reg].items():
self.constr.append(
value - self.sets.data[reg]["tech_mintotcap"].loc[:, key].values
>= 0
)
self.constr.append(
value - self.sets.data[reg]["tech_maxtotcap"].loc[:, key] <= 0
)
def _constr_totalcapacity_overall(self):
"""
Defines the annual upper and lower limit on the aggregated total capacity
of each technology over all the regions
"""
self.totalcapacity_overall = _calc_variable_overall(
self.sets.glob_mapping["Technologies_glob"],
self.sets.regions,
self.sets.main_years,
self.sets.Technologies,
self.totalcapacity,
)
for tech, value in self.totalcapacity_overall.items():
self.constr.append(
value - self.sets.global_data["global_mintotcap"].loc[:, tech].values
>= 0
)
self.constr.append(
value - self.sets.global_data["global_maxtotcap"].loc[:, tech].values
<= 0
)
def _constr_totalcapacity_line(self):
"""
Defines the upper and lower limit on the annual total capacity of the
inter-regional links
"""
for key, value in self.line_totalcapacity.items():
self.constr.append(
value <= self.sets.trade_data["line_maxtotcap"][key].values
)
self.constr.append(
value >= self.sets.trade_data["line_mintotcap"][key].values
)
def _constr_newcapacity_regional(self):
"""
Defines the upper and lower limit on the annual new installed capacity
of each technology within each region
"""
for reg in self.sets.regions:
for key, value in self.variables["newcapacity"][reg].items():
self.constr.append(
value >= self.sets.data[reg]["tech_min_newcap"].loc[:, key]
)
self.constr.append(
value <= self.sets.data[reg]["tech_max_newcap"].loc[:, key]
)
def _constr_newcapacity_overall(self):
"""
Defines the upper and lower limit on the aggregated new installed capacity
of each technology over all the regions
"""
self.newcapacity_overall = _calc_variable_overall(
self.sets.glob_mapping["Technologies_glob"],
self.sets.regions,
self.sets.main_years,
self.sets.Technologies,
self.variables["newcapacity"],
)
for tech, value in self.newcapacity_overall.items():
self.constr.append(
value - self.sets.global_data["global_min_newcap"].loc[:, tech] >= 0
)
self.constr.append(
value - self.sets.global_data["global_max_newcap"].loc[:, tech] <= 0
)
def _constr_newcapacity_line(self):
"""
Defines the upper and lower limit on the annual new installed capacity
of the inter-regional links
"""
for key, value in self.variables["newcapaciy"].items():
self.constr.append(value <= self.sets.trade_data["line_max_newcap"][key])
self.constr.append(value >= self.sets.trade_data["line_min_newcap"][key])
    def _constr_tech_efficiency(self):
        """
        Defines the relationship between the input and output activity of
        conversion, transmission and conversion-plus technologies:
        production must equal use multiplied by the technology efficiency.
        Supply and storage technologies are excluded.
        """
        for reg in self.sets.regions:
            for key, value in self.variables["productionbyTechnology"][reg].items():
                if key != "Supply" and key != "Storage":
                    # Replicate the annual efficiency rows once per time step
                    # so they align with the hourly decision variables.
                    tech_efficiency_reshape = pd.concat(
                        [self.sets.data[reg]["tech_efficiency"][key]]
                        * len(self.sets.time_steps)
                    ).sort_index()
                    self.constr.append(
                        value
                        - cp.multiply(
                            self.variables["usebyTechnology"][reg][key],
                            tech_efficiency_reshape.values,
                        )
                        == 0
                    )
def _constr_prod_annual(self):
"""
Defines the upper and lower limit for the annual production of the technologies
within each region
"""
for reg in self.sets.regions:
for key, value in self.variables["productionbyTechnology"][reg].items():
production_annual = annual_activity(
value, self.sets.main_years, self.sets.time_steps,
)
if key != "Transmission" and key != "Storage":
self.constr.append(
production_annual
- self.sets.data[reg]["tech_max_production"].loc[
:, (key, slice(None))
]
<= 0
)
self.constr.append(
production_annual
- self.sets.data[reg]["tech_min_production"].loc[
:, (key, slice(None))
]
>= 0
)
def _constr_prod(self):
"""
Defines the upper and lower limit for the hourly production of the technologies
within each region
"""
for reg in self.sets.regions:
for key, value in self.variables["productionbyTechnology"][reg].items():
if key != "Transmission" and key != "Storage":
self.constr.append(
value
- self.sets.data[reg]["tech_max_production_h"].loc[
:, (key, slice(None))
]
<= 0
)
self.constr.append(
value
- self.sets.data[reg]["tech_min_production_h"].loc[
:, (key, slice(None))
]
>= 0
)
def _constr_prod_annual_overall(self):
"""
Defines the upper and lower limit for the aggregated annual production
of the technologies over all the regions
"""
self.production_overall = _calc_production_overall(
self.sets.glob_mapping["Technologies_glob"],
self.sets.regions,
self.sets.main_years,
self.sets.Technologies,
self.production_annual,
)
for tech, value in self.production_overall.items():
self.constr.append(
value - self.sets.global_data["global_min_production"].loc[:, tech] >= 0
)
self.constr.append(
value - self.sets.global_data["global_max_production"].loc[:, tech] <= 0
)
    def _constr_emission_cap(self):
        """
        Defines the CO2 emission cap within each region and, for multi-region
        models, over all the regions combined.
        """
        self.regional_emission = {}
        self.global_emission = np.zeros(
            (len(self.sets.main_years) * len(self.sets.time_steps), 1)
        )
        for reg in self.sets.regions:
            self.regional_emission[reg] = np.zeros(
                (len(self.sets.main_years) * len(self.sets.time_steps), 1)
            )
            # Sum emissions over the technologies of every category.
            for key, value in self.CO2_equivalent[reg].items():
                self.regional_emission[reg] += cp.sum(value, axis=1)
            emission_cap = self.sets.data[reg]["emission_cap_annual"].values
            # In-place shape change on the extracted numpy array so the cap
            # broadcasts against the regional emission expression.
            emission_cap.shape = self.regional_emission[reg].shape
            self.global_emission += self.regional_emission[reg]
            self.constr.append(emission_cap - self.regional_emission[reg] >= 0)
        if len(self.sets.regions) > 1:
            global_emission_cap = self.sets.global_data[
                "global_emission_cap_annual"
            ].values
            global_emission_cap.shape = self.global_emission.shape
            self.constr.append(global_emission_cap - self.global_emission >= 0)
    def _constr_storage_max_min_charge(self):
        """
        Defines the maximum and minimum allowed storage state of charge in
        each timestep of the year, based on the total nominal capacity and the
        minimum state-of-charge factor.
        """
        for reg in get_regions_with_storage(self.sets):
            for indx, year in enumerate(self.sets.main_years):
                # SOC may not exceed the year's total storage capacity.
                self.constr.append(
                    self.totalcapacity[reg]["Storage"][indx : indx + 1, :]
                    - self.storage_SOC[reg][
                        indx
                        * len(self.sets.time_steps) : (indx + 1)
                        * len(self.sets.time_steps),
                        :,
                    ]
                    >= 0
                )
                # SOC may not fall below capacity * minimum SOC factor.
                self.constr.append(
                    self.storage_SOC[reg][
                        indx
                        * len(self.sets.time_steps) : (indx + 1)
                        * len(self.sets.time_steps),
                        :,
                    ]
                    - cp.multiply(
                        self.totalcapacity[reg]["Storage"][indx : indx + 1, :],
                        self.sets.data[reg]["storage_min_SOC"].values[
                            indx : indx + 1, :
                        ],
                    )
                    >= 0
                )
    def _constr_storage_max_flow_in_out(self):
        """
        Defines the maximum allowed storage inflow and outflow in each hour of
        the year, based on the total capacity, the capacity factor and the
        storage charge and discharge time.
        """
        for reg in get_regions_with_storage(self.sets):
            for indx, year in enumerate(self.sets.main_years):
                # Maximum hourly charge flow from capacity and charge time.
                max_storage_flow_in = storage_max_flow(
                    self.totalcapacity[reg]["Storage"][indx : indx + 1, :],
                    self.sets.data[reg]["storage_charge_time"].values,
                    self.sets.data[reg]["tech_capacity_factor"]["Storage"].values[
                        indx : indx + 1, :
                    ],
                    self.timeslice_fraction,
                )
                # Maximum hourly discharge flow from capacity and discharge time.
                max_storage_flow_out = storage_max_flow(
                    self.totalcapacity[reg]["Storage"][indx : indx + 1, :],
                    self.sets.data[reg]["storage_discharge_time"].values,
                    self.sets.data[reg]["tech_capacity_factor"]["Storage"].values[
                        indx : indx + 1, :
                    ],
                    self.timeslice_fraction,
                )
                # Charging (use) bounded by the maximum inflow.
                self.constr.append(
                    max_storage_flow_in
                    - self.variables["usebyTechnology"][reg]["Storage"][
                        indx
                        * len(self.sets.time_steps) : (indx + 1)
                        * len(self.sets.time_steps),
                        :,
                    ]
                    >= 0
                )
                # Discharging (production) bounded by the maximum outflow.
                self.constr.append(
                    max_storage_flow_out
                    - self.variables["productionbyTechnology"][reg]["Storage"][
                        indx
                        * len(self.sets.time_steps) : (indx + 1)
                        * len(self.sets.time_steps),
                        :,
                    ]
                    >= 0
                )
    def _set_regional_objective_planning(self):
        """
        Calculates the regional objective function in the planning mode.

        Per region, sums investment taxes/subsidies, fixed costs with
        taxes/subsidies, variable, decommissioning and (for emitting
        technologies) emission costs, minus salvage value; the annual totals
        are then discounted with the regional discount rate and accumulated in
        ``self.totalcost_allregions``.  The investment annuities are
        accumulated undiscounted in ``self.inv_allregions``.
        """
        self.totalcost_allregions = np.zeros((len(self.sets.main_years), 1))
        self.inv_allregions = 0
        # Exponents for discounting: year 0 -> 0, year 1 -> -1, ...
        years = -1 * np.arange(len(self.sets.main_years))
        for reg in self.sets.regions:
            totalcost_regional = np.zeros((len(self.sets.main_years), 1))
            for ctgry in self.sets.Technologies[reg].keys():
                if ctgry != "Demand":
                    totalcost_regional += cp.sum(
                        self.cost_inv_tax[reg][ctgry]
                        - self.cost_inv_sub[reg][ctgry]
                        + self.cost_fix[reg][ctgry]
                        + self.cost_fix_tax[reg][ctgry]
                        - self.cost_fix_sub[reg][ctgry]
                        + self.cost_variable[reg][ctgry]
                        + self.cost_decom[reg][ctgry]
                        - self.salvage_inv[reg][ctgry],
                        axis=1,
                    )
                    self.inv_allregions += self.cost_inv_fvalue[reg][ctgry]
                    # Transmission and storage carry no direct emissions.
                    if ctgry != "Transmission" and ctgry != "Storage":
                        totalcost_regional += cp.sum(
                            self.emission_cost[reg][ctgry], axis=1
                        )
            discount_factor = (
                1 + self.sets.data[reg]["discount_rate"]["Annual Discount Rate"].values
            )
            # Discount each year's total back to the first model year.
            totalcost_regional_discounted = cp.multiply(
                totalcost_regional, np.power(discount_factor, years)
            )
            self.totalcost_allregions += totalcost_regional_discounted
def _set_regional_objective_operation(self):
"""
Calculates the regional objective function in the operation mode
"""
self.totalcost_allregions = 0
for reg in self.sets.regions:
totalcost_regional = 0
for ctgry in self.sets.Technologies[reg].keys():
if ctgry != "Demand":
totalcost_regional += cp.sum(
self.cost_fix[reg][ctgry]
+ self.cost_fix_tax[reg][ctgry]
- self.cost_fix_sub[reg][ctgry]
+ self.cost_variable[reg][ctgry]
)
if ctgry != "Transmission" and ctgry != "Storage":
totalcost_regional += cp.sum(
self.emission_cost[reg][ctgry], axis=1
)
self.totalcost_allregions += totalcost_regional
    def _set_lines_objective_planning(self):
        """
        Calculates the objective function of the inter-regional links in the
        planning mode: investment, fixed and decommissioning costs per line,
        plus the variable trade costs per region, discounted with the global
        discount rate into ``self.totalcost_lines_discounted``.
        """
        # Exponents for discounting: year 0 -> 0, year 1 -> -1, ...
        years = -1 * np.arange(len(self.sets.main_years))
        self.totalcost_lines = np.zeros((len(self.sets.main_years), 1))
        for line in self.sets.lines_list:
            self.totalcost_lines += cp.sum(
                self.cost_inv_line[line]
                + self.cost_fix_line[line]
                + self.cost_decom_line[line],
                axis=1,
            )
        for reg in self.sets.regions:
            for key, value in self.cost_variable_line[reg].items():
                self.totalcost_lines += cp.sum(value, axis=1)
        discount_factor_global = (
            1
            + self.sets.global_data["global_discount_rate"][
                "Annual Discount Rate"
            ].values
        )
        # Discount each year's line costs back to the first model year.
        self.totalcost_lines_discounted = cp.multiply(
            self.totalcost_lines, np.power(discount_factor_global, years)
        )
def _set_lines_objective_operation(self):
"""
Calculates the objective function of the inter-regional links in the
operation mode
"""
self.totalcost_lines = np.zeros((len(self.sets.main_years), 1))
for line in self.sets.lines_list:
self.totalcost_lines += cp.sum(self.cost_fix_line[line], axis=1)
for reg in self.sets.regions:
for key, value in self.cost_variable_line[reg].items():
self.totalcost_lines += cp.sum(value, axis=1)
def _set_final_objective_singlenode(self):
"""
Calculates the overall objective function in a single-node model
"""
if self.sets.mode == "Planning":
self.global_objective = (
cp.sum(self.totalcost_allregions) + self.inv_allregions
)
elif self.sets.mode == "Operation":
self.global_objective = self.totalcost_allregions
def _set_final_objective_multinode(self):
"""
Calculates the overall objective function as the summation of all the
regional and inter-regional links objective functions in a multi-node
model
"""
if self.sets.mode == "Planning":
self.global_objective = (
cp.sum(self.totalcost_lines_discounted + self.totalcost_allregions)
+ self.inv_allregions
)
elif self.sets.mode == "Operation":
self.global_objective = self.totalcost_allregions + self.totalcost_lines
| [
"logging.getLogger",
"hypatia.utility.utility.newcap_accumulated",
"hypatia.utility.utility.line_newcap_accumulated",
"hypatia.utility.utility.invcosts",
"cvxpy.Minimize",
"hypatia.utility.utility.line_decomcap",
"hypatia.utility.utility._calc_variable_overall",
"hypatia.utility.utility.salvage_factor... | [((738, 765), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (755, 765), False, 'import logging\n'), ((4441, 4475), 'cvxpy.Minimize', 'cp.Minimize', (['self.global_objective'], {}), '(self.global_objective)\n', (4452, 4475), True, 'import cvxpy as cp\n'), ((4494, 4528), 'cvxpy.Problem', 'cp.Problem', (['objective', 'self.constr'], {}), '(objective, self.constr)\n', (4504, 4528), True, 'import cvxpy as cp\n'), ((17816, 17993), 'hypatia.utility.utility.line_varcost', 'line_varcost', (["self.sets.trade_data['line_var_cost']", "self.variables['line_import']", 'self.sets.regions', 'self.sets.main_years', 'self.sets.time_steps', 'self.sets.lines_list'], {}), "(self.sets.trade_data['line_var_cost'], self.variables[\n 'line_import'], self.sets.regions, self.sets.main_years, self.sets.\n time_steps, self.sets.lines_list)\n", (17828, 17993), False, 'from hypatia.utility.utility import invcosts, invcosts_annuity, salvage_factor, newcap_accumulated, line_newcap_accumulated, _calc_variable_overall, _calc_production_overall, fixcosts, line_varcost, decomcap, line_decomcap, available_resource_prod, annual_activity, storage_state_of_charge, get_regions_with_storage, storage_max_flow\n'), ((21807, 21984), 'hypatia.utility.utility.line_varcost', 'line_varcost', (["self.sets.trade_data['line_var_cost']", "self.variables['line_import']", 'self.sets.regions', 'self.sets.main_years', 'self.sets.time_steps', 'self.sets.lines_list'], {}), "(self.sets.trade_data['line_var_cost'], self.variables[\n 'line_import'], self.sets.regions, self.sets.main_years, self.sets.\n time_steps, self.sets.lines_list)\n", (21819, 21984), False, 'from hypatia.utility.utility import invcosts, invcosts_annuity, salvage_factor, newcap_accumulated, line_newcap_accumulated, _calc_variable_overall, _calc_production_overall, fixcosts, line_varcost, decomcap, line_decomcap, available_resource_prod, annual_activity, storage_state_of_charge, 
get_regions_with_storage, storage_max_flow\n'), ((22314, 22349), 'hypatia.utility.utility.get_regions_with_storage', 'get_regions_with_storage', (['self.sets'], {}), '(self.sets)\n', (22338, 22349), False, 'from hypatia.utility.utility import invcosts, invcosts_annuity, salvage_factor, newcap_accumulated, line_newcap_accumulated, _calc_variable_overall, _calc_production_overall, fixcosts, line_varcost, decomcap, line_decomcap, available_resource_prod, annual_activity, storage_state_of_charge, get_regions_with_storage, storage_max_flow\n'), ((37344, 37506), 'hypatia.utility.utility._calc_variable_overall', '_calc_variable_overall', (["self.sets.glob_mapping['Technologies_glob']", 'self.sets.regions', 'self.sets.main_years', 'self.sets.Technologies', 'self.totalcapacity'], {}), "(self.sets.glob_mapping['Technologies_glob'], self.\n sets.regions, self.sets.main_years, self.sets.Technologies, self.\n totalcapacity)\n", (37366, 37506), False, 'from hypatia.utility.utility import invcosts, invcosts_annuity, salvage_factor, newcap_accumulated, line_newcap_accumulated, _calc_variable_overall, _calc_production_overall, fixcosts, line_varcost, decomcap, line_decomcap, available_resource_prod, annual_activity, storage_state_of_charge, get_regions_with_storage, storage_max_flow\n'), ((39233, 39406), 'hypatia.utility.utility._calc_variable_overall', '_calc_variable_overall', (["self.sets.glob_mapping['Technologies_glob']", 'self.sets.regions', 'self.sets.main_years', 'self.sets.Technologies', "self.variables['newcapacity']"], {}), "(self.sets.glob_mapping['Technologies_glob'], self.\n sets.regions, self.sets.main_years, self.sets.Technologies, self.\n variables['newcapacity'])\n", (39255, 39406), False, 'from hypatia.utility.utility import invcosts, invcosts_annuity, salvage_factor, newcap_accumulated, line_newcap_accumulated, _calc_variable_overall, _calc_production_overall, fixcosts, line_varcost, decomcap, line_decomcap, available_resource_prod, annual_activity, 
storage_state_of_charge, get_regions_with_storage, storage_max_flow\n'), ((43347, 43515), 'hypatia.utility.utility._calc_production_overall', '_calc_production_overall', (["self.sets.glob_mapping['Technologies_glob']", 'self.sets.regions', 'self.sets.main_years', 'self.sets.Technologies', 'self.production_annual'], {}), "(self.sets.glob_mapping['Technologies_glob'], self.\n sets.regions, self.sets.main_years, self.sets.Technologies, self.\n production_annual)\n", (43371, 43515), False, 'from hypatia.utility.utility import invcosts, invcosts_annuity, salvage_factor, newcap_accumulated, line_newcap_accumulated, _calc_variable_overall, _calc_production_overall, fixcosts, line_varcost, decomcap, line_decomcap, available_resource_prod, annual_activity, storage_state_of_charge, get_regions_with_storage, storage_max_flow\n'), ((45421, 45456), 'hypatia.utility.utility.get_regions_with_storage', 'get_regions_with_storage', (['self.sets'], {}), '(self.sets)\n', (45445, 45456), False, 'from hypatia.utility.utility import invcosts, invcosts_annuity, salvage_factor, newcap_accumulated, line_newcap_accumulated, _calc_variable_overall, _calc_production_overall, fixcosts, line_varcost, decomcap, line_decomcap, available_resource_prod, annual_activity, storage_state_of_charge, get_regions_with_storage, storage_max_flow\n'), ((46820, 46855), 'hypatia.utility.utility.get_regions_with_storage', 'get_regions_with_storage', (['self.sets'], {}), '(self.sets)\n', (46844, 46855), False, 'from hypatia.utility.utility import invcosts, invcosts_annuity, salvage_factor, newcap_accumulated, line_newcap_accumulated, _calc_variable_overall, _calc_production_overall, fixcosts, line_varcost, decomcap, line_decomcap, available_resource_prod, annual_activity, storage_state_of_charge, get_regions_with_storage, storage_max_flow\n'), ((5626, 5651), 'collections.namedtuple', 'namedtuple', (['"""result"""', 'res'], {}), "('result', res)\n", (5636, 5651), False, 'from collections import namedtuple\n'), 
((16410, 16520), 'cvxpy.multiply', 'cp.multiply', (["self.sets.trade_data['line_inv'].loc[:, key].values", "self.variables['line_newcapacity'][key]"], {}), "(self.sets.trade_data['line_inv'].loc[:, key].values, self.\n variables['line_newcapacity'][key])\n", (16421, 16520), True, 'import cvxpy as cp\n'), ((16617, 16816), 'hypatia.utility.utility.line_newcap_accumulated', 'line_newcap_accumulated', (["self.variables['line_newcapacity'][key]", "self.sets.glob_mapping['Carriers_glob']['Carrier']", 'self.sets.main_years', "self.sets.trade_data['line_lifetime'].loc[:, key]"], {}), "(self.variables['line_newcapacity'][key], self.sets.\n glob_mapping['Carriers_glob']['Carrier'], self.sets.main_years, self.\n sets.trade_data['line_lifetime'].loc[:, key])\n", (16640, 16816), False, 'from hypatia.utility.utility import invcosts, invcosts_annuity, salvage_factor, newcap_accumulated, line_newcap_accumulated, _calc_variable_overall, _calc_production_overall, fixcosts, line_varcost, decomcap, line_decomcap, available_resource_prod, annual_activity, storage_state_of_charge, get_regions_with_storage, storage_max_flow\n'), ((17119, 17224), 'cvxpy.multiply', 'cp.multiply', (["self.sets.trade_data['line_fixed_cost'].loc[:, key].values", 'self.line_totalcapacity[key]'], {}), "(self.sets.trade_data['line_fixed_cost'].loc[:, key].values,\n self.line_totalcapacity[key])\n", (17130, 17224), True, 'import cvxpy as cp\n'), ((17322, 17511), 'hypatia.utility.utility.line_decomcap', 'line_decomcap', (["self.variables['line_newcapacity'][key]", "self.sets.glob_mapping['Carriers_glob']['Carrier']", 'self.sets.main_years', "self.sets.trade_data['line_lifetime'].loc[:, key]"], {}), "(self.variables['line_newcapacity'][key], self.sets.\n glob_mapping['Carriers_glob']['Carrier'], self.sets.main_years, self.\n sets.trade_data['line_lifetime'].loc[:, key])\n", (17335, 17511), False, 'from hypatia.utility.utility import invcosts, invcosts_annuity, salvage_factor, newcap_accumulated, 
line_newcap_accumulated, _calc_variable_overall, _calc_production_overall, fixcosts, line_varcost, decomcap, line_decomcap, available_resource_prod, annual_activity, storage_state_of_charge, get_regions_with_storage, storage_max_flow\n'), ((17622, 17737), 'cvxpy.multiply', 'cp.multiply', (["self.sets.trade_data['line_decom_cost'].loc[:, key].values", 'self.line_decommissioned_capacity[key]'], {}), "(self.sets.trade_data['line_decom_cost'].loc[:, key].values,\n self.line_decommissioned_capacity[key])\n", (17633, 17737), True, 'import cvxpy as cp\n'), ((21623, 21728), 'cvxpy.multiply', 'cp.multiply', (["self.sets.trade_data['line_fixed_cost'].loc[:, key].values", 'self.line_totalcapacity[key]'], {}), "(self.sets.trade_data['line_fixed_cost'].loc[:, key].values,\n self.line_totalcapacity[key])\n", (21634, 21728), True, 'import cvxpy as cp\n'), ((22388, 22623), 'hypatia.utility.utility.storage_state_of_charge', 'storage_state_of_charge', (["self.sets.data[reg]['storage_initial_SOC']", "self.variables['usebyTechnology'][reg]['Storage']", "self.variables['productionbyTechnology'][reg]['Storage']", 'self.sets.main_years', 'self.sets.time_steps'], {}), "(self.sets.data[reg]['storage_initial_SOC'], self.\n variables['usebyTechnology'][reg]['Storage'], self.variables[\n 'productionbyTechnology'][reg]['Storage'], self.sets.main_years, self.\n sets.time_steps)\n", (22411, 22623), False, 'from hypatia.utility.utility import invcosts, invcosts_annuity, salvage_factor, newcap_accumulated, line_newcap_accumulated, _calc_variable_overall, _calc_production_overall, fixcosts, line_varcost, decomcap, line_decomcap, available_resource_prod, annual_activity, storage_state_of_charge, get_regions_with_storage, storage_max_flow\n'), ((51578, 51679), 'cvxpy.sum', 'cp.sum', (['(self.cost_inv_line[line] + self.cost_fix_line[line] + self.cost_decom_line\n [line])'], {'axis': '(1)'}), '(self.cost_inv_line[line] + self.cost_fix_line[line] + self.\n cost_decom_line[line], axis=1)\n', (51584, 
51679), True, 'import cvxpy as cp\n'), ((52196, 52235), 'numpy.power', 'np.power', (['discount_factor_global', 'years'], {}), '(discount_factor_global, years)\n', (52204, 52235), True, 'import numpy as np\n'), ((52572, 52612), 'cvxpy.sum', 'cp.sum', (['self.cost_fix_line[line]'], {'axis': '(1)'}), '(self.cost_fix_line[line], axis=1)\n', (52578, 52612), True, 'import cvxpy as cp\n'), ((11288, 11479), 'hypatia.utility.utility.invcosts', 'invcosts', (["self.sets.data[reg]['tech_inv'][key]", "self.variables['newcapacity'][reg][key]", "self.sets.data[reg]['inv_taxsub']['Tax'][key]", "self.sets.data[reg]['inv_taxsub']['Sub'][key]"], {}), "(self.sets.data[reg]['tech_inv'][key], self.variables['newcapacity'\n ][reg][key], self.sets.data[reg]['inv_taxsub']['Tax'][key], self.sets.\n data[reg]['inv_taxsub']['Sub'][key])\n", (11296, 11479), False, 'from hypatia.utility.utility import invcosts, invcosts_annuity, salvage_factor, newcap_accumulated, line_newcap_accumulated, _calc_variable_overall, _calc_production_overall, fixcosts, line_varcost, decomcap, line_decomcap, available_resource_prod, annual_activity, storage_state_of_charge, get_regions_with_storage, storage_max_flow\n'), ((12197, 12372), 'hypatia.utility.utility.newcap_accumulated', 'newcap_accumulated', (["self.variables['newcapacity'][reg][key]", 'self.sets.Technologies[reg][key]', 'self.sets.main_years', "self.sets.data[reg]['tech_lifetime'].loc[:, key]"], {}), "(self.variables['newcapacity'][reg][key], self.sets.\n Technologies[reg][key], self.sets.main_years, self.sets.data[reg][\n 'tech_lifetime'].loc[:, key])\n", (12215, 12372), False, 'from hypatia.utility.utility import invcosts, invcosts_annuity, salvage_factor, newcap_accumulated, line_newcap_accumulated, _calc_variable_overall, _calc_production_overall, fixcosts, line_varcost, decomcap, line_decomcap, available_resource_prod, annual_activity, storage_state_of_charge, get_regions_with_storage, storage_max_flow\n'), ((12841, 13026), 
'hypatia.utility.utility.fixcosts', 'fixcosts', (["self.sets.data[reg]['tech_fixed_cost'][key]", 'totalcapacity_regional[key]', "self.sets.data[reg]['fix_taxsub']['Tax'][key]", "self.sets.data[reg]['fix_taxsub']['Sub'][key]"], {}), "(self.sets.data[reg]['tech_fixed_cost'][key],\n totalcapacity_regional[key], self.sets.data[reg]['fix_taxsub']['Tax'][\n key], self.sets.data[reg]['fix_taxsub']['Sub'][key])\n", (12849, 13026), False, 'from hypatia.utility.utility import invcosts, invcosts_annuity, salvage_factor, newcap_accumulated, line_newcap_accumulated, _calc_variable_overall, _calc_production_overall, fixcosts, line_varcost, decomcap, line_decomcap, available_resource_prod, annual_activity, storage_state_of_charge, get_regions_with_storage, storage_max_flow\n'), ((13164, 13329), 'hypatia.utility.utility.decomcap', 'decomcap', (["self.variables['newcapacity'][reg][key]", 'self.sets.Technologies[reg][key]', 'self.sets.main_years', "self.sets.data[reg]['tech_lifetime'].loc[:, key]"], {}), "(self.variables['newcapacity'][reg][key], self.sets.Technologies[\n reg][key], self.sets.main_years, self.sets.data[reg]['tech_lifetime'].\n loc[:, key])\n", (13172, 13329), False, 'from hypatia.utility.utility import invcosts, invcosts_annuity, salvage_factor, newcap_accumulated, line_newcap_accumulated, _calc_variable_overall, _calc_production_overall, fixcosts, line_varcost, decomcap, line_decomcap, available_resource_prod, annual_activity, storage_state_of_charge, get_regions_with_storage, storage_max_flow\n'), ((13463, 13566), 'cvxpy.multiply', 'cp.multiply', (["self.sets.data[reg]['tech_decom_cost'].loc[:, key].values", 'decomcapacity_regional[key]'], {}), "(self.sets.data[reg]['tech_decom_cost'].loc[:, key].values,\n decomcapacity_regional[key])\n", (13474, 13566), True, 'import cvxpy as cp\n'), ((13673, 13789), 'hypatia.utility.utility.annual_activity', 'annual_activity', (["self.variables['productionbyTechnology'][reg][key]", 'self.sets.main_years', 
'self.sets.time_steps'], {}), "(self.variables['productionbyTechnology'][reg][key], self.\n sets.main_years, self.sets.time_steps)\n", (13688, 13789), False, 'from hypatia.utility.utility import invcosts, invcosts_annuity, salvage_factor, newcap_accumulated, line_newcap_accumulated, _calc_variable_overall, _calc_production_overall, fixcosts, line_varcost, decomcap, line_decomcap, available_resource_prod, annual_activity, storage_state_of_charge, get_regions_with_storage, storage_max_flow\n'), ((13911, 14010), 'cvxpy.multiply', 'cp.multiply', (['production_annual_regional[key]', "self.sets.data[reg]['tech_var_cost'].loc[:, key]"], {}), "(production_annual_regional[key], self.sets.data[reg][\n 'tech_var_cost'].loc[:, key])\n", (13922, 14010), True, 'import cvxpy as cp\n'), ((14607, 14859), 'hypatia.utility.utility.invcosts_annuity', 'invcosts_annuity', (['cost_inv_regional[key]', "self.sets.data[reg]['interest_rate'].loc[:, key]", "self.sets.data[reg]['economic_lifetime'].loc[:, key]", 'self.sets.Technologies[reg][key]', 'self.sets.main_years', "self.sets.data[reg]['discount_rate']"], {}), "(cost_inv_regional[key], self.sets.data[reg][\n 'interest_rate'].loc[:, key], self.sets.data[reg]['economic_lifetime'].\n loc[:, key], self.sets.Technologies[reg][key], self.sets.main_years,\n self.sets.data[reg]['discount_rate'])\n", (14623, 14859), False, 'from hypatia.utility.utility import invcosts, invcosts_annuity, salvage_factor, newcap_accumulated, line_newcap_accumulated, _calc_variable_overall, _calc_production_overall, fixcosts, line_varcost, decomcap, line_decomcap, available_resource_prod, annual_activity, storage_state_of_charge, get_regions_with_storage, storage_max_flow\n'), ((29926, 30018), 'cvxpy.reshape', 'cp.reshape', (['self.totalusebycarrier[reg][carr]', 'self.totalprodbycarrier[reg][carr].shape'], {}), '(self.totalusebycarrier[reg][carr], self.totalprodbycarrier[reg][\n carr].shape)\n', (29936, 30018), True, 'import cvxpy as cp\n'), ((41473, 41539), 
'hypatia.utility.utility.annual_activity', 'annual_activity', (['value', 'self.sets.main_years', 'self.sets.time_steps'], {}), '(value, self.sets.main_years, self.sets.time_steps)\n', (41488, 41539), False, 'from hypatia.utility.utility import invcosts, invcosts_annuity, salvage_factor, newcap_accumulated, line_newcap_accumulated, _calc_variable_overall, _calc_production_overall, fixcosts, line_varcost, decomcap, line_decomcap, available_resource_prod, annual_activity, storage_state_of_charge, get_regions_with_storage, storage_max_flow\n'), ((44503, 44524), 'cvxpy.sum', 'cp.sum', (['value'], {'axis': '(1)'}), '(value, axis=1)\n', (44509, 44524), True, 'import cvxpy as cp\n'), ((46960, 47202), 'hypatia.utility.utility.storage_max_flow', 'storage_max_flow', (["self.totalcapacity[reg]['Storage'][indx:indx + 1, :]", "self.sets.data[reg]['storage_charge_time'].values", "self.sets.data[reg]['tech_capacity_factor']['Storage'].values[indx:indx + 1, :]", 'self.timeslice_fraction'], {}), "(self.totalcapacity[reg]['Storage'][indx:indx + 1, :], self\n .sets.data[reg]['storage_charge_time'].values, self.sets.data[reg][\n 'tech_capacity_factor']['Storage'].values[indx:indx + 1, :], self.\n timeslice_fraction)\n", (46976, 47202), False, 'from hypatia.utility.utility import invcosts, invcosts_annuity, salvage_factor, newcap_accumulated, line_newcap_accumulated, _calc_variable_overall, _calc_production_overall, fixcosts, line_varcost, decomcap, line_decomcap, available_resource_prod, annual_activity, storage_state_of_charge, get_regions_with_storage, storage_max_flow\n'), ((47377, 47622), 'hypatia.utility.utility.storage_max_flow', 'storage_max_flow', (["self.totalcapacity[reg]['Storage'][indx:indx + 1, :]", "self.sets.data[reg]['storage_discharge_time'].values", "self.sets.data[reg]['tech_capacity_factor']['Storage'].values[indx:indx + 1, :]", 'self.timeslice_fraction'], {}), "(self.totalcapacity[reg]['Storage'][indx:indx + 1, :], self\n 
.sets.data[reg]['storage_discharge_time'].values, self.sets.data[reg][\n 'tech_capacity_factor']['Storage'].values[indx:indx + 1, :], self.\n timeslice_fraction)\n", (47393, 47622), False, 'from hypatia.utility.utility import invcosts, invcosts_annuity, salvage_factor, newcap_accumulated, line_newcap_accumulated, _calc_variable_overall, _calc_production_overall, fixcosts, line_varcost, decomcap, line_decomcap, available_resource_prod, annual_activity, storage_state_of_charge, get_regions_with_storage, storage_max_flow\n'), ((50144, 50176), 'numpy.power', 'np.power', (['discount_factor', 'years'], {}), '(discount_factor, years)\n', (50152, 50176), True, 'import numpy as np\n'), ((51903, 51924), 'cvxpy.sum', 'cp.sum', (['value'], {'axis': '(1)'}), '(value, axis=1)\n', (51909, 51924), True, 'import cvxpy as cp\n'), ((52762, 52783), 'cvxpy.sum', 'cp.sum', (['value'], {'axis': '(1)'}), '(value, axis=1)\n', (52768, 52783), True, 'import cvxpy as cp\n'), ((53027, 53060), 'cvxpy.sum', 'cp.sum', (['self.totalcost_allregions'], {}), '(self.totalcost_allregions)\n', (53033, 53060), True, 'import cvxpy as cp\n'), ((53544, 53611), 'cvxpy.sum', 'cp.sum', (['(self.totalcost_lines_discounted + self.totalcost_allregions)'], {}), '(self.totalcost_lines_discounted + self.totalcost_allregions)\n', (53550, 53611), True, 'import cvxpy as cp\n'), ((11647, 11923), 'hypatia.utility.utility.salvage_factor', 'salvage_factor', (['self.sets.main_years', 'self.sets.Technologies[reg][key]', "self.sets.data[reg]['tech_lifetime'].loc[:, key]", "self.sets.data[reg]['interest_rate'].loc[:, key]", "self.sets.data[reg]['discount_rate']", "self.sets.data[reg]['economic_lifetime'].loc[:, key]"], {}), "(self.sets.main_years, self.sets.Technologies[reg][key], self\n .sets.data[reg]['tech_lifetime'].loc[:, key], self.sets.data[reg][\n 'interest_rate'].loc[:, key], self.sets.data[reg]['discount_rate'],\n self.sets.data[reg]['economic_lifetime'].loc[:, key])\n", (11661, 11923), False, 'from 
hypatia.utility.utility import invcosts, invcosts_annuity, salvage_factor, newcap_accumulated, line_newcap_accumulated, _calc_variable_overall, _calc_production_overall, fixcosts, line_varcost, decomcap, line_decomcap, available_resource_prod, annual_activity, storage_state_of_charge, get_regions_with_storage, storage_max_flow\n'), ((14181, 14284), 'cvxpy.multiply', 'cp.multiply', (['production_annual_regional[key]', "self.sets.data[reg]['specific_emission'].loc[:, key]"], {}), "(production_annual_regional[key], self.sets.data[reg][\n 'specific_emission'].loc[:, key])\n", (14192, 14284), True, 'import cvxpy as cp\n'), ((14402, 14495), 'cvxpy.multiply', 'cp.multiply', (['CO2_equivalent_regional[key]', "self.sets.data[reg]['carbon_tax'].loc[:, key]"], {}), "(CO2_equivalent_regional[key], self.sets.data[reg]['carbon_tax']\n .loc[:, key])\n", (14413, 14495), True, 'import cvxpy as cp\n'), ((19347, 19532), 'hypatia.utility.utility.fixcosts', 'fixcosts', (["self.sets.data[reg]['tech_fixed_cost'][key]", 'totalcapacity_regional[key]', "self.sets.data[reg]['fix_taxsub']['Tax'][key]", "self.sets.data[reg]['fix_taxsub']['Sub'][key]"], {}), "(self.sets.data[reg]['tech_fixed_cost'][key],\n totalcapacity_regional[key], self.sets.data[reg]['fix_taxsub']['Tax'][\n key], self.sets.data[reg]['fix_taxsub']['Sub'][key])\n", (19355, 19532), False, 'from hypatia.utility.utility import invcosts, invcosts_annuity, salvage_factor, newcap_accumulated, line_newcap_accumulated, _calc_variable_overall, _calc_production_overall, fixcosts, line_varcost, decomcap, line_decomcap, available_resource_prod, annual_activity, storage_state_of_charge, get_regions_with_storage, storage_max_flow\n'), ((19698, 19814), 'hypatia.utility.utility.annual_activity', 'annual_activity', (["self.variables['productionbyTechnology'][reg][key]", 'self.sets.main_years', 'self.sets.time_steps'], {}), "(self.variables['productionbyTechnology'][reg][key], self.\n sets.main_years, self.sets.time_steps)\n", (19713, 19814), 
False, 'from hypatia.utility.utility import invcosts, invcosts_annuity, salvage_factor, newcap_accumulated, line_newcap_accumulated, _calc_variable_overall, _calc_production_overall, fixcosts, line_varcost, decomcap, line_decomcap, available_resource_prod, annual_activity, storage_state_of_charge, get_regions_with_storage, storage_max_flow\n'), ((19956, 20055), 'cvxpy.multiply', 'cp.multiply', (['production_annual_regional[key]', "self.sets.data[reg]['tech_var_cost'].loc[:, key]"], {}), "(production_annual_regional[key], self.sets.data[reg][\n 'tech_var_cost'].loc[:, key])\n", (19967, 20055), True, 'import cvxpy as cp\n'), ((35540, 35593), 'cvxpy.reshape', 'cp.reshape', (['line_import', 'capacity_to_production.shape'], {}), '(line_import, capacity_to_production.shape)\n', (35550, 35593), True, 'import cvxpy as cp\n'), ((49115, 49393), 'cvxpy.sum', 'cp.sum', (['(self.cost_inv_tax[reg][ctgry] - self.cost_inv_sub[reg][ctgry] + self.\n cost_fix[reg][ctgry] + self.cost_fix_tax[reg][ctgry] - self.\n cost_fix_sub[reg][ctgry] + self.cost_variable[reg][ctgry] + self.\n cost_decom[reg][ctgry] - self.salvage_inv[reg][ctgry])'], {'axis': '(1)'}), '(self.cost_inv_tax[reg][ctgry] - self.cost_inv_sub[reg][ctgry] + self\n .cost_fix[reg][ctgry] + self.cost_fix_tax[reg][ctgry] - self.\n cost_fix_sub[reg][ctgry] + self.cost_variable[reg][ctgry] + self.\n cost_decom[reg][ctgry] - self.salvage_inv[reg][ctgry], axis=1)\n', (49121, 49393), True, 'import cvxpy as cp\n'), ((50666, 50801), 'cvxpy.sum', 'cp.sum', (['(self.cost_fix[reg][ctgry] + self.cost_fix_tax[reg][ctgry] - self.\n cost_fix_sub[reg][ctgry] + self.cost_variable[reg][ctgry])'], {}), '(self.cost_fix[reg][ctgry] + self.cost_fix_tax[reg][ctgry] - self.\n cost_fix_sub[reg][ctgry] + self.cost_variable[reg][ctgry])\n', (50672, 50801), True, 'import cvxpy as cp\n'), ((20246, 20349), 'cvxpy.multiply', 'cp.multiply', (['production_annual_regional[key]', "self.sets.data[reg]['specific_emission'].loc[:, key]"], {}), 
"(production_annual_regional[key], self.sets.data[reg][\n 'specific_emission'].loc[:, key])\n", (20257, 20349), True, 'import cvxpy as cp\n'), ((20483, 20576), 'cvxpy.multiply', 'cp.multiply', (['CO2_equivalent_regional[key]', "self.sets.data[reg]['carbon_tax'].loc[:, key]"], {}), "(CO2_equivalent_regional[key], self.sets.data[reg]['carbon_tax']\n .loc[:, key])\n", (20494, 20576), True, 'import cvxpy as cp\n'), ((49814, 49860), 'cvxpy.sum', 'cp.sum', (['self.emission_cost[reg][ctgry]'], {'axis': '(1)'}), '(self.emission_cost[reg][ctgry], axis=1)\n', (49820, 49860), True, 'import cvxpy as cp\n'), ((51034, 51080), 'cvxpy.sum', 'cp.sum', (['self.emission_cost[reg][ctgry]'], {'axis': '(1)'}), '(self.emission_cost[reg][ctgry], axis=1)\n', (51040, 51080), True, 'import cvxpy as cp\n'), ((46216, 46351), 'cvxpy.multiply', 'cp.multiply', (["self.totalcapacity[reg]['Storage'][indx:indx + 1, :]", "self.sets.data[reg]['storage_min_SOC'].values[indx:indx + 1, :]"], {}), "(self.totalcapacity[reg]['Storage'][indx:indx + 1, :], self.sets\n .data[reg]['storage_min_SOC'].values[indx:indx + 1, :])\n", (46227, 46351), True, 'import cvxpy as cp\n'), ((24991, 25121), 'cvxpy.multiply', 'cp.multiply', (["self.variables['usebyTechnology'][reg][key][:, indx]", "self.sets.data[reg]['carrier_ratio_in'][tech, carr].values"], {}), "(self.variables['usebyTechnology'][reg][key][:, indx], self.sets\n .data[reg]['carrier_ratio_in'][tech, carr].values)\n", (25002, 25121), True, 'import cvxpy as cp\n'), ((26392, 26529), 'cvxpy.multiply', 'cp.multiply', (["self.variables['productionbyTechnology'][reg][key][:, indx]", "self.sets.data[reg]['carrier_ratio_out'][tech, carr].values"], {}), "(self.variables['productionbyTechnology'][reg][key][:, indx],\n self.sets.data[reg]['carrier_ratio_out'][tech, carr].values)\n", (26403, 26529), True, 'import cvxpy as cp\n'), ((40912, 41004), 'cvxpy.multiply', 'cp.multiply', (["self.variables['usebyTechnology'][reg][key]", 'tech_efficiency_reshape.values'], {}), 
"(self.variables['usebyTechnology'][reg][key],\n tech_efficiency_reshape.values)\n", (40923, 41004), True, 'import cvxpy as cp\n'), ((35773, 35818), 'cvxpy.multiply', 'cp.multiply', (['capacity', 'capacity_to_production'], {}), '(capacity, capacity_to_production)\n', (35784, 35818), True, 'import cvxpy as cp\n'), ((36304, 36342), 'cvxpy.multiply', 'cp.multiply', (['capacity', 'capacity_factor'], {}), '(capacity, capacity_factor)\n', (36315, 36342), True, 'import cvxpy as cp\n'), ((32527, 32562), 'cvxpy.sum', 'cp.sum', (['self.available_prod'], {'axis': '(0)'}), '(self.available_prod, axis=0)\n', (32533, 32562), True, 'import cvxpy as cp\n')] |
# -*- coding: utf-8 -*-
import logging
from abc import ABC, abstractmethod
import numpy as np
import cvxpy as cp
def cvx_desc_to_solver(solver_desc):
    """Map a solver name (e.g. "SCS") to the corresponding cvxpy solver constant.

    Parameters
    ----------
    solver_desc : `str`
        Name of the solver. Must be one of the supported cvxpy solver
        identifiers (e.g. "SCS", "ECOS", "GUROBI", ...).

    Returns
    -------
    The cvxpy solver constant (e.g. `cp.SCS`).

    Raises
    ------
    ValueError
        If `solver_desc` is not one of the supported solver names.
    """
    supported_solvers = frozenset([
        "SCS", "MOSEK", "CVXOPT", "OSQP", "ECOS", "CPLEX", "CBC",
        "NAG", "GLPK", "GLPK_MI", "GUROBI", "SCIP", "XPRESS",
    ])
    if solver_desc not in supported_solvers:
        raise ValueError(f"Solver '{solver_desc}' is not supported or unknown.")
    # The supported names coincide exactly with cvxpy's attribute names, so a
    # single getattr replaces the former if/elif chain. The attribute is only
    # looked up for the matched name, preserving the original's laziness for
    # solver constants that may be absent in a given cvxpy installation.
    return getattr(cp, solver_desc)
class MathematicalProgram():
    """Base class for a mathematical program.

    Subclasses are expected to override :meth:`solve`.

    NOTE(review): `@abstractmethod` only prevents instantiation when the class
    uses `abc.ABCMeta` (e.g. inherits from `abc.ABC`); here it is purely
    informational, and calling `solve` on the base class raises
    `NotImplementedError` at call time instead.
    """
    def __init__(self, **kwds):
        # Cooperative multiple inheritance: forward unused kwargs up the MRO.
        super().__init__(**kwds)
    @abstractmethod
    def solve(self):
        """Solve the mathematical program - must be implemented by subclasses."""
        raise NotImplementedError()
class SupportAffinePreprocessing():
    """Mixin for mathematical programs that support an affine preprocessing
    step ``x -> A @ x + b`` applied before the model.

    Both parts of the map default to `None`, in which case all apply methods
    behave as the identity.
    """
    def __init__(self, **kwds):
        self.A = None  # affine preprocessing matrix (None => identity)
        self.b = None  # affine preprocessing offset (None => identity)
        super().__init__(**kwds)

    def set_affine_preprocessing(self, A, b):
        """Store the affine map given by matrix `A` and offset `b`."""
        self.A = A
        self.b = b

    def get_affine_preprocessing(self):
        """Return the stored affine map as a dict with keys "A" and "b"."""
        return {"A": self.A, "b": self.b}

    def is_affine_preprocessing_set(self):
        """Tell whether both parts of the affine map have been set."""
        return not (self.A is None or self.b is None)

    def _apply_affine_preprocessing_to_var(self, var_x):
        """Apply ``A @ var_x + b`` to an optimization variable (identity if unset)."""
        if not self.is_affine_preprocessing_set():
            return var_x
        return self.A @ var_x + self.b

    def _apply_affine_preprocessing_to_const(self, x):
        """Apply the affine map to a constant vector (identity if unset)."""
        if not self.is_affine_preprocessing_set():
            return x
        return np.dot(self.A, x) + self.b
class ConvexQuadraticProgram(ABC, SupportAffinePreprocessing):
    """Base class for a convex quadratic program - for computing counterfactuals.
    Attributes
    ----------
    epsilon : `float`
        "Small" non-negative number for relaxing strict inequalities.
    """
    def __init__(self, **kwds):
        # Default hyperparameters; can be overridden per call via
        # `optimizer_args` in `build_solve_opt`.
        self.epsilon = 1e-2
        self.solver = cp.SCS
        self.solver_verbosity = False
        super().__init__(**kwds)
    @abstractmethod
    def _build_constraints(self, var_x, y):
        """Creates and returns all constraints.
        Parameters
        ----------
        var_x : `cvx.Variable`
            Optimization variable.
        y : `int` or `float`
            The requested prediction of the counterfactual - e.g. a class label.
        Returns
        -------
        `list`
            List of cvxpy constraints.
        """
        raise NotImplementedError()
    def _solve(self, prob):
        # Delegate to cvxpy with the configured solver and verbosity.
        prob.solve(solver=self.solver, verbose=self.solver_verbosity)
    def build_solve_opt(self, x_orig, y, features_whitelist=None, mad=None, optimizer_args=None):
        """Builds and solves the convex quadratic optimization problem.
        Parameters
        ----------
        x_orig : `numpy.ndarray`
            The original data point.
        y : `int` or `float`
            The requested prediction of the counterfactual - e.g. a class label.
        features_whitelist : `list(int)`, optional
            List of feature indices (dimensions of the input space) that can be used when computing the counterfactual.
            If `features_whitelist` is None, all features can be used.
            The default is None.
        mad : `numpy.ndarray`, optional
            Weights for the weighted Manhattan distance.
            If `mad` is None, the Euclidean distance is used.
            The default is None.
        optimizer_args : `dict`, optional
            Dictionary for overriding the default hyperparameters of the optimization algorithm.
            The default is None.
        Returns
        -------
        `numpy.ndarray`
            The solution of the optimization problem.
            If no solution exists, `None` is returned.
        """
        # Allow per-call overrides of the default hyperparameters.
        if optimizer_args is not None:
            if "epsilon" in optimizer_args:
                self.epsilon = optimizer_args["epsilon"]
            if "solver" in optimizer_args:
                self.solver = cvx_desc_to_solver(optimizer_args["solver"])
            if "solver_verbosity" in optimizer_args:
                self.solver_verbosity = optimizer_args["solver_verbosity"]
        dim = x_orig.shape[0]
        # Variables
        x = cp.Variable(dim)
        # `beta` is a slack vector bounding the (weighted) deviation from
        # x_orig in the Manhattan-distance branch below; it is unused when
        # `mad` is None.
        beta = cp.Variable(dim)
        # Constants
        c = np.ones(dim)
        z = np.zeros(dim)
        I = np.eye(dim)
        # Construct constraints
        constraints = self._build_constraints(x, y)
        # If requested, fix some features
        if features_whitelist is not None:
            # One indicator row per *non*-whitelisted feature, so that
            # A @ x == a pins those features to their original values.
            A = []
            a = []
            for j in range(dim):
                if j not in features_whitelist:
                    t = np.zeros(dim)
                    t[j] = 1.
                    A.append(t)
                    a.append(x_orig[j])
            if len(A) != 0:
                A = np.array(A)
                a = np.array(a)
                constraints += [A @ x == a]
        # If necessary, construct the weight matrix for the weighted Manhattan distance
        Upsilon = None
        if mad is not None:
            alpha = 1. / mad
            Upsilon = np.diag(alpha)
        # Build the final program
        f = None
        if mad is not None:
            # Standard LP reformulation of the weighted l1 norm: minimize
            # sum(beta) s.t. -beta <= Upsilon @ (x - x_orig) <= beta, beta >= 0.
            f = cp.Minimize(c.T @ beta) # Minimize (weighted) Manhattan distance
            constraints += [Upsilon @ (x - x_orig) <= beta, (-1. * Upsilon) @ (x - x_orig) <= beta, I @ beta >= z]
        else:
            # (1/2) x^T x - x_orig^T x equals (1/2)||x - x_orig||^2 up to a
            # constant, so this minimizes the Euclidean distance to x_orig.
            f = cp.Minimize((1/2)*cp.quad_form(x, I) - x_orig.T@x) # Minimize L2 distance
        prob = cp.Problem(f, constraints)
        # Solve it!
        self._solve(prob)
        return x.value
class SDP(ABC):
    """Base class for a semi-definite program (SDP) - for computing counterfactuals.
    Attributes
    ----------
    epsilon : `float`
        "Small" non-negative number for relaxing strict inequalities.
    """
    def __init__(self, **kwds):
        # Default hyperparameters; can be overridden per call via
        # `optimizer_args` in `build_solve_opt`.
        self.epsilon = 1e-2
        self.solver = cp.SCS
        self.solver_verbosity = False
        super().__init__(**kwds)
    @abstractmethod
    def _build_constraints(self, var_X, var_x, y):
        """Creates and returns all constraints.
        Parameters
        ----------
        var_X : `cvx.Variable`
            The artificial optimization variable X - a symmetric matrix (see paper for details).
        var_x : `cvx.Variable`
            Optimization variable.
        y : `int` or `float`
            The requested prediction of the counterfactual - e.g. a class label.
        Returns
        -------
        `list`
            List of cvxpy constraints.
        """
        raise NotImplementedError()
    def _solve(self, prob):
        # Delegate to cvxpy with the configured solver and verbosity.
        prob.solve(solver=self.solver, verbose=self.solver_verbosity)
    def build_solve_opt(self, x_orig, y, features_whitelist=None, optimizer_args=None):
        """Builds and solves the SDP.
        Parameters
        ----------
        x_orig : `numpy.ndarray`
            The original data point.
        y : `int` or `float`
            The requested prediction of the counterfactual - e.g. a class label.
        features_whitelist : `list(int)`, optional
            List of feature indices (dimensions of the input space) that can be used when computing the counterfactual.
            If `features_whitelist` is None, all features can be used.
            The default is None.
        optimizer_args : `dict`, optional
            Dictionary for overriding the default hyperparameters of the optimization algorithm.
            The default is None.
        Returns
        -------
        `numpy.ndarray`
            The solution of the optimization problem.
            If no solution exists, `None` is returned.
        """
        # Allow per-call overrides of the default hyperparameters.
        if optimizer_args is not None:
            if "epsilon" in optimizer_args:
                self.epsilon = optimizer_args["epsilon"]
            if "solver" in optimizer_args:
                self.solver = cvx_desc_to_solver(optimizer_args["solver"])
            if "solver_verbosity" in optimizer_args:
                self.solver_verbosity = optimizer_args["solver_verbosity"]
        dim = x_orig.shape[0]
        # Variables
        X = cp.Variable((dim, dim), symmetric=True)
        x = cp.Variable((dim, 1))
        one = np.array([[1]]).reshape(1, 1)
        I = np.eye(dim)
        # Construct constraints
        constraints = self._build_constraints(X, x, y)
        # Couple X and x: require the lifted block matrix [[X, x], [x^T, 1]]
        # to be positive semi-definite (`>>` is cvxpy's PSD constraint); X
        # acts as the relaxation variable for the outer product of x (see
        # the paper referenced in `_build_constraints`).
        constraints += [cp.bmat([[X, x], [x.T, one]]) >> 0]
        # If requested, fix some features
        if features_whitelist is not None:
            # One indicator row per *non*-whitelisted feature, so that
            # A @ x == a pins those features to their original values.
            A = []
            a = []
            for j in range(dim):
                if j not in features_whitelist:
                    t = np.zeros(dim)
                    t[j] = 1.
                    A.append(t)
                    a.append(x_orig[j])
            if len(A) != 0:
                A = np.array(A)
                a = np.array(a)
                constraints += [A @ x == a]
        # Build the final program
        # trace(I @ X) = trace(X) stands in for x^T x in the relaxed
        # objective; together with -2 x^T x_orig this relaxes the squared
        # distance to x_orig (up to the constant x_orig^T x_orig).
        f = cp.Minimize(cp.trace(I @ X) - 2. * x.T @ x_orig)
        prob = cp.Problem(f, constraints)
        # Solve it!
        self._solve(prob)
        return x.value.reshape(dim)
class DCQP(SupportAffinePreprocessing):
    """Difference-of-convex-quadratic program (DCQP) - for computing counterfactuals.

    .. math:: \\underset{\\vec{x} \\in \\mathbb{R}^d}{\\min} \\vec{x}^\\top Q_0 \\vec{x} + \\vec{q}^\\top \\vec{x} + c - \\vec{x}^\\top Q_1 \\vec{x} \\quad \\text{s.t. } \\vec{x}^\\top A0_i \\vec{x} + \\vec{x}^\\top \\vec{b_i} + r_i - \\vec{x}^\\top A1_i \\vec{x} \\leq 0 \\; \\forall\\,i

    Attributes
    ----------
    pccp : instance of :class:`ceml.optim.cvx.PenaltyConvexConcaveProcedure`
        Penalty convex-concave procedure used to approximately solve the DCQP.
        `None` until :meth:`build_program` has been called.
    """
    def __init__(self, **kwds):
        self.pccp = None  # created lazily in build_program()
        super().__init__(**kwds)

    def build_program(self, model, x_orig, y_target, Q0, Q1, q, c, A0_i, A1_i, b_i, r_i, features_whitelist=None, mad=None, optimizer_args=None):
        """Builds the DCQP.

        Parameters
        ----------
        model : `object`
            The model that is used for computing the counterfactual - must provide a method `predict`.
        x_orig : `numpy.ndarray`
            The data point whose prediction has to be explained.
        y_target : `int` or `float`
            The requested prediction of the counterfactual - e.g. a class label.
        Q0 : `numpy.ndarray`
            The matrix Q_0 of the DCQP (convex quadratic part of the objective).
        Q1 : `numpy.ndarray`
            The matrix Q_1 of the DCQP (concave quadratic part of the objective).
        q : `numpy.ndarray`
            The vector q of the DCQP.
        c : `float`
            The constant c of the DCQP.
        A0_i : `list(numpy.ndarray)`
            List of matrices A0_i of the DCQP.
        A1_i : `list(numpy.ndarray)`
            List of matrices A1_i of the DCQP.
        b_i : `list(numpy.ndarray)`
            List of vectors b_i of the DCQP.
        r_i : `list(float)`
            List of constants r_i of the DCQP.
        features_whitelist : `list(int)`, optional
            Feature indices (dimensions of the input space) that may be changed
            when computing the counterfactual; if None, all features can be used.
            The default is None.
        mad : `numpy.ndarray`, optional
            Weights for the weighted Manhattan distance; if None, the Euclidean
            distance is used.
            The default is None.
        optimizer_args : `dict`, optional
            Dictionary for overriding the default hyperparameters of the
            optimization algorithm.
            The default is None.
        """
        self.x_orig = x_orig
        self.y_target = y_target
        self.pccp = PenaltyConvexConcaveProcedure(model, Q0, Q1, q, c, A0_i, A1_i, b_i, r_i, features_whitelist, mad, optimizer_args)

    def solve(self, x0):
        """Approximately solves the DCQP by using the penalty convex-concave procedure.

        Parameters
        ----------
        x0 : `numpy.ndarray`
            Initial data point for the penalty convex-concave procedure - this
            could be anything, but a "good" initial solution may lead to a
            better result.

        Returns
        -------
        The result of the penalty convex-concave procedure.
        """
        # Forward any affine preprocessing configured on this program to the
        # underlying procedure before solving.
        affine_preprocessing = self.get_affine_preprocessing()
        self.pccp.set_affine_preprocessing(**affine_preprocessing)
        return self.pccp.compute_counterfactual(self.x_orig, self.y_target, x0)
class PenaltyConvexConcaveProcedure(SupportAffinePreprocessing):
    """Implementation of the penalty convex-concave procedure for approximately solving a DCQP.
    """
    def __init__(self, model, Q0, Q1, q, c, A0_i, A1_i, b_i, r_i, features_whitelist=None, mad=None, optimizer_args=None, **kwds):
        # Prediction model used to check whether a candidate counterfactual
        # already attains the requested label.
        self.model = model
        # If not None, the (weighted) Manhattan distance is used as regularizer.
        self.mad = mad
        # Indices of features that are allowed to change; all others are
        # pinned to their original values.
        self.features_whitelist = features_whitelist
        # Objective data: x'Q0x + q'x + c (convex part) - x'Q1x (concave part,
        # linearized around the current iterate in solve_aux).
        self.Q0 = Q0
        self.Q1 = Q1
        self.q = q
        self.c = c
        # Per-constraint data: x'A0_i x + b_i'x + r_i - x'A1_i x <= 0.
        self.A0s = A0_i
        self.A1s = A1_i
        self.bs = b_i
        self.rs = r_i
        self.dim = None  # set from x_orig in solve_aux
        # Optimizer hyperparameters; each is overridable via optimizer_args.
        self.epsilon = 1e-2  # margin pushed into each inequality constraint
        self.tao = 1.2       # initial penalty weight (sic: "tau")
        self.tao_max = 100   # stop once the penalty weight exceeds this
        self.mu = 1.5        # multiplicative growth factor of the penalty weight
        self.solver = cp.SCS
        self.solver_verbosity = False
        if optimizer_args is not None:
            if "epsilon" in optimizer_args:
                self.epsilon = optimizer_args["epsilon"]
            if "tao" in optimizer_args:
                self.tao = optimizer_args["tao"]
            if "tao_max" in optimizer_args:
                self.tao_max = optimizer_args["tao_max"]
            if "mu" in optimizer_args:
                self.mu = optimizer_args["mu"]
            if "solver" in optimizer_args:
                self.solver = cvx_desc_to_solver(optimizer_args["solver"])
            if "solver_verbosity" in optimizer_args:
                self.solver_verbosity = optimizer_args["solver_verbosity"]
        # All per-constraint parameter lists must have the same length.
        if not(len(self.A0s) == len(self.A1s) and len(self.A0s) == len(self.bs) and len(self.rs) == len(self.bs)):
            raise ValueError("Inconsistent number of constraint parameters")
        super().__init__(**kwds)

    def _solve(self, prob):
        # Hand the cvxpy problem to the configured solver.
        prob.solve(solver=self.solver, verbose=self.solver_verbosity)

    def solve_aux(self, xcf, tao, x_orig):
        """Solve one convexified subproblem of the DCQP.

        The concave quadratic terms are linearized around the current iterate
        ``xcf``; constraint violations are penalized through the slack
        variables ``s`` weighted by ``tao``.  Falls back to returning
        ``x_orig`` if the solver fails or no solution is found.
        """
        try:
            self.dim = x_orig.shape[0]
            # Variables
            var_x = cp.Variable(self.dim)
            s = cp.Variable(len(self.A0s))  # penalty slack, one per constraint
            var_x_prime = self._apply_affine_preprocessing_to_var(var_x)
            # Constants
            s_z = np.zeros(len(self.A0s))
            s_c = np.ones(len(self.A0s))
            c = np.ones(self.dim)
            # Build constraints
            constraints = []
            for i in range(len(self.A0s)):
                A = cp.quad_form(var_x_prime, self.A0s[i])
                q = var_x_prime.T @ self.bs[i]
                # Linearization of the concave quadratic term around xcf,
                # with the slack s[i] absorbing any violation.
                c = self.rs[i] + np.dot(xcf, np.dot(xcf, self.A1s[i])) - 2. * var_x_prime.T @ np.dot(xcf, self.A1s[i]) - s[i]
                constraints.append(A + q + c + self.epsilon <= 0)
            # If requested, fix some features
            if self.features_whitelist is not None:
                A = []
                a = []
                for j in range(self.dim):
                    if j not in self.features_whitelist:
                        t = np.zeros(self.dim)
                        t[j] = 1.
                        A.append(t)
                        a.append(x_orig[j])
                if len(A) != 0:
                    A = np.array(A)
                    a = np.array(a)
                    constraints += [A @ var_x == a]
            # Build the final program
            f = None
            if self.mad is not None: # TODO: Right now, mad != 1 is not supported.
                f = cp.Minimize(cp.norm(var_x - x_orig, 1) + s.T @ (tao*s_c))
            else:
                f = cp.Minimize(cp.quad_form(var_x_prime, self.Q0) + self.q.T @ var_x_prime + self.c + np.dot(xcf, np.dot(xcf, self.Q1)) - 2. * var_x_prime.T @ np.dot(xcf, self.Q1) + s.T @ (tao*s_c))
            constraints += [s >= s_z]  # slacks are nonnegative
            prob = cp.Problem(f, constraints)
            # Solve it!
            self._solve(prob)
            if var_x.value is None:
                raise Exception("No solution found!")
            else:
                return var_x.value
        except Exception as ex:
            # Best effort: log the failure and fall back to the original point.
            logging.debug(str(ex))
            return x_orig

    def compute_counterfactual(self, x_orig, y_target, x0):
        """Run the penalty CCP until the target label is reached or the
        penalty weight exceeds ``tao_max``.
        """
        ####################################
        # Penalty convex-concave procedure #
        ####################################
        # Initial feasible solution
        xcf = x0
        # Hyperparameters
        cur_tao = self.tao
        # Solve a bunch of CCPs
        while cur_tao < self.tao_max:
            cur_xcf = xcf
            if cur_xcf.shape == x_orig.shape: # Apply transformation is necessary - xcf is computed in the original space which can be different from the space the model works on!
                cur_xcf = self._apply_affine_preprocessing_to_const(cur_xcf)
            xcf_ = self.solve_aux(cur_xcf, cur_tao, x_orig)
            xcf = xcf_
            if y_target == self.model.predict([self._apply_affine_preprocessing_to_const(xcf_)])[0]:
                break
            # Increase penalty parameter
            cur_tao *= self.mu
        return xcf
#################################################
# Stuff for computing plausible counterfactuals #
#################################################
class HighDensityEllipsoids:
    """For every mixture component, computes the radius of an ellipsoid
    (in Mahalanobis distance) that covers the training samples whose density
    exceeds a given threshold.
    """
    def __init__(self, X, X_densities, cluster_probs, means, covariances, density_threshold=None, optimizer_args=None, **kwds):
        self.X = X
        self.X_densities = X_densities
        # Fall back to -inf so that every sample is covered when no threshold is given.
        self.density_threshold = density_threshold if density_threshold is not None else float("-inf")
        self.cluster_probs = cluster_probs
        self.means = means
        self.covariances = covariances
        self.epsilon = 1e-5
        self.solver = cp.SCS
        if optimizer_args is not None:
            if "epsilon" in optimizer_args:
                self.epsilon = optimizer_args["epsilon"]
            if "solver" in optimizer_args:
                self.solver = cvx_desc_to_solver(optimizer_args["solver"])
        super().__init__(**kwds)

    def compute_ellipsoids(self):
        # Public entry point; returns one radius per ellipsoid.
        return self.build_solve_opt()

    def _solve(self, prob):
        prob.solve(solver=self.solver, verbose=False)

    def build_solve_opt(self):
        """Build and solve the radius-minimization program.

        Minimizes the sum of squared radii subject to every sufficiently
        dense sample lying inside its component's ellipsoid.
        """
        n_ellipsoids = self.cluster_probs.shape[1]
        n_samples = self.X.shape[0]
        # Variables
        r = cp.Variable(n_ellipsoids, pos=True)
        # Construct constraints
        constraints = []
        for i in range(n_ellipsoids):
            mu_i = self.means[i]
            cov_i = np.linalg.inv(self.covariances[i])
            for j in range(n_samples):
                if self.X_densities[j][i] >= self.density_threshold: # At least as good as a requested NLL
                    x_j = self.X[j,:]
                    # squared Mahalanobis distance of sample j to component i
                    a = (x_j - mu_i)
                    b = np.dot(a, np.dot(cov_i, a))
                    constraints.append(b <= r[i])
        # Build the final program
        f = cp.Minimize(cp.sum(r))
        prob = cp.Problem(f, constraints)
        # Solve it!
        self._solve(prob)
        return r.value
class PlausibleCounterfactualOfHyperplaneClassifier():
    """Computes plausible counterfactuals of a linear (hyperplane) classifier.

    Plausibility is enforced by keeping the counterfactual inside a
    high-density ellipsoid of a class-wise Gaussian mixture model;
    `setup_plausibility_params` must be called before
    `compute_plausible_counterfactual`.
    """
    def __init__(self, w, b, n_dims, **kwds):
        # Decision function parameters: w x + b.
        self.hyperplane_w = w
        self.hyperplane_b = b
        self.n_dims = n_dims
        # GMM / ellipsoid parameters; populated by setup_plausibility_params.
        self.gmm_weights = None
        self.gmm_means = None
        self.gmm_covariances = None
        self.ellipsoids_r = None
        self.projection_matrix = None
        self.projection_mean_sub = None
        self.density_constraint = None
        self.min_density = None
        self.epsilon = 1e-2
        self.solver = cp.SCS
        self.gmm_cluster_index = 0  # For internal use only!
        super().__init__(**kwds)

    def setup_plausibility_params(self, ellipsoids_r, gmm_weights, gmm_means, gmm_covariances, projection_matrix=None, projection_mean_sub=None, density_constraint=True, density_threshold=-85):
        """Set the GMM/ellipsoid parameters that define plausibility."""
        self.gmm_weights = gmm_weights
        self.gmm_means = gmm_means
        self.gmm_covariances = gmm_covariances
        self.ellipsoids_r = ellipsoids_r
        # Default to the identity projection (work in the original space).
        self.projection_matrix = np.eye(self.n_dims) if projection_matrix is None else projection_matrix
        self.projection_mean_sub = np.zeros(self.n_dims) if projection_mean_sub is None else projection_mean_sub
        self.density_constraint = density_constraint
        self.min_density = density_threshold

    def _build_constraints_plausibility_opt(self, var_x, y):
        """Constraints forcing the (projected) point onto the target side of the hyperplane(s)."""
        constraints = []
        if self.hyperplane_w.shape[0] > 1:
            # Multiclass: the target class' score must beat every other class.
            for i in range(self.hyperplane_w.shape[0]):
                if i != y:
                    constraints += [(self.projection_matrix @ (var_x - self.projection_mean_sub)).T @ (self.hyperplane_w[i,:] - self.hyperplane_w[y,:]) + (self.hyperplane_b[i] - self.hyperplane_b[y]) + self.epsilon <= 0]
        else:
            # Binary: the sign of w x + b decides the class.
            if y == 0:
                return [(self.projection_matrix @ (var_x - self.projection_mean_sub)).T @ self.hyperplane_w.reshape(-1, 1) + self.hyperplane_b + self.epsilon <= 0]
            else:
                return [(self.projection_matrix @ (var_x - self.projection_mean_sub)).T @ self.hyperplane_w.reshape(-1, 1) + self.hyperplane_b - self.epsilon >= 0]
        return constraints

    def compute_plausible_counterfactual(self, x, y, regularizer="l1"):
        """Search over all mixture components of class ``y`` and return the
        closest plausible counterfactual of ``x`` (or None if none is found).

        regularizer: "l1" for the (weighted) Manhattan distance, anything
        else for the Euclidean distance.
        """
        import logging  # local import: this class may be used without a module-level logging import

        mad = None
        if regularizer == "l1":
            mad = np.ones(x.shape[0])
        xcf = None
        s = float("inf")
        for i in range(self.gmm_weights[y].shape[0]):
            try:
                self.gmm_cluster_index = i
                xcf_ = self.build_solve_plausibility_opt(x, y, mad)
                if xcf_ is None:
                    continue
                s_ = None
                if regularizer == "l1":
                    s_ = np.sum(np.abs(xcf_ - x))
                else:
                    s_ = np.linalg.norm(xcf_ - x, ord=2)
                if s_ <= s:
                    s = s_
                    xcf = xcf_
            except Exception as ex:
                # Best effort: a failing subproblem for one mixture component
                # must not abort the whole search, but don't swallow it
                # silently (was a bare `pass` with a TODO).
                logging.debug(str(ex))
        return xcf

    def _solve_plausibility_opt(self, prob):
        prob.solve(solver=self.solver, verbose=False)

    def build_solve_plausibility_opt(self, x_orig, y, mad=None):
        """Build and solve the plausibility program for the current cluster."""
        dim = x_orig.shape[0]
        # Variables
        x = cp.Variable(dim)
        beta = cp.Variable(dim)  # epigraph variable for the Manhattan distance
        # Constants
        c = np.ones(dim)
        z = np.zeros(dim)
        I = np.eye(dim)
        # Construct constraints
        constraints = self._build_constraints_plausibility_opt(x, y)
        if self.density_constraint is True:
            i = self.gmm_cluster_index
            x_i = self.gmm_means[y][i]
            cov = self.gmm_covariances[y][i]
            cov = np.linalg.inv(cov)
            constraints += [cp.quad_form(self.projection_matrix @ (x - self.projection_mean_sub) - x_i, cov) - self.ellipsoids_r[i] <= 0] # Numerically much more stable than the explicit density component constraint
        # If necessary, construct the weight matrix for the weighted Manhattan distance
        Upsilon = None
        if mad is not None:
            alpha = 1. / mad
            Upsilon = np.diag(alpha)
        # Build the final program
        f = None
        if mad is not None:
            f = cp.Minimize(c.T @ beta) # Minimize (weighted) Manhattan distance
            constraints += [Upsilon @ (x - x_orig) <= beta, (-1. * Upsilon) @ (x - x_orig) <= beta, I @ beta >= z]
        else:
            f = cp.Minimize((1/2)*cp.quad_form(x, I) - x_orig.T@x) # Minimize L2 distance
        prob = cp.Problem(f, constraints)
        # Solve it!
        self._solve_plausibility_opt(prob)
        return x.value
"cvxpy.Minimize",
"cvxpy.Variable",
"cvxpy.Problem",
"numpy.eye",
"cvxpy.trace",
"numpy.ones",
"numpy.abs",
"cvxpy.sum",
"cvxpy.bmat",
"numpy.diag",
"numpy.array",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.dot",
"cvxpy.quad_form",
"numpy.linalg.norm",
"cvxpy.norm"
] | [((4793, 4809), 'cvxpy.Variable', 'cp.Variable', (['dim'], {}), '(dim)\n', (4804, 4809), True, 'import cvxpy as cp\n'), ((4825, 4841), 'cvxpy.Variable', 'cp.Variable', (['dim'], {}), '(dim)\n', (4836, 4841), True, 'import cvxpy as cp\n'), ((4883, 4895), 'numpy.ones', 'np.ones', (['dim'], {}), '(dim)\n', (4890, 4895), True, 'import numpy as np\n'), ((4908, 4921), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (4916, 4921), True, 'import numpy as np\n'), ((4934, 4945), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (4940, 4945), True, 'import numpy as np\n'), ((6141, 6167), 'cvxpy.Problem', 'cp.Problem', (['f', 'constraints'], {}), '(f, constraints)\n', (6151, 6167), True, 'import cvxpy as cp\n'), ((8796, 8835), 'cvxpy.Variable', 'cp.Variable', (['(dim, dim)'], {'symmetric': '(True)'}), '((dim, dim), symmetric=True)\n', (8807, 8835), True, 'import cvxpy as cp\n'), ((8848, 8869), 'cvxpy.Variable', 'cp.Variable', (['(dim, 1)'], {}), '((dim, 1))\n', (8859, 8869), True, 'import cvxpy as cp\n'), ((8926, 8937), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (8932, 8937), True, 'import numpy as np\n'), ((9693, 9719), 'cvxpy.Problem', 'cp.Problem', (['f', 'constraints'], {}), '(f, constraints)\n', (9703, 9719), True, 'import cvxpy as cp\n'), ((19430, 19465), 'cvxpy.Variable', 'cp.Variable', (['n_ellipsoids'], {'pos': '(True)'}), '(n_ellipsoids, pos=True)\n', (19441, 19465), True, 'import cvxpy as cp\n'), ((20081, 20107), 'cvxpy.Problem', 'cp.Problem', (['f', 'constraints'], {}), '(f, constraints)\n', (20091, 20107), True, 'import cvxpy as cp\n'), ((23428, 23444), 'cvxpy.Variable', 'cp.Variable', (['dim'], {}), '(dim)\n', (23439, 23444), True, 'import cvxpy as cp\n'), ((23460, 23476), 'cvxpy.Variable', 'cp.Variable', (['dim'], {}), '(dim)\n', (23471, 23476), True, 'import cvxpy as cp\n'), ((23510, 23522), 'numpy.ones', 'np.ones', (['dim'], {}), '(dim)\n', (23517, 23522), True, 'import numpy as np\n'), ((23535, 23548), 'numpy.zeros', 'np.zeros', (['dim'], {}), 
'(dim)\n', (23543, 23548), True, 'import numpy as np\n'), ((23561, 23572), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (23567, 23572), True, 'import numpy as np\n'), ((24711, 24737), 'cvxpy.Problem', 'cp.Problem', (['f', 'constraints'], {}), '(f, constraints)\n', (24721, 24737), True, 'import cvxpy as cp\n'), ((5718, 5732), 'numpy.diag', 'np.diag', (['alpha'], {}), '(alpha)\n', (5725, 5732), True, 'import numpy as np\n'), ((5829, 5852), 'cvxpy.Minimize', 'cp.Minimize', (['(c.T @ beta)'], {}), '(c.T @ beta)\n', (5840, 5852), True, 'import cvxpy as cp\n'), ((15090, 15111), 'cvxpy.Variable', 'cp.Variable', (['self.dim'], {}), '(self.dim)\n', (15101, 15111), True, 'import cvxpy as cp\n'), ((15353, 15370), 'numpy.ones', 'np.ones', (['self.dim'], {}), '(self.dim)\n', (15360, 15370), True, 'import numpy as np\n'), ((16875, 16901), 'cvxpy.Problem', 'cp.Problem', (['f', 'constraints'], {}), '(f, constraints)\n', (16885, 16901), True, 'import cvxpy as cp\n'), ((19615, 19649), 'numpy.linalg.inv', 'np.linalg.inv', (['self.covariances[i]'], {}), '(self.covariances[i])\n', (19628, 19649), True, 'import numpy as np\n'), ((20055, 20064), 'cvxpy.sum', 'cp.sum', (['r'], {}), '(r)\n', (20061, 20064), True, 'import cvxpy as cp\n'), ((21195, 21214), 'numpy.eye', 'np.eye', (['self.n_dims'], {}), '(self.n_dims)\n', (21201, 21214), True, 'import numpy as np\n'), ((21302, 21323), 'numpy.zeros', 'np.zeros', (['self.n_dims'], {}), '(self.n_dims)\n', (21310, 21323), True, 'import numpy as np\n'), ((22474, 22493), 'numpy.ones', 'np.ones', (['x.shape[0]'], {}), '(x.shape[0])\n', (22481, 22493), True, 'import numpy as np\n'), ((23861, 23879), 'numpy.linalg.inv', 'np.linalg.inv', (['cov'], {}), '(cov)\n', (23874, 23879), True, 'import numpy as np\n'), ((24288, 24302), 'numpy.diag', 'np.diag', (['alpha'], {}), '(alpha)\n', (24295, 24302), True, 'import numpy as np\n'), ((24399, 24422), 'cvxpy.Minimize', 'cp.Minimize', (['(c.T @ beta)'], {}), '(c.T @ beta)\n', (24410, 24422), True, 'import 
cvxpy as cp\n'), ((2035, 2052), 'numpy.dot', 'np.dot', (['self.A', 'x'], {}), '(self.A, x)\n', (2041, 2052), True, 'import numpy as np\n'), ((5438, 5449), 'numpy.array', 'np.array', (['A'], {}), '(A)\n', (5446, 5449), True, 'import numpy as np\n'), ((5470, 5481), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (5478, 5481), True, 'import numpy as np\n'), ((8884, 8899), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (8892, 8899), True, 'import numpy as np\n'), ((9050, 9079), 'cvxpy.bmat', 'cp.bmat', (['[[X, x], [x.T, one]]'], {}), '([[X, x], [x.T, one]])\n', (9057, 9079), True, 'import cvxpy as cp\n'), ((9493, 9504), 'numpy.array', 'np.array', (['A'], {}), '(A)\n', (9501, 9504), True, 'import numpy as np\n'), ((9525, 9536), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (9533, 9536), True, 'import numpy as np\n'), ((9641, 9656), 'cvxpy.trace', 'cp.trace', (['(I @ X)'], {}), '(I @ X)\n', (9649, 9656), True, 'import cvxpy as cp\n'), ((15496, 15534), 'cvxpy.quad_form', 'cp.quad_form', (['var_x_prime', 'self.A0s[i]'], {}), '(var_x_prime, self.A0s[i])\n', (15508, 15534), True, 'import cvxpy as cp\n'), ((5261, 5274), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (5269, 5274), True, 'import numpy as np\n'), ((9316, 9329), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (9324, 9329), True, 'import numpy as np\n'), ((16266, 16277), 'numpy.array', 'np.array', (['A'], {}), '(A)\n', (16274, 16277), True, 'import numpy as np\n'), ((16302, 16313), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (16310, 16313), True, 'import numpy as np\n'), ((22955, 22986), 'numpy.linalg.norm', 'np.linalg.norm', (['(xcf_ - x)'], {'ord': '(2)'}), '(xcf_ - x, ord=2)\n', (22969, 22986), True, 'import numpy as np\n'), ((6060, 6078), 'cvxpy.quad_form', 'cp.quad_form', (['x', 'I'], {}), '(x, I)\n', (6072, 6078), True, 'import cvxpy as cp\n'), ((16060, 16078), 'numpy.zeros', 'np.zeros', (['self.dim'], {}), '(self.dim)\n', (16068, 16078), True, 'import numpy as np\n'), ((16545, 
16571), 'cvxpy.norm', 'cp.norm', (['(var_x - x_orig)', '(1)'], {}), '(var_x - x_orig, 1)\n', (16552, 16571), True, 'import cvxpy as cp\n'), ((19928, 19944), 'numpy.dot', 'np.dot', (['cov_i', 'a'], {}), '(cov_i, a)\n', (19934, 19944), True, 'import numpy as np\n'), ((22890, 22906), 'numpy.abs', 'np.abs', (['(xcf_ - x)'], {}), '(xcf_ - x)\n', (22896, 22906), True, 'import numpy as np\n'), ((23909, 23994), 'cvxpy.quad_form', 'cp.quad_form', (['(self.projection_matrix @ (x - self.projection_mean_sub) - x_i)', 'cov'], {}), '(self.projection_matrix @ (x - self.projection_mean_sub) - x_i, cov\n )\n', (23921, 23994), True, 'import cvxpy as cp\n'), ((24630, 24648), 'cvxpy.quad_form', 'cp.quad_form', (['x', 'I'], {}), '(x, I)\n', (24642, 24648), True, 'import cvxpy as cp\n'), ((15676, 15700), 'numpy.dot', 'np.dot', (['xcf', 'self.A1s[i]'], {}), '(xcf, self.A1s[i])\n', (15682, 15700), True, 'import numpy as np\n'), ((15627, 15651), 'numpy.dot', 'np.dot', (['xcf', 'self.A1s[i]'], {}), '(xcf, self.A1s[i])\n', (15633, 15651), True, 'import numpy as np\n'), ((16769, 16789), 'numpy.dot', 'np.dot', (['xcf', 'self.Q1'], {}), '(xcf, self.Q1)\n', (16775, 16789), True, 'import numpy as np\n'), ((16724, 16744), 'numpy.dot', 'np.dot', (['xcf', 'self.Q1'], {}), '(xcf, self.Q1)\n', (16730, 16744), True, 'import numpy as np\n'), ((16641, 16675), 'cvxpy.quad_form', 'cp.quad_form', (['var_x_prime', 'self.Q0'], {}), '(var_x_prime, self.Q0)\n', (16653, 16675), True, 'import cvxpy as cp\n')] |
import numpy as np
import cv2
def rvec2euler(rvec):
    '''Converts rotation vector (Rodrigues) to euler angles
    Returns
    -------
    ndarray
        euler angles; shape=(3,); dtype=float
    '''
    # cv2.Rodrigues returns (rotation_matrix, jacobian); only the matrix is needed
    rot_mat, _ = cv2.Rodrigues(rvec)
    return rotmat2euler(rot_mat)
def rvec2quat(rvec):
    '''Converts rotation vector (Rodrigues) to quaternion
    Returns
    -------
    ndarray
        quaternion; shape=(4,); dtype=float
    '''
    # go through the euler-angle representation
    return euler2quat(rvec2euler(rvec))
def euler2quat(euler):
    '''Converts euler angles (yaw, pitch, roll) to quaternion
    Returns
    -------
    ndarray
        quaternion (w, x, y, z); shape=(4,); dtype=float
    '''
    # half-angle sines/cosines of yaw (euler[0]), pitch (euler[1]) and roll (euler[2])
    cos_y, sin_y = np.cos(0.5 * euler[0]), np.sin(0.5 * euler[0])
    cos_p, sin_p = np.cos(0.5 * euler[1]), np.sin(0.5 * euler[1])
    cos_r, sin_r = np.cos(0.5 * euler[2]), np.sin(0.5 * euler[2])
    w = cos_r * cos_p * cos_y + sin_r * sin_p * sin_y
    x = sin_r * cos_p * cos_y - cos_r * sin_p * sin_y
    y = cos_r * sin_p * cos_y + sin_r * cos_p * sin_y
    z = cos_r * cos_p * sin_y - sin_r * sin_p * cos_y
    return np.array((w, x, y, z))
def rotmat2euler(R):
    '''Converts rotation matrix to euler angles

    Uses the standard ZYX decomposition: x = atan2(R21, R22),
    y = atan2(-R20, sy), z = atan2(R10, R00), with a gimbal-lock fallback
    when sy = sqrt(R00^2 + R10^2) (i.e. |cos(pitch)|) vanishes.

    Returns
    -------
    ndarray
        euler angles; shape=(3,); dtype=float
    '''
    # BUGFIX: sy must be sqrt(R00^2 + R10^2); the previous R[1,1] term wrongly
    # triggered the singular branch for plain rotations about z near 90 deg.
    sy = np.sqrt(R[0,0]*R[0,0]+R[1,0]*R[1,0])
    singular = sy < 1e-6
    if not singular:
        x = np.arctan2(R[2,1], R[2,2])
        y = np.arctan2(-R[2,0], sy)
        z = np.arctan2(R[1,0], R[0,0])
    else:
        # gimbal lock: yaw is unobservable, conventionally set to 0
        x = np.arctan2(-R[1,2],R[1,1])
        y = np.arctan2(-R[2,0], sy)
        z = 0
    return np.array((x,y,z))
def transformation_matrix(rvec, tvec):
    '''Builds a 4x4 homogeneous transform from a Rodrigues vector and a translation.'''
    rot_mat, _ = cv2.Rodrigues(rvec)
    mat = np.zeros((4, 4))
    mat[:3, :3] = rot_mat
    mat[:3, 3] = tvec.flatten()
    mat[3, 3] = 1
    return mat
def inverse_transformation_matrix(rvec, tvec):
    '''Builds the 4x4 homogeneous transform that inverts (rvec, tvec).'''
    rot_mat, _ = cv2.Rodrigues(rvec)
    # rotational matrices are orthogonal so inv <--> transpose
    rot_inv = rot_mat.T
    mat = np.zeros((4, 4))
    mat[:3, :3] = rot_inv
    mat[:3, 3] = -np.dot(rot_inv, tvec[:, 0])
    mat[3, 3] = 1
    return mat
def invert_transformation(mtx):
    '''Inverts a rigid 4x4 transform via R^T and -R^T t (no matrix inversion).'''
    rot_t = mtx[:3, :3].T
    out = np.copy(mtx)
    out[:3, :3] = rot_t
    out[:3, 3] = -np.dot(rot_t, mtx[:3, 3])
    return out
def coord_transform(transform_mtx, pts):
    '''Applies a homogeneous transform to one point or an array of points.

    A single point (1-D input) is promoted to a 1xN array; the result is
    always 2-D, de-homogenized by the last coordinate.
    '''
    if pts.ndim == 1:
        pts = pts[None, :]
    ones = np.ones((len(pts), 1))
    homog = np.concatenate((pts, ones), axis=1)
    transformed = np.dot(transform_mtx, homog.T).T
    return np.true_divide(transformed[:, :-1], transformed[:, [-1]])
| [
"numpy.copy",
"numpy.sqrt",
"numpy.array",
"numpy.zeros",
"cv2.Rodrigues",
"numpy.arctan2",
"numpy.cos",
"numpy.true_divide",
"numpy.dot",
"numpy.sin"
] | [((644, 666), 'numpy.cos', 'np.cos', (['(0.5 * euler[0])'], {}), '(0.5 * euler[0])\n', (650, 666), True, 'import numpy as np\n'), ((676, 698), 'numpy.sin', 'np.sin', (['(0.5 * euler[0])'], {}), '(0.5 * euler[0])\n', (682, 698), True, 'import numpy as np\n'), ((708, 730), 'numpy.cos', 'np.cos', (['(0.5 * euler[1])'], {}), '(0.5 * euler[1])\n', (714, 730), True, 'import numpy as np\n'), ((740, 762), 'numpy.sin', 'np.sin', (['(0.5 * euler[1])'], {}), '(0.5 * euler[1])\n', (746, 762), True, 'import numpy as np\n'), ((772, 794), 'numpy.cos', 'np.cos', (['(0.5 * euler[2])'], {}), '(0.5 * euler[2])\n', (778, 794), True, 'import numpy as np\n'), ((804, 826), 'numpy.sin', 'np.sin', (['(0.5 * euler[2])'], {}), '(0.5 * euler[2])\n', (810, 826), True, 'import numpy as np\n'), ((839, 969), 'numpy.array', 'np.array', (['(cr * cp * cy + sr * sp * sy, sr * cp * cy - cr * sp * sy, cr * sp * cy + \n sr * cp * sy, cr * cp * sy - sr * sp * cy)'], {}), '((cr * cp * cy + sr * sp * sy, sr * cp * cy - cr * sp * sy, cr * sp *\n cy + sr * cp * sy, cr * cp * sy - sr * sp * cy))\n', (847, 969), True, 'import numpy as np\n'), ((1159, 1205), 'numpy.sqrt', 'np.sqrt', (['(R[0, 0] * R[0, 0] + R[1, 1] * R[1, 1])'], {}), '(R[0, 0] * R[0, 0] + R[1, 1] * R[1, 1])\n', (1166, 1205), True, 'import numpy as np\n'), ((1468, 1487), 'numpy.array', 'np.array', (['(x, y, z)'], {}), '((x, y, z))\n', (1476, 1487), True, 'import numpy as np\n'), ((1536, 1552), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (1544, 1552), True, 'import numpy as np\n'), ((1713, 1729), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (1721, 1729), True, 'import numpy as np\n'), ((2014, 2026), 'numpy.copy', 'np.copy', (['mtx'], {}), '(mtx)\n', (2021, 2026), True, 'import numpy as np\n'), ((2377, 2438), 'numpy.true_divide', 'np.true_divide', (['new_homog_pts[:, :-1]', 'new_homog_pts[:, [-1]]'], {}), '(new_homog_pts[:, :-1], new_homog_pts[:, [-1]])\n', (2391, 2438), True, 'import numpy as np\n'), ((1256, 1284), 
'numpy.arctan2', 'np.arctan2', (['R[2, 1]', 'R[2, 2]'], {}), '(R[2, 1], R[2, 2])\n', (1266, 1284), True, 'import numpy as np\n'), ((1295, 1319), 'numpy.arctan2', 'np.arctan2', (['(-R[2, 0])', 'sy'], {}), '(-R[2, 0], sy)\n', (1305, 1319), True, 'import numpy as np\n'), ((1331, 1359), 'numpy.arctan2', 'np.arctan2', (['R[1, 0]', 'R[0, 0]'], {}), '(R[1, 0], R[0, 0])\n', (1341, 1359), True, 'import numpy as np\n'), ((1380, 1409), 'numpy.arctan2', 'np.arctan2', (['(-R[1, 2])', 'R[1, 1]'], {}), '(-R[1, 2], R[1, 1])\n', (1390, 1409), True, 'import numpy as np\n'), ((1419, 1443), 'numpy.arctan2', 'np.arctan2', (['(-R[2, 0])', 'sy'], {}), '(-R[2, 0], sy)\n', (1429, 1443), True, 'import numpy as np\n'), ((1600, 1619), 'cv2.Rodrigues', 'cv2.Rodrigues', (['rvec'], {}), '(rvec)\n', (1613, 1619), False, 'import cv2\n'), ((1743, 1762), 'cv2.Rodrigues', 'cv2.Rodrigues', (['rvec'], {}), '(rvec)\n', (1756, 1762), False, 'import cv2\n'), ((1874, 1905), 'numpy.dot', 'np.dot', (['inv_rot_mat', 'tvec[:, 0]'], {}), '(inv_rot_mat, tvec[:, 0])\n', (1880, 1905), True, 'import numpy as np\n'), ((2084, 2120), 'numpy.dot', 'np.dot', (['inverted[:3, :3]', 'mtx[:3, 3]'], {}), '(inverted[:3, :3], mtx[:3, 3])\n', (2090, 2120), True, 'import numpy as np\n'), ((2326, 2360), 'numpy.dot', 'np.dot', (['transform_mtx', 'homog_pts.T'], {}), '(transform_mtx, homog_pts.T)\n', (2332, 2360), True, 'import numpy as np\n'), ((228, 247), 'cv2.Rodrigues', 'cv2.Rodrigues', (['rvec'], {}), '(rvec)\n', (241, 247), False, 'import cv2\n')] |
import crosscat.cython_code.CyclicComponentModel as ccm
import math
import random
import numpy
import six
from scipy.stats import vonmises
from crosscat.utils.general_utils import logmeanexp
import pdb
# Shorthand used throughout this module.
pi = math.pi
# Draw a fresh 31-bit seed from an rng (a random.Random instance).
next_seed = lambda rng: rng.randrange(2147483647)
# Default prior hyperparameters: a (mean-prior precision), b (prior mean), kappa.
default_hyperparameters = dict(a=1.0, b=pi, kappa=4.0)
# Default data-generating parameters: mu (mean direction), kappa (precision).
default_data_parameters = dict(mu=pi, kappa=4.0)
###############################################################################
# Input-checking and exception-handling functions
###############################################################################
def check_type_force_float(x, name):
    """
    If an int is passed, convert it to a float. If some other type is passed,
    raise an exception.
    """
    if isinstance(x, (float, numpy.float64)):
        return x
    if type(x) is int:
        return float(x)
    raise TypeError("%r should be a float" % (name,))
def check_data_type_column_data(X):
    """
    Makes sure that X is a numpy array and that it is a column vector
    """
    # exact type check on purpose: subclasses of ndarray are rejected too
    if type(X) is not numpy.ndarray:
        raise TypeError("X should be type numpy.ndarray")
    if X.ndim == 2 and X.shape[1] > 1:
        raise TypeError("X should have a single column.")
def check_hyperparams_dict(hypers):
    """
    Validate a hyperparameter dict for the cyclic component model.

    Requires exactly the keys 'a', 'b' and 'kappa', all floats, with
    a > 0, kappa > 0 and b in (0, 2*pi).

    Raises TypeError, KeyError or ValueError on invalid input.
    """
    if type(hypers) is not dict:
        raise TypeError("hypers should be a dict")
    keys = ['a', 'b', 'kappa']
    for key in keys:
        if key not in hypers:
            raise KeyError("missing key in hypers: %r" % (key,))
    # plain .items() replaces the six.iteritems py2 shim (identical iteration)
    for key, value in hypers.items():
        if key not in keys:
            raise KeyError("invalid hypers key: %r" % (key,))
        if not isinstance(value, (float, numpy.float64)):
            raise TypeError("%r should be float" % (key,))
        if key in ['a', 'kappa']:
            if value <= 0.0:
                raise ValueError("hypers[%r] should be greater than 0" % (key,))
        if key == 'b':
            if value <= 0.0 or value >= 2*math.pi:
                raise ValueError("hypers[%r] should be in [0,2*pi]" % (key,))
def check_model_params_dict(params):
    """
    Validate a model-parameter dict for the cyclic component model.

    Requires exactly the keys 'mu' and 'kappa', both floats, with
    kappa > 0 and mu in [0, 2*pi].

    Raises TypeError, KeyError or ValueError on invalid input.
    """
    if type(params) is not dict:
        raise TypeError("params should be a dict")
    keys = ['mu', 'kappa']
    for key in keys:
        if key not in params:
            raise KeyError("missing key in params: %r" % (key,))
    # plain .items() replaces the six.iteritems py2 shim (identical iteration)
    for key, value in params.items():
        if key not in keys:
            raise KeyError("invalid params key: %r" % (key,))
        if not isinstance(value, (float, numpy.float64)):
            raise TypeError("%r should be float" % (key,))
        if key == "kappa":
            if value <= 0.0:
                raise ValueError("kappa should be greater than 0")
        else:
            # key == "mu" is guaranteed by the membership check above; the
            # original's extra `elif key != "mu": raise KeyError` was unreachable
            if value < 0.0 or value > 2*math.pi:
                raise ValueError("mu should be in [0,2*pi]")
###############################################################################
# The class extension
###############################################################################
class p_CyclicComponentModel(ccm.p_CyclicComponentModel):
    """Von Mises component model for cyclic data on [0, 2*pi].

    scipy's von Mises distribution is centered on 0 with support [-pi, pi],
    so data and location parameters are shifted by -pi before being passed
    to scipy (and by +pi on the way back).
    """
    model_type = 'vonmises'
    cctype = 'cyclic'

    @classmethod
    def from_parameters(cls, N, data_params=default_data_parameters, hypers=None, gen_seed=0):
        """
        Initialize a cyclic component model with sufficient statistics
        generated from random data.
        Inputs:
            N: the number of data points
            data_params: a dict with the following keys
                mu: the mean of the data
                kappa: the precision of the data
            hypers: a dict with the following keys
                a: the prior precision of the mean
                b: the prior mean
                kappa: precision parameter
            gen_seed: an integer from which the rng is seeded
        """
        check_model_params_dict(data_params)
        data_kappa = data_params['kappa']
        data_mean = data_params['mu']
        rng = random.Random(gen_seed)
        X = [[rng.vonmisesvariate(data_mean-math.pi, data_kappa)+math.pi] for i in range(N)]
        X = numpy.array(X)
        check_data_type_column_data(X)
        if hypers is None:
            hypers = cls.draw_hyperparameters(X, n_draws=1, gen_seed=next_seed(rng))[0]
        check_hyperparams_dict(hypers)
        # sufficient statistics of the von Mises likelihood
        sum_sin_x = numpy.sum(numpy.sin(X))
        sum_cos_x = numpy.sum(numpy.cos(X))
        hypers['fixed'] = 0.0
        return cls(hypers, float(N), sum_sin_x, sum_cos_x)

    @classmethod
    def from_data(cls, X, hypers=None, gen_seed=0):
        """
        Initialize a cyclic component model with sufficient statistics
        generated from data X.
        Inputs:
            X: a column of data (numpy)
            hypers: dict with the following entries
                a: the prior precision of the mean
                b: the prior mean
                kappa: precision parameter
            gen_seed: an int to seed the rng
        """
        check_data_type_column_data(X)
        if type(gen_seed) is not int:
            raise TypeError("gen_seed should be an int")
        rng = random.Random(gen_seed)
        if hypers is None:
            hypers = cls.draw_hyperparameters(X, gen_seed=next_seed(rng))[0]
        check_hyperparams_dict(hypers)
        N = len(X)
        # sufficient statistics of the von Mises likelihood
        sum_sin_x = numpy.sum(numpy.sin(X))
        sum_cos_x = numpy.sum(numpy.cos(X))
        hypers['fixed'] = 0.0
        return cls(hypers, float(N), sum_sin_x, sum_cos_x)

    def sample_parameters_given_hyper(self, gen_seed=0):
        """
        Samples von Mises parameters given the current hyperparameters.
        Inputs:
            gen_seed: integer used to seed the rng
        """
        if type(gen_seed) is not int:
            raise TypeError("gen_seed should be an int")
        nprng = numpy.random.RandomState(gen_seed)
        hypers = self.get_hypers()
        a = hypers['a']
        b = hypers['b']
        # mu ~ vonmises(b, a); numpy samples on [-pi, pi], hence the shifts
        mu = nprng.vonmises(b-math.pi, a)+math.pi
        # kappa is fixed to its hyperparameter value (not resampled)
        kappa = hypers['kappa']
        assert(kappa > 0)
        assert(mu >= 0 and mu <= 2*pi)
        params = {'mu': mu, 'kappa': kappa}
        return params

    def uncollapsed_likelihood(self, X, parameters):
        """
        Calculates the score of the data X under this component model with mean
        mu and precision kappa.
        Inputs:
            X: A column of data (numpy)
            parameters: a dict with the following keys
                mu: the Von Mises mean
                kappa: the precision of the Von Mises
        """
        check_data_type_column_data(X)
        check_model_params_dict(parameters)
        mu = parameters['mu']
        hypers = self.get_hypers()
        a = hypers['a']
        b = hypers['b']
        # kappa is a fixed hyperparameter in this model; it overrides the
        # value in parameters and contributes no prior term of its own.
        kappa = hypers['kappa']
        # BUGFIX: the previous version referenced undefined names `rho` and
        # `log_prior_rho`, so this method always raised NameError.
        log_likelihood = self.log_likelihood(X, {'mu': mu, 'kappa': kappa})
        # prior on mu: von Mises centered at b with precision a (shifted by
        # -pi to match scipy's [-pi, pi] support)
        log_prior_mu = vonmises.logpdf(mu-math.pi, a, loc=b-math.pi)
        log_p = log_likelihood + log_prior_mu
        return log_p

    @staticmethod
    def log_likelihood(X, parameters):
        """
        Calculates the log likelihood of the data X given mean mu and precision
        kappa.
        Inputs:
            X: a column of data (numpy)
            parameters: a dict with the following keys
                mu: the Von Mises mean
                kappa: the precision of the Von Mises
        """
        check_data_type_column_data(X)
        check_model_params_dict(parameters)
        # scipy's signature is vonmises.logpdf(x, kappa, loc, scale); the
        # previous version passed mu into the kappa slot.
        log_likelihood = numpy.sum(vonmises.logpdf(X-math.pi, parameters['kappa'], loc=parameters['mu']-math.pi))
        return log_likelihood

    @staticmethod
    def log_pdf(X, parameters):
        """
        Calculates the pdf for each point in the data X given mean mu and
        precision kappa.
        Inputs:
            X: a column of data (numpy)
            parameters: a dict with the following keys
                mu: the Von Mises mean
                kappa: the precision of the Von Mises
        """
        check_data_type_column_data(X)
        check_model_params_dict(parameters)
        # BUGFIX: was `X--math.pi` (i.e. X + pi), inconsistent with the
        # X - pi shift used by log_likelihood and cdf.
        return vonmises.logpdf(X-math.pi, parameters['kappa'], loc=parameters['mu']-math.pi)

    @staticmethod
    def cdf(X, parameters):
        """
        Calculates the cdf for each point in the data X given mean mu and
        precision kappa.
        Inputs:
            X: a column of data (numpy)
            parameters: a dict with the following keys
                mu: the Von Mises mean
                kappa: the precision of the Von Mises
        """
        check_data_type_column_data(X)
        check_model_params_dict(parameters)
        # scipy's signature is vonmises.cdf(x, kappa, loc, scale); the
        # previous version passed mu into the kappa slot.
        return vonmises.cdf(X-math.pi, parameters['kappa'], loc=parameters['mu']-math.pi)

    def brute_force_marginal_likelihood(self, X, n_samples=10000, gen_seed=0):
        """
        Calculates the log marginal likelihood via brute force method in which
        parameters (mu and kappa) are repeatedly drawn from the prior, the
        likelihood is calculated for each set of parameters, then the average is
        taken.
        Inputs:
            X: A column of data (numpy)
            n_samples: the number of draws
            gen_seed: seed for the rng
        """
        check_data_type_column_data(X)
        if type(n_samples) is not int:
            raise TypeError("n_samples should be an int")
        if n_samples <= 0:
            raise ValueError("n_samples should be greater than 0")
        if type(gen_seed) is not int:
            raise TypeError("gen_seed should be an int")
        N = float(len(X))
        rng = random.Random(gen_seed)
        log_likelihoods = [0]*n_samples
        for i in range(n_samples):
            params = self.sample_parameters_given_hyper(gen_seed=next_seed(rng))
            log_likelihoods[i] = self.log_likelihood(X, params)
        # Monte Carlo estimate: log of the mean likelihood over prior draws
        log_marginal_likelihood = logmeanexp(log_likelihoods)
        return log_marginal_likelihood

    @staticmethod
    def generate_discrete_support(params, support=0.95, nbins=100):
        """
        Returns a set of intervals over which the component model pdf is
        supported.
        Inputs:
            params: a dict with entries 'mu' and 'kappa'
            nbins: cardinality of the set or the number of grid points in the
                approximation
            support: a float in (0,1) that describes the amount of probability
                we want in the range of support
        """
        if type(nbins) is not int:
            raise TypeError("nbins should be an int")
        if nbins <= 0:
            raise ValueError("nbins should be greater than 0")
        support = check_type_force_float(support, "support")
        if support <= 0.0 or support >= 1.0:
            raise ValueError("support is a float st: 0 < support < 1")
        check_model_params_dict(params)
        mu = params['mu']
        kappa = params['kappa']
        assert(mu >= 0 and mu <= 2*math.pi)
        # central interval of the zero-centered von Mises, shifted to mu
        a, b = vonmises.interval(support, kappa)
        a += mu
        b += mu
        assert -math.pi <= a < b <= 3*math.pi
        assert b - a <= 2*math.pi
        support_range = b - a
        support_bin_size = support_range/(nbins-1.0)
        bins = [a+i*support_bin_size for i in range(nbins)]
        return bins

    @staticmethod
    def draw_hyperparameters(X, n_draws=1, gen_seed=0):
        """
        Draws hyperparameters a, b, and kappa from the same distribution that
        generates the grid in the C++ code.
        Inputs:
            X: a column of data (numpy)
            n_draws: the number of draws
            gen_seed: seed the rng
        Output:
            A list of dicts of draws where each entry has keys 'a', 'b', 'kappa'.
        """
        check_data_type_column_data(X)
        if type(n_draws) is not int:
            raise TypeError("n_draws should be an int")
        if type(gen_seed) is not int:
            raise TypeError("gen_seed should be an int")
        rng = random.Random(gen_seed)
        samples = []
        N = float(len(X))
        vx = numpy.var(X)
        # precision-like hypers are drawn log-uniform over a data-driven range
        a_kappa_draw_range = (vx, vx/N)
        mu_draw_range = (0, 2*pi)
        for i in range(n_draws):
            a = math.exp(rng.uniform(a_kappa_draw_range[0], a_kappa_draw_range[1]))
            kappa = math.exp(rng.uniform(a_kappa_draw_range[0], a_kappa_draw_range[1]))
            b = rng.uniform(mu_draw_range[0], mu_draw_range[1])
            this_draw = dict(a=a, b=b, kappa=kappa)
            samples.append(this_draw)
        assert len(samples) == n_draws
        return samples

    @staticmethod
    def generate_data_from_parameters(params, N, gen_seed=0):
        """
        Generates data from a von Mises distribution.
        Inputs:
            params: a dict with entries 'mu' and 'kappa'
            N: number of data points
        """
        if type(N) is not int:
            raise TypeError("N should be an int")
        if N <= 0:
            raise ValueError("N should be greater than 0")
        nprng = numpy.random.RandomState(gen_seed)
        check_model_params_dict(params)
        mu = params['mu']
        kappa = params['kappa']
        X = numpy.array([[nprng.vonmises(mu-math.pi, kappa)+math.pi] for i in range(N)])
        # numpy's vonmises samples on [-pi, pi]; after the +pi shift every
        # sample should lie in [0, 2*pi].  Fail loudly instead of dropping
        # into the debugger (a pdb.set_trace() was left here).
        for x in X:
            if x < 0. or x > 2.*math.pi:
                raise ValueError("generated sample outside [0, 2*pi]: %r" % (x,))
        assert len(X) == N
        return X

    @staticmethod
    def get_model_parameter_bounds():
        """
        Returns a dict where each key-value pair is a model parameter and a
        tuple with the lower and upper bounds
        """
        inf = float("inf")
        # NOTE(review): the precision bound is keyed 'rho' while the rest of
        # this module calls it 'kappa' — confirm against callers before renaming.
        params = dict(mu=(0.0,2*pi), rho=(0.0 ,inf))
        return params
| [
"crosscat.utils.general_utils.logmeanexp",
"random.Random",
"numpy.array",
"numpy.sum",
"scipy.stats.vonmises.cdf",
"numpy.var",
"numpy.cos",
"pdb.set_trace",
"numpy.sin",
"scipy.stats.vonmises.interval",
"six.iteritems",
"scipy.stats.vonmises.logpdf",
"numpy.random.RandomState"
] | [((1542, 1563), 'six.iteritems', 'six.iteritems', (['hypers'], {}), '(hypers)\n', (1555, 1563), False, 'import six\n'), ((2358, 2379), 'six.iteritems', 'six.iteritems', (['params'], {}), '(params)\n', (2371, 2379), False, 'import six\n'), ((4037, 4060), 'random.Random', 'random.Random', (['gen_seed'], {}), '(gen_seed)\n', (4050, 4060), False, 'import random\n'), ((4167, 4181), 'numpy.array', 'numpy.array', (['X'], {}), '(X)\n', (4178, 4181), False, 'import numpy\n'), ((5192, 5215), 'random.Random', 'random.Random', (['gen_seed'], {}), '(gen_seed)\n', (5205, 5215), False, 'import random\n'), ((5894, 5928), 'numpy.random.RandomState', 'numpy.random.RandomState', (['gen_seed'], {}), '(gen_seed)\n', (5918, 5928), False, 'import numpy\n'), ((6968, 6994), 'numpy.sum', 'numpy.sum', (['((mu - X) ** 2.0)'], {}), '((mu - X) ** 2.0)\n', (6977, 6994), False, 'import numpy\n'), ((7087, 7108), 'scipy.stats.vonmises.logpdf', 'vonmises.logpdf', (['b', 'a'], {}), '(b, a)\n', (7102, 7108), False, 'from scipy.stats import vonmises\n'), ((8277, 8363), 'scipy.stats.vonmises.logpdf', 'vonmises.logpdf', (['(X - -math.pi)', "parameters['kappa']"], {'loc': "(parameters['mu'] - math.pi)"}), "(X - -math.pi, parameters['kappa'], loc=parameters['mu'] -\n math.pi)\n", (8292, 8363), False, 'from scipy.stats import vonmises\n'), ((8828, 8902), 'scipy.stats.vonmises.cdf', 'vonmises.cdf', (['(X - math.pi)', "(parameters['mu'] - math.pi)", "parameters['kappa']"], {}), "(X - math.pi, parameters['mu'] - math.pi, parameters['kappa'])\n", (8840, 8902), False, 'from scipy.stats import vonmises\n'), ((9758, 9781), 'random.Random', 'random.Random', (['gen_seed'], {}), '(gen_seed)\n', (9771, 9781), False, 'import random\n'), ((10037, 10064), 'crosscat.utils.general_utils.logmeanexp', 'logmeanexp', (['log_likelihoods'], {}), '(log_likelihoods)\n', (10047, 10064), False, 'from crosscat.utils.general_utils import logmeanexp\n'), ((11131, 11164), 'scipy.stats.vonmises.interval', 'vonmises.interval', 
(['support', 'kappa'], {}), '(support, kappa)\n', (11148, 11164), False, 'from scipy.stats import vonmises\n'), ((12140, 12163), 'random.Random', 'random.Random', (['gen_seed'], {}), '(gen_seed)\n', (12153, 12163), False, 'import random\n'), ((12227, 12239), 'numpy.var', 'numpy.var', (['X'], {}), '(X)\n', (12236, 12239), False, 'import numpy\n'), ((13185, 13219), 'numpy.random.RandomState', 'numpy.random.RandomState', (['gen_seed'], {}), '(gen_seed)\n', (13209, 13219), False, 'import numpy\n'), ((4408, 4420), 'numpy.sin', 'numpy.sin', (['X'], {}), '(X)\n', (4417, 4420), False, 'import numpy\n'), ((4452, 4464), 'numpy.cos', 'numpy.cos', (['X'], {}), '(X)\n', (4461, 4464), False, 'import numpy\n'), ((5412, 5424), 'numpy.sin', 'numpy.sin', (['X'], {}), '(X)\n', (5421, 5424), False, 'import numpy\n'), ((5456, 5468), 'numpy.cos', 'numpy.cos', (['X'], {}), '(X)\n', (5465, 5468), False, 'import numpy\n'), ((7694, 7771), 'scipy.stats.vonmises.logpdf', 'vonmises.logpdf', (['(X - math.pi)', "(parameters['mu'] - math.pi)", "parameters['kappa']"], {}), "(X - math.pi, parameters['mu'] - math.pi, parameters['kappa'])\n", (7709, 7771), False, 'from scipy.stats import vonmises\n'), ((13488, 13503), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (13501, 13503), False, 'import pdb\n')] |
from pytest import raises, mark
import numpy as np
from beyond.io.ccsds import dumps, loads, CcsdsError
@mark.parametrize("kep", ("kep", "nokep"))
def test_dump_opm(orbit, datafile, ccsds_format, helper, kep):
    # The parametrized flag arrives as a string; turn it into a boolean.
    use_kep = kep == "kep"
    expected = datafile("opm", use_kep)
    produced = dumps(orbit, fmt=ccsds_format, kep=use_kep)
    helper.assert_string(expected, produced)
def test_dump_opm_cov(orbit_cov, datafile, ccsds_format, helper):
    # Covariance in the default frame.
    helper.assert_string(datafile("opm_cov"), dumps(orbit_cov, fmt=ccsds_format))

    # Same orbit with the covariance re-expressed in TNW.
    in_tnw = orbit_cov.copy()
    in_tnw.cov.frame = "TNW"
    helper.assert_string(datafile("opm_cov_tnw"), dumps(in_tnw, fmt=ccsds_format))

    # And re-expressed in QSW.
    in_qsw = orbit_cov.copy()
    in_qsw.cov.frame = "QSW"
    helper.assert_string(datafile("opm_cov_qsw"), dumps(in_qsw, fmt=ccsds_format))
def test_dump_opm_man_impulsive(orbit_man, datafile, ccsds_format, helper):
    # Maneuvers as provided by the fixture (TNW frame).
    helper.assert_string(
        datafile("opm_impulsive_man_tnw"), dumps(orbit_man, fmt=ccsds_format)
    )

    # Switch every maneuver to QSW, swapping the first two delta-v
    # components to match the new frame's axis ordering.
    expected = datafile("opm_impulsive_man_qsw")
    for maneuver in orbit_man.maneuvers:
        maneuver.frame = "QSW"
        maneuver._dv = np.array([maneuver._dv[1], maneuver._dv[0], maneuver._dv[2]])
    helper.assert_string(expected, dumps(orbit_man, fmt=ccsds_format))
def test_dump_opm_man_continuous(orbit_continuous_man, datafile, ccsds_format, helper):
    # Maneuvers as provided by the fixture (TNW frame).
    helper.assert_string(
        datafile("opm_continuous_man_tnw"),
        dumps(orbit_continuous_man, fmt=ccsds_format),
    )

    # Switch every maneuver to QSW, swapping the first two delta-v
    # components to match the new frame's axis ordering.
    expected = datafile("opm_continuous_man_qsw")
    for maneuver in orbit_continuous_man.maneuvers:
        maneuver.frame = "QSW"
        maneuver._dv = np.array([maneuver._dv[1], maneuver._dv[0], maneuver._dv[2]])
    helper.assert_string(expected, dumps(orbit_continuous_man, fmt=ccsds_format))
@mark.jpl
def test_dump_opm_interplanetary(jplfiles, orbit, ccsds_format, datafile, helper):
    # Re-frame the orbit around Mars before dumping.
    orbit.frame = "MarsBarycenter"
    helper.assert_string(datafile("opm_interplanetary"), dumps(orbit, fmt=ccsds_format))
def test_dump_opm_user_defined(orbit, ccsds_format, datafile, helper):
    # Attach a user-defined section to the orbit before dumping.
    orbit._data["ccsds_user_defined"] = {
        "FOO": "foo enters",
        "BAR": "a bar",
    }
    produced = dumps(orbit, fmt=ccsds_format)
    helper.assert_string(datafile("opm_user_defined"), produced)
########## LOAD
def test_load_opm(orbit, datafile, helper):
    # Parsing the reference OPM should reproduce the fixture orbit.
    helper.assert_orbit(orbit, loads(datafile("opm")))
def test_load_opm_no_unit(orbit, datafile, helper):
    # A file without unit annotations must parse to the same orbit.
    helper.assert_orbit(orbit, loads(datafile("opm_no_unit")))
def test_load_opm_strange_unit(datafile):
    # Dummy units, that aren't specified as valid
    with raises(CcsdsError) as excinfo:
        loads(datafile("opm_strange_units"))
    assert str(excinfo.value) == "Unknown unit 'm/s' for the field X_DOT"
def test_load_opm_truncated(datafile):
    # Remove one mandatory line (the first EPOCH) from the reference file.
    lines = datafile("opm").splitlines()
    for idx, line in enumerate(lines):
        if "EPOCH" in line:
            del lines[idx]
            break
    with raises(CcsdsError) as excinfo:
        loads("\n".join(lines))
    assert str(excinfo.value) == "Missing mandatory parameter 'EPOCH'"
def test_load_opm_cov(orbit_cov, datafile, helper):
    # An OPM carrying a covariance block round-trips to the fixture orbit.
    parsed = loads(datafile("opm_cov"))
    helper.assert_orbit(orbit_cov, parsed)
def test_load_opm_cov_qsw(orbit_cov, datafile, helper):
    parsed = loads(datafile("opm_cov_qsw"))
    # Align the fixture's covariance frame with the file content.
    orbit_cov.cov.frame = "QSW"
    helper.assert_orbit(orbit_cov, parsed)
def test_load_opm_man_impulsive(orbit_man, datafile, helper, ccsds_format):
    # Full reference file: parsing it must reproduce the fixture orbit
    # with all of its maneuvers.
    str_data_opm_man = datafile("opm_impulsive_man_tnw")
    data_opm_man = loads(str_data_opm_man)
    helper.assert_orbit(orbit_man, data_opm_man)
    # Now strip everything after the first maneuver from the raw text and
    # check that parsing yields an orbit carrying only that maneuver.
    list_data_opm_man = str_data_opm_man.splitlines()
    if ccsds_format == "kvn":
        # KVN: truncate the file at the second MAN_EPOCH_IGNITION line
        # (the first occurrence only bumps the counter).
        number = 0
        for i, line in enumerate(list_data_opm_man):
            if ccsds_format == "kvn" and "MAN_EPOCH_IGNITION" in line:
                if number:
                    list_data_opm_man = list_data_opm_man[:i]
                    break
                else:
                    number += 1
        data = "\n".join(list_data_opm_man)
    else:
        # XML: drop every line of the second <maneuverParameters> element,
        # closing tag included, while keeping the rest of the document.
        continue_flag = False
        new_data = []
        number = 0
        for line in list_data_opm_man:
            if continue_flag:
                # Inside the element being removed; also skips the line
                # holding the closing tag itself.
                if "</maneuverParameters>" in line:
                    continue_flag = False
                continue
            if "<maneuverParameters>" in line:
                if number:
                    continue_flag = True
                    continue
                number += 1
            new_data.append(line)
        data = "\n".join(new_data)
    # The expected orbit now carries only the first maneuver.
    orbit_man.maneuvers = orbit_man.maneuvers[0]
    data = loads(data)
    helper.assert_orbit(orbit_man, data)
def test_load_opm_man_continuous(orbit_continuous_man, datafile, ccsds_format, helper):
    # Tweak the reference to convert impulsive maneuvers into continuous ones
    lines = datafile("opm_impulsive_man_tnw").splitlines()
    for idx, line in enumerate(lines):
        if "MAN_DURATION" not in line:
            continue
        if ccsds_format == "kvn":
            lines[idx] = "MAN_DURATION = 180.000 [s]"
        else:
            lines[idx] = ' <MAN_DURATION units="s">180.000</MAN_DURATION>'
    parsed = loads("\n".join(lines))
    helper.assert_orbit(orbit_continuous_man, parsed)
@mark.jpl
def test_load_interplanetary(jplfiles, orbit, datafile, helper):
    # Re-frame the fixture orbit around Mars to match the file content.
    orbit.frame = "MarsBarycenter"
    helper.assert_orbit(orbit, loads(datafile("opm_interplanetary")))
def test_load_user_defined(orbit, datafile, helper):
    parsed = loads(datafile("opm_user_defined"))
    helper.assert_orbit(orbit, parsed)
    # The USER_DEFINED section must survive parsing.
    assert "ccsds_user_defined" in parsed._data
    user_fields = parsed._data["ccsds_user_defined"]
    assert user_fields["FOO"] == "foo enters"
    assert user_fields["BAR"] == "a bar"
| [
"beyond.io.ccsds.dumps",
"pytest.mark.parametrize",
"numpy.array",
"pytest.raises",
"beyond.io.ccsds.loads"
] | [((109, 150), 'pytest.mark.parametrize', 'mark.parametrize', (['"""kep"""', "('kep', 'nokep')"], {}), "('kep', ('kep', 'nokep'))\n", (125, 150), False, 'from pytest import raises, mark\n'), ((280, 319), 'beyond.io.ccsds.dumps', 'dumps', (['orbit'], {'fmt': 'ccsds_format', 'kep': 'kep'}), '(orbit, fmt=ccsds_format, kep=kep)\n', (285, 319), False, 'from beyond.io.ccsds import dumps, loads, CcsdsError\n'), ((465, 499), 'beyond.io.ccsds.dumps', 'dumps', (['orbit_cov'], {'fmt': 'ccsds_format'}), '(orbit_cov, fmt=ccsds_format)\n', (470, 499), False, 'from beyond.io.ccsds import dumps, loads, CcsdsError\n'), ((638, 673), 'beyond.io.ccsds.dumps', 'dumps', (['orbit_cov2'], {'fmt': 'ccsds_format'}), '(orbit_cov2, fmt=ccsds_format)\n', (643, 673), False, 'from beyond.io.ccsds import dumps, loads, CcsdsError\n'), ((832, 867), 'beyond.io.ccsds.dumps', 'dumps', (['orbit_cov3'], {'fmt': 'ccsds_format'}), '(orbit_cov3, fmt=ccsds_format)\n', (837, 867), False, 'from beyond.io.ccsds import dumps, loads, CcsdsError\n'), ((1061, 1095), 'beyond.io.ccsds.dumps', 'dumps', (['orbit_man'], {'fmt': 'ccsds_format'}), '(orbit_man, fmt=ccsds_format)\n', (1066, 1095), False, 'from beyond.io.ccsds import dumps, loads, CcsdsError\n'), ((1315, 1349), 'beyond.io.ccsds.dumps', 'dumps', (['orbit_man'], {'fmt': 'ccsds_format'}), '(orbit_man, fmt=ccsds_format)\n', (1320, 1349), False, 'from beyond.io.ccsds import dumps, loads, CcsdsError\n'), ((1531, 1576), 'beyond.io.ccsds.dumps', 'dumps', (['orbit_continuous_man'], {'fmt': 'ccsds_format'}), '(orbit_continuous_man, fmt=ccsds_format)\n', (1536, 1576), False, 'from beyond.io.ccsds import dumps, loads, CcsdsError\n'), ((1809, 1854), 'beyond.io.ccsds.dumps', 'dumps', (['orbit_continuous_man'], {'fmt': 'ccsds_format'}), '(orbit_continuous_man, fmt=ccsds_format)\n', (1814, 1854), False, 'from beyond.io.ccsds import dumps, loads, CcsdsError\n'), ((2032, 2062), 'beyond.io.ccsds.dumps', 'dumps', (['orbit'], {'fmt': 'ccsds_format'}), '(orbit, 
fmt=ccsds_format)\n', (2037, 2062), False, 'from beyond.io.ccsds import dumps, loads, CcsdsError\n'), ((2326, 2356), 'beyond.io.ccsds.dumps', 'dumps', (['orbit'], {'fmt': 'ccsds_format'}), '(orbit, fmt=ccsds_format)\n', (2331, 2356), False, 'from beyond.io.ccsds import dumps, loads, CcsdsError\n'), ((3814, 3837), 'beyond.io.ccsds.loads', 'loads', (['str_data_opm_man'], {}), '(str_data_opm_man)\n', (3819, 3837), False, 'from beyond.io.ccsds import dumps, loads, CcsdsError\n'), ((4901, 4912), 'beyond.io.ccsds.loads', 'loads', (['data'], {}), '(data)\n', (4906, 4912), False, 'from beyond.io.ccsds import dumps, loads, CcsdsError\n'), ((1257, 1303), 'numpy.array', 'np.array', (['[man._dv[1], man._dv[0], man._dv[2]]'], {}), '([man._dv[1], man._dv[0], man._dv[2]])\n', (1265, 1303), True, 'import numpy as np\n'), ((1751, 1797), 'numpy.array', 'np.array', (['[man._dv[1], man._dv[0], man._dv[2]]'], {}), '([man._dv[1], man._dv[0], man._dv[2]])\n', (1759, 1797), True, 'import numpy as np\n'), ((2789, 2807), 'pytest.raises', 'raises', (['CcsdsError'], {}), '(CcsdsError)\n', (2795, 2807), False, 'from pytest import raises, mark\n'), ((3215, 3233), 'pytest.raises', 'raises', (['CcsdsError'], {}), '(CcsdsError)\n', (3221, 3233), False, 'from pytest import raises, mark\n'), ((3248, 3268), 'beyond.io.ccsds.loads', 'loads', (['truncated_opm'], {}), '(truncated_opm)\n', (3253, 3268), False, 'from beyond.io.ccsds import dumps, loads, CcsdsError\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.