code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# -*- coding: UTF-8 -*-
from sympde.topology import Mapping
from sympde.calculus import grad, dot
from sympde.calculus import laplace
from sympde.topology import ScalarFunctionSpace
from sympde.topology import elements_of
from sympde.topology import NormalVector
from sympde.topology import Cube, Derham
from sympde.topology import Union
from sympde.expr import BilinearForm, LinearForm, integral
from sympde.expr import Norm
from sympde.expr import find, EssentialBC
from psydac.fem.basic import FemField
from psydac.api.discretization import discretize
from psydac.feec.pull_push import push_3d_hcurl, push_3d_hdiv
from psydac.api.settings import PSYDAC_BACKEND_GPYCCEL, PSYDAC_BACKEND_NUMBA
from psydac.linalg.utilities import array_to_stencil
from psydac.linalg.iterative_solvers import cg
from mpi4py import MPI
import pytest
import numpy as np
import scipy as sc
#===============================================================================
def splitting_integrator_scipy(e0, b0, M1, M2, CURL, dt, niter):
    """Advance the semi-discrete Maxwell system with an explicit splitting
    scheme, using scipy sparse matrices.

    Update per step:  b <- b - dt * C e,   e <- e + dt * M1^{-1} C^T M2 b.
    M1 is LU-factorized once up front and reused every step.

    Returns (e_history, b_history): lists of niter+1 coefficient arrays,
    including the initial state.
    """
    curl_transposed = CURL.T
    lu = sc.sparse.linalg.splu(M1)

    def weak_curl(b):
        # Compute M1^{-1} C^T M2 b via the precomputed LU factors.
        return lu.solve(curl_transposed.dot(M2.dot(b)))

    e_history, b_history = [e0], [b0]
    for _ in range(niter):
        e_old, b_old = e_history[-1], b_history[-1]
        b_next = b_old - dt * CURL.dot(e_old)
        e_next = e_old + dt * weak_curl(b_next)
        b_history.append(b_next)
        e_history.append(e_next)
    return e_history, b_history
def splitting_integrator_stencil(e0, b0, M1, M2, CURL, dt, niter):
    """Advance the semi-discrete Maxwell system with an explicit splitting
    scheme, working directly on psydac stencil objects.

    Same update as the scipy variant, but the M1 solve is done iteratively
    with conjugate gradients (tol 1e-12) instead of a direct factorization.

    Returns (e_history, b_history): lists of niter+1 coefficient vectors,
    including the initial state.
    """
    curl_transposed = CURL.transpose()

    def weak_curl(b):
        # Compute M1^{-1} C^T M2 b; cg returns (solution, info), keep the solution.
        rhs = curl_transposed.dot(M2.dot(b))
        return cg(M1, rhs, tol=1e-12)[0]

    e_history, b_history = [e0], [b0]
    for _ in range(niter):
        e_old, b_old = e_history[-1], b_history[-1]
        b_next = b_old - dt * CURL.dot(e_old)
        e_next = e_old + dt * weak_curl(b_next)
        b_history.append(b_next)
        e_history.append(e_next)
    return e_history, b_history
def evaluation_all_times(fields, x, y, z):
    """Evaluate every callable in `fields` at the point (x, y, z).

    Returns a 1-D float ndarray with one entry per field (one per stored
    time level).
    """
    values = np.empty(len(fields), dtype='float')
    for i, field in enumerate(fields):
        values[i] = field(x, y, z)
    return values
#==================================================================================
def run_maxwell_3d_scipy(logical_domain, mapping, e_ex, b_ex, ncells, degree, periodic, dt, niter):
    """Solve the 3D time-domain Maxwell equations with a psydac FEEC
    discretization, converting all operators to scipy sparse matrices
    before time integration.

    Parameters
    ----------
    logical_domain : sympde domain (e.g. Cube) to which `mapping` is applied
    mapping : sympde Mapping defining the physical domain
    e_ex, b_ex : 3-tuples of callables (t, x, y, z) giving the exact E and B
    ncells, degree, periodic : per-direction discretization parameters
    dt : time step size
    niter : number of time steps

    Returns
    -------
    float
        Maximum absolute error of the first physical B component, sampled
        along z at x = y = 0.5 over all stored time levels.
    """
    domain = mapping(logical_domain)
    derham = Derham(domain)
    # Symbolic trial/test functions on each space of the de Rham sequence
    u0, v0 = elements_of(derham.V0, names='u0, v0')
    u1, v1 = elements_of(derham.V1, names='u1, v1')
    u2, v2 = elements_of(derham.V2, names='u2, v2')
    u3, v3 = elements_of(derham.V3, names='u3, v3')
    # Mass bilinear forms on V0..V3 (a0 and a3 are never assembled below)
    a0 = BilinearForm((u0, v0), integral(domain, u0*v0))
    a1 = BilinearForm((u1, v1), integral(domain, dot(u1, v1)))
    a2 = BilinearForm((u2, v2), integral(domain, dot(u2, v2)))
    a3 = BilinearForm((u3, v3), integral(domain, u3*v3))
    #==============================================================================
    # Discrete objects: Psydac
    domain_h = discretize(domain, ncells=ncells, comm=MPI.COMM_WORLD)
    derham_h = discretize(derham, domain_h, degree=degree, periodic=periodic)
    a1_h = discretize(a1, domain_h, (derham_h.V1, derham_h.V1), backend=PSYDAC_BACKEND_GPYCCEL)
    a2_h = discretize(a2, domain_h, (derham_h.V2, derham_h.V2), backend=PSYDAC_BACKEND_GPYCCEL)
    # Mass matrices as scipy sparse; M1 in CSC because the integrator
    # LU-factorizes it with splu
    M1 = a1_h.assemble().tosparse().tocsc()
    M2 = a2_h.assemble().tosparse().tocsr()
    # Diff operators
    GRAD, CURL, DIV = derham_h.derivatives_as_matrices
    # Projectors onto the discrete spaces
    P0, P1, P2, P3 = derham_h.projectors(nquads=[5,5,5])
    # Convert the discrete curl to a scipy CSR matrix
    CURL = CURL.transform(lambda block: block.tokronstencil().tostencil()).tomatrix().tosparse().tocsr()
    # initial conditions: exact solutions evaluated at t = 0
    e0_1 = lambda x, y, z: e_ex[0](0, x, y, z)
    e0_2 = lambda x, y, z: e_ex[1](0, x, y, z)
    e0_3 = lambda x, y, z: e_ex[2](0, x, y, z)
    e0 = (e0_1, e0_2, e0_3)
    b0_1 = lambda x, y, z : b_ex[0](0, x, y, z)
    b0_2 = lambda x, y, z : b_ex[1](0, x, y, z)
    b0_3 = lambda x, y, z : b_ex[2](0, x, y, z)
    b0 = (b0_1, b0_2, b0_3)
    # project initial conditions (E into V1, B into V2)
    e0_coeff = P1(e0).coeffs
    b0_coeff = P2(b0).coeffs
    # time integrator (works on raw numpy arrays)
    e_history, b_history = splitting_integrator_scipy(e0_coeff.toarray(), b0_coeff.toarray(), M1, M2, CURL, dt, niter)
    # study of fields: wrap the raw coefficient arrays back into FemFields
    b_history = [array_to_stencil(bi, derham_h.V2.vector_space) for bi in b_history]
    b_fields = [FemField(derham_h.V2, bi).fields for bi in b_history]
    bx_fields = [bi[0] for bi in b_fields]
    by_fields = [bi[1] for bi in b_fields]
    bz_fields = [bi[2] for bi in b_fields]
    # Evaluate each component at one point for every stored time level
    bx_value_fun = lambda x, y, z: evaluation_all_times(bx_fields, x, y, z)
    by_value_fun = lambda x, y, z: evaluation_all_times(by_fields, x, y, z)
    bz_value_fun = lambda x, y, z: evaluation_all_times(bz_fields, x, y, z)
    # Keep only the z breakpoints; fix the sampling line at x = y = 0.5
    x,y,z = derham_h.V0.breaks
    x, y = 0.5, 0.5
    b_values_0 = []
    for zi in z:
        # push forward the logical H(div) values to the physical domain
        b_value_phys = push_3d_hdiv(bx_value_fun, by_value_fun, bz_value_fun, x, y, zi, mapping)
        b_values_0.append(b_value_phys[0])
    b_values_0 = np.array(b_values_0)
    time_array = np.linspace(0, dt*niter, niter + 1)
    tt, zz = np.meshgrid(time_array, z)
    b_ex_values_0 = b_ex[0](tt, x, y, zz)
    # Max-norm error over all sample points and all time levels
    error = abs(b_values_0-b_ex_values_0).max()
    return error
#==================================================================================
def run_maxwell_3d_stencil(logical_domain, mapping, e_ex, b_ex, ncells, degree, periodic, dt, niter):
    """Solve the 3D time-domain Maxwell equations with a psydac FEEC
    discretization, keeping all operators as psydac stencil objects and
    solving the M1 systems iteratively (conjugate gradients).

    Parameters
    ----------
    logical_domain : sympde domain (e.g. Cube) to which `mapping` is applied
    mapping : sympde Mapping defining the physical domain
    e_ex, b_ex : 3-tuples of callables (t, x, y, z) giving the exact E and B
    ncells, degree, periodic : per-direction discretization parameters
    dt : time step size
    niter : number of time steps

    Returns
    -------
    float
        Maximum absolute error of the first physical B component, sampled
        along z at x = y = 0.5 over all stored time levels.
    """
    domain = mapping(logical_domain)
    derham = Derham(domain)
    # Symbolic trial/test functions on each space of the de Rham sequence
    u0, v0 = elements_of(derham.V0, names='u0, v0')
    u1, v1 = elements_of(derham.V1, names='u1, v1')
    u2, v2 = elements_of(derham.V2, names='u2, v2')
    u3, v3 = elements_of(derham.V3, names='u3, v3')
    # Mass bilinear forms on V0..V3 (a0 and a3 are never assembled below)
    a0 = BilinearForm((u0, v0), integral(domain, u0*v0))
    a1 = BilinearForm((u1, v1), integral(domain, dot(u1, v1)))
    a2 = BilinearForm((u2, v2), integral(domain, dot(u2, v2)))
    a3 = BilinearForm((u3, v3), integral(domain, u3*v3))
    #==============================================================================
    # Discrete objects: Psydac
    domain_h = discretize(domain, ncells=ncells, comm=MPI.COMM_WORLD)
    derham_h = discretize(derham, domain_h, degree=degree, periodic=periodic)
    a1_h = discretize(a1, domain_h, (derham_h.V1, derham_h.V1), backend=PSYDAC_BACKEND_GPYCCEL)
    a2_h = discretize(a2, domain_h, (derham_h.V2, derham_h.V2), backend=PSYDAC_BACKEND_GPYCCEL)
    # Mass matrices kept as StencilMatrix objects (no scipy conversion here)
    M1 = a1_h.assemble()
    M2 = a2_h.assemble()
    # Diff operators
    GRAD, CURL, DIV = derham_h.derivatives_as_matrices
    # Projectors onto the discrete spaces
    P0, P1, P2, P3 = derham_h.projectors(nquads=[5,5,5])
    # initial conditions: exact solutions evaluated at t = 0
    e0_1 = lambda x, y, z: e_ex[0](0, x, y, z)
    e0_2 = lambda x, y, z: e_ex[1](0, x, y, z)
    e0_3 = lambda x, y, z: e_ex[2](0, x, y, z)
    e0 = (e0_1, e0_2, e0_3)
    b0_1 = lambda x, y, z : b_ex[0](0, x, y, z)
    b0_2 = lambda x, y, z : b_ex[1](0, x, y, z)
    b0_3 = lambda x, y, z : b_ex[2](0, x, y, z)
    b0 = (b0_1, b0_2, b0_3)
    # project initial conditions (E into V1, B into V2)
    e0_coeff = P1(e0).coeffs
    b0_coeff = P2(b0).coeffs
    # time integrator (works directly on stencil coefficient vectors)
    e_history, b_history = splitting_integrator_stencil(e0_coeff, b0_coeff, M1, M2, CURL, dt, niter)
    # study of fields: wrap coefficient vectors into FemFields
    b_fields = [FemField(derham_h.V2, bi).fields for bi in b_history]
    bx_fields = [bi[0] for bi in b_fields]
    by_fields = [bi[1] for bi in b_fields]
    bz_fields = [bi[2] for bi in b_fields]
    # Evaluate each component at one point for every stored time level
    bx_value_fun = lambda x, y, z: evaluation_all_times(bx_fields, x, y, z)
    by_value_fun = lambda x, y, z: evaluation_all_times(by_fields, x, y, z)
    bz_value_fun = lambda x, y, z: evaluation_all_times(bz_fields, x, y, z)
    # Keep only the z breakpoints; fix the sampling line at x = y = 0.5
    x,y,z = derham_h.V0.breaks
    x, y = 0.5, 0.5
    b_values_0 = []
    for zi in z:
        # push forward the logical H(div) values to the physical domain
        b_value_phys = push_3d_hdiv(bx_value_fun, by_value_fun, bz_value_fun, x, y, zi, mapping)
        b_values_0.append(b_value_phys[0])
    b_values_0 = np.array(b_values_0)
    time_array = np.linspace(0, dt*niter, niter + 1)
    tt, zz = np.meshgrid(time_array, z)
    b_ex_values_0 = b_ex[0](tt, x, y, zz)
    # Max-norm error over all sample points and all time levels
    error = abs(b_values_0-b_ex_values_0).max()
    return error
###############################################################################
# SERIAL TESTS
###############################################################################
#==============================================================================
# 3D Maxwell's equations with "Collela" map
#==============================================================================
def test_maxwell_3d_1():
    """Regression test: 3D Maxwell on a Collela-mapped cube, scipy path.

    A plane wave travelling along z (periodic in all directions) is
    advanced for 10 time steps; the max error in the first B component is
    compared against a stored regression value.
    """
    class CollelaMapping3D(Mapping):
        # Analytic "Collela" map: sinusoidal perturbation in (x, y),
        # plain scaling in z.
        _expressions = {'x': 'k1*(x1 + eps*sin(2.*pi*x1)*sin(2.*pi*x2))',
                        'y': 'k2*(x2 + eps*sin(2.*pi*x1)*sin(2.*pi*x2))',
                        'z': 'k3*x3'}
        _ldim = 3
        _pdim = 3
    M = CollelaMapping3D('M', k1=1, k2=1, k3=1, eps=0.1)
    logical_domain = Cube('C', bounds1=(0, 1), bounds2=(0, 1), bounds3=(0, 1))
    # exact solution: plane wave cos(2*pi*(t - z)) travelling along z
    e_ex_0 = lambda t, x, y, z: 0
    e_ex_1 = lambda t, x, y, z: -np.cos(2*np.pi*t-2*np.pi*z)
    e_ex_2 = lambda t, x, y, z: 0
    e_ex = (e_ex_0, e_ex_1, e_ex_2)
    b_ex_0 = lambda t, x, y, z : np.cos(2*np.pi*t-2*np.pi*z)
    b_ex_1 = lambda t, x, y, z : 0
    b_ex_2 = lambda t, x, y, z : 0
    b_ex = (b_ex_0, b_ex_1, b_ex_2)
    #space parameters
    ncells = [2**4, 2**3, 2**5]
    degree = [2, 2, 2]
    periodic = [True, True, True]
    #time parameters: dt tied to the finest grid resolution
    dt = 0.5*1/max(ncells)
    niter = 10
    T = dt*niter
    error = run_maxwell_3d_scipy(logical_domain, M, e_ex, b_ex, ncells, degree, periodic, dt, niter)
    # regression value, not an analytic error bound
    assert abs(error - 0.04294761712765949) < 1e-9
def test_maxwell_3d_2():
    """Regression test: 3D Maxwell on a Collela-mapped cube, stencil path.

    Same plane-wave setup as test_maxwell_3d_1, but on a coarser grid,
    with only 2 time steps, and using the all-stencil (cg-based) solver.
    """
    class CollelaMapping3D(Mapping):
        # Analytic "Collela" map: sinusoidal perturbation in (x, y),
        # plain scaling in z.
        _expressions = {'x': 'k1*(x1 + eps*sin(2.*pi*x1)*sin(2.*pi*x2))',
                        'y': 'k2*(x2 + eps*sin(2.*pi*x1)*sin(2.*pi*x2))',
                        'z': 'k3*x3'}
        _ldim = 3
        _pdim = 3
    M = CollelaMapping3D('M', k1=1, k2=1, k3=1, eps=0.1)
    logical_domain = Cube('C', bounds1=(0, 1), bounds2=(0, 1), bounds3=(0, 1))
    # exact solution: plane wave cos(2*pi*(t - z)) travelling along z
    e_ex_0 = lambda t, x, y, z: 0
    e_ex_1 = lambda t, x, y, z: -np.cos(2*np.pi*t-2*np.pi*z)
    e_ex_2 = lambda t, x, y, z: 0
    e_ex = (e_ex_0, e_ex_1, e_ex_2)
    b_ex_0 = lambda t, x, y, z : np.cos(2*np.pi*t-2*np.pi*z)
    b_ex_1 = lambda t, x, y, z : 0
    b_ex_2 = lambda t, x, y, z : 0
    b_ex = (b_ex_0, b_ex_1, b_ex_2)
    #space parameters
    ncells = [7, 7, 7]
    degree = [2, 2, 2]
    periodic = [True, True, True]
    #time parameters: dt tied to the grid resolution
    dt = 0.5*1/max(ncells)
    niter = 2
    T = dt*niter
    error = run_maxwell_3d_stencil(logical_domain, M, e_ex, b_ex, ncells, degree, periodic, dt, niter)
    # regression value, not an analytic error bound
    assert abs(error - 0.24586986658559362) < 1e-9
#==============================================================================
# CLEAN UP SYMPY NAMESPACE
#==============================================================================
def teardown_module():
    """pytest hook: clear sympy's symbol cache once the module finishes."""
    from sympy.core.cache import clear_cache
    clear_cache()
def teardown_function():
    """pytest hook: clear sympy's symbol cache after every test function."""
    from sympy.core.cache import clear_cache
    clear_cache()
| [
"numpy.meshgrid",
"sympde.topology.elements_of",
"psydac.linalg.utilities.array_to_stencil",
"sympde.calculus.dot",
"psydac.fem.basic.FemField",
"scipy.sparse.linalg.splu",
"sympde.topology.Derham",
"sympde.expr.integral",
"psydac.linalg.iterative_solvers.cg",
"numpy.array",
"numpy.linspace",
... | [((1090, 1115), 'scipy.sparse.linalg.splu', 'sc.sparse.linalg.splu', (['M1'], {}), '(M1)\n', (1111, 1115), True, 'import scipy as sc\n'), ((2471, 2485), 'sympde.topology.Derham', 'Derham', (['domain'], {}), '(domain)\n', (2477, 2485), False, 'from sympde.topology import Cube, Derham\n'), ((2500, 2538), 'sympde.topology.elements_of', 'elements_of', (['derham.V0'], {'names': '"""u0, v0"""'}), "(derham.V0, names='u0, v0')\n", (2511, 2538), False, 'from sympde.topology import elements_of\n'), ((2552, 2590), 'sympde.topology.elements_of', 'elements_of', (['derham.V1'], {'names': '"""u1, v1"""'}), "(derham.V1, names='u1, v1')\n", (2563, 2590), False, 'from sympde.topology import elements_of\n'), ((2604, 2642), 'sympde.topology.elements_of', 'elements_of', (['derham.V2'], {'names': '"""u2, v2"""'}), "(derham.V2, names='u2, v2')\n", (2615, 2642), False, 'from sympde.topology import elements_of\n'), ((2656, 2694), 'sympde.topology.elements_of', 'elements_of', (['derham.V3'], {'names': '"""u3, v3"""'}), "(derham.V3, names='u3, v3')\n", (2667, 2694), False, 'from sympde.topology import elements_of\n'), ((3068, 3122), 'psydac.api.discretization.discretize', 'discretize', (['domain'], {'ncells': 'ncells', 'comm': 'MPI.COMM_WORLD'}), '(domain, ncells=ncells, comm=MPI.COMM_WORLD)\n', (3078, 3122), False, 'from psydac.api.discretization import discretize\n'), ((3138, 3200), 'psydac.api.discretization.discretize', 'discretize', (['derham', 'domain_h'], {'degree': 'degree', 'periodic': 'periodic'}), '(derham, domain_h, degree=degree, periodic=periodic)\n', (3148, 3200), False, 'from psydac.api.discretization import discretize\n'), ((3213, 3302), 'psydac.api.discretization.discretize', 'discretize', (['a1', 'domain_h', '(derham_h.V1, derham_h.V1)'], {'backend': 'PSYDAC_BACKEND_GPYCCEL'}), '(a1, domain_h, (derham_h.V1, derham_h.V1), backend=\n PSYDAC_BACKEND_GPYCCEL)\n', (3223, 3302), False, 'from psydac.api.discretization import discretize\n'), ((3309, 3398), 
'psydac.api.discretization.discretize', 'discretize', (['a2', 'domain_h', '(derham_h.V2, derham_h.V2)'], {'backend': 'PSYDAC_BACKEND_GPYCCEL'}), '(a2, domain_h, (derham_h.V2, derham_h.V2), backend=\n PSYDAC_BACKEND_GPYCCEL)\n', (3319, 3398), False, 'from psydac.api.discretization import discretize\n'), ((5177, 5197), 'numpy.array', 'np.array', (['b_values_0'], {}), '(b_values_0)\n', (5185, 5197), True, 'import numpy as np\n'), ((5217, 5254), 'numpy.linspace', 'np.linspace', (['(0)', '(dt * niter)', '(niter + 1)'], {}), '(0, dt * niter, niter + 1)\n', (5228, 5254), True, 'import numpy as np\n'), ((5271, 5297), 'numpy.meshgrid', 'np.meshgrid', (['time_array', 'z'], {}), '(time_array, z)\n', (5282, 5297), True, 'import numpy as np\n'), ((5647, 5661), 'sympde.topology.Derham', 'Derham', (['domain'], {}), '(domain)\n', (5653, 5661), False, 'from sympde.topology import Cube, Derham\n'), ((5676, 5714), 'sympde.topology.elements_of', 'elements_of', (['derham.V0'], {'names': '"""u0, v0"""'}), "(derham.V0, names='u0, v0')\n", (5687, 5714), False, 'from sympde.topology import elements_of\n'), ((5728, 5766), 'sympde.topology.elements_of', 'elements_of', (['derham.V1'], {'names': '"""u1, v1"""'}), "(derham.V1, names='u1, v1')\n", (5739, 5766), False, 'from sympde.topology import elements_of\n'), ((5780, 5818), 'sympde.topology.elements_of', 'elements_of', (['derham.V2'], {'names': '"""u2, v2"""'}), "(derham.V2, names='u2, v2')\n", (5791, 5818), False, 'from sympde.topology import elements_of\n'), ((5832, 5870), 'sympde.topology.elements_of', 'elements_of', (['derham.V3'], {'names': '"""u3, v3"""'}), "(derham.V3, names='u3, v3')\n", (5843, 5870), False, 'from sympde.topology import elements_of\n'), ((6244, 6298), 'psydac.api.discretization.discretize', 'discretize', (['domain'], {'ncells': 'ncells', 'comm': 'MPI.COMM_WORLD'}), '(domain, ncells=ncells, comm=MPI.COMM_WORLD)\n', (6254, 6298), False, 'from psydac.api.discretization import discretize\n'), ((6314, 6376), 
'psydac.api.discretization.discretize', 'discretize', (['derham', 'domain_h'], {'degree': 'degree', 'periodic': 'periodic'}), '(derham, domain_h, degree=degree, periodic=periodic)\n', (6324, 6376), False, 'from psydac.api.discretization import discretize\n'), ((6389, 6478), 'psydac.api.discretization.discretize', 'discretize', (['a1', 'domain_h', '(derham_h.V1, derham_h.V1)'], {'backend': 'PSYDAC_BACKEND_GPYCCEL'}), '(a1, domain_h, (derham_h.V1, derham_h.V1), backend=\n PSYDAC_BACKEND_GPYCCEL)\n', (6399, 6478), False, 'from psydac.api.discretization import discretize\n'), ((6485, 6574), 'psydac.api.discretization.discretize', 'discretize', (['a2', 'domain_h', '(derham_h.V2, derham_h.V2)'], {'backend': 'PSYDAC_BACKEND_GPYCCEL'}), '(a2, domain_h, (derham_h.V2, derham_h.V2), backend=\n PSYDAC_BACKEND_GPYCCEL)\n', (6495, 6574), False, 'from psydac.api.discretization import discretize\n'), ((8106, 8126), 'numpy.array', 'np.array', (['b_values_0'], {}), '(b_values_0)\n', (8114, 8126), True, 'import numpy as np\n'), ((8146, 8183), 'numpy.linspace', 'np.linspace', (['(0)', '(dt * niter)', '(niter + 1)'], {}), '(0, dt * niter, niter + 1)\n', (8157, 8183), True, 'import numpy as np\n'), ((8200, 8226), 'numpy.meshgrid', 'np.meshgrid', (['time_array', 'z'], {}), '(time_array, z)\n', (8211, 8226), True, 'import numpy as np\n'), ((9121, 9178), 'sympde.topology.Cube', 'Cube', (['"""C"""'], {'bounds1': '(0, 1)', 'bounds2': '(0, 1)', 'bounds3': '(0, 1)'}), "('C', bounds1=(0, 1), bounds2=(0, 1), bounds3=(0, 1))\n", (9125, 9178), False, 'from sympde.topology import Cube, Derham\n'), ((10292, 10349), 'sympde.topology.Cube', 'Cube', (['"""C"""'], {'bounds1': '(0, 1)', 'bounds2': '(0, 1)', 'bounds3': '(0, 1)'}), "('C', bounds1=(0, 1), bounds2=(0, 1), bounds3=(0, 1))\n", (10296, 10349), False, 'from sympde.topology import Cube, Derham\n'), ((11308, 11327), 'sympy.core.cache.clear_cache', 'cache.clear_cache', ([], {}), '()\n', (11325, 11327), False, 'from sympy.core import cache\n'), 
((11391, 11410), 'sympy.core.cache.clear_cache', 'cache.clear_cache', ([], {}), '()\n', (11408, 11410), False, 'from sympy.core import cache\n'), ((2728, 2753), 'sympde.expr.integral', 'integral', (['domain', '(u0 * v0)'], {}), '(domain, u0 * v0)\n', (2736, 2753), False, 'from sympde.expr import BilinearForm, LinearForm, integral\n'), ((2911, 2936), 'sympde.expr.integral', 'integral', (['domain', '(u3 * v3)'], {}), '(domain, u3 * v3)\n', (2919, 2936), False, 'from sympde.expr import BilinearForm, LinearForm, integral\n'), ((4419, 4465), 'psydac.linalg.utilities.array_to_stencil', 'array_to_stencil', (['bi', 'derham_h.V2.vector_space'], {}), '(bi, derham_h.V2.vector_space)\n', (4435, 4465), False, 'from psydac.linalg.utilities import array_to_stencil\n'), ((5042, 5115), 'psydac.feec.pull_push.push_3d_hdiv', 'push_3d_hdiv', (['bx_value_fun', 'by_value_fun', 'bz_value_fun', 'x', 'y', 'zi', 'mapping'], {}), '(bx_value_fun, by_value_fun, bz_value_fun, x, y, zi, mapping)\n', (5054, 5115), False, 'from psydac.feec.pull_push import push_3d_hcurl, push_3d_hdiv\n'), ((5904, 5929), 'sympde.expr.integral', 'integral', (['domain', '(u0 * v0)'], {}), '(domain, u0 * v0)\n', (5912, 5929), False, 'from sympde.expr import BilinearForm, LinearForm, integral\n'), ((6087, 6112), 'sympde.expr.integral', 'integral', (['domain', '(u3 * v3)'], {}), '(domain, u3 * v3)\n', (6095, 6112), False, 'from sympde.expr import BilinearForm, LinearForm, integral\n'), ((7971, 8044), 'psydac.feec.pull_push.push_3d_hdiv', 'push_3d_hdiv', (['bx_value_fun', 'by_value_fun', 'bz_value_fun', 'x', 'y', 'zi', 'mapping'], {}), '(bx_value_fun, by_value_fun, bz_value_fun, x, y, zi, mapping)\n', (7983, 8044), False, 'from psydac.feec.pull_push import push_3d_hcurl, push_3d_hdiv\n'), ((9403, 9440), 'numpy.cos', 'np.cos', (['(2 * np.pi * t - 2 * np.pi * z)'], {}), '(2 * np.pi * t - 2 * np.pi * z)\n', (9409, 9440), True, 'import numpy as np\n'), ((10574, 10611), 'numpy.cos', 'np.cos', (['(2 * np.pi * t - 2 * np.pi * 
z)'], {}), '(2 * np.pi * t - 2 * np.pi * z)\n', (10580, 10611), True, 'import numpy as np\n'), ((1714, 1735), 'psydac.linalg.iterative_solvers.cg', 'cg', (['M1', 'y2'], {'tol': '(1e-12)'}), '(M1, y2, tol=1e-12)\n', (1716, 1735), False, 'from psydac.linalg.iterative_solvers import cg\n'), ((2802, 2813), 'sympde.calculus.dot', 'dot', (['u1', 'v1'], {}), '(u1, v1)\n', (2805, 2813), False, 'from sympde.calculus import grad, dot\n'), ((2865, 2876), 'sympde.calculus.dot', 'dot', (['u2', 'v2'], {}), '(u2, v2)\n', (2868, 2876), False, 'from sympde.calculus import grad, dot\n'), ((4504, 4529), 'psydac.fem.basic.FemField', 'FemField', (['derham_h.V2', 'bi'], {}), '(derham_h.V2, bi)\n', (4512, 4529), False, 'from psydac.fem.basic import FemField\n'), ((5978, 5989), 'sympde.calculus.dot', 'dot', (['u1', 'v1'], {}), '(u1, v1)\n', (5981, 5989), False, 'from sympde.calculus import grad, dot\n'), ((6041, 6052), 'sympde.calculus.dot', 'dot', (['u2', 'v2'], {}), '(u2, v2)\n', (6044, 6052), False, 'from sympde.calculus import grad, dot\n'), ((7433, 7458), 'psydac.fem.basic.FemField', 'FemField', (['derham_h.V2', 'bi'], {}), '(derham_h.V2, bi)\n', (7441, 7458), False, 'from psydac.fem.basic import FemField\n'), ((9268, 9305), 'numpy.cos', 'np.cos', (['(2 * np.pi * t - 2 * np.pi * z)'], {}), '(2 * np.pi * t - 2 * np.pi * z)\n', (9274, 9305), True, 'import numpy as np\n'), ((10439, 10476), 'numpy.cos', 'np.cos', (['(2 * np.pi * t - 2 * np.pi * z)'], {}), '(2 * np.pi * t - 2 * np.pi * z)\n', (10445, 10476), True, 'import numpy as np\n')] |
#-*- coding:utf-8 -*-
import numpy as np
from constant import input_cnt, output_cnt, RND_MEAN, RND_STD, LEARNING_RATE
class Perceptron:
    """Single-layer linear perceptron trained by SGD on a squared-error loss.

    Notation: y = w*x + b is the prediction, y` the target.
    Gradient derivation:
        dL/dw = dL/dy * dy/dw = 2(y - y`) * x
        dL/db = dL/dy * dy/db = 2(y - y`) * 1
    SGD step: (w, b) <- (w, b) - lr/mb_size * sum of the gradients above.
    """

    def __init__(self):
        # Weights drawn from N(RND_MEAN, RND_STD); bias starts at zero.
        self.weight = np.random.normal(RND_MEAN, RND_STD, [input_cnt, output_cnt])
        self.bias = np.zeros([output_cnt])

    def forward_neuralnet(self, x):
        """Affine forward pass; also return x so backprop can reuse it."""
        return np.matmul(x, self.weight) + self.bias, x

    def backprop_neuralnet(self, G_output, x):
        """Apply one SGD update given the gradient at the network output."""
        grad_w = np.matmul(x.transpose(), G_output)
        grad_b = np.sum(G_output, axis=0)
        self.weight -= LEARNING_RATE * grad_w
        self.bias -= LEARNING_RATE * grad_b

    def forward_postproc(self, output, y):
        """Return (mean squared error, elementwise residual output - y)."""
        residual = output - y
        return np.mean(np.square(residual)), residual

    def backprop_postproc(self, G_loss, diff):
        """Propagate the loss gradient back through mean -> square -> subtract."""
        g_loss_square = np.ones(diff.shape) / np.prod(diff.shape)
        G_square = g_loss_square * G_loss
        G_diff = (2 * diff) * G_square
        # d(diff)/d(output) = 1, so the output gradient equals G_diff.
        return G_diff
"numpy.sum",
"numpy.square",
"numpy.zeros",
"numpy.ones",
"numpy.mean",
"numpy.matmul",
"numpy.random.normal",
"numpy.prod"
] | [((767, 827), 'numpy.random.normal', 'np.random.normal', (['RND_MEAN', 'RND_STD', '[input_cnt, output_cnt]'], {}), '(RND_MEAN, RND_STD, [input_cnt, output_cnt])\n', (783, 827), True, 'import numpy as np\n'), ((847, 869), 'numpy.zeros', 'np.zeros', (['[output_cnt]'], {}), '([output_cnt])\n', (855, 869), True, 'import numpy as np\n'), ((1086, 1117), 'numpy.matmul', 'np.matmul', (['g_output_w', 'G_output'], {}), '(g_output_w, G_output)\n', (1095, 1117), True, 'import numpy as np\n'), ((1132, 1156), 'numpy.sum', 'np.sum', (['G_output'], {'axis': '(0)'}), '(G_output, axis=0)\n', (1138, 1156), True, 'import numpy as np\n'), ((1329, 1344), 'numpy.square', 'np.square', (['diff'], {}), '(diff)\n', (1338, 1344), True, 'import numpy as np\n'), ((1360, 1375), 'numpy.mean', 'np.mean', (['square'], {}), '(square)\n', (1367, 1375), True, 'import numpy as np\n'), ((924, 949), 'numpy.matmul', 'np.matmul', (['x', 'self.weight'], {}), '(x, self.weight)\n', (933, 949), True, 'import numpy as np\n'), ((1557, 1571), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (1564, 1571), True, 'import numpy as np\n'), ((1574, 1588), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (1581, 1588), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import os
import platform
import numpy as np
from numpy import random as rd
class Class_Script:
    """Demo utility class exposing a few defensive static helpers.

    Each helper swallows unexpected errors and signals failure with a
    sentinel value (0 or an empty list) rather than raising.
    """

    @staticmethod
    def Say_Hi():
        """Return a greeting naming this method and class; 0 on failure."""
        try:
            return f'Hi from {Class_Script.Say_Hi.__name__} in {Class_Script.__name__}'
        except Exception:
            return 0

    @staticmethod
    def Show_Location(directory):
        """Return the cwd with *directory* appended; 0 on any failure."""
        try:
            return os.getcwd() + directory
        except Exception:
            return 0

    @staticmethod
    def Show_System_Info(pc_name):
        """Return a string with *pc_name* and the platform uname; 0 on failure."""
        try:
            info = platform.uname()
            return f'This PC -> {pc_name}\n{info}'
        except Exception:
            return 0

    @staticmethod
    def Give_Random_Array(size):
        """Return an ndarray of *size* random ints in [0, 10).

        Returns [] for a string or negative size (or on any error while
        sampling), and the list [0] when size == 0.
        """
        if type(size) is str:
            return []
        if size < 0:
            return []
        try:
            samples = np.array([rd.randint(0, 10) for _ in range(size)])
        except Exception:
            return []
        return [0] if size == 0 else samples
def Main():
    """Entry-point placeholder: announce itself on stdout; returns None."""
    print(f'This is the {Main.__name__}() function from Script-2')
if __name__ == '__main__':
    # Script entry point: the greeting string returned by Say_Hi() is
    # discarded here; Main() then prints its own message.
    Class_Script.Say_Hi()
    Main()
| [
"os.getcwd",
"numpy.random.randint",
"numpy.array",
"platform.uname"
] | [((457, 468), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (466, 468), False, 'import os\n'), ((667, 683), 'platform.uname', 'platform.uname', ([], {}), '()\n', (681, 683), False, 'import platform\n'), ((1016, 1033), 'numpy.random.randint', 'rd.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (1026, 1033), True, 'from numpy import random as rd\n'), ((1134, 1157), 'numpy.array', 'np.array', (['classic_array'], {}), '(classic_array)\n', (1142, 1157), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
def travelwave_solve(mx, mt, L, T, h_sb, u_I, v_I, output='plot'):
    '''
    Solver for the 1D wave equation with variable wave speed using an
    explicit finite difference method (e.g. the motion of a tsunami in
    the open ocean), with open (radiating) boundaries at both ends.

    param mx: number of gridpoints in space
                integer object
    param mt: number of gridpoints in time
                integer object
    param L: length of spatial domain
                integer or float object
    param T: total time to solve for
                integer or float object
    param h_sb: seabed shape / height along x
                integer, float, or function of x, e.g.
                def h_sb(x): return 1*np.exp(-((x - 50)**2)/10)
    param u_I: initial displacement distribution
                integer, float, or function of x, e.g.
                def u_I(x): return 1.5 + 2*np.exp(-((x - 8)**2)/12)
    param v_I: initial velocity distribution
                integer, float, or function of x, e.g.
                def v_I(x): return np.sin(np.pi*x)
    param output: output='data',
                    returns (x, h_sb, u_jp1s): space gridpoints, seabed
                    profile along x, and the solution u(x, T)
                  output='plot',
                    plots u_jp1s against x
                  default = 'plot'
    return: depends on the option --> see param output
            (returns None when there is not enough water or on a bad
            `output` argument)
    '''
    # Set up the numerical environment variables
    x = np.linspace(0, L, mx+1)     # mesh points in space
    t = np.linspace(0, T, mt+1)     # mesh points in time
    dx = x[1] - x[0]                # gridspacing in x
    dt = t[1] - t[0]                # gridspacing in t
    # Broadcast scalar inputs to arrays / evaluate callables on the mesh
    if isinstance(h_sb, (int, float)):
        h_sb = np.full(mx+1, h_sb)       # constant for all x
    elif callable(h_sb):
        h_sb = h_sb(x)
    if isinstance(u_I, (int, float)):
        u_I = np.full(mx+1, u_I)         # constant for all x
    elif callable(u_I):
        u_I = u_I(x)
    if isinstance(v_I, (int, float)):
        v_I = np.full(mx+1, v_I)         # constant for all x
    elif callable(v_I):
        v_I = v_I(x)
    # Water depth along x: still-water level h_0 minus the seabed height
    h_0 = min(u_I)
    h_x = h_0 - h_sb
    h_0max = max(u_I)
    # Make sure that there is enough water in the sea
    if max(h_sb) >= h_0:
        print('There is not enough water in the sea for a 1D simulation!')
        return
    # Courant number (stability check): if hmax*dt/dx > 1 the explicit
    # scheme is unstable, so rebuild the time grid with more steps.
    hmax = max(h_x)
    if hmax*dt/dx > 1:
        # BUG FIX: np.ceil returns a float, but mt must be an integer to be
        # valid for np.linspace(num=mt+1) and range(2, mt+1) below.
        mt = int(np.ceil(T/(dx/hmax)))
        t = np.linspace(0, T, mt+1)
        dt = t[1] - t[0]
    # Squared mesh ratio (dt/dx)^2
    c_sqr = (dt/dx)**2
    # Set up the solution variables
    u_j = np.zeros(x.size)        # u at current time step
    u_jp1s = np.zeros(x.size)     # u at next time step
    # Set initial condition
    u_jmns = u_I                  # u at previous time step / initial condition
    # First timestep: Taylor expansion using the initial velocity v_I
    u_j[1:-1] = u_jmns[1:-1] + dt*v_I[1:-1] + 0.5*c_sqr*(0.5*(h_x[1:-1] \
                + h_x[2:])*(u_jmns[2:] - u_jmns[1:-1]) - 0.5*(h_x[1:-1] \
                + h_x[:-2])*(u_jmns[1:-1] - u_jmns[:-2]))
    # Open boundary at x = 0
    u_j[0] = u_jmns[0] + h_x[0]*(dt/dx)*(u_jmns[1] - u_jmns[0])
    # Open boundary at x = L
    u_j[-1] = u_jmns[-1] - h_x[-1]*(dt/dx)*(u_jmns[-1] - u_jmns[-2])
    # Loop for regular timesteps (leapfrog in time)
    for n in range(2, mt+1):
        u_jp1s[1:-1] = -u_jmns[1:-1] + 2*u_j[1:-1] + c_sqr*(0.5*(h_x[1:-1] \
                       + h_x[2:])*(u_j[2:] - u_j[1:-1]) - 0.5*(h_x[1:-1] \
                       + h_x[:-2])*(u_j[1:-1] - u_j[:-2]))
        # Open boundary at x = 0
        u_jp1s[0] = u_j[0] + h_x[0]*(dt/dx)*(u_j[1] - u_j[0])
        # Open boundary at x = L
        u_jp1s[-1] = u_j[-1] - h_x[-1]*(dt/dx)*(u_j[-1] - u_j[-2])
        # Update arrays for next timestep
        # All 3 u_j to reallocate memory 'pointers'
        u_jmns, u_j, u_jp1s = u_j, u_jp1s, u_jmns
    # Output
    if output in ('PLOT', 'Plot', 'plot'):
        plot_1Dtravelwave(x, u_jp1s, h_sb, h_0max, T)
    elif output in ('DATA', 'Data', 'data'):
        return x, h_sb, u_jp1s
    else:
        print("Check input argument 'output'!")
        return
def plot_1Dtravelwave(x, u_jpls, h_sb, h_0max, T):
    '''
    Plot the wave solution u(x, T) (red line) over the seabed profile
    h_sb (blue dots), padding both axes slightly beyond the data range.
    '''
    fig = plt.figure(figsize=(8, 4))
    axis = fig.add_subplot(1, 1, 1)
    axis.plot(x, h_sb, 'bo', markersize=2, label='$seabed$')
    axis.plot(x, u_jpls, color='red', linewidth=1.5, label='$u(x, ' + str(T) + ')$')
    # Pad the x-axis by 4% of the data range on each side.
    span_x = abs(max(x) - min(x))
    axis.set_xlim([min(x) - 0.04 * span_x, max(x) + 0.04 * span_x])
    # Pad the y-axis by 8% of the range between seabed and surface maximum.
    span_y = abs(h_0max - min(h_sb))
    axis.set_ylim([min(h_sb) - 0.08 * span_y, h_0max + 0.08 * span_y])
    axis.grid(True)
    lgd = axis.legend(loc='best', fontsize='medium')
    lgd.get_frame().set_alpha(1)
    plt.tight_layout()
    plt.show()
if __name__ == "__main__":
    # Shell entry point: command-line arguments are forwarded positionally
    # to travelwave_solve. NOTE(review): sys.argv items are strings, so the
    # numeric parameters arrive un-converted when run this way.
    import sys
    travelwave_solve(*sys.argv[1:])
| [
"numpy.full",
"matplotlib.pyplot.show",
"numpy.ceil",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.tight_layout"
] | [((1785, 1810), 'numpy.linspace', 'np.linspace', (['(0)', 'L', '(mx + 1)'], {}), '(0, L, mx + 1)\n', (1796, 1810), True, 'import numpy as np\n'), ((1844, 1869), 'numpy.linspace', 'np.linspace', (['(0)', 'T', '(mt + 1)'], {}), '(0, T, mt + 1)\n', (1855, 1869), True, 'import numpy as np\n'), ((3166, 3182), 'numpy.zeros', 'np.zeros', (['x.size'], {}), '(x.size)\n', (3174, 3182), True, 'import numpy as np\n'), ((3234, 3250), 'numpy.zeros', 'np.zeros', (['x.size'], {}), '(x.size)\n', (3242, 3250), True, 'import numpy as np\n'), ((4947, 4973), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 4)'}), '(figsize=(8, 4))\n', (4957, 4973), True, 'import matplotlib.pyplot as plt\n'), ((5707, 5725), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5723, 5725), True, 'import matplotlib.pyplot as plt\n'), ((5731, 5741), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5739, 5741), True, 'import matplotlib.pyplot as plt\n'), ((2911, 2935), 'numpy.ceil', 'np.ceil', (['(T / (dx / hmax))'], {}), '(T / (dx / hmax))\n', (2918, 2935), True, 'import numpy as np\n'), ((2980, 3005), 'numpy.linspace', 'np.linspace', (['(0)', 'T', '(mt + 1)'], {}), '(0, T, mt + 1)\n', (2991, 3005), True, 'import numpy as np\n'), ((2101, 2127), 'numpy.full', 'np.full', (['(1, mx + 1)', 'h_sb'], {}), '((1, mx + 1), h_sb)\n', (2108, 2127), True, 'import numpy as np\n'), ((2255, 2280), 'numpy.full', 'np.full', (['(1, mx + 1)', 'u_I'], {}), '((1, mx + 1), u_I)\n', (2262, 2280), True, 'import numpy as np\n'), ((2405, 2430), 'numpy.full', 'np.full', (['(1, mx + 1)', 'v_I'], {}), '((1, mx + 1), v_I)\n', (2412, 2430), True, 'import numpy as np\n')] |
# Homework from week 8
# Display f(x) = x, g(x) = x**2 and h(x) = x**3 over the range [0, 4]
# on a single set of axes.
import numpy as np
import matplotlib.pyplot as plt

# Sample points: a 0.05 step gives the dotted curves a smooth look.
x = np.arange(0.0, 4.0, 0.05)
f = x       # f(x) = x
g = x ** 2  # g(x) = x squared
h = x ** 3  # h(x) = x cubed
# Draw each curve as coloured dots with its own legend entry.
for values, fmt, name in ((f, "g.", "x"), (g, "r.", "x_squared"), (h, "b.", "x_cubed")):
    plt.plot(f, values, fmt, label=name)
plt.legend()
plt.title("Homework Week 8 - plots")
plt.xlabel("x axis")
plt.ylabel("y axis")
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((239, 264), 'numpy.arange', 'np.arange', (['(0.0)', '(4.0)', '(0.05)'], {}), '(0.0, 4.0, 0.05)\n', (248, 264), True, 'import numpy as np\n'), ((440, 471), 'matplotlib.pyplot.plot', 'plt.plot', (['f', 'f', '"""g."""'], {'label': '"""x"""'}), "(f, f, 'g.', label='x')\n", (448, 471), True, 'import matplotlib.pyplot as plt\n'), ((498, 537), 'matplotlib.pyplot.plot', 'plt.plot', (['f', 'g', '"""r."""'], {'label': '"""x_squared"""'}), "(f, g, 'r.', label='x_squared')\n", (506, 537), True, 'import matplotlib.pyplot as plt\n'), ((565, 602), 'matplotlib.pyplot.plot', 'plt.plot', (['f', 'h', '"""b."""'], {'label': '"""x_cubed"""'}), "(f, h, 'b.', label='x_cubed')\n", (573, 602), True, 'import matplotlib.pyplot as plt\n'), ((630, 642), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (640, 642), True, 'import matplotlib.pyplot as plt\n'), ((643, 679), 'matplotlib.pyplot.title', 'plt.title', (['"""Homework Week 8 - plots"""'], {}), "('Homework Week 8 - plots')\n", (652, 679), True, 'import matplotlib.pyplot as plt\n'), ((680, 700), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x axis"""'], {}), "('x axis')\n", (690, 700), True, 'import matplotlib.pyplot as plt\n'), ((701, 721), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y axis"""'], {}), "('y axis')\n", (711, 721), True, 'import matplotlib.pyplot as plt\n'), ((722, 732), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (730, 732), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 25 00:11:49 2020
@author: arslan

Interval type-2 (IT2) Mamdani fuzzy system demo: two inputs (x1, x2) and
two outputs (y1, y2), Gaussian IT2 sets with uncertain standard deviation,
a nine-rule base covering every input combination, and 3-D surface plots
of both crisp output values over the input grid.
"""
from pyit2fls import IT2Mamdani, IT2FS_Gaussian_UncertStd, IT2FS_plot, \
    min_t_norm, max_s_norm, crisp
from numpy import linspace, meshgrid, zeros
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
# Defining the domain of the input variable x1.
domain1 = linspace(1., 2., 100)
# Defining the domain of the input variable x2.
domain2 = linspace(2., 3., 100)
# Defining the domain of the output variable y1.
domain3 = linspace(3., 4., 100)
# Defining the domain of the output variable y2.
domain4 = linspace(4., 5., 100)
# Each set is parameterized as [mean, std, std-uncertainty, height].
# Defining the Small set for the input variable x1.
Small1 = IT2FS_Gaussian_UncertStd(domain1, [1., 0.15, 0.05, 1.])
# Defining the Small set for the input variable x2.
Small2 = IT2FS_Gaussian_UncertStd(domain2, [2., 0.15, 0.05, 1.])
# Defining the Medium set for the input variable x1.
Medium1 = IT2FS_Gaussian_UncertStd(domain1, [1.5, 0.15, 0.05, 1.])
# Defining the Medium set for the input variable x2.
Medium2 = IT2FS_Gaussian_UncertStd(domain2, [2.5, 0.15, 0.05, 1.])
# Defining the Large set for the input variable x1.
Large1 = IT2FS_Gaussian_UncertStd(domain1, [2., 0.15, 0.05, 1.])
# Defining the Large set for the input variable x2.
Large2 = IT2FS_Gaussian_UncertStd(domain2, [3., 0.15, 0.05, 1.])
# Plotting the sets defined for the input variable x1.
IT2FS_plot(Small1, Medium1, Large1,
           legends=["Small", "Medium", "large"])
# Plotting the sets defined for the input variable x2.
IT2FS_plot(Small2, Medium2, Large2,
           legends=["Small", "Medium", "large"])
# Defining the Low set for the output variable y1
Low1 = IT2FS_Gaussian_UncertStd(domain3, [3., 0.1, 0.05, 1.])
# Defining the Low set for the output variable y2
Low2 = IT2FS_Gaussian_UncertStd(domain4, [4., 0.1, 0.05, 1.])
# Defining the High set for the output variable y1
High1 = IT2FS_Gaussian_UncertStd(domain3, [4., 0.1, 0.05, 1.])
# Defining the High set for the output variable y2
High2 = IT2FS_Gaussian_UncertStd(domain4, [5., 0.1, 0.05, 1.])
# Plotting the sets defined for the output variable y1.
IT2FS_plot(Low1, High1,
           legends=["Low", "High"])
# Plotting the sets defined for the output variable y2.
IT2FS_plot(Low2, High2,
           legends=["Low", "High"])
# Defining the mamdani interval type 2 fuzzy logic system
myIT2FLS = IT2Mamdani(min_t_norm, max_s_norm)
# Adding the input variables to the myIT2FLS
myIT2FLS.add_input_variable("x1")
myIT2FLS.add_input_variable("x2")
# Adding the output variables to the myIT2FLS
myIT2FLS.add_output_variable("y1")
myIT2FLS.add_output_variable("y2")
# Defining the rule base of the MyIT2FLS (all 9 combinations of x1/x2 sets).
myIT2FLS.add_rule([("x1", Small1), ("x2", Small2)], [("y1", Low1), ("y2", Low2)])
myIT2FLS.add_rule([("x1", Small1), ("x2", Medium2)], [("y1", Low1), ("y2", High2)])
myIT2FLS.add_rule([("x1", Small1), ("x2", Large2)], [("y1", Low1), ("y2", High2)])
myIT2FLS.add_rule([("x1", Medium1), ("x2", Small2)], [("y1", Low1), ("y2", Low2)])
myIT2FLS.add_rule([("x1", Medium1), ("x2", Medium2)], [("y1", Low1), ("y2", High2)])
myIT2FLS.add_rule([("x1", Medium1), ("x2", Large2)], [("y1", High1), ("y2", High2)])
myIT2FLS.add_rule([("x1", Large1), ("x2", Small2)], [("y1", High1), ("y2", Low2)])
myIT2FLS.add_rule([("x1", Large1), ("x2", Medium2)], [("y1", High1), ("y2", High2)])
myIT2FLS.add_rule([("x1", Large1), ("x2", Large2)], [("y1", High1), ("y2", High2)])
# Evaluating the outputs of the myIT2FLS for the points in the input domain,
# and plotting the output surfaces.
X1, X2 = meshgrid(domain1, domain2)
Z1 = zeros(shape=(len(domain1), len(domain2)))
Z2 = zeros(shape=(len(domain1), len(domain2)))
# NOTE(review): Z1/Z2 are filled as [i over domain1, j over domain2], while
# meshgrid(domain1, domain2) returns arrays ordered (domain2, domain1).
# Both domains have 100 points so the shapes happen to match, but the
# surfaces may be plotted transposed -- verify the intended orientation.
for i, x1 in zip(range(len(domain1)), domain1):
    for j, x2 in zip(range(len(domain2)), domain2):
        it2out, tr = myIT2FLS.evaluate({"x1":x1, "x2":x2})
        Z1[i, j], Z2[i, j] = crisp(tr["y1"]), crisp(tr["y2"])
# Surface plot of the first output y1.
fig = plt.figure()
# Fix: Figure.gca(projection="3d") was deprecated in Matplotlib 3.4 and
# removed in 3.6; add_subplot(projection="3d") is the supported way to
# obtain 3-D axes.
ax = fig.add_subplot(projection="3d")
surf = ax.plot_surface(X1, X2, Z1, cmap=cm.coolwarm,
                       linewidth=0, antialiased=False)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
# Surface plot of the second output y2.
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
surf = ax.plot_surface(X1, X2, Z2, cmap=cm.coolwarm,
                       linewidth=0, antialiased=False)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
| [
"numpy.meshgrid",
"matplotlib.pyplot.show",
"pyit2fls.crisp",
"pyit2fls.IT2Mamdani",
"matplotlib.pyplot.figure",
"matplotlib.ticker.LinearLocator",
"matplotlib.ticker.FormatStrFormatter",
"pyit2fls.IT2FS_plot",
"numpy.linspace",
"pyit2fls.IT2FS_Gaussian_UncertStd"
] | [((491, 514), 'numpy.linspace', 'linspace', (['(1.0)', '(2.0)', '(100)'], {}), '(1.0, 2.0, 100)\n', (499, 514), False, 'from numpy import linspace, meshgrid, zeros\n'), ((572, 595), 'numpy.linspace', 'linspace', (['(2.0)', '(3.0)', '(100)'], {}), '(2.0, 3.0, 100)\n', (580, 595), False, 'from numpy import linspace, meshgrid, zeros\n'), ((654, 677), 'numpy.linspace', 'linspace', (['(3.0)', '(4.0)', '(100)'], {}), '(3.0, 4.0, 100)\n', (662, 677), False, 'from numpy import linspace, meshgrid, zeros\n'), ((736, 759), 'numpy.linspace', 'linspace', (['(4.0)', '(5.0)', '(100)'], {}), '(4.0, 5.0, 100)\n', (744, 759), False, 'from numpy import linspace, meshgrid, zeros\n'), ((820, 877), 'pyit2fls.IT2FS_Gaussian_UncertStd', 'IT2FS_Gaussian_UncertStd', (['domain1', '[1.0, 0.15, 0.05, 1.0]'], {}), '(domain1, [1.0, 0.15, 0.05, 1.0])\n', (844, 877), False, 'from pyit2fls import IT2Mamdani, IT2FS_Gaussian_UncertStd, IT2FS_plot, min_t_norm, max_s_norm, crisp\n'), ((938, 995), 'pyit2fls.IT2FS_Gaussian_UncertStd', 'IT2FS_Gaussian_UncertStd', (['domain2', '[2.0, 0.15, 0.05, 1.0]'], {}), '(domain2, [2.0, 0.15, 0.05, 1.0])\n', (962, 995), False, 'from pyit2fls import IT2Mamdani, IT2FS_Gaussian_UncertStd, IT2FS_plot, min_t_norm, max_s_norm, crisp\n'), ((1058, 1115), 'pyit2fls.IT2FS_Gaussian_UncertStd', 'IT2FS_Gaussian_UncertStd', (['domain1', '[1.5, 0.15, 0.05, 1.0]'], {}), '(domain1, [1.5, 0.15, 0.05, 1.0])\n', (1082, 1115), False, 'from pyit2fls import IT2Mamdani, IT2FS_Gaussian_UncertStd, IT2FS_plot, min_t_norm, max_s_norm, crisp\n'), ((1179, 1236), 'pyit2fls.IT2FS_Gaussian_UncertStd', 'IT2FS_Gaussian_UncertStd', (['domain2', '[2.5, 0.15, 0.05, 1.0]'], {}), '(domain2, [2.5, 0.15, 0.05, 1.0])\n', (1203, 1236), False, 'from pyit2fls import IT2Mamdani, IT2FS_Gaussian_UncertStd, IT2FS_plot, min_t_norm, max_s_norm, crisp\n'), ((1298, 1355), 'pyit2fls.IT2FS_Gaussian_UncertStd', 'IT2FS_Gaussian_UncertStd', (['domain1', '[2.0, 0.15, 0.05, 1.0]'], {}), '(domain1, [2.0, 0.15, 0.05, 
1.0])\n', (1322, 1355), False, 'from pyit2fls import IT2Mamdani, IT2FS_Gaussian_UncertStd, IT2FS_plot, min_t_norm, max_s_norm, crisp\n'), ((1416, 1473), 'pyit2fls.IT2FS_Gaussian_UncertStd', 'IT2FS_Gaussian_UncertStd', (['domain2', '[3.0, 0.15, 0.05, 1.0]'], {}), '(domain2, [3.0, 0.15, 0.05, 1.0])\n', (1440, 1473), False, 'from pyit2fls import IT2Mamdani, IT2FS_Gaussian_UncertStd, IT2FS_plot, min_t_norm, max_s_norm, crisp\n'), ((1528, 1601), 'pyit2fls.IT2FS_plot', 'IT2FS_plot', (['Small1', 'Medium1', 'Large1'], {'legends': "['Small', 'Medium', 'large']"}), "(Small1, Medium1, Large1, legends=['Small', 'Medium', 'large'])\n", (1538, 1601), False, 'from pyit2fls import IT2Mamdani, IT2FS_Gaussian_UncertStd, IT2FS_plot, min_t_norm, max_s_norm, crisp\n'), ((1671, 1744), 'pyit2fls.IT2FS_plot', 'IT2FS_plot', (['Small2', 'Medium2', 'Large2'], {'legends': "['Small', 'Medium', 'large']"}), "(Small2, Medium2, Large2, legends=['Small', 'Medium', 'large'])\n", (1681, 1744), False, 'from pyit2fls import IT2Mamdani, IT2FS_Gaussian_UncertStd, IT2FS_plot, min_t_norm, max_s_norm, crisp\n'), ((1815, 1871), 'pyit2fls.IT2FS_Gaussian_UncertStd', 'IT2FS_Gaussian_UncertStd', (['domain3', '[3.0, 0.1, 0.05, 1.0]'], {}), '(domain3, [3.0, 0.1, 0.05, 1.0])\n', (1839, 1871), False, 'from pyit2fls import IT2Mamdani, IT2FS_Gaussian_UncertStd, IT2FS_plot, min_t_norm, max_s_norm, crisp\n'), ((1928, 1984), 'pyit2fls.IT2FS_Gaussian_UncertStd', 'IT2FS_Gaussian_UncertStd', (['domain4', '[4.0, 0.1, 0.05, 1.0]'], {}), '(domain4, [4.0, 0.1, 0.05, 1.0])\n', (1952, 1984), False, 'from pyit2fls import IT2Mamdani, IT2FS_Gaussian_UncertStd, IT2FS_plot, min_t_norm, max_s_norm, crisp\n'), ((2043, 2099), 'pyit2fls.IT2FS_Gaussian_UncertStd', 'IT2FS_Gaussian_UncertStd', (['domain3', '[4.0, 0.1, 0.05, 1.0]'], {}), '(domain3, [4.0, 0.1, 0.05, 1.0])\n', (2067, 2099), False, 'from pyit2fls import IT2Mamdani, IT2FS_Gaussian_UncertStd, IT2FS_plot, min_t_norm, max_s_norm, crisp\n'), ((2158, 2214), 
'pyit2fls.IT2FS_Gaussian_UncertStd', 'IT2FS_Gaussian_UncertStd', (['domain4', '[5.0, 0.1, 0.05, 1.0]'], {}), '(domain4, [5.0, 0.1, 0.05, 1.0])\n', (2182, 2214), False, 'from pyit2fls import IT2Mamdani, IT2FS_Gaussian_UncertStd, IT2FS_plot, min_t_norm, max_s_norm, crisp\n'), ((2270, 2318), 'pyit2fls.IT2FS_plot', 'IT2FS_plot', (['Low1', 'High1'], {'legends': "['Low', 'High']"}), "(Low1, High1, legends=['Low', 'High'])\n", (2280, 2318), False, 'from pyit2fls import IT2Mamdani, IT2FS_Gaussian_UncertStd, IT2FS_plot, min_t_norm, max_s_norm, crisp\n'), ((2389, 2437), 'pyit2fls.IT2FS_plot', 'IT2FS_plot', (['Low2', 'High2'], {'legends': "['Low', 'High']"}), "(Low2, High2, legends=['Low', 'High'])\n", (2399, 2437), False, 'from pyit2fls import IT2Mamdani, IT2FS_Gaussian_UncertStd, IT2FS_plot, min_t_norm, max_s_norm, crisp\n'), ((2521, 2555), 'pyit2fls.IT2Mamdani', 'IT2Mamdani', (['min_t_norm', 'max_s_norm'], {}), '(min_t_norm, max_s_norm)\n', (2531, 2555), False, 'from pyit2fls import IT2Mamdani, IT2FS_Gaussian_UncertStd, IT2FS_plot, min_t_norm, max_s_norm, crisp\n'), ((3707, 3733), 'numpy.meshgrid', 'meshgrid', (['domain1', 'domain2'], {}), '(domain1, domain2)\n', (3715, 3733), False, 'from numpy import linspace, meshgrid, zeros\n'), ((4056, 4068), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4066, 4068), True, 'import matplotlib.pyplot as plt\n'), ((4352, 4362), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4360, 4362), True, 'import matplotlib.pyplot as plt\n'), ((4370, 4382), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4380, 4382), True, 'import matplotlib.pyplot as plt\n'), ((4666, 4676), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4674, 4676), True, 'import matplotlib.pyplot as plt\n'), ((4234, 4251), 'matplotlib.ticker.LinearLocator', 'LinearLocator', (['(10)'], {}), '(10)\n', (4247, 4251), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((4282, 4309), 
'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.02f"""'], {}), "('%.02f')\n", (4300, 4309), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((4548, 4565), 'matplotlib.ticker.LinearLocator', 'LinearLocator', (['(10)'], {}), '(10)\n', (4561, 4565), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((4596, 4623), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.02f"""'], {}), "('%.02f')\n", (4614, 4623), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((4016, 4031), 'pyit2fls.crisp', 'crisp', (["tr['y1']"], {}), "(tr['y1'])\n", (4021, 4031), False, 'from pyit2fls import IT2Mamdani, IT2FS_Gaussian_UncertStd, IT2FS_plot, min_t_norm, max_s_norm, crisp\n'), ((4033, 4048), 'pyit2fls.crisp', 'crisp', (["tr['y2']"], {}), "(tr['y2'])\n", (4038, 4048), False, 'from pyit2fls import IT2Mamdani, IT2FS_Gaussian_UncertStd, IT2FS_plot, min_t_norm, max_s_norm, crisp\n')] |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2012-2016 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module :mod:`openquake.hazardlib.scalerel.leonard2014` implements
:class:`Leonard2014_SCR`
:class:`Leonard2014_Interplate`
"""
from numpy import power, log10
from openquake.hazardlib.scalerel.base import BaseMSRSigma, BaseASRSigma
class Leonard2014_SCR(BaseMSRSigma, BaseASRSigma):
    """
    Leonard, M., 2014. Self-consistent earthquake fault-scaling relations:
    Update and extension to stable continental strike-slip faults.
    Bulletin of the Seismological Society of America, 104(6), pp 2953-2965.
    Implements both magnitude-area and area-magnitude scaling relationships
    for stable continental regions (SCR).
    """

    @staticmethod
    def _offset(rake):
        """Log-space constant relating magnitude and log10(area) for `rake`."""
        if rake is None:
            # Average of the strike-slip (4.18) and dip-slip (4.19) constants.
            return 4.185
        if (-45 <= rake <= 45) or (rake >= 135) or (rake <= -135):
            # Strike-slip faulting.
            return 4.18
        # Dip-slip (thrust or normal), and undefined rake.
        return 4.19

    def get_median_area(self, mag, rake):
        """
        Calculates median fault area from magnitude.
        """
        return power(10.0, mag - self._offset(rake))

    def get_std_dev_area(self, mag, rake):
        """
        Standard deviation of the area estimate; zero for now.
        """
        return 0.0

    def get_median_mag(self, area, rake):
        """
        Returns magnitude for a given fault area.
        """
        return log10(area) + self._offset(rake)

    def get_std_dev_mag(self, area, rake):
        """
        Standard deviation of the magnitude estimate; zero for now.
        """
        return 0.0
class Leonard2014_Interplate(BaseMSRSigma, BaseASRSigma):
    """
    Leonard, M., 2014. Self-consistent earthquake fault-scaling relations:
    Update and extension to stable continental strike-slip faults.
    Bulletin of the Seismological Society of America, 104(6), pp 2953-2965.
    Implements both magnitude-area and area-magnitude scaling relationships
    for interplate events.
    """

    @staticmethod
    def _offset(rake):
        """Log-space constant relating magnitude and log10(area) for `rake`."""
        if rake is None:
            # Average of the strike-slip (3.99) and dip-slip (4.00) constants.
            return 3.995
        if (-45 <= rake <= 45) or (rake >= 135) or (rake <= -135):
            # Strike-slip faulting.
            return 3.99
        # Dip-slip (thrust or normal), and undefined rake.
        return 4.00

    def get_median_area(self, mag, rake):
        """
        Calculates median fault area from magnitude.
        """
        return power(10.0, mag - self._offset(rake))

    def get_std_dev_area(self, mag, rake):
        """
        Standard deviation of the area estimate; zero for now.
        """
        return 0.0

    def get_median_mag(self, area, rake):
        """
        Calculates median magnitude from fault area.
        """
        return log10(area) + self._offset(rake)

    def get_std_dev_mag(self, area, rake):
        """
        Standard deviation of the magnitude estimate; zero for now.
        """
        return 0.0
| [
"numpy.power",
"numpy.log10"
] | [((1582, 1606), 'numpy.power', 'power', (['(10.0)', '(mag - 4.185)'], {}), '(10.0, mag - 4.185)\n', (1587, 1606), False, 'from numpy import power, log10\n'), ((3188, 3212), 'numpy.power', 'power', (['(10.0)', '(mag - 3.995)'], {}), '(10.0, mag - 3.995)\n', (3193, 3212), False, 'from numpy import power, log10\n'), ((1723, 1746), 'numpy.power', 'power', (['(10.0)', '(mag - 4.18)'], {}), '(10.0, mag - 4.18)\n', (1728, 1746), False, 'from numpy import power, log10\n'), ((1844, 1867), 'numpy.power', 'power', (['(10.0)', '(mag - 4.19)'], {}), '(10.0, mag - 4.19)\n', (1849, 1867), False, 'from numpy import power, log10\n'), ((2210, 2221), 'numpy.log10', 'log10', (['area'], {}), '(area)\n', (2215, 2221), False, 'from numpy import power, log10\n'), ((3329, 3352), 'numpy.power', 'power', (['(10.0)', '(mag - 3.99)'], {}), '(10.0, mag - 3.99)\n', (3334, 3352), False, 'from numpy import power, log10\n'), ((3450, 3472), 'numpy.power', 'power', (['(10.0)', '(mag - 4.0)'], {}), '(10.0, mag - 4.0)\n', (3455, 3472), False, 'from numpy import power, log10\n'), ((3820, 3831), 'numpy.log10', 'log10', (['area'], {}), '(area)\n', (3825, 3831), False, 'from numpy import power, log10\n'), ((2344, 2355), 'numpy.log10', 'log10', (['area'], {}), '(area)\n', (2349, 2355), False, 'from numpy import power, log10\n'), ((2458, 2469), 'numpy.log10', 'log10', (['area'], {}), '(area)\n', (2463, 2469), False, 'from numpy import power, log10\n'), ((3954, 3965), 'numpy.log10', 'log10', (['area'], {}), '(area)\n', (3959, 3965), False, 'from numpy import power, log10\n'), ((4068, 4079), 'numpy.log10', 'log10', (['area'], {}), '(area)\n', (4073, 4079), False, 'from numpy import power, log10\n')] |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import bases
from ..utils import PanoUpsampleW
''' Dense (per-pixel) depth estimation '''
class DepthBase(nn.Module):
    """Common interface for dense depth heads: inference plus a loss dict."""

    def __init__(self):
        super(DepthBase, self).__init__()

    def infer(self, x_emb):
        """Run the forward pass and expose only the final depth map."""
        return {'depth': self(x_emb)['depth']}

    def compute_losses(self, x_emb, batch):
        """Return metric values ('mae', 'rmse', 'delta1') and training
        losses ('total.depth', optionally 'total.residual'), computed
        only on pixels where the ground-truth depth is positive."""
        gt = batch['depth']
        mask = gt > 0

        # Forward
        pred_dict = self(x_emb)
        pred_valid = pred_dict['depth'][mask]
        gt_valid = gt[mask]
        diff = pred_valid - gt_valid

        # Metrics on valid pixels, plus the loss used for backprop.
        ratio = torch.max(pred_valid / gt_valid, gt_valid / pred_valid)
        losses = {
            'mae': diff.abs().mean(),
            'rmse': diff.pow(2).mean().sqrt(),
            'delta1': (ratio < 1.25).float().mean(),
            'total.depth': loss_for_backward(pred_dict['depth1d'], gt, mask, self.loss),
        }
        if 'residual' in pred_dict:
            # The residual target is the leftover error of the (detached)
            # 1-D prediction; no gradient flows through the target itself.
            with torch.no_grad():
                gt_residual = gt - pred_dict['depth1d'].detach()
            losses['total.residual'] = loss_for_backward(
                pred_dict['residual'], gt_residual, mask, 'l1')
        return losses
def loss_for_backward(pred, gt, mask, loss):
    """Masked regression loss used for backpropagation.

    pred, gt : tensors of identical shape; `mask` selects valid entries.
    loss     : one of 'l1', 'l2', 'huber', 'berhu'.
    Raises NotImplementedError for any other loss name.
    """
    p = pred[mask]
    g = gt[mask]
    if loss == 'l1':
        return F.l1_loss(p, g)
    if loss == 'l2':
        return F.mse_loss(p, g)
    if loss == 'huber':
        return F.smooth_l1_loss(p, g)
    if loss == 'berhu':
        # Reverse-Huber on the mean errors: linear below the threshold c,
        # quadratic (scaled) above it; c adapts to the current error scale.
        mean_abs = (p - g).abs().mean()
        mean_sq = (p - g).pow(2).mean()
        with torch.no_grad():
            c = max(mean_abs.detach().max() * 0.2, 0.01)
        quad = (mean_sq + c**2) / (2 * c)
        return torch.where(mean_abs <= c, mean_abs, quad).mean()
    raise NotImplementedError
class DepthEstimator(DepthBase):
    """Predicts a dense depth map from a 1-D horizontal feature embedding.

    A 1x1 Conv1d head regresses `n_components` basis coefficients per
    horizontal position; multiplying by a column basis of height
    `output_height` reconstructs the 2-D depth map, on top of a global
    scalar bias. Optionally a 2-D conv branch adds a residual refinement.
    """
    def __init__(self, emb_dim, basis='dct', loss='l1', n_components=64,
                 init_weight=0.1, init_bias=2.5, output_height=512,
                 resisual=False, basis_tuning=False):
        # emb_dim      : channel width of the 1-D embedding x_emb['1D'].
        # basis        : name of a factory in `bases` building the basis matrix.
        # loss         : loss name stored for compute_losses / loss_for_backward.
        # init_weight  : scale for the final conv's normal initialization.
        # init_bias    : initial value of the global depth offset.
        # resisual     : NOTE(review) likely a typo for "residual"; kept
        #                as-is for API compatibility. Enables the 2-D branch.
        # basis_tuning : True -> basis is a trainable Parameter,
        #                False -> a fixed registered buffer.
        super(DepthEstimator, self).__init__()
        self.loss = loss
        self.output_height = output_height
        basis = getattr(bases, basis)(n_components, output_height)
        if basis_tuning:
            self.basis = nn.Parameter(basis)
        else:
            self.register_buffer('basis', basis)
        # 1x1 conv head: emb_dim -> emb_dim -> n_components coefficients.
        self.estimator = nn.Sequential(
            nn.Conv1d(emb_dim, emb_dim, 1),
            nn.BatchNorm1d(emb_dim),
            nn.ReLU(inplace=True),
            nn.Conv1d(emb_dim, n_components, 1, bias=False),
        )
        # Global scalar depth offset, broadcast over the whole map.
        self.bias = nn.Parameter(torch.full([1], init_bias))
        # Small init keeps initial predictions close to the bias value.
        nn.init.normal_(self.estimator[-1].weight, std=init_weight/np.sqrt(emb_dim/2))
        self.residual = None
        if resisual:
            # 2-D refinement branch over the first map in x_emb['conv_list'];
            # assumes that map has 256 channels -- TODO confirm backbone.
            self.residual = nn.Sequential(
                nn.Conv2d(256, 64, 3, padding=1, bias=False),
                nn.BatchNorm2d(64),
                nn.ReLU(inplace=True),
                nn.Conv2d(64, 1, 1, bias=False),
                PanoUpsampleW(4),
                nn.UpsamplingBilinear2d(scale_factor=(4,1)),
            )
    def forward(self, x_emb):
        """Return {'depth', 'depth1d'[, 'residual']} from the embedding dict."""
        # Per-column basis coefficients, shape (B, n_components, W).
        ws = self.estimator(x_emb['1D'])
        if self.basis is None:
            # Fallback: upsample the raw coefficients to the output size.
            h, w = self.output_height, ws.shape[-1]
            depth = self.bias + F.interpolate(ws.unsqueeze(1), size=(h,w), mode='bilinear', align_corners=False)
        else:
            # Basis expansion along the height axis -> (B, 1, H, W).
            depth = self.bias + torch.einsum('bkw,kh->bhw', ws, self.basis).unsqueeze(1)
        ret_dict = {'depth': depth, 'depth1d': depth}
        if self.residual is not None:
            # detach(): the residual branch does not backprop into the backbone.
            residual = 0.1 * self.residual(x_emb['conv_list'][0].detach())
            ret_dict['residual'] = residual
            ret_dict['depth'] = depth + residual
        return ret_dict
| [
"torch.nn.Parameter",
"torch.nn.UpsamplingBilinear2d",
"torch.nn.ReLU",
"torch.where",
"torch.nn.Conv1d",
"torch.nn.BatchNorm1d",
"torch.nn.functional.mse_loss",
"torch.nn.functional.l1_loss",
"torch.full",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.einsum",
"torch.max",
"torch.no_g... | [((1312, 1343), 'torch.nn.functional.l1_loss', 'F.l1_loss', (['pred[mask]', 'gt[mask]'], {}), '(pred[mask], gt[mask])\n', (1321, 1343), True, 'import torch.nn.functional as F\n'), ((1382, 1414), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['pred[mask]', 'gt[mask]'], {}), '(pred[mask], gt[mask])\n', (1392, 1414), True, 'import torch.nn.functional as F\n'), ((2296, 2315), 'torch.nn.Parameter', 'nn.Parameter', (['basis'], {}), '(basis)\n', (2308, 2315), True, 'import torch.nn as nn\n'), ((2432, 2462), 'torch.nn.Conv1d', 'nn.Conv1d', (['emb_dim', 'emb_dim', '(1)'], {}), '(emb_dim, emb_dim, 1)\n', (2441, 2462), True, 'import torch.nn as nn\n'), ((2476, 2499), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['emb_dim'], {}), '(emb_dim)\n', (2490, 2499), True, 'import torch.nn as nn\n'), ((2513, 2534), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2520, 2534), True, 'import torch.nn as nn\n'), ((2548, 2595), 'torch.nn.Conv1d', 'nn.Conv1d', (['emb_dim', 'n_components', '(1)'], {'bias': '(False)'}), '(emb_dim, n_components, 1, bias=False)\n', (2557, 2595), True, 'import torch.nn as nn\n'), ((2640, 2666), 'torch.full', 'torch.full', (['[1]', 'init_bias'], {}), '([1], init_bias)\n', (2650, 2666), False, 'import torch\n'), ((1020, 1035), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1033, 1035), False, 'import torch\n'), ((1456, 1494), 'torch.nn.functional.smooth_l1_loss', 'F.smooth_l1_loss', (['pred[mask]', 'gt[mask]'], {}), '(pred[mask], gt[mask])\n', (1472, 1494), True, 'import torch.nn.functional as F\n'), ((2865, 2909), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(64)', '(3)'], {'padding': '(1)', 'bias': '(False)'}), '(256, 64, 3, padding=1, bias=False)\n', (2874, 2909), True, 'import torch.nn as nn\n'), ((2927, 2945), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (2941, 2945), True, 'import torch.nn as nn\n'), ((2963, 2984), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), 
'(inplace=True)\n', (2970, 2984), True, 'import torch.nn as nn\n'), ((3002, 3033), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(1)', '(1)'], {'bias': '(False)'}), '(64, 1, 1, bias=False)\n', (3011, 3033), True, 'import torch.nn as nn\n'), ((3085, 3129), 'torch.nn.UpsamplingBilinear2d', 'nn.UpsamplingBilinear2d', ([], {'scale_factor': '(4, 1)'}), '(scale_factor=(4, 1))\n', (3108, 3129), True, 'import torch.nn as nn\n'), ((2735, 2755), 'numpy.sqrt', 'np.sqrt', (['(emb_dim / 2)'], {}), '(emb_dim / 2)\n', (2742, 2755), True, 'import numpy as np\n'), ((1635, 1650), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1648, 1650), False, 'import torch\n'), ((3458, 3501), 'torch.einsum', 'torch.einsum', (['"""bkw,kh->bhw"""', 'ws', 'self.basis'], {}), "('bkw,kh->bhw', ws, self.basis)\n", (3470, 3501), False, 'import torch\n'), ((798, 853), 'torch.max', 'torch.max', (['(pred[mask] / gt[mask])', '(gt[mask] / pred[mask])'], {}), '(pred[mask] / gt[mask], gt[mask] / pred[mask])\n', (807, 853), False, 'import torch\n'), ((1754, 1783), 'torch.where', 'torch.where', (['(l1 <= c)', 'l1', 'l2c'], {}), '(l1 <= c, l1, l2c)\n', (1765, 1783), False, 'import torch\n')] |
from biocrnpyler import *
import numpy as np
# Core rate constants: binding, unbinding, transcription, translation, degradation.
kb, ku, ktx, ktl, kdeg = 200, 10, 2.0, 50.0, 1.5
# Parameter dictionary; tuple keys are (mechanism.name, part_id, param_name).
parameters = {
    "kb": kb,
    "ku": ku,
    "ktx": ktx,
    "ktl": ktl,
    "kdeg": kdeg,
    "cooperativity": 4,
    # Translation through the BCD ribosome binding site.
    ('translation_mm', 'BCD', 'ku'): ku,
    ('translation_mm', 'BCD', 'kb'): kb,
    ('translation_mm', 'BCD', 'ktl'): ktl,
    # Transcription rates when a regulator is bound.
    ('transcription_mm', 'activator', "ktx"): ktx,
    ('transcription_mm', 'repressor', "ktx"): ktx,
    # Repressor binds the promoter tightly but blocks RNAP recruitment.
    ('one_step_cooperative_binding', 'repressor', 'kb'): 1000,
    ('one_step_cooperative_binding', "repressor", 'ku'): 5.0,
    ('transcription_mm', 'repressor', 'kb'): 1,
    ('transcription_mm', "repressor", 'ku'): 1000.0,
    # Activator binds tightly and recruits RNAP strongly.
    ('one_step_cooperative_binding', 'activator', 'kb'): 1000,
    ('one_step_cooperative_binding', "activator", 'ku'): 5.0,
    ('transcription_mm', 'activator', 'kb'): 1000,
    ('transcription_mm', "activator", 'ku'): 1.0,
    # Weak leak expression from the otherwise unoccupied promoter.
    ('transcription_mm', 'P_regulated', "kb_leak"): kb / 10,
    ('transcription_mm', 'P_regulated', "ku_leak"): ku * 10,
    ('transcription_mm', 'P_regulated', "ktx_leak"): ktx,
}
# Reporter assembly: a promoter regulated by one activator and one repressor.
P_reg = RegulatedPromoter("P_regulated", regulators=["activator", "repressor"], leak=True)
reg_rep_assembly = DNAassembly(name="reporter", promoter=P_reg, rbs="BCD")
activator = Protein("activator")
repressor = Protein("repressor")
components = [reg_rep_assembly, activator, repressor]
# Compile the components into a chemical reaction network in a TX-TL extract.
myMixture = BasicExtract(name="txtl", parameters=parameters, components=components, parameter_warnings=False)
myCRN = myMixture.compile_crn()
print("\n" + repr(myCRN))
time = np.arange(0, 20, .01)
import pylab as plt
def _species_init(n_activator, n_repressor):
    """Initial condition dict: vary regulator counts, fix core machinery."""
    return {"protein_activator": n_activator, "protein_repressor": n_repressor,
            "dna_reporter": 10, "protein_Ribo": 100, "protein_RNAP": 20,
            "protein_RNAase": 10}
# Deterministic simulations under three regulator regimes.
x0 = _species_init(0, 0)
R_const = myCRN.simulate_with_bioscrape(time, stochastic=False, initial_condition_dict=x0)
x0 = _species_init(0, 50)
R_repressed = myCRN.simulate_with_bioscrape(time, stochastic=False, initial_condition_dict=x0)
x0 = _species_init(50, 0)
R_active = myCRN.simulate_with_bioscrape(time, stochastic=False, initial_condition_dict=x0)
# Compare reporter expression across the three regimes.
plt.figure()
plt.plot(time, R_const["protein_reporter"], label="Constituitive Expression")
plt.plot(time, R_repressed["protein_reporter"], label="Repressed Expression")
plt.plot(time, R_active["protein_reporter"], label="Activated Expression")
plt.legend()
plt.show()
| [
"pylab.show",
"pylab.plot",
"numpy.arange",
"pylab.figure",
"pylab.legend"
] | [((1602, 1624), 'numpy.arange', 'np.arange', (['(0)', '(20)', '(0.01)'], {}), '(0, 20, 0.01)\n', (1611, 1624), True, 'import numpy as np\n'), ((2335, 2347), 'pylab.figure', 'plt.figure', ([], {}), '()\n', (2345, 2347), True, 'import pylab as plt\n'), ((2348, 2425), 'pylab.plot', 'plt.plot', (['time', "R_const['protein_reporter']"], {'label': '"""Constituitive Expression"""'}), "(time, R_const['protein_reporter'], label='Constituitive Expression')\n", (2356, 2425), True, 'import pylab as plt\n'), ((2428, 2505), 'pylab.plot', 'plt.plot', (['time', "R_repressed['protein_reporter']"], {'label': '"""Repressed Expression"""'}), "(time, R_repressed['protein_reporter'], label='Repressed Expression')\n", (2436, 2505), True, 'import pylab as plt\n'), ((2508, 2582), 'pylab.plot', 'plt.plot', (['time', "R_active['protein_reporter']"], {'label': '"""Activated Expression"""'}), "(time, R_active['protein_reporter'], label='Activated Expression')\n", (2516, 2582), True, 'import pylab as plt\n'), ((2585, 2597), 'pylab.legend', 'plt.legend', ([], {}), '()\n', (2595, 2597), True, 'import pylab as plt\n'), ((2599, 2609), 'pylab.show', 'plt.show', ([], {}), '()\n', (2607, 2609), True, 'import pylab as plt\n')] |
import numpy as np
from rlkit.envs.ant_multitask_base import MultitaskAntEnv
from . import register_env
# Copy task structure from https://github.com/jonasrothfuss/ProMP/blob/master/meta_policy_search/envs/mujoco_envs/ant_rand_goal.py
@register_env("ant-vel")
class AntVelEnv(MultitaskAntEnv):
    """Multitask Ant where each task is a target forward velocity.

    Note that "goal" here refers to a goal velocity; the reward is dense
    in the velocity error.
    """
    def __init__(self, task=None, n_tasks=2, randomize_tasks=True, **kwargs):
        # Fix: the original signature used a mutable default (task={}),
        # which is shared across all calls; default to None and build a
        # fresh dict per call instead (behaviorally equivalent).
        if task is None:
            task = {}
        # Fixed seed so the sampled task set is reproducible across runs.
        np.random.seed(3)
        super(AntVelEnv, self).__init__(task, n_tasks, **kwargs)
    def step(self, action):
        """Advance the simulation one control step.

        Returns (observation, reward, done, info); the reward combines a
        dense velocity-tracking term, control and contact penalties, and
        a small survive bonus.
        """
        xposbefore = np.copy(self.get_body_com("torso"))
        self.do_simulation(action, self.frame_skip)
        xposafter = self.get_body_com("torso")
        # Torso forward velocity over this step.
        comvel = (xposafter[0] - xposbefore[0]) / self.dt
        forward_reward = -np.abs(comvel - self._goal) + 1.0
        lb, ub = self.action_bounds
        scaling = (ub - lb) * 0.5
        ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / scaling))
        contact_cost = (
            0.5 * 1e-3 * np.sum(np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
        )
        survive_reward = 0.05
        reward = forward_reward - ctrl_cost - contact_cost + survive_reward
        state = self.state_vector()
        # Healthy iff the state is finite and torso height is in [0.2, 1.0].
        notdone = np.isfinite(state).all() and 0.2 <= state[2] <= 1.0
        done = not notdone
        ob = self._get_obs()
        return (
            ob,
            reward,
            done,
            dict(
                reward_forward=forward_reward,
                reward_ctrl=-ctrl_cost,
                reward_contact=-contact_cost,
                reward_survive=survive_reward,
                comvel=comvel,
                state=state,
            ),
        )
    def reward(self, info, goal):
        """Recompute (reward, done) for a stored step `info` under a
        different goal velocity (used for task relabeling)."""
        comvel = info["comvel"]
        forward_reward = -np.abs(comvel - goal) + 1.0
        state = info["state"]
        notdone = np.isfinite(state).all() and 0.2 <= state[2] <= 1.0
        reward = (forward_reward + info["reward_ctrl"]
                  + info["reward_contact"] + info["reward_survive"])
        return reward, not notdone
    def sample_tasks(self, num_tasks):
        """Draw `num_tasks` goal velocities uniformly from [0, 3]."""
        velocities = np.random.uniform(0.0, 3.0, (num_tasks,))
        return [{"goal": goal} for goal in velocities]
    def _get_obs(self):
        """Observation: joint positions, joint velocities, and clipped
        external contact forces, flattened and concatenated."""
        return np.concatenate([
            self.sim.data.qpos.flat,
            self.sim.data.qvel.flat,
            np.clip(self.sim.data.cfrc_ext, -1, 1).flat,
        ])
    @property
    def action_bounds(self):
        """Actuator control range transposed so that row 0 holds lower
        bounds and row 1 holds upper bounds."""
        bounds = self.sim.model.actuator_ctrlrange.copy().astype(np.float32)
        return bounds.T
    def reset_model(self):
        """Reset joint state with small noise around the initial pose."""
        self.comvel = np.array([0., 0., 0.])
        qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1)
        qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1
        self.set_state(qpos, qvel)
        return self._get_obs()
@register_env("ant-vel-sparse")
class AntVelSparseEnv(MultitaskAntEnv):
# Note that goal here refers to goal velocity
def __init__(
self, task={}, n_tasks=2, randomize_tasks=True, **kwargs
):
np.random.seed(3)
self.goal_radius = 0.5
super(AntVelSparseEnv, self).__init__(task, n_tasks, **kwargs)
def step(self, action):
xposbefore = np.copy(self.get_body_com("torso"))
self.do_simulation(action, self.frame_skip)
xposafter = self.get_body_com("torso")
comvel = (xposafter[0] - xposbefore[0]) / self.dt
# forward_reward = -np.abs(comvel - self._goal) + 1.0
forward_reward = -np.abs(comvel - self._goal)
lb, ub = self.action_bounds
scaling = (ub - lb) * 0.5
ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / scaling))
contact_cost = (
0.5 * 1e-3 * np.sum(np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
)
survive_reward = 0.05
forward_reward = self.sparsify_rewards(forward_reward)
reward = forward_reward - ctrl_cost - contact_cost + survive_reward
state = self.state_vector()
notdone = np.isfinite(state).all() and state[2] >= 0.2 and \
state[2] <= 1.0
done = not notdone
ob = self._get_obs()
return (
ob,
reward,
done,
dict(
reward_forward=forward_reward,
reward_ctrl=-ctrl_cost,
reward_contact=-contact_cost,
reward_survive=survive_reward,
comvel=comvel,
state=state,
),
)
def reward(self, info, goal):
comvel = info["comvel"]
# forward_reward = -np.abs(comvel - goal) + 1.0
forward_reward = -np.abs(comvel - goal)
forward_reward = self.sparsify_rewards(forward_reward)
reward_ctrl = info["reward_ctrl"]
reward_contact = info["reward_contact"]
reward_survive = info["reward_survive"]
state = info["state"]
notdone = np.isfinite(state).all() and state[2] >= 0.2 and \
state[2] <= 1.0
reward = forward_reward + reward_ctrl + reward_contact + reward_survive
done = not notdone
return reward, done
def sample_tasks(self, num_tasks):
tasks = np.random.uniform(-1.5, 1.5, (num_tasks, ))
tasks = [{"goal": goal} for goal in tasks]
return tasks
def sparsify_rewards(self, r):
"""
if r < -self.goal_radius:
r = -2
# r = -1
r = r + 2
"""
if r > -self.goal_radius:
r = r + 1
# r = r + 1
return r
def _get_obs(self):
        """Return the flat observation vector.

        Concatenates joint positions, joint velocities, and contact forces
        clipped to [-1, 1]. (An earlier variant also appended the torso
        rotation matrix and centre of mass before reshaping.)
        """
        return np.concatenate([
            self.sim.data.qpos.flat,
            self.sim.data.qvel.flat,
            np.clip(self.sim.data.cfrc_ext, -1, 1).flat,
        ])
@property
def action_bounds(self):
        """Actuator control range as float32, transposed to (lb_row, ub_row)."""
        return self.sim.model.actuator_ctrlrange.copy().astype(np.float32).T
def reset_model(self):
        """Reset to a randomly perturbed initial state and return the observation."""
        self.comvel = np.array([0., 0., 0.])
        # Small uniform noise on positions, Gaussian noise on velocities.
        qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1)
        qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1
        self.set_state(qpos, qvel)
        return self._get_obs()
| [
"numpy.random.uniform",
"numpy.random.seed",
"numpy.abs",
"numpy.square",
"numpy.isfinite",
"numpy.clip",
"numpy.array"
] | [((445, 462), 'numpy.random.seed', 'np.random.seed', (['(3)'], {}), '(3)\n', (459, 462), True, 'import numpy as np\n'), ((2322, 2363), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(3.0)', '(num_tasks,)'], {}), '(0.0, 3.0, (num_tasks,))\n', (2339, 2363), True, 'import numpy as np\n'), ((3173, 3198), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (3181, 3198), True, 'import numpy as np\n'), ((3649, 3666), 'numpy.random.seed', 'np.random.seed', (['(3)'], {}), '(3)\n', (3663, 3666), True, 'import numpy as np\n'), ((5795, 5837), 'numpy.random.uniform', 'np.random.uniform', (['(-1.5)', '(1.5)', '(num_tasks,)'], {}), '(-1.5, 1.5, (num_tasks,))\n', (5812, 5837), True, 'import numpy as np\n'), ((6892, 6917), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (6900, 6917), True, 'import numpy as np\n'), ((4100, 4127), 'numpy.abs', 'np.abs', (['(comvel - self._goal)'], {}), '(comvel - self._goal)\n', (4106, 4127), True, 'import numpy as np\n'), ((5250, 5271), 'numpy.abs', 'np.abs', (['(comvel - goal)'], {}), '(comvel - goal)\n', (5256, 5271), True, 'import numpy as np\n'), ((797, 824), 'numpy.abs', 'np.abs', (['(comvel - self._goal)'], {}), '(comvel - self._goal)\n', (803, 824), True, 'import numpy as np\n'), ((941, 968), 'numpy.square', 'np.square', (['(action / scaling)'], {}), '(action / scaling)\n', (950, 968), True, 'import numpy as np\n'), ((1834, 1855), 'numpy.abs', 'np.abs', (['(comvel - goal)'], {}), '(comvel - goal)\n', (1840, 1855), True, 'import numpy as np\n'), ((4238, 4265), 'numpy.square', 'np.square', (['(action / scaling)'], {}), '(action / scaling)\n', (4247, 4265), True, 'import numpy as np\n'), ((1037, 1075), 'numpy.clip', 'np.clip', (['self.sim.data.cfrc_ext', '(-1)', '(1)'], {}), '(self.sim.data.cfrc_ext, -1, 1)\n', (1044, 1075), True, 'import numpy as np\n'), ((1248, 1266), 'numpy.isfinite', 'np.isfinite', (['state'], {}), '(state)\n', (1259, 1266), True, 'import numpy as np\n'), 
((2048, 2066), 'numpy.isfinite', 'np.isfinite', (['state'], {}), '(state)\n', (2059, 2066), True, 'import numpy as np\n'), ((2922, 2960), 'numpy.clip', 'np.clip', (['self.sim.data.cfrc_ext', '(-1)', '(1)'], {}), '(self.sim.data.cfrc_ext, -1, 1)\n', (2929, 2960), True, 'import numpy as np\n'), ((4334, 4372), 'numpy.clip', 'np.clip', (['self.sim.data.cfrc_ext', '(-1)', '(1)'], {}), '(self.sim.data.cfrc_ext, -1, 1)\n', (4341, 4372), True, 'import numpy as np\n'), ((4608, 4626), 'numpy.isfinite', 'np.isfinite', (['state'], {}), '(state)\n', (4619, 4626), True, 'import numpy as np\n'), ((5521, 5539), 'numpy.isfinite', 'np.isfinite', (['state'], {}), '(state)\n', (5532, 5539), True, 'import numpy as np\n'), ((6641, 6679), 'numpy.clip', 'np.clip', (['self.sim.data.cfrc_ext', '(-1)', '(1)'], {}), '(self.sim.data.cfrc_ext, -1, 1)\n', (6648, 6679), True, 'import numpy as np\n')] |
#!/usr/bin/python3
"""
probability distributions.
"""
import sys
import numpy as np
import scipy as sp
from scipy.stats import binom
def main(args):
    """Demonstrate a Binomial(17, 0.765) distribution.

    Prints its moments and pmf over a small support, then estimates ``p``
    by maximum likelihood from a large sample. ``args`` is accepted for a
    uniform CLI entry point but is unused.
    """
    trials = 17
    dist = binom(trials, .765)
    for label, value in (("Mean =", dist.mean()),
                         ("Var =", dist.var()),
                         ("Std =", dist.std())):
        print(label, value)
    support = np.array([0, 1, 2, 3, 4, 5, 6])
    print("prob dist =", dist.pmf(support))
    draws = dist.rvs(size=10000000)
    print(set(draws), trials)
    # MLE of p for a binomial: total successes / (number of draws * trials).
    p_est = sum(draws) / (len(draws) * trials)
    print("p(est) = {0} n = {1}".format(p_est, trials))
if __name__ == "__main__":
    main(sys.argv[1:])
"numpy.array",
"scipy.stats.binom"
] | [((176, 191), 'scipy.stats.binom', 'binom', (['n', '(0.765)'], {}), '(n, 0.765)\n', (181, 191), False, 'from scipy.stats import binom\n'), ((296, 327), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6]'], {}), '([0, 1, 2, 3, 4, 5, 6])\n', (304, 327), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-0.x/blob/master/LICENSE
import codecs
import json
import numpy
import awkward0.array.base
import awkward0.array.chunked
import awkward0.array.indexed
import awkward0.array.jagged
import awkward0.array.masked
import awkward0.array.objects
import awkward0.array.table
import awkward0.array.virtual
import awkward0.type
import awkward0.util
################################################################################ type conversions
def schema2type(schema):
    """Convert a pyarrow Schema into an awkward0 type descriptor.

    Each named field becomes an ``ArrayType``; fields are combined with ``&``
    (record product). Nullable fields are wrapped in ``OptionType``.
    """
    import pyarrow
    def recurse(tpe, nullable):
        # Branch order matters: the concrete pyarrow subclasses must be
        # checked before the generic pyarrow.lib.DataType fallback.
        if isinstance(tpe, pyarrow.lib.DictionaryType):
            out = recurse(tpe.dictionary.type, nullable)
            if nullable:
                return awkward0.type.OptionType(out)
            else:
                return out
        elif isinstance(tpe, pyarrow.lib.StructType):
            # Struct: fold child fields together with & (record of fields).
            out = None
            for i in range(tpe.num_children):
                x = awkward0.type.ArrayType(tpe[i].name, recurse(tpe[i].type, tpe[i].nullable))
                if out is None:
                    out = x
                else:
                    out = out & x
            if nullable:
                return awkward0.type.OptionType(out)
            else:
                return out
        elif isinstance(tpe, pyarrow.lib.ListType):
            # Variable-length list: inf encodes "any length" in awkward0 types.
            out = awkward0.type.ArrayType(float("inf"), recurse(tpe.value_type, nullable))
            if nullable:
                return awkward0.type.OptionType(out)
            else:
                return out
        elif isinstance(tpe, pyarrow.lib.UnionType):
            # Union: fold alternatives together with | (sum type).
            out = None
            for i in range(tpe.num_children):
                x = recurse(tpe[i].type, nullable)
                if out is None:
                    out = x
                else:
                    out = out | x
            if nullable:
                return awkward0.type.OptionType(out)
            else:
                return out
        elif tpe == pyarrow.string():
            if nullable:
                return awkward0.type.OptionType(str)
            else:
                return str
        elif tpe == pyarrow.binary():
            if nullable:
                return awkward0.type.OptionType(bytes)
            else:
                return bytes
        elif tpe == pyarrow.bool_():
            out = awkward0.numpy.dtype(bool)
            if nullable:
                return awkward0.type.OptionType(out)
            else:
                return out
        elif isinstance(tpe, pyarrow.lib.DataType):
            # Generic primitive: map to the equivalent numpy dtype.
            if nullable:
                return awkward0.type.OptionType(tpe.to_pandas_dtype())
            else:
                return tpe.to_pandas_dtype()
        else:
            raise NotImplementedError(repr(tpe))
    out = None
    for name in schema.names:
        field = schema.field(name)
        mytype = awkward0.type.ArrayType(name, recurse(field.type, field.nullable))
        if out is None:
            out = mytype
        else:
            out = out & mytype
    return out
################################################################################ value conversions
# we need an opt-out of the large indices in certain cases, otherwise use by default
def toarrow(obj):
    """Convert an awkward0 array (or numpy array) to a pyarrow structure.

    Top-level ChunkedArrays and (masked) Tables become ``pyarrow.Table``;
    everything else becomes a ``pyarrow.Array``. ``mask`` threads an optional
    boolean "missing" mask down through the recursion.
    """
    import pyarrow
    def recurse(obj, mask):
        if isinstance(obj, numpy.ndarray):
            return pyarrow.array(obj, mask=mask)
        elif isinstance(obj, awkward0.array.chunked.ChunkedArray): # includes AppendableArray
            raise TypeError("only top-level ChunkedArrays can be converted to Arrow (as RecordBatches)")
        elif isinstance(obj, awkward0.array.indexed.IndexedArray):
            if mask is None:
                return pyarrow.DictionaryArray.from_arrays(obj.index, recurse(obj.content, mask))
            else:
                # With a mask we must materialise the indexed view first.
                return recurse(obj.content[obj.index], mask)
        elif isinstance(obj, awkward0.array.indexed.SparseArray):
            return recurse(obj.dense, mask)
        elif isinstance(obj, awkward0.array.jagged.JaggedArray):
            obj = obj.compact()
            if mask is not None:
                mask = obj.tojagged(mask).flatten()
            arrow_type = pyarrow.ListArray
            # 64bit offsets not yet completely golden in arrow
            # if hasattr(pyarrow, 'LargeListArray') and obj.starts.itemsize > 4:
            #     arrow_type = pyarrow.LargeListArray
            return arrow_type.from_arrays(obj.offsets, recurse(obj.content, mask))
        elif isinstance(obj, awkward0.array.masked.IndexedMaskedArray):
            thismask = obj.boolmask(maskedwhen=True)
            if mask is not None:
                thismask = mask | thismask
            if len(obj.content) == 0:
                content = obj.numpy.empty(len(obj.mask), dtype=obj.DEFAULTTYPE)
            else:
                content = obj.content[obj.mask]
            return recurse(content, thismask)
        elif isinstance(obj, awkward0.array.masked.MaskedArray): # includes BitMaskedArray
            thismask = obj.boolmask(maskedwhen=True)
            if mask is not None:
                thismask = mask | thismask
            return recurse(obj.content, thismask)
        elif isinstance(obj, awkward0.array.objects.StringArray):
            # Choose an Arrow string/binary representation based on encoding.
            if obj.encoding is None and hasattr(pyarrow.BinaryArray, 'from_buffers'):
                arrow_type = pyarrow.BinaryArray
                arrow_offset_type = pyarrow.binary()
                # 64bit offsets not yet completely golden in arrow
                # if hasattr(pyarrow, 'LargeBinaryArray') and obj.starts.itemsize > 4:
                #     arrow_type = pyarrow.LargeBinaryArray
                #     arrow_offset_type = pyarrow.large_binary()
                convert = lambda length, offsets, content: arrow_type.from_buffers(arrow_offset_type, length, [None, offsets, content])
            elif codecs.lookup(obj.encoding) is codecs.lookup("utf-8") or obj.encoding is None:
                arrow_type = pyarrow.StringArray
                # if hasattr(pyarrow, 'LargeStringArray') and obj.starts.itemsize > 4:
                #     arrow_type = pyarrow.LargeStringArray
                convert = lambda length, offsets, content: arrow_type.from_buffers(length, offsets, content)
            else:
                raise ValueError("only encoding=None or encoding='utf-8' can be converted to Arrow")
            obj = obj.compact()
            offsets = obj.offsets
            if offsets.dtype != numpy.dtype(numpy.int32):
                offsets = offsets.astype(numpy.int32)
            return convert(len(offsets) - 1, pyarrow.py_buffer(offsets), pyarrow.py_buffer(obj.content))
        elif isinstance(obj, awkward0.array.objects.ObjectArray):
            # throw away Python object interpretation, which Arrow can't handle while being multilingual
            return recurse(obj.content, mask)
        elif isinstance(obj, awkward0.array.table.Table):
            return pyarrow.StructArray.from_arrays([recurse(x, mask) for x in obj.contents.values()], list(obj.contents))
        elif isinstance(obj, awkward0.array.union.UnionArray):
            contents = []
            for i, x in enumerate(obj.contents):
                if mask is None:
                    thismask = None
                else:
                    thistags = (obj.tags == i)
                    thismask = obj.numpy.empty(len(x), dtype=obj.MASKTYPE)
                    thismask[obj.index[thistags]] = mask[thistags] # hmm... obj.index could have repeats; the Arrow mask in that case would not be well-defined...
                contents.append(recurse(x, thismask))
            return pyarrow.UnionArray.from_dense(pyarrow.array(obj.tags.astype(numpy.int8)), pyarrow.array(obj.index.astype(numpy.int32)), contents)
        elif isinstance(obj, awkward0.array.virtual.VirtualArray):
            return recurse(obj.array, mask)
        else:
            raise TypeError("cannot convert type {0} to Arrow".format(type(obj)))
    # Top-level dispatch: these cases produce Tables rather than Arrays.
    if isinstance(obj, awkward0.array.chunked.ChunkedArray): # includes AppendableArray
        batches = []
        for chunk in obj.chunks:
            arr = toarrow(chunk)
            if isinstance(arr, pyarrow.Table):
                batches.extend(arr.to_batches())
            else:
                batches.append(pyarrow.RecordBatch.from_arrays([arr], [""]))
        return pyarrow.Table.from_batches(batches)
    elif isinstance(obj, awkward0.array.masked.IndexedMaskedArray) and isinstance(obj.content, awkward0.array.table.Table):
        mask = obj.boolmask(maskedwhen=True)
        if len(obj.content) == 0:
            content = obj.numpy.empty(len(obj.mask), dtype=obj.DEFAULTTYPE)
        else:
            content = obj.content[obj.mask]
        return pyarrow.Table.from_batches([pyarrow.RecordBatch.from_arrays([recurse(x, mask) for x in obj.content.contents.values()], list(obj.content.contents))])
    elif isinstance(obj, awkward0.array.masked.MaskedArray) and isinstance(obj.content, awkward0.array.table.Table): # includes BitMaskedArray
        mask = obj.boolmask(maskedwhen=True)
        return pyarrow.Table.from_batches([pyarrow.RecordBatch.from_arrays([recurse(x, mask) for x in obj.content.contents.values()], list(obj.content.contents))])
    elif isinstance(obj, awkward0.array.table.Table):
        return pyarrow.Table.from_batches([pyarrow.RecordBatch.from_arrays([recurse(x, None) for x in obj.contents.values()], list(obj.contents))])
    else:
        return recurse(obj, None)
def fromarrow(obj):
    """Convert a pyarrow Array/ChunkedArray/RecordBatch/Table to awkward0.

    Works directly on the Arrow buffer list: ``popbuffers`` consumes buffers
    from the front in the exact order Arrow lays them out, so the pop order
    inside each branch must match the Arrow columnar format.
    """
    import pyarrow
    awkwardlib = awkward0
    # dtypes fixed by the Arrow columnar specification.
    ARROW_BITMASKTYPE = awkwardlib.numpy.uint8
    ARROW_INDEXTYPE = awkwardlib.numpy.int32
    ARROW_LARGEINDEXTYPE = awkwardlib.numpy.int64
    ARROW_TAGTYPE = awkwardlib.numpy.uint8
    ARROW_CHARTYPE = awkwardlib.numpy.uint8
    def popbuffers(array, tpe, buffers, length):
        # Each branch pops exactly the buffers its Arrow type owns:
        # usually a validity bitmap first, then offsets/indices/data.
        if isinstance(tpe, pyarrow.lib.DictionaryType):
            index = popbuffers(None if array is None else array.indices, tpe.index_type, buffers, length)
            if hasattr(tpe, "dictionary"):
                content = fromarrow(tpe.dictionary)
            elif array is not None:
                content = fromarrow(array.dictionary)
            else:
                raise NotImplementedError("no way to access Arrow dictionary inside of UnionArray")
            if isinstance(index, awkwardlib.BitMaskedArray):
                return awkwardlib.BitMaskedArray(index.mask, awkwardlib.IndexedArray(index.content, content), maskedwhen=index.maskedwhen, lsborder=index.lsborder)
            else:
                return awkwardlib.IndexedArray(index, content)
        elif isinstance(tpe, pyarrow.lib.StructType):
            assert getattr(tpe, "num_buffers", 1) == 1
            mask = buffers.pop(0)
            pairs = []
            for i in range(tpe.num_children):
                pairs.append((tpe[i].name, popbuffers(None if array is None else array.field(tpe[i].name), tpe[i].type, buffers, length)))
            out = awkwardlib.Table.frompairs(pairs, 0) # FIXME: better rowstart
            if mask is not None:
                mask = awkwardlib.numpy.frombuffer(mask, dtype=ARROW_BITMASKTYPE)
                return awkwardlib.BitMaskedArray(mask, out, maskedwhen=False, lsborder=True)
            else:
                return out
        elif isinstance(tpe, pyarrow.lib.ListType):
            assert getattr(tpe, "num_buffers", 2) == 2
            mask = buffers.pop(0)
            offsets = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_INDEXTYPE)[:length + 1]
            content = popbuffers(None if array is None else array.flatten(), tpe.value_type, buffers, offsets[-1])
            out = awkwardlib.JaggedArray.fromoffsets(offsets, content)
            if mask is not None:
                mask = awkwardlib.numpy.frombuffer(mask, dtype=ARROW_BITMASKTYPE)
                return awkwardlib.BitMaskedArray(mask, out, maskedwhen=False, lsborder=True)
            else:
                return out
        elif hasattr(pyarrow.lib, 'LargeListType') and isinstance(tpe, pyarrow.lib.LargeListType):
            # Same as ListType but with 64-bit offsets.
            assert getattr(tpe, "num_buffers", 2) == 2
            mask = buffers.pop(0)
            offsets = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_LARGEINDEXTYPE)[:length + 1]
            content = popbuffers(None if array is None else array.flatten(), tpe.value_type, buffers, offsets[-1])
            out = awkwardlib.JaggedArray.fromoffsets(offsets, content)
            if mask is not None:
                mask = awkwardlib.numpy.frombuffer(mask, dtype=ARROW_BITMASKTYPE)
                return awkwardlib.BitMaskedArray(mask, out, maskedwhen=False, lsborder=True)
            else:
                return out
        elif isinstance(tpe, pyarrow.lib.UnionType) and tpe.mode == "sparse":
            # Sparse union: children all have full length; synthesise an index.
            assert getattr(tpe, "num_buffers", 3) == 3
            mask = buffers.pop(0)
            tags = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_TAGTYPE)[:length]
            assert buffers.pop(0) is None
            index = awkwardlib.numpy.arange(len(tags), dtype=ARROW_INDEXTYPE)
            contents = []
            for i in range(tpe.num_children):
                try:
                    sublength = index[tags == i][-1] + 1
                except IndexError:
                    sublength = 0
                contents.append(popbuffers(None, tpe[i].type, buffers, sublength))
            for i in range(len(contents)):
                these = index[tags == i]
                if len(these) == 0:
                    contents[i] = contents[i][0:0]
                else:
                    contents[i] = contents[i][: these[-1] + 1]
            out = awkwardlib.UnionArray(tags, index, contents)
            if mask is not None:
                mask = awkwardlib.numpy.frombuffer(mask, dtype=ARROW_BITMASKTYPE)
                return awkwardlib.BitMaskedArray(mask, out, maskedwhen=False, lsborder=True)
            else:
                return out
        elif isinstance(tpe, pyarrow.lib.UnionType) and tpe.mode == "dense":
            # Dense union: explicit per-element index into the chosen child.
            assert getattr(tpe, "num_buffers", 3) == 3
            mask = buffers.pop(0)
            tags = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_TAGTYPE)[:length]
            index = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_INDEXTYPE)[:length]
            contents = []
            for i in range(tpe.num_children):
                try:
                    sublength = index[tags == i].max() + 1
                except ValueError:
                    sublength = 0
                contents.append(popbuffers(None, tpe[i].type, buffers, sublength))
            for i in range(len(contents)):
                these = index[tags == i]
                if len(these) == 0:
                    contents[i] = contents[i][0:0]
                else:
                    contents[i] = contents[i][: these.max() + 1]
            out = awkwardlib.UnionArray(tags, index, contents)
            if mask is not None:
                mask = awkwardlib.numpy.frombuffer(mask, dtype=ARROW_BITMASKTYPE)
                return awkwardlib.BitMaskedArray(mask, out, maskedwhen=False, lsborder=True)
            else:
                return out
        elif tpe == pyarrow.string():
            assert getattr(tpe, "num_buffers", 3) == 3
            mask = buffers.pop(0)
            offsets = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_INDEXTYPE)[:length + 1]
            content = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_CHARTYPE)[:offsets[-1]]
            out = awkwardlib.StringArray.fromoffsets(offsets, content[:offsets[-1]], encoding="utf-8")
            if mask is not None:
                mask = awkwardlib.numpy.frombuffer(mask, dtype=ARROW_BITMASKTYPE)
                return awkwardlib.BitMaskedArray(mask, out, maskedwhen=False, lsborder=True)
            else:
                return out
        elif tpe == pyarrow.large_string():
            assert getattr(tpe, "num_buffers", 3) == 3
            mask = buffers.pop(0)
            offsets = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_LARGEINDEXTYPE)[:length + 1]
            content = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_CHARTYPE)[:offsets[-1]]
            out = awkwardlib.StringArray.fromoffsets(offsets, content[:offsets[-1]], encoding="utf-8")
            if mask is not None:
                mask = awkwardlib.numpy.frombuffer(mask, dtype=ARROW_BITMASKTYPE)
                return awkwardlib.BitMaskedArray(mask, out, maskedwhen=False, lsborder=True)
            else:
                return out
        elif tpe == pyarrow.binary():
            assert getattr(tpe, "num_buffers", 3) == 3
            mask = buffers.pop(0)
            offsets = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_INDEXTYPE)[:length + 1]
            content = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_CHARTYPE)[:offsets[-1]]
            out = awkwardlib.StringArray.fromoffsets(offsets, content[:offsets[-1]], encoding=None)
            if mask is not None:
                mask = awkwardlib.numpy.frombuffer(mask, dtype=ARROW_BITMASKTYPE)
                return awkwardlib.BitMaskedArray(mask, out, maskedwhen=False, lsborder=True)
            else:
                return out
        elif tpe == pyarrow.large_binary():
            assert getattr(tpe, "num_buffers", 3) == 3
            mask = buffers.pop(0)
            offsets = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_LARGEINDEXTYPE)[:length + 1]
            content = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_CHARTYPE)[:offsets[-1]]
            out = awkwardlib.StringArray.fromoffsets(offsets, content[:offsets[-1]], encoding=None)
            if mask is not None:
                mask = awkwardlib.numpy.frombuffer(mask, dtype=ARROW_BITMASKTYPE)
                return awkwardlib.BitMaskedArray(mask, out, maskedwhen=False, lsborder=True)
            else:
                return out
        elif tpe == pyarrow.bool_():
            # Booleans are bit-packed in Arrow; unpack and reverse bit order
            # within each byte (lsborder=True layout).
            assert getattr(tpe, "num_buffers", 2) == 2
            mask = buffers.pop(0)
            out = awkwardlib.numpy.unpackbits(awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_CHARTYPE)).view(awkwardlib.MaskedArray.BOOLTYPE)
            out = out.reshape(-1, 8)[:,::-1].reshape(-1)[:length] # lsborder=True
            if mask is not None:
                mask = awkwardlib.numpy.frombuffer(mask, dtype=ARROW_BITMASKTYPE)
                return awkwardlib.BitMaskedArray(mask, out, maskedwhen=False, lsborder=True)
            else:
                return out
        elif isinstance(tpe, pyarrow.lib.DataType):
            # Generic fixed-width primitive.
            assert getattr(tpe, "num_buffers", 2) == 2
            mask = buffers.pop(0)
            out = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=tpe.to_pandas_dtype())[:length]
            if mask is not None:
                mask = awkwardlib.numpy.frombuffer(mask, dtype=ARROW_BITMASKTYPE)
                return awkwardlib.BitMaskedArray(mask, out, maskedwhen=False, lsborder=True)
            else:
                return out
        else:
            raise NotImplementedError(repr(tpe))
    if isinstance(obj, pyarrow.lib.Array):
        buffers = obj.buffers()
        out = popbuffers(obj, obj.type, buffers, len(obj))
        # Every buffer must have been consumed, or the walk got out of sync.
        assert len(buffers) == 0
        return out
    elif isinstance(obj, pyarrow.lib.ChunkedArray):
        chunks = [x for x in obj.chunks if len(x) > 0]
        if len(chunks) == 1:
            return fromarrow(chunks[0])
        else:
            return awkwardlib.ChunkedArray([fromarrow(x) for x in chunks], chunksizes=[len(x) for x in chunks])
    elif isinstance(obj, pyarrow.lib.RecordBatch):
        out = awkwardlib.Table()
        for n, x in zip(obj.schema.names, obj.columns):
            out[n] = fromarrow(x)
        return out
    elif isinstance(obj, pyarrow.lib.Table):
        chunks = []
        chunksizes = []
        for batch in obj.to_batches():
            chunk = fromarrow(batch)
            if len(chunk) > 0:
                chunks.append(chunk)
                chunksizes.append(len(chunk))
        if len(chunks) == 1:
            return chunks[0]
        else:
            return awkwardlib.ChunkedArray(chunks, chunksizes=chunksizes)
    else:
        raise NotImplementedError(type(obj))
################################################################################ Parquet file handling
def toparquet(where, obj, **options):
    """Write an awkward0/numpy array (or an iterable of them) to a Parquet file.

    ``where`` is the destination; remaining keyword ``options`` are forwarded
    to ``pyarrow.parquet.ParquetWriter``. The first item fixes the schema
    unless one was passed explicitly.
    """
    import pyarrow.parquet
    options["where"] = where
    def convert(obj, message):
        # Wrap a single array as a one-column Table; raise TypeError(message)
        # for anything that is not an awkward0/numpy array.
        if isinstance(obj, (awkward0.array.base.AwkwardArray, numpy.ndarray)):
            out = toarrow(obj)
            if isinstance(out, pyarrow.Table):
                return out
            else:
                return pyarrow.Table.from_batches([pyarrow.RecordBatch.from_arrays([out], [""])])
        else:
            raise TypeError(message)
    if isinstance(obj, awkward0.array.chunked.ChunkedArray):
        # One Parquet row group per chunk.
        obj = iter(obj.chunks)
        try:
            awkitem = next(obj)
        except StopIteration:
            raise ValueError("iterable is empty")
        arritem = convert(awkitem, None)
        if "schema" not in options:
            options["schema"] = arritem.schema
        writer = pyarrow.parquet.ParquetWriter(**options)
        writer.write_table(arritem)
        try:
            while True:
                try:
                    awkitem = next(obj)
                except StopIteration:
                    break
                else:
                    writer.write_table(convert(awkitem, None))
        finally:
            # Close even on a failed write so the file is not left open.
            writer.close()
    elif isinstance(obj, (awkward0.array.base.AwkwardArray, numpy.ndarray)):
        # Single array: one row group.
        arritem = convert(obj, None)
        options["schema"] = arritem.schema
        writer = pyarrow.parquet.ParquetWriter(**options)
        writer.write_table(arritem)
        writer.close()
    else:
        # Arbitrary iterable of arrays.
        try:
            obj = iter(obj)
        except TypeError:
            raise TypeError("cannot write {0} to Parquet file".format(type(obj)))
        try:
            awkitem = next(obj)
        except StopIteration:
            raise ValueError("iterable is empty")
        arritem = convert(awkitem, "cannot write iterator of {0} to Parquet file".format(type(awkitem)))
        if "schema" not in options:
            options["schema"] = arritem.schema
        writer = pyarrow.parquet.ParquetWriter(**options)
        writer.write_table(arritem)
        try:
            while True:
                try:
                    awkitem = next(obj)
                except StopIteration:
                    break
                else:
                    writer.write_table(convert(awkitem, "cannot write iterator of {0} to Parquet file".format(type(awkitem))))
        finally:
            writer.close()
class _ParquetFile(object):
    """Serializable handle on a Parquet file for lazy (virtual) column reads.

    Only ``file``/``metadata``/``common_metadata`` are carried through pickle
    or JSON; the pyarrow reader and the awkward type are rebuilt by ``_init``.
    Calling the object reads one column of one row group.
    """
    def __init__(self, file, metadata=None, common_metadata=None):
        self.file = file
        self.metadata = metadata
        self.common_metadata = common_metadata
        self._init()
    def _init(self):
        # Deferred import keeps pyarrow optional until a Parquet file is used.
        import pyarrow.parquet
        self.parquetfile = pyarrow.parquet.ParquetFile(self.file, metadata=self.metadata, common_metadata=self.common_metadata)
        self.type = schema2type(self.parquetfile.schema.to_arrow_schema())
    def __getstate__(self):
        return {"file": self.file, "metadata": self.metadata, "common_metadata": self.common_metadata}
    def __setstate__(self, state):
        self.file = state["file"]
        self.metadata = state["metadata"]
        self.common_metadata = state["common_metadata"]
        self._init()
    def __call__(self, rowgroup, column):
        # Materialise a single (row group, column) pair as an awkward0 array.
        return fromarrow(self.parquetfile.read_row_group(rowgroup, columns=[column]))[column]
    def tojson(self):
        # Result is discarded: presumably a check that the state is
        # JSON-serializable (dumps raises otherwise) — TODO confirm.
        json.dumps([self.file, self.metadata, self.common_metadata])
        return {"file": self.file, "metadata": self.metadata, "common_metadata": self.common_metadata}
    @classmethod
    def fromjson(cls, state):
        return cls(state["file"], metadata=state["metadata"], common_metadata=state["common_metadata"])
def fromparquet(file, cache=None, persistvirtual=False, metadata=None, common_metadata=None):
    """Open a Parquet file as a lazy awkward0 ChunkedArray.

    Each non-empty row group becomes one chunk; each column becomes a
    VirtualArray that reads its data only on first access (optionally cached).
    """
    awkwardlib = awkward0
    parquetfile = _ParquetFile(file, metadata=metadata, common_metadata=common_metadata)
    columns = parquetfile.type.columns
    chunks = []
    chunksizes = []
    for i in range(parquetfile.parquetfile.num_row_groups):
        numrows = parquetfile.parquetfile.metadata.row_group(i).num_rows
        if numrows > 0:
            if columns == [""]:
                # Nameless single column: the whole chunk is one virtual array.
                chunk = awkwardlib.VirtualArray(parquetfile, (i, ""), cache=cache, type=awkwardlib.type.ArrayType(numrows, parquetfile.type[""]), persistvirtual=persistvirtual)
            else:
                chunk = awkwardlib.Table()
                for n in columns:
                    q = awkwardlib.VirtualArray(parquetfile, (i, n), cache=cache, type=awkwardlib.type.ArrayType(numrows, parquetfile.type[n]), persistvirtual=persistvirtual)
                    chunk.contents[n] = q
            chunks.append(chunk)
            chunksizes.append(numrows)
    return awkwardlib.ChunkedArray(chunks, chunksizes)
| [
"pyarrow.RecordBatch.from_arrays",
"pyarrow.py_buffer",
"pyarrow.Table.from_batches",
"codecs.lookup",
"numpy.dtype",
"json.dumps",
"pyarrow.large_string",
"pyarrow.bool_",
"pyarrow.binary",
"pyarrow.array",
"pyarrow.large_binary",
"pyarrow.parquet.ParquetWriter",
"pyarrow.parquet.ParquetFil... | [((8399, 8434), 'pyarrow.Table.from_batches', 'pyarrow.Table.from_batches', (['batches'], {}), '(batches)\n', (8425, 8434), False, 'import pyarrow\n'), ((21302, 21342), 'pyarrow.parquet.ParquetWriter', 'pyarrow.parquet.ParquetWriter', ([], {}), '(**options)\n', (21331, 21342), False, 'import pyarrow\n'), ((23172, 23276), 'pyarrow.parquet.ParquetFile', 'pyarrow.parquet.ParquetFile', (['self.file'], {'metadata': 'self.metadata', 'common_metadata': 'self.common_metadata'}), '(self.file, metadata=self.metadata,\n common_metadata=self.common_metadata)\n', (23199, 23276), False, 'import pyarrow\n'), ((23837, 23897), 'json.dumps', 'json.dumps', (['[self.file, self.metadata, self.common_metadata]'], {}), '([self.file, self.metadata, self.common_metadata])\n', (23847, 23897), False, 'import json\n'), ((3381, 3410), 'pyarrow.array', 'pyarrow.array', (['obj'], {'mask': 'mask'}), '(obj, mask=mask)\n', (3394, 3410), False, 'import pyarrow\n'), ((21846, 21886), 'pyarrow.parquet.ParquetWriter', 'pyarrow.parquet.ParquetWriter', ([], {}), '(**options)\n', (21875, 21886), False, 'import pyarrow\n'), ((22437, 22477), 'pyarrow.parquet.ParquetWriter', 'pyarrow.parquet.ParquetWriter', ([], {}), '(**options)\n', (22466, 22477), False, 'import pyarrow\n'), ((8338, 8382), 'pyarrow.RecordBatch.from_arrays', 'pyarrow.RecordBatch.from_arrays', (['[arr]', "['']"], {}), "([arr], [''])\n", (8369, 8382), False, 'import pyarrow\n'), ((20844, 20888), 'pyarrow.RecordBatch.from_arrays', 'pyarrow.RecordBatch.from_arrays', (['[out]', "['']"], {}), "([out], [''])\n", (20875, 20888), False, 'import pyarrow\n'), ((2003, 2019), 'pyarrow.string', 'pyarrow.string', ([], {}), '()\n', (2017, 2019), False, 'import pyarrow\n'), ((2165, 2181), 'pyarrow.binary', 'pyarrow.binary', ([], {}), '()\n', (2179, 2181), False, 'import pyarrow\n'), ((2331, 2346), 'pyarrow.bool_', 'pyarrow.bool_', ([], {}), '()\n', (2344, 2346), False, 'import pyarrow\n'), ((15271, 15287), 'pyarrow.string', 
'pyarrow.string', ([], {}), '()\n', (15285, 15287), False, 'import pyarrow\n'), ((15959, 15981), 'pyarrow.large_string', 'pyarrow.large_string', ([], {}), '()\n', (15979, 15981), False, 'import pyarrow\n'), ((5448, 5464), 'pyarrow.binary', 'pyarrow.binary', ([], {}), '()\n', (5462, 5464), False, 'import pyarrow\n'), ((6499, 6523), 'numpy.dtype', 'numpy.dtype', (['numpy.int32'], {}), '(numpy.int32)\n', (6510, 6523), False, 'import numpy\n'), ((6625, 6651), 'pyarrow.py_buffer', 'pyarrow.py_buffer', (['offsets'], {}), '(offsets)\n', (6642, 6651), False, 'import pyarrow\n'), ((6653, 6683), 'pyarrow.py_buffer', 'pyarrow.py_buffer', (['obj.content'], {}), '(obj.content)\n', (6670, 6683), False, 'import pyarrow\n'), ((16658, 16674), 'pyarrow.binary', 'pyarrow.binary', ([], {}), '()\n', (16672, 16674), False, 'import pyarrow\n'), ((17343, 17365), 'pyarrow.large_binary', 'pyarrow.large_binary', ([], {}), '()\n', (17363, 17365), False, 'import pyarrow\n'), ((5897, 5924), 'codecs.lookup', 'codecs.lookup', (['obj.encoding'], {}), '(obj.encoding)\n', (5910, 5924), False, 'import codecs\n'), ((5928, 5950), 'codecs.lookup', 'codecs.lookup', (['"""utf-8"""'], {}), "('utf-8')\n", (5941, 5950), False, 'import codecs\n'), ((18039, 18054), 'pyarrow.bool_', 'pyarrow.bool_', ([], {}), '()\n', (18052, 18054), False, 'import pyarrow\n')] |
import numpy as np
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.manifold import TSNE, SpectralEmbedding
import matplotlib.pyplot as plt
# Visualise learned user embeddings by projecting them to 2-D with t-SNE.
loc_emb_file = 'experiments/randomtests/t_sse_del_2020-06-26_0/models/user_embedding.npy'
u_emb = np.load(loc_emb_file)
print('User embedding size:', str(u_emb.shape))
# Alternative projections kept for reference (PCA / spectral embedding below).
# print('Fitting PCA')
# pca = PCA(n_components=2, svd_solver='full')
# dim_red = pca.fit_transform(u_emb)
tsne = TSNE(n_components=2, init='random', random_state=0, perplexity=50)
dim_red = tsne.fit_transform(u_emb)
# se = SpectralEmbedding(n_components=2, affinity='rbf', random_state=0, gamma=0.01)
# dim_red = se.fit_transform(u_emb)
x = dim_red[:,0]
y = dim_red[:,1]
# Scatter plot of the 2-D projection; show() blocks until the window closes.
plt.plot(x, y, ".", markersize=2)
plt.show()
print('Visualising')
#
# def dump_variances(x):
#     mean = np.mean(x, axis=0)
#     cov = np.cov(x, rowvar=False)
#     var = np.var(x, axis=0)
#     u_emb_stats = {'mean': mean,
#                    'cov': cov,
#                    'var': var}
#     pickle.dump(u_emb_stats, open("u_emb_stats.p", "wb"))
#     print(x)
#
# loc_emb_file = 'experiments/randomtests/t_sse_del_2020-06-26_1/models/user_embedding.npy'
"numpy.load",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"sklearn.manifold.TSNE"
] | [((258, 279), 'numpy.load', 'np.load', (['loc_emb_file'], {}), '(loc_emb_file)\n', (265, 279), True, 'import numpy as np\n'), ((444, 510), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'init': '"""random"""', 'random_state': '(0)', 'perplexity': '(50)'}), "(n_components=2, init='random', random_state=0, perplexity=50)\n", (448, 510), False, 'from sklearn.manifold import TSNE, SpectralEmbedding\n'), ((704, 737), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""."""'], {'markersize': '(2)'}), "(x, y, '.', markersize=2)\n", (712, 737), True, 'import matplotlib.pyplot as plt\n'), ((738, 748), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (746, 748), True, 'import matplotlib.pyplot as plt\n')] |
from moviepy import editor as med
import numpy as np
import cv2
from scipy.io import wavfile
import imageio
import subprocess
from librosa.output import write_wav
class AudioSignal:
def __init__(self, data, sample_rate):
self._data = np.copy(data)
self._sample_rate = sample_rate
@staticmethod
def from_wav_file(wave_file_path):
sample_rate, data = wavfile.read(wave_file_path)
return AudioSignal(data, sample_rate)
@staticmethod
def from_mp4(mp4_path):
aud, _ = separate_streams(mp4_path)
return AudioSignal(aud.ts, aud.fps)
# def save_to_wav_file(self, wave_file_path, sample_type=np.int16):
# self.set_sample_type(sample_type)
# wavfile.write(wave_file_path, self._sample_rate, self._data)
def save_to_wav_file(self, wave_file_path):
write_wav(wave_file_path, self._data, self._sample_rate)
def get_data(self, channel_index=None):
# data shape: (n_samples) or (n_samples, n_channels)
if channel_index is None:
return self._data
if channel_index not in range(self.get_number_of_channels()):
raise IndexError("invalid channel index")
if channel_index == 0 and self.get_number_of_channels() == 1:
return self._data
return self._data[:, channel_index]
def get_number_of_samples(self):
return self._data.shape[0]
def get_number_of_channels(self):
# data shape: (n_samples) or (n_samples, n_channels)
if len(self._data.shape) == 1:
return 1
return self._data.shape[1]
def get_sample_rate(self):
return self._sample_rate
def get_sample_type(self):
return self._data.dtype
def get_format(self):
return dict(n_channels=self.get_number_of_channels(),
sample_rate=self.get_sample_rate())
def get_length_in_seconds(self):
return float(self.get_number_of_samples()) / self.get_sample_rate()
def set_sample_type(self, sample_type):
sample_type_info = np.iinfo(sample_type)
self._data = self._data.clip(sample_type_info.min, sample_type_info.max).astype(sample_type)
def amplify(self, reference_signal):
factor = float(np.abs(reference_signal.get_data()).max()) / np.abs(self._data).max()
new_max_value = self._data.max() * factor
new_min_value = self._data.min() * factor
sample_type_info = np.iinfo(self.get_sample_type())
if new_max_value > sample_type_info.max or new_min_value < sample_type_info.min:
raise Exception("amplified signal exceeds audio format boundaries")
self._data = (self._data.astype(np.float64) * factor).astype(self.get_sample_type())
def amplify_by_factor(self, factor):
self._data = self._data.astype(np.float64)
self._data *= factor
def peak_normalize(self, peak=None):
self._data = self._data.astype(np.float64)
if peak is None:
peak = np.abs(self._data).max()
self._data /= peak
return peak
    def split(self, n_slices):
        """Split into n_slices equal AudioSignal segments.

        np.split with an int requires the sample count to divide evenly
        by n_slices, otherwise it raises.
        """
        return [AudioSignal(s, self._sample_rate) for s in np.split(self._data, n_slices)]
    def slice(self, start_sample_index, end_sample_index):
        """Return a new AudioSignal over [start_sample_index, end_sample_index)."""
        return AudioSignal(self._data[start_sample_index:end_sample_index], self._sample_rate)
def pad_with_zeros(self, new_length):
if self.get_number_of_samples() > new_length:
raise Exception("cannot zero-pad for shorter signal length")
new_shape = list(self._data.shape)
new_shape[0] = new_length
self._data = np.copy(self._data)
foo = np.resize(self._data, new_shape)
self._data = foo
def truncate(self, new_length):
if self.get_number_of_samples() < new_length:
raise Exception("cannot truncate for longer signal length")
self._data = self._data[:new_length]
@staticmethod
def concat(signals):
for signal in signals:
if signal.get_format() != signals[0].get_format():
raise Exception("concating audio signals with different formats is not supported")
data = [signal.get_data() for signal in signals]
return AudioSignal(np.concatenate(data), signals[0].get_sample_rate())
class AudioMixer:
    """Helpers for combining several AudioSignal objects into one."""
    @staticmethod
    def mix(audio_signals, mixing_weights=None):
        """Weighted sum of equally-formatted signals.

        mixing_weights defaults to all ones. Raises if any signal's
        format differs from the first signal's. Returns a new AudioSignal.
        """
        if mixing_weights is None:
            mixing_weights = [1] * len(audio_signals)
        reference_signal = audio_signals[0]
        accumulated = np.zeros(shape=reference_signal.get_data().shape, dtype=np.float64)
        for i, signal in enumerate(audio_signals):
            if signal.get_format() != reference_signal.get_format():
                raise Exception("mixing audio signals with different format is not supported")
            accumulated += float(mixing_weights[i]) * signal.get_data()
        return AudioSignal(accumulated, reference_signal.get_sample_rate())
    @staticmethod
    def snr_factor(signal, noise, snr_db):
        """Factor by which to scale `noise` so the SNR equals snr_db (dB)."""
        clean = signal.get_data()
        noisy = noise.get_data()
        if clean.size != noisy.size:
            raise Exception('signal and noise must have the same length')
        amplitude_ratio = np.sqrt(np.var(clean) / np.var(noisy))
        return amplitude_ratio * 10 ** (-snr_db / 20.0)
class FFMPEG:
    """Thin wrappers around the ffmpeg command-line tool."""
    @staticmethod
    def downsample(input_audio_file_path, output_audio_file_path, sample_rate):
        """Resample an audio file to sample_rate Hz (-y overwrites the output)."""
        subprocess.check_call(["ffmpeg", "-i", input_audio_file_path, "-ar",
                               str(sample_rate), output_audio_file_path, "-y"])
    @staticmethod
    def merge(input_video_file_path, input_audio_file_path, output_video_file_path):
        """Mux the video stream of one file with the audio stream of another."""
        subprocess.check_call(["ffmpeg", '-hide_banner', '-loglevel', 'panic', "-i",
                               input_video_file_path, "-i", input_audio_file_path,
                               "-c:v", "copy", "-map", "0:v:0", "-map", "1:a:0",
                               output_video_file_path])
class VidImgs:
    """Grayscale frame stack extracted from a moviepy video clip."""

    def __init__(self, vid):
        # Convert every frame to single-channel grayscale up front.
        self.imgs = np.array([cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
                              for img in vid.iter_frames()])
        self.fps = vid.fps
        self.dur = vid.duration

    def to_video_clip(self):
        """Rebuild a moviepy clip from the stored grayscale frames."""
        imgs = gray2rgb(self.imgs)
        clips = [med.ImageClip(img).set_duration(1 / self.fps) for img in imgs]
        mov = med.concatenate_videoclips(clips, method='compose')
        mov = mov.set_fps(self.fps)
        return mov

    def write_videofile(self, fname='movie.mp4', *args, **kwargs):
        """Render the stored frames to a video file.

        Bug fix: previously called self.to_videoClip(), a method that does
        not exist (guaranteed AttributeError); the method is named
        to_video_clip().
        """
        self.to_video_clip().write_videofile(fname, self.fps, *args, **kwargs)
class AudTs:
    """Mono audio time series extracted from a moviepy audio clip."""
    def __init__(self, aud):
        # Average the channels into a single mono track.
        self.ts = aud.to_soundarray().mean(axis=1)
        self.fps = aud.fps
        self.dur = aud.duration
class VideoFileReader:
    """Context-manager wrapper around imageio's video reader."""

    def __init__(self, video_file_path):
        self._video_fd = imageio.get_reader(video_file_path)

    def close(self):
        self._video_fd.close()

    def read_all_frames(self, convert_to_gray_scale=False):
        """Load every frame as one uint8 array; optionally convert to gray."""
        frames = [self._video_fd.get_data(i) for i in range(self.get_frame_count())]
        movie = np.array(frames, dtype=np.uint8)
        return rgb2gray(movie) if convert_to_gray_scale else movie

    def read_next_frame(self, convert_to_gray_scale=False):
        """Fetch the next frame from the stream; optionally convert to gray."""
        frame = self._video_fd.get_next_data()
        if convert_to_gray_scale:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        return frame

    def get_frame_rate(self):
        return self._video_fd.get_meta_data()["fps"]

    def get_frame_size(self):
        return self._video_fd.get_meta_data()["size"]

    def get_frame_count(self):
        # count_frames() is used instead of the (previously tried)
        # get_length(); it walks the stream for an exact count.
        return self._video_fd.count_frames()

    def get_frame_width(self):
        return self.get_frame_size()[0]

    def get_frame_height(self):
        return self.get_frame_size()[1]

    def get_format(self):
        """Frame rate and dimensions as a dict."""
        return dict(frame_rate=self.get_frame_rate(),
                    frame_width=self.get_frame_width(),
                    frame_height=self.get_frame_height())

    def __enter__(self):
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        self.close()
class VideoFileWriter:
    """Context-manager wrapper around imageio's video writer."""
    def __init__(self, video_file_path, frame_rate):
        self._video_fd = imageio.get_writer(video_file_path, fps=frame_rate)
    def close(self):
        self._video_fd.close()
    def write_frame(self, frame):
        """Append a single frame to the output video."""
        self._video_fd.append_data(frame)
    def __enter__(self):
        return self
    def __exit__(self, exception_type, exception_value, traceback):
        self.close()
def separate_streams(path_to_mov):
    """
    Separate audio and video streams from media (e.g. mp4)
    Parameters
    ----------
    path_to_mov: str
        Path to audio-video (e.g., mp4)
    Returns
    -------
    aud: audio stream object (moviepy class)
    vid: video stream object (moviepy class)
    """
    with med.VideoFileClip(path_to_mov) as clip:
        audio_stream = AudTs(clip.audio)
        video_stream = VidImgs(clip)
    return audio_stream, video_stream
def to_video_clip(imgs, fps):
    """Build a moviepy clip from grayscale frames at the given frame rate."""
    imgs = gray2rgb(imgs)
    # One single-frame clip per image, each lasting one frame period.
    clips = [med.ImageClip(img).set_duration(1 / fps) for img in imgs]
    mov = med.concatenate_videoclips(clips, method='compose')
    mov = mov.set_fps(fps)
    return mov
def gray2rgb(imgs):
    """Convert a stack of grayscale frames to 3-channel RGB."""
    return np.array([cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB) for frame in imgs])
def rgb2gray(imgs):
    """Convert a stack of RGB frames to single-channel grayscale."""
    return np.array([cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) for frame in imgs])
| [
"numpy.abs",
"numpy.concatenate",
"numpy.copy",
"numpy.resize",
"moviepy.editor.VideoFileClip",
"cv2.cvtColor",
"moviepy.editor.ImageClip",
"numpy.iinfo",
"scipy.io.wavfile.read",
"numpy.split",
"numpy.array",
"moviepy.editor.concatenate_videoclips",
"imageio.get_reader",
"imageio.get_writ... | [((9281, 9332), 'moviepy.editor.concatenate_videoclips', 'med.concatenate_videoclips', (['clips'], {'method': '"""compose"""'}), "(clips, method='compose')\n", (9307, 9332), True, 'from moviepy import editor as med\n'), ((9475, 9489), 'numpy.array', 'np.array', (['imgs'], {}), '(imgs)\n', (9483, 9489), True, 'import numpy as np\n'), ((9590, 9604), 'numpy.array', 'np.array', (['imgs'], {}), '(imgs)\n', (9598, 9604), True, 'import numpy as np\n'), ((248, 261), 'numpy.copy', 'np.copy', (['data'], {}), '(data)\n', (255, 261), True, 'import numpy as np\n'), ((388, 416), 'scipy.io.wavfile.read', 'wavfile.read', (['wave_file_path'], {}), '(wave_file_path)\n', (400, 416), False, 'from scipy.io import wavfile\n'), ((843, 899), 'librosa.output.write_wav', 'write_wav', (['wave_file_path', 'self._data', 'self._sample_rate'], {}), '(wave_file_path, self._data, self._sample_rate)\n', (852, 899), False, 'from librosa.output import write_wav\n'), ((2068, 2089), 'numpy.iinfo', 'np.iinfo', (['sample_type'], {}), '(sample_type)\n', (2076, 2089), True, 'import numpy as np\n'), ((3628, 3647), 'numpy.copy', 'np.copy', (['self._data'], {}), '(self._data)\n', (3635, 3647), True, 'import numpy as np\n'), ((3662, 3694), 'numpy.resize', 'np.resize', (['self._data', 'new_shape'], {}), '(self._data, new_shape)\n', (3671, 3694), True, 'import numpy as np\n'), ((5693, 5904), 'subprocess.check_call', 'subprocess.check_call', (["['ffmpeg', '-hide_banner', '-loglevel', 'panic', '-i',\n input_video_file_path, '-i', input_audio_file_path, '-c:v', 'copy',\n '-map', '0:v:0', '-map', '1:a:0', output_video_file_path]"], {}), "(['ffmpeg', '-hide_banner', '-loglevel', 'panic', '-i',\n input_video_file_path, '-i', input_audio_file_path, '-c:v', 'copy',\n '-map', '0:v:0', '-map', '1:a:0', output_video_file_path])\n", (5714, 5904), False, 'import subprocess\n'), ((6383, 6434), 'moviepy.editor.concatenate_videoclips', 'med.concatenate_videoclips', (['clips'], {'method': 
'"""compose"""'}), "(clips, method='compose')\n", (6409, 6434), True, 'from moviepy import editor as med\n'), ((6881, 6916), 'imageio.get_reader', 'imageio.get_reader', (['video_file_path'], {}), '(video_file_path)\n', (6899, 6916), False, 'import imageio\n'), ((7161, 7190), 'numpy.array', 'np.array', (['mov'], {'dtype': 'np.uint8'}), '(mov, dtype=np.uint8)\n', (7169, 7190), True, 'import numpy as np\n'), ((8371, 8422), 'imageio.get_writer', 'imageio.get_writer', (['video_file_path'], {'fps': 'frame_rate'}), '(video_file_path, fps=frame_rate)\n', (8389, 8422), False, 'import imageio\n'), ((9015, 9045), 'moviepy.editor.VideoFileClip', 'med.VideoFileClip', (['path_to_mov'], {}), '(path_to_mov)\n', (9032, 9045), True, 'from moviepy import editor as med\n'), ((9409, 9446), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_GRAY2RGB'], {}), '(img, cv2.COLOR_GRAY2RGB)\n', (9421, 9446), False, 'import cv2\n'), ((9524, 9561), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (9536, 9561), False, 'import cv2\n'), ((4249, 4269), 'numpy.concatenate', 'np.concatenate', (['data'], {}), '(data)\n', (4263, 4269), True, 'import numpy as np\n'), ((7439, 7478), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (7451, 7478), False, 'import cv2\n'), ((3173, 3203), 'numpy.split', 'np.split', (['self._data', 'n_slices'], {}), '(self._data, n_slices)\n', (3181, 3203), True, 'import numpy as np\n'), ((5219, 5228), 'numpy.var', 'np.var', (['s'], {}), '(s)\n', (5225, 5228), True, 'import numpy as np\n'), ((5231, 5240), 'numpy.var', 'np.var', (['n'], {}), '(n)\n', (5237, 5240), True, 'import numpy as np\n'), ((6066, 6103), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (6078, 6103), False, 'import cv2\n'), ((9213, 9231), 'moviepy.editor.ImageClip', 'med.ImageClip', (['img'], {}), '(img)\n', (9226, 9231), True, 'from moviepy import 
editor as med\n'), ((2301, 2319), 'numpy.abs', 'np.abs', (['self._data'], {}), '(self._data)\n', (2307, 2319), True, 'import numpy as np\n'), ((3010, 3028), 'numpy.abs', 'np.abs', (['self._data'], {}), '(self._data)\n', (3016, 3028), True, 'import numpy as np\n'), ((6306, 6324), 'moviepy.editor.ImageClip', 'med.ImageClip', (['img'], {}), '(img)\n', (6319, 6324), True, 'from moviepy import editor as med\n')] |
import numpy as np
import pytest
from numba import jit
from scipy.stats import norm
from respy.conditional_draws import kalman_update
@jit(nopython=True)
def numpy_array_qr(arr):
    """Apply a QR decomposition to every matrix in a 3d array.

    Returns a stacked array holding only the R factors.
    """
    out = np.zeros_like(arr)
    for idx in range(len(arr)):
        _, r_factor = np.linalg.qr(arr[idx])
        out[idx] = r_factor
    return out
def slow_kalman_update(states, root_covs, measurements, loadings, meas_sd):
    """Make a Kalman update.
    This is a too slow but readable and well tested implementation of a square-root
    Kalman update.
    Params
    ------
    states : np.array
        2d numpy array of shape (nind, nfac) with initial means of the states
    root_covs : np.array
        3d numpy array of shape (nind, nfac, nfac) with lower triangular cholesky
        factors of the state covariance matrix
    measurements : np.array
        1d numpy array of length nind with observed measurements
    loadings : np.array
        1d numpy array of length nfac
    meas_sd : float
        standard deviation of measurement error
    Returns
    -------
    states : np.array
        2d numpy array with updated states
    root_covs : np.array
        3d numpy array with updated covariance matrices
    References
    ----------
    Brown, R. G. and Hwang, P. Y. C. Introduction to Random Signals and
    Applied Kalman Filtering. Wiley and sons, 2012.
    """
    # Work on copies so the caller's arrays are not mutated.
    states = states.copy()
    root_covs = root_covs.copy()
    nobs, nfac = states.shape
    expected_measurements = np.dot(states, loadings)
    residuals = measurements - expected_measurements
    # Project the transposed cholesky factors onto the loadings
    # (one column vector per individual).
    f_stars = np.dot(np.transpose(root_covs, axes=(0, 2, 1)), loadings.reshape(nfac, 1))
    # Assemble the pre-array; the R factor of its QR decomposition holds
    # the updated square roots, gains and residual standard deviations.
    m = np.zeros((nobs, nfac + 1, nfac + 1))
    m[:, 0, 0] = meas_sd
    m[:, 1:, :1] = f_stars
    m[:, 1:, 1:] = np.transpose(root_covs, axes=(0, 2, 1))
    r = numpy_array_qr(m)
    root_covs[:] = np.transpose(r[:, 1:, 1:], axes=(0, 2, 1))
    root_sigmas = r[:, 0, 0]
    kalman_gains = r[:, 0, 1:] / root_sigmas.reshape(nobs, 1)
    states[:] += kalman_gains * residuals.reshape(nobs, 1)
    # Log likelihood of each residual under N(0, sigma), sigma = |r[0, 0]|.
    probs = norm.logpdf(residuals, scale=np.abs(r[:, 0, 0]))
    return states, root_covs, probs
def random_kalman_input(seed):
    """Generate a random test case for the slow and fast Kalman updates.

    Returns a (slow, fast) pair describing the same problem: `slow` is a
    kwargs dict for slow_kalman_update, `fast` the positional args tuple
    for the fast implementation (states, measurements, transposed extended
    cholesky factors, measurement sd, measured-position choices).
    """
    np.random.seed(seed)
    nstates = np.random.choice(range(1, 7))
    nind = np.random.choice(range(1, 30))
    # One-hot loadings: exactly one state is measured.
    loadings = np.zeros(nstates)
    loadings[np.random.choice(range(nstates))] = 1.0
    slow = {
        "states": np.zeros((nind, nstates)),
        "loadings": loadings,
        "measurements": np.random.normal(scale=0.2, size=nind),
        "meas_sd": np.random.uniform(low=0.8, high=1.2),
    }
    root_covs = np.zeros((nind, nstates, nstates))
    n_tril = int(0.5 * nstates * (nstates + 1))
    for i in range(nind):
        chol = np.eye(nstates)
        chol[np.tril_indices(nstates)] = np.random.uniform(
            low=-0.0001, high=0.00001, size=n_tril
        )
        root_covs[i] = chol
    slow["root_covs"] = root_covs
    choice = np.full(nind, np.argmax(loadings)).astype(np.uint16)
    extended_cholcovs_t = np.zeros((nind, nstates + 1, nstates + 1))
    extended_cholcovs_t[:, 1:, 1:] = np.transpose(root_covs, axes=(0, 2, 1))
    fast = (
        slow["states"],
        slow["measurements"],
        extended_cholcovs_t,
        slow["meas_sd"],
        choice,
    )
    return slow, fast
@pytest.mark.parametrize("seed", range(10))
def test_kalman_update(seed):
    """Check the fast kalman_update against the slow reference implementation."""
    slow_input, fast_input = random_kalman_input(seed)
    slow_states, slow_root_covs, slow_probs = slow_kalman_update(**slow_input)
    # kalman_update appears to work in place: the test reads the states and
    # extended cholesky factors out of fast_input after the call.
    fast_probs = kalman_update(*fast_input)
    updated_extended_cholcovs_t = fast_input[2]
    fast_states = fast_input[0]
    fast_root_covs = np.transpose(
        updated_extended_cholcovs_t[:, 1:, 1:], axes=(0, 2, 1)
    )
    # Compare full covariances rather than their cholesky factors, which
    # are only unique up to column signs.
    slow_covs = np.matmul(slow_root_covs, np.transpose(slow_root_covs, axes=(0, 2, 1)))
    fast_covs = np.matmul(fast_root_covs, np.transpose(fast_root_covs, axes=(0, 2, 1)))
    np.testing.assert_array_almost_equal(fast_states, slow_states)
    np.testing.assert_array_almost_equal(fast_probs, slow_probs)
    np.testing.assert_array_almost_equal(fast_covs, slow_covs)
| [
"numpy.tril_indices",
"numpy.random.uniform",
"numpy.zeros_like",
"numpy.random.seed",
"numpy.eye",
"numpy.abs",
"numpy.argmax",
"numpy.linalg.qr",
"numpy.zeros",
"numpy.transpose",
"respy.conditional_draws.kalman_update",
"numba.jit",
"numpy.random.normal",
"numpy.dot",
"numpy.testing.a... | [((138, 156), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (141, 156), False, 'from numba import jit\n'), ((250, 268), 'numpy.zeros_like', 'np.zeros_like', (['arr'], {}), '(arr)\n', (263, 268), True, 'import numpy as np\n'), ((1529, 1553), 'numpy.dot', 'np.dot', (['states', 'loadings'], {}), '(states, loadings)\n', (1535, 1553), True, 'import numpy as np\n'), ((1706, 1742), 'numpy.zeros', 'np.zeros', (['(nobs, nfac + 1, nfac + 1)'], {}), '((nobs, nfac + 1, nfac + 1))\n', (1714, 1742), True, 'import numpy as np\n'), ((1814, 1853), 'numpy.transpose', 'np.transpose', (['root_covs'], {'axes': '(0, 2, 1)'}), '(root_covs, axes=(0, 2, 1))\n', (1826, 1853), True, 'import numpy as np\n'), ((1901, 1943), 'numpy.transpose', 'np.transpose', (['r[:, 1:, 1:]'], {'axes': '(0, 2, 1)'}), '(r[:, 1:, 1:], axes=(0, 2, 1))\n', (1913, 1943), True, 'import numpy as np\n'), ((2232, 2252), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2246, 2252), True, 'import numpy as np\n'), ((2375, 2400), 'numpy.zeros', 'np.zeros', (['(nind, nstates)'], {}), '((nind, nstates))\n', (2383, 2400), True, 'import numpy as np\n'), ((2416, 2433), 'numpy.zeros', 'np.zeros', (['nstates'], {}), '(nstates)\n', (2424, 2433), True, 'import numpy as np\n'), ((2579, 2617), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.2)', 'size': 'nind'}), '(scale=0.2, size=nind)\n', (2595, 2617), True, 'import numpy as np\n'), ((2640, 2676), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.8)', 'high': '(1.2)'}), '(low=0.8, high=1.2)\n', (2657, 2676), True, 'import numpy as np\n'), ((2693, 2727), 'numpy.zeros', 'np.zeros', (['(nind, nstates, nstates)'], {}), '((nind, nstates, nstates))\n', (2701, 2727), True, 'import numpy as np\n'), ((3096, 3138), 'numpy.zeros', 'np.zeros', (['(nind, nstates + 1, nstates + 1)'], {}), '((nind, nstates + 1, nstates + 1))\n', (3104, 3138), True, 'import numpy as np\n'), ((3176, 3215), 'numpy.transpose', 
'np.transpose', (['root_covs'], {'axes': '(0, 2, 1)'}), '(root_covs, axes=(0, 2, 1))\n', (3188, 3215), True, 'import numpy as np\n'), ((3610, 3636), 'respy.conditional_draws.kalman_update', 'kalman_update', (['*fast_input'], {}), '(*fast_input)\n', (3623, 3636), False, 'from respy.conditional_draws import kalman_update\n'), ((3738, 3806), 'numpy.transpose', 'np.transpose', (['updated_extended_cholcovs_t[:, 1:, 1:]'], {'axes': '(0, 2, 1)'}), '(updated_extended_cholcovs_t[:, 1:, 1:], axes=(0, 2, 1))\n', (3750, 3806), True, 'import numpy as np\n'), ((4003, 4065), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['fast_states', 'slow_states'], {}), '(fast_states, slow_states)\n', (4039, 4065), True, 'import numpy as np\n'), ((4070, 4130), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['fast_probs', 'slow_probs'], {}), '(fast_probs, slow_probs)\n', (4106, 4130), True, 'import numpy as np\n'), ((4135, 4193), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['fast_covs', 'slow_covs'], {}), '(fast_covs, slow_covs)\n', (4171, 4193), True, 'import numpy as np\n'), ((330, 350), 'numpy.linalg.qr', 'np.linalg.qr', (['arr[i]'], {}), '(arr[i])\n', (342, 350), True, 'import numpy as np\n'), ((1629, 1668), 'numpy.transpose', 'np.transpose', (['root_covs'], {'axes': '(0, 2, 1)'}), '(root_covs, axes=(0, 2, 1))\n', (1641, 1668), True, 'import numpy as np\n'), ((2771, 2786), 'numpy.eye', 'np.eye', (['nstates'], {}), '(nstates)\n', (2777, 2786), True, 'import numpy as np\n'), ((3864, 3908), 'numpy.transpose', 'np.transpose', (['slow_root_covs'], {'axes': '(0, 2, 1)'}), '(slow_root_covs, axes=(0, 2, 1))\n', (3876, 3908), True, 'import numpy as np\n'), ((3952, 3996), 'numpy.transpose', 'np.transpose', (['fast_root_covs'], {'axes': '(0, 2, 1)'}), '(fast_root_covs, axes=(0, 2, 1))\n', (3964, 3996), True, 'import numpy as np\n'), ((2138, 2156), 'numpy.abs', 'np.abs', (['r[:, 0, 0]'], 
{}), '(r[:, 0, 0])\n', (2144, 2156), True, 'import numpy as np\n'), ((2802, 2826), 'numpy.tril_indices', 'np.tril_indices', (['nstates'], {}), '(nstates)\n', (2817, 2826), True, 'import numpy as np\n'), ((3030, 3049), 'numpy.argmax', 'np.argmax', (['loadings'], {}), '(loadings)\n', (3039, 3049), True, 'import numpy as np\n')] |
import json
import pickle
import random
from collections import Counter, defaultdict
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples
from Levenshtein import distance
def get_many_clusters(skill_nums, average_emb_clust, n_clusters, numclust_its=10):
    """Cluster the same embeddings numclust_its times with different seeds.

    Each iteration runs KMeans with a different random_state; the results
    are returned as one DataFrame indexed by skill_nums with a
    "Cluster set {i}" column per iteration.

    Improvements over the original: the embedding list is built once
    instead of on every iteration, and pd.concat is called a single time
    instead of repeatedly growing the frame inside the loop (quadratic
    copying).
    """
    embeddings = [average_emb_clust[k] for k in skill_nums]
    frames = [pd.DataFrame(index=skill_nums)]
    for i in range(numclust_its):
        clustering = KMeans(n_clusters=n_clusters, random_state=i)
        cluster_num = clustering.fit_predict(embeddings).tolist()
        frames.append(
            pd.DataFrame(cluster_num, index=skill_nums, columns=[f"Cluster set {i}"])
        )
    return pd.concat(frames, axis=1)
def get_consensus_clusters_mappings(consensus_results_df, k):
    """
    consensus_results_df: a dataframe of each skill and the clusters it was assigned
    to with 10 iterations of clustering
    """
    consensus_sets = [
        "".join([str(cc) for cc in row]) for row in consensus_results_df.values.tolist()
    ]
    # set() iteration order is stochastic, so sort for reproducibility.
    unique_sets = sorted(set(consensus_sets))
    # Pairwise Levenshtein distances, e.g. how similar is '1234' to '1235'?
    dist_matrix = [[distance(a, b) for b in unique_sets] for a in unique_sets]
    # Cluster the consensus sets so near-identical assignment histories
    # (e.g. '1234', '1235', '1233') end up in the same group.
    clusterer = KMeans(n_clusters=k, random_state=42)
    labels = clusterer.fit_predict(dist_matrix).tolist()
    set_to_cluster = dict(zip(unique_sets, labels))
    return [set_to_cluster[s] for s in consensus_sets]
def get_top_tf_idf_words(vect, feature_names, top_n=2):
    """Return the top_n feature names with highest tf-idf weight in `vect`.

    `vect` is a single sparse tf-idf row (has .data and .indices).
    From https://stackoverflow.com/questions/34232190/scikit-learn-tfidfvectorizer-how-to-get-top-n-terms-with-highest-tf-idf-score
    """
    descending = np.argsort(vect.data)[::-1][:top_n]
    return feature_names[vect.indices[descending]].tolist()
def get_level_names(sentence_embs, level_col_name, top_n):
    """Name each group of `level_col_name` by its top_n tf-idf terms.

    Concatenates all row descriptions within each group into one document,
    fits a TfidfVectorizer over those documents, and labels each group
    with its highest-scoring terms joined by "-".
    """
    # Merge all the texts within each subsection of this level
    hier_level_texts = []
    level_nums = []
    for level_num, level_data in sentence_embs.groupby(level_col_name):
        hier_level_texts.append(" ".join(level_data["description"].tolist()))
        level_nums.append(level_num)
    vectorizer = TfidfVectorizer()
    vect = vectorizer.fit_transform(hier_level_texts)
    # NOTE(review): get_feature_names() was removed in scikit-learn 1.2 in
    # favour of get_feature_names_out() — confirm the pinned sklearn version.
    feature_names = np.array(vectorizer.get_feature_names())
    level_names = {
        level_num: "-".join(get_top_tf_idf_words(doc_vec, feature_names, top_n=top_n))
        for level_num, doc_vec in zip(level_nums, vect)
    }
    return level_names
def get_new_level(
    sentence_embs,
    previous_level_col,
    k_means_n,
    k_means_max_iter,
    check_low_siloutte=False,
    silhouette_threshold=0,
):
    """Build the next hierarchy level by clustering mean group embeddings.

    Averages the "reduced_points_umap" embeddings within each group of
    `previous_level_col`, clusters those averages via cluster_level_mapper,
    and returns a dict mapping each previous-level ID to its new cluster.
    """
    # Mean sentence embedding for the previous level
    average_emb_dict = dict(
        sentence_embs.groupby(previous_level_col)["reduced_points_umap"].apply(
            lambda x: np.mean(x.tolist(), axis=0).tolist()
        )
    )
    cluster_mapper = cluster_level_mapper(
        average_emb_dict,
        k_means_n=k_means_n,
        k_means_max_iter=k_means_max_iter,
        check_low_siloutte=check_low_siloutte,
        silhouette_threshold=silhouette_threshold,
    )
    return cluster_mapper
def cluster_level_mapper(
    embeddings_dict,
    k_means_n,
    k_means_max_iter=5000,
    check_low_siloutte=False,
    silhouette_threshold=0,
):
    """
    Cluster the embeddings in embeddings_dict values to create a mapper dictionary
    from the embeddings_dict keys to the cluster number.
    e.g. embeddings_dict = {0: [1.23,5.67], 1: [4.56,7.8],...}
    prev2next_map = {0:5, 1:34, ...}
    """
    keys = list(embeddings_dict.keys())
    points = list(embeddings_dict.values())
    model = KMeans(n_clusters=k_means_n, max_iter=k_means_max_iter, random_state=42)
    cluster_num = model.fit_predict(points).tolist()
    if check_low_siloutte:
        # The Silhouette Coefficient measures how well each sample sits
        # within its assigned cluster.
        scores = silhouette_samples(points, cluster_num)
        # Give any badly clustered point its own fresh cluster number.
        next_label = k_means_n
        for ix in np.argwhere(scores < silhouette_threshold).flatten():
            cluster_num[ix] = next_label
            next_label += 1
    return dict(zip(keys, cluster_num))
def get_new_level_consensus(sentence_embs, previous_level_col, k_means_n, numclust_its):
    """Build the next hierarchy level via consensus clustering.

    Averages the "reduced_points_umap" embeddings per previous-level
    group, clusters them numclust_its times with different seeds, and
    merges the runs into a consensus assignment. Returns a dict mapping
    each previous-level ID to its consensus cluster number.
    """
    # Mean sentence embedding for the previous level
    average_emb_dict = dict(
        sentence_embs.groupby(previous_level_col)["reduced_points_umap"].apply(
            lambda x: np.mean(x.tolist(), axis=0).tolist()
        )
    )
    clustering_results = get_many_clusters(
        list(average_emb_dict.keys()),
        list(average_emb_dict.values()),
        n_clusters=k_means_n,
        numclust_its=numclust_its,
    )
    consensus_set_mappings = get_consensus_clusters_mappings(
        clustering_results, k=k_means_n
    )
    cluster_mapper = dict(zip(list(average_emb_dict.keys()), consensus_set_mappings))
    return cluster_mapper
| [
"pandas.DataFrame",
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.cluster.KMeans",
"Levenshtein.distance",
"numpy.argsort",
"numpy.argwhere",
"pandas.concat"
] | [((572, 602), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'skill_nums'}), '(index=skill_nums)\n', (584, 602), True, 'import pandas as pd\n'), ((2035, 2072), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k', 'random_state': '(42)'}), '(n_clusters=k, random_state=42)\n', (2041, 2072), False, 'from sklearn.cluster import KMeans\n'), ((2990, 3007), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {}), '()\n', (3005, 3007), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((4418, 4490), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k_means_n', 'max_iter': 'k_means_max_iter', 'random_state': '(42)'}), '(n_clusters=k_means_n, max_iter=k_means_max_iter, random_state=42)\n', (4424, 4490), False, 'from sklearn.cluster import KMeans\n'), ((658, 703), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_clusters', 'random_state': 'i'}), '(n_clusters=n_clusters, random_state=i)\n', (664, 703), False, 'from sklearn.cluster import KMeans\n'), ((857, 930), 'pandas.DataFrame', 'pd.DataFrame', (['cluster_num'], {'index': 'skill_nums', 'columns': "[f'Cluster set {i}']"}), "(cluster_num, index=skill_nums, columns=[f'Cluster set {i}'])\n", (869, 930), True, 'import pandas as pd\n'), ((982, 1045), 'pandas.concat', 'pd.concat', (['[clustering_results, new_clustering_results]'], {'axis': '(1)'}), '([clustering_results, new_clustering_results], axis=1)\n', (991, 1045), True, 'import pandas as pd\n'), ((2511, 2532), 'numpy.argsort', 'np.argsort', (['vect.data'], {}), '(vect.data)\n', (2521, 2532), True, 'import numpy as np\n'), ((1766, 1788), 'Levenshtein.distance', 'distance', (['set_1', 'set_2'], {}), '(set_1, set_2)\n', (1774, 1788), False, 'from Levenshtein import distance\n'), ((4980, 5036), 'numpy.argwhere', 'np.argwhere', (['(silhouette_samples_n < silhouette_threshold)'], {}), '(silhouette_samples_n < silhouette_threshold)\n', (4991, 5036), True, 'import numpy as np\n')] |
"""
Licensed Materials - Property of IBM
Restricted Materials of IBM
20190891
© Copyright IBM Corp. 2021 All Rights Reserved.
"""
import logging
import numpy as np
from ibmfl.data.data_handler import DataHandler
from ibmfl.util.datasets import load_mnist
logger = logging.getLogger(__name__)
class MnistPytorchDataHandler(DataHandler):
    """Data handler serving MNIST in PyTorch's NCHW layout.

    Loads either a local npz file (key 'npz_file' in data_config) or the
    bundled MNIST download, then reshapes images to (N, 1, 28, 28) float32
    and casts labels to int64 as expected by torch loss functions.
    """

    def __init__(self, data_config=None):
        super().__init__()
        self.file_name = None
        if data_config is not None and 'npz_file' in data_config:
            self.file_name = data_config['npz_file']
        # load the datasets
        (self.x_train, self.y_train), (self.x_test, self.y_test) = self.load_dataset()
        # pre-process the datasets
        self.preprocess()

    def get_data(self):
        """
        Gets pre-process mnist training and testing data.
        :return: training data
        :rtype: `tuple`
        """
        return (self.x_train, self.y_train), (self.x_test, self.y_test)

    def load_dataset(self, nb_points=500):
        """
        Loads the training and testing datasets from a given local path.
        If no local path is provided, it will download the original MNIST \
        dataset online, and reduce the dataset size to contain \
        500 data points per training and testing dataset.
        Because this method
        is for testing it takes as input the number of datapoints, nb_points,
        to be included in the training and testing set.
        :param nb_points: Number of data points to be included in each set if
        no local dataset is provided.
        :type nb_points: `int`
        :return: training and testing datasets
        :rtype: `tuple`
        """
        if self.file_name is None:
            (x_train, y_train), (x_test, y_test) = load_mnist()
            # Reduce the dataset to keep tests fast.
            x_train = x_train[:nb_points]
            y_train = y_train[:nb_points]
            x_test = x_test[:nb_points]
            y_test = y_test[:nb_points]
        else:
            try:
                logger.info('Loaded training data from %s', self.file_name)
                data_train = np.load(self.file_name)
                x_train = data_train['x_train']
                y_train = data_train['y_train']
                x_test = data_train['x_test']
                y_test = data_train['y_test']
            except Exception:
                raise IOError('Unable to load training data from path '
                              'provided in config file: ' +
                              self.file_name)
        return (x_train, y_train), (x_test, y_test)

    def preprocess(self):
        """
        Preprocesses the training and testing dataset: \
        reshape the images to (N, 1, 28, 28) float32 (channels-first, as \
        PyTorch expects) and cast the labels to int64.
        :return: None
        """
        img_rows, img_cols = 28, 28
        self.x_train = self.x_train.astype('float32').reshape(
            self.x_train.shape[0], 1, img_rows, img_cols)
        self.x_test = self.x_test.astype('float32').reshape(
            self.x_test.shape[0], 1, img_rows, img_cols)
        # Consistency fix: report through the module logger instead of print().
        logger.info('%d train samples', self.x_train.shape[0])
        logger.info('%d test samples', self.x_test.shape[0])
        self.y_train = self.y_train.astype('int64')
        self.y_test = self.y_test.astype('int64')
        logger.info('y_train shape: %s', self.y_train.shape)
        logger.info('%d train samples', self.y_train.shape[0])
        logger.info('%d test samples', self.y_test.shape[0])
| [
"ibmfl.util.datasets.load_mnist",
"numpy.load",
"logging.getLogger"
] | [((265, 292), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (282, 292), False, 'import logging\n'), ((1792, 1804), 'ibmfl.util.datasets.load_mnist', 'load_mnist', ([], {}), '()\n', (1802, 1804), False, 'from ibmfl.util.datasets import load_mnist\n'), ((2109, 2132), 'numpy.load', 'np.load', (['self.file_name'], {}), '(self.file_name)\n', (2116, 2132), True, 'import numpy as np\n')] |
# from meshStats import readMeshStats
import sys
import os
import numpy as np
import math
import scipy.integrate as integrate
# from scipy import integrate as I
import matplotlib.pyplot as plt
import os
from printInflow import printInflowSurface
def readMeshStats():
    """Run OpenFOAM's checkMesh and parse its 'Mesh stats' section.

    Returns a dict mapping each stat name (internal spaces replaced by
    underscores) to its integer count.
    Raises RuntimeError if the output contains no 'Mesh stats' section
    (the original looped forever here, because readline() returns '' at
    EOF and the search condition never became true).
    The temporary log file is always removed, now via the portable
    os.remove() instead of os.system("rm ...").
    """
    # Run checkMesh, dump output into a log file.
    os.system("checkMesh > log.checkMesh")
    mesh_stats = {}
    try:
        with open('log.checkMesh', 'r') as f:
            line = f.readline()
            # Find mesh stats; bail out at EOF instead of spinning forever.
            while 'Mesh stats' not in line:
                line = f.readline()
                if line == '':
                    raise RuntimeError(
                        "no 'Mesh stats' section in checkMesh output")
            # The nine lines after the header are "name: count" pairs.
            for _ in range(9):
                entry = f.readline().split(":")
                key = entry[0].strip().replace(' ', '_')
                mesh_stats[key] = int(entry[1].strip())
    finally:
        if os.path.exists('log.checkMesh'):
            os.remove('log.checkMesh')
    return mesh_stats
def readNfaces(patch):
    """Return the nFaces count for `patch` from constant/polyMesh/boundary.

    Scans the boundary dictionary for the line naming the patch, reads
    three more lines, and parses the integer value from the last of them.
    NOTE(review): this assumes the standard OpenFOAM boundary layout in
    which the "nFaces ...;" entry sits exactly three lines below the
    patch name — confirm against the meshes in use.

    Cleanup: removed a redundant f.close() inside the `with` block and an
    unreachable trailing `return` after the function's return statement.
    """
    path = 'constant/polyMesh/boundary'
    with open(path, 'r') as f:
        # Advance to the line containing the patch name.
        line = f.readline()
        while patch not in line:
            line = f.readline()
        # Skip two lines; the third holds the nFaces entry.
        line = f.readline()
        line = f.readline()
        line = f.readline()
    # The value is the last whitespace token, minus the trailing ";\n".
    return int(line.split(' ', 14)[-1][0:-2])
def readFaceLabels(patch):
    """Read the face label IDs for `patch` from constant/polyMesh/sets/<patch>.

    The patch's face count comes from readNfaces(). Returns a dict mapping
    each label (as a string) to the same label as an int.
    """
    # Read labels to all faces in the mesh
    path = 'constant/polyMesh/sets/' + patch
    nFaces = readNfaces(patch)
    face_labels = {}
    with open(path, 'r') as f:
        # Look for line containing nFaces (the set file's size header).
        line = f.readline()
        while str(nFaces) not in line:
            line = f.readline()
        # Skip a line (presumably the opening "(" of the label list).
        line = f.readline()
        # Read the labeled IDs for each face in the patch.
        for i in range(nFaces):
            label = f.readline().split('\n')[0]
            face_labels[label] = int(label)
    f.close()
    return face_labels
def readFacePoints(face_labels, mesh_stats):
    """Map each face label in the patch to its list of point labels.

    Reads every face definition line from constant/polyMesh/faces into a
    dict keyed by face index, then keeps only the faces whose labels
    appear in face_labels. Each value is the list of point-label strings
    between the parentheses of the face definition,
    e.g. "4(1 2 3 4)" -> ['1', '2', '3', '4'].
    """
    # Read labels for all points in the mesh.
    path = 'constant/polyMesh/faces'
    face_dict = {}
    face_points = {}
    with open(path, 'r') as f:
        line = f.readline()
        # Advance to the line holding the total face count.
        while str(mesh_stats['faces']) not in line:
            line = f.readline()
        # Skip a line (presumably the opening "(" of the face list).
        f.readline()
        # Save all points in mesh to a dictionary.
        for i in range(mesh_stats['faces']):
            face_dict[str(i)] = f.readline()
    # Use created dict to save labels for patch of interest to a seperate dict.
    for face in face_labels:
        face_points[str(face)] = face_dict[str(face)].split("(")[1].split(")")[0].split(' ')
    # Return faces for patch of interest.
    return face_points
def readPointLabels(tNFaces):
    """Read the point IDs of every face in the whole mesh.

    Parses 'constant/polyMesh/faces' (``tNFaces`` is the total face count
    of the mesh) and returns a dict mapping each face index (str) to the
    list of its point ID strings.

    Fixes: removed dead commented-out debug code.
    """
    path = 'constant/polyMesh/faces'
    point_labels = {}
    with open(path, 'r') as f:
        line = f.readline()
        while str(tNFaces) not in line:
            line = f.readline()
        # Skip the opening '(' line.
        line = f.readline()
        for i in range(tNFaces):
            line = f.readline()
            # Parse 'N(p0 p1 ... pN-1)' to isolate the point IDs.
            line = line.split('(')
            line = line[1].split(' ')
            line[-1] = line[-1].split(')')[0]
            point_labels[str(i)] = line
    # Return a dict that maps each label ID (str) to its point IDs.
    return point_labels
def processPointCoordinates(nPoints):
    """Read the coordinates of every point in the mesh.

    Parses 'constant/polyMesh/points' and returns a dict mapping each
    point ID (str) to its [x, y, z] coordinate strings.
    """
    path = 'constant/polyMesh/points'
    point_cooordinates = {}
    with open(path, 'r') as f:
        # Skip the header up to the total point count line.
        header = f.readline()
        while str(nPoints) not in header:
            header = f.readline()
        # Skip the opening '(' line.
        f.readline()
        for idx in range(nPoints):
            # Each line looks like '(x y z)'.
            entry = f.readline().strip("(").strip('\n').strip(")").split(" ")
            point_cooordinates[str(idx)] = entry
    return point_cooordinates
def processCentroids(face_labels, point_labels, face_points, point_cooordinates):
    """Compute the centroid of every face in the patch of interest.

    ``point_labels`` is unused but kept for interface compatibility.
    Returns a dict mapping face ID label (str) to its centroid vector.
    """
    centroid = {}
    for face_id in face_labels:
        key = str(face_id)
        centroid[key] = calculateCentroid(key, face_points, point_cooordinates)
    return centroid
def calculateCentroid(face, face_points, point_cooordinates):
    """Calculate and return the centroid [x, y, z] of the given face.

    Averages the coordinates of all points belonging to the face.

    Fixes: removed leftover debug ``print``; divides by the actual number
    of points instead of the hard-coded 3.0 (which was only correct for
    triangular faces — identical result for triangles).
    """
    x_c = 0.0
    y_c = 0.0
    z_c = 0.0
    n_points = len(face_points[face])
    for i in range(n_points):
        coordinate = str(face_points[face][i])
        x_c += float(point_cooordinates[coordinate][0])
        y_c += float(point_cooordinates[coordinate][1])
        z_c += float(point_cooordinates[coordinate][2])
    return [x_c / n_points, y_c / n_points, z_c / n_points]
def calculateNorm(face, point_labels, face_points, point_cooordinates):
    """Return the (non-normalized) normal vector of a face.

    Builds two edge vectors from the first three points of the face and
    returns their cross product B x A as a numpy array.
    """
    # Gather the position vectors of the face's points.
    face_vectors = [
        point_cooordinates[str(face_points[face][i])]
        for i in range(len(point_labels[face]))
    ]
    p0 = [float(c) for c in face_vectors[0]]
    p1 = [float(c) for c in face_vectors[1]]
    p2 = [float(c) for c in face_vectors[2]]
    # Relative position (edge) vectors spanning the face.
    A = np.array([p0[0] - p1[0], p0[1] - p1[1], p0[2] - p1[2]])
    B = np.array([p0[0] - p2[0], p0[1] - p2[1], p0[2] - p2[2]])
    return np.cross(B, A)
def processNorms(face_labels, point_labels, face_points, point_cooordinates):
    """Compute the normal vector of every face in the patch.

    Thin wrapper around :func:`calculateNorm`; returns a dict mapping
    face ID label (str) to the normal vector.
    """
    norm = {}
    for face_id in face_labels:
        key = str(face_id)
        norm[key] = calculateNorm(key, point_labels, face_points, point_cooordinates)
    return norm
def graphNorm(face_norms, face_centroids):
    """Plot the face normal vectors as a 3D quiver at the face centroids."""
    # Convert dict data to plain coordinate lists.
    centroids = [face_centroids[str(face)] for face in face_centroids]
    norms = [face_norms[face] for face in face_norms]
    x_c = [point[0] for point in centroids]
    y_c = [point[1] for point in centroids]
    z_c = [point[2] for point in centroids]
    x_n = [point[0] for point in norms]
    y_n = [point[1] for point in norms]
    z_n = [point[2] for point in norms]
    # Quiver plot: arrows anchored at centroids, oriented along normals.
    ax = plt.figure().add_subplot(projection='3d')
    ax.quiver(x_c, y_c, z_c, x_n, y_n, z_n, length=0.1, normalize=True)
    ax.set_xlim([0.0, 1.0])
    plt.show()
def graphCentroid(face_centroids):
    """Scatter-plot the face centroids in 3D."""
    centroids = [face_centroids[str(face)] for face in face_centroids]
    x_c = [point[0] for point in centroids]
    y_c = [point[1] for point in centroids]
    z_c = [point[2] for point in centroids]
    plt.figure()
    ax = plt.axes(projection='3d')
    ax.scatter3D(x_c, y_c, z_c)
    ax.set_xlim([0.0, 1.0])
    plt.show()
def findSymmetryTheta(face_centroids):
    """Symmetry angle theta (folded into the first quadrant of the y-z
    plane) for every face centroid.

    Fixes: uses ``math.atan2(abs(y), abs(z))`` so a centroid with z == 0
    yields pi/2 instead of raising ZeroDivisionError; identical to the
    original ``atan(abs(y/z))`` for z != 0.
    """
    theta = {}
    for face in face_centroids:
        y = float(face_centroids[face][1])
        z = float(face_centroids[face][2])
        theta[face] = math.atan2(abs(y), abs(z))
    return theta
def findSymmetryPhi(face_centroids):
    """Symmetry angle phi (folded into the first quadrant of the x-y
    plane) for every face centroid.

    Fixes: uses ``math.atan2(abs(y), abs(x))`` so a centroid with x == 0
    yields pi/2 instead of raising ZeroDivisionError; identical to the
    original ``atan(abs(y/x))`` for x != 0.
    """
    phi = {}
    for face in face_centroids:
        y = float(face_centroids[face][1])
        x = float(face_centroids[face][0])
        phi[face] = math.atan2(abs(y), abs(x))
    return phi
def graphSymmetryTheta(face_centroids, face_thetas):
    """Scatter-plot the face centroids in 3D, colored by their symmetry
    angle theta.

    Fixes: removed the leftover debug loop that printed one blank line
    per theta value and the unused ``max_theta`` local.
    """
    centroids = [face_centroids[str(face)] for face in face_centroids]
    thetas = [face_thetas[str(face)] for face in face_thetas]
    x_c = [point[0] for point in centroids]
    y_c = [point[1] for point in centroids]
    z_c = [point[2] for point in centroids]
    plt.figure()
    ax = plt.axes(projection='3d')
    ax.scatter3D(x_c, y_c, z_c, c=thetas)
    ax.set_xlim([0.0, 1.0])
    plt.show()
def processMagnitudes(face_labels, face_centroids):
    """Distance of each face centroid from the origin.

    ``face_labels`` is unused but kept for interface compatibility.
    Returns a dict mapping face ID to |r| of the centroid.
    """
    magnitudes = {}
    for face_id, r in face_centroids.items():
        magnitudes[face_id] = np.sqrt(r[0] ** 2 + r[1] ** 2 + r[2] ** 2)
    return magnitudes
def calculateLimitingTheta(ga):
    """Limiting plume angle [rad] for specific-heat ratio ``ga``."""
    half_pi = 0.5 * math.pi
    return half_pi * (np.sqrt((ga + 1) / (ga - 1)) - 1)
def calculateLimitingVelo(ga, To, m):
    """Limiting (maximum) velocity of a gas expanding into vacuum.

    ga: specific-heat ratio, To: stagnation temperature [K],
    m: molecular mass [kg].
    """
    k_B = 1.3806e-23  # Boltzmann constant [J/K]
    numerator = 2 * ga * k_B * To
    denominator = (ga - 1) * m
    return np.sqrt(numerator / denominator)
def angularDependenceA(ga, theta):
    """Axisymmetric angular fall-off f(theta) of the plume density."""
    theta_l = calculateLimitingTheta(ga)
    exponent = (ga + 0.41) / (ga - 1)
    return np.cos(0.5 * math.pi * theta / theta_l) ** exponent
def angularDependence(ga, theta, phi):
    """Combined angular fall-off f(theta) * f(phi) of the plume density.

    Fixes: removed the leftover debug ``print(type(phi))``.
    """
    theta_l = calculateLimitingTheta(ga)
    pi = math.pi
    f_theta = np.cos(0.5 * pi * theta / theta_l) ** ((ga + 0.41) / (ga - 1))
    f_phi = np.cos(0.5 * pi * phi / theta_l) ** ((ga + 0.41) / (ga - 1))
    return f_theta * f_phi
def f(x):
    """Integrand sin(x) * f_A(x) used for the normalization coefficient."""
    return angularDependenceA(1.4, x) * np.sin(x)
def calculateNormCoeff(ga, theta_l):
    """Normalization coefficient A of the angular plume distribution.

    Numerically integrates f(theta) = sin(theta) * f_A(theta) over
    [0, theta_l] and normalizes so the total flux matches the analytic
    source-flow model.

    Fixes: removed four leftover debug ``print`` calls.
    """
    theta = np.linspace(0, theta_l, 500)
    y = f(theta)
    denom = integrate.trapezoid(y, theta)
    return (0.5 * np.sqrt((ga - 1) / (ga + 1))) / denom
def calculateRhoN(face_data, physical_props):
    """Number density [1/m^3] at a face from the plume source-flow model.

    face_data: [centroid, normal, theta, phi] of one face.
    physical_props: currently NOT consulted — the constants below are
    hard-coded for N2 (TODO: read ga/To/m/Po from physical_props, as the
    commented-out lines in the original intended).

    Fixes: removed leftover debug ``print`` calls, the unused ``pi``
    local, and dead commented-out ``return`` statements.
    """
    # Hard-coded nitrogen gas properties.
    ga = 1.4                    # specific-heat ratio
    To = 300                    # stagnation temperature [K]
    Mwt = 28.0134               # molar weight [g/mol]
    Na = 6.023e23               # Avogadro's number
    Mmass = Mwt * 1.66605e-27   # molecular mass [kg]
    theta = face_data[2]
    phi = face_data[3]
    r_e = 0.8255e-3 / 2.0       # nozzle exit radius [m]
    r = 0.5                     # distance from the source [m]
    theta_l = calculateLimitingTheta(ga)
    vel_l = calculateLimitingVelo(ga, To, Mmass)
    Po = 475 * 6894.75729       # stagnation pressure [Pa] (475 psi)
    A = calculateNormCoeff(ga, theta_l)
    f1 = (2 * A * Po) / (vel_l**2)
    f2 = (2 / (ga + 1)) ** (1 / (ga - 1))
    f3 = (r_e / r) ** 2
    f4 = angularDependence(ga, theta, phi)
    # Convert mass density [kg/m^3] to number density [1/m^3].
    conv = (Na * 1000) / Mwt
    return f1 * f2 * f3 * f4 * conv
def calculateU(face_data, physical_props):
    """Velocity vector [Ux, Uy, Uz] at a face.

    The speed is the limiting velocity (hard-coded N2 properties;
    ``physical_props`` is not consulted) and the direction follows the
    spherical angles (theta, phi) stored in ``face_data``.
    """
    theta = face_data[2]
    phi = face_data[3]
    # Hard-coded nitrogen properties.
    To = 300
    Mwt = 28.0134
    Mmass = Mwt * 1.66605e-27
    ga = 1.4
    v_l = calculateLimitingVelo(ga, To, Mmass)
    sin_theta = np.sin(theta)
    Ux = v_l * np.cos(phi) * sin_theta
    Uy = v_l * np.sin(phi) * sin_theta
    Uz = v_l * np.cos(theta)
    return [Ux, Uy, Uz]
def calculateT(face_data, physical_props):
    """Boundary temperature [K]; currently constant, independent of the
    face data and physical properties."""
    boundary_temperature = 300
    return boundary_temperature
def processSourceFlowModel(mesh_data, physical_props):
    """Evaluate rhoN, U and T for every face of the inflow patch.

    mesh_data: [face_centroids, face_norms, face_thetas, face_phis].
    Returns [face_rhoN, face_U, face_T], each a dict keyed by face ID.
    """
    face_centroids, face_norms, face_thetas, face_phis = mesh_data
    face_rhoN = {}
    face_U = {}
    face_T = {}
    for face_id in face_thetas:
        data = [
            face_centroids[face_id],
            face_norms[face_id],
            face_thetas[face_id],
            face_phis[face_id],
        ]
        face_rhoN[face_id] = calculateRhoN(data, physical_props)
        face_U[face_id] = calculateU(data, physical_props)
        face_T[face_id] = calculateT(data, physical_props)
    return [face_rhoN, face_U, face_T]
def plumeSourceFlowModel():
    """Drive the full plume source-flow pipeline.

    Parses the OpenFOAM polyMesh files for the 'inflow' patch, derives
    the geometric quantities (centroids, magnitudes, normals, symmetry
    angles), evaluates the source-flow model and writes the boundary
    fields via printInflowSurface.
    """
    # --- parse polyMesh files for the needed mesh information ---
    mesh_stats = readMeshStats()
    face_labels = readFaceLabels('inflow')
    face_points = readFacePoints(face_labels, mesh_stats)
    point_labels = readPointLabels(mesh_stats['faces'])
    point_cooordinates = processPointCoordinates(mesh_stats['points'])
    # --- derive the mesh properties the source-flow model needs ---
    face_centroids = processCentroids(face_labels, point_labels, face_points, point_cooordinates)
    face_magnitudes = processMagnitudes(face_labels, face_centroids)
    face_norms = processNorms(face_labels, point_labels, face_points, point_cooordinates)
    face_thetas = findSymmetryTheta(face_centroids)
    face_phis = findSymmetryPhi(face_centroids)
    mesh_data = [face_centroids, face_norms, face_thetas, face_phis]
    # --- physical properties for the plume source-flow model ---
    physical_props = {}
    physical_props['ga'] = 1.67
    physical_props['To'] = 300       # K
    physical_props['m'] = 6.63e-26   # kg / M
    physical_props['Po'] = 34500     # 5 psi in Pa
    inflow = processSourceFlowModel(mesh_data, physical_props)
    # Print boundaryT, boundaryU, and boundaryRhoN
    printInflowSurface(inflow)
plumeSourceFlowModel() | [
"matplotlib.pyplot.show",
"matplotlib.pyplot.axes",
"numpy.cross",
"os.system",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.array",
"numpy.linspace",
"numpy.cos",
"scipy.integrate.trapezoid",
"printInflow.printInflowSurface",
"numpy.sqrt"
] | [((323, 361), 'os.system', 'os.system', (['"""checkMesh > log.checkMesh"""'], {}), "('checkMesh > log.checkMesh')\n", (332, 361), False, 'import os\n'), ((963, 992), 'os.system', 'os.system', (['"""rm log.checkMesh"""'], {}), "('rm log.checkMesh')\n", (972, 992), False, 'import os\n'), ((6977, 6999), 'numpy.array', 'np.array', (['[Ax, Ay, Az]'], {}), '([Ax, Ay, Az])\n', (6985, 6999), True, 'import numpy as np\n'), ((7008, 7030), 'numpy.array', 'np.array', (['[Bx, By, Bz]'], {}), '([Bx, By, Bz])\n', (7016, 7030), True, 'import numpy as np\n'), ((7042, 7056), 'numpy.cross', 'np.cross', (['B', 'A'], {}), '(B, A)\n', (7050, 7056), True, 'import numpy as np\n'), ((8182, 8192), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8190, 8192), True, 'import matplotlib.pyplot as plt\n'), ((8612, 8624), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8622, 8624), True, 'import matplotlib.pyplot as plt\n'), ((8634, 8659), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (8642, 8659), True, 'import matplotlib.pyplot as plt\n'), ((8724, 8734), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8732, 8734), True, 'import matplotlib.pyplot as plt\n'), ((10002, 10014), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10012, 10014), True, 'import matplotlib.pyplot as plt\n'), ((10024, 10049), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (10032, 10049), True, 'import matplotlib.pyplot as plt\n'), ((10126, 10136), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10134, 10136), True, 'import matplotlib.pyplot as plt\n'), ((10662, 10703), 'numpy.sqrt', 'np.sqrt', (['(2 * ga * k * To / ((ga - 1) * m))'], {}), '(2 * ga * k * To / ((ga - 1) * m))\n', (10669, 10703), True, 'import numpy as np\n'), ((11305, 11333), 'numpy.linspace', 'np.linspace', (['(0)', 'theta_l', '(500)'], {}), '(0, theta_l, 500)\n', (11316, 11333), True, 'import numpy as 
np\n'), ((11434, 11463), 'scipy.integrate.trapezoid', 'integrate.trapezoid', (['y', 'theta'], {}), '(y, theta)\n', (11453, 11463), True, 'import scipy.integrate as integrate\n'), ((15888, 15914), 'printInflow.printInflowSurface', 'printInflowSurface', (['inflow'], {}), '(inflow)\n', (15906, 15914), False, 'from printInflow import printInflowSurface\n'), ((10383, 10416), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2 + z ** 2)'], {}), '(x ** 2 + y ** 2 + z ** 2)\n', (10390, 10416), True, 'import numpy as np\n'), ((10803, 10837), 'numpy.cos', 'np.cos', (['(0.5 * pi * theta / theta_l)'], {}), '(0.5 * pi * theta / theta_l)\n', (10809, 10837), True, 'import numpy as np\n'), ((10977, 11011), 'numpy.cos', 'np.cos', (['(0.5 * pi * theta / theta_l)'], {}), '(0.5 * pi * theta / theta_l)\n', (10983, 11011), True, 'import numpy as np\n'), ((11067, 11099), 'numpy.cos', 'np.cos', (['(0.5 * pi * phi / theta_l)'], {}), '(0.5 * pi * phi / theta_l)\n', (11073, 11099), True, 'import numpy as np\n'), ((11171, 11180), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (11177, 11180), True, 'import numpy as np\n'), ((12761, 12774), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (12767, 12774), True, 'import numpy as np\n'), ((12804, 12817), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (12810, 12817), True, 'import numpy as np\n'), ((12833, 12846), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (12839, 12846), True, 'import numpy as np\n'), ((8030, 8042), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8040, 8042), True, 'import matplotlib.pyplot as plt\n'), ((10506, 10534), 'numpy.sqrt', 'np.sqrt', (['((ga + 1) / (ga - 1))'], {}), '((ga + 1) / (ga - 1))\n', (10513, 10534), True, 'import numpy as np\n'), ((11508, 11536), 'numpy.sqrt', 'np.sqrt', (['((ga - 1) / (ga + 1))'], {}), '((ga - 1) / (ga + 1))\n', (11515, 11536), True, 'import numpy as np\n'), ((12747, 12758), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (12753, 12758), True, 'import numpy as np\n'), 
((12790, 12801), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (12796, 12801), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import itertools
from typing import Optional, Union
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, clone
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler, PolynomialFeatures, StandardScaler
from sklearn.utils.validation import check_is_fitted, check_scalar
from datafold.dynfold.base import TransformType, TSCTransformerMixin
from datafold.pcfold import MultiquadricKernel, PCManifold, TSCDataFrame
from datafold.pcfold.kernels import PCManifoldKernel
from datafold.pcfold.timeseries.collection import TSCException
class TSCFeaturePreprocess(BaseEstimator, TSCTransformerMixin):
    """Wrapper of a scikit-learn preprocess algorithms to allow time series
    collections as input and output.

    Often scikit-learn performs "pandas.DataFrame in -> numpy.ndarray out". This wrapper
    makes sure to have "pandas.DataFrame in -> pandas.DataFrame out".

    Parameters
    ----------
    sklearn_transformer
        See `here <https://scikit-learn.org/stable/modules/classes.html#module-sklearn.preprocessing>`_
        for a list of possible preprocessing algorithms.
    """

    _cls_valid_scale_names = ("min-max", "standard")

    # flag from scikit-learn -- need to set that check_estimator is valid
    _required_parameters = ["sklearn_transformer"]

    def __init__(self, sklearn_transformer):
        self.sklearn_transformer = sklearn_transformer

    @classmethod
    def from_name(cls, name: str) -> "TSCFeaturePreprocess":
        """Select common transform algorithms by name.

        Parameters
        ----------
        name
            - "center" -:class:`sklearn.preprocessing.StandardScaler`
            - "min-max" - :class:`sklearn.preprocessing.MinMaxScaler`
            - "standard" - :class:`sklearn.preprocessing.StandardScaler`

        Returns
        -------
        TSCFeaturePreprocess
            new instance
        """
        if name == "center":
            # "center" removes the mean but performs no scaling.
            return cls(StandardScaler(copy=True, with_mean=True, with_std=False))
        if name == "min-max":
            return cls(MinMaxScaler(feature_range=(0, 1), copy=True))
        elif name == "standard":
            return cls(StandardScaler(copy=True, with_mean=True, with_std=True))
        else:
            raise ValueError(
                f"name='{name}' is not known. Choose from {cls._cls_valid_scale_names}"
            )

    def fit(self, X: TransformType, y=None, **fit_params) -> "TSCFeaturePreprocess":
        """Calls fit of internal transform ``sklearn`` object.

        Parameters
        ----------
        X: TSCDataFrame, pandas.DataFrame, numpy.ndarray
            Training data of shape `(n_samples, n_features)`.

        y: None
            ignored

        **fit_params: Dict[str, object]
            `None`

        Returns
        -------
        TSCFeaturePreprocess
            self
        """
        if not hasattr(self.sklearn_transformer, "transform"):
            raise AttributeError("sklearn object has no 'transform' attribute")

        X = self._validate_datafold_data(X)
        self._setup_feature_attrs_fit(X, features_out="like_features_in")
        self._read_fit_params(attrs=None, fit_params=fit_params)

        # Clone so that repeated fit() calls never mutate the user-provided
        # estimator (scikit-learn convention: fitted state gets a trailing "_").
        self.sklearn_transformer_fit_ = clone(
            estimator=self.sklearn_transformer, safe=True
        )
        X_intern = self._X_to_numpy(X)
        self.sklearn_transformer_fit_.fit(X_intern)
        return self

    def transform(self, X: TransformType):
        """Calls transform of internal transform ``sklearn`` object.

        Parameters
        ----------
        X: TSCDataFrame, pandas.DataFrame, numpy.ndarray
            Data to transform of shape `(n_samples, n_features)`.

        Returns
        -------
        TSCDataFrame, pandas.DataFrame, numpy.ndarray
            same type and shape as `X`
        """
        check_is_fitted(self, "sklearn_transformer_fit_")
        X = self._validate_datafold_data(X)
        self._validate_feature_input(X, direction="transform")

        X_intern = self._X_to_numpy(X)
        values = self.sklearn_transformer_fit_.transform(X_intern)
        # Restore the original container type (frame index/columns) on the
        # numpy output of the wrapped estimator.
        return self._same_type_X(
            X=X, values=values, feature_names=self.feature_names_out_
        )

    def fit_transform(self, X: TransformType, y=None, **fit_params):
        """Calls fit_transform of internal transform ``sklearn`` object..

        Parameters
        ----------
        X: TSCDataFrame, pandas.DataFrame, numpy.ndarray
            Training data to transform of shape `(n_samples, n_features)`.

        y: None
            ignored

        Returns
        -------
        TSCDataFrame, pandas.DataFrame, numpy.ndarray
            same type and shape as `X`
        """
        X = self._validate_datafold_data(X)
        self._setup_feature_attrs_fit(X, features_out="like_features_in")

        self.sklearn_transformer_fit_ = clone(self.sklearn_transformer)
        # NOTE(review): unlike fit/transform, X is passed directly (not via
        # _X_to_numpy) -- confirm this asymmetry is intended.
        values = self.sklearn_transformer_fit_.fit_transform(X)
        return self._same_type_X(
            X=X, values=values, feature_names=self.feature_names_out_
        )

    def inverse_transform(self, X: TransformType):
        """Calls `inverse_transform` of internal transform ``sklearn`` object.

        Parameters
        ----------
        X: TSCDataFrame, pandas.DataFrame, numpy.ndarray
            Data to map back of shape `(n_samples, n_features)`.

        Returns
        -------
        TSCDataFrame, pandas.DataFrame, numpy.ndarray
            same type and shape as `X`
        """
        if not hasattr(self.sklearn_transformer, "inverse_transform"):
            raise AttributeError("sklearn object has no 'inverse_transform' attribute")

        X_intern = self._X_to_numpy(X)
        values = self.sklearn_transformer_fit_.inverse_transform(X_intern)
        return self._same_type_X(
            X=X, values=values, feature_names=self.feature_names_in_
        )
class TSCIdentity(BaseEstimator, TSCTransformerMixin):
    """Transformer as a "passthrough" placeholder and/or attaching a constant feature.

    Parameters
    ----------
    include_const
        If True, a constant (all ones) column is attached to the data.

    rename_features
        If True, to each feature name the suffix "_id" is attached after `transform`.

    Attributes
    ----------
    is_fit_ : bool
        True if fit has been called.
    """

    def __init__(self, *, include_const: bool = False, rename_features: bool = False):
        self.include_const = include_const
        self.rename_features = rename_features

    def fit(self, X: TransformType, y=None, **fit_params):
        """Passthrough data and set internals for validation.

        Parameters
        ----------
        X: TSCDataFrame, pandas.DataFrame, numpy.ndarray
            Data of shape `(n_samples, n_features)`.

        y: None
            ignored

        **fit_params: Dict[str, object]
            None

        Returns
        -------
        TSCIdentity
            self
        """
        X = self._validate_datafold_data(X)
        self._read_fit_params(attrs=None, fit_params=fit_params)

        if self._has_feature_names(X):
            if self.rename_features:
                features_out = np.asarray([f"{col}_id" for col in X.columns])
            else:
                features_out = X.columns
            if self.include_const:
                features_out = np.append(features_out, ["const"])
        else:
            # NOTE(review): for plain numpy input the "const" column is not
            # reflected in features_out -- confirm downstream validation copes.
            features_out = "like_features_in"

        self._setup_feature_attrs_fit(X, features_out=features_out)

        # Dummy attribute to indicate that fit was called
        self.is_fit_ = True
        return self

    def transform(self, X: TransformType) -> TransformType:
        """Passthrough data and validate feature.

        Parameters
        ----------
        X: TSCDataFrame, pandas.DataFrame, numpy.ndarray
            Data of shape `(n_samples, n_features)` to passthrough.

        Returns
        -------
        TSCDataFrame, pandas.DataFrame, numpy.ndarray
            same type and shape as `X`
        """
        check_is_fitted(self, "is_fit_")
        X = self._validate_datafold_data(X)
        self._validate_feature_input(X, direction="transform")

        if self._has_feature_names(X):
            X = X.copy(deep=True)
            if self.rename_features:
                X = X.add_suffix("_id")
            if self.include_const:
                X["const"] = 1
        else:
            if self.include_const:
                # column_stack also copies, so the caller's array stays intact.
                X = np.column_stack([X, np.ones(X.shape[0])])

        # Need to copy to not alter the original data
        return X

    def inverse_transform(self, X: TransformType):
        """Passthrough data and validate features shape.

        Parameters
        ----------
        X: TSCDataFrame, pandas.DataFrame, numpy.ndarray
            Data to passthrough of shape `(n_samples, n_features)`.

        Returns
        -------
        TSCDataFrame, pandas.DataFrame, numpy.ndarray
            same type and shape as `X`
        """
        check_is_fitted(self, "is_fit_")
        X = self._validate_datafold_data(X)
        self._validate_feature_input(X, direction="inverse_transform")

        if self.include_const:
            X = X.copy(deep=True)
            if self._has_feature_names(X):
                X = X.drop("const", axis=1)
            else:
                # const is always the last column for array input.
                X = X[:, :-1]
        return X
class TSCPrincipalComponent(PCA, TSCTransformerMixin):
    """Compute principal components from data.

    This is a subclass of scikit-learn's ``PCA`` to generalize the
    input and output of :class:`pandas.DataFrames` and :class:`.TSCDataFrame`. All input
    parameters remain the same. For documentation please visit:

    * `PCA docu <https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html>`_
    * `PCA user guide <https://scikit-learn.org/stable/modules/decomposition.html#pca>`_
    """

    def fit(self, X: TransformType, y=None, **fit_params) -> "PCA":
        """Compute the principal components from training data.

        Parameters
        ----------
        X: TSCDataFrame, pandas.DataFrame, numpy.ndarray
            Training data of shape `(n_samples, n_features)`.

        y: None
            ignored

        **fit_params: Dict[str, object]
            None

        Returns
        -------
        TSCPrincipalComponent
            self
        """
        X = self._validate_datafold_data(X)
        self._read_fit_params(attrs=None, fit_params=fit_params)

        # validation happens here:
        super(TSCPrincipalComponent, self).fit(self._X_to_numpy(X), y=y)
        # Output feature names are pca0, pca1, ... up to the fitted
        # n_components_.
        self._setup_feature_attrs_fit(
            X, features_out=[f"pca{i}" for i in range(self.n_components_)]
        )
        return self

    def transform(self, X: TransformType):
        """Apply dimension reduction by projecting the data on principal components.

        Parameters
        ----------
        X: TSCDataFrame, pandas.DataFrame, numpy.ndarray
            Out-of-sample points of shape `(n_samples, n_features)` to perform dimension
            reduction on.

        Returns
        -------
        TSCDataFrame, pandas.DataFrame, numpy.ndarray
            same type as `X` of shape `(n_samples, n_components_)`
        """
        check_is_fitted(self)
        X = self._validate_datafold_data(X)
        self._validate_feature_input(X, direction="transform")

        pca_data = super(TSCPrincipalComponent, self).transform(self._X_to_numpy(X))
        return self._same_type_X(
            X, values=pca_data, feature_names=self.feature_names_out_
        )

    def fit_transform(self, X: TransformType, y=None, **fit_params) -> TransformType:
        """Compute principal components from data and reduce dimension on same data.

        Parameters
        ----------
        X: TSCDataFrame, pandas.DataFrame, numpy.ndarray
            Training data of shape `(n_samples, n_features)`.

        y: None
            ignored

        Returns
        -------
        TSCDataFrame, pandas.DataFrame, numpy.ndarray
            same type as `X` of shape `(n_samples, n_components_)`
        """
        X = self._validate_datafold_data(X)

        pca_values = super(TSCPrincipalComponent, self).fit_transform(
            self._X_to_numpy(X), y=y
        )
        self._setup_feature_attrs_fit(
            X, features_out=[f"pca{i}" for i in range(self.n_components_)]
        )
        return self._same_type_X(
            X, values=pca_values, feature_names=self.feature_names_out_
        )

    def inverse_transform(self, X: TransformType):
        """Map data from the reduced space back to the original space.

        Parameters
        ----------
        X:
            Out-of-sample points of shape `(n_samples, n_components_)` to map back to
            original space.

        Returns
        -------
        TSCDataFrame, pandas.DataFrame, numpy.ndarray
            same type as `X` of shape `(n_samples, n_features)`
        """
        self._validate_feature_input(X, direction="inverse_transform")

        X_intern = self._X_to_numpy(X)
        data_orig_space = super(TSCPrincipalComponent, self).inverse_transform(X_intern)
        return self._same_type_X(
            X, values=data_orig_space, feature_names=self.feature_names_in_
        )
class TSCTakensEmbedding(BaseEstimator, TSCTransformerMixin):
r"""Perform Takens time delay embedding on time series collection data.
Parameters
----------
delays
Number for time delays to embed.
lag
Number of time steps to lag before embedding starts.
frequency
Time step frequency to emebd (e.g. to embed every sample or only every second
or third).
kappa
Weight of exponential factor in delayed coordinates
:math:`e^{-d \cdot \kappa}(x_{-d})` with :math:`d = 0, \ldots delays` being the
delay index. Adapted from :cite:`berry_time-scale_2013`, Eq. 2.1).
Attributes
----------
delay_indices_ : numpy.ndarray
Delay indices (backwards in time) assuming a fixed time delta in the time series.
min_timesteps_: int
Minimum required time steps for each time series to have a single embedding
vector.
delta_time_fit_
Time delta measured during model fit. This is primarily used to check that
`transform` or `inverse_transform` data still have the same time delta for
consistency.
References
----------
* Original paper from Takens :cite:`takens_detecting_1981`
* time delay embedding in the context of Koopman operator, e.g.
:cite:`champion_discovery_2019` or :cite:`arbabi_ergodic_2017`
"""
def __init__(
self, delays: int = 10, *, lag: int = 0, frequency: int = 1, kappa: float = 0
):
self.lag = lag
self.delays = delays
self.frequency = frequency
self.kappa = kappa
def _validate_parameter(self):
check_scalar(
self.lag, name="lag", target_type=(int, np.integer), min_val=0, max_val=None
)
# TODO also allow 0 delays? This would only "passthrough",
# but makes it is easier in pipelines etc.
check_scalar(
self.delays,
name="delays",
target_type=(int, np.integer),
min_val=1,
max_val=None,
)
check_scalar(
self.frequency,
name="delays",
target_type=(int, np.integer),
min_val=1,
max_val=None,
)
check_scalar(
self.kappa,
name="kappa",
target_type=(int, np.integer, float, np.floating),
min_val=0.0,
max_val=None,
)
if self.frequency > 1 and self.delays <= 1:
raise ValueError(
f"If frequency (={self.frequency} is larger than 1, "
f"then number for delays (={self.delays}) has to be larger "
"than 1)."
)
def _setup_delay_indices_array(self):
# zero delay (original data) is not contained as an index
# This makes it easier to just delay through the indices (instead of computing
# the indices during the delay.
return self.lag + (
np.arange(1, (self.delays * self.frequency) + 1, self.frequency)
)
def _columns_to_type_str(self, X):
# in case the column in not string it is important to transform it here to
# string. Otherwise, There are mixed types (e.g. int and str), because the
# delayed columns are all strings to indicate the delay number.
X.columns = X.columns.astype(np.str_)
return X
def _expand_all_delay_columns(self, cols):
def expand():
delayed_columns = list()
for delay_idx in self.delay_indices_:
# rename columns: [column_name]:d[delay_index]
_cur_delay_columns = [f"{col}:d{delay_idx}" for col in cols.astype(str)]
delayed_columns.append(_cur_delay_columns)
return delayed_columns
# the name of the original indices is not changed, therefore append the delay
# indices to
columns_names = cols.tolist() + list(itertools.chain(*expand()))
return pd.Index(
columns_names,
dtype=np.str_,
copy=False,
name=TSCDataFrame.tsc_feature_col_name,
)
def fit(self, X: TSCDataFrame, y=None, **fit_params) -> "TSCTakensEmbedding":
"""Compute delay indices based on settings and validate input with setting.
Parameters
----------
X: TSCDataFrame, pandas.DataFrame
Time series collection to validate for time delay embedding.
y: None
ignored
Returns
-------
TSCTakensEmbedding
self
Raises
------
TSCException
Time series collection requirements in `X`: (1) time delta must be constant
(2) all time series must have the minimum number of time samples to obtain
one sample in the time delay embedding.
"""
self._validate_parameter()
self._read_fit_params(attrs=None, fit_params=fit_params)
self.delay_indices_ = self._setup_delay_indices_array()
self.min_timesteps_ = max(self.delay_indices_) + 1
X = self._validate_datafold_data(
X,
tsc_kwargs={
"ensure_const_delta_time": True,
"ensure_min_timesteps": self.min_timesteps_,
},
ensure_tsc=True,
)
X = self._columns_to_type_str(X)
# save delta time during fit to check that time series collections in
# transform have the same delta time
self.delta_time_fit_ = X.delta_time
features_out = self._expand_all_delay_columns(X.columns)
self._setup_feature_attrs_fit(X, features_out=features_out)
return self
def transform(self, X: TSCDataFrame) -> TSCDataFrame:
"""Perform Takens time delay embedding for each time series in the collection.
Parameters
----------
X: TSCDataFrame, pandas.DataFrame
Time series collection.
Returns
-------
TSCDataFrame
Each time series is shortend by the number of samples required for the
delays. The type can fall back to `pandas.DataFrame` if the result is not
not a valid :class:`.TSCDataFrame` anymore (this is a typical scenario for
time series initial conditions).
Raises
------
TSCException
Time series collection requirements in `X`: (1) time delta must be constant
(2) all time series must have the minimum number of time samples to obtain
one sample in the time delay embedding.
"""
X = self._validate_datafold_data(
X,
tsc_kwargs={
# must be same time delta as during fit
"ensure_delta_time": self.delta_time_fit_,
"ensure_min_timesteps": self.min_timesteps_,
},
ensure_tsc=True,
)
X = self._columns_to_type_str(X)
self._validate_feature_input(X, direction="transform")
#################################
### Implementation using pandas by using shift()
### This implementation is better readable, and is for many cases similarly
# fast to the numpy version (below), but has a performance drop for
# high-dimensions (dim>500)
# id_groupby = X.groupby(TSCDataFrame.IDX_ID_NAME)
# concat_dfs = [X]
#
# for delay_idx in self.delay_indices_:
# shifted_data = id_groupby.shift(delay_idx, fill_value=np.nan)
# shifted_data = shifted_data.add_suffix(f":d{delay_idx}")
# concat_dfs.append(shifted_data)
#
# X = pd.concat(concat_dfs, axis=1)
# if self.fillin_handle == "remove":
# # _TODO: use pandas.dropna()
# bool_idx = np.logical_not(np.sum(pd.isnull(X), axis=1).astype(np.bool))
# X = X.loc[bool_idx]
# Implementation using numpy functions.
# pre-allocate list
delayed_timeseries = [pd.DataFrame([])] * len(X.ids)
max_delay = max(self.delay_indices_)
if self.kappa > 0:
# only the delayed coordinates are multiplied with the exp factor
kappa_vec = np.exp(-self.kappa * np.arange(1, self.delays + 1))
# the np.repeat assumes the following pattern:
# (a,b), (a:d1, b:d1), (a:d2, b:d2), ...
kappa_vec = np.repeat(kappa_vec, self.n_features_in_)
else:
kappa_vec = None
for idx, (_, df) in enumerate(X.groupby(TSCDataFrame.tsc_id_idx_name)):
# use time series numpy block
time_series_numpy = df.to_numpy()
# max_delay determines the earliest sample that has no fill-in
original_data = time_series_numpy[max_delay:, :]
# select the data (row_wise) for each delay block
# in last iteration "max_delay - delay == 0"
delayed_data = np.hstack(
[
time_series_numpy[max_delay - delay : -delay, :]
for delay in self.delay_indices_
]
)
if self.kappa > 0:
delayed_data = delayed_data.astype(float)
delayed_data *= kappa_vec
# go back to DataFrame, and adapt the index by excluding removed indices
df = pd.DataFrame(
np.hstack([original_data, delayed_data]),
index=df.index[max_delay:],
columns=self.feature_names_out_,
)
delayed_timeseries[idx] = df
X = TSCDataFrame(pd.concat(delayed_timeseries, axis=0))
return X
def inverse_transform(self, X: TransformType) -> TransformType:
    """Drop the time-delayed feature columns of an embedded time series
    collection, keeping only the original (non-delayed) features.

    Parameters
    ----------
    X: TSCDataFrame, pandas.DataFrame
        Time delayed data of shape `(n_samples, n_features_embedded)`

    Returns
    -------
    TSCDataFrame, pandas.DataFrame
        same type as `X` of shape `(n_samples, n_features_original)`
    """
    check_is_fitted(self)
    validated = self._validate_datafold_data(X, ensure_tsc=True)
    self._validate_feature_input(validated, direction="inverse_transform")
    # The original (pre-embedding) feature names recorded during fit are a
    # subset of the embedded columns; selecting them undoes the embedding.
    original_columns = self.feature_names_in_
    return validated.loc[:, original_columns]
class TSCRadialBasis(BaseEstimator, TSCTransformerMixin):
    """Represent data in coefficients of radial basis functions.

    Parameters
    ----------
    kernel
        Radial basis kernel to compute the coefficients with. Defaults to
        :code:`MultiquadricKernel(epsilon=1.0)`.
    center_type
        Selection of what to take as centers during fit.

        * `all_data` - all data points during fit are used as centers
        * `fit_params` - set the center points with keyword arguments during fit
        * `initial_condition` - take the initial condition states as centers.
          Note for this option the data `X` during fit must be of
          type :class:`.TSCDataFrame`.
    exact_distance
        An inexact distance computation increases computational performance at the cost of
        numerical inaccuracies (~1e-7 for Euclidean distance, and ~1e-14 for squared
        Euclidean distance).

    Attributes
    ----------
    centers_: numpy.ndarray
        The center points of the radial basis functions.
    inv_coeff_matrix_: numpy.ndarray
        Matrix to map radial basis coefficients to original space. Computation is
        delayed until `inverse_transform` is called for the first time.
    """

    # Closed set of valid values for the ``center_type`` parameter.
    _cls_valid_center_types = ["all_data", "fit_params", "initial_condition"]

    def __init__(
        self,
        kernel: Optional[PCManifoldKernel] = None,
        *,  # keyword-only
        center_type: str = "all_data",
        exact_distance=True,
    ):
        self.kernel = kernel
        self.center_type = center_type
        self.exact_distance = exact_distance

    def _validate_center_type(self, center_type):
        # Fail early with an informative error for unknown center_type values.
        if center_type not in self._cls_valid_center_types:
            raise ValueError(
                f"center_type={center_type} not valid. Choose from "
                f"{self._cls_valid_center_types} "
            )

    def _get_default_kernel(self):
        # Fallback kernel used when the user passes kernel=None.
        return MultiquadricKernel(epsilon=1.0)

    def fit(self, X: TransformType, y=None, **fit_params) -> "TSCRadialBasis":
        """Set the point centers of the radial basis functions.

        Parameters
        ----------
        X: TSCDataFrame, pandas.DataFrame, numpy.ndarray
            Data of shape (n_centers, n_features) to extract point centers from. Must be
            of type :class:`TSCDataFrame` if center type is `initial_condition`.
        y: None
            ignored
        **fit_params: Dict[str, object]
            centers: numpy.ndarray
                Points where the radial basis functions are centered.
                `center_type="fit_params"` must be set during initialization.

        Returns
        -------
        TSCRadialBasis
            self
        """
        X = self._validate_datafold_data(X)
        self._validate_center_type(center_type=self.center_type)
        _centers = self._read_fit_params(
            attrs=[("centers", None)], fit_params=fit_params
        )
        # Resolve the centers according to the configured center_type; each
        # branch validates that the user input is consistent with the choice.
        if self.center_type == "all_data":
            if _centers is not None:
                raise ValueError("center points were passed but center_type='all_data'")
            self.centers_ = self._X_to_numpy(X)
        elif self.center_type == "fit_params":
            if _centers is None:
                raise ValueError("The center points were not provided in 'fit_params'.")
            try:
                self.centers_ = np.asarray(_centers).astype(float)
            except TypeError:
                raise TypeError(
                    "centers were not passed to fit_params or not array-like."
                )
            if self.centers_.ndim != 2 or self.centers_.shape[1] != X.shape[1]:
                raise ValueError(
                    "The center points must be a matrix with same point "
                    "dimension than 'X'."
                )
        elif self.center_type == "initial_condition":
            if not isinstance(X, TSCDataFrame):
                raise TypeError("'X' must be of type TSCDataFrame.")
            self.centers_ = X.initial_states().to_numpy()
        else:
            # unreachable if _validate_center_type is kept in sync with the list
            raise RuntimeError(
                "center_type was not checked correctly. Please report bug."
            )
        set_kernel = (
            self.kernel if self.kernel is not None else self._get_default_kernel()
        )
        # Wrap the centers in a PCManifold so kernel matrices can be computed
        # directly against them in transform/fit_transform.
        self.centers_ = PCManifold(
            self.centers_,
            kernel=set_kernel,
            dist_kwargs=dict(backend="brute", exact_numeric=self.exact_distance),
        )
        n_centers = self.centers_.shape[0]
        self._setup_feature_attrs_fit(X, [f"rbf{i}" for i in range(n_centers)])
        return self

    def transform(self, X: TransformType) -> TransformType:
        """Transform data to radial basis functions coefficients.

        Parameters
        ----------
        X: TSCDataFrame, pandas.DataFrame, numpy.ndarray
            Data of shape `(n_samples, n_features)`.

        Returns
        -------
        TSCDataFrame, pandas.DataFrame, numpy.ndarray
            same type as `X` of shape `(n_samples, n_centers)`
        """
        check_is_fitted(self, attributes=["centers_"])
        X = self._validate_datafold_data(X)
        self._validate_feature_input(X, direction="transform")
        X_intern = self._X_to_numpy(X)
        # Kernel evaluation between the fitted centers and the query points.
        rbf_coeff = self.centers_.compute_kernel_matrix(Y=X_intern)
        return self._same_type_X(
            X, values=rbf_coeff, feature_names=self.feature_names_out_
        )

    def fit_transform(self, X, y=None, **fit_params):
        """Set the data as centers and transform to radial basis coefficients.

        Parameters
        ----------
        X: TSCDataFrame, pandas.DataFrame, numpy.ndarray
            Radial basis center points and data to transform of shape \
            `(n_samples, n_features)`
        y: None
            ignored

        Returns
        -------
        TSCDataFrame, pandas.DataFrame, numpy.ndarray
            same type as `X` of shape `(n_samples, n_centers)`
        """
        self.fit(X, **fit_params)
        X_intern = self._X_to_numpy(X)
        self._validate_center_type(center_type=self.center_type)
        if self.center_type == "all_data":
            # compute pdist distance matrix, which is often more efficient
            rbf_coeff = self.centers_.compute_kernel_matrix()
        else:  # self.center_type in ["initial_condition", "fit_params"]:
            rbf_coeff = self.centers_.compute_kernel_matrix(Y=X_intern)
        # import matplotlib.pyplot as plt; plt.matshow(rbf_coeff)
        return self._same_type_X(
            X=X, values=rbf_coeff, feature_names=self.feature_names_out_
        )

    def inverse_transform(self, X: TransformType):
        """Transform radial basis coefficients back to the original function values.

        Parameters
        ----------
        X: TSCDataFrame, pandas.DataFrame, numpy.ndarray
            Coefficient representation of the radial basis functions of shape \
            `(n_samples, n_center)`.

        Returns
        -------
        TSCDataFrame, pandas.DataFrame, numpy.ndarray
            same type as `X` of shape `(n_samples, n_features)`
        """
        self._validate_feature_input(X, direction="inverse_transform")
        if self._has_feature_names(X):
            rbf_coeff = X.to_numpy()
        else:
            rbf_coeff = X
        if not hasattr(self, "inv_coeff_matrix_"):
            # Lazily compute and cache the least-squares map from coefficient
            # space back to the centers' original coordinates.
            center_kernel = self.centers_.compute_kernel_matrix()
            self.inv_coeff_matrix_ = np.linalg.lstsq(
                center_kernel, self.centers_, rcond=None
            )[0]
        X_inverse = rbf_coeff @ self.inv_coeff_matrix_
        return self._same_type_X(
            X, values=X_inverse, feature_names=self.feature_names_in_
        )
class TSCPolynomialFeatures(PolynomialFeatures, TSCTransformerMixin):
    """Compute polynomial features from data.

    This is a subclass of ``PolynomialFeatures`` from scikit-learn to generalize the
    input and output of :class:`pandas.DataFrames` and :class:`.TSCDataFrame`.

    This class adds the parameter `include_first_order` to choose whether to include the
    identity states. For all other parameters please visit the super class
    documentation of
    `PolynomialFeatures <https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html>`_.
    """

    def __init__(
        self,
        degree: int = 2,
        *,  # keyword-only
        interaction_only: bool = False,
        include_bias: bool = False,
        include_first_order=False,
    ):
        self.include_first_order = include_first_order
        super(TSCPolynomialFeatures, self).__init__(
            degree=degree,
            interaction_only=interaction_only,
            include_bias=include_bias,
            order="C",
        )

    @property
    def powers_(self):
        # Override the super class property: when the identity (first-order)
        # states are excluded, drop all rows whose exponents sum to exactly 1.
        powers = super(TSCPolynomialFeatures, self).powers_
        if self.include_first_order:
            return powers
        else:
            return powers[powers.sum(axis=1) != 1, :]

    def _get_poly_feature_names(self, X, input_features=None):
        # Note: get_feature_names function is already provided by super class
        if self._has_feature_names(X):
            feature_names = self.get_feature_names(
                input_features=X.columns.astype(np.str_)
            )
        else:
            feature_names = self.get_feature_names()
        return feature_names

    def _non_id_state_mask(self):
        # Boolean mask over the *full* power matrix selecting all non-identity
        # polynomial terms (exponent sum != 1).
        powers = super(TSCPolynomialFeatures, self).powers_
        return powers.sum(axis=1) != 1

    def fit(self, X: TransformType, y=None, **fit_params) -> "TSCPolynomialFeatures":
        """Compute number of output features.

        Parameters
        ----------
        X: TSCDataFrame, pandas.DataFrame, numpy.ndarray
            Data of shape `(n_samples, n_features)`.
        y: None
            ignored
        **fit_params: Dict[str, object]
            None

        Returns
        -------
        TSCPolynomialFeatures
            self
        """
        X = self._validate_datafold_data(X)
        self._read_fit_params(attrs=None, fit_params=fit_params)
        super(TSCPolynomialFeatures, self).fit(X, y=y)
        self._setup_feature_attrs_fit(
            X,
            features_out=self._get_poly_feature_names(X),
        )
        return self

    def transform(self, X: TransformType) -> TransformType:
        """Transform data to polynomial features.

        Parameters
        ----------
        X: TSCDataFrame, pandas.DataFrame, numpy.ndarray
            The data of shape `(n_samples, n_features)` to transform.

        Returns
        -------
        TSCDataFrame, pandas.DataFrame, numpy.ndarray
            Transformed data of shape `(n_samples, n_polynomials)` and with same type
            as `X`.
        """
        check_is_fitted(self)
        X = self._validate_datafold_data(X)
        self._validate_feature_input(X, direction="transform")
        poly_data = super(TSCPolynomialFeatures, self).transform(X)
        if not self.include_first_order:
            # remove the identity-state columns from the super class output
            poly_data = poly_data[:, self._non_id_state_mask()]
        poly_data = self._same_type_X(
            X, values=poly_data, feature_names=self._get_poly_feature_names(X)
        )
        return poly_data
class TSCApplyLambdas(BaseEstimator, TSCTransformerMixin):
    """Transform data in an element-by-element fashion with lambda functions.

    Each function is called on every column in the data (i.e. the number of samples
    remains the same).

    Two examples using a Python lambda expression and a NumPy's
    `ufunc <https://numpy.org/devdocs/reference/ufuncs.html>`_:

    .. code-block:: python

        TSCApplyLambdas(lambdas=[lambda x: x**3])
        TSCApplyLambdas(lambdas=[np.sin, np.cos])

    Parameters
    ----------
    lambdas
        List of `lambda` or `ufunc` functions (`ufunc` should not be reducing the data).
        Each column `X_col` is passed to the function and the returned `X_transformed`
        data must be of the same shape as `X_col`, i.e.
        :code:`X_transformed = func(X_col)`
    """

    def __init__(self, lambdas):
        self.lambdas = lambdas

    def _not_implemented_numpy_arrays(self, X):
        # This transformer relies on pandas column handling, so plain numpy
        # input is explicitly rejected.
        if isinstance(X, np.ndarray):
            raise NotImplementedError(
                "Currently not implemented for numpy.ndarray. If this is required please "
                "open an issue on Gitlab."
            )

    def fit(self, X: TransformType, y=None, **fit_params) -> "TSCApplyLambdas":
        """Set internal feature information.

        Parameters
        ----------
        X: TSCDataFrame, pandas.DataFrame, numpy.ndarray
            Training data.
        y: None
            ignored
        **fit_params: Dict[str, object]
            None

        Returns
        -------
        TSCApplyLambdas
            self
        """
        self._not_implemented_numpy_arrays(X)
        X = self._validate_datafold_data(X, ensure_tsc=True)
        self._read_fit_params(attrs=None, fit_params=fit_params)
        # One output column per (feature, lambda) combination, named
        # "<feature>_lambda<i>".
        features_out = [
            f"{feature_name}_lambda{i}"
            for feature_name in X.columns
            for i in range(len(self.lambdas))
        ]
        self._setup_feature_attrs_fit(X, features_out=features_out)
        return self

    def transform(self, X: TransformType) -> TransformType:
        """Transform data with specified lambda functions.

        Parameters
        ----------
        X: TSCDataFrame, pandas.DataFrame, numpy.ndarray
            Data of shape `(n_samples, n_features)` to transform.

        Returns
        -------
        TSCDataFrame, pandas.DataFrame, numpy.ndarray
            The transformed data of same type as `X` and of shape
            `(n_samples, n_lambdas * n_features)`
        """
        self._not_implemented_numpy_arrays(X)
        check_is_fitted(self)
        X = self._validate_datafold_data(X, ensure_tsc=True)
        self._validate_feature_input(X, direction="transform")
        lambdas_applied = list()
        for i, _lambda in enumerate(self.lambdas):
            # apply each function column-wise; raw=True passes numpy arrays
            lambda_result = X.apply(func=_lambda, axis=0, raw=True)
            lambda_result.columns = pd.Index(
                [f"{feature_name}_lambda{i}" for feature_name in X.columns]
            )
            lambdas_applied.append(lambda_result)
        X_transformed = pd.concat(lambdas_applied, axis=1)
        X_transformed.columns.name = TSCDataFrame.tsc_feature_col_name
        if isinstance(X, TSCDataFrame):
            return TSCDataFrame(X_transformed)
        else:
            return X_transformed
class TSCFiniteDifference(BaseEstimator, TSCTransformerMixin):
    """Compute time derivative with finite difference scheme.

    .. note::
        The class internally uses the Python package findiff, which currently is
        optional in *datafold*. The class raises an `ImportError` if findiff is not
        installed.

    Parameters
    ----------
    spacing: Union[str, float]
        The time difference between samples. If "dt" (str) then the time sampling
        frequency of a :meth:`.TSCDataFrame.delta_time` is used during fit.
    diff_order
        The derivative order.
    accuracy
        The convergence order of the finite difference scheme.

    Attributes
    ----------
    spacing_
        The resolved time difference between samples. Equals the parameter
        input if it was of type :class`float`.

    See Also
    --------
    `findiff documentation <https://findiff.readthedocs.io/en/latest/>`_
    """

    def __init__(
        self,
        *,  # keyword-only
        spacing: Union[str, float] = "dt",
        diff_order: int = 1,
        accuracy: int = 2,
    ):
        self.spacing = spacing
        self.diff_order = diff_order
        self.accuracy = accuracy

    def fit(self, X: TransformType, y=None, **fit_params) -> "TSCFiniteDifference":
        """Set and validate time spacing between samples.

        Parameters
        ----------
        X: TSCDataFrame, pandas.DataFrame, numpy.ndarray
            Data of shape `(n_samples, n_features)`.
        y: None
            ignored
        **fit_params: Dict[str, object]
            None

        Returns
        -------
        TSCFiniteDifference
            self

        Raises
        ------
        TSCException
            If time series data has not a constant time delta or the input `X` has not
            the same value as specified in `spacing` during initialization.
        """
        # Only enforce the delta-time check here when a numeric spacing was
        # given; "dt" is resolved from the data further below.
        X = self._validate_datafold_data(
            X,
            ensure_tsc=False,
            tsc_kwargs=dict(
                ensure_delta_time=self.spacing
                if isinstance(self.spacing, float)
                else None
            ),
        )
        self._read_fit_params(attrs=None, fit_params=fit_params)
        if self._has_feature_names(X):
            features_out = [f"{col}_dot" for col in X.columns]
        else:
            features_out = X.shape[1]
        self._setup_feature_attrs_fit(X=X, features_out=features_out)
        if self.spacing == "dt":
            if not isinstance(X, TSCDataFrame):
                raise TypeError(
                    "For input 'spacing=dt' a time series collections is required."
                )
            self.spacing_ = X.delta_time
            # a pd.Series or NaN delta_time means the sampling is not constant
            if isinstance(self.spacing_, pd.Series) or np.isnan(self.spacing_):
                raise TSCException.not_const_delta_time(actual_delta_time=self.spacing_)
        else:
            self.spacing_ = self.spacing
            # NOTE(review): `.all()` only raises when *every* time series has a
            # mismatching delta; `.any()` would be stricter — confirm intent.
            if (
                isinstance(X, TSCDataFrame)
                and np.asarray(self.spacing_ != X.delta_time).all()
            ):
                raise ValueError(
                    f"A spacing of {self.spacing} was specified, but the time series "
                    f"collection has a time delta of {X.delta_time}"
                )
        # spacing must be a strictly positive scalar
        check_scalar(
            self.spacing_,
            "spacing",
            target_type=(int, np.integer, float, np.floating),
            min_val=np.finfo(float).eps,
            max_val=None,
        )
        self.spacing_ = float(self.spacing_)
        check_scalar(
            self.diff_order,
            "diff_order",
            target_type=(int, np.integer),
            min_val=1,
            max_val=None,
        )
        check_scalar(
            self.accuracy,
            name="accuracy",
            target_type=(int, np.integer),
            min_val=1,
            max_val=None,
        )
        return self

    def transform(self, X: TransformType) -> TransformType:
        """Compute the finite difference values.

        Parameters
        ----------
        X: TSCDataFrame, pandas.DataFrame, numpy.ndarray
            Data of shape `(n_samples, n_features)`.

        Returns
        -------
        TSCDataFrame, pandas.DataFrame, numpy.ndarray
            Transformed data of same shape and type as `X`.

        Raises
        ------
        TSCException
            If input `X` has a different time delta than data during `fit`.
        """
        check_is_fitted(self)
        X = self._validate_datafold_data(
            X,
            ensure_tsc=True,
            tsc_kwargs=dict(ensure_delta_time=self.spacing_),
        )
        self._validate_feature_input(X=X, direction="transform")
        # delegate the actual finite-difference computation to the tsc accessor
        time_derivative = X.tsc.time_derivative(
            scheme="center",
            diff_order=self.diff_order,
            accuracy=self.accuracy,
            shift_index=True,
        )
        time_derivative = time_derivative.add_suffix(f"_dot{self.diff_order}")
        return time_derivative
| [
"sklearn.preprocessing.StandardScaler",
"sklearn.preprocessing.MinMaxScaler",
"numpy.ones",
"numpy.isnan",
"numpy.arange",
"datafold.pcfold.timeseries.collection.TSCException.not_const_delta_time",
"sklearn.base.clone",
"sklearn.utils.validation.check_scalar",
"pandas.DataFrame",
"numpy.append",
... | [((3287, 3339), 'sklearn.base.clone', 'clone', ([], {'estimator': 'self.sklearn_transformer', 'safe': '(True)'}), '(estimator=self.sklearn_transformer, safe=True)\n', (3292, 3339), False, 'from sklearn.base import BaseEstimator, clone\n'), ((3897, 3946), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', '"""sklearn_transformer_fit_"""'], {}), "(self, 'sklearn_transformer_fit_')\n", (3912, 3946), False, 'from sklearn.utils.validation import check_is_fitted, check_scalar\n'), ((4927, 4958), 'sklearn.base.clone', 'clone', (['self.sklearn_transformer'], {}), '(self.sklearn_transformer)\n', (4932, 4958), False, 'from sklearn.base import BaseEstimator, clone\n'), ((8131, 8163), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', '"""is_fit_"""'], {}), "(self, 'is_fit_')\n", (8146, 8163), False, 'from sklearn.utils.validation import check_is_fitted, check_scalar\n'), ((9092, 9124), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', '"""is_fit_"""'], {}), "(self, 'is_fit_')\n", (9107, 9124), False, 'from sklearn.utils.validation import check_is_fitted, check_scalar\n'), ((11342, 11363), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (11357, 11363), False, 'from sklearn.utils.validation import check_is_fitted, check_scalar\n'), ((15031, 15125), 'sklearn.utils.validation.check_scalar', 'check_scalar', (['self.lag'], {'name': '"""lag"""', 'target_type': '(int, np.integer)', 'min_val': '(0)', 'max_val': 'None'}), "(self.lag, name='lag', target_type=(int, np.integer), min_val=0,\n max_val=None)\n", (15043, 15125), False, 'from sklearn.utils.validation import check_is_fitted, check_scalar\n'), ((15272, 15372), 'sklearn.utils.validation.check_scalar', 'check_scalar', (['self.delays'], {'name': '"""delays"""', 'target_type': '(int, np.integer)', 'min_val': '(1)', 'max_val': 'None'}), "(self.delays, name='delays', target_type=(int, np.integer),\n min_val=1, 
max_val=None)\n", (15284, 15372), False, 'from sklearn.utils.validation import check_is_fitted, check_scalar\n'), ((15449, 15552), 'sklearn.utils.validation.check_scalar', 'check_scalar', (['self.frequency'], {'name': '"""delays"""', 'target_type': '(int, np.integer)', 'min_val': '(1)', 'max_val': 'None'}), "(self.frequency, name='delays', target_type=(int, np.integer),\n min_val=1, max_val=None)\n", (15461, 15552), False, 'from sklearn.utils.validation import check_is_fitted, check_scalar\n'), ((15629, 15749), 'sklearn.utils.validation.check_scalar', 'check_scalar', (['self.kappa'], {'name': '"""kappa"""', 'target_type': '(int, np.integer, float, np.floating)', 'min_val': '(0.0)', 'max_val': 'None'}), "(self.kappa, name='kappa', target_type=(int, np.integer, float,\n np.floating), min_val=0.0, max_val=None)\n", (15641, 15749), False, 'from sklearn.utils.validation import check_is_fitted, check_scalar\n'), ((17380, 17475), 'pandas.Index', 'pd.Index', (['columns_names'], {'dtype': 'np.str_', 'copy': '(False)', 'name': 'TSCDataFrame.tsc_feature_col_name'}), '(columns_names, dtype=np.str_, copy=False, name=TSCDataFrame.\n tsc_feature_col_name)\n', (17388, 17475), True, 'import pandas as pd\n'), ((23572, 23593), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (23587, 23593), False, 'from sklearn.utils.validation import check_is_fitted, check_scalar\n'), ((25725, 25756), 'datafold.pcfold.MultiquadricKernel', 'MultiquadricKernel', ([], {'epsilon': '(1.0)'}), '(epsilon=1.0)\n', (25743, 25756), False, 'from datafold.pcfold import MultiquadricKernel, PCManifold, TSCDataFrame\n'), ((28879, 28925), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {'attributes': "['centers_']"}), "(self, attributes=['centers_'])\n", (28894, 28925), False, 'from sklearn.utils.validation import check_is_fitted, check_scalar\n'), ((34690, 34711), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), 
'(self)\n', (34705, 34711), False, 'from sklearn.utils.validation import check_is_fitted, check_scalar\n'), ((37727, 37748), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (37742, 37748), False, 'from sklearn.utils.validation import check_is_fitted, check_scalar\n'), ((38240, 38274), 'pandas.concat', 'pd.concat', (['lambdas_applied'], {'axis': '(1)'}), '(lambdas_applied, axis=1)\n', (38249, 38274), True, 'import pandas as pd\n'), ((42048, 42151), 'sklearn.utils.validation.check_scalar', 'check_scalar', (['self.diff_order', '"""diff_order"""'], {'target_type': '(int, np.integer)', 'min_val': '(1)', 'max_val': 'None'}), "(self.diff_order, 'diff_order', target_type=(int, np.integer),\n min_val=1, max_val=None)\n", (42060, 42151), False, 'from sklearn.utils.validation import check_is_fitted, check_scalar\n'), ((42228, 42332), 'sklearn.utils.validation.check_scalar', 'check_scalar', (['self.accuracy'], {'name': '"""accuracy"""', 'target_type': '(int, np.integer)', 'min_val': '(1)', 'max_val': 'None'}), "(self.accuracy, name='accuracy', target_type=(int, np.integer),\n min_val=1, max_val=None)\n", (42240, 42332), False, 'from sklearn.utils.validation import check_is_fitted, check_scalar\n'), ((42975, 42996), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (42990, 42996), False, 'from sklearn.utils.validation import check_is_fitted, check_scalar\n'), ((16364, 16426), 'numpy.arange', 'np.arange', (['(1)', '(self.delays * self.frequency + 1)', 'self.frequency'], {}), '(1, self.delays * self.frequency + 1, self.frequency)\n', (16373, 16426), True, 'import numpy as np\n'), ((21820, 21861), 'numpy.repeat', 'np.repeat', (['kappa_vec', 'self.n_features_in_'], {}), '(kappa_vec, self.n_features_in_)\n', (21829, 21861), True, 'import numpy as np\n'), ((22360, 22457), 'numpy.hstack', 'np.hstack', (['[time_series_numpy[max_delay - delay:-delay, :] for delay in self.\n delay_indices_]'], {}), 
'([time_series_numpy[max_delay - delay:-delay, :] for delay in self\n .delay_indices_])\n', (22369, 22457), True, 'import numpy as np\n'), ((23025, 23062), 'pandas.concat', 'pd.concat', (['delayed_timeseries'], {'axis': '(0)'}), '(delayed_timeseries, axis=0)\n', (23034, 23062), True, 'import pandas as pd\n'), ((38064, 38133), 'pandas.Index', 'pd.Index', (["[f'{feature_name}_lambda{i}' for feature_name in X.columns]"], {}), "([f'{feature_name}_lambda{i}' for feature_name in X.columns])\n", (38072, 38133), True, 'import pandas as pd\n'), ((38406, 38433), 'datafold.pcfold.TSCDataFrame', 'TSCDataFrame', (['X_transformed'], {}), '(X_transformed)\n', (38418, 38433), False, 'from datafold.pcfold import MultiquadricKernel, PCManifold, TSCDataFrame\n'), ((2003, 2060), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'copy': '(True)', 'with_mean': '(True)', 'with_std': '(False)'}), '(copy=True, with_mean=True, with_std=False)\n', (2017, 2060), False, 'from sklearn.preprocessing import MinMaxScaler, PolynomialFeatures, StandardScaler\n'), ((2115, 2160), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)', 'copy': '(True)'}), '(feature_range=(0, 1), copy=True)\n', (2127, 2160), False, 'from sklearn.preprocessing import MinMaxScaler, PolynomialFeatures, StandardScaler\n'), ((7265, 7311), 'numpy.asarray', 'np.asarray', (["[f'{col}_id' for col in X.columns]"], {}), "([f'{col}_id' for col in X.columns])\n", (7275, 7311), True, 'import numpy as np\n'), ((7438, 7472), 'numpy.append', 'np.append', (['features_out', "['const']"], {}), "(features_out, ['const'])\n", (7447, 7472), True, 'import numpy as np\n'), ((21424, 21440), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {}), '([])\n', (21436, 21440), True, 'import pandas as pd\n'), ((22808, 22848), 'numpy.hstack', 'np.hstack', (['[original_data, delayed_data]'], {}), '([original_data, delayed_data])\n', (22817, 22848), True, 'import numpy as np\n'), ((31338, 31395), 
'numpy.linalg.lstsq', 'np.linalg.lstsq', (['center_kernel', 'self.centers_'], {'rcond': 'None'}), '(center_kernel, self.centers_, rcond=None)\n', (31353, 31395), True, 'import numpy as np\n'), ((41259, 41282), 'numpy.isnan', 'np.isnan', (['self.spacing_'], {}), '(self.spacing_)\n', (41267, 41282), True, 'import numpy as np\n'), ((41306, 41372), 'datafold.pcfold.timeseries.collection.TSCException.not_const_delta_time', 'TSCException.not_const_delta_time', ([], {'actual_delta_time': 'self.spacing_'}), '(actual_delta_time=self.spacing_)\n', (41339, 41372), False, 'from datafold.pcfold.timeseries.collection import TSCException\n'), ((2218, 2274), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'copy': '(True)', 'with_mean': '(True)', 'with_std': '(True)'}), '(copy=True, with_mean=True, with_std=True)\n', (2232, 2274), False, 'from sklearn.preprocessing import MinMaxScaler, PolynomialFeatures, StandardScaler\n'), ((21652, 21681), 'numpy.arange', 'np.arange', (['(1)', '(self.delays + 1)'], {}), '(1, self.delays + 1)\n', (21661, 21681), True, 'import numpy as np\n'), ((41937, 41952), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (41945, 41952), True, 'import numpy as np\n'), ((8579, 8598), 'numpy.ones', 'np.ones', (['X.shape[0]'], {}), '(X.shape[0])\n', (8586, 8598), True, 'import numpy as np\n'), ((41510, 41551), 'numpy.asarray', 'np.asarray', (['(self.spacing_ != X.delta_time)'], {}), '(self.spacing_ != X.delta_time)\n', (41520, 41551), True, 'import numpy as np\n'), ((27175, 27195), 'numpy.asarray', 'np.asarray', (['_centers'], {}), '(_centers)\n', (27185, 27195), True, 'import numpy as np\n')] |
"""LMM testing code"""
import unittest
import scipy as SP
import numpy as np
import sys
from limix.core.covar import FreeFormCov
from limix.utils.check_grad import mcheck_grad
class TestFreeForm(unittest.TestCase):
    """Gradient, Hessian and parameter-handling tests for FreeFormCov.

    Fix: the original called ``SP.random.seed`` and ``SP.randn``. These
    top-level SciPy names were merely re-exported NumPy functions; they were
    deprecated and later removed from SciPy, so the module fails to import
    with a modern SciPy. The calls are replaced with the identical
    ``numpy.random`` functions, leaving the generated numbers unchanged.
    """

    def setUp(self):
        # fixed seed for reproducible random parameters
        np.random.seed(1)
        self.n = 4
        self.C = FreeFormCov(self.n)
        self.name = 'freeform'
        self.n_params = self.C.getNumberParams()
        params = np.random.randn(self.n_params)
        self.C.setParams(params)

    def test_grad(self):
        """Check the analytic gradient of K against finite differences."""
        def func(x, i):
            self.C.setParams(x)
            return self.C.K()

        def grad(x, i):
            self.C.setParams(x)
            return self.C.K_grad_i(i)

        x0 = self.C.getParams()
        err = mcheck_grad(func, grad, x0)
        np.testing.assert_almost_equal(err, 0., decimal=6)

    def test_param_activation(self):
        """Deactivating K removes all parameters and forbids setting/gradients."""
        self.assertEqual(len(self.C.getParams()), 10)
        self.C.act_K = False
        self.assertEqual(len(self.C.getParams()), 0)
        self.C.setParams(np.array([]))
        with self.assertRaises(ValueError):
            self.C.setParams(np.array([0]))
        with self.assertRaises(ValueError):
            self.C.K_grad_i(0)

    def test_Khess(self):
        """Check each Hessian slice against finite differences of the gradient."""
        cov = self.C
        for j in range(cov.getNumberParams()):
            # closures capture the current j; they are only used inside this
            # iteration, so the late-binding of j is safe here
            def func(x, i):
                cov.setParams(x)
                return cov.K_grad_i(j)

            def grad(x, i):
                cov.setParams(x)
                return cov.K_hess_i_j(j, i)

            x0 = cov.getParams()
            err = mcheck_grad(func, grad, x0)
            np.testing.assert_almost_equal(err, 0.)


if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"scipy.randn",
"numpy.testing.assert_almost_equal",
"limix.utils.check_grad.mcheck_grad",
"scipy.random.seed",
"numpy.array",
"limix.core.covar.FreeFormCov"
] | [((1655, 1670), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1668, 1670), False, 'import unittest\n'), ((245, 262), 'scipy.random.seed', 'SP.random.seed', (['(1)'], {}), '(1)\n', (259, 262), True, 'import scipy as SP\n'), ((297, 316), 'limix.core.covar.FreeFormCov', 'FreeFormCov', (['self.n'], {}), '(self.n)\n', (308, 316), False, 'from limix.core.covar import FreeFormCov\n'), ((410, 433), 'scipy.randn', 'SP.randn', (['self.n_params'], {}), '(self.n_params)\n', (418, 433), True, 'import scipy as SP\n'), ((721, 748), 'limix.utils.check_grad.mcheck_grad', 'mcheck_grad', (['func', 'grad', 'x0'], {}), '(func, grad, x0)\n', (732, 748), False, 'from limix.utils.check_grad import mcheck_grad\n'), ((758, 809), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['err', '(0.0)'], {'decimal': '(6)'}), '(err, 0.0, decimal=6)\n', (788, 809), True, 'import numpy as np\n'), ((1009, 1021), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1017, 1021), True, 'import numpy as np\n'), ((1543, 1570), 'limix.utils.check_grad.mcheck_grad', 'mcheck_grad', (['func', 'grad', 'x0'], {}), '(func, grad, x0)\n', (1554, 1570), False, 'from limix.utils.check_grad import mcheck_grad\n'), ((1583, 1623), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['err', '(0.0)'], {}), '(err, 0.0)\n', (1613, 1623), True, 'import numpy as np\n'), ((1096, 1109), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1104, 1109), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of sierras (https://github.com/fernandezfran/sierras/).
# Copyright (c) 2021, <NAME>
# License: MIT
# Full Text: https://github.com/fernandezfran/sierras/blob/master/LICENSE
# ============================================================================
# IMPORTS
# ============================================================================
import os
import pathlib
from matplotlib.testing.decorators import check_figures_equal
import numpy as np
import pandas as pd
import pytest
import sierras.arrhenius
# ============================================================================
# CONSTANTS
# ============================================================================
# Directory that holds the test fixtures, resolved relative to this file.
_module_dir = os.path.abspath(os.path.dirname(__file__))
TEST_DATA_PATH = pathlib.Path(_module_dir, "test_data")
# ============================================================================
# TESTS
# ============================================================================
@pytest.mark.parametrize(
    ("temps", "dcoeffs", "dcoeffserr", "ref", "decimal"),
    [
        ( # roughly equivalent to Fuller 1953 silicon data.
            np.array([1250, 1153.36, 1063.13, 970.65, 861.04, 769.34]),
            np.array(
                [
                    7.72104e-06,
                    4.386714e-06,
                    2.23884e-06,
                    5.58574e-07,
                    5.15115e-07,
                    7.58213e-08,
                ]
            ),
            np.array(
                [
                    1.42028e-06,
                    9.239103e-07,
                    6.98605e-07,
                    1.93034e-07,
                    1.18240e-07,
                    2.85640e-09,
                ]
            ),
            (-9196.819, -4.394486),
            (3, 3),
        ),
        ( # roughly equivalent to de Souza LJ 2006 data.
            np.array(
                [
                    1217.694563,
                    934.963910,
                    863.118100,
                    792.707095,
                    734.074259,
                    659.996304,
                    597.864428,
                    537.747162,
                    474.885671,
                    414.531828,
                    356.332201,
                ]
            ),
            np.array(
                [
                    0.031304,
                    0.020066,
                    0.017822,
                    0.014099,
                    0.011692,
                    0.008660,
                    0.007094,
                    0.004650,
                    0.003090,
                    0.001521,
                    0.000681,
                ]
            ),
            None,
            (-1919.8839, -1.8267787),
            (4, 7),
        ),
        ( # roughly equivalent to de Wei-Zhong LJ 2008 Kubo-Green data.
            np.array(
                [
                    0.7000154,
                    0.80037778,
                    0.90050338,
                    1.00071451,
                    1.10114817,
                    1.20103096,
                ]
            )
            * 1_000,
            np.array(
                [
                    0.03800871,
                    0.04596339,
                    0.05495668,
                    0.0619257,
                    0.07191,
                    0.08090012,
                ]
            ),
            None,
            (0.0033812, None),
        ),
    ],
)
def test_arrhenius_diffusion_fit(temps, dcoeffs, dcoeffserr, ref, decimal):
    """Test the ArrheniusDiffusion class, fit method.

    Fits the linearized Arrhenius model and compares slope/intercept
    magnitudes against reference values; also checks the attached units
    (presumably pint quantities -- confirm against sierras' docs).
    """
    arrhenius = sierras.arrhenius.ArrheniusDiffusion(
        temps, dcoeffs, differr=dcoeffserr
    )
    model = arrhenius.fit()
    np.testing.assert_almost_equal(model.slope_.magnitude, ref[0], decimal[0])
    np.testing.assert_almost_equal(
        model.intercept_.magnitude, ref[1], decimal[1]
    )
    # slope carries temperature units, intercept is dimensionless (log-space)
    assert str(model.slope_.units) == "kelvin"
    assert str(model.intercept_.units) == "dimensionless"
@pytest.mark.parametrize(
("temps", "dcoeffs", "dcoeffserr", "ref"),
[
( # roughly equivalent to Fuller 1953 silicon data.
np.array([1250, 1153.36, 1063.13, 970.65, 861.04, 769.34]),
np.array(
[
7.72104e-06,
4.386714e-06,
2.23884e-06,
5.58574e-07,
5.15115e-07,
7.58213e-08,
]
),
np.array(
[
1.42028e-06,
9.239103e-07,
6.98605e-07,
1.93034e-07,
1.18240e-07,
2.85640e-09,
]
),
(2.9132e-15, 2.9348e-15),
),
( # roughly equivalent to de Souza LJ 2006 data.
np.array(
[
1217.694563,
934.963910,
863.118100,
792.707095,
734.074259,
659.996304,
597.864428,
537.747162,
474.885671,
414.531828,
356.332201,
]
),
np.array(
[
0.031304,
0.020066,
0.017822,
0.014099,
0.011692,
0.008660,
0.007094,
0.004650,
0.003090,
0.001521,
0.000681,
]
),
None,
(0.0002674998, None),
),
( # roughly equivalent to de Wei-Zhong LJ 2008 Kubo-Green data.
np.array(
[
0.7000154,
0.80037778,
0.90050338,
1.00071451,
1.10114817,
1.20103096,
]
)
* 1_000,
np.array(
[
0.03800871,
0.04596339,
0.05495668,
0.0619257,
0.07191,
0.08090012,
]
),
None,
(0.0033812, None),
),
],
)
def test_arrhenius_diffusion_extrapolate(temps, dcoeffs, dcoeffserr, ref):
    """Check ArrheniusDiffusion.extrapolate against reference values."""
    diffusion = sierras.arrhenius.ArrheniusDiffusion(
        temps, dcoeffs, differr=dcoeffserr
    )
    diffusion.fit()
    damb = diffusion.extrapolate()

    # When a reference error is given the result is a value/error pair;
    # otherwise it is a bare quantity.
    if ref[1] is not None:
        np.testing.assert_almost_equal(damb.value.magnitude, ref[0])
        np.testing.assert_almost_equal(damb.error.magnitude, ref[1])
    else:
        np.testing.assert_almost_equal(damb.magnitude, ref[0])

    assert str(damb.units) == "centimeter ** 2 / second"
@pytest.mark.parametrize(
("temps", "dcoeffs", "dcoeffserr", "ref"),
[
( # roughly equivalent to Fuller 1953 silicon data.
np.array([1250, 1153.36, 1063.13, 970.65, 861.04, 769.34]),
np.array(
[
7.72104e-06,
4.386714e-06,
2.23884e-06,
5.58574e-07,
5.15115e-07,
7.58213e-08,
]
),
np.array(
[
1.42028e-06,
9.239103e-07,
6.98605e-07,
1.93034e-07,
1.18240e-07,
2.85640e-09,
]
),
(0.79252057, 0.0243509),
),
( # roughly equivalent to de Souza LJ 2006 data.
np.array(
[
1217.694563,
934.963910,
863.118100,
792.707095,
734.074259,
659.996304,
597.864428,
537.747162,
474.885671,
414.531828,
356.332201,
]
),
np.array(
[
0.031304,
0.020066,
0.017822,
0.014099,
0.011692,
0.008660,
0.007094,
0.004650,
0.003090,
0.001521,
0.000681,
]
),
None,
(0.16544279, None),
),
( # roughly equivalent to de Wei-Zhong LJ 2008 Kubo-Green data.
np.array(
[
0.7000154,
0.80037778,
0.90050338,
1.00071451,
1.10114817,
1.20103096,
]
)
* 1_000,
np.array(
[
0.03800871,
0.04596339,
0.05495668,
0.0619257,
0.07191,
0.08090012,
]
),
None,
(0.1084714, None),
),
],
)
def test_arrhenius_diffusion_activation_energy(
    temps, dcoeffs, dcoeffserr, ref
):
    """Check ArrheniusDiffusion.activation_energy against references."""
    diffusion = sierras.arrhenius.ArrheniusDiffusion(
        temps, dcoeffs, differr=dcoeffserr
    )
    diffusion.fit()
    acteng = diffusion.activation_energy()

    # With a reference error the result is a value/error pair; without one
    # it is a plain quantity.
    if ref[1] is not None:
        np.testing.assert_almost_equal(acteng.value.magnitude, ref[0])
        np.testing.assert_almost_equal(acteng.error.magnitude, ref[1])
    else:
        np.testing.assert_almost_equal(acteng.magnitude, ref[0])

    assert str(acteng.units) == "electron_volt"
@pytest.mark.parametrize(
("temps", "dcoeffs", "dcoeffserr"),
[
( # roughly equivalent to Fuller 1953 silicon data.
np.array([1250, 1153.36, 1063.13, 970.65, 861.04, 769.34]),
np.array(
[
7.72104e-06,
4.386714e-06,
2.23884e-06,
5.58574e-07,
5.15115e-07,
7.58213e-08,
]
),
np.array(
[
1.42028e-06,
9.239103e-07,
6.98605e-07,
1.93034e-07,
1.18240e-07,
2.85640e-09,
]
),
),
( # roughly equivalent to de Souza LJ 2006 data.
np.array(
[
1217.694563,
934.963910,
863.118100,
792.707095,
734.074259,
659.996304,
597.864428,
537.747162,
474.885671,
414.531828,
356.332201,
]
),
np.array(
[
0.031304,
0.020066,
0.017822,
0.014099,
0.011692,
0.008660,
0.007094,
0.004650,
0.003090,
0.001521,
0.000681,
]
),
None,
),
( # roughly equivalent to de Wei-Zhong LJ 2008 Kubo-Green data.
np.array(
[
0.7000154,
0.80037778,
0.90050338,
1.00071451,
1.10114817,
1.20103096,
]
)
* 1_000,
np.array(
[
0.03800871,
0.04596339,
0.05495668,
0.0619257,
0.07191,
0.08090012,
]
),
None,
),
],
)
@check_figures_equal(extensions=["png", "pdf"], tol=0.005)
def test_arrhenius_diffusion_plot(
    fig_test, fig_ref, temps, dcoeffs, dcoeffserr
):
    """Compare ArrheniusDiffusion.plot output with a hand-built figure."""
    diffusion = sierras.arrhenius.ArrheniusDiffusion(
        temps, dcoeffs, differr=dcoeffserr
    )
    fitted = diffusion.fit()
    slope, intercept = fitted.slope_, fitted.intercept_

    # Figure produced by the class under test.
    test_ax = fig_test.subplots()
    diffusion.plot(ax=test_ax)

    # Reference figure drawn manually: the log-diffusivities versus 1/T
    # with relative error bars (when errors are provided) ...
    exp_ax = fig_ref.subplots()
    relative_err = dcoeffserr / dcoeffs if dcoeffserr is not None else None
    exp_ax.errorbar(
        1 / temps,
        np.log(dcoeffs),
        yerr=relative_err,
        marker="o",
        ls="",
        label="diffusion",
    )
    # ... plus the fitted straight line in 1/T space.
    exp_ax.plot(
        1 / temps, intercept.magnitude + slope.magnitude / temps, label="fit"
    )
@pytest.mark.parametrize(
("temps", "dcoeffs", "dcoeffserr", "reffname"),
[
( # roughly equivalent to de Souza LJ 2006 data.
np.array(
[
1217.694563,
934.963910,
863.118100,
792.707095,
734.074259,
659.996304,
597.864428,
537.747162,
474.885671,
414.531828,
356.332201,
]
),
np.array(
[
0.031304,
0.020066,
0.017822,
0.014099,
0.011692,
0.008660,
0.007094,
0.004650,
0.003090,
0.001521,
0.000681,
]
),
None,
"desouza06-LJ.csv",
),
( # roughly equivalent to de Wei-Zhong LJ 2008 Kubo-Green data.
np.array(
[
0.7000154,
0.80037778,
0.90050338,
1.00071451,
1.10114817,
1.20103096,
]
)
* 1_000,
np.array(
[
0.03800871,
0.04596339,
0.05495668,
0.0619257,
0.07191,
0.08090012,
]
),
None,
"wei-zhong08-KG.csv",
),
( # roughly equivalent to Fuller 1953 silicon data.
np.array([1250, 1153.36, 1063.13, 970.65, 861.04, 769.34]),
np.array(
[
7.72104e-06,
4.386714e-06,
2.23884e-06,
5.58574e-07,
5.15115e-07,
7.58213e-08,
]
),
np.array(
[
1.42028e-06,
9.239103e-07,
6.98605e-07,
1.93034e-07,
1.18240e-07,
2.85640e-09,
]
),
"fuller53-Si.csv",
),
],
)
def test_arrhenius_diffusion_to_csv(temps, dcoeffs, dcoeffserr, reffname):
    """Check that the exported dataframe matches the stored CSV fixture."""
    # Reference frame is read as float32 to match the export precision.
    expected = pd.read_csv(str(TEST_DATA_PATH / reffname), dtype=np.float32)

    diffusion = sierras.arrhenius.ArrheniusDiffusion(
        temps, dcoeffs, differr=dcoeffserr
    )
    diffusion.fit()
    exported = diffusion.to_dataframe()

    pd.testing.assert_frame_equal(exported, expected)
| [
"pandas.testing.assert_frame_equal",
"numpy.log",
"numpy.testing.assert_almost_equal",
"os.path.dirname",
"numpy.array",
"matplotlib.testing.decorators.check_figures_equal"
] | [((12526, 12583), 'matplotlib.testing.decorators.check_figures_equal', 'check_figures_equal', ([], {'extensions': "['png', 'pdf']", 'tol': '(0.005)'}), "(extensions=['png', 'pdf'], tol=0.005)\n", (12545, 12583), False, 'from matplotlib.testing.decorators import check_figures_equal\n'), ((3823, 3897), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['model.slope_.magnitude', 'ref[0]', 'decimal[0]'], {}), '(model.slope_.magnitude, ref[0], decimal[0])\n', (3853, 3897), True, 'import numpy as np\n'), ((3902, 3980), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['model.intercept_.magnitude', 'ref[1]', 'decimal[1]'], {}), '(model.intercept_.magnitude, ref[1], decimal[1])\n', (3932, 3980), True, 'import numpy as np\n'), ((16153, 16194), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['df', 'df_ref'], {}), '(df, df_ref)\n', (16182, 16194), True, 'import pandas as pd\n'), ((6875, 6929), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['damb.magnitude', 'ref[0]'], {}), '(damb.magnitude, ref[0])\n', (6905, 6929), True, 'import numpy as np\n'), ((6948, 7008), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['damb.value.magnitude', 'ref[0]'], {}), '(damb.value.magnitude, ref[0])\n', (6978, 7008), True, 'import numpy as np\n'), ((7017, 7077), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['damb.error.magnitude', 'ref[1]'], {}), '(damb.error.magnitude, ref[1])\n', (7047, 7077), True, 'import numpy as np\n'), ((9933, 9989), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['acteng.magnitude', 'ref[0]'], {}), '(acteng.magnitude, ref[0])\n', (9963, 9989), True, 'import numpy as np\n'), ((10008, 10070), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['acteng.value.magnitude', 'ref[0]'], {}), '(acteng.value.magnitude, ref[0])\n', (10038, 10070), True, 'import numpy as np\n'), ((10079, 
10141), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['acteng.error.magnitude', 'ref[1]'], {}), '(acteng.error.magnitude, ref[1])\n', (10109, 10141), True, 'import numpy as np\n'), ((13082, 13097), 'numpy.log', 'np.log', (['dcoeffs'], {}), '(dcoeffs)\n', (13088, 13097), True, 'import numpy as np\n'), ((819, 844), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (834, 844), False, 'import os\n'), ((1194, 1252), 'numpy.array', 'np.array', (['[1250, 1153.36, 1063.13, 970.65, 861.04, 769.34]'], {}), '([1250, 1153.36, 1063.13, 970.65, 861.04, 769.34])\n', (1202, 1252), True, 'import numpy as np\n'), ((1266, 1359), 'numpy.array', 'np.array', (['[7.72104e-06, 4.386714e-06, 2.23884e-06, 5.58574e-07, 5.15115e-07, 7.58213e-08]'], {}), '([7.72104e-06, 4.386714e-06, 2.23884e-06, 5.58574e-07, 5.15115e-07,\n 7.58213e-08])\n', (1274, 1359), True, 'import numpy as np\n'), ((1538, 1630), 'numpy.array', 'np.array', (['[1.42028e-06, 9.239103e-07, 6.98605e-07, 1.93034e-07, 1.1824e-07, 2.8564e-09]'], {}), '([1.42028e-06, 9.239103e-07, 6.98605e-07, 1.93034e-07, 1.1824e-07, \n 2.8564e-09])\n', (1546, 1630), True, 'import numpy as np\n'), ((1935, 2080), 'numpy.array', 'np.array', (['[1217.694563, 934.96391, 863.1181, 792.707095, 734.074259, 659.996304, \n 597.864428, 537.747162, 474.885671, 414.531828, 356.332201]'], {}), '([1217.694563, 934.96391, 863.1181, 792.707095, 734.074259, \n 659.996304, 597.864428, 537.747162, 474.885671, 414.531828, 356.332201])\n', (1943, 2080), True, 'import numpy as np\n'), ((2361, 2483), 'numpy.array', 'np.array', (['[0.031304, 0.020066, 0.017822, 0.014099, 0.011692, 0.00866, 0.007094, \n 0.00465, 0.00309, 0.001521, 0.000681]'], {}), '([0.031304, 0.020066, 0.017822, 0.014099, 0.011692, 0.00866, \n 0.007094, 0.00465, 0.00309, 0.001521, 0.000681])\n', (2369, 2483), True, 'import numpy as np\n'), ((3208, 3286), 'numpy.array', 'np.array', (['[0.03800871, 0.04596339, 0.05495668, 0.0619257, 0.07191, 
0.08090012]'], {}), '([0.03800871, 0.04596339, 0.05495668, 0.0619257, 0.07191, 0.08090012])\n', (3216, 3286), True, 'import numpy as np\n'), ((4255, 4313), 'numpy.array', 'np.array', (['[1250, 1153.36, 1063.13, 970.65, 861.04, 769.34]'], {}), '([1250, 1153.36, 1063.13, 970.65, 861.04, 769.34])\n', (4263, 4313), True, 'import numpy as np\n'), ((4327, 4420), 'numpy.array', 'np.array', (['[7.72104e-06, 4.386714e-06, 2.23884e-06, 5.58574e-07, 5.15115e-07, 7.58213e-08]'], {}), '([7.72104e-06, 4.386714e-06, 2.23884e-06, 5.58574e-07, 5.15115e-07,\n 7.58213e-08])\n', (4335, 4420), True, 'import numpy as np\n'), ((4599, 4691), 'numpy.array', 'np.array', (['[1.42028e-06, 9.239103e-07, 6.98605e-07, 1.93034e-07, 1.1824e-07, 2.8564e-09]'], {}), '([1.42028e-06, 9.239103e-07, 6.98605e-07, 1.93034e-07, 1.1824e-07, \n 2.8564e-09])\n', (4607, 4691), True, 'import numpy as np\n'), ((4978, 5123), 'numpy.array', 'np.array', (['[1217.694563, 934.96391, 863.1181, 792.707095, 734.074259, 659.996304, \n 597.864428, 537.747162, 474.885671, 414.531828, 356.332201]'], {}), '([1217.694563, 934.96391, 863.1181, 792.707095, 734.074259, \n 659.996304, 597.864428, 537.747162, 474.885671, 414.531828, 356.332201])\n', (4986, 5123), True, 'import numpy as np\n'), ((5404, 5526), 'numpy.array', 'np.array', (['[0.031304, 0.020066, 0.017822, 0.014099, 0.011692, 0.00866, 0.007094, \n 0.00465, 0.00309, 0.001521, 0.000681]'], {}), '([0.031304, 0.020066, 0.017822, 0.014099, 0.011692, 0.00866, \n 0.007094, 0.00465, 0.00309, 0.001521, 0.000681])\n', (5412, 5526), True, 'import numpy as np\n'), ((6227, 6305), 'numpy.array', 'np.array', (['[0.03800871, 0.04596339, 0.05495668, 0.0619257, 0.07191, 0.08090012]'], {}), '([0.03800871, 0.04596339, 0.05495668, 0.0619257, 0.07191, 0.08090012])\n', (6235, 6305), True, 'import numpy as np\n'), ((7290, 7348), 'numpy.array', 'np.array', (['[1250, 1153.36, 1063.13, 970.65, 861.04, 769.34]'], {}), '([1250, 1153.36, 1063.13, 970.65, 861.04, 769.34])\n', (7298, 7348), True, 
'import numpy as np\n'), ((7362, 7455), 'numpy.array', 'np.array', (['[7.72104e-06, 4.386714e-06, 2.23884e-06, 5.58574e-07, 5.15115e-07, 7.58213e-08]'], {}), '([7.72104e-06, 4.386714e-06, 2.23884e-06, 5.58574e-07, 5.15115e-07,\n 7.58213e-08])\n', (7370, 7455), True, 'import numpy as np\n'), ((7634, 7726), 'numpy.array', 'np.array', (['[1.42028e-06, 9.239103e-07, 6.98605e-07, 1.93034e-07, 1.1824e-07, 2.8564e-09]'], {}), '([1.42028e-06, 9.239103e-07, 6.98605e-07, 1.93034e-07, 1.1824e-07, \n 2.8564e-09])\n', (7642, 7726), True, 'import numpy as np\n'), ((8012, 8157), 'numpy.array', 'np.array', (['[1217.694563, 934.96391, 863.1181, 792.707095, 734.074259, 659.996304, \n 597.864428, 537.747162, 474.885671, 414.531828, 356.332201]'], {}), '([1217.694563, 934.96391, 863.1181, 792.707095, 734.074259, \n 659.996304, 597.864428, 537.747162, 474.885671, 414.531828, 356.332201])\n', (8020, 8157), True, 'import numpy as np\n'), ((8438, 8560), 'numpy.array', 'np.array', (['[0.031304, 0.020066, 0.017822, 0.014099, 0.011692, 0.00866, 0.007094, \n 0.00465, 0.00309, 0.001521, 0.000681]'], {}), '([0.031304, 0.020066, 0.017822, 0.014099, 0.011692, 0.00866, \n 0.007094, 0.00465, 0.00309, 0.001521, 0.000681])\n', (8446, 8560), True, 'import numpy as np\n'), ((9259, 9337), 'numpy.array', 'np.array', (['[0.03800871, 0.04596339, 0.05495668, 0.0619257, 0.07191, 0.08090012]'], {}), '([0.03800871, 0.04596339, 0.05495668, 0.0619257, 0.07191, 0.08090012])\n', (9267, 9337), True, 'import numpy as np\n'), ((10338, 10396), 'numpy.array', 'np.array', (['[1250, 1153.36, 1063.13, 970.65, 861.04, 769.34]'], {}), '([1250, 1153.36, 1063.13, 970.65, 861.04, 769.34])\n', (10346, 10396), True, 'import numpy as np\n'), ((10410, 10503), 'numpy.array', 'np.array', (['[7.72104e-06, 4.386714e-06, 2.23884e-06, 5.58574e-07, 5.15115e-07, 7.58213e-08]'], {}), '([7.72104e-06, 4.386714e-06, 2.23884e-06, 5.58574e-07, 5.15115e-07,\n 7.58213e-08])\n', (10418, 10503), True, 'import numpy as np\n'), ((10682, 10774), 
'numpy.array', 'np.array', (['[1.42028e-06, 9.239103e-07, 6.98605e-07, 1.93034e-07, 1.1824e-07, 2.8564e-09]'], {}), '([1.42028e-06, 9.239103e-07, 6.98605e-07, 1.93034e-07, 1.1824e-07, \n 2.8564e-09])\n', (10690, 10774), True, 'import numpy as np\n'), ((11023, 11168), 'numpy.array', 'np.array', (['[1217.694563, 934.96391, 863.1181, 792.707095, 734.074259, 659.996304, \n 597.864428, 537.747162, 474.885671, 414.531828, 356.332201]'], {}), '([1217.694563, 934.96391, 863.1181, 792.707095, 734.074259, \n 659.996304, 597.864428, 537.747162, 474.885671, 414.531828, 356.332201])\n', (11031, 11168), True, 'import numpy as np\n'), ((11449, 11571), 'numpy.array', 'np.array', (['[0.031304, 0.020066, 0.017822, 0.014099, 0.011692, 0.00866, 0.007094, \n 0.00465, 0.00309, 0.001521, 0.000681]'], {}), '([0.031304, 0.020066, 0.017822, 0.014099, 0.011692, 0.00866, \n 0.007094, 0.00465, 0.00309, 0.001521, 0.000681])\n', (11457, 11571), True, 'import numpy as np\n'), ((12238, 12316), 'numpy.array', 'np.array', (['[0.03800871, 0.04596339, 0.05495668, 0.0619257, 0.07191, 0.08090012]'], {}), '([0.03800871, 0.04596339, 0.05495668, 0.0619257, 0.07191, 0.08090012])\n', (12246, 12316), True, 'import numpy as np\n'), ((13495, 13640), 'numpy.array', 'np.array', (['[1217.694563, 934.96391, 863.1181, 792.707095, 734.074259, 659.996304, \n 597.864428, 537.747162, 474.885671, 414.531828, 356.332201]'], {}), '([1217.694563, 934.96391, 863.1181, 792.707095, 734.074259, \n 659.996304, 597.864428, 537.747162, 474.885671, 414.531828, 356.332201])\n', (13503, 13640), True, 'import numpy as np\n'), ((13921, 14043), 'numpy.array', 'np.array', (['[0.031304, 0.020066, 0.017822, 0.014099, 0.011692, 0.00866, 0.007094, \n 0.00465, 0.00309, 0.001521, 0.000681]'], {}), '([0.031304, 0.020066, 0.017822, 0.014099, 0.011692, 0.00866, \n 0.007094, 0.00465, 0.00309, 0.001521, 0.000681])\n', (13929, 14043), True, 'import numpy as np\n'), ((14742, 14820), 'numpy.array', 'np.array', (['[0.03800871, 0.04596339, 0.05495668, 
0.0619257, 0.07191, 0.08090012]'], {}), '([0.03800871, 0.04596339, 0.05495668, 0.0619257, 0.07191, 0.08090012])\n', (14750, 14820), True, 'import numpy as np\n'), ((15127, 15185), 'numpy.array', 'np.array', (['[1250, 1153.36, 1063.13, 970.65, 861.04, 769.34]'], {}), '([1250, 1153.36, 1063.13, 970.65, 861.04, 769.34])\n', (15135, 15185), True, 'import numpy as np\n'), ((15199, 15292), 'numpy.array', 'np.array', (['[7.72104e-06, 4.386714e-06, 2.23884e-06, 5.58574e-07, 5.15115e-07, 7.58213e-08]'], {}), '([7.72104e-06, 4.386714e-06, 2.23884e-06, 5.58574e-07, 5.15115e-07,\n 7.58213e-08])\n', (15207, 15292), True, 'import numpy as np\n'), ((15471, 15563), 'numpy.array', 'np.array', (['[1.42028e-06, 9.239103e-07, 6.98605e-07, 1.93034e-07, 1.1824e-07, 2.8564e-09]'], {}), '([1.42028e-06, 9.239103e-07, 6.98605e-07, 1.93034e-07, 1.1824e-07, \n 2.8564e-09])\n', (15479, 15563), True, 'import numpy as np\n'), ((2924, 3010), 'numpy.array', 'np.array', (['[0.7000154, 0.80037778, 0.90050338, 1.00071451, 1.10114817, 1.20103096]'], {}), '([0.7000154, 0.80037778, 0.90050338, 1.00071451, 1.10114817, \n 1.20103096])\n', (2932, 3010), True, 'import numpy as np\n'), ((5943, 6029), 'numpy.array', 'np.array', (['[0.7000154, 0.80037778, 0.90050338, 1.00071451, 1.10114817, 1.20103096]'], {}), '([0.7000154, 0.80037778, 0.90050338, 1.00071451, 1.10114817, \n 1.20103096])\n', (5951, 6029), True, 'import numpy as np\n'), ((8975, 9061), 'numpy.array', 'np.array', (['[0.7000154, 0.80037778, 0.90050338, 1.00071451, 1.10114817, 1.20103096]'], {}), '([0.7000154, 0.80037778, 0.90050338, 1.00071451, 1.10114817, \n 1.20103096])\n', (8983, 9061), True, 'import numpy as np\n'), ((11954, 12040), 'numpy.array', 'np.array', (['[0.7000154, 0.80037778, 0.90050338, 1.00071451, 1.10114817, 1.20103096]'], {}), '([0.7000154, 0.80037778, 0.90050338, 1.00071451, 1.10114817, \n 1.20103096])\n', (11962, 12040), True, 'import numpy as np\n'), ((14458, 14544), 'numpy.array', 'np.array', (['[0.7000154, 0.80037778, 
0.90050338, 1.00071451, 1.10114817, 1.20103096]'], {}), '([0.7000154, 0.80037778, 0.90050338, 1.00071451, 1.10114817, \n 1.20103096])\n', (14466, 14544), True, 'import numpy as np\n')] |
import tensorflow as tf
from keras import backend as K
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, Reshape,\
BatchNormalization, LeakyReLU, Dropout
from keras.models import load_model
from keras.models import model_from_json
from keras.callbacks import History, ModelCheckpoint
from keras.optimizers import SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax,\
Nadam
from keras.initializers import RandomNormal
import json
import numpy as np
from aovek.validate.model_metrics import ModelMetrics
# Single module-level TensorFlow session, registered with Keras so that the
# Keras layers/models and the raw TF ops below all execute in the same graph
# session (TF1-style graph mode).
sess = tf.Session()
K.set_session(sess)
class YOLO:
    """
    YOLO-style convolutional neural network for object detection.

    Builds a Darknet-like backbone (Conv2D + BatchNorm + LeakyReLU blocks
    with max-pooling) that maps a square input image onto a
    ``grid_size x grid_size`` grid, each cell predicting
    ``number_of_annotations`` box values plus one objectness probability.
    Training minimizes a custom sum-squared-error loss modeled on the
    original YOLO paper; prediction applies TensorFlow non-max suppression
    over the grid cells.  All hyper-parameters come from ``config``.
    """

    def __init__(self, config):
        """Read geometry, training and prediction settings from ``config``."""
        # Input geometry: square image with `color_channels` channels.
        self.image_size = config['image_info']['image_size']
        self.color_channels = config['image_info']['color_channels']
        # Output grid and per-cell annotation count (box values; the network
        # predicts one extra channel for the objectness probability).
        self.grid_size = config['label_info']['grid_size']
        self.number_of_annotations =\
            config['label_info']['number_of_annotations']
        # Training hyper-parameters.
        self.batch_size = config['network']['train']['batch_size']
        self.number_of_epochs = config['network']['train']['number_of_epochs']
        # Loss weights: alpha_coord scales the coordinate terms,
        # alpha_noobj down-weights cells that contain no object.
        self.alpha_coord = config['network']['train']['loss']['alpha_coord']
        self.alpha_noobj = config['network']['train']['loss']['alpha_noobj']
        # Optimizer selection; `optimizer` is built lazily in create_model().
        self.optimizer_type =\
            config['network']['train']['optimizer']['optimizer']
        self.learning_rate =\
            config['network']['train']['optimizer']['learning_rate']
        self.decay = config['network']['train']['optimizer']['decay']
        self.optimizer = None
        # Keras callbacks / collected metrics, populated by train()/summary().
        self.metrics = None
        self.history = History()
        self.model_metrics = None
        self.model_structure = None
        # Paths for saving/loading the model and its checkpoints.
        self.model_binary_data_file =\
            config['network']['model_binary_data_file']
        self.model_json_structure_file =\
            config['network']['json_model_structure']
        self.model_checkpoint_binary_data_file =\
            config['network']['model_checkpoint_binary_data_file']
        # Prediction thresholds: minimum objectness probability and the IoU
        # overlap used by non-max suppression.
        self.iou_threshold = config['network']['predict']['iou_threshold']
        self.prob_threshold = config['network']['predict']['prob_threshold']
        self.model = None

    def create_model(self):
        """Build, compile and store the Keras model in ``self.model``."""
        # NOTE(review): the local name `input` shadows the builtin; harmless
        # here but worth renaming if this method is ever touched.
        input = Input(shape=(self.image_size, self.image_size,
                             self.color_channels))
        network = self.create_network(input)
        model = Model(input, network)
        self.optimizer = self.create_optimizer()
        model.compile(optimizer=self.optimizer,
                      loss=self.custom_loss)
        model.summary()
        self.model = model

    def create_network(self, input):
        """Wire the Darknet-like backbone and return the output tensor.

        Every stage is Conv2D -> BatchNormalization -> LeakyReLU(0.1);
        five 2x2 max-pools progressively halve the spatial resolution, and
        the final 1x1 sigmoid convolution emits
        ``number_of_annotations + 1`` channels per grid cell.
        """
        # Stage 1: 32 3x3 filters, then 2x2 max-pool.
        network = Conv2D(filters=32,
                         kernel_size=(3, 3),
                         strides=(1, 1),
                         padding='same',
                         name='conv_1',
                         kernel_initializer=RandomNormal(),
                         use_bias=True)(input)
        network = BatchNormalization(name='norm_1')(network)
        network = LeakyReLU(alpha=0.1, name='relu_1')(network)
        network = MaxPooling2D(pool_size=(2, 2),
                               name='pool_1')(network)
        # Stage 2: 64 3x3 filters, then 2x2 max-pool.
        network = Conv2D(filters=64,
                         kernel_size=(3, 3),
                         strides=(1, 1),
                         padding='same',
                         name='conv_2',
                         kernel_initializer=RandomNormal(),
                         use_bias=True)(network)
        network = BatchNormalization(name='norm_2')(network)
        network = LeakyReLU(alpha=0.1, name='relu_2')(network)
        network = MaxPooling2D(pool_size=(2, 2),
                               name='pool_2')(network)
        # Stage 3: 128 -> 64(1x1 bottleneck) -> 128 filters, then pool.
        network = Conv2D(filters=128,
                         kernel_size=(3, 3),
                         strides=(1, 1),
                         padding='same',
                         name='conv_3',
                         kernel_initializer=RandomNormal(),
                         use_bias=True)(network)
        network = BatchNormalization(name='norm_3')(network)
        network = LeakyReLU(alpha=0.1, name='relu_3')(network)
        network = Conv2D(filters=64,
                         kernel_size=(1, 1),
                         strides=(1, 1),
                         padding='same',
                         name='conv_4',
                         kernel_initializer=RandomNormal(),
                         use_bias=True)(network)
        network = BatchNormalization(name='norm_4')(network)
        network = LeakyReLU(alpha=0.1, name='relu_4')(network)
        network = Conv2D(filters=128,
                         kernel_size=(3, 3),
                         strides=(1, 1),
                         padding='same',
                         name='conv_5',
                         kernel_initializer=RandomNormal(),
                         use_bias=True)(network)
        network = BatchNormalization(name='norm_5')(network)
        network = LeakyReLU(alpha=0.1, name='relu_5')(network)
        network = MaxPooling2D(pool_size=(2, 2),
                               name='pool_3')(network)
        # Stage 4: 256 -> 128(1x1) -> 256 filters, then pool.
        network = Conv2D(filters=256,
                         kernel_size=(3, 3),
                         strides=(1, 1),
                         padding='same',
                         name='conv_6',
                         kernel_initializer=RandomNormal(),
                         use_bias=True)(network)
        network = BatchNormalization(name='norm_6')(network)
        network = LeakyReLU(alpha=0.1, name='relu_6')(network)
        network = Conv2D(filters=128,
                         kernel_size=(1, 1),
                         strides=(1, 1),
                         padding='same',
                         name='conv_7',
                         kernel_initializer=RandomNormal(),
                         use_bias=True)(network)
        network = BatchNormalization(name='norm_7')(network)
        network = LeakyReLU(alpha=0.1, name='relu_7')(network)
        network = Conv2D(filters=256,
                         kernel_size=(3, 3),
                         strides=(1, 1),
                         padding='same',
                         name='conv_8',
                         kernel_initializer=RandomNormal(),
                         use_bias=True)(network)
        network = BatchNormalization(name='norm_8')(network)
        network = LeakyReLU(alpha=0.1, name='relu_8')(network)
        network = MaxPooling2D(pool_size=(2, 2),
                               name='pool_4')(network)
        # Stage 5: 512 -> 256(1x1) -> 512 -> 256(1x1) -> 512, then pool.
        network = Conv2D(filters=512,
                         kernel_size=(3, 3),
                         strides=(1, 1),
                         padding='same',
                         name='conv_9',
                         kernel_initializer=RandomNormal(),
                         use_bias=True)(network)
        network = BatchNormalization(name='norm_9')(network)
        network = LeakyReLU(alpha=0.1, name='relu_9')(network)
        network = Conv2D(filters=256,
                         kernel_size=(1, 1),
                         strides=(1, 1),
                         padding='same',
                         name='conv_10',
                         kernel_initializer=RandomNormal(),
                         use_bias=True)(network)
        network = BatchNormalization(name='norm_10')(network)
        network = LeakyReLU(alpha=0.1, name='relu_10')(network)
        network = Conv2D(filters=512,
                         kernel_size=(3, 3),
                         strides=(1, 1),
                         padding='same',
                         name='conv_11',
                         kernel_initializer=RandomNormal(),
                         use_bias=True)(network)
        network = BatchNormalization(name='norm_11')(network)
        network = LeakyReLU(alpha=0.1, name='relu_11')(network)
        network = Conv2D(filters=256,
                         kernel_size=(1, 1),
                         strides=(1, 1),
                         padding='same',
                         name='conv_12',
                         kernel_initializer=RandomNormal(),
                         use_bias=True)(network)
        network = BatchNormalization(name='norm_12')(network)
        network = LeakyReLU(alpha=0.1, name='relu_12')(network)
        network = Conv2D(filters=512,
                         kernel_size=(3, 3),
                         strides=(1, 1),
                         padding='same',
                         name='conv_13',
                         kernel_initializer=RandomNormal(),
                         use_bias=True)(network)
        network = BatchNormalization(name='norm_13')(network)
        network = LeakyReLU(alpha=0.1, name='relu_13')(network)
        network = MaxPooling2D(pool_size=(2, 2),
                               name='pool_5')(network)
        # Head: two 1024-filter blocks, each followed by 50% dropout.
        network = Conv2D(filters=1024,
                         kernel_size=(3, 3),
                         strides=(1, 1),
                         padding='same',
                         name='conv_14',
                         kernel_initializer=RandomNormal(),
                         use_bias=True)(network)
        network = BatchNormalization(name='norm_14')(network)
        network = LeakyReLU(alpha=0.1, name='relu_14')(network)
        network = Dropout(rate=0.5, name='drop_1')(network)
        network = Conv2D(filters=1024,
                         kernel_size=(3, 3),
                         strides=(1, 1),
                         padding='same',
                         name='conv_15',
                         kernel_initializer=RandomNormal(),
                         use_bias=True)(network)
        network = BatchNormalization(name='norm_15')(network)
        network = LeakyReLU(alpha=0.1, name='relu_15')(network)
        network = Dropout(rate=0.5, name='drop_2')(network)
        # Output: 1x1 sigmoid convolution -> per-cell predictions, reshaped
        # to (grid_size, grid_size, number_of_annotations + 1).
        network = Conv2D(filters=(self.number_of_annotations + 1),
                         kernel_size=(1, 1),
                         strides=(1, 1),
                         padding='same',
                         activation='sigmoid',
                         name='conv_16',
                         kernel_initializer=RandomNormal(),
                         use_bias=True)(network)
        network = Reshape((self.grid_size,
                           self.grid_size,
                           (self.number_of_annotations + 1)))(network)

        return network

    def create_optimizer(self):
        """Instantiate the Keras optimizer named by ``optimizer_type``.

        NOTE(review): Adagrad/Adadelta ignore the configured learning rate
        and decay, and an unrecognized type leaves ``optimizer`` unbound
        (NameError on return) — confirm the config is validated upstream.
        """
        if self.optimizer_type == 'SGD':
            optimizer = SGD(lr=self.learning_rate, decay=self.decay)
        elif self.optimizer_type == 'RMSprop':
            optimizer = RMSprop(lr=self.learning_rate, decay=self.decay)
        elif self.optimizer_type == 'Adagrad':
            optimizer = Adagrad()
        elif self.optimizer_type == 'Adadelta':
            optimizer = Adadelta()
        elif self.optimizer_type == 'Adam':
            optimizer = Adam(lr=self.learning_rate, decay=self.decay)
        elif self.optimizer_type == 'Adamax':
            optimizer = Adamax(lr=self.learning_rate, decay=self.decay)
        elif self.optimizer_type == 'Nadam':
            optimizer = Nadam()
        return optimizer

    def train(self, train_data, train_labels, validation_data,
              validation_labels):
        """Fit the model, recording history/metrics and checkpointing.

        Validation metrics are computed per epoch by the ModelMetrics
        callback; the best weights are written to the checkpoint file
        monitored on validation loss.
        """
        self.metrics = ModelMetrics(validation_data, validation_labels,
                                    self)
        model_checkpoint = ModelCheckpoint(
            self.model_checkpoint_binary_data_file, monitor='val_loss')
        self.model.fit(train_data, train_labels,
                       batch_size=self.batch_size,
                       epochs=self.number_of_epochs,
                       validation_data=(validation_data, validation_labels),
                       shuffle=True,
                       callbacks=[self.history, self.metrics,
                                  model_checkpoint])

    def custom_loss(self, true, pred):
        """YOLO-style sum-squared-error loss over all grid cells.

        Terms (summed over batch and cells, gated by the ground-truth
        objectness ``p_true``):
          * alpha_coord * (x, y) squared error,
          * alpha_coord * (sqrt(w), sqrt(h)) squared error,
          * objectness squared error for object cells,
          * alpha_noobj * objectness squared error for empty cells.
        """
        loss = tf.Variable(0, dtype=tf.float32)

        # Flatten the spatial grid: (batch, grid^2, annotations + 1).
        true =\
            tf.reshape(true, shape=(-1, self.grid_size ** 2,
                                    (self.number_of_annotations + 1)))
        pred =\
            tf.reshape(pred, shape=(-1, self.grid_size ** 2,
                                    (self.number_of_annotations + 1)))

        # Channel layout per cell: x, y, w, h, objectness probability.
        x_true = true[:, :, 0]
        x_pred = pred[:, :, 0]
        y_true = true[:, :, 1]
        y_pred = pred[:, :, 1]
        w_true = true[:, :, 2]
        w_pred = pred[:, :, 2]
        h_true = true[:, :, 3]
        h_pred = pred[:, :, 3]
        p_true = true[:, :, 4]
        p_pred = pred[:, :, 4]

        # Center-coordinate loss (only where an object is present).
        loss = tf.add(loss, tf.reduce_sum(
            tf.scalar_mul(self.alpha_coord, tf.multiply(
                p_true, tf.add(tf.squared_difference(x_true, x_pred),
                               tf.squared_difference(y_true, y_pred))))))
        # Box-size loss on square roots, as in the YOLO paper, so that
        # errors on small boxes weigh more than on large ones.
        loss = tf.add(loss, tf.reduce_sum(
            tf.scalar_mul(self.alpha_coord, tf.multiply(
                p_true, tf.add(tf.squared_difference(tf.sqrt(w_true),
                                                     tf.sqrt(w_pred)),
                               tf.squared_difference(tf.sqrt(h_true),
                                                     tf.sqrt(h_pred)))))))
        # Objectness loss for cells that do contain an object.
        loss = tf.add(loss, tf.reduce_sum(tf.multiply(
            p_true, tf.squared_difference(p_true, p_pred))))
        # Down-weighted objectness loss for empty cells.
        loss = tf.add(loss, tf.reduce_sum(tf.scalar_mul(
            self.alpha_noobj, tf.multiply(
                (1 - p_true), tf.squared_difference(p_true, p_pred)))))

        return loss

    def predict(self, image):
        """Run the network and convert box predictions to corner form."""
        predict = self.model.predict(image)
        predict = self.boxes_to_corners(predict)

        return predict

    def predict_boxes(self, image):
        """Predict and return the NMS-filtered boxes (as TF tensors)."""
        predict = self.predict(image)
        true_boxes = self.non_max_suppression(predict)

        return true_boxes

    def predict_images(self, video):
        """Predict boxes for a batch of frames, padded to equal length.

        Each frame's surviving boxes are padded with all-zero rows so the
        result stacks into one rectangular numpy array.
        """
        video_predictions = self.predict(video)

        predictions = []
        for pred in video_predictions:
            true_boxes = self.non_max_suppression(pred)
            predictions.append(true_boxes)

        # Evaluate the TF tensors in the shared module-level session.
        predictions = sess.run(predictions)

        max_pred = max(len(pred) for pred in predictions) + 1

        # Pad every frame's box list with [0, 0, 0, 0, 0] rows up to the
        # common length `max_pred`.
        predictions =\
            np.array([np.vstack([pred, [[0] * 5] * (max_pred - len(pred))])
                      for pred in predictions])

        return predictions

    def boxes_to_corners(self, prediction):
        """Convert (cx, cy, w, h) boxes to (x1, y1, x2, y2) corners."""
        prediction = np.reshape(prediction, (-1, self.grid_size ** 2,
                                 self.number_of_annotations + 1))

        # Work on a copy so the probability channel is preserved untouched.
        corners_prediction = np.array(prediction, copy=True)

        corners_prediction[:, :, 0] =\
            prediction[:, :, 0] - (prediction[:, :, 2] / 2)
        corners_prediction[:, :, 1] =\
            prediction[:, :, 1] - (prediction[:, :, 3] / 2)
        corners_prediction[:, :, 2] =\
            prediction[:, :, 0] + (prediction[:, :, 2] / 2)
        corners_prediction[:, :, 3] =\
            prediction[:, :, 1] + (prediction[:, :, 3] / 2)

        return corners_prediction

    def non_max_suppression(self, predict):
        """Threshold by probability, then apply TF NMS.

        Returns a tensor of shape (kept, 5): corner coordinates with the
        surviving probability appended as the last column.
        """
        # Drop boxes whose objectness is at or below prob_threshold.
        predict = predict[predict[:, 4] > self.prob_threshold]

        probabilities = predict[:, 4]
        boxes = predict[:, :4]

        true_boxes_idx =\
            tf.image.non_max_suppression(boxes, probabilities,
                                         self.grid_size ** 2,
                                         iou_threshold=self.iou_threshold)
        true_boxes = tf.gather(boxes, true_boxes_idx)
        true_probabilities = tf.gather(probabilities, true_boxes_idx)

        true_boxes = tf.concat([true_boxes, true_probabilities[:, None]],
                               axis=1)

        return true_boxes

    def sess_run(self, tensor):
        """Evaluate a tensor in the shared module-level session."""
        return sess.run(tensor)

    def save_model(self):
        """Persist the full model (weights + architecture) to disk."""
        self.model.save(self.model_binary_data_file)

    def load_model(self):
        """Load the model from the configured binary file."""
        # The custom loss must be supplied for Keras deserialization.
        custom_objects = self.get_custom_objects()

        self.model = load_model(self.model_binary_data_file,
                                custom_objects=custom_objects)

    def load_model_file(self, model_file):
        """Load the model from an explicit file path."""
        custom_objects = self.get_custom_objects()

        self.model = load_model(model_file,
                                custom_objects=custom_objects)

    def save_json_model_structure(self):
        """Write the model architecture (JSON string) to the config path."""
        json_model_structure = self.model.to_json()

        with open(self.model_json_structure_file, 'w') as f:
            json.dump(json_model_structure, f)

    def load_model_from_json_structure(self):
        """Rebuild the model from the stored JSON architecture.

        NOTE(review): model_from_json expects a JSON string, but this
        passes the file *path* — confirm this code path is exercised.
        """
        custom_objects = self.get_custom_objects()

        self.model = model_from_json(self.model_json_structure_file,
                                     custom_objects=custom_objects)

    def get_custom_objects(self):
        """Return the custom-object map needed to deserialize the model."""
        custom_objects = {"custom_loss": self.custom_loss}

        return custom_objects

    def summary(self, train_data, train_labels, validation_data,
                validation_labels, test_data, test_labels):
        """Compute and cache metrics and a textual model structure."""
        self.model_metrics =\
            self.genarate_metrics(train_data, train_labels, validation_data,
                                  validation_labels, test_data, test_labels)
        self.model_structure = self.genarate_model_structure()

    def genarate_metrics(self, train_data, train_labels, validation_data,
                         validation_labels, test_data, test_labels):
        """Evaluate loss and detection metrics on all three splits."""
        train_metrics =\
            self.metrics.eval_model_metrics(train_data, train_labels)
        validation_metrics =\
            self.metrics.eval_model_metrics(validation_data, validation_labels)
        test_metrics = self.metrics.eval_model_metrics(test_data, test_labels)

        train_loss = self.model.evaluate(train_data, train_labels)
        validation_loss =\
            self.model.evaluate(validation_data, validation_labels)
        test_loss = self.model.evaluate(test_data, test_labels)

        metrics = self.get_metrics_values(train_metrics, validation_metrics,
                                          test_metrics, train_loss,
                                          validation_loss, test_loss)

        return metrics

    def genarate_model_structure(self):
        """Capture ``model.summary()`` output as a single string."""
        model_structure = []
        self.model.summary(print_fn=lambda row: model_structure.append(row))
        model_structure = '\n'.join(model_structure)

        return model_structure

    def get_metrics_values(self, train_metrics, validation_metrics,
                           test_metrics, train_loss, validation_loss,
                           test_loss):
        """Group per-split metrics into one nested dictionary."""
        loss = {'train_loss': train_loss,
                'validation_loss': validation_loss,
                'test_loss': test_loss}
        iou = {'train_iou': train_metrics['iou'],
               'validation_iou': validation_metrics['iou'],
               'test_iou': test_metrics['iou']}
        precision = {'train_precision': train_metrics['precision'],
                     'validation_precision': validation_metrics['precision'],
                     'test_precision': test_metrics['precision']}
        recall = {'train_recall': train_metrics['recall'],
                  'validation_recall': validation_metrics['recall'],
                  'test_recall': test_metrics['recall']}
        f1_score = {'train_f1_score': train_metrics['f1_score'],
                    'validation_f1_score': validation_metrics['f1_score'],
                    'test_f1_score': test_metrics['f1_score']}

        return {'loss': loss, 'iou': iou, 'precision': precision,
                'recall': recall, 'f1_score': f1_score}

    def get_metrics(self):
        """Return the metrics dict built by summary()."""
        return self.model_metrics

    def get_model_structure(self):
        """Return the textual model structure built by summary()."""
        return self.model_structure

    def get_model_history(self):
        """Merge Keras training history with per-epoch validation metrics."""
        return {**self.history.history,
                **self.metrics.get_validation_metrics()}

    def get_optimizer_params(self):
        """Return the optimizer's configuration dictionary."""
        return self.optimizer.get_config()

    def get_optimizer_type(self):
        """Return the optimizer's class (its Python type)."""
        return type(self.optimizer)

    def get_batch_size(self):
        """Return the configured training batch size."""
        return self.batch_size
| [
"keras.models.load_model",
"keras.optimizers.Adadelta",
"tensorflow.reshape",
"keras.optimizers.Adagrad",
"keras.models.Model",
"tensorflow.Variable",
"keras.layers.Input",
"keras.layers.Reshape",
"tensorflow.sqrt",
"keras.initializers.RandomNormal",
"keras.optimizers.Adamax",
"keras.optimizer... | [((548, 560), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (558, 560), True, 'import tensorflow as tf\n'), ((561, 580), 'keras.backend.set_session', 'K.set_session', (['sess'], {}), '(sess)\n', (574, 580), True, 'from keras import backend as K\n'), ((1633, 1642), 'keras.callbacks.History', 'History', ([], {}), '()\n', (1640, 1642), False, 'from keras.callbacks import History, ModelCheckpoint\n'), ((2248, 2316), 'keras.layers.Input', 'Input', ([], {'shape': '(self.image_size, self.image_size, self.color_channels)'}), '(shape=(self.image_size, self.image_size, self.color_channels))\n', (2253, 2316), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((2409, 2430), 'keras.models.Model', 'Model', (['input', 'network'], {}), '(input, network)\n', (2414, 2430), False, 'from keras.models import Model\n'), ((11354, 11408), 'aovek.validate.model_metrics.ModelMetrics', 'ModelMetrics', (['validation_data', 'validation_labels', 'self'], {}), '(validation_data, validation_labels, self)\n', (11366, 11408), False, 'from aovek.validate.model_metrics import ModelMetrics\n'), ((11473, 11548), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['self.model_checkpoint_binary_data_file'], {'monitor': '"""val_loss"""'}), "(self.model_checkpoint_binary_data_file, monitor='val_loss')\n", (11488, 11548), False, 'from keras.callbacks import History, ModelCheckpoint\n'), ((12000, 12032), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'dtype': 'tf.float32'}), '(0, dtype=tf.float32)\n', (12011, 12032), True, 'import tensorflow as tf\n'), ((12062, 12147), 'tensorflow.reshape', 'tf.reshape', (['true'], {'shape': '(-1, self.grid_size ** 2, self.number_of_annotations + 1)'}), '(true, shape=(-1, self.grid_size ** 2, self.number_of_annotations +\n 1))\n', (12072, 12147), True, 'import tensorflow as tf\n'), ((12210, 12295), 'tensorflow.reshape', 'tf.reshape', (['pred'], {'shape': '(-1, self.grid_size 
** 2, self.number_of_annotations + 1)'}), '(pred, shape=(-1, self.grid_size ** 2, self.number_of_annotations +\n 1))\n', (12220, 12295), True, 'import tensorflow as tf\n'), ((14496, 14581), 'numpy.reshape', 'np.reshape', (['prediction', '(-1, self.grid_size ** 2, self.number_of_annotations + 1)'], {}), '(prediction, (-1, self.grid_size ** 2, self.number_of_annotations +\n 1))\n', (14506, 14581), True, 'import numpy as np\n'), ((14653, 14684), 'numpy.array', 'np.array', (['prediction'], {'copy': '(True)'}), '(prediction, copy=True)\n', (14661, 14684), True, 'import numpy as np\n'), ((15334, 15443), 'tensorflow.image.non_max_suppression', 'tf.image.non_max_suppression', (['boxes', 'probabilities', '(self.grid_size ** 2)'], {'iou_threshold': 'self.iou_threshold'}), '(boxes, probabilities, self.grid_size ** 2,\n iou_threshold=self.iou_threshold)\n', (15362, 15443), True, 'import tensorflow as tf\n'), ((15543, 15575), 'tensorflow.gather', 'tf.gather', (['boxes', 'true_boxes_idx'], {}), '(boxes, true_boxes_idx)\n', (15552, 15575), True, 'import tensorflow as tf\n'), ((15605, 15645), 'tensorflow.gather', 'tf.gather', (['probabilities', 'true_boxes_idx'], {}), '(probabilities, true_boxes_idx)\n', (15614, 15645), True, 'import tensorflow as tf\n'), ((15668, 15728), 'tensorflow.concat', 'tf.concat', (['[true_boxes, true_probabilities[:, None]]'], {'axis': '(1)'}), '([true_boxes, true_probabilities[:, None]], axis=1)\n', (15677, 15728), True, 'import tensorflow as tf\n'), ((16031, 16101), 'keras.models.load_model', 'load_model', (['self.model_binary_data_file'], {'custom_objects': 'custom_objects'}), '(self.model_binary_data_file, custom_objects=custom_objects)\n', (16041, 16101), False, 'from keras.models import load_model\n'), ((16250, 16303), 'keras.models.load_model', 'load_model', (['model_file'], {'custom_objects': 'custom_objects'}), '(model_file, custom_objects=custom_objects)\n', (16260, 16303), False, 'from keras.models import load_model\n'), ((16658, 16736), 
'keras.models.model_from_json', 'model_from_json', (['self.model_json_structure_file'], {'custom_objects': 'custom_objects'}), '(self.model_json_structure_file, custom_objects=custom_objects)\n', (16673, 16736), False, 'from keras.models import model_from_json\n'), ((2995, 3028), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_1"""'}), "(name='norm_1')\n", (3013, 3028), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((3056, 3091), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)', 'name': '"""relu_1"""'}), "(alpha=0.1, name='relu_1')\n", (3065, 3091), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((3119, 3164), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'name': '"""pool_1"""'}), "(pool_size=(2, 2), name='pool_1')\n", (3131, 3164), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((3537, 3570), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_2"""'}), "(name='norm_2')\n", (3555, 3570), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((3598, 3633), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)', 'name': '"""relu_2"""'}), "(alpha=0.1, name='relu_2')\n", (3607, 3633), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((3661, 3706), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'name': '"""pool_2"""'}), "(pool_size=(2, 2), name='pool_2')\n", (3673, 3706), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((4080, 4113), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_3"""'}), "(name='norm_3')\n", 
(4098, 4113), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((4141, 4176), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)', 'name': '"""relu_3"""'}), "(alpha=0.1, name='relu_3')\n", (4150, 4176), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((4517, 4550), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_4"""'}), "(name='norm_4')\n", (4535, 4550), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((4578, 4613), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)', 'name': '"""relu_4"""'}), "(alpha=0.1, name='relu_4')\n", (4587, 4613), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((4955, 4988), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_5"""'}), "(name='norm_5')\n", (4973, 4988), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((5016, 5051), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)', 'name': '"""relu_5"""'}), "(alpha=0.1, name='relu_5')\n", (5025, 5051), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((5079, 5124), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'name': '"""pool_3"""'}), "(pool_size=(2, 2), name='pool_3')\n", (5091, 5124), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((5498, 5531), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_6"""'}), "(name='norm_6')\n", (5516, 5531), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((5559, 5594), 
'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)', 'name': '"""relu_6"""'}), "(alpha=0.1, name='relu_6')\n", (5568, 5594), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((5936, 5969), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_7"""'}), "(name='norm_7')\n", (5954, 5969), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((5997, 6032), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)', 'name': '"""relu_7"""'}), "(alpha=0.1, name='relu_7')\n", (6006, 6032), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((6374, 6407), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_8"""'}), "(name='norm_8')\n", (6392, 6407), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((6435, 6470), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)', 'name': '"""relu_8"""'}), "(alpha=0.1, name='relu_8')\n", (6444, 6470), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((6498, 6543), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'name': '"""pool_4"""'}), "(pool_size=(2, 2), name='pool_4')\n", (6510, 6543), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((6917, 6950), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_9"""'}), "(name='norm_9')\n", (6935, 6950), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((6978, 7013), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)', 'name': '"""relu_9"""'}), "(alpha=0.1, name='relu_9')\n", (6987, 7013), False, 'from 
keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((7356, 7390), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_10"""'}), "(name='norm_10')\n", (7374, 7390), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((7418, 7454), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)', 'name': '"""relu_10"""'}), "(alpha=0.1, name='relu_10')\n", (7427, 7454), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((7797, 7831), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_11"""'}), "(name='norm_11')\n", (7815, 7831), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((7859, 7895), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)', 'name': '"""relu_11"""'}), "(alpha=0.1, name='relu_11')\n", (7868, 7895), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((8238, 8272), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_12"""'}), "(name='norm_12')\n", (8256, 8272), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((8300, 8336), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)', 'name': '"""relu_12"""'}), "(alpha=0.1, name='relu_12')\n", (8309, 8336), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((8679, 8713), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_13"""'}), "(name='norm_13')\n", (8697, 8713), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((8741, 8777), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': 
'(0.1)', 'name': '"""relu_13"""'}), "(alpha=0.1, name='relu_13')\n", (8750, 8777), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((8805, 8850), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'name': '"""pool_5"""'}), "(pool_size=(2, 2), name='pool_5')\n", (8817, 8850), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((9226, 9260), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_14"""'}), "(name='norm_14')\n", (9244, 9260), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((9288, 9324), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)', 'name': '"""relu_14"""'}), "(alpha=0.1, name='relu_14')\n", (9297, 9324), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((9352, 9384), 'keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.5)', 'name': '"""drop_1"""'}), "(rate=0.5, name='drop_1')\n", (9359, 9384), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((9729, 9763), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""norm_15"""'}), "(name='norm_15')\n", (9747, 9763), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((9791, 9827), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)', 'name': '"""relu_15"""'}), "(alpha=0.1, name='relu_15')\n", (9800, 9827), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((9855, 9887), 'keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.5)', 'name': '"""drop_2"""'}), "(rate=0.5, name='drop_2')\n", (9862, 9887), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, 
Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((10308, 10381), 'keras.layers.Reshape', 'Reshape', (['(self.grid_size, self.grid_size, self.number_of_annotations + 1)'], {}), '((self.grid_size, self.grid_size, self.number_of_annotations + 1))\n', (10315, 10381), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, Reshape, BatchNormalization, LeakyReLU, Dropout\n'), ((10569, 10613), 'keras.optimizers.SGD', 'SGD', ([], {'lr': 'self.learning_rate', 'decay': 'self.decay'}), '(lr=self.learning_rate, decay=self.decay)\n', (10572, 10613), False, 'from keras.optimizers import SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax, Nadam\n'), ((16504, 16538), 'json.dump', 'json.dump', (['json_model_structure', 'f'], {}), '(json_model_structure, f)\n', (16513, 16538), False, 'import json\n'), ((10685, 10733), 'keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': 'self.learning_rate', 'decay': 'self.decay'}), '(lr=self.learning_rate, decay=self.decay)\n', (10692, 10733), False, 'from keras.optimizers import SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax, Nadam\n'), ((2914, 2928), 'keras.initializers.RandomNormal', 'RandomNormal', ([], {}), '()\n', (2926, 2928), False, 'from keras.initializers import RandomNormal\n'), ((3454, 3468), 'keras.initializers.RandomNormal', 'RandomNormal', ([], {}), '()\n', (3466, 3468), False, 'from keras.initializers import RandomNormal\n'), ((3997, 4011), 'keras.initializers.RandomNormal', 'RandomNormal', ([], {}), '()\n', (4009, 4011), False, 'from keras.initializers import RandomNormal\n'), ((4434, 4448), 'keras.initializers.RandomNormal', 'RandomNormal', ([], {}), '()\n', (4446, 4448), False, 'from keras.initializers import RandomNormal\n'), ((4872, 4886), 'keras.initializers.RandomNormal', 'RandomNormal', ([], {}), '()\n', (4884, 4886), False, 'from keras.initializers import RandomNormal\n'), ((5415, 5429), 'keras.initializers.RandomNormal', 'RandomNormal', ([], {}), '()\n', (5427, 5429), False, 'from keras.initializers import 
RandomNormal\n'), ((5853, 5867), 'keras.initializers.RandomNormal', 'RandomNormal', ([], {}), '()\n', (5865, 5867), False, 'from keras.initializers import RandomNormal\n'), ((6291, 6305), 'keras.initializers.RandomNormal', 'RandomNormal', ([], {}), '()\n', (6303, 6305), False, 'from keras.initializers import RandomNormal\n'), ((6834, 6848), 'keras.initializers.RandomNormal', 'RandomNormal', ([], {}), '()\n', (6846, 6848), False, 'from keras.initializers import RandomNormal\n'), ((7273, 7287), 'keras.initializers.RandomNormal', 'RandomNormal', ([], {}), '()\n', (7285, 7287), False, 'from keras.initializers import RandomNormal\n'), ((7714, 7728), 'keras.initializers.RandomNormal', 'RandomNormal', ([], {}), '()\n', (7726, 7728), False, 'from keras.initializers import RandomNormal\n'), ((8155, 8169), 'keras.initializers.RandomNormal', 'RandomNormal', ([], {}), '()\n', (8167, 8169), False, 'from keras.initializers import RandomNormal\n'), ((8596, 8610), 'keras.initializers.RandomNormal', 'RandomNormal', ([], {}), '()\n', (8608, 8610), False, 'from keras.initializers import RandomNormal\n'), ((9143, 9157), 'keras.initializers.RandomNormal', 'RandomNormal', ([], {}), '()\n', (9155, 9157), False, 'from keras.initializers import RandomNormal\n'), ((9646, 9660), 'keras.initializers.RandomNormal', 'RandomNormal', ([], {}), '()\n', (9658, 9660), False, 'from keras.initializers import RandomNormal\n'), ((10224, 10238), 'keras.initializers.RandomNormal', 'RandomNormal', ([], {}), '()\n', (10236, 10238), False, 'from keras.initializers import RandomNormal\n'), ((10805, 10814), 'keras.optimizers.Adagrad', 'Adagrad', ([], {}), '()\n', (10812, 10814), False, 'from keras.optimizers import SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax, Nadam\n'), ((13353, 13390), 'tensorflow.squared_difference', 'tf.squared_difference', (['p_true', 'p_pred'], {}), '(p_true, p_pred)\n', (13374, 13390), True, 'import tensorflow as tf\n'), ((10887, 10897), 'keras.optimizers.Adadelta', 'Adadelta', ([], 
{}), '()\n', (10895, 10897), False, 'from keras.optimizers import SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax, Nadam\n'), ((13525, 13562), 'tensorflow.squared_difference', 'tf.squared_difference', (['p_true', 'p_pred'], {}), '(p_true, p_pred)\n', (13546, 13562), True, 'import tensorflow as tf\n'), ((10966, 11011), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'self.learning_rate', 'decay': 'self.decay'}), '(lr=self.learning_rate, decay=self.decay)\n', (10970, 11011), False, 'from keras.optimizers import SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax, Nadam\n'), ((12777, 12814), 'tensorflow.squared_difference', 'tf.squared_difference', (['x_true', 'x_pred'], {}), '(x_true, x_pred)\n', (12798, 12814), True, 'import tensorflow as tf\n'), ((12847, 12884), 'tensorflow.squared_difference', 'tf.squared_difference', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (12868, 12884), True, 'import tensorflow as tf\n'), ((11082, 11129), 'keras.optimizers.Adamax', 'Adamax', ([], {'lr': 'self.learning_rate', 'decay': 'self.decay'}), '(lr=self.learning_rate, decay=self.decay)\n', (11088, 11129), False, 'from keras.optimizers import SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax, Nadam\n'), ((13044, 13059), 'tensorflow.sqrt', 'tf.sqrt', (['w_true'], {}), '(w_true)\n', (13051, 13059), True, 'import tensorflow as tf\n'), ((13114, 13129), 'tensorflow.sqrt', 'tf.sqrt', (['w_pred'], {}), '(w_pred)\n', (13121, 13129), True, 'import tensorflow as tf\n'), ((13185, 13200), 'tensorflow.sqrt', 'tf.sqrt', (['h_true'], {}), '(h_true)\n', (13192, 13200), True, 'import tensorflow as tf\n'), ((13255, 13270), 'tensorflow.sqrt', 'tf.sqrt', (['h_pred'], {}), '(h_pred)\n', (13262, 13270), True, 'import tensorflow as tf\n'), ((11199, 11206), 'keras.optimizers.Nadam', 'Nadam', ([], {}), '()\n', (11204, 11206), False, 'from keras.optimizers import SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax, Nadam\n')] |
import pickle
import numpy as np
from collections import defaultdict
def save_pickle(obj, FILEPATH):
    """Serialize `obj` to FILEPATH with pickle.

    Uses a context manager so the file handle is closed even if
    pickle.dump raises (the original left the handle open on error).
    """
    with open(FILEPATH, 'wb') as f:
        pickle.dump(obj, f)
def open_pickle(FILEPATH):
    """Load and return the pickled object stored at FILEPATH.

    Uses a context manager so the file handle is closed even if
    pickle.load raises (the original left the handle open on error).
    """
    with open(FILEPATH, 'rb') as f:
        return pickle.load(f)
def save_arrays(FILEPATH, exp_num, order, X_metrics, Y_metrics, threshold,
                pct_5, pct_95, A_biases, lower_bound, upper_bound):
    """Store one experiment's metric arrays under results[exp_num][order]
    in the pickle file at FILEPATH, preserving any existing entries.

    pct_5, pct_95, A_biases, lower_bound and upper_bound are currently
    accepted but not stored (their assignment lines are commented out).
    """
    results_dict = open_pickle(FILEPATH)
    # keep an existing per-experiment dict if present, else start a fresh one
    results_dict[exp_num] = results_dict.get(exp_num, defaultdict(dict))
    order_dict = results_dict[exp_num].get(order, {})
    order_dict['X_array'] = X_metrics
    order_dict['Y_array'] = Y_metrics
    order_dict['X_mean'] = np.mean(X_metrics)
    order_dict['Y_mean'] = np.mean(Y_metrics)
    order_dict['threshold'] = threshold
    #order_dict['pct_5'] = pct_5
    #order_dict['pct_95'] = pct_95
    #order_dict['A_biases'] = A_biases
    #order_dict['lower_bound'] = lower_bound
    #order_dict['upper_bound'] = upper_bound
    results_dict[exp_num][order] = order_dict
    save_pickle(results_dict, FILEPATH)
    # NOTE: the backslash continues the f-string, so the printed message
    # contains the next line's leading spaces.
    print(f"Results array successfully saved to file {FILEPATH} under\
          keys [{exp_num}][{order}]")
def save_experiment_arbitrary_label(filepath, exp_num, order, label, data, display=None):
    """Store `data` under results[exp_num][order][label] in the pickle at filepath.

    display: None for quiet operation, 'all' to print the whole experiment
    dict, 'some' to print just the newly stored entry.
    """
    results_dict = open_pickle(filepath)
    # keep an existing per-experiment dict if present, else start a fresh one
    results_dict[exp_num] = results_dict.get(exp_num, defaultdict(dict))
    order_dict = results_dict[exp_num].get(order, {})
    order_dict[label] = data
    results_dict[exp_num][order] = order_dict
    save_pickle(results_dict, filepath)
    if display == 'all':
        print(f'FULL RESULTS DICT FOR EXP {exp_num}', results_dict[exp_num])
    elif display == 'some':
        print(f'SPECIFIC RESULTS FOR EXP {exp_num}, LABEL "{label}": \
            {results_dict[exp_num][order][label]}')
    print(f"Results array successfully saved to file {filepath} under\
          keys [{exp_num}][{order}][{label}]")
def save_scalers(filepath, exp_num, order, scaler):
    """Persist a fitted scaler under keys [exp_num][order] in the results
    pickle stored at filepath, keeping all other entries intact."""
    results = open_pickle(filepath)
    results[exp_num] = results.get(exp_num, defaultdict(dict))
    results[exp_num][order] = scaler
    save_pickle(results, filepath)
def save_array_old(FILEPATH, arr, exp_num, order, list_name):
    """Legacy saver: store `arr` under results[exp_num]['<order>_order_<list_name>'].

    Unlike save_arrays, this assumes results_dict[exp_num] already exists
    in the pickle (it would raise KeyError otherwise — TODO confirm callers
    guarantee this).
    """
    results_dict = open_pickle(FILEPATH)
    exp_name = str(order)+'_order_'+list_name
    results_dict[exp_num][exp_name] = arr
    save_pickle(results_dict, FILEPATH)
    print(f"Results array successfully saved to file {FILEPATH} under\
          keys [{exp_num}]['{exp_name}']")
def filter_terms_not_in_wemodel(we_model, X_terms, Y_terms):
    """Drop terms missing from the word-embedding vocabulary, then trim
    the longer list from the front so both lists have equal length.

    Prints which terms were removed at each step and returns the
    filtered (first_list, second_list) tuple.
    """
    # report terms the embedding model does not know about
    for name, terms in zip(('first_list', 'second_list'), (X_terms, Y_terms)):
        unknown_words = [w for w in terms if w not in we_model.wv]
        print(f'The following terms were removed from the list {name} because they were not found in the we_model: {unknown_words}')
    xs = [x for x in X_terms if x in we_model.wv]
    ys = [y for y in Y_terms if y in we_model.wv]
    # balance the two lists by dropping the surplus head of the longer one
    diff = abs(len(xs) - len(ys))
    if len(xs) > len(ys):
        print(f'The following terms were removed from the first list to balance the length of the lists: {xs[:diff]}')
        xs = xs[diff:]
    elif len(ys) > len(xs):
        print(f'The following terms were removed from the second list to balance the length of the lists: {ys[:diff]}')
        ys = ys[diff:]
    return (xs, ys)
| [
"numpy.mean",
"collections.defaultdict",
"pickle.dump",
"pickle.load"
] | [((135, 154), 'pickle.dump', 'pickle.dump', (['obj', 'f'], {}), '(obj, f)\n', (146, 154), False, 'import pickle\n'), ((236, 250), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (247, 250), False, 'import pickle\n'), ((683, 701), 'numpy.mean', 'np.mean', (['X_metrics'], {}), '(X_metrics)\n', (690, 701), True, 'import numpy as np\n'), ((729, 747), 'numpy.mean', 'np.mean', (['Y_metrics'], {}), '(Y_metrics)\n', (736, 747), True, 'import numpy as np\n'), ((507, 524), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (518, 524), False, 'from collections import defaultdict\n'), ((1357, 1374), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (1368, 1374), False, 'from collections import defaultdict\n'), ((2055, 2072), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (2066, 2072), False, 'from collections import defaultdict\n')] |
# T-Test Assignment
import numpy as np
"""
1.Certain refined edible oil is packed in tins holding 16 kg each. The filling machine can maintain this but with a standard deviation of 0.5 kg. Samples of 25 are taken from the production line. If a sample means is (i)16.35kg (ii)15.8kg, Can we be 95 percent sure that the sample has come from a population of 16kg tins?
"""
print("Assignment 1")
from scipy.stats import t
# Two-tailed test of H0: population mean = 16 kg, alpha = 0.05.
print("\nTrial 1 with 16.35")
pMean = 16
sMean = 16.35
n = 25
# test statistic with known sigma = 0.5 and sample size n
tstatistics = (sMean - pMean) / (0.5 / np.sqrt(n))
print("T Statistics:", tstatistics)
tcritical_l = t.ppf(q=0.05 / 2, df=n - 1)  # lower critical value; upper is symmetric
tcritical_u = -tcritical_l
if tstatistics < tcritical_l or tstatistics > tcritical_u:
    print("Reject the Null Hypothesis")
else:
    print("Fail to reject the Null Hypothesis")
print("\nTrial 2 with 15.8")
pMean = 16
sMean = 15.8
n = 25
tstatistics = (sMean - pMean) / (0.5 / np.sqrt(n))
print("T Statistics:", tstatistics)
tcritical_l = t.ppf(q=0.05 / 2, df=n - 1)
tcritical_u = -tcritical_l
if tstatistics < tcritical_l or tstatistics > tcritical_u:
    print("Reject the Null Hypothesis")
else:
    print("Fail to reject the Null Hypothesis")
print("\n")
"""
2.A filling machine is expected to fill 5kg of powder into bags. A sample of 10 bags gave the weighs 4.7, 4.9, 5.0, 5.1, 5.4, 5.2, 4.6,5.1, 4.6 and 4.7. Test whether the machine is working properly.
"""
print("Assignment 2")
from scipy.stats import ttest_1samp
# One-sample two-tailed t-test of H0: mean fill weight = 5 kg.
sample_array = np.array([4.7, 4.9, 5.0, 5.1, 5.4, 5.2, 4.6, 5.1, 4.6, 4.7])
tstats, pvalue = ttest_1samp(sample_array, 5)
print("T-statistics:", tstats)
print("P value:", pvalue)
if pvalue < 0.05:
    print("Reject the Null Hypothesis")
    # BUG FIX: rejecting H0 (mean = 5 kg) means the machine is NOT
    # filling correctly; the original printed "working properly" on
    # both branches.
    print("The filling machine is not working properly")
else:
    print("Fails to reject the Null Hypothesis")
    print("The filling machine is working properly")
print("\n")
"""
3.Two sets of ten students selected at random from a college were taken; One set was given memory test as they were and the other was given the memory test after two weeks of training and the scores are given below
Set A: 10 8 7 9 8 10 9 6 7 8
Set B: 12 8 8 10 8 11 9 8 9 9
Do you think there is a significant effect due to training?
"""
print("Assignment 3")  # typo fix: was "Assignement 3"
sample1 = np.array([10, 8, 7, 9, 8, 10, 9, 6, 7, 8])
sample2 = np.array([12, 8, 8, 10, 8, 11, 9, 8, 9, 9])
from scipy.stats import ttest_ind
# Independent two-sample t-test of H0: the two group means are equal.
tstats, pvalue = ttest_ind(sample1, sample2)
print("T-statistics:", tstats)
print("P value:", pvalue)
if pvalue < 0.05:
    print("Reject the Null Hypothesis")
    # BUG FIX: rejecting H0 (equal means) means the training DID have a
    # significant effect; the original had the two conclusions swapped.
    print("Significant effect due to Training")
else:
    print("Fails to reject the Null Hypothesis")
    print("No significant effect due to Training")
"scipy.stats.ttest_1samp",
"scipy.stats.ttest_ind",
"numpy.array",
"scipy.stats.t.ppf",
"numpy.sqrt"
] | [((568, 595), 'scipy.stats.t.ppf', 't.ppf', ([], {'q': '(0.05 / 2)', 'df': '(n - 1)'}), '(q=0.05 / 2, df=n - 1)\n', (573, 595), False, 'from scipy.stats import t\n'), ((911, 938), 'scipy.stats.t.ppf', 't.ppf', ([], {'q': '(0.05 / 2)', 'df': '(n - 1)'}), '(q=0.05 / 2, df=n - 1)\n', (916, 938), False, 'from scipy.stats import t\n'), ((1401, 1461), 'numpy.array', 'np.array', (['[4.7, 4.9, 5.0, 5.1, 5.4, 5.2, 4.6, 5.1, 4.6, 4.7]'], {}), '([4.7, 4.9, 5.0, 5.1, 5.4, 5.2, 4.6, 5.1, 4.6, 4.7])\n', (1409, 1461), True, 'import numpy as np\n'), ((1475, 1503), 'scipy.stats.ttest_1samp', 'ttest_1samp', (['sample_array', '(5)'], {}), '(sample_array, 5)\n', (1486, 1503), False, 'from scipy.stats import ttest_1samp\n'), ((2166, 2208), 'numpy.array', 'np.array', (['[10, 8, 7, 9, 8, 10, 9, 6, 7, 8]'], {}), '([10, 8, 7, 9, 8, 10, 9, 6, 7, 8])\n', (2174, 2208), True, 'import numpy as np\n'), ((2208, 2251), 'numpy.array', 'np.array', (['[12, 8, 8, 10, 8, 11, 9, 8, 9, 9]'], {}), '([12, 8, 8, 10, 8, 11, 9, 8, 9, 9])\n', (2216, 2251), True, 'import numpy as np\n'), ((2293, 2320), 'scipy.stats.ttest_ind', 'ttest_ind', (['sample1', 'sample2'], {}), '(sample1, sample2)\n', (2302, 2320), False, 'from scipy.stats import ttest_ind\n'), ((509, 519), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (516, 519), True, 'import numpy as np\n'), ((852, 862), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (859, 862), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 21 13:34:22 2020
@author: josep
Helper functions for the recordlinkage script
"""
import pandas as pd
import re
import unicodedata
from numpy import cos, sin, arcsin, sqrt
from math import radians
def strip_accents(text):
    """Return `text` as a str with accents/diacritics removed.

    The input is coerced to str, decomposed to NFD form so each accented
    character becomes a base letter plus combining marks, and the marks
    are dropped via the ascii/ignore encode round-trip.

    The original contained a dead Python-2 `unicode(text, 'utf-8')` call
    guarded by `except NameError`; on Python 3 it always raised and was
    swallowed, so it has been removed.
    """
    text = str(text)
    return (
        unicodedata.normalize('NFD', text)
        .encode('ascii', 'ignore')
        .decode("utf-8")
    )
#distance calculation
def haversine(row):
    """Great-circle distance in kilometres between the two points in `row`.

    `row` must provide 'Longitude_1', 'Latitude_1', 'Longitude_2' and
    'Latitude_2' in decimal degrees (e.g. a pandas Series from a merged
    record pair). Uses the haversine formula with Earth radius 6371 km.
    """
    lam1 = radians(row['Longitude_1'])
    phi1 = radians(row['Latitude_1'])
    lam2 = radians(row['Longitude_2'])
    phi2 = radians(row['Latitude_2'])
    dphi = phi2 - phi1
    dlam = lam2 - lam1
    # haversine of the central angle
    h = sin(dphi / 2) ** 2 + cos(phi1) * cos(phi2) * sin(dlam / 2) ** 2
    return 6371 * 2 * arcsin(sqrt(h))
#address standardization - includes province names to their abbreviations
en_sub={'alberta': 'ab',
'manitoba': 'mb',
'british columbia': 'bc',
'new brunswick': 'nb',
'newfoundland': 'nl',
'newfoundland and labrador': 'nl',
'nova scotia': 'ns',
'ontario': 'on',
'quebec': 'qc',
'prince edward island': 'pe',
'saskatchewan': 'sk',
'northwest territories': 'nt',
'nunavut': 'nu',
'yukon territories': 'yt',
'avenue': 'av',
'ave': 'av',
'boulevard':'blvd',
'by-pass':'bypass',
'circle':'cir',
'circuit':'circt',
'concession':'conc',
'crescent':'cres',
'corners':'crnrs',
'crossing':'cross',
'crossroad':'crossrd',
'court':'crt',
'diversion':'divers',
'drive':'dr',
'esplanade':'espl',
'estates':'estate',
'expressway':'expy',
'extension':'exten',
'freeway':'fwy',
'gardens':'gdns',
'harbour':'harbr',
'grounds':'grnds',
'highlands':'hghlds',
'heights':'hts',
'highway':'hwy',
'laneway':'lanewy',
'lookout':'lkout',
'limits':'lmts',
'mountain':'mtn',
'orchard':'orch',
'passage':'pass',
'park':'pk',
'parkway':'pky',
'place':'pl',
'plateau':'plat',
'promenade':'prom',
'point':'pt',
'pathway':'ptway',
'plateau':'plat',
'private':'pvt',
'promenade':'prom',
'road':'rd',
'range':'rg',
'route':'rte',
'rightofway':'rtofwy',
'section':'sectn',
'sideroad':'siderd',
'square':'sq',
'street':'st',
'subdivision':'subdiv',
'terrace':'terr',
'townline':'tline',
'tournabout':'trnabt',
'village':'villge'
}
en_dirs={'east':'e',
'west':'w',
'north':'n',
'south':'s',
'northeast':'ne',
'north-east':'ne',
'northwest':'nw',
'north-west':'nw',
'southeast':'se',
'south-east':'se',
'southwest':'sw',
'south-west':'sw'
}
fr_sub={'autoroute':'aut',
'avenue':'av',
'ave':'av',
'boulevard':'boul',
'barrage':'brge',
'centre':'c',
'carré':'car',
'cul-de-sac':'cds',
'chemin':'ch',
'carrefour':'carref',
'croissant':'crois',
'échangeur':'éch',
'esplanada':'espl',
'impasse':'imp',
'passage':'pass',
'plateau':'plat',
'promenade':'prom',
'rond-point':'rdpt',
'ruelle':'rle',
'route':'rte',
'sentier':'sent',
'terrasse':'tsse',
}
fr_dirs={'est':'e',
'ouest':'o',
'nord':'n',
'sud':'s',
'nordest':'ne',
'nord-est':'ne',
'nordouest':'no',
'nord-ouest':'no',
'sudest':'se',
'sud-est':'se',
'sudouest':'so',
'sud-ouest':'so'
}
def AddressClean(text,lang='en'):
#reads in string, makes replacements of street types and directions
#specify the language you want to use, default is english
if lang=='en':
sub=en_sub
dirs=en_dirs
elif lang=='fr':
sub=fr_sub
dirs=fr_dirs
else:
print("specify lang='en' or lang='fr'")
return
#replace periods
text=text.replace('.','')
r"""
Another version of AddressClean is 'smart', in the sense that it only
shortens directions or street types in certain contexts (see version on github)
because we're applying this to both street names and whole address strings,
those rules aren't likely to work terribly well
so we'll apply them universally, and hope for the best
"""
for i,j in dirs.items():
expr=re.compile(r"\b"+re.escape(i)+r"\b")
text=re.sub(expr,j,text)
for i, j in sub.items():
expr=re.compile(r"\b"+re.escape(i)+r"\b")
text=re.sub(expr,j,text)
return text | [
"unicodedata.normalize",
"re.escape",
"numpy.sin",
"numpy.cos",
"re.sub",
"numpy.sqrt"
] | [((4258, 4279), 're.sub', 're.sub', (['expr', 'j', 'text'], {}), '(expr, j, text)\n', (4264, 4279), False, 'import re\n'), ((4370, 4391), 're.sub', 're.sub', (['expr', 'j', 'text'], {}), '(expr, j, text)\n', (4376, 4391), False, 'import re\n'), ((838, 851), 'numpy.sin', 'sin', (['(dlat / 2)'], {}), '(dlat / 2)\n', (841, 851), False, 'from numpy import cos, sin, arcsin, sqrt\n'), ((913, 920), 'numpy.sqrt', 'sqrt', (['a'], {}), '(a)\n', (917, 920), False, 'from numpy import cos, sin, arcsin, sqrt\n'), ((855, 864), 'numpy.cos', 'cos', (['lat1'], {}), '(lat1)\n', (858, 864), False, 'from numpy import cos, sin, arcsin, sqrt\n'), ((867, 876), 'numpy.cos', 'cos', (['lat2'], {}), '(lat2)\n', (870, 876), False, 'from numpy import cos, sin, arcsin, sqrt\n'), ((879, 892), 'numpy.sin', 'sin', (['(dlon / 2)'], {}), '(dlon / 2)\n', (882, 892), False, 'from numpy import cos, sin, arcsin, sqrt\n'), ((422, 456), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFD"""', 'text'], {}), "('NFD', text)\n", (443, 456), False, 'import unicodedata\n'), ((4228, 4240), 're.escape', 're.escape', (['i'], {}), '(i)\n', (4237, 4240), False, 'import re\n'), ((4337, 4349), 're.escape', 're.escape', (['i'], {}), '(i)\n', (4346, 4349), False, 'import re\n')] |
import os
import torch
import numpy as np
from config import get_config
from src.Learner import face_learner
from src.models.efficientnet import EfficientNet
# fix all random seeds for reproducibility
import random
def seed_everything(seed):
    """Seed every RNG (hash, Python, NumPy, PyTorch CPU/CUDA) for reproducible runs."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Trade kernel-selection speed for deterministic cuDNN behavior.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
if __name__ == '__main__':
    # Load the run configuration (paths, seed, hyper-parameters).
    conf = get_config()
    ## create the model directory if it does not exist yet
    # NOTE(review): the original (Korean) comment referred to conf.work_path,
    # but the code checks/creates conf.model_path -- confirm which is intended.
    if not os.path.isdir(conf.model_path):
        os.mkdir(conf.model_path)
    # Fix all random seeds for reproducibility before any training starts.
    seed_everything(conf.seed)
    learner = face_learner(conf)
    learner.train(conf)
| [
"os.mkdir",
"numpy.random.seed",
"os.path.isdir",
"torch.manual_seed",
"torch.cuda.manual_seed",
"random.seed",
"config.get_config",
"src.Learner.face_learner"
] | [((225, 242), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (236, 242), False, 'import random\n'), ((292, 312), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (306, 312), True, 'import numpy as np\n'), ((317, 340), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (334, 340), False, 'import torch\n'), ((345, 373), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (367, 373), False, 'import torch\n'), ((504, 516), 'config.get_config', 'get_config', ([], {}), '()\n', (514, 516), False, 'from config import get_config\n'), ((695, 713), 'src.Learner.face_learner', 'face_learner', (['conf'], {}), '(conf)\n', (707, 713), False, 'from src.Learner import face_learner\n'), ((559, 589), 'os.path.isdir', 'os.path.isdir', (['conf.model_path'], {}), '(conf.model_path)\n', (572, 589), False, 'import os\n'), ((599, 624), 'os.mkdir', 'os.mkdir', (['conf.model_path'], {}), '(conf.model_path)\n', (607, 624), False, 'import os\n')] |
import numpy as np
import torch
from PIL import Image
import os
import pandas as pd
from torchvision.datasets.folder import default_loader
from torchvision.datasets.utils import download_url
from torch.utils.data import Dataset
import scipy.io as sio
class Cub2011(Dataset):
    """Caltech-UCSD Birds-200-2011 image dataset.

    Supports either the per-image train/test split shipped with the data
    (``split='ours'``) or class-level 'easy'/'hard' splits loaded from
    .mat files (``split='article'``).

    Parameters
    ----------
    root : str
        Directory that contains (or will contain) ``CUB_200_2011/``.
    train : bool
        Select the training part of the split (ignored when ``all`` is True).
    transform : callable or None
        Transform applied to each PIL image.
    loader : callable
        Image loader for samples (defaults to torchvision's default_loader).
    download : bool
        Download and extract the archive if the data is missing.
    easy : bool
        With ``split='article'``, pick the easy (True) or hard (False) split.
    split : {'article', 'ours'}
        Which train/test split definition to use.
    all : bool
        If True, keep both train and test images.
    augment : bool
        If True, ``__getitem__`` returns two independent augmentations.
    """
    base_folder = 'CUB_200_2011/images'
    url = 'http://www.vision.caltech.edu/visipedia-data/CUB-200-2011/CUB_200_2011.tgz'
    filename = 'CUB_200_2011.tgz'
    tgz_md5 = '97eceeb196236b17998738112f37df78'
    train_test_split_easy_dir = 'data/CUB2011/train_test_split_easy.mat'
    train_test_split_hard_dir = 'data/CUB2011/train_test_split_hard.mat'

    def __init__(self, root, train=True, transform=None, loader=default_loader, download=False, easy=True,
                 split='article', all=False, augment=False):
        self.root = os.path.expanduser(root)
        self.transform = transform
        # Bug fix: honor the caller-supplied loader; previously this was
        # hard-coded to default_loader and the argument was silently ignored.
        self.loader = loader
        self.train = train
        self.easy = easy
        self.split = split
        self.all = all
        self.augment = augment
        if download:
            self._download()
        if not self._check_integrity():
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You can use download=True to download it')

    def _load_metadata(self):
        """Build ``self.data``: columns img_id, filepath, target, is_training_img."""
        images = pd.read_csv(os.path.join(self.root, 'CUB_200_2011', 'images.txt'), sep=' ',
                             names=['img_id', 'filepath'])
        image_class_labels = pd.read_csv(os.path.join(self.root, 'CUB_200_2011', 'image_class_labels.txt'),
                                         sep=' ', names=['img_id', 'target'])
        if self.split == 'ours':
            # Per-image split shipped with the dataset.
            train_test_split = pd.read_csv(os.path.join(self.root, 'CUB_200_2011', 'train_test_split.txt'),
                                           sep=' ', names=['img_id', 'is_training_img'])
        elif self.split == 'article':
            # Class-level split: every image of a class is entirely train or test.
            if self.easy:
                train_test_split_dir = self.train_test_split_easy_dir
            else:
                train_test_split_dir = self.train_test_split_hard_dir
            train_test_split = sio.loadmat(train_test_split_dir)
            train_cid = train_test_split['train_cid'].squeeze()
            test_cid = train_test_split['test_cid'].squeeze()
            # Map each class id to 1 (train) or 0 (test), then expand per image.
            map_ = {c: 1 for c in train_cid}
            map_.update({c: 0 for c in test_cid})
            images_target = image_class_labels['target'].to_numpy()
            images_split = np.asarray([map_[target] for target in images_target])
            data_ = np.column_stack((image_class_labels['img_id'].to_numpy(), images_split))
            train_test_split = pd.DataFrame(data=data_, columns=['img_id', 'is_training_img'])
        else:
            raise ValueError("split is incorrect, please choose split==ours or split==article")
        # 4 columns - image_id, path, is_training_image, class_labels
        data = images.merge(image_class_labels, on='img_id')
        self.data = data.merge(train_test_split, on='img_id')
        if not self.all:
            # (Removed a leftover debug statement `a=1` from this branch.)
            if self.train:
                self.data = self.data[self.data.is_training_img == 1]
            else:
                self.data = self.data[self.data.is_training_img == 0]

    def _check_integrity(self):
        """Return True iff the metadata loads and every listed image file exists."""
        try:
            self._load_metadata()
        except Exception:
            return False
        for index, row in self.data.iterrows():
            filepath = os.path.join(self.root, self.base_folder, row.filepath)
            if not os.path.isfile(filepath):
                print(filepath)
                return False
        return True

    def _download(self):
        """Download and extract the archive unless the data is already present."""
        import tarfile
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        download_url(self.url, self.root, self.filename, self.tgz_md5)
        with tarfile.open(os.path.join(self.root, self.filename), "r:gz") as tar:
            # SECURITY NOTE: extractall trusts archive member paths; only use
            # with archives fetched from the official URL above.
            tar.extractall(path=self.root)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        """Return a dict with the transformed image(s) and a 0-based label."""
        if torch.is_tensor(idx):
            idx = idx.tolist()
        sample = self.data.iloc[idx]
        path = os.path.join(self.root, self.base_folder, sample.filepath)
        label = sample.target - 1  # Targets start at 1 by default, so shift to 0
        # NOTE(review): images are opened directly here instead of via
        # self.loader -- confirm whether the custom loader should be used.
        img = Image.open(path).convert('RGB')
        if self.augment:
            # training mode - create two augmentations
            aug_image1 = self.transform(img)
            aug_image2 = self.transform(img)
            final_sample = {'image1': aug_image1, 'image2': aug_image2, 'label': label}
        else:
            # test mode - no need for augmentations
            image = self.transform(img)
            final_sample = {'image': image, 'label': label}
        return final_sample
| [
"pandas.DataFrame",
"os.path.join",
"scipy.io.loadmat",
"numpy.asarray",
"PIL.Image.open",
"torchvision.datasets.utils.download_url",
"os.path.isfile",
"torch.is_tensor",
"os.path.expanduser"
] | [((843, 867), 'os.path.expanduser', 'os.path.expanduser', (['root'], {}), '(root)\n', (861, 867), False, 'import os\n'), ((3914, 3976), 'torchvision.datasets.utils.download_url', 'download_url', (['self.url', 'self.root', 'self.filename', 'self.tgz_md5'], {}), '(self.url, self.root, self.filename, self.tgz_md5)\n', (3926, 3976), False, 'from torchvision.datasets.utils import download_url\n'), ((4210, 4230), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (4225, 4230), False, 'import torch\n'), ((4318, 4376), 'os.path.join', 'os.path.join', (['self.root', 'self.base_folder', 'sample.filepath'], {}), '(self.root, self.base_folder, sample.filepath)\n', (4330, 4376), False, 'import os\n'), ((1385, 1438), 'os.path.join', 'os.path.join', (['self.root', '"""CUB_200_2011"""', '"""images.txt"""'], {}), "(self.root, 'CUB_200_2011', 'images.txt')\n", (1397, 1438), False, 'import os\n'), ((1551, 1616), 'os.path.join', 'os.path.join', (['self.root', '"""CUB_200_2011"""', '"""image_class_labels.txt"""'], {}), "(self.root, 'CUB_200_2011', 'image_class_labels.txt')\n", (1563, 1616), False, 'import os\n'), ((3546, 3601), 'os.path.join', 'os.path.join', (['self.root', 'self.base_folder', 'row.filepath'], {}), '(self.root, self.base_folder, row.filepath)\n', (3558, 3601), False, 'import os\n'), ((1775, 1838), 'os.path.join', 'os.path.join', (['self.root', '"""CUB_200_2011"""', '"""train_test_split.txt"""'], {}), "(self.root, 'CUB_200_2011', 'train_test_split.txt')\n", (1787, 1838), False, 'import os\n'), ((2185, 2218), 'scipy.io.loadmat', 'sio.loadmat', (['train_test_split_dir'], {}), '(train_test_split_dir)\n', (2196, 2218), True, 'import scipy.io as sio\n'), ((2541, 2595), 'numpy.asarray', 'np.asarray', (['[map_[target] for target in images_target]'], {}), '([map_[target] for target in images_target])\n', (2551, 2595), True, 'import numpy as np\n'), ((2722, 2785), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data_', 'columns': "['img_id', 
'is_training_img']"}), "(data=data_, columns=['img_id', 'is_training_img'])\n", (2734, 2785), True, 'import pandas as pd\n'), ((3622, 3646), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (3636, 3646), False, 'import os\n'), ((4006, 4044), 'os.path.join', 'os.path.join', (['self.root', 'self.filename'], {}), '(self.root, self.filename)\n', (4018, 4044), False, 'import os\n'), ((4475, 4491), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (4485, 4491), False, 'from PIL import Image\n')] |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import json
import numpy as np
from logging import getLogger
from .model import update_predictions, flip_attributes
from .utils import print_accuracies
logger = getLogger()
class Evaluator(object):
    """Bundles the autoencoder, its discriminators and an external evaluation
    classifier, and computes all evaluation metrics for one epoch."""
    def __init__(self, ae, lat_dis, ptc_dis, clf_dis, eval_clf, data, params):
        """
        Evaluator initialization.
        """
        # data / parameters
        self.data = data
        self.params = params
        # modules
        self.ae = ae
        self.lat_dis = lat_dis
        self.ptc_dis = ptc_dis
        self.clf_dis = clf_dis
        self.eval_clf = eval_clf
        # the external classifier must match the image size and cover every
        # attribute the model was trained on
        assert eval_clf.img_sz == params.img_sz
        assert all(attr in eval_clf.attr for attr in params.attr)
    def eval_reconstruction_loss(self):
        """
        Compute the autoencoder reconstruction perplexity.
        """
        data = self.data
        params = self.params
        self.ae.eval()
        bs = params.batch_size
        costs = []
        for i in range(0, len(data), bs):
            batch_x, batch_y = data.eval_batch(i, i + bs)
            _, dec_outputs = self.ae(batch_x, batch_y)
            # mean squared error between the last decoder output and the input
            # NOTE(review): `.data[0]` is pre-0.4 PyTorch style; newer versions
            # would need `.item()` here -- confirm target torch version.
            costs.append(((dec_outputs[-1] - batch_x) ** 2).mean().data[0])
        return np.mean(costs)
    def eval_lat_dis_accuracy(self):
        """
        Compute the latent discriminator prediction accuracy.
        """
        data = self.data
        params = self.params
        self.ae.eval()
        self.lat_dis.eval()
        bs = params.batch_size
        # one accumulator per attribute
        all_preds = [[] for _ in range(len(params.attr))]
        for i in range(0, len(data), bs):
            batch_x, batch_y = data.eval_batch(i, i + bs)
            enc_outputs = self.ae.encode(batch_x)
            # the discriminator reads the encoder layer selected by n_skip
            preds = self.lat_dis(enc_outputs[-1 - params.n_skip]).data.cpu()
            update_predictions(all_preds, preds, batch_y.data.cpu(), params)
        return [np.mean(x) for x in all_preds]
    def eval_ptc_dis_accuracy(self):
        """
        Compute the patch discriminator prediction accuracy.
        """
        data = self.data
        params = self.params
        self.ae.eval()
        self.ptc_dis.eval()
        bs = params.batch_size
        real_preds = []
        fake_preds = []
        for i in range(0, len(data), bs):
            # batch / encode / decode
            batch_x, batch_y = data.eval_batch(i, i + bs)
            flipped = flip_attributes(batch_y, params, 'all')
            _, dec_outputs = self.ae(batch_x, flipped)
            # predictions: real images vs reconstructions with flipped attributes
            real_preds.extend(self.ptc_dis(batch_x).data.tolist())
            fake_preds.extend(self.ptc_dis(dec_outputs[-1]).data.tolist())
        return real_preds, fake_preds
    def eval_clf_dis_accuracy(self):
        """
        Compute the classifier discriminator prediction accuracy.
        """
        data = self.data
        params = self.params
        self.ae.eval()
        self.clf_dis.eval()
        bs = params.batch_size
        # one accumulator per (attribute, category) pair
        all_preds = [[] for _ in range(params.n_attr)]
        for i in range(0, len(data), bs):
            # batch / encode / decode
            batch_x, batch_y = data.eval_batch(i, i + bs)
            enc_outputs = self.ae.encode(batch_x)
            # flip all attributes one by one
            k = 0
            for j, (_, n_cat) in enumerate(params.attr):
                for value in range(n_cat):
                    flipped = flip_attributes(batch_y, params, j, new_value=value)
                    dec_outputs = self.ae.decode(enc_outputs, flipped)
                    # classify: does the discriminator see the flipped value?
                    clf_dis_preds = self.clf_dis(dec_outputs[-1])[:, j:j + n_cat].max(1)[1].view(-1)
                    all_preds[k].extend((clf_dis_preds.data.cpu() == value).tolist())
                    k += 1
            assert k == params.n_attr
        return [np.mean(x) for x in all_preds]
    def eval_clf_accuracy(self):
        """
        Compute the accuracy of flipped attributes according to the trained classifier.
        """
        data = self.data
        params = self.params
        self.ae.eval()
        bs = params.batch_size
        # column offset of each model attribute inside the classifier's output
        idx = []
        for j in range(len(params.attr)):
            attr_index = self.eval_clf.attr.index(params.attr[j])
            idx.append(sum([x[1] for x in self.eval_clf.attr[:attr_index]]))
        all_preds = [[] for _ in range(params.n_attr)]
        for i in range(0, len(data), bs):
            # batch / encode / decode
            batch_x, batch_y = data.eval_batch(i, i + bs)
            enc_outputs = self.ae.encode(batch_x)
            # flip all attributes one by one
            k = 0
            for j, (_, n_cat) in enumerate(params.attr):
                for value in range(n_cat):
                    flipped = flip_attributes(batch_y, params, j, new_value=value)
                    dec_outputs = self.ae.decode(enc_outputs, flipped)
                    # classify
                    clf_preds = self.eval_clf(dec_outputs[-1])[:, idx[j]:idx[j] + n_cat].max(1)[1].view(-1)
                    all_preds[k].extend((clf_preds.data.cpu() == value).tolist())
                    k += 1
            assert k == params.n_attr
        return [np.mean(x) for x in all_preds]
    def evaluate(self, n_epoch):
        """
        Evaluate all models / log evaluation results.
        """
        params = self.params
        logger.info('')
        # reconstruction loss
        ae_loss = self.eval_reconstruction_loss()
        # latent discriminator accuracy
        log_lat_dis = []
        if params.n_lat_dis:
            lat_dis_accu = self.eval_lat_dis_accuracy()
            log_lat_dis.append(('lat_dis_accu', np.mean(lat_dis_accu)))
            for accu, (name, _) in zip(lat_dis_accu, params.attr):
                log_lat_dis.append(('lat_dis_accu_%s' % name, accu))
            logger.info('Latent discriminator accuracy:')
            print_accuracies(log_lat_dis)
        # patch discriminator accuracy
        log_ptc_dis = []
        if params.n_ptc_dis:
            ptc_dis_real_preds, ptc_dis_fake_preds = self.eval_ptc_dis_accuracy()
            # real images should score >= 0.5, fakes <= 0.5
            accu_real = (np.array(ptc_dis_real_preds).astype(np.float32) >= 0.5).mean()
            accu_fake = (np.array(ptc_dis_fake_preds).astype(np.float32) <= 0.5).mean()
            log_ptc_dis.append(('ptc_dis_preds_real', np.mean(ptc_dis_real_preds)))
            log_ptc_dis.append(('ptc_dis_preds_fake', np.mean(ptc_dis_fake_preds)))
            log_ptc_dis.append(('ptc_dis_accu_real', accu_real))
            log_ptc_dis.append(('ptc_dis_accu_fake', accu_fake))
            log_ptc_dis.append(('ptc_dis_accu', (accu_real + accu_fake) / 2))
            logger.info('Patch discriminator accuracy:')
            print_accuracies(log_ptc_dis)
        # classifier discriminator accuracy
        log_clf_dis = []
        if params.n_clf_dis:
            clf_dis_accu = self.eval_clf_dis_accuracy()
            # k walks through the flattened (attribute, category) accuracies
            k = 0
            log_clf_dis += [('clf_dis_accu', np.mean(clf_dis_accu))]
            for name, n_cat in params.attr:
                log_clf_dis.append(('clf_dis_accu_%s' % name, np.mean(clf_dis_accu[k:k + n_cat])))
                log_clf_dis.extend([('clf_dis_accu_%s_%i' % (name, j), clf_dis_accu[k + j])
                                    for j in range(n_cat)])
                k += n_cat
            logger.info('Classifier discriminator accuracy:')
            print_accuracies(log_clf_dis)
        # classifier accuracy
        log_clf = []
        clf_accu = self.eval_clf_accuracy()
        k = 0
        log_clf += [('clf_accu', np.mean(clf_accu))]
        for name, n_cat in params.attr:
            log_clf.append(('clf_accu_%s' % name, np.mean(clf_accu[k:k + n_cat])))
            log_clf.extend([('clf_accu_%s_%i' % (name, j), clf_accu[k + j])
                            for j in range(n_cat)])
            k += n_cat
        logger.info('Classifier accuracy:')
        print_accuracies(log_clf)
        # log autoencoder loss
        logger.info('Autoencoder loss: %.5f' % ae_loss)
        # JSON log
        to_log = dict([
            ('n_epoch', n_epoch),
            ('ae_loss', ae_loss)
        ] + log_lat_dis + log_ptc_dis + log_clf_dis + log_clf)
        logger.debug("__log__:%s" % json.dumps(to_log))
        return to_log
def compute_accuracy(classifier, data, params):
    """
    Compute the classifier prediction accuracy.
    """
    classifier.eval()
    batch_size = params.batch_size
    # one list of correctness flags per classifier attribute
    all_preds = [[] for _ in range(len(classifier.attr))]
    for start in range(0, len(data), batch_size):
        batch_x, batch_y = data.eval_batch(start, start + batch_size)
        predictions = classifier(batch_x).data.cpu()
        update_predictions(all_preds, predictions, batch_y.data.cpu(), params)
    return [np.mean(preds) for preds in all_preds]
| [
"numpy.mean",
"numpy.array",
"logging.getLogger",
"json.dumps"
] | [((357, 368), 'logging.getLogger', 'getLogger', ([], {}), '()\n', (366, 368), False, 'from logging import getLogger\n'), ((1395, 1409), 'numpy.mean', 'np.mean', (['costs'], {}), '(costs)\n', (1402, 1409), True, 'import numpy as np\n'), ((8863, 8873), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (8870, 8873), True, 'import numpy as np\n'), ((2050, 2060), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (2057, 2060), True, 'import numpy as np\n'), ((3977, 3987), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (3984, 3987), True, 'import numpy as np\n'), ((5329, 5339), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (5336, 5339), True, 'import numpy as np\n'), ((7703, 7720), 'numpy.mean', 'np.mean', (['clf_accu'], {}), '(clf_accu)\n', (7710, 7720), True, 'import numpy as np\n'), ((8373, 8391), 'json.dumps', 'json.dumps', (['to_log'], {}), '(to_log)\n', (8383, 8391), False, 'import json\n'), ((5805, 5826), 'numpy.mean', 'np.mean', (['lat_dis_accu'], {}), '(lat_dis_accu)\n', (5812, 5826), True, 'import numpy as np\n'), ((6471, 6498), 'numpy.mean', 'np.mean', (['ptc_dis_real_preds'], {}), '(ptc_dis_real_preds)\n', (6478, 6498), True, 'import numpy as np\n'), ((6555, 6582), 'numpy.mean', 'np.mean', (['ptc_dis_fake_preds'], {}), '(ptc_dis_fake_preds)\n', (6562, 6582), True, 'import numpy as np\n'), ((7110, 7131), 'numpy.mean', 'np.mean', (['clf_dis_accu'], {}), '(clf_dis_accu)\n', (7117, 7131), True, 'import numpy as np\n'), ((7813, 7843), 'numpy.mean', 'np.mean', (['clf_accu[k:k + n_cat]'], {}), '(clf_accu[k:k + n_cat])\n', (7820, 7843), True, 'import numpy as np\n'), ((7240, 7274), 'numpy.mean', 'np.mean', (['clf_dis_accu[k:k + n_cat]'], {}), '(clf_dis_accu[k:k + n_cat])\n', (7247, 7274), True, 'import numpy as np\n'), ((6266, 6294), 'numpy.array', 'np.array', (['ptc_dis_real_preds'], {}), '(ptc_dis_real_preds)\n', (6274, 6294), True, 'import numpy as np\n'), ((6354, 6382), 'numpy.array', 'np.array', (['ptc_dis_fake_preds'], {}), '(ptc_dis_fake_preds)\n', (6362, 6382), 
True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 11 14:32:42 2021

@author: 91960

Explain a matrix-factorization recommendation for one user: show their
5-star training movies next to the system's top-N suggestions.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import csv

# Read the learned parameters of the best model
#mean_rating = np.float(pd.read_csv('../Weights/Mean_Rating/Weight_lr0.01_reg0.2_factor40_epoch100.csv', index_col = 0).to_numpy())
#user_bias = pd.read_csv('../Weights/User_Bias/Weight_lr0.01_reg0.2_factor40_epoch100.csv', index_col = 0).to_numpy().flatten()
#item_bias = pd.read_csv('../Weights/Item_Bias/Weight_lr0.01_reg0.2_factor40_epoch100.csv', index_col = 0).to_numpy().flatten()
user_factor = pd.read_csv('../Weights/User_Factor/Weight_lr0.01_reg0.2_factor10_epoch100.csv', index_col = 0).to_numpy()
item_factor = pd.read_csv('../Weights/Item_Factor/Weight_lr0.01_reg0.2_factor10_epoch100.csv', index_col = 0).to_numpy()

# Data used during training of these models
data = pd.read_csv('../Data/u.data', delimiter = '\t', names = ['User', 'Item', 'Rating', 'Time'])
num_users = len(pd.unique(data['User']))
num_items = len(pd.unique(data['Item']))
# Drop the saved pandas index column from each split.
train_data = pd.read_csv('../Data_Split/train.csv').to_numpy()[:, 1 :]
val_data = pd.read_csv('../Data_Split/val.csv').to_numpy()[:, 1 :]
test_data = pd.read_csv('../Data_Split/test.csv').to_numpy()[:, 1:]

# Movie Index - Name Mapping
movie_indices = []
movie_name = []
# Bug fix: the file handle was previously opened and never closed; use a
# context manager so it is released deterministically.
with open('../Data/u.item') as movie_file:
    csv_file = csv.reader(movie_file, delimiter = '|')
    for row in csv_file:
        movie_indices.append(int(row[0]) - 1)  # 1-based ids -> 0-based indices
        movie_name.append(row[1])
movie_name = np.array(movie_name)

# Explanation for a particular prediction
user_index = 900
num_recos = 10
# Movies the user already rated during training (must not be re-recommended).
# NOTE(review): train_data item ids are used directly as 0-based indices into
# movie_name below -- verify the split files already store 0-based ids.
movie_indices_train_user = train_data[train_data[:, 0] == user_index][:, 1]
best_movie_indices_train_user = train_data[(train_data[:, 0] == user_index) & (train_data[:, 2] >= 5)]
best_movies_training = movie_name[best_movie_indices_train_user[:, 1]]

# Also exclude movies with too few training ratings (unreliable factors).
threshold_num_train_ratings = 10
train_movie_indices, num_train_movie_count = np.unique(train_data[:, 1], return_counts = True)
train_movies_below_threshold = train_movie_indices[np.argwhere(num_train_movie_count <= threshold_num_train_ratings).flatten()]
items_to_del_test = np.array(list(set(np.concatenate((movie_indices_train_user, train_movies_below_threshold)))))
movie_indices_test_user = np.delete(np.arange(num_items), items_to_del_test)
#movie_indices_test_user = np.delete(np.arange(num_items), movie_indices_train_user)

# Predicted rating for every item is dot(item_factor, user_factor[user]).
movie_rating_predict_user = np.dot(item_factor, user_factor[user_index])
movie_rating_test_predict_user = movie_rating_predict_user[movie_indices_test_user]
top_movies_user_local = np.argsort(movie_rating_test_predict_user)[-num_recos : ][::-1]
top_movies_user = movie_indices_test_user[top_movies_user_local]
top_movies_name_user = movie_name[top_movies_user]
top_rating_user = movie_rating_test_predict_user[top_movies_user_local]

print('Movies rated 5/5 by User {}'.format(user_index))
print('\n')
print(best_movies_training)
print('\n\n')
print('Movies Suggested By Recommender System')
print('\n')
print(top_movies_name_user)
| [
"csv.reader",
"numpy.concatenate",
"pandas.read_csv",
"pandas.unique",
"numpy.argsort",
"numpy.array",
"numpy.arange",
"numpy.argwhere",
"numpy.dot",
"numpy.unique"
] | [((922, 1013), 'pandas.read_csv', 'pd.read_csv', (['"""../Data/u.data"""'], {'delimiter': '"""\t"""', 'names': "['User', 'Item', 'Rating', 'Time']"}), "('../Data/u.data', delimiter='\\t', names=['User', 'Item',\n 'Rating', 'Time'])\n", (933, 1013), True, 'import pandas as pd\n'), ((1431, 1468), 'csv.reader', 'csv.reader', (['movie_file'], {'delimiter': '"""|"""'}), "(movie_file, delimiter='|')\n", (1441, 1468), False, 'import csv\n'), ((1585, 1605), 'numpy.array', 'np.array', (['movie_name'], {}), '(movie_name)\n', (1593, 1605), True, 'import numpy as np\n'), ((2026, 2073), 'numpy.unique', 'np.unique', (['train_data[:, 1]'], {'return_counts': '(True)'}), '(train_data[:, 1], return_counts=True)\n', (2035, 2073), True, 'import numpy as np\n'), ((2521, 2565), 'numpy.dot', 'np.dot', (['item_factor', 'user_factor[user_index]'], {}), '(item_factor, user_factor[user_index])\n', (2527, 2565), True, 'import numpy as np\n'), ((1033, 1056), 'pandas.unique', 'pd.unique', (["data['User']"], {}), "(data['User'])\n", (1042, 1056), True, 'import pandas as pd\n'), ((1075, 1098), 'pandas.unique', 'pd.unique', (["data['Item']"], {}), "(data['Item'])\n", (1084, 1098), True, 'import pandas as pd\n'), ((2361, 2381), 'numpy.arange', 'np.arange', (['num_items'], {}), '(num_items)\n', (2370, 2381), True, 'import numpy as np\n'), ((636, 734), 'pandas.read_csv', 'pd.read_csv', (['"""../Weights/User_Factor/Weight_lr0.01_reg0.2_factor10_epoch100.csv"""'], {'index_col': '(0)'}), "('../Weights/User_Factor/Weight_lr0.01_reg0.2_factor10_epoch100.csv'\n , index_col=0)\n", (647, 734), True, 'import pandas as pd\n'), ((758, 856), 'pandas.read_csv', 'pd.read_csv', (['"""../Weights/Item_Factor/Weight_lr0.01_reg0.2_factor10_epoch100.csv"""'], {'index_col': '(0)'}), "('../Weights/Item_Factor/Weight_lr0.01_reg0.2_factor10_epoch100.csv'\n , index_col=0)\n", (769, 856), True, 'import pandas as pd\n'), ((2676, 2718), 'numpy.argsort', 'np.argsort', (['movie_rating_test_predict_user'], {}), 
'(movie_rating_test_predict_user)\n', (2686, 2718), True, 'import numpy as np\n'), ((1116, 1154), 'pandas.read_csv', 'pd.read_csv', (['"""../Data_Split/train.csv"""'], {}), "('../Data_Split/train.csv')\n", (1127, 1154), True, 'import pandas as pd\n'), ((1186, 1222), 'pandas.read_csv', 'pd.read_csv', (['"""../Data_Split/val.csv"""'], {}), "('../Data_Split/val.csv')\n", (1197, 1222), True, 'import pandas as pd\n'), ((1255, 1292), 'pandas.read_csv', 'pd.read_csv', (['"""../Data_Split/test.csv"""'], {}), "('../Data_Split/test.csv')\n", (1266, 1292), True, 'import pandas as pd\n'), ((2128, 2193), 'numpy.argwhere', 'np.argwhere', (['(num_train_movie_count <= threshold_num_train_ratings)'], {}), '(num_train_movie_count <= threshold_num_train_ratings)\n', (2139, 2193), True, 'import numpy as np\n'), ((2246, 2318), 'numpy.concatenate', 'np.concatenate', (['(movie_indices_train_user, train_movies_below_threshold)'], {}), '((movie_indices_train_user, train_movies_below_threshold))\n', (2260, 2318), True, 'import numpy as np\n')] |
"""
Framingham Risk Score Calculation
Code borrowed from
https://github.com/fonnesbeck/framingham_risk
"""
import numpy as np
from cvdm.score import cox_surv, BaseRisk
from cvdm.score import clean_bp, clean_bmi, clean_tot_chol, clean_hdl, clean_age
# Cox-model parameters for the Framingham risk models: regression
# coefficients ("coef"), 10-year baseline survival ("s0") and a centering
# constant ("const") -- presumably the mean linear predictor; confirm
# against the source publication.
NONLAB_WOMEN = {
    "coef": np.array([2.72107, # log age
              0.51125, # BMI
              2.81291, # log SBP (not treated)
              2.88267, # log SBP (treated)
              0.61868, # smoking
              0.77763 # diabetes
              ]),
    "s0": 0.94833,
    "const": 26.0145
}
NONLAB_MEN = {
    "coef": np.array([3.11296, # log age
              0.79277, # BMI
              1.85508, # log SBP (not treated)
              1.92672, # log SBP (treated)
              0.70953, # smoking
              0.53160 # diabetes
              ]),
    "s0": 0.88431,
    "const": 23.9388
}
LAB_MEN = {
    "coef": np.array([3.06117, # log age
              1.12370, # log total cholesterol
              -0.93263, # log HDL cholesterol
              1.93303, # log SBP (not treated)
              1.99881, # log SBP (treated)
              0.65451, # smoking
              0.57367 # diabetes
              ]),
    "s0": 0.88936,
    "const": 23.9802
}
LAB_WOMEN = {
    "coef": np.array([2.32888, # log age
              1.20904, # log total cholesterol
              -0.70833, # log HDL cholesterol
              2.76157, # log SBP (not treated)
              2.82263, # log SBP (treated)
              0.52873, # smoking
              0.69154 # diabetes
              ]),
    "s0": 0.95012,
    "const": 26.1931
}
class FrsSimple(BaseRisk):
    """Simple (non-laboratory) Framingham risk score wrapper."""
    features = ["female",
                "cur_smoke",
                "dm"]
    feat_key = features + ["index_age",
                           "bmi",
                           "sbp",
                           "htn_treat"]

    def score(self, row):
        """10-year risk for one subject row (non-laboratory inputs)."""
        return frs_simple(row["female"], row["index_age"], row["bmi"],
                          row["sbp"], row["htn_treat"], row["cur_smoke"],
                          row["dm"])

    def get_features(self, row):
        """Base features plus the log-transformed model inputs."""
        feats = super().get_features(row)
        treated = row["htn_treat"]
        log_sbp = np.log(row["sbp"])
        feats["age_log"] = np.log(row["index_age"])
        feats["bmi_log"] = np.log(row["bmi"])
        # SBP enters through two mutually exclusive slots by treatment status.
        feats["sbp_nhtn"] = log_sbp * (1 - treated)
        feats["sbp_htn"] = log_sbp * treated
        return feats
class FrsPrimary(BaseRisk):
    """Laboratory-based Framingham risk score wrapper (uses cholesterol)."""
    features = ["female",
                "cur_smoke",
                "dm"]
    feat_key = features + ["index_age",
                           "chol_tot",
                           "chol_hdl",
                           "sbp",
                           "htn_treat"]

    def score(self, row):
        """10-year risk for one subject row (laboratory inputs)."""
        return frs_primary(row['female'], row["index_age"], row["chol_tot"],
                           row["chol_hdl"], row["sbp"], row["htn_treat"],
                           row['cur_smoke'], row["dm"])

    def get_features(self, row):
        """Base features plus the log-transformed model inputs."""
        feats = super().get_features(row)
        treated = row["htn_treat"]
        log_sbp = np.log(row["sbp"])
        feats["age_log"] = np.log(row["index_age"])
        feats["tot_log"] = np.log(row["chol_tot"])
        feats["hdl_log"] = np.log(row["chol_hdl"])
        # SBP enters through two mutually exclusive slots by treatment status.
        feats["sbp_nhtn"] = log_sbp * (1 - treated)
        feats["sbp_htn"] = log_sbp * treated
        return feats
def frs_simple(female, age, bmi, sbp, htn, smk, diab):
    """
    10-year risk calculated using the Simple Non-Laboratory
    Framingham Risk Score (FRS) Calculation.

    Parameters
    ----------
    female : boolean
    age : numeric
        Age of subject
    bmi : numeric
        BMI of subject
    sbp : numeric
        Systolic blood pressure of subject
    htn : bool or int
        Treatment for hypertension (True or False)
    smk : bool or int
        Subject is smoker (True or False)
    diab : bool or int
        Subject has diabetes (True or False)
    """
    # log(SBP) fills exactly one of two slots depending on treatment status.
    log_sbp = np.log(clean_bp(sbp))
    xFeat = np.array([
        np.log(clean_age(age)),
        np.log(clean_bmi(bmi)),
        log_sbp * (1 - htn),
        log_sbp * htn,
        smk,
        diab,
    ])
    gender_info = NONLAB_WOMEN if female else NONLAB_MEN
    return cox_surv(xFeat, gender_info["coef"],
                    gender_info["s0"], gender_info["const"])
def frs_primary(female, age, tot_chol, hdl, sbp, htn, smk, diab):
    """
    10-year risk from the laboratory-based Framingham Risk Score,
    which uses total and HDL cholesterol instead of BMI.

    Parameters
    ----------
    female : bool
    age : numeric
        Age of subject
    tot_chol : numeric
        Total cholesterol
    hdl : numeric
        HDL cholesterol
    sbp : numeric
        Systolic blood pressure of subject
    htn : bool or int
        Treatment for hypertension (True or False)
    smk : bool or int
        Subject is smoker (True or False)
    diab : bool or int
        Subject has diabetes (True or False)
    """
    # Hoisted: clean_bp/log were previously computed twice for the two
    # mutually exclusive treated/untreated SBP slots.
    log_sbp = np.log(clean_bp(sbp))
    xFeat = np.array([np.log(clean_age(age)),
                      np.log(clean_tot_chol(tot_chol)),
                      np.log(clean_hdl(hdl)),
                      log_sbp * (1 - htn),
                      log_sbp * htn,
                      smk,
                      diab])
    genderInfo = LAB_WOMEN if female else LAB_MEN
    return cox_surv(xFeat, genderInfo["coef"],
                    genderInfo["s0"], genderInfo["const"])
| [
"numpy.log",
"cvdm.score.clean_tot_chol",
"cvdm.score.clean_hdl",
"cvdm.score.cox_surv",
"numpy.array",
"cvdm.score.clean_bmi",
"cvdm.score.clean_bp",
"cvdm.score.clean_age"
] | [((283, 347), 'numpy.array', 'np.array', (['[2.72107, 0.51125, 2.81291, 2.88267, 0.61868, 0.77763]'], {}), '([2.72107, 0.51125, 2.81291, 2.88267, 0.61868, 0.77763])\n', (291, 347), True, 'import numpy as np\n'), ((631, 694), 'numpy.array', 'np.array', (['[3.11296, 0.79277, 1.85508, 1.92672, 0.70953, 0.5316]'], {}), '([3.11296, 0.79277, 1.85508, 1.92672, 0.70953, 0.5316])\n', (639, 694), True, 'import numpy as np\n'), ((895, 968), 'numpy.array', 'np.array', (['[3.06117, 1.1237, -0.93263, 1.93303, 1.99881, 0.65451, 0.57367]'], {}), '([3.06117, 1.1237, -0.93263, 1.93303, 1.99881, 0.65451, 0.57367])\n', (903, 968), True, 'import numpy as np\n'), ((1321, 1395), 'numpy.array', 'np.array', (['[2.32888, 1.20904, -0.70833, 2.76157, 2.82263, 0.52873, 0.69154]'], {}), '([2.32888, 1.20904, -0.70833, 2.76157, 2.82263, 0.52873, 0.69154])\n', (1329, 1395), True, 'import numpy as np\n'), ((4543, 4617), 'cvdm.score.cox_surv', 'cox_surv', (['xFeat', "genderInfo['coef']", "genderInfo['s0']", "genderInfo['const']"], {}), "(xFeat, genderInfo['coef'], genderInfo['s0'], genderInfo['const'])\n", (4551, 4617), False, 'from cvdm.score import cox_surv, BaseRisk\n'), ((5111, 5185), 'cvdm.score.cox_surv', 'cox_surv', (['xFeat', "genderInfo['coef']", "genderInfo['s0']", "genderInfo['const']"], {}), "(xFeat, genderInfo['coef'], genderInfo['s0'], genderInfo['const'])\n", (5119, 5185), False, 'from cvdm.score import cox_surv, BaseRisk\n'), ((2270, 2294), 'numpy.log', 'np.log', (["row['index_age']"], {}), "(row['index_age'])\n", (2276, 2294), True, 'import numpy as np\n'), ((2326, 2344), 'numpy.log', 'np.log', (["row['bmi']"], {}), "(row['bmi'])\n", (2332, 2344), True, 'import numpy as np\n'), ((3290, 3314), 'numpy.log', 'np.log', (["row['index_age']"], {}), "(row['index_age'])\n", (3296, 3314), True, 'import numpy as np\n'), ((3346, 3369), 'numpy.log', 'np.log', (["row['chol_tot']"], {}), "(row['chol_tot'])\n", (3352, 3369), True, 'import numpy as np\n'), ((3401, 3424), 'numpy.log', 'np.log', 
(["row['chol_hdl']"], {}), "(row['chol_hdl'])\n", (3407, 3424), True, 'import numpy as np\n'), ((2377, 2395), 'numpy.log', 'np.log', (["row['sbp']"], {}), "(row['sbp'])\n", (2383, 2395), True, 'import numpy as np\n'), ((2448, 2466), 'numpy.log', 'np.log', (["row['sbp']"], {}), "(row['sbp'])\n", (2454, 2466), True, 'import numpy as np\n'), ((3457, 3475), 'numpy.log', 'np.log', (["row['sbp']"], {}), "(row['sbp'])\n", (3463, 3475), True, 'import numpy as np\n'), ((3528, 3546), 'numpy.log', 'np.log', (["row['sbp']"], {}), "(row['sbp'])\n", (3534, 3546), True, 'import numpy as np\n'), ((4233, 4247), 'cvdm.score.clean_age', 'clean_age', (['age'], {}), '(age)\n', (4242, 4247), False, 'from cvdm.score import clean_bp, clean_bmi, clean_tot_chol, clean_hdl, clean_age\n'), ((4279, 4293), 'cvdm.score.clean_bmi', 'clean_bmi', (['bmi'], {}), '(bmi)\n', (4288, 4293), False, 'from cvdm.score import clean_bp, clean_bmi, clean_tot_chol, clean_hdl, clean_age\n'), ((4751, 4765), 'cvdm.score.clean_age', 'clean_age', (['age'], {}), '(age)\n', (4760, 4765), False, 'from cvdm.score import clean_bp, clean_bmi, clean_tot_chol, clean_hdl, clean_age\n'), ((4797, 4821), 'cvdm.score.clean_tot_chol', 'clean_tot_chol', (['tot_chol'], {}), '(tot_chol)\n', (4811, 4821), False, 'from cvdm.score import clean_bp, clean_bmi, clean_tot_chol, clean_hdl, clean_age\n'), ((4853, 4867), 'cvdm.score.clean_hdl', 'clean_hdl', (['hdl'], {}), '(hdl)\n', (4862, 4867), False, 'from cvdm.score import clean_bp, clean_bmi, clean_tot_chol, clean_hdl, clean_age\n'), ((4325, 4338), 'cvdm.score.clean_bp', 'clean_bp', (['sbp'], {}), '(sbp)\n', (4333, 4338), False, 'from cvdm.score import clean_bp, clean_bmi, clean_tot_chol, clean_hdl, clean_age\n'), ((4378, 4391), 'cvdm.score.clean_bp', 'clean_bp', (['sbp'], {}), '(sbp)\n', (4386, 4391), False, 'from cvdm.score import clean_bp, clean_bmi, clean_tot_chol, clean_hdl, clean_age\n'), ((4899, 4912), 'cvdm.score.clean_bp', 'clean_bp', (['sbp'], {}), '(sbp)\n', (4907, 4912), 
False, 'from cvdm.score import clean_bp, clean_bmi, clean_tot_chol, clean_hdl, clean_age\n'), ((4952, 4965), 'cvdm.score.clean_bp', 'clean_bp', (['sbp'], {}), '(sbp)\n', (4960, 4965), False, 'from cvdm.score import clean_bp, clean_bmi, clean_tot_chol, clean_hdl, clean_age\n')] |
__all__ = ['mollview', 'projplot']
import numpy as np
from .pixelfunc import ang2pix, npix2nside
from .rotator import Rotator
from matplotlib.projections.geo import GeoAxes
###### WARNING #################
# this module is work in progress, the aim is to reimplement the healpy
# plot functions using the new features of matplotlib and remove most
# of the custom projection code
class ThetaFormatterShiftPi(GeoAxes.ThetaFormatter):
    """Tick formatter that relabels longitudes from [-180, 180] to [0, 360]."""
    def __call__(self, x, pos=None):
        # Mirror the angle, then wrap negatives into [0, 2*pi).
        if x != 0:
            x = -x
        if x < 0:
            x += 2 * np.pi
        return super(ThetaFormatterShiftPi, self).__call__(x, pos)
def lonlat(theta, phi):
    """Convert spherical (theta, phi) to (longitude, latitude).

    Colatitude theta becomes latitude (pi/2 - theta) and the astro
    longitude convention flips the sign of phi."""
    return -np.asarray(phi), np.pi / 2 - np.asarray(theta)
def mollview(m=None, rot=None, coord=None, unit='',
             xsize=1000, nest=False,
             min=None, max=None, flip='astro',
             format='%g',
             cbar=True, cmap=None,
             norm=None,
             graticule=False, graticule_labels=False,
             **kwargs):
    """Plot an healpix map (given as an array) in Mollweide projection.

    Parameters
    ----------
    m : float, array-like or None
        An array containing the map, supports masked maps, see the `ma` function.
        If None, will display a blank map, useful for overplotting.
    rot : scalar or sequence, optional
        Describe the rotation to apply.
        In the form (lon, lat, psi) (unit: degrees) : the point at
        longitude *lon* and latitude *lat* will be at the center. An additional rotation
        of angle *psi* around this direction is applied.
    coord : sequence of character, optional
        Either one of 'G', 'E' or 'C' to describe the coordinate
        system of the map, or a sequence of 2 of these to rotate
        the map from the first to the second coordinate system.
    unit : str, optional
        A text describing the unit of the data. Default: ''
    xsize : int, optional
        The size of the image. Default: 1000
    nest : bool, optional
        If True, ordering scheme is NESTED. Default: False (RING)
    min : float, optional
        The minimum range value
    max : float, optional
        The maximum range value
    flip : {'astro', 'geo'}, optional
        Defines the convention of projection : 'astro' (default, east towards left, west towards right)
        or 'geo' (east towards right, west towards left)
    format : str, optional
        The format of the scale label. Default: '%g'
        (currently unused; accepted for interface compatibility)
    cbar : bool, optional
        Display the colorbar. Default: True
    cmap : optional
        Colormap (currently unused; accepted for interface compatibility)
    norm : {'hist', 'log', None}
        Color normalization, hist= histogram equalized color mapping,
        log= logarithmic color mapping, default: None (linear color mapping).
        Only None is implemented; other values raise NotImplementedError.
    graticule : bool
        add graticule
    graticule_labels : bool
        longitude and latitude labels
    kwargs : keywords
        any additional keyword is passed to pcolormesh

    Returns
    -------
    ret
        The QuadMesh returned by pcolormesh, or None when `m` is None.
    """
    # not implemented features
    if norm is not None:
        raise NotImplementedError()
    # Create the figure
    import matplotlib.pyplot as plt
    width = 8.5
    fig = plt.figure(figsize=(width, width * .63))
    ax = fig.add_subplot(111, projection="mollweide")
    # FIXME: make a more general axes creation that works also with subplots
    # ax = plt.gcf().add_axes((.125, .1, .9, .9), projection="mollweide")
    # remove white space around the image
    plt.subplots_adjust(left=0.02, right=0.98, top=0.95, bottom=0.05)
    if graticule and graticule_labels:
        plt.subplots_adjust(left=0.04, right=0.98, top=0.95, bottom=0.05)
    if m is not None:
        # auto min and max
        if min is None:
            min = m.min()
        if max is None:
            max = m.max()
    # allow callers to override the hold state by passing hold=True|False.
    # BUGFIX: Axes.hold()/ishold() were removed in matplotlib >= 2.0/3.0, so
    # the calls are guarded with hasattr to keep both old and new versions working.
    washold = ax.ishold() if hasattr(ax, 'ishold') else None
    hold = kwargs.pop('hold', None)
    if hold is not None and hasattr(ax, 'hold'):
        ax.hold(hold)
    ret = None  # stays None when no map is plotted (m is None)
    try:
        # BUGFIX: xsize / 2 is a float under Python 3 and np.linspace requires
        # an integer sample count; use integer division instead.
        ysize = xsize // 2
        theta = np.linspace(np.pi, 0, ysize)
        phi = np.linspace(-np.pi, np.pi, xsize)
        longitude = np.radians(np.linspace(-180, 180, xsize))
        if flip == "astro":
            longitude = longitude[::-1]
        latitude = np.radians(np.linspace(-90, 90, ysize))
        # project the map to a rectangular matrix xsize x ysize
        PHI, THETA = np.meshgrid(phi, theta)
        # coord or rotation
        if coord or rot:
            r = Rotator(coord=coord, rot=rot, inv=True)
            THETA, PHI = r(THETA.flatten(), PHI.flatten())
            THETA = THETA.reshape(ysize, xsize)
            PHI = PHI.reshape(ysize, xsize)
        if m is not None:
            # BUGFIX: npix2nside(len(m)) and m[grid_pix] used to run even when
            # m was None, crashing despite the documented blank-map behavior.
            nside = npix2nside(len(m))
            grid_pix = ang2pix(nside, THETA, PHI, nest=nest)
            grid_map = m[grid_pix]
            # plot
            ret = plt.pcolormesh(longitude, latitude, grid_map, vmin=min, vmax=max, rasterized=True, **kwargs)
        # graticule
        plt.grid(graticule)
        if graticule:
            longitude_grid_spacing = 60  # deg
            ax.set_longitude_grid(longitude_grid_spacing)
            if width < 10:
                ax.set_latitude_grid(45)
                ax.set_longitude_grid_ends(90)
        # NOTE(review): graticule_labels without graticule raises NameError on
        # longitude_grid_spacing (pre-existing behavior, kept unchanged).
        if graticule_labels:
            ax.xaxis.set_major_formatter(ThetaFormatterShiftPi(longitude_grid_spacing))
        else:
            # remove longitude and latitude labels
            ax.xaxis.set_ticklabels([])
            ax.yaxis.set_ticklabels([])
        # colorbar
        if cbar and ret is not None:
            cb = fig.colorbar(ret, orientation='horizontal', shrink=.4, pad=0.05, ticks=[min, max])
            cb.ax.xaxis.set_label_text(unit)
            cb.ax.xaxis.labelpad = -8
            # workaround for issue with viewers, see colorbar docstring
            cb.solids.set_edgecolor("face")
        plt.draw()
    finally:
        if washold is not None and hasattr(ax, 'hold'):
            ax.hold(washold)
    return ret
def projplot(theta, phi, fmt=None, **kwargs):
    """projplot is a wrapper around :func:`matplotlib.Axes.plot` to take into account the
    spherical projection.

    You can call this function as::

        projplot(theta, phi)        # plot a line going through points at coord (theta, phi)
        projplot(theta, phi, 'bo')  # plot 'o' in blue at coord (theta, phi)

    Parameters
    ----------
    theta, phi : float, array-like
        Coordinates of point to plot in radians.
    fmt : str
        A format string (see :func:`matplotlib.Axes.plot` for details)

    Notes
    -----
    Other keywords are passed to :func:`matplotlib.Axes.plot`.

    See Also
    --------
    projscatter, projtext
    """
    import matplotlib.pyplot as plt
    lon, lat = lonlat(theta, phi)
    plot_args = (lon, lat) if fmt is None else (lon, lat, fmt)
    return plt.plot(*plot_args, **kwargs)
| [
"numpy.meshgrid",
"matplotlib.pyplot.plot",
"numpy.asarray",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.pcolormesh",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.grid"
] | [((3317, 3358), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(width, width * 0.63)'}), '(figsize=(width, width * 0.63))\n', (3327, 3358), True, 'import matplotlib.pyplot as plt\n'), ((3606, 3671), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.02)', 'right': '(0.98)', 'top': '(0.95)', 'bottom': '(0.05)'}), '(left=0.02, right=0.98, top=0.95, bottom=0.05)\n', (3625, 3671), True, 'import matplotlib.pyplot as plt\n'), ((879, 894), 'numpy.asarray', 'np.asarray', (['phi'], {}), '(phi)\n', (889, 894), True, 'import numpy as np\n'), ((920, 937), 'numpy.asarray', 'np.asarray', (['theta'], {}), '(theta)\n', (930, 937), True, 'import numpy as np\n'), ((3719, 3784), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.04)', 'right': '(0.98)', 'top': '(0.95)', 'bottom': '(0.05)'}), '(left=0.04, right=0.98, top=0.95, bottom=0.05)\n', (3738, 3784), True, 'import matplotlib.pyplot as plt\n'), ((4169, 4197), 'numpy.linspace', 'np.linspace', (['np.pi', '(0)', 'ysize'], {}), '(np.pi, 0, ysize)\n', (4180, 4197), True, 'import numpy as np\n'), ((4214, 4247), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi', 'xsize'], {}), '(-np.pi, np.pi, xsize)\n', (4225, 4247), True, 'import numpy as np\n'), ((4523, 4546), 'numpy.meshgrid', 'np.meshgrid', (['phi', 'theta'], {}), '(phi, theta)\n', (4534, 4546), True, 'import numpy as np\n'), ((5124, 5143), 'matplotlib.pyplot.grid', 'plt.grid', (['graticule'], {}), '(graticule)\n', (5132, 5143), True, 'import matplotlib.pyplot as plt\n'), ((6011, 6021), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (6019, 6021), True, 'import matplotlib.pyplot as plt\n'), ((6913, 6952), 'matplotlib.pyplot.plot', 'plt.plot', (['longitude', 'latitude'], {}), '(longitude, latitude, **kwargs)\n', (6921, 6952), True, 'import matplotlib.pyplot as plt\n'), ((6977, 7021), 'matplotlib.pyplot.plot', 'plt.plot', (['longitude', 'latitude', 'fmt'], {}), '(longitude, latitude, fmt, 
**kwargs)\n', (6985, 7021), True, 'import matplotlib.pyplot as plt\n'), ((4280, 4309), 'numpy.linspace', 'np.linspace', (['(-180)', '(180)', 'xsize'], {}), '(-180, 180, xsize)\n', (4291, 4309), True, 'import numpy as np\n'), ((4409, 4436), 'numpy.linspace', 'np.linspace', (['(-90)', '(90)', 'ysize'], {}), '(-90, 90, ysize)\n', (4420, 4436), True, 'import numpy as np\n'), ((5002, 5098), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['longitude', 'latitude', 'grid_map'], {'vmin': 'min', 'vmax': 'max', 'rasterized': '(True)'}), '(longitude, latitude, grid_map, vmin=min, vmax=max,\n rasterized=True, **kwargs)\n', (5016, 5098), True, 'import matplotlib.pyplot as plt\n')] |
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister
from qiskit.circuit import Instruction
from compiler import composer
from optimizer import Optimizer
import numpy as np
import toml
def is_unitary(operator, tolerance=0.0001):
    """Check whether a square matrix is unitary.

    A matrix U is unitary when U @ U* == U* @ U == I (U* the conjugate
    transpose).

    Args:
        operator: 2-D numpy array to test.
        tolerance: absolute tolerance used in the comparison against the
            identity matrix.

    Returns:
        bool: True if `operator` is square and unitary within `tolerance`.
    """
    h, w = operator.shape
    if h != w:
        # Non-square matrices cannot be unitary.
        return False
    adjoint = np.conjugate(operator.transpose())
    identity = np.eye(h)
    # BUGFIX: `tolerance` was previously accepted but never used; it is now
    # passed to np.allclose as the absolute tolerance.
    return (np.allclose(np.dot(operator, adjoint), identity, atol=tolerance)
            and np.allclose(np.dot(adjoint, operator), identity, atol=tolerance))
def prob_transition(graph, gtype='normal', alpha=0.85):
    """Build a column-normalized transition matrix from an adjacency matrix.

    With gtype='google' the damped Google matrix is returned instead;
    otherwise each column is divided by its sum (columns summing to zero
    are copied through unchanged).
    """
    if gtype == 'google':
        return google_matrix(alpha, graph)
    pmatrix = np.zeros(graph.shape)
    for col, indeg in enumerate(np.sum(graph, axis=0)):
        # A zero in-degree column would divide by zero; copy it as-is.
        pmatrix[:, col] = graph[:, col] if indeg == 0 else graph[:, col] / indeg
    return pmatrix
def google_matrix(alpha, C):
    """Damped "Google" (PageRank) matrix for connectivity matrix C.

    Mixes the column-stochastic matrix E derived from C with a uniform
    teleportation term, weighted by the damping factor alpha.
    """
    N = len(C)
    teleport = np.ones((N, N), dtype=float) / N
    return alpha * connect_to_E(C) + (1 - alpha) * teleport
def connect_to_E(C):
    '''
    Column-normalize a connectivity matrix.

    C is conectivity matrix
    C: np.array
    output
    E: np.array — each column sums to 1; columns of C that sum to zero
    become uniform columns with every entry 1/N.
    '''
    C = np.array(C)
    N = len(C)
    E = np.zeros(C.shape)
    col_totals = np.sum(C, axis=0)
    for col in range(N):
        if col_totals[col] == 0:
            # Dangling column: distribute probability uniformly.
            E[:, col] = 1 / N
        else:
            E[:, col] = C[:, col] / col_totals[col]
    assert(np.sum(np.sum(E, axis=0)) == N)
    return E
# Example 4-node directed graph (adjacency matrix) driving the demo below.
graph = np.array([[0, 1, 1, 0],
                 [0, 0, 0, 1],
                 [0, 0, 0, 1],
                 [0, 1, 1, 0]])
# Column-stochastic transition matrix derived from the adjacency structure.
pb = prob_transition(graph)
# Number of quantum-walk steps to encode in the circuit.
step = 1
# Build the quantum-walk circuit without validating against the transition matrix.
qc = composer.CircuitComposer(graph, pb, step).qw_circuit(validation=False)
# NOTE(review): the file handle from open('ruleset.toml', 'r') is never
# closed here — confirm Optimizer.optimize closes it, or use a with-block.
opt = Optimizer(graph, pb).optimize(qc, 3, open('ruleset.toml', 'r'))
| [
"numpy.sum",
"numpy.eye",
"compiler.composer.CircuitComposer",
"numpy.allclose",
"numpy.zeros",
"numpy.ones",
"numpy.array",
"optimizer.Optimizer",
"numpy.dot"
] | [((1587, 1653), 'numpy.array', 'np.array', (['[[0, 1, 1, 0], [0, 0, 0, 1], [0, 0, 0, 1], [0, 1, 1, 0]]'], {}), '([[0, 1, 1, 0], [0, 0, 0, 1], [0, 0, 0, 1], [0, 1, 1, 0]])\n', (1595, 1653), True, 'import numpy as np\n'), ((379, 404), 'numpy.dot', 'np.dot', (['operator', 'adjoint'], {}), '(operator, adjoint)\n', (385, 404), True, 'import numpy as np\n'), ((420, 445), 'numpy.dot', 'np.dot', (['adjoint', 'operator'], {}), '(adjoint, operator)\n', (426, 445), True, 'import numpy as np\n'), ((456, 465), 'numpy.eye', 'np.eye', (['h'], {}), '(h)\n', (462, 465), True, 'import numpy as np\n'), ((1242, 1253), 'numpy.array', 'np.array', (['C'], {}), '(C)\n', (1250, 1253), True, 'import numpy as np\n'), ((1262, 1279), 'numpy.zeros', 'np.zeros', (['C.shape'], {}), '(C.shape)\n', (1270, 1279), True, 'import numpy as np\n'), ((1293, 1310), 'numpy.sum', 'np.sum', (['C'], {'axis': '(0)'}), '(C, axis=0)\n', (1299, 1310), True, 'import numpy as np\n'), ((477, 503), 'numpy.allclose', 'np.allclose', (['product1', 'ida'], {}), '(product1, ida)\n', (488, 503), True, 'import numpy as np\n'), ((506, 532), 'numpy.allclose', 'np.allclose', (['product2', 'ida'], {}), '(product2, ida)\n', (517, 532), True, 'import numpy as np\n'), ((688, 709), 'numpy.zeros', 'np.zeros', (['graph.shape'], {}), '(graph.shape)\n', (696, 709), True, 'import numpy as np\n'), ((730, 751), 'numpy.sum', 'np.sum', (['graph'], {'axis': '(0)'}), '(graph, axis=0)\n', (736, 751), True, 'import numpy as np\n'), ((1750, 1791), 'compiler.composer.CircuitComposer', 'composer.CircuitComposer', (['graph', 'pb', 'step'], {}), '(graph, pb, step)\n', (1774, 1791), False, 'from compiler import composer\n'), ((1827, 1847), 'optimizer.Optimizer', 'Optimizer', (['graph', 'pb'], {}), '(graph, pb)\n', (1836, 1847), False, 'from optimizer import Optimizer\n'), ((1067, 1095), 'numpy.ones', 'np.ones', (['(N, N)'], {'dtype': 'float'}), '((N, N), dtype=float)\n', (1074, 1095), True, 'import numpy as np\n'), ((1539, 1556), 'numpy.sum', 
'np.sum', (['E'], {'axis': '(0)'}), '(E, axis=0)\n', (1545, 1556), True, 'import numpy as np\n')] |
from sklearn.exceptions import NotFittedError
import logging
import numpy as np
from qiskit.providers import BaseBackend, Backend
from qiskit.utils import QuantumInstance
from typing import Optional, Union
from sklearn.base import RegressorMixin
from .qknn_base import QNeighborsBase
from ...encodings import EncodingMap
logger = logging.getLogger(__name__)
class QKNeighborsRegressor(RegressorMixin, QNeighborsBase):
    """
    Quantum K-Nearest Neighbors algorithm for regression tasks.

    Note:
        Naming follows sklearn.neighbors.KNeighborsRegressor.
    """

    def __init__(self,
                 n_neighbors: int = 3,
                 encoding_map: Optional[EncodingMap] = None,
                 quantum_instance: Optional[Union[QuantumInstance, BaseBackend, Backend]] = None):
        """
        Create a QKNeighborsRegressor object.

        Args:
            n_neighbors:
                number of neighbors whose labels are averaged
            encoding_map:
                map from classical data to quantum states; no constraint
                is imposed on it by this class
            quantum_instance:
                the quantum instance to set. Can be a
                :class:`~qiskit.utils.QuantumInstance`, a :class:`~qiskit.providers.Backend`
                or a :class:`~qiskit.providers.BaseBackend`
        """
        super().__init__(n_neighbors, encoding_map, quantum_instance)

    def predict(self,
                X_test: np.ndarray) -> np.ndarray:
        """Predict the labels of the provided data."""
        if self.X_train is None:
            raise NotFittedError(
                "This QKNeighborsRegressor instance is not fitted yet. "
                "Call 'fit' with appropriate arguments before using "
                "this estimator.")

        fidelity_circuits = self._construct_circuits(X_test)
        execution_results = self.execute(fidelity_circuits)

        # The execution results are turned into fidelities, which drive
        # the neighbor selection and the final averaging.
        fidelities = self._get_fidelities(execution_results, len(X_test))

        logger.info("Averaging ...")
        neighbor_labels = self._kneighbors(self.y_train, fidelities)

        n_queries, _ = self.X_train.shape
        if n_queries == 1:
            predictions = np.mean(neighbor_labels)
        else:
            predictions = np.mean(neighbor_labels, axis=1)

        logger.info("Done.")
        return predictions
| [
"sklearn.exceptions.NotFittedError",
"numpy.mean",
"logging.getLogger"
] | [((335, 362), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (352, 362), False, 'import logging\n'), ((1662, 1810), 'sklearn.exceptions.NotFittedError', 'NotFittedError', (['"""This QKNeighborsRegressor instance is not fitted yet. Call \'fit\' with appropriate arguments before using this estimator."""'], {}), '(\n "This QKNeighborsRegressor instance is not fitted yet. Call \'fit\' with appropriate arguments before using this estimator."\n )\n', (1676, 1810), False, 'from sklearn.exceptions import NotFittedError\n'), ((2326, 2344), 'numpy.mean', 'np.mean', (['k_nearest'], {}), '(k_nearest)\n', (2333, 2344), True, 'import numpy as np\n'), ((2390, 2416), 'numpy.mean', 'np.mean', (['k_nearest'], {'axis': '(1)'}), '(k_nearest, axis=1)\n', (2397, 2416), True, 'import numpy as np\n')] |
import numpy as np
from evaluate.bbox import bbox_overlaps
def evaluate_recall(roidb, thresholds=None,
                    area='all',
                    limit=None):
    """Evaluate detection proposal recall metrics.

    Args:
        roidb: list of per-image dicts with keys 'gt_classes', 'boxes',
            'seg_areas' and 'mrcnn_bbox' — values appear to be torch
            tensors given the .view()/.max(dim=...) calls; TODO confirm.
        thresholds: IoU thresholds to evaluate at; defaults to 0.5:0.05:0.95.
        area: one of the keys of `areas` below selecting a gt-box size range.
        limit: optional cap on the number of proposals considered per image.

    Returns:
        results: dictionary of results with keys
            'ar': average recall
            'recalls': vector recalls at each IoU overlap threshold
            'thresholds': vector of IoU overlap thresholds
            'gt_overlaps': vector of all ground-truth overlaps
    """
    # Record max overlap value for each gt box
    # Return vector of overlap values
    areas = {
        'all': 0,
        'small': 1,
        'medium': 2,
        'large': 3,
        '96-128': 4,
        '128-256': 5,
        '256-512': 6,
        '512-inf': 7
    }
    # Pixel-area intervals matching the `areas` indices above.
    area_ranges = [
        [0 ** 2, 1e5 ** 2],  # all
        [0 ** 2, 32 ** 2],  # small
        [32 ** 2, 96 ** 2],  # medium
        [96 ** 2, 1e5 ** 2],  # large
        [96 ** 2, 128 ** 2],  # 96-128
        [128 ** 2, 256 ** 2],  # 128-256
        [256 ** 2, 512 ** 2],  # 256-512
        [512 ** 2, 1e5 ** 2],  # 512-inf
    ]
    assert area in areas, 'unknown area range: {}'.format(area)
    # NOTE(review): area_range is computed but the area filtering below is
    # commented out, so `area` currently has no effect on the result.
    area_range = area_ranges[areas[area]]
    gt_overlaps = np.zeros(0)
    num_pos = 0
    for i in range(len(roidb)):
        # Checking for max_overlaps == 1 avoids including crowd annotations
        # (...pretty hacking :/)
        # max_gt_overlaps = roidb[i]['gt_overlaps'].toarray().max(axis=1)
        # gt_inds = np.where((roidb[i]['gt_classes'] > 0) &
        #                    (max_gt_overlaps == 1))[0]
        gt_inds = np.where(roidb[i]['gt_classes'].view(-1) > 0)
        gt_boxes = roidb[i]['boxes'][:,gt_inds].squeeze().view((-1,4))
        gt_areas = roidb[i]['seg_areas'][:,0,gt_inds].squeeze()
        # valid_gt_inds = np.where((gt_areas >= area_range[0]) &
        #                          (gt_areas <= area_range[1]))[0]
        # gt_boxes = gt_boxes[valid_gt_inds, :]
        num_pos += len(gt_inds[0])
        boxes = roidb[i]['mrcnn_bbox']
        if boxes.shape[0] == 0:
            continue
        if limit is not None and boxes.shape[0] > limit:
            boxes = boxes[:limit, :]
        # Pairwise IoU between proposals (rows) and gt boxes (columns).
        overlaps = bbox_overlaps(boxes, gt_boxes)
        _gt_overlaps = np.zeros((gt_boxes.shape[0]))
        # Greedy one-to-one matching: repeatedly take the best remaining pair
        # and invalidate its row/column with -1.
        for j in range(gt_boxes.shape[0]):
            # find which proposal box maximally covers each gt box
            # argmax_overlaps = overlaps.argmax(dim=0)
            # and get the iou amount of coverage for each gt box
            max_overlaps,argmax_overlaps = overlaps.max(dim=0)
            # find which gt box is 'best' covered (i.e. 'best' = most iou)
            gt_ind = max_overlaps.argmax()
            gt_ovr = max_overlaps.max()
            assert (gt_ovr >= 0)
            # find the proposal box that covers the best covered gt box
            box_ind = argmax_overlaps[gt_ind]
            # record the iou coverage of this gt box
            _gt_overlaps[j] = overlaps[box_ind, gt_ind]
            assert (_gt_overlaps[j] == gt_ovr)
            # mark the proposal box and the gt box as used
            overlaps[box_ind, :] = -1
            overlaps[:, gt_ind] = -1
        # append recorded iou coverage level
        gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))
    gt_overlaps = np.sort(gt_overlaps)
    if thresholds is None:
        step = 0.05
        # +1e-5 guards against float rounding excluding the 0.95 endpoint.
        thresholds = np.arange(0.5, 0.95 + 1e-5, step)
    recalls = np.zeros_like(thresholds)
    # compute recall for each iou threshold
    for i, t in enumerate(thresholds):
        recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)
    # ar = 2 * np.trapz(recalls, thresholds)
    ar = recalls.mean()
    return {
        'ar': ar,
        'recalls': recalls,
        'thresholds': thresholds,
'gt_overlaps': gt_overlaps} | [
"numpy.zeros_like",
"evaluate.bbox.bbox_overlaps",
"numpy.zeros",
"numpy.hstack",
"numpy.sort",
"numpy.arange"
] | [((1214, 1225), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (1222, 1225), True, 'import numpy as np\n'), ((3301, 3321), 'numpy.sort', 'np.sort', (['gt_overlaps'], {}), '(gt_overlaps)\n', (3308, 3321), True, 'import numpy as np\n'), ((3438, 3463), 'numpy.zeros_like', 'np.zeros_like', (['thresholds'], {}), '(thresholds)\n', (3451, 3463), True, 'import numpy as np\n'), ((2198, 2228), 'evaluate.bbox.bbox_overlaps', 'bbox_overlaps', (['boxes', 'gt_boxes'], {}), '(boxes, gt_boxes)\n', (2211, 2228), False, 'from evaluate.bbox import bbox_overlaps\n'), ((2253, 2280), 'numpy.zeros', 'np.zeros', (['gt_boxes.shape[0]'], {}), '(gt_boxes.shape[0])\n', (2261, 2280), True, 'import numpy as np\n'), ((3243, 3281), 'numpy.hstack', 'np.hstack', (['(gt_overlaps, _gt_overlaps)'], {}), '((gt_overlaps, _gt_overlaps))\n', (3252, 3281), True, 'import numpy as np\n'), ((3390, 3424), 'numpy.arange', 'np.arange', (['(0.5)', '(0.95 + 1e-05)', 'step'], {}), '(0.5, 0.95 + 1e-05, step)\n', (3399, 3424), True, 'import numpy as np\n')] |
import copy
from typing import Dict, FrozenSet, List, Tuple
import jax
import networkx as nx
import numpy as np
def get_interaction_graph_from_feature_activations(
    feature_activations: np.ndarray,
    pools_to_laterals_list: List[
        List[Dict[FrozenSet[Tuple[int, int, int, int]], np.ndarray]]
    ],
    templates_list: List[List[List[int]]],
    subset_laterals: bool = True,
) -> nx.Graph:
    """get_interaction_graph_from_feature_activations.
    Args:
        feature_activations: Array of shape (n_feature_activations, 3)
            frcs of the feature activations
        pools_to_laterals_list: A complete list of pool definitions at each feature
            len(pools_to_laterals_list) == n_features
            len(pools_to_laterals_list[ii]) == n_pools for feature ii
            Each pool is represented as a dictionary
            The keys are symmetric representations of the lateral, using frozenset
            The values are the actual lateral (in terms of connected feature idx, dr and dc)
            For a lateral connecting feature ii and jj with a displacement
            (jj related to ii) dr, dc, the corresponding frozen set would be
            {(ii, jj, dr, dc), (jj, ii, -dr, -dc)}
        templates_list: templates_list
            List of templates for the different features.
            len(templates_list) == n_features
            len(templates_list[ii]) == n_templates for feature ii
            A template is represented as a list of integers, representing indices of
            the involved pools for that template
        subset_laterals: If True, restrict each pool to the laterals that can
            actually occur between the given feature activations.
    Returns:
        interaction_graph: The interaction graph specifying the lateral layer
            Nodes are specified in (f, r, c) tuples.
            For each node, there exist node attributes:
                is_within_boundary (bool): Indicating whether the variable is within boundary
                    True is the node is within boundary
                    False or missing means the node is outside boundary
                templates (List[List[List[np.ndarray]]]): List of configurations (with pooling) for each node
                    First dimension ranges over different templates
                    Second dimension ranges over different pools within a template
                    Third dimension ranges over different laterals within a pool. Each lateral is encoded using
                    the frc of its connected node, in an np array of length 3
            Edges are of the form ((f0, r0, c0), (f1, r1, c1)).
            For each edge, there exist edge attributes:
                idx (int): The flat index of the corresponding edge
                count (int): Number of times an edge appears as part of a template in a node
                    For edges connecting nodes within boundary this should always be 2
                    For edges connecting one node within boundary and one boundary node, this should be 1
                    Used to decide boundary_laterals_indices and boundary_laterals_sides_indices
                sides (dict): A dictionary mapping connected nodes to sides (0 or 1)
    """
    n_features = len(pools_to_laterals_list)
    # Shallow copies so the caller's pool dicts/lists are not mutated below.
    subset_pools_to_laterals_list = [
        copy.copy(pools_to_laterals) for pools_to_laterals in pools_to_laterals_list
    ]
    if subset_laterals:
        # Symmetric (frozenset) encodings of every lateral realizable between
        # any pair of the given feature activations.
        all_possible_laterals_from_feature_activations = set(
            [
                frozenset(
                    [
                        (feature_activations[ii, 0], feature_activations[jj, 0])
                        + tuple(
                            feature_activations[jj, 1:] - feature_activations[ii, 1:]
                        ),
                        (feature_activations[jj, 0], feature_activations[ii, 0])
                        + tuple(
                            feature_activations[ii, 1:] - feature_activations[jj, 1:]
                        ),
                    ]
                )
                for ii in range(feature_activations.shape[0] - 1)
                for jj in range(ii + 1, feature_activations.shape[0])
            ]
        )
        # Restrict every pool to laterals that can actually occur; note each
        # pool changes from a dict to a list of lateral arrays here.
        for feature_idx in range(n_features):
            for pool_idx in range(len(subset_pools_to_laterals_list[feature_idx])):
                subset_pools_to_laterals_list[feature_idx][pool_idx] = [
                    subset_pools_to_laterals_list[feature_idx][pool_idx][key]
                    for key in set(
                        list(
                            subset_pools_to_laterals_list[feature_idx][pool_idx].keys()
                        )
                    ).intersection(all_possible_laterals_from_feature_activations)
                ]
    # Resolve template pool indices into the (possibly subset) pool contents.
    subset_templates_list = [
        [
            [
                subset_pools_to_laterals_list[feature_idx][pool_idx]
                for pool_idx in template
            ]
            for template in templates_list[feature_idx]
        ]
        for feature_idx in range(n_features)
    ]
    interaction_graph = nx.Graph()
    # All given feature activations are within-boundary nodes by definition.
    interaction_graph.add_nodes_from(
        [tuple(frc) for frc in feature_activations], is_within_boundary=True
    )
    nodes_list = list(interaction_graph.nodes())
    for node in nodes_list:
        # Unique (f, r, c) nodes reachable from this node through any lateral
        # of any of its templates (laterals store relative r/c displacements).
        connected_nodes_list = list(
            set(
                jax.tree_util.tree_map(
                    lambda x: (x[0],) + tuple(np.array(node[1:]) + x[1:]),
                    jax.tree_util.tree_leaves(subset_templates_list[node[0]]),
                )
            )
        )
        # Same displacement resolution, but preserving the nested
        # template/pool/lateral structure for the node attribute.
        templates = jax.tree_util.tree_map(
            lambda x: np.array((x[0],) + tuple(np.array(node[1:]) + x[1:])),
            subset_templates_list[node[0]],
        )
        interaction_graph.nodes()[node]['templates'] = templates
        for connected_node in connected_nodes_list:
            # Increment the edge's 'count'; missing edges start from 0.
            interaction_graph.add_edge(
                node,
                connected_node,
                count=interaction_graph.edges()
                .get((node, connected_node), {})
                .get('count', 0)
                + 1,
            )
    # Assign flat indices and side labels to edges whose endpoints are both
    # within boundary; such edges must have been counted from both sides.
    for idx, edge in enumerate(interaction_graph.edges()):
        if interaction_graph.nodes()[edge[0]].get(
            'is_within_boundary', False
        ) and interaction_graph.nodes()[edge[1]].get('is_within_boundary', False):
            assert interaction_graph.edges()[edge]['count'] == 2, (
                edge,
                interaction_graph.edges()[edge]['count'],
            )
            interaction_graph.edges()[edge].update(
                {
                    'idx': idx,
                    'sides': {edge[0]: 0, edge[1]: 1},
                }
            )
    return interaction_graph
return interaction_graph
| [
"numpy.array",
"networkx.Graph",
"jax.tree_util.tree_leaves",
"copy.copy"
] | [((4967, 4977), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (4975, 4977), True, 'import networkx as nx\n'), ((3199, 3227), 'copy.copy', 'copy.copy', (['pools_to_laterals'], {}), '(pools_to_laterals)\n', (3208, 3227), False, 'import copy\n'), ((5365, 5422), 'jax.tree_util.tree_leaves', 'jax.tree_util.tree_leaves', (['subset_templates_list[node[0]]'], {}), '(subset_templates_list[node[0]])\n', (5390, 5422), False, 'import jax\n'), ((5557, 5575), 'numpy.array', 'np.array', (['node[1:]'], {}), '(node[1:])\n', (5565, 5575), True, 'import numpy as np\n'), ((5316, 5334), 'numpy.array', 'np.array', (['node[1:]'], {}), '(node[1:])\n', (5324, 5334), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import argparse
import datetime
import logging
import numpy as np
from aiohttp import ClientConnectionError
from pyModbusTCP.client import ModbusClient
from pymodbus.constants import Endian
from pymodbus.payload import BinaryPayloadDecoder
import asyncio
from aioinflux import InfluxDBClient, InfluxDBWriteError
# Template for a single InfluxDB datapoint; rebuilt/mutated in write_to_influx.
datapoint = {
    'measurement': 'SolarEdge',
    'tags': {},
    'fields': {}
}
# Latest raw Modbus register block read from the inverter/meters.
reg_block = {}

logger = logging.getLogger('solaredge')
async def write_to_influx(dbhost, dbport, mbmeters, dbname='solaredge'):
global client
global datapoint
global reg_block
def trunc_float(floatval):
return float('%.2f' % floatval)
try:
solar_client = InfluxDBClient(host=dbhost, port=dbport, db=dbname)
await solar_client.create_database(db=dbname)
except ClientConnectionError as e:
logger.error(f'Error during connection to InfluxDb {dbhost}: {e}')
return
logger.info('Database opened and initialized')
while True:
try:
reg_block = {}
reg_block = client.read_holding_registers(40069, 38)
if reg_block:
datapoint = {
'measurement': 'SolarEdge',
'tags': {},
'fields': {}
}
# print(reg_block)
# reg_block[0] = Sun Spec DID
# reg_block[1] = Length of Model Block
# reg_block[2] = AC Total current value
# reg_block[3] = AC Phase A current value
# reg_block[4] = AC Phase B current value
# reg_block[5] = AC Phase C current value
# reg_block[6] = AC current scale factor
# reg_block[7] = AC Phase A to B voltage value
# reg_block[8] = AC Phase B to C voltage value
# reg_block[9] = AC Phase C to A voltage value
# reg_block[10] = AC Phase A to N voltage value
# reg_block[11] = AC Phase B to N voltage value
# reg_block[12] = AC Phase C to N voltage value
# reg_block[13] = AC voltage scale factor
# reg_block[14] = AC Power value
# reg_block[15] = AC Power scale factor
# reg_block[16] = AC Frequency value
# reg_block[17] = AC Frequency scale factor
# reg_block[27] = DC Current value
# reg_block[28] = DC Current scale factor
# reg_block[29] = DC Voltage value
# reg_block[30] = DC Voltage scale factor
# reg_block[31] = DC Power value
# reg_block[32] = DC Power scale factor
# reg_block[34] = Inverter temp
# reg_block[37] = Inverter temp scale factor
datapoint['tags']['inverter'] = 1
# AC Current
logger.debug(f'Block6: {str(reg_block[6])}')
logger.debug(f'AC Current SF: {str(np.int16(reg_block[6]))}')
scalefactor = np.float_power(10,np.int16(reg_block[6]))
logger.debug(f'AC Current mult: {str(scalefactor)}')
if reg_block[2]<65535:
datapoint['fields']['AC Total Current'] = trunc_float(reg_block[2] * scalefactor)
if reg_block[3] <65535:
datapoint['fields']['AC Current phase A'] = trunc_float(reg_block[3] * scalefactor)
if reg_block[4]<65535:
datapoint['fields']['AC Current phase B'] = trunc_float(reg_block[4] * scalefactor)
if reg_block[5]<65535:
datapoint['fields']['AC Current phase C'] = trunc_float(reg_block[5] * scalefactor)
# AC Voltage
logger.debug(f'Block13: {str(reg_block[13])}')
logger.debug(f'AC Voltage SF: {str(np.int16(reg_block[13]))}')
scalefactor = np.float_power(10,np.int16(reg_block[13]))
logger.debug(f'AC Voltage mult: {str(scalefactor)}')
if reg_block[7]<65535:
datapoint['fields']['AC Voltage phase A-B'] = trunc_float(reg_block[7] * scalefactor)
if reg_block[8]<65535:
datapoint['fields']['AC Voltage phase B-C'] = trunc_float(reg_block[8] * scalefactor)
if reg_block[9]<65535:
datapoint['fields']['AC Voltage phase C-A'] = trunc_float(reg_block[9] * scalefactor)
if reg_block[10]<65535:
datapoint['fields']['AC Voltage phase A-N'] = trunc_float(reg_block[10] * scalefactor)
if reg_block[11]<65535:
datapoint['fields']['AC Voltage phase B-N'] = trunc_float(reg_block[11] * scalefactor)
if reg_block[12]<65535:
datapoint['fields']['AC Voltage phase C-N'] = trunc_float(reg_block[12] * scalefactor)
# AC Frequency
logger.debug(f'AC Frequency SF: {str(np.int16(reg_block[17]))}')
scalefactor = np.float_power(10,np.int16(reg_block[17]))
if reg_block[16]<65535:
datapoint['fields']['AC Frequency'] = trunc_float(reg_block[16] * scalefactor)
# AC Power
logger.debug(f'Block15: {str(reg_block[15])}')
logger.debug(f'AC Power SF: {str(np.int16(reg_block[15]))}')
scalefactor = np.float_power(10,np.int16(reg_block[15]))
logger.debug(f'AC Power mult: {str(scalefactor)}')
if reg_block[14]<65535:
datapoint['fields']['AC Power output'] = trunc_float(reg_block[14] * scalefactor)
# DC Current
logger.debug(f'Block28: {str(reg_block[28])}')
logger.debug(f'DC Current SF: {str(np.int16(reg_block[28]))}')
scalefactor = np.float_power(10,np.int16(reg_block[28]))
logger.debug(f'DC Current mult: {str(scalefactor)}')
if reg_block[27]<65535:
datapoint['fields']['DC Current'] = trunc_float(reg_block[27] * scalefactor)
# DC Voltage
logger.debug(f'Block30: {str(reg_block[30])}')
logger.debug(f'DC voltage SF: {str(np.int16(reg_block[30]))}')
scalefactor = np.float_power(10,np.int16(reg_block[30]))
logger.debug(f'DC Voltage mult: {str(scalefactor)}')
if reg_block[29]<65535:
datapoint['fields']['DC Voltage'] = trunc_float(reg_block[29] * scalefactor)
# DC Power
logger.debug(f'Block32: {str(reg_block[32])}')
logger.debug(f'DC Power SF: {str(np.int16(reg_block[32]))}')
scalefactor = np.float_power(10,np.int16(reg_block[32]))
logger.debug(f'DC Power mult: {str(scalefactor)}')
if reg_block[31]<65535:
datapoint['fields']['DC Power input'] = trunc_float(reg_block[31] * scalefactor)
# Inverter Temp
logger.debug(f'Block37: {str(reg_block[37])}')
logger.debug(f'Temp SF: {str(np.int16(reg_block[37]))}')
scalefactor = np.float_power(10,np.int16(reg_block[37]))
logger.debug(f'Temp mult: {str(scalefactor)}')
if reg_block[34]<65535:
datapoint['fields']['Inverter Temperature'] = trunc_float(reg_block[34] * scalefactor)
datapoint['time'] = str(datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat())
logger.debug(f'Writing to Influx: {str(datapoint)}')
await solar_client.write(datapoint)
else:
# Error during data receive
if client.last_error() == 2:
logger.error(f'Failed to connect to SolarEdge inverter {client.host()}!')
elif client.last_error() == 3 or client.last_error() == 4:
logger.error('Send or receive error!')
elif client.last_error() == 5:
logger.error('Timeout during send or receive operation!')
for x in range(1, mbmeters+1):
# Now loop through this for each meter that is attached.
logger.debug(f'Meter={str(x)}')
reg_block = {}
# Clear data from inverter, otherwise we publish that again!
datapoint = {
'measurement': 'SolarEdge',
'tags': {
'meter': x
},
'fields': {}
}
# Start point is different for each meter
if x==1:
reg_block = client.read_holding_registers(40190, 36)
if x==2:
reg_block = client.read_holding_registers(40364, 36)
if x==3:
reg_block = client.read_holding_registers(40539, 36)
if reg_block:
# print(reg_block)
# reg_block[0] = AC Total current value
# reg_block[1] = AC Phase A current value
# reg_block[2] = AC Phase B current value
# reg_block[3] = AC Phase C current value
# reg_block[4] = AC current scale factor
# reg_block[5] = AC Phase Line (average) to N voltage value
# reg_block[6] = AC Phase A to N voltage value
# reg_block[7] = AC Phase B to N voltage value
# reg_block[8] = AC Phase C to N voltage value
# reg_block[9] = AC Phase Line to Line voltage value
# reg_block[10] = AC Phase A to B voltage value
# reg_block[11] = AC Phase B to C voltage value
# reg_block[12] = AC Phase C to A voltage value
# reg_block[13] = AC voltage scale factor
# reg_block[14] = AC Frequency value
# reg_block[15] = AC Frequency scale factor
# reg_block[16] = Total Real Power
# reg_block[17] = Phase A Real Power
# reg_block[18] = Phase B Real Power
# reg_block[19] = Phase C Real Power
# reg_block[20] = Real Power scale factor
# reg_block[21] = Total Apparent Power
# reg_block[22] = Phase A Apparent Power
# reg_block[23] = Phase B Apparent Power
# reg_block[24] = Phase C Apparent Power
# reg_block[25] = Apparent Power scale factor
# reg_block[26] = Total Reactive Power
# reg_block[27] = Phase A Reactive Power
# reg_block[28] = Phase B Reactive Power
# reg_block[29] = Phase C Reactive Power
# reg_block[30] = Reactive Power scale factor
# reg_block[31] = Average Power Factor
# reg_block[32] = Phase A Power Factor
# reg_block[33] = Phase B Power Factor
# reg_block[34] = Phase C Power Factor
# reg_block[35] = Power Factor scale factor
logger.debug(f'meter reg_block: {str(reg_block)}')
# AC Current
logger.debug(f'AC Current SF: {str(np.int16(reg_block[4]))}')
scalefactor = np.float_power(10,np.int16(reg_block[4]))
datapoint['fields']['AC Total Current'] = trunc_float(np.int16(reg_block[0]) * scalefactor)
datapoint['fields']['AC Current phase A'] = trunc_float(np.int16(reg_block[1]) * scalefactor)
datapoint['fields']['AC Current phase B'] = trunc_float(np.int16(reg_block[2]) * scalefactor)
datapoint['fields']['AC Current phase C'] = trunc_float(np.int16(reg_block[3]) * scalefactor)
# AC Voltage
logger.debug(f'AC Voltage SF: {str(np.int16(reg_block[13]))}')
scalefactor = np.float_power(10,np.int16(reg_block[13]))
datapoint['fields']['AC Voltage phase L-N'] = trunc_float(np.int16(reg_block[5]) * scalefactor)
datapoint['fields']['AC Voltage phase A-N'] = trunc_float(np.int16(reg_block[6]) * scalefactor)
datapoint['fields']['AC Voltage phase B-N'] = trunc_float(np.int16(reg_block[7]) * scalefactor)
datapoint['fields']['AC Voltage phase C-N'] = trunc_float(np.int16(reg_block[8]) * scalefactor)
datapoint['fields']['AC Voltage phase L-L'] = trunc_float(np.int16(reg_block[9]) * scalefactor)
datapoint['fields']['AC Voltage phase A-B'] = trunc_float(np.int16(reg_block[10]) * scalefactor)
datapoint['fields']['AC Voltage phase B-C'] = trunc_float(np.int16(reg_block[11]) * scalefactor)
datapoint['fields']['AC Voltage phase C-A'] = trunc_float(np.int16(reg_block[12]) * scalefactor)
# AC Frequency
logger.debug(f'AC Frequency SF: {str(np.int16(reg_block[15]))}')
scalefactor = np.float_power(10,np.int16(reg_block[15]))
datapoint['fields']['AC Frequency'] = trunc_float(np.int16(reg_block[14]) * scalefactor)
# AC Real Power
logger.debug(f'AC Real Power SF: {str(np.int16(reg_block[20]))}')
scalefactor = np.float_power(10,np.int16(reg_block[20]))
datapoint['fields']['AC Total Real Power'] = trunc_float(np.int16(reg_block[16]) * scalefactor)
datapoint['fields']['AC Real Power Phase A'] = trunc_float(np.int16(reg_block[17]) * scalefactor)
datapoint['fields']['AC Real Power Phase B'] = trunc_float(np.int16(reg_block[18]) * scalefactor)
datapoint['fields']['AC Real Power Phase C'] = trunc_float(np.int16(reg_block[19]) * scalefactor)
# AC Apparent Power
logger.debug(f'AC Apparent Power SF: {str(np.int16(reg_block[25]))}')
scalefactor = np.float_power(10,np.int16(reg_block[25]))
datapoint['fields']['AC Total Apparent Power'] = trunc_float(np.int16(reg_block[21]) * scalefactor)
datapoint['fields']['AC Apparent Power Phase A'] = trunc_float(np.int16(reg_block[22]) * scalefactor)
datapoint['fields']['AC Apparent Power Phase B'] = trunc_float(np.int16(reg_block[23]) * scalefactor)
datapoint['fields']['AC Apparent Power Phase C'] = trunc_float(np.int16(reg_block[24]) * scalefactor)
# AC Reactive Power
logger.debug(f'AC Reactive Power SF: {str(np.int16(reg_block[30]))}')
scalefactor = np.float_power(10,np.int16(reg_block[30]))
datapoint['fields']['AC Total Reactive Power'] = trunc_float(np.int16(reg_block[26]) * scalefactor)
datapoint['fields']['AC Reactive Power Phase A'] = trunc_float(np.int16(reg_block[27]) * scalefactor)
datapoint['fields']['AC Reactive Power Phase B'] = trunc_float(np.int16(reg_block[28]) * scalefactor)
datapoint['fields']['AC Reactive Power Phase C'] = trunc_float(np.int16(reg_block[29]) * scalefactor)
# AC Power Factor
logger.debug(f'AC Power Factor SF: {str(np.int16(reg_block[30]))}')
scalefactor = np.float_power(10,np.int16(reg_block[35]))
datapoint['fields']['AC Average Power Factor'] = trunc_float(np.int16(reg_block[31]) * scalefactor)
datapoint['fields']['AC Power Factor Phase A'] = trunc_float(np.int16(reg_block[32]) * scalefactor)
datapoint['fields']['AC Power Factor Phase B'] = trunc_float(np.int16(reg_block[33]) * scalefactor)
datapoint['fields']['AC Power Factor Phase C'] = trunc_float(np.int16(reg_block[34]) * scalefactor)
datapoint['time'] = str(datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat())
logger.debug(f'Writing to Influx: {str(datapoint)}')
await solar_client.write(datapoint)
else:
# Error during data receive
if client.last_error() == 2:
logger.error(f'Failed to connect to SolarEdge inverter {client.host()}!')
elif client.last_error() == 3 or client.last_error() == 4:
logger.error('Send or receive error!')
elif client.last_error() == 5:
logger.error('Timeout during send or receive operation!')
except InfluxDBWriteError as e:
logger.error(f'Failed to write to InfluxDb: {e}')
except IOError as e:
logger.error(f'I/O exception during operation: {e}')
except Exception as e:
logger.error(f'Unhandled exception: {e}')
await asyncio.sleep(5)
if __name__ == '__main__':
    # Command-line entry point: connect to a SolarEdge inverter over
    # ModBus TCP and stream its readings into InfluxDB.
    parser = argparse.ArgumentParser()
    parser.add_argument('--influxdb', default='localhost')
    parser.add_argument('--influxport', type=int, default=8086)
    parser.add_argument('--port', type=int, default=502, help='ModBus TCP port number to use')
    parser.add_argument('--unitid', type=int, default=1, help='ModBus unit id to use in communication')
    parser.add_argument('--meters', type=int, default=0, help='Number of ModBus meters attached to inverter (0-3)')
    parser.add_argument('solaredge', metavar='SolarEdge IP', help='IP address of the SolarEdge inverter to monitor')
    parser.add_argument('--debug', '-d', action='count')
    args = parser.parse_args()
    logging.basicConfig()
    # -d enables solaredge debug logging; -dd additionally enables aioinflux debug logging
    if args.debug and args.debug >= 1:
        logging.getLogger('solaredge').setLevel(logging.DEBUG)
    if args.debug and args.debug == 2:
        logging.getLogger('aioinflux').setLevel(logging.DEBUG)
    print('Starting up solaredge monitoring')
    print(f'Connecting to Solaredge inverter {args.solaredge} on port {args.port} using unitid {args.unitid}')
    print(f'Writing data to influxDb {args.influxdb} on port {args.influxport}')
    print(f'Number of meters is {args.meters}')
    # Module-level client; read by the write_to_influx coroutine.
    client = ModbusClient(args.solaredge, port=args.port, unit_id=args.unitid, auto_open=True)
    logger.debug('Running eventloop')
    # asyncio.run() replaces the get_event_loop()/run_until_complete()
    # pattern (deprecated since Python 3.10) and guarantees the loop is
    # created and closed cleanly.
    asyncio.run(write_to_influx(args.influxdb, args.influxport, args.meters))
| [
"asyncio.get_event_loop",
"pyModbusTCP.client.ModbusClient",
"logging.basicConfig",
"aioinflux.InfluxDBClient",
"argparse.ArgumentParser",
"asyncio.sleep",
"datetime.datetime.utcnow",
"numpy.int16",
"logging.getLogger"
] | [((442, 472), 'logging.getLogger', 'logging.getLogger', (['"""solaredge"""'], {}), "('solaredge')\n", (459, 472), False, 'import logging\n'), ((17601, 17626), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (17624, 17626), False, 'import argparse\n'), ((18275, 18296), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (18294, 18296), False, 'import logging\n'), ((18801, 18887), 'pyModbusTCP.client.ModbusClient', 'ModbusClient', (['args.solaredge'], {'port': 'args.port', 'unit_id': 'args.unitid', 'auto_open': '(True)'}), '(args.solaredge, port=args.port, unit_id=args.unitid, auto_open\n =True)\n', (18813, 18887), False, 'from pyModbusTCP.client import ModbusClient\n'), ((713, 764), 'aioinflux.InfluxDBClient', 'InfluxDBClient', ([], {'host': 'dbhost', 'port': 'dbport', 'db': 'dbname'}), '(host=dbhost, port=dbport, db=dbname)\n', (727, 764), False, 'from aioinflux import InfluxDBClient, InfluxDBWriteError\n'), ((17543, 17559), 'asyncio.sleep', 'asyncio.sleep', (['(5)'], {}), '(5)\n', (17556, 17559), False, 'import asyncio\n'), ((18925, 18949), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (18947, 18949), False, 'import asyncio\n'), ((18344, 18374), 'logging.getLogger', 'logging.getLogger', (['"""solaredge"""'], {}), "('solaredge')\n", (18361, 18374), False, 'import logging\n'), ((18446, 18476), 'logging.getLogger', 'logging.getLogger', (['"""aioinflux"""'], {}), "('aioinflux')\n", (18463, 18476), False, 'import logging\n'), ((3087, 3109), 'numpy.int16', 'np.int16', (['reg_block[6]'], {}), '(reg_block[6])\n', (3095, 3109), True, 'import numpy as np\n'), ((3971, 3994), 'numpy.int16', 'np.int16', (['reg_block[13]'], {}), '(reg_block[13])\n', (3979, 3994), True, 'import numpy as np\n'), ((5102, 5125), 'numpy.int16', 'np.int16', (['reg_block[17]'], {}), '(reg_block[17])\n', (5110, 5125), True, 'import numpy as np\n'), ((5503, 5526), 'numpy.int16', 'np.int16', (['reg_block[15]'], {}), '(reg_block[15])\n', 
(5511, 5526), True, 'import numpy as np\n'), ((5957, 5980), 'numpy.int16', 'np.int16', (['reg_block[28]'], {}), '(reg_block[28])\n', (5965, 5980), True, 'import numpy as np\n'), ((6408, 6431), 'numpy.int16', 'np.int16', (['reg_block[30]'], {}), '(reg_block[30])\n', (6416, 6431), True, 'import numpy as np\n'), ((6855, 6878), 'numpy.int16', 'np.int16', (['reg_block[32]'], {}), '(reg_block[32])\n', (6863, 6878), True, 'import numpy as np\n'), ((7306, 7329), 'numpy.int16', 'np.int16', (['reg_block[37]'], {}), '(reg_block[37])\n', (7314, 7329), True, 'import numpy as np\n'), ((11704, 11726), 'numpy.int16', 'np.int16', (['reg_block[4]'], {}), '(reg_block[4])\n', (11712, 11726), True, 'import numpy as np\n'), ((12351, 12374), 'numpy.int16', 'np.int16', (['reg_block[13]'], {}), '(reg_block[13])\n', (12359, 12374), True, 'import numpy as np\n'), ((13480, 13503), 'numpy.int16', 'np.int16', (['reg_block[15]'], {}), '(reg_block[15])\n', (13488, 13503), True, 'import numpy as np\n'), ((13809, 13832), 'numpy.int16', 'np.int16', (['reg_block[20]'], {}), '(reg_block[20])\n', (13817, 13832), True, 'import numpy as np\n'), ((14507, 14530), 'numpy.int16', 'np.int16', (['reg_block[25]'], {}), '(reg_block[25])\n', (14515, 14530), True, 'import numpy as np\n'), ((15201, 15224), 'numpy.int16', 'np.int16', (['reg_block[30]'], {}), '(reg_block[30])\n', (15209, 15224), True, 'import numpy as np\n'), ((15891, 15914), 'numpy.int16', 'np.int16', (['reg_block[35]'], {}), '(reg_block[35])\n', (15899, 15914), True, 'import numpy as np\n'), ((11802, 11824), 'numpy.int16', 'np.int16', (['reg_block[0]'], {}), '(reg_block[0])\n', (11810, 11824), True, 'import numpy as np\n'), ((11916, 11938), 'numpy.int16', 'np.int16', (['reg_block[1]'], {}), '(reg_block[1])\n', (11924, 11938), True, 'import numpy as np\n'), ((12030, 12052), 'numpy.int16', 'np.int16', (['reg_block[2]'], {}), '(reg_block[2])\n', (12038, 12052), True, 'import numpy as np\n'), ((12144, 12166), 'numpy.int16', 'np.int16', 
(['reg_block[3]'], {}), '(reg_block[3])\n', (12152, 12166), True, 'import numpy as np\n'), ((12454, 12476), 'numpy.int16', 'np.int16', (['reg_block[5]'], {}), '(reg_block[5])\n', (12462, 12476), True, 'import numpy as np\n'), ((12570, 12592), 'numpy.int16', 'np.int16', (['reg_block[6]'], {}), '(reg_block[6])\n', (12578, 12592), True, 'import numpy as np\n'), ((12686, 12708), 'numpy.int16', 'np.int16', (['reg_block[7]'], {}), '(reg_block[7])\n', (12694, 12708), True, 'import numpy as np\n'), ((12802, 12824), 'numpy.int16', 'np.int16', (['reg_block[8]'], {}), '(reg_block[8])\n', (12810, 12824), True, 'import numpy as np\n'), ((12918, 12940), 'numpy.int16', 'np.int16', (['reg_block[9]'], {}), '(reg_block[9])\n', (12926, 12940), True, 'import numpy as np\n'), ((13034, 13057), 'numpy.int16', 'np.int16', (['reg_block[10]'], {}), '(reg_block[10])\n', (13042, 13057), True, 'import numpy as np\n'), ((13151, 13174), 'numpy.int16', 'np.int16', (['reg_block[11]'], {}), '(reg_block[11])\n', (13159, 13174), True, 'import numpy as np\n'), ((13268, 13291), 'numpy.int16', 'np.int16', (['reg_block[12]'], {}), '(reg_block[12])\n', (13276, 13291), True, 'import numpy as np\n'), ((13575, 13598), 'numpy.int16', 'np.int16', (['reg_block[14]'], {}), '(reg_block[14])\n', (13583, 13598), True, 'import numpy as np\n'), ((13911, 13934), 'numpy.int16', 'np.int16', (['reg_block[16]'], {}), '(reg_block[16])\n', (13919, 13934), True, 'import numpy as np\n'), ((14029, 14052), 'numpy.int16', 'np.int16', (['reg_block[17]'], {}), '(reg_block[17])\n', (14037, 14052), True, 'import numpy as np\n'), ((14147, 14170), 'numpy.int16', 'np.int16', (['reg_block[18]'], {}), '(reg_block[18])\n', (14155, 14170), True, 'import numpy as np\n'), ((14265, 14288), 'numpy.int16', 'np.int16', (['reg_block[19]'], {}), '(reg_block[19])\n', (14273, 14288), True, 'import numpy as np\n'), ((14613, 14636), 'numpy.int16', 'np.int16', (['reg_block[21]'], {}), '(reg_block[21])\n', (14621, 14636), True, 'import numpy as np\n'), 
((14735, 14758), 'numpy.int16', 'np.int16', (['reg_block[22]'], {}), '(reg_block[22])\n', (14743, 14758), True, 'import numpy as np\n'), ((14857, 14880), 'numpy.int16', 'np.int16', (['reg_block[23]'], {}), '(reg_block[23])\n', (14865, 14880), True, 'import numpy as np\n'), ((14979, 15002), 'numpy.int16', 'np.int16', (['reg_block[24]'], {}), '(reg_block[24])\n', (14987, 15002), True, 'import numpy as np\n'), ((15307, 15330), 'numpy.int16', 'np.int16', (['reg_block[26]'], {}), '(reg_block[26])\n', (15315, 15330), True, 'import numpy as np\n'), ((15429, 15452), 'numpy.int16', 'np.int16', (['reg_block[27]'], {}), '(reg_block[27])\n', (15437, 15452), True, 'import numpy as np\n'), ((15551, 15574), 'numpy.int16', 'np.int16', (['reg_block[28]'], {}), '(reg_block[28])\n', (15559, 15574), True, 'import numpy as np\n'), ((15673, 15696), 'numpy.int16', 'np.int16', (['reg_block[29]'], {}), '(reg_block[29])\n', (15681, 15696), True, 'import numpy as np\n'), ((15997, 16020), 'numpy.int16', 'np.int16', (['reg_block[31]'], {}), '(reg_block[31])\n', (16005, 16020), True, 'import numpy as np\n'), ((16117, 16140), 'numpy.int16', 'np.int16', (['reg_block[32]'], {}), '(reg_block[32])\n', (16125, 16140), True, 'import numpy as np\n'), ((16237, 16260), 'numpy.int16', 'np.int16', (['reg_block[33]'], {}), '(reg_block[33])\n', (16245, 16260), True, 'import numpy as np\n'), ((16357, 16380), 'numpy.int16', 'np.int16', (['reg_block[34]'], {}), '(reg_block[34])\n', (16365, 16380), True, 'import numpy as np\n'), ((3012, 3034), 'numpy.int16', 'np.int16', (['reg_block[6]'], {}), '(reg_block[6])\n', (3020, 3034), True, 'import numpy as np\n'), ((3895, 3918), 'numpy.int16', 'np.int16', (['reg_block[13]'], {}), '(reg_block[13])\n', (3903, 3918), True, 'import numpy as np\n'), ((5026, 5049), 'numpy.int16', 'np.int16', (['reg_block[17]'], {}), '(reg_block[17])\n', (5034, 5049), True, 'import numpy as np\n'), ((5427, 5450), 'numpy.int16', 'np.int16', (['reg_block[15]'], {}), '(reg_block[15])\n', (5435, 
5450), True, 'import numpy as np\n'), ((5881, 5904), 'numpy.int16', 'np.int16', (['reg_block[28]'], {}), '(reg_block[28])\n', (5889, 5904), True, 'import numpy as np\n'), ((6332, 6355), 'numpy.int16', 'np.int16', (['reg_block[30]'], {}), '(reg_block[30])\n', (6340, 6355), True, 'import numpy as np\n'), ((6779, 6802), 'numpy.int16', 'np.int16', (['reg_block[32]'], {}), '(reg_block[32])\n', (6787, 6802), True, 'import numpy as np\n'), ((7230, 7253), 'numpy.int16', 'np.int16', (['reg_block[37]'], {}), '(reg_block[37])\n', (7238, 7253), True, 'import numpy as np\n'), ((7582, 7608), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (7606, 7608), False, 'import datetime\n'), ((11625, 11647), 'numpy.int16', 'np.int16', (['reg_block[4]'], {}), '(reg_block[4])\n', (11633, 11647), True, 'import numpy as np\n'), ((12271, 12294), 'numpy.int16', 'np.int16', (['reg_block[13]'], {}), '(reg_block[13])\n', (12279, 12294), True, 'import numpy as np\n'), ((13400, 13423), 'numpy.int16', 'np.int16', (['reg_block[15]'], {}), '(reg_block[15])\n', (13408, 13423), True, 'import numpy as np\n'), ((13729, 13752), 'numpy.int16', 'np.int16', (['reg_block[20]'], {}), '(reg_block[20])\n', (13737, 13752), True, 'import numpy as np\n'), ((14427, 14450), 'numpy.int16', 'np.int16', (['reg_block[25]'], {}), '(reg_block[25])\n', (14435, 14450), True, 'import numpy as np\n'), ((15121, 15144), 'numpy.int16', 'np.int16', (['reg_block[30]'], {}), '(reg_block[30])\n', (15129, 15144), True, 'import numpy as np\n'), ((15811, 15834), 'numpy.int16', 'np.int16', (['reg_block[30]'], {}), '(reg_block[30])\n', (15819, 15834), True, 'import numpy as np\n'), ((16462, 16488), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (16486, 16488), False, 'import datetime\n')] |
"""Vessel segmentation"""
import os
from typing import Optional
import itk
import numpy as np
import nibabel as nib
import skimage.morphology as morph
from tqdm import tqdm
from scipy.ndimage import affine_transform
from util.nifti import load_nifti
def backup_result(image: itk.Image, aff: np.ndarray,
                  nii_header: nib.nifti1.Nifti1Header, filename: str):
    """
    Save an intermediate result of the segmentation pipeline to disk
    as a NIfTI file.
    """
    volume = np.asarray(image)
    # itk arrays are indexed (z, y, x); nibabel expects (x, y, z)
    volume = np.moveaxis(volume, [0, 1, 2], [2, 1, 0])
    nib.save(nib.Nifti1Image(volume, aff, nii_header), filename)
def determine_intensity_sigmoid_params(
        intensity_image: itk.Image,
        vessel_mask: itk.Image) -> tuple[float, float]:
    """
    Derive sigmoid parameters (alpha, beta) that map vessel intensities
    towards 1.0 and everything else towards 0.0. The resulting sigmoid
    can later act as a speed map for a fast-marching algorithm.
    """
    intensities = np.asarray(intensity_image)
    mask = np.asarray(vessel_mask)

    # Mean intensity inside the vessel mask ...
    K1 = np.mean(intensities[mask != 0.0])
    # ... and outside of it, ignoring zero-valued (background) voxels
    outside = intensities[mask == 0.0]
    K2 = np.mean(outside[outside != 0.0])

    # Heuristic: alpha spans the transition width, beta sits halfway
    # between the two mean intensities.
    return (K1 - K2) / 6, (K1 + K2) / 2
def determine_edge_sigmoid_params(laplacian_image: itk.Image) \
        -> tuple[float, float]:
    """
    Derive sigmoid parameters (alpha, beta) that map regions of
    constant intensity towards 1.0 and high-gradient regions
    towards 0.0.
    """
    edgeness = np.asarray(laplacian_image)

    # Robust extremes instead of raw min/max
    K1 = np.percentile(edgeness, 5)    # flat-region level
    K2 = np.percentile(edgeness, 95)   # edge level

    # Same heuristic as for the intensity sigmoid: alpha spans the
    # transition width, beta sits halfway between both levels.
    return (K1 - K2) / 6, (K1 + K2) / 2
def anisotropic_diffusion_smoothing(image: itk.Image,
                                    timeStep: float = 0.05,
                                    nIter: int = 3,
                                    conductance: float = 5.0) -> itk.Image:
    """
    Apply curvature-based anisotropic diffusion smoothing: noise is
    suppressed while edges are preserved.
    Documentation as described in:
    https://itk.org/ITKExamples/src/Filtering/AnisotropicSmoothing/ComputeCurvatureAnisotropicDiffusion/Documentation.html
    """
    # The ITK filter requires float pixels
    image_float = image.astype(itk.F)
    dim = image.GetImageDimension()
    FloatImageType = itk.Image[itk.F, dim]

    # Input and output image types are identical (float, same dimension)
    return itk.curvature_anisotropic_diffusion_image_filter(
        image_float,
        number_of_iterations=nIter,
        time_step=timeStep,
        conductance_parameter=conductance,
        ttype=[FloatImageType, FloatImageType]
    )
def hessian_vesselness(image: itk.Image, voxDim: float,
                       sigmaRange: tuple = (0.1, 1.0), nSteps: int = 10,
                       alpha: Optional[float] = 0.5,
                       beta: Optional[float] = 0.5,
                       gamma: Optional[float] = 20.0) -> itk.Image:
    """
    Compute a 3D multiscale Hessian-based vesselness map
    (Antiga et al.), as is described in:
    https://itk.org/Doxygen/html/classitk_1_1MultiScaleHessianBasedMeasureImageFilter.html
    https://itk.org/ITKExamples/src/Nonunit/Review/SegmentBloodVesselsWithMultiScaleHessianBasedMeasure/Documentation.html
    """
    image_float = image.astype(itk.F)
    dim = image_float.GetImageDimension()
    ImageType = itk.Image[itk.F, dim]
    HessianImageType = itk.Image[
        itk.SymmetricSecondRankTensor[itk.D, dim], dim]

    # Objectness filter tuned for bright tubular structures
    objectness = itk.HessianToObjectnessMeasureImageFilter[
        HessianImageType, ImageType].New()
    objectness.SetBrightObject(True)
    objectness.SetScaleObjectnessMeasure(False)
    if alpha:
        objectness.SetAlpha(alpha)
    if beta:
        objectness.SetBeta(beta)
    if gamma:
        objectness.SetGamma(gamma)

    # Scan the sigma range (given in mm, converted to voxel units)
    hessian_filter = itk.MultiScaleHessianBasedMeasureImageFilter[
        ImageType, HessianImageType, ImageType].New()
    hessian_filter.SetInput(image_float)
    hessian_filter.SetHessianToMeasureFilter(objectness)
    hessian_filter.SetSigmaMinimum(sigmaRange[0] / voxDim)
    hessian_filter.SetSigmaMaximum(sigmaRange[1] / voxDim)
    hessian_filter.SetNumberOfSigmaSteps(nSteps)

    hessian_filter.Update()
    return hessian_filter.GetOutput()
def vesselness_thresholding(image: itk.Image, percentile: float = 95.,
                            nonzeros: bool = True) -> itk.Image:
    """
    Binarize a vesselness map at a percentile-based threshold.

    Parameters:
        image: vesselness map to threshold.
        percentile: percentile (0-100) at which to set the threshold.
        nonzeros: if True, rank only voxels above 1e-4, so the
            (near-)zero background does not skew the percentile.

    Returns the binarized (0/1) image.
    """
    vesselness = itk.array_from_image(image)
    # NOTE: a discarded `np.moveaxis(...)` call was removed here — its
    # return value was never assigned, and since all operations below
    # are elementwise and the array is exported in the same axis order,
    # it was a misleading no-op.
    if nonzeros:
        threshold = np.percentile(vesselness[vesselness > 1e-4], percentile)
    else:
        threshold = np.percentile(vesselness, percentile)
    # Binarize in place
    vesselness[vesselness < threshold] = 0.
    vesselness[vesselness >= threshold] = 1.
    return itk.image_from_array(vesselness)
def fastmarching_segmentation(image: itk.Image, seed_mask: itk.Image,
                              affine_matrix: np.ndarray,
                              nii_header: nib.nifti1.Nifti1Header,
                              logsDir: str,
                              intSigmoidAlpha: Optional[float] = None,
                              intSigmoidBeta: Optional[float] = None,
                              edgeSigmoidAlpha: Optional[float] = None,
                              edgeSigmoidBeta: Optional[float] = None,
                              timeThreshold: int = 10,
                              stoppingTime: int = 10,
                              smoothInput: bool = False,
                              useOnlyGradientMagnitudeAsSpeed: bool = False,
                              backupInterResults: bool = True) -> tuple:
    """
    Fast-marching vessel segmentation (ITK), as is documented
    (for C++) at:
    https://itk.org/Doxygen/html/itkFastMarchingImageFilter_8h_source.html

    Parameters:
        image: intensity image to segment.
        seed_mask: binary mask whose nonzero voxels seed the front.
        affine_matrix / nii_header: geometry used for back-ups.
        logsDir: directory for intermediate back-up files.
        *Sigmoid*: optional sigmoid parameters; determined
            automatically from the data when left at None.
        timeThreshold: arrival-time cut-off for the final mask.
        stoppingTime: stopping value of the fast-marching front.
        smoothInput: apply anisotropic diffusion smoothing first.
        useOnlyGradientMagnitudeAsSpeed: ignore the intensity sigmoid
            and use only the edge sigmoid as speed map.
        backupInterResults: write intermediate images to logsDir.

    Returns:
        (binary segmentation image, sigmoid-mapped edge image).
    """
    image_F = image.astype(itk.F)
    ImageType = itk.Image[itk.F, image.GetImageDimension()]

    # Optionally denoise the input before edge detection
    if smoothInput:
        smoothed_image = anisotropic_diffusion_smoothing(image_F)
    else:
        smoothed_image = image_F

    # Laplacian edge map (used as part of the speed map below)
    laplacianEdge_image = itk.laplacian_image_filter(smoothed_image)
    if backupInterResults:
        backup_result(laplacianEdge_image, affine_matrix, nii_header,
                      os.path.join(logsDir, "4_1_gradient_magnitude.nii.gz"))

    # Build the speed map by sigmoid-mapping the edge image (and,
    # unless disabled, the intensity image as well)
    if useOnlyGradientMagnitudeAsSpeed:
        laplacianSigmoid_image = itk.sigmoid_image_filter(
            laplacianEdge_image,
            output_minimum=0.0, output_maximum=1.0,
            alpha=edgeSigmoidAlpha, beta=edgeSigmoidBeta
        )
        # BUGFIX: laplacianSigmoid_image was previously undefined on
        # this branch, so the return statement below raised NameError.
        speedMap_image = laplacianSigmoid_image
    else:
        # Auto-determine sigmoid parameters where not supplied
        if not intSigmoidAlpha or not intSigmoidBeta:
            intSigmoidAlpha, intSigmoidBeta = \
                determine_intensity_sigmoid_params(smoothed_image, seed_mask)
        if not edgeSigmoidAlpha or not edgeSigmoidBeta:
            edgeSigmoidAlpha, edgeSigmoidBeta = \
                determine_edge_sigmoid_params(laplacianEdge_image)
        # Sigmoid of the intensities
        intensitySigmoid_image = itk.sigmoid_image_filter(
            smoothed_image,
            output_minimum=0.0, output_maximum=1.0,
            alpha=intSigmoidAlpha, beta=intSigmoidBeta
        )
        # Sigmoid of the Laplacian edge map
        laplacianSigmoid_image = itk.sigmoid_image_filter(
            laplacianEdge_image,
            output_minimum=0.0, output_maximum=1.0,
            alpha=edgeSigmoidAlpha, beta=edgeSigmoidBeta
        )
        # The final speed map is the product of both sigmoids
        speedMap_image = itk.multiply_image_filter(
            intensitySigmoid_image, laplacianSigmoid_image
        )

    # Zero the speed outside the brain (very low intensity voxels)
    speedMap_np = np.asarray(speedMap_image)
    image_np = np.asarray(image_F)
    speedMap_np[image_np < 1e-2 * np.mean(image_np)] = 0.
    speedMap_image = itk.GetImageFromArray(speedMap_np)
    if backupInterResults:
        backup_result(speedMap_image, affine_matrix, nii_header,
                      os.path.join(logsDir, "4_2_speed_map_sigmoid.nii.gz"))

    # Convert the seed mask into a list of fast-marching trial points
    if backupInterResults:
        backup_result(seed_mask, affine_matrix, nii_header,
                      os.path.join(logsDir, "4_3_seed_mask.nii.gz"))
    seed_idx = np.nonzero(np.asarray(seed_mask))
    NodeType = itk.LevelSetNode.F3
    NodeContainer = itk.VectorContainer[itk.UI, NodeType]
    SeedPoints = NodeContainer.New()
    SeedPoints.Initialize()
    for i in range(np.shape(seed_idx)[1]):
        # numpy axes are (z, y, x); itk indices are (x, y, z)
        id_x = int(seed_idx[2][i])
        id_y = int(seed_idx[1][i])
        id_z = int(seed_idx[0][i])
        node = NodeType()
        node.SetIndex((id_x, id_y, id_z))
        node.SetValue(0.0)  # arrival time 0 at every seed
        SeedPoints.InsertElement(i, node)

    # Perform FastMarching
    # https://www.orfeo-toolbox.org/SoftwareGuide/SoftwareGuidech16.html
    fastMarching_image = itk.fast_marching_image_filter(
        speedMap_image, trial_points=SeedPoints,
        stopping_value=stoppingTime,
        ttype=[ImageType, ImageType]
    )
    # Binarize arrival times: voxels reached before timeThreshold
    image_out = itk.binary_threshold_image_filter(
        fastMarching_image,
        lower_threshold=0.0, upper_threshold=timeThreshold,
        outside_value=0.0, inside_value=1.0
    )
    return image_out, laplacianSigmoid_image
def levelset_segmentation(seed_image: itk.Image,
                          feature_image: itk.Image) -> itk.Image:
    """
    Geodesic-active-contour level-set refinement (ITK),
    as is documented (for C++) at:
    https://itk.org/Doxygen/html/classitk_1_1GeodesicActiveContourLevelSetImageFilter.html
    """
    seeds_f = seed_image.astype(itk.F)
    features_f = feature_image.astype(itk.F)

    # Signed initial level set: -1 inside the seed region, +1 outside
    initial_levelset = itk.binary_threshold_image_filter(
        seeds_f,
        lower_threshold=0.1,
        outside_value=1.0, inside_value=-1.0
    )

    # Evolve the contour along the feature (speed) image
    evolved = itk.geodesic_active_contour_level_set_image_filter(
        initial_levelset, features_f,
        number_of_iterations=20, propagation_scaling=-0.5,
        advection_scaling=1.0, curvature_scaling=1.0
    )

    # Binarize the evolved level set into the final mask
    return itk.binary_threshold_image_filter(
        evolved,
        lower_threshold=0.0,
        outside_value=1.0, inside_value=0.0
    )
def neumann_segmentation(image: np.ndarray,
                         affine_matrix: np.ndarray,
                         nii_header: nib.nifti1.Nifti1Header,
                         logsDir: str) -> np.ndarray:
    """
    *LevelSet* vessel segmentation after Neumann et al., 2019
    (doi: https://doi.org/10.1016/j.cmpb.2019.105037), built on ITK.

    Pipeline:
      1. Edge-preserving anisotropic diffusion smoothing.
      2. Hessian-based vesselness map (ITK vesselness filters).
      3. Percentile thresholding of the vesselness map to suppress
         false-positive voxels.
      4. FastMarching segmentation seeded by the thresholded map.
      (A final active-contour "LevelSet" refinement step is currently
      disabled; see the note near the end of this function.)

    Every intermediate result is backed up to `logsDir`.
    """
    # Average voxel size from the affine diagonal
    avg_vox_dim = np.mean(np.absolute((affine_matrix.diagonal())[:-1]))

    # numpy -> itk, float pixels
    itk_input = itk.GetImageFromArray(image).astype(itk.F)

    # 1) Anisotropic diffusion smoothing
    smoothed = anisotropic_diffusion_smoothing(itk_input)
    backup_result(smoothed, affine_matrix, nii_header,
                  os.path.join(logsDir, "1_anisotropic_diff_smoothing.nii.gz"))

    # 2) Hessian-based vesselness map
    vesselness = hessian_vesselness(smoothed, avg_vox_dim)
    backup_result(vesselness, affine_matrix, nii_header,
                  os.path.join(logsDir, "2_hessian_based_vesselness.nii.gz"))

    # 3) Percentile thresholding
    thresholded = vesselness_thresholding(vesselness)
    backup_result(thresholded, affine_matrix, nii_header,
                  os.path.join(logsDir, "3_thresholded_vesselness.nii.gz"))

    # 4) FastMarching segmentation seeded by the thresholded map
    fastmarching, _speed = fastmarching_segmentation(
        smoothed, thresholded, affine_matrix, nii_header, logsDir
    )
    backup_result(fastmarching, affine_matrix, nii_header,
                  os.path.join(logsDir, "4_fastmarching_segmentation.nii.gz"))

    # NOTE: the LevelSet refinement step
    # (levelset_segmentation(fastmarching, _speed), backed up as
    # "5_levelset_segmentation.nii.gz") is intentionally disabled.

    # itk (z, y, x) -> numpy (x, y, z)
    mask = itk.GetArrayFromImage(fastmarching)
    return np.moveaxis(mask, [0, 1, 2], [2, 1, 0])
def extract_vessels(seg_paths: dict):
    """
    Run the vessel segmentation for one subject.

    Loads the gadolinium-enhanced T1w image plus the brain (BET) and
    CSF masks, resamples the masks into the T1CE array space, runs the
    Neumann-style segmentation and cleans the result with a
    morphological closing before saving the mask to disk.
    """
    # Directory for intermediate (debug) results
    backup_dir = seg_paths["backupDir"]

    # Load the input volumes
    T1w_gado, ori_aff, ori_hdr = load_nifti(seg_paths["T1-gado"])
    T1w_bet, bet_aff, _ = load_nifti(seg_paths["bet"])
    csf_mask, csf_aff, _ = load_nifti(seg_paths["csf"])

    # Resample BET and CSF masks into the T1w-gado array space
    bet_to_ori = np.linalg.inv(bet_aff).dot(ori_aff)
    csf_to_ori = np.linalg.inv(csf_aff).dot(ori_aff)
    T1w_bet = affine_transform(T1w_bet, bet_to_ori,
                               output_shape=np.shape(T1w_gado))
    # NOTE(review): csf_mask is resampled but not used further down —
    # kept for parity with the original implementation; confirm intent.
    csf_mask = affine_transform(csf_mask, csf_to_ori,
                                output_shape=np.shape(T1w_gado))

    # Strip non-brain voxels from the contrast-enhanced image
    T1w_gado[T1w_bet < 1e-2] = 0

    # LevelSet-style vessel extraction
    vessel_mask = neumann_segmentation(T1w_gado, ori_aff, ori_hdr, backup_dir)
    vessel_mask[T1w_bet < 1e-2] = 0  # remove any non-brain response

    # Morphological closing with a ball of ~2 mm radius
    avg_vox_dim = np.mean(np.absolute((ori_aff.diagonal())[:-1]))
    vessel_mask = morph.closing(vessel_mask, morph.ball(int(2 / avg_vox_dim)))

    # Persist the final vessel mask
    nib.save(nib.Nifti1Image(vessel_mask, ori_aff, ori_hdr),
             seg_paths["vessel_mask"])
def seg_vessels(paths: dict, settings: dict, verbose: bool = True):
    """
    This function performs the path management/administratory
    part of the vessel segmentation. It calls upon extract_vessels()
    to perform the actual segmentation.

    Parameters:
        paths    : Pipeline path administration dict. Mutated in place:
                   gains a "segDir" entry and per-subject "seg_paths"
                   entries (incl. the generated "vessel_mask" path).
        settings : Pipeline settings dict. settings["resetModules"][2]
                   decides whether existing output is kept (0) or
                   regenerated (1); any other value raises ValueError.
        verbose  : If True, show a tqdm progress bar and report skips.

    Returns:
        (paths, settings) : The updated administration dicts.
    """
    # Tracks whether at least one subject was skipped (output existed).
    skipped_img = False
    # If applicable, make segmentation paths and folder
    if "segDir" not in paths:
        paths["segDir"] = os.path.join(paths["tmpDataDir"], "segmentation")
    if "seg_paths" not in paths:
        paths["seg_paths"] = {}
    if not os.path.isdir(paths["segDir"]): os.mkdir(paths["segDir"])
    # Generate processing paths (iteratively)
    seg_paths = []
    for subject in paths["nii_paths"]:
        # Create subject dir and raw subject dir
        subjectDir = os.path.join(paths["segDir"], subject)
        if not os.path.isdir(subjectDir): os.mkdir(subjectDir)
        rawDir = os.path.join(subjectDir, "raw")
        if not os.path.isdir(rawDir): os.mkdir(rawDir)
        if subject not in paths["seg_paths"]:
            paths["seg_paths"][subject] = {
                "dir": subjectDir,
                "raw": rawDir
            }
        # Create backup dir for intermediate/debug results
        backupDir = os.path.join(rawDir, "vessel_debug")
        if not os.path.isdir(backupDir): os.mkdir(backupDir)
        # Define needed paths (originals + FSL-processed)
        T1_path = paths["nii_paths"][subject]["MRI_T1W"]
        T1_gado_path = paths["mrreg_paths"][subject]["gado_coreg"]
        fsl_bet_path = paths["fsl_paths"][subject]["bet"]
        fsl_csf_path = paths["fsl_paths"][subject]["fast_csf"]
        # Assemble segmentation path
        vessel_mask_path = os.path.join(subjectDir, "vessel_mask.nii.gz")
        # Add paths to {paths}
        paths["seg_paths"][subject]["vessel_mask"] = vessel_mask_path
        # Add paths to seg_paths (consumed by extract_vessels below)
        subject_dict = {"subject": subject,
                        "T1": T1_path,
                        "T1-gado": T1_gado_path,
                        "bet": fsl_bet_path,
                        "csf": fsl_csf_path,
                        "vessel_mask": vessel_mask_path,
                        "backupDir": backupDir}
        seg_paths.append(subject_dict)
    # Now, loop over seg_paths and perform vessel segmentation
    # Define iterator
    if verbose:
        iterator = tqdm(seg_paths, ascii=True,
                        bar_format='{l_bar}{bar:30}{r_bar}{bar:-30b}')
    else:
        iterator = seg_paths
    # Main loop
    for sub_paths in iterator:
        # Check whether output already there
        output_ok = os.path.exists(sub_paths["vessel_mask"])
        # Determine whether to skip subject
        if output_ok:
            if settings["resetModules"][2] == 0:
                skipped_img = True
                continue
            elif settings["resetModules"][2] == 1:
                # Generate vessel mask
                extract_vessels(sub_paths)
            else:
                raise ValueError("Parameter 'resetModules' should be a list "
                                 "containing only 0's and 1's. "
                                 "Please check the config file (config.json).")
        else:
            # Generate vessel mask
            extract_vessels(sub_paths)
    # If some files were skipped, write message
    # NOTE(review): regeneration happens when the flag is 1, but the message
    # below tells the user to set it to 0 — confirm the intended wording.
    if verbose and skipped_img:
        print("Some scans were skipped due to the output being complete.\n"
              "If you want to rerun this entire module, please set "
              "'resetModules'[2] to 0 in the config.json file.")
    return paths, settings
| [
"itk.GetImageFromArray",
"os.mkdir",
"numpy.moveaxis",
"numpy.shape",
"numpy.mean",
"itk.laplacian_image_filter",
"os.path.join",
"skimage.morphology.closing",
"os.path.exists",
"nibabel.save",
"util.nifti.load_nifti",
"nibabel.Nifti1Image",
"tqdm.tqdm",
"itk.sigmoid_image_filter",
"nump... | [((683, 713), 'nibabel.save', 'nib.save', (['nii_backup', 'filename'], {}), '(nii_backup, filename)\n', (691, 713), True, 'import nibabel as nib\n'), ((1190, 1217), 'numpy.asarray', 'np.asarray', (['intensity_image'], {}), '(intensity_image)\n', (1200, 1217), True, 'import numpy as np\n'), ((1235, 1258), 'numpy.asarray', 'np.asarray', (['vessel_mask'], {}), '(vessel_mask)\n', (1245, 1258), True, 'import numpy as np\n'), ((1499, 1520), 'numpy.mean', 'np.mean', (['vessel_array'], {}), '(vessel_array)\n', (1506, 1520), True, 'import numpy as np\n'), ((1530, 1578), 'numpy.mean', 'np.mean', (['nonvessel_array[nonvessel_array != 0.0]'], {}), '(nonvessel_array[nonvessel_array != 0.0])\n', (1537, 1578), True, 'import numpy as np\n'), ((2074, 2101), 'numpy.asarray', 'np.asarray', (['laplacian_image'], {}), '(laplacian_image)\n', (2084, 2101), True, 'import numpy as np\n'), ((2154, 2184), 'numpy.percentile', 'np.percentile', (['image_array', '(95)'], {}), '(image_array, 95)\n', (2167, 2184), True, 'import numpy as np\n'), ((2204, 2233), 'numpy.percentile', 'np.percentile', (['image_array', '(5)'], {}), '(image_array, 5)\n', (2217, 2233), True, 'import numpy as np\n'), ((3362, 3552), 'itk.curvature_anisotropic_diffusion_image_filter', 'itk.curvature_anisotropic_diffusion_image_filter', (['image_F'], {'number_of_iterations': 'nIter', 'time_step': 'timeStep', 'conductance_parameter': 'conductance', 'ttype': '[InputImageType, OutputImageType]'}), '(image_F,\n number_of_iterations=nIter, time_step=timeStep, conductance_parameter=\n conductance, ttype=[InputImageType, OutputImageType])\n', (3410, 3552), False, 'import itk\n'), ((6001, 6028), 'itk.array_from_image', 'itk.array_from_image', (['image'], {}), '(image)\n', (6021, 6028), False, 'import itk\n'), ((6033, 6084), 'numpy.moveaxis', 'np.moveaxis', (['vesselness_as_np', '[0, 1, 2]', '[2, 1, 0]'], {}), '(vesselness_as_np, [0, 1, 2], [2, 1, 0])\n', (6044, 6084), True, 'import numpy as np\n'), ((6543, 6581), 
'itk.image_from_array', 'itk.image_from_array', (['vesselness_as_np'], {}), '(vesselness_as_np)\n', (6563, 6581), False, 'import itk\n'), ((8141, 8183), 'itk.laplacian_image_filter', 'itk.laplacian_image_filter', (['smoothed_image'], {}), '(smoothed_image)\n', (8167, 8183), False, 'import itk\n'), ((9933, 9959), 'numpy.asarray', 'np.asarray', (['speedMap_image'], {}), '(speedMap_image)\n', (9943, 9959), True, 'import numpy as np\n'), ((9975, 9994), 'numpy.asarray', 'np.asarray', (['image_F'], {}), '(image_F)\n', (9985, 9994), True, 'import numpy as np\n'), ((10076, 10110), 'itk.GetImageFromArray', 'itk.GetImageFromArray', (['speedMap_np'], {}), '(speedMap_np)\n', (10097, 10110), False, 'import itk\n'), ((11132, 11266), 'itk.fast_marching_image_filter', 'itk.fast_marching_image_filter', (['speedMap_image'], {'trial_points': 'SeedPoints', 'stopping_value': 'stoppingTime', 'ttype': '[ImageType, ImageType]'}), '(speedMap_image, trial_points=SeedPoints,\n stopping_value=stoppingTime, ttype=[ImageType, ImageType])\n', (11162, 11266), False, 'import itk\n'), ((11346, 11492), 'itk.binary_threshold_image_filter', 'itk.binary_threshold_image_filter', (['fastMarching_image'], {'lower_threshold': '(0.0)', 'upper_threshold': 'timeThreshold', 'outside_value': '(0.0)', 'inside_value': '(1.0)'}), '(fastMarching_image, lower_threshold=0.0,\n upper_threshold=timeThreshold, outside_value=0.0, inside_value=1.0)\n', (11379, 11492), False, 'import itk\n'), ((12061, 12171), 'itk.binary_threshold_image_filter', 'itk.binary_threshold_image_filter', (['seed_image_F'], {'lower_threshold': '(0.1)', 'outside_value': '(1.0)', 'inside_value': '(-1.0)'}), '(seed_image_F, lower_threshold=0.1,\n outside_value=1.0, inside_value=-1.0)\n', (12094, 12171), False, 'import itk\n'), ((12273, 12464), 'itk.geodesic_active_contour_level_set_image_filter', 'itk.geodesic_active_contour_level_set_image_filter', (['initial_level_set', 'feature_image_F'], {'number_of_iterations': '(20)', 'propagation_scaling': 
'(-0.5)', 'advection_scaling': '(1.0)', 'curvature_scaling': '(1.0)'}), '(initial_level_set,\n feature_image_F, number_of_iterations=20, propagation_scaling=-0.5,\n advection_scaling=1.0, curvature_scaling=1.0)\n', (12323, 12464), False, 'import itk\n'), ((12526, 12637), 'itk.binary_threshold_image_filter', 'itk.binary_threshold_image_filter', (['levelSet_image'], {'lower_threshold': '(0.0)', 'outside_value': '(1.0)', 'inside_value': '(0.0)'}), '(levelSet_image, lower_threshold=0.0,\n outside_value=1.0, inside_value=0.0)\n', (12559, 12637), False, 'import itk\n'), ((14078, 14106), 'itk.GetImageFromArray', 'itk.GetImageFromArray', (['image'], {}), '(image)\n', (14099, 14106), False, 'import itk\n'), ((15719, 15758), 'itk.GetArrayFromImage', 'itk.GetArrayFromImage', (['fastmarching_img'], {}), '(fastmarching_img)\n', (15740, 15758), False, 'import itk\n'), ((15770, 15809), 'numpy.moveaxis', 'np.moveaxis', (['mask', '[0, 1, 2]', '[2, 1, 0]'], {}), '(mask, [0, 1, 2], [2, 1, 0])\n', (15781, 15809), True, 'import numpy as np\n'), ((16200, 16232), 'util.nifti.load_nifti', 'load_nifti', (["seg_paths['T1-gado']"], {}), "(seg_paths['T1-gado'])\n", (16210, 16232), False, 'from util.nifti import load_nifti\n'), ((16259, 16287), 'util.nifti.load_nifti', 'load_nifti', (["seg_paths['bet']"], {}), "(seg_paths['bet'])\n", (16269, 16287), False, 'from util.nifti import load_nifti\n'), ((16315, 16343), 'util.nifti.load_nifti', 'load_nifti', (["seg_paths['csf']"], {}), "(seg_paths['csf'])\n", (16325, 16343), False, 'from util.nifti import load_nifti\n'), ((17251, 17286), 'skimage.morphology.closing', 'morph.closing', (['vessel_mask', 'element'], {}), '(vessel_mask, element)\n', (17264, 17286), True, 'import skimage.morphology as morph\n'), ((17326, 17372), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['vessel_mask', 'ori_aff', 'ori_hdr'], {}), '(vessel_mask, ori_aff, ori_hdr)\n', (17341, 17372), True, 'import nibabel as nib\n'), ((17377, 17421), 'nibabel.save', 'nib.save', (['nii_mask', 
"seg_paths['vessel_mask']"], {}), "(nii_mask, seg_paths['vessel_mask'])\n", (17385, 17421), True, 'import nibabel as nib\n'), ((6167, 6237), 'numpy.percentile', 'np.percentile', (['vesselness_as_np[vesselness_as_np > 0.0001]', 'percentile'], {}), '(vesselness_as_np[vesselness_as_np > 0.0001], percentile)\n', (6180, 6237), True, 'import numpy as np\n'), ((6296, 6339), 'numpy.percentile', 'np.percentile', (['vesselness_as_np', 'percentile'], {}), '(vesselness_as_np, percentile)\n', (6309, 6339), True, 'import numpy as np\n'), ((8544, 8679), 'itk.sigmoid_image_filter', 'itk.sigmoid_image_filter', (['laplacianEdge_image'], {'output_minimum': '(0.0)', 'output_maximum': '(1.0)', 'alpha': 'edgeSigmoidAlpha', 'beta': 'edgeSigmoidBeta'}), '(laplacianEdge_image, output_minimum=0.0,\n output_maximum=1.0, alpha=edgeSigmoidAlpha, beta=edgeSigmoidBeta)\n', (8568, 8679), False, 'import itk\n'), ((9230, 9359), 'itk.sigmoid_image_filter', 'itk.sigmoid_image_filter', (['smoothed_image'], {'output_minimum': '(0.0)', 'output_maximum': '(1.0)', 'alpha': 'intSigmoidAlpha', 'beta': 'intSigmoidBeta'}), '(smoothed_image, output_minimum=0.0, output_maximum\n =1.0, alpha=intSigmoidAlpha, beta=intSigmoidBeta)\n', (9254, 9359), False, 'import itk\n'), ((9492, 9627), 'itk.sigmoid_image_filter', 'itk.sigmoid_image_filter', (['laplacianEdge_image'], {'output_minimum': '(0.0)', 'output_maximum': '(1.0)', 'alpha': 'edgeSigmoidAlpha', 'beta': 'edgeSigmoidBeta'}), '(laplacianEdge_image, output_minimum=0.0,\n output_maximum=1.0, alpha=edgeSigmoidAlpha, beta=edgeSigmoidBeta)\n', (9516, 9627), False, 'import itk\n'), ((9784, 9857), 'itk.multiply_image_filter', 'itk.multiply_image_filter', (['intensitySigmoid_image', 'laplacianSigmoid_image'], {}), '(intensitySigmoid_image, laplacianSigmoid_image)\n', (9809, 9857), False, 'import itk\n'), ((10535, 10556), 'numpy.asarray', 'np.asarray', (['seed_mask'], {}), '(seed_mask)\n', (10545, 10556), True, 'import numpy as np\n'), ((14369, 14429), 'os.path.join', 
'os.path.join', (['logsDir', '"""1_anisotropic_diff_smoothing.nii.gz"""'], {}), "(logsDir, '1_anisotropic_diff_smoothing.nii.gz')\n", (14381, 14429), False, 'import os\n'), ((14660, 14718), 'os.path.join', 'os.path.join', (['logsDir', '"""2_hessian_based_vesselness.nii.gz"""'], {}), "(logsDir, '2_hessian_based_vesselness.nii.gz')\n", (14672, 14718), False, 'import os\n'), ((14954, 15010), 'os.path.join', 'os.path.join', (['logsDir', '"""3_thresholded_vesselness.nii.gz"""'], {}), "(logsDir, '3_thresholded_vesselness.nii.gz')\n", (14966, 15010), False, 'import os\n'), ((15314, 15373), 'os.path.join', 'os.path.join', (['logsDir', '"""4_fastmarching_segmentation.nii.gz"""'], {}), "(logsDir, '4_fastmarching_segmentation.nii.gz')\n", (15326, 15373), False, 'import os\n'), ((17855, 17904), 'os.path.join', 'os.path.join', (["paths['tmpDataDir']", '"""segmentation"""'], {}), "(paths['tmpDataDir'], 'segmentation')\n", (17867, 17904), False, 'import os\n'), ((17982, 18012), 'os.path.isdir', 'os.path.isdir', (["paths['segDir']"], {}), "(paths['segDir'])\n", (17995, 18012), False, 'import os\n'), ((18014, 18039), 'os.mkdir', 'os.mkdir', (["paths['segDir']"], {}), "(paths['segDir'])\n", (18022, 18039), False, 'import os\n'), ((18216, 18254), 'os.path.join', 'os.path.join', (["paths['segDir']", 'subject'], {}), "(paths['segDir'], subject)\n", (18228, 18254), False, 'import os\n'), ((18336, 18367), 'os.path.join', 'os.path.join', (['subjectDir', '"""raw"""'], {}), "(subjectDir, 'raw')\n", (18348, 18367), False, 'import os\n'), ((18643, 18679), 'os.path.join', 'os.path.join', (['rawDir', '"""vessel_debug"""'], {}), "(rawDir, 'vessel_debug')\n", (18655, 18679), False, 'import os\n'), ((19110, 19156), 'os.path.join', 'os.path.join', (['subjectDir', '"""vessel_mask.nii.gz"""'], {}), "(subjectDir, 'vessel_mask.nii.gz')\n", (19122, 19156), False, 'import os\n'), ((19784, 19858), 'tqdm.tqdm', 'tqdm', (['seg_paths'], {'ascii': '(True)', 'bar_format': 
'"""{l_bar}{bar:30}{r_bar}{bar:-30b}"""'}), "(seg_paths, ascii=True, bar_format='{l_bar}{bar:30}{r_bar}{bar:-30b}')\n", (19788, 19858), False, 'from tqdm import tqdm\n'), ((20035, 20075), 'os.path.exists', 'os.path.exists', (["sub_paths['vessel_mask']"], {}), "(sub_paths['vessel_mask'])\n", (20049, 20075), False, 'import os\n'), ((589, 606), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (599, 606), True, 'import numpy as np\n'), ((8326, 8380), 'os.path.join', 'os.path.join', (['logsDir', '"""4_1_gradient_magnitude.nii.gz"""'], {}), "(logsDir, '4_1_gradient_magnitude.nii.gz')\n", (8338, 8380), False, 'import os\n'), ((10226, 10279), 'os.path.join', 'os.path.join', (['logsDir', '"""4_2_speed_map_sigmoid.nii.gz"""'], {}), "(logsDir, '4_2_speed_map_sigmoid.nii.gz')\n", (10238, 10279), False, 'import os\n'), ((10461, 10506), 'os.path.join', 'os.path.join', (['logsDir', '"""4_3_seed_mask.nii.gz"""'], {}), "(logsDir, '4_3_seed_mask.nii.gz')\n", (10473, 10506), False, 'import os\n'), ((10737, 10755), 'numpy.shape', 'np.shape', (['seed_idx'], {}), '(seed_idx)\n', (10745, 10755), True, 'import numpy as np\n'), ((16422, 16444), 'numpy.linalg.inv', 'np.linalg.inv', (['bet_aff'], {}), '(bet_aff)\n', (16435, 16444), True, 'import numpy as np\n'), ((16482, 16504), 'numpy.linalg.inv', 'np.linalg.inv', (['csf_aff'], {}), '(csf_aff)\n', (16495, 16504), True, 'import numpy as np\n'), ((16621, 16639), 'numpy.shape', 'np.shape', (['T1w_gado'], {}), '(T1w_gado)\n', (16629, 16639), True, 'import numpy as np\n'), ((16745, 16763), 'numpy.shape', 'np.shape', (['T1w_gado'], {}), '(T1w_gado)\n', (16753, 16763), True, 'import numpy as np\n'), ((18270, 18295), 'os.path.isdir', 'os.path.isdir', (['subjectDir'], {}), '(subjectDir)\n', (18283, 18295), False, 'import os\n'), ((18297, 18317), 'os.mkdir', 'os.mkdir', (['subjectDir'], {}), '(subjectDir)\n', (18305, 18317), False, 'import os\n'), ((18383, 18404), 'os.path.isdir', 'os.path.isdir', (['rawDir'], {}), '(rawDir)\n', (18396, 
18404), False, 'import os\n'), ((18406, 18422), 'os.mkdir', 'os.mkdir', (['rawDir'], {}), '(rawDir)\n', (18414, 18422), False, 'import os\n'), ((18695, 18719), 'os.path.isdir', 'os.path.isdir', (['backupDir'], {}), '(backupDir)\n', (18708, 18719), False, 'import os\n'), ((18721, 18740), 'os.mkdir', 'os.mkdir', (['backupDir'], {}), '(backupDir)\n', (18729, 18740), False, 'import os\n'), ((10030, 10047), 'numpy.mean', 'np.mean', (['image_np'], {}), '(image_np)\n', (10037, 10047), True, 'import numpy as np\n')] |
from __future__ import division
import time
import train
import option_parse
from numpy.random import uniform, randint, choice
import torch
def check_params(opts, prev_opts):
    """Return True when *opts* differs from every previously tried config.

    Two configurations count as "the same" when every string/bool option
    matches exactly and every numeric option lies within a per-option
    tolerance of the previous value.
    """
    # Per-option tolerance used to decide whether two numeric settings
    # are effectively identical.
    tolerances = {'dropout': .02, 'lr': 10**-6, 'lr_decay': .1, 'start_decay_at': 5, 'attn': 0,
                  'cat_mo_spec': 0, 'mem_slots': 10, 'mem_size': 10, 'read_heads': 0, 'hops': 1,
                  'input_feed': 0, 'curriculum': 0, 'share_M': 0, 'brnn': 0, 'layers': 0,
                  'word_vec_size': 0, 'rnn_size': 0}
    # Nothing tried yet -> anything is new.
    if not prev_opts:
        print(' new_opts ')
        print(opts)
        return True
    for candidate in prev_opts:
        matches = True
        for key in candidate:
            current = opts[key]
            if isinstance(current, (str, bool)):
                # Discrete options must match exactly.
                if current != candidate[key]:
                    matches = False
            else:
                low = candidate[key] - tolerances[key]
                high = candidate[key] + tolerances[key]
                if current < low or current > high:
                    print(' checking %s : %.4f ;;; prev: %.4f +- std : %.2f' % (str(key),
                                                                              current, candidate[key],
                                                                              tolerances[key]))
                    matches = False
        if matches:
            # An old configuration is (within tolerance) identical.
            print(' --- no new options ')
            print(opts)
            return False
    print(' ---- new options')
    print(opts)
    return True
if __name__ == "__main__":
parser = option_parse.get_parser()
opt = parser.parse_args()
tries = 0
low_ppl = 100000
f = open('logs/hypertune_res_' + str(opt.mem), 'a')
print(' data: ' + str(opt.data), file=f)
f.close()
# prev_opts: {mem_type: list of {opt : value}}
if opt.prev_opts:
try:
prev_opts = torch.load(opt.prev_opts)
except FileNotFoundError:
prev_opts = []
else:
prev_opts = []
while True: # low_ppl > 4 or tries < 64:
ok_params = False
while not ok_params:
# opt.brnn = randint(2)
opt.dropout = round(uniform(.1, .7), 2)
opt.dropout_nse = round(uniform(.1, .6), 2)
optim = ['sgd', 'adagrad', 'adadelta', 'adam']
opt.optim = optim[randint(0, 4)]
opt.learning_rate = 10 ** uniform(-3.5, -2.5)
opt.learning_rate_decay = round(uniform(.3, .8), 2)
opt.start_decay_at = randint(8, 32)
opt.curriculum = randint(2, 10)
# opt.attn = uniform() // .7
# opt.input_feed = uniform() // .3
# opt.not_optim_tgt_emb = uniform() // .5
# opts = {'share_M': opt.share_M, 'dropout': opt.dropout, 'lr': opt.learning_rate,
# 'lr_decay': opt.learning_rate_decay, 'start_decay_at': opt.start_decay_at,
# 'attn': opt.attn, 'input_feed': opt.input_feed, 'curriculum': opt.curriculum}
if 'nse' in opt.mem.split('_'):
opt.cat_mo_special = uniform() // .5 == 1
if 'dnc' in opt.mem.split('_'):
ok_size = False
opt.word_vec_size = int(choice([200, 300, 400, 500]))
opt.rnn_size = int(choice([200, 300, 400, 500]))
opt.layers = randint(2) + 1
while not ok_size:
opt.read_heads = randint(1, 4)
opt.mem_slots = randint(10, 50)
opt.mem_size = randint(50, 500)
ok_size = opt.read_heads * opt.mem_slots * opt.mem_size < 6000
if 'n2n' == opt.mem.split('_')[0]:
opt.nr_of_hops = randint(4, 20)
ok_params = check_params(vars(opt), prev_opts)
print(' start try: ' + str(tries))
train.opt = opt
cur_ppl, epoch, trn_ppls, val_ppls, checkpoint, opt, nparams = train.main()
f = open('logs/hypertune_res_' + str(opt.mem), 'a')
if cur_ppl < low_ppl:
low_ppl = cur_ppl
print(' ===== better result ====\n', file=f)
print('low ppl: %f \n number of params: %d \n epoch: %d\n train ppls: %s \n vaild ppls: %s \n'
% (cur_ppl, nparams, epoch, str(trn_ppls), str(val_ppls)), file=f)
print(opt, file=f)
print('\n===========================================================\n\n', file=f)
f.close()
if opt.prev_opts:
prev_opts += [vars(opt)]
torch.save(prev_opts, opt.prev_opts)
tries += 1
| [
"numpy.random.uniform",
"option_parse.get_parser",
"torch.load",
"train.main",
"torch.save",
"numpy.random.randint",
"numpy.random.choice"
] | [((1439, 1464), 'option_parse.get_parser', 'option_parse.get_parser', ([], {}), '()\n', (1462, 1464), False, 'import option_parse\n'), ((3805, 3817), 'train.main', 'train.main', ([], {}), '()\n', (3815, 3817), False, 'import train\n'), ((1757, 1782), 'torch.load', 'torch.load', (['opt.prev_opts'], {}), '(opt.prev_opts)\n', (1767, 1782), False, 'import torch\n'), ((2383, 2397), 'numpy.random.randint', 'randint', (['(8)', '(32)'], {}), '(8, 32)\n', (2390, 2397), False, 'from numpy.random import uniform, randint, choice\n'), ((2427, 2441), 'numpy.random.randint', 'randint', (['(2)', '(10)'], {}), '(2, 10)\n', (2434, 2441), False, 'from numpy.random import uniform, randint, choice\n'), ((4392, 4428), 'torch.save', 'torch.save', (['prev_opts', 'opt.prev_opts'], {}), '(prev_opts, opt.prev_opts)\n', (4402, 4428), False, 'import torch\n'), ((2048, 2065), 'numpy.random.uniform', 'uniform', (['(0.1)', '(0.7)'], {}), '(0.1, 0.7)\n', (2055, 2065), False, 'from numpy.random import uniform, randint, choice\n'), ((2104, 2121), 'numpy.random.uniform', 'uniform', (['(0.1)', '(0.6)'], {}), '(0.1, 0.6)\n', (2111, 2121), False, 'from numpy.random import uniform, randint, choice\n'), ((2213, 2226), 'numpy.random.randint', 'randint', (['(0)', '(4)'], {}), '(0, 4)\n', (2220, 2226), False, 'from numpy.random import uniform, randint, choice\n'), ((2266, 2285), 'numpy.random.uniform', 'uniform', (['(-3.5)', '(-2.5)'], {}), '(-3.5, -2.5)\n', (2273, 2285), False, 'from numpy.random import uniform, randint, choice\n'), ((2330, 2347), 'numpy.random.uniform', 'uniform', (['(0.3)', '(0.8)'], {}), '(0.3, 0.8)\n', (2337, 2347), False, 'from numpy.random import uniform, randint, choice\n'), ((3590, 3604), 'numpy.random.randint', 'randint', (['(4)', '(20)'], {}), '(4, 20)\n', (3597, 3604), False, 'from numpy.random import uniform, randint, choice\n'), ((3097, 3125), 'numpy.random.choice', 'choice', (['[200, 300, 400, 500]'], {}), '([200, 300, 400, 500])\n', (3103, 3125), False, 'from numpy.random 
import uniform, randint, choice\n'), ((3162, 3190), 'numpy.random.choice', 'choice', (['[200, 300, 400, 500]'], {}), '([200, 300, 400, 500])\n', (3168, 3190), False, 'from numpy.random import uniform, randint, choice\n'), ((3221, 3231), 'numpy.random.randint', 'randint', (['(2)'], {}), '(2)\n', (3228, 3231), False, 'from numpy.random import uniform, randint, choice\n'), ((3308, 3321), 'numpy.random.randint', 'randint', (['(1)', '(4)'], {}), '(1, 4)\n', (3315, 3321), False, 'from numpy.random import uniform, randint, choice\n'), ((3358, 3373), 'numpy.random.randint', 'randint', (['(10)', '(50)'], {}), '(10, 50)\n', (3365, 3373), False, 'from numpy.random import uniform, randint, choice\n'), ((3409, 3425), 'numpy.random.randint', 'randint', (['(50)', '(500)'], {}), '(50, 500)\n', (3416, 3425), False, 'from numpy.random import uniform, randint, choice\n'), ((2957, 2966), 'numpy.random.uniform', 'uniform', ([], {}), '()\n', (2964, 2966), False, 'from numpy.random import uniform, randint, choice\n')] |
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import os.path as path
import time
from Corrfunc import _countpairs
from Corrfunc.utils import read_catalog
import numpy as np
# --- Local ---
# --- halotools ---
from halotools.sim_manager import CachedHaloCatalog
from halotools.empirical_models import PrebuiltHodModelFactory
from halotools.mock_observables import tpcf
from halotools.empirical_models.factories.mock_helpers import three_dim_pos_bundle
from halotools.mock_observables import FoFGroups
from halotools.mock_observables.pair_counters import npairs_3d
import matplotlib.pyplot as plt
from ChangTools.plotting import prettyplot
from ChangTools.plotting import prettycolors
from ChangTools.plotting import prettyplot
from ChangTools.plotting import prettycolors
from Corrfunc.utils import read_catalog
def main():
    """Compute and plot the projected correlation function wp(rp).

    Populates a Zheng07 HOD mock on a cached halo catalog, measures
    wp(rp) with Corrfunc, and compares it against the measured data
    vector (errors from the covariance matrix), saving the plot to
    'wp.pdf'.

    NOTE(review): input data files are resolved relative to the current
    working directory ('../data/...') — confirm the expected run location.
    """
    # Entire MultiDark Volume (Analytic xi)
    # Covariance matrix of the measured data vector; its leading 12x12
    # diagonal is used for the wp error bars below.
    cov = np.loadtxt("../data/wpxicov_dr72_bright0_mr21.0_z0.159_nj400")
    print(cov.shape)
    # Zheng07 HOD model at the Mr < -21 threshold, with parameters set below.
    model = PrebuiltHodModelFactory('zheng07', threshold=-21)
    print(model.param_dict)
    model.param_dict['logM0'] = 12.59
    model.param_dict['sigma_logM'] = 0.49
    model.param_dict['logMmin'] = 12.78
    model.param_dict['alpha'] = 1.14
    model.param_dict['logM1'] = 13.99
    #, 'sigma_logM': 0.39, 'logMmin': 12.79, 'alpha': 1.15, 'logM1': 13.94}
    halocat = CachedHaloCatalog(simname = 'bolplanck', redshift = 0, halo_finder = 'rockstar')
    model.populate_mock(halocat, enforce_PBC = True)
    # Galaxy positions from the populated mock as an (N, 3) array.
    pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')
    tstart = time.time()
    t0 = tstart
    # Corrfunc expects float32 coordinate arrays.
    pos = pos.astype(np.float32)
    x, y, z = pos[:,0] , pos[:,1] , pos[:,2]
    t1 = time.time()
    print("Done reading the data - time taken = {0:10.1f} seconds"
          .format(t1 - t0))
    print("Beginning Correlation functions calculations")
    # Pair-counting settings: box size, threads and pi_max integration limit.
    boxsize = 250
    nthreads = 4
    pimax = 40.0
    binfile = path.join(path.dirname(path.abspath(__file__)),
                        "../", "bin")
    # NOTE(review): 'autocorr' is set but never passed to countpairs_wp below.
    autocorr = 1
    numbins_to_print = 12
    print("\nRunning 2-D projected correlation function wp(rp)")
    results_wp = _countpairs.countpairs_wp(boxsize, pimax, nthreads,
                                           binfile, x, y, z)
    print("\n# ****** wp: first {0} bins ******* "
          .format(numbins_to_print))
    print("# rmin rmax rpavg wp npairs")
    print("##########################################################")
    for ibin in range(numbins_to_print):
        items = results_wp[ibin]
        print("{0:12.4f} {1:12.4f} {2:10.4f} {3:10.1f} {4:10d}"
              .format(items[0], items[1], items[2], items[3], items[4]))
    print("-----------------------------------------------------------")
    # Measured wp data vector (second column) and its diagonal errors.
    data_wp = np.loadtxt("../data/wpxi_dr72_bright0_mr21.0_z0.159_nj400")[:,1]
    print(data_wp.shape)
    data_wp_error = np.sqrt(np.diag(cov)[:12])
    print(data_wp_error.shape)
    # Bin midpoints for plotting; column 3 of the results is wp itself.
    rbins = np.loadtxt(binfile)
    rs = np.mean(rbins , axis = 1)
    plt.figure(figsize=(10,10))
    plt.errorbar(rs , data_wp , data_wp_error , fmt=".k" , capsize = 2)
    plt.plot(rs , np.array(results_wp)[:,3])
    plt.loglog()
    plt.savefig("wp.pdf")
if __name__ == "__main__":
main()
| [
"matplotlib.pyplot.loglog",
"numpy.diag",
"matplotlib.pyplot.savefig",
"os.path.abspath",
"halotools.empirical_models.PrebuiltHodModelFactory",
"time.time",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.array",
"numpy.loadtxt",
"halotools.empirical_models.factories.mock_helpers.three_dim_pos_... | [((952, 1014), 'numpy.loadtxt', 'np.loadtxt', (['"""../data/wpxicov_dr72_bright0_mr21.0_z0.159_nj400"""'], {}), "('../data/wpxicov_dr72_bright0_mr21.0_z0.159_nj400')\n", (962, 1014), True, 'import numpy as np\n'), ((1048, 1097), 'halotools.empirical_models.PrebuiltHodModelFactory', 'PrebuiltHodModelFactory', (['"""zheng07"""'], {'threshold': '(-21)'}), "('zheng07', threshold=-21)\n", (1071, 1097), False, 'from halotools.empirical_models import PrebuiltHodModelFactory\n'), ((1417, 1491), 'halotools.sim_manager.CachedHaloCatalog', 'CachedHaloCatalog', ([], {'simname': '"""bolplanck"""', 'redshift': '(0)', 'halo_finder': '"""rockstar"""'}), "(simname='bolplanck', redshift=0, halo_finder='rockstar')\n", (1434, 1491), False, 'from halotools.sim_manager import CachedHaloCatalog\n'), ((1561, 1621), 'halotools.empirical_models.factories.mock_helpers.three_dim_pos_bundle', 'three_dim_pos_bundle', (['model.mock.galaxy_table', '"""x"""', '"""y"""', '"""z"""'], {}), "(model.mock.galaxy_table, 'x', 'y', 'z')\n", (1581, 1621), False, 'from halotools.empirical_models.factories.mock_helpers import three_dim_pos_bundle\n'), ((1636, 1647), 'time.time', 'time.time', ([], {}), '()\n', (1645, 1647), False, 'import time\n'), ((1751, 1762), 'time.time', 'time.time', ([], {}), '()\n', (1760, 1762), False, 'import time\n'), ((2198, 2267), 'Corrfunc._countpairs.countpairs_wp', '_countpairs.countpairs_wp', (['boxsize', 'pimax', 'nthreads', 'binfile', 'x', 'y', 'z'], {}), '(boxsize, pimax, nthreads, binfile, x, y, z)\n', (2223, 2267), False, 'from Corrfunc import _countpairs\n'), ((3050, 3069), 'numpy.loadtxt', 'np.loadtxt', (['binfile'], {}), '(binfile)\n', (3060, 3069), True, 'import numpy as np\n'), ((3079, 3101), 'numpy.mean', 'np.mean', (['rbins'], {'axis': '(1)'}), '(rbins, axis=1)\n', (3086, 3101), True, 'import numpy as np\n'), ((3109, 3137), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), 
'(figsize=(10, 10))\n', (3119, 3137), True, 'import matplotlib.pyplot as plt\n'), ((3141, 3202), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['rs', 'data_wp', 'data_wp_error'], {'fmt': '""".k"""', 'capsize': '(2)'}), "(rs, data_wp, data_wp_error, fmt='.k', capsize=2)\n", (3153, 3202), True, 'import matplotlib.pyplot as plt\n'), ((3258, 3270), 'matplotlib.pyplot.loglog', 'plt.loglog', ([], {}), '()\n', (3268, 3270), True, 'import matplotlib.pyplot as plt\n'), ((3275, 3296), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""wp.pdf"""'], {}), "('wp.pdf')\n", (3286, 3296), True, 'import matplotlib.pyplot as plt\n'), ((2869, 2928), 'numpy.loadtxt', 'np.loadtxt', (['"""../data/wpxi_dr72_bright0_mr21.0_z0.159_nj400"""'], {}), "('../data/wpxi_dr72_bright0_mr21.0_z0.159_nj400')\n", (2879, 2928), True, 'import numpy as np\n'), ((2005, 2027), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (2017, 2027), True, 'import os.path as path\n'), ((2987, 2999), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (2994, 2999), True, 'import numpy as np\n'), ((3227, 3247), 'numpy.array', 'np.array', (['results_wp'], {}), '(results_wp)\n', (3235, 3247), True, 'import numpy as np\n')] |
# Copyright 2016-2022 Bitmain Technologies Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The implementation of object detection fasterrcnn
"""
from __future__ import print_function
from __future__ import division
import os
import numpy as np
# from sophon.auto_split.api import split as splitter
from sophon.auto_runner.api import infer
from sophon.auto_runner.api import load
# from sophon.auto_split.api import convert as compiler
from ...engine.base_engine import BaseEngine
class SemanticSegmentationDEEPLABV3MOBILENETV2TF(BaseEngine):
    """Semantic segmentation engine wrapping a deeplabv3_mobilenetv2 TF model.

    The (pre-split) model under ``subgraph_path`` is loaded with the sophon
    auto-runner and executed on BGR input images; the engine returns the
    'SemanticPredictions:0' output tensor(s).
    """

    def __init__(self, source_path, subgraph_path, tfmodel_path, framework,
                 input_names, output_names, input_shapes, layout, is_dynamic,
                 process_size, target, conf_threshold):
        """Store model/configuration parameters and pre-allocate input tensors.

        Args:
            source_path: path of the original source model.
            subgraph_path: directory holding the split/compiled subgraphs.
            tfmodel_path: path of the TensorFlow model file.
            framework: framework identifier (e.g. 'tensorflow').
            input_names: list of graph input tensor names.
            output_names: list of graph output tensor names.
            input_shapes: list of shapes, one per input name.
            layout: data layout of the inputs.
            is_dynamic: whether the graph uses dynamic shapes.
            process_size: (h, w); images are resized to this before inference.
            target: deployment target identifier.
            conf_threshold: confidence threshold (stored for API symmetry).

        Raises:
            ValueError: if input_names and input_shapes differ in length.
        """
        super(SemanticSegmentationDEEPLABV3MOBILENETV2TF, self).__init__()
        self.source_path = source_path
        self.subgraph_path = subgraph_path
        self.tfmodel_path = tfmodel_path
        self.framework = framework
        self.input_names = input_names
        self.input_shapes = input_shapes
        self.process_size = process_size
        self.conf_threshold = conf_threshold
        # Pre-allocate one placeholder tensor per declared input.
        if len(self.input_names) != len(self.input_shapes):
            raise ValueError('input names and input shapes sizes do not match!')
        self.tensors_dict = {}
        for input_name, input_shape in zip(input_names, input_shapes):
            self.tensors_dict[input_name] = np.ndarray(
                input_shape, dtype=np.float32)
        self.output_names = output_names
        self.layout = layout
        self.is_dynamic = is_dynamic
        self.target = target
        # NOTE: the original auto-split/compile preprocessing of the model
        # (sophon.auto_split splitter/compiler over subgraph_path) is
        # disabled; the model under subgraph_path is assumed ready to load.

    def predict(self, images):
        """Run the deeplabv3_mobilenetv2 forward pass.

        Args:
            images: a single image array, or a list of image arrays.

        Returns:
            The 'SemanticPredictions:0' output for a single-image input, or
            a list with one such output per image for a list input.

        Bug fix: the previous implementation loaded the model and ran
        inference inside the per-image loop but only read the result after
        the loop, so all results except the last image's were discarded.
        The model is now loaded once and every image's result is returned.
        """
        self.time.tic()
        use_batch = isinstance(images, list)
        if not use_batch:
            images = [images]
        # Load the model once instead of once per image.
        model = load(self.subgraph_path)
        results = []
        for image in images:
            # This model has a fixed input size, so the rescale parameters
            # returned alongside the data are not needed afterwards.
            data, _ = self.rescale_image(image, self.process_size, True)
            data = data[..., [2, 1, 0]]  # BGR2RGB (PIL.Image reads as RGB)
            input_data = {self.input_names[0]: np.array([data])}
            out = infer(model, input_data)
            results.append(out['SemanticPredictions:0'])
        print("inference done!")
        if use_batch:
            return results
        return results[0]
| [
"numpy.array",
"sophon.auto_runner.api.infer",
"numpy.ndarray",
"sophon.auto_runner.api.load"
] | [((3951, 3975), 'sophon.auto_runner.api.load', 'load', (['self.subgraph_path'], {}), '(self.subgraph_path)\n', (3955, 3975), False, 'from sophon.auto_runner.api import load\n'), ((3988, 4012), 'sophon.auto_runner.api.infer', 'infer', (['model', 'input_data'], {}), '(model, input_data)\n', (3993, 4012), False, 'from sophon.auto_runner.api import infer\n'), ((1980, 2021), 'numpy.ndarray', 'np.ndarray', (['input_shape'], {'dtype': 'np.float32'}), '(input_shape, dtype=np.float32)\n', (1990, 2021), True, 'import numpy as np\n'), ((3919, 3935), 'numpy.array', 'np.array', (['[data]'], {}), '([data])\n', (3927, 3935), True, 'import numpy as np\n')] |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import pytest
import torch
import numpy as np
import torchvision.models.video as models
from torchvision import transforms
from jina import Document, DocumentArray
try:
from video_torch_encoder import VideoTorchEncoder, ConvertFHWCtoFCHW, ConvertFCHWtoCFHW
except:
from ...video_torch_encoder import VideoTorchEncoder, ConvertFHWCtoFCHW, ConvertFCHWtoCFHW
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
def test_video_torch_encoder(model_name):
    """Without default preprocessing, every raw-blob doc gets a 512-d embedding."""
    encoder = VideoTorchEncoder(model_name=model_name, use_default_preprocessing=False)
    blobs = [np.random.random((3, 2, 224, 224)) for _ in range(10)]
    docs = DocumentArray([Document(blob=b) for b in blobs])
    encoder.encode(docs, {})
    assert len(docs) == 10
    for d in docs:
        assert d.embedding.shape == (512,)
@pytest.mark.parametrize('batch_size', [1, 3, 10])
def test_video_torch_encoder_traversal_paths(batch_size):
    """Encoding with 'r' and 'c' traversal paths embeds roots and chunks alike."""
    encoder = VideoTorchEncoder(use_default_preprocessing=False)

    def _make_doc():
        # One root document carrying five random video chunks.
        root = Document(blob=np.random.random((3, 2, 112, 112)))
        root.chunks = [
            Document(blob=np.random.random((3, 2, 112, 112))) for _ in range(5)
        ]
        return root

    docs = DocumentArray([_make_doc() for _ in range(10)])
    encoder.encode(docs, {'traversal_paths': ['r', 'c'], 'batch_size': batch_size})
    assert len(docs) == 10
    for root in docs:
        assert root.embedding.shape == (512,)
        assert len(root.chunks) == 5
        for chunk in root.chunks:
            assert chunk.embedding.shape == (512,)
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
def test_video_torch_encoder_use_default_preprocessing(model_name):
    """With default preprocessing enabled, raw FHWC clips encode to 512-d vectors."""
    encoder = VideoTorchEncoder(model_name=model_name, use_default_preprocessing=True)
    docs = DocumentArray(
        [Document(blob=np.random.random((10, 270, 480, 3))) for _ in range(10)]
    )
    encoder.encode(docs, {})
    assert len(docs) == 10
    for d in docs:
        assert d.embedding.shape == (512,)
@pytest.fixture()
def kinects_videos():
    """Return two identical sample clips from a local Kinetics400 dataset.

    Requires the dataset files under tests/../data/kinetics400 to exist on
    disk; torchvision decodes 20 frames per clip.
    """
    from torchvision.datasets import Kinetics400
    dataset = Kinetics400(root=os.path.join(cur_dir, '../data/kinetics400'), frames_per_clip=20)
    return [dataset[0][0], dataset[0][0]]
@pytest.mark.parametrize('model_name', ['mc3_18', 'r2plus1d_18', 'r3d_18'])
def test_with_dataset_video(model_name, kinects_videos):
    """Encoder embeddings must match a manual torchvision forward pass.

    Re-implements the encoder's default preprocessing with torchvision
    transforms, hooks the backbone's avgpool output, and compares it with
    the embeddings the executor produced.
    """
    da = DocumentArray([Document(blob=video.detach().numpy()) for video in kinects_videos])
    ex = VideoTorchEncoder(use_default_preprocessing=True, model_name=model_name)
    ex.encode(da, {})
    assert len(da) == 2
    for doc in da:
        assert doc.embedding.shape == (512,)
    # Reference pipeline: same pretrained weights plus the preprocessing the
    # encoder is expected to apply (Kinetics-400 normalisation constants).
    model = getattr(models, model_name)(pretrained=True).eval()
    mean = (0.43216, 0.394666, 0.37645)
    std = (0.22803, 0.22145, 0.216989)
    resize_size = (128, 171)
    crop_size = (112, 112)
    t = transforms.Compose([
        ConvertFHWCtoFCHW(),
        transforms.ConvertImageDtype(torch.float32),
        transforms.Resize(resize_size),
        transforms.Normalize(mean=mean, std=std),
        transforms.CenterCrop(crop_size),
        ConvertFCHWtoCFHW()
    ])
    tensor = torch.stack([t(video) for video in kinects_videos])
    def _get_embeddings(x) -> torch.Tensor:
        # Capture the avgpool activation via a forward hook and flatten it
        # to (batch, features).
        embeddings = torch.Tensor()
        def get_activation(model, model_input, output):
            nonlocal embeddings
            embeddings = output
        handle = model.avgpool.register_forward_hook(get_activation)
        model(x)
        handle.remove()
        return embeddings.flatten(1)
    embedding_batch = _get_embeddings(tensor)
    for doc, expected_torch_embedding in zip(da, embedding_batch):
        np.testing.assert_almost_equal(doc.embedding, expected_torch_embedding.detach().numpy())
| [
"os.path.abspath",
"os.path.join",
"torchvision.transforms.ConvertImageDtype",
"pytest.fixture",
"video_torch_encoder.ConvertFCHWtoCFHW",
"torch.Tensor",
"video_torch_encoder.ConvertFHWCtoFCHW",
"numpy.random.random",
"torchvision.transforms.CenterCrop",
"pytest.mark.parametrize",
"torchvision.t... | [((543, 617), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""model_name"""', "['r3d_18', 'mc3_18', 'r2plus1d_18']"], {}), "('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])\n", (566, 617), False, 'import pytest\n'), ((952, 1001), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""batch_size"""', '[1, 3, 10]'], {}), "('batch_size', [1, 3, 10])\n", (975, 1001), False, 'import pytest\n'), ((1697, 1771), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""model_name"""', "['r3d_18', 'mc3_18', 'r2plus1d_18']"], {}), "('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])\n", (1720, 1771), False, 'import pytest\n'), ((2132, 2148), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (2146, 2148), False, 'import pytest\n'), ((2363, 2437), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""model_name"""', "['mc3_18', 'r2plus1d_18', 'r3d_18']"], {}), "('model_name', ['mc3_18', 'r2plus1d_18', 'r3d_18'])\n", (2386, 2437), False, 'import pytest\n'), ((513, 538), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (528, 538), False, 'import os\n'), ((669, 742), 'video_torch_encoder.VideoTorchEncoder', 'VideoTorchEncoder', ([], {'model_name': 'model_name', 'use_default_preprocessing': '(False)'}), '(model_name=model_name, use_default_preprocessing=False)\n', (686, 742), False, 'from video_torch_encoder import VideoTorchEncoder, ConvertFHWCtoFCHW, ConvertFCHWtoCFHW\n'), ((1069, 1119), 'video_torch_encoder.VideoTorchEncoder', 'VideoTorchEncoder', ([], {'use_default_preprocessing': '(False)'}), '(use_default_preprocessing=False)\n', (1086, 1119), False, 'from video_torch_encoder import VideoTorchEncoder, ConvertFHWCtoFCHW, ConvertFCHWtoCFHW\n'), ((1849, 1921), 'video_torch_encoder.VideoTorchEncoder', 'VideoTorchEncoder', ([], {'model_name': 'model_name', 'use_default_preprocessing': '(True)'}), '(model_name=model_name, use_default_preprocessing=True)\n', (1866, 1921), False, 'from video_torch_encoder 
import VideoTorchEncoder, ConvertFHWCtoFCHW, ConvertFCHWtoCFHW\n'), ((2597, 2669), 'video_torch_encoder.VideoTorchEncoder', 'VideoTorchEncoder', ([], {'use_default_preprocessing': '(True)', 'model_name': 'model_name'}), '(use_default_preprocessing=True, model_name=model_name)\n', (2614, 2669), False, 'from video_torch_encoder import VideoTorchEncoder, ConvertFHWCtoFCHW, ConvertFCHWtoCFHW\n'), ((3389, 3403), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (3401, 3403), False, 'import torch\n'), ((2252, 2296), 'os.path.join', 'os.path.join', (['cur_dir', '"""../data/kinetics400"""'], {}), "(cur_dir, '../data/kinetics400')\n", (2264, 2296), False, 'import os\n'), ((3017, 3036), 'video_torch_encoder.ConvertFHWCtoFCHW', 'ConvertFHWCtoFCHW', ([], {}), '()\n', (3034, 3036), False, 'from video_torch_encoder import VideoTorchEncoder, ConvertFHWCtoFCHW, ConvertFCHWtoCFHW\n'), ((3046, 3089), 'torchvision.transforms.ConvertImageDtype', 'transforms.ConvertImageDtype', (['torch.float32'], {}), '(torch.float32)\n', (3074, 3089), False, 'from torchvision import transforms\n'), ((3099, 3129), 'torchvision.transforms.Resize', 'transforms.Resize', (['resize_size'], {}), '(resize_size)\n', (3116, 3129), False, 'from torchvision import transforms\n'), ((3139, 3179), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'mean', 'std': 'std'}), '(mean=mean, std=std)\n', (3159, 3179), False, 'from torchvision import transforms\n'), ((3189, 3221), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['crop_size'], {}), '(crop_size)\n', (3210, 3221), False, 'from torchvision import transforms\n'), ((3231, 3250), 'video_torch_encoder.ConvertFCHWtoCFHW', 'ConvertFCHWtoCFHW', ([], {}), '()\n', (3248, 3250), False, 'from video_torch_encoder import VideoTorchEncoder, ConvertFHWCtoFCHW, ConvertFCHWtoCFHW\n'), ((1188, 1222), 'numpy.random.random', 'np.random.random', (['(3, 2, 112, 112)'], {}), '((3, 2, 112, 112))\n', (1204, 1222), True, 'import numpy as np\n'), 
((781, 815), 'numpy.random.random', 'np.random.random', (['(3, 2, 224, 224)'], {}), '((3, 2, 224, 224))\n', (797, 815), True, 'import numpy as np\n'), ((1258, 1292), 'numpy.random.random', 'np.random.random', (['(3, 2, 112, 112)'], {}), '((3, 2, 112, 112))\n', (1274, 1292), True, 'import numpy as np\n'), ((1960, 1995), 'numpy.random.random', 'np.random.random', (['(10, 270, 480, 3)'], {}), '((10, 270, 480, 3))\n', (1976, 1995), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 13 13:56:53 2020
@author: dcmccal
"""
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 6 13:53:11 2020
@author: dave
"""
# -*- coding: utf-8 -*-
"""
This looks at 25 degrees Li on Au. I'm looking at making this code shorter.
"""
from scipy.optimize import curve_fit
from scipy import asarray as ar,exp
#import argparse # Parsing command line arguments
import numpy as np # Array manipulation/maths
import matplotlib.pyplot as plt # Plotting
#import os # Path related stuff
import math
import scipy.signal as signal # Peak finding
#import scipy.signal as signal # Peak finding
#from scipy.optimize import curve_fit# Fitting the gaussians
#from scipy.stats import linregress # for R-value on the plot
#I believe the three comments below only give you the data/
#from spec_loader import Log
#from spec_loader import TansTbl
#from spec_loader import Spec
#---------------------------------------------------------------------
#--------------------open and read files-25 deg-----------------------------
#---------------------------------------------------------------------
def _read_rows(path):
    """Read one SAFARI output file into a list of token lists.

    Each returned row is ``line.split()`` for one line of the file:
      row 0  -> column titles ["energy", "intensity", "counts", "k-factor"]
      row 1  -> [E/E0, intensity, total detector counts, kinematic factor]
      row 2+ -> [E/E0, intensity]
    All tokens are strings; callers float()/int() them as needed.
    """
    with open(path) as fh:
        return [line.split() for line in fh]

# The four 25-degree data sets differ only in the friction parameter `a`.
# The txt_read25* / trial* / cnt* names are kept for backward compatibility
# with the original script (txt_read25* held the last parsed line).
# no friction (a = 0)
txt_data25nofric = _read_rows('Li_on_Au_001_25Deg_nofric.txt')
txt_read25nofric = txt_data25nofric[-1] if txt_data25nofric else []
trial = []
cnt = 0
# small friction (a = 0.2)
txt_data25smallfric = _read_rows('Li_on_Au_001_25Degsmaller_fric.txt')
txt_read25smallfric = txt_data25smallfric[-1] if txt_data25smallfric else []
trialsm = []
cntsm = 0
# large friction (a = 2)
txt_data25largefric = _read_rows('Li_on_Au_001_25Degw_fric.txt')
txt_read25largefric = txt_data25largefric[-1] if txt_data25largefric else []
triallarge = []
cntlarge = 0
# "just right" friction (a = 0.02)
txt_data25justrightfric = _read_rows('Li_on_Au_001_25Deg_justrightfric.txt')
txt_read25justrightfric = txt_data25justrightfric[-1] if txt_data25justrightfric else []
#---------------------------------------------------------------------
#--------------------open and read files------------------------------
#---------------------------------------------------------------------
#---------------------------------------------------------------------
#--------------------adjust the data------------------------------
#---------------------------------------------------------------------
"""
I was having a problem with reading and adjusting the data separately
so I'm working on doing it all at once. Since all of the data is taken
at the same resolution this is fine
"""
# Convert the raw string rows to numeric lists.  All four data sets were
# taken at the same resolution, so one loop handles them together.
a = []
cnt = int(1)  # row 0 holds the column titles, so numeric data starts at row 1
energy_25deg_nofric = []
intensity_25deg_nofric = []
energy_25deg_smallfric = []
intensity_25deg_smallfric = []
energy_25deg_largefric = []
intensity_25deg_largefric = []
shiftedlarge = []
shiftedsmall = []
shiftedright = []
energy_25deg_jr = []
intensity_25deg_jr = []
# Energies are stored as E/E0 in the files; multiply by 100 to convert to eV
# (100 eV beam energy).  The shifted* lists offset each curve vertically so
# the four spectra can share one axis without overlapping.
for ii in range(len(txt_data25nofric) - 1):
    energy_25deg_nofric.append(100 * float(txt_data25nofric[cnt][0]))
    intensity_25deg_nofric.append(float(txt_data25nofric[cnt][1]))
    energy_25deg_smallfric.append(100 * float(txt_data25smallfric[cnt][0]))
    intensity_25deg_smallfric.append(float(txt_data25smallfric[cnt][1]))
    energy_25deg_largefric.append(100 * float(txt_data25largefric[cnt][0]))
    intensity_25deg_largefric.append(float(txt_data25largefric[cnt][1]))
    energy_25deg_jr.append(100 * float(txt_data25justrightfric[cnt][0]))
    intensity_25deg_jr.append(float(txt_data25justrightfric[cnt][1]))
    # Vertical offsets for the stacked comparison plot.
    shiftedsmall.append(2.00 + float(txt_data25smallfric[cnt][1]))
    shiftedright.append(1.00 + float(txt_data25justrightfric[cnt][1]))
    shiftedlarge.append(3.00 + float(txt_data25largefric[cnt][1]))
    cnt = cnt + 1
# Total detected counts for each data set (row 1, column 2 of each file).
# BUG FIX: c_none and c_sm were previously swapped -- c_sm read the
# no-friction file and c_none the small-friction file, so the plot legend
# reported the wrong counts for both curves.
c_none = []
c_none.append(int(txt_data25nofric[1][2]))      # a = 0 (no friction)
c_sm = []
c_sm.append(int(txt_data25smallfric[1][2]))     # a = 0.2 (small friction)
c_large = []
c_large.append(int(txt_data25largefric[1][2]))  # a = 2 (large friction)
c_justright = []
c_justright.append(int(txt_data25justrightfric[1][2]))  # a = 0.02
# Stacked comparison of all four friction settings on one energy axis; the
# shifted* curves were offset vertically above so they do not overlap.
for curve, label, colour in (
        (shiftedlarge, "large friction counts = " + str(c_large), 'black'),
        (intensity_25deg_nofric, "no friction counts = " + str(c_none), 'red'),
        (shiftedsmall, "small friction counts = " + str(c_sm), 'green'),
        (shiftedright, "just right friction counts = " + str(c_justright), 'blue'),
):
    plt.plot(energy_25deg_largefric, curve, label=label, color=colour)
plt.legend()
plt.xlabel("Energy (eV)")
plt.ylabel("Intensity shift")
plt.title('This is the data from Li ions on Au (001) surface at theta0 = 25 degrees')
plt.show()
"""
# Fit the dummy exponential data
pars, cov = curve_fit(f=exponential, xdata=x_dummy, ydata=y_dummy, p0=[0, 0], bounds=(-np.inf, np.inf))
f — function used for fitting (in this case exponential)
xdata — array of x-data for fitting
ydata — array of y-data for fitting
p0 — array of initial guesses for the fitting parameters (both a and b as 0)
bounds — bounds for the parameters (-∞ to ∞)
Outputs
pars — array of parameters from fit (in this case [a, b])
cov — the estimated covariance of pars which can be used to determine the standard deviations of the fitting parameters (square roots of the diagonals)
"""
#Now that we have energy and intensity values
#let's look at them and grab them
'''
peaks, properties = signal.find_peaks(intensity values, height=number, width=number)
#Peaks tells you the index in intensity_25-deg-nofric of where the peaks are.
https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html
properties contain
‘peak_heights’
If height is given, the height of each peak in x.
‘left_thresholds’, ‘right_thresholds’
If threshold is given, these keys contain a peaks vertical distance to its neighbouring samples.
‘prominences’, ‘right_bases’, ‘left_bases’
If prominence is given, these keys are accessible. See peak_prominences for a description of their content.
‘width_heights’, ‘left_ips’, ‘right_ips’
If width is given, these keys are accessible. See peak_widths for a description of their content.
‘plateau_sizes’, left_edges’, ‘right_edges’
If plateau_size is given, these keys are accessible and contain the indices of a peak’s edges (edges are still part of the plateau) and the calculated plateau sizes.
widths
The widths for each peak in samples - samples = length of data points.
width_heightsndarray
The height of the contour lines at which the widths where evaluated.
left_ips, right_ipsndarray
Interpolated positions of left and right intersection points of a horizontal line at the respective evaluation height.
'''
#find peaks and widths of no friction
# For each data set: locate peaks above intensity 0.2, convert the reported
# sample widths to eV, and derive per-peak Gaussian seeds (amplitude, sigma,
# centre) for the curve_fit calls below.  sigma = FWHM / 2.35 uses the
# Gaussian relation FWHM = 2*sqrt(2*ln 2)*sigma ~= 2.355*sigma.
peaksnofric, propertiesnofric = signal.find_peaks(intensity_25deg_nofric, height=0.2, width=.01)
#The width I'm looking for is under properties in widths- take that value divide by 10. we have 1000 samples and the samples
#go up to 100 eV hence divide the width given by 10.
actualpeakswidths=0.1*propertiesnofric['widths']
h1 = propertiesnofric['peak_heights']
peak1width=actualpeakswidths[0]
peak2width=actualpeakswidths[1]
amp1=h1[0]     # main-peak amplitude seed
amp2=h1[1]     # secondary-peak amplitude seed
sigma1=peak1width/2.35;
sigma2=peak2width/2.35;
x1=energy_25deg_nofric[peaksnofric[0]]   # main-peak centre (eV)
x2=energy_25deg_nofric[peaksnofric[1]]   # secondary-peak centre (eV)
#find peaks and widths of a= .2 friction
peakssmallfric, propertiessmallfric = signal.find_peaks(intensity_25deg_smallfric, height=0.2, width=.01)
#The width I'm looking for is under properties in widths- take that value divide by 10. we have 1000 samples and the samples
#go up to 100 eV hence divide the width given by 10.
actualpeakswidthsm=0.1*propertiessmallfric['widths']
h1s = propertiessmallfric['peak_heights']
peak1widthsm=actualpeakswidthsm[0]
peak2widthsm=actualpeakswidthsm[1]
amp1s=h1s[0]
amp2s=h1s[1]
sigma1sm=peak1widthsm/2.35;
sigma2sm=peak2widthsm/2.35;
x1s=energy_25deg_smallfric[peakssmallfric[0]]
x2s=energy_25deg_smallfric[peakssmallfric[1]]
#just right friction
#find peaks and widths of a= .02 friction
peaksjr, propertiesjr = signal.find_peaks(intensity_25deg_jr, height=0.2, width=.01)
actualpeakswidthsjr=0.1*propertiesjr['widths']
h1jr = propertiesjr['peak_heights']
peak1widthjr=actualpeakswidthsjr[0]
peak2widthjr=actualpeakswidthsjr[1]
amp1jr=h1jr[0]
amp2jr=h1jr[1]
sigma1jr=peak1widthjr/2.35;
sigma2jr=peak2widthjr/2.35;
x1jr=energy_25deg_jr[peaksjr[0]]
x2jr=energy_25deg_jr[peaksjr[1]]
#large friction
#find peaks and widths of a= 2 friction - only has one peak
peakslg, propertieslg = signal.find_peaks(intensity_25deg_largefric, height=0.2, width=.01)
actualpeakswidthslg=0.1*propertieslg['widths']
h1lg = propertieslg['peak_heights']
peak1widthlg=actualpeakswidthslg[0]
amp1lg=h1lg[0]
sigma1lg=peak1widthlg/2.35;
x1lg=energy_25deg_largefric[peakslg[0]]
#---------------------------------------------------------------------
#--------------------adjust the data------------------------------
#---------------------------------------------------------------------
#---------------------------------------------------------------------------
#----------------fitting gaussian-------------------------------------------
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
#----------------25 degrees-------------------------------------------
#---------------------------------------------------------------------------
"""
n25 = len(x_none25) #length/number of data elements
#since the different energies have different intensities multiplying each energy value
#by its intensity then summing up all the energy values and dividing by the number
#of data elements gives us the average/mean:
mean25 = sum(y_none25)/n25 #note this correction
#here the standard deviation does not occur with respect to each data point.
#I need to update my definition of sigma to reflect the correct sigma
#https://en.wikipedia.org/wiki/Standard_deviation
sigstart25=[]
simga25=[]
dx=[]
dx2=[]
for ii in range(len(x_none25)):
dx=(y_none25[ii]-mean25)
dx2=dx*dx
sigstart25.append(dx2)
# https://www.tutorialspoint.com/python/number_pow.htm
# math.pow(x,y)=x^y
sigma25 = math.sqrt((sum(sigstart25)/n25)) #note this correction
"""
# fitting with the small friction a *SEASAFARI* = 0.0
# NOTE(review): despite the comment this prepares the NO-friction (a = 0)
# data for fitting; "small friction" appears to be a typo.
x_none25=np.array(energy_25deg_nofric)
y_none25=np.array(intensity_25deg_nofric)
n25 = len(x_none25)
#weird other method utilized- this works
#weird formula i don't understand via stack overflow
#https://stackoverflow.com/questions/19206332/gaussian-fit-for-python
# NOTE(review): mean25/sigma25 are rough intensity-weighted moments; the
# curve_fit seeds below actually come from find_peaks, so these two values
# appear unused downstream -- confirm before removing.
mean25 = sum(x_none25*y_none25)/n25
sigma25 = np.sqrt(sum(y_none25*(x_none25)**2)/n25 )
def gaus(x,a, x0, sigma):
    """Single Gaussian a * exp(-(x - x0)^2 / (2 sigma^2)).

    Works elementwise when x is a numpy array; a is the amplitude,
    x0 the centre and sigma the standard deviation.
    """
    exponent = -(x - x0) ** 2 / (2 * sigma ** 2)
    return a * np.exp(exponent)
# Two-Gaussian model used by curve_fit below (see
# https://towardsdatascience.com/basic-curve-fitting-of-scientific-data-with-python-9592244a2509
# for background on curve fitting).
def gauss2(x, amp1,cen1,sigma1, amp2,cen2,sigma2):
    """Sum of two Gaussians with independent amplitude, centre and width."""
    first = amp1 * (np.exp(-(x - cen1) ** 2 / (2 * sigma1 ** 2)))
    second = amp2 * (np.exp(-(x - cen2) ** 2 / (2 * sigma2 ** 2)))
    return first + second
# Initial guesses for the two-Gaussian fit of the no-friction spectrum.
# amp1/amp2 (peak heights) and sigma1/sigma2 (peak widths / 2.35) already
# hold the find_peaks estimates from above -- the original self-assignments
# (amp1=amp1 etc.) were no-ops and have been removed.  Only the centres
# need new names to match the gauss2 parameter order.
cen1 = x1  # centre of the main peak (eV), ~90.70
cen2 = x2  # centre of the secondary peak (eV), ~93.70
# No friction (a = 0): fit the sum of two Gaussians to the spectrum, seeded
# with the amplitudes/centres/widths found by signal.find_peaks above.
popt_2gauss, pcov_2gauss = curve_fit(gauss2, x_none25, y_none25, p0=[amp1, cen1, sigma1, amp2, cen2, sigma2])
# 1-sigma parameter uncertainties: sqrt of the covariance diagonal.
perr_2gauss = np.sqrt(np.diag(pcov_2gauss))
pars_1 = popt_2gauss[0:3]   # (amplitude, centre, sigma) of the main peak
pars_2 = popt_2gauss[3:6]   # (amplitude, centre, sigma) of the secondary peak
gauss_peak_1 = gaus(x_none25, *pars_1)
gauss_peak_2 = gaus(x_none25, *pars_2)
gauss_peak_total=gauss_peak_1+gauss_peak_2
#plot the fits...only problem is it is not finding the peaks correctly
plt.plot(x_none25, gauss_peak_1, "g")
#plt.fill_between(x_none25, gauss_peak_1.min(), gauss_peak_1, facecolor="green", alpha=0.5)
plt.plot(x_none25, gauss_peak_2, "y")
#plt.fill_between(x_none25, gauss_peak_2.min(), gauss_peak_2, facecolor="yellow", alpha=0.5)
plt.plot(x_none25, gauss_peak_total, "m")
plt.fill_between(x_none25, gauss_peak_total.min(), gauss_peak_total, facecolor="magenta", alpha=0.5)
plt.plot(x_none25, y_none25, "k+")
plt.xlim(85,105)
plt.title('Fig. 3 - no friction a=0 ')
plt.show()
#Now let's look at small friction
# Small friction (a = 0.2): same two-Gaussian fit, seeded from the
# small-friction find_peaks results.
popt_sm, pcov_sm = curve_fit(gauss2, energy_25deg_smallfric, intensity_25deg_smallfric, p0=[amp1s, x1s, sigma1sm, amp2s, x2s, sigma2sm])
# 1-sigma parameter uncertainties.
perr_sm = np.sqrt(np.diag(pcov_sm))
pars_1sm = popt_sm[0:3]   # (amplitude, centre, sigma) of the main peak
pars_2sm = popt_sm[3:6]   # (amplitude, centre, sigma) of the secondary peak
gauss_peak_1sm = gaus(energy_25deg_smallfric, *pars_1sm)
gauss_peak_2sm = gaus(energy_25deg_smallfric, *pars_2sm)
gauss_peak_totalsm=gauss_peak_1sm+gauss_peak_2sm
#plot the fits...only problem is it is not finding the peaks correctly
plt.plot(energy_25deg_smallfric, gauss_peak_1sm, "g")
#plt.fill_between(x_none25, gauss_peak_1.min(), gauss_peak_1, facecolor="green", alpha=0.5)
plt.plot(energy_25deg_smallfric, gauss_peak_2sm, "y")
#plt.fill_between(x_none25, gauss_peak_2.min(), gauss_peak_2, facecolor="yellow", alpha=0.5)
plt.plot(energy_25deg_smallfric, gauss_peak_totalsm, "m")
plt.fill_between(energy_25deg_smallfric, gauss_peak_totalsm.min(), gauss_peak_totalsm, facecolor="magenta", alpha=0.5)
plt.plot(energy_25deg_smallfric, intensity_25deg_smallfric, "k+")
plt.xlim(85,105)
plt.title('Fig. 3 - friction a=.2 ')
plt.show()
# "Just right" friction (a = 0.02): two-Gaussian fit and plot.
popt_jr, pcov_jr = curve_fit(gauss2, energy_25deg_jr, intensity_25deg_jr, p0=[amp1jr, x1jr, sigma1jr, amp2jr, x2jr, sigma2jr])
perr_jr = np.sqrt(np.diag(pcov_jr))  # 1-sigma parameter uncertainties
pars_1jr = popt_jr[0:3]   # (amplitude, centre, sigma) of the main peak
pars_2jr = popt_jr[3:6]   # (amplitude, centre, sigma) of the secondary peak
gauss_peak_1jr = gaus(energy_25deg_jr, *pars_1jr)
gauss_peak_2jr = gaus(energy_25deg_jr, *pars_2jr)
gauss_peak_totaljr = gauss_peak_1jr + gauss_peak_2jr
plt.plot(energy_25deg_jr, gauss_peak_1jr, "g")
plt.plot(energy_25deg_jr, gauss_peak_2jr, "y")
plt.plot(energy_25deg_jr, gauss_peak_totaljr, "m")
# BUG FIX: the filled region previously used gauss_peak_totalsm (the
# small-friction curve) as its upper edge; it must use this data set's own
# total fit curve.
plt.fill_between(energy_25deg_jr, gauss_peak_totaljr.min(), gauss_peak_totaljr, facecolor="magenta", alpha=0.5)
plt.plot(energy_25deg_jr, intensity_25deg_jr, "k+")
plt.xlim(85, 105)
plt.title('Fig. 3 -just right friction a=.02 ')
plt.show()
# Large friction (a = 2): only one peak survives, so fit a single Gaussian.
popt_lg, pcov_lg = curve_fit(gaus, energy_25deg_largefric, intensity_25deg_largefric, p0=[amp1lg, x1lg, sigma1lg])
perr_lg = np.sqrt(np.diag(pcov_lg))  # 1-sigma parameter uncertainties
pars_1lg = popt_lg[0:3]  # (amplitude, centre, sigma)
gauss_peak_1lg = gaus(energy_25deg_largefric, *pars_1lg)
# BUG FIX: the fit was computed on energy_25deg_largefric but plotted
# against energy_25deg_jr; use the large-friction energy axis throughout.
plt.plot(energy_25deg_largefric, gauss_peak_1lg, "m")
plt.fill_between(energy_25deg_largefric, gauss_peak_1lg.min(), gauss_peak_1lg, facecolor="magenta", alpha=0.5)
plt.plot(energy_25deg_largefric, intensity_25deg_largefric, "k+")
plt.xlim(85, 105)
# BUG FIX: the title said "just right friction" -- a=2 is the large-friction case.
plt.title('Fig. 3 - large friction a=2 ')
plt.show()
'''
actualpeakswidthsg=0.1*propertieslargefric['widths']
h1g = propertieslargefric['peak_heights']
peak1widthg=actualpeakswidthsg[0]
peak2widthg=actualpeakswidthsg[1]
amp1g=h1g[0]
amp2g=h1g[1]
sigma1g=peak1widthg/2.35;
sigma2g=peak2widthg/2.35;
x1g=energy_25deg_largefric[peakslargefric[0]]
x2g=energy_25deg_largefric[peakslargefric[1]]
'''
"""
#from Pat's code:
# This function is a linear + any number of gaussians
def multiples(x, *params):
y = x * params[0] + params[1]
for i in range(2, len(params), 3):
a = params[i]
sigma = params[i+1]
mu = params[i+2]
y = y + gaussian(x, a, sigma, mu)
return y
"""
"""
from running the code pars contains (x0, a, and sigma)
pars[0] = 0.955118
pars[1]=90.88 - looks like peak location -x0
pars[2]=1.501 - sigma =>FWHM /approx 2.35*sigma
https://en.wikipedia.org/wiki/Gaussian_function
"""
"""
For now i will plot the peak position as a function of friction
Then I will plot the full width half max as afunction of friction
"""
#friction values for the first peak
# (the large-friction data set has no secondary peak, so the second-peak
# tuples below have one fewer entry)
a_allpeak1=(0, 0.02, 0.2, 2);
#friction values for the second peak
a_allpeak2=(0, 0.02, 0.2);
# FWHM (eV) of each peak per friction setting, from the find_peaks widths.
FWHM_peak1=(actualpeakswidths[0], actualpeakswidthsjr[0], actualpeakswidthsm[0], actualpeakswidthslg[0])
FWHM_peak2=(actualpeakswidths[1], actualpeakswidthsjr[1], actualpeakswidthsm[1])
# Peak centres (eV) per friction setting.
x1_peak1=(x1, x1jr, x1s, x1lg);
x1_peak2=(x2, x2jr, x2s)
# FWHM vs friction.
plt.plot(a_allpeak1,FWHM_peak1,'b*', label='main peak')
plt.plot(a_allpeak2,FWHM_peak2,'m+', label='small peak')
plt.title('Fig. 3 - FWHM vs friction value ')
plt.legend()
plt.xlabel('friction value (a) [Sea-SAFARI]')
plt.ylabel('FWHM locations')
plt.show()
# Peak position vs friction.
plt.plot(a_allpeak1,x1_peak1,'b*', label='main peak')
plt.plot(a_allpeak2,x1_peak2,'m+', label='small peak')
plt.title('Fig. 4 - peak locations vs friction value ')
plt.legend()
plt.xlabel('friction value (a) [Sea-SAFARI]')
plt.ylabel('peak locations (eV)')
plt.show()
#---------------------------------------------------------------------------
#----------------plots-------------------------------------------
#---------------------------------------------------------------------------
"""
x_fricvals25=[0, 0.02, 0.2, 2]
y_peaklocal25=[pars_nofric25[1], parssmall25[1], parlarge25[1], parjr25[1]]
#for details see
#https://ned.ipac.caltech.edu/level5/Leo/Stats2_3.html#:~:text=%2819%29%2C%20the%20standard%20deviation%20corresponds%20to%20the%20half,instead.%20This%20is%20somewhat%20larger%20than%20and%20can
FWHM25=[2.53*pars_nofric25[2], 2.53*parssmall25[2], 2.53*parlarge25[2], 2.53*parjr25[2]]
plt.plot(x_fricvals25,y_peaklocal25,'b*', label='25 deg')
plt.title('Fig. 1 - peaks vs friction value ')
plt.legend()
plt.xlabel('friction value (a) [Sea-SAFARI]')
plt.ylabel('Peak locations')
plt.show()
plt.title('Fig. 2 - FWHM vs friction value ')
plt.legend()
plt.xlabel('friction value (a) [Sea-SAFARI]')
plt.ylabel('FWHM')
plt.show()
""" | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"scipy.optimize.curve_fit",
"scipy.signal.find_peaks",
"numpy.array",
"numpy.exp",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.diag"
] | [((8878, 8890), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8888, 8890), True, 'import matplotlib.pyplot as plt\n'), ((8891, 8916), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Energy (eV)"""'], {}), "('Energy (eV)')\n", (8901, 8916), True, 'import matplotlib.pyplot as plt\n'), ((8917, 8946), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Intensity shift"""'], {}), "('Intensity shift')\n", (8927, 8946), True, 'import matplotlib.pyplot as plt\n'), ((8947, 9037), 'matplotlib.pyplot.title', 'plt.title', (['"""This is the data from Li ions on Au (001) surface at theta0 = 25 degrees"""'], {}), "(\n 'This is the data from Li ions on Au (001) surface at theta0 = 25 degrees')\n", (8956, 9037), True, 'import matplotlib.pyplot as plt\n'), ((9033, 9043), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9041, 9043), True, 'import matplotlib.pyplot as plt\n'), ((11120, 11185), 'scipy.signal.find_peaks', 'signal.find_peaks', (['intensity_25deg_nofric'], {'height': '(0.2)', 'width': '(0.01)'}), '(intensity_25deg_nofric, height=0.2, width=0.01)\n', (11137, 11185), True, 'import scipy.signal as signal\n'), ((11745, 11813), 'scipy.signal.find_peaks', 'signal.find_peaks', (['intensity_25deg_smallfric'], {'height': '(0.2)', 'width': '(0.01)'}), '(intensity_25deg_smallfric, height=0.2, width=0.01)\n', (11762, 11813), True, 'import scipy.signal as signal\n'), ((12421, 12482), 'scipy.signal.find_peaks', 'signal.find_peaks', (['intensity_25deg_jr'], {'height': '(0.2)', 'width': '(0.01)'}), '(intensity_25deg_jr, height=0.2, width=0.01)\n', (12438, 12482), True, 'import scipy.signal as signal\n'), ((12894, 12962), 'scipy.signal.find_peaks', 'signal.find_peaks', (['intensity_25deg_largefric'], {'height': '(0.2)', 'width': '(0.01)'}), '(intensity_25deg_largefric, height=0.2, width=0.01)\n', (12911, 12962), True, 'import scipy.signal as signal\n'), ((14773, 14802), 'numpy.array', 'np.array', (['energy_25deg_nofric'], {}), '(energy_25deg_nofric)\n', (14781, 
14802), True, 'import numpy as np\n'), ((14812, 14844), 'numpy.array', 'np.array', (['intensity_25deg_nofric'], {}), '(intensity_25deg_nofric)\n', (14820, 14844), True, 'import numpy as np\n'), ((15854, 15940), 'scipy.optimize.curve_fit', 'curve_fit', (['gauss2', 'x_none25', 'y_none25'], {'p0': '[amp1, cen1, sigma1, amp2, cen2, sigma2]'}), '(gauss2, x_none25, y_none25, p0=[amp1, cen1, sigma1, amp2, cen2,\n sigma2])\n', (15863, 15940), False, 'from scipy.optimize import curve_fit\n'), ((16228, 16265), 'matplotlib.pyplot.plot', 'plt.plot', (['x_none25', 'gauss_peak_1', '"""g"""'], {}), "(x_none25, gauss_peak_1, 'g')\n", (16236, 16265), True, 'import matplotlib.pyplot as plt\n'), ((16358, 16395), 'matplotlib.pyplot.plot', 'plt.plot', (['x_none25', 'gauss_peak_2', '"""y"""'], {}), "(x_none25, gauss_peak_2, 'y')\n", (16366, 16395), True, 'import matplotlib.pyplot as plt\n'), ((16491, 16532), 'matplotlib.pyplot.plot', 'plt.plot', (['x_none25', 'gauss_peak_total', '"""m"""'], {}), "(x_none25, gauss_peak_total, 'm')\n", (16499, 16532), True, 'import matplotlib.pyplot as plt\n'), ((16636, 16670), 'matplotlib.pyplot.plot', 'plt.plot', (['x_none25', 'y_none25', '"""k+"""'], {}), "(x_none25, y_none25, 'k+')\n", (16644, 16670), True, 'import matplotlib.pyplot as plt\n'), ((16671, 16688), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(85)', '(105)'], {}), '(85, 105)\n', (16679, 16688), True, 'import matplotlib.pyplot as plt\n'), ((16688, 16726), 'matplotlib.pyplot.title', 'plt.title', (['"""Fig. 3 - no friction a=0 """'], {}), "('Fig. 
3 - no friction a=0 ')\n", (16697, 16726), True, 'import matplotlib.pyplot as plt\n'), ((16728, 16738), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16736, 16738), True, 'import matplotlib.pyplot as plt\n'), ((16794, 16916), 'scipy.optimize.curve_fit', 'curve_fit', (['gauss2', 'energy_25deg_smallfric', 'intensity_25deg_smallfric'], {'p0': '[amp1s, x1s, sigma1sm, amp2s, x2s, sigma2sm]'}), '(gauss2, energy_25deg_smallfric, intensity_25deg_smallfric, p0=[\n amp1s, x1s, sigma1sm, amp2s, x2s, sigma2sm])\n', (16803, 16916), False, 'from scipy.optimize import curve_fit\n'), ((17233, 17286), 'matplotlib.pyplot.plot', 'plt.plot', (['energy_25deg_smallfric', 'gauss_peak_1sm', '"""g"""'], {}), "(energy_25deg_smallfric, gauss_peak_1sm, 'g')\n", (17241, 17286), True, 'import matplotlib.pyplot as plt\n'), ((17379, 17432), 'matplotlib.pyplot.plot', 'plt.plot', (['energy_25deg_smallfric', 'gauss_peak_2sm', '"""y"""'], {}), "(energy_25deg_smallfric, gauss_peak_2sm, 'y')\n", (17387, 17432), True, 'import matplotlib.pyplot as plt\n'), ((17528, 17585), 'matplotlib.pyplot.plot', 'plt.plot', (['energy_25deg_smallfric', 'gauss_peak_totalsm', '"""m"""'], {}), "(energy_25deg_smallfric, gauss_peak_totalsm, 'm')\n", (17536, 17585), True, 'import matplotlib.pyplot as plt\n'), ((17707, 17772), 'matplotlib.pyplot.plot', 'plt.plot', (['energy_25deg_smallfric', 'intensity_25deg_smallfric', '"""k+"""'], {}), "(energy_25deg_smallfric, intensity_25deg_smallfric, 'k+')\n", (17715, 17772), True, 'import matplotlib.pyplot as plt\n'), ((17773, 17790), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(85)', '(105)'], {}), '(85, 105)\n', (17781, 17790), True, 'import matplotlib.pyplot as plt\n'), ((17790, 17826), 'matplotlib.pyplot.title', 'plt.title', (['"""Fig. 3 - friction a=.2 """'], {}), "('Fig. 
3 - friction a=.2 ')\n", (17799, 17826), True, 'import matplotlib.pyplot as plt\n'), ((17827, 17837), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17835, 17837), True, 'import matplotlib.pyplot as plt\n'), ((17892, 18003), 'scipy.optimize.curve_fit', 'curve_fit', (['gauss2', 'energy_25deg_jr', 'intensity_25deg_jr'], {'p0': '[amp1jr, x1jr, sigma1jr, amp2jr, x2jr, sigma2jr]'}), '(gauss2, energy_25deg_jr, intensity_25deg_jr, p0=[amp1jr, x1jr,\n sigma1jr, amp2jr, x2jr, sigma2jr])\n', (17901, 18003), False, 'from scipy.optimize import curve_fit\n'), ((18307, 18353), 'matplotlib.pyplot.plot', 'plt.plot', (['energy_25deg_jr', 'gauss_peak_1jr', '"""g"""'], {}), "(energy_25deg_jr, gauss_peak_1jr, 'g')\n", (18315, 18353), True, 'import matplotlib.pyplot as plt\n'), ((18446, 18492), 'matplotlib.pyplot.plot', 'plt.plot', (['energy_25deg_jr', 'gauss_peak_2jr', '"""y"""'], {}), "(energy_25deg_jr, gauss_peak_2jr, 'y')\n", (18454, 18492), True, 'import matplotlib.pyplot as plt\n'), ((18588, 18638), 'matplotlib.pyplot.plot', 'plt.plot', (['energy_25deg_jr', 'gauss_peak_totaljr', '"""m"""'], {}), "(energy_25deg_jr, gauss_peak_totaljr, 'm')\n", (18596, 18638), True, 'import matplotlib.pyplot as plt\n'), ((18753, 18804), 'matplotlib.pyplot.plot', 'plt.plot', (['energy_25deg_jr', 'intensity_25deg_jr', '"""k+"""'], {}), "(energy_25deg_jr, intensity_25deg_jr, 'k+')\n", (18761, 18804), True, 'import matplotlib.pyplot as plt\n'), ((18805, 18822), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(85)', '(105)'], {}), '(85, 105)\n', (18813, 18822), True, 'import matplotlib.pyplot as plt\n'), ((18822, 18869), 'matplotlib.pyplot.title', 'plt.title', (['"""Fig. 3 -just right friction a=.02 """'], {}), "('Fig. 
3 -just right friction a=.02 ')\n", (18831, 18869), True, 'import matplotlib.pyplot as plt\n'), ((18870, 18880), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18878, 18880), True, 'import matplotlib.pyplot as plt\n'), ((18930, 19030), 'scipy.optimize.curve_fit', 'curve_fit', (['gaus', 'energy_25deg_largefric', 'intensity_25deg_largefric'], {'p0': '[amp1lg, x1lg, sigma1lg]'}), '(gaus, energy_25deg_largefric, intensity_25deg_largefric, p0=[\n amp1lg, x1lg, sigma1lg])\n', (18939, 19030), False, 'from scipy.optimize import curve_fit\n'), ((19144, 19190), 'matplotlib.pyplot.plot', 'plt.plot', (['energy_25deg_jr', 'gauss_peak_1lg', '"""m"""'], {}), "(energy_25deg_jr, gauss_peak_1lg, 'm')\n", (19152, 19190), True, 'import matplotlib.pyplot as plt\n'), ((19297, 19362), 'matplotlib.pyplot.plot', 'plt.plot', (['energy_25deg_largefric', 'intensity_25deg_largefric', '"""k+"""'], {}), "(energy_25deg_largefric, intensity_25deg_largefric, 'k+')\n", (19305, 19362), True, 'import matplotlib.pyplot as plt\n'), ((19363, 19380), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(85)', '(105)'], {}), '(85, 105)\n', (19371, 19380), True, 'import matplotlib.pyplot as plt\n'), ((19380, 19425), 'matplotlib.pyplot.title', 'plt.title', (['"""Fig. 3 -just right friction a=2 """'], {}), "('Fig. 
3 -just right friction a=2 ')\n", (19389, 19425), True, 'import matplotlib.pyplot as plt\n'), ((19426, 19436), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19434, 19436), True, 'import matplotlib.pyplot as plt\n'), ((20849, 20906), 'matplotlib.pyplot.plot', 'plt.plot', (['a_allpeak1', 'FWHM_peak1', '"""b*"""'], {'label': '"""main peak"""'}), "(a_allpeak1, FWHM_peak1, 'b*', label='main peak')\n", (20857, 20906), True, 'import matplotlib.pyplot as plt\n'), ((20905, 20963), 'matplotlib.pyplot.plot', 'plt.plot', (['a_allpeak2', 'FWHM_peak2', '"""m+"""'], {'label': '"""small peak"""'}), "(a_allpeak2, FWHM_peak2, 'm+', label='small peak')\n", (20913, 20963), True, 'import matplotlib.pyplot as plt\n'), ((20962, 21007), 'matplotlib.pyplot.title', 'plt.title', (['"""Fig. 3 - FWHM vs friction value """'], {}), "('Fig. 3 - FWHM vs friction value ')\n", (20971, 21007), True, 'import matplotlib.pyplot as plt\n'), ((21008, 21020), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (21018, 21020), True, 'import matplotlib.pyplot as plt\n'), ((21021, 21066), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""friction value (a) [Sea-SAFARI]"""'], {}), "('friction value (a) [Sea-SAFARI]')\n", (21031, 21066), True, 'import matplotlib.pyplot as plt\n'), ((21067, 21095), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""FWHM locations"""'], {}), "('FWHM locations')\n", (21077, 21095), True, 'import matplotlib.pyplot as plt\n'), ((21096, 21106), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21104, 21106), True, 'import matplotlib.pyplot as plt\n'), ((21108, 21163), 'matplotlib.pyplot.plot', 'plt.plot', (['a_allpeak1', 'x1_peak1', '"""b*"""'], {'label': '"""main peak"""'}), "(a_allpeak1, x1_peak1, 'b*', label='main peak')\n", (21116, 21163), True, 'import matplotlib.pyplot as plt\n'), ((21162, 21218), 'matplotlib.pyplot.plot', 'plt.plot', (['a_allpeak2', 'x1_peak2', '"""m+"""'], {'label': '"""small peak"""'}), "(a_allpeak2, x1_peak2, 'm+', label='small 
peak')\n", (21170, 21218), True, 'import matplotlib.pyplot as plt\n'), ((21217, 21272), 'matplotlib.pyplot.title', 'plt.title', (['"""Fig. 4 - peak locations vs friction value """'], {}), "('Fig. 4 - peak locations vs friction value ')\n", (21226, 21272), True, 'import matplotlib.pyplot as plt\n'), ((21273, 21285), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (21283, 21285), True, 'import matplotlib.pyplot as plt\n'), ((21286, 21331), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""friction value (a) [Sea-SAFARI]"""'], {}), "('friction value (a) [Sea-SAFARI]')\n", (21296, 21331), True, 'import matplotlib.pyplot as plt\n'), ((21332, 21365), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""peak locations (eV)"""'], {}), "('peak locations (eV)')\n", (21342, 21365), True, 'import matplotlib.pyplot as plt\n'), ((21366, 21376), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21374, 21376), True, 'import matplotlib.pyplot as plt\n'), ((15959, 15979), 'numpy.diag', 'np.diag', (['pcov_2gauss'], {}), '(pcov_2gauss)\n', (15966, 15979), True, 'import numpy as np\n'), ((16930, 16946), 'numpy.diag', 'np.diag', (['pcov_sm'], {}), '(pcov_sm)\n', (16937, 16946), True, 'import numpy as np\n'), ((18018, 18034), 'numpy.diag', 'np.diag', (['pcov_jr'], {}), '(pcov_jr)\n', (18025, 18034), True, 'import numpy as np\n'), ((19044, 19060), 'numpy.diag', 'np.diag', (['pcov_lg'], {}), '(pcov_lg)\n', (19051, 19060), True, 'import numpy as np\n'), ((15253, 15294), 'numpy.exp', 'np.exp', (['(-(x - x0) ** 2 / (2 * sigma ** 2))'], {}), '(-(x - x0) ** 2 / (2 * sigma ** 2))\n', (15259, 15294), True, 'import numpy as np\n'), ((15522, 15566), 'numpy.exp', 'np.exp', (['(-(x - cen1) ** 2 / (2 * sigma1 ** 2))'], {}), '(-(x - cen1) ** 2 / (2 * sigma1 ** 2))\n', (15528, 15566), True, 'import numpy as np\n'), ((15580, 15624), 'numpy.exp', 'np.exp', (['(-(x - cen2) ** 2 / (2 * sigma2 ** 2))'], {}), '(-(x - cen2) ** 2 / (2 * sigma2 ** 2))\n', (15586, 15624), True, 'import numpy as 
np\n')] |
from queue import Queue
from threading import Thread
import sys
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # suppress TensorFlow C++ log output; presumably must be set before somnus.models imports TF — confirm
import numpy as np
import pyaudio
from somnus.models import BaseModel
from somnus.preprocess_audio import melnormalize
class Somnus():
    """
    Keyword detector that listens to a microphone and recognises a trained
    keyword in the incoming audio stream.

    Args:
        model (string): The file containing the trained model
        device_index (int): The device index of the microphone that Somnus should listen to.
        threshold (float): A threshold for how confident Somnus has to be for it to detect the keyword (between [0,1])
        audio_config (dict): Optional configuration with the keys
            'data_shape' (tuple): the input shape for the keyword model,
            'sample_duration' (float): length of the model input in seconds,
            'n_filters' (int): the number of filters in each frame,
            'win_length' (int): the length of each window in frames,
            'win_hop' (int): the number of frames between the starting frames
            of consecutive windows.
            Defaults to ``_get_default_config()``.
    """
    def __init__(
            self,
            model='',
            device_index=0,
            threshold=0.8,
            audio_config=None
        ):
        if not audio_config:
            audio_config = self._get_default_config()

        self.model = BaseModel()
        self.model.load(model)

        self.chunk_duration = 0.1  # each read length in seconds from the mic
        self.fs = 16000  # sampling rate for the mic
        self.chunk_samples = int(self.fs * self.chunk_duration)  # each read length in number of samples

        # Model input duration in samples; sample_duration needs to be an
        # integer number of chunk_durations so whole chunks fit the buffer.
        self.feed_samples = int(self.fs * audio_config['sample_duration'])
        self.threshold = threshold

        # Rolling buffer holding the most recent input waveform
        self.data = np.zeros(self.feed_samples, dtype='int16')
        self.device_index = device_index

        # Parameters for preprocessing the audio stream
        self.n_filters = audio_config['n_filters']
        self.win_length = audio_config['win_length']
        self.win_hop = audio_config['win_hop']

        # State for continuous listening mode; the queue communicates
        # between the audio callback thread and the main thread.
        self.q = None
        self.stream = None
        self.listening = False

    def listen(self):
        """
        Fetches data from the audio buffer until it detects a trigger word

        Returns:
            True if the key word is detected, otherwise False
        """
        self._setup_stream()

        try:
            self.stream.start_stream()
            while True:
                audio_stream = self.q.get().astype('float')
                result, confidence = self._get_prediction(audio_stream)

                if result == 0 and confidence > self.threshold:
                    self.listening = False
                    # Release the microphone before returning so repeated
                    # calls to listen() do not leak open audio streams.
                    self.stream.stop_stream()
                    self.stream.close()
                    return True
        except (KeyboardInterrupt, SystemExit):
            self.stream.stop_stream()
            self.stream.close()
            sys.exit()
        except Exception:
            # Best effort: report any audio/model failure as "not detected"
            # rather than crashing the caller.  (Previously a bare `except:`,
            # which swallowed even exceptions it should never have caught.)
            return False

    def detect_keyword(self, audio_stream):
        """
        Normalizes the audio_stream argument and detects whether or not it contains the key word

        Args:
            audio_stream (array): An audio time series

        Returns:
            True if the key word is detected, otherwise False
        """
        result, confidence = self._get_prediction(audio_stream)
        # Class 0 is the keyword class; anything else is not the keyword.
        return result == 0 and confidence > self.threshold

    def _get_audio_input_stream(self):
        """Open a pyaudio input stream on the configured device that feeds
        one chunk at a time into ``_callback``."""
        stream = pyaudio.PyAudio().open(
            format=pyaudio.paInt16,
            channels=1,
            rate=self.fs,
            input=True,
            frames_per_buffer=self.chunk_samples,
            input_device_index=self.device_index,
            stream_callback=self._callback)
        return stream

    def _get_default_config(self):
        """The default config assumes that all the default arguments for the Somnus CLI were used"""
        return {
            'data_shape': (101, 40, 1),
            'sample_duration': 1.,
            'n_filters': 40,
            'win_length': 400,
            'win_hop': 160
        }

    def _callback(self, in_data, frame_count, time_info, status):
        """pyaudio stream callback: append the incoming chunk to the rolling
        buffer, trim the buffer to ``feed_samples``, and, when listening,
        hand the buffer to the main thread via the queue."""
        data0 = np.frombuffer(in_data, dtype='int16')
        self.data = np.append(self.data, data0)
        if len(self.data) > self.feed_samples:
            # keep only the most recent feed_samples samples
            self.data = self.data[-self.feed_samples:]
        if self.listening:
            # process data asynchronously by sending it through the queue
            self.q.put(self.data)
        return (in_data, pyaudio.paContinue)

    def _setup_stream(self):
        """
        Initialize the audio stream for continuous listening
        """
        self.stream = self._get_audio_input_stream()
        self.listening = True
        self.q = Queue()
        self.data = np.zeros(self.feed_samples, dtype='int16')

    def _get_prediction(self, audio_stream):
        """
        Predicts the class of the audio time series

        Args:
            audio_stream (array): An audio time series

        Returns:
            Returns the predicted class and the confidence the model has in its prediction
        """
        data = melnormalize(audio_stream, self.n_filters, self.win_length, self.win_hop)
        data = np.expand_dims(data, axis=0)
        preds = self.model.predict(data)
        res = np.argmax(preds)
        return res, max(preds)
| [
"somnus.models.BaseModel",
"numpy.argmax",
"numpy.frombuffer",
"numpy.zeros",
"numpy.expand_dims",
"numpy.append",
"pyaudio.PyAudio",
"queue.Queue",
"sys.exit",
"somnus.preprocess_audio.melnormalize"
] | [((1200, 1211), 'somnus.models.BaseModel', 'BaseModel', ([], {}), '()\n', (1209, 1211), False, 'from somnus.models import BaseModel\n'), ((1757, 1799), 'numpy.zeros', 'np.zeros', (['self.feed_samples'], {'dtype': '"""int16"""'}), "(self.feed_samples, dtype='int16')\n", (1765, 1799), True, 'import numpy as np\n'), ((4349, 4386), 'numpy.frombuffer', 'np.frombuffer', (['in_data'], {'dtype': '"""int16"""'}), "(in_data, dtype='int16')\n", (4362, 4386), True, 'import numpy as np\n'), ((4416, 4443), 'numpy.append', 'np.append', (['self.data', 'data0'], {}), '(self.data, data0)\n', (4425, 4443), True, 'import numpy as np\n'), ((4932, 4939), 'queue.Queue', 'Queue', ([], {}), '()\n', (4937, 4939), False, 'from queue import Queue\n'), ((4960, 5002), 'numpy.zeros', 'np.zeros', (['self.feed_samples'], {'dtype': '"""int16"""'}), "(self.feed_samples, dtype='int16')\n", (4968, 5002), True, 'import numpy as np\n'), ((5319, 5392), 'somnus.preprocess_audio.melnormalize', 'melnormalize', (['audio_stream', 'self.n_filters', 'self.win_length', 'self.win_hop'], {}), '(audio_stream, self.n_filters, self.win_length, self.win_hop)\n', (5331, 5392), False, 'from somnus.preprocess_audio import melnormalize\n'), ((5408, 5436), 'numpy.expand_dims', 'np.expand_dims', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (5422, 5436), True, 'import numpy as np\n'), ((5493, 5509), 'numpy.argmax', 'np.argmax', (['preds'], {}), '(preds)\n', (5502, 5509), True, 'import numpy as np\n'), ((2981, 2991), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2989, 2991), False, 'import sys\n'), ((3630, 3647), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (3645, 3647), False, 'import pyaudio\n')] |
from __future__ import division
import numpy as np
def net_input(xi, weights):
    """Return the weighted sum of the inputs plus the bias (weights[0])."""
    bias = weights[0]
    return bias + np.dot(xi, weights[1:])
def predict(xi, weights):
    """Classify a sample as +1 or -1 by the sign of its net input."""
    activation = net_input(xi, weights)
    return np.where(activation >= 0, 1, -1)
def fit(X, y, learning_rate=0.01, iterations=10):
    """Train perceptron weights on the samples X with labels y.

    Returns a tuple ``(weights, total_errors)`` where ``weights[0]`` is the
    bias term and ``total_errors`` lists the number of misclassified samples
    in each iteration.
    """
    n_features = X.shape[1]
    weights = np.zeros(1 + n_features)  # index 0 holds the bias weight
    total_errors = []

    for _ in range(iterations):
        misclassified = 0
        for sample, target in zip(X, y):
            prediction = predict(sample, weights)
            # perceptron update rule: move weights toward the target
            delta = learning_rate * (target - prediction)
            weights[1:] += delta * sample
            weights[0] += delta
            misclassified += int(prediction != target)
        total_errors.append(misclassified)

    return weights, total_errors
| [
"numpy.dot",
"numpy.zeros"
] | [((312, 344), 'numpy.zeros', 'np.zeros', (['(number_of_features + 1)'], {}), '(number_of_features + 1)\n', (320, 344), True, 'import numpy as np\n'), ((91, 114), 'numpy.dot', 'np.dot', (['xi', 'weights[1:]'], {}), '(xi, weights[1:])\n', (97, 114), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
from base import Experiment, FilteredRankingEval
from skge import TransE, PairwiseStochasticTrainer
class TransEEval(FilteredRankingEval):
    """Filtered ranking evaluation for TransE using the L1 distance."""

    def prepare(self, mdl, p):
        # translate every entity embedding by the relation vector of p
        self.ER = mdl.E + mdl.R[p]

    def scores_o(self, mdl, s, p):
        # negative L1 distance between (E[s] + R[p]) and every entity
        dist = np.abs(self.ER[s] - mdl.E)
        return -dist.sum(axis=1)

    def scores_s(self, mdl, o, p):
        # negative L1 distance between (E + R[p]) and E[o]
        dist = np.abs(self.ER - mdl.E[o])
        return -dist.sum(axis=1)
class ExpTransE(Experiment):
    """Experiment driver that trains and evaluates a TransE model."""

    def __init__(self):
        super(ExpTransE, self).__init__()
        self.parser.add_argument('--ncomp', type=int, help='Number of latent components')
        self.evaluator = TransEEval

    def setup_trainer(self, sz, sampler):
        """Build a TransE model and a pairwise stochastic trainer for it."""
        mdl = TransE(sz, self.args.ncomp, init=self.args.init)
        return PairwiseStochasticTrainer(
            mdl,
            nbatches=self.args.nb,
            margin=self.args.margin,
            max_epochs=self.args.me,
            learning_rate=self.args.lr,
            samplef=sampler.sample,
            post_epoch=[self.callback])
# Run the TransE experiment when invoked as a script.
if __name__ == '__main__':
    ExpTransE().run()
| [
"skge.TransE",
"numpy.abs",
"skge.PairwiseStochasticTrainer"
] | [((723, 771), 'skge.TransE', 'TransE', (['sz', 'self.args.ncomp'], {'init': 'self.args.init'}), '(sz, self.args.ncomp, init=self.args.init)\n', (729, 771), False, 'from skge import TransE, PairwiseStochasticTrainer\n'), ((790, 985), 'skge.PairwiseStochasticTrainer', 'PairwiseStochasticTrainer', (['model'], {'nbatches': 'self.args.nb', 'margin': 'self.args.margin', 'max_epochs': 'self.args.me', 'learning_rate': 'self.args.lr', 'samplef': 'sampler.sample', 'post_epoch': '[self.callback]'}), '(model, nbatches=self.args.nb, margin=self.args.\n margin, max_epochs=self.args.me, learning_rate=self.args.lr, samplef=\n sampler.sample, post_epoch=[self.callback])\n', (815, 985), False, 'from skge import TransE, PairwiseStochasticTrainer\n'), ((309, 335), 'numpy.abs', 'np.abs', (['(self.ER[s] - mdl.E)'], {}), '(self.ER[s] - mdl.E)\n', (315, 335), True, 'import numpy as np\n'), ((404, 430), 'numpy.abs', 'np.abs', (['(self.ER - mdl.E[o])'], {}), '(self.ER - mdl.E[o])\n', (410, 430), True, 'import numpy as np\n')] |
"""An exact Riemann solver for the Euler equations with a gamma-law
gas. The left and right states are stored as State objects. We then
create a RiemannProblem object with the left and right state:
> rp = RiemannProblem(left_state, right_state)
Next we solve for the star state:
> rp.find_star_state()
Finally, we sample the solution to find the interface state, which
is returned as a State object:
> q_int = rp.sample_solution()
"""
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as optimize
class State:
    """Container for a primitive-variable state: pressure p, velocity u,
    and density rho."""

    def __init__(self, p=1.0, u=0.0, rho=1.0):
        self.rho = rho
        self.u = u
        self.p = p

    def __str__(self):
        return f"rho: {self.rho}; u: {self.u}; p: {self.p}"
class RiemannProblem:
    """A Riemann problem for the gamma-law Euler equations.

    Takes a left and a right primitive-variable State (constant gamma is
    assumed).  Call find_star_state() (or find_2shock_star_state()) to solve
    for the star region, then sample_solution() to sample the similarity
    solution.
    """

    def __init__(self, left_state, right_state, gamma=1.4):
        self.left = left_state
        self.right = right_state
        self.gamma = gamma

        # star-region state, filled in by find_star_state() or
        # find_2shock_star_state()
        self.ustar = None
        self.pstar = None

    def __str__(self):
        return f"pstar = {self.pstar}, ustar = {self.ustar}"

    def u_hugoniot(self, p, side, shock=False):
        """Define the Hugoniot curve, u(p), for the wave on the given side
        ("left" or "right").  If shock=True, use the shock expression
        everywhere (the 2-shock approximation).

        Raises:
            ValueError: if side is neither "left" nor "right".
        """

        if side == "left":
            state = self.left
            s = 1.0
        elif side == "right":
            state = self.right
            s = -1.0
        else:
            raise ValueError("side must be 'left' or 'right'")

        # sound speed of the reference state
        c = np.sqrt(self.gamma*state.p/state.rho)

        if shock or p >= state.p:
            # compression: connect through a shock
            beta = (self.gamma+1.0)/(self.gamma-1.0)
            u = state.u + s*(2.0*c/np.sqrt(2.0*self.gamma*(self.gamma-1.0)))* \
                (1.0 - p/state.p)/np.sqrt(1.0 + beta*p/state.p)
        else:
            # expansion: connect through a rarefaction
            u = state.u + s*(2.0*c/(self.gamma-1.0))* \
                (1.0 - (p/state.p)**((self.gamma-1.0)/(2.0*self.gamma)))

        return u

    def find_star_state(self, p_min=0.001, p_max=1000.0):
        """Root find the Hugoniot curves to find ustar, pstar.

        The root of u_left(p) - u_right(p) is bracketed by [p_min, p_max].
        """
        self.pstar = optimize.brentq(
            lambda p: self.u_hugoniot(p, "left") - self.u_hugoniot(p, "right"),
            p_min, p_max)
        self.ustar = self.u_hugoniot(self.pstar, "left")

    def find_2shock_star_state(self, p_min=0.001, p_max=1000.0):
        """Root find the 2-shock Hugoniot curves to find ustar, pstar."""
        self.pstar = optimize.brentq(
            lambda p: self.u_hugoniot(p, "left", shock=True) - self.u_hugoniot(p, "right", shock=True),
            p_min, p_max)
        self.ustar = self.u_hugoniot(self.pstar, "left", shock=True)

    def shock_solution(self, sgn, xi, state):
        """Return the interface solution considering a shock.

        sgn is +1 for the right wave and -1 for the left wave; xi = x/t is
        the similarity variable at which we sample.
        """

        p_ratio = self.pstar/state.p
        c = np.sqrt(self.gamma*state.p/state.rho)

        # shock speed -- Toro, eq. 4.52 / 4.59
        S = state.u + sgn*c*np.sqrt(0.5*(self.gamma + 1.0)/self.gamma*p_ratio +
                                0.5*(self.gamma - 1.0)/self.gamma)

        # are we to the left or right of the shock?
        if (sgn > 0 and xi > S) or (sgn < 0 and xi < S):
            # R/L region
            solution = state
        else:
            # * region -- get rhostar from Toro, eq. 4.50 / 4.57
            gam_fac = (self.gamma - 1.0)/(self.gamma + 1.0)
            rhostar = state.rho * (p_ratio + gam_fac)/(gam_fac * p_ratio + 1.0)
            solution = State(rho=rhostar, u=self.ustar, p=self.pstar)

        return solution

    def rarefaction_solution(self, sgn, xi, state):
        """Return the interface solution considering a rarefaction wave.

        sgn is +1 for the right wave and -1 for the left wave; xi = x/t is
        the similarity variable at which we sample.
        """

        # find the speed of the head and tail of the rarefaction fan,
        # isentropic (Toro eq. 4.54 / 4.61)
        p_ratio = self.pstar/state.p
        c = np.sqrt(self.gamma*state.p/state.rho)
        cstar = c*p_ratio**((self.gamma-1.0)/(2*self.gamma))

        lambda_head = state.u + sgn*c
        lambda_tail = self.ustar + sgn*cstar

        gam_fac = (self.gamma - 1.0)/(self.gamma + 1.0)

        if (sgn > 0 and xi > lambda_head) or (sgn < 0 and xi < lambda_head):
            # R/L region
            solution = state
        elif (sgn > 0 and xi < lambda_tail) or (sgn < 0 and xi > lambda_tail):
            # * region, we use the isentropic density (Toro 4.53 / 4.60)
            solution = State(rho = state.rho*p_ratio**(1.0/self.gamma),
                             u = self.ustar, p = self.pstar)
        else:
            # we are in the fan -- Toro 4.56 / 4.63
            rho = state.rho * (2/(self.gamma + 1.0) -
                               sgn*gam_fac*(state.u - xi)/c)**(2.0/(self.gamma-1.0))
            u = 2.0/(self.gamma + 1.0) * ( -sgn*c + 0.5*(self.gamma - 1.0)*state.u + xi)
            p = state.p * (2/(self.gamma + 1.0) -
                           sgn*gam_fac*(state.u - xi)/c)**(2.0*self.gamma/(self.gamma-1.0))
            solution = State(rho=rho, u=u, p=p)

        return solution

    def sample_solution(self, time, npts, xmin=0.0, xmax=1.0):
        """Given the star state (ustar, pstar), sample the solution for npts
        points between xmin and xmax at the given time.

        Returns (x, rho, u, p) as arrays.  This is a similarity solution in
        xi = x/t, with the initial jump at the midpoint of [xmin, xmax].
        """

        # we write it all explicitly out here -- this could be vectorized
        # better.
        dx = (xmax - xmin)/npts
        xjump = 0.5*(xmin + xmax)

        x = np.linspace(xmin, xmax, npts, endpoint=False) + 0.5*dx
        xi = (x - xjump)/time

        rho_v = []
        u_v = []
        p_v = []

        for n in range(npts):
            # which side of the contact are we on?
            if xi[n] > self.ustar:
                # we are in the R* or R region
                state = self.right
                sgn = 1.0
            else:
                # we are in the L* or L region
                state = self.left
                sgn = -1.0

            # is the non-contact wave a shock or rarefaction?
            if self.pstar > state.p:
                # compression! we are a shock
                solution = self.shock_solution(sgn, xi[n], state)
            else:
                # rarefaction
                solution = self.rarefaction_solution(sgn, xi[n], state)

            rho_v.append(solution.rho)
            u_v.append(solution.u)
            p_v.append(solution.p)

        return x, np.array(rho_v), np.array(u_v), np.array(p_v)

    def plot_hugoniot(self, p_min = 0.0, p_max=1.5, N=500, gray=False):
        """ plot the Hugoniot curves """

        p = np.linspace(p_min, p_max, num=N)
        u_left = np.zeros_like(p)
        u_right = np.zeros_like(p)

        # Evaluate both curves up front: the label offset du below must see
        # the full data.  (Previously du was computed before u_right was
        # filled, so the offset ignored the right curve entirely.)
        for n in range(N):
            u_left[n] = self.u_hugoniot(p[n], "left")
            u_right[n] = self.u_hugoniot(p[n], "right")

        du = 0.025*(max(np.max(u_left), np.max(u_right)) -
                    min(np.min(u_left), np.min(u_right)))

        # left curve: solid where we are a shock (p > p_s), dotted where we
        # are a rarefaction
        ish = np.where(p > self.left.p)
        ir = np.where(p < self.left.p)

        color = "0.5" if gray else "C0"
        plt.plot(p[ish], u_left[ish], c=color, ls="-", lw=2)
        plt.plot(p[ir], u_left[ir], c=color, ls=":", lw=2)
        plt.scatter([self.left.p], [self.left.u], marker="x", c=color, s=40)

        if not gray:
            plt.text(self.left.p, self.left.u+du, "left",
                     horizontalalignment="center", color=color)

        # right curve
        ish = np.where(p > self.right.p)
        ir = np.where(p < self.right.p)

        color = "0.5" if gray else "C1"
        plt.plot(p[ish], u_right[ish], c=color, ls="-", lw=2)
        plt.plot(p[ir], u_right[ir], c=color, ls=":", lw=2)
        plt.scatter([self.right.p], [self.right.u], marker="x", c=color, s=40)

        if not gray:
            plt.text(self.right.p, self.right.u+du, "right",
                     horizontalalignment="center", color=color)

        plt.xlim(p_min, p_max)

        plt.xlabel(r"$p$", fontsize="large")
        plt.ylabel(r"$u$", fontsize="large")

        if not gray:
            legs = [plt.Line2D((0, 1), (0, 0), color="0.5", ls="-", marker=None),
                    plt.Line2D((0, 1), (0, 0), color="0.5", ls=":", marker=None)]
            legnames = ["shock", "rarefaction"]
            plt.legend(legs, legnames, frameon=False, loc="best")

        plt.tight_layout()

    def plot_2shock_hugoniot(self, p_min = 0.0, p_max=1.5, N=500):
        """ plot the Hugoniot curves under the 2-shock approximation"""

        p = np.linspace(p_min, p_max, num=N)
        u_left = np.zeros_like(p)
        u_right = np.zeros_like(p)

        for n in range(N):
            u_left[n] = self.u_hugoniot(p[n], "left", shock=True)

        plt.plot(p, u_left, c="C0", ls="-", lw=2, zorder=100)
        plt.scatter([self.left.p], [self.left.u], marker="x", c="C0", s=40, zorder=100)

        for n in range(N):
            u_right[n] = self.u_hugoniot(p[n], "right", shock=True)

        plt.plot(p, u_right, c="C1", ls="-", lw=2, zorder=100)
        plt.scatter([self.right.p], [self.right.u], marker="x", c="C1", s=40, zorder=100)

        # here both curves are filled before du is used, so this offset is
        # computed from the full data
        du = 0.025*(max(np.max(u_left), np.max(u_right)) -
                    min(np.min(u_left), np.min(u_right)))

        plt.text(self.left.p, self.left.u+du, "left",
                 horizontalalignment="center", color="C0")

        plt.text(self.right.p, self.right.u+du, "right",
                 horizontalalignment="center", color="C1")

        plt.xlim(p_min, p_max)

        plt.xlabel(r"$p$", fontsize="large")
        plt.ylabel(r"$u$", fontsize="large")

        plt.tight_layout()
| [
"matplotlib.pyplot.xlim",
"numpy.zeros_like",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.text",
"numpy.max",
"numpy.where",
"numpy.array",
"matplotlib.pyplot.Line2D",
"numpy.linspace",
"numpy.sign",
"numpy.min",
"matplotlib.pyplot.... | [((1554, 1595), 'numpy.sqrt', 'np.sqrt', (['(self.gamma * state.p / state.rho)'], {}), '(self.gamma * state.p / state.rho)\n', (1561, 1595), True, 'import numpy as np\n'), ((3234, 3275), 'numpy.sqrt', 'np.sqrt', (['(self.gamma * state.p / state.rho)'], {}), '(self.gamma * state.p / state.rho)\n', (3241, 3275), True, 'import numpy as np\n'), ((4226, 4267), 'numpy.sqrt', 'np.sqrt', (['(self.gamma * state.p / state.rho)'], {}), '(self.gamma * state.p / state.rho)\n', (4233, 4267), True, 'import numpy as np\n'), ((5966, 5990), 'numpy.sign', 'np.sign', (['(xi - self.ustar)'], {}), '(xi - self.ustar)\n', (5973, 5990), True, 'import numpy as np\n'), ((7066, 7098), 'numpy.linspace', 'np.linspace', (['p_min', 'p_max'], {'num': 'N'}), '(p_min, p_max, num=N)\n', (7077, 7098), True, 'import numpy as np\n'), ((7116, 7132), 'numpy.zeros_like', 'np.zeros_like', (['p'], {}), '(p)\n', (7129, 7132), True, 'import numpy as np\n'), ((7151, 7167), 'numpy.zeros_like', 'np.zeros_like', (['p'], {}), '(p)\n', (7164, 7167), True, 'import numpy as np\n'), ((7316, 7341), 'numpy.where', 'np.where', (['(p > self.left.p)'], {}), '(p > self.left.p)\n', (7324, 7341), True, 'import numpy as np\n'), ((7355, 7380), 'numpy.where', 'np.where', (['(p < self.left.p)'], {}), '(p < self.left.p)\n', (7363, 7380), True, 'import numpy as np\n'), ((7473, 7525), 'matplotlib.pyplot.plot', 'plt.plot', (['p[ish]', 'u_left[ish]'], {'c': 'color', 'ls': '"""-"""', 'lw': '(2)'}), "(p[ish], u_left[ish], c=color, ls='-', lw=2)\n", (7481, 7525), True, 'import matplotlib.pyplot as plt\n'), ((7534, 7584), 'matplotlib.pyplot.plot', 'plt.plot', (['p[ir]', 'u_left[ir]'], {'c': 'color', 'ls': '""":"""', 'lw': '(2)'}), "(p[ir], u_left[ir], c=color, ls=':', lw=2)\n", (7542, 7584), True, 'import matplotlib.pyplot as plt\n'), ((7593, 7661), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[self.left.p]', '[self.left.u]'], {'marker': '"""x"""', 'c': 'color', 's': '(40)'}), "([self.left.p], [self.left.u], 
marker='x', c=color, s=40)\n", (7604, 7661), True, 'import matplotlib.pyplot as plt\n'), ((8023, 8049), 'numpy.where', 'np.where', (['(p > self.right.p)'], {}), '(p > self.right.p)\n', (8031, 8049), True, 'import numpy as np\n'), ((8063, 8089), 'numpy.where', 'np.where', (['(p < self.right.p)'], {}), '(p < self.right.p)\n', (8071, 8089), True, 'import numpy as np\n'), ((8182, 8235), 'matplotlib.pyplot.plot', 'plt.plot', (['p[ish]', 'u_right[ish]'], {'c': 'color', 'ls': '"""-"""', 'lw': '(2)'}), "(p[ish], u_right[ish], c=color, ls='-', lw=2)\n", (8190, 8235), True, 'import matplotlib.pyplot as plt\n'), ((8244, 8295), 'matplotlib.pyplot.plot', 'plt.plot', (['p[ir]', 'u_right[ir]'], {'c': 'color', 'ls': '""":"""', 'lw': '(2)'}), "(p[ir], u_right[ir], c=color, ls=':', lw=2)\n", (8252, 8295), True, 'import matplotlib.pyplot as plt\n'), ((8304, 8374), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[self.right.p]', '[self.right.u]'], {'marker': '"""x"""', 'c': 'color', 's': '(40)'}), "([self.right.p], [self.right.u], marker='x', c=color, s=40)\n", (8315, 8374), True, 'import matplotlib.pyplot as plt\n'), ((8531, 8553), 'matplotlib.pyplot.xlim', 'plt.xlim', (['p_min', 'p_max'], {}), '(p_min, p_max)\n', (8539, 8553), True, 'import matplotlib.pyplot as plt\n'), ((8563, 8598), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$p$"""'], {'fontsize': '"""large"""'}), "('$p$', fontsize='large')\n", (8573, 8598), True, 'import matplotlib.pyplot as plt\n'), ((8608, 8643), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$u$"""'], {'fontsize': '"""large"""'}), "('$u$', fontsize='large')\n", (8618, 8643), True, 'import matplotlib.pyplot as plt\n'), ((9039, 9057), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9055, 9057), True, 'import matplotlib.pyplot as plt\n'), ((9212, 9244), 'numpy.linspace', 'np.linspace', (['p_min', 'p_max'], {'num': 'N'}), '(p_min, p_max, num=N)\n', (9223, 9244), True, 'import numpy as np\n'), ((9262, 9278), 'numpy.zeros_like', 
'np.zeros_like', (['p'], {}), '(p)\n', (9275, 9278), True, 'import numpy as np\n'), ((9297, 9313), 'numpy.zeros_like', 'np.zeros_like', (['p'], {}), '(p)\n', (9310, 9313), True, 'import numpy as np\n'), ((9417, 9470), 'matplotlib.pyplot.plot', 'plt.plot', (['p', 'u_left'], {'c': '"""C0"""', 'ls': '"""-"""', 'lw': '(2)', 'zorder': '(100)'}), "(p, u_left, c='C0', ls='-', lw=2, zorder=100)\n", (9425, 9470), True, 'import matplotlib.pyplot as plt\n'), ((9479, 9558), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[self.left.p]', '[self.left.u]'], {'marker': '"""x"""', 'c': '"""C0"""', 's': '(40)', 'zorder': '(100)'}), "([self.left.p], [self.left.u], marker='x', c='C0', s=40, zorder=100)\n", (9490, 9558), True, 'import matplotlib.pyplot as plt\n'), ((9664, 9718), 'matplotlib.pyplot.plot', 'plt.plot', (['p', 'u_right'], {'c': '"""C1"""', 'ls': '"""-"""', 'lw': '(2)', 'zorder': '(100)'}), "(p, u_right, c='C1', ls='-', lw=2, zorder=100)\n", (9672, 9718), True, 'import matplotlib.pyplot as plt\n'), ((9727, 9812), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[self.right.p]', '[self.right.u]'], {'marker': '"""x"""', 'c': '"""C1"""', 's': '(40)', 'zorder': '(100)'}), "([self.right.p], [self.right.u], marker='x', c='C1', s=40,\n zorder=100)\n", (9738, 9812), True, 'import matplotlib.pyplot as plt\n'), ((9936, 10030), 'matplotlib.pyplot.text', 'plt.text', (['self.left.p', '(self.left.u + du)', '"""left"""'], {'horizontalalignment': '"""center"""', 'color': '"""C0"""'}), "(self.left.p, self.left.u + du, 'left', horizontalalignment=\n 'center', color='C0')\n", (9944, 10030), True, 'import matplotlib.pyplot as plt\n'), ((10050, 10147), 'matplotlib.pyplot.text', 'plt.text', (['self.right.p', '(self.right.u + du)', '"""right"""'], {'horizontalalignment': '"""center"""', 'color': '"""C1"""'}), "(self.right.p, self.right.u + du, 'right', horizontalalignment=\n 'center', color='C1')\n", (10058, 10147), True, 'import matplotlib.pyplot as plt\n'), ((10167, 10189), 
'matplotlib.pyplot.xlim', 'plt.xlim', (['p_min', 'p_max'], {}), '(p_min, p_max)\n', (10175, 10189), True, 'import matplotlib.pyplot as plt\n'), ((10199, 10234), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$p$"""'], {'fontsize': '"""large"""'}), "('$p$', fontsize='large')\n", (10209, 10234), True, 'import matplotlib.pyplot as plt\n'), ((10244, 10279), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$u$"""'], {'fontsize': '"""large"""'}), "('$u$', fontsize='large')\n", (10254, 10279), True, 'import matplotlib.pyplot as plt\n'), ((10290, 10308), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10306, 10308), True, 'import matplotlib.pyplot as plt\n'), ((5819, 5864), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'npts'], {'endpoint': '(False)'}), '(xmin, xmax, npts, endpoint=False)\n', (5830, 5864), True, 'import numpy as np\n'), ((6893, 6908), 'numpy.array', 'np.array', (['rho_v'], {}), '(rho_v)\n', (6901, 6908), True, 'import numpy as np\n'), ((6910, 6923), 'numpy.array', 'np.array', (['u_v'], {}), '(u_v)\n', (6918, 6923), True, 'import numpy as np\n'), ((6925, 6938), 'numpy.array', 'np.array', (['p_v'], {}), '(p_v)\n', (6933, 6938), True, 'import numpy as np\n'), ((7814, 7909), 'matplotlib.pyplot.text', 'plt.text', (['self.left.p', '(self.left.u + du)', '"""left"""'], {'horizontalalignment': '"""center"""', 'color': 'color'}), "(self.left.p, self.left.u + du, 'left', horizontalalignment=\n 'center', color=color)\n", (7822, 7909), True, 'import matplotlib.pyplot as plt\n'), ((8409, 8507), 'matplotlib.pyplot.text', 'plt.text', (['self.right.p', '(self.right.u + du)', '"""right"""'], {'horizontalalignment': '"""center"""', 'color': 'color'}), "(self.right.p, self.right.u + du, 'right', horizontalalignment=\n 'center', color=color)\n", (8417, 8507), True, 'import matplotlib.pyplot as plt\n'), ((8976, 9029), 'matplotlib.pyplot.legend', 'plt.legend', (['legs', 'legnames'], {'frameon': '(False)', 'loc': '"""best"""'}), "(legs, legnames, 
frameon=False, loc='best')\n", (8986, 9029), True, 'import matplotlib.pyplot as plt\n'), ((3333, 3433), 'numpy.sqrt', 'np.sqrt', (['(0.5 * (self.gamma + 1.0) / self.gamma * p_ratio + 0.5 * (self.gamma - 1.0) /\n self.gamma)'], {}), '(0.5 * (self.gamma + 1.0) / self.gamma * p_ratio + 0.5 * (self.gamma -\n 1.0) / self.gamma)\n', (3340, 3433), True, 'import numpy as np\n'), ((8740, 8800), 'matplotlib.pyplot.Line2D', 'plt.Line2D', (['(0, 1)', '(0, 0)'], {'color': '"""0.5"""', 'ls': '"""-"""', 'marker': 'None'}), "((0, 1), (0, 0), color='0.5', ls='-', marker=None)\n", (8750, 8800), True, 'import matplotlib.pyplot as plt\n'), ((8861, 8921), 'matplotlib.pyplot.Line2D', 'plt.Line2D', (['(0, 1)', '(0, 0)'], {'color': '"""0.5"""', 'ls': '""":"""', 'marker': 'None'}), "((0, 1), (0, 0), color='0.5', ls=':', marker=None)\n", (8871, 8921), True, 'import matplotlib.pyplot as plt\n'), ((1798, 1831), 'numpy.sqrt', 'np.sqrt', (['(1.0 + beta * p / state.p)'], {}), '(1.0 + beta * p / state.p)\n', (1805, 1831), True, 'import numpy as np\n'), ((7687, 7701), 'numpy.max', 'np.max', (['u_left'], {}), '(u_left)\n', (7693, 7701), True, 'import numpy as np\n'), ((7703, 7718), 'numpy.max', 'np.max', (['u_right'], {}), '(u_right)\n', (7709, 7718), True, 'import numpy as np\n'), ((7746, 7760), 'numpy.min', 'np.min', (['u_left'], {}), '(u_left)\n', (7752, 7760), True, 'import numpy as np\n'), ((7762, 7777), 'numpy.min', 'np.min', (['u_right'], {}), '(u_right)\n', (7768, 7777), True, 'import numpy as np\n'), ((9834, 9848), 'numpy.max', 'np.max', (['u_left'], {}), '(u_left)\n', (9840, 9848), True, 'import numpy as np\n'), ((9850, 9865), 'numpy.max', 'np.max', (['u_right'], {}), '(u_right)\n', (9856, 9865), True, 'import numpy as np\n'), ((9893, 9907), 'numpy.min', 'np.min', (['u_left'], {}), '(u_left)\n', (9899, 9907), True, 'import numpy as np\n'), ((9909, 9924), 'numpy.min', 'np.min', (['u_right'], {}), '(u_right)\n', (9915, 9924), True, 'import numpy as np\n'), ((2259, 2292), 'numpy.sqrt', 
'np.sqrt', (['(1.0 + beta * p / state.p)'], {}), '(1.0 + beta * p / state.p)\n', (2266, 2292), True, 'import numpy as np\n'), ((1719, 1765), 'numpy.sqrt', 'np.sqrt', (['(2.0 * self.gamma * (self.gamma - 1.0))'], {}), '(2.0 * self.gamma * (self.gamma - 1.0))\n', (1726, 1765), True, 'import numpy as np\n'), ((2176, 2222), 'numpy.sqrt', 'np.sqrt', (['(2.0 * self.gamma * (self.gamma - 1.0))'], {}), '(2.0 * self.gamma * (self.gamma - 1.0))\n', (2183, 2222), True, 'import numpy as np\n')] |
# SOCIAL NETWORK ANALYSIS PACKAGE
# AUTHORS: <NAME>, <NAME>, <NAME>
# LAST MODIFIED: 08/07/2020
# REQUIRED MODULES
import sys, os # Utils
import pandas as pd # Data wrangling
import numpy as np # Data wrangling
import math as math # Maths
import powerlaw as pwl # Statistical analysis of power law distributions
import networkx as nx # Network Analysis
import EoN # Network Epidemiology
from matplotlib import pyplot as plt # Data visualization
import seaborn as sns # Data visualization
import matplotlib.ticker as ticker # Data visualization
import seaborn as sns # Data visualization
from netwulf import visualize # Data visualization
from collections import Counter # Utils
import sys, os, os.path # Utils
import itertools # Utils
from progressbar import ProgressBar # Utils
from progressbar import Bar, Percentage # Utils
from operator import itemgetter # Utils
from collections import Counter # Utils
from collections import defaultdict # Utils
import random as rand # Utils
# CONTENTS
# 0. Basic Utilities
# 1. Network Data Science
# 2. Network Epidemiology
# 0. BASIC UTILITIES
### Omit Zeroes
def omit_by(dct, predicate=lambda x: x!=0):
    """Return a copy of *dct* keeping only entries whose value satisfies *predicate* (default: value is non-zero)."""
    filtered = {}
    for key, value in dct.items():
        if predicate(value):
            filtered[key] = value
    return filtered
### Logarithmic Binning
def log_bin(dict,n_bins):
    """
    Logarithmically bin a {node: value} mapping into *n_bins* bins.

    Parameters
    ----------
    dict : dict
        Mapping node -> strictly positive value (e.g. a degree).  All
        values must be > 0 for the log spacing to be defined.
        NOTE(review): the parameter shadows the builtin ``dict``; kept
        for interface compatibility.
    n_bins : int
        Number of logarithmic bins.

    Returns
    -------
    list of [x, y]
        For each non-empty bin: the mean value x of the entries falling
        in it and the normalized density y (count divided by bin width
        and by the total number of entries).
    """
    # first we need to define the interval of dict values
    min_val=sorted(dict.values())[0]
    max_val=sorted(dict.values())[-1]
    delta=(math.log(float(max_val))-math.log(float(min_val)))/n_bins
    # then we create the bins, in this case the log of the bins is equally spaced (bins size increases exponentially)
    bins=np.zeros(n_bins+1,float)
    bins[0]=min_val
    for i in range(1,n_bins+1):
        bins[i]=bins[i-1]*math.exp(delta)
    # then we need to assign the dict of each node to a bin
    values_in_bin=np.zeros(n_bins+1,float)
    nodes_in_bin=np.zeros(n_bins+1,float) # this vector is crucial to evalute how many nodes are inside each bin
    for i in dict:
        for j in range(1,n_bins+1):
            if j<n_bins:
                # interior bins are right-open: value < bins[j]
                if dict[i]<bins[j]:
                    values_in_bin[j]+=dict[i]
                    nodes_in_bin[j]+=1.
                    break
            else:
                # last bin is right-closed so that max_val is included
                if dict[i]<=bins[j]:
                    values_in_bin[j]+=dict[i]
                    nodes_in_bin[j]+=1.
                    break
    # then we need to evalutate the average x value in each bin
    for i in range(1,n_bins+1):
        if nodes_in_bin[i]>0:
            values_in_bin[i]=values_in_bin[i]/nodes_in_bin[i]
    # finally we get the binned distribution (empty bins are skipped)
    binned=[]
    for i in range(1,n_bins+1):
        if nodes_in_bin[i]>0:
            x=values_in_bin[i]
            y=nodes_in_bin[i]/((bins[i]-bins[i-1])*len(dict))
            binned.append([x,y])
    return binned
### Median
def median(files):
    """
    Compute the median of *files* together with 50% and 95% spread bounds.

    Parameters
    ----------
    files : sequence of comparable values

    Returns
    -------
    list containing one 5-element list:
        [median, lower 50% bound, upper 50% bound,
         lower 95% bound, upper 95% bound]
        where the bounds are values picked symmetrically around the
        median position of the reverse-sorted data.

    NOTE(review): the local variable ``median`` shadows the function's
    own name; harmless here but prevents recursion.
    """
    ite=len(files)
    out=[]
    if len(files)%2 ==0:
        # even number of samples: median is the mean of the two central values
        median=[]
        median=files
        median=sorted(median)
        median.reverse()
        ee=int(float(ite)/2.)
        # offsets of the 50% and 95% quantile positions around the centre
        m_cinq=ee-1-int((ee-1)*0.5)
        max_cinq=ee +int((ee-1)*0.5)
        m_novc=ee-1-int((ee-1)*0.95)
        max_novc=ee +int((ee-1)*0.95)
        out.append([(median[ee]+median[ee-1])/2.,median[m_cinq],median[max_cinq],median[m_novc],median[max_novc]])
    else:
        # odd number of samples: median is the central value
        median=[]
        median=files
        median=sorted(median)
        median.reverse()
        ee=int(float(ite)/2.+0.5)
        m_cinq=ee-1-int((ee-1)*0.5)
        max_cinq=ee-1+int((ee-1)*0.5)
        m_novc=ee-1-int((ee-1)*0.95)
        max_novc=ee-1+int((ee-1)*0.95)
        out.append([median[ee-1],median[m_cinq],median[max_cinq],median[m_novc],median[max_novc]])
    return out
# 1. NETWORK DATA SCIENCE
### Data Wrangling
def rtweet_to_networkx(fo, so, all = False, save = None):
    """
    Pipeline from rtweet edge-lists to networkx graphs.

    Parameters
    ----------
    fo : str
        Path to a CSV with a 'Target' column listing followers/friends.
    so : str
        Path to a CSV edge list with 'Source' and 'Target' columns.
    all : bool
        If True keep every edge; otherwise keep only edges whose target
        appears among the followers/friends.
    save : str or None
        If given, write the resulting graph to this GraphML file.

    Returns
    -------
    networkx.DiGraph

    Raises
    ------
    KeyError (re-raised) when the expected columns are missing.
    """
    # Read csv datasets
    fo_friends_csv = pd.read_csv(fo)
    so_edges_csv = pd.read_csv(so)
    try:
        fo_friends = fo_friends_csv["Target"].tolist()
    except Exception as err:
        print("Error! Expected column names are 'Source' and 'Target' for all csv.")
        raise err
    so_edges = list(zip(so_edges_csv["Source"].tolist(), so_edges_csv["Target"].tolist()))
    if all == True:
        edge_list = list(so_edges)
    else:
        # set gives O(1) membership tests instead of an O(n) list scan per edge
        friends_set = set(fo_friends)
        edge_list = [tup for tup in so_edges if tup[1] in friends_set]
    # Create directed graph
    G = nx.DiGraph()
    G.add_nodes_from(fo_friends) # add nodes
    G.add_edges_from(edge_list)  # add edges
    if save is not None:
        nx.write_graphml(G, save)
    return G
### Degree Distribution
#### Get Distribution
def get_degree_distribution(G, which):
    """
    Collect the (in/out) degree of every node of *G* together with the
    mean and variance of the distribution.

    Parameters
    ----------
    G : networkx graph (must be directed for 'in_degree'/'out_degree')
    which : str
        One of 'degree', 'in_degree' or 'out_degree'.

    Returns
    -------
    (dict, float, float) or None
        Mapping node -> degree, mean degree and degree variance;
        None when *which* is invalid or the graph does not support the
        requested degree view (an error message is printed).
    """
    if which == "degree":
        degree_view = dict(G.degree())
    elif which == "in_degree":
        try:
            degree_view = dict(G.in_degree())
        except AttributeError:
            # bug fix: previously fell through with degree_view unbound -> NameError
            print("Error, check the graph! Is it directed?")
            return None
    elif which == "out_degree":
        try:
            degree_view = dict(G.out_degree())
        except AttributeError:
            print("error, check the graph! Is it directed?")
            return None
    else:
        print("Invalid 'which' argument: it must be one of 'degree', 'in_degree' or 'out_degree'")
        return None
    degrees = np.array(list(degree_view.values()))
    mean = np.mean(degrees)
    var = np.var(degrees)
    return (degree_view, mean, var)
##### Visualization
def plot_degree_distribution(degree_distribution, hist = True, kde = True, log_binning = None, color = 'darkblue', hist_kws={'edgecolor':'black'}, kde_kws={'linewidth': 3}, title = "", log = False, dimensions = (15,8), display_stats = None):
    """
    Plot a degree distribution with seaborn's distplot.

    Parameters
    ----------
    degree_distribution : dict
        Mapping node -> degree (as returned by get_degree_distribution).
    hist, kde : bool
        Toggle histogram / kernel-density-estimate layers.
    log_binning : int or None
        If given, bin edges are computed with log_bin() using this many
        logarithmic bins; otherwise seaborn chooses the bins.
    color, hist_kws, kde_kws : plotting options forwarded to seaborn.
        NOTE(review): hist_kws/kde_kws are mutable default arguments;
        they are only passed through here, but callers should not
        mutate them.
    title : str, figure title.
    log : bool, if True use log-log axes.
    dimensions : tuple, figure size in inches.
    display_stats : unused (kept for interface compatibility; the
        related code is commented out below).
    """
    plt.rcParams['figure.figsize'] = dimensions
    if log_binning is not None:
        # zero-degree nodes must be dropped before logarithmic binning
        degree_distribution_nonzero = omit_by(dct = degree_distribution)
        log_distrib = log_bin(degree_distribution_nonzero,log_binning)
        bins = [0]+[lim[0] for lim in log_distrib]
    else:
        bins = None
    ax = sns.distplot(list(degree_distribution.values()), hist = hist, kde = kde, bins = bins , color = color, hist_kws = hist_kws , kde_kws = kde_kws)
    ax.set_title(title, fontsize = 16)
    ax.set_xlabel("$k$", fontsize = 14)
    ax.set_ylabel("$P(k)$", fontsize = 14)
    ax.tick_params(labelsize = 11)
    if log:
        ax.set_yscale("log")
        ax.set_xscale("log")
    # if display_stats is not None:
    #     mean = np.var(np.array(list(degree_distribution.values())))
    #     var = np.mean(np.array(list(degree_distribution.values())))
    #plt.gcf().text(0.9, 0.8, f"mean = {mean} \n var = {var}", fontsize=14) #, xy=(0.005, 700), xytext=(0.005, 700)
    plt.show()
### Centrality Metrics
##### Get Centralities
def get_centrality(G, type_centrality):
    """
    Rank the nodes of *G* by a chosen centrality measure.

    Parameters
    ----------
    G : networkx graph
    type_centrality : str
        One of 'degree', 'closeness', 'betweenness', 'eigenvector',
        'katz', 'pagerank' or 'random'.

    Returns
    -------
    list or int
        A list of [score, node] pairs sorted by decreasing score
        ('random' instead returns [node, node] pairs in random order),
        or 0 when *type_centrality* is not recognised.
    """
    if type_centrality == "random":
        # special case: no score, just a random permutation of the nodes
        centrality = [[i, i] for i in G.nodes()]
        rand.shuffle(centrality)
        return centrality
    # dispatch table: each measure yields a {node: score} mapping;
    # lambdas keep the expensive calls lazy until one is selected
    measures = {
        "degree": lambda: {i: G.degree(i) for i in G.nodes()},
        "closeness": lambda: nx.closeness_centrality(G),
        "betweenness": lambda: nx.betweenness_centrality(G),
        "eigenvector": lambda: nx.eigenvector_centrality(G, max_iter=1000, tol=1e-06),
        "katz": lambda: nx.katz_centrality(G, alpha=0.001, beta=1.0, max_iter=1000, tol=1e-06),
        "pagerank": lambda: nx.pagerank(G, 0.85),
    }
    if type_centrality not in measures:
        return 0
    scores = measures[type_centrality]()
    centrality = [[scores[i], i] for i in G.nodes()]
    return sorted(centrality, reverse=True)
##### Plot Centrality Distributions
def plot_centrality_distribution(G, list_centrality, color, n_bins):
    """Plot the log-binned distribution of a centrality measure on log-log axes."""
    # keep only strictly positive scores, remapped as node -> score
    positive_scores = {node: score for score, node in list_centrality if score > 0.}
    binned = log_bin(positive_scores, n_bins)
    # unpack the binned distribution into x and y coordinates
    xs = [point[0] for point in binned]
    ys = [point[1] for point in binned]
    plt.plot(xs, ys, color=color, linewidth=1.1, marker="o", alpha=0.55)
    plt.yscale('log')
    plt.xscale('log')
    plt.xlabel('$x$', fontsize = 15)
    plt.ylabel('$P(x)$', fontsize = 15)
    plt.xticks(fontsize=12)
    plt.yticks(fontsize=12)
    plt.show()
### POWER LAW ANALYSIS
def power_law_plot(graph, log = True,linear_binning = False, bins = 90, draw= True,x_min = None):
    """Fit the degree distribution of *graph* with the powerlaw package and optionally plot it against the empirical P(k)."""
    degrees = list(dict(graph.degree()).values())
    # powerlaw does not work if a bin is empty, so drop zero-degree nodes
    corrected_degree = [k for k in degrees if k != 0]
    if x_min is not None:
        corrected_degree = [k for k in corrected_degree if k > x_min]
    # fit powerlaw exponent and return distribution
    pwl_distri = pwl.pdf(corrected_degree, bins=bins)
    if draw:
        degree_counts = Counter(degrees)
        n_nodes = len(graph)
        # empirical degree distribution P(k)
        ks = sorted(degree_counts)
        pks = [degree_counts[k] / n_nodes for k in ks]
        #plot our distributon compared to powerlaw
        plt.yscale('log')
        plt.xscale('log')
        plt.plot(ks, pks, 'ro')
        plt.xticks(fontsize=15)
        plt.yticks(fontsize=15)
        plt.xlabel('$k$', fontsize=16)
        plt.ylabel('$P(k)$', fontsize=16)
        if linear_binning:
            pwl.plot_pdf(corrected_degree, linear_bins=True, color='black', linewidth=2)
        else:
            pwl.plot_pdf(corrected_degree, color='black', linewidth=2)
    return pwl_distri
### COMMUNITY DETECTION
##### Modularity Evaluation
def modularity(partition, graph=None):
    """
    Compute the modularity of *partition*.

    Parameters
    ----------
    partition : list of node collections (one per community)
    graph : networkx graph, optional
        Graph on which the partition is evaluated.  When omitted, falls
        back to a module-level global ``G`` (the original behaviour,
        which raises NameError if no such global exists — pass the
        graph explicitly).

    Returns
    -------
    float
    """
    target = G if graph is None else graph
    return nx.community.quality.modularity(target, partition)
##### Partition Mapping
def create_partition_map(partition):
    """Invert a partition (list of node groups) into a node -> cluster-index mapping."""
    return {node: cluster_id
            for cluster_id, members in enumerate(partition)
            for node in members}
# 2. EPIDEMIC DYNAMICS
## 2.1 EPIDEMIC DYNAMICS ON STATIC NETWORKS
#### Multi-Run Simulation
def network_SIR_multirun_simulation(G, nrun, lambd, mu):
    """
    Run *nrun* independent stochastic SIR simulations on graph *G*.

    Each run is seeded at the highest-degree node.  Disease states are
    coded S=0, I=1, R=-1 and stored on ``G.disease_status`` (the graph
    object is mutated).

    Parameters
    ----------
    G : networkx graph
    nrun : int, number of independent runs
    lambd : float, per-contact transmission probability (S -> I)
    mu : float, recovery probability per time step (I -> R)

    Returns
    -------
    list of lists
        One prevalence time series (number of infected nodes per step)
        per run.

    NOTE(review): ``I_dict`` is filled but never returned; moreover at
    t=0 it stores the seed node list itself while later steps store
    counts — looks like leftover/inconsistent bookkeeping.
    """
    I_dict = defaultdict(list) # Define the time series dictionary for I
    Irun = [] # Define the multi-run list of lists for I
    for run in range(0,nrun):
        # Create a dictionary of nodal infection/disease states s.t. S=0, I=1, R=-1
        G.disease_status = {}
        # Create a list of infected notes
        I_nodes = []
        # Choose a seed: the last node after sorting by degree, i.e. the highest-degree node
        node_list = []
        deg = dict(G.degree())
        for i in sorted(deg.items(), key = itemgetter(1)):
            node_list.append(i[0])
        seed = node_list[-1]
        # Initialize the network
        I_nodes.append(seed)
        for n in G.nodes():
            if n in I_nodes:
                # Infected
                G.disease_status[n] = 1
            else:
                # Susceptible
                G.disease_status[n] = 0
        t = 0 # Initialize the clock
        I_list = [] # Define the single-run list for I
        I_list.append(len(I_nodes)) # Initialize the single-run list for I
        I_dict[t].append(I_nodes) # Initialize the time series dictionary for I
        # Implement the dynamical model until the epidemic dies out
        while len(I_nodes)>0:
            # Transmission dynamics (S -> I)
            for i in I_nodes: # For any infected node
                for j in G.neighbors(i): # For any of its neighbours
                    if G.disease_status[j] == 0: # If it's S,
                        p = np.random.random() # then infect it with probability lambda
                        if p < lambd:
                            G.disease_status[j] = 1
            # Recovery dynamics (I -> R)
            for k in I_nodes: # For any infected node
                p = np.random.random() # It recovers with probability mu
                if p < mu:
                    G.disease_status[k] = -1
            # Update infected nodes
            I_nodes = []
            for node in G.nodes():
                if G.disease_status[node] == 1:
                    I_nodes.append(node)
            t += 1
            # Register the prevalence for each time step
            #I_graph.append(len(infected_nodes))
            I_list.append(len(I_nodes))
            I_dict[t].append(len(I_nodes))
        Irun.append(I_list)
    return Irun
def network_SIR_finalsize_lambda_sensitivity(G, mu, rho, lambda_min, lambda_max, nruns):
    """Sweep the transmission rate over a geometric grid and collect the final epidemic size of repeated EoN fast_SIR runs; returns a DataFrame with one column per rate."""
    #average_degree = 2 * G.number_of_edges() / G.number_of_nodes()
    #lc = mu / average_degree
    final_size = defaultdict(list)  # normalized attack rate per transmission rate
    for transmission_rate in np.geomspace(lambda_min, lambda_max, nruns):
        for _ in range(nruns):
            t, S, I, R = EoN.fast_SIR(G, tau=transmission_rate, gamma=mu, rho=rho)
            final_size[transmission_rate].append(R[-1] / G.number_of_nodes())
    return pd.DataFrame.from_dict(final_size)
#### Visualization
def plot_ensemble(runs):
    """Overlay the prevalence trajectory of every simulation run on one figure."""
    #plt.figure(figsize = (10,7))
    plt.xticks(fontsize = 11)
    plt.yticks(fontsize = 11)
    plt.xlabel('Time', fontsize = 16)
    plt.ylabel('Prevalence', fontsize = 16)
    for trajectory in runs:
        plt.plot(range(len(trajectory)), trajectory)
def boxplot_finalsize_lambda_sensitivity(G, mu, data, ymin, ymax, xlim):
    """
    Box-plot the final epidemic size against the transmission rate,
    marking the homogeneous-mixing epidemic threshold lc = mu / <k>.

    Parameters
    ----------
    G : networkx graph (used only to compute the average degree)
    mu : float, recovery rate
    data : pandas.DataFrame whose columns are transmission rates
        (as produced by network_SIR_finalsize_lambda_sensitivity)
    ymin, ymax : float, extent of the vertical threshold marker
    xlim : tuple, x-axis limits
    """
    average_degree = 2 * G.number_of_edges() / G.number_of_nodes()
    lc = mu / average_degree  # epidemic threshold under homogeneous mixing
    data.boxplot(positions=np.array(data.columns),
                 widths=np.array(data.columns)/3)
    plt.vlines(x=lc, ymin=ymin, ymax=ymax)
    plt.xscale('log')
    plt.yscale('log')
    plt.xlim(xlim)
    plt.ylim(0.045, 1.1)
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    plt.ylabel('Final Epidemic Size ($R_f / |V_G|$)', fontsize=18)
    # raw string: avoids the invalid '\l' escape warning; rendered label unchanged
    plt.xlabel(r'Transmission Rate per Contact ($\lambda$)', fontsize=18)
    plt.show()
def random_walk(G,source,stop,t,nt,visited):
    """
    Recursive uniform random walk on *G*.

    Parameters
    ----------
    G : graph exposing ``neighbors(node)``
    source : node the walker currently occupies
    stop : int, last time step of the walk
    t : int, current time step
    nt : sequence recording the node visited at each time step (mutated)
    visited : mapping/sequence counting visits per node (mutated)

    Returns
    -------
    0 when the walk terminates (intermediate recursive calls return None).
    """
    nt[t]=source # at time t the walker visits node "source"
    visited[source]+=1 # the node has been visited another time
    # the process ends after reaching a certain threshold
    if t<stop:
        # explore the neighbors
        neighbors=list(G.neighbors(source))
        # select one uniformly at random
        # bug fix: the module is imported as ``rand``; ``random.randint`` raised NameError
        target=neighbors[rand.randint(0,len(neighbors)-1)]
        # move there using the same function
        random_walk(G,target,stop,t+1,nt,visited)
    else:
        return 0
def get_coverage(nt):
    """Return, for each time step of walk trajectory *nt*, how many distinct nodes have been visited so far."""
    coverage = np.zeros(nt.size, int)
    seen = set()
    for step in range(nt.size):
        seen.add(nt[step])
        # the coverage at time t is the number of distinct nodes seen up to t
        coverage[step] = len(seen)
    return coverage
# let us see another way to do it, without recursion
def random_walk2(G,source,stop,nt,visited):
    """
    Iterative uniform random walk on *G* (non-recursive variant of
    ``random_walk``).

    Parameters
    ----------
    G : graph exposing ``neighbors(node)``
    source : starting node
    stop : int, number of time steps to walk
    nt : sequence recording the node visited at each time step (mutated)
    visited : mapping/sequence counting visits per node (mutated)
    """
    t=0
    while t<stop:
        visited[source]+=1
        nt[t]=source
        neighbors=list(G.neighbors(source))
        # bug fix: the module is imported as ``rand``; ``random.randint`` raised NameError
        target=neighbors[rand.randint(0,len(neighbors)-1)]
        source=target
        t+=1
def SIR_hm(beta,mu,N,status):
    """
    One step of a stochastic homogeneous-mixing SIR model.

    Parameters
    ----------
    beta : float, transmission rate
    mu : float, recovery probability
    N : int, total population of the (sub)population
    status : sequence [S, I, R] of compartment counts (mutated in place)

    Returns
    -------
    0 (always; the update happens in place on *status*).
    """
    p_1=0.
    delta_1=0.
    delta_2=0.
    p_1=beta*float(status[1])/N ## P(S-->I)
    p_2=mu ## P(I--->R)
    if p_1>0.:
        # binomial extraction to identify the number of infected people going to I given p_1
        delta_1=np.random.binomial(status[0], p_1)
    # NOTE(review): recovery is drawn only when status[2] (R) is non-zero;
    # an I-based guard (status[1]!=0) would seem more natural — confirm intent.
    if status[2]!=0:
        delta_2=np.random.binomial(status[1],p_2)
    # update the compartments
    status[0]-= delta_1
    status[1]+= delta_1
    status[1]-= delta_2
    status[2]+= delta_2 # R is id=2
    return 0
def ini_subpop(G,average_V,s,x):
    """
    Initialise the subpopulations of a metapopulation SIR model.

    Parameters
    ----------
    G : graph whose nodes are the subpopulations
        (nodes are assumed to be the integers 0..N-1, used as indices)
    average_V : int, number of individuals placed in every subpopulation
    s : float, fraction of subpopulations initially seeded with infected
    x : int, number of individuals moved from S to I in each seed

    Returns
    -------
    dict mapping node -> np.array([S, I, R]) of compartment counts.
    """
    # let assign V people to each subpopulation
    N = G.number_of_nodes()
    V=np.zeros(N,int)
    for i in G.nodes():
        V[i]=average_V
    # inside each subpopulation people are divided in compartments S,I,R
    compartments = {0: 'S', 1: 'I', 2: 'R'}
    # note that this could be read from file
    # for each subpopulation, count how many people sit in each compartment
    status_subpop={}
    for i in G.nodes():
        status_subpop.setdefault(i,np.zeros(3,int))
        for j in compartments:
            if compartments[j]=='S': # initially they are all S
                status_subpop[i][j]=V[i]
            else:
                status_subpop[i][j]=0
    # select a random fraction s of the subpopulations as initial seeds
    n_of_infected=int(s*N)
    list_subpop = list(range(N))
    # bug fix: the module is imported as ``rand``; ``random.shuffle`` raised NameError
    rand.shuffle(list_subpop)
    # now let's add a number of infected people in the selected subpopulations
    for i in range(n_of_infected):
        seed_subpop=list_subpop[i]
        # for each initial seed we need to change the subpop distribution
        for j in compartments:
            if compartments[j]=='S': # we remove x people
                status_subpop[seed_subpop][j]-=x
            if compartments[j]=='I': # we make them infected!
                status_subpop[seed_subpop][j]+=x
    return status_subpop
# what about using a different d_kk'?
# remember from the lecture a more realistic one is d_kk' ~ (kk')^(theta)
# let's create the weights first
def get_p_traveling(theta,G):
    """
    Build, for every node, the normalised travelling probabilities to its
    neighbours, with rate d_ij proportional to (k_i * k_j)**theta.
    Returns a nested dict {i: {j: p_ij}} with sum_j p_ij = 1 for each i.
    """
    dij = {}
    for node in G.nodes():
        dij.setdefault(node, {})
        norm = 0.
        # unnormalised weights w_ij = (k_i k_j)^theta towards each neighbour
        for neigh in G.neighbors(node):
            weight = (G.degree(node) * G.degree(neigh)) ** theta
            dij[node].setdefault(neigh, weight)
            norm += weight  # normalisation factor: sum_j w_ij
        # turn the weights into probabilities
        for neigh in dij[node]:
            dij[node][neigh] = dij[node][neigh] / norm
    return dij
def random_walk4(G,stop,dij,p,W):
    """
    Diffuse walkers over *G* for *stop* steps with heterogeneous rates.

    At each step, each of the W[i] walkers on node i travels to
    neighbour j with probability p*dij[i][j] and stays put with
    probability 1-p; the split is drawn from a multinomial.

    Parameters
    ----------
    G : graph exposing nodes()/neighbors(); nodes assumed to be 0..N-1
    stop : int, number of diffusion steps
    dij : nested dict {i: {j: d_ij}} of normalised travel rates
        (as produced by get_p_traveling)
    p : float, overall probability of travelling at each step
    W : integer array of walkers per node (mutated in place)
    """
    t=0
    N=G.number_of_nodes()
    while t<stop:
        # temporary vectors storing, for this step, arrivals and stayers
        temp=np.zeros(N,int)
        temp2=np.zeros(N,int)
        for source in G.nodes():
            # for each node we let diffuse the walkers out of it
            neighbors=list(G.neighbors(source))
            # build the outcome probabilities; note p is not 1 here
            prob=[]
            for j in neighbors:
                prob.append(p*dij[source][j]) # with prob p they travel to j with prob p*d_ij
            # with prob 1-p they stay
            prob.append(1.-p)
            output=np.random.multinomial(W[source], prob, size=1)
            # after calling the multinomial we know how to divide W(i)
            # NOTE(review): ``id`` shadows the builtin and always equals j here
            id=0
            for j in range(len(output[0])-1):
                temp[neighbors[id]]+=output[0][j] # these are the traveling in
                id+=1
            temp2[source]=output[0][-1] # these are those staying in source
        # after the loop across all nodes
        # we update the values of W
        for i in G.nodes():
            W[i]=temp[i]+temp2[i] #since p!=0, this is given by those than arrive plus those that stayed
        t+=1
# let's convert all of this into a function
def metapop(t_max,N,compartments,status_subpop,G,beta,mu,p,theta,dij):
    """
    Run a metapopulation SIR model: at each step, individuals of every
    compartment diffuse over *G* (random_walk4), then each subpopulation
    with at least one infected undergoes one SIR_hm update.

    Parameters
    ----------
    t_max : int, number of time steps
    N : int, number of subpopulations (nodes of G, assumed 0..N-1)
    compartments : dict index -> compartment name ('S','I','R')
    status_subpop : dict node -> array [S, I, R] (mutated in place)
    G : graph of subpopulations
    beta, mu : float, SIR transmission and recovery parameters
    p : float, travelling probability per step
    theta : unused here (travel rates come pre-computed in *dij*)
    dij : nested dict of normalised travel rates (get_p_traveling)

    Returns
    -------
    (diseased, prevalence) : two dicts keyed by time step with the
        number of subpopulations containing infected and the total
        number of infected individuals, respectively.
    """
    diseased={} # for each t let's save the number of diseased subpop
    prevalence={} # for each t let's save the number of infected people
    for t in range(t_max):
        # at each iteration the first thing is to make people travel
        # we make each compartment travel separately
        for j in compartments:
            people_traveling=np.zeros(N,int) # this is the vector of people traveling in comp j
            for k in G.nodes():
                people_traveling[k]+=status_subpop[k][j]
            # we then call the random walk function for 1 time step
            random_walk4(G,1,dij,p,people_traveling)
            # we update the populations given the travels
            for k in G.nodes():
                status_subpop[k][j]=people_traveling[k]
        # after the traveling we can call the SIR model in each subpopulation
        for k in G.nodes():
            tot_pop=0 # we need to know how many people are living in each subpop
            inf=0 # also we run the SIR just if there are infected
            for j in compartments:
                tot_pop+=status_subpop[k][j]
                if j==1:
                    inf=status_subpop[k][j]
            if inf>0:
                SIR_hm(beta,mu,tot_pop,status_subpop[k]) # note how we are passing status_subpop[k] to the function
        #let's see how many diseased subpopulation we have
        disease_sub_pop=0
        tot_inf=0.
        for k in G.nodes():
            if status_subpop[k][1]>0:
                disease_sub_pop+=1
                tot_inf+=status_subpop[k][1]
        diseased[t]=disease_sub_pop
        prevalence[t]=tot_inf
    return diseased, prevalence
# 4.2 EPIDEMIC DYNAMICS ON TEMPORAL NETWORKS | [
"matplotlib.pyplot.yscale",
"pandas.read_csv",
"random.shuffle",
"numpy.random.multinomial",
"powerlaw.pdf",
"collections.defaultdict",
"networkx.closeness_centrality",
"networkx.betweenness_centrality",
"numpy.geomspace",
"matplotlib.pyplot.yticks",
"collections.Counter",
"matplotlib.pyplot.x... | [((1903, 1930), 'numpy.zeros', 'np.zeros', (['(n_bins + 1)', 'float'], {}), '(n_bins + 1, float)\n', (1911, 1930), True, 'import numpy as np\n'), ((2114, 2141), 'numpy.zeros', 'np.zeros', (['(n_bins + 1)', 'float'], {}), '(n_bins + 1, float)\n', (2122, 2141), True, 'import numpy as np\n'), ((2156, 2183), 'numpy.zeros', 'np.zeros', (['(n_bins + 1)', 'float'], {}), '(n_bins + 1, float)\n', (2164, 2183), True, 'import numpy as np\n'), ((4198, 4213), 'pandas.read_csv', 'pd.read_csv', (['fo'], {}), '(fo)\n', (4209, 4213), True, 'import pandas as pd\n'), ((4233, 4248), 'pandas.read_csv', 'pd.read_csv', (['so'], {}), '(so)\n', (4244, 4248), True, 'import pandas as pd\n'), ((4920, 4932), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (4930, 4932), True, 'import networkx as nx\n'), ((7160, 7170), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7168, 7170), True, 'from matplotlib import pyplot as plt\n'), ((9481, 9573), 'matplotlib.pyplot.plot', 'plt.plot', (['x_centrality', 'y_centrality'], {'color': 'color', 'linewidth': '(1.1)', 'marker': '"""o"""', 'alpha': '(0.55)'}), "(x_centrality, y_centrality, color=color, linewidth=1.1, marker='o',\n alpha=0.55)\n", (9489, 9573), True, 'from matplotlib import pyplot as plt\n'), ((9572, 9589), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (9582, 9589), True, 'from matplotlib import pyplot as plt\n'), ((9594, 9611), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (9604, 9611), True, 'from matplotlib import pyplot as plt\n'), ((9616, 9646), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x$"""'], {'fontsize': '(15)'}), "('$x$', fontsize=15)\n", (9626, 9646), True, 'from matplotlib import pyplot as plt\n'), ((9653, 9686), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$P(x)$"""'], {'fontsize': '(15)'}), "('$P(x)$', fontsize=15)\n", (9663, 9686), True, 'from matplotlib import pyplot as plt\n'), ((9693, 9716), 'matplotlib.pyplot.xticks', 
'plt.xticks', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (9703, 9716), True, 'from matplotlib import pyplot as plt\n'), ((9721, 9744), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (9731, 9744), True, 'from matplotlib import pyplot as plt\n'), ((9749, 9759), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9757, 9759), True, 'from matplotlib import pyplot as plt\n'), ((10252, 10288), 'powerlaw.pdf', 'pwl.pdf', (['corrected_degree'], {'bins': 'bins'}), '(corrected_degree, bins=bins)\n', (10259, 10288), True, 'import powerlaw as pwl\n'), ((11187, 11232), 'networkx.community.quality.modularity', 'nx.community.quality.modularity', (['G', 'partition'], {}), '(G, partition)\n', (11218, 11232), True, 'import networkx as nx\n'), ((11639, 11656), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (11650, 11656), False, 'from collections import defaultdict\n'), ((14326, 14343), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (14337, 14343), False, 'from collections import defaultdict\n'), ((14391, 14434), 'numpy.geomspace', 'np.geomspace', (['lambda_min', 'lambda_max', 'nruns'], {}), '(lambda_min, lambda_max, nruns)\n', (14403, 14434), True, 'import numpy as np\n'), ((14637, 14671), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['final_size'], {}), '(final_size)\n', (14659, 14671), True, 'import pandas as pd\n'), ((14797, 14820), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(11)'}), '(fontsize=11)\n', (14807, 14820), True, 'from matplotlib import pyplot as plt\n'), ((14827, 14850), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(11)'}), '(fontsize=11)\n', (14837, 14850), True, 'from matplotlib import pyplot as plt\n'), ((14857, 14888), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {'fontsize': '(16)'}), "('Time', fontsize=16)\n", (14867, 14888), True, 'from matplotlib import pyplot as plt\n'), ((14895, 14932), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Prevalence"""'], {'fontsize': '(16)'}), "('Prevalence', fontsize=16)\n", (14905, 14932), True, 'from matplotlib import pyplot as plt\n'), ((15284, 15322), 'matplotlib.pyplot.vlines', 'plt.vlines', ([], {'x': 'lc', 'ymin': 'ymin', 'ymax': 'ymax'}), '(x=lc, ymin=ymin, ymax=ymax)\n', (15294, 15322), True, 'from matplotlib import pyplot as plt\n'), ((15327, 15344), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (15337, 15344), True, 'from matplotlib import pyplot as plt\n'), ((15349, 15366), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (15359, 15366), True, 'from matplotlib import pyplot as plt\n'), ((15371, 15385), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (15379, 15385), True, 'from matplotlib import pyplot as plt\n'), ((15391, 15411), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.045)', '(1.1)'], {}), '(0.045, 1.1)\n', (15399, 15411), True, 'from matplotlib import pyplot as plt\n'), ((15416, 15439), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (15426, 15439), True, 'from matplotlib import pyplot as plt\n'), ((15444, 15467), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (15454, 15467), True, 'from matplotlib import pyplot as plt\n'), ((15472, 15534), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Final Epidemic Size ($R_f / |V_G|$)"""'], {'fontsize': '(18)'}), "('Final Epidemic Size ($R_f / |V_G|$)', fontsize=18)\n", (15482, 15534), True, 'from matplotlib import pyplot as plt\n'), ((15539, 15608), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Transmission Rate per Contact ($\\\\lambda$)"""'], {'fontsize': '(18)'}), "('Transmission Rate per Contact ($\\\\lambda$)', fontsize=18)\n", (15549, 15608), True, 'from matplotlib import pyplot as plt\n'), ((15612, 15622), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15620, 15622), True, 'from 
matplotlib import pyplot as plt\n'), ((16192, 16214), 'numpy.zeros', 'np.zeros', (['nt.size', 'int'], {}), '(nt.size, int)\n', (16200, 16214), True, 'import numpy as np\n'), ((17386, 17402), 'numpy.zeros', 'np.zeros', (['N', 'int'], {}), '(N, int)\n', (17394, 17402), True, 'import numpy as np\n'), ((5060, 5085), 'networkx.write_graphml', 'nx.write_graphml', (['G', 'save'], {}), '(G, save)\n', (5076, 5085), True, 'import networkx as nx\n'), ((10339, 10354), 'collections.Counter', 'Counter', (['degree'], {}), '(degree)\n', (10346, 10354), False, 'from collections import Counter\n'), ((10646, 10663), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (10656, 10663), True, 'from matplotlib import pyplot as plt\n'), ((10672, 10689), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (10682, 10689), True, 'from matplotlib import pyplot as plt\n'), ((10698, 10718), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""ro"""'], {}), "(x, y, 'ro')\n", (10706, 10718), True, 'from matplotlib import pyplot as plt\n'), ((10726, 10749), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (10736, 10749), True, 'from matplotlib import pyplot as plt\n'), ((10758, 10781), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (10768, 10781), True, 'from matplotlib import pyplot as plt\n'), ((10791, 10821), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$k$"""'], {'fontsize': '(16)'}), "('$k$', fontsize=16)\n", (10801, 10821), True, 'from matplotlib import pyplot as plt\n'), ((10830, 10863), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$P(k)$"""'], {'fontsize': '(16)'}), "('$P(k)$', fontsize=16)\n", (10840, 10863), True, 'from matplotlib import pyplot as plt\n'), ((16992, 17026), 'numpy.random.binomial', 'np.random.binomial', (['status[0]', 'p_1'], {}), '(status[0], p_1)\n', (17010, 17026), True, 'import numpy as np\n'), ((17073, 17107), 
'numpy.random.binomial', 'np.random.binomial', (['status[1]', 'p_2'], {}), '(status[1], p_2)\n', (17091, 17107), True, 'import numpy as np\n'), ((19810, 19826), 'numpy.zeros', 'np.zeros', (['N', 'int'], {}), '(N, int)\n', (19818, 19826), True, 'import numpy as np\n'), ((19840, 19856), 'numpy.zeros', 'np.zeros', (['N', 'int'], {}), '(N, int)\n', (19848, 19856), True, 'import numpy as np\n'), ((2006, 2021), 'math.exp', 'math.exp', (['delta'], {}), '(delta)\n', (2014, 2021), True, 'import math as math\n'), ((7531, 7557), 'networkx.closeness_centrality', 'nx.closeness_centrality', (['G'], {}), '(G)\n', (7554, 7557), True, 'import networkx as nx\n'), ((10904, 10980), 'powerlaw.plot_pdf', 'pwl.plot_pdf', (['corrected_degree'], {'linear_bins': '(True)', 'color': '"""black"""', 'linewidth': '(2)'}), "(corrected_degree, linear_bins=True, color='black', linewidth=2)\n", (10916, 10980), True, 'import powerlaw as pwl\n'), ((11007, 11065), 'powerlaw.plot_pdf', 'pwl.plot_pdf', (['corrected_degree'], {'color': '"""black"""', 'linewidth': '(2)'}), "(corrected_degree, color='black', linewidth=2)\n", (11019, 11065), True, 'import powerlaw as pwl\n'), ((14502, 14547), 'EoN.fast_SIR', 'EoN.fast_SIR', (['G'], {'tau': 'lambd', 'gamma': 'mu', 'rho': 'rho'}), '(G, tau=lambd, gamma=mu, rho=rho)\n', (14514, 14547), False, 'import EoN\n'), ((15200, 15222), 'numpy.array', 'np.array', (['data.columns'], {}), '(data.columns)\n', (15208, 15222), True, 'import numpy as np\n'), ((17908, 17924), 'numpy.zeros', 'np.zeros', (['(3)', 'int'], {}), '(3, int)\n', (17916, 17924), True, 'import numpy as np\n'), ((20314, 20360), 'numpy.random.multinomial', 'np.random.multinomial', (['W[source]', 'prob'], {'size': '(1)'}), '(W[source], prob, size=1)\n', (20335, 20360), True, 'import numpy as np\n'), ((21389, 21405), 'numpy.zeros', 'np.zeros', (['N', 'int'], {}), '(N, int)\n', (21397, 21405), True, 'import numpy as np\n'), ((7781, 7809), 'networkx.betweenness_centrality', 'nx.betweenness_centrality', (['G'], 
{}), '(G)\n', (7806, 7809), True, 'import networkx as nx\n'), ((12124, 12137), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (12134, 12137), False, 'from operator import itemgetter\n'), ((13516, 13534), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (13532, 13534), True, 'import numpy as np\n'), ((15249, 15271), 'numpy.array', 'np.array', (['data.columns'], {}), '(data.columns)\n', (15257, 15271), True, 'import numpy as np\n'), ((8033, 8087), 'networkx.eigenvector_centrality', 'nx.eigenvector_centrality', (['G'], {'max_iter': '(1000)', 'tol': '(1e-06)'}), '(G, max_iter=1000, tol=1e-06)\n', (8058, 8087), True, 'import networkx as nx\n'), ((8304, 8374), 'networkx.katz_centrality', 'nx.katz_centrality', (['G'], {'alpha': '(0.001)', 'beta': '(1.0)', 'max_iter': '(1000)', 'tol': '(1e-06)'}), '(G, alpha=0.001, beta=1.0, max_iter=1000, tol=1e-06)\n', (8322, 8374), True, 'import networkx as nx\n'), ((13198, 13216), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (13214, 13216), True, 'import numpy as np\n'), ((8595, 8615), 'networkx.pagerank', 'nx.pagerank', (['G', '(0.85)'], {}), '(G, 0.85)\n', (8606, 8615), True, 'import networkx as nx\n'), ((8927, 8951), 'random.shuffle', 'rand.shuffle', (['centrality'], {}), '(centrality)\n', (8939, 8951), True, 'import random as rand\n')] |
import argparse
import pandas as pd
import numpy as np
import commons
from get_statistics import get_statistics
def predict_popular(train_tracks, train_tags, test_tracks, test_tags, tags_order):
    """Baseline: always predict the single most popular training tag.

    Returns a boolean matrix of shape (len(test_tracks), len(tags_order))
    where only the column of the overall most popular tag is True.
    """
    top_per_category = {}
    for category in commons.CATEGORIES:
        stats, _ = get_statistics(category, train_tracks, train_tags)
        if len(stats) == 0:
            continue
        # Keep the most frequent tag of this category with its track count.
        ranked = stats.sort_values(by='tracks', ascending=False).reset_index(drop=True)
        top_per_category[ranked['tag'][0]] = {'tracks': ranked['tracks'][0], 'category': category}
    print(top_per_category)
    best_tag = max(top_per_category, key=lambda tag: top_per_category[tag]['tracks'])
    full_tag = top_per_category[best_tag]['category'] + commons.TAG_HYPHEN + best_tag
    predictions = np.zeros([len(test_tracks), len(tags_order)], dtype=bool)
    # Mark the winning tag's column for every test track.
    predictions[:, tags_order.index[tags_order[0] == full_tag]] = True
    return predictions
# super non-optimized, refactor due after tags rework
def predict_random(train_tracks, train_tags, test_tracks, test_tags, tags_order):
    """Baseline: predict every tag with its frequency in the training data.

    Returns an array of shape (len(test_tracks), len(tags_order)) where every
    row is the same vector of per-tag training frequencies.
    """
    total = len(train_tracks)
    ratios = {}
    for category in commons.CATEGORIES:
        stats, _ = get_statistics(category, train_tracks, train_tags)
        if len(stats) == 0:
            continue
        for _, entry in stats.iterrows():
            # Tag names are only unique within a category, so prefix them.
            ratios[category + commons.TAG_HYPHEN + entry['tag']] = entry['tracks'] / total
    frequencies = np.zeros(len(tags_order))
    for idx, entry in tags_order.iterrows():
        frequencies[idx] = ratios[entry[0]]
    return np.tile(frequencies, (len(test_tracks), 1))
# Mapping from the --algorithm CLI choice to its prediction function.
ALGORITHMS = {
    'popular': predict_popular,
    'random': predict_random
}
# Algorithm used when --algorithm is not given on the command line.
DEFAULT_ALGORITHM = 'popular'
if __name__ == '__main__':
    # Command-line entry point: load train/test metadata, run the chosen
    # baseline algorithm and store the prediction matrix as an NPY file.
    parser = argparse.ArgumentParser(description='Generates predictions based on naive baseline algorithms')
    parser.add_argument('train_file', help=commons.METADATA_DESCRIPTION)
    parser.add_argument('test_file', help=commons.METADATA_DESCRIPTION)
    parser.add_argument('tags_file', help='file with tag order that will be used')
    parser.add_argument('output_file', help='output NPY file ')
    parser.add_argument('--algorithm', choices=ALGORITHMS.keys(), default=DEFAULT_ALGORITHM,
                        help='algorithm to use')
    args = parser.parse_args()
    func = ALGORITHMS[args.algorithm]
    train_tracks, train_tags, _ = commons.read_file(args.train_file)
    test_tracks, test_tags, _ = commons.read_file(args.test_file)
    # Tag order file is a headerless TSV; column 0 holds the full tag names.
    tags_order = pd.read_csv(args.tags_file, delimiter='\t', header=None)
    # Reuse the function already looked up above instead of a second
    # (previously duplicated) dict lookup; `func` was assigned but unused.
    data = func(train_tracks, train_tags, test_tracks, test_tags, tags_order)
    np.save(args.output_file, data)
| [
"numpy.save",
"argparse.ArgumentParser",
"commons.read_file",
"pandas.read_csv",
"get_statistics.get_statistics"
] | [((1871, 1971), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generates predictions based on naive baseline algorithms"""'}), "(description=\n 'Generates predictions based on naive baseline algorithms')\n", (1894, 1971), False, 'import argparse\n'), ((2505, 2539), 'commons.read_file', 'commons.read_file', (['args.train_file'], {}), '(args.train_file)\n', (2522, 2539), False, 'import commons\n'), ((2572, 2605), 'commons.read_file', 'commons.read_file', (['args.test_file'], {}), '(args.test_file)\n', (2589, 2605), False, 'import commons\n'), ((2623, 2679), 'pandas.read_csv', 'pd.read_csv', (['args.tags_file'], {'delimiter': '"""\t"""', 'header': 'None'}), "(args.tags_file, delimiter='\\t', header=None)\n", (2634, 2679), True, 'import pandas as pd\n'), ((2785, 2816), 'numpy.save', 'np.save', (['args.output_file', 'data'], {}), '(args.output_file, data)\n', (2792, 2816), True, 'import numpy as np\n'), ((280, 330), 'get_statistics.get_statistics', 'get_statistics', (['category', 'train_tracks', 'train_tags'], {}), '(category, train_tracks, train_tags)\n', (294, 330), False, 'from get_statistics import get_statistics\n'), ((1263, 1313), 'get_statistics.get_statistics', 'get_statistics', (['category', 'train_tracks', 'train_tags'], {}), '(category, train_tracks, train_tags)\n', (1277, 1313), False, 'from get_statistics import get_statistics\n')] |
import logging
import numpy as np
# Logger
def get_logger(file_path):
    """Create (or return) the shared "darts" logger.

    Logs at INFO level to both ``file_path`` (append mode) and the console,
    each line prefixed with a ``month/day hour:minute:second`` timestamp.

    :param file_path: path of the log file to append to
    :return: the configured ``logging.Logger`` instance
    """
    logger = logging.getLogger("darts")
    # ``getLogger`` returns the same object for the same name, so guard
    # against attaching duplicate handlers (and thus duplicated log lines)
    # when this function is called more than once.
    if not logger.handlers:
        log_format = "%(asctime)s | %(message)s"
        formatter = logging.Formatter(log_format, datefmt="%m/%d %I:%M:%S %p")
        file_handler = logging.FileHandler(file_path, mode="a")
        file_handler.setFormatter(formatter)
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        logger.addHandler(stream_handler)
        logger.setLevel(logging.INFO)
    return logger
# Top k accuracy
def top_n_accuracy(preds, ts, n):
    """
    Fraction of samples whose true class is among the n highest-scoring classes.

    ts: np array (nb_observations,)
    preds: prediction probabilities np array (nb_observations, n_classes)
    """
    # Column indices of the n largest scores per row (row order is irrelevant).
    top_indices = np.argsort(preds, axis=1)[:, -n:]
    hits = sum(1 for row, label in enumerate(ts) if label in top_indices[row, :])
    return float(hits) / ts.shape[0]
| [
"logging.FileHandler",
"logging.StreamHandler",
"logging.Formatter",
"numpy.argsort",
"logging.getLogger"
] | [((115, 141), 'logging.getLogger', 'logging.getLogger', (['"""darts"""'], {}), "('darts')\n", (132, 141), False, 'import logging\n'), ((203, 261), 'logging.Formatter', 'logging.Formatter', (['log_format'], {'datefmt': '"""%m/%d %I:%M:%S %p"""'}), "(log_format, datefmt='%m/%d %I:%M:%S %p')\n", (220, 261), False, 'import logging\n'), ((281, 321), 'logging.FileHandler', 'logging.FileHandler', (['file_path'], {'mode': '"""a"""'}), "(file_path, mode='a')\n", (300, 321), False, 'import logging\n'), ((384, 407), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (405, 407), False, 'import logging\n'), ((772, 797), 'numpy.argsort', 'np.argsort', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (782, 797), True, 'import numpy as np\n')] |
# coding=utf-8
"""
train bert model
"""
import modeling
import tensorflow as tf
import numpy as np
import argparse
# Parse the training batch size from the command line (default 128).
parser = argparse.ArgumentParser(description='Describe your program')
parser.add_argument('-batch_size', '--batch_size', type=int,default=128)
args = parser.parse_args()
# Module-level constant used by bert_train_fn below.
batch_size=args.batch_size
print("batch_size:",batch_size)
def bert_train_fn():
    """Build a BERT classifier graph and run a dummy loop on all-ones inputs.

    Uses the module-level `batch_size` parsed from the command line. The
    inputs and labels are constant, so this only exercises the graph.
    """
    is_training=True
    hidden_size = 768
    num_labels = 10
    #batch_size=128
    max_seq_length=512
    use_one_hot_embeddings = False
    # NOTE(review): vocab_size=21128 suggests a Chinese BERT vocabulary --
    # confirm it matches the checkpoint/tokenizer in use.
    bert_config = modeling.BertConfig(vocab_size=21128, hidden_size=hidden_size, num_hidden_layers=12,
                                        num_attention_heads=12,intermediate_size=3072)
    input_ids = tf.placeholder(tf.int32, [batch_size, max_seq_length], name="input_ids")
    input_mask = tf.placeholder(tf.int32, [batch_size, max_seq_length], name="input_mask")
    segment_ids = tf.placeholder(tf.int32, [batch_size,max_seq_length],name="segment_ids")
    label_ids = tf.placeholder(tf.float32, [batch_size,num_labels], name="label_ids")
    loss, per_example_loss, logits, probabilities, model = create_model(bert_config, is_training, input_ids, input_mask,
                                                                        segment_ids, label_ids, num_labels,
                                                                        use_one_hot_embeddings)
    # 1. generate or load training/validation/test data. e.g. train:(X,y). X is input_ids,y is labels.
    # 2. train the model by calling create model, get loss
    gpu_config = tf.ConfigProto()
    gpu_config.gpu_options.allow_growth = True
    sess = tf.Session(config=gpu_config)
    sess.run(tf.global_variables_initializer())
    # NOTE(review): no optimizer/train_op is created, so this loop only
    # evaluates the loss -- the weights are never updated.
    for i in range(1000):
        input_ids_=np.ones((batch_size,max_seq_length),dtype=np.int32)
        input_mask_=np.ones((batch_size,max_seq_length),dtype=np.int32)
        segment_ids_=np.ones((batch_size,max_seq_length),dtype=np.int32)
        label_ids_=np.ones((batch_size,num_labels),dtype=np.float32)
        feed_dict = {input_ids: input_ids_, input_mask: input_mask_,segment_ids:segment_ids_,label_ids:label_ids_}
        loss_ = sess.run([loss], feed_dict)
        print("loss:",loss_)
    # 3. eval the model from time to time
def bert_predict_fn():
    """Placeholder for inference: restore a trained checkpoint and run predictions."""
    # 1. predict based on
    pass
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,labels, num_labels, use_one_hot_embeddings):
    """Creates a classification model.

    Builds a BERT encoder, takes its pooled sequence-level output and adds a
    linear classification layer with `num_labels` outputs on top.

    Returns (loss, per_example_loss, logits, probabilities, model).
    """
    model = modeling.BertModel(
        config=bert_config,
        is_training=is_training,
        input_ids=input_ids,
        input_mask=input_mask,
        token_type_ids=segment_ids,
        use_one_hot_embeddings=use_one_hot_embeddings)
    # One pooled hidden vector per sequence: shape (batch, hidden_size).
    output_layer = model.get_pooled_output()
    hidden_size = output_layer.shape[-1].value
    output_weights = tf.get_variable("output_weights", [num_labels, hidden_size],initializer=tf.truncated_normal_initializer(stddev=0.02))
    output_bias = tf.get_variable("output_bias", [num_labels], initializer=tf.zeros_initializer())
    with tf.variable_scope("loss"):
        if is_training: # if training, add dropout
            output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
        logits = tf.matmul(output_layer, output_weights, transpose_b=True)
        print("output_layer:",output_layer.shape,";output_weights:",output_weights.shape,";logits:",logits.shape)
        logits = tf.nn.bias_add(logits, output_bias)
        # NOTE(review): probabilities use softmax (single-label) while the
        # loss uses per-class sigmoid cross-entropy (multi-label) -- confirm
        # which problem formulation is intended.
        probabilities = tf.nn.softmax(logits, axis=-1)
        per_example_loss=tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
        loss = tf.reduce_mean(per_example_loss)
    return loss, per_example_loss, logits, probabilities,model
# Train immediately when the module is executed (or imported).
bert_train_fn()
| [
"tensorflow.nn.softmax",
"argparse.ArgumentParser",
"modeling.BertModel",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"numpy.ones",
"tensorflow.variable_scope",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.reduce_mean",
"tensorflow.placeholder",
"tensorflow... | [((125, 185), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Describe your program"""'}), "(description='Describe your program')\n", (148, 185), False, 'import argparse\n'), ((525, 661), 'modeling.BertConfig', 'modeling.BertConfig', ([], {'vocab_size': '(21128)', 'hidden_size': 'hidden_size', 'num_hidden_layers': '(12)', 'num_attention_heads': '(12)', 'intermediate_size': '(3072)'}), '(vocab_size=21128, hidden_size=hidden_size,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n', (544, 661), False, 'import modeling\n'), ((712, 784), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[batch_size, max_seq_length]'], {'name': '"""input_ids"""'}), "(tf.int32, [batch_size, max_seq_length], name='input_ids')\n", (726, 784), True, 'import tensorflow as tf\n'), ((802, 875), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[batch_size, max_seq_length]'], {'name': '"""input_mask"""'}), "(tf.int32, [batch_size, max_seq_length], name='input_mask')\n", (816, 875), True, 'import tensorflow as tf\n'), ((894, 968), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[batch_size, max_seq_length]'], {'name': '"""segment_ids"""'}), "(tf.int32, [batch_size, max_seq_length], name='segment_ids')\n", (908, 968), True, 'import tensorflow as tf\n'), ((983, 1053), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[batch_size, num_labels]'], {'name': '"""label_ids"""'}), "(tf.float32, [batch_size, num_labels], name='label_ids')\n", (997, 1053), True, 'import tensorflow as tf\n'), ((1558, 1574), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1572, 1574), True, 'import tensorflow as tf\n'), ((1633, 1662), 'tensorflow.Session', 'tf.Session', ([], {'config': 'gpu_config'}), '(config=gpu_config)\n', (1643, 1662), True, 'import tensorflow as tf\n'), ((2485, 2676), 'modeling.BertModel', 'modeling.BertModel', ([], {'config': 'bert_config', 'is_training': 'is_training', 
'input_ids': 'input_ids', 'input_mask': 'input_mask', 'token_type_ids': 'segment_ids', 'use_one_hot_embeddings': 'use_one_hot_embeddings'}), '(config=bert_config, is_training=is_training, input_ids=\n input_ids, input_mask=input_mask, token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n', (2503, 2676), False, 'import modeling\n'), ((1676, 1709), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1707, 1709), True, 'import tensorflow as tf\n'), ((1756, 1809), 'numpy.ones', 'np.ones', (['(batch_size, max_seq_length)'], {'dtype': 'np.int32'}), '((batch_size, max_seq_length), dtype=np.int32)\n', (1763, 1809), True, 'import numpy as np\n'), ((1828, 1881), 'numpy.ones', 'np.ones', (['(batch_size, max_seq_length)'], {'dtype': 'np.int32'}), '((batch_size, max_seq_length), dtype=np.int32)\n', (1835, 1881), True, 'import numpy as np\n'), ((1901, 1954), 'numpy.ones', 'np.ones', (['(batch_size, max_seq_length)'], {'dtype': 'np.int32'}), '((batch_size, max_seq_length), dtype=np.int32)\n', (1908, 1954), True, 'import numpy as np\n'), ((1972, 2023), 'numpy.ones', 'np.ones', (['(batch_size, num_labels)'], {'dtype': 'np.float32'}), '((batch_size, num_labels), dtype=np.float32)\n', (1979, 2023), True, 'import numpy as np\n'), ((3036, 3061), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""loss"""'], {}), "('loss')\n", (3053, 3061), True, 'import tensorflow as tf\n'), ((3188, 3245), 'tensorflow.matmul', 'tf.matmul', (['output_layer', 'output_weights'], {'transpose_b': '(True)'}), '(output_layer, output_weights, transpose_b=True)\n', (3197, 3245), True, 'import tensorflow as tf\n'), ((3370, 3405), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['logits', 'output_bias'], {}), '(logits, output_bias)\n', (3384, 3405), True, 'import tensorflow as tf\n'), ((3426, 3456), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (3439, 3456), True, 'import tensorflow as tf\n'), 
((3478, 3547), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'labels': 'labels', 'logits': 'logits'}), '(labels=labels, logits=logits)\n', (3517, 3547), True, 'import tensorflow as tf\n'), ((3559, 3591), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['per_example_loss'], {}), '(per_example_loss)\n', (3573, 3591), True, 'import tensorflow as tf\n'), ((2885, 2929), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (2916, 2929), True, 'import tensorflow as tf\n'), ((3004, 3026), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (3024, 3026), True, 'import tensorflow as tf\n'), ((3132, 3174), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['output_layer'], {'keep_prob': '(0.9)'}), '(output_layer, keep_prob=0.9)\n', (3145, 3174), True, 'import tensorflow as tf\n')] |
"""
What is desteaking?
When computing inverse Radon transform using Filter back projection,
streaks (line artifacts) would appear if information from some angles are
missing. A popular way to remove them is to optimize some loss function
in the image and Radon transform domain, such loss functions are exquisitely
studied in compressive sensing.
Mode (streak_type and streak_params):
- "periodic", [number of angles to keep]: angles are periodic
- "uniform", [number of angles to keep]: angles are uniformly distributed
- "fix", [list of angles valued from 0 to 180 (exclusive)]
"""
import numpy as np
from typing import Tuple, Optional
from skimage.color import rgb2grey
from skimage.transform import radon, iradon
from nnimgproc.processor import TargetProcessor
from nnimgproc.util.parameters import Parameters
# TargetProcessor for destreaking
class DestreakingTargetProcessor(TargetProcessor):
    """Simulates streak artifacts by a sparse-angle Radon transform followed
    by Filtered Back Projection reconstruction."""

    def __init__(self, streak_type: str, streak_params: list):
        """
        :param streak_type: 'periodic', 'uniform' or 'fix' (see module docstring)
        :param streak_params: parameters of the chosen mode
        """
        super(DestreakingTargetProcessor, self).__init__()
        self._streak_type = streak_type
        self._streak_params = streak_params
        self._params = Parameters()

    # Noise definitions
    def __call__(self, img: np.ndarray) \
            -> Tuple[np.ndarray, np.ndarray, Optional[Parameters]]:
        """
        Compute Radon reconstruction using Filtered Back Projection method.
        The input can be color image, but it will be converted to black and
        white. The output will always be black and white.
        :param img: ndarray of shape (w, h, 1) for grey image or (w, h, 3)
        :return: ndarray of shape (w, h, 1)
        """
        # Convert img to 2D array
        if img.shape[2] == 3:
            img = rgb2grey(img)
        else:
            img = img[:, :, 0]
        # Compute the right angles according to the parameters
        if self._streak_type == 'periodic':
            theta = np.linspace(0., 180., int(self._streak_params[0]),
                                endpoint=False)
        elif self._streak_type == "uniform":
            theta = np.random.uniform(0., 180., int(self._streak_params[0]))
        elif self._streak_type == "fix":
            # Bug fix: the previous lazy `map` iterator was consumed by
            # `radon` below, leaving `iradon` with no angles. Materialize
            # the angle list so it can be iterated more than once.
            theta = [float(angle) for angle in self._streak_params]
        else:
            raise NotImplementedError('%s streaking is not implemented' %
                                      self._streak_type)
        self._params.set('angles', theta)
        sinogram = radon(img, theta=theta, circle=False)
        x = np.expand_dims(iradon(sinogram, theta=theta, circle=False),
                           axis=2)
        y = np.expand_dims(img, axis=2)
        # FBP reconstruction can overshoot; clamp back into [0, 1].
        return x.clip(0, 1), y, self._params
| [
"skimage.transform.iradon",
"nnimgproc.util.parameters.Parameters",
"numpy.expand_dims",
"skimage.color.rgb2grey",
"skimage.transform.radon"
] | [((1137, 1149), 'nnimgproc.util.parameters.Parameters', 'Parameters', ([], {}), '()\n', (1147, 1149), False, 'from nnimgproc.util.parameters import Parameters\n'), ((2431, 2468), 'skimage.transform.radon', 'radon', (['img'], {'theta': 'theta', 'circle': '(False)'}), '(img, theta=theta, circle=False)\n', (2436, 2468), False, 'from skimage.transform import radon, iradon\n'), ((2589, 2616), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(2)'}), '(img, axis=2)\n', (2603, 2616), True, 'import numpy as np\n'), ((1722, 1735), 'skimage.color.rgb2grey', 'rgb2grey', (['img'], {}), '(img)\n', (1730, 1735), False, 'from skimage.color import rgb2grey\n'), ((2496, 2539), 'skimage.transform.iradon', 'iradon', (['sinogram'], {'theta': 'theta', 'circle': '(False)'}), '(sinogram, theta=theta, circle=False)\n', (2502, 2539), False, 'from skimage.transform import radon, iradon\n')] |
import os
import sys
import argparse
import dolfin as dlf
import fenicsmechanics as fm
from fenicsmechanics.dolfincompat import MPI_COMM_WORLD
# Parse through the arguments provided at the command line.
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dim',
                    help='dimension',
                    default=2,
                    type=int)
parser.add_argument('-r', '--refinement',
                    help='mesh refinement',
                    default=12,
                    type=int)
parser.add_argument('-m', '--material',
                    help='constitutive relation',
                    default='lin_elastic',
                    choices=['lin_elastic', 'neo_hookean',
                             'guccione', 'fung'])
parser.add_argument('-inv', '--inverse',
                    help='activate inverse elasticity',
                    action='store_true')
parser.add_argument('-inc', '--incompressible',
                    help='active incompressible material',
                    action='store_true')
parser.add_argument('-hdf5',
                    help="use HDF5 files",
                    action='store_true')
parser.add_argument('-s','--save',
                    help='save solution',
                    action='store_true')
parser.add_argument('-v', '--compute-volume',
                    help='compute deformed volume',
                    action='store_true')
args = parser.parse_args()
# Mesh file names based on arguments given
mesh_dims = (args.refinement,)*args.dim
# e.g. dim=2, refinement=12 -> mesh_dims=(12, 12) and dim_str='12x12'
dim_str = 'x'.join(['%i' % i for i in mesh_dims])
# NOTE(review): 'p2-p1' for the incompressible case is presumably a mixed
# displacement/pressure (Taylor-Hood) element -- confirm against the solver.
if args.incompressible:
    name_dims = ('incomp_' + args.material, dim_str)
    element_type = 'p2-p1'
else:
    name_dims = ('comp_' + args.material, dim_str)
    element_type = 'p2'
# Output files are written only with --save; the names encode the problem
# direction (forward/inverse), material and mesh size.
if args.save:
    if args.inverse:
        disp_file = 'results/inverse-disp-%s-%s.pvd' % name_dims
        vel_file = 'results/inverse-vel-%s-%s.pvd' % name_dims
    else:
        disp_file = 'results/forward-disp-%s-%s.pvd' % name_dims
        vel_file = 'results/forward-vel-%s-%s.pvd' % name_dims
else:
    disp_file = None
    vel_file = None
# Mesh storage format: HDF5 or gzipped XML.
if args.hdf5:
    ext = "h5"
else:
    ext = "xml.gz"
mesh_file, boundaries = fm.get_mesh_file_names("unit_domain", ret_facets=True,
                                               refinements=mesh_dims, ext=ext)
# The mesh and mesh-function files must have been produced beforehand by
# 'generate_mesh_files.py' run with the same arguments.
# FileNotFoundError (an Exception subclass) is raised instead of a bare
# Exception so callers can handle missing files specifically.
if not os.path.isfile(mesh_file):
    raise FileNotFoundError('The mesh file, \'%s\', does not exist. ' % mesh_file
                            + 'Please run the script \'generate_mesh_files.py\' '
                            + 'with the same arguments first.')
# Check if the mesh function file exists
if not os.path.isfile(boundaries):
    # Fixed missing space between "...py'" and "with" in the message.
    raise FileNotFoundError('The mesh function file, \'%s\', does not exist. ' % boundaries
                            + 'Please run the script \'generate_mesh_files.py\' '
                            + 'with the same arguments first.')
# Optimization options for the form compiler
dlf.parameters['form_compiler']['cpp_optimize'] = True
dlf.parameters['form_compiler']['quadrature_degree'] = 3
dlf.parameters['form_compiler']['representation'] = 'uflacs'
# NOTE(review): ffc_options is not referenced anywhere below in this script
# -- possibly vestigial.
ffc_options = {'optimize' : True,
               'eliminate_zeros' : True,
               'precompute_basis_const' : True,
               'precompute_ip_const' : True}
# Elasticity parameters
E = 20.0 # Young's modulus
if args.incompressible:
    nu = 0.5 # Poisson's ratio
else:
    nu = 0.3 # Poisson's ratio
# Inverse of the first Lame parameter; evaluates to 0 for nu = 0.5.
inv_la = (1. + nu)*(1. - 2.*nu)/(E*nu)
mu = E/(2.*(1. + nu)) # 2nd Lame parameter
# Traction on the Neumann boundary region
trac = [10.0] + [0.0]*(args.dim-1)
# Region IDs
ALL_ELSE = 0
CLIP = 1
TRACTION = 2
# Material subdictionary
material_dict = {'const_eqn': args.material,
                 'type': 'elastic',
                 'incompressible': args.incompressible,
                 'density': 10.0}
# Isotropic parameters
if args.material == 'fung':
    material_dict['d'] = [15.0]*3 + [0.0]*3 + [10.0]*3
elif args.material == 'guccione':
    material_dict['bt'] = 10.0
    material_dict['bf'] = 1.0
    material_dict['bfs'] = 5.0
# Fiber-based materials additionally get two orthogonal unit fiber
# directions at +/-45 degrees in the x-y plane.
if args.material in ['fung', 'guccione']:
    from numpy import sqrt
    material_dict['C'] = 20.0
    material_dict['kappa'] = 1e4
    if args.dim == 2:
        e1 = dlf.Constant([1.0/sqrt(2.0)]*2)
        e2 = dlf.Constant([1.0/sqrt(2.0), -1.0/sqrt(2.0)])
    else:
        e1 = dlf.Constant([1.0/sqrt(2.0)]*2 + [0.0])
        e2 = dlf.Constant([1.0/sqrt(2.0), -1.0/sqrt(2.0), 0.0])
    material_dict['fibers'] = {'fiber_files': [e1, e2],
                               'fiber_names': ['e1', 'e2'],
                               'element': None}
else:
    # Non-fiber materials take the Lame parameters computed above directly.
    material_dict.update({'inv_la': inv_la, 'mu': mu})
# Mesh subdictionary
mesh_dict = {'mesh_file': mesh_file,
             'boundaries': boundaries}
# Formulation subdictionary: clamp the CLIP region, apply a Cauchy traction
# on the TRACTION region, no body force.
formulation_dict = {'element': element_type,
                    'domain': 'lagrangian',
                    'inverse': args.inverse,
                    'body_force': dlf.Constant([0.0]*args.dim),
                    'bcs': {
                        'dirichlet':
                        {
                            'displacement': [dlf.Constant([0.0]*args.dim)],
                            'regions': [CLIP],
                        },
                        'neumann':
                        {
                            'regions': [TRACTION],
                            'types': ['cauchy'],
                            'values': [trac]
                        }
                    }}
# Problem configuration dictionary
config = {'material': material_dict,
          'mesh': mesh_dict,
          'formulation': formulation_dict}
# Assemble and solve the elasticity problem defined by `config`.
problem = fm.SolidMechanicsProblem(config)
solver = fm.SolidMechanicsSolver(problem, fname_disp=disp_file)
solver.full_solve()
# Alternative block-solver driver, kept for reference:
# problem = fm.MechanicsProblem(config)
# my_solver = fm.MechanicsBlockSolver(problem, fname_disp=disp_file)
# my_solver.solve(print_norm=True,
#                 iter_tol=1e-6,
#                 maxLinIters=50,
#                 show=2)
# Plot solution if running on one process.
# Interactive plotting is only attempted on old (<= 2017) dolfin versions.
if dlf.MPI.size(MPI_COMM_WORLD) == 1:
    if int(dlf.__version__[:4]) <= 2017:
        dlf.plot(problem.displacement, interactive=True, mode='displacement')
# Compute the final volume
# Projects the displacement onto a CG1 vector space and moves the mesh with
# it (ALE); the actual volume assembly is left commented out below.
if args.compute_volume:
    W1 = dlf.VectorFunctionSpace(problem.mesh, 'CG', 1)
    xi1 = dlf.TestFunction(W1)
    du1 = dlf.TrialFunction(W1)
    u_move = dlf.Function(W1)
    move_bcs = dlf.DirichletBC(W1, dlf.Constant([0.0]*args.dim),
                              problem.boundaries, CLIP)
    a = dlf.dot(xi1, du1)*dlf.dx
    L = dlf.dot(xi1, problem.displacement)*dlf.dx
    dlf.solve(a == L, u_move, move_bcs)
    ale = dlf.ALE()
    ale.move(problem.mesh, u_move)
    # print "Total volume after: ", \
    #     dlf.assemble(dlf.Constant(1.0)*dlf.dx(domain=problem.mesh))
| [
"dolfin.MPI.size",
"fenicsmechanics.SolidMechanicsSolver",
"argparse.ArgumentParser",
"dolfin.TrialFunction",
"dolfin.solve",
"dolfin.TestFunction",
"dolfin.ALE",
"fenicsmechanics.SolidMechanicsProblem",
"dolfin.Function",
"dolfin.plot",
"os.path.isfile",
"dolfin.Constant",
"fenicsmechanics.... | [((214, 239), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (237, 239), False, 'import argparse\n'), ((2195, 2286), 'fenicsmechanics.get_mesh_file_names', 'fm.get_mesh_file_names', (['"""unit_domain"""'], {'ret_facets': '(True)', 'refinements': 'mesh_dims', 'ext': 'ext'}), "('unit_domain', ret_facets=True, refinements=\n mesh_dims, ext=ext)\n", (2217, 2286), True, 'import fenicsmechanics as fm\n'), ((5657, 5689), 'fenicsmechanics.SolidMechanicsProblem', 'fm.SolidMechanicsProblem', (['config'], {}), '(config)\n', (5681, 5689), True, 'import fenicsmechanics as fm\n'), ((5699, 5753), 'fenicsmechanics.SolidMechanicsSolver', 'fm.SolidMechanicsSolver', (['problem'], {'fname_disp': 'disp_file'}), '(problem, fname_disp=disp_file)\n', (5722, 5753), True, 'import fenicsmechanics as fm\n'), ((2369, 2394), 'os.path.isfile', 'os.path.isfile', (['mesh_file'], {}), '(mesh_file)\n', (2383, 2394), False, 'import os\n'), ((2649, 2675), 'os.path.isfile', 'os.path.isfile', (['boundaries'], {}), '(boundaries)\n', (2663, 2675), False, 'import os\n'), ((4966, 4996), 'dolfin.Constant', 'dlf.Constant', (['([0.0] * args.dim)'], {}), '([0.0] * args.dim)\n', (4978, 4996), True, 'import dolfin as dlf\n'), ((6059, 6087), 'dolfin.MPI.size', 'dlf.MPI.size', (['MPI_COMM_WORLD'], {}), '(MPI_COMM_WORLD)\n', (6071, 6087), True, 'import dolfin as dlf\n'), ((6274, 6320), 'dolfin.VectorFunctionSpace', 'dlf.VectorFunctionSpace', (['problem.mesh', '"""CG"""', '(1)'], {}), "(problem.mesh, 'CG', 1)\n", (6297, 6320), True, 'import dolfin as dlf\n'), ((6331, 6351), 'dolfin.TestFunction', 'dlf.TestFunction', (['W1'], {}), '(W1)\n', (6347, 6351), True, 'import dolfin as dlf\n'), ((6362, 6383), 'dolfin.TrialFunction', 'dlf.TrialFunction', (['W1'], {}), '(W1)\n', (6379, 6383), True, 'import dolfin as dlf\n'), ((6397, 6413), 'dolfin.Function', 'dlf.Function', (['W1'], {}), '(W1)\n', (6409, 6413), True, 'import dolfin as dlf\n'), ((6623, 6658), 'dolfin.solve', 
'dlf.solve', (['(a == L)', 'u_move', 'move_bcs'], {}), '(a == L, u_move, move_bcs)\n', (6632, 6658), True, 'import dolfin as dlf\n'), ((6670, 6679), 'dolfin.ALE', 'dlf.ALE', ([], {}), '()\n', (6677, 6679), True, 'import dolfin as dlf\n'), ((6143, 6212), 'dolfin.plot', 'dlf.plot', (['problem.displacement'], {'interactive': '(True)', 'mode': '"""displacement"""'}), "(problem.displacement, interactive=True, mode='displacement')\n", (6151, 6212), True, 'import dolfin as dlf\n'), ((6449, 6479), 'dolfin.Constant', 'dlf.Constant', (['([0.0] * args.dim)'], {}), '([0.0] * args.dim)\n', (6461, 6479), True, 'import dolfin as dlf\n'), ((6544, 6561), 'dolfin.dot', 'dlf.dot', (['xi1', 'du1'], {}), '(xi1, du1)\n', (6551, 6561), True, 'import dolfin as dlf\n'), ((6577, 6611), 'dolfin.dot', 'dlf.dot', (['xi1', 'problem.displacement'], {}), '(xi1, problem.displacement)\n', (6584, 6611), True, 'import dolfin as dlf\n'), ((5133, 5163), 'dolfin.Constant', 'dlf.Constant', (['([0.0] * args.dim)'], {}), '([0.0] * args.dim)\n', (5145, 5163), True, 'import dolfin as dlf\n'), ((4291, 4300), 'numpy.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (4295, 4300), False, 'from numpy import sqrt\n'), ((4307, 4316), 'numpy.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (4311, 4316), False, 'from numpy import sqrt\n'), ((4413, 4422), 'numpy.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (4417, 4422), False, 'from numpy import sqrt\n'), ((4429, 4438), 'numpy.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (4433, 4438), False, 'from numpy import sqrt\n'), ((4246, 4255), 'numpy.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (4250, 4255), False, 'from numpy import sqrt\n'), ((4360, 4369), 'numpy.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (4364, 4369), False, 'from numpy import sqrt\n')] |
#Noise Simulator
import numpy as np
import matplotlib.pyplot as plt
def noise(num_samples = 10000, alpha = None, noise_type = 'pink', to_plot = False):
    """
    Generate 1/f^alpha ("colored") noise by spectrally shaping white noise.

    :param num_samples: number of samples to generate
    :param alpha: spectral exponent; if None it is derived from noise_type
                  ('white' -> 0, 'pink' -> 1, 'brown' -> 2)
    :param noise_type: named noise color, used only when alpha is None
    :param to_plot: if truthy, show a matplotlib plot of the result
    :rtype: complex ndarray of length num_samples (imaginary parts are only
            numerical round-off; take ``.real`` for a real-valued signal)
    """
    if alpha is None:
        if noise_type == 'white':
            alpha = 0
        elif noise_type == 'pink':
            alpha = 1
        elif noise_type == 'brown':
            alpha = 2
        else:
            # Previously an unknown type silently left alpha as None and
            # crashed later with a confusing TypeError.
            raise ValueError("unknown noise_type: %r" % (noise_type,))
    samps = np.random.normal(0, 1, num_samples)
    samps_fft = np.fft.fft(samps)
    # Build symmetric frequency weights so the positive and negative
    # frequency bins of the FFT are scaled identically.
    half = len(samps_fft) // 2
    if len(samps_fft) % 2 == 0:
        den1 = np.arange(1, half + 2)
        den2 = np.arange(half, 1, -1)
    else:
        den1 = np.arange(1, half + 2)
        den2 = np.arange(half + 1, 1, -1)
    dens = np.concatenate([den1, den2])
    # Dividing amplitude by f^(alpha/2) gives 1/f^alpha power spectrum.
    new_samps = samps_fft / (np.sqrt(dens) ** alpha)
    new_samps_ifft = np.fft.ifft(new_samps)
    if to_plot:
        # Fixed: the default used to be the *string* 'False' compared with
        # ``== True``; a plain boolean flag is used now.
        plt.plot(new_samps_ifft)
        plt.show()
    return new_samps_ifft
if __name__ == '__main__':
    # Demo: generate 10000 samples of pink (alpha=1) noise and display them.
    data = noise(10000, alpha = 1, to_plot = True)
    #plt.plot(data)
    #plt.show()
| [
"numpy.fft.ifft",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.fft.fft",
"numpy.random.normal",
"numpy.concatenate",
"numpy.sqrt"
] | [((482, 517), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'num_samples'], {}), '(0, 1, num_samples)\n', (498, 517), True, 'import numpy as np\n'), ((534, 551), 'numpy.fft.fft', 'np.fft.fft', (['samps'], {}), '(samps)\n', (544, 551), True, 'import numpy as np\n'), ((814, 842), 'numpy.concatenate', 'np.concatenate', (['[den1, den2]'], {}), '([den1, den2])\n', (828, 842), True, 'import numpy as np\n'), ((919, 941), 'numpy.fft.ifft', 'np.fft.ifft', (['new_samps'], {}), '(new_samps)\n', (930, 941), True, 'import numpy as np\n'), ((975, 999), 'matplotlib.pyplot.plot', 'plt.plot', (['new_samps_ifft'], {}), '(new_samps_ifft)\n', (983, 999), True, 'import matplotlib.pyplot as plt\n'), ((1008, 1018), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1016, 1018), True, 'import matplotlib.pyplot as plt\n'), ((873, 886), 'numpy.sqrt', 'np.sqrt', (['dens'], {}), '(dens)\n', (880, 886), True, 'import numpy as np\n')] |
import os
import sys
import yaml
import logging
import pickle
import numpy as np
import time
from datetime import datetime
from rdkit import Chem
import torch
from torch.utils.data import Dataset, DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch_geometric as pyg
import utils.graph_utils as graph_utils
import utils.general_utils as general_utils
from .sGAT import sGAT, save_sGAT, load_sGAT
# Share tensors between DataLoader workers through the file system to avoid
# "too many open files" errors with the default file-descriptor strategy.
torch.multiprocessing.set_sharing_strategy('file_system')
#####################################################
# MODEL HANDLING #
#####################################################
def load_current_model(model_path):
    """Load the most recent sGAT checkpoint stored under model_path."""
    return load_sGAT(os.path.join(model_path, 'current_model.pt'))

def load_best_model(model_path):
    """Load the best-scoring sGAT checkpoint stored under model_path."""
    return load_sGAT(os.path.join(model_path, 'best_model.pt'))

def save_current_model(net, model_path):
    """Persist `net` as the 'current' checkpoint under model_path."""
    save_sGAT(net, os.path.join(model_path, 'current_model.pt'))

def save_best_model(net, model_path):
    """Persist `net` as the 'best' checkpoint under model_path."""
    save_sGAT(net, os.path.join(model_path, 'best_model.pt'))
#############################################
# Custom Functions #
#############################################
def dock_score_weights(scores):
    """If sample has a docking score south of a random split point, it is more likely to be sampled in the batch."""
    # NOTE(review): relies on module-level globals `split` and
    # `upsampled_weight` that are not defined in the visible scope --
    # confirm they are set before this function is called.
    below_split = np.asarray(scores) < split
    return np.where(below_split, upsampled_weight, 1 - upsampled_weight).astype(float)
def exp_weighted_mse(output, target):
    """Custom loss function assigning greater weight to errors at the top of the ranked list."""
    # NOTE(review): `exp_loc` and `exp_scale` are module-level globals not
    # defined in the visible scope -- presumably set by the training config.
    epsilon = 0.001 # To avoid nan's?
    # Exponential-density-style weight clamped to [0, 1]: targets far below
    # exp_loc saturate at full weight, targets above it decay toward zero.
    weight = torch.clamp((torch.exp(-(target - exp_loc) / exp_scale) / exp_scale), min=0.0, max=1)
    loss = torch.mean(weight * (output - target) ** 2) + epsilon
    return loss
#############################################
# DATA #
#############################################
def get_dense_edges(n):
    """Edge index of a fully connected graph on n nodes (self-loops included).

    Returns a (2, n*n) long tensor of [source; destination] node indices.
    """
    nodes = np.arange(n)
    source = np.tile(nodes, n)
    destination = np.repeat(nodes, n)
    return torch.tensor([source, destination], dtype=torch.long)
class MolData(Dataset):
    """Dataset of (logP target, molecular graph) pairs built from SMILES strings."""
    def __init__(self, logp, smiles, use_3d):
        # logp: list of target values, aligned index-by-index with smiles.
        # smiles: list of SMILES strings.
        # use_3d: forwarded to graph_utils.mol_to_pyg_graph.
        super(MolData, self).__init__()
        self.logp = logp
        self.smiles = smiles
        self.use_3d = use_3d
    def __getitem__(self, index):
        """Return (target tensor, graph, second graph) for one sample.

        The second element of the graph pair is presumably the 3D variant
        and may be None -- see my_collate, which handles that case.
        """
        logp = self.logp[index]
        smiles = self.smiles[index]
        # Hot fix, get first in list if mol is none...
        mol = Chem.MolFromSmiles(smiles)
        if mol is None:
            # Fall back to the first sample so batch shapes stay consistent.
            smiles = self.smiles[0]
            logp = self.logp[0]
            mol = Chem.MolFromSmiles(smiles)
            print("Invalid SMILE encountered. Using first row instead.")
        g = graph_utils.mol_to_pyg_graph(mol, self.use_3d)
        return torch.FloatTensor([logp]), g[0], g[1]
    def __len__(self):
        return len(self.logp)
    def get_graph_spec(self):
        """Return (nb_node_features, nb_edge_features) of the produced graphs."""
        y, g, _ = self[0]
        nb_node_feats = g.x.shape[1]
        try:
            nb_edge_feats = g.edge_attr.shape[1]
        except Exception as e:
            # Graphs without explicit edge attributes count as 1 feature.
            nb_edge_feats = 1
        return nb_node_feats, nb_edge_feats
    def compute_baseline_error(self):
        """Log the L2 loss of always predicting the mean target (= variance)."""
        logp = np.array(self.logp)
        mean = logp.mean()
        sq_sum = np.sum(np.square(logp - mean)) / len(logp)
        logging.info("{:5.3f} baseline L2 loss\n".format(sq_sum))
def create_datasets(logp, smiles, use_3d, np_seed=0):
    """Shuffle (logp, smiles) with a fixed seed and split 60/20/20 into
    train/valid/test MolData instances. Requires more than 10 samples."""
    nb_samples = len(logp)
    assert nb_samples > 10
    nb_train = int(nb_samples * 0.6)
    nb_valid = int(nb_samples * 0.2)
    np.random.seed(np_seed)
    order = np.random.permutation(nb_samples)
    logp = np.asarray(logp)[order].tolist()
    smiles = np.asarray(smiles)[order].tolist()
    cut = nb_train + nb_valid
    train_data = MolData(logp[:nb_train], smiles[:nb_train], use_3d)
    valid_data = MolData(logp[nb_train:cut], smiles[nb_train:cut], use_3d)
    test_data = MolData(logp[cut:], smiles[cut:], use_3d)
    return train_data, valid_data, test_data
def my_collate(samples):
    """Collate (target, graph, optional 3D graph) triples into one batch.

    Returns (stacked targets, Batch of 2D graphs, Batch of 3D graphs or None
    when any sample lacks a 3D graph).
    """
    targets = [item[0] for item in samples]
    graphs = [item[1] for item in samples]
    graphs_3d = [item[2] for item in samples]
    y = torch.cat(targets, dim=0)
    batch_2d = pyg.data.Batch().from_data_list(graphs)
    if None in graphs_3d:
        return y, batch_2d, None
    batch_3d = pyg.data.Batch().from_data_list(graphs_3d)
    return y, batch_2d, batch_3d
def parse_raw_data(raw_dataset):
    """Push the raw dataset through a DataLoader and collect collated batches.

    NOTE(review): iteration stops after batch index 20 and progress is printed
    every third batch — this looks like leftover debug truncation; confirm
    before relying on the full dataset being preprocessed.
    """
    batch_size = 32
    loader = DataLoader(raw_dataset,
                        shuffle=False,
                        collate_fn=my_collate,
                        batch_size=batch_size,
                        num_workers=16)
    collected = []
    print("\nPreprocessing {} samples".format(len(raw_dataset)))
    for idx, batch in enumerate(loader):
        if (idx % 3) == 0:
            print("{:7d}".format(idx * batch_size))
            print(len(collected))
        collected.append(batch)
        if idx == 20:
            break
    return collected
def parse_data_path(data_path, use_3d):
    """Derive (and create) the preprocessed-data directory next to `data_path`.

    The directory name is the file stem suffixed with '_with_3d' or '_no_3d'
    depending on `use_3d`; it is created if missing and its path returned.
    """
    parts = data_path.split('/')
    parent = '/'.join(parts[:-1])
    stem = parts[-1].split('.')[0]
    suffix = '_with_3d' if use_3d else '_no_3d'
    storage_path = os.path.join(parent, stem) + suffix
    os.makedirs(storage_path, exist_ok=True)
    return storage_path
def preprocess_data(raw_dataset, storage_path, dataset_name):
    """Load a cached pickled dataset, or parse, pickle, and then exit.

    NOTE(review): on a cache miss this calls exit() right after writing the
    pickle, terminating the process — the program must be re-run to consume
    the freshly written cache. Confirm this workflow is intentional.
    """
    dataset_path = os.path.join(storage_path, dataset_name + '.pkl')
    print(dataset_path)
    try:
        with open(dataset_path, 'rb') as f:
            parsed_data = pickle.load(f)
        print("Preprocessed {} set loaded".format(dataset_name))
    except Exception:
        print("Preprocessed {} set not found. Parsing...".format(dataset_name))
        start = time.time()
        parsed_data = parse_raw_data(raw_dataset)
        print("{:5.2f}s for {} samples".format(time.time() - start, len(parsed_data)))
        with open(dataset_path, 'wb') as f:
            pickle.dump(parsed_data, f)
        print("Done.")
        exit()
    return parsed_data
#################################################
# TRAINING #
#################################################
def proc_one_epoch(net,
                   criterion,
                   batch_size,
                   loader,
                   optim=None,
                   train=False):
    """Run one pass over `loader` and return the mean per-batch loss.

    When train=True the network is put in train mode and gradients are
    applied via `optim`; otherwise the pass is evaluation-only. Progress and
    timing are reported through the module logger.
    """
    print_freq = 10 if train else 4
    nb_batch = len(loader)
    nb_samples = nb_batch * batch_size
    epoch_loss = 0.0
    elapsed = 0.0
    net.train() if train else net.eval()
    epoch_start = time.time()
    logging.info(" {} batches, {} samples".format(nb_batch, nb_samples))
    for i, (y, G1, G2) in enumerate(loader):
        batch_start = time.time()
        if train:
            optim.zero_grad()
        # Move targets and graph batches to the active device.
        y = y.to(DEVICE, non_blocking=True)
        G1 = G1.to(DEVICE)
        G2 = G2.to(DEVICE) if G2 is not None else None
        y_pred = net(G1, G2).squeeze()
        loss = criterion(y_pred, y)
        # Anomaly detection stays enabled around the backward pass.
        with torch.autograd.set_detect_anomaly(True):
            if train:
                loss.backward()
                optim.step()
        epoch_loss += loss.item()
        if ((i + 1) % (nb_batch // print_freq)) == 0:
            nb_proc = (i + 1) * batch_size
            logging.info(" {:8d}: {:4.2f}".format(nb_proc, epoch_loss / (i + 1)))
        elapsed += time.time() - batch_start
    logging.info(" Model elapsed: {:.2f}".format(elapsed))
    logging.info(" Loader elapsed: {:.2f}".format(time.time() - epoch_start - elapsed))
    logging.info(" Total elapsed: {:.2f}".format(time.time() - epoch_start))
    return epoch_loss / nb_batch
def train(net,
          criterion,
          batch_size,
          train_loader,
          valid_loader,
          optim,
          arg_handler,
          save_dir,
          writer):
    """Epoch loop with ReduceLROnPlateau scheduling.

    Checkpoints the best and current models under `save_dir`, logs metrics to
    TensorBoard via `writer`, persists progress through `arg_handler`, and
    stops once the learning rate has decayed by a factor of 1000.
    """
    current_lr = optim.param_groups[0]['lr']
    lr_end = current_lr / 10 ** 3
    best_loss = arg_handler('best_loss')
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optim, 'min', patience=3, verbose=True)
    # Prime the scheduler with the historical best so resumed runs behave.
    scheduler.step(best_loss)
    for epoch in range(arg_handler('current_epoch'), 1000):
        epoch_start = time.time()
        logging.info("\n\nEpoch {}".format(epoch + 1))
        logging.info("Learning rate: {0:.3g}".format(current_lr))
        logging.info(" Train:")
        train_loss = proc_one_epoch(net,
                                    criterion,
                                    batch_size,
                                    train_loader,
                                    optim,
                                    train=True)
        logging.info("\n Valid:")
        valid_loss = proc_one_epoch(net,
                                    criterion,
                                    batch_size,
                                    valid_loader)
        logging.info("Train MSE: {:3.2f}".format(train_loss))
        logging.info("Valid MSE: {:3.2f}".format(valid_loss))
        writer.add_scalar('lr', current_lr, epoch)
        writer.add_scalar('train_loss', train_loss, epoch)
        writer.add_scalar('valid_loss', valid_loss, epoch)
        scheduler.step(valid_loss)
        if valid_loss < best_loss:
            logging.info("Best performance on valid set")
            best_loss = valid_loss
            save_best_model(net, save_dir)
        logging.info("{:6.1f} seconds, this epoch".format(time.time() - epoch_start))
        current_lr = optim.param_groups[0]['lr']
        arg_handler.update_args(current_lr, epoch + 1, best_loss)
        save_current_model(net, save_dir)
        if current_lr < lr_end:
            break
#############################################
# ARGS #
#############################################
class ArgumentHandler:
    """Persists resumable training state (lr, epoch, best loss) to args.yaml.

    On construction, loads an existing args.yaml from the experiment dir or
    initializes a fresh one. Reading a value is done by calling the instance:
    `handler('best_loss')`.
    """

    def __init__(self, experiment_dir, starting_lr):
        self.arg_file = os.path.join(experiment_dir, 'args.yaml')
        try:
            self.load_args()
            logging.info("Arguments loaded.")
        except Exception:
            self.initialize_args(starting_lr)
            logging.info("Arguments initialized.")

    def load_args(self):
        """Read self.args from the yaml file (raises if missing/invalid)."""
        with open(self.arg_file, 'r') as f:
            self.args = yaml.load(f, Loader=yaml.FullLoader)

    def initialize_args(self, starting_lr):
        """Create default args (epoch 0, given lr, huge best loss) and save."""
        self.args = {
            'current_epoch': 0,
            'current_lr': starting_lr,
            'best_loss': 10 ** 10,
        }
        self.save_args()

    def save_args(self):
        with open(self.arg_file, 'w') as f:
            yaml.dump(self.args, f)

    def update_args(self, current_lr, current_epoch, best_loss):
        """Overwrite the tracked state and persist it immediately."""
        self.args['current_lr'] = current_lr
        self.args['current_epoch'] = current_epoch
        self.args['best_loss'] = best_loss
        self.save_args()

    def __call__(self, param):
        return self.args[param]
#############################################
# MAIN #
#############################################
def main(artifact_path,
logp,
smiles,
gpu_num=0,
upsample=False,
exp_loss=False,
use_3d=False,
batch_size=128,
num_workers=12,
nb_hidden=256,
nb_layers=4,
lr=0.001,
store_preprocessed=False,
data_path=None):
"""Train an sGAT regressor on (smiles, logp) pairs and return the best saved model.

Side effects: writes TensorBoard logs and checkpoints under artifact_path,
and binds module globals (DEVICE, split, upsampled_weight, exp_loc, exp_scale)
consumed by dock_score_weights / exp_weighted_mse.
"""
# Global variables: GPU Device, random splits for upsampling, loc and scale parameter for exp weighted loss.
global DEVICE
global split
global upsampled_weight
global exp_loc
global exp_scale
# Select the CUDA device by index when available, else fall back to CPU.
if torch.cuda.is_available():
DEVICE = torch.device('cuda:' + str(gpu_num))
else:
DEVICE = 'cpu'
# logging variables
# Timestamped run id shared by the TensorBoard dir and the checkpoint dir.
dt = datetime.now().strftime("%Y.%m.%d_%H:%M:%S")
writer = SummaryWriter(log_dir=os.path.join(artifact_path, 'runs/' + dt))
save_dir = os.path.join(artifact_path, 'saves/' + dt)
os.makedirs(save_dir, exist_ok=True)
general_utils.initialize_logger(save_dir)
arg_handler = ArgumentHandler(save_dir, lr)
train_data, valid_data, test_data = create_datasets(logp, smiles, use_3d)
valid_data.compute_baseline_error()
print("Dataset created")
# Optionally swap the MolData sets for preprocessed (pickled) batch lists.
if (data_path is not None) and store_preprocessed:
print("Using stored dataset. Preprocessing if necessary.")
storage_path = parse_data_path(data_path, use_3d)
train_data = preprocess_data(train_data, storage_path, 'train')
valid_data = preprocess_data(valid_data, storage_path, 'valid')
test_data = preprocess_data(test_data, storage_path, 'test')
if upsample:
# Percentiles used in dock score weights.
# Reset randomness
np.random.seed()
#train_25 = np.percentile(train_data.logp, 25)
#train_75 = np.percentile(train_data.logp, 75)
# Randomized upsampling weight; split fixed at the 1st percentile of targets.
upsampled_weight = np.random.uniform(0.5, 1, 1)[0]
#split = np.random.uniform(train_25, train_75, 1)[0]
split = np.percentile(train_data.logp, 1)
logging.info("Upsampling weights: {:3.2f}".format(upsampled_weight))
logging.info("Upsampling split: {:3.2f}".format(split))
# Initialize weighted sampler
train_weights = torch.DoubleTensor(dock_score_weights(train_data.logp))
valid_weights = torch.DoubleTensor(dock_score_weights(valid_data.logp))
# test_weights = torch.DoubleTensor(dock_score_weights(test_data.logp))
train_sampler = torch.utils.data.sampler.WeightedRandomSampler(train_weights, len(train_weights))
valid_sampler = torch.utils.data.sampler.WeightedRandomSampler(valid_weights, len(valid_weights))
# test_sampler = torch.utils.data.sampler.WeightedRandomSampler(test_weights, len(test_weights))
train_loader = DataLoader(train_data,
collate_fn=my_collate,
batch_size=batch_size,
sampler=train_sampler,
num_workers=num_workers)
valid_loader = DataLoader(valid_data,
collate_fn=my_collate,
batch_size=batch_size,
sampler=valid_sampler,
num_workers=num_workers)
else:
# No upsampling: plain shuffled train loader, sequential valid loader.
train_loader = DataLoader(train_data,
shuffle=True,
collate_fn=my_collate,
batch_size=batch_size,
sampler=valid_sampler,
num_workers=num_workers) if False else DataLoader(train_data,
shuffle=True,
collate_fn=my_collate,
batch_size=batch_size,
num_workers=num_workers)
valid_loader = DataLoader(valid_data,
collate_fn=my_collate,
batch_size=batch_size,
num_workers=num_workers)
# Resume from a saved checkpoint when one exists; otherwise build a fresh sGAT.
try:
net = load_current_model(save_dir)
logging.info("Model restored")
except Exception as e:
input_dim, nb_edge_types = train_data.get_graph_spec()
net = sGAT(input_dim=input_dim,
nb_hidden=nb_hidden,
nb_layers=nb_layers,
nb_edge_types=nb_edge_types,
use_3d=use_3d)
logging.info(net)
logging.info("New model created")
net = net.to(DEVICE)
optim = torch.optim.Adam(net.parameters(), lr=arg_handler('current_lr'))
# Loss: exponentially weighted MSE (randomized scale) or plain MSE.
if exp_loss:
np.random.seed()
exp_loc = min(train_data.logp)
exp_scale = np.random.uniform(1, 4, 1)[0]
logging.info("Exponential loc: {:3.2f}".format(exp_loc))
logging.info("Exponential scale: {:3.2f}".format(exp_scale))
criterion = exp_weighted_mse
else:
criterion = torch.nn.MSELoss()
train(net,
criterion,
batch_size,
train_loader,
valid_loader,
optim,
arg_handler,
save_dir,
writer)
general_utils.close_logger()
writer.close()
return load_best_model(save_dir)
| [
"yaml.load",
"pickle.dump",
"numpy.random.seed",
"utils.graph_utils.mol_to_pyg_graph",
"yaml.dump",
"torch.cat",
"pickle.load",
"numpy.arange",
"torch.autograd.set_detect_anomaly",
"os.path.join",
"torch.nn.MSELoss",
"torch.multiprocessing.set_sharing_strategy",
"torch.utils.data.DataLoader"... | [((423, 480), 'torch.multiprocessing.set_sharing_strategy', 'torch.multiprocessing.set_sharing_strategy', (['"""file_system"""'], {}), "('file_system')\n", (465, 480), False, 'import torch\n'), ((2144, 2156), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (2153, 2156), True, 'import numpy as np\n'), ((2226, 2268), 'torch.tensor', 'torch.tensor', (['[src, dst]'], {'dtype': 'torch.long'}), '([src, dst], dtype=torch.long)\n', (2238, 2268), False, 'import torch\n'), ((3719, 3742), 'numpy.random.seed', 'np.random.seed', (['np_seed'], {}), '(np_seed)\n', (3733, 3742), True, 'import numpy as np\n'), ((3762, 3795), 'numpy.random.permutation', 'np.random.permutation', (['nb_samples'], {}), '(nb_samples)\n', (3783, 3795), True, 'import numpy as np\n'), ((4447, 4466), 'torch.cat', 'torch.cat', (['y'], {'dim': '(0)'}), '(y, dim=0)\n', (4456, 4466), False, 'import torch\n'), ((4712, 4817), 'torch.utils.data.DataLoader', 'DataLoader', (['raw_dataset'], {'shuffle': '(False)', 'collate_fn': 'my_collate', 'batch_size': 'batch_size', 'num_workers': '(16)'}), '(raw_dataset, shuffle=False, collate_fn=my_collate, batch_size=\n batch_size, num_workers=16)\n', (4722, 4817), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((5402, 5438), 'os.path.join', 'os.path.join', (['parent_path', 'data_name'], {}), '(parent_path, data_name)\n', (5414, 5438), False, 'import os\n'), ((5538, 5578), 'os.makedirs', 'os.makedirs', (['storage_path'], {'exist_ok': '(True)'}), '(storage_path, exist_ok=True)\n', (5549, 5578), False, 'import os\n'), ((5689, 5738), 'os.path.join', 'os.path.join', (['storage_path', "(dataset_name + '.pkl')"], {}), "(storage_path, dataset_name + '.pkl')\n", (5701, 5738), False, 'import os\n'), ((6870, 6881), 'time.time', 'time.time', ([], {}), '()\n', (6879, 6881), False, 'import time\n'), ((8236, 8322), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['optim', '"""min"""'], 
{'patience': '(3)', 'verbose': '(True)'}), "(optim, 'min', patience=3,\n verbose=True)\n", (8278, 8322), False, 'import torch\n'), ((11930, 11955), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11953, 11955), False, 'import torch\n'), ((12216, 12258), 'os.path.join', 'os.path.join', (['artifact_path', "('saves/' + dt)"], {}), "(artifact_path, 'saves/' + dt)\n", (12228, 12258), False, 'import os\n'), ((12263, 12299), 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), '(save_dir, exist_ok=True)\n', (12274, 12299), False, 'import os\n'), ((12304, 12345), 'utils.general_utils.initialize_logger', 'general_utils.initialize_logger', (['save_dir'], {}), '(save_dir)\n', (12335, 12345), True, 'import utils.general_utils as general_utils\n'), ((16243, 16271), 'utils.general_utils.close_logger', 'general_utils.close_logger', ([], {}), '()\n', (16269, 16271), True, 'import utils.general_utils as general_utils\n'), ((700, 744), 'os.path.join', 'os.path.join', (['model_path', '"""current_model.pt"""'], {}), "(model_path, 'current_model.pt')\n", (712, 744), False, 'import os\n'), ((815, 856), 'os.path.join', 'os.path.join', (['model_path', '"""best_model.pt"""'], {}), "(model_path, 'best_model.pt')\n", (827, 856), False, 'import os\n'), ((934, 978), 'os.path.join', 'os.path.join', (['model_path', '"""current_model.pt"""'], {}), "(model_path, 'current_model.pt')\n", (946, 978), False, 'import os\n'), ((1038, 1079), 'os.path.join', 'os.path.join', (['model_path', '"""best_model.pt"""'], {}), "(model_path, 'best_model.pt')\n", (1050, 1079), False, 'import os\n'), ((1901, 1944), 'torch.mean', 'torch.mean', (['(weight * (output - target) ** 2)'], {}), '(weight * (output - target) ** 2)\n', (1911, 1944), False, 'import torch\n'), ((2636, 2662), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles'], {}), '(smiles)\n', (2654, 2662), False, 'from rdkit import Chem\n'), ((2887, 2933), 'utils.graph_utils.mol_to_pyg_graph', 
'graph_utils.mol_to_pyg_graph', (['mol', 'self.use_3d'], {}), '(mol, self.use_3d)\n', (2915, 2933), True, 'import utils.graph_utils as graph_utils\n'), ((3356, 3375), 'numpy.array', 'np.array', (['self.logp'], {}), '(self.logp)\n', (3364, 3375), True, 'import numpy as np\n'), ((7014, 7025), 'time.time', 'time.time', ([], {}), '()\n', (7023, 7025), False, 'import time\n'), ((8418, 8429), 'time.time', 'time.time', ([], {}), '()\n', (8427, 8429), False, 'import time\n'), ((8555, 8579), 'logging.info', 'logging.info', (['""" Train:"""'], {}), "(' Train:')\n", (8567, 8579), False, 'import logging\n'), ((8865, 8891), 'logging.info', 'logging.info', (['"""\n Valid:"""'], {}), "('\\n Valid:')\n", (8877, 8891), False, 'import logging\n'), ((10226, 10267), 'os.path.join', 'os.path.join', (['experiment_dir', '"""args.yaml"""'], {}), "(experiment_dir, 'args.yaml')\n", (10238, 10267), False, 'import os\n'), ((13041, 13057), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (13055, 13057), True, 'import numpy as np\n'), ((13304, 13337), 'numpy.percentile', 'np.percentile', (['train_data.logp', '(1)'], {}), '(train_data.logp, 1)\n', (13317, 13337), True, 'import numpy as np\n'), ((14100, 14220), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'collate_fn': 'my_collate', 'batch_size': 'batch_size', 'sampler': 'train_sampler', 'num_workers': 'num_workers'}), '(train_data, collate_fn=my_collate, batch_size=batch_size,\n sampler=train_sampler, num_workers=num_workers)\n', (14110, 14220), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((14376, 14496), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_data'], {'collate_fn': 'my_collate', 'batch_size': 'batch_size', 'sampler': 'valid_sampler', 'num_workers': 'num_workers'}), '(valid_data, collate_fn=my_collate, batch_size=batch_size,\n sampler=valid_sampler, num_workers=num_workers)\n', (14386, 14496), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((14662, 14774), 
'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'shuffle': '(True)', 'collate_fn': 'my_collate', 'batch_size': 'batch_size', 'num_workers': 'num_workers'}), '(train_data, shuffle=True, collate_fn=my_collate, batch_size=\n batch_size, num_workers=num_workers)\n', (14672, 14774), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((14929, 15026), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_data'], {'collate_fn': 'my_collate', 'batch_size': 'batch_size', 'num_workers': 'num_workers'}), '(valid_data, collate_fn=my_collate, batch_size=batch_size,\n num_workers=num_workers)\n', (14939, 15026), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((15187, 15217), 'logging.info', 'logging.info', (['"""Model restored"""'], {}), "('Model restored')\n", (15199, 15217), False, 'import logging\n'), ((15727, 15743), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (15741, 15743), True, 'import numpy as np\n'), ((16034, 16052), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (16050, 16052), False, 'import torch\n'), ((1817, 1859), 'torch.exp', 'torch.exp', (['(-(target - exp_loc) / exp_scale)'], {}), '(-(target - exp_loc) / exp_scale)\n', (1826, 1859), False, 'import torch\n'), ((2774, 2800), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles'], {}), '(smiles)\n', (2792, 2800), False, 'from rdkit import Chem\n'), ((2949, 2974), 'torch.FloatTensor', 'torch.FloatTensor', (['[logp]'], {}), '([logp])\n', (2966, 2974), False, 'import torch\n'), ((4476, 4492), 'torch_geometric.data.Batch', 'pyg.data.Batch', ([], {}), '()\n', (4490, 4492), True, 'import torch_geometric as pyg\n'), ((5840, 5854), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5851, 5854), False, 'import pickle\n'), ((6040, 6051), 'time.time', 'time.time', ([], {}), '()\n', (6049, 6051), False, 'import time\n'), ((7290, 7329), 'torch.autograd.set_detect_anomaly', 'torch.autograd.set_detect_anomaly', (['(True)'], {}), '(True)\n', (7323, 7329), 
False, 'import torch\n'), ((7650, 7661), 'time.time', 'time.time', ([], {}), '()\n', (7659, 7661), False, 'import time\n'), ((9581, 9626), 'logging.info', 'logging.info', (['"""Best performance on valid set"""'], {}), "('Best performance on valid set')\n", (9593, 9626), False, 'import logging\n'), ((10322, 10355), 'logging.info', 'logging.info', (['"""Arguments loaded."""'], {}), "('Arguments loaded.')\n", (10334, 10355), False, 'import logging\n'), ((10578, 10614), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (10587, 10614), False, 'import yaml\n'), ((10922, 10945), 'yaml.dump', 'yaml.dump', (['self.args', 'f'], {}), '(self.args, f)\n', (10931, 10945), False, 'import yaml\n'), ((12078, 12092), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (12090, 12092), False, 'from datetime import datetime\n'), ((12158, 12199), 'os.path.join', 'os.path.join', (['artifact_path', "('runs/' + dt)"], {}), "(artifact_path, 'runs/' + dt)\n", (12170, 12199), False, 'import os\n'), ((13195, 13223), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(1)', '(1)'], {}), '(0.5, 1, 1)\n', (13212, 13223), True, 'import numpy as np\n'), ((15538, 15555), 'logging.info', 'logging.info', (['net'], {}), '(net)\n', (15550, 15555), False, 'import logging\n'), ((15564, 15597), 'logging.info', 'logging.info', (['"""New model created"""'], {}), "('New model created')\n", (15576, 15597), False, 'import logging\n'), ((15803, 15829), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(4)', '(1)'], {}), '(1, 4, 1)\n', (15820, 15829), True, 'import numpy as np\n'), ((3427, 3449), 'numpy.square', 'np.square', (['(logp - mean)'], {}), '(logp - mean)\n', (3436, 3449), True, 'import numpy as np\n'), ((3808, 3824), 'numpy.asarray', 'np.asarray', (['logp'], {}), '(logp)\n', (3818, 3824), True, 'import numpy as np\n'), ((3861, 3879), 'numpy.asarray', 'np.asarray', (['smiles'], {}), '(smiles)\n', (3871, 3879), True, 'import numpy as 
np\n'), ((4581, 4597), 'torch_geometric.data.Batch', 'pyg.data.Batch', ([], {}), '()\n', (4595, 4597), True, 'import torch_geometric as pyg\n'), ((6240, 6267), 'pickle.dump', 'pickle.dump', (['parsed_data', 'f'], {}), '(parsed_data, f)\n', (6251, 6267), False, 'import pickle\n'), ((7860, 7871), 'time.time', 'time.time', ([], {}), '()\n', (7869, 7871), False, 'import time\n'), ((10445, 10483), 'logging.info', 'logging.info', (['"""Arguments initialized."""'], {}), "('Arguments initialized.')\n", (10457, 10483), False, 'import logging\n'), ((7780, 7791), 'time.time', 'time.time', ([], {}), '()\n', (7789, 7791), False, 'import time\n'), ((9763, 9774), 'time.time', 'time.time', ([], {}), '()\n', (9772, 9774), False, 'import time\n'), ((6149, 6160), 'time.time', 'time.time', ([], {}), '()\n', (6158, 6160), False, 'import time\n')] |
import matplotlib.pyplot as plt
import numpy as np
import api.spotify as spotify
import api.utils as utils
from api.spotify import FeatureType, FeatureFilter
def plot_all_features(tracks, overlay_tracks=None):
    """Plot every Spotify audio feature of `tracks` across several figures.

    When `overlay_tracks` is given, its distributions are drawn on the same
    axes (histograms/bars overlaid; mode pies shown side by side).

    Fixes/cleanup: `!= None` / `== None` comparisons replaced with identity
    checks, and the copy-pasted base/overlay plotting collapsed into a local
    helper. Behavior (figures, titles, axis ranges) is unchanged.
    """
    def feature_vals(source, ftype):
        # Shorthand for pulling one feature column from a track list.
        return spotify.get_feature_values(source, ftype)

    def overlay_hist(fig, ax, title, ftype, **hist_kwargs):
        # Histogram the base tracks, then the overlay set (if any) on one axes.
        histogram(fig, ax, title, feature_vals(tracks, ftype), **hist_kwargs)
        if overlay_tracks is not None:
            histogram(fig, ax, title, feature_vals(overlay_tracks, ftype), **hist_kwargs)

    fig0, axs0 = double_plot()
    overlay_hist(fig0, axs0[0], "Danceability", FeatureType.DANCEABILITY)
    overlay_hist(fig0, axs0[1], "Energy", FeatureType.ENERGY)

    fig1, axs1 = double_plot()
    bar(fig1, axs1[0], "Key", utils.key_mapping, feature_vals(tracks, FeatureType.KEY))
    if overlay_tracks is not None:
        bar(fig1, axs1[0], "Key", utils.key_mapping, feature_vals(overlay_tracks, FeatureType.KEY))
    overlay_hist(fig1, axs1[1], "Loudness", FeatureType.LOUDNESS,
                 min_val=-60, max_val=0, num_bins=60, num_ticks=12)

    # Mode is categorical: one pie without an overlay, two pies with one.
    if overlay_tracks is None:
        fig2, axs2 = single_plot()
        pie(fig2, axs2, "Mode", ['major', 'minor'], feature_vals(tracks, FeatureType.MODE))
    else:
        fig2, axs2 = double_plot()
        pie(fig2, axs2[0], "Source Tracks' Mode", ['major', 'minor'],
            feature_vals(tracks, FeatureType.MODE))
        pie(fig2, axs2[1], "Filtered Tracks' Mode", ['major', 'minor'],
            feature_vals(overlay_tracks, FeatureType.MODE))

    fig3, axs3 = double_plot()
    overlay_hist(fig3, axs3[0], "Speechiness", FeatureType.SPEECHINESS)
    overlay_hist(fig3, axs3[1], "Acousticness", FeatureType.ACOUSTICNESS)

    fig4, axs4 = double_plot()
    overlay_hist(fig4, axs4[0], "Instrumentalness", FeatureType.INSTRUMENTALNESS)
    overlay_hist(fig4, axs4[1], "Liveness", FeatureType.LIVENESS)

    fig5, axs5 = double_plot()
    overlay_hist(fig5, axs5[0], "Valence", FeatureType.VALENCE)
    overlay_hist(fig5, axs5[1], "Tempo", FeatureType.TEMPO,
                 min_val=0, max_val=250, num_bins=50)
def single_plot():
    """Create one 15x5-inch axes; return (figure, axes)."""
    figure, axes = plt.subplots(1, 1)
    figure.set_size_inches(15, 5)
    return figure, axes
def double_plot():
    """Create a 15x5-inch figure with two side-by-side axes; return (figure, axes array)."""
    figure, axes = plt.subplots(1, 2)
    figure.set_size_inches(15, 5)
    return figure, axes
def histogram(fig, ax, title, values, min_val=0.0, max_val=1.0, num_bins=50, num_ticks=10):
    """Draw a black-edged frequency histogram of `values` on `ax`.

    The x axis spans [min_val, max_val] with num_ticks+1 evenly spaced ticks;
    `fig` is accepted for call-site symmetry but not used directly.
    """
    ax.hist(values, bins=num_bins, range=(min_val, max_val),
            rwidth=1.0, edgecolor='black', linewidth=1.0)
    tick_positions = np.linspace(min_val, max_val, num=num_ticks + 1)
    ax.set_xticks(tick_positions)
    ax.grid(which='major', axis='y')
    ax.set_title(title)
    ax.set_xlabel(title.lower())
    ax.set_ylabel("frequency")
def bar(fig, ax, title, categories, values):
    """Draw a frequency bar chart on `ax`.

    Each entry in `values` must be a member of `categories`; bars count the
    occurrences per category. `fig` is accepted for call-site symmetry.
    """
    counts = [0] * len(categories)
    for item in values:
        counts[categories.index(item)] += 1
    positions = np.arange(len(categories))
    ax.bar(positions, counts)
    ax.set_xticks(positions)
    ax.set_xticklabels(categories)
    ax.grid(which='major', axis='y')
    ax.set_title(title)
    ax.set_ylabel("frequency")
def pie(fig, ax, title, categories, values):
# Pie chart of category frequencies: each entry of `values` must be a member
# of `categories`; slices are labeled with two-decimal percentages.
counts = [0]*len(categories)
for value in values:
counts[categories.index(value)] += 1
ax.pie(counts, labels=categories, autopct=lambda percent:'{:.2f}%'.format(percent))
ax.set_title(title) | [
"api.spotify.get_feature_values",
"matplotlib.pyplot.subplots",
"numpy.linspace"
] | [((3390, 3408), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (3402, 3408), True, 'import matplotlib.pyplot as plt\n'), ((3500, 3518), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (3512, 3518), True, 'import matplotlib.pyplot as plt\n'), ((289, 349), 'api.spotify.get_feature_values', 'spotify.get_feature_values', (['tracks', 'FeatureType.DANCEABILITY'], {}), '(tracks, FeatureType.DANCEABILITY)\n', (315, 349), True, 'import api.spotify as spotify\n'), ((390, 444), 'api.spotify.get_feature_values', 'spotify.get_feature_values', (['tracks', 'FeatureType.ENERGY'], {}), '(tracks, FeatureType.ENERGY)\n', (416, 444), True, 'import api.spotify as spotify\n'), ((794, 845), 'api.spotify.get_feature_values', 'spotify.get_feature_values', (['tracks', 'FeatureType.KEY'], {}), '(tracks, FeatureType.KEY)\n', (820, 845), True, 'import api.spotify as spotify\n'), ((888, 944), 'api.spotify.get_feature_values', 'spotify.get_feature_values', (['tracks', 'FeatureType.LOUDNESS'], {}), '(tracks, FeatureType.LOUDNESS)\n', (914, 944), True, 'import api.spotify as spotify\n'), ((1859, 1918), 'api.spotify.get_feature_values', 'spotify.get_feature_values', (['tracks', 'FeatureType.SPEECHINESS'], {}), '(tracks, FeatureType.SPEECHINESS)\n', (1885, 1918), True, 'import api.spotify as spotify\n'), ((1965, 2025), 'api.spotify.get_feature_values', 'spotify.get_feature_values', (['tracks', 'FeatureType.ACOUSTICNESS'], {}), '(tracks, FeatureType.ACOUSTICNESS)\n', (1991, 2025), True, 'import api.spotify as spotify\n'), ((2377, 2441), 'api.spotify.get_feature_values', 'spotify.get_feature_values', (['tracks', 'FeatureType.INSTRUMENTALNESS'], {}), '(tracks, FeatureType.INSTRUMENTALNESS)\n', (2403, 2441), True, 'import api.spotify as spotify\n'), ((2484, 2540), 'api.spotify.get_feature_values', 'spotify.get_feature_values', (['tracks', 'FeatureType.LIVENESS'], {}), '(tracks, FeatureType.LIVENESS)\n', (2510, 2540), True, 'import 
api.spotify as spotify\n'), ((2885, 2940), 'api.spotify.get_feature_values', 'spotify.get_feature_values', (['tracks', 'FeatureType.VALENCE'], {}), '(tracks, FeatureType.VALENCE)\n', (2911, 2940), True, 'import api.spotify as spotify\n'), ((2980, 3033), 'api.spotify.get_feature_values', 'spotify.get_feature_values', (['tracks', 'FeatureType.TEMPO'], {}), '(tracks, FeatureType.TEMPO)\n', (3006, 3033), True, 'import api.spotify as spotify\n'), ((3798, 3846), 'numpy.linspace', 'np.linspace', (['min_val', 'max_val'], {'num': '(num_ticks + 1)'}), '(min_val, max_val, num=num_ticks + 1)\n', (3809, 3846), True, 'import numpy as np\n'), ((527, 595), 'api.spotify.get_feature_values', 'spotify.get_feature_values', (['overlay_tracks', 'FeatureType.DANCEABILITY'], {}), '(overlay_tracks, FeatureType.DANCEABILITY)\n', (553, 595), True, 'import api.spotify as spotify\n'), ((640, 702), 'api.spotify.get_feature_values', 'spotify.get_feature_values', (['overlay_tracks', 'FeatureType.ENERGY'], {}), '(overlay_tracks, FeatureType.ENERGY)\n', (666, 702), True, 'import api.spotify as spotify\n'), ((1082, 1141), 'api.spotify.get_feature_values', 'spotify.get_feature_values', (['overlay_tracks', 'FeatureType.KEY'], {}), '(overlay_tracks, FeatureType.KEY)\n', (1108, 1141), True, 'import api.spotify as spotify\n'), ((1188, 1252), 'api.spotify.get_feature_values', 'spotify.get_feature_values', (['overlay_tracks', 'FeatureType.LOUDNESS'], {}), '(overlay_tracks, FeatureType.LOUDNESS)\n', (1214, 1252), True, 'import api.spotify as spotify\n'), ((1425, 1477), 'api.spotify.get_feature_values', 'spotify.get_feature_values', (['tracks', 'FeatureType.MODE'], {}), '(tracks, FeatureType.MODE)\n', (1451, 1477), True, 'import api.spotify as spotify\n'), ((1594, 1646), 'api.spotify.get_feature_values', 'spotify.get_feature_values', (['tracks', 'FeatureType.MODE'], {}), '(tracks, FeatureType.MODE)\n', (1620, 1646), True, 'import api.spotify as spotify\n'), ((1720, 1780), 'api.spotify.get_feature_values', 
'spotify.get_feature_values', (['overlay_tracks', 'FeatureType.MODE'], {}), '(overlay_tracks, FeatureType.MODE)\n', (1746, 1780), True, 'import api.spotify as spotify\n'), ((2107, 2174), 'api.spotify.get_feature_values', 'spotify.get_feature_values', (['overlay_tracks', 'FeatureType.SPEECHINESS'], {}), '(overlay_tracks, FeatureType.SPEECHINESS)\n', (2133, 2174), True, 'import api.spotify as spotify\n'), ((2225, 2293), 'api.spotify.get_feature_values', 'spotify.get_feature_values', (['overlay_tracks', 'FeatureType.ACOUSTICNESS'], {}), '(overlay_tracks, FeatureType.ACOUSTICNESS)\n', (2251, 2293), True, 'import api.spotify as spotify\n'), ((2627, 2699), 'api.spotify.get_feature_values', 'spotify.get_feature_values', (['overlay_tracks', 'FeatureType.INSTRUMENTALNESS'], {}), '(overlay_tracks, FeatureType.INSTRUMENTALNESS)\n', (2653, 2699), True, 'import api.spotify as spotify\n'), ((2746, 2810), 'api.spotify.get_feature_values', 'spotify.get_feature_values', (['overlay_tracks', 'FeatureType.LIVENESS'], {}), '(overlay_tracks, FeatureType.LIVENESS)\n', (2772, 2810), True, 'import api.spotify as spotify\n'), ((3148, 3211), 'api.spotify.get_feature_values', 'spotify.get_feature_values', (['overlay_tracks', 'FeatureType.VALENCE'], {}), '(overlay_tracks, FeatureType.VALENCE)\n', (3174, 3211), True, 'import api.spotify as spotify\n'), ((3255, 3316), 'api.spotify.get_feature_values', 'spotify.get_feature_values', (['overlay_tracks', 'FeatureType.TEMPO'], {}), '(overlay_tracks, FeatureType.TEMPO)\n', (3281, 3316), True, 'import api.spotify as spotify\n')] |
# -*- coding: utf-8 -*-
'''
┌┬──────────────────────────────────┬┐
└┤ OCD ANALYSIS SOFTWARE ├┘
┌┤ <NAME> - Huang Lab ├┐
└┤ Rice Univ - 2017 ├┘
┌┤ <EMAIL> ├┐
└┴──────────────────────────────────┴┘
'''
import sys
import os
import numpy as np
import matplotlib as mpl
if sys.platform == 'darwin':
mpl.use('TkAgg')
import matplotlib.pyplot as plt
def kaiser_smooth(x,beta):
""" kaiser window smoothing """
window_len=41 #Needs to be odd for proper response
# extending the data at beginning and at the end
# to apply the window at the borders
s = np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]] #start:stop:step
w = np.kaiser(window_len,beta)
y = np.convolve(w/w.sum(),s,mode='valid')
return y[20:len(y)-20]
class ocd_spec:
    """One circular-dichroism (CD) spectrum.

    Attributes:
        wl:   numpy array of wavelengths [nm], stored in ascending order.
        cd:   numpy array of CD values [mdeg], aligned with ``wl``.
        dw:   step between consecutive wavelength samples (None when empty).
              NOTE(review): computed as wl[0]-wl[1] after the flip, so it is
              negative for ascending data -- confirm the intended sign.
        name: human-readable label used in plot titles and legends.
        data: raw two-column (wl, cd) array as loaded from disk.
    """
    def __init__(self, fn=None):
        """Load a spectrum from JASCO ASCII file ``fn``, or make an empty one."""
        if fn is not None:
            self.data = self.load(fn)
            # Files are stored from high to low wavelength; flip to ascending.
            self.wl = np.flipud(np.array(self.data[:, 0]))
            self.cd = np.flipud(np.array(self.data[:, 1]))
            self.name = fn
            self.dw = self.wl[0] - self.wl[1]
        else:
            self.data = np.array([[]])
            self.wl = np.array([])
            self.cd = np.array([])
            self.name = "empty spectrum"
            self.dw = None
    def load(self, fn):
        """Parse a JASCO J-810 ASCII export: 19 header lines, then wl/cd columns."""
        return np.loadtxt(fn, dtype=float, skiprows=19, usecols=(0, 1))
    def save(self, fn):
        """Write the spectrum in a J-810-like layout so ``load`` can re-read it."""
        # FIX: use a context manager so the file is closed even on error.
        with open(fn, "w+") as out:
            out.write(self.name + "\n")
            # Mimic the 19-line JASCO header that load() skips (name + 18 fillers).
            for _ in range(18):
                out.write("* * * * * * * * * *\n")
            for w, c in zip(self.wl, self.cd):
                out.write("{}\t{}\n".format(w, c))
    def renorm(self, scale_factor):
        """Return a copy of this spectrum with ``cd`` scaled by ``scale_factor``."""
        rescaled = ocd_spec()
        rescaled.wl = self.wl
        rescaled.dw = self.dw
        rescaled.name = self.name + "[Rescaled]"
        rescaled.cd = scale_factor * self.cd
        return rescaled
    def __add__(self, new):
        """Point-wise sum; pairs are truncated to the shorter spectrum."""
        out = ocd_spec()
        n = min(len(self.cd), len(new.cd))
        # FIX: vectorized sum replaces the old O(n^2) np.append loop.
        out.cd = np.asarray(self.cd[:n], dtype=float) + np.asarray(new.cd[:n], dtype=float)
        out.wl = self.wl
        out.dw = self.dw  # FIX: propagate the step (previously left as None)
        return out
    def __sub__(self, new):
        """Point-wise difference; pairs are truncated to the shorter spectrum."""
        out = ocd_spec()
        n = min(len(self.cd), len(new.cd))
        out.cd = np.asarray(self.cd[:n], dtype=float) - np.asarray(new.cd[:n], dtype=float)
        out.wl = self.wl
        out.dw = self.dw
        return out
    def __radd__(self, other):
        """Support sum(): ``0 + spec`` returns the spectrum itself."""
        if other == 0:
            return self
        return self.__add__(other)
    def __rsub__(self, other):
        """Mirror of __sub__ for the left operand.

        NOTE(review): ``0 - spec`` returns the spectrum unchanged (not
        negated); kept as-is for backward compatibility.
        """
        if other == 0:
            return self
        return self.__sub__(other)
    def graph(self):
        """Show this spectrum with a dotted zero line."""
        zero_line = np.zeros(len(self.wl))
        plt.figure("CD Spectrum")
        plt.plot(self.wl, self.cd, 'k')
        plt.plot(self.wl, zero_line, 'k:')
        plt.ylabel("CD [mdeg]")
        plt.xlabel("Wavelength [nm]")
        plt.title(self.name)
        plt.show()
    def trim(self, wl1, wl2):
        """Return the sub-spectrum between wavelengths wl1 and wl2 (inclusive).

        Both wavelengths must be present in ``wl`` exactly.
        """
        # FIX: removed the dead w_arr/flipud computation that was never used.
        trimmed = ocd_spec()
        idx1, = np.where(self.wl == wl1)
        idx2, = np.where(self.wl == wl2)
        trimmed.wl = self.wl[int(idx1):int(idx2) + 1]
        trimmed.cd = self.cd[int(idx1):int(idx2) + 1]
        trimmed.dw = self.dw
        trimmed.name = self.name + " [" + str(wl1) + ":" + str(wl2) + "]"
        return trimmed
    def rm_baseline(self, constant, type="constant", plot=False):
        """Return a copy with a constant baseline subtracted.

        Args:
            constant: baseline level removed from every point.
            type: kept for interface compatibility; only "constant" is supported.
            plot: when truthy, show a before/after comparison plot.
        """
        corrected = ocd_spec()
        corrected.cd = self.cd - constant  # broadcasting; no ones() vector needed
        corrected.wl = self.wl
        corrected.name = self.name + " [Baseline Corrected]"
        corrected.dw = self.dw
        if plot:
            plt.figure("Baseline Correction")
            plt.plot(self.wl, self.cd, 'k:')
            plt.plot(corrected.wl, corrected.cd, 'k')
            plt.plot(self.wl, np.zeros(len(self.wl)), 'k:')
            plt.title("Baseline Correction")
            plt.ylabel("CD [mdeg]")
            plt.xlabel("Wavelength [nm]")
            plt.legend([self.name, corrected.name], loc='best')
            plt.show()
        return corrected
    def filter(self, type="kaiser", beta=2.0, plot=True, lwidths=None):
        """Return a smoothed copy of the spectrum.

        Args:
            type: kept for interface compatibility; only "kaiser" is implemented.
            beta: Kaiser window shape parameter forwarded to kaiser_smooth.
            plot: when truthy, show the raw and filtered signals together.
            lwidths: [filtered, raw] line widths for the comparison plot.
        """
        if lwidths is None:  # FIX: avoid the mutable default argument [1, 1]
            lwidths = [1, 1]
        filtered = ocd_spec()
        filtered.name = self.name + "[Filtered]"
        filtered.wl = self.wl
        filtered.dw = self.dw
        # BUG FIX: beta was previously hard-coded to 1, ignoring the argument.
        filtered.cd = kaiser_smooth(self.cd, beta)
        if plot:
            # BUG FIX: the plot was previously shown even when plot=False.
            plt.figure("Filtered Signal")
            plt.title("Filtered Signal")
            plt.ylabel("CD [mdeg]")
            plt.xlabel("Wavelength [nm]")
            plt.plot(self.wl, self.cd, color='salmon', linewidth=lwidths[1])
            plt.plot(filtered.wl, filtered.cd, 'k', linewidth=lwidths[0])
            plt.plot(self.wl, np.zeros(len(self.wl)), 'k:')
            plt.legend([self.name, filtered.name], loc='best')
            plt.show()
        return filtered
def avg_signal(specs):
    """Average a collection of ocd_spec objects point by point.

    The result takes its wavelength axis and step from the first spectrum.
    """
    total = np.zeros(len(specs[0].cd))
    for spec in specs:
        for idx, value in enumerate(spec.cd):
            total[idx] += value
    avg = ocd_spec()
    avg.cd = total / len(specs)
    avg.wl = specs[0].wl
    avg.dw = specs[0].dw
    return avg
def mult_graph(specs, types=None, colors=None, lwidths=None, title=None,
               verts=None, xlim=None):
    """Plot several ocd_spec objects on one set of axes.

    Args:
        specs:   list of spectra to draw.
        types:   optional per-spectrum Matplotlib format strings.
        colors:  optional per-spectrum colors (only used when types is given).
        lwidths: optional per-spectrum line widths (defaults to 1 each).
        title:   plot title.
        verts:   x positions at which to draw vertical marker lines.
        xlim:    (xmin, xmax) pair limiting the x axis.
    """
    if lwidths is None:  # idiom fix: identity comparison with None (PEP 8)
        lwidths = [1] * len(specs)
    plt.figure("Composite Plot")
    axis_y = np.zeros(len(specs[0].wl))
    plt.title(title)
    plt.xlabel("Wavelength [nm]")
    plt.ylabel("CD [mdeg]")
    if types is not None and colors is None:
        names = []
        for spec, fmt, width in zip(specs, types, lwidths):
            plt.plot(spec.wl, spec.cd, fmt, linewidth=width)
            names.append(spec.name)
        plt.legend(names, loc="best")
    if types is not None and colors is not None:
        names = []
        for spec, fmt, color, width in zip(specs, types, colors, lwidths):
            # BUG FIX: linewidth previously received the color value (k)
            # instead of the width from lwidths (l).
            plt.plot(spec.wl, spec.cd, fmt, color=color, linewidth=width)
            names.append(spec.name)
        plt.legend(names, loc="best")
    if types is None and colors is None:
        names = []
        for i, spec in enumerate(specs):
            plt.plot(spec.wl, spec.cd, linewidth=lwidths[i])
            names.append(spec.name)
        plt.legend(names, loc="best")
    # NOTE(review): colors without types draws nothing; kept as-is.
    if verts is not None:
        for vert in verts:
            plt.axvline(x=vert)
    plt.plot(specs[0].wl, axis_y, 'k:')
    if xlim is not None and len(xlim) == 2:
        plt.xlim(xlim[0], xlim[1])
    plt.show()
def graph_series(specs, title=None, cmap=mpl.cm.Reds, lwidths=None, xlim=None):
    """Plot a series of spectra colored along a Matplotlib colormap.

    Args:
        specs:   list of spectra, colored from light to dark along ``cmap``.
        title:   plot title.
        cmap:    Matplotlib colormap used to color the series.
        lwidths: optional per-spectrum line widths (defaults to 1 each).
        xlim:    (xmin, xmax) pair limiting the x axis.
    """
    if lwidths is None:  # idiom fix: identity comparison with None (PEP 8)
        lwidths = [1] * len(specs)
    plt.figure("Series Plot")
    plt.title(title)
    plt.xlabel("Wavelength [nm]")
    plt.ylabel("CD [mdeg]")
    names = []
    for i, spec in enumerate(specs):
        plt.plot(spec.wl, spec.cd, color=cmap((i + 1) * 1. / len(specs)),
                 linewidth=lwidths[i])
        names.append(spec.name)
    plt.legend(names, loc="best")
    if xlim is not None and len(xlim) == 2:
        plt.xlim(xlim[0], xlim[1])
    plt.show()
if sys.platform == "win32":
try:
import wx
def fs():
app = wx.App(None)
style = wx.FD_OPEN | wx.FD_FILE_MUST_EXIST | wx.FD_MULTIPLE
dialog = wx.FileDialog(None, 'Open',wildcard='*.txt',style=style)
mult = None
if dialog.ShowModal() == wx.ID_OK:
try:
paths = dialog.GetPath()
mult = False
except:
paths = dialog.GetPaths()
mult = True
else:
paths = None
dialog.Destroy()
if mult == False:
return ocd_spec(paths[0])
if mult == True:
specs =[]
for i in paths:
specs.append(ocd_spec(i))
return specs
def mfs():
app = wx.App(None)
style = wx.FD_OPEN | wx.FD_FILE_MUST_EXIST | wx.FD_MULTIPLE
dialog = wx.FileDialog(None,'Open',wildcard='*.txt',style=style)
if dialog.ShowModal() == wx.ID_OK:
paths = dialog.GetPaths()
else:
paths =None
dialog.Destroy()
specs = []
for i in paths:
specs.append(ocd_spec(i))
return specs
except:
print("WX module not found. Defaulting to CLI.")
if sys.platform == "linux" or sys.platform == "darwin" \
or sys.platform == "linux2":
try:
from dialog import Dialog
dlg = Dialog(dialog="dialog")
rows, cols = os.popen("stty size","r").read().split()
rows = int(rows); cols = int(cols)
def fs():
path = './'
entries = os.listdir(path)
tagtuples = []
for i in entries:
tagtuples.append((i,i,"off"))
code, paths = dlg.buildlist("Select Files",rows-10,cols-10,rows-14,tagtuples)
if code == Dialog.OK:
if len(paths) == 1:
return ocd_spec(paths[0])
else:
specs=[]
for i in paths:
specs.append(ocd_spec(i))
return specs
if code == Dialog.CANCEL:
return None
except:
print("Dialog module not found. Defaulting to CLI.")
| [
"matplotlib.pyplot.title",
"numpy.kaiser",
"os.popen",
"dialog.Dialog",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axvline",
"numpy.append",
"numpy.loadtxt",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"numpy.flipud",
"matplotlib.use",
"wx.App",
"matplotlib.pyplot.ylabel",
... | [((340, 356), 'matplotlib.use', 'mpl.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (347, 356), True, 'import matplotlib as mpl\n'), ((753, 780), 'numpy.kaiser', 'np.kaiser', (['window_len', 'beta'], {}), '(window_len, beta)\n', (762, 780), True, 'import numpy as np\n'), ((5081, 5109), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Composite Plot"""'], {}), "('Composite Plot')\n", (5091, 5109), True, 'import matplotlib.pyplot as plt\n'), ((5148, 5164), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (5157, 5164), True, 'import matplotlib.pyplot as plt\n'), ((5166, 5195), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Wavelength [nm]"""'], {}), "('Wavelength [nm]')\n", (5176, 5195), True, 'import matplotlib.pyplot as plt\n'), ((5197, 5220), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""CD [mdeg]"""'], {}), "('CD [mdeg]')\n", (5207, 5220), True, 'import matplotlib.pyplot as plt\n'), ((5878, 5913), 'matplotlib.pyplot.plot', 'plt.plot', (['specs[0].wl', 'axis_y', '"""k:"""'], {}), "(specs[0].wl, axis_y, 'k:')\n", (5886, 5913), True, 'import matplotlib.pyplot as plt\n'), ((5978, 5988), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5986, 5988), True, 'import matplotlib.pyplot as plt\n'), ((6124, 6149), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Series Plot"""'], {}), "('Series Plot')\n", (6134, 6149), True, 'import matplotlib.pyplot as plt\n'), ((6151, 6167), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (6160, 6167), True, 'import matplotlib.pyplot as plt\n'), ((6169, 6198), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Wavelength [nm]"""'], {}), "('Wavelength [nm]')\n", (6179, 6198), True, 'import matplotlib.pyplot as plt\n'), ((6200, 6223), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""CD [mdeg]"""'], {}), "('CD [mdeg]')\n", (6210, 6223), True, 'import matplotlib.pyplot as plt\n'), ((6384, 6413), 'matplotlib.pyplot.legend', 'plt.legend', (['names'], {'loc': '"""best"""'}), "(names, 
loc='best')\n", (6394, 6413), True, 'import matplotlib.pyplot as plt\n'), ((6479, 6489), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6487, 6489), True, 'import matplotlib.pyplot as plt\n'), ((1162, 1218), 'numpy.loadtxt', 'np.loadtxt', (['fn'], {'dtype': 'float', 'skiprows': '(19)', 'usecols': '(0, 1)'}), '(fn, dtype=float, skiprows=19, usecols=(0, 1))\n', (1172, 1218), True, 'import numpy as np\n'), ((1631, 1643), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1639, 1643), True, 'import numpy as np\n'), ((1837, 1849), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1845, 1849), True, 'import numpy as np\n'), ((2644, 2669), 'matplotlib.pyplot.figure', 'plt.figure', (['"""CD Spectrum"""'], {}), "('CD Spectrum')\n", (2654, 2669), True, 'import matplotlib.pyplot as plt\n'), ((2672, 2703), 'matplotlib.pyplot.plot', 'plt.plot', (['self.wl', 'self.cd', '"""k"""'], {}), "(self.wl, self.cd, 'k')\n", (2680, 2703), True, 'import matplotlib.pyplot as plt\n'), ((2706, 2737), 'matplotlib.pyplot.plot', 'plt.plot', (['self.wl', 'axis_y', '"""k:"""'], {}), "(self.wl, axis_y, 'k:')\n", (2714, 2737), True, 'import matplotlib.pyplot as plt\n'), ((2740, 2763), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""CD [mdeg]"""'], {}), "('CD [mdeg]')\n", (2750, 2763), True, 'import matplotlib.pyplot as plt\n'), ((2766, 2795), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Wavelength [nm]"""'], {}), "('Wavelength [nm]')\n", (2776, 2795), True, 'import matplotlib.pyplot as plt\n'), ((2798, 2818), 'matplotlib.pyplot.title', 'plt.title', (['self.name'], {}), '(self.name)\n', (2807, 2818), True, 'import matplotlib.pyplot as plt\n'), ((2821, 2831), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2829, 2831), True, 'import matplotlib.pyplot as plt\n'), ((2868, 2888), 'numpy.array', 'np.array', (['[wl1, wl2]'], {}), '([wl1, wl2])\n', (2876, 2888), True, 'import numpy as np\n'), ((2968, 2992), 'numpy.where', 'np.where', (['(self.wl == wl1)'], {}), '(self.wl == wl1)\n', 
(2976, 2992), True, 'import numpy as np\n'), ((3003, 3027), 'numpy.where', 'np.where', (['(self.wl == wl2)'], {}), '(self.wl == wl2)\n', (3011, 3027), True, 'import numpy as np\n'), ((4181, 4210), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Filtered Signal"""'], {}), "('Filtered Signal')\n", (4191, 4210), True, 'import matplotlib.pyplot as plt\n'), ((4213, 4241), 'matplotlib.pyplot.title', 'plt.title', (['"""Filtered Signal"""'], {}), "('Filtered Signal')\n", (4222, 4241), True, 'import matplotlib.pyplot as plt\n'), ((4244, 4267), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""CD [mdeg]"""'], {}), "('CD [mdeg]')\n", (4254, 4267), True, 'import matplotlib.pyplot as plt\n'), ((4270, 4299), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Wavelength [nm]"""'], {}), "('Wavelength [nm]')\n", (4280, 4299), True, 'import matplotlib.pyplot as plt\n'), ((4302, 4366), 'matplotlib.pyplot.plot', 'plt.plot', (['self.wl', 'self.cd'], {'color': '"""salmon"""', 'linewidth': 'lwidths[1]'}), "(self.wl, self.cd, color='salmon', linewidth=lwidths[1])\n", (4310, 4366), True, 'import matplotlib.pyplot as plt\n'), ((4366, 4435), 'matplotlib.pyplot.plot', 'plt.plot', (['new_ocd_spec.wl', 'new_ocd_spec.cd', '"""k"""'], {'linewidth': 'lwidths[0]'}), "(new_ocd_spec.wl, new_ocd_spec.cd, 'k', linewidth=lwidths[0])\n", (4374, 4435), True, 'import matplotlib.pyplot as plt\n'), ((4483, 4537), 'matplotlib.pyplot.legend', 'plt.legend', (['[self.name, new_ocd_spec.name]'], {'loc': '"""best"""'}), "([self.name, new_ocd_spec.name], loc='best')\n", (4493, 4537), True, 'import matplotlib.pyplot as plt\n'), ((4572, 4603), 'matplotlib.pyplot.plot', 'plt.plot', (['self.wl', 'axis_y', '"""k:"""'], {}), "(self.wl, axis_y, 'k:')\n", (4580, 4603), True, 'import matplotlib.pyplot as plt\n'), ((4604, 4614), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4612, 4614), True, 'import matplotlib.pyplot as plt\n'), ((5374, 5403), 'matplotlib.pyplot.legend', 'plt.legend', (['names'], {'loc': 
'"""best"""'}), "(names, loc='best')\n", (5384, 5403), True, 'import matplotlib.pyplot as plt\n'), ((5573, 5602), 'matplotlib.pyplot.legend', 'plt.legend', (['names'], {'loc': '"""best"""'}), "(names, loc='best')\n", (5583, 5602), True, 'import matplotlib.pyplot as plt\n'), ((5772, 5801), 'matplotlib.pyplot.legend', 'plt.legend', (['names'], {'loc': '"""best"""'}), "(names, loc='best')\n", (5782, 5801), True, 'import matplotlib.pyplot as plt\n'), ((5951, 5977), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim[0]', 'xlim[1]'], {}), '(xlim[0], xlim[1])\n', (5959, 5977), True, 'import matplotlib.pyplot as plt\n'), ((6452, 6478), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim[0]', 'xlim[1]'], {}), '(xlim[0], xlim[1])\n', (6460, 6478), True, 'import matplotlib.pyplot as plt\n'), ((7623, 7646), 'dialog.Dialog', 'Dialog', ([], {'dialog': '"""dialog"""'}), "(dialog='dialog')\n", (7629, 7646), False, 'from dialog import Dialog\n'), ((1686, 1706), 'numpy.append', 'np.append', (['cd', '(i + j)'], {}), '(cd, i + j)\n', (1695, 1706), True, 'import numpy as np\n'), ((1893, 1913), 'numpy.append', 'np.append', (['cd', '(i - j)'], {}), '(cd, i - j)\n', (1902, 1913), True, 'import numpy as np\n'), ((2466, 2480), 'numpy.array', 'np.array', (['[[]]'], {}), '([[]])\n', (2474, 2480), True, 'import numpy as np\n'), ((2494, 2506), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2502, 2506), True, 'import numpy as np\n'), ((2520, 2532), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2528, 2532), True, 'import numpy as np\n'), ((2913, 2929), 'numpy.flipud', 'np.flipud', (['w_arr'], {}), '(w_arr)\n', (2922, 2929), True, 'import numpy as np\n'), ((3576, 3609), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Baseline Correction"""'], {}), "('Baseline Correction')\n", (3586, 3609), True, 'import matplotlib.pyplot as plt\n'), ((3613, 3645), 'matplotlib.pyplot.plot', 'plt.plot', (['self.wl', 'self.cd', '"""k:"""'], {}), "(self.wl, self.cd, 'k:')\n", (3621, 3645), True, 'import 
matplotlib.pyplot as plt\n'), ((3647, 3694), 'matplotlib.pyplot.plot', 'plt.plot', (['new_ocd_spec.wl', 'new_ocd_spec.cd', '"""k"""'], {}), "(new_ocd_spec.wl, new_ocd_spec.cd, 'k')\n", (3655, 3694), True, 'import matplotlib.pyplot as plt\n'), ((3745, 3777), 'matplotlib.pyplot.title', 'plt.title', (['"""Baseline Correction"""'], {}), "('Baseline Correction')\n", (3754, 3777), True, 'import matplotlib.pyplot as plt\n'), ((3781, 3804), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""CD [mdeg]"""'], {}), "('CD [mdeg]')\n", (3791, 3804), True, 'import matplotlib.pyplot as plt\n'), ((3808, 3837), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Wavelength [nm]"""'], {}), "('Wavelength [nm]')\n", (3818, 3837), True, 'import matplotlib.pyplot as plt\n'), ((3841, 3895), 'matplotlib.pyplot.legend', 'plt.legend', (['[self.name, new_ocd_spec.name]'], {'loc': '"""best"""'}), "([self.name, new_ocd_spec.name], loc='best')\n", (3851, 3895), True, 'import matplotlib.pyplot as plt\n'), ((3897, 3907), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3905, 3907), True, 'import matplotlib.pyplot as plt\n'), ((5314, 5350), 'matplotlib.pyplot.plot', 'plt.plot', (['i.wl', 'i.cd', 'j'], {'linewidth': 'k'}), '(i.wl, i.cd, j, linewidth=k)\n', (5322, 5350), True, 'import matplotlib.pyplot as plt\n'), ((5505, 5550), 'matplotlib.pyplot.plot', 'plt.plot', (['i.wl', 'i.cd', 'j'], {'color': 'k', 'linewidth': 'k'}), '(i.wl, i.cd, j, color=k, linewidth=k)\n', (5513, 5550), True, 'import matplotlib.pyplot as plt\n'), ((5684, 5740), 'matplotlib.pyplot.plot', 'plt.plot', (['specs[i].wl', 'specs[i].cd'], {'linewidth': 'lwidths[i]'}), '(specs[i].wl, specs[i].cd, linewidth=lwidths[i])\n', (5692, 5740), True, 'import matplotlib.pyplot as plt\n'), ((5853, 5876), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'verts[i]'}), '(x=verts[i])\n', (5864, 5876), True, 'import matplotlib.pyplot as plt\n'), ((6559, 6571), 'wx.App', 'wx.App', (['None'], {}), '(None)\n', (6565, 6571), False, 'import 
wx\n'), ((6647, 6705), 'wx.FileDialog', 'wx.FileDialog', (['None', '"""Open"""'], {'wildcard': '"""*.txt"""', 'style': 'style'}), "(None, 'Open', wildcard='*.txt', style=style)\n", (6660, 6705), False, 'import wx\n'), ((7096, 7108), 'wx.App', 'wx.App', (['None'], {}), '(None)\n', (7102, 7108), False, 'import wx\n'), ((7184, 7242), 'wx.FileDialog', 'wx.FileDialog', (['None', '"""Open"""'], {'wildcard': '"""*.txt"""', 'style': 'style'}), "(None, 'Open', wildcard='*.txt', style=style)\n", (7197, 7242), False, 'import wx\n'), ((7783, 7799), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (7793, 7799), False, 'import os\n'), ((2306, 2331), 'numpy.array', 'np.array', (['self.data[:, 0]'], {}), '(self.data[:, 0])\n', (2314, 2331), True, 'import numpy as np\n'), ((2355, 2380), 'numpy.array', 'np.array', (['self.data[:, 1]'], {}), '(self.data[:, 1])\n', (2363, 2380), True, 'import numpy as np\n'), ((7662, 7688), 'os.popen', 'os.popen', (['"""stty size"""', '"""r"""'], {}), "('stty size', 'r')\n", (7670, 7688), False, 'import os\n')] |
import numpy as np
import pandas as pd
from pywt import wavedec
from zipfile import ZipFile
from statsmodels.robust.scale import mad as medianAD
def get_class_and_frequence(path: str) -> (int, int):
    '''
    `path` is a str shaped 'folder/subfolder/file'.
    Returns a `(class, frequency)` tuple: the class is encoded in the
    subfolder name and the frequency in the file name.
    '''
    _, class_part, file_part = path.split('/')
    # The class label is the last character of the subfolder name.
    label = int(class_part[-1])
    # The file name packs four values separated by 'c' (V0cV1cV2cV3.csv);
    # the frequency is the third one, V2.
    frequency = int(file_part.split('c')[2])
    return (label, frequency)
def energy(vec: np.ndarray) -> np.float64:
    """Return the signal energy: the sum of the squared elements of ``vec``."""
    return (vec * vec).sum()
def create_fs20(vec: np.ndarray, file_path: str) -> pd.DataFrame:
    '''
    Given a signal (`vec`) and the origin file name (`file_path`),
    returns a single-row dataframe with the "Feature Set 20" attributes.

    Feature Set 20:
    ---
    + MeanAD D3, MeanAD D4, MeanAD A5;
    + MedianAD D3, MedianAD D4, MedianAD D5, MedianAD A5;
    + Energy D3, Energy D4, Energy D5, Energy A5;
    + Kurt D5, Kurt A5;
    + Skew D4;
    + Frequency;
    '''
    # Collect features in an insertion-ordered dict so the column order
    # matches the original implementation.
    features = {}
    # Coefficient tuple from the 5-level DWT: (A5, D5, D4, D3, D2, D1).
    dwt_coefs = wavedec(data=vec, wavelet='db2', level=5)
    # Mean absolute deviation for A5, D4, D3.
    # FIX: pandas removed DataFrame.mad() in 2.0, so compute it explicitly
    # (mean of |x - mean(x)|, same definition pandas used).
    for index, coef in zip([0, 2, 3], ['A5', 'D4', 'D3']):
        band = np.asarray(dwt_coefs[index], dtype=float)
        features[f'MeanAD-{coef}'] = float(np.abs(band - band.mean()).mean())
    # Median absolute deviation and energy for A5, D5, D4, D3.
    for index, coef in zip([0, 1, 2, 3], ['A5', 'D5', 'D4', 'D3']):
        features[f'MedianAD-{coef}'] = medianAD(dwt_coefs[index])
        features[f'Energy-{coef}'] = energy(dwt_coefs[index])
    # Higher-order shape statistics (Series.kurt/skew give the same scalar
    # the old one-column DataFrame versions did).
    features['Kurt-A5'] = pd.Series(dwt_coefs[0]).kurt()
    features['Kurt-D5'] = pd.Series(dwt_coefs[1]).kurt()
    features['Skew-D4'] = pd.Series(dwt_coefs[2]).skew()
    # Target class and acquisition frequency come from the file path.
    target, frequence = get_class_and_frequence(file_path)
    features['frequence'] = frequence
    features['target'] = target
    return pd.DataFrame([features])
| [
"pandas.DataFrame",
"pywt.wavedec",
"numpy.square",
"statsmodels.robust.scale.mad"
] | [((1303, 1317), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1315, 1317), True, 'import pandas as pd\n'), ((1386, 1427), 'pywt.wavedec', 'wavedec', ([], {'data': 'vec', 'wavelet': '"""db2"""', 'level': '(5)'}), "(data=vec, wavelet='db2', level=5)\n", (1393, 1427), False, 'from pywt import wavedec\n'), ((1751, 1777), 'statsmodels.robust.scale.mad', 'medianAD', (['dwt_coefs[index]'], {}), '(dwt_coefs[index])\n', (1759, 1777), True, 'from statsmodels.robust.scale import mad as medianAD\n'), ((791, 805), 'numpy.square', 'np.square', (['vec'], {}), '(vec)\n', (800, 805), True, 'import numpy as np\n'), ((1891, 1917), 'pandas.DataFrame', 'pd.DataFrame', (['dwt_coefs[0]'], {}), '(dwt_coefs[0])\n', (1903, 1917), True, 'import pandas as pd\n'), ((1971, 1997), 'pandas.DataFrame', 'pd.DataFrame', (['dwt_coefs[1]'], {}), '(dwt_coefs[1])\n', (1983, 1997), True, 'import pandas as pd\n'), ((2051, 2077), 'pandas.DataFrame', 'pd.DataFrame', (['dwt_coefs[2]'], {}), '(dwt_coefs[2])\n', (2063, 2077), True, 'import pandas as pd\n'), ((1550, 1580), 'pandas.DataFrame', 'pd.DataFrame', (['dwt_coefs[index]'], {}), '(dwt_coefs[index])\n', (1562, 1580), True, 'import pandas as pd\n')] |
# Copyright (c) 2021, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root
# or https://opensource.org/licenses/BSD-3-Clause
#
"""
The Fully Connected Network class
"""
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from gym.spaces import Discrete, MultiDiscrete
from warp_drive.utils.constants import Constants
from warp_drive.utils.data_feed import DataFeed
_OBSERVATIONS = Constants.OBSERVATIONS
# Policy networks
# ---------------
class FullyConnected(nn.Module):
    """
    Fully connected network implementation in Pytorch
    """

    name = "torch_fully_connected"

    def __init__(self, env, model_config, policy, policy_tag_to_agent_id_map):
        """Build the shared FC trunk plus the policy and value heads.

        Args:
            env: environment wrapper exposing ``env.env`` observation/action
                spaces and a ``cuda_data_manager`` (used in forward()).
            model_config: dict with key "fc_dims", a list of hidden sizes.
            policy: tag of the policy this network serves.
            policy_tag_to_agent_id_map: maps a policy tag to its agent ids.
        """
        super().__init__()
        self.env = env
        fc_dims = model_config["fc_dims"]
        assert isinstance(fc_dims, list)
        num_fc_layers = len(fc_dims)
        self.policy = policy
        self.policy_tag_to_agent_id_map = policy_tag_to_agent_id_map
        self.fast_forward_mode = False

        # Flatten the obs space; inspect the first agent mapped to this policy.
        sample_agent_id = self.policy_tag_to_agent_id_map[self.policy][0]
        obs_space = np.prod(self.env.env.observation_space[sample_agent_id].shape)
        if isinstance(self.env.env.action_space[sample_agent_id], Discrete):
            action_space = [self.env.env.action_space[sample_agent_id].n]
        elif isinstance(self.env.env.action_space[sample_agent_id], MultiDiscrete):
            action_space = self.env.env.action_space[sample_agent_id].nvec
        else:
            raise NotImplementedError

        # Each FC layer feeds the next; the first takes the flattened obs.
        input_dims = [obs_space] + fc_dims[:-1]
        output_dims = fc_dims
        self.fc = nn.ModuleDict()
        for fc_layer in range(num_fc_layers):
            self.fc[str(fc_layer)] = nn.Sequential(
                nn.Linear(input_dims[fc_layer], output_dims[fc_layer]),
                nn.ReLU(),
            )

        # Policy network: one head per action dimension.
        # (idiom fix: build the list directly instead of None-filling it)
        self.policy_head = nn.ModuleList(
            nn.Linear(fc_dims[-1], act_space) for act_space in action_space
        )
        # value-function network head
        self.vf_head = nn.Linear(fc_dims[-1], 1)

    def set_fast_forward_mode(self):
        # if there is only one policy with discrete action space,
        # then there is no need to map to agents
        self.fast_forward_mode = True
        print(
            f"the model {self.name} turns on the fast_forward_mode to speed up "
            "the forward calculation (there is only one policy with discrete "
            "action space, therefore in the model forward there is no need to have "
            "an explicit mapping to agents which is slow) "
        )

    def forward(self, batch_index=None, batch_size=None, obs=None):
        """Compute per-head action probabilities and the value estimate.

        Either pass ``obs`` directly, or pass ``batch_index``/``batch_size``
        to read observations from the CUDA data manager; they are then also
        cached into an on-device "<obs>_batch" buffer at ``batch_index``.
        """
        if batch_index is not None:
            assert obs is None
            assert batch_index < batch_size
            obs = self.env.cuda_data_manager.data_on_device_via_torch(_OBSERVATIONS)

            # Push obs to obs_batch (the buffer is created lazily on first use).
            name = f"{_OBSERVATIONS}_batch"
            if not self.env.cuda_data_manager.is_data_on_device_via_torch(name):
                obs_batch = np.zeros((batch_size,) + obs.shape)
                obs_feed = DataFeed()
                obs_feed.add_data(name=name, data=obs_batch)
                self.env.cuda_data_manager.push_data_to_device(
                    obs_feed, torch_accessible=True
                )
            if not self.fast_forward_mode:
                # Select only this policy's agents when several policies
                # share the observation tensor.
                agent_ids_for_policy = self.policy_tag_to_agent_id_map[self.policy]
                self.env.cuda_data_manager.data_on_device_via_torch(name=name)[
                    batch_index, :, agent_ids_for_policy
                ] = obs[:, agent_ids_for_policy]
                ip = obs[:, agent_ids_for_policy]
            else:
                self.env.cuda_data_manager.data_on_device_via_torch(name=name)[
                    batch_index
                ] = obs
                ip = obs
        else:
            assert obs is not None
            ip = obs

        # Feed through the FC layers (ModuleDict preserves insertion order).
        for layer in self.fc.values():
            ip = layer(ip)
        op = ip

        # Compute the action probabilities and the value function estimate
        probs = [F.softmax(ph(op), dim=-1) for ph in self.policy_head]
        vals = self.vf_head(op)

        return probs, vals[..., 0]
| [
"torch.nn.ReLU",
"torch.nn.ModuleList",
"numpy.zeros",
"warp_drive.utils.data_feed.DataFeed",
"torch.nn.ModuleDict",
"torch.nn.Linear",
"numpy.prod"
] | [((1210, 1272), 'numpy.prod', 'np.prod', (['self.env.env.observation_space[sample_agent_id].shape'], {}), '(self.env.env.observation_space[sample_agent_id].shape)\n', (1217, 1272), True, 'import numpy as np\n'), ((1734, 1749), 'torch.nn.ModuleDict', 'nn.ModuleDict', ([], {}), '()\n', (1747, 1749), True, 'import torch.nn as nn\n'), ((2215, 2242), 'torch.nn.ModuleList', 'nn.ModuleList', (['policy_heads'], {}), '(policy_heads)\n', (2228, 2242), True, 'import torch.nn as nn\n'), ((2305, 2330), 'torch.nn.Linear', 'nn.Linear', (['fc_dims[-1]', '(1)'], {}), '(fc_dims[-1], 1)\n', (2314, 2330), True, 'import torch.nn as nn\n'), ((2154, 2187), 'torch.nn.Linear', 'nn.Linear', (['fc_dims[-1]', 'act_space'], {}), '(fc_dims[-1], act_space)\n', (2163, 2187), True, 'import torch.nn as nn\n'), ((1864, 1918), 'torch.nn.Linear', 'nn.Linear', (['input_dims[fc_layer]', 'output_dims[fc_layer]'], {}), '(input_dims[fc_layer], output_dims[fc_layer])\n', (1873, 1918), True, 'import torch.nn as nn\n'), ((1936, 1945), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1943, 1945), True, 'import torch.nn as nn\n'), ((3306, 3341), 'numpy.zeros', 'np.zeros', (['((batch_size,) + obs.shape)'], {}), '((batch_size,) + obs.shape)\n', (3314, 3341), True, 'import numpy as np\n'), ((3369, 3379), 'warp_drive.utils.data_feed.DataFeed', 'DataFeed', ([], {}), '()\n', (3377, 3379), False, 'from warp_drive.utils.data_feed import DataFeed\n')] |
#!/usr/bin/env python3
"""
histogram: plot a histogram of a file of numbers. Numbers can be floats, one per
line. Lines with two numbers are interpreted as pre-counted, with the number of
repeats of the first being given by the second.
Multiple instances of the same value in a category will be merged by adding
weights.
Re-uses sample code and documentation from
<http://users.soe.ucsc.edu/~karplus/bme205/f12/Scaffold.html>
"""
import argparse, sys, os, itertools, math, numpy, collections
import matplotlib, matplotlib.ticker
def intify(x):
    """
    Convert an integral float (e.g. 3.0) to int; return anything else unchanged.
    """
    return int(x) if isinstance(x, float) and x.is_integer() else x
def draw_labels(bin_counts, bar_patches, size=None):
    """
    Annotate each bar patch on the current axes with its bin count.

    Zero-count bars are left unlabeled. Takes an optional font size.
    """
    from matplotlib import pyplot

    axes = pyplot.gca()
    for count, patch in zip(bin_counts, bar_patches):
        if count.is_integer():
            # Show integral counts without a trailing .0
            count = int(count)
        if count == 0:
            # Don't label empty bins
            continue
        # Center the label horizontally on the bar, just above its top.
        center_x = patch.get_x() + patch.get_width() / 2.0
        height = patch.get_height()
        axes.annotate("{:,}".format(count), (center_x, height),
                      ha="center", va="bottom", rotation=45, xytext=(0, 5),
                      textcoords="offset points", size=size)
def parse_args(args):
    """
    Takes in the command-line arguments list (args), and returns a nice argparse
    result with fields for all the options.

    Borrows heavily from the argparse documentation examples:
    <http://docs.python.org/library/argparse.html>
    """
    # The command line arguments start with the program name, which we don't
    # want to treat as an argument for argparse. So we remove it.
    args = args[1:]
    # The RawDescriptionHelpFormatter keeps the module docstring's manual
    # 80-column wrapping intact in --help output.
    parser = argparse.ArgumentParser(description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    # --- input data ---
    parser.add_argument("data", nargs="+",
        help="the file(s) to read")
    parser.add_argument("--redPortion", type=float, action="append", default=[],
        help="portion of each bin to color red")
    parser.add_argument("--redWeight", type=float, action="append", default=[],
        help="value to plot in red in each bin")
    # --- titles and labels (FIX: x/y label help texts said "the plot title") ---
    parser.add_argument("--title", default="Histogram",
        help="the plot title")
    parser.add_argument("--x_label", default="Value",
        help="the X axis label")
    parser.add_argument("--y_label", default="Number of Items (count)",
        help="the Y axis label")
    # --- binning and axis ranges ---
    parser.add_argument("--bins", type=int, default=10,
        help="the number of histogram bins")
    parser.add_argument("--x_min", "--min", type=float, default=None,
        help="minimum value allowed")
    parser.add_argument("--x_max", "--max", type=float, default=None,
        help="maximum value allowed")
    parser.add_argument("--y_min", type=float, default=None,
        help="minimum count on plot")
    parser.add_argument("--y_max", type=float, default=None,
        help="maximum count on plot")
    parser.add_argument("--cutoff", type=float, default=None,
        help="note portion above and below a value, and draw a vertical line")
    # --- styling ---
    parser.add_argument("--font_size", type=int, default=12,
        help="the font size for text")
    parser.add_argument("--categories", nargs="+", default=None,
        help="categories to plot, in order")
    parser.add_argument("--category_labels", "--labels", nargs="+",
        default=[],
        help="labels for all categories or data files, in order")
    parser.add_argument("--colors", nargs="+", default=[],
        help="use the specified Matplotlib colors per category or file")
    parser.add_argument("--styles", nargs="+", default=[],
        help="use the specified line styles per category or file")
    # --- data transforms ---
    parser.add_argument("--cumulative", action="store_true",
        help="plot cumulatively")
    parser.add_argument("--log", action="store_true",
        help="take the base-10 logarithm of values before plotting histogram")
    parser.add_argument("--log_counts", "--logCounts", action="store_true",
        help="take the logarithm of counts before plotting histogram")
    parser.add_argument("--fake_zero", action="store_true",
        help="split lines where points would be 0")
    parser.add_argument("--split_at_zero", action="store_true",
        help="split lines between positive and negative")
    parser.add_argument("--stats", action="store_true",
        help="print data stats")
    # --- output ---
    parser.add_argument("--save",
        help="save figure to the given filename instead of showing it")
    parser.add_argument("--dpi", type=int, default=300,
        help="save the figure with the specified DPI, if applicable")
    # --- ticks and notation ---
    parser.add_argument("--sparse_ticks", action="store_true",
        help="use sparse tick marks on both axes")
    parser.add_argument("--sparse_x", action="store_true",
        help="use sparse tick marks on X axis")
    parser.add_argument("--sparse_y", action="store_true",
        help="use sparse tick marks on Y axis")
    parser.add_argument("--ticks", nargs="+", default=None,
        help="use particular X tick locations")
    parser.add_argument("--scientific_x", action="store_true",
        help="use scientific notation on the X axis")
    parser.add_argument("--scientific_y", action="store_true",
        help="use scientific notation on the Y axis")
    # --- labeling and legend ---
    parser.add_argument("--label", action="store_true",
        help="label bins with counts")
    parser.add_argument("--label_size", type=float,
        help="bin count label font size")
    parser.add_argument("--no_n", dest="show_n", action="store_false",
        help="don't add n value to title")
    parser.add_argument("--normalize", action="store_true",
        help="normalize to total weight of 1")
    parser.add_argument("--line", action="store_true",
        help="draw a line instead of a barchart")
    parser.add_argument("--no_zero_ends", dest="zero_ends", default=True,
        action="store_false",
        help="don't force line ends to zero")
    parser.add_argument("--legend_overlay", default=None,
        help="display the legend overlayed on the graph at this location")
    # FIX: typo "dispalyed" -> "displayed" in the help text.
    parser.add_argument("--no_legend", action="store_true",
        help="don't display a legend when one would otherwise be displayed")
    parser.add_argument("--points", action="store_true",
        help="draw points instead of a barchart")
    # --- figure geometry ---
    parser.add_argument("--width", type=float, default=8,
        help="plot width in inches")
    parser.add_argument("--height", type=float, default=6,
        help="plot height in inches")
    return parser.parse_args(args)
def filter2(criterion, key_list, other_list):
    """
    Filter two lists of corresponding items based on some function of the first
    list.

    Returns a pair of new lists holding, in order, the entries whose key
    passed the criterion.
    """
    # Pair the two lists up, keep only rows whose key passes, then split
    # the surviving pairs back into two parallel lists.
    kept_pairs = [(key, other) for key, other in zip(key_list, other_list)
                  if criterion(key)]
    kept_keys = [key for key, _ in kept_pairs]
    kept_others = [other for _, other in kept_pairs]
    return kept_keys, kept_others
def filter_n(filter_function, *to_filter):
    """
    Filter any number of lists of corresponding items based on some function of
    the first list.

    ``filter_function`` is a predicate applied to items of the first list; for
    every index where it passes, the item at that index is kept in every list
    (when that list is long enough to have an item there).

    Returns all the filtered lists as a tuple, which unpacks as multiple
    return values.
    """
    to_return = [[] for _ in to_filter]
    for i, key_val in enumerate(to_filter[0]):
        # For each run of entries
        if filter_function(key_val):
            # The key passed the filter, so keep the whole row.
            for j, column in enumerate(to_filter):
                # Guard against trailing lists shorter than the key list.
                if i < len(column):
                    to_return[j].append(column[i])
    return tuple(to_return)
def main(args):
    """
    Parses command line arguments, and plots a histogram.
    "args" specifies the program arguments, with args[0] being the executable
    name. The return value should be used as the program's exit code.
    """
    options = parse_args(args) # This holds the nicely-parsed options object
    if options.save is not None:
        # Set up plot for use in headless mode if we just want to save. See
        # <http://stackoverflow.com/a/2766194/402891>. We need to do this before
        # we grab pyplot.
        matplotlib.use('Agg')
    # Import pyplot only after the backend may have been switched to Agg.
    from matplotlib import pyplot
    # Make the figure with the appropriate size and DPI.
    pyplot.figure(figsize=(options.width, options.height), dpi=options.dpi)
    # This will hold a dict of dicts from data value to weight, by category or
    # file name. Later gets converted to a dict of lists of (value, weight)
    # pairs, aggregated by value.
    all_data = collections.defaultdict(lambda: collections.defaultdict(float))
    # Each input line may be: "value", "value weight", "category value", or
    # "category value weight" — disambiguated by field count and type below.
    for data_filename in options.data:
        for line_number, line in enumerate(open(data_filename)):
            # Split each line
            parts = line.split()
            if len(parts) == 1:
                # This is one instance of a value
                all_data[data_filename][float(parts[0])] += 1.0
            elif len(parts) == 2:
                if len(options.data) > 1:
                    # This is multiple instances of a value, and we are doing
                    # categories by filename.
                    all_data[data_filename][float(parts[0])] += float(parts[1])
                else:
                    try:
                        value = float(parts[0])
                        # If the first column is a number, this is value, weight
                        # data.
                        all_data[data_filename][value] += float(parts[1])
                    except ValueError:
                        # This is category, instance data, since first column
                        # isn't a number.
                        all_data[parts[0]][float(parts[1])] += 1.0
            elif len(parts) == 3:
                # This is category, instance, weight data
                all_data[parts[0]][float(parts[1])] += float(parts[2])
            else:
                raise Exception("Wrong number of fields on {} line {}".format(
                    data_filename, line_number + 1))
    for category in all_data.keys():
        # Strip NaNs and Infs and weight-0 entries, and convert to a dict of
        # lists of tuples.
        all_data[category] = [(value, weight) for (value, weight)
            in all_data[category].items() if
            value < float("+inf") and value > float("-inf") and weight > 0]
    # Calculate our own bins, over all the data. First we need the largest and
    # smallest observed values. The fors in the comprehension have to be in
    # normal for loop order and not the other order.
    # Make sure to filter out 0s from bounds determination if using log space.
    bin_min = options.x_min if options.x_min is not None else min((pair[0]
        for pair_list in all_data.values() for pair in pair_list if (not options.log or pair[0] != 0)))
    bin_max = options.x_max if options.x_max is not None else max((pair[0]
        for pair_list in all_data.values() for pair in pair_list if (not options.log or pair[0] != 0)))
    if options.log:
        # Do our bins in log space, so they look evenly spaced on the plot.
        bin_max = math.log10(bin_max)
        bin_min = math.log10(bin_min)
    # Work out what step we should use between bin edges
    bin_step = (bin_max - bin_min) / float(options.bins)
    # Work out where the bin edges should be
    bins = [bin_min + bin_step * i for i in range(options.bins + 1)]
    # Work out where the bin centers should be
    bin_centers = [left_edge + bin_step / 2.0 for left_edge in bins[:-1]]
    if options.log:
        # Bring bins back into data space
        bins = [math.pow(10, x) for x in bins]
        bin_centers = [math.pow(10, x) for x in bin_centers]
    if options.categories is not None:
        # Order data by category order
        ordered_data = [(category, all_data[category]) for category in
            options.categories]
    elif len(options.data) > 1:
        # Order data by file order
        ordered_data = [(filename, all_data[filename]) for filename in
            options.data]
    else:
        # Order arbitrarily
        ordered_data = list(all_data.items())
    if options.categories is not None and options.category_labels == []:
        # Label categories with their internal names
        category_labels = options.categories
    else:
        # Label categories exactly as asked
        category_labels = options.category_labels
    # Walk the categories in lockstep with their labels, colors, line styles
    # and markers; the cycles make the style sequences inexhaustible.
    for (category, data_and_weights), label, color, line_style, marker in \
        zip(ordered_data,
            itertools.chain(category_labels, itertools.repeat(None)),
            itertools.chain(options.colors, itertools.cycle(
                ['b', 'g', 'r', 'c', 'm', 'y', 'k'])),
            itertools.chain(options.styles, itertools.cycle(
                ['-', '--', ':', '-.'])),
            itertools.cycle(
                ['o', 'v', '^', '<', '>', 's', '+', 'x', 'D', '|', '_'])):
        # For every category and its display properties...
        if len(data_and_weights) == 0:
            # Skip categories with no data
            continue
        # Split out the data and the weights for this category/file
        data = [pair[0] for pair in data_and_weights]
        weights = [pair[1] for pair in data_and_weights]
        # For each set of data and weights that we want to plot, and the label
        # it needs (or None)...
        # We may want to normalize by total weight
        # We need a float here so we don't get int division later.
        total_weight_overall = float(0)
        for value, weight in zip(data, weights):
            # Sum up the weights overall
            total_weight_overall += weight
        if options.normalize and total_weight_overall > 0:
            # Normalize all the weight to 1.0 total weight.
            weights = [w / total_weight_overall for w in weights]
        # Apply the limits after normalization
        if options.x_min is not None:
            data, weights = filter2(lambda x: x >= options.x_min, data, weights)
        if options.x_max is not None:
            data, weights = filter2(lambda x: x <= options.x_max, data, weights)
        # Work out how many samples there are left within the chart area
        samples = intify(sum(weights))
        if options.stats:
            # Compute and report some stats
            data_min = numpy.min(data)
            data_min_count = weights[numpy.argmin(data)]
            data_max = numpy.max(data)
            data_max_count = weights[numpy.argmax(data)]
            # The mode is the data item with maximal count
            data_mode = data[numpy.argmax(weights)]
            data_mode_count = numpy.max(weights)
            # Intify floats pretending to be ints
            data_min = intify(data_min)
            data_min_count = intify(data_min_count)
            data_max = intify(data_max)
            data_max_count = intify(data_max_count)
            data_mode = intify(data_mode)
            data_mode_count = intify(data_mode_count)
            # TODO: median, mean
            print("Min: {} occurs {} times".format(data_min, data_min_count))
            print("Mode: {} occurs {} times".format(data_mode, data_mode_count))
            print("Max: {} occurs {} times".format(data_max, data_max_count))
        if options.cutoff is not None:
            # Work out how much weight is above and below the cutoff
            above = 0
            below = 0
            for value, weight in zip(data, weights):
                if value > options.cutoff:
                    above += weight
                else:
                    below += weight
            # Report the results wrt the cutoff.
            print("{} above {}, {} below".format(
                above / total_weight_overall, options.cutoff,
                below / total_weight_overall))
        if options.line or options.points:
            # Do histogram binning manually
            # Do the binning
            bin_values, _ = numpy.histogram(data, bins=bins, weights=weights)
            if options.cumulative:
                # Calculate cumulative weights for each data point
                bin_values = numpy.cumsum(bin_values)
            if options.zero_ends:
                if options.cumulative:
                    # Pin things to 0 on the low end and max on the high
                    all_bin_centers = [bins[0]] + list(bin_centers) + [bins[-1]]
                    all_bin_values = [0] + list(bin_values) + [sum(weights)]
                else:
                    # Pin things to 0 on the end
                    all_bin_centers = [bins[0]] + list(bin_centers) + [bins[-1]]
                    all_bin_values = [0] + list(bin_values) + [0]
            else:
                all_bin_centers = bin_centers
                all_bin_values = bin_values
            # Now we make a bunch of series for each line, potentially. This
            # holds pairs of (centers, values) lists.
            series = []
            if options.fake_zero or options.split_at_zero:
                # We need to split into multiple series, potentially.
                # This holds the series we are working on.
                this_series = ([], [])
                # What was the last bin we saw?
                last_bin = 0
                for center, value in zip(all_bin_centers, all_bin_values):
                    # For every point on the line, see if we need to break here
                    # because it's zero.
                    # This logic gets complicated so we do some flags.
                    # Do we keep this point?
                    includeSample = True
                    # Do we split the line?
                    breakSeries = False
                    if options.fake_zero and value == 0:
                        # We don't want this sample, and we need to break the
                        # series
                        includeSample = False
                        breakSeries = True
                    if options.split_at_zero and last_bin < 0 and center > 0:
                        # We crossed the y axis, or we went down to the x axis.
                        # We can maybe keep the sample, and we need to break the
                        # series
                        breakSeries = True
                    if breakSeries and len(this_series[0]) > 0:
                        # Finish the series and start another
                        series.append(this_series)
                        this_series = ([], [])
                    if includeSample:
                        # Stick this point in the series
                        this_series[0].append(center)
                        this_series[1].append(value)
                    last_bin = center
                if len(this_series[0]) > 0:
                    # Finish the last series
                    series.append(this_series)
            else:
                # Just do one series
                series.append((all_bin_centers, all_bin_values))
            # We only want to label the first series in the legend, so we'll
            # none this out after we use it.
            label_to_use = label
            for series_centers, series_values in series:
                # Plot every series
                if options.line and options.points:
                    # Do the plots as lines with points
                    pyplot.plot(series_centers, series_values,
                        label=label_to_use, linestyle=line_style, color=color,
                        marker=marker)
                    label_to_use = None
                elif options.line:
                    # Do the plots as lines only
                    pyplot.plot(series_centers, series_values,
                        label=label_to_use, linestyle=line_style, color=color)
                    label_to_use= None
                elif options.points:
                    # Do the plot as points.
                    pyplot.scatter(series_centers, series_values,
                        label=label_to_use, color=color, marker=marker)
                    label_to_use = None
            if options.log_counts:
                # Log the Y axis
                pyplot.yscale('log')
            if options.split_at_zero:
                # Put a big vertical line.
                pyplot.axvline(linewidth=2, color="k")
        else:
            # Do the plot. Do cumulative, or logarithmic Y axis, optionally.
            # Keep the bin total counts and the bar patches.
            bin_counts, _, bar_patches = pyplot.hist(data, bins,
                cumulative=options.cumulative, log=options.log_counts,
                weights=weights, alpha=0.5 if len(ordered_data) > 1 else 1.0,
                label=label)
        if options.cutoff is not None:
            # Put a vertical line at the cutoff.
            pyplot.axvline(x=options.cutoff, color="r")
        if len(options.redPortion) > 0:
            # Plot a red histogram over that one, modified by redPortion.
            red_data = []
            red_weights = []
            for item, weight in zip(data, weights):
                # For each item, what bin is it in?
                bin_number = int(item / bin_step)
                if bin_number < len(options.redPortion):
                    # We have a potentially nonzero scaling factor. Apply that.
                    weight *= options.redPortion[bin_number]
                # Keep this item.
                red_data.append(item)
                red_weights.append(weight)
            # Plot the re-weighted data with the same bins, in red
            red_counts, _, red_patches = pyplot.hist(red_data, bins,
                cumulative=options.cumulative, log=options.log_counts,
                weights=red_weights, color='#FF9696', hatch='/'*6)
            if options.label:
                # Label all the red portion-based bars
                draw_labels(red_counts, red_patches, size=options.label_size)
        if len(options.redWeight) > 0:
            # Plot a red histogram over that one, modified by redPortion.
            # Grab an item in each bin
            items = bins[0:len(options.redWeight)]
            # Plot the re-weighted data with the same bins, in red
            red_counts, _, red_patches = pyplot.hist(items, bins,
                cumulative=options.cumulative, log=options.log_counts,
                weights=options.redWeight, color='#FF9696', hatch='/'*6)
            if options.label:
                # Label all the red weight-based bars
                draw_labels(red_counts, red_patches, size=options.label_size)
    # NOTE(review): `samples`, `bin_counts` and `bar_patches` below are
    # leftovers from the *last* loop iteration above — confirm that is the
    # intended behavior when multiple categories are plotted.
    # StackOverflow provides us with font sizing. See
    # <http://stackoverflow.com/q/3899980/402891>
    matplotlib.rcParams.update({"font.size": options.font_size})
    if options.show_n:
        # Add an n value to the title
        options.title += " (n = {:,})".format(samples)
    pyplot.title(options.title)
    pyplot.xlabel(options.x_label)
    pyplot.ylabel(options.y_label)
    if options.log:
        # Set the X axis to log mode
        pyplot.xscale('log')
    if options.x_min is not None:
        # Set only the lower x limit
        pyplot.xlim((options.x_min, pyplot.xlim()[1]))
    if options.x_max is not None:
        # Set only the upper x limit
        pyplot.xlim((pyplot.xlim()[0], options.x_max))
    if options.y_min is not None:
        # Set only the lower y limit
        pyplot.ylim((options.y_min, pyplot.ylim()[1]))
    elif options.log_counts:
        # Make sure the default lower Y limit is 1 on log plots.
        pyplot.ylim((1, pyplot.ylim()[1]))
    if options.y_max is not None:
        # Set only the upper y limit
        pyplot.ylim((pyplot.ylim()[0], options.y_max))
    if options.sparse_ticks or options.sparse_x:
        # Set up X tickmarks to have only 2 per axis, at the ends
        pyplot.gca().xaxis.set_major_locator(
            matplotlib.ticker.FixedLocator(pyplot.xlim()))
    if options.sparse_ticks or options.sparse_y:
        # Same for the Y axis
        pyplot.gca().yaxis.set_major_locator(
            matplotlib.ticker.FixedLocator(pyplot.ylim()))
    if options.ticks is not None:
        # Use these particular X ticks instead
        pyplot.gca().xaxis.set_major_locator(
            matplotlib.ticker.FixedLocator(
                [float(pos) for pos in options.ticks]))
    # Make sure tick labels don't overlap. See
    # <http://stackoverflow.com/a/20599129/402891>
    pyplot.gca().tick_params(axis="x", pad=0.5 * options.font_size)
    # Make our own scientific notation formatter since set_scientific is not
    # working
    sci_formatter = matplotlib.ticker.FormatStrFormatter("%1.2e")
    if options.scientific_x:
        # Force scientific notation on X axis
        pyplot.gca().xaxis.set_major_formatter(sci_formatter)
    if options.scientific_y:
        # Force scientific notation on Y axis
        pyplot.gca().yaxis.set_major_formatter(sci_formatter)
    if options.label:
        # Label all the normal bars
        draw_labels(bin_counts, bar_patches, size=options.label_size)
    # Make everything fit
    pyplot.tight_layout()
    if len(category_labels) > 0 and not options.no_legend:
        # We need a legend
        if options.legend_overlay is None:
            # We want the default legend, off to the right of the plot.
            # First shrink the plot to make room for it.
            # TODO: automatically actually work out how big it will be.
            bounds = pyplot.gca().get_position()
            pyplot.gca().set_position([bounds.x0, bounds.y0,
                bounds.width * 0.5, bounds.height])
            # Make the legend
            pyplot.legend(loc="center left", bbox_to_anchor=(1.05, 0.5))
        else:
            # We want the legend on top of the plot at the user-specified
            # location, and we want the plot to be full width.
            pyplot.legend(loc=options.legend_overlay)
    if options.save is not None:
        # Save the figure to a file
        pyplot.savefig(options.save, dpi=options.dpi)
    else:
        # Show the figure to the user
        pyplot.show()
    return 0
if __name__ == "__main__" :
    # Run as a script: propagate main()'s return value as the exit code.
    sys.exit(main(sys.argv))
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.yscale",
"argparse.ArgumentParser",
"numpy.argmax",
"numpy.argmin",
"collections.defaultdict",
"matplotlib.pyplot.figure",
"numpy.histogram",
"matplotlib.pyplot.gca",
"itertools.cycle",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.axvline",... | [((973, 985), 'matplotlib.pyplot.gca', 'pyplot.gca', ([], {}), '()\n', (983, 985), False, 'from matplotlib import pyplot\n'), ((2558, 2661), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawDescriptionHelpFormatter)\n', (2581, 2661), False, 'import argparse, sys, os, itertools, math, numpy, collections\n'), ((9396, 9467), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(options.width, options.height)', 'dpi': 'options.dpi'}), '(figsize=(options.width, options.height), dpi=options.dpi)\n', (9409, 9467), False, 'from matplotlib import pyplot\n'), ((24608, 24668), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': options.font_size}"], {}), "({'font.size': options.font_size})\n", (24634, 24668), False, 'import matplotlib, matplotlib.ticker\n'), ((24789, 24816), 'matplotlib.pyplot.title', 'pyplot.title', (['options.title'], {}), '(options.title)\n', (24801, 24816), False, 'from matplotlib import pyplot\n'), ((24821, 24851), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['options.x_label'], {}), '(options.x_label)\n', (24834, 24851), False, 'from matplotlib import pyplot\n'), ((24856, 24886), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['options.y_label'], {}), '(options.y_label)\n', (24869, 24886), False, 'from matplotlib import pyplot\n'), ((26578, 26623), 'matplotlib.ticker.FormatStrFormatter', 'matplotlib.ticker.FormatStrFormatter', (['"""%1.2e"""'], {}), "('%1.2e')\n", (26614, 26623), False, 'import matplotlib, matplotlib.ticker\n'), ((27070, 27091), 'matplotlib.pyplot.tight_layout', 'pyplot.tight_layout', ([], {}), '()\n', (27089, 27091), False, 'from matplotlib import pyplot\n'), ((9265, 9286), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (9279, 9286), False, 'import matplotlib, matplotlib.ticker\n'), ((12324, 
12343), 'math.log10', 'math.log10', (['bin_max'], {}), '(bin_max)\n', (12334, 12343), False, 'import argparse, sys, os, itertools, math, numpy, collections\n'), ((12362, 12381), 'math.log10', 'math.log10', (['bin_min'], {}), '(bin_min)\n', (12372, 12381), False, 'import argparse, sys, os, itertools, math, numpy, collections\n'), ((14010, 14082), 'itertools.cycle', 'itertools.cycle', (["['o', 'v', '^', '<', '>', 's', '+', 'x', 'D', '|', '_']"], {}), "(['o', 'v', '^', '<', '>', 's', '+', 'x', 'D', '|', '_'])\n", (14025, 14082), False, 'import argparse, sys, os, itertools, math, numpy, collections\n'), ((23483, 23623), 'matplotlib.pyplot.hist', 'pyplot.hist', (['red_data', 'bins'], {'cumulative': 'options.cumulative', 'log': 'options.log_counts', 'weights': 'red_weights', 'color': '"""#FF9696"""', 'hatch': "('/' * 6)"}), "(red_data, bins, cumulative=options.cumulative, log=options.\n log_counts, weights=red_weights, color='#FF9696', hatch='/' * 6)\n", (23494, 23623), False, 'from matplotlib import pyplot\n'), ((24145, 24288), 'matplotlib.pyplot.hist', 'pyplot.hist', (['items', 'bins'], {'cumulative': 'options.cumulative', 'log': 'options.log_counts', 'weights': 'options.redWeight', 'color': '"""#FF9696"""', 'hatch': "('/' * 6)"}), "(items, bins, cumulative=options.cumulative, log=options.\n log_counts, weights=options.redWeight, color='#FF9696', hatch='/' * 6)\n", (24156, 24288), False, 'from matplotlib import pyplot\n'), ((24957, 24977), 'matplotlib.pyplot.xscale', 'pyplot.xscale', (['"""log"""'], {}), "('log')\n", (24970, 24977), False, 'from matplotlib import pyplot\n'), ((28027, 28072), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['options.save'], {'dpi': 'options.dpi'}), '(options.save, dpi=options.dpi)\n', (28041, 28072), False, 'from matplotlib import pyplot\n'), ((28129, 28142), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (28140, 28142), False, 'from matplotlib import pyplot\n'), ((9709, 9739), 'collections.defaultdict', 
'collections.defaultdict', (['float'], {}), '(float)\n', (9732, 9739), False, 'import argparse, sys, os, itertools, math, numpy, collections\n'), ((12823, 12838), 'math.pow', 'math.pow', (['(10)', 'x'], {}), '(10, x)\n', (12831, 12838), False, 'import argparse, sys, os, itertools, math, numpy, collections\n'), ((12877, 12892), 'math.pow', 'math.pow', (['(10)', 'x'], {}), '(10, x)\n', (12885, 12892), False, 'import argparse, sys, os, itertools, math, numpy, collections\n'), ((13782, 13804), 'itertools.repeat', 'itertools.repeat', (['None'], {}), '(None)\n', (13798, 13804), False, 'import argparse, sys, os, itertools, math, numpy, collections\n'), ((13847, 13899), 'itertools.cycle', 'itertools.cycle', (["['b', 'g', 'r', 'c', 'm', 'y', 'k']"], {}), "(['b', 'g', 'r', 'c', 'm', 'y', 'k'])\n", (13862, 13899), False, 'import argparse, sys, os, itertools, math, numpy, collections\n'), ((13951, 13990), 'itertools.cycle', 'itertools.cycle', (["['-', '--', ':', '-.']"], {}), "(['-', '--', ':', '-.'])\n", (13966, 13990), False, 'import argparse, sys, os, itertools, math, numpy, collections\n'), ((15601, 15616), 'numpy.min', 'numpy.min', (['data'], {}), '(data)\n', (15610, 15616), False, 'import argparse, sys, os, itertools, math, numpy, collections\n'), ((15697, 15712), 'numpy.max', 'numpy.max', (['data'], {}), '(data)\n', (15706, 15712), False, 'import argparse, sys, os, itertools, math, numpy, collections\n'), ((15911, 15929), 'numpy.max', 'numpy.max', (['weights'], {}), '(weights)\n', (15920, 15929), False, 'import argparse, sys, os, itertools, math, numpy, collections\n'), ((17385, 17434), 'numpy.histogram', 'numpy.histogram', (['data'], {'bins': 'bins', 'weights': 'weights'}), '(data, bins=bins, weights=weights)\n', (17400, 17434), False, 'import argparse, sys, os, itertools, math, numpy, collections\n'), ((22656, 22699), 'matplotlib.pyplot.axvline', 'pyplot.axvline', ([], {'x': 'options.cutoff', 'color': '"""r"""'}), "(x=options.cutoff, color='r')\n", (22670, 22699), 
False, 'from matplotlib import pyplot\n'), ((26398, 26410), 'matplotlib.pyplot.gca', 'pyplot.gca', ([], {}), '()\n', (26408, 26410), False, 'from matplotlib import pyplot\n'), ((27666, 27726), 'matplotlib.pyplot.legend', 'pyplot.legend', ([], {'loc': '"""center left"""', 'bbox_to_anchor': '(1.05, 0.5)'}), "(loc='center left', bbox_to_anchor=(1.05, 0.5))\n", (27679, 27726), False, 'from matplotlib import pyplot\n'), ((27903, 27944), 'matplotlib.pyplot.legend', 'pyplot.legend', ([], {'loc': 'options.legend_overlay'}), '(loc=options.legend_overlay)\n', (27916, 27944), False, 'from matplotlib import pyplot\n'), ((15654, 15672), 'numpy.argmin', 'numpy.argmin', (['data'], {}), '(data)\n', (15666, 15672), False, 'import argparse, sys, os, itertools, math, numpy, collections\n'), ((15750, 15768), 'numpy.argmax', 'numpy.argmax', (['data'], {}), '(data)\n', (15762, 15768), False, 'import argparse, sys, os, itertools, math, numpy, collections\n'), ((15858, 15879), 'numpy.argmax', 'numpy.argmax', (['weights'], {}), '(weights)\n', (15870, 15879), False, 'import argparse, sys, os, itertools, math, numpy, collections\n'), ((17579, 17603), 'numpy.cumsum', 'numpy.cumsum', (['bin_values'], {}), '(bin_values)\n', (17591, 17603), False, 'import argparse, sys, os, itertools, math, numpy, collections\n'), ((21961, 21981), 'matplotlib.pyplot.yscale', 'pyplot.yscale', (['"""log"""'], {}), "('log')\n", (21974, 21981), False, 'from matplotlib import pyplot\n'), ((22096, 22134), 'matplotlib.pyplot.axvline', 'pyplot.axvline', ([], {'linewidth': '(2)', 'color': '"""k"""'}), "(linewidth=2, color='k')\n", (22110, 22134), False, 'from matplotlib import pyplot\n'), ((25846, 25859), 'matplotlib.pyplot.xlim', 'pyplot.xlim', ([], {}), '()\n', (25857, 25859), False, 'from matplotlib import pyplot\n'), ((26030, 26043), 'matplotlib.pyplot.ylim', 'pyplot.ylim', ([], {}), '()\n', (26041, 26043), False, 'from matplotlib import pyplot\n'), ((21135, 21252), 'matplotlib.pyplot.plot', 'pyplot.plot', 
(['series_centers', 'series_values'], {'label': 'label_to_use', 'linestyle': 'line_style', 'color': 'color', 'marker': 'marker'}), '(series_centers, series_values, label=label_to_use, linestyle=\n line_style, color=color, marker=marker)\n', (21146, 21252), False, 'from matplotlib import pyplot\n'), ((25090, 25103), 'matplotlib.pyplot.xlim', 'pyplot.xlim', ([], {}), '()\n', (25101, 25103), False, 'from matplotlib import pyplot\n'), ((25201, 25214), 'matplotlib.pyplot.xlim', 'pyplot.xlim', ([], {}), '()\n', (25212, 25214), False, 'from matplotlib import pyplot\n'), ((25351, 25364), 'matplotlib.pyplot.ylim', 'pyplot.ylim', ([], {}), '()\n', (25362, 25364), False, 'from matplotlib import pyplot\n'), ((25599, 25612), 'matplotlib.pyplot.ylim', 'pyplot.ylim', ([], {}), '()\n', (25610, 25612), False, 'from matplotlib import pyplot\n'), ((25765, 25777), 'matplotlib.pyplot.gca', 'pyplot.gca', ([], {}), '()\n', (25775, 25777), False, 'from matplotlib import pyplot\n'), ((25949, 25961), 'matplotlib.pyplot.gca', 'pyplot.gca', ([], {}), '()\n', (25959, 25961), False, 'from matplotlib import pyplot\n'), ((26149, 26161), 'matplotlib.pyplot.gca', 'pyplot.gca', ([], {}), '()\n', (26159, 26161), False, 'from matplotlib import pyplot\n'), ((26707, 26719), 'matplotlib.pyplot.gca', 'pyplot.gca', ([], {}), '()\n', (26717, 26719), False, 'from matplotlib import pyplot\n'), ((26844, 26856), 'matplotlib.pyplot.gca', 'pyplot.gca', ([], {}), '()\n', (26854, 26856), False, 'from matplotlib import pyplot\n'), ((27466, 27478), 'matplotlib.pyplot.gca', 'pyplot.gca', ([], {}), '()\n', (27476, 27478), False, 'from matplotlib import pyplot\n'), ((27506, 27518), 'matplotlib.pyplot.gca', 'pyplot.gca', ([], {}), '()\n', (27516, 27518), False, 'from matplotlib import pyplot\n'), ((21440, 21542), 'matplotlib.pyplot.plot', 'pyplot.plot', (['series_centers', 'series_values'], {'label': 'label_to_use', 'linestyle': 'line_style', 'color': 'color'}), '(series_centers, series_values, label=label_to_use, 
linestyle=\n line_style, color=color)\n', (21451, 21542), False, 'from matplotlib import pyplot\n'), ((25488, 25501), 'matplotlib.pyplot.ylim', 'pyplot.ylim', ([], {}), '()\n', (25499, 25501), False, 'from matplotlib import pyplot\n'), ((21706, 21804), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['series_centers', 'series_values'], {'label': 'label_to_use', 'color': 'color', 'marker': 'marker'}), '(series_centers, series_values, label=label_to_use, color=\n color, marker=marker)\n', (21720, 21804), False, 'from matplotlib import pyplot\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 18 15:26:13 2019
@author: Tang
"""
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_noise(img, value=10):
    """Generate a rain-drop noise layer the size of ``img``.

    Args:
        img: image whose height/width determine the noise layer size.
        value: controls how many rain drops survive thresholding
            (larger value -> more drops).

    Returns:
        A 2-D float noise map of shape ``img.shape[0:2]`` with sparse
        bright spots, lightly blurred so each drop has some body.
    """
    noise = np.random.uniform(0, 256, img.shape[0:2])
    # Keep only the brightest sliver of the random field as rain drops;
    # everything below the threshold is zeroed out.
    v = value * 0.1
    noise[noise < (256 - v)] = 0
    # A small kernel with a dominant centre spreads each surviving point
    # slightly, giving the drops an initial blur.
    k = np.array([[0, 0.1, 0],
                  [0.1, 8, 0.1],
                  [0, 0.1, 0]])
    # ddepth=-1 keeps the output depth identical to the input's.
    noise = cv2.filter2D(noise, -1, k)
    return noise
def rain_blur(noise, length=10, angle=0, w=1):
    """Motion-blur the noise layer so each drop becomes a rain streak.

    Args:
        noise: 2-D noise map, shape = img.shape[0:2].
        length: size of the diagonal kernel, i.e. streak length.
        angle: streak tilt in degrees, counter-clockwise positive.
        w: streak width (Gaussian kernel aperture).

    Returns:
        A uint8 motion-blurred noise layer normalised to 0-255.
    """
    # The identity-diagonal kernel is inherently tilted 45 degrees, so
    # subtract 45 here to make `angle` start from the upright position.
    trans = cv2.getRotationMatrix2D((length/2, length/2), angle-45, 1-length/100.0)
    dig = np.diag(np.ones(length))  # diagonal line kernel
    k = cv2.warpAffine(dig, trans, (length, length))  # rotated motion-blur kernel
    # Gaussian-blur the rotated diagonal kernel so the streaks have width.
    k = cv2.GaussianBlur(k, (w, w), 0)
    # Filter the noise with the streak kernel.
    blurred = cv2.filter2D(noise, -1, k)
    # Rescale into the displayable 0-255 range and convert to uint8.
    cv2.normalize(blurred, blurred, 0, 255, cv2.NORM_MINMAX)
    blurred = np.array(blurred, dtype=np.uint8)
    return blurred
def alpha_rain(rain, img, beta=0.8):
    """Blend a 2-D rain-streak layer into an image via per-pixel alpha.

    Each channel keeps the original pixel in proportion to how dark
    (transparent) the rain layer is there, then adds the rain brightness
    scaled by ``beta``.

    Args:
        rain: 2-D uint8 rain-streak layer, same height/width as ``img``.
        img: 3-channel uint8 image.
        beta: weight of the rain layer in the blend.

    Returns:
        A uint8 image of ``img``'s shape with the rain composited in.
    """
    # Promote the rain layer to (H, W, 1) float32 so the per-channel
    # arithmetic below cannot overflow/underflow in uint8.
    rain = np.expand_dims(rain, 2)
    rain = np.array(rain, dtype=np.float32)
    rain_result = img.copy()
    for channel in range(3):
        # Keep the image where the rain is transparent (dark), then add
        # the white rain streaks scaled by beta.
        rain_result[:, :, channel] = (rain_result[:, :, channel]
            * (255 - rain[:, :, 0]) / 255.0 + beta * rain[:, :, 0])
    return rain_result
def add_rain(rain,img,alpha=0.9):
    """Composite the rain noise over the image with a global weighted blend.

    Args:
        rain: 2-D rain-streak noise layer.
        img: 3-channel image to rain on.
        alpha: weight of the original image; the rain layer gets 1 - alpha.

    Returns:
        The blended image (gives a "rain outside the glass" look).
    """
    # Expand the 2-D rain noise into three identical channels so it
    # matches the image's shape for blending.
    rain = np.expand_dims(rain,2)
    rain = np.repeat(rain,3,2)
    # Weighted sum: alpha*img + (1-alpha)*rain, plus a scalar bias of 1.
    result = cv2.addWeighted(img,alpha,rain,1-alpha,1)
    return result
#%%
# Demo: build a rain layer and composite it onto a sample image both ways,
# then show the two results side by side.
img = cv2.imread('ori_img.png')
noise = get_noise(img, value=100)
rain = rain_blur(noise, length=20, angle=-30, w=3)
rain_result1 = alpha_rain(rain,img,beta=0.8)  # method 1: per-pixel alpha blending
rain_result2 = add_rain(rain,img)  # method 2: global weighting, "rain outside the glass" look
hmerge = np.hstack((rain_result1, rain_result2))
cv2.imshow('rain_effct_result', hmerge)
cv2.waitKey()
cv2.destroyAllWindows()
#%%
# Batch: rain-augment the whole test split of the car/airplane dataset.
ori_set = np.load('C:\\Tang\\data\\car_airplane\\dataset_car-airplane_train-1000_test-300.npz')
ori_Xtest = ori_set['X_test']
ori_Ytest = ori_set['Y_test']
rain_Xtest = np.zeros((600, 299, 299, 3))
#%%
for i, img in enumerate(ori_Xtest):
    print('rain image %s' % i)
    # Inputs are stored in [-1, 1]; convert to uint8 [0, 255] for the
    # rain pipeline.
    img = (ori_Xtest[i]+1)/2*255
    img = img.astype(np.uint8)
    noise = get_noise(img, value=200)
    rain = rain_blur(noise, length=30, angle=-30, w=3)
    rain_result = add_rain(rain, img)
    # Convert back to [-1, 1] before storing.
    rain_Xtest[i] = rain_result/255.0*2.0-1.0
#%%
np.savez('C:\\Tang\\data\\car_airplane\\dataset_car-airplane_rainy_testset.npz', X_test = rain_Xtest, Y_test = ori_Ytest)
#%%
# Spot-check a single augmented sample.
img = (ori_Xtest[11]+1)/2*255
img = img.astype(np.uint8)
noise = get_noise(img, value=200)
rain = rain_blur(noise, length=30, angle=-30, w=3)
rain_result = add_rain(rain, img)
plt.imshow(rain_result)
| [
"cv2.GaussianBlur",
"numpy.load",
"numpy.ones",
"cv2.warpAffine",
"cv2.normalize",
"cv2.imshow",
"cv2.getRotationMatrix2D",
"cv2.filter2D",
"matplotlib.pyplot.imshow",
"cv2.destroyAllWindows",
"numpy.repeat",
"cv2.waitKey",
"numpy.hstack",
"cv2.addWeighted",
"numpy.savez",
"numpy.conca... | [((2722, 2747), 'cv2.imread', 'cv2.imread', (['"""ori_img.png"""'], {}), "('ori_img.png')\n", (2732, 2747), False, 'import cv2\n'), ((2952, 2991), 'numpy.hstack', 'np.hstack', (['(rain_result1, rain_result2)'], {}), '((rain_result1, rain_result2))\n', (2961, 2991), True, 'import numpy as np\n'), ((2993, 3032), 'cv2.imshow', 'cv2.imshow', (['"""rain_effct_result"""', 'hmerge'], {}), "('rain_effct_result', hmerge)\n", (3003, 3032), False, 'import cv2\n'), ((3033, 3046), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (3044, 3046), False, 'import cv2\n'), ((3047, 3070), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3068, 3070), False, 'import cv2\n'), ((3086, 3181), 'numpy.load', 'np.load', (['"""C:\\\\Tang\\\\data\\\\car_airplane\\\\dataset_car-airplane_train-1000_test-300.npz"""'], {}), "(\n 'C:\\\\Tang\\\\data\\\\car_airplane\\\\dataset_car-airplane_train-1000_test-300.npz'\n )\n", (3093, 3181), True, 'import numpy as np\n'), ((3245, 3273), 'numpy.zeros', 'np.zeros', (['(600, 299, 299, 3)'], {}), '((600, 299, 299, 3))\n', (3253, 3273), True, 'import numpy as np\n'), ((3597, 3719), 'numpy.savez', 'np.savez', (['"""C:\\\\Tang\\\\data\\\\car_airplane\\\\dataset_car-airplane_rainy_testset.npz"""'], {'X_test': 'rain_Xtest', 'Y_test': 'ori_Ytest'}), "('C:\\\\Tang\\\\data\\\\car_airplane\\\\dataset_car-airplane_rainy_testset.npz'\n , X_test=rain_Xtest, Y_test=ori_Ytest)\n", (3605, 3719), True, 'import numpy as np\n'), ((3900, 3923), 'matplotlib.pyplot.imshow', 'plt.imshow', (['rain_result'], {}), '(rain_result)\n', (3910, 3923), True, 'from matplotlib import pyplot as plt\n'), ((313, 354), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(256)', 'img.shape[0:2]'], {}), '(0, 256, img.shape[0:2])\n', (330, 354), True, 'import numpy as np\n'), ((473, 524), 'numpy.array', 'np.array', (['[[0, 0.1, 0], [0.1, 8, 0.1], [0, 0.1, 0]]'], {}), '([[0, 0.1, 0], [0.1, 8, 0.1], [0, 0.1, 0]])\n', (481, 524), True, 'import numpy as np\n'), 
((618, 644), 'cv2.filter2D', 'cv2.filter2D', (['noise', '(-1)', 'k'], {}), '(noise, -1, k)\n', (630, 644), False, 'import cv2\n'), ((1057, 1143), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(length / 2, length / 2)', '(angle - 45)', '(1 - length / 100.0)'], {}), '((length / 2, length / 2), angle - 45, 1 - length / \n 100.0)\n', (1080, 1143), False, 'import cv2\n'), ((1184, 1228), 'cv2.warpAffine', 'cv2.warpAffine', (['dig', 'trans', '(length, length)'], {}), '(dig, trans, (length, length))\n', (1198, 1228), False, 'import cv2\n'), ((1245, 1275), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['k', '(w, w)', '(0)'], {}), '(k, (w, w), 0)\n', (1261, 1275), False, 'import cv2\n'), ((1373, 1399), 'cv2.filter2D', 'cv2.filter2D', (['noise', '(-1)', 'k'], {}), '(noise, -1, k)\n', (1385, 1399), False, 'import cv2\n'), ((1446, 1502), 'cv2.normalize', 'cv2.normalize', (['blurred', 'blurred', '(0)', '(255)', 'cv2.NORM_MINMAX'], {}), '(blurred, blurred, 0, 255, cv2.NORM_MINMAX)\n', (1459, 1502), False, 'import cv2\n'), ((1517, 1550), 'numpy.array', 'np.array', (['blurred'], {'dtype': 'np.uint8'}), '(blurred, dtype=np.uint8)\n', (1525, 1550), True, 'import numpy as np\n'), ((1856, 1879), 'numpy.expand_dims', 'np.expand_dims', (['rain', '(2)'], {}), '(rain, 2)\n', (1870, 1879), True, 'import numpy as np\n'), ((1897, 1932), 'numpy.concatenate', 'np.concatenate', (['(img, rain)'], {'axis': '(2)'}), '((img, rain), axis=2)\n', (1911, 1932), True, 'import numpy as np\n'), ((2003, 2035), 'numpy.array', 'np.array', (['rain'], {'dtype': 'np.float32'}), '(rain, dtype=np.float32)\n', (2011, 2035), True, 'import numpy as np\n'), ((2562, 2585), 'numpy.expand_dims', 'np.expand_dims', (['rain', '(2)'], {}), '(rain, 2)\n', (2576, 2585), True, 'import numpy as np\n'), ((2596, 2617), 'numpy.repeat', 'np.repeat', (['rain', '(3)', '(2)'], {}), '(rain, 3, 2)\n', (2605, 2617), True, 'import numpy as np\n'), ((2646, 2693), 'cv2.addWeighted', 'cv2.addWeighted', (['img', 'alpha', 'rain', '(1 - 
alpha)', '(1)'], {}), '(img, alpha, rain, 1 - alpha, 1)\n', (2661, 2693), False, 'import cv2\n'), ((415, 440), 'numpy.where', 'np.where', (['(noise < 256 - v)'], {}), '(noise < 256 - v)\n', (423, 440), True, 'import numpy as np\n'), ((1149, 1164), 'numpy.ones', 'np.ones', (['length'], {}), '(length)\n', (1156, 1164), True, 'import numpy as np\n')] |
"""Unit tests for representations module."""
import pathlib
import tempfile
from ldp.parse import representations
import h5py
import numpy as np
import pytest
import torch
# Shape of the fake representations used throughout these tests:
# each sample is (REP_LAYERS, sequence_length, REP_DIMENSION), with one
# sample per entry of SEQ_LENGTHS.
REP_LAYERS = 3
REP_DIMENSION = 1024
SEQ_LENGTHS = (1, 2, 3)
@pytest.fixture
def reps():
    """Return fake representations: one random array per sequence length."""
    fakes = []
    for length in SEQ_LENGTHS:
        fakes.append(np.random.randn(REP_LAYERS, length, REP_DIMENSION))
    return fakes
@pytest.fixture
def path(reps):
    """Yields the path to a fake representations h5 file.

    Note: uses the plain ``pytest.fixture`` decorator with a yield body;
    ``pytest.yield_fixture`` was deprecated since pytest 3.0 and removed
    in pytest 6.2.
    """
    with tempfile.TemporaryDirectory() as tempdir:
        path = pathlib.Path(tempdir) / 'representations.h5'
        with h5py.File(path, 'w') as handle:
            # Dummy index dataset plus one dataset per fake sample,
            # keyed by its stringified index.
            handle.create_dataset('sentence_to_index', data=0)
            for index, rep in enumerate(reps):
                handle.create_dataset(str(index), data=rep)
        yield path
@pytest.fixture
def representation_dataset(path):
    """Construct a RepresentationDataset backed by the fake h5 file."""
    dataset = representations.RepresentationDataset(path)
    return dataset
def test_representation_dataset_getitem(representation_dataset, reps):
    """Test RepresentationDataset.__getitem__ returns correct shape."""
    for index in range(len(reps)):
        expected = torch.tensor(reps[index])
        assert representation_dataset[index].equal(expected)
def test_representation_dataset_len(representation_dataset):
    """Test RepresentationDataset.__len__ returns correct length."""
    expected = len(SEQ_LENGTHS)
    assert len(representation_dataset) == expected
def test_representation_dataset_dimension(representation_dataset):
    """Test RepresentationDataset.dimension returns correct dimension."""
    assert REP_DIMENSION == representation_dataset.dimension
def test_representation_dataset_length(representation_dataset):
    """Test RepresentationDataset.length returns sequence lengths."""
    for index in range(len(SEQ_LENGTHS)):
        assert representation_dataset.length(index) == SEQ_LENGTHS[index]
def test_representation_dataset_layer(representation_dataset):
    """Test RepresentationDataset.layer returns correct view."""
    for index in range(REP_LAYERS):
        view = representation_dataset.layer(index)
        assert view.layer == index
        assert view.dataset == representation_dataset
# Layer index fixed by the RepresentationLayerDataset fixture below.
LAYER = 0
@pytest.fixture
def representation_layer_dataset(representation_dataset):
    """Construct a RepresentationLayerDataset pinned to layer LAYER."""
    return representations.RepresentationLayerDataset(
        representation_dataset, LAYER)
def test_representation_layer_dataset_getitem(representation_layer_dataset,
                                              reps):
    """Test RepresentationLayerDataset.__getitem__ returns correct layer."""
    for index in range(len(reps)):
        expected = torch.tensor(reps[index][LAYER])
        assert representation_layer_dataset[index].equal(expected)
def test_representation_layer_dataset_len(representation_layer_dataset):
    """Test RepresentationLayerDataset.__len__ returns number of samples."""
    expected = len(SEQ_LENGTHS)
    assert expected == len(representation_layer_dataset)
def test_representation_layer_dataset_init_bad_layer(representation_dataset):
    """Test RepresentationLayerDataset.__init__ dies when given bad layer."""
    with pytest.raises(IndexError, match='.*3 out of bounds.*'):
        representations.RepresentationLayerDataset(
            representation_dataset, REP_LAYERS)
| [
"h5py.File",
"tempfile.TemporaryDirectory",
"numpy.random.randn",
"ldp.parse.representations.RepresentationDataset",
"pytest.raises",
"ldp.parse.representations.RepresentationLayerDataset",
"pathlib.Path",
"torch.tensor"
] | [((993, 1036), 'ldp.parse.representations.RepresentationDataset', 'representations.RepresentationDataset', (['path'], {}), '(path)\n', (1030, 1036), False, 'from ldp.parse import representations\n'), ((2442, 2515), 'ldp.parse.representations.RepresentationLayerDataset', 'representations.RepresentationLayerDataset', (['representation_dataset', 'LAYER'], {}), '(representation_dataset, LAYER)\n', (2484, 2515), False, 'from ldp.parse import representations\n'), ((338, 388), 'numpy.random.randn', 'np.random.randn', (['REP_LAYERS', 'length', 'REP_DIMENSION'], {}), '(REP_LAYERS, length, REP_DIMENSION)\n', (353, 388), True, 'import numpy as np\n'), ((539, 568), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (566, 568), False, 'import tempfile\n'), ((3318, 3372), 'pytest.raises', 'pytest.raises', (['IndexError'], {'match': '""".*3 out of bounds.*"""'}), "(IndexError, match='.*3 out of bounds.*')\n", (3331, 3372), False, 'import pytest\n'), ((3382, 3460), 'ldp.parse.representations.RepresentationLayerDataset', 'representations.RepresentationLayerDataset', (['representation_dataset', 'REP_LAYERS'], {}), '(representation_dataset, REP_LAYERS)\n', (3424, 3460), False, 'from ldp.parse import representations\n'), ((596, 617), 'pathlib.Path', 'pathlib.Path', (['tempdir'], {}), '(tempdir)\n', (608, 617), False, 'import pathlib\n'), ((654, 674), 'h5py.File', 'h5py.File', (['path', '"""w"""'], {}), "(path, 'w')\n", (663, 674), False, 'import h5py\n'), ((1301, 1323), 'torch.tensor', 'torch.tensor', (['expected'], {}), '(expected)\n', (1313, 1323), False, 'import torch\n'), ((2903, 2932), 'torch.tensor', 'torch.tensor', (['expected[LAYER]'], {}), '(expected[LAYER])\n', (2915, 2932), False, 'import torch\n')] |
import numpy as np
import glob
from numpy.linalg import eig as npeig
import matplotlib.pyplot as plt
from scipy.io import loadmat
from scipy import signal
from numpy.linalg import inv as npinv
def correlate():
    """Estimate second-order statistics of 20x40 target and clutter chips.

    Reads mean-subtracted .mat chips from ../data/train/chips20x40/ and
    saves two (800, 800) matrices:
      * ./weights_filters/R1.npy -- average outer-product (correlation)
        matrix of the vectorized target chips.
      * ./weights_filters/R2.npy -- matrix rebuilt from the averaged
        clutter autocorrelation surface (stationarity assumption).
    """
    target_chips = glob.glob('../data/train/chips20x40/targets/' + '*.mat')
    clutter_chips = glob.glob('../data/train/chips20x40/clutter/' + '*.mat')
    # Chip dimensions: d1 rows x d2 columns.
    d1 = 20
    d2 = 40
    count = len(target_chips)
    print(count, 'target chips')
    alltargets = np.zeros((d1,d2,count))
    for idx,chip in enumerate(target_chips):
        chiparray = loadmat(chip)['target_chip']
        # Remove the per-chip DC component.
        chiparray = chiparray - chiparray.mean()
        alltargets[:,:,idx] = chiparray
    # R1 = mean of v v^T over all vectorized (column-major) target chips.
    R1 = np.zeros((d1*d2,d1*d2))
    for idx in range(count):
        chipvec = alltargets[:,:,idx].transpose().reshape(d1*d2,1)
        R1 = R1 + np.matmul(chipvec, chipvec.transpose())
    R1 = R1/count
    np.save('./weights_filters/R1',R1)
    count = len(clutter_chips)
    print(count, 'clutter chips')
    allclutter = np.zeros((20,40,count))
    for idx,chip in enumerate(clutter_chips):
        chiparray = loadmat(chip)['clutter_chip']
        chiparray = chiparray - chiparray.mean()
        allclutter[:,:,idx] = chiparray
    # Running average of the 2-D autocorrelation (chip convolved with its
    # 180-degree rotation) over all clutter chips.
    x = allclutter[:, :, 0]
    x2 = np.flipud(np.fliplr(x))
    acf = signal.convolve2d(x, x2)
    for idx in range(count-1):
        x = allclutter[:,:,idx + 1]
        x2 = np.flipud(np.fliplr(x))
        tmp = signal.convolve2d(x,x2)
        acf = (acf * idx + tmp) / (idx + 1)
    # Normalize by the overlap count at each lag (triangular window).
    mask=np.ones((d1,d2));
    pmask=signal.convolve2d(mask,mask,'full')
    cov = acf/pmask
    m = cov.shape[0]
    n = cov.shape[1]
    ad1=int((m+1)/2)
    ad2=int((n+1)/2)
    dim=int(ad1*ad2)
    # Rebuild the full symmetric matrix: entry (i, j) is the
    # autocorrelation at the lag between pixel positions i and j.
    CM = np.zeros((dim,dim))
    row_index = np.kron(np.ones(ad2), np.arange(0, ad1, 1)).astype("int64")
    col_index = np.kron(np.arange(0, ad2, 1), np.ones(ad1))
    iv = np.column_stack((row_index, col_index))
    for i in range(dim):
        for j in range(dim):
            index = (iv[j, :] - iv[i, :]).astype("int64")
            row = d1 -1 + index[0]
            col = d2 -1 + index[1]
            CM[i, j] = cov[row, col]
            CM[j, i] = CM[i, j]
    R2 = CM
    np.save('./weights_filters/R2',R2)
def make_basis():
    """Derive discriminative filter bases from the saved R1/R2 matrices.

    Loads the target matrix R1 and clutter matrix R2 produced by
    correlate(), whitens by S = 0.18*R1 + R2 (keeping eigen-directions
    carrying >= 0.1% of cumulative energy), then eigendecomposes
    S^-1 (A - B).  Eigenvectors with clearly positive eigenvalues become
    target filters, clearly negative ones become clutter filters; both
    sets are saved under ./weights_filters/.
    """
    R1 = np.load("./weights_filters/R1.npy")
    R2 = np.load("./weights_filters/R2.npy")
    # NOTE(review): the 0.18 weighting of R1 looks empirically tuned --
    # no derivation is visible here.
    A = .18 * R1
    B = R2
    S = A + B
    delta, phi = npeig(S)
    # Sort eigenpairs by ascending eigenvalue.
    sdelta = delta[delta.argsort()]
    sphi = phi[:, delta.argsort()]
    # Drop leading eigen-directions below 0.1% of cumulative energy.
    tmp2= np.cumsum(sdelta)/sdelta.sum();
    skip = tmp2[tmp2 < .001].shape[0] - 1
    sdelta = sdelta[skip:]
    sphi = sphi[:,skip:]
    # Pseudo-inverse of S on the retained subspace: Phi D^-1 Phi^T.
    abc = np.matmul(sphi,npinv(sdelta *np.eye(len(sdelta))))
    Sinv = np.matmul(abc,sphi.transpose())
    T=np.matmul(Sinv,(A-B))
    delta, phi = npeig(T)
    # T is not symmetric, so keep only the real parts.
    delta = np.real(delta)
    phi = np.real(phi)
    sdelta = delta[delta.argsort()]
    sphi = phi[:, delta.argsort()]
    # Positive eigenvalues -> target-like directions; negative -> clutter.
    S1=sphi[:,sdelta > .01]
    S2=sphi[:,sdelta < -.01]
    n1 = S1.shape[1]
    n2 = S2.shape[1]
    print(n1,n2)
    np.save('./weights_filters/target_filters',S1)
    np.save('./weights_filters/clutter_filters',S2)
def view_filter(S, idx):
    """Display column ``idx`` of filter matrix ``S`` as a 20x40 image."""
    img = np.transpose(S[:, idx].reshape(40, 20))
    fig, axis = plt.subplots(figsize=(10, 10))
    axis.imshow(img)
    axis.set_title('ljk', fontsize=10)
    plt.show()
# Build the correlation matrices and the discriminative filter bases,
# then reload the saved filters and report their shapes.
correlate()
make_basis()
target_filters = np.load('./weights_filters/target_filters.npy')
clutter_filters = np.load('./weights_filters/clutter_filters.npy')
print('targets',target_filters.shape)
print('clutter',clutter_filters.shape)
# view_filter(target_filters,-1)
# view_filter(clutter_filters,20)
| [
"numpy.load",
"numpy.save",
"matplotlib.pyplot.show",
"scipy.signal.convolve2d",
"scipy.io.loadmat",
"numpy.column_stack",
"numpy.zeros",
"numpy.ones",
"numpy.linalg.eig",
"numpy.fliplr",
"numpy.cumsum",
"numpy.arange",
"numpy.matmul",
"glob.glob",
"numpy.real",
"matplotlib.pyplot.subp... | [((3349, 3396), 'numpy.load', 'np.load', (['"""./weights_filters/target_filters.npy"""'], {}), "('./weights_filters/target_filters.npy')\n", (3356, 3396), True, 'import numpy as np\n'), ((3415, 3463), 'numpy.load', 'np.load', (['"""./weights_filters/clutter_filters.npy"""'], {}), "('./weights_filters/clutter_filters.npy')\n", (3422, 3463), True, 'import numpy as np\n'), ((231, 287), 'glob.glob', 'glob.glob', (["('../data/train/chips20x40/targets/' + '*.mat')"], {}), "('../data/train/chips20x40/targets/' + '*.mat')\n", (240, 287), False, 'import glob\n'), ((308, 364), 'glob.glob', 'glob.glob', (["('../data/train/chips20x40/clutter/' + '*.mat')"], {}), "('../data/train/chips20x40/clutter/' + '*.mat')\n", (317, 364), False, 'import glob\n'), ((469, 494), 'numpy.zeros', 'np.zeros', (['(d1, d2, count)'], {}), '((d1, d2, count))\n', (477, 494), True, 'import numpy as np\n'), ((687, 715), 'numpy.zeros', 'np.zeros', (['(d1 * d2, d1 * d2)'], {}), '((d1 * d2, d1 * d2))\n', (695, 715), True, 'import numpy as np\n'), ((887, 922), 'numpy.save', 'np.save', (['"""./weights_filters/R1"""', 'R1'], {}), "('./weights_filters/R1', R1)\n", (894, 922), True, 'import numpy as np\n'), ((1006, 1031), 'numpy.zeros', 'np.zeros', (['(20, 40, count)'], {}), '((20, 40, count))\n', (1014, 1031), True, 'import numpy as np\n'), ((1288, 1312), 'scipy.signal.convolve2d', 'signal.convolve2d', (['x', 'x2'], {}), '(x, x2)\n', (1305, 1312), False, 'from scipy import signal\n'), ((1509, 1526), 'numpy.ones', 'np.ones', (['(d1, d2)'], {}), '((d1, d2))\n', (1516, 1526), True, 'import numpy as np\n'), ((1537, 1574), 'scipy.signal.convolve2d', 'signal.convolve2d', (['mask', 'mask', '"""full"""'], {}), "(mask, mask, 'full')\n", (1554, 1574), False, 'from scipy import signal\n'), ((1711, 1731), 'numpy.zeros', 'np.zeros', (['(dim, dim)'], {}), '((dim, dim))\n', (1719, 1731), True, 'import numpy as np\n'), ((1876, 1915), 'numpy.column_stack', 'np.column_stack', (['(row_index, 
col_index)'], {}), '((row_index, col_index))\n', (1891, 1915), True, 'import numpy as np\n'), ((2185, 2220), 'numpy.save', 'np.save', (['"""./weights_filters/R2"""', 'R2'], {}), "('./weights_filters/R2', R2)\n", (2192, 2220), True, 'import numpy as np\n'), ((2250, 2285), 'numpy.load', 'np.load', (['"""./weights_filters/R1.npy"""'], {}), "('./weights_filters/R1.npy')\n", (2257, 2285), True, 'import numpy as np\n'), ((2295, 2330), 'numpy.load', 'np.load', (['"""./weights_filters/R2.npy"""'], {}), "('./weights_filters/R2.npy')\n", (2302, 2330), True, 'import numpy as np\n'), ((2390, 2398), 'numpy.linalg.eig', 'npeig', (['S'], {}), '(S)\n', (2395, 2398), True, 'from numpy.linalg import eig as npeig\n'), ((2716, 2738), 'numpy.matmul', 'np.matmul', (['Sinv', '(A - B)'], {}), '(Sinv, A - B)\n', (2725, 2738), True, 'import numpy as np\n'), ((2755, 2763), 'numpy.linalg.eig', 'npeig', (['T'], {}), '(T)\n', (2760, 2763), True, 'from numpy.linalg import eig as npeig\n'), ((2776, 2790), 'numpy.real', 'np.real', (['delta'], {}), '(delta)\n', (2783, 2790), True, 'import numpy as np\n'), ((2801, 2813), 'numpy.real', 'np.real', (['phi'], {}), '(phi)\n', (2808, 2813), True, 'import numpy as np\n'), ((3005, 3052), 'numpy.save', 'np.save', (['"""./weights_filters/target_filters"""', 'S1'], {}), "('./weights_filters/target_filters', S1)\n", (3012, 3052), True, 'import numpy as np\n'), ((3056, 3104), 'numpy.save', 'np.save', (['"""./weights_filters/clutter_filters"""', 'S2'], {}), "('./weights_filters/clutter_filters', S2)\n", (3063, 3104), True, 'import numpy as np\n'), ((3197, 3227), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (3209, 3227), True, 'import matplotlib.pyplot as plt\n'), ((3294, 3304), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3302, 3304), True, 'import matplotlib.pyplot as plt\n'), ((1264, 1276), 'numpy.fliplr', 'np.fliplr', (['x'], {}), '(x)\n', (1273, 1276), True, 'import numpy as np\n'), ((1431, 
1455), 'scipy.signal.convolve2d', 'signal.convolve2d', (['x', 'x2'], {}), '(x, x2)\n', (1448, 1455), False, 'from scipy import signal\n'), ((1831, 1851), 'numpy.arange', 'np.arange', (['(0)', 'ad2', '(1)'], {}), '(0, ad2, 1)\n', (1840, 1851), True, 'import numpy as np\n'), ((1853, 1865), 'numpy.ones', 'np.ones', (['ad1'], {}), '(ad1)\n', (1860, 1865), True, 'import numpy as np\n'), ((2480, 2497), 'numpy.cumsum', 'np.cumsum', (['sdelta'], {}), '(sdelta)\n', (2489, 2497), True, 'import numpy as np\n'), ((558, 571), 'scipy.io.loadmat', 'loadmat', (['chip'], {}), '(chip)\n', (565, 571), False, 'from scipy.io import loadmat\n'), ((1096, 1109), 'scipy.io.loadmat', 'loadmat', (['chip'], {}), '(chip)\n', (1103, 1109), False, 'from scipy.io import loadmat\n'), ((1403, 1415), 'numpy.fliplr', 'np.fliplr', (['x'], {}), '(x)\n', (1412, 1415), True, 'import numpy as np\n'), ((1755, 1767), 'numpy.ones', 'np.ones', (['ad2'], {}), '(ad2)\n', (1762, 1767), True, 'import numpy as np\n'), ((1769, 1789), 'numpy.arange', 'np.arange', (['(0)', 'ad1', '(1)'], {}), '(0, ad1, 1)\n', (1778, 1789), True, 'import numpy as np\n')] |
import pandas as pd
import time
import seaborn
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model
import kernelml
from scipy import stats
# King County house-sales data, with an explicit dtype per column so pandas
# does not have to infer types (ids and zipcodes stay strings).
train=pd.read_csv("data/kc_house_train_data.csv",dtype = {'bathrooms':float, 'waterfront':int, 'sqft_above':int, 'sqft_living15':float, 'grade':int, 'yr_renovated':int, 'price':float, 'bedrooms':float, 'zipcode':str, 'long':float, 'sqft_lot15':float, 'sqft_living':float, 'floors':str, 'condition':int, 'lat':float, 'date':str, 'sqft_basement':int, 'yr_built':int, 'id':str, 'sqft_lot':int, 'view':int})
test=pd.read_csv("data/kc_house_test_data.csv",dtype = {'bathrooms':float, 'waterfront':int, 'sqft_above':int, 'sqft_living15':float, 'grade':int, 'yr_renovated':int, 'price':float, 'bedrooms':float, 'zipcode':str, 'long':float, 'sqft_lot15':float, 'sqft_living':float, 'floors':str, 'condition':int, 'lat':float, 'date':str, 'sqft_basement':int, 'yr_built':int, 'id':str, 'sqft_lot':int, 'view':int})
#sample parameters from distribution
#the mean of X seems like a reasonable center for the distribution params
def prior_sampler_custom(kmldata):
    """Draw prior parameter samples uniformly between mean(X) and 1."""
    shape = (kmldata.number_of_parameters, kmldata.posterior_random_samples)
    return np.random.uniform(np.mean(X), 1, size=shape)
def liklihood_loss(x, y, w):
    """Mean negative log-likelihood (cross-entropy) of predictions vs labels.

    Parameters
    ----------
    x : ndarray
        Predicted probabilities; values are clamped into
        [1e-5, 1 - 1e-5] before taking logs.
    y : ndarray
        Binary targets with the same shape as ``x``.
    w : unused
        Kept so the signature matches the kernelml loss-function contract.

    Returns
    -------
    float
        Scalar loss averaged over the samples.
    """
    # BUG FIX: clip on a copy -- the original assigned ``hypothesis = x``
    # and clamped it in place, silently mutating the caller's array.
    hypothesis = np.clip(x, 0.00001, 0.99999)
    loss = -1 * ((1 - y).T.dot(np.log(1 - hypothesis))
                 + y.T.dot(np.log(hypothesis))) / len(y)
    return loss.flatten()[0]
def distribution_loss(x, y, w):
    """Likelihood loss of a scaled normal pdf (parameters in w) against y."""
    mean, spread, scale = w[0], w[1], w[2]
    pdf_values = scale * stats.norm(mean, spread).pdf(x)
    return liklihood_loss(pdf_values, y, w)
# Histogram the sale prices into 30 bins and normalize the counts so the
# peak is 1; X holds the (shifted) bin positions as a column vector.
prices = train[['price']].values
y, indx = np.histogram(prices, normed=False, bins=30)
X = np.linspace(np.min(prices), np.max(prices), len(y)) + np.diff(indx)
X = X.reshape(-1, 1)
y = y.flatten() / np.max(y)
y = y.reshape(-1, 1)

# Optimizer hyper-parameters.
realizations = 3
cycles = 10
volume = 5
simulations = 100
volatility = 100

kml = kernelml.KernelML(
    prior_sampler_fcn=prior_sampler_custom,
    posterior_sampler_fcn=None,
    intermediate_sampler_fcn=None,
    mini_batch_sampler_fcn=None,
    parameter_transform_fcn=None,
    batch_size=None)

# Fit the three pdf parameters (mean, std, scale) to the histogram.
parameter_by_run, loss_by_run = kml.optimize(
    X, y,
    loss_function=distribution_loss,
    number_of_parameters=3,
    args=[],
    number_of_realizations=realizations,
    number_of_random_simulations=simulations,
    update_volatility=volatility,
    number_of_cycles=cycles,
    prior_uniform_low=1,
    prior_uniform_high=2,
    plot_feedback=False,
    print_feedback=True)

# Overlay the fitted scaled-normal pdf on the empirical histogram.
w = parameter_by_run[-1]
mean1, std1, scale1 = w[0], w[1], w[2]
plt.stem(X, scale1 * stats.norm.pdf(X, mean1, std1), 'r', lw=5, alpha=0.6,
         label='normal pdf')
plt.plot(X, y)
plt.show()
| [
"scipy.stats.norm",
"matplotlib.pyplot.show",
"numpy.log",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"scipy.stats.norm.pdf",
"numpy.histogram",
"numpy.diff",
"numpy.max",
"kernelml.KernelML",
"numpy.mean",
"numpy.min"
] | [((183, 620), 'pandas.read_csv', 'pd.read_csv', (['"""data/kc_house_train_data.csv"""'], {'dtype': "{'bathrooms': float, 'waterfront': int, 'sqft_above': int, 'sqft_living15':\n float, 'grade': int, 'yr_renovated': int, 'price': float, 'bedrooms':\n float, 'zipcode': str, 'long': float, 'sqft_lot15': float,\n 'sqft_living': float, 'floors': str, 'condition': int, 'lat': float,\n 'date': str, 'sqft_basement': int, 'yr_built': int, 'id': str,\n 'sqft_lot': int, 'view': int}"}), "('data/kc_house_train_data.csv', dtype={'bathrooms': float,\n 'waterfront': int, 'sqft_above': int, 'sqft_living15': float, 'grade':\n int, 'yr_renovated': int, 'price': float, 'bedrooms': float, 'zipcode':\n str, 'long': float, 'sqft_lot15': float, 'sqft_living': float, 'floors':\n str, 'condition': int, 'lat': float, 'date': str, 'sqft_basement': int,\n 'yr_built': int, 'id': str, 'sqft_lot': int, 'view': int})\n", (194, 620), True, 'import pandas as pd\n'), ((586, 1022), 'pandas.read_csv', 'pd.read_csv', (['"""data/kc_house_test_data.csv"""'], {'dtype': "{'bathrooms': float, 'waterfront': int, 'sqft_above': int, 'sqft_living15':\n float, 'grade': int, 'yr_renovated': int, 'price': float, 'bedrooms':\n float, 'zipcode': str, 'long': float, 'sqft_lot15': float,\n 'sqft_living': float, 'floors': str, 'condition': int, 'lat': float,\n 'date': str, 'sqft_basement': int, 'yr_built': int, 'id': str,\n 'sqft_lot': int, 'view': int}"}), "('data/kc_house_test_data.csv', dtype={'bathrooms': float,\n 'waterfront': int, 'sqft_above': int, 'sqft_living15': float, 'grade':\n int, 'yr_renovated': int, 'price': float, 'bedrooms': float, 'zipcode':\n str, 'long': float, 'sqft_lot15': float, 'sqft_living': float, 'floors':\n str, 'condition': int, 'lat': float, 'date': str, 'sqft_basement': int,\n 'yr_built': int, 'id': str, 'sqft_lot': int, 'view': int})\n", (597, 1022), True, 'import pandas as pd\n'), ((1688, 1748), 'numpy.histogram', 'np.histogram', (["train[['price']].values"], {'normed': '(False)', 
'bins': '(30)'}), "(train[['price']].values, normed=False, bins=30)\n", (1700, 1748), True, 'import numpy as np\n'), ((2021, 2221), 'kernelml.KernelML', 'kernelml.KernelML', ([], {'prior_sampler_fcn': 'prior_sampler_custom', 'posterior_sampler_fcn': 'None', 'intermediate_sampler_fcn': 'None', 'mini_batch_sampler_fcn': 'None', 'parameter_transform_fcn': 'None', 'batch_size': 'None'}), '(prior_sampler_fcn=prior_sampler_custom,\n posterior_sampler_fcn=None, intermediate_sampler_fcn=None,\n mini_batch_sampler_fcn=None, parameter_transform_fcn=None, batch_size=None)\n', (2038, 2221), False, 'import kernelml\n'), ((3077, 3091), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'y'], {}), '(X, y)\n', (3085, 3091), True, 'from matplotlib import pyplot as plt\n'), ((3091, 3101), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3099, 3101), True, 'from matplotlib import pyplot as plt\n'), ((1855, 1868), 'numpy.diff', 'np.diff', (['indx'], {}), '(indx)\n', (1862, 1868), True, 'import numpy as np\n'), ((1906, 1915), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (1912, 1915), True, 'import numpy as np\n'), ((1156, 1166), 'numpy.mean', 'np.mean', (['X'], {}), '(X)\n', (1163, 1166), True, 'import numpy as np\n'), ((1764, 1795), 'numpy.min', 'np.min', (["train[['price']].values"], {}), "(train[['price']].values)\n", (1770, 1795), True, 'import numpy as np\n'), ((1813, 1844), 'numpy.max', 'np.max', (["train[['price']].values"], {}), "(train[['price']].values)\n", (1819, 1844), True, 'import numpy as np\n'), ((3006, 3036), 'scipy.stats.norm.pdf', 'stats.norm.pdf', (['X', 'mean1', 'std1'], {}), '(X, mean1, std1)\n', (3020, 3036), False, 'from scipy import stats\n'), ((1595, 1619), 'scipy.stats.norm', 'stats.norm', (['alpha1', 'loc1'], {}), '(alpha1, loc1)\n', (1605, 1619), False, 'from scipy import stats\n'), ((1419, 1441), 'numpy.log', 'np.log', (['(1 - hypothesis)'], {}), '(1 - hypothesis)\n', (1425, 1441), True, 'import numpy as np\n'), ((1451, 1469), 'numpy.log', 'np.log', 
(['hypothesis'], {}), '(hypothesis)\n', (1457, 1469), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import glob
from os import listdir
import os
import scipy.io
import csv
import numpy as np
import tensorflow as tf
import tensorflow_compression as tfc
import sys
def load_image(filename):
    """Loads a PNG image file and scales it to float32 in [0, 1]."""
    raw = tf.read_file(filename)
    decoded = tf.image.decode_image(raw, channels=3)
    scaled = tf.cast(decoded, tf.float32) / 255
    return scaled
def quantize_image(image):
    """Clip to [0, 1], rescale to [0, 255], and cast to uint8."""
    clipped = tf.clip_by_value(image, 0, 1)
    rounded = tf.round(clipped * 255)
    return tf.cast(rounded, tf.uint8)
def save_image(filename, image):
    """Saves an image to a PNG file."""
    encoded = tf.image.encode_png(quantize_image(image))
    return tf.write_file(filename, encoded)
# slimmable autoencoder -- encoder
def slimmable_analysis_transform(tensor_in, switch_list, total_filters_num):
  """Builds the slimmable analysis transform.

  Args:
    tensor_in: input image batch.
    switch_list: active channel widths, one per switch level. Conv layers
      are created once and shared across levels (reuse=(i > 0)); each
      level gets its own non-shared GDN layers.
    total_filters_num: full channel width of the shared conv layers.

  Returns:
    List of bottleneck tensors, one per switch level.
  """
  with tf.variable_scope("analysis"):
    tensor_encoder = list()
    for i, _switch in enumerate(switch_list):
      # the first conv and switchable gdn layers
      with tf.variable_scope("layer_0",reuse=(i>0)):
        layer = tfc.SignalConv2D_slim(
            total_filters_num, (9, 9), corr=True, strides_down=4, padding="same_zeros",
            use_bias=True, activation=None)
        # 3 input (RGB) channels; only _switch output channels are active.
        tensor = layer(tensor_in, 3, _switch)
      with tf.variable_scope("gdn_an_0_{:1d}".format(i)):
        tensor_gdn_0 = tfc.GDN()(tensor)
      # Zero-pad the channel axis back to the full width so the shared
      # convolution of the next layer always sees total_filters_num channels.
      tensor_gdn_0 = tf.pad(tensor_gdn_0, [[0,0], [0,0], [0,0], [0,(total_filters_num - _switch)]], "CONSTANT")
      # the second conv and switchable gdn layers
      with tf.variable_scope("layer_1",reuse=(i>0)):
        layer = tfc.SignalConv2D_slim(
            total_filters_num, (5, 5), corr=True, strides_down=2, padding="same_zeros",
            use_bias=True, activation=None)
        tensor = layer(tensor_gdn_0, _switch, _switch)
      with tf.variable_scope("gdn_an_1_{:1d}".format(i)):
        tensor_gdn_1 = tfc.GDN()(tensor)
      tensor_gdn_1 = tf.pad(tensor_gdn_1, [[0,0], [0,0], [0,0], [0,(total_filters_num - _switch)]], "CONSTANT")
      # the third conv and switchable gdn layers (no bias on the bottleneck)
      with tf.variable_scope("layer_2",reuse=(i>0)):
        layer = tfc.SignalConv2D_slim(
            total_filters_num, (5, 5), corr=True, strides_down=2, padding="same_zeros",
            use_bias=False, activation=None)
        tensor = layer(tensor_gdn_1, _switch, _switch)
      with tf.variable_scope("gdn_an_2_{:1d}".format(i)):
        tensor_gdn_2 = tfc.GDN()(tensor)
      # store the bottleneck features from different width
      tensor_encoder.append(tensor_gdn_2)
    return tensor_encoder
# slimmable autoencoder -- decoder
def slimmable_synthesis_transform(tensor_encoder, switch_list, total_filters_num):
  """Builds the slimmable synthesis transform.

  Mirrors slimmable_analysis_transform: shared deconv layers
  (reuse=(i > 0)) with per-level inverse-GDN layers, zero-padding the
  channel axis to total_filters_num between stages.

  Args:
    tensor_encoder: list of bottleneck tensors, one per switch level.
    switch_list: active channel widths, one per switch level.
    total_filters_num: full channel width of the shared deconv layers.

  Returns:
    List of reconstructed image tensors, one per switch level.
  """
  with tf.variable_scope("synthesis"):
    tensor_decoder = list()
    for i, _switch in enumerate(switch_list):
      # the first deconv and igdn layers
      with tf.variable_scope("gdn_sy_0_{:1d}".format(i)):
        tensor_igdn_0 = tfc.GDN(inverse=True)(tensor_encoder[i])
      tensor_igdn_0 = tf.pad(tensor_igdn_0, [[0,0], [0,0], [0,0], [0,(total_filters_num - _switch)]], "CONSTANT")
      with tf.variable_scope("layer_0",reuse=(i>0)):
        layer = tfc.SignalConv2D_slim(
            total_filters_num, (5, 5), corr=False, strides_up=2, padding="same_zeros",
            use_bias=True, activation=None)
        tensor = layer(tensor_igdn_0, _switch, _switch)
      # the second deconv and igdn layers
      with tf.variable_scope("gdn_sy_1_{:1d}".format(i)):
        tensor_igdn_1 = tfc.GDN(inverse=True)(tensor)
      tensor_igdn_1 = tf.pad(tensor_igdn_1, [[0,0], [0,0], [0,0], [0,(total_filters_num - _switch)]], "CONSTANT")
      with tf.variable_scope("layer_1",reuse=(i>0)):
        layer = tfc.SignalConv2D_slim(
            total_filters_num, (5, 5), corr=False, strides_up=2, padding="same_zeros",
            use_bias=True, activation=None)
        tensor = layer(tensor_igdn_1, _switch, _switch)
      # the third deconv and igdn layers (back to 3 RGB channels)
      with tf.variable_scope("gdn_sy_2_{:1d}".format(i)):
        tensor_igdn_2 = tfc.GDN(inverse=True)(tensor)
      tensor_igdn_2 = tf.pad(tensor_igdn_2, [[0,0], [0,0], [0,0], [0,(total_filters_num - _switch)]], "CONSTANT")
      with tf.variable_scope("layer_2",reuse=(i>0)):
        layer = tfc.SignalConv2D_slim(
            3, (9, 9), corr=False, strides_up=4, padding="same_zeros",
            use_bias=True, activation=None)
        tensor = layer(tensor_igdn_2, _switch, 3)
      # store the reconstructions from different width
      tensor_decoder.append(tensor)
    return tensor_decoder
# train function
def train(last_step, lmbdas):
  """Trains the model.

  Jointly optimizes the rate-distortion loss of all switch levels
  (sum over levels of lmbdas[i] * MSE_i + bpp_i) until the global step
  reaches last_step, checkpointing to args.checkpoint_dir.

  Args:
    last_step: global step at which training stops (StopAtStepHook).
    lmbdas: one rate-distortion tradeoff weight per switch level.
  """
  if args.verbose:
    tf.logging.set_verbosity(tf.logging.INFO)
  # create input data pipeline on the CPU: shuffle files, decode, crop
  # random patches, batch, prefetch.
  with tf.device('/cpu:0'):
    train_files = glob.glob(args.train_glob)
    train_dataset = tf.data.Dataset.from_tensor_slices(train_files)
    train_dataset = train_dataset.shuffle(buffer_size=len(train_files)).repeat()
    train_dataset = train_dataset.map(
        load_image, num_parallel_calls=args.preprocess_threads)
    train_dataset = train_dataset.map(
        lambda x: tf.random_crop(x, (args.patchsize, args.patchsize, 3)))
    train_dataset = train_dataset.batch(args.batchsize)
    train_dataset = train_dataset.prefetch(32)
  num_pixels = args.batchsize * args.patchsize ** 2
  total_filters_num = args.num_filters
  # get training patch from dataset.
  x = train_dataset.make_one_shot_iterator().get_next()
  # NOTE(review): there is no else-branch -- nothing is built when
  # args.train_jointly is False.
  if args.train_jointly:
    # lists to keep per-switch tensors, one entry per lambda
    y_tilde, entropy_bottlenecks, likelihoods = list(), list(), list()
    train_bpp, train_mse, train_loss = list(), list(), list()
    # build a slimmable encoder
    y = slimmable_analysis_transform(x, args.switch_list, total_filters_num)
    # one entropy bottleneck per switch level
    for i, _switch in enumerate(args.switch_list):
      entropy_bottlenecks.append(tfc.EntropyBottleneck())
      _y_tilde, _likelihoods = entropy_bottlenecks[i](y[i], training=True)
      y_tilde.append(_y_tilde)
      likelihoods.append(_likelihoods)
    # build a slimmable decoder
    x_tilde = slimmable_synthesis_transform(y_tilde, args.switch_list, total_filters_num)
    for i, _switch in enumerate(args.switch_list):
      # Total number of bits divided by number of pixels.
      train_bpp.append(tf.reduce_sum(tf.log(likelihoods[i])) / (-np.log(2) * num_pixels))
      # Mean squared error across pixels.
      train_mse.append(tf.reduce_mean(tf.squared_difference(x, x_tilde[i])))
      # Multiply by 255^2 to correct for rescaling.
      # train_mse[i] *= 255 ** 2
      # The rate-distortion cost.
      train_loss.append(lmbdas[i] * train_mse[i] + train_bpp[i])
    # total loss: sum of the per-switch RD costs
    total_train_loss = tf.add_n(train_loss)
    # minimize loss and the entropy-model auxiliary losses, and execute
    # the quantile update ops, all grouped into one train_op.
    step = tf.train.create_global_step()
    main_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
    main_step = main_optimizer.minimize(total_train_loss, global_step=step)
    aux_optimizers = list()
    list_ops = [main_step]
    for i, entropy_bottleneck in enumerate(entropy_bottlenecks):
      aux_optimizers.append(tf.train.AdamOptimizer(learning_rate=1e-3))
      list_ops.append(aux_optimizers[i].minimize(entropy_bottleneck.losses[0]))
      list_ops.append(entropy_bottleneck.updates[0])
    train_op = tf.group(list_ops)
    # summaries, one scalar set per switch level
    for i, _switch in enumerate(args.switch_list):
      tf.summary.scalar("loss_%d" % i, train_loss[i])
      tf.summary.scalar("bpp_%d" % i, train_bpp[i])
      tf.summary.scalar("mse_%d" % i, train_mse[i]* 255 ** 2) # Rescaled
      tf.summary.histogram("hist_y_%d" % i, y[i])
    tf.summary.scalar("total_loss", total_train_loss)
    hooks = [
        tf.train.StopAtStepHook(last_step=last_step),
        tf.train.NanTensorHook(total_train_loss),
    ]
    with tf.train.MonitoredTrainingSession(
        hooks=hooks, checkpoint_dir=args.checkpoint_dir,
        save_checkpoint_secs=900, save_summaries_secs=600) as sess:
      while not sess.should_stop():
        # NOTE(review): sess.run(step) only fetches the global-step value
        # (it is a variable, not an op); the actual update is the
        # train_op run below. Confirm the extra run is intentional -- it
        # draws and discards one input batch per iteration.
        sess.run(step)
        sess.run(train_op)
def evaluate(last_step):
  """Evaluate the model on every image in args.inputPath.

  For each image a fresh graph is built, the latest checkpoint from
  args.checkpoint_dir is restored, and rate/distortion metrics are
  computed at every switch (width) level. If args.evaluation_name is
  set, the per-switch averages are also written to a text report.

  Args:
    last_step: current training step; used only to name the report file.

  Returns:
    (Avg_bpp_estimate, Avg_psnr): numpy arrays of per-switch averages.
  """
  # process all the images in input_path
  imagesList = listdir(args.inputPath)
  # Per-switch running totals, accumulated over all images.
  bpp_estimate_total = [0.0] * len(args.switch_list)
  mse_total = [0.0] * len(args.switch_list)
  psnr_total = [0.0] * len(args.switch_list)
  msssim_total = [0.0] * len(args.switch_list)
  msssim_db_total = [0.0] * len(args.switch_list)
  for image in imagesList:
    entropy_bottlenecks = list()
    y_hat, likelihoods, eval_bpp = list(), list(), list()
    mse, psnr, msssim = list(), list(), list()
    x = load_image(args.inputPath + image)
    x = tf.expand_dims(x, 0)
    x.set_shape([1, None, None, 3])
    y = slimmable_analysis_transform(x, args.switch_list, args.num_filters)
    for i, _switch in enumerate(args.switch_list):
      entropy_bottlenecks.append(tfc.EntropyBottleneck())
      _y_hat, _likelihoods = entropy_bottlenecks[i](y[i], training=False)
      y_hat.append(_y_hat)
      likelihoods.append(_likelihoods)
    x_hat = slimmable_synthesis_transform(y_hat, args.switch_list, args.num_filters)
    num_pixels = tf.to_float(tf.reduce_prod(tf.shape(x)[:-1]))
    # Bring both images back to 0..255 range.
    x *= 255
    for i, _switch in enumerate(args.switch_list):
      # Total number of bits divided by number of pixels.
      eval_bpp.append(tf.reduce_sum(tf.log(likelihoods[i])) / (-np.log(2) * num_pixels))
      x_rec = tf.clip_by_value(x_hat[i], 0, 1)
      x_rec = tf.round(x_rec * 255)
      # Crop the reconstruction back to the input spatial size.
      x_rec = tf.slice(x_rec, [0, 0, 0, 0], [1, tf.shape(x)[1], tf.shape(x)[2], 3])
      mse.append(tf.reduce_mean(tf.squared_difference(x, x_rec)))
      psnr.append(tf.squeeze(tf.image.psnr(x_rec, x, 255)))
      msssim.append(tf.squeeze(tf.image.ssim_multiscale(x_rec, x, 255)))
    with tf.Session() as sess:
      # Load the latest model checkpoint, get the evaluation results.
      latest = tf.train.latest_checkpoint(checkpoint_dir=args.checkpoint_dir)
      tf.train.Saver().restore(sess, save_path=latest)
      eval_bpp, mse, psnr, msssim, num_pixels = sess.run(
          [eval_bpp, mse, psnr, msssim, num_pixels])
    # print RD results and accumulate totals
    for i, _switch in enumerate(args.switch_list):
      print("Switch level:{:1d}".format(i))
      print("Mean squared error: {:0.4f}".format(mse[i]))
      print("PSNR (dB): {:0.2f}".format(psnr[i]))
      print("Multiscale SSIM: {:0.4f}".format(msssim[i]))
      print("Multiscale SSIM (dB): {:0.2f}".format(-10 * np.log10(1 - msssim[i])))
      print("Information content in bpp: {:0.4f}".format(eval_bpp[i]))
      bpp_estimate_total[i] += eval_bpp[i]
      mse_total[i] += mse[i]
      psnr_total[i] += psnr[i]
      msssim_total[i] += msssim[i]
      msssim_db_total[i] += (-10 * np.log10(1 - msssim[i]))
    # Each image builds its own graph; clear it before the next one.
    tf.reset_default_graph()
  # BUG FIX: the averages were previously computed only inside
  # `if args.evaluation_name is not None`, so the unconditional return
  # below raised a NameError whenever no evaluation name was given.
  # Compute them unconditionally; only the report write stays optional.
  Avg_bpp_estimate = np.array(bpp_estimate_total) / len(imagesList)
  Avg_mse, Avg_psnr = np.array(mse_total) / len(imagesList), np.array(psnr_total) / len(imagesList)
  Avg_msssim, Avg_msssim_db = np.array(msssim_total) / len(imagesList), np.array(msssim_db_total) / len(imagesList)
  if args.evaluation_name is not None:
    with open(args.evaluation_name + str(last_step) + '.txt', 'w') as f:
      f.write('Avg_bpp_estimate: '+str(Avg_bpp_estimate)+'\n')
      f.write('Avg_mse: '+str(Avg_mse)+'\n')
      f.write('Avg_psnr: '+str(Avg_psnr)+'\n')
      f.write('Avg_msssim: '+str(Avg_msssim)+'\n')
      f.write('Avg_msssim_db: '+str(Avg_msssim_db)+'\n')
  return Avg_bpp_estimate, Avg_psnr
def train_loop():
    """Search the optimal RD points in a slimmable compressive autoencoder.

    Stage 1 trains the SlimCAE with the initial lambdas. Stage 2 performs
    lambda scheduling: for each pair of adjacent widths it repeatedly scales
    the lambdas of the narrower widths by 0.8 and retrains, stopping once the
    RD-curve slope between the two widths improves (or after a bounded number
    of attempts). Log files of the schedule are written at the end.

    Relies on module-level names: ``args``, ``train``, ``evaluate``, ``tf``
    and ``csv``.
    """
    # the number of iterations
    last_step = args.last_step
    # initial RD tradeoffs
    lmbdas = args.lmbda
    # train SlimCAE as stage 1
    train(last_step, lmbdas)
    tf.reset_default_graph()
    # evaluate model with validation dataset
    bpp, psnr = evaluate(last_step)
    lambda_log = list()
    grad_flag_log = list()
    grad_current_log = list()
    # train SlimCAE with lambda scheduling as stage 2
    for i in range(len(lmbdas)-1):
        # slope of the RD curve between width i and width i+1
        grad_flag = (psnr[i] - psnr[i+1]) / (bpp[i] - bpp[i+1])
        factor = 1
        m = 1
        while True:
            lmbdas[(i+1):] = [0.8 * element for element in lmbdas[(i+1):]] # adjust the lambda values
            # BUG FIX: snapshot the current lambdas. Appending ``lmbdas``
            # itself would store a reference to the list that keeps being
            # mutated, so every entry of the log would end up identical to
            # the final state.
            lambda_log.append(list(lmbdas))
            last_step += 20
            train(last_step, lmbdas) # train the model with more iterations
            tf.reset_default_graph()
            bpp, psnr = evaluate(last_step)
            tf.reset_default_graph()
            grad_current = (psnr[i] - psnr[i+1]) / (bpp[i] - bpp[i+1])
            grad_flag_log.append(grad_flag)
            grad_current_log.append(grad_current)
            if grad_current > grad_flag:
                break
            else:
                if m == 1:
                    factor_flag = grad_flag - grad_current
                elif m < 7:
                    # NOTE(review): ``factor`` is computed but never used;
                    # kept for parity with the original behavior.
                    factor = (grad_flag - grad_current) / factor_flag
                    factor_flag = grad_flag - grad_current
                else:
                    break
                grad_flag = grad_current
                m += 1
    # save log files during the lambda scheduling
    with open('lmbdaslog', 'w') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        wr.writerow(lambda_log)
    with open('grad_flag_log', 'w') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        wr.writerow(grad_flag_log)
    with open('grad_current_log', 'w') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        wr.writerow(grad_current_log)
if __name__ == "__main__":
    # Build the command-line interface from a declarative option table so the
    # flag definitions are easy to scan and extend.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "command", choices=["train", "evaluate", "train_lambda_schedule"],
        help="What to do: 'train' loads training data and trains "
             "a new model. 'evaluate' loads a pre-trained model and "
             "evaluates on a given dataset. 'train_lambda_schedule' "
             "means train a new model with lambda scheduling. ")
    optional_arguments = [
        (("--verbose", "-v"),
         dict(action="store_true",
              help="Report bitrate and distortion when training or compressing.")),
        (("--num_filters",),
         dict(type=int, default=128,
              help="Number of filters per layer.")),
        (("--checkpoint_dir",),
         dict(default="train",
              help="Directory where to save/load model checkpoints.")),
        (("--train_glob",),
         dict(default="images/*.png",
              help="Glob pattern identifying training data. This pattern must expand "
                   "to a list of RGB images in PNG format.")),
        (("--batchsize",),
         dict(type=int, default=8,
              help="Batch size for training.")),
        (("--patchsize",),
         dict(type=int, default=256,
              help="Size of image patches for training.")),
        (("--lambda",),
         dict(nargs="+", type=float, default=[512], dest="lmbda",
              help="Lambdas for rate-distortion tradeoff point.")),
        (("--last_step",),
         dict(type=int, default=800000,
              help="Train up to this number of steps.")),
        (("--preprocess_threads",),
         dict(type=int, default=6,
              help="Number of CPU threads to use for parallel decoding of training "
                   "images.")),
        (("--switch_list",),
         dict(nargs="+", type=int, default=[64],
              help="Number of filters per layer.")),
        (("--evaluation_name",),
         dict(type=str, default='./searchRDpoints/One',
              help="the name of evaluation results txt file.")),
        (("--inputPath",),
         dict(type=str, default=None,
              help="Directory where to evaluation dataset.")),
        (("--train_jointly",),
         dict(action="store_true",
              help="train all the variables together.")),
    ]
    for flags, options in optional_arguments:
        parser.add_argument(*flags, **options)
    args = parser.parse_args()
    # Dispatch on the requested sub-command.
    command = args.command
    if command == "train":
        train(args.last_step, args.lmbda)
    elif command == "train_lambda_schedule":
        train_loop()
    elif command == "evaluate":
        if args.inputPath is None:
            raise ValueError("Need input path for evaluation.")
        evaluate(args.last_step)
| [
"argparse.ArgumentParser",
"tensorflow.clip_by_value",
"tensorflow.write_file",
"tensorflow.reset_default_graph",
"tensorflow.image.psnr",
"tensorflow.logging.set_verbosity",
"tensorflow.train.latest_checkpoint",
"tensorflow.train.NanTensorHook",
"glob.glob",
"tensorflow.train.MonitoredTrainingSes... | [((366, 388), 'tensorflow.read_file', 'tf.read_file', (['filename'], {}), '(filename)\n', (378, 388), True, 'import tensorflow as tf\n'), ((401, 442), 'tensorflow.image.decode_image', 'tf.image.decode_image', (['string'], {'channels': '(3)'}), '(string, channels=3)\n', (422, 442), True, 'import tensorflow as tf\n'), ((455, 481), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (462, 481), True, 'import tensorflow as tf\n'), ((556, 585), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['image', '(0)', '(1)'], {}), '(image, 0, 1)\n', (572, 585), True, 'import tensorflow as tf\n'), ((598, 619), 'tensorflow.round', 'tf.round', (['(image * 255)'], {}), '(image * 255)\n', (606, 619), True, 'import tensorflow as tf\n'), ((632, 656), 'tensorflow.cast', 'tf.cast', (['image', 'tf.uint8'], {}), '(image, tf.uint8)\n', (639, 656), True, 'import tensorflow as tf\n'), ((795, 821), 'tensorflow.image.encode_png', 'tf.image.encode_png', (['image'], {}), '(image)\n', (814, 821), True, 'import tensorflow as tf\n'), ((833, 864), 'tensorflow.write_file', 'tf.write_file', (['filename', 'string'], {}), '(filename, string)\n', (846, 864), True, 'import tensorflow as tf\n'), ((9287, 9310), 'os.listdir', 'listdir', (['args.inputPath'], {}), '(args.inputPath)\n', (9294, 9310), False, 'from os import listdir\n'), ((13382, 13406), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (13404, 13406), True, 'import tensorflow as tf\n'), ((15253, 15332), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (15276, 15332), False, 'import argparse\n'), ((1039, 1068), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""analysis"""'], {}), "('analysis')\n", (1056, 1068), True, 'import tensorflow as tf\n'), ((3190, 3220), 'tensorflow.variable_scope', 
'tf.variable_scope', (['"""synthesis"""'], {}), "('synthesis')\n", (3207, 3220), True, 'import tensorflow as tf\n'), ((5418, 5459), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (5442, 5459), True, 'import tensorflow as tf\n'), ((5504, 5523), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (5513, 5523), True, 'import tensorflow as tf\n'), ((5547, 5573), 'glob.glob', 'glob.glob', (['args.train_glob'], {}), '(args.train_glob)\n', (5556, 5573), False, 'import glob\n'), ((5598, 5645), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['train_files'], {}), '(train_files)\n', (5632, 5645), True, 'import tensorflow as tf\n'), ((7651, 7671), 'tensorflow.add_n', 'tf.add_n', (['train_loss'], {}), '(train_loss)\n', (7659, 7671), True, 'import tensorflow as tf\n'), ((7755, 7784), 'tensorflow.train.create_global_step', 'tf.train.create_global_step', ([], {}), '()\n', (7782, 7784), True, 'import tensorflow as tf\n'), ((7810, 7854), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.0001)'}), '(learning_rate=0.0001)\n', (7832, 7854), True, 'import tensorflow as tf\n'), ((8308, 8326), 'tensorflow.group', 'tf.group', (['list_ops'], {}), '(list_ops)\n', (8316, 8326), True, 'import tensorflow as tf\n'), ((8665, 8714), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""total_loss"""', 'total_train_loss'], {}), "('total_loss', total_train_loss)\n", (8682, 8714), True, 'import tensorflow as tf\n'), ((9838, 9858), 'tensorflow.expand_dims', 'tf.expand_dims', (['x', '(0)'], {}), '(x, 0)\n', (9852, 9858), True, 'import tensorflow as tf\n'), ((12328, 12352), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (12350, 12352), True, 'import tensorflow as tf\n'), ((14855, 14896), 'csv.writer', 'csv.writer', (['myfile'], {'quoting': 'csv.QUOTE_ALL'}), '(myfile, quoting=csv.QUOTE_ALL)\n', (14865, 14896), 
False, 'import csv\n'), ((14990, 15031), 'csv.writer', 'csv.writer', (['myfile'], {'quoting': 'csv.QUOTE_ALL'}), '(myfile, quoting=csv.QUOTE_ALL)\n', (15000, 15031), False, 'import csv\n'), ((15131, 15172), 'csv.writer', 'csv.writer', (['myfile'], {'quoting': 'csv.QUOTE_ALL'}), '(myfile, quoting=csv.QUOTE_ALL)\n', (15141, 15172), False, 'import csv\n'), ((1656, 1752), 'tensorflow.pad', 'tf.pad', (['tensor_gdn_0', '[[0, 0], [0, 0], [0, 0], [0, total_filters_num - _switch]]', '"""CONSTANT"""'], {}), "(tensor_gdn_0, [[0, 0], [0, 0], [0, 0], [0, total_filters_num -\n _switch]], 'CONSTANT')\n", (1662, 1752), True, 'import tensorflow as tf\n'), ((2274, 2370), 'tensorflow.pad', 'tf.pad', (['tensor_gdn_1', '[[0, 0], [0, 0], [0, 0], [0, total_filters_num - _switch]]', '"""CONSTANT"""'], {}), "(tensor_gdn_1, [[0, 0], [0, 0], [0, 0], [0, total_filters_num -\n _switch]], 'CONSTANT')\n", (2280, 2370), True, 'import tensorflow as tf\n'), ((3516, 3613), 'tensorflow.pad', 'tf.pad', (['tensor_igdn_0', '[[0, 0], [0, 0], [0, 0], [0, total_filters_num - _switch]]', '"""CONSTANT"""'], {}), "(tensor_igdn_0, [[0, 0], [0, 0], [0, 0], [0, total_filters_num -\n _switch]], 'CONSTANT')\n", (3522, 3613), True, 'import tensorflow as tf\n'), ((4153, 4250), 'tensorflow.pad', 'tf.pad', (['tensor_igdn_1', '[[0, 0], [0, 0], [0, 0], [0, total_filters_num - _switch]]', '"""CONSTANT"""'], {}), "(tensor_igdn_1, [[0, 0], [0, 0], [0, 0], [0, total_filters_num -\n _switch]], 'CONSTANT')\n", (4159, 4250), True, 'import tensorflow as tf\n'), ((4781, 4878), 'tensorflow.pad', 'tf.pad', (['tensor_igdn_2', '[[0, 0], [0, 0], [0, 0], [0, total_filters_num - _switch]]', '"""CONSTANT"""'], {}), "(tensor_igdn_2, [[0, 0], [0, 0], [0, 0], [0, total_filters_num -\n _switch]], 'CONSTANT')\n", (4787, 4878), True, 'import tensorflow as tf\n'), ((8415, 8462), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["('loss_%d' % i)", 'train_loss[i]'], {}), "('loss_%d' % i, train_loss[i])\n", (8432, 8462), True, 'import 
tensorflow as tf\n'), ((8475, 8520), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["('bpp_%d' % i)", 'train_bpp[i]'], {}), "('bpp_%d' % i, train_bpp[i])\n", (8492, 8520), True, 'import tensorflow as tf\n'), ((8533, 8589), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["('mse_%d' % i)", '(train_mse[i] * 255 ** 2)'], {}), "('mse_%d' % i, train_mse[i] * 255 ** 2)\n", (8550, 8589), True, 'import tensorflow as tf\n'), ((8612, 8655), 'tensorflow.summary.histogram', 'tf.summary.histogram', (["('hist_y_%d' % i)", 'y[i]'], {}), "('hist_y_%d' % i, y[i])\n", (8632, 8655), True, 'import tensorflow as tf\n'), ((8746, 8790), 'tensorflow.train.StopAtStepHook', 'tf.train.StopAtStepHook', ([], {'last_step': 'last_step'}), '(last_step=last_step)\n', (8769, 8790), True, 'import tensorflow as tf\n'), ((8804, 8844), 'tensorflow.train.NanTensorHook', 'tf.train.NanTensorHook', (['total_train_loss'], {}), '(total_train_loss)\n', (8826, 8844), True, 'import tensorflow as tf\n'), ((8870, 9008), 'tensorflow.train.MonitoredTrainingSession', 'tf.train.MonitoredTrainingSession', ([], {'hooks': 'hooks', 'checkpoint_dir': 'args.checkpoint_dir', 'save_checkpoint_secs': '(900)', 'save_summaries_secs': '(600)'}), '(hooks=hooks, checkpoint_dir=args.\n checkpoint_dir, save_checkpoint_secs=900, save_summaries_secs=600)\n', (8903, 9008), True, 'import tensorflow as tf\n'), ((10828, 10860), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['x_hat[i]', '(0)', '(1)'], {}), '(x_hat[i], 0, 1)\n', (10844, 10860), True, 'import tensorflow as tf\n'), ((10881, 10902), 'tensorflow.round', 'tf.round', (['(x_rec * 255)'], {}), '(x_rec * 255)\n', (10889, 10902), True, 'import tensorflow as tf\n'), ((11224, 11236), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (11234, 11236), True, 'import tensorflow as tf\n'), ((11343, 11405), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', ([], {'checkpoint_dir': 'args.checkpoint_dir'}), '(checkpoint_dir=args.checkpoint_dir)\n', (11369, 
11405), True, 'import tensorflow as tf\n'), ((12422, 12450), 'numpy.array', 'np.array', (['bpp_estimate_total'], {}), '(bpp_estimate_total)\n', (12430, 12450), True, 'import numpy as np\n'), ((14040, 14064), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (14062, 14064), True, 'import tensorflow as tf\n'), ((14121, 14145), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (14143, 14145), True, 'import tensorflow as tf\n'), ((1224, 1265), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""layer_0"""'], {'reuse': '(i > 0)'}), "('layer_0', reuse=i > 0)\n", (1241, 1265), True, 'import tensorflow as tf\n'), ((1290, 1423), 'tensorflow_compression.SignalConv2D_slim', 'tfc.SignalConv2D_slim', (['total_filters_num', '(9, 9)'], {'corr': '(True)', 'strides_down': '(4)', 'padding': '"""same_zeros"""', 'use_bias': '(True)', 'activation': 'None'}), "(total_filters_num, (9, 9), corr=True, strides_down=4,\n padding='same_zeros', use_bias=True, activation=None)\n", (1311, 1423), True, 'import tensorflow_compression as tfc\n'), ((1821, 1862), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""layer_1"""'], {'reuse': '(i > 0)'}), "('layer_1', reuse=i > 0)\n", (1838, 1862), True, 'import tensorflow as tf\n'), ((1887, 2020), 'tensorflow_compression.SignalConv2D_slim', 'tfc.SignalConv2D_slim', (['total_filters_num', '(5, 5)'], {'corr': '(True)', 'strides_down': '(2)', 'padding': '"""same_zeros"""', 'use_bias': '(True)', 'activation': 'None'}), "(total_filters_num, (5, 5), corr=True, strides_down=2,\n padding='same_zeros', use_bias=True, activation=None)\n", (1908, 2020), True, 'import tensorflow_compression as tfc\n'), ((2438, 2479), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""layer_2"""'], {'reuse': '(i > 0)'}), "('layer_2', reuse=i > 0)\n", (2455, 2479), True, 'import tensorflow as tf\n'), ((2504, 2638), 'tensorflow_compression.SignalConv2D_slim', 'tfc.SignalConv2D_slim', (['total_filters_num', '(5, 5)'], 
{'corr': '(True)', 'strides_down': '(2)', 'padding': '"""same_zeros"""', 'use_bias': '(False)', 'activation': 'None'}), "(total_filters_num, (5, 5), corr=True, strides_down=2,\n padding='same_zeros', use_bias=False, activation=None)\n", (2525, 2638), True, 'import tensorflow_compression as tfc\n'), ((3638, 3679), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""layer_0"""'], {'reuse': '(i > 0)'}), "('layer_0', reuse=i > 0)\n", (3655, 3679), True, 'import tensorflow as tf\n'), ((3704, 3836), 'tensorflow_compression.SignalConv2D_slim', 'tfc.SignalConv2D_slim', (['total_filters_num', '(5, 5)'], {'corr': '(False)', 'strides_up': '(2)', 'padding': '"""same_zeros"""', 'use_bias': '(True)', 'activation': 'None'}), "(total_filters_num, (5, 5), corr=False, strides_up=2,\n padding='same_zeros', use_bias=True, activation=None)\n", (3725, 3836), True, 'import tensorflow_compression as tfc\n'), ((4275, 4316), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""layer_1"""'], {'reuse': '(i > 0)'}), "('layer_1', reuse=i > 0)\n", (4292, 4316), True, 'import tensorflow as tf\n'), ((4341, 4473), 'tensorflow_compression.SignalConv2D_slim', 'tfc.SignalConv2D_slim', (['total_filters_num', '(5, 5)'], {'corr': '(False)', 'strides_up': '(2)', 'padding': '"""same_zeros"""', 'use_bias': '(True)', 'activation': 'None'}), "(total_filters_num, (5, 5), corr=False, strides_up=2,\n padding='same_zeros', use_bias=True, activation=None)\n", (4362, 4473), True, 'import tensorflow_compression as tfc\n'), ((4891, 4932), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""layer_2"""'], {'reuse': '(i > 0)'}), "('layer_2', reuse=i > 0)\n", (4908, 4932), True, 'import tensorflow as tf\n'), ((4957, 5074), 'tensorflow_compression.SignalConv2D_slim', 'tfc.SignalConv2D_slim', (['(3)', '(9, 9)'], {'corr': '(False)', 'strides_up': '(4)', 'padding': '"""same_zeros"""', 'use_bias': '(True)', 'activation': 'None'}), "(3, (9, 9), corr=False, strides_up=4, padding=\n 'same_zeros', use_bias=True, 
activation=None)\n", (4978, 5074), True, 'import tensorflow_compression as tfc\n'), ((5907, 5961), 'tensorflow.random_crop', 'tf.random_crop', (['x', '(args.patchsize, args.patchsize, 3)'], {}), '(x, (args.patchsize, args.patchsize, 3))\n', (5921, 5961), True, 'import tensorflow as tf\n'), ((6705, 6728), 'tensorflow_compression.EntropyBottleneck', 'tfc.EntropyBottleneck', ([], {}), '()\n', (6726, 6728), True, 'import tensorflow_compression as tfc\n'), ((8100, 8143), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (8122, 8143), True, 'import tensorflow as tf\n'), ((10083, 10106), 'tensorflow_compression.EntropyBottleneck', 'tfc.EntropyBottleneck', ([], {}), '()\n', (10104, 10106), True, 'import tensorflow_compression as tfc\n'), ((12294, 12317), 'numpy.log10', 'np.log10', (['(1 - msssim[i])'], {}), '(1 - msssim[i])\n', (12302, 12317), True, 'import numpy as np\n'), ((12497, 12516), 'numpy.array', 'np.array', (['mse_total'], {}), '(mse_total)\n', (12505, 12516), True, 'import numpy as np\n'), ((12536, 12556), 'numpy.array', 'np.array', (['psnr_total'], {}), '(psnr_total)\n', (12544, 12556), True, 'import numpy as np\n'), ((12611, 12633), 'numpy.array', 'np.array', (['msssim_total'], {}), '(msssim_total)\n', (12619, 12633), True, 'import numpy as np\n'), ((12653, 12678), 'numpy.array', 'np.array', (['msssim_db_total'], {}), '(msssim_db_total)\n', (12661, 12678), True, 'import numpy as np\n'), ((1611, 1620), 'tensorflow_compression.GDN', 'tfc.GDN', ([], {}), '()\n', (1618, 1620), True, 'import tensorflow_compression as tfc\n'), ((2229, 2238), 'tensorflow_compression.GDN', 'tfc.GDN', ([], {}), '()\n', (2236, 2238), True, 'import tensorflow_compression as tfc\n'), ((2835, 2844), 'tensorflow_compression.GDN', 'tfc.GDN', ([], {}), '()\n', (2842, 2844), True, 'import tensorflow_compression as tfc\n'), ((3447, 3468), 'tensorflow_compression.GDN', 'tfc.GDN', ([], {'inverse': '(True)'}), 
'(inverse=True)\n', (3454, 3468), True, 'import tensorflow_compression as tfc\n'), ((4095, 4116), 'tensorflow_compression.GDN', 'tfc.GDN', ([], {'inverse': '(True)'}), '(inverse=True)\n', (4102, 4116), True, 'import tensorflow_compression as tfc\n'), ((4723, 4744), 'tensorflow_compression.GDN', 'tfc.GDN', ([], {'inverse': '(True)'}), '(inverse=True)\n', (4730, 4744), True, 'import tensorflow_compression as tfc\n'), ((7341, 7377), 'tensorflow.squared_difference', 'tf.squared_difference', (['x', 'x_tilde[i]'], {}), '(x, x_tilde[i])\n', (7362, 7377), True, 'import tensorflow as tf\n'), ((10486, 10497), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (10494, 10497), True, 'import tensorflow as tf\n'), ((11031, 11062), 'tensorflow.squared_difference', 'tf.squared_difference', (['x', 'x_rec'], {}), '(x, x_rec)\n', (11052, 11062), True, 'import tensorflow as tf\n'), ((11100, 11128), 'tensorflow.image.psnr', 'tf.image.psnr', (['x_rec', 'x', '(255)'], {}), '(x_rec, x, 255)\n', (11113, 11128), True, 'import tensorflow as tf\n'), ((11168, 11207), 'tensorflow.image.ssim_multiscale', 'tf.image.ssim_multiscale', (['x_rec', 'x', '(255)'], {}), '(x_rec, x, 255)\n', (11192, 11207), True, 'import tensorflow as tf\n'), ((11418, 11434), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (11432, 11434), True, 'import tensorflow as tf\n'), ((7195, 7217), 'tensorflow.log', 'tf.log', (['likelihoods[i]'], {}), '(likelihoods[i])\n', (7201, 7217), True, 'import tensorflow as tf\n'), ((10755, 10777), 'tensorflow.log', 'tf.log', (['likelihoods[i]'], {}), '(likelihoods[i])\n', (10761, 10777), True, 'import tensorflow as tf\n'), ((10956, 10967), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (10964, 10967), True, 'import tensorflow as tf\n'), ((10972, 10983), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (10980, 10983), True, 'import tensorflow as tf\n'), ((11971, 11994), 'numpy.log10', 'np.log10', (['(1 - msssim[i])'], {}), '(1 - msssim[i])\n', (11979, 11994), 
True, 'import numpy as np\n'), ((7223, 7232), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (7229, 7232), True, 'import numpy as np\n'), ((10783, 10792), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (10789, 10792), True, 'import numpy as np\n')] |
import cv2
import numpy as np

# Foreground extraction + edge-sketch script for one training image.
img = cv2.imread('dataset/train/1/1.png')
# Binarize: any pixel with grayscale intensity > 5 becomes foreground (255).
img_bw = 255*(cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)>5).astype('uint8')
se1 = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
se2 = cv2.getStructuringElement(cv2.MORPH_RECT,(2,2))
# Close small holes, then open to remove speckle noise.
mask = cv2.morphologyEx(img_bw, cv2.MORPH_CLOSE, se1)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, se2)
# Replicate the single-channel mask across 3 channels and scale to {0, 1}
# so it can multiply the BGR image directly.
mask = np.dstack([mask,mask,mask])/255
out = img*mask
# Edge sketch: median blur to suppress noise, Laplacian for edges, then an
# inverted binary threshold so edges become black on white.
img_gray = out
img_gray = cv2.medianBlur(img_gray, 5)
edges = cv2.Laplacian(img_gray, cv2.CV_8U, ksize=5)
ret,mask =cv2.threshold(edges,100,255,cv2.THRESH_BINARY_INV)
# BUG FIX: the original referenced the undefined name ``image`` (its
# definition was commented out), raising a NameError; use the loaded
# ``img`` instead.
image2 = cv2.bitwise_and(img, img, mask=mask)
image2 = cv2.medianBlur(image2, 3) # this
cv2.imshow("Mask", mask)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite('output.png', mask)
| [
"numpy.dstack",
"cv2.bitwise_and",
"cv2.medianBlur",
"cv2.waitKey",
"cv2.morphologyEx",
"cv2.getStructuringElement",
"cv2.threshold",
"cv2.destroyAllWindows",
"cv2.imwrite",
"cv2.cvtColor",
"cv2.imread",
"cv2.imshow",
"cv2.Laplacian"
] | [((38, 73), 'cv2.imread', 'cv2.imread', (['"""dataset/train/1/1.png"""'], {}), "('dataset/train/1/1.png')\n", (48, 73), False, 'import cv2\n'), ((150, 199), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(5, 5)'], {}), '(cv2.MORPH_RECT, (5, 5))\n', (175, 199), False, 'import cv2\n'), ((205, 254), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(2, 2)'], {}), '(cv2.MORPH_RECT, (2, 2))\n', (230, 254), False, 'import cv2\n'), ((260, 306), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img_bw', 'cv2.MORPH_CLOSE', 'se1'], {}), '(img_bw, cv2.MORPH_CLOSE, se1)\n', (276, 306), False, 'import cv2\n'), ((314, 357), 'cv2.morphologyEx', 'cv2.morphologyEx', (['mask', 'cv2.MORPH_OPEN', 'se2'], {}), '(mask, cv2.MORPH_OPEN, se2)\n', (330, 357), False, 'import cv2\n'), ((552, 579), 'cv2.medianBlur', 'cv2.medianBlur', (['img_gray', '(5)'], {}), '(img_gray, 5)\n', (566, 579), False, 'import cv2\n'), ((588, 631), 'cv2.Laplacian', 'cv2.Laplacian', (['img_gray', 'cv2.CV_8U'], {'ksize': '(5)'}), '(img_gray, cv2.CV_8U, ksize=5)\n', (601, 631), False, 'import cv2\n'), ((642, 695), 'cv2.threshold', 'cv2.threshold', (['edges', '(100)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(edges, 100, 255, cv2.THRESH_BINARY_INV)\n', (655, 695), False, 'import cv2\n'), ((702, 742), 'cv2.bitwise_and', 'cv2.bitwise_and', (['image', 'image'], {'mask': 'mask'}), '(image, image, mask=mask)\n', (717, 742), False, 'import cv2\n'), ((752, 777), 'cv2.medianBlur', 'cv2.medianBlur', (['image2', '(3)'], {}), '(image2, 3)\n', (766, 777), False, 'import cv2\n'), ((786, 810), 'cv2.imshow', 'cv2.imshow', (['"""Mask"""', 'mask'], {}), "('Mask', mask)\n", (796, 810), False, 'import cv2\n'), ((811, 825), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (822, 825), False, 'import cv2\n'), ((826, 849), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (847, 849), False, 'import cv2\n'), ((853, 884), 'cv2.imwrite', 'cv2.imwrite', 
(['"""output.png"""', 'mask'], {}), "('output.png', mask)\n", (864, 884), False, 'import cv2\n'), ((365, 394), 'numpy.dstack', 'np.dstack', (['[mask, mask, mask]'], {}), '([mask, mask, mask])\n', (374, 394), True, 'import numpy as np\n'), ((88, 125), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (100, 125), False, 'import cv2\n')] |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Script: download the UCI "Communities and Crime" data set and write 20
# reproducible train/test splits, once with a two-group protected attribute
# and once with a three-group protected attribute.
import numpy as np
import os
import pandas as pd
import requests
import sys
#Download data file if it does not exist
if (not os.path.exists('communities.data')):
    print('Data set does not exist in current folder --- have to download it')
    r = requests.get('https://archive.ics.uci.edu/ml/machine-learning-databases/communities/communities.data',
                     allow_redirects=True)
    if r.status_code == requests.codes.ok:
        print('Download successful\n')
    else:
        print('Could not download the data set --- please download it manually')
        sys.exit()
    open('communities.data', 'wb').write(r.content)
#Make folder for saving files if it does not exist
if not os.path.exists('communities_and_crime_train_and_test_splits_TWO_GROUPS'):
    os.mkdir('./communities_and_crime_train_and_test_splits_TWO_GROUPS')
if not os.path.exists('communities_and_crime_train_and_test_splits_THREE_GROUPS'):
    os.mkdir('./communities_and_crime_train_and_test_splits_THREE_GROUPS')
#Prepare the data set and do some preprocessing
train_size=1500
# Drop the first 5 columns; '?' marks missing values, and columns with any
# missing value are dropped entirely below.
dataORIGINAL=pd.read_csv('communities.data',header=None,na_values='?').iloc[:,5:]
dataORIGINAL=dataORIGINAL.dropna(axis=1).values
(N,d)=dataORIGINAL.shape
# Columns 2-5 of the reduced matrix are used as protected attributes and the
# last column is the crime-rate label; presumably these are the racial
# composition percentages of the data set --- confirm against the UCI docs.
prot_attrTEMP=dataORIGINAL[:,[2,3,4,5]]
labelTEMP=dataORIGINAL[:,-1]
# Features for predicting Y: columns 0-1 plus 6..d-2 (the protected-attribute
# columns and the label are excluded).
data_for_predicting_Y=dataORIGINAL[:,np.hstack(([0,1],range(6,d-1)))]
(N,d)=data_for_predicting_Y.shape
# Bin the continuous crime rate into 8 ordinal classes labelled 1..8.
labels=np.searchsorted([0.05,0.1,0.2,0.3,0.4,0.5,0.6],labelTEMP)+1
# Two-group protected attribute: group 1 where the 2nd attribute exceeds 0.8.
pr_attr_two_groups=np.zeros(N,dtype=int)
pr_attr_two_groups[prot_attrTEMP[:,1]>0.8]=1
# Three-group protected attribute: group 1 where the 1st attribute >= 0.25,
# group 2 where the sum of the 3rd and 4th attributes >= 0.25 (and the 1st is
# below 0.25), group 0 otherwise.
pr_attr_three_groups=np.zeros(N,dtype=int)
pr_attr_three_groups[prot_attrTEMP[:,0]>=0.25]=1
pr_attr_three_groups[np.logical_and((prot_attrTEMP[:,2]+prot_attrTEMP[:,3]>=0.25),prot_attrTEMP[:,0]<0.25)]=2
# Fixed seed so all 20 train/test splits are reproducible.
rng = np.random.default_rng(0)
for ell in range(20):
    train_indices=rng.choice(N,train_size,replace=False)
    test_indices=np.setdiff1d(np.arange(N),train_indices)
    train_data=data_for_predicting_Y[train_indices,:].copy()
    test_data=data_for_predicting_Y[test_indices,:].copy()
    # Standardize each feature using the training split's statistics; a std
    # below 1e-4 is replaced by 1 to avoid division blow-up on near-constant
    # columns.
    mean_vec = np.mean(train_data, axis=0)
    std_vec = np.std(train_data, axis=0)
    for mmm in range(train_data.shape[1]):
        if std_vec[mmm] < 0.0001:
            std_vec[mmm] = 1
        train_data[:, mmm] = train_data[:, mmm] - mean_vec[mmm]
        test_data[:, mmm] = test_data[:, mmm] - mean_vec[mmm]
        train_data[:, mmm] = train_data[:, mmm] / std_vec[mmm]
        test_data[:, mmm] = test_data[:, mmm] / std_vec[mmm]
    train_label=labels[train_indices]
    test_label=labels[test_indices]
    # Save the two-group split: features+label as floats, protected attribute
    # as integers, one file per split index ell.
    train_pr_attr=pr_attr_two_groups[train_indices]
    test_pr_attr=pr_attr_two_groups[test_indices]
    np.savetxt('communities_and_crime_train_and_test_splits_TWO_GROUPS/train_data_CommAndCrime_2groups.'+str(ell),
               np.hstack((train_data,train_label.reshape((train_size,1)))),delimiter=' ', fmt="%1f")
    np.savetxt('communities_and_crime_train_and_test_splits_TWO_GROUPS/test_data_CommAndCrime_2groups.' + str(ell),
               np.hstack((test_data, test_label.reshape((N-train_size,1)))), delimiter=' ', fmt="%1f")
    np.savetxt('communities_and_crime_train_and_test_splits_TWO_GROUPS/train_prot_attr_CommAndCrime_2groups.'+str(ell),
               train_pr_attr, delimiter=' ', fmt="%1d")
    np.savetxt('communities_and_crime_train_and_test_splits_TWO_GROUPS/test_prot_attr_CommAndCrime_2groups.'+str(ell),
               test_pr_attr, delimiter=' ', fmt="%1d")
    # Save the same data split with the three-group protected attribute.
    train_pr_attr=pr_attr_three_groups[train_indices]
    test_pr_attr=pr_attr_three_groups[test_indices]
    np.savetxt('communities_and_crime_train_and_test_splits_THREE_GROUPS/train_data_CommAndCrime_3groups.'+str(ell),
               np.hstack((train_data,train_label.reshape((train_size,1)))),delimiter=' ', fmt="%1f")
    np.savetxt('communities_and_crime_train_and_test_splits_THREE_GROUPS/test_data_CommAndCrime_3groups.' + str(ell),
               np.hstack((test_data, test_label.reshape((N-train_size,1)))), delimiter=' ', fmt="%1f")
    np.savetxt('communities_and_crime_train_and_test_splits_THREE_GROUPS/train_prot_attr_CommAndCrime_3groups.' +
               str(ell), train_pr_attr, delimiter=' ', fmt="%1d")
    np.savetxt('communities_and_crime_train_and_test_splits_THREE_GROUPS/test_prot_attr_CommAndCrime_3groups.' +
               str(ell), test_pr_attr, delimiter=' ', fmt="%1d")
| [
"os.mkdir",
"numpy.logical_and",
"numpy.std",
"pandas.read_csv",
"numpy.zeros",
"os.path.exists",
"numpy.searchsorted",
"numpy.random.default_rng",
"numpy.mean",
"numpy.arange",
"requests.get",
"sys.exit"
] | [((1600, 1622), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'int'}), '(N, dtype=int)\n', (1608, 1622), True, 'import numpy as np\n'), ((1689, 1711), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'int'}), '(N, dtype=int)\n', (1697, 1711), True, 'import numpy as np\n'), ((1879, 1903), 'numpy.random.default_rng', 'np.random.default_rng', (['(0)'], {}), '(0)\n', (1900, 1903), True, 'import numpy as np\n'), ((236, 270), 'os.path.exists', 'os.path.exists', (['"""communities.data"""'], {}), "('communities.data')\n", (250, 270), False, 'import os\n'), ((360, 494), 'requests.get', 'requests.get', (['"""https://archive.ics.uci.edu/ml/machine-learning-databases/communities/communities.data"""'], {'allow_redirects': '(True)'}), "(\n 'https://archive.ics.uci.edu/ml/machine-learning-databases/communities/communities.data'\n , allow_redirects=True)\n", (372, 494), False, 'import requests\n'), ((811, 883), 'os.path.exists', 'os.path.exists', (['"""communities_and_crime_train_and_test_splits_TWO_GROUPS"""'], {}), "('communities_and_crime_train_and_test_splits_TWO_GROUPS')\n", (825, 883), False, 'import os\n'), ((889, 957), 'os.mkdir', 'os.mkdir', (['"""./communities_and_crime_train_and_test_splits_TWO_GROUPS"""'], {}), "('./communities_and_crime_train_and_test_splits_TWO_GROUPS')\n", (897, 957), False, 'import os\n'), ((965, 1039), 'os.path.exists', 'os.path.exists', (['"""communities_and_crime_train_and_test_splits_THREE_GROUPS"""'], {}), "('communities_and_crime_train_and_test_splits_THREE_GROUPS')\n", (979, 1039), False, 'import os\n'), ((1045, 1115), 'os.mkdir', 'os.mkdir', (['"""./communities_and_crime_train_and_test_splits_THREE_GROUPS"""'], {}), "('./communities_and_crime_train_and_test_splits_THREE_GROUPS')\n", (1053, 1115), False, 'import os\n'), ((1520, 1584), 'numpy.searchsorted', 'np.searchsorted', (['[0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6]', 'labelTEMP'], {}), '([0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6], labelTEMP)\n', (1535, 1584), True, 'import numpy as np\n'), ((1781, 
1879), 'numpy.logical_and', 'np.logical_and', (['(prot_attrTEMP[:, 2] + prot_attrTEMP[:, 3] >= 0.25)', '(prot_attrTEMP[:, 0] < 0.25)'], {}), '(prot_attrTEMP[:, 2] + prot_attrTEMP[:, 3] >= 0.25, \n prot_attrTEMP[:, 0] < 0.25)\n', (1795, 1879), True, 'import numpy as np\n'), ((2178, 2205), 'numpy.mean', 'np.mean', (['train_data'], {'axis': '(0)'}), '(train_data, axis=0)\n', (2185, 2205), True, 'import numpy as np\n'), ((2220, 2246), 'numpy.std', 'np.std', (['train_data'], {'axis': '(0)'}), '(train_data, axis=0)\n', (2226, 2246), True, 'import numpy as np\n'), ((687, 697), 'sys.exit', 'sys.exit', ([], {}), '()\n', (695, 697), False, 'import sys\n'), ((1196, 1255), 'pandas.read_csv', 'pd.read_csv', (['"""communities.data"""'], {'header': 'None', 'na_values': '"""?"""'}), "('communities.data', header=None, na_values='?')\n", (1207, 1255), True, 'import pandas as pd\n'), ((2013, 2025), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (2022, 2025), True, 'import numpy as np\n')] |
""" Dynamic components of a multialgorithm simulation
:Author: <NAME> <<EMAIL>>
:Author: <NAME> <<EMAIL>>
:Date: 2018-02-07
:Copyright: 2017-2019, Karr Lab
:License: MIT
"""
from enum import Enum, auto
from pprint import pformat
import collections
import inspect
import itertools
import math
import networkx
import numpy
import warnings
from obj_tables.math.expression import Expression, ObjTablesTokenCodes
from wc_lang import Species, Compartment
from wc_onto import onto
from wc_sim.model_utilities import ModelUtilities
from wc_sim.multialgorithm_errors import MultialgorithmError, MultialgorithmWarning
from wc_sim.species_populations import LocalSpeciesPopulation
from wc_utils.util.enumerate import CaseInsensitiveEnum
from wc_utils.util.ontology import are_terms_equivalent
import obj_tables
import wc_lang
import wc_sim.config
import wc_sim.submodels
# mapping from wc_lang Models to DynamicComponents
# NOTE(review): empty at module load; presumably populated later in this module when the
# Dynamic* classes are registered -- confirm against the rest of the file
WC_LANG_MODEL_TO_DYNAMIC_MODEL = {}
class SimTokCodes(int, CaseInsensitiveEnum):
    """ Integer codes classifying the tokens stored in :obj:`WcSimToken`\ s """
    dynamic_expression = 1
    other = 2
# a token in DynamicExpression._obj_tables_tokens
WcSimToken = collections.namedtuple(
    'WcSimToken', ['code', 'token_string', 'dynamic_expression'])
# give the last field, dynamic_expression, a default of None so it is optional:
# see https://stackoverflow.com/a/18348004
WcSimToken.__new__.__defaults__ = (None, )
WcSimToken.__doc__ += ': Token in a validated expression'
WcSimToken.code.__doc__ = 'SimTokCodes encoding'
WcSimToken.token_string.__doc__ = "The token's string"
WcSimToken.dynamic_expression.__doc__ = "When code is dynamic_expression, the dynamic_expression instance"
class DynamicComponent(object):
    """ Component of a simulation

    Attributes:
        dynamic_model (:obj:`DynamicModel`): the simulation's root dynamic model
        local_species_population (:obj:`LocalSpeciesPopulation`): the simulation's species population store
        id (:obj:`str`): unique id
    """

    # registry of every dynamic component, keyed by dynamic type and then by identifier
    dynamic_components_objs = {}

    def __init__(self, dynamic_model, local_species_population, wc_lang_model):
        """
        Args:
            dynamic_model (:obj:`DynamicModel`): the simulation's dynamic model
            local_species_population (:obj:`LocalSpeciesPopulation`): the simulation's species
                population store
            wc_lang_model (:obj:`obj_tables.Model`): a corresponding `wc_lang` `Model`, from which this
                `DynamicComponent` is derived
        """
        self.dynamic_model = dynamic_model
        self.local_species_population = local_species_population
        self.id = wc_lang_model.id
        # register self in the class-level registry, creating the per-type map if needed
        model_type = DynamicComponent.get_dynamic_model_type(wc_lang_model)
        DynamicComponent.dynamic_components_objs.setdefault(model_type, {})[self.id] = self

    @staticmethod
    def get_dynamic_model_type(model_type):
        """ Get a simulation's dynamic component type

        Obtain a dynamic component type from a corresponding `wc_lang` Model type, instance or
        string name.

        Args:
            model_type (:obj:`Object`): a `wc_lang` Model type represented by a subclass of
                `obj_tables.Model`, an instance of `obj_tables.Model`, or a string name for
                a `obj_tables.Model`

        Returns:
            :obj:`type`: the dynamic component

        Raises:
            :obj:`MultialgorithmError`: if the corresponding dynamic component type cannot be determined
        """
        # case 1: model_type is a subclass of obj_tables.Model
        if isinstance(model_type, type) and issubclass(model_type, obj_tables.Model):
            if model_type not in WC_LANG_MODEL_TO_DYNAMIC_MODEL:
                raise MultialgorithmError(f"model class of type '{model_type.__name__}' not found")
            return WC_LANG_MODEL_TO_DYNAMIC_MODEL[model_type]

        # case 2: model_type is an instance of obj_tables.Model
        if isinstance(model_type, obj_tables.Model):
            if model_type.__class__ not in WC_LANG_MODEL_TO_DYNAMIC_MODEL:
                raise MultialgorithmError(f"model of type '{model_type.__class__.__name__}' not found")
            return WC_LANG_MODEL_TO_DYNAMIC_MODEL[model_type.__class__]

        # case 3: model_type is the string name of a wc_lang class
        if isinstance(model_type, str):
            model_type_type = getattr(wc_lang, model_type, None)
            if model_type_type is None:
                raise MultialgorithmError(f"model type '{model_type}' not defined")
            if model_type_type not in WC_LANG_MODEL_TO_DYNAMIC_MODEL:
                raise MultialgorithmError(f"model of type '{model_type_type.__name__}' not found")
            return WC_LANG_MODEL_TO_DYNAMIC_MODEL[model_type_type]

        raise MultialgorithmError(f"model type '{model_type}' has wrong type")

    @staticmethod
    def get_dynamic_component(model_type, id):
        """ Get a simulation's dynamic component

        Args:
            model_type (:obj:`type`): the subclass of `DynamicComponent` (or `obj_tables.Model`)
                being retrieved
            id (:obj:`str`): the dynamic component's id

        Returns:
            :obj:`DynamicComponent`: the dynamic component

        Raises:
            :obj:`MultialgorithmError`: if the dynamic component cannot be found
        """
        # normalize a wc_lang type / instance / name into its DynamicComponent subclass
        if not (inspect.isclass(model_type) and issubclass(model_type, DynamicComponent)):
            model_type = DynamicComponent.get_dynamic_model_type(model_type)
        components = DynamicComponent.dynamic_components_objs
        if model_type not in components:
            raise MultialgorithmError(f"model type '{model_type.__name__}' not in "
                                      f"DynamicComponent.dynamic_components_objs")
        components_of_type = components[model_type]
        if id not in components_of_type:
            raise MultialgorithmError(f"model type '{model_type.__name__}' with id='{id}' not in "
                                      f"DynamicComponent.dynamic_components_objs")
        return components_of_type[id]

    def __str__(self):
        """ Provide a readable representation of this `DynamicComponent`

        Returns:
            :obj:`str`: a readable representation of this `DynamicComponent`
        """
        return '\n'.join(['DynamicComponent:',
                          f"type: {self.__class__.__name__}",
                          f"id: {self.id}"])
class DynamicExpression(DynamicComponent):
    """ Simulation representation of a mathematical expression, based on :obj:`ParsedExpression`

    Attributes:
        expression (:obj:`str`): the expression defined in the `wc_lang` Model
        wc_sim_tokens (:obj:`list` of :obj:`WcSimToken`): a tokenized, compressed representation of
            `expression`
        expr_substrings (:obj:`list` of :obj:`str`): strings which are joined to form the string
            which is 'eval'ed
        local_ns (:obj:`dict`): pre-computed local namespace of functions used in `expression`
    """

    # token codes that are static text, not references to other model objects
    NON_LANG_OBJ_ID_TOKENS = set([ObjTablesTokenCodes.math_func_id,
                                  ObjTablesTokenCodes.number,
                                  ObjTablesTokenCodes.op,
                                  ObjTablesTokenCodes.other])

    def __init__(self, dynamic_model, local_species_population, wc_lang_model, wc_lang_expression):
        """
        Args:
            dynamic_model (:obj:`DynamicModel`): the simulation's dynamic model
            local_species_population (:obj:`LocalSpeciesPopulation`): the simulation's species
                population store
            wc_lang_model (:obj:`obj_tables.Model`): the corresponding `wc_lang` `Model`
            wc_lang_expression (:obj:`ParsedExpression`): an analyzed and validated expression

        Raises:
            :obj:`MultialgorithmError`: if `wc_lang_expression` does not contain an analyzed,
                validated expression
        """
        super().__init__(dynamic_model, local_species_population, wc_lang_model)

        # wc_lang_expression must have been successfully `tokenize`d.
        if not wc_lang_expression._obj_tables_tokens:
            raise MultialgorithmError(f"_obj_tables_tokens cannot be empty - ensure that "
                                      f"'{wc_lang_model}' is valid")
        # optimization: self.wc_lang_expression will be deleted by prepare()
        self.wc_lang_expression = wc_lang_expression
        self.expression = wc_lang_expression.expression

    def prepare(self):
        """ Prepare this dynamic expression for simulation

        Because they refer to each other, all :obj:`DynamicExpression`\ s must be created before any
        of them are prepared.

        Raises:
            :obj:`MultialgorithmError`: if a Python function used in `wc_lang_expression` does not exist
        """
        # create self.wc_sim_tokens, which contains WcSimTokens that refer to other DynamicExpressions
        self.wc_sim_tokens = []
        # optimization: combine together adjacent obj_tables_token.tok_codes other than obj_id
        next_static_tokens = ''
        function_names = set()

        i = 0
        while i < len(self.wc_lang_expression._obj_tables_tokens):
            obj_tables_token = self.wc_lang_expression._obj_tables_tokens[i]
            if obj_tables_token.code == ObjTablesTokenCodes.math_func_id:
                function_names.add(obj_tables_token.token_string)
            if obj_tables_token.code in self.NON_LANG_OBJ_ID_TOKENS:
                next_static_tokens = next_static_tokens + obj_tables_token.token_string
            elif obj_tables_token.code == ObjTablesTokenCodes.obj_id:
                # flush any accumulated static text before the object reference
                if next_static_tokens != '':
                    self.wc_sim_tokens.append(WcSimToken(SimTokCodes.other, next_static_tokens))
                    next_static_tokens = ''
                try:
                    dynamic_expression = DynamicComponent.get_dynamic_component(obj_tables_token.model,
                                                                                obj_tables_token.model_id)
                except Exception:
                    # fix: narrowed from a bare `except:` so that SystemExit and
                    # KeyboardInterrupt propagate instead of being converted to an error
                    raise MultialgorithmError("'{}.{} must be prepared to create '{}''".format(
                        obj_tables_token.model.__class__.__name__, obj_tables_token.model_id, self.id))
                self.wc_sim_tokens.append(WcSimToken(SimTokCodes.dynamic_expression,
                                                     obj_tables_token.token_string,
                                                     dynamic_expression))
            else:    # pragma: no cover
                assert False, f"unknown code {obj_tables_token.code} in {obj_tables_token}"
            # advance to the next token
            i += 1
        if next_static_tokens != '':
            self.wc_sim_tokens.append(WcSimToken(SimTokCodes.other, next_static_tokens))
        # optimization: to conserve memory, delete self.wc_lang_expression
        del self.wc_lang_expression

        # optimization: pre-allocate and pre-populate substrings for the expression to eval
        self.expr_substrings = []
        for sim_token in self.wc_sim_tokens:
            if sim_token.code == SimTokCodes.other:
                self.expr_substrings.append(sim_token.token_string)
            else:
                self.expr_substrings.append('')

        # optimization: pre-allocate Python functions in namespace
        self.local_ns = {}
        for func_name in function_names:
            if func_name in globals()['__builtins__']:
                self.local_ns[func_name] = globals()['__builtins__'][func_name]
            elif hasattr(globals()['math'], func_name):
                self.local_ns[func_name] = getattr(globals()['math'], func_name)
            else:   # pragma no cover, because only known functions are allowed in model expressions
                raise MultialgorithmError(f"loading expression '{self.expression}' "
                                          f"cannot find function '{func_name}'")

    def eval(self, time):
        """ Evaluate this mathematical expression

        Approach:

            * Replace references to related Models in `self.wc_sim_tokens` with their values
            * Join the elements of `self.wc_sim_tokens` into a Python expression
            * `eval` the Python expression

        Args:
            time (:obj:`float`): the simulation time at which the expression should be evaluated

        Returns:
            :obj:`float` or :obj:`bool`: the value of this :obj:`DynamicExpression` at time `time`

        Raises:
            :obj:`MultialgorithmError`: if Python `eval` raises an exception
        """
        assert hasattr(self, 'wc_sim_tokens'), f"'{self.id}' must use prepare() before eval()"

        # if caching is enabled & the expression's value is cached, return it
        if self.dynamic_model.cache_manager.caching():
            try:
                return self.dynamic_model.cache_manager.get(self)
            except MultialgorithmError:
                # a cache miss; fall through and compute the value
                pass

        for idx, sim_token in enumerate(self.wc_sim_tokens):
            if sim_token.code == SimTokCodes.dynamic_expression:
                self.expr_substrings[idx] = str(sim_token.dynamic_expression.eval(time))
        try:
            value = eval(''.join(self.expr_substrings), {}, self.local_ns)
            # if caching is enabled cache the expression's value
            self.dynamic_model.cache_manager.set(self, value)
            return value
        except BaseException as e:
            raise MultialgorithmError(f"eval of '{self.expression}' "
                                      f"raises {type(e).__name__}: {str(e)}'")

    def __str__(self):
        """ Provide a readable representation of this `DynamicExpression`

        Returns:
            :obj:`str`: a readable representation of this `DynamicExpression`
        """
        rv = ['DynamicExpression:']
        rv.append(f"type: {self.__class__.__name__}")
        rv.append(f"id: {self.id}")
        rv.append(f"expression: {self.expression}")
        return '\n'.join(rv)
class DynamicFunction(DynamicExpression):
    """ The dynamic representation of a :obj:`wc_lang.Function`

    Construction and all behavior are inherited unchanged from :obj:`DynamicExpression`.
    """
class DynamicStopCondition(DynamicExpression):
    """ The dynamic representation of a :obj:`wc_lang.StopCondition`

    Construction and all behavior are inherited unchanged from :obj:`DynamicExpression`.
    """
class DynamicObservable(DynamicExpression):
    """ The dynamic representation of an :obj:`wc_lang.Observable`

    Construction and all behavior are inherited unchanged from :obj:`DynamicExpression`.
    """
class DynamicDfbaObjective(DynamicExpression):
    """ The dynamic representation of a :obj:`wc_lang.DfbaObjective`

    Construction and all behavior are inherited unchanged from :obj:`DynamicExpression`.
    """
class DynamicRateLaw(DynamicExpression):
    """ The dynamic representation of a :obj:`wc_lang.RateLaw`

    Construction and all behavior are inherited unchanged from :obj:`DynamicExpression`.
    """
class DynamicParameter(DynamicComponent):
    """ The dynamic representation of a :obj:`wc_lang.Parameter` """

    def __init__(self, dynamic_model, local_species_population, wc_lang_model, value):
        """
        Args:
            dynamic_model (:obj:`DynamicModel`): the simulation's dynamic model
            local_species_population (:obj:`LocalSpeciesPopulation`): the simulation's species
                population store
            wc_lang_model (:obj:`obj_tables.Model`): the corresponding :obj:`wc_lang.Parameter`
            value (:obj:`float`): the parameter's value
        """
        super().__init__(dynamic_model, local_species_population, wc_lang_model)
        # a parameter's value is fixed for the duration of a simulation
        self.value = value

    def eval(self, time):
        """ Provide the value of this parameter

        Args:
            time (:obj:`float`): the current simulation time; ignored, but accepted so that all
                dynamic expression models share the same `eval` signature

        Returns:
            :obj:`float`: the dynamic parameter's value
        """
        return self.value
class DynamicSpecies(DynamicComponent):
    """ The dynamic representation of a :obj:`wc_lang.Species`

    Construction is inherited unchanged from :obj:`DynamicComponent`, which takes the
    simulation's dynamic model, its species population store, and the corresponding
    :obj:`wc_lang.Species`.
    """

    def eval(self, time):
        """ Provide the population of this species

        Args:
            time (:obj:`float`): the current simulation time

        Returns:
            :obj:`float`: the population of this species at time `time`
        """
        return self.local_species_population.read_one(time, self.id)
class DynamicCompartment(DynamicComponent):
    """ A dynamic compartment

    A :obj:`DynamicCompartment` tracks the dynamic aggregate state of a compartment. A
    :obj:`DynamicCompartment` is created for each `wc_lang` `Compartment` in a whole-cell model.

    Attributes:
        id (:obj:`str`): id of this :obj:`DynamicCompartment`, copied from the `wc_lang` `Compartment`
        biological_type (:obj:`pronto.term.Term`): biological type of this :obj:`DynamicCompartment`,
            copied from the `Compartment`
        physical_type (:obj:`pronto.term.Term`): physical type of this :obj:`DynamicCompartment`,
            copied from the `Compartment`
        random_state (:obj:`numpy.random.RandomState`): a random state
        init_volume (:obj:`float`): initial volume, sampled from the distribution specified in the
            `wc_lang` model
        init_accounted_mass (:obj:`float`): the initial mass accounted for by the initial species
        init_mass (:obj:`float`): initial mass, including the mass not accounted for by
            explicit species
        init_density (:obj:`float`): the initial density of this :obj:`DynamicCompartment`, as
            specified by the model; this is the *constant* density of the compartment
        init_accounted_density (:obj:`float`): the initial density accounted for by the
            initial species
        accounted_fraction (:obj:`float`): the fraction of the initial mass or density accounted
            for by initial species; assumed to be constant throughout a dynamical model
        species_population (:obj:`LocalSpeciesPopulation`): the simulation's species population store
        species_ids (:obj:`list` of :obj:`str`): the IDs of the species stored in this
            :obj:`DynamicCompartment`\ ; if `None`, use the IDs of all species in `species_population`
    """

    def __init__(self, dynamic_model, random_state, wc_lang_compartment, species_ids=None):
        """ Initialize the volume and density of this :obj:`DynamicCompartment`\ .

        Args:
            dynamic_model (:obj:`DynamicModel`): the simulation's dynamic model
            random_state (:obj:`numpy.random.RandomState`): a random state
            wc_lang_compartment (:obj:`Compartment`): the corresponding static `wc_lang` `Compartment`
            species_ids (:obj:`list` of :obj:`str`, optional): the IDs of the species stored
                in this compartment

        Raises:
            :obj:`MultialgorithmError`: if `self.init_volume` or `self.init_density` are not
                positive numbers
        """
        super(DynamicCompartment, self).__init__(dynamic_model, None, wc_lang_compartment)

        self.id = wc_lang_compartment.id
        self.biological_type = wc_lang_compartment.biological_type
        self.physical_type = wc_lang_compartment.physical_type
        self.species_ids = species_ids

        # obtain initial compartment volume by sampling its specified distribution
        # only normally-distributed initial volumes are supported
        if wc_lang_compartment.init_volume and \
            are_terms_equivalent(wc_lang_compartment.init_volume.distribution,
                                 onto['WC:normal_distribution']):
            mean = wc_lang_compartment.init_volume.mean
            std = wc_lang_compartment.init_volume.std
            if numpy.isnan(std):
                # no std provided; derive one from the configured mean-to-std-dev ratio
                config_multialgorithm = wc_sim.config.core.get_config()['wc_sim']['multialgorithm']
                MEAN_TO_STD_DEV_RATIO = config_multialgorithm['mean_to_std_dev_ratio']
                std = mean / MEAN_TO_STD_DEV_RATIO
            self.init_volume = ModelUtilities.non_neg_normal_sample(random_state, mean, std)
        else:
            raise MultialgorithmError('Initial volume must be normally distributed')
        if math.isnan(self.init_volume):  # pragma no cover: cannot be True
            raise MultialgorithmError(f"DynamicCompartment {self.id}: init_volume is NaN, but must "
                                      f"be a positive number.")
        if self.init_volume <= 0:
            raise MultialgorithmError(f"DynamicCompartment {self.id}: init_volume "
                                      f"({self.init_volume}) must be a positive number.")

        # abstract compartments have no physical density; skip density initialization
        if not self._is_abstract():
            init_density = wc_lang_compartment.init_density.value
            if math.isnan(init_density):
                raise MultialgorithmError(f"DynamicCompartment {self.id}: init_density is NaN, "
                                          f"but must be a positive number.")
            if init_density <= 0:
                raise MultialgorithmError(f"DynamicCompartment {self.id}: init_density "
                                          f"({init_density}) must be a positive number.")
            self.init_density = init_density

    def initialize_mass_and_density(self, species_population):
        """ Initialize the species populations and the mass accounted for by species.

        Also initialize the fraction of density accounted for by species, `self.accounted_fraction`.

        Args:
            species_population (:obj:`LocalSpeciesPopulation`): the simulation's
                species population store

        Raises:
            :obj:`MultialgorithmError`: if `accounted_fraction == 0` or
                if `MAX_ALLOWED_INIT_ACCOUNTED_FRACTION < accounted_fraction`
        """
        config_multialgorithm = wc_sim.config.core.get_config()['wc_sim']['multialgorithm']
        MAX_ALLOWED_INIT_ACCOUNTED_FRACTION = config_multialgorithm['max_allowed_init_accounted_fraction']

        self.species_population = species_population
        self.init_accounted_mass = self.accounted_mass(time=0)
        if self._is_abstract():
            # in an abstract compartment all mass is accounted for by species
            self.init_mass = self.init_accounted_mass
        else:
            self.init_mass = self.init_density * self.init_volume
            self.init_accounted_density = self.init_accounted_mass / self.init_volume
            # calculate fraction of initial mass or density represented by species
            self.accounted_fraction = self.init_accounted_density / self.init_density
            # also, accounted_fraction = self.init_accounted_mass / self.init_mass

            # usually epsilon < accounted_fraction <= 1, where epsilon depends on how thoroughly
            # processes in the compartment are characterized
            if 0 == self.accounted_fraction:
                raise MultialgorithmError(f"DynamicCompartment '{self.id}': "
                                          f"initial accounted ratio is 0")
            elif 1.0 < self.accounted_fraction <= MAX_ALLOWED_INIT_ACCOUNTED_FRACTION:
                # tolerated, but suspicious: species account for more than the compartment's mass
                warnings.warn(f"DynamicCompartment '{self.id}': "
                              f"initial accounted ratio ({self.accounted_fraction:.3E}) "
                              f"greater than 1.0", MultialgorithmWarning)
            if MAX_ALLOWED_INIT_ACCOUNTED_FRACTION < self.accounted_fraction:
                raise MultialgorithmError(f"DynamicCompartment {self.id}: "
                                          f"initial accounted ratio ({self.accounted_fraction:.3E}) "
                                          f"greater than MAX_ALLOWED_INIT_ACCOUNTED_FRACTION "
                                          f"({MAX_ALLOWED_INIT_ACCOUNTED_FRACTION}).")

    def _is_abstract(self):
        """ Indicate whether this is an abstract compartment

        An abstract compartment has a `physical_type` of `abstract_compartment` as defined in the WC
        ontology.
        Its contents do not represent physical matter, so no relationship exists among its mass,
        volume and
        density. Its volume is constant and its density is ignored and need not be defined. Abstract
        compartments are useful for modeling dynamics that are not based on physical chemistry, and
        for testing models and software.

        These :obj:`DynamicCompartment` attributes are not initialized in abstract compartments:
        `init_density`, `init_accounted_density` and `accounted_fraction`.

        Returns:
            :obj:`bool`: whether this is an abstract compartment
        """
        return are_terms_equivalent(self.physical_type, onto['WC:abstract_compartment'])

    def accounted_mass(self, time=None):
        """ Provide the total current mass of all species in this :obj:`DynamicCompartment`

        Args:
            time (:obj:`Rational`, optional): the current simulation time

        Returns:
            :obj:`float`: the total current mass of all species (g)
        """
        # if caching is enabled & the expression's value is cached, return it
        if self.dynamic_model.cache_manager.caching():
            try:
                return self.dynamic_model.cache_manager.get(self)
            except MultialgorithmError:
                # cache miss; fall through and compute the value
                pass
        value = self.species_population.compartmental_mass(self.id, time=time)
        # if caching is enabled cache the accounted_mass
        self.dynamic_model.cache_manager.set(self, value)
        return value

    def accounted_volume(self, time=None):
        """ Provide the current volume occupied by all species in this :obj:`DynamicCompartment`

        Args:
            time (:obj:`Rational`, optional): the current simulation time

        Returns:
            :obj:`float`: the current volume of all species (l)
        """
        if self._is_abstract():
            return self.volume()
        else:
            # density is constant, so species volume = species mass / density
            return self.accounted_mass(time=time) / self.init_density

    def mass(self, time=None):
        """ Provide the total current mass of this :obj:`DynamicCompartment`

        This mass includes the mass not accounted for by explicit species, as determined by
        the initial specified density, specified volume, and mass accounted for by species.

        Args:
            time (:obj:`Rational`, optional): the current simulation time

        Returns:
            :obj:`float`: this compartment's total current mass (g)
        """
        if self._is_abstract():
            return self.accounted_mass(time=time)
        else:
            # scale up by the constant fraction of mass accounted for by species
            return self.accounted_mass(time=time) / self.accounted_fraction

    def volume(self, time=None):
        """ Provide the current volume of this :obj:`DynamicCompartment`

        This volume includes the volume not accounted for by explicit species, as determined by
        the ratio of the specified initial density to the initial density accounted for by species.

        Args:
            time (:obj:`Rational`, optional): the current simulation time

        Returns:
            :obj:`float`: this compartment's current volume (l)
        """
        if self._is_abstract():
            # abstract compartments have constant volume
            return self.init_volume
        else:
            return self.accounted_volume(time=time) / self.accounted_fraction

    # todo: make time required, to avoid the possibility of eval'ing an expression @ multiple times
    def eval(self, time=None):
        """ Provide the mass of this :obj:`DynamicCompartment`

        Args:
            time (:obj:`Rational`, optional): the current simulation time

        Returns:
            :obj:`float`: this compartment's current mass (g)
        """
        return self.mass(time=time)

    def fold_change_total_mass(self, time=None):
        """ Provide the fold change of the total mass of this :obj:`DynamicCompartment`

        Args:
            time (:obj:`Rational`, optional): the current simulation time

        Returns:
            :obj:`float`: the fold change of the total mass of this compartment
        """
        return self.mass(time=time) / self.init_mass

    def fold_change_total_volume(self, time=None):
        """ Provide the fold change of the total volume of this :obj:`DynamicCompartment`

        Args:
            time (:obj:`Rational`, optional): the current simulation time

        Returns:
            :obj:`float`: the fold change of the total volume of this compartment
        """
        return self.volume(time=time) / self.init_volume

    def _initialized(self):
        """ Indicate whether this :obj:`DynamicCompartment` has been initialized

        Returns:
            :obj:`bool`: whether this compartment has been initialized by `initialize_mass_and_density()`
        """
        # init_accounted_mass is only set by initialize_mass_and_density()
        return hasattr(self, 'init_accounted_mass')

    def __str__(self):
        """ Provide a string representation of this :obj:`DynamicCompartment`

        Returns:
            :obj:`str`: a string representation of this compartment at the current simulation time
        """
        values = []
        values.append("ID: " + self.id)
        if self._initialized():
            values.append(f"Initialization state: '{self.id}' has been initialized.")
        else:
            values.append(f"Initialization state: '{self.id}' has not been initialized.")

        # todo: be careful with units; if initial values are specified in other units, are they converted?
        values.append(f"Initial volume (l): {self.init_volume:.3E}")
        values.append(f"Physical type: {self.physical_type.name}")
        values.append(f"Biological type: {self.biological_type.name}")
        if not self._is_abstract():
            values.append(f"Specified density (g l^-1): {self.init_density}")
        if self._initialized():
            values.append(f"Initial mass in species (g): {self.init_accounted_mass:.3E}")
            values.append(f"Initial total mass (g): {self.init_mass:.3E}")
            if not self._is_abstract():
                values.append(f"Fraction of mass accounted for by species (dimensionless): "
                              f"{self.accounted_fraction:.3E}")
            values.append(f"Current mass in species (g): {self.accounted_mass():.3E}")
            values.append(f"Current total mass (g): {self.mass():.3E}")
            values.append(f"Fold change total mass: {self.fold_change_total_mass():.3E}")
            values.append(f"Current volume in species (l): {self.accounted_volume():.3E}")
            values.append(f"Current total volume (l): {self.volume():.3E}")
            values.append(f"Fold change total volume: {self.fold_change_total_volume():.3E}")
        return "DynamicCompartment:\n{}".format('\n'.join(values))
class DynamicModel(object):
    """ Represent and access the dynamics of a whole-cell model simulation

    A `DynamicModel` provides access to dynamical components of the simulation, and
    determines aggregate properties that are not provided
    by other, more specific, dynamical components like species populations, submodels, and
    dynamic compartments.

    Attributes:
        id (:obj:`str`): id of the `wc_lang` model
        dynamic_compartments (:obj:`dict`): map from compartment ID to :obj:`DynamicCompartment`\ ;
            the simulation's :obj:`DynamicCompartment`\ s, one for each compartment in `model`
        cellular_dyn_compartments (:obj:`list`): list of the cellular compartments
        species_population (:obj:`LocalSpeciesPopulation`): populations of all the species in
            the model
        dynamic_submodels (:obj:`dict` of `DynamicSubmodel`): the simulation's dynamic submodels,
            indexed by their ids
        dynamic_species (:obj:`dict` of `DynamicSpecies`): the simulation's dynamic species,
            indexed by their ids
        dynamic_parameters (:obj:`dict` of `DynamicParameter`): the simulation's parameters,
            indexed by their ids
        dynamic_observables (:obj:`dict` of `DynamicObservable`): the simulation's dynamic
            observables, indexed by their ids
        dynamic_functions (:obj:`dict` of `DynamicFunction`): the simulation's dynamic functions,
            indexed by their ids
        dynamic_stop_conditions (:obj:`dict` of `DynamicStopCondition`): the simulation's stop
            conditions, indexed by their ids
        dynamic_rate_laws (:obj:`dict` of `DynamicRateLaw`): the simulation's rate laws,
            indexed by their ids
        dynamic_dfba_objectives (:obj:`dict` of `DynamicDfbaObjective`): the simulation's dFBA
            Objective, indexed by their ids
        cache_manager (:obj:`CacheManager`): a cache for potentially expensive expression evaluations
            that get repeated
        rxn_expression_dependencies (:obj:`dict`): map from reactions to lists of expressions
            whose values depend on species with non-zero stoichiometry in the reaction
        # TODO (APG): OPTIMIZE DFBA CACHING: describe dFBA caching optimization
        continuous_rxn_dependencies (:obj:`dict`): map from ids of continuous submodels to sets
            identifying expressions whose values depend on species with non-zero stoichiometry in
            reaction(s) modeled by the submodel
        all_continuous_rxn_dependencies (:obj:`tuple`): all expressions in `continuous_rxn_dependencies`
    """
    # the aggregate properties reported by get_aggregate_state(), for the cell and each compartment
    AGGREGATE_VALUES = ['mass', 'volume', 'accounted mass', 'accounted volume']
    def __init__(self, model, species_population, dynamic_compartments):
        """ Prepare a `DynamicModel` for a discrete-event simulation

        Args:
            model (:obj:`Model`): the description of the whole-cell model in `wc_lang`
            species_population (:obj:`LocalSpeciesPopulation`): the simulation's
                species population store
            dynamic_compartments (:obj:`dict`): the simulation's :obj:`DynamicCompartment`\ s, one
                for each compartment in `model`

        Raises:
            :obj:`MultialgorithmError`: if the model has no cellular compartments
        """
        self.id = model.id
        self.dynamic_compartments = dynamic_compartments
        self.species_population = species_population
        self.num_submodels = len(model.get_submodels())

        # determine cellular compartments
        self.cellular_dyn_compartments = []
        for dynamic_compartment in dynamic_compartments.values():
            if dynamic_compartment.biological_type == onto['WC:cellular_compartment']:
                self.cellular_dyn_compartments.append(dynamic_compartment)
        if dynamic_compartments and not self.cellular_dyn_compartments:
            raise MultialgorithmError(f"model '{model.id}' must have at least 1 cellular compartment")

        # === create dynamic objects that are not expressions ===
        # create dynamic parameters
        self.dynamic_parameters = {}
        for parameter in model.parameters:
            self.dynamic_parameters[parameter.id] = \
                DynamicParameter(self, self.species_population, parameter, parameter.value)

        # create dynamic species
        self.dynamic_species = {}
        for species in model.get_species():
            self.dynamic_species[species.id] = \
                DynamicSpecies(self, self.species_population, species)

        # === create dynamic expressions ===
        # create dynamic observables
        self.dynamic_observables = {}
        for observable in model.observables:
            self.dynamic_observables[observable.id] = \
                DynamicObservable(self, self.species_population, observable,
                                  observable.expression._parsed_expression)

        # create dynamic functions
        self.dynamic_functions = {}
        for function in model.functions:
            self.dynamic_functions[function.id] = \
                DynamicFunction(self, self.species_population, function,
                                function.expression._parsed_expression)

        # create dynamic stop conditions
        self.dynamic_stop_conditions = {}
        for stop_condition in model.stop_conditions:
            self.dynamic_stop_conditions[stop_condition.id] = \
                DynamicStopCondition(self, self.species_population, stop_condition,
                                     stop_condition.expression._parsed_expression)

        # create dynamic rate laws
        self.dynamic_rate_laws = {}
        for rate_law in model.rate_laws:
            self.dynamic_rate_laws[rate_law.id] = \
                DynamicRateLaw(self, self.species_population, rate_law,
                               rate_law.expression._parsed_expression)

        # create dynamic dFBA Objectives
        self.dynamic_dfba_objectives = {}
        for dfba_objective in model.dfba_objs:
            error = dfba_objective.expression.validate()
            # NOTE(review): assert is stripped under `python -O`; consider raising instead
            assert error is None, str(error)
            self.dynamic_dfba_objectives[dfba_objective.id] = \
                DynamicDfbaObjective(self, self.species_population, dfba_objective,
                                     dfba_objective.expression._parsed_expression)

        # prepare dynamic expressions; all dynamic expressions must be created above, before any
        # of them are prepared, because they may refer to each other
        # NOTE(review): dFBA objectives are not prepared here -- presumably prepared elsewhere; confirm
        for dynamic_expression_group in [self.dynamic_observables,
                                         self.dynamic_functions,
                                         self.dynamic_stop_conditions,
                                         self.dynamic_rate_laws]:
            for dynamic_expression in dynamic_expression_group.values():
                dynamic_expression.prepare()

        # initialize cache manager
        self.cache_manager = CacheManager()
def cell_mass(self):
""" Provide the cell's current mass
Sum the mass of all cellular :obj:`DynamicCompartment`\ s.
Returns:
:obj:`float`: the cell's current mass (g)
"""
return sum([dynamic_compartment.mass()
for dynamic_compartment in self.cellular_dyn_compartments])
def cell_volume(self):
""" Provide the cell's current volume
Sum the volume of all cellular :obj:`DynamicCompartment`\ s.
Returns:
:obj:`float`: the cell's current volume (l)
"""
return sum([dynamic_compartment.volume()
for dynamic_compartment in self.cellular_dyn_compartments])
    def cell_growth(self):
        """ Report the cell's growth in cell/s, relative to the cell's initial volume

        Returns:
            :obj:`float`: growth in cell/s, relative to the cell's initial volume
        """
        # TODO(Arthur): implement growth measurement
        # NOTE(review): unimplemented stub -- implicitly returns None, not the documented float
        pass
def cell_accounted_mass(self):
""" Provide the total current mass of all species in the cell
Sum the current mass of all species in cellular :obj:`DynamicCompartment`\ s.
Returns:
:obj:`float`: the current mass of all species in the cell (g)
"""
return sum([dynamic_compartment.accounted_mass()
for dynamic_compartment in self.cellular_dyn_compartments])
def cell_accounted_volume(self):
""" Provide the current volume occupied by all species in the cell
Sum the current volume occupied by all species in cellular :obj:`DynamicCompartment`\ s.
Returns:
:obj:`float`: the current volume occupied by all species in the cell (l)
"""
return sum([dynamic_compartment.accounted_volume()
for dynamic_compartment in self.cellular_dyn_compartments])
def get_aggregate_state(self):
""" Report the cell's aggregate state
Returns:
:obj:`dict`: the cell's aggregate state
"""
# get the state values configured in DynamicModel.AGGREGATE_VALUES
aggregate_state = {}
cell_aggregate_values = [f'cell {value}' for value in self.AGGREGATE_VALUES]
for cell_aggregate_value in cell_aggregate_values:
aggregate_func = getattr(self, cell_aggregate_value.replace(' ', '_'))
aggregate_state[cell_aggregate_value] = aggregate_func()
compartment_values = {}
for dynamic_compartment in self.cellular_dyn_compartments:
compartment_values[dynamic_compartment.id] = {}
for aggregate_value in self.AGGREGATE_VALUES:
aggregate_func = getattr(dynamic_compartment, aggregate_value.replace(' ', '_'))
compartment_values[dynamic_compartment.id][aggregate_value] = aggregate_func()
aggregate_state['compartments'] = compartment_values
return aggregate_state
def eval_dynamic_observables(self, time, observables_to_eval=None):
""" Evaluate some dynamic observables at time `time`
Args:
time (:obj:`float`): the simulation time
observables_to_eval (:obj:`list` of :obj:`str`, optional): if provided, ids of the
observables to evaluate; otherwise, evaluate all observables
Returns:
:obj:`dict`: map from the IDs of dynamic observables in `observables_to_eval` to their
values at simulation time `time`
"""
if observables_to_eval is None:
observables_to_eval = list(self.dynamic_observables.keys())
evaluated_observables = {}
for dyn_obsable_id in observables_to_eval:
evaluated_observables[dyn_obsable_id] = self.dynamic_observables[dyn_obsable_id].eval(time)
return evaluated_observables
def eval_dynamic_functions(self, time, functions_to_eval=None):
""" Evaluate some dynamic functions at time `time`
Args:
time (:obj:`float`): the simulation time
functions_to_eval (:obj:`list` of :obj:`str`, optional): if provided, ids of the
functions to evaluate; otherwise, evaluate all functions
Returns:
:obj:`dict`: map from the IDs of dynamic functions in `functions_to_eval` to their
values at simulation time `time`
"""
if functions_to_eval is None:
functions_to_eval = list(self.dynamic_functions.keys())
evaluated_functions = {}
for dyn_function_id in functions_to_eval:
evaluated_functions[dyn_function_id] = self.dynamic_functions[dyn_function_id].eval(time)
return evaluated_functions
def eval_dynamic_rate_laws(self, time):
""" Evaluate all dynamic rate laws at time `time`
Does not consider whether a rate law's reaction is enabled.
Args:
time (:obj:`float`): the simulation time
Returns:
:obj:`dict`: map from the IDs of dynamic rate laws to their values at simulation time `time`
"""
evaluated_rate_laws = {}
for rate_law_id, rate_law in self.dynamic_rate_laws.items():
evaluated_rate_laws[rate_law_id] = self.dynamic_rate_laws[rate_law_id].eval(time)
return evaluated_rate_laws
def get_reaction_fluxes(self):
""" Obtain the most recent flux for all reactions modeled by dFBA submodels
Returns:
:obj:`dict`: map from the IDs of reactions modeled by dFBA submodels to their most recent fluxes
"""
reaction_fluxes = {}
for dynamic_submodel in self.dynamic_submodels.values():
if isinstance(dynamic_submodel, wc_sim.submodels.dfba.DfbaSubmodel):
reaction_fluxes = {**reaction_fluxes, **dynamic_submodel.get_reaction_fluxes()}
return reaction_fluxes
def get_num_submodels(self):
""" Provide the number of submodels
Returns:
:obj:`int`: the number of submodels
"""
return self.num_submodels
def get_stop_condition(self):
""" Provide a simulation's stop condition
A simulation's stop condition is constructed as a logical 'or' of all :obj:`StopConditions`
in a model.
Returns:
:obj:`function`: a function which computes the logical 'or' of all :obj:`StopConditions`,
or `None` if no stop condition are defined
"""
if self.dynamic_stop_conditions:
dynamic_stop_conditions = self.dynamic_stop_conditions.values()
def all_stop_conditions(time):
for dynamic_stop_condition in dynamic_stop_conditions:
if dynamic_stop_condition.eval(time):
return True
return False
return all_stop_conditions
else:
return None
def get_species_count_array(self, now): # pragma no cover, not used
""" Map current species counts into an numpy array
Args:
now (:obj:`float`): the current simulation time
Returns:
numpy array, #species x # compartments, containing count of species in compartment
"""
species_counts = numpy.zeros((len(model.species), len(model.compartments)))
for species in model.species:
for compartment in model.compartments:
species_id = Species.gen_id(species.id, compartment.id)
species_counts[species.index, compartment.index] = \
model.local_species_population.read_one(now, species_id)
return species_counts
def obtain_dependencies(self, model):
""" Obtain the dependencies of expressions on reactions in a WC Lang model
An expression depends on a reaction if the expression uses any species whose population
changes when the reaction executes, or the expression uses an expression that depends on
the reaction.
When caching is active, these dependencies identify which cached expressions to invalidate.
They're also used by the Next Reaction Method to determine which rate laws must be evaluated
after a reaction executes.
`obtain_dependencies` is memory and compute intensive because it builds and walks an in-memory
DAG that represents the dependency relationships among the WC-Lang models used by a whole-cell
model.
If a simulation fails with the error "killed" and no other information, then it is probably
running on a system or in a container which does not have sufficient memory to complete
this function. Try running on a system with more memory, simulating a smaller model, or
disabling caching in `wc_sim.cfg`.
Args:
model (:obj:`Model`): the description of the whole-cell model in `wc_lang`
Returns:
:obj:`dict` of :obj:`list`: the dependencies of expressions on reactions, which maps each
reaction to a list of expressions
"""
used_model_types = set((wc_lang.Function,
wc_lang.Observable,
wc_lang.RateLaw,
wc_lang.Species,
wc_lang.StopCondition,
wc_lang.Compartment))
model_entities = itertools.chain(model.functions,
model.observables,
model.rate_laws,
model.stop_conditions)
# 1) make digraph of dependencies among model instances
dependency_graph = networkx.DiGraph()
def map_model_to_dynamic_model(model):
return DynamicComponent.get_dynamic_component(type(model), model.id)
for dependent_model_entity in model_entities:
dependent_model_entity_expr = dependent_model_entity.expression
# get all instances of types in used_model_types used by dependent_model_entity
used_models = []
for attr_name, attr in dependent_model_entity_expr.Meta.attributes.items():
if isinstance(attr, obj_tables.RelatedAttribute):
if attr.related_class in used_model_types:
used_models.extend(getattr(dependent_model_entity_expr, attr_name))
# add edges from model_type entities to the dependent_model_entity that uses them
for used_model in used_models:
dependency_graph.add_edge(map_model_to_dynamic_model(used_model),
map_model_to_dynamic_model(dependent_model_entity))
# 2) a compartment in an expression is a special case that computes the compartment's mass
# add dependencies between each compartment used in an expression and all the species
# in the compartment
dynamic_compartments_in_use = set()
for node in dependency_graph.nodes():
if isinstance(node, DynamicCompartment):
dynamic_compartments_in_use.add(node)
for dynamic_compartment in dynamic_compartments_in_use:
compartment = model.compartments.get(id=dynamic_compartment.id)[0]
for species in compartment.species:
dependency_graph.add_edge(map_model_to_dynamic_model(species),
map_model_to_dynamic_model(compartment))
# 3) add edges of species altered by reactions to determine dependencies of expressions on
# reactions
# to reduce memory use, process one reaction at a time
reaction_dependencies = {}
dfs_preorder_nodes = networkx.algorithms.traversal.depth_first_search.dfs_preorder_nodes
for dynamic_submodel in self.dynamic_submodels.values():
for reaction in dynamic_submodel.reactions:
net_stoichiometric_coefficients = collections.defaultdict(float)
for participant in reaction.participants:
species = participant.species
net_stoichiometric_coefficients[species] += participant.coefficient
for species, net_stoich_coeff in net_stoichiometric_coefficients.items():
if net_stoich_coeff < 0 or 0 < net_stoich_coeff:
dependency_graph.add_edge(reaction,
map_model_to_dynamic_model(species))
# 4) traverse from each reaction to dependent expressions
reaction_dependencies[reaction] = set()
for node in dfs_preorder_nodes(dependency_graph, reaction):
if not isinstance(node, (wc_lang.Reaction, DynamicCompartment, DynamicSpecies)):
reaction_dependencies[reaction].add(node)
# convert the dependency sets into lists, which are more than 3x smaller than sets
reaction_dependencies[reaction] = list(reaction_dependencies[reaction])
dependency_graph.remove_node(reaction)
# 5) remove reactions that have no dependencies to conserve space
rxns_to_remove = set()
for rxn_id, dependencies in reaction_dependencies.items():
if not dependencies:
rxns_to_remove.add(rxn_id)
for rxn_id in rxns_to_remove:
del reaction_dependencies[rxn_id]
return reaction_dependencies
def continuous_reaction_dependencies(self):
""" Get the expressions that depend on species used by reactions modeled by continuous submodels
Caching uses these dependencies to determine the expressions that should be invalidated when
species populations change or time advances.
Returns:
(:obj:`dict`): map from ids of continuous submodels to lists of expressions
whose values depend on species with non-zero stoichiometry in reaction(s)
modeled by the submodel
"""
rxns_modeled_by_continuous_submodels = {}
for submodel_id, dynamic_submodel in self.dynamic_submodels.items():
if isinstance(dynamic_submodel, (wc_sim.submodels.dfba.DfbaSubmodel,
wc_sim.submodels.odes.OdeSubmodel)):
rxns_modeled_by_continuous_submodels[submodel_id] = set(dynamic_submodel.reactions)
continuous_rxn_dependencies = {}
for submodel_id, sm_reactions in rxns_modeled_by_continuous_submodels.items():
continuous_rxn_dependencies[submodel_id] = set()
# TODO (APG): OPTIMIZE DFBA CACHING: only include exchange and objective (biomass) reactions in dFBA submodels
for reaction, dependencies in self.rxn_expression_dependencies.items():
if reaction in sm_reactions:
continuous_rxn_dependencies[submodel_id].update(dependencies)
# to save space convert values in continuous_rxn_dependencies from sets to lists
for submodel_id, dependencies in continuous_rxn_dependencies.items():
continuous_rxn_dependencies[submodel_id] = list(dependencies)
return continuous_rxn_dependencies
def prepare_dependencies(self, model):
""" Initialize expression dependency attributes
Args:
model (:obj:`Model`): the description of the whole-cell model in `wc_lang`
"""
self.rxn_expression_dependencies = self.obtain_dependencies(model)
self.continuous_rxn_dependencies = self.continuous_reaction_dependencies()
all_continuous_rxn_dependencies = set()
for expression_keys in self.continuous_rxn_dependencies.values():
all_continuous_rxn_dependencies.union(expression_keys)
self.all_continuous_rxn_dependencies = tuple(all_continuous_rxn_dependencies)
def _stop_caching(self):
""" Disable caching; used for testing """
self.cache_manager._stop_caching()
def _start_caching(self):
""" Enable caching; used for testing """
self.cache_manager._start_caching()
def flush_compartment_masses(self):
""" If caching is enabled, invalidate cache entries for compartmental masses
Run whenever populations change or time advances
"""
if self.cache_manager.caching():
# do nothing if the invalidation is EVENT_BASED invalidation, because invalidate() clears the cache
if self.cache_manager.invalidation_approach() is InvalidationApproaches.REACTION_DEPENDENCY_BASED:
dynamic_compartments = [dyn_compartment for dyn_compartment in self.dynamic_compartments.values()]
self.cache_manager.invalidate(expressions=dynamic_compartments)
def flush_after_reaction(self, reaction):
""" If caching is enabled, invalidate cache entries when time advances and a reaction executes
Args:
reaction (:obj:`Reaction`): the reaction that executed
"""
expressions = self.all_continuous_rxn_dependencies
if reaction in self.rxn_expression_dependencies:
expressions = itertools.chain(expressions, self.rxn_expression_dependencies[reaction])
self.cache_manager.invalidate(expressions=expressions)
self.flush_compartment_masses()
def continuous_submodel_flush_after_populations_change(self, dynamic_submodel_id):
""" Invalidate cache entries that depend on reactions modeled by a continuous submodel
Only used when caching is enabled.
Runs when a continuous submodel advances time or changes species populations.
Args:
dynamic_submodel_id (:obj:`str`): the id of the continuous submodel that's running
"""
self.cache_manager.invalidate(expressions=self.continuous_rxn_dependencies[dynamic_submodel_id])
self.flush_compartment_masses()
WC_LANG_MODEL_TO_DYNAMIC_MODEL = {
wc_lang.Compartment: DynamicCompartment,
wc_lang.DfbaObjective: DynamicDfbaObjective,
wc_lang.Function: DynamicFunction,
wc_lang.Observable: DynamicObservable,
wc_lang.Parameter: DynamicParameter,
wc_lang.RateLaw: DynamicRateLaw,
wc_lang.Species: DynamicSpecies,
wc_lang.StopCondition: DynamicStopCondition,
}
class InvalidationApproaches(Enum):
REACTION_DEPENDENCY_BASED = auto()
EVENT_BASED = auto()
class CachingEvents(Enum):
""" Types of caching events, used to maintain caching statistics
"""
HIT = auto()
MISS = auto()
HIT_RATIO = auto()
FLUSH_HIT = auto()
FLUSH_MISS = auto()
FLUSH_HIT_RATIO = auto()
class CacheManager(object):
""" Represent a RAM cache of `DynamicExpression.eval()` values
This is a centralized cache for all `DynamicExpression` values and `DynamicCompartment` `accounted_mass` values.
Caching may speed up a simulation substantially, or may slow it down.
All caching is controlled by the multialgorithm configuration file.
The `expression_caching` attribute determines whether caching is active.
The `cache_invalidation` attribute selects the cache invalidation approach.
The `event_based` invalidation approach invalidates (flushes) the entire cache at the start of
each simulation event which changes species populations, that is, that executes a reaction. Thus,
all expressions used during the event must be recalculated. This approach will boost performance
if many expressions are used repeatedly during
a single event, as occurs when many rate laws that share functions are evaluated.
The `reaction_dependency_based` invalidation approach invalidates (flushes) individual cache
entries that depend on the execution of a particular reaction.
The dependencies of `DynamicExpression`\ s on species populations and the reactions that alter
the populations are computed at initialization.
Under the `reaction_dependency_based` approach,
when a reaction executes all cached values of the `DynamicExpression`\ s that depend on
the reaction are invalidated.
This approach will be superior if a typical reaction execution changes populations of species
that are used, directly or indirectly, by only a small fraction of the cached values of
the `DynamicExpression`\ s.
In addition, since the populations of species modeled by continuous integration algorithms,
such as ODEs and dFBA,
vary continuously, `DynamicExpression`\ s that depend on them must always be invalidated
whenever simulation time advances.
Attributes:
caching_active (:obj:`bool`): whether caching is active
cache_invalidation (:obj:`InvalidationApproaches`): the cache invalidation approach
_cache (:obj:`dict`): cache of `DynamicExpression.eval()` values
_cache_stats (:obj:`dict`): caching stats
"""
def __init__(self, caching_active=None, cache_invalidation=None):
"""
Args:
caching_active (:obj:`bool`, optional): whether `DynamicExpression` values are cached
cache_invalidation (:obj:`str`, optional): the cache invalidation approach:
either reaction_dependency_based or event_based
Raises:
:obj:`MultialgorithmError`: if `cache_invalidation` is not `reaction_dependency_based`
or `event_based`
"""
config_multialgorithm = wc_sim.config.core.get_config()['wc_sim']['multialgorithm']
self.caching_active = caching_active
if caching_active is None:
self.caching_active = config_multialgorithm['expression_caching']
if self.caching_active:
if cache_invalidation is None:
cache_invalidation = config_multialgorithm['cache_invalidation']
cache_invalidation = cache_invalidation.upper()
if cache_invalidation not in InvalidationApproaches.__members__:
raise MultialgorithmError(f"cache_invalidation '{cache_invalidation}' not in "
f"{str(set(InvalidationApproaches.__members__))}")
self.cache_invalidation = InvalidationApproaches[cache_invalidation]
self._cache = dict()
self._cache_stats = dict()
for cls in itertools.chain(DynamicExpression.__subclasses__(), (DynamicCompartment,)):
self._cache_stats[cls.__name__] = dict()
for caching_event in list(CachingEvents):
self._cache_stats[cls.__name__][caching_event] = 0
def invalidation_approach(self):
""" Provide the invalidation approach
Returns:
:obj:`InvalidationApproaches`: the invalidation approach
"""
return self.cache_invalidation
def get(self, expression):
""" If caching is enabled, get the value of `expression` from the cache if it's stored
Also maintain caching statistics.
Args:
expression (:obj:`object`): a dynamic expression
Returns:
:obj:`object`: the cached value of `expression`
Raises:
:obj:`MultialgorithmError`: if the cache does not contain an entry for `expression`
"""
if self.caching_active:
cls_name = expression.__class__.__name__
if expression in self._cache:
self._cache_stats[cls_name][CachingEvents.HIT] += 1
return self._cache[expression]
else:
self._cache_stats[cls_name][CachingEvents.MISS] += 1
raise MultialgorithmError(f"dynamic expression ({cls_name}.{expression.id}) "
f"not in cache")
def set(self, expression, value):
""" If caching is enabled, set a value for `expression` in the cache
Args:
expression (:obj:`object`): a dynamic expression
value (:obj:`object`): value of the dynamic expression
"""
if self.caching_active:
self._cache[expression] = value
def flush(self, expressions):
""" Invalidate the cache entries for all dynamic expressions in `expressions`
Missing cache entries are ignored.
Args:
expressions (:obj:`list` of :obj:`obj`): iterator over dynamic expression instances
"""
for expression in expressions:
cls_name = expression.__class__.__name__
try:
del self._cache[expression]
self._cache_stats[cls_name][CachingEvents.FLUSH_HIT] += 1
except KeyError:
self._cache_stats[cls_name][CachingEvents.FLUSH_MISS] += 1
def clear_cache(self):
""" Remove all cache entries """
self._cache.clear()
def invalidate(self, expressions=None):
""" Invalidate the cache entries for all dynamic expressions in `expressions`
Missing cache entries are ignored. Does nothing if caching is not enabled.
Args:
expressions (:obj:`set` of :obj:`obj`): iterator over dynamic expression instances
"""
if self.caching_active: # coverage's claim that 'self.caching_active' is never False is wrong
if self.cache_invalidation == InvalidationApproaches.REACTION_DEPENDENCY_BASED:
expressions = tuple() if expressions is None else expressions
self.flush(expressions)
elif self.cache_invalidation == InvalidationApproaches.EVENT_BASED:
self.clear_cache()
else:
pass # pragma: no cover
def caching(self):
""" Is caching enabled?
Returns:
:obj:`bool`: return `True` if caching is enabled
"""
return self.caching_active
def set_caching(self, caching_active):
""" Set the state of caching
Args:
caching_active (:obj:`bool`): `True` if caching should be enabled, otherwise `False`
"""
self.caching_active = caching_active
def __contains__(self, expression):
""" Indicate whether the cache contains an expression
Args:
expression (:obj:`object`): a dynamic expression
Returns:
:obj:`bool`: return :obj:`True` if caching is enabled and `expression` is in the cache,
:obj:`False` otherwise
"""
return self.caching_active and expression in self._cache
def size(self):
""" Get the cache's size
Returns:
:obj:`int`: the number of entries in the cache
"""
return len(self._cache)
def empty(self):
""" Determine whether the cache is empty
Returns:
:obj:`bool`: return :obj:`True` if the cache is empty, :obj:`False` otherwise
"""
return self.size() == 0
def _stop_caching(self):
""" Disable caching; used for testing """
self.set_caching(False)
def _start_caching(self):
""" Enable caching; used for testing """
self.clear_cache()
self.set_caching(True)
def _add_hit_ratios(self):
""" Add hit ratios to the cache stats dictionary
"""
for _, stats in self._cache_stats.items():
c_e = CachingEvents
try:
stats[c_e.HIT_RATIO] = stats[c_e.HIT] / (stats[c_e.HIT] + stats[c_e.MISS])
except ZeroDivisionError:
stats[c_e.HIT_RATIO] = float('nan')
try:
stats[c_e.FLUSH_HIT_RATIO] = \
stats[c_e.FLUSH_HIT] / (stats[c_e.FLUSH_HIT] + stats[c_e.FLUSH_MISS])
except ZeroDivisionError:
stats[c_e.FLUSH_HIT_RATIO] = float('nan')
def cache_stats_table(self):
""" Provide the caching stats
Returns:
:obj:`str`: the caching stats in a table
"""
self._add_hit_ratios()
rv = ['Class\t' + '\t'.join(caching_event.name for caching_event in list(CachingEvents))]
for expression, stats in self._cache_stats.items():
row = [expression]
for caching_event in CachingEvents:
val = stats[caching_event]
if isinstance(val, float):
row.append(f'{val:.2f}')
else:
row.append(str(val))
rv.append('\t'.join(row))
return '\n'.join(rv)
def __str__(self):
""" Readable cache state
Returns:
:obj:`str`: the caching stats in a table
"""
rv = [f"caching_active: {self.caching_active}"]
if self.caching_active:
rv.append(f"cache_invalidation: {self.cache_invalidation}")
rv.append('cache:')
rv.append(pformat(self._cache))
return '\n'.join(rv)
| [
"math.isnan",
"pprint.pformat",
"wc_sim.multialgorithm_errors.MultialgorithmError",
"wc_utils.util.ontology.are_terms_equivalent",
"inspect.isclass",
"numpy.isnan",
"wc_lang.Species.gen_id",
"collections.defaultdict",
"wc_sim.model_utilities.ModelUtilities.non_neg_normal_sample",
"collections.name... | [((1148, 1226), 'collections.namedtuple', 'collections.namedtuple', (['"""WcSimToken"""', '"""code, token_string, dynamic_expression"""'], {}), "('WcSimToken', 'code, token_string, dynamic_expression')\n", (1170, 1226), False, 'import collections\n'), ((56532, 56538), 'enum.auto', 'auto', ([], {}), '()\n', (56536, 56538), False, 'from enum import Enum, auto\n'), ((56554, 56560), 'enum.auto', 'auto', ([], {}), '()\n', (56558, 56560), False, 'from enum import Enum, auto\n'), ((56677, 56683), 'enum.auto', 'auto', ([], {}), '()\n', (56681, 56683), False, 'from enum import Enum, auto\n'), ((56695, 56701), 'enum.auto', 'auto', ([], {}), '()\n', (56699, 56701), False, 'from enum import Enum, auto\n'), ((56718, 56724), 'enum.auto', 'auto', ([], {}), '()\n', (56722, 56724), False, 'from enum import Enum, auto\n'), ((56741, 56747), 'enum.auto', 'auto', ([], {}), '()\n', (56745, 56747), False, 'from enum import Enum, auto\n'), ((56765, 56771), 'enum.auto', 'auto', ([], {}), '()\n', (56769, 56771), False, 'from enum import Enum, auto\n'), ((56794, 56800), 'enum.auto', 'auto', ([], {}), '()\n', (56798, 56800), False, 'from enum import Enum, auto\n'), ((4706, 4770), 'wc_sim.multialgorithm_errors.MultialgorithmError', 'MultialgorithmError', (['f"""model type \'{model_type}\' has wrong type"""'], {}), '(f"model type \'{model_type}\' has wrong type")\n', (4725, 4770), False, 'from wc_sim.multialgorithm_errors import MultialgorithmError, MultialgorithmWarning\n'), ((20741, 20769), 'math.isnan', 'math.isnan', (['self.init_volume'], {}), '(self.init_volume)\n', (20751, 20769), False, 'import math\n'), ((25107, 25180), 'wc_utils.util.ontology.are_terms_equivalent', 'are_terms_equivalent', (['self.physical_type', "onto['WC:abstract_compartment']"], {}), "(self.physical_type, onto['WC:abstract_compartment'])\n", (25127, 25180), False, 'from wc_utils.util.ontology import are_terms_equivalent\n'), ((47512, 47608), 'itertools.chain', 'itertools.chain', 
(['model.functions', 'model.observables', 'model.rate_laws', 'model.stop_conditions'], {}), '(model.functions, model.observables, model.rate_laws, model.\n stop_conditions)\n', (47527, 47608), False, 'import itertools\n'), ((47819, 47837), 'networkx.DiGraph', 'networkx.DiGraph', ([], {}), '()\n', (47835, 47837), False, 'import networkx\n'), ((3838, 3915), 'wc_sim.multialgorithm_errors.MultialgorithmError', 'MultialgorithmError', (['f"""model class of type \'{model_type.__name__}\' not found"""'], {}), '(f"model class of type \'{model_type.__name__}\' not found")\n', (3857, 3915), False, 'from wc_sim.multialgorithm_errors import MultialgorithmError, MultialgorithmWarning\n'), ((4135, 4221), 'wc_sim.multialgorithm_errors.MultialgorithmError', 'MultialgorithmError', (['f"""model of type \'{model_type.__class__.__name__}\' not found"""'], {}), '(\n f"model of type \'{model_type.__class__.__name__}\' not found")\n', (4154, 4221), False, 'from wc_sim.multialgorithm_errors import MultialgorithmError, MultialgorithmWarning\n'), ((4629, 4690), 'wc_sim.multialgorithm_errors.MultialgorithmError', 'MultialgorithmError', (['f"""model type \'{model_type}\' not defined"""'], {}), '(f"model type \'{model_type}\' not defined")\n', (4648, 4690), False, 'from wc_sim.multialgorithm_errors import MultialgorithmError, MultialgorithmWarning\n'), ((5531, 5647), 'wc_sim.multialgorithm_errors.MultialgorithmError', 'MultialgorithmError', (['f"""model type \'{model_type.__name__}\' not in DynamicComponent.dynamic_components_objs"""'], {}), '(\n f"model type \'{model_type.__name__}\' not in DynamicComponent.dynamic_components_objs"\n )\n', (5550, 5647), False, 'from wc_sim.multialgorithm_errors import MultialgorithmError, MultialgorithmWarning\n'), ((5773, 5904), 'wc_sim.multialgorithm_errors.MultialgorithmError', 'MultialgorithmError', (['f"""model type \'{model_type.__name__}\' with id=\'{id}\' not in DynamicComponent.dynamic_components_objs"""'], {}), '(\n f"model type 
\'{model_type.__name__}\' with id=\'{id}\' not in DynamicComponent.dynamic_components_objs"\n )\n', (5792, 5904), False, 'from wc_sim.multialgorithm_errors import MultialgorithmError, MultialgorithmWarning\n'), ((8115, 8224), 'wc_sim.multialgorithm_errors.MultialgorithmError', 'MultialgorithmError', (['f"""_obj_tables_tokens cannot be empty - ensure that \'{wc_lang_model}\' is valid"""'], {}), '(\n f"_obj_tables_tokens cannot be empty - ensure that \'{wc_lang_model}\' is valid"\n )\n', (8134, 8224), False, 'from wc_sim.multialgorithm_errors import MultialgorithmError, MultialgorithmWarning\n'), ((20023, 20126), 'wc_utils.util.ontology.are_terms_equivalent', 'are_terms_equivalent', (['wc_lang_compartment.init_volume.distribution', "onto['WC:normal_distribution']"], {}), "(wc_lang_compartment.init_volume.distribution, onto[\n 'WC:normal_distribution'])\n", (20043, 20126), False, 'from wc_utils.util.ontology import are_terms_equivalent\n'), ((20281, 20297), 'numpy.isnan', 'numpy.isnan', (['std'], {}), '(std)\n', (20292, 20297), False, 'import numpy\n'), ((20568, 20629), 'wc_sim.model_utilities.ModelUtilities.non_neg_normal_sample', 'ModelUtilities.non_neg_normal_sample', (['random_state', 'mean', 'std'], {}), '(random_state, mean, std)\n', (20604, 20629), False, 'from wc_sim.model_utilities import ModelUtilities\n'), ((20662, 20728), 'wc_sim.multialgorithm_errors.MultialgorithmError', 'MultialgorithmError', (['"""Initial volume must be normally distributed"""'], {}), "('Initial volume must be normally distributed')\n", (20681, 20728), False, 'from wc_sim.multialgorithm_errors import MultialgorithmError, MultialgorithmWarning\n'), ((20826, 20940), 'wc_sim.multialgorithm_errors.MultialgorithmError', 'MultialgorithmError', (['f"""DynamicCompartment {self.id}: init_volume is NaN, but must be a positive number."""'], {}), "(\n f'DynamicCompartment {self.id}: init_volume is NaN, but must be a positive number.'\n )\n", (20845, 20940), False, 'from 
wc_sim.multialgorithm_errors import MultialgorithmError, MultialgorithmWarning\n'), ((21025, 21148), 'wc_sim.multialgorithm_errors.MultialgorithmError', 'MultialgorithmError', (['f"""DynamicCompartment {self.id}: init_volume ({self.init_volume}) must be a positive number."""'], {}), "(\n f'DynamicCompartment {self.id}: init_volume ({self.init_volume}) must be a positive number.'\n )\n", (21044, 21148), False, 'from wc_sim.multialgorithm_errors import MultialgorithmError, MultialgorithmWarning\n'), ((21299, 21323), 'math.isnan', 'math.isnan', (['init_density'], {}), '(init_density)\n', (21309, 21323), False, 'import math\n'), ((35114, 35203), 'wc_sim.multialgorithm_errors.MultialgorithmError', 'MultialgorithmError', (['f"""model \'{model.id}\' must have at least 1 cellular compartment"""'], {}), '(\n f"model \'{model.id}\' must have at least 1 cellular compartment")\n', (35133, 35203), False, 'from wc_sim.multialgorithm_errors import MultialgorithmError, MultialgorithmWarning\n'), ((55330, 55402), 'itertools.chain', 'itertools.chain', (['expressions', 'self.rxn_expression_dependencies[reaction]'], {}), '(expressions, self.rxn_expression_dependencies[reaction])\n', (55345, 55402), False, 'import itertools\n'), ((66884, 66904), 'pprint.pformat', 'pformat', (['self._cache'], {}), '(self._cache)\n', (66891, 66904), False, 'from pprint import pformat\n'), ((4534, 4610), 'wc_sim.multialgorithm_errors.MultialgorithmError', 'MultialgorithmError', (['f"""model of type \'{model_type_type.__name__}\' not found"""'], {}), '(f"model of type \'{model_type_type.__name__}\' not found")\n', (4553, 4610), False, 'from wc_sim.multialgorithm_errors import MultialgorithmError, MultialgorithmWarning\n'), ((5288, 5315), 'inspect.isclass', 'inspect.isclass', (['model_type'], {}), '(model_type)\n', (5303, 5315), False, 'import inspect\n'), ((21347, 21462), 'wc_sim.multialgorithm_errors.MultialgorithmError', 'MultialgorithmError', (['f"""DynamicCompartment {self.id}: init_density is NaN, but 
must be a positive number."""'], {}), "(\n f'DynamicCompartment {self.id}: init_density is NaN, but must be a positive number.'\n )\n", (21366, 21462), False, 'from wc_sim.multialgorithm_errors import MultialgorithmError, MultialgorithmWarning\n'), ((21555, 21675), 'wc_sim.multialgorithm_errors.MultialgorithmError', 'MultialgorithmError', (['f"""DynamicCompartment {self.id}: init_density ({init_density}) must be a positive number."""'], {}), "(\n f'DynamicCompartment {self.id}: init_density ({init_density}) must be a positive number.'\n )\n", (21574, 21675), False, 'from wc_sim.multialgorithm_errors import MultialgorithmError, MultialgorithmWarning\n'), ((23371, 23460), 'wc_sim.multialgorithm_errors.MultialgorithmError', 'MultialgorithmError', (['f"""DynamicCompartment \'{self.id}\': initial accounted ratio is 0"""'], {}), '(\n f"DynamicCompartment \'{self.id}\': initial accounted ratio is 0")\n', (23390, 23460), False, 'from wc_sim.multialgorithm_errors import MultialgorithmError, MultialgorithmWarning\n'), ((23919, 24128), 'wc_sim.multialgorithm_errors.MultialgorithmError', 'MultialgorithmError', (['f"""DynamicCompartment {self.id}: initial accounted ratio ({self.accounted_fraction:.3E}) greater than MAX_ALLOWED_INIT_ACCOUNTED_FRACTION ({MAX_ALLOWED_INIT_ACCOUNTED_FRACTION})."""'], {}), "(\n f'DynamicCompartment {self.id}: initial accounted ratio ({self.accounted_fraction:.3E}) greater than MAX_ALLOWED_INIT_ACCOUNTED_FRACTION ({MAX_ALLOWED_INIT_ACCOUNTED_FRACTION}).'\n )\n", (23938, 24128), False, 'from wc_sim.multialgorithm_errors import MultialgorithmError, MultialgorithmWarning\n'), ((45538, 45580), 'wc_lang.Species.gen_id', 'Species.gen_id', (['species.id', 'compartment.id'], {}), '(species.id, compartment.id)\n', (45552, 45580), False, 'from wc_lang import Species, Compartment\n'), ((50119, 50149), 'collections.defaultdict', 'collections.defaultdict', (['float'], {}), '(float)\n', (50142, 50149), False, 'import collections\n'), ((61709, 61798), 
'wc_sim.multialgorithm_errors.MultialgorithmError', 'MultialgorithmError', (['f"""dynamic expression ({cls_name}.{expression.id}) not in cache"""'], {}), "(\n f'dynamic expression ({cls_name}.{expression.id}) not in cache')\n", (61728, 61798), False, 'from wc_sim.multialgorithm_errors import MultialgorithmError, MultialgorithmWarning\n'), ((11813, 11920), 'wc_sim.multialgorithm_errors.MultialgorithmError', 'MultialgorithmError', (['f"""loading expression \'{self.expression}\' cannot find function \'{func_name}\'"""'], {}), '(\n f"loading expression \'{self.expression}\' cannot find function \'{func_name}\'"\n )\n', (11832, 11920), False, 'from wc_sim.multialgorithm_errors import MultialgorithmError, MultialgorithmWarning\n'), ((23605, 23760), 'warnings.warn', 'warnings.warn', (['f"""DynamicCompartment \'{self.id}\': initial accounted ratio ({self.accounted_fraction:.3E}) greater than 1.0"""', 'MultialgorithmWarning'], {}), '(\n f"DynamicCompartment \'{self.id}\': initial accounted ratio ({self.accounted_fraction:.3E}) greater than 1.0"\n , MultialgorithmWarning)\n', (23618, 23760), False, 'import warnings\n')] |
from __future__ import print_function
###########################################
# SVHN dataset #
# http://ufldl.stanford.edu/housenumbers/ #
###########################################
import os
import numpy as np
import scipy.io
import tensorflow as tf
from .tfrecords_utils import *
from .mnist import MNISTLoader, MNISTFeatures
class SVHNConverter(Converter):
    """Convert the SVHN .mat archives (train / extra / test) into TFRecords."""
    features = MNISTFeatures

    def __init__(self, data_dir):
        """Initialize the object for the SVHN dataset in `data_dir`"""
        print('Loading original SVHN data from', data_dir)
        self.data = []
        # SVHN's 'extra' split is used here as the validation set.
        for name, key in [('train', 'train'), ('val', 'extra'), ('test', 'test')]:
            data = os.path.join(data_dir, '%s_32x32.mat' % key)
            if not os.path.isfile(data):
                print('Warning: Missing %s data' % name)
            else:
                self.data.append((name, data))

    def convert(self, tfrecords_path, compression_type=None, sort=False):
        """Convert the dataset in TFRecords saved in the given `tfrecords_path`"""
        for name, data in self.data:
            # Load the Matlab archive: X holds images, y holds labels.
            mat = scipy.io.loadmat(data)
            images, labels = mat['X'], mat['y']
            num_items = labels.shape[0]
            # Write one TFRecords file per split.
            writer_path = '%s_%s' % (tfrecords_path, name)
            writer = self.init_writer(writer_path, compression_type=compression_type)
            labels_order = np.argsort(labels, axis=0) if sort else range(num_items)
            for x, index in enumerate(labels_order):
                print('\rLoad %s: %d / %d' % (name, x + 1, num_items), end='')
                img = images[:, :, :, index]
                img = img.astype(np.uint8)
                class_id = int(labels[index, 0])
                # SVHN encodes digit 0 as class 10; remap it to 0.
                class_id = 0 if class_id == 10 else class_id
                # `ndarray.tostring` was removed in NumPy >= 1.23;
                # `tobytes` is its byte-identical replacement.
                writer.write(self.create_example_proto([class_id], [img.tobytes()], [index]))
            # End
            writer.close()
            print('\nWrote %s in file %s (%.2fMB)' % (
                name, writer_path, os.path.getsize(writer_path) / 1e6))
            print()
class SVHNLoader(MNISTLoader):
shape = (32, 32, 3) | [
"numpy.argsort",
"os.path.getsize",
"os.path.isfile",
"os.path.join"
] | [((718, 762), 'os.path.join', 'os.path.join', (['data_dir', "('%s_32x32.mat' % key)"], {}), "(data_dir, '%s_32x32.mat' % key)\n", (730, 762), False, 'import os\n'), ((782, 802), 'os.path.isfile', 'os.path.isfile', (['data'], {}), '(data)\n', (796, 802), False, 'import os\n'), ((1474, 1500), 'numpy.argsort', 'np.argsort', (['labels'], {'axis': '(0)'}), '(labels, axis=0)\n', (1484, 1500), True, 'import numpy as np\n'), ((2091, 2119), 'os.path.getsize', 'os.path.getsize', (['writer_path'], {}), '(writer_path)\n', (2106, 2119), False, 'import os\n')] |
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import os
from pathlib import Path
from typing import Union
import numpy as np
from tqdm import tqdm
def scatter_mean(data, indices):
    """Average the rows of `data` that share the same value in `indices`.

    Returns one row per distinct index, ordered by sorted unique index.
    """
    _, inverse, counts = np.unique(indices, return_inverse=True, return_counts=True)
    # group the rows of `data` by their (unique-relative) index
    order = np.argsort(inverse)
    # segment start offsets: exclusive cumulative sum of the group sizes
    starts = np.zeros(len(counts), dtype=counts.dtype)
    np.add.accumulate(counts[:-1], out=starts[1:])
    # segment sums via reduceat, then divide by group size for the mean
    return np.add.reduceat(data[order], starts) / counts[:, None]
def grid_filter_numpy(pts, size):
    """Pool `pts` onto a regular grid of cell width `size`.

    Returns the per-cell mean points and a dict with the pooling layout:
    `values` sorts points by cell, `idx` maps each sorted point to its cell row.
    """
    # integer grid coordinate of every point
    cell_coords = np.floor(pts / size).astype(int)
    _, cell_of_point, counts = np.unique(
        cell_coords, return_inverse=True, return_counts=True, axis=0
    )
    # bookkeeping: cell row for each sorted point, and the sort permutation
    idx = np.repeat(np.arange(len(counts)), counts)
    values = np.argsort(cell_of_point)
    # per-cell means via reduceat over the cell-sorted points
    starts = np.zeros_like(counts)
    np.add.accumulate(counts[:-1], out=starts[1:])
    out = np.add.reduceat(pts[values], starts) / counts[:, None]
    return out, dict(idx=idx, values=values)
def search_optimal_grid_size(
    points: np.array, target_points: int, max_iters: int = 100
):
    """Search for a grid size whose pooled point count lands near `target_points`.

    Starts from a coarse estimate and multiplicatively refines the cell size
    until the pooled cloud has at least `target_points` points and is within
    5% of the target (or the iteration budget runs out).
    """
    # first guess: largest axis extent spread over the target count
    extent = np.max(points, axis=0) - np.min(points, axis=0)
    size = np.max(extent / target_points)
    iteration = 0
    while True:
        pooled_count = len(grid_filter_numpy(points, size)[0])
        iteration += 1
        relative_error = abs(pooled_count - target_points) / target_points
        if pooled_count >= target_points and (relative_error < 0.05 or iteration >= max_iters):
            break
        # shrink cells to keep more points, grow them to keep fewer
        size *= 1.1 if pooled_count > target_points else 0.9
    return size
def process_pointcloud(points: np.array, colors: np.array, max_points: int):
    """Down-sample a colored point cloud to at most `max_points` points."""
    # small clouds pass through untouched
    if len(points) <= max_points:
        return points, colors
    # choose a grid size that pools to just above max_points
    grid_size = search_optimal_grid_size(points, max_points)
    # grid-pool the geometry, then average the colors with the same layout
    pooled_pts, cells = grid_filter_numpy(points, grid_size)
    pooled_cols = scatter_mean(colors[cells["values"]], cells["idx"])
    # randomly keep exactly max_points of the pooled points
    keep = np.random.choice(len(pooled_pts), max_points, replace=False)
    return pooled_pts[keep], pooled_cols[keep]
def process_folder(prefix: Union[str, Path], output: Union[str, Path], max_points: int):
    """Process every file in `prefix` into `output`.

    .npz point clouds are down-sampled to at most `max_points` points and
    re-compressed; .txt sequence files are symlinked alongside them.

    :param prefix: input folder containing .npz point clouds and .txt files
    :param output: destination folder (created if missing)
    :param max_points: maximum number of points to retain per cloud
    """
    # create output folder unconditionally
    os.makedirs(output, exist_ok=True)
    # count the files once so tqdm can show a proper progress bar
    n = len(list(os.scandir(prefix)))
    for item in tqdm(os.scandir(prefix), total=n):
        if not item.is_file():
            continue
        if item.name.endswith(".npz"):
            # point cloud files: down-sample and re-compress
            data = np.load(item.path)
            points, colors = process_pointcloud(data["pcd"], data["color"], max_points)
            np.savez_compressed(
                os.path.join(output, item.name), pcd=points, color=colors
            )
        elif item.name.endswith(".txt"):
            # sequence files: symlink into the output folder
            src = item.path
            dst = os.path.join(output, item.name)
            # if both are relative, symlink relative so the tree stays portable
            if not (os.path.isabs(src) or os.path.isabs(dst)):
                src = os.path.relpath(src, output)
            try:
                os.symlink(src, dst)
            except FileExistsError:
                # link already exists from a previous run; keep it
                pass
def parse_arguments():
    """Define and evaluate the command-line interface for this script."""
    cli = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    cli.add_argument("output", help="The desired output directory")
    cli.add_argument("--max_points", type=int, default=1024,
                     help="Max number of points to retain")
    cli.add_argument("--prefix", default="pointclouds",
                     help="Allows manually specifying a prefix for the input folder.")
    cli.add_argument("--max_iters", type=int, default=20,
                     help="Number of iterations spent trying the find optimal grid size")
    return cli.parse_args()
def main():
    """Entry point: read the CLI options and run the folder processor."""
    opts = parse_arguments()
    process_folder(opts.prefix, opts.output, opts.max_points)
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
| [
"numpy.load",
"numpy.zeros_like",
"os.path.isabs",
"os.makedirs",
"argparse.ArgumentParser",
"numpy.floor",
"os.symlink",
"numpy.add.reduceat",
"numpy.argsort",
"numpy.max",
"numpy.min",
"os.path.relpath",
"numpy.add.accumulate",
"os.path.join",
"os.scandir",
"numpy.unique"
] | [((308, 327), 'numpy.argsort', 'np.argsort', (['inverse'], {}), '(inverse)\n', (318, 327), True, 'import numpy as np\n'), ((392, 442), 'numpy.add.accumulate', 'np.add.accumulate', (['counts[:-1]'], {'out': 'reduce_idx[1:]'}), '(counts[:-1], out=reduce_idx[1:])\n', (409, 442), True, 'import numpy as np\n'), ((866, 885), 'numpy.argsort', 'np.argsort', (['pool_id'], {}), '(pool_id)\n', (876, 885), True, 'import numpy as np\n'), ((931, 952), 'numpy.zeros_like', 'np.zeros_like', (['counts'], {}), '(counts)\n', (944, 952), True, 'import numpy as np\n'), ((957, 1007), 'numpy.add.accumulate', 'np.add.accumulate', (['counts[:-1]'], {'out': 'reduce_idx[1:]'}), '(counts[:-1], out=reduce_idx[1:])\n', (974, 1007), True, 'import numpy as np\n'), ((1322, 1350), 'numpy.max', 'np.max', (['(span / target_points)'], {}), '(span / target_points)\n', (1328, 1350), True, 'import numpy as np\n'), ((2413, 2447), 'os.makedirs', 'os.makedirs', (['output'], {'exist_ok': '(True)'}), '(output, exist_ok=True)\n', (2424, 2447), False, 'import os\n'), ((3571, 3632), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'formatter_class': 'ArgumentDefaultsHelpFormatter'}), '(formatter_class=ArgumentDefaultsHelpFormatter)\n', (3585, 3632), False, 'from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n'), ((226, 285), 'numpy.unique', 'np.unique', (['indices'], {'return_inverse': '(True)', 'return_counts': '(True)'}), '(indices, return_inverse=True, return_counts=True)\n', (235, 285), True, 'import numpy as np\n'), ((453, 498), 'numpy.add.reduceat', 'np.add.reduceat', (['data[idx_sorted]', 'reduce_idx'], {}), '(data[idx_sorted], reduce_idx)\n', (468, 498), True, 'import numpy as np\n'), ((679, 750), 'numpy.unique', 'np.unique', (['pool_coords'], {'return_inverse': '(True)', 'return_counts': '(True)', 'axis': '(0)'}), '(pool_coords, return_inverse=True, return_counts=True, axis=0)\n', (688, 750), True, 'import numpy as np\n'), ((1018, 1058), 'numpy.add.reduceat', 'np.add.reduceat', 
(['pts[values]', 'reduce_idx'], {}), '(pts[values], reduce_idx)\n', (1033, 1058), True, 'import numpy as np\n'), ((1263, 1285), 'numpy.max', 'np.max', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (1269, 1285), True, 'import numpy as np\n'), ((1288, 1310), 'numpy.min', 'np.min', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (1294, 1310), True, 'import numpy as np\n'), ((624, 644), 'numpy.floor', 'np.floor', (['(pts / size)'], {}), '(pts / size)\n', (632, 644), True, 'import numpy as np\n'), ((2517, 2535), 'os.scandir', 'os.scandir', (['prefix'], {}), '(prefix)\n', (2527, 2535), False, 'import os\n'), ((2606, 2624), 'os.scandir', 'os.scandir', (['prefix'], {}), '(prefix)\n', (2616, 2624), False, 'import os\n'), ((2872, 2890), 'numpy.load', 'np.load', (['item.path'], {}), '(item.path)\n', (2879, 2890), True, 'import numpy as np\n'), ((3028, 3059), 'os.path.join', 'os.path.join', (['output', 'item.name'], {}), '(output, item.name)\n', (3040, 3059), False, 'import os\n'), ((3221, 3252), 'os.path.join', 'os.path.join', (['output', 'item.name'], {}), '(output, item.name)\n', (3233, 3252), False, 'import os\n'), ((3392, 3420), 'os.path.relpath', 'os.path.relpath', (['src', 'output'], {}), '(src, output)\n', (3407, 3420), False, 'import os\n'), ((3455, 3475), 'os.symlink', 'os.symlink', (['src', 'dst'], {}), '(src, dst)\n', (3465, 3475), False, 'import os\n'), ((3327, 3345), 'os.path.isabs', 'os.path.isabs', (['src'], {}), '(src)\n', (3340, 3345), False, 'import os\n'), ((3349, 3367), 'os.path.isabs', 'os.path.isabs', (['dst'], {}), '(dst)\n', (3362, 3367), False, 'import os\n')] |
# Demo script: load an ONNX model and run one inference with the Caffe2 backend.
import caffe2.python.onnx.backend as backend
import numpy as np
import onnx
# Load the ONNX model
model = onnx.load("alexnet.onnx")
# Check that the IR is well formed
onnx.checker.check_model(model)
# Print a human readable representation of the graph
# NOTE(review): printable_graph returns a string that is discarded here;
# wrap it in print(...) to actually display the graph.
onnx.helper.printable_graph(model.graph)
rep = backend.prepare(model, device="CUDA:0") # or "CPU"
# For the Caffe2 backend:
#     rep.predict_net is the Caffe2 protobuf for the network
#     rep.workspace is the Caffe2 workspace for the network
#     (see the class caffe2.python.onnx.backend.Workspace)
# Feed one random batch of 10 images (3x224x224, float32) through the network.
outputs = rep.run(np.random.randn(10, 3, 224, 224).astype(np.float32))
# To run networks with more than one input, pass a tuple
# rather than a single numpy ndarray.
print(outputs[0])
| [
"numpy.random.randn",
"onnx.helper.printable_graph",
"caffe2.python.onnx.backend.prepare",
"onnx.checker.check_model",
"onnx.load"
] | [((107, 132), 'onnx.load', 'onnx.load', (['"""alexnet.onnx"""'], {}), "('alexnet.onnx')\n", (116, 132), False, 'import onnx\n'), ((169, 200), 'onnx.checker.check_model', 'onnx.checker.check_model', (['model'], {}), '(model)\n', (193, 200), False, 'import onnx\n'), ((255, 295), 'onnx.helper.printable_graph', 'onnx.helper.printable_graph', (['model.graph'], {}), '(model.graph)\n', (282, 295), False, 'import onnx\n'), ((303, 342), 'caffe2.python.onnx.backend.prepare', 'backend.prepare', (['model'], {'device': '"""CUDA:0"""'}), "(model, device='CUDA:0')\n", (318, 342), True, 'import caffe2.python.onnx.backend as backend\n'), ((581, 613), 'numpy.random.randn', 'np.random.randn', (['(10)', '(3)', '(224)', '(224)'], {}), '(10, 3, 224, 224)\n', (596, 613), True, 'import numpy as np\n')] |
# IMPORT MODULES
import numpy as np
from scipy import optimize
from scipy import special
from reported_statistics import get_p
from typing import List
import time
class BinaryOutcomeModel(object):
    """
    A binary outcome model class that Logit and Probit are built on.
    :param add_intercept: If True, an intercept term is added to the model, that is a column of ones in the data.
    :type add_intercept: bool, optional, defaults to False
    :param compute_statistics: If True, standard errors and p-values are computed for the point estimates as well as R-squared and adjusted R-squared values for the model.
    :type compute_statistics: bool, optional, defaults to True
    :param bootstrap_marginal_effect_variances: If True, the marginal effect variances are bootstrapped, if False, the delta method is used. The delta method is quicker.
    :type bootstrap_marginal_effect_variances: bool, optional, defaults to False
    """
    def __init__(self, add_intercept:bool=False, compute_statistics:bool=True, bootstrap_marginal_effect_variances:bool=False):
        # Store configuration flags; data and estimates are populated by fit().
        self.parameters = {'add_intercept':add_intercept, 'compute_statistics':compute_statistics, 'bootstrap_marginal_effect_variances':bootstrap_marginal_effect_variances}
        self.X:np.ndarray = None
        self.y:np.ndarray = None
        # per-column flag: True when the column only takes the values 0 and 1
        self.dummy_columns:List[bool] = []
        self.beta:np.ndarray = None
        self.covariance_matrix = None
    def fit(self, X:np.ndarray, y:np.ndarray):
        """
        Fit binary model, using the BFGS algorithm for MLE (minimize the negative log-likelihood)
        :param X: Input data / Independent Variables, a matrix with (n x k) dimensions.
        :type X: numpy.ndarray
        :param y: Dependent / output variable, a vector of (n x 1) dimensions.
        :type y: numpy.ndarray
        :return: Only updates the fitted model object
        :rtype: None
        """
        if self.parameters['add_intercept']:
            X = np.append(X, np.ones(shape=(X.shape[0],1), dtype=np.int8), axis=1)
        n, k = X.shape
        self.X, self.y = X, y
        self.get_dummy_columns() # dummy check
        # MLE - optimization; start the optimizer from the zero vector
        beta_0 = np.zeros(shape=k)
        bfgs = optimize.minimize(self.objective_function, beta_0, method='BFGS', jac=self.score, options={'disp': True}) # MLE
        self.beta = bfgs.x
        if self.parameters['compute_statistics']:
            self.compute_statistics()
    def predict(self, X):
        """
        Predict probability: Pr(y=1|X) based on a fitted model.
        :param X: Input data (test set) with the same column dimensions as the data used to fit the model (training set).
        :type X: numpy.ndarray
        :return: y_hat and Pr(y=1|x)
        :rtype: tuple
        """
        p = self.link_function(X, self.beta)
        # classify with a 0.5 probability threshold
        y_hat = np.where(p > 0.5, 1, 0)
        return y_hat, p
    def get_dummy_columns(self):
        """
        Refresh dummy_columns list - tells which column in X is dummy variable, which one is continuous
        """
        self.dummy_columns = []
        for i in range(self.X.shape[1]):
            if sorted(list(np.unique(self.X[:,i]))) == [0, 1]: # if dummy
                self.dummy_columns.append(True)
            else:
                self.dummy_columns.append(False)
    def compute_statistics(self):
        """
        Updates covariance matrix and p-values for the estimated parameters of fitted model.
        """
        self.covariance_matrix = self.get_covariance_matrix()
        self.p_values = get_p(self.beta, self.covariance_matrix)
        pass
    def get_covariance_matrix(self):
        """
        Compute covariance matrix for the parameters, the asymptotical variance is the inverse information matrix.
        :return: Covariance matrix.
        :rtype: numpy.ndarray
        """
        self.covariance_matrix = np.linalg.inv(self.information())
        return self.covariance_matrix
    def get_marginal_effects(self, X:np.ndarray, variances:bool=True):
        """
        Get marginal effects.
        :param X: The data, an (n x k) matrix.
        :type X: numpy.ndarray
        :param variances: If true, compute variances of estimated marginal effects.
        :type variances: bool, optional, defaults to True
        :return: An (n x k) matrix that consists of k marginal effects for the n observations. If the 'variances' parameter is True, the function returns a tuple with the marginal effects and their corresponding variances (marginals, var_marginals).
        :rtype: numpy.ndarray or if 'variances' is True, (numpy.ndarra, numpy.ndarray) tuple.
        """
        marginals = self.compute_marginal_effects(X, self.beta) # (n x k)
        if variances:
            var_marginals = self.get_marginal_effect_variances(X) # (n x k)
            return marginals, var_marginals
        return marginals
    def compute_marginal_effects(self, X:np.ndarray, beta:np.ndarray):
        """
        Calculate marginal effects given X (data) and beta (estimated parameters).
        :param X: The data.
        :type X: numpy.ndarray
        :param beta: The model parameter vector (k x 1).
        :type beta: numpy.ndarray
        :return: Marginal effects dy_i/dx_ij, an (n x k) matrix where a value in i-th row and j-th column is the calculated marginal effect of variable j on observed output i.
        :rtype: numpy.ndarray
        """
        marginals = []
        for i in range(X.shape[0]): # for each row in X
            marginals_i = []
            for j in range(X.shape[1]): # for each column in X
                if self.dummy_columns[j]: # difference of predicted probabilities (x_ij = 1 and x_ij = 0) (Dummy variables)
                    X_i1, X_i0 = X[i].copy(), X[i].copy()
                    X_i1[j], X_i0[j] = 1, 0
                    marginals_i.append(self.link_function(X_i1, beta) - self.link_function(X_i0, beta))
                else: # derivative of predicted probability wrt. x_ij (Continuous variables)
                    marginals_i.append(self.link_function_derivative(X[i], beta)*beta[j]) # (1 x k)
            marginals.append(marginals_i)
        marginals = np.array(marginals) # (n x k)
        return marginals
    def get_marginal_effect_variances(self, X:np.ndarray):
        """
        Calculate variance of marginal effects.
        :param X: The fitted data, an (n x k) matrix.
        :type X: numpy.ndarray
        :return: An (n x k) matrix, - variances for k marginal effects for the n observations.
        :rtype: numpy.ndarray
        """
        if self.parameters['bootstrap_marginal_effect_variances']: # bootstrap method
            variances = self.bootstrap(X) # n x k
        else: # delta method
            variances = self.delta_method(X) # n x k
        return variances
    def bootstrap(self, X:np.ndarray):
        """
        Implement bootstrap method for marginal effect variance computation for Logit and Probit.
        :param X: The fitted data, an (n x k) matrix.
        :type X: numpy.ndarray
        :return: An (n x k) matrix. Variances for k marginal effects for the n observations.
        :rtype: numpy.ndarray
        """
        start = time.time()
        # sample coefficients - from their marginal distribution, using the fact that the coefficients are asymptotically normal
        bootstrap_size = 500 # bootstrap size: 500
        beta_samples = np.zeros(shape=(bootstrap_size, X.shape[1])) # preallocate sample matrix (b x k)
        # NOTE(review): np.random.normal's `scale` is a standard deviation, but
        # np.diag(self.covariance_matrix) holds variances — this looks like a
        # missing np.sqrt; confirm before relying on the bootstrap variances.
        for j in range(X.shape[1]): # populate sample matrix
            beta_samples[:,j] = np.random.normal(loc=self.beta[j], scale=np.diag(self.covariance_matrix)[j], size=bootstrap_size) # add samples as columns
        # get marginal effects for each coefficient sample
        marginal_effect_samples = np.zeros(shape=(bootstrap_size, X.shape[0], X.shape[1])) # (b x n x k) tensor
        for b in range(bootstrap_size):
            marginal_effect_samples[b,:,:] = self.compute_marginal_effects(X, beta_samples[b,:])
        print(f'computing marginal effects for each coefficient sample took {time.time()-start} seconds')
        # variance of marginal effect samples (the mean is given - calculated in self.get_marginal_effects)
        variances = np.var(marginal_effect_samples, axis=0) # collapse to (n x k)
        return variances
    # not implemented methods
    def objective_function(self, beta:np.ndarray):
        """
        Negative log likelihood.
        :param beta: The model parameter vector (k x 1).
        :type beta: numpy.ndarray
        :return: The negative log-likelihood function evaluated at X,y,beta.
        :rtype: float
        """
        # average negative Bernoulli log-likelihood over the n observations
        link = self.link_function(self.X, beta)
        return - np.sum( self.y*np.log(link) + (1 - self.y)*np.log(1 - link) ) / self.X.shape[0]
    def link_function(self):
        """
        Implement link function for Logit or Probit.
        :raises NotImplementedError: This method is not implemented in the generic BinaryOutcomeModel.
        """
        raise NotImplementedError('This method must be implemented for the specific binary outcome model')
    def link_function_derivative(self):
        """
        Implement the derivative link function for Logit or Probit.
        :raises NotImplementedError: This method is not implemented in the generic BinaryOutcomeModel.
        """
        raise NotImplementedError('This method must be implemented for the specific binary outcome model')
    def delta_method(self):
        """
        Implement delta method for marginal effect variance computation for Logit or Probit.
        :raises NotImplementedError: This method is not implemented in the generic BinaryOutcomeModel.
        """
        raise NotImplementedError('This method must be implemented for the specific binary outcome model')
    def score(self):
        """
        Implement objective function derivative.
        :raises NotImplementedError: This method is not implemented in the generic BinaryOutcomeModel.
        """
        raise NotImplementedError('This method must be implemented for the specific binary outcome model')
    def information(self):
        """
        Implement Fisher Information matrix computation.
        :raises NotImplementedError: This method is not implemented in the generic BinaryOutcomeModel.
        """
        raise NotImplementedError('This method must be implemented for the specific binary outcome model')
class Logit(BinaryOutcomeModel):
    """
    Logistic regression: a BinaryOutcomeModel with the logistic link.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    def link_function(self, X:np.ndarray, beta:np.ndarray):
        """
        The logistic link function.
        :param X: The (n x k) data.
        :type X: numpy.ndarray
        :param beta: The model parameter vector (k x 1).
        :type beta: numpy.ndarray
        :return: The evaluated link function at X,beta.
        :rtype: numpy.ndarray
        """
        return (np.exp(X @ beta)) / (1 + np.exp(X @ beta)) # logit
    def link_function_derivative(self, X:np.ndarray, beta:np.ndarray):
        """
        The derivative of the link function.
        :param X: The (n x k) data.
        :type X: numpy.ndarray
        :param beta: The model parameter vector (k x 1).
        :type beta: numpy.ndarray
        :return: The evaluated link function derivative at X,beta.
        :rtype: numpy.ndarray
        """
        # logistic derivative: p * (1 - p)
        link = self.link_function(X, beta)
        return link * (1 - link)
    def score(self, beta:np.ndarray):
        """
        The score is the first derivative of the objective function at beta.
        :param beta: The model parameter vector (k x 1).
        :type beta: numpy.ndarray
        :return: The score.
        :rtype: float
        """
        # residuals y - p, weighted into each column of X and summed over rows
        res = self.y - self.link_function(self.X, beta)
        return - np.sum( self.X * np.reshape(res, newshape=(res.shape[0], 1)), axis=0 )
    def information(self):
        """
        Compute Fisher information matrix.
        :return: The Fisher information matrix.
        :rtype: numpy.ndarray
        """
        # NOTE(review): initialized as (k x 1) but broadcasts to (k x k) on the
        # first addition; the final value is the (k x k) sum_i w_i * x_i' x_i.
        information = np.zeros(shape=(self.X.shape[1], 1))
        for i in range(self.X.shape[0]):
            X_i = np.reshape(self.X[i], newshape=(1, self.X[i].shape[0])) # X_i is a row vector
            link_i = self.link_function(X_i, self.beta)
            w = (link_i) * (1 - link_i) # or just link derivative
            information = information + w * (np.transpose(X_i) @ X_i)
        return information
    def delta_method(self, X:np.ndarray):
        """
        Calculate variance of marginal effects (X (n x k)).
        :param X: The (n x k) data.
        :type X: numpy.ndarray
        :return: An (n x k) matrix that contains variances for k marginal effects for the n observations.
        :rtype: numpy.ndarray
        """
        # continuous variables
        link = self.link_function(X, self.beta) # (n x 1)
        link_derivative = self.link_function_derivative(X, self.beta) # (n x 1)
        variances_list = []
        for i in range(X.shape[0]): # go through each row of X
            X_i = np.reshape(X[i], newshape=(1, X[i].shape[0])) # X_i is a row vector
            # Jacobian of the marginal effect w.r.t. beta for observation i
            partial = np.identity(X.shape[1]) * link_derivative[i] + np.outer((self.beta * link_derivative[i] * (1 - 2*(link[i])) ), X_i) # (k x k)
            variances_i = list(np.diag(partial @ self.covariance_matrix @ np.transpose(partial))) # 1 x k
            # binary variables
            for j in range(len(variances_i)):
                if self.dummy_columns[j]: # replace variance if dummy
                    X_i1, X_i0 = X_i.copy(), X_i.copy()
                    X_i1[0,j], X_i0[0,j] = 1, 0
                    partial = link_derivative[i] * X_i1 - link_derivative[i] * X_i0
                    variances_i[j] = (partial) @ self.covariance_matrix @ np.transpose(partial)
            variances_list.append(variances_i) # construct list of variance lists
        return np.array(variances_list, dtype=object) # the variances (n x k)
class Probit(BinaryOutcomeModel):
    """
    Probit regression: a BinaryOutcomeModel with the standard normal CDF link.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    def link_function(self, X:np.ndarray, beta:np.ndarray):
        """
        The probit link function.
        :param X: The (n x k) data.
        :type X: numpy.ndarray
        :param beta: The model parameter vector (k x 1).
        :type beta: numpy.ndarray
        :return: The evaluated link function at X,beta.
        :rtype: numpy.ndarray
        """
        # probit -> standard normal CDF via the error function; can be used for
        # prediction, so it does not use self.X as default
        return ((1 + special.erf((X @ beta) / np.sqrt(2))) / 2) # Probit
    def link_function_derivative(self, X:np.ndarray, beta:np.ndarray):
        """
        The derivative of the link function.
        :param X: The (n x k) data.
        :type X: numpy.ndarray
        :param beta: The model parameter vector (k x 1).
        :type beta: numpy.ndarray
        :return: The evaluated link function derivative at X,beta.
        :rtype: numpy.ndarray
        """
        # standard normal PDF evaluated at X @ beta
        return np.exp(- ((X @ beta)**2) / 2) / np.sqrt(2*np.pi)
    def score(self, beta:np.ndarray):
        """
        The score is the first derivative of the objective function at beta.
        :param beta: The model parameter vector (k x 1).
        :type beta: numpy.ndarray
        :return: The score.
        :rtype: float
        """
        link, link_derivative = self.link_function(self.X, beta), self.link_function_derivative(self.X, beta)
        res = self.y - link
        # probit score weights each residual by pdf / (cdf * (1 - cdf))
        w = link_derivative / ((link)*(1 - link))
        return - np.sum(self.X * np.reshape(res, newshape=(res.shape[0], 1)) * np.reshape(w, newshape=(res.shape[0], 1)), axis=0 )
    def information(self):
        """
        Compute Fisher information matrix.
        :return: The Fisher information matrix.
        :rtype: numpy.ndarray
        """
        # NOTE(review): initialized as (k x 1) but broadcasts to (k x k) on the
        # first addition; the final value is the (k x k) sum_i w_i * x_i' x_i.
        information = np.zeros(shape=(self.X.shape[1], 1))
        for i in range(self.X.shape[0]):
            X_i = np.reshape(self.X[i], newshape=(1, self.X[i].shape[0])) # X_i is a row vector
            link_i, link_derivative_i = self.link_function(X_i, self.beta), self.link_function_derivative(X_i, self.beta)
            w = link_derivative_i**2 / ((link_i)*(1 - link_i))
            information = information + w * (np.transpose(X_i) @ X_i)
        return information
    def delta_method(self, X:np.ndarray):
        """
        Calculate variance of marginal effects (X (n x k)).
        :param X: The (n x k) data.
        :type X: numpy.ndarray
        :return: An (n x k) matrix that contains variances for k marginal effects for the n observations.
        :rtype: numpy.ndarray
        """
        # continuous variables
        link_derivative = self.link_function_derivative(X, self.beta) # (n x 1)
        variances_list = []
        for i in range(X.shape[0]): # go through each row of X
            X_i = np.reshape(X[i], newshape=(1, X[i].shape[0])) # X_i is a row vector
            # Jacobian of the marginal effect w.r.t. beta for observation i
            partial = link_derivative[i] * (np.identity(X.shape[1]) - np.outer(np.outer(self.beta, np.transpose(self.beta)) @ np.transpose(X_i), X_i)) # (k x k)
            variances_i = list(np.diag(partial @ self.covariance_matrix @ np.transpose(partial))) # 1 x k
            # binary variables
            for j in range(len(variances_i)):
                if self.dummy_columns[j]: # replace variance if dummy
                    X_i1, X_i0 = X_i.copy(), X_i.copy()
                    X_i1[0,j], X_i0[0,j] = 1, 0
                    partial = link_derivative[i] * X_i1 - link_derivative[i] * X_i0
                    variances_i[j] = (partial) @ self.covariance_matrix @ np.transpose(partial)
            variances_list.append(variances_i) # construct list of variance lists
        return np.array(variances_list, dtype=object) # the variances (n x k)
| [
"numpy.diag",
"scipy.optimize.minimize",
"numpy.outer",
"numpy.log",
"numpy.zeros",
"numpy.ones",
"numpy.identity",
"time.time",
"numpy.transpose",
"numpy.where",
"numpy.array",
"numpy.exp",
"numpy.reshape",
"reported_statistics.get_p",
"numpy.var",
"numpy.unique",
"numpy.sqrt"
] | [((2219, 2236), 'numpy.zeros', 'np.zeros', ([], {'shape': 'k'}), '(shape=k)\n', (2227, 2236), True, 'import numpy as np\n'), ((2253, 2363), 'scipy.optimize.minimize', 'optimize.minimize', (['self.objective_function', 'beta_0'], {'method': '"""BFGS"""', 'jac': 'self.score', 'options': "{'disp': True}"}), "(self.objective_function, beta_0, method='BFGS', jac=self.\n score, options={'disp': True})\n", (2270, 2363), False, 'from scipy import optimize\n'), ((2888, 2911), 'numpy.where', 'np.where', (['(p > 0.5)', '(1)', '(0)'], {}), '(p > 0.5, 1, 0)\n', (2896, 2911), True, 'import numpy as np\n'), ((3617, 3657), 'reported_statistics.get_p', 'get_p', (['self.beta', 'self.covariance_matrix'], {}), '(self.beta, self.covariance_matrix)\n', (3622, 3657), False, 'from reported_statistics import get_p\n'), ((6302, 6321), 'numpy.array', 'np.array', (['marginals'], {}), '(marginals)\n', (6310, 6321), True, 'import numpy as np\n'), ((7360, 7371), 'time.time', 'time.time', ([], {}), '()\n', (7369, 7371), False, 'import time\n'), ((7578, 7622), 'numpy.zeros', 'np.zeros', ([], {'shape': '(bootstrap_size, X.shape[1])'}), '(shape=(bootstrap_size, X.shape[1]))\n', (7586, 7622), True, 'import numpy as np\n'), ((7974, 8030), 'numpy.zeros', 'np.zeros', ([], {'shape': '(bootstrap_size, X.shape[0], X.shape[1])'}), '(shape=(bootstrap_size, X.shape[0], X.shape[1]))\n', (7982, 8030), True, 'import numpy as np\n'), ((8430, 8469), 'numpy.var', 'np.var', (['marginal_effect_samples'], {'axis': '(0)'}), '(marginal_effect_samples, axis=0)\n', (8436, 8469), True, 'import numpy as np\n'), ((12415, 12451), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.X.shape[1], 1)'}), '(shape=(self.X.shape[1], 1))\n', (12423, 12451), True, 'import numpy as np\n'), ((14306, 14344), 'numpy.array', 'np.array', (['variances_list'], {'dtype': 'object'}), '(variances_list, dtype=object)\n', (14314, 14344), True, 'import numpy as np\n'), ((16378, 16414), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.X.shape[1], 
1)'}), '(shape=(self.X.shape[1], 1))\n', (16386, 16414), True, 'import numpy as np\n'), ((18286, 18324), 'numpy.array', 'np.array', (['variances_list'], {'dtype': 'object'}), '(variances_list, dtype=object)\n', (18294, 18324), True, 'import numpy as np\n'), ((11230, 11246), 'numpy.exp', 'np.exp', (['(X @ beta)'], {}), '(X @ beta)\n', (11236, 11246), True, 'import numpy as np\n'), ((12513, 12568), 'numpy.reshape', 'np.reshape', (['self.X[i]'], {'newshape': '(1, self.X[i].shape[0])'}), '(self.X[i], newshape=(1, self.X[i].shape[0]))\n', (12523, 12568), True, 'import numpy as np\n'), ((13441, 13486), 'numpy.reshape', 'np.reshape', (['X[i]'], {'newshape': '(1, X[i].shape[0])'}), '(X[i], newshape=(1, X[i].shape[0]))\n', (13451, 13486), True, 'import numpy as np\n'), ((15507, 15535), 'numpy.exp', 'np.exp', (['(-(X @ beta) ** 2 / 2)'], {}), '(-(X @ beta) ** 2 / 2)\n', (15513, 15535), True, 'import numpy as np\n'), ((15539, 15557), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (15546, 15557), True, 'import numpy as np\n'), ((16476, 16531), 'numpy.reshape', 'np.reshape', (['self.X[i]'], {'newshape': '(1, self.X[i].shape[0])'}), '(self.X[i], newshape=(1, self.X[i].shape[0]))\n', (16486, 16531), True, 'import numpy as np\n'), ((17408, 17453), 'numpy.reshape', 'np.reshape', (['X[i]'], {'newshape': '(1, X[i].shape[0])'}), '(X[i], newshape=(1, X[i].shape[0]))\n', (17418, 17453), True, 'import numpy as np\n'), ((2012, 2057), 'numpy.ones', 'np.ones', ([], {'shape': '(X.shape[0], 1)', 'dtype': 'np.int8'}), '(shape=(X.shape[0], 1), dtype=np.int8)\n', (2019, 2057), True, 'import numpy as np\n'), ((11255, 11271), 'numpy.exp', 'np.exp', (['(X @ beta)'], {}), '(X @ beta)\n', (11261, 11271), True, 'import numpy as np\n'), ((13579, 13644), 'numpy.outer', 'np.outer', (['(self.beta * link_derivative[i] * (1 - 2 * link[i]))', 'X_i'], {}), '(self.beta * link_derivative[i] * (1 - 2 * link[i]), X_i)\n', (13587, 13644), True, 'import numpy as np\n'), ((12156, 12199), 
'numpy.reshape', 'np.reshape', (['res'], {'newshape': '(res.shape[0], 1)'}), '(res, newshape=(res.shape[0], 1))\n', (12166, 12199), True, 'import numpy as np\n'), ((13532, 13555), 'numpy.identity', 'np.identity', (['X.shape[1]'], {}), '(X.shape[1])\n', (13543, 13555), True, 'import numpy as np\n'), ((16121, 16162), 'numpy.reshape', 'np.reshape', (['w'], {'newshape': '(res.shape[0], 1)'}), '(w, newshape=(res.shape[0], 1))\n', (16131, 16162), True, 'import numpy as np\n'), ((17521, 17544), 'numpy.identity', 'np.identity', (['X.shape[1]'], {}), '(X.shape[1])\n', (17532, 17544), True, 'import numpy as np\n'), ((3207, 3230), 'numpy.unique', 'np.unique', (['self.X[:, i]'], {}), '(self.X[:, i])\n', (3216, 3230), True, 'import numpy as np\n'), ((7795, 7826), 'numpy.diag', 'np.diag', (['self.covariance_matrix'], {}), '(self.covariance_matrix)\n', (7802, 7826), True, 'import numpy as np\n'), ((8269, 8280), 'time.time', 'time.time', ([], {}), '()\n', (8278, 8280), False, 'import time\n'), ((12761, 12778), 'numpy.transpose', 'np.transpose', (['X_i'], {}), '(X_i)\n', (12773, 12778), True, 'import numpy as np\n'), ((13733, 13754), 'numpy.transpose', 'np.transpose', (['partial'], {}), '(partial)\n', (13745, 13754), True, 'import numpy as np\n'), ((14183, 14204), 'numpy.transpose', 'np.transpose', (['partial'], {}), '(partial)\n', (14195, 14204), True, 'import numpy as np\n'), ((15053, 15063), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (15060, 15063), True, 'import numpy as np\n'), ((16075, 16118), 'numpy.reshape', 'np.reshape', (['res'], {'newshape': '(res.shape[0], 1)'}), '(res, newshape=(res.shape[0], 1))\n', (16085, 16118), True, 'import numpy as np\n'), ((16787, 16804), 'numpy.transpose', 'np.transpose', (['X_i'], {}), '(X_i)\n', (16799, 16804), True, 'import numpy as np\n'), ((17713, 17734), 'numpy.transpose', 'np.transpose', (['partial'], {}), '(partial)\n', (17725, 17734), True, 'import numpy as np\n'), ((18163, 18184), 'numpy.transpose', 'np.transpose', 
(['partial'], {}), '(partial)\n', (18175, 18184), True, 'import numpy as np\n'), ((8939, 8951), 'numpy.log', 'np.log', (['link'], {}), '(link)\n', (8945, 8951), True, 'import numpy as np\n'), ((8967, 8983), 'numpy.log', 'np.log', (['(1 - link)'], {}), '(1 - link)\n', (8973, 8983), True, 'import numpy as np\n'), ((17603, 17620), 'numpy.transpose', 'np.transpose', (['X_i'], {}), '(X_i)\n', (17615, 17620), True, 'import numpy as np\n'), ((17576, 17599), 'numpy.transpose', 'np.transpose', (['self.beta'], {}), '(self.beta)\n', (17588, 17599), True, 'import numpy as np\n')] |
from __future__ import print_function
from __future__ import division
from random import shuffle
# Written by <NAME>
# Updated 11.28.2016
from optparse import OptionParser
from collections import Counter
import array
import itertools
import math
import sys,re
import os
import logging
from scipy.stats import binom as binomial
import numpy as np
import numpy.matlib
import time
from scipy.stats import invgamma
import sklearn
import sklearn.covariance
# Set up basic logging
# Module-level logger shared by the MCMC routines below.
logger = logging.getLogger('Log')
from scipy import stats
from scipy.stats import multivariate_normal
import random
# Emit a warning (rather than raising) on divide-by-zero inside numpy ops;
# the samplers below take logs of proposal probabilities that can be 0.
np.seterr(divide='warn')
def is_pos_def(x):
    """Return True if ``x`` is a positive-definite matrix.

    ``x`` is coerced to a :class:`numpy.matrix` and the test is whether
    every eigenvalue is strictly positive.

    Parameters
    ----------
    x : array_like
        Square matrix to test.

    Returns
    -------
    bool
        True when all eigenvalues of ``x`` are > 0, else False.
    """
    x = np.matrix(x)
    # bool() unwraps the numpy scalar so callers get a plain Python bool.
    return bool(np.all(np.linalg.eigvals(x) > 0))
# return BIC -2*log(p(Data | theta that maximizes C, Mc)) + vc log(n) : vc is the number of parameters (K+J)*(C-1), K is the number of phenotypes, J is the number of genes, C is the number of clusters
def mrpmm(betas,ses,vymat,annotvec,genevec,protvec,chroffvec,clusters,fout,Rphen,Rpheninv,phenidarr, Rphenuse=True, fdr=.05, niter=1000,burn=100,thinning=1,verbose=True, protectivescan = False, outpath='/Users/mrivas/', maxlor = 0.693):
    """Run the MRPMM Gibbs/Metropolis-Hastings sampler over variants.

    Each of the m variants (rows of `betas`/`ses`, across k phenotypes) is
    assigned to one of `clusters` multivariate effect-size clusters
    (cluster 0 is the null cluster); gene-level cluster proportions are
    tied together through a Dirichlet hierarchy with sharing parameter
    alpha, and per-annotation scales are sampled with MH steps.

    Side effects: writes `<fout>.mcmc.posteriors`, `<fout>.fdr`,
    `<fout>.mcmc.gene.posteriors` and (when `verbose`) `.mcmc.probs`,
    `.mcmc.bc`, `.mcmc.scale`, `.theta.bc`, `.mcmc.alpha` under `outpath`;
    with `protectivescan` also `<fout>.mcmc.protective`.

    Returns
    -------
    [BIC, AIC, genedatm50] where genedatm50 maps gene name -> posterior
    median (across post-burn-in, thinned iterations) cluster proportions.

    NOTE(review): a leftover debug statement `test5 = open('test5.1.txt','w')`
    was removed here; it truncated a file in the CWD and leaked the handle.
    """
    print("Running MCMC algorithm...")
    print(sys.flags.optimize)
    epsilon = .0000000000000001
    storephensvar = []
    S = vymat
    xi0 = 1 # hyperparameter to control spread of proposals for annotation
    xialpha0 = 1
    betas = numpy.matrix(betas)
    ses = numpy.matrix(ses)
    S = numpy.matrix(S)
    Sinv = numpy.linalg.inv(S)
    # Let k be the number of clusters, where cluster 1 is the null model cluster
    C = clusters
    maxloglkiter = np.zeros((niter+2,1))
    # Let k be the number of phenotypes
    k = betas.shape[1]
    # Let m be the number of variants
    m = betas.shape[0]
    # Initialize
    #Sigma0 for alternative clusters
    if Rphenuse:
        if is_pos_def(Rphen):
            Theta0 = Rphen
            Theta0inv = Rpheninv
        else:
            Theta0 = sklearn.covariance.shrunk_covariance(Rphen)
            Theta0inv = numpy.linalg.inv(Theta0)
    else:
        Theta0 = numpy.eye(Rphen.shape[0])
        Theta0inv = numpy.linalg.inv(Theta0)
    #scale matrix
    geneset = set(genevec)
    genemap = list(geneset)
    annotset = set(annotvec)
    annotlen = len(annotset)
    annotmap = list(annotset)
    scales = numpy.zeros((niter+2,annotlen))
    # store the mean trait value across the clusters for individuals that are members
    bc = numpy.zeros((niter+2,C,k))
    # store the probabilities (proportions) of cluster memberships
    pc = numpy.zeros((niter+2,1,C))
    # store the probabilities (proportions) of cluster memberships for each gene
    genenum = len(set(genevec))
    pcj = numpy.zeros((niter+2,genenum,C))
    # for each iteration keep record of the variant membership
    deltam = numpy.zeros((niter+2,m))
    ###### Why are these stored separately?
    # non-normalized probabilities for each individual variant
    uc = numpy.zeros((niter+2,m,C))
    # normalized probabilities for each individual variant
    ws = numpy.zeros((niter+2,m,C))
    # for each iteration keep record of the variant membership
    tm = numpy.zeros((niter+2,m))
    #sharing parameter
    alpha = numpy.zeros((niter+2,1))
    ks = numpy.arange(1,C+1)
    sigmainvdict = {}
    sigmadict = {}
    thetadict = {}
    thetainvdict = {}
    for clusteriter in range(2,C+1):
        sigmadict[0,clusteriter] = S
        sigmainvdict[0,clusteriter] = Sinv
        thetadict[0,clusteriter] = Theta0
        thetainvdict[0,clusteriter] = Theta0inv
    # For Metropolois Hastings sub-step : keep track of acceptance rate
    acceptmh1 = 0
    rejectmh1 = 0
    acceptmh1_postburnin = 0
    rejectmh1_postburnin = 0
    acceptmh3 = 0
    rejectmh3 = 0
    acceptmh3_postburnin = 0
    rejectmh3_postburnin = 0
    acceptmh2 = [0]*annotlen
    rejectmh2 = [0]*annotlen
    acceptmh2_postburnin = [0]*annotlen
    rejectmh2_postburnin = [0]*annotlen
    # initialize \alpha : sharing of clusters across genes
    alpha[0,:] = invgamma.rvs(1,0,1,size = 1)
    # initialize pc (proportions across all variants)
    pc[0,0,:] = np.random.dirichlet([1]*C)
    # initialize pcj (proportions for each gene j)
    for geneidx in range(0,genenum):
        pcj[0,geneidx,:] = np.random.dirichlet(alpha[0,0]*pc[0,0,:])
    bc[0,0,:] = np.array([0]*k)
    for clusteridx in range(1,C):
        bc[0,clusteridx,:] = np.random.multivariate_normal(np.array([0]*k).T,Theta0)
    for scaleidx in range(0,annotlen):
        scales[0,scaleidx] = np.power(0.2,2)
    # initialize variant membership across clusters
    deltam[0,:] = np.random.randint(0,C,m)
    # protective candidate alleles
    protind = numpy.zeros((niter+2,m))
    # Iterations MCMC samplers
    for iter in range(1,niter+1):
        gamma = 1
        if iter % 500 == 0:
            print(iter)
        ## a) Update \pi_0 : Proposal centered around the current value, Set gamma to 1 , how to set gamma?
        ## mhstep1
        pcproposal = np.random.dirichlet(alpha[iter-1,0]*pc[iter-1,0,:])
        # lnormDprop = math.lgamma(np.sum([alpha[iter-1,0]*i for i in pcproposal])) - np.sum([math.lgamma(max(alpha[iter-1,0]*i,epsilon)) for i in pcproposal])
        lnormDprop = math.lgamma(np.sum([gamma*i for i in pcproposal])) - np.sum([math.lgamma(max(gamma*i,epsilon)) for i in pcproposal])
        # second part of density
        # densitypropb = np.sum([(alpha[iter-1,0]*pcproposal[i] - 1)*np.log(pc[iter-1,0,i]) for i in range(0,C)])
        densitypropb = np.sum([(gamma*pcproposal[i] - 1)*np.log(pc[iter-1,0,i]) for i in range(0,C)])
        lpdirprop = lnormDprop + densitypropb
        #go through each gene
        lpdirpropgene = 0
        lnormDprop = math.lgamma(np.sum([alpha[iter-1,0]*i for i in pcproposal])) - np.sum([math.lgamma(max(alpha[iter-1,0]*i,epsilon)) for i in pcproposal])
        for geneidx in range(0,genenum):
            # second part of density
            densitypropb = np.sum([(alpha[iter-1,0]*pcproposal[i] - 1)*np.log(pcj[iter-1,geneidx,i]) for i in range(0,C)])
            lpdirpropgene += densitypropb + lnormDprop
        lpdirnum = lpdirprop + lpdirpropgene
        # denominator, iteration - 1 pc
        # lnormD = math.lgamma(np.sum([alpha[iter-1,0]*i for i in pc[iter-1,0,:]])) - np.sum([math.lgamma(max(alpha[iter-1,0]*i,epsilon)) for i in pc[iter-1,0,:]])
        lnormD = math.lgamma(np.sum([gamma*i for i in pc[iter-1,0,:]])) - np.sum([math.lgamma(max(gamma*i,epsilon)) for i in pc[iter-1,0,:]])
        # second part of density
        densityb = np.sum([(gamma*pc[iter-1,0,i] - 1)*np.log(pcproposal[i]) for i in range(0,C)])
        lpdir = lnormD + densityb
        #go through each gene
        lpdirgene = 0
        lnormD = math.lgamma(np.sum([alpha[iter-1,0]*i for i in pc[iter-1,0,:]])) - np.sum([math.lgamma(max(alpha[iter-1,0]*i,epsilon)) for i in pc[iter-1,0,:]])
        for geneidx in range(0,genenum):
            # second part of density
            densityb = np.sum([(alpha[iter-1,0]*pc[iter-1,0,i] - 1)*np.log(pcj[iter-1,geneidx,i]) for i in range(0,C)])
            lpdirgene += densityb + lnormD
        lpdirdenom = lpdir + lpdirgene
        lpdir = lpdirnum - lpdirdenom
        ## Metropolis-Hastings step
        if numpy.log(np.random.uniform(0,1,size = 1)[0]) < min(0, lpdir):
            acceptmh1 += 1
            pc[iter,0,:] = pcproposal
            if iter > burn:
                acceptmh1_postburnin += 1
        else:
            rejectmh1 += 1
            pc[iter,0,:] = pc[iter-1,0,:]
            if iter > burn:
                rejectmh1_postburnin += 1
        # b) For each gene j = 1, ..., J update \pi_j
        for geneidx in range(0,genenum):
            paramvecshared = alpha[iter-1,0]*pc[iter,0,:]
            for geneiter in range(0,len(genevec)):
                if genevec[geneiter] == genemap[geneidx]:
                    paramvecshared[int(deltam[iter-1,geneiter])] += 1
            pcj[iter,geneidx,:] = np.random.dirichlet(paramvecshared)
        # c) Update delta_jm
        xk = numpy.arange(0,C)
        for varidx in range(0,m):
            probmjc = [0]*C
            lprobmjcu = [0]*C
            uc = [0]*C
            varannot = annotvec[varidx]
            annotidx = [i for i in range(0,annotlen) if annotmap[i] == varannot][0]
            genevar = genevec[varidx]
            geneid = [i for i in range(0,len(genemap)) if genemap[i] == genevar][0]
            atmp = np.array(ses[varidx,:])[0]
            dtmp = numpy.matlib.eye(len(atmp))
            np.fill_diagonal(dtmp,atmp)
            Vjm = dtmp*S*dtmp + np.matlib.eye(S.shape[0])*.000001
            # Gives covariance matrix of variant effect on sets of phenotypes (after fixed effect meta-analysis has been applied across all studies available)
            for cidx in range(0,C):
                llk2 = multivariate_normal.logpdf(betas[varidx,:],np.sqrt(scales[iter-1,annotidx])*bc[iter-1,cidx,:],Vjm) + np.log(pcj[iter,geneid,cidx])
                if int(deltam[iter-1,varidx]) == cidx:
                    maxloglkiter[iter-1,0] += llk2
                lprobmjcu[cidx] += llk2
            #normalize uc - set to wc
            maxloglk = numpy.max(lprobmjcu)
            for cidx in range(0,C):
                uc[cidx] = numpy.exp(lprobmjcu[cidx] - maxloglk)
            for cidx in range(0,C):
                probmjc[cidx] = uc[cidx]/numpy.sum(uc)
            if numpy.isnan(probmjc[0]):
                wstmp = numpy.random.dirichlet(numpy.repeat(numpy.array([1]),C,axis = 0))
                custm = stats.rv_discrete(name='custm',values=(xk,wstmp))
            else:
                custm = stats.rv_discrete(name='custm',values=(xk,probmjc))
            deltam[iter,varidx] = int(custm.rvs(size=1)[0])
            if protectivescan:
                protbool = 0
                protadverse = 0
                for tmptidx in range(0, k):
                    if np.sqrt(scales[iter-1,annotidx])*bc[iter-1,int(deltam[iter,varidx]),tmptidx] >= maxlor:
                        protadverse = 1
                    if np.sqrt(scales[iter-1,annotidx])*bc[iter-1,int(deltam[iter,varidx]),tmptidx] < -.1:
                        protbool = 1
                if protbool == 1 and protadverse == 0:
                    protind[iter,varidx] = 1
        # d) Update b_c using a Gibbs update from a Gaussian distribution
        for cidx in range(1,C):
            cnt = 0
            mucurrenttmp1 = 0
            varcurrenttmp1 = 0
            mucurrenttmp2 = 0*betas[0,:]
            mucurrenttmp2 = mucurrenttmp2.T
            for varidx in range(0,m):
                if int(deltam[iter,varidx]) == cidx:
                    cnt += 1
                    if cnt == 1:
                        varannot = annotvec[varidx]
                        annotidx = [i for i in range(0,annotlen) if annotmap[i] == varannot][0]
                        atmp = np.array(ses[varidx,:])[0]
                        dtmp = numpy.matlib.eye(len(atmp))
                        np.fill_diagonal(dtmp,atmp)
                        Vjmtmp = dtmp*S*dtmp + np.matlib.eye(S.shape[0])*.000001
                        Vjminvtmp = np.linalg.inv(Vjmtmp)
                        mucurrenttmp1 = scales[iter-1,annotidx]*Vjminvtmp
                        mucurrenttmp2 = np.sqrt(scales[iter-1,annotidx])*Vjminvtmp*betas[varidx,:].T
                        varcurrenttmp1 = scales[iter-1,annotidx]*Vjminvtmp
                    else:
                        varannot = annotvec[varidx]
                        annotidx = [i for i in range(0,annotlen) if annotmap[i] == varannot][0]
                        atmp = np.array(ses[varidx,:])[0]
                        dtmp = numpy.matlib.eye(len(atmp))
                        np.fill_diagonal(dtmp,atmp)
                        Vjmtmp = dtmp*S*dtmp + np.matlib.eye(S.shape[0])*.000001
                        Vjminvtmp = np.linalg.inv(Vjmtmp)
                        mucurrenttmp1 += scales[iter-1,annotidx]*Vjminvtmp
                        mucurrenttmp2 += np.sqrt(scales[iter-1,annotidx])*Vjminvtmp*betas[varidx,:].T
                        varcurrenttmp1 += scales[iter-1,annotidx]*Vjminvtmp
            mucurrenttmp1 += Theta0inv
            varcurrenttmp1 += Theta0inv
            meanparam = np.ravel(np.linalg.inv(mucurrenttmp1)*mucurrenttmp2)
            varparam = np.linalg.inv(varcurrenttmp1)
            bc[iter,cidx,:] = np.random.multivariate_normal(meanparam,varparam)
        # e) Update scale sigma^2 annot.
        for annotidx in range(0,annotlen):
            scaleprop = abs(np.random.normal(np.sqrt(scales[iter-1,annotidx]),xi0,size = 1)[0])
            annotdata = annotmap[annotidx]
            probnum1 = stats.invgamma.logpdf(np.power(scaleprop,2),1,scale=1)
            probdenom1 = stats.invgamma.logpdf(scales[iter-1,annotidx],1,scale=1)
            lnum2 = 0
            ldenom2 = 0
            for varidx in range(0,m):
                if annotvec[varidx] == annotdata:
                    atmp = np.array(ses[varidx,:])[0]
                    dtmp = numpy.matlib.eye(len(atmp))
                    np.fill_diagonal(dtmp,atmp)
                    Vjm = dtmp*S*dtmp + np.matlib.eye(S.shape[0])*.000001
                    cidx = int(deltam[iter,varidx])
                    # print(cidx,iter,varidx)
                    lnum2 += multivariate_normal.logpdf(betas[varidx,:],scaleprop*bc[iter,cidx,:],Vjm)
                    ldenom2 += multivariate_normal.logpdf(betas[varidx,:],np.sqrt(scales[iter-1,annotidx])*bc[iter,cidx,:],Vjm)
            ## Metropolis-Hastings step
            if np.log(np.random.uniform(0,1,size = 1)[0]) < min(0, (lnum2 + probnum1) - (probdenom1 + ldenom2)):
                acceptmh2[annotidx] += 1
                scales[iter,annotidx] = np.power(scaleprop,2)
                if iter > burn:
                    acceptmh2_postburnin[annotidx] += 1
            else:
                rejectmh2[annotidx] += 1
                scales[iter,annotidx] = scales[iter-1,annotidx]
                if iter > burn:
                    rejectmh2_postburnin[annotidx] += 1
        # f) alpha
        alphaprop = abs(np.random.normal(alpha[iter-1,0],xialpha0,size = 1)[0])
        alphanum = -2*np.log(alphaprop) - 1/alphaprop
        alphadenom = -2*np.log(alpha[iter-1,0]) - 1/alpha[iter-1,0]
        alphanum2 = 0
        alphadenom2 = 0
        lnormDprop = 0
        lpdirpropgene = 0
        lnormDliter = 0
        lpdirlgene = 0
        densitypropa = 0
        densitya = 0
        lnormDprop = math.lgamma(np.sum([alphaprop*i for i in pc[iter,0,:]])) - np.sum([math.lgamma(max(alphaprop*i,epsilon)) for i in pc[iter,0,:]])
        lnormDliter = math.lgamma(np.sum([alpha[iter-1,0]*i for i in pc[iter,0,:]])) - np.sum([math.lgamma(max(alpha[iter-1,0]*i,epsilon)) for i in pc[iter,0,:]])
        for geneidx in range(0,genenum):
            densitypropa = np.sum([(alphaprop*pc[iter,0,:] - 1)*np.log(pcj[iter-1,geneidx,i]) for i in range(0,C)])
            lpdirpropgene += densitypropa + lnormDprop
            densitya = np.sum([(alpha[iter-1,0]*pc[iter,0,:] - 1)*np.log(pcj[iter-1,geneidx,i]) for i in range(0,C)])
            lpdirlgene += densitya + lnormDliter
        ladirnum = alphanum + lpdirpropgene
        ladirdenom = alphadenom + lpdirlgene
        ladir = ladirnum - ladirdenom
        ## Metropolis-Hastings step
        if numpy.log(np.random.uniform(0,1,size = 1)[0]) < min(0, ladir):
            acceptmh3 += 1
            alpha[iter,:] = alphaprop
            if iter > burn:
                acceptmh3_postburnin += 1
        else:
            rejectmh3 += 1
            alpha[iter,:] = alpha[iter-1,:]
            if iter > burn:
                rejectmh3_postburnin += 1
    ## Write output for input files
    mcout = open(outpath + str(fout) + '.mcmc.posteriors','w+')
    varprobdict = {}
    for varidx in range(0,m):
        mcout.write(chroffvec[varidx] + '\t' + annotvec[varidx] + '\t' + protvec[varidx] + '\t' + genevec[varidx] + '\t' + str(genevec[varidx] + ':' + annotvec[varidx] + ':' + protvec[varidx]))
        for cidx in range(0,C):
            probclustervar = numpy.where(deltam[burn+1:niter+1,varidx] == cidx)[0].shape[0]/(niter - burn)
            varprobdict[chroffvec[varidx],cidx + 1] = probclustervar
            mcout.write('\t' + str(probclustervar))
        mcout.write('\n')
    mcout.close()
    ## Write output for protective scan
    if protectivescan:
        protout = open(outpath + str(fout) + '.mcmc.protective','w+')
        for varidx in range(0,m):
            protout.write(chroffvec[varidx] + '\t' + annotvec[varidx] + '\t' + protvec[varidx] + '\t' + genevec[varidx] + '\t' + str(genevec[varidx] + ':' + annotvec[varidx] + ':' + protvec[varidx]))
            protdattmp = numpy.where(protind[burn+1:niter+1,varidx] == 1)[0].shape[0]/(niter - burn)
            protout.write('\t' + str(protdattmp))
            protout.write('\n')
        protout.close()
    fdrout = open(outpath + str(fout) + '.fdr','w+')
    print(str(fdr),file = fdrout)
    varprobnull = []
    varfdrid = []
    for varidx in range(0,m):
        varfdrid.append(chroffvec[varidx])
        varprobnull.append(varprobdict[chroffvec[varidx],1])
    idxsort = sorted(range(len(varprobnull)), key=lambda k: varprobnull[k])
    varprobnullsort = [varprobnull[i] for i in idxsort]
    varfdridsort = [varfdrid[i] for i in idxsort]
    numfdrtmp = 0
    counter = 0
    varlfdr = []
    for i in range(0,len(varprobnullsort)):
        counter += 1
        numfdrtmp += varprobnullsort[i]
        fdrtmp = numfdrtmp/counter
        if fdrtmp <= fdr:
            print(varfdridsort[i], file = fdrout)
    fdrout.close()
    rejectionrate = rejectmh1_postburnin/(acceptmh1_postburnin + rejectmh1_postburnin)
    logger.info(("Your acceptance rate is %2.2f") % ( rejectmh1_postburnin/(acceptmh1_postburnin + rejectmh1_postburnin)))
    genedatm50 = {}
    genedatl95 = {}
    genedatu95 = {}
    if verbose:
        probout = fout + '.mcmc.probs'
        numpy.savetxt(outpath + probout, deltam, fmt='%1.3f')
        bcout = open(outpath + str(fout) + '.mcmc.bc','w+')
        bcout.write('cluster')
        for i in range(0,len(phenidarr)):
            print(("\t%s\t%s\t%s") % (phenidarr[i] + 'm50',phenidarr[i] + 'l95', phenidarr[i] + 'u95'), end = '', file = bcout)
        bcout.write('\n')
        for cidx in range(0,C):
            mean = numpy.mean(bc[burn+1:niter+1:thinning,cidx,:],axis = 0)
            l95ci = numpy.percentile(bc[burn+1:niter+1:thinning,cidx,:],2.5, axis = 0)
            u95ci = numpy.percentile(bc[burn+1:niter+1:thinning,cidx,:],97.5, axis = 0)
            bcout.write(str(cidx))
            for phenidx in range(0,mean.shape[0]):
                print(("\t%2.2f\t%2.2f\t%2.2f") % (mean[phenidx], l95ci[phenidx], u95ci[phenidx]), end = '', file = bcout)
            bcout.write('\n')
        bcout.close()
        scaleout = open(outpath + str(fout) + '.mcmc.scale','w+')
        for annotidx in range(0,annotlen):
            mean = numpy.mean(np.sqrt(scales[burn+1:niter+1:thinning,annotidx]),axis = 0)
            l95ci = numpy.percentile(np.sqrt(scales[burn+1:niter+1:thinning,annotidx]),2.5, axis = 0)
            u95ci = numpy.percentile(np.sqrt(scales[burn+1:niter+1:thinning,annotidx]),97.5, axis = 0)
            print(("%s\t%s\t%2.2f\t%2.2f\t%2.2f") % (str(annotidx),annotmap[annotidx],mean,l95ci,u95ci), file = scaleout)
        scaleout.close()
        tmpbc = open(outpath + str(fout) + '.theta.bc', 'w+')
        for jidx in range(0,k):
            for kidx in range(0,k):
                print(Theta0[jidx,kidx], file = tmpbc,end = ' ')
            print('\n',end='',file=tmpbc)
        tmpbc.close()
    genesdict = {}
    for geneidx in range(0,genenum):
        genesdict[genemap[geneidx]] = genemap[geneidx]
        genedatm50[genemap[geneidx]] = np.percentile(pcj[burn+1:niter+1:thinning,geneidx,:],50, axis=0)
        genedatl95[genemap[geneidx]] = np.percentile(pcj[burn+1:niter+1:thinning,geneidx,:], 2.5, axis=0)
        genedatu95[genemap[geneidx]] = np.percentile(pcj[burn+1:niter+1:thinning,geneidx,:], 97.5, axis=0)
    alphaout = open(outpath + str(fout) + '.mcmc.alpha','w+')
    mean = numpy.mean(alpha[burn+1:niter+1:thinning,0],axis = 0)
    l95ci = numpy.percentile(alpha[burn+1:niter+1:thinning,0],2.5, axis = 0)
    u95ci = numpy.percentile(alpha[burn+1:niter+1:thinning,0],97.5, axis = 0)
    print(("%2.2f\t%2.2f\t%2.2f") % (mean,l95ci,u95ci), file = alphaout)
    alphaout.close()
    maxllkiter = np.max(maxloglkiter[burn+1:niter:thinning,0])
    BIC = -2*maxllkiter + (k+ genenum)*(C-1)*np.log(m)
    AIC = -2*maxllkiter + (k+ genenum)*(C-1)*2
    geneout = open(outpath + str(fout) + '.mcmc.gene.posteriors','w+')
    for genekey in genesdict.keys():
        print(genekey, file = geneout, end = '')
        for i in range(0,len(genedatm50[genekey])):
            print(("\t%2.2f") % (genedatm50[genekey][i]), file = geneout, end = '')
        for i in range(0,len(genedatl95[genekey])):
            print(("\t%2.2f") % (genedatl95[genekey][i]), file = geneout, end = '')
        for i in range(0,len(genedatu95[genekey])):
            print(("\t%2.2f") % (genedatu95[genekey][i]), file = geneout, end = '')
        geneout.write("\n")
    geneout.close()
    return [BIC,AIC,genedatm50]
def targeted(betas,ses,vymat,annotvec,genevec,protvec,chroffvec,clusters,fout,Rphen,Rpheninv,Rphenuse=True,niter=1000,burn=100,thinning=1,verbose=True, maxlor = 0.693, outpath='/Users/mrivas/'):
    """Targeted variant of the MRPMM Gibbs/Metropolis-Hastings sampler.

    Same hierarchical mixture model as `mrpmm` but with the protective
    scan always enabled and simpler output (no FDR scan, no phenotype
    header names). Writes `<fout>.mcmc.posteriors`, `<fout>.mcmc.protective`,
    `<fout>.mcmc.alpha` and (when `verbose`) `.mcmc.probs`, `.mcmc.bc`,
    `.mcmc.scale`, `.theta.bc` under `outpath`.

    Returns
    -------
    [BIC, AIC, genedat] where genedat maps gene name -> posterior mean
    (across post-burn-in, thinned iterations) cluster proportions.

    Fixes applied (review):
    - Added `outpath` keyword parameter (default matches `mrpmm`); the body
      used `outpath` but the signature never declared it -> NameError.
    - Cast `deltam` entries to int before using them as array indices,
      matching `mrpmm` (float indices are an error in modern NumPy).
    - Removed the stray no-op expression `pc[0,0,:]` before the geneset print.
    """
    print("Running MCMC algorithm...")
    epsilon = .0000000000000001
    storephensvar = []
    S = vymat
    xi0 = 1 # hyperparameter to control spread of proposals for annotation
    xialpha0 = 1
    betas = numpy.matrix(betas)
    ses = numpy.matrix(ses)
    S = numpy.matrix(S)
    Sinv = numpy.linalg.inv(S)
    # Let k be the number of clusters, where cluster 1 is the null model cluster
    C = clusters
    maxloglkiter = np.zeros((niter+2,1))
    # Let k be the number of phenotypes
    k = betas.shape[1]
    # Let m be the number of variants
    m = betas.shape[0]
    # Initialize
    #Sigma0 for alternative clusters
    if Rphenuse:
        if is_pos_def(Rphen):
            Theta0 = Rphen
            Theta0inv = Rpheninv
        else:
            Theta0 = sklearn.covariance.shrunk_covariance(Rphen)
            Theta0inv = numpy.linalg.inv(Theta0)
    else:
        Theta0 = numpy.eye(Rphen.shape[0])
        Theta0inv = numpy.linalg.inv(Theta0)
    #scale matrix
    geneset = set(genevec)
    genemap = list(geneset)
    annotset = set(annotvec)
    annotlen = len(annotset)
    annotmap = list(annotset)
    scales = numpy.zeros((niter+2,annotlen))
    # store the mean trait value across the clusters for individuals that are members
    bc = numpy.zeros((niter+2,C,k))
    # store the probabilities (proportions) of cluster memberships
    pc = numpy.zeros((niter+2,1,C))
    # store the probabilities (proportions) of cluster memberships for each gene
    genenum = len(set(genevec))
    pcj = numpy.zeros((niter+2,genenum,C))
    # for each iteration keep record of the variant membership
    deltam = numpy.zeros((niter+2,m))
    # non-normalized probabilities for each individual variant
    uc = numpy.zeros((niter+2,m,C))
    # normalized probabilities for each individual variant
    ws = numpy.zeros((niter+2,m,C))
    # for each iteration keep record of the variant membership
    tm = numpy.zeros((niter+2,m))
    #sharing parameter
    alpha = numpy.zeros((niter+2,1))
    ks = numpy.arange(1,C+1)
    # prot scan array
    protind = numpy.zeros((niter+2,m))
    sigmainvdict = {}
    sigmadict = {}
    thetadict = {}
    thetainvdict = {}
    for clusteriter in range(2,C+1):
        sigmadict[0,clusteriter] = S
        sigmainvdict[0,clusteriter] = Sinv
        thetadict[0,clusteriter] = Theta0
        thetainvdict[0,clusteriter] = Theta0inv
    # For Metropolois Hastings sub-step : keep track of acceptance rate
    acceptmh1 = 0
    rejectmh1 = 0
    acceptmh1_postburnin = 0
    rejectmh1_postburnin = 0
    acceptmh3 = 0
    rejectmh3 = 0
    acceptmh3_postburnin = 0
    rejectmh3_postburnin = 0
    acceptmh2 = [0]*annotlen
    rejectmh2 = [0]*annotlen
    acceptmh2_postburnin = [0]*annotlen
    rejectmh2_postburnin = [0]*annotlen
    # initialize \alpha : sharing of clusters across genes
    alpha[0,:] = invgamma.rvs(1,0,1,size = 1)
    # initialize pc (proportions across all variants)
    pc[0,0,:] = np.random.dirichlet([1]*C)
    # initialize pcj (proportions for each gene j)
    for geneidx in range(0,genenum):
        pcj[0,geneidx,:] = np.random.dirichlet(alpha[0,0]*pc[0,0,:])
    bc[0,0,:] = np.array([0]*k)
    for clusteridx in range(1,C):
        bc[0,clusteridx,:] = np.random.multivariate_normal(np.array([0]*k).T,Theta0)
    for scaleidx in range(0,annotlen):
        scales[0,scaleidx] = np.power(0.2,2)
    # initialize variant membership across clusters
    deltam[0,:] = np.random.randint(0,C,m)
    # Iterations MCMC samplers
    for iter in range(1,niter+1):
        gamma = 1
        if iter % 100 == 0:
            print(iter)
        ## a) Update \pi_0 : Proposal centred around the current value, Set gamma to 1 , how to set gamma?
        ## mhstep1
        pcproposal = np.random.dirichlet(alpha[iter-1,0]*pc[iter-1,0,:])
        # lnormDprop = math.lgamma(np.sum([alpha[iter-1,0]*i for i in pcproposal])) - np.sum([math.lgamma(max(alpha[iter-1,0]*i,epsilon)) for i in pcproposal])
        lnormDprop = math.lgamma(np.sum([gamma*i for i in pcproposal])) - np.sum([math.lgamma(max(gamma*i,epsilon)) for i in pcproposal])
        # second part of density
        # densitypropb = np.sum([(alpha[iter-1,0]*pcproposal[i] - 1)*np.log(pc[iter-1,0,i]) for i in range(0,C)])
        densitypropb = np.sum([(gamma*pcproposal[i] - 1)*np.log(pc[iter-1,0,i]) for i in range(0,C)])
        lpdirprop = lnormDprop + densitypropb
        #go through each gene
        lpdirpropgene = 0
        lnormDprop = math.lgamma(np.sum([alpha[iter-1,0]*i for i in pcproposal])) - np.sum([math.lgamma(max(alpha[iter-1,0]*i,epsilon)) for i in pcproposal])
        for geneidx in range(0,genenum):
            # second part of density
            densitypropb = np.sum([(alpha[iter-1,0]*pcproposal[i] - 1)*np.log(pcj[iter-1,geneidx,i]) for i in range(0,C)])
            lpdirpropgene += densitypropb + lnormDprop
        lpdirnum = lpdirprop + lpdirpropgene
        # denominator, iteration - 1 pc
        # lnormD = math.lgamma(np.sum([alpha[iter-1,0]*i for i in pc[iter-1,0,:]])) - np.sum([math.lgamma(max(alpha[iter-1,0]*i,epsilon)) for i in pc[iter-1,0,:]])
        lnormD = math.lgamma(np.sum([gamma*i for i in pc[iter-1,0,:]])) - np.sum([math.lgamma(max(gamma*i,epsilon)) for i in pc[iter-1,0,:]])
        # second part of density
        densityb = np.sum([(gamma*pc[iter-1,0,i] - 1)*np.log(pcproposal[i]) for i in range(0,C)])
        lpdir = lnormD + densityb
        #go through each gene
        lpdirgene = 0
        lnormD = math.lgamma(np.sum([alpha[iter-1,0]*i for i in pc[iter-1,0,:]])) - np.sum([math.lgamma(max(alpha[iter-1,0]*i,epsilon)) for i in pc[iter-1,0,:]])
        for geneidx in range(0,genenum):
            # second part of density
            densityb = np.sum([(alpha[iter-1,0]*pc[iter-1,0,i] - 1)*np.log(pcj[iter-1,geneidx,i]) for i in range(0,C)])
            lpdirgene += densityb + lnormD
        lpdirdenom = lpdir + lpdirgene
        lpdir = lpdirnum - lpdirdenom
        ## Metropolis-Hastings step
        if numpy.log(np.random.uniform(0,1,size = 1)[0]) < min(0, lpdir):
            acceptmh1 += 1
            pc[iter,0,:] = pcproposal
            if iter > burn:
                acceptmh1_postburnin += 1
        else:
            rejectmh1 += 1
            pc[iter,0,:] = pc[iter-1,0,:]
            if iter > burn:
                rejectmh1_postburnin += 1
        # b) For each gene j = 1, ..., J update \pi_j
        for geneidx in range(0,genenum):
            paramvecshared = alpha[iter-1,0]*pc[iter,0,:]
            for geneiter in range(0,len(genevec)):
                if genevec[geneiter] == genemap[geneidx]:
                    # int() cast: deltam holds floats; float indices are an error
                    paramvecshared[int(deltam[iter-1,geneiter])] += 1
            pcj[iter,geneidx,:] = np.random.dirichlet(paramvecshared)
        # c) Update delta_jm
        xk = numpy.arange(0,C)
        for varidx in range(0,m):
            probmjc = [0]*C
            lprobmjcu = [0]*C
            uc = [0]*C
            varannot = annotvec[varidx]
            annotidx = [i for i in range(0,annotlen) if annotmap[i] == varannot][0]
            genevar = genevec[varidx]
            geneid = [i for i in range(0,len(genemap)) if genemap[i] == genevar][0]
            atmp = np.array(ses[varidx,:])[0]
            dtmp = numpy.matlib.eye(len(atmp))
            np.fill_diagonal(dtmp,atmp)
            Vjm = dtmp*S*dtmp + np.matlib.eye(S.shape[0])*np.finfo(float).eps
            # Gives covariance matrix of variant effect on sets of phenotypes (after fixed effect meta-analysis has been applied across all studies available)
            for cidx in range(0,C):
                llk2 = multivariate_normal.logpdf(betas[varidx,:],np.sqrt(scales[iter-1,annotidx])*bc[iter-1,cidx,:],Vjm) + np.log(pcj[iter,geneid,cidx])
                if int(deltam[iter-1,varidx]) == cidx:
                    maxloglkiter[iter-1,0] += llk2
                lprobmjcu[cidx] += llk2
            #normalize uc - set to wc
            maxloglk = numpy.max(lprobmjcu)
            for cidx in range(0,C):
                uc[cidx] = numpy.exp(lprobmjcu[cidx] - maxloglk)
            for cidx in range(0,C):
                probmjc[cidx] = uc[cidx]/numpy.sum(uc)
            if numpy.isnan(probmjc[0]):
                wstmp = numpy.random.dirichlet(numpy.repeat(numpy.array([1]),C,axis = 0))
                custm = stats.rv_discrete(name='custm',values=(xk,wstmp))
            else:
                custm = stats.rv_discrete(name='custm',values=(xk,probmjc))
            deltam[iter,varidx] = int(custm.rvs(size=1)[0])
            protbool = 0
            protadverse = 0
            for tmptidx in range(0, k):
                if np.sqrt(scales[iter-1,annotidx])*bc[iter-1,int(deltam[iter,varidx]),tmptidx] >= maxlor:
                    protadverse = 1
                if np.sqrt(scales[iter-1,annotidx])*bc[iter-1,int(deltam[iter,varidx]),tmptidx] < -.1:
                    protbool = 1
            if protbool == 1 and protadverse == 0:
                protind[iter,varidx] = 1
        # d) Update b_c using a Gibbs update from a Gaussian distribution
        for cidx in range(1,C):
            cnt = 0
            mucurrenttmp1 = 0
            varcurrenttmp1 = 0
            mucurrenttmp2 = 0*betas[0,:]
            mucurrenttmp2 = mucurrenttmp2.T
            for varidx in range(0,m):
                if int(deltam[iter,varidx]) == cidx:
                    cnt += 1
                    if cnt == 1:
                        varannot = annotvec[varidx]
                        annotidx = [i for i in range(0,annotlen) if annotmap[i] == varannot][0]
                        atmp = np.array(ses[varidx,:])[0]
                        dtmp = numpy.matlib.eye(len(atmp))
                        np.fill_diagonal(dtmp,atmp)
                        Vjmtmp = dtmp*S*dtmp + np.matlib.eye(S.shape[0])*.000001
                        Vjminvtmp = np.linalg.inv(Vjmtmp)
                        mucurrenttmp1 = scales[iter-1,annotidx]*Vjminvtmp
                        mucurrenttmp2 = np.sqrt(scales[iter-1,annotidx])*Vjminvtmp*betas[varidx,:].T
                        varcurrenttmp1 = scales[iter-1,annotidx]*Vjminvtmp
                    else:
                        varannot = annotvec[varidx]
                        annotidx = [i for i in range(0,annotlen) if annotmap[i] == varannot][0]
                        atmp = np.array(ses[varidx,:])[0]
                        dtmp = numpy.matlib.eye(len(atmp))
                        np.fill_diagonal(dtmp,atmp)
                        Vjmtmp = dtmp*S*dtmp + np.matlib.eye(S.shape[0])*.000001
                        Vjminvtmp = np.linalg.inv(Vjmtmp)
                        mucurrenttmp1 += scales[iter-1,annotidx]*Vjminvtmp
                        mucurrenttmp2 += np.sqrt(scales[iter-1,annotidx])*Vjminvtmp*betas[varidx,:].T
                        varcurrenttmp1 += scales[iter-1,annotidx]*Vjminvtmp
            mucurrenttmp1 += Theta0inv
            varcurrenttmp1 += Theta0inv
            meanparam = np.ravel(np.linalg.inv(mucurrenttmp1)*mucurrenttmp2)
            varparam = np.linalg.inv(varcurrenttmp1)
            bc[iter,cidx,:] = np.random.multivariate_normal(meanparam,varparam)
        # e) Update scale sigma^2 annot.
        for annotidx in range(0,annotlen):
            scaleprop = abs(np.random.normal(np.sqrt(scales[iter-1,annotidx]),xi0,size = 1)[0])
            annotdata = annotmap[annotidx]
            probnum1 = stats.invgamma.logpdf(np.power(scaleprop,2),1,scale=1)
            probdenom1 = stats.invgamma.logpdf(scales[iter-1,annotidx],1,scale=1)
            lnum2 = 0
            ldenom2 = 0
            for varidx in range(0,m):
                if annotvec[varidx] == annotdata:
                    atmp = np.array(ses[varidx,:])[0]
                    dtmp = numpy.matlib.eye(len(atmp))
                    np.fill_diagonal(dtmp,atmp)
                    Vjm = dtmp*S*dtmp + np.matlib.eye(S.shape[0])*np.finfo(float).eps
                    cidx = int(deltam[iter,varidx])
                    lnum2 += multivariate_normal.logpdf(betas[varidx,:],scaleprop*bc[iter,cidx,:],Vjm)
                    ldenom2 += multivariate_normal.logpdf(betas[varidx,:],np.sqrt(scales[iter-1,annotidx])*bc[iter,cidx,:],Vjm)
            ## Metropolis-Hastings step
            if iter % 100 == 0:
                print(probnum1,probdenom1,lnum2,ldenom2)
            if np.log(np.random.uniform(0,1,size = 1)[0]) < min(0, (lnum2 + probnum1) - (probdenom1 + ldenom2)):
                acceptmh2[annotidx] += 1
                scales[iter,annotidx] = np.power(scaleprop,2)
                if iter > burn:
                    acceptmh2_postburnin[annotidx] += 1
            else:
                rejectmh2[annotidx] += 1
                scales[iter,annotidx] = scales[iter-1,annotidx]
                if iter > burn:
                    rejectmh2_postburnin[annotidx] += 1
        # f) alpha
        alphaprop = abs(np.random.normal(alpha[iter-1,0],xialpha0,size = 1)[0])
        alphanum = -2*np.log(alphaprop) - 1/alphaprop
        alphadenom = -2*np.log(alpha[iter-1,0]) - 1/alpha[iter-1,0]
        alphanum2 = 0
        alphadenom2 = 0
        lnormDprop = 0
        lpdirpropgene = 0
        lnormDliter = 0
        lpdirlgene = 0
        densitypropa = 0
        densitya = 0
        lnormDprop = math.lgamma(np.sum([alphaprop*i for i in pc[iter,0,:]])) - np.sum([math.lgamma(max(alphaprop*i,epsilon)) for i in pc[iter,0,:]])
        lnormDliter = math.lgamma(np.sum([alpha[iter-1,0]*i for i in pc[iter,0,:]])) - np.sum([math.lgamma(max(alpha[iter-1,0]*i,epsilon)) for i in pc[iter,0,:]])
        for geneidx in range(0,genenum):
            densitypropa = np.sum([(alphaprop*pc[iter,0,:] - 1)*np.log(pcj[iter-1,geneidx,i]) for i in range(0,C)])
            lpdirpropgene += densitypropa + lnormDprop
            densitya = np.sum([(alpha[iter-1,0]*pc[iter,0,:] - 1)*np.log(pcj[iter-1,geneidx,i]) for i in range(0,C)])
            lpdirlgene += densitya + lnormDliter
        ladirnum = alphanum + lpdirpropgene
        ladirdenom = alphadenom + lpdirlgene
        ladir = ladirnum - ladirdenom
        ## Metropolis-Hastings step
        if numpy.log(np.random.uniform(0,1,size = 1)[0]) < min(0, ladir):
            acceptmh3 += 1
            alpha[iter,:] = alphaprop
            if iter > burn:
                acceptmh3_postburnin += 1
        else:
            rejectmh3 += 1
            alpha[iter,:] = alpha[iter-1,:]
            if iter > burn:
                rejectmh3_postburnin += 1
    ## Write output for input files
    mcout = open(outpath + str(fout) + '.mcmc.posteriors','w+')
    for varidx in range(0,m):
        mcout.write(chroffvec[varidx] + '\t' + annotvec[varidx] + '\t' + protvec[varidx] + '\t' + genevec[varidx] + '\t' + str(genevec[varidx] + ':' + annotvec[varidx] + ':' + protvec[varidx]))
        for cidx in range(0,C):
            probclustervar = numpy.where(deltam[burn+1:niter+1,varidx] == cidx)[0].shape[0]/(niter - burn)
            mcout.write('\t' + str(probclustervar))
        mcout.write('\n')
    mcout.close()
    ## Write output for input files
    protout = open(outpath + str(fout) + '.mcmc.protective','w+')
    for varidx in range(0,m):
        protout.write(chroffvec[varidx] + '\t' + annotvec[varidx] + '\t' + protvec[varidx] + '\t' + genevec[varidx] + '\t' + str(genevec[varidx] + ':' + annotvec[varidx] + ':' + protvec[varidx]))
        protdattmp = numpy.where(protind[burn+1:niter+1,varidx] == 1)[0].shape[0]/(niter - burn)
        protout.write('\t' + str(protdattmp))
        protout.write('\n')
    protout.close()
    rejectionrate = rejectmh1_postburnin/(acceptmh1_postburnin + rejectmh1_postburnin)
    print(rejectmh1_postburnin,acceptmh1_postburnin)
    logger.info(("Your acceptance rate is %2.2f") % ( rejectmh1_postburnin/(acceptmh1_postburnin + rejectmh1_postburnin)))
    print(rejectmh2_postburnin,acceptmh2_postburnin)
    print(rejectmh3_postburnin,acceptmh3_postburnin)
    genedat = {}
    if verbose:
        probout = fout + '.mcmc.probs'
        numpy.savetxt(outpath + probout, deltam, fmt='%1.3f')
        bcout = open(outpath + str(fout) + '.mcmc.bc','w+')
        for cidx in range(0,C):
            mean = numpy.mean(bc[burn+1:niter+1:thinning,cidx,:],axis = 0)
            l95ci = numpy.percentile(bc[burn+1:niter+1:thinning,cidx,:],2.5, axis = 0)
            u95ci = numpy.percentile(bc[burn+1:niter+1:thinning,cidx,:],97.5, axis = 0)
            bcout.write(str(cidx))
            for phenidx in range(0,mean.shape[0]):
                print(("\t%2.2f\t%2.2f\t%2.2f") % (mean[phenidx], l95ci[phenidx], u95ci[phenidx]), end = '', file = bcout)
            bcout.write('\n')
        bcout.close()
        scaleout = open(outpath + str(fout) + '.mcmc.scale','w+')
        for annotidx in range(0,annotlen):
            mean = numpy.mean(np.sqrt(scales[burn+1:niter+1:thinning,annotidx]),axis = 0)
            l95ci = numpy.percentile(np.sqrt(scales[burn+1:niter+1:thinning,annotidx]),2.5, axis = 0)
            u95ci = numpy.percentile(np.sqrt(scales[burn+1:niter+1:thinning,annotidx]),97.5, axis = 0)
            print(("%s\t%s\t%2.2f\t%2.2f\t%2.2f") % (str(annotidx),annotmap[annotidx],mean,l95ci,u95ci), file = scaleout)
        scaleout.close()
        tmpbc = open(outpath + str(fout) + '.theta.bc', 'w+')
        for jidx in range(0,k):
            for kidx in range(0,k):
                print(Theta0[jidx,kidx], file = tmpbc,end = ' ')
            print('\n',end='',file=tmpbc)
        tmpbc.close()
    print('geneset', np.mean(pcj[burn+1:niter+1:thinning,:],axis=0))
    # initialize pcj (proportions for each gene j)
    for geneidx in range(0,genenum):
        genedat[genemap[geneidx]] = np.mean(pcj[burn+1:niter+1:thinning,geneidx,:],axis=0)
    alphaout = open(outpath + str(fout) + '.mcmc.alpha','w+')
    mean = numpy.mean(alpha[burn+1:niter+1:thinning,0],axis = 0)
    l95ci = numpy.percentile(alpha[burn+1:niter+1:thinning,0],2.5, axis = 0)
    u95ci = numpy.percentile(alpha[burn+1:niter+1:thinning,0],97.5, axis = 0)
    print(mean)
    print(("%2.2f\t%2.2f\t%2.2f") % (mean,l95ci,u95ci), file = alphaout)
    alphaout.close()
    maxllkiter = np.max(maxloglkiter[burn+1:niter:thinning,0])
    BIC = -2*maxllkiter + (k+ genenum)*(C-1)*np.log(m)
    AIC = -2*maxllkiter + (k+ genenum)*(C-1)*2
    # print((k+genenum)*(C-1)*np.log(m), k,genenum,C,m,np.log(m), maxllkiter)
    # print(maxloglkiter[burn+1:niter:thinning,0])
    return [BIC,AIC,genedat]
| [
"numpy.linalg.eigvals",
"numpy.sum",
"scipy.stats.invgamma.logpdf",
"scipy.stats.invgamma.rvs",
"numpy.random.randint",
"numpy.mean",
"numpy.random.normal",
"numpy.matlib.eye",
"scipy.stats.multivariate_normal.logpdf",
"numpy.power",
"numpy.finfo",
"numpy.max",
"numpy.random.dirichlet",
"n... | [((486, 510), 'logging.getLogger', 'logging.getLogger', (['"""Log"""'], {}), "('Log')\n", (503, 510), False, 'import logging\n'), ((595, 619), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""warn"""'}), "(divide='warn')\n", (604, 619), True, 'import numpy as np\n'), ((657, 669), 'numpy.matrix', 'np.matrix', (['x'], {}), '(x)\n', (666, 669), True, 'import numpy as np\n'), ((1663, 1687), 'numpy.zeros', 'np.zeros', (['(niter + 2, 1)'], {}), '((niter + 2, 1))\n', (1671, 1687), True, 'import numpy as np\n'), ((4083, 4112), 'scipy.stats.invgamma.rvs', 'invgamma.rvs', (['(1)', '(0)', '(1)'], {'size': '(1)'}), '(1, 0, 1, size=1)\n', (4095, 4112), False, 'from scipy.stats import invgamma\n'), ((4182, 4210), 'numpy.random.dirichlet', 'np.random.dirichlet', (['([1] * C)'], {}), '([1] * C)\n', (4201, 4210), True, 'import numpy as np\n'), ((4382, 4399), 'numpy.array', 'np.array', (['([0] * k)'], {}), '([0] * k)\n', (4390, 4399), True, 'import numpy as np\n'), ((4671, 4697), 'numpy.random.randint', 'np.random.randint', (['(0)', 'C', 'm'], {}), '(0, C, m)\n', (4688, 4697), True, 'import numpy as np\n'), ((20633, 20681), 'numpy.max', 'np.max', (['maxloglkiter[burn + 1:niter:thinning, 0]'], {}), '(maxloglkiter[burn + 1:niter:thinning, 0])\n', (20639, 20681), True, 'import numpy as np\n'), ((22028, 22052), 'numpy.zeros', 'np.zeros', (['(niter + 2, 1)'], {}), '((niter + 2, 1))\n', (22036, 22052), True, 'import numpy as np\n'), ((24455, 24484), 'scipy.stats.invgamma.rvs', 'invgamma.rvs', (['(1)', '(0)', '(1)'], {'size': '(1)'}), '(1, 0, 1, size=1)\n', (24467, 24484), False, 'from scipy.stats import invgamma\n'), ((24554, 24582), 'numpy.random.dirichlet', 'np.random.dirichlet', (['([1] * C)'], {}), '([1] * C)\n', (24573, 24582), True, 'import numpy as np\n'), ((24754, 24771), 'numpy.array', 'np.array', (['([0] * k)'], {}), '([0] * k)\n', (24762, 24771), True, 'import numpy as np\n'), ((25043, 25069), 'numpy.random.randint', 'np.random.randint', (['(0)', 'C', 'm'], {}), '(0, C, 
m)\n', (25060, 25069), True, 'import numpy as np\n'), ((39714, 39762), 'numpy.max', 'np.max', (['maxloglkiter[burn + 1:niter:thinning, 0]'], {}), '(maxloglkiter[burn + 1:niter:thinning, 0])\n', (39720, 39762), True, 'import numpy as np\n'), ((4324, 4370), 'numpy.random.dirichlet', 'np.random.dirichlet', (['(alpha[0, 0] * pc[0, 0, :])'], {}), '(alpha[0, 0] * pc[0, 0, :])\n', (4343, 4370), True, 'import numpy as np\n'), ((4585, 4601), 'numpy.power', 'np.power', (['(0.2)', '(2)'], {}), '(0.2, 2)\n', (4593, 4601), True, 'import numpy as np\n'), ((5053, 5113), 'numpy.random.dirichlet', 'np.random.dirichlet', (['(alpha[iter - 1, 0] * pc[iter - 1, 0, :])'], {}), '(alpha[iter - 1, 0] * pc[iter - 1, 0, :])\n', (5072, 5113), True, 'import numpy as np\n'), ((24696, 24742), 'numpy.random.dirichlet', 'np.random.dirichlet', (['(alpha[0, 0] * pc[0, 0, :])'], {}), '(alpha[0, 0] * pc[0, 0, :])\n', (24715, 24742), True, 'import numpy as np\n'), ((24957, 24973), 'numpy.power', 'np.power', (['(0.2)', '(2)'], {}), '(0.2, 2)\n', (24965, 24973), True, 'import numpy as np\n'), ((25350, 25410), 'numpy.random.dirichlet', 'np.random.dirichlet', (['(alpha[iter - 1, 0] * pc[iter - 1, 0, :])'], {}), '(alpha[iter - 1, 0] * pc[iter - 1, 0, :])\n', (25369, 25410), True, 'import numpy as np\n'), ((684, 704), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['x'], {}), '(x)\n', (701, 704), True, 'import numpy as np\n'), ((2007, 2050), 'sklearn.covariance.shrunk_covariance', 'sklearn.covariance.shrunk_covariance', (['Rphen'], {}), '(Rphen)\n', (2043, 2050), False, 'import sklearn\n'), ((8017, 8052), 'numpy.random.dirichlet', 'np.random.dirichlet', (['paramvecshared'], {}), '(paramvecshared)\n', (8036, 8052), True, 'import numpy as np\n'), ((8579, 8607), 'numpy.fill_diagonal', 'np.fill_diagonal', (['dtmp', 'atmp'], {}), '(dtmp, atmp)\n', (8595, 8607), True, 'import numpy as np\n'), ((12397, 12426), 'numpy.linalg.inv', 'np.linalg.inv', (['varcurrenttmp1'], {}), '(varcurrenttmp1)\n', (12410, 12426), True, 
'import numpy as np\n'), ((12457, 12507), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['meanparam', 'varparam'], {}), '(meanparam, varparam)\n', (12486, 12507), True, 'import numpy as np\n'), ((12833, 12894), 'scipy.stats.invgamma.logpdf', 'stats.invgamma.logpdf', (['scales[iter - 1, annotidx]', '(1)'], {'scale': '(1)'}), '(scales[iter - 1, annotidx], 1, scale=1)\n', (12854, 12894), False, 'from scipy import stats\n'), ((19954, 20025), 'numpy.percentile', 'np.percentile', (['pcj[burn + 1:niter + 1:thinning, geneidx, :]', '(50)'], {'axis': '(0)'}), '(pcj[burn + 1:niter + 1:thinning, geneidx, :], 50, axis=0)\n', (19967, 20025), True, 'import numpy as np\n'), ((20062, 20134), 'numpy.percentile', 'np.percentile', (['pcj[burn + 1:niter + 1:thinning, geneidx, :]', '(2.5)'], {'axis': '(0)'}), '(pcj[burn + 1:niter + 1:thinning, geneidx, :], 2.5, axis=0)\n', (20075, 20134), True, 'import numpy as np\n'), ((20172, 20245), 'numpy.percentile', 'np.percentile', (['pcj[burn + 1:niter + 1:thinning, geneidx, :]', '(97.5)'], {'axis': '(0)'}), '(pcj[burn + 1:niter + 1:thinning, geneidx, :], 97.5, axis=0)\n', (20185, 20245), True, 'import numpy as np\n'), ((20724, 20733), 'numpy.log', 'np.log', (['m'], {}), '(m)\n', (20730, 20733), True, 'import numpy as np\n'), ((22371, 22414), 'sklearn.covariance.shrunk_covariance', 'sklearn.covariance.shrunk_covariance', (['Rphen'], {}), '(Rphen)\n', (22407, 22414), False, 'import sklearn\n'), ((28309, 28344), 'numpy.random.dirichlet', 'np.random.dirichlet', (['paramvecshared'], {}), '(paramvecshared)\n', (28328, 28344), True, 'import numpy as np\n'), ((28871, 28899), 'numpy.fill_diagonal', 'np.fill_diagonal', (['dtmp', 'atmp'], {}), '(dtmp, atmp)\n', (28887, 28899), True, 'import numpy as np\n'), ((32601, 32630), 'numpy.linalg.inv', 'np.linalg.inv', (['varcurrenttmp1'], {}), '(varcurrenttmp1)\n', (32614, 32630), True, 'import numpy as np\n'), ((32661, 32711), 'numpy.random.multivariate_normal', 
'np.random.multivariate_normal', (['meanparam', 'varparam'], {}), '(meanparam, varparam)\n', (32690, 32711), True, 'import numpy as np\n'), ((33037, 33098), 'scipy.stats.invgamma.logpdf', 'stats.invgamma.logpdf', (['scales[iter - 1, annotidx]', '(1)'], {'scale': '(1)'}), '(scales[iter - 1, annotidx], 1, scale=1)\n', (33058, 33098), False, 'from scipy import stats\n'), ((39066, 39118), 'numpy.mean', 'np.mean', (['pcj[burn + 1:niter + 1:thinning, :]'], {'axis': '(0)'}), '(pcj[burn + 1:niter + 1:thinning, :], axis=0)\n', (39073, 39118), True, 'import numpy as np\n'), ((39250, 39311), 'numpy.mean', 'np.mean', (['pcj[burn + 1:niter + 1:thinning, geneidx, :]'], {'axis': '(0)'}), '(pcj[burn + 1:niter + 1:thinning, geneidx, :], axis=0)\n', (39257, 39311), True, 'import numpy as np\n'), ((39805, 39814), 'numpy.log', 'np.log', (['m'], {}), '(m)\n', (39811, 39814), True, 'import numpy as np\n'), ((4491, 4508), 'numpy.array', 'np.array', (['([0] * k)'], {}), '([0] * k)\n', (4499, 4508), True, 'import numpy as np\n'), ((5297, 5338), 'numpy.sum', 'np.sum', (['[(gamma * i) for i in pcproposal]'], {}), '([(gamma * i) for i in pcproposal])\n', (5303, 5338), True, 'import numpy as np\n'), ((5785, 5839), 'numpy.sum', 'np.sum', (['[(alpha[iter - 1, 0] * i) for i in pcproposal]'], {}), '([(alpha[iter - 1, 0] * i) for i in pcproposal])\n', (5791, 5839), True, 'import numpy as np\n'), ((6443, 6492), 'numpy.sum', 'np.sum', (['[(gamma * i) for i in pc[iter - 1, 0, :]]'], {}), '([(gamma * i) for i in pc[iter - 1, 0, :]])\n', (6449, 6492), True, 'import numpy as np\n'), ((6802, 6864), 'numpy.sum', 'np.sum', (['[(alpha[iter - 1, 0] * i) for i in pc[iter - 1, 0, :]]'], {}), '([(alpha[iter - 1, 0] * i) for i in pc[iter - 1, 0, :]])\n', (6808, 6864), True, 'import numpy as np\n'), ((8493, 8517), 'numpy.array', 'np.array', (['ses[varidx, :]'], {}), '(ses[varidx, :])\n', (8501, 8517), True, 'import numpy as np\n'), ((9609, 9660), 'scipy.stats.rv_discrete', 'stats.rv_discrete', ([], {'name': 
'"""custm"""', 'values': '(xk, wstmp)'}), "(name='custm', values=(xk, wstmp))\n", (9626, 9660), False, 'from scipy import stats\n'), ((9701, 9754), 'scipy.stats.rv_discrete', 'stats.rv_discrete', ([], {'name': '"""custm"""', 'values': '(xk, probmjc)'}), "(name='custm', values=(xk, probmjc))\n", (9718, 9754), False, 'from scipy import stats\n'), ((12775, 12797), 'numpy.power', 'np.power', (['scaleprop', '(2)'], {}), '(scaleprop, 2)\n', (12783, 12797), True, 'import numpy as np\n'), ((13817, 13839), 'numpy.power', 'np.power', (['scaleprop', '(2)'], {}), '(scaleprop, 2)\n', (13825, 13839), True, 'import numpy as np\n'), ((14181, 14235), 'numpy.random.normal', 'np.random.normal', (['alpha[iter - 1, 0]', 'xialpha0'], {'size': '(1)'}), '(alpha[iter - 1, 0], xialpha0, size=1)\n', (14197, 14235), True, 'import numpy as np\n'), ((14259, 14276), 'numpy.log', 'np.log', (['alphaprop'], {}), '(alphaprop)\n', (14265, 14276), True, 'import numpy as np\n'), ((14316, 14342), 'numpy.log', 'np.log', (['alpha[iter - 1, 0]'], {}), '(alpha[iter - 1, 0])\n', (14322, 14342), True, 'import numpy as np\n'), ((14581, 14630), 'numpy.sum', 'np.sum', (['[(alphaprop * i) for i in pc[iter, 0, :]]'], {}), '([(alphaprop * i) for i in pc[iter, 0, :]])\n', (14587, 14630), True, 'import numpy as np\n'), ((14732, 14790), 'numpy.sum', 'np.sum', (['[(alpha[iter - 1, 0] * i) for i in pc[iter, 0, :]]'], {}), '([(alpha[iter - 1, 0] * i) for i in pc[iter, 0, :]])\n', (14738, 14790), True, 'import numpy as np\n'), ((19116, 19170), 'numpy.sqrt', 'np.sqrt', (['scales[burn + 1:niter + 1:thinning, annotidx]'], {}), '(scales[burn + 1:niter + 1:thinning, annotidx])\n', (19123, 19170), True, 'import numpy as np\n'), ((19213, 19267), 'numpy.sqrt', 'np.sqrt', (['scales[burn + 1:niter + 1:thinning, annotidx]'], {}), '(scales[burn + 1:niter + 1:thinning, annotidx])\n', (19220, 19267), True, 'import numpy as np\n'), ((19315, 19369), 'numpy.sqrt', 'np.sqrt', (['scales[burn + 1:niter + 1:thinning, annotidx]'], {}), 
'(scales[burn + 1:niter + 1:thinning, annotidx])\n', (19322, 19369), True, 'import numpy as np\n'), ((24863, 24880), 'numpy.array', 'np.array', (['([0] * k)'], {}), '([0] * k)\n', (24871, 24880), True, 'import numpy as np\n'), ((25594, 25635), 'numpy.sum', 'np.sum', (['[(gamma * i) for i in pcproposal]'], {}), '([(gamma * i) for i in pcproposal])\n', (25600, 25635), True, 'import numpy as np\n'), ((26082, 26136), 'numpy.sum', 'np.sum', (['[(alpha[iter - 1, 0] * i) for i in pcproposal]'], {}), '([(alpha[iter - 1, 0] * i) for i in pcproposal])\n', (26088, 26136), True, 'import numpy as np\n'), ((26740, 26789), 'numpy.sum', 'np.sum', (['[(gamma * i) for i in pc[iter - 1, 0, :]]'], {}), '([(gamma * i) for i in pc[iter - 1, 0, :]])\n', (26746, 26789), True, 'import numpy as np\n'), ((27099, 27161), 'numpy.sum', 'np.sum', (['[(alpha[iter - 1, 0] * i) for i in pc[iter - 1, 0, :]]'], {}), '([(alpha[iter - 1, 0] * i) for i in pc[iter - 1, 0, :]])\n', (27105, 27161), True, 'import numpy as np\n'), ((28785, 28809), 'numpy.array', 'np.array', (['ses[varidx, :]'], {}), '(ses[varidx, :])\n', (28793, 28809), True, 'import numpy as np\n'), ((29908, 29959), 'scipy.stats.rv_discrete', 'stats.rv_discrete', ([], {'name': '"""custm"""', 'values': '(xk, wstmp)'}), "(name='custm', values=(xk, wstmp))\n", (29925, 29959), False, 'from scipy import stats\n'), ((30000, 30053), 'scipy.stats.rv_discrete', 'stats.rv_discrete', ([], {'name': '"""custm"""', 'values': '(xk, probmjc)'}), "(name='custm', values=(xk, probmjc))\n", (30017, 30053), False, 'from scipy import stats\n'), ((32979, 33001), 'numpy.power', 'np.power', (['scaleprop', '(2)'], {}), '(scaleprop, 2)\n', (32987, 33001), True, 'import numpy as np\n'), ((34072, 34094), 'numpy.power', 'np.power', (['scaleprop', '(2)'], {}), '(scaleprop, 2)\n', (34080, 34094), True, 'import numpy as np\n'), ((34436, 34490), 'numpy.random.normal', 'np.random.normal', (['alpha[iter - 1, 0]', 'xialpha0'], {'size': '(1)'}), '(alpha[iter - 1, 0], xialpha0, 
size=1)\n', (34452, 34490), True, 'import numpy as np\n'), ((34514, 34531), 'numpy.log', 'np.log', (['alphaprop'], {}), '(alphaprop)\n', (34520, 34531), True, 'import numpy as np\n'), ((34571, 34597), 'numpy.log', 'np.log', (['alpha[iter - 1, 0]'], {}), '(alpha[iter - 1, 0])\n', (34577, 34597), True, 'import numpy as np\n'), ((34836, 34885), 'numpy.sum', 'np.sum', (['[(alphaprop * i) for i in pc[iter, 0, :]]'], {}), '([(alphaprop * i) for i in pc[iter, 0, :]])\n', (34842, 34885), True, 'import numpy as np\n'), ((34987, 35045), 'numpy.sum', 'np.sum', (['[(alpha[iter - 1, 0] * i) for i in pc[iter, 0, :]]'], {}), '([(alpha[iter - 1, 0] * i) for i in pc[iter, 0, :]])\n', (34993, 35045), True, 'import numpy as np\n'), ((38351, 38405), 'numpy.sqrt', 'np.sqrt', (['scales[burn + 1:niter + 1:thinning, annotidx]'], {}), '(scales[burn + 1:niter + 1:thinning, annotidx])\n', (38358, 38405), True, 'import numpy as np\n'), ((38448, 38502), 'numpy.sqrt', 'np.sqrt', (['scales[burn + 1:niter + 1:thinning, annotidx]'], {}), '(scales[burn + 1:niter + 1:thinning, annotidx])\n', (38455, 38502), True, 'import numpy as np\n'), ((38550, 38604), 'numpy.sqrt', 'np.sqrt', (['scales[burn + 1:niter + 1:thinning, annotidx]'], {}), '(scales[burn + 1:niter + 1:thinning, annotidx])\n', (38557, 38604), True, 'import numpy as np\n'), ((5605, 5631), 'numpy.log', 'np.log', (['pc[iter - 1, 0, i]'], {}), '(pc[iter - 1, 0, i])\n', (5611, 5631), True, 'import numpy as np\n'), ((6643, 6664), 'numpy.log', 'np.log', (['pcproposal[i]'], {}), '(pcproposal[i])\n', (6649, 6664), True, 'import numpy as np\n'), ((7310, 7341), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': '(1)'}), '(0, 1, size=1)\n', (7327, 7341), True, 'import numpy as np\n'), ((8639, 8664), 'numpy.matlib.eye', 'np.matlib.eye', (['S.shape[0]'], {}), '(S.shape[0])\n', (8652, 8664), True, 'import numpy as np\n'), ((8993, 9024), 'numpy.log', 'np.log', (['pcj[iter, geneid, cidx]'], {}), '(pcj[iter, geneid, cidx])\n', (8999, 
9024), True, 'import numpy as np\n'), ((12330, 12358), 'numpy.linalg.inv', 'np.linalg.inv', (['mucurrenttmp1'], {}), '(mucurrenttmp1)\n', (12343, 12358), True, 'import numpy as np\n'), ((13153, 13181), 'numpy.fill_diagonal', 'np.fill_diagonal', (['dtmp', 'atmp'], {}), '(dtmp, atmp)\n', (13169, 13181), True, 'import numpy as np\n'), ((13381, 13466), 'scipy.stats.multivariate_normal.logpdf', 'multivariate_normal.logpdf', (['betas[varidx, :]', '(scaleprop * bc[iter, cidx, :])', 'Vjm'], {}), '(betas[varidx, :], scaleprop * bc[iter, cidx, :], Vjm\n )\n', (13407, 13466), False, 'from scipy.stats import multivariate_normal\n'), ((15424, 15455), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': '(1)'}), '(0, 1, size=1)\n', (15441, 15455), True, 'import numpy as np\n'), ((25902, 25928), 'numpy.log', 'np.log', (['pc[iter - 1, 0, i]'], {}), '(pc[iter - 1, 0, i])\n', (25908, 25928), True, 'import numpy as np\n'), ((26940, 26961), 'numpy.log', 'np.log', (['pcproposal[i]'], {}), '(pcproposal[i])\n', (26946, 26961), True, 'import numpy as np\n'), ((27607, 27638), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': '(1)'}), '(0, 1, size=1)\n', (27624, 27638), True, 'import numpy as np\n'), ((28931, 28956), 'numpy.matlib.eye', 'np.matlib.eye', (['S.shape[0]'], {}), '(S.shape[0])\n', (28944, 28956), True, 'import numpy as np\n'), ((29297, 29328), 'numpy.log', 'np.log', (['pcj[iter, geneid, cidx]'], {}), '(pcj[iter, geneid, cidx])\n', (29303, 29328), True, 'import numpy as np\n'), ((32534, 32562), 'numpy.linalg.inv', 'np.linalg.inv', (['mucurrenttmp1'], {}), '(mucurrenttmp1)\n', (32547, 32562), True, 'import numpy as np\n'), ((33357, 33385), 'numpy.fill_diagonal', 'np.fill_diagonal', (['dtmp', 'atmp'], {}), '(dtmp, atmp)\n', (33373, 33385), True, 'import numpy as np\n'), ((33547, 33632), 'scipy.stats.multivariate_normal.logpdf', 'multivariate_normal.logpdf', (['betas[varidx, :]', '(scaleprop * bc[iter, cidx, :])', 'Vjm'], {}), '(betas[varidx, 
:], scaleprop * bc[iter, cidx, :], Vjm\n )\n', (33573, 33632), False, 'from scipy.stats import multivariate_normal\n'), ((35679, 35710), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': '(1)'}), '(0, 1, size=1)\n', (35696, 35710), True, 'import numpy as np\n'), ((6059, 6092), 'numpy.log', 'np.log', (['pcj[iter - 1, geneidx, i]'], {}), '(pcj[iter - 1, geneidx, i])\n', (6065, 6092), True, 'import numpy as np\n'), ((7081, 7114), 'numpy.log', 'np.log', (['pcj[iter - 1, geneidx, i]'], {}), '(pcj[iter - 1, geneidx, i])\n', (7087, 7114), True, 'import numpy as np\n'), ((11066, 11094), 'numpy.fill_diagonal', 'np.fill_diagonal', (['dtmp', 'atmp'], {}), '(dtmp, atmp)\n', (11082, 11094), True, 'import numpy as np\n'), ((11211, 11232), 'numpy.linalg.inv', 'np.linalg.inv', (['Vjmtmp'], {}), '(Vjmtmp)\n', (11224, 11232), True, 'import numpy as np\n'), ((11798, 11826), 'numpy.fill_diagonal', 'np.fill_diagonal', (['dtmp', 'atmp'], {}), '(dtmp, atmp)\n', (11814, 11826), True, 'import numpy as np\n'), ((11943, 11964), 'numpy.linalg.inv', 'np.linalg.inv', (['Vjmtmp'], {}), '(Vjmtmp)\n', (11956, 11964), True, 'import numpy as np\n'), ((12636, 12671), 'numpy.sqrt', 'np.sqrt', (['scales[iter - 1, annotidx]'], {}), '(scales[iter - 1, annotidx])\n', (12643, 12671), True, 'import numpy as np\n'), ((13051, 13075), 'numpy.array', 'np.array', (['ses[varidx, :]'], {}), '(ses[varidx, :])\n', (13059, 13075), True, 'import numpy as np\n'), ((13645, 13676), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': '(1)'}), '(0, 1, size=1)\n', (13662, 13676), True, 'import numpy as np\n'), ((14966, 14999), 'numpy.log', 'np.log', (['pcj[iter - 1, geneidx, i]'], {}), '(pcj[iter - 1, geneidx, i])\n', (14972, 14999), True, 'import numpy as np\n'), ((15139, 15172), 'numpy.log', 'np.log', (['pcj[iter - 1, geneidx, i]'], {}), '(pcj[iter - 1, geneidx, i])\n', (15145, 15172), True, 'import numpy as np\n'), ((26356, 26389), 'numpy.log', 'np.log', (['pcj[iter - 1, geneidx, 
i]'], {}), '(pcj[iter - 1, geneidx, i])\n', (26362, 26389), True, 'import numpy as np\n'), ((27378, 27411), 'numpy.log', 'np.log', (['pcj[iter - 1, geneidx, i]'], {}), '(pcj[iter - 1, geneidx, i])\n', (27384, 27411), True, 'import numpy as np\n'), ((28957, 28972), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (28965, 28972), True, 'import numpy as np\n'), ((30219, 30254), 'numpy.sqrt', 'np.sqrt', (['scales[iter - 1, annotidx]'], {}), '(scales[iter - 1, annotidx])\n', (30226, 30254), True, 'import numpy as np\n'), ((30357, 30392), 'numpy.sqrt', 'np.sqrt', (['scales[iter - 1, annotidx]'], {}), '(scales[iter - 1, annotidx])\n', (30364, 30392), True, 'import numpy as np\n'), ((31270, 31298), 'numpy.fill_diagonal', 'np.fill_diagonal', (['dtmp', 'atmp'], {}), '(dtmp, atmp)\n', (31286, 31298), True, 'import numpy as np\n'), ((31415, 31436), 'numpy.linalg.inv', 'np.linalg.inv', (['Vjmtmp'], {}), '(Vjmtmp)\n', (31428, 31436), True, 'import numpy as np\n'), ((32002, 32030), 'numpy.fill_diagonal', 'np.fill_diagonal', (['dtmp', 'atmp'], {}), '(dtmp, atmp)\n', (32018, 32030), True, 'import numpy as np\n'), ((32147, 32168), 'numpy.linalg.inv', 'np.linalg.inv', (['Vjmtmp'], {}), '(Vjmtmp)\n', (32160, 32168), True, 'import numpy as np\n'), ((32840, 32875), 'numpy.sqrt', 'np.sqrt', (['scales[iter - 1, annotidx]'], {}), '(scales[iter - 1, annotidx])\n', (32847, 32875), True, 'import numpy as np\n'), ((33255, 33279), 'numpy.array', 'np.array', (['ses[varidx, :]'], {}), '(ses[varidx, :])\n', (33263, 33279), True, 'import numpy as np\n'), ((33900, 33931), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': '(1)'}), '(0, 1, size=1)\n', (33917, 33931), True, 'import numpy as np\n'), ((35221, 35254), 'numpy.log', 'np.log', (['pcj[iter - 1, geneidx, i]'], {}), '(pcj[iter - 1, geneidx, i])\n', (35227, 35254), True, 'import numpy as np\n'), ((35394, 35427), 'numpy.log', 'np.log', (['pcj[iter - 1, geneidx, i]'], {}), '(pcj[iter - 1, geneidx, i])\n', (35400, 
35427), True, 'import numpy as np\n'), ((8935, 8970), 'numpy.sqrt', 'np.sqrt', (['scales[iter - 1, annotidx]'], {}), '(scales[iter - 1, annotidx])\n', (8942, 8970), True, 'import numpy as np\n'), ((9972, 10007), 'numpy.sqrt', 'np.sqrt', (['scales[iter - 1, annotidx]'], {}), '(scales[iter - 1, annotidx])\n', (9979, 10007), True, 'import numpy as np\n'), ((10956, 10980), 'numpy.array', 'np.array', (['ses[varidx, :]'], {}), '(ses[varidx, :])\n', (10964, 10980), True, 'import numpy as np\n'), ((11688, 11712), 'numpy.array', 'np.array', (['ses[varidx, :]'], {}), '(ses[varidx, :])\n', (11696, 11712), True, 'import numpy as np\n'), ((13221, 13246), 'numpy.matlib.eye', 'np.matlib.eye', (['S.shape[0]'], {}), '(S.shape[0])\n', (13234, 13246), True, 'import numpy as np\n'), ((13529, 13564), 'numpy.sqrt', 'np.sqrt', (['scales[iter - 1, annotidx]'], {}), '(scales[iter - 1, annotidx])\n', (13536, 13564), True, 'import numpy as np\n'), ((29239, 29274), 'numpy.sqrt', 'np.sqrt', (['scales[iter - 1, annotidx]'], {}), '(scales[iter - 1, annotidx])\n', (29246, 29274), True, 'import numpy as np\n'), ((31160, 31184), 'numpy.array', 'np.array', (['ses[varidx, :]'], {}), '(ses[varidx, :])\n', (31168, 31184), True, 'import numpy as np\n'), ((31892, 31916), 'numpy.array', 'np.array', (['ses[varidx, :]'], {}), '(ses[varidx, :])\n', (31900, 31916), True, 'import numpy as np\n'), ((33425, 33450), 'numpy.matlib.eye', 'np.matlib.eye', (['S.shape[0]'], {}), '(S.shape[0])\n', (33438, 33450), True, 'import numpy as np\n'), ((33695, 33730), 'numpy.sqrt', 'np.sqrt', (['scales[iter - 1, annotidx]'], {}), '(scales[iter - 1, annotidx])\n', (33702, 33730), True, 'import numpy as np\n'), ((10127, 10162), 'numpy.sqrt', 'np.sqrt', (['scales[iter - 1, annotidx]'], {}), '(scales[iter - 1, annotidx])\n', (10134, 10162), True, 'import numpy as np\n'), ((11141, 11166), 'numpy.matlib.eye', 'np.matlib.eye', (['S.shape[0]'], {}), '(S.shape[0])\n', (11154, 11166), True, 'import numpy as np\n'), ((11347, 11382), 
'numpy.sqrt', 'np.sqrt', (['scales[iter - 1, annotidx]'], {}), '(scales[iter - 1, annotidx])\n', (11354, 11382), True, 'import numpy as np\n'), ((11873, 11898), 'numpy.matlib.eye', 'np.matlib.eye', (['S.shape[0]'], {}), '(S.shape[0])\n', (11886, 11898), True, 'import numpy as np\n'), ((12081, 12116), 'numpy.sqrt', 'np.sqrt', (['scales[iter - 1, annotidx]'], {}), '(scales[iter - 1, annotidx])\n', (12088, 12116), True, 'import numpy as np\n'), ((31345, 31370), 'numpy.matlib.eye', 'np.matlib.eye', (['S.shape[0]'], {}), '(S.shape[0])\n', (31358, 31370), True, 'import numpy as np\n'), ((31551, 31586), 'numpy.sqrt', 'np.sqrt', (['scales[iter - 1, annotidx]'], {}), '(scales[iter - 1, annotidx])\n', (31558, 31586), True, 'import numpy as np\n'), ((32077, 32102), 'numpy.matlib.eye', 'np.matlib.eye', (['S.shape[0]'], {}), '(S.shape[0])\n', (32090, 32102), True, 'import numpy as np\n'), ((32285, 32320), 'numpy.sqrt', 'np.sqrt', (['scales[iter - 1, annotidx]'], {}), '(scales[iter - 1, annotidx])\n', (32292, 32320), True, 'import numpy as np\n'), ((33451, 33466), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (33459, 33466), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from sklearn import svm
import numpy as np
import matplotlib.pyplot as plt
from utils.FScore import F1Score
from Identification.LoadDescriptors import loadAllDescriptors
from Identification.PreprocessingDescriptors import preprocessDescriptors
from Identification.TrainCvTest import separateDatabases
# Load all audio descriptors (including reverb variants), normalize them,
# and split into train / cross-validation / test partitions.
Descriptors = loadAllDescriptors(reverbs=True)
normalized_features, yClass, features_names = preprocessDescriptors(Descriptors)
del Descriptors # no longer needed after preprocessing
normalizedTrain, yTrain, normalizedCV, yCV, normalizedTest, yTest = separateDatabases(normalized_features, yClass)
def test_data_size(training_features, training_classes, test_features, test_classes):
    """Compute a learning curve for a fixed-hyperparameter SVM.

    Trains an SVC (C=1.833, gamma=0.1366) on progressively larger random
    subsets of the training data (50 sizes from 10% to 100%) and records the
    mean F1 score on the training subset and on the held-out set.

    Returns:
        (f_train, f_cv, test_size): arrays of training/held-out F1 scores and
        the list of subset sizes used.
    """
    n_samples = len(training_classes)
    shuffled_idx = np.arange(0, n_samples)
    np.random.shuffle(shuffled_idx)

    # 50 subset sizes spanning 10% .. 100% of the shuffled training data
    subset_sizes = [int(frac) for frac in np.linspace(0.1, 1, 50) * n_samples]

    train_scores = []
    cv_scores = []
    classifier = svm.SVC(C=1.833, gamma=0.1366, cache_size=1000)
    for size in subset_sizes:
        subset = shuffled_idx[0:size]
        classifier.fit(training_features[subset], training_classes[subset])
        score_on_train = np.mean(F1Score(training_features[subset],
                                       training_classes[subset], classifier).values())
        train_scores = np.append(train_scores, score_on_train)
        score_on_cv = np.mean(F1Score(test_features, test_classes, classifier).values())
        cv_scores = np.append(cv_scores, score_on_cv)
    return train_scores, cv_scores, subset_sizes
# Plot the learning curve: error (1 - F1) on the training set vs. the
# cross-validation set as a function of training-set size.
F1Train, F1CV, testSize = test_data_size(normalizedTrain, yTrain, normalizedCV, yCV)
plt.xlabel("Cantidad de muestras", fontsize=20)
plt.ylabel("Error", fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=15)
plt.text(2000, 0.3, r'$C=1.833,\ \Gamma=0.1366$', fontsize=22)
plt.text(1000, 0.07, 'Medida-F en la base\nde entrenamiento', fontsize=20, color='blue')
# Fix: the builtin `unicode(...)` exists only in Python 2 and raises a
# NameError on Python 3. A u'' literal is equivalent and valid on both.
text = u'Medida-F en la base\nde validación cruzada'
plt.text(1000, 0.25, text, fontsize=20, color='green')
plt.plot(testSize, 1 - F1Train, c='blue', linewidth=4.0)
plt.plot(testSize, 1 - F1CV, color='green', linewidth=4.0)
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"utils.FScore.F1Score",
"matplotlib.pyplot.text",
"Identification.TrainCvTest.separateDatabases",
"Identification.PreprocessingDescriptors.preprocessDescriptors",
"numpy.linspace",
"sklearn.svm.SVC",
"matplotlib.pyplot.tick_params",
"matplotlib.p... | [((342, 374), 'Identification.LoadDescriptors.loadAllDescriptors', 'loadAllDescriptors', ([], {'reverbs': '(True)'}), '(reverbs=True)\n', (360, 374), False, 'from Identification.LoadDescriptors import loadAllDescriptors\n'), ((421, 455), 'Identification.PreprocessingDescriptors.preprocessDescriptors', 'preprocessDescriptors', (['Descriptors'], {}), '(Descriptors)\n', (442, 455), False, 'from Identification.PreprocessingDescriptors import preprocessDescriptors\n'), ((567, 613), 'Identification.TrainCvTest.separateDatabases', 'separateDatabases', (['normalized_features', 'yClass'], {}), '(normalized_features, yClass)\n', (584, 613), False, 'from Identification.TrainCvTest import separateDatabases\n'), ((1479, 1526), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Cantidad de muestras"""'], {'fontsize': '(20)'}), "('Cantidad de muestras', fontsize=20)\n", (1489, 1526), True, 'import matplotlib.pyplot as plt\n'), ((1527, 1559), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error"""'], {'fontsize': '(20)'}), "('Error', fontsize=20)\n", (1537, 1559), True, 'import matplotlib.pyplot as plt\n'), ((1559, 1616), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""major"""', 'labelsize': '(15)'}), "(axis='both', which='major', labelsize=15)\n", (1574, 1616), True, 'import matplotlib.pyplot as plt\n'), ((1617, 1680), 'matplotlib.pyplot.text', 'plt.text', (['(2000)', '(0.3)', '"""$C=1.833,\\\\ \\\\Gamma=0.1366$"""'], {'fontsize': '(22)'}), "(2000, 0.3, '$C=1.833,\\\\ \\\\Gamma=0.1366$', fontsize=22)\n", (1625, 1680), True, 'import matplotlib.pyplot as plt\n'), ((1680, 1776), 'matplotlib.pyplot.text', 'plt.text', (['(1000)', '(0.07)', '"""Medida-F en la base\nde entrenamiento"""'], {'fontsize': '(20)', 'color': '"""blue"""'}), '(1000, 0.07, """Medida-F en la base\nde entrenamiento""", fontsize=\n 20, color=\'blue\')\n', (1688, 1776), True, 'import matplotlib.pyplot as plt\n'), ((1839, 1893), 'matplotlib.pyplot.text', 
'plt.text', (['(1000)', '(0.25)', 'text'], {'fontsize': '(20)', 'color': '"""green"""'}), "(1000, 0.25, text, fontsize=20, color='green')\n", (1847, 1893), True, 'import matplotlib.pyplot as plt\n'), ((1894, 1950), 'matplotlib.pyplot.plot', 'plt.plot', (['testSize', '(1 - F1Train)'], {'c': '"""blue"""', 'linewidth': '(4.0)'}), "(testSize, 1 - F1Train, c='blue', linewidth=4.0)\n", (1902, 1950), True, 'import matplotlib.pyplot as plt\n'), ((1951, 2009), 'matplotlib.pyplot.plot', 'plt.plot', (['testSize', '(1 - F1CV)'], {'color': '"""green"""', 'linewidth': '(4.0)'}), "(testSize, 1 - F1CV, color='green', linewidth=4.0)\n", (1959, 2009), True, 'import matplotlib.pyplot as plt\n'), ((2010, 2020), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2018, 2020), True, 'import matplotlib.pyplot as plt\n'), ((754, 778), 'numpy.random.shuffle', 'np.random.shuffle', (['index'], {}), '(index)\n', (771, 778), True, 'import numpy as np\n'), ((918, 965), 'sklearn.svm.SVC', 'svm.SVC', ([], {'C': '(1.833)', 'gamma': '(0.1366)', 'cache_size': '(1000)'}), '(C=1.833, gamma=0.1366, cache_size=1000)\n', (925, 965), False, 'from sklearn import svm\n'), ((795, 818), 'numpy.linspace', 'np.linspace', (['(0.1)', '(1)', '(50)'], {}), '(0.1, 1, 50)\n', (806, 818), True, 'import numpy as np\n'), ((1120, 1197), 'utils.FScore.F1Score', 'F1Score', (['training_features[index[0:iii]]', 'training_classes[index[0:iii]]', 'clf'], {}), '(training_features[index[0:iii]], training_classes[index[0:iii]], clf)\n', (1127, 1197), False, 'from utils.FScore import F1Score\n'), ((1301, 1342), 'utils.FScore.F1Score', 'F1Score', (['test_features', 'test_classes', 'clf'], {}), '(test_features, test_classes, clf)\n', (1308, 1342), False, 'from utils.FScore import F1Score\n')] |
# System libs
import os
import argparse
from distutils.version import LooseVersion
from multiprocessing import Queue, Process
# Numerical libs
import numpy as np
import math
import torch
import torch.nn as nn
from scipy.io import loadmat
# Our libs
from lib.nn.dataset_for_eval import ValDataset
from lib.modeling import semseg_heads as ModelBuilder
from lib.nn.utils_for_eval import AverageMeter, colorEncode, accuracy, intersectionAndUnion, parse_devices
from lib.nn.parallel.data_parallel_for_eval import user_scattered_collate, async_copy_to
from lib.utils import as_numpy, mark_volatile
import lib.utils.data as torchdata
import cv2
from tqdm import tqdm
from core.config import cfg, cfg_from_file, cfg_from_list, assert_and_infer_cfg
colors = loadmat('lib/datasets/color150.mat')['colors']
class SegmentationModuleBase(nn.Module):
    """Base class for segmentation modules; provides pixel-accuracy metric."""

    def __init__(self):
        super(SegmentationModuleBase, self).__init__()

    def pixel_acc(self, pred, label):
        """Fraction of valid pixels (label >= 0) whose argmax class matches label.

        Args:
            pred: class-score tensor; argmax is taken over dim=1 (channels).
            label: integer label tensor; negative entries are ignored.

        Returns:
            Scalar float tensor with the accuracy (epsilon-guarded division).
        """
        predicted_classes = pred.argmax(dim=1)
        valid_mask = (label >= 0).long()
        correct = torch.sum((predicted_classes == label).long() * valid_mask)
        total = torch.sum(valid_mask)
        return correct.float() / (total.float() + 1e-10)
class SegmentationModule(SegmentationModuleBase):
    """Encoder-decoder segmentation model.

    In training mode (segSize is None) the forward pass returns
    (loss, pixel_accuracy); in inference mode it returns the decoder's
    prediction at the requested segSize.
    """

    def __init__(self, net_enc, net_dec, crit, deep_sup_scale=None):
        super(SegmentationModule, self).__init__()
        self.encoder = net_enc
        self.decoder = net_dec
        self.crit = crit
        # Weight for the auxiliary (deep-supervision) loss; None disables it.
        self.deep_sup_scale = deep_sup_scale

    def forward(self, feed_dict, *, segSize=None):
        # Inference path: decode at the requested output size.
        if segSize is not None:
            features = self.encoder(feed_dict['img_data'], return_feature_maps=True)
            return self.decoder(features, segSize=segSize)

        # Training path.
        features = self.encoder(feed_dict['img_data'], return_feature_maps=True)
        if self.deep_sup_scale is None:
            pred = self.decoder(features)
        else:
            # Decoder also emits an auxiliary prediction for deep supervision.
            pred, pred_deepsup = self.decoder(features)

        loss = self.crit(pred, feed_dict['seg_label'])
        if self.deep_sup_scale is not None:
            loss = loss + self.crit(pred_deepsup, feed_dict['seg_label']) * self.deep_sup_scale

        acc = self.pixel_acc(pred, feed_dict['seg_label'])
        return loss, acc
def visualize_result(data, pred, args):
    """Save a side-by-side panel: input image | colored ground truth | colored prediction."""
    img, seg, info = data
    # color-encode ground truth and prediction with the shared palette
    seg_color = colorEncode(seg, colors)
    pred_color = colorEncode(pred, colors)
    # stitch the three panels horizontally and write next to args.result
    panel = np.concatenate((img, seg_color, pred_color),
                           axis=1).astype(np.uint8)
    img_name = info.split('/')[-1]
    out_path = os.path.join(args.result, img_name.replace('.jpg', '.png'))
    cv2.imwrite(out_path, panel)
def evaluate(segmentation_module, loader, args, dev_id, result_queue):
    """Run multi-scale inference over `loader` on device `dev_id`.

    For every image, averages the class scores over each resized input in
    ``batch_data['img_data']`` and pushes an (acc, pix, intersection, union)
    tuple onto `result_queue` for the master process to aggregate.
    """
    segmentation_module.eval()
    for batch_data in loader:
        # process data (collate wraps each sample in a one-element list)
        batch_data = batch_data[0]
        seg_label = as_numpy(batch_data['seg_label'][0])
        img_resized_list = batch_data['img_data']
        with torch.no_grad():
            segSize = (seg_label.shape[0], seg_label.shape[1])
            # accumulator for scores averaged over all test scales
            scores = torch.zeros(1, cfg.MODEL.NUM_CLASSES, segSize[0], segSize[1])
            scores = async_copy_to(scores, dev_id)
            for img in img_resized_list:
                feed_dict = batch_data.copy()
                feed_dict['img_data'] = img
                # drop host-only fields before copying the dict to the device
                del feed_dict['img_ori']
                del feed_dict['info']
                feed_dict = async_copy_to(feed_dict, dev_id)
                # forward pass at this scale; average into the accumulator
                scores_tmp = segmentation_module(feed_dict, segSize=segSize)
                scores = scores + scores_tmp / len(cfg.TRAIN.SCALES)
            _, pred = torch.max(scores, dim=1)
            pred = as_numpy(pred.squeeze(0).cpu())
        # calculate accuracy and SEND THEM TO MASTER
        acc, pix = accuracy(pred, seg_label)
        intersection, union = intersectionAndUnion(pred, seg_label, cfg.MODEL.NUM_CLASSES)
        result_queue.put_nowait((acc, pix, intersection, union))
        # visualization
        if args.visualize:
            visualize_result(
                (batch_data['img_ori'], seg_label, batch_data['info']),
                pred, args)
def worker(args, dev_id, start_idx, end_idx, result_queue):
    """Evaluate slice [start_idx, end_idx) of the validation list on GPU `dev_id`.

    Builds the dataset/loader and the segmentation network, loads the
    checkpoint given by ``args.ckpt``, then streams per-image metrics into
    ``result_queue`` via ``evaluate``.
    """
    torch.cuda.set_device(dev_id)
    # Dataset and Loader for this worker's slice of the file list
    dataset_val = ValDataset(
        args.list_val, args, max_sample=args.num_val,
        start_idx=start_idx, end_idx=end_idx)
    loader_val = torchdata.DataLoader(
        dataset_val,
        batch_size=args.batch_size,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=2)
    # Network Builders
    builder = ModelBuilder()
    # BUG FIX: result was assigned to `snet_encoder`, leaving `net_encoder`
    # undefined when constructing SegmentationModule below (NameError).
    net_encoder = builder.build_encoder(
        arch=cfg.SEM.ARCH_ENCODER,
        fc_dim=cfg.SEM.FC_DIM)
    # BUG FIX: `use_softmax=not self.training` referenced `self` inside a
    # module-level function (NameError). Evaluation always wants softmax output.
    net_decoder = builder.build_decoder(
        arch=cfg.SEM.DECODER_TYPE,
        fc_dim=cfg.SEM.FC_DIM,
        num_class=cfg.MODEL.NUM_CLASSES,
        use_softmax=True,
        weights='')
    crit = nn.NLLLoss(ignore_index=-1)
    segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)
    print("Loading model weights")
    pretrained = torch.load(args.ckpt, map_location=lambda storage, loc: storage)
    pretrained = pretrained['model']
    segmentation_module.load_state_dict(pretrained, strict=True)
    print("Weights load success")
    segmentation_module.cuda()
    # Main loop
    evaluate(segmentation_module, loader_val, args, dev_id, result_queue)
def main(args):
    """Master process: shard the validation list over devices, spawn one
    worker per device, and aggregate the metrics they stream back."""
    # Parse device ids ('gpu0', 'gpu1', ...) into plain integers
    default_dev, *parallel_dev = parse_devices(args.devices)
    all_devs = [int(d.replace('gpu', '')) for d in parallel_dev + [default_dev]]
    nr_devs = len(all_devs)

    with open(args.list_val, 'r') as f:
        nr_files = len(f.readlines())
    if args.num_val > 0:
        nr_files = min(nr_files, args.num_val)
    nr_files_per_dev = math.ceil(nr_files / nr_devs)

    pbar = tqdm(total=nr_files)
    acc_meter = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()

    result_queue = Queue(500)
    procs = []
    for dev_id in range(nr_devs):
        start_idx = dev_id * nr_files_per_dev
        end_idx = min(start_idx + nr_files_per_dev, nr_files)
        proc = Process(target=worker,
                       args=(args, dev_id, start_idx, end_idx, result_queue))
        print('process:{}, start_idx:{}, end_idx:{}'.format(dev_id, start_idx, end_idx))
        proc.start()
        procs.append(proc)

    # master fetches results as the workers produce them
    processed_counter = 0
    while processed_counter < nr_files:
        if result_queue.empty():
            continue
        acc, pix, intersection, union = result_queue.get()
        acc_meter.update(acc, pix)
        intersection_meter.update(intersection)
        union_meter.update(union)
        processed_counter += 1
        pbar.update(1)

    for p in procs:
        p.join()

    # summary
    iou = intersection_meter.sum / (union_meter.sum + 1e-10)
    for i, _iou in enumerate(iou):
        print('class [{}], IoU: {:.4f}'.format(i, _iou))
    print('[Eval Summary]:')
    print('Mean IoU: {:.4f}, Accuracy: {:.2f}%'
          .format(iou.mean(), acc_meter.average()*100))
    print('Evaluation Done!')
if __name__ == '__main__':
    assert LooseVersion(torch.__version__) >= LooseVersion('0.4.0'), \
        'PyTorch>=0.4.0 is required'

    parser = argparse.ArgumentParser()
    # Model related arguments
    parser.add_argument('--id', required=False,
                        help="a name for identifying the model to load")
    parser.add_argument('--suffix', default='_epoch_20.pth',
                        help="which snapshot to load")
    parser.add_argument('--arch_encoder', default='not implement',
                        help="architecture of net_encoder")
    parser.add_argument('--arch_decoder', default='not implement',
                        help="architecture of net_decoder")
    parser.add_argument('--fc_dim', default=2048, type=int,
                        help='number of features between encoder and decoder')
    # Path related arguments
    parser.add_argument('--list_val',
                        default='./data/validation.odgt')
    parser.add_argument('--root_dataset',
                        default='./data/')
    # Data related arguments
    parser.add_argument('--num_val', default=500, type=int,
                        help='number of images to evalutate')
    parser.add_argument('--num_class', default=150, type=int,
                        help='number of classes')
    parser.add_argument('--batch_size', default=1, type=int,
                        help='batchsize. current only supports 1')
    parser.add_argument('--imgSize', default=[450], nargs='+', type=int,
                        help='list of input image sizes.'
                             'for multiscale testing, e.g. 300 400 500 600')
    parser.add_argument('--imgMaxSize', default=1000, type=int,
                        help='maximum input image size of long edge')
    parser.add_argument('--padding_constant', default=8, type=int,
                        help='maxmimum downsampling rate of the network')
    # Misc arguments
    parser.add_argument('--ckpt', default='./ckpt',
                        help='folder to output checkpoints')
    parser.add_argument('--visualize', action='store_true',
                        help='output visualization?')
    parser.add_argument('--result', default='./result',
                        help='folder to output visualization results')
    parser.add_argument('--devices', default='gpu0',
                        help='gpu_id for evaluation')
    parser.add_argument(
        '--cfg', dest='cfg_file', required=False, default='configs/baselines/e2e_pspnet-50_2x.yaml',
        help='Config file for training (and optionally testing)')
    # BUG FIX: args.set_cfgs was read below but the option was never declared,
    # raising AttributeError on every run.
    parser.add_argument(
        '--set_cfgs', dest='set_cfgs', default=None, nargs='*',
        help='Set config keys, overwriting values from the cfg file')
    args = parser.parse_args()

    cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    # BUG FIX: cfg.SEM.AECH_ENCODER / cfg.DECODER_TYPE did not match the keys
    # used in worker() (cfg.SEM.ARCH_ENCODER / cfg.SEM.DECODER_TYPE).
    args.arch_encoder = cfg.SEM.ARCH_ENCODER
    args.arch_decoder = cfg.SEM.DECODER_TYPE

    print("Input arguments:")
    for key, val in vars(args).items():
        print("{:16} {}".format(key, val))

    # absolute paths of model weights
    args.result = os.path.join(args.result, args.id)
    if not os.path.isdir(args.result):
        os.makedirs(args.result)

    main(args)
| [
"argparse.ArgumentParser",
"scipy.io.loadmat",
"lib.modeling.semseg_heads",
"torch.nn.NLLLoss",
"multiprocessing.Queue",
"lib.nn.utils_for_eval.accuracy",
"torch.no_grad",
"os.path.join",
"torch.load",
"lib.nn.utils_for_eval.colorEncode",
"lib.nn.parallel.data_parallel_for_eval.async_copy_to",
... | [((772, 808), 'scipy.io.loadmat', 'loadmat', (['"""lib/datasets/color150.mat"""'], {}), "('lib/datasets/color150.mat')\n", (779, 808), False, 'from scipy.io import loadmat\n'), ((2615, 2639), 'lib.nn.utils_for_eval.colorEncode', 'colorEncode', (['seg', 'colors'], {}), '(seg, colors)\n', (2626, 2639), False, 'from lib.nn.utils_for_eval import AverageMeter, colorEncode, accuracy, intersectionAndUnion, parse_devices\n'), ((2678, 2703), 'lib.nn.utils_for_eval.colorEncode', 'colorEncode', (['pred', 'colors'], {}), '(pred, colors)\n', (2689, 2703), False, 'from lib.nn.utils_for_eval import AverageMeter, colorEncode, accuracy, intersectionAndUnion, parse_devices\n'), ((4617, 4646), 'torch.cuda.set_device', 'torch.cuda.set_device', (['dev_id'], {}), '(dev_id)\n', (4638, 4646), False, 'import torch\n'), ((4694, 4793), 'lib.nn.dataset_for_eval.ValDataset', 'ValDataset', (['args.list_val', 'args'], {'max_sample': 'args.num_val', 'start_idx': 'start_idx', 'end_idx': 'end_idx'}), '(args.list_val, args, max_sample=args.num_val, start_idx=\n start_idx, end_idx=end_idx)\n', (4704, 4793), False, 'from lib.nn.dataset_for_eval import ValDataset\n'), ((4826, 4956), 'lib.utils.data.DataLoader', 'torchdata.DataLoader', (['dataset_val'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'collate_fn': 'user_scattered_collate', 'num_workers': '(2)'}), '(dataset_val, batch_size=args.batch_size, shuffle=False,\n collate_fn=user_scattered_collate, num_workers=2)\n', (4846, 4956), True, 'import lib.utils.data as torchdata\n'), ((5040, 5054), 'lib.modeling.semseg_heads', 'ModelBuilder', ([], {}), '()\n', (5052, 5054), True, 'from lib.modeling import semseg_heads as ModelBuilder\n'), ((5751, 5778), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {'ignore_index': '(-1)'}), '(ignore_index=-1)\n', (5761, 5778), True, 'import torch.nn as nn\n'), ((5930, 5994), 'torch.load', 'torch.load', (['args.ckpt'], {'map_location': '(lambda storage, loc: storage)'}), '(args.ckpt, map_location=lambda storage, 
loc: storage)\n', (5940, 5994), False, 'import torch\n'), ((6338, 6365), 'lib.nn.utils_for_eval.parse_devices', 'parse_devices', (['args.devices'], {}), '(args.devices)\n', (6351, 6365), False, 'from lib.nn.utils_for_eval import AverageMeter, colorEncode, accuracy, intersectionAndUnion, parse_devices\n'), ((6751, 6780), 'math.ceil', 'math.ceil', (['(nr_files / nr_devs)'], {}), '(nr_files / nr_devs)\n', (6760, 6780), False, 'import math\n'), ((6795, 6815), 'tqdm.tqdm', 'tqdm', ([], {'total': 'nr_files'}), '(total=nr_files)\n', (6799, 6815), False, 'from tqdm import tqdm\n'), ((6835, 6849), 'lib.nn.utils_for_eval.AverageMeter', 'AverageMeter', ([], {}), '()\n', (6847, 6849), False, 'from lib.nn.utils_for_eval import AverageMeter, colorEncode, accuracy, intersectionAndUnion, parse_devices\n'), ((6876, 6890), 'lib.nn.utils_for_eval.AverageMeter', 'AverageMeter', ([], {}), '()\n', (6888, 6890), False, 'from lib.nn.utils_for_eval import AverageMeter, colorEncode, accuracy, intersectionAndUnion, parse_devices\n'), ((6910, 6924), 'lib.nn.utils_for_eval.AverageMeter', 'AverageMeter', ([], {}), '()\n', (6922, 6924), False, 'from lib.nn.utils_for_eval import AverageMeter, colorEncode, accuracy, intersectionAndUnion, parse_devices\n'), ((6947, 6957), 'multiprocessing.Queue', 'Queue', (['(500)'], {}), '(500)\n', (6952, 6957), False, 'from multiprocessing import Queue, Process\n'), ((8290, 8315), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8313, 8315), False, 'import argparse\n'), ((10825, 10853), 'core.config.cfg_from_file', 'cfg_from_file', (['args.cfg_file'], {}), '(args.cfg_file)\n', (10838, 10853), False, 'from core.config import cfg, cfg_from_file, cfg_from_list, assert_and_infer_cfg\n'), ((11195, 11229), 'os.path.join', 'os.path.join', (['args.result', 'args.id'], {}), '(args.result, args.id)\n', (11207, 11229), False, 'import os\n'), ((1005, 1027), 'torch.max', 'torch.max', (['pred'], {'dim': '(1)'}), '(pred, dim=1)\n', (1014, 1027), False, 
'import torch\n'), ((1148, 1164), 'torch.sum', 'torch.sum', (['valid'], {}), '(valid)\n', (1157, 1164), False, 'import torch\n'), ((3215, 3251), 'lib.utils.as_numpy', 'as_numpy', (["batch_data['seg_label'][0]"], {}), "(batch_data['seg_label'][0])\n", (3223, 3251), False, 'from lib.utils import as_numpy, mark_volatile\n'), ((4175, 4200), 'lib.nn.utils_for_eval.accuracy', 'accuracy', (['pred', 'seg_label'], {}), '(pred, seg_label)\n', (4183, 4200), False, 'from lib.nn.utils_for_eval import AverageMeter, colorEncode, accuracy, intersectionAndUnion, parse_devices\n'), ((4232, 4292), 'lib.nn.utils_for_eval.intersectionAndUnion', 'intersectionAndUnion', (['pred', 'seg_label', 'cfg.MODEL.NUM_CLASSES'], {}), '(pred, seg_label, cfg.MODEL.NUM_CLASSES)\n', (4252, 4292), False, 'from lib.nn.utils_for_eval import AverageMeter, colorEncode, accuracy, intersectionAndUnion, parse_devices\n'), ((7135, 7212), 'multiprocessing.Process', 'Process', ([], {'target': 'worker', 'args': '(args, dev_id, start_idx, end_idx, result_queue)'}), '(target=worker, args=(args, dev_id, start_idx, end_idx, result_queue))\n', (7142, 7212), False, 'from multiprocessing import Queue, Process\n'), ((8176, 8207), 'distutils.version.LooseVersion', 'LooseVersion', (['torch.__version__'], {}), '(torch.__version__)\n', (8188, 8207), False, 'from distutils.version import LooseVersion\n'), ((8211, 8232), 'distutils.version.LooseVersion', 'LooseVersion', (['"""0.4.0"""'], {}), "('0.4.0')\n", (8223, 8232), False, 'from distutils.version import LooseVersion\n'), ((10898, 10926), 'core.config.cfg_from_list', 'cfg_from_list', (['args.set_cfgs'], {}), '(args.set_cfgs)\n', (10911, 10926), False, 'from core.config import cfg, cfg_from_file, cfg_from_list, assert_and_infer_cfg\n'), ((11242, 11268), 'os.path.isdir', 'os.path.isdir', (['args.result'], {}), '(args.result)\n', (11255, 11268), False, 'import os\n'), ((11279, 11303), 'os.makedirs', 'os.makedirs', (['args.result'], {}), '(args.result)\n', (11290, 11303), 
False, 'import os\n'), ((2753, 2805), 'numpy.concatenate', 'np.concatenate', (['(img, seg_color, pred_color)'], {'axis': '(1)'}), '((img, seg_color, pred_color), axis=1)\n', (2767, 2805), True, 'import numpy as np\n'), ((3319, 3334), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3332, 3334), False, 'import torch\n'), ((3422, 3483), 'torch.zeros', 'torch.zeros', (['(1)', 'cfg.MODEL.NUM_CLASSES', 'segSize[0]', 'segSize[1]'], {}), '(1, cfg.MODEL.NUM_CLASSES, segSize[0], segSize[1])\n', (3433, 3483), False, 'import torch\n'), ((3506, 3535), 'lib.nn.parallel.data_parallel_for_eval.async_copy_to', 'async_copy_to', (['scores', 'dev_id'], {}), '(scores, dev_id)\n', (3519, 3535), False, 'from lib.nn.parallel.data_parallel_for_eval import user_scattered_collate, async_copy_to\n'), ((4022, 4046), 'torch.max', 'torch.max', (['scores'], {'dim': '(1)'}), '(scores, dim=1)\n', (4031, 4046), False, 'import torch\n'), ((3782, 3814), 'lib.nn.parallel.data_parallel_for_eval.async_copy_to', 'async_copy_to', (['feed_dict', 'dev_id'], {}), '(feed_dict, dev_id)\n', (3795, 3814), False, 'from lib.nn.parallel.data_parallel_for_eval import user_scattered_collate, async_copy_to\n')] |
from base.base_evaluater import BaseEvaluater
from utils.uts_classification.utils import save_evaluating_result
import numpy as np
class UtsClassificationEvaluater(BaseEvaluater):
    """Evaluates a trained univariate-time-series classifier on the test split."""

    def __init__(self, model, data, nb_classes, config):
        super(UtsClassificationEvaluater, self).__init__(model, data, config)
        self.nb_classes = nb_classes

    def evluate(self):
        # NOTE: method name keeps the historical typo ('evluate') because
        # external callers depend on it.
        y_pred = self.model.predict(self.data[0])
        loss, accuracy, precision, recall, f1 = self.model.evaluate(self.data[0], self.data[1])
        print('loss:', loss)
        print('accuracy:', accuracy)
        print('precision:', precision)
        print('recall:', recall)
        print('f1:', f1)

        y_pred = np.argmax(y_pred, axis=1)
        y_true = self.data[2]

        if (self.config.model.name == 'tlenet'):
            # t-leNet emits `tot_increase_num` augmented windows per original
            # series; majority-vote them back into one label per series.
            tot_increase_num = self.data[3]
            test_num_batch = int(self.data[0].shape[0] / tot_increase_num)
            y_predicted = []
            for i in range(test_num_batch):
                # BUG FIX: vote over this series' own window slice; the old
                # code ran np.unique over ALL predictions, so every series
                # received the identical global-majority label.
                curr_y_pred = y_pred[i * tot_increase_num:(i + 1) * tot_increase_num]
                unique_value, sub_ind, correspond_ind, count = np.unique(curr_y_pred, True, True, True)
                idx_max = np.argmax(count)
                predicted_label = unique_value[idx_max]
                y_predicted.append(predicted_label)
            y_pred = np.array(y_predicted)

        cvconfusion, metrics = save_evaluating_result(self.config.result_dir, y_pred, y_true, self.nb_classes)
        self.confusion_matrix = cvconfusion
        self.acc = metrics.loc[0, "Accuracy"]
        self.precision = metrics.loc[0, "Precision(macro)"]
        self.recall = metrics.loc[0, "Recall(macro)"]
        self.f1 = metrics.loc[0, "F1(macro)"]
        self.precision_list = []
        self.recall_list = []
        self.f1_list = []
        for i in range(self.nb_classes):
            # per-class metrics ('Precison' spelling matches the key written
            # by save_evaluating_result -- do not "fix" it here)
            self.precision_list.append(metrics.loc[0, 'Precison(Cla.' + str(i) + ')'])
            self.recall_list.append(metrics.loc[0, 'Recall(Cla.' + str(i) + ')'])
            self.f1_list.append(metrics.loc[0, 'F1(Cla.' + str(i) + ')'])
| [
"utils.uts_classification.utils.save_evaluating_result",
"numpy.array",
"numpy.unique",
"numpy.argmax"
] | [((695, 720), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (704, 720), True, 'import numpy as np\n'), ((1373, 1452), 'utils.uts_classification.utils.save_evaluating_result', 'save_evaluating_result', (['self.config.result_dir', 'y_pred', 'y_true', 'self.nb_classes'], {}), '(self.config.result_dir, y_pred, y_true, self.nb_classes)\n', (1395, 1452), False, 'from utils.uts_classification.utils import save_evaluating_result\n'), ((1320, 1341), 'numpy.array', 'np.array', (['y_predicted'], {}), '(y_predicted)\n', (1328, 1341), True, 'import numpy as np\n'), ((1109, 1144), 'numpy.unique', 'np.unique', (['y_pred', '(True)', '(True)', '(True)'], {}), '(y_pred, True, True, True)\n', (1118, 1144), True, 'import numpy as np\n'), ((1172, 1188), 'numpy.argmax', 'np.argmax', (['count'], {}), '(count)\n', (1181, 1188), True, 'import numpy as np\n')] |
# py_rfq_utils.py
# Written by <NAME> in August 2018
#
# Contains the PyRfqUtils class designed to work in tandem with the RFQ object from
# the py_rfq_module (py_rfq_designer), and a corresponding WARP simulation.
#
from warp import *
import numpy as np
import pickle
import os
import matplotlib.pyplot as plt
import bisect
from dans_pymodules import MyColors
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
from PyQt5.QtCore import QThread
import h5py
from random import sample
import datetime
from mpi4py import MPI
import itertools
__author__ = "<NAME>"
__doc__ = """Utilities for the PyRFQ Module"""
colors = MyColors()
class PyRfqUtils(object):
def __init__(self, rfq, beam=[]):
self._velocity_calculated = False
self._zclose = rfq._field._zmax
self._zfar = self._zclose + 0.01
self._velocityarray = []
self._velocityarray = np.array(self._velocityarray)
self._average_velocity = 0.0
self._wavelength = 0.0
self._bunch_particles = {}
self._bunchfound = False
self._beam = []
self._beam += beam
self._rfq = rfq
self._wavelengthbound = None
self._max_steps_find_bunch = None
self._velocity_count = top.npinject * 150
self._app = pg.mkQApp()
self._view = None
self._x_top_rms = None
self._x_bottom_rms = None
self._y_top_rms = None
self._y_bottom_rms = None
self._view_scatter = None
self._scatter_x = None
self._scatter_y = None
self._particle_outfile = None
self._particle_data_group = None
self._data_out_called = False
# winon()
# Helper functions to get all particles from all beams
def _get_all_z_part(self, beamlist):
return np.ravel([elem.getz() for elem in beamlist])
def _get_all_x_part(self, beamlist):
return np.ravel([elem.getx() for elem in beamlist])
def _get_all_y_part(self, beamlist):
return np.ravel([elem.gety() for elem in beamlist])
def find_bunch_p(self, bunch_beam, max_steps):
self._max_steps_find_bunch = top.it + max_steps
if (np.max(bunch_beam.getz()) < self._rfq._field._zmax):
print("Particles have not yet reached the end of the RFQ. Abandoning bunch finding.")
return None
starttime = time.time()
for i in range(0, max_steps):
step(1)
self.measure_bunch_p(bunch_beam=bunch_beam)
if (self._bunchfound):
break
if (not self._bunchfound):
self._bunch_particles = None
endtime = time.time()
print("It took {} seconds to find a bunch.".format(endtime - starttime))
return self._bunch_particles
def measure_bunch_p(self, bunch_beam=None, beamlist=None):
if bunch_beam==None:
bunch_beam = self._beam[-1]
if beamlist==None:
beamlist = self._beam
if self._bunchfound:
return
if not self._velocity_calculated:
step_zdata = bunch_beam.getz()
crossedZ = np.where(np.logical_and(step_zdata>(self._zclose-0.005), step_zdata<(self._zclose + 0.005)))
velocities = bunch_beam.getvz()
particle_velocities = velocities[crossedZ]
self._velocityarray = np.concatenate((self._velocityarray, particle_velocities))
if (len(self._velocityarray) > self._velocity_count):
self._average_velocity = np.mean(self._velocityarray)
self._wavelength = self._average_velocity / self._rfq.rf_freq
self._velocity_calculated = True
self._zfar = self._zclose + self._wavelength
self._wavelengthbound = self._zfar
print('_wavelengthbound: {}'.format(self._wavelengthbound))
return
elif self._velocity_calculated:
print("self._zclose: {} self._zfar: {}".format(self._zclose, self._zfar))
z_positions = [elem for elem in bunch_beam.getz() if (self._zclose < elem < self._zfar)]
print("Restul: {}, Desired: {}".format(np.around(np.mean(z_positions), decimals=3), np.around((self._zfar + self._zclose) / 2, decimals=3)))
if (np.around(np.mean(z_positions), decimals=3) == (np.around(((self._zfar - self._zclose) / 2) + self._zclose, decimals=3))):
self._bunchfound = True
for beam in self._beam:
step_zdata = beam.getz()
bunchparticles_indices = np.where(np.logical_and(step_zdata>(self._zclose), step_zdata<(self._zfar)))
self._bunch_particles[beam.name] = {}
self._bunch_particles[beam.name]["x"] = beam.getx()[bunchparticles_indices]
self._bunch_particles[beam.name]["y"] = beam.gety()[bunchparticles_indices]
self._bunch_particles[beam.name]["z"] = beam.getz()[bunchparticles_indices]
self._bunch_particles[beam.name]["r"] = beam.getr()[bunchparticles_indices]
self._bunch_particles[beam.name]["theta"] = beam.gettheta()[bunchparticles_indices]
self._bunch_particles[beam.name]["vx"] = beam.getvx()[bunchparticles_indices]
self._bunch_particles[beam.name]["vy"] = beam.getvy()[bunchparticles_indices]
self._bunch_particles[beam.name]["vz"] = beam.getvz()[bunchparticles_indices]
self._bunch_particles[beam.name]["ux"] = beam.getux()[bunchparticles_indices]
self._bunch_particles[beam.name]["uy"] = beam.getuy()[bunchparticles_indices]
self._bunch_particles[beam.name]["uz"] = beam.getuz()[bunchparticles_indices]
self._bunch_particles[beam.name]["xp"] = beam.getxp()[bunchparticles_indices]
self._bunch_particles[beam.name]["yp"] = beam.getyp()[bunchparticles_indices]
self._bunch_particles[beam.name]["rp"] = beam.getrp()[bunchparticles_indices]
self._bunch_particles[beam.name]["gaminv"] = beam.getgaminv()[bunchparticles_indices]
bunch_particles = self._bunch_particles
i = 0
while os.path.exists("bunch_particles.%s.dump" % i):
i += 1
comm = MPI.COMM_WORLD
comm.Barrier()
if (comm.Get_rank() == 0):
pickle.dump(bunch_particles, open("bunch_particles.%s.dump" % i, "wb"))
print("Bunch found.")
def find_bunch(self, bunch_beam, max_steps):
self._max_steps_find_bunch = top.it + max_steps
if (np.max(bunch_beam.getz()) < self._rfq._field._zmax):
print("Particles have not yet reached the end of the RFQ. Abandoning bunch finding.")
return None
# starttime = time.time()
for i in range(0, max_steps):
step(1)
self.measure_bunch(bunch_beam=bunch_beam)
if (self._bunchfound):
break
if (not self._bunchfound):
self._bunch_particles = None
# endtime = time.time()
print("It took {} seconds to find a bunch.".format(endtime - starttime))
return self._bunch_particles
def measure_bunch(self, bunch_beam=None, beamlist=None):
if bunch_beam==None:
bunch_beam = self._beam[-1]
if beamlist==None:
beamlist = self._beam
if self._bunchfound:
return
if not self._velocity_calculated:
crossedZ = bunch_beam.selectparticles(zc=self._zclose)
velocities = bunch_beam.getvz()
particle_velocities = velocities[crossedZ]
self._velocityarray = np.concatenate((self._velocityarray, particle_velocities))
if (len(self._velocityarray) > self._velocity_count):
self._average_velocity = np.mean(self._velocityarray)
self._wavelength = self._average_velocity / self._rfq.rf_freq
self._velocity_calculated = True
self._zfar = self._zclose + self._wavelength
self._wavelengthbound = self._zfar
print('_wavelengthbound: {}'.format(self._wavelengthbound))
return
elif self._velocity_calculated:
tot_particles = list(zip(bunch_beam.getx(), bunch_beam.gety(), bunch_beam.getz()))
#tot_particles = np.array(tot_particles)
print("self._zclose: {} self._zfar: {}".format(self._zclose, self._zfar))
particles = [item for item in tot_particles if (self._zclose < item[2] < self._zfar)]
z_positions = [item[2] for item in particles]
print("Result: {}, Desired: {}".format(np.mean(z_positions), (self._zfar + self._zclose) / 2))
print("RestulR: {}, Desired: {}".format(np.around(np.mean(z_positions), decimals=2), np.around((self._zfar + self._zclose) / 2, decimals=2)))
if (np.around(np.mean(z_positions), decimals=3) == (np.around(((self._zfar - self._zclose) / 2) + self._zclose, decimals=3))):
self._bunchfound = True
for beam in self._beam:
bunchparticles_indices = beam.selectparticles(zl=self._zclose, zu=self._zfar)
self._bunch_particles[beam.name] = {}
self._bunch_particles[beam.name]["x"] = beam.getx()[bunchparticles_indices]
self._bunch_particles[beam.name]["y"] = beam.gety()[bunchparticles_indices]
self._bunch_particles[beam.name]["z"] = beam.getz()[bunchparticles_indices]
self._bunch_particles[beam.name]["r"] = beam.getr()[bunchparticles_indices]
self._bunch_particles[beam.name]["theta"] = beam.gettheta()[bunchparticles_indices]
self._bunch_particles[beam.name]["vx"] = beam.getvx()[bunchparticles_indices]
self._bunch_particles[beam.name]["vy"] = beam.getvy()[bunchparticles_indices]
self._bunch_particles[beam.name]["vz"] = beam.getvz()[bunchparticles_indices]
self._bunch_particles[beam.name]["ux"] = beam.getux()[bunchparticles_indices]
self._bunch_particles[beam.name]["uy"] = beam.getuy()[bunchparticles_indices]
self._bunch_particles[beam.name]["uz"] = beam.getuz()[bunchparticles_indices]
self._bunch_particles[beam.name]["xp"] = beam.getxp()[bunchparticles_indices]
self._bunch_particles[beam.name]["yp"] = beam.getyp()[bunchparticles_indices]
self._bunch_particles[beam.name]["rp"] = beam.getrp()[bunchparticles_indices]
self._bunch_particles[beam.name]["gaminv"] = beam.getgaminv()[bunchparticles_indices]
bunch_particles = self._bunch_particles
i = 0
while os.path.exists("bunch_particles.%s.dump" % i):
i += 1
comm = MPI.COMM_WORLD
comm.Barrier()
pickle.dump(bunch_particles, open("bunch_particles.%s.dump" % i, "wb"))
print("Bunch found.")
def plotXZparticles(self, beamlist=None, view=1):
if beamlist==None:
beamlist = self._beam
plsys(view)
plg([w3d.xmmin,w3d.xmmax],[self._rfq._field._zmin, self._rfq._field._zmin], color=red)
plg([w3d.xmmin,w3d.xmmax],[self._rfq._field._zmax, self._rfq._field._zmax], color=red)
if (self._wavelengthbound):
plg([w3d.xmmin,w3d.xmmax],[self._wavelengthbound, self._wavelengthbound], color=red)
self._rfq._conductors.draw()
# pfzx(plotsg=0, cond=0, titles=False, view=view)
for beam in beamlist:
beam.ppzx(titles=False, view=view)
limits(w3d.zmminglobal, w3d.zmmaxglobal)
ptitles("", "Z (m)", "X (m)")
def plotYZparticles(self, beamlist=None, view=1):
if beamlist==None:
beamlist = self._beam
plsys(view)
plg([w3d.ymmin,w3d.ymmax],[self._rfq._field._zmin, self._rfq._field._zmin], color=red)
plg([w3d.ymmin,w3d.ymmax],[self._rfq._field._zmax, self._rfq._field._zmax], color=red)
if (self._wavelengthbound):
plg([w3d.ymmin,w3d.ymmax],[self._wavelengthbound, self._wavelengthbound], color=red)
self._rfq._conductors.draw()
# pfzy(plotsg=0, cond=0, titles=False, view=view)
for beam in beamlist:
beam.ppzy(titles=False, view=view)
limits(w3d.zmminglobal, w3d.zmmaxglobal)
ptitles("", "Z (m)", "Y (m)")
def plotXphase(self, beamlist=None, view=1):
if beamlist==None:
beamlist=self._beam
plsys(view)
for beam in beamlist:
beam.ppxp()
def plotYphase(self, beamlist=None, view=1):
if beamlist==None:
beamlist=self._beam
plsys(view)
for beam in beamlist:
beam.ppyp()
def beamplots(self, beamlist=None):
if beamlist==None:
beamlist=self._beam
window()
# fma()
self.plotXZparticles(beamlist=beamlist, view=9)
# refresh()
# window(winnum=2)
# fma()
self.plotYZparticles(beamlist=beamlist, view=10)
fma()
refresh()
# window(2)
# fma()
# self.plotXphase()
# refresh()
# window(3)
# fma()
# self.plotYphase()
# refresh()
def make_plots(self, beamlist=None, rate=10):
if beamlist==None:
beamlist=self._beam
if top.it%rate == 0:
self.beamplots(beamlist=beamlist)
# def plot_rms_graph(self, start, end, bucketsize=0.001):
# beam = self._beam
# x = beam.getx()
# y = beam.gety()
# z = beam.getz()
# data = np.array(list(zip(x, y, z)))
# def rms(ray):
# temp = np.array(ray)
# temp = temp ** 2
# avg = temp.mean()
# avg = np.sqrt(avg)
# return avg
# bins = np.arange(start, end, bucketsize)
# zdigitized = np.digitize(z,bins)
# xrms_ray = []
# yrms_ray = []
# for i in range(1, len(bins) + 1):
# to_rms = data[zdigitized == i]
# if (len(to_rms) == 0):
# xrms_ray.append(0)
# yrms_ray.append(0)
# continue
# unzipped = list(zip(*to_rms))
# # if (rms(unzipped[0]) > 0.02):
# # xrms_ray.append(0.02)
# # else:
# # xrms_ray.append(rms(unzipped[0]))
# # if (rms(unzipped[1]) > 0.02):
# # yrms_ray.append(0.02)
# # else:
# # yrms_ray.append(rms(unzipped[1]))
# xrms_ray.append(rms(unzipped[0]))
# yrms_ray.append(rms(unzipped[1]))
# # xrms_ray.append(np.mean(unzipped[0]))
# # yrms_ray.append(np.mean(unzipped[1]))
# plt.plot(bins, xrms_ray)
# plt.plot(bins, yrms_ray)
# plt.show()
def find_nearest(self, array,value):
idx = np.searchsorted(array, value, side="left")
if idx > 0 and (idx == len(array) or math.fabs(value - array[idx-1]) < math.fabs(value - array[idx])):
return idx-1
else:
return idx
# Assumes symmetry about the z-axis
def find_vane_mesh_boundaries(self, NX, sim_start, sim_end, sim_xmin, sim_xmax, vane_dist, vane_rad):
refinement_array = np.linspace(0, 2*sim_xmax, NX)
vane_top_edge = sim_xmax - vane_dist - (vane_rad)
tvane_bottom_edge = vane_top_edge + (2*vane_rad)
vane_top2_edge = sim_xmax + vane_dist - vane_rad
bvane_bottom_edge = sim_xmax + vane_dist + vane_rad
# Finding the place where the mesh around the north vane ends. Finds which refinement box it
# lands between, then increases its size by one course box worth
ymax_north_idx = bisect.bisect_left(refinement_array, tvane_bottom_edge)
ymax_north = refinement_array[ymax_north_idx]
if (ymax_north < 2*sim_xmax):
ymax_north += (2*sim_xmax)/(NX-1)
# Similarly for the mesh around the southern vane
ymax_south = refinement_array[bisect.bisect_left(refinement_array, bvane_bottom_edge)]
if (ymax_south < 2*sim_xmax):
ymax_south += (2*sim_xmax)/(NX-1)
ymin_north_idx = bisect.bisect_left(refinement_array, vane_top_edge)
if (ymin_north_idx > 0):
ymin_north_idx -= 1
if (ymin_north_idx > 0):
ymin_north_idx -= 1
ymin_north = refinement_array[ymin_north_idx]
ymin_south_idx = bisect.bisect_left(refinement_array, vane_top2_edge)
if (ymin_south_idx > 0):
ymin_south_idx -= 1
if (ymin_south_idx > 0):
ymin_south_idx -= 1
ymin_south = refinement_array[ymin_south_idx]
xmax_east = ymax_south
xmin_east = ymin_south
xmax_west = ymax_north
xmin_west = ymin_north
# Similar process for mesh boundaries across central axis
vertical_xmax = refinement_array[bisect.bisect_left(refinement_array, sim_xmax + vane_rad)]
if (vertical_xmax < 2*sim_xmax):
vertical_xmax += (2*sim_xmax)/(NX-1)
vertical_xmin_idx = bisect.bisect_left(refinement_array, sim_xmax - vane_rad)
if (vertical_xmin_idx > 0):
vertical_xmin_idx -= 1
if (vertical_xmin_idx > 0):
vertical_xmin_idx -= 1
vertical_xmin = refinement_array[vertical_xmin_idx]
lateral_ymin = vertical_xmin
lateral_ymax = vertical_xmax
xmax_east -= sim_xmax
ymax_south -= sim_xmax
xmin_east -= sim_xmax
ymin_south -= sim_xmax
xmax_west -= sim_xmax
ymax_north -= sim_xmax
xmin_west -= sim_xmax
ymin_north -= sim_xmax
lateral_ymin -= sim_xmax
lateral_ymax -= sim_xmax
vertical_xmin -= sim_xmax
vertical_xmax -= sim_xmax
boundaries = {
"northmins": [vertical_xmin, ymin_north, sim_start],
"northmaxs": [vertical_xmax, ymax_north, sim_end],
"southmins": [vertical_xmin, ymin_south, sim_start],
"southmaxs": [vertical_xmax, ymax_south, sim_end],
"westmins": [xmin_west, lateral_ymin, sim_start],
"westmaxs": [xmax_west, lateral_ymax, sim_end],
"eastmins": [xmin_east, lateral_ymin, sim_start],
"eastmaxs": [xmax_east, lateral_ymax, sim_end]
}
return boundaries
def my_extractvar(self, name, varsuffix=None, pkg='top', ff=None):
    """
    Helper function which, given a name, returns the appropriate data.

    Lookup order when *name* is a string:
      1. ff.read(name + varsuffix)           (only if varsuffix is given)
      2. __main__.__dict__[name + varsuffix] (only if varsuffix is given)
      3. ff.read(name + '@' + pkg)
      4. getattr(packageobject(pkg), name)   (raises if the attribute is absent)
    If *name* is not a string it is assumed to already be the data itself
    and is returned unchanged.

    Parameters
    ----------
    name : str or object
        Variable name to resolve, or the data itself.
    varsuffix : optional
        Suffix appended to the name for restart-file style lookups.
    pkg : str
        Fortran package name used for the '@pkg' and attribute lookups.
    ff : optional
        File-like object with a .read(name) method (e.g. a restart dump).
    """
    if not isinstance(name, str):
        # Already the data itself; nothing to look up.
        return name
    # --- if varsuffix is specified, try to evaluate the name with the
    # --- suffix. If ok, return the result, otherwise, default to the
    # --- fortran variable in the specified package.
    if varsuffix is not None:
        vname = name + str(varsuffix)
        # Narrowed from bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed; a failed lookup simply falls through.
        try:
            result = ff.read(vname)
        except Exception:
            result = None
        if result is not None:
            return result
        try:
            result = __main__.__dict__[vname]
        except Exception:
            result = None
        if result is not None:
            return result
    try:
        result = ff.read(name + '@' + pkg)
    except Exception:
        result = None
    if result is not None:
        return result
    return getattr(packageobject(pkg), name)
def plot_xedges(self, plot_item_top, plot_item_bottom, pen=(200,200,200), symbol=None, symbolPen=(200,200,200), symbolBrush=(50,50,150), fillLevel=None,
                brush=None, js=-1, zoffset=None, zscale=1., scale=1., titleb=None, titles=1):
    """Plot beam X edges (centroid +- twice X rms) versus Z.

    Updates the two supplied pyqtgraph items in place, plotting
    (zoffset + zmntmesh)/zscale on the abscissa and xbarz +/- 2*xrmsz
    (scaled by *scale*) on the ordinate.

    - plot_item_top/plot_item_bottom: pyqtgraph data items for the upper
      (xbar + 2*rms) and lower (xbar - 2*rms) edge curves.
    - pen/symbol/symbolPen/symbolBrush/fillLevel/brush: accepted for
      backward compatibility; the gist-style plot calls that consumed them
      are gone, so they are currently unused.
    - js=-1: species number, zero based. When -1, plots data combined from
      all species.
    - zoffset=zbeam: offset added to axis (read from 'zbeam' when None).
    - zscale=1: scale of axis; must be nonzero.
    - scale=1.: factor to scale data by.
    - titleb/titles: accepted for backward compatibility; title plotting is
      currently disabled.

    Raises:
        Exception: if zscale is zero.
    """
    if zscale == 0.:
        raise Exception("zscale must be nonzero")
    varsuffix = None
    ff = None
    xbarz = self.my_extractvar('xbarz', varsuffix, 'top', ff)[..., js] * scale
    xrmsz = self.my_extractvar('xrmsz', varsuffix, 'top', ff)[..., js] * scale
    zmntmesh = self.my_extractvar('zmntmesh', varsuffix, 'top', ff)
    if zoffset is None:
        zoffset = self.my_extractvar('zbeam', varsuffix, 'top', ff)
    # Shared abscissa for both edge curves (computed once).
    zaxis = (zoffset + zmntmesh) / zscale
    plot_item_top.setData(zaxis, xbarz + 2. * xrmsz)
    plot_item_bottom.setData(zaxis, xbarz - 2. * xrmsz)
def plot_yedges(self, plot_item_top, plot_item_bottom, pen=(200,200,200), symbol=None, symbolPen=(200,200,200), symbolBrush=(50,50,150), fillLevel=None,
                brush=None, js=-1, zoffset=None, zscale=1., scale=1., titleb=None, titles=1):
    """Plot beam Y edges (centroid +- twice Y rms) versus Z.

    (The original docstring said "X edges"; this is the Y counterpart of
    plot_xedges.)  Updates the two supplied pyqtgraph items in place,
    plotting (zoffset + zmntmesh)/zscale on the abscissa and
    ybarz +/- 2*yrmsz (scaled by *scale*) on the ordinate.

    - plot_item_top/plot_item_bottom: pyqtgraph data items for the upper
      (ybar + 2*rms) and lower (ybar - 2*rms) edge curves.
    - pen/symbol/symbolPen/symbolBrush/fillLevel/brush: accepted for
      backward compatibility; currently unused.
    - js=-1: species number, zero based. When -1, plots data combined from
      all species.
    - zoffset=zbeam: offset added to axis (read from 'zbeam' when None).
    - zscale=1: scale of axis; must be nonzero.
    - scale=1.: factor to scale data by.
    - titleb/titles: accepted for backward compatibility; title plotting is
      currently disabled.

    Raises:
        Exception: if zscale is zero.
    """
    if zscale == 0.:
        raise Exception("zscale must be nonzero")
    varsuffix = None
    ff = None
    ybarz = self.my_extractvar('ybarz', varsuffix, 'top', ff)[..., js] * scale
    yrmsz = self.my_extractvar('yrmsz', varsuffix, 'top', ff)[..., js] * scale
    zmntmesh = self.my_extractvar('zmntmesh', varsuffix, 'top', ff)
    if zoffset is None:
        zoffset = self.my_extractvar('zbeam', varsuffix, 'top', ff)
    # Shared abscissa for both edge curves (computed once).
    zaxis = (zoffset + zmntmesh) / zscale
    plot_item_top.setData(zaxis, ybarz + 2. * yrmsz)
    plot_item_bottom.setData(zaxis, ybarz - 2. * yrmsz)
# Setup the PyQTGraph realtime RMS Plot
def rms_plot_setup(self, xpen=None, ypen=None,
                   xrange=None, yrange=None, title=None, labels=None):
    """Create the pyqtgraph widget and the four curve items for the RMS plot.

    Parameters
    ----------
    xpen, ypen : pyqtgraph pen, optional
        Pens for the X and Y edge curves; default to mkPen(width=1.5) in
        colors[6] and colors[5] respectively.
    xrange, yrange : list, optional
        Initial view ranges; default to [-0.1, 1] and [-0.01, 0.01].
    title, labels : optional
        Forwarded to pg.PlotWidget.
    """
    # Build defaults lazily: the original signature evaluated pg.mkPen(...)
    # and list literals as default arguments, so the pen objects and range
    # lists were created once at import time and shared across all calls
    # (the classic eager/mutable default-argument pitfall).
    if xpen is None:
        xpen = pg.mkPen(width=1.5, color=colors[6])
    if ypen is None:
        ypen = pg.mkPen(width=1.5, color=colors[5])
    if xrange is None:
        xrange = [-0.1, 1]
    if yrange is None:
        yrange = [-0.01, 0.01]
    self._view = pg.PlotWidget(title=title, labels=labels)
    self._x_top_rms = pg.PlotDataItem(pen=xpen)
    self._x_bottom_rms = pg.PlotDataItem(pen=xpen)
    self._y_top_rms = pg.PlotDataItem(pen=ypen)
    self._y_bottom_rms = pg.PlotDataItem(pen=ypen)
    self._view.setRange(xRange=xrange, yRange=yrange)
    for item in (self._x_top_rms, self._x_bottom_rms,
                 self._y_top_rms, self._y_bottom_rms):
        self._view.addItem(item)
def plot_rms(self):
    """Refresh the realtime RMS-edge plot (beam X and Y edges versus Z).

    Intended to be called once per simulation time step, after
    rms_plot_setup() has created self._view and the four curve items.
    """
    # pzxedges: Plots beam X and Y edges (centroid +- twice Xrms) versus Z
    # Call me every time step
    self._view.show()
    self.plot_xedges(self._x_top_rms, self._x_bottom_rms)
    self.plot_yedges(self._y_top_rms, self._y_bottom_rms)
    # Let Qt process pending GUI events so the updated curves are drawn.
    QtGui.QApplication.processEvents()
def particle_plot_setup(self, xpen=None, ypen=None,
                        symbol='s', size=0.25, xrange=None, yrange=None, title=None, labels=None):
    """Create the pyqtgraph widget and scatter items for the particle plot.

    Parameters
    ----------
    xpen, ypen : pyqtgraph pen, optional
        Pens for the X and Y scatter items; default to mkPen(width=1) in
        colors[6] and colors[5] respectively.
    symbol, size : scatter symbol shape and size.
    xrange, yrange : list, optional
        Initial view ranges; default to [-0.1, 1] and [-0.01, 0.01].
    title, labels : optional
        Forwarded to pg.PlotWidget.
    """
    # Build defaults lazily: the original evaluated pg.mkPen(...) and list
    # literals in the signature, creating shared objects at import time
    # (the eager/mutable default-argument pitfall).
    if xpen is None:
        xpen = pg.mkPen(width=1, color=colors[6])
    if ypen is None:
        ypen = pg.mkPen(width=1, color=colors[5])
    if xrange is None:
        xrange = [-0.1, 1]
    if yrange is None:
        yrange = [-0.01, 0.01]
    # Setup the PyQTGraph realtime particle plot
    self._view_scatter = pg.PlotWidget(title=title, labels=labels)
    self._view_scatter.show()
    self._scatter_x = pg.ScatterPlotItem(pen=xpen, symbol=symbol, size=size)
    self._scatter_y = pg.ScatterPlotItem(pen=ypen, symbol=symbol, size=size)
    self._view_scatter.setRange(xRange=xrange, yRange=yrange)
    self._view_scatter.addItem(self._scatter_x)
    self._view_scatter.addItem(self._scatter_y)
def plot_particles(self, factor=1, beamlist=None):
    """Scatter-plot particle X and Y positions versus Z.

    Intended to be called once per simulation time step.

    Parameters
    ----------
    factor : float
        Fraction of particles to plot (random subsample via sample();
        presumably random.sample via a star import -- confirm).
    beamlist : list, optional
        WARP beam objects to plot; defaults to self._beam.
    """
    if beamlist is None:  # fix: identity comparison instead of '== None'
        beamlist = self._beam
    x_by_z_particles = list(zip(self._get_all_z_part(beamlist), self._get_all_x_part(beamlist)))
    factored_x_by_z = sample(x_by_z_particles, int(len(x_by_z_particles) * factor))
    self._scatter_x.setData(pos=factored_x_by_z)
    y_by_z_particles = list(zip(self._get_all_z_part(beamlist), self._get_all_y_part(beamlist)))
    factored_y_by_z = sample(y_by_z_particles, int(len(y_by_z_particles) * factor))
    self._scatter_y.setData(pos=factored_y_by_z)
    # Let Qt process pending GUI events so the scatter actually redraws.
    QtGui.QApplication.processEvents()
def get_rms_widget(self):
    """Return the pyqtgraph PlotWidget created by rms_plot_setup()."""
    return self._view
def get_particle_widget(self):
    """Return the pyqtgraph PlotWidget created by particle_plot_setup()."""
    return self._view_scatter
def write_hdf5_data(self, step_num, beamlist=None):
    """Append one time step of particle data to the HDF5 output file (SERIAL).

    On the first call, creates the output file (timestamped name) and a
    'SpeciesList' group identifying each beam.  Every call then writes a
    'Step#<step_num>' group containing per-particle arrays concatenated
    over all beams.

    Parameters
    ----------
    step_num : int
        Current step index (refers to top.it).
    beamlist : list, optional
        WARP beam objects to output; defaults to self._beam.
    """
    if beamlist is None:  # fix: identity comparison instead of '== None'
        beamlist = self._beam
    if not self._data_out_called:
        date = datetime.datetime.today()
        filename = date.strftime('%Y-%m-%dT%H:%M') + "_particle_data.hdf5"
        self._particle_outfile = h5py.File(filename, 'w')
        self._particle_outfile.attrs.__setitem__('PY_RFQ_HELPER', b'0.0.1')
        self._data_out_called = True
        # Store data to identify species later.  Done only on the first
        # call: create_group('SpeciesList') would raise if repeated.
        beam_identifier_list = self._particle_outfile.create_group('SpeciesList')
        for beam in beamlist:
            beam_identifier_list.create_dataset(beam.name, data=[beam.sm, beam.charge, beam.charge_state, beam.type.A, beam.type.Z])
    step_str = "Step#{}".format(step_num)
    _keys = ('x', 'y', 'z', 'px', 'py', 'pz', 'm', 'q', 'ENERGY',
             'vx', 'vy', 'vz', 'ux', 'uy', 'uz', 'xp', 'yp', 'id')
    _part_data = {key: [] for key in _keys}
    step_grp = self._particle_outfile.create_group(step_str)
    for beam in beamlist:
        _npart = beam.getn()
        _mass = beam.sm
        # One array per output key for this beam; momenta are u * mass.
        per_beam = {
            'x': beam.getx(), 'y': beam.gety(), 'z': beam.getz(),
            'px': beam.getux() * _mass, 'py': beam.getuy() * _mass, 'pz': beam.getuz() * _mass,
            'm': np.full(_npart, _mass), 'q': np.full(_npart, beam.charge),
            'ENERGY': np.full(_npart, beam.ekin),
            'vx': beam.getvx(), 'vy': beam.getvy(), 'vz': beam.getvz(),
            'ux': beam.getux(), 'uy': beam.getuy(), 'uz': beam.getuz(),
            'xp': beam.getxp(), 'yp': beam.getyp(), 'id': beam.getssn(),
        }
        for key, vals in per_beam.items():
            _part_data[key] = np.concatenate((_part_data[key], vals))
    for key in _part_data:
        step_grp.create_dataset(key, data=_part_data[key])
def write_hdf5_data_p(self, step_num, beamlist=None):
    """Gather particle data over MPI and append one step to the HDF5 file (PARALLEL).

    Every rank must call this method -- comm.gather is a collective
    operation -- but only rank 0 creates/writes the output file.

    Parameters
    ----------
    step_num : int
        Current step index (refers to top.it).
    beamlist : list, optional
        WARP beam objects to output; defaults to self._beam.
    """
    if beamlist is None:  # fix: identity comparison instead of '== None'
        beamlist = self._beam
    comm = MPI.COMM_WORLD
    is_root = comm.Get_rank() == 0
    if is_root and not self._data_out_called:
        # First call on the root rank: create the output file and the
        # species-identification table.
        date = datetime.datetime.today()
        filename = date.strftime('%Y-%m-%dT%H:%M') + "_particle_data.hdf5"
        self._particle_outfile = h5py.File(filename, 'w')
        self._particle_outfile.attrs.__setitem__('PY_RFQ_HELPER', b'0.0.1')
        self._data_out_called = True
        # Store data to identify species later
        beam_identifier_list = self._particle_outfile.create_group('SpeciesList')
        for beam in beamlist:
            # MASS, CHARGE
            # NOTE(review): the serial write_hdf5_data stores beam.sm here
            # while this stores beam.mass -- confirm they are equivalent.
            beam_identifier_list.create_dataset(beam.name, data=[beam.mass, beam.charge, beam.charge_state, beam.type.A, beam.type.Z])
    step_str = "Step#{}".format(step_num)
    _part_data = {'x': [], 'y': [], 'z': [], 'px': [], 'py': [], 'pz': [], 'm': [], 'q': [], "ENERGY": [],
                  'vx': [], 'vy': [], 'vz': [], 'id': []}

    def _flatten(parts):
        # Concatenate the per-rank sequences delivered by comm.gather.
        return np.array(list(itertools.chain.from_iterable(parts)))

    for beam in beamlist:
        # gather() is collective: EVERY rank must execute these calls even
        # though only the root receives non-None results.
        x_gathered = comm.gather(beam.xp, root=0)
        y_gathered = comm.gather(beam.yp, root=0)
        z_gathered = comm.gather(beam.zp, root=0)
        vx_gathered = comm.gather(beam.uxp, root=0)
        vy_gathered = comm.gather(beam.uyp, root=0)
        vz_gathered = comm.gather(beam.uzp, root=0)
        id_gathered = comm.gather(beam.ssn, root=0)
        if not is_root:
            continue
        x_arr = _flatten(x_gathered)
        y_arr = _flatten(y_gathered)
        z_arr = _flatten(z_gathered)
        vx_arr = _flatten(vx_gathered)
        vy_arr = _flatten(vy_gathered)
        vz_arr = _flatten(vz_gathered)
        id_arr = _flatten(id_gathered)
        _npart = len(x_arr)
        _mass = beam.mass
        per_beam = {
            'x': x_arr, 'y': y_arr, 'z': z_arr,
            # momenta: u * mass
            'px': vx_arr * _mass, 'py': vy_arr * _mass, 'pz': vz_arr * _mass,
            'm': np.full(_npart, _mass), 'q': np.full(_npart, beam.charge),
            'ENERGY': np.full(_npart, beam.ekin),
            'vx': vx_arr, 'vy': vy_arr, 'vz': vz_arr, 'id': id_arr,
        }
        for key, vals in per_beam.items():
            _part_data[key] = np.concatenate((_part_data[key], vals))
    if is_root:
        step_grp = self._particle_outfile.create_group(step_str)
        for key in _part_data:
            step_grp.create_dataset(key, data=_part_data[key])
| [
"itertools.chain.from_iterable",
"pyqtgraph.Qt.QtGui.QApplication.processEvents",
"numpy.around",
"numpy.mean",
"pyqtgraph.ScatterPlotItem",
"numpy.full",
"os.path.exists",
"numpy.linspace",
"pyqtgraph.mkPen",
"dans_pymodules.MyColors",
"h5py.File",
"datetime.datetime.today",
"pyqtgraph.mkQA... | [((631, 641), 'dans_pymodules.MyColors', 'MyColors', ([], {}), '()\n', (639, 641), False, 'from dans_pymodules import MyColors\n'), ((895, 924), 'numpy.array', 'np.array', (['self._velocityarray'], {}), '(self._velocityarray)\n', (903, 924), True, 'import numpy as np\n'), ((1288, 1299), 'pyqtgraph.mkQApp', 'pg.mkQApp', ([], {}), '()\n', (1297, 1299), True, 'import pyqtgraph as pg\n'), ((15328, 15370), 'numpy.searchsorted', 'np.searchsorted', (['array', 'value'], {'side': '"""left"""'}), "(array, value, side='left')\n", (15343, 15370), True, 'import numpy as np\n'), ((15719, 15751), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * sim_xmax)', 'NX'], {}), '(0, 2 * sim_xmax, NX)\n', (15730, 15751), True, 'import numpy as np\n'), ((16183, 16238), 'bisect.bisect_left', 'bisect.bisect_left', (['refinement_array', 'tvane_bottom_edge'], {}), '(refinement_array, tvane_bottom_edge)\n', (16201, 16238), False, 'import bisect\n'), ((16642, 16693), 'bisect.bisect_left', 'bisect.bisect_left', (['refinement_array', 'vane_top_edge'], {}), '(refinement_array, vane_top_edge)\n', (16660, 16693), False, 'import bisect\n'), ((16904, 16956), 'bisect.bisect_left', 'bisect.bisect_left', (['refinement_array', 'vane_top2_edge'], {}), '(refinement_array, vane_top2_edge)\n', (16922, 16956), False, 'import bisect\n'), ((17550, 17607), 'bisect.bisect_left', 'bisect.bisect_left', (['refinement_array', '(sim_xmax - vane_rad)'], {}), '(refinement_array, sim_xmax - vane_rad)\n', (17568, 17607), False, 'import bisect\n'), ((24703, 24739), 'pyqtgraph.mkPen', 'pg.mkPen', ([], {'width': '(1.5)', 'color': 'colors[6]'}), '(width=1.5, color=colors[6])\n', (24711, 24739), True, 'import pyqtgraph as pg\n'), ((24746, 24782), 'pyqtgraph.mkPen', 'pg.mkPen', ([], {'width': '(1.5)', 'color': 'colors[5]'}), '(width=1.5, color=colors[5])\n', (24754, 24782), True, 'import pyqtgraph as pg\n'), ((24892, 24933), 'pyqtgraph.PlotWidget', 'pg.PlotWidget', ([], {'title': 'title', 'labels': 'labels'}), 
'(title=title, labels=labels)\n', (24905, 24933), True, 'import pyqtgraph as pg\n'), ((24960, 24985), 'pyqtgraph.PlotDataItem', 'pg.PlotDataItem', ([], {'pen': 'xpen'}), '(pen=xpen)\n', (24975, 24985), True, 'import pyqtgraph as pg\n'), ((25015, 25040), 'pyqtgraph.PlotDataItem', 'pg.PlotDataItem', ([], {'pen': 'xpen'}), '(pen=xpen)\n', (25030, 25040), True, 'import pyqtgraph as pg\n'), ((25067, 25092), 'pyqtgraph.PlotDataItem', 'pg.PlotDataItem', ([], {'pen': 'ypen'}), '(pen=ypen)\n', (25082, 25092), True, 'import pyqtgraph as pg\n'), ((25122, 25147), 'pyqtgraph.PlotDataItem', 'pg.PlotDataItem', ([], {'pen': 'ypen'}), '(pen=ypen)\n', (25137, 25147), True, 'import pyqtgraph as pg\n'), ((25690, 25724), 'pyqtgraph.Qt.QtGui.QApplication.processEvents', 'QtGui.QApplication.processEvents', ([], {}), '()\n', (25722, 25724), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((25766, 25800), 'pyqtgraph.mkPen', 'pg.mkPen', ([], {'width': '(1)', 'color': 'colors[6]'}), '(width=1, color=colors[6])\n', (25774, 25800), True, 'import pyqtgraph as pg\n'), ((25807, 25841), 'pyqtgraph.mkPen', 'pg.mkPen', ([], {'width': '(1)', 'color': 'colors[5]'}), '(width=1, color=colors[5])\n', (25815, 25841), True, 'import pyqtgraph as pg\n'), ((26040, 26081), 'pyqtgraph.PlotWidget', 'pg.PlotWidget', ([], {'title': 'title', 'labels': 'labels'}), '(title=title, labels=labels)\n', (26053, 26081), True, 'import pyqtgraph as pg\n'), ((26142, 26196), 'pyqtgraph.ScatterPlotItem', 'pg.ScatterPlotItem', ([], {'pen': 'xpen', 'symbol': 'symbol', 'size': 'size'}), '(pen=xpen, symbol=symbol, size=size)\n', (26160, 26196), True, 'import pyqtgraph as pg\n'), ((26223, 26277), 'pyqtgraph.ScatterPlotItem', 'pg.ScatterPlotItem', ([], {'pen': 'ypen', 'symbol': 'symbol', 'size': 'size'}), '(pen=ypen, symbol=symbol, size=size)\n', (26241, 26277), True, 'import pyqtgraph as pg\n'), ((27144, 27178), 'pyqtgraph.Qt.QtGui.QApplication.processEvents', 'QtGui.QApplication.processEvents', ([], {}), '()\n', (27176, 27178), 
False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((3363, 3421), 'numpy.concatenate', 'np.concatenate', (['(self._velocityarray, particle_velocities)'], {}), '((self._velocityarray, particle_velocities))\n', (3377, 3421), True, 'import numpy as np\n'), ((7844, 7902), 'numpy.concatenate', 'np.concatenate', (['(self._velocityarray, particle_velocities)'], {}), '((self._velocityarray, particle_velocities))\n', (7858, 7902), True, 'import numpy as np\n'), ((16475, 16530), 'bisect.bisect_left', 'bisect.bisect_left', (['refinement_array', 'bvane_bottom_edge'], {}), '(refinement_array, bvane_bottom_edge)\n', (16493, 16530), False, 'import bisect\n'), ((17373, 17430), 'bisect.bisect_left', 'bisect.bisect_left', (['refinement_array', '(sim_xmax + vane_rad)'], {}), '(refinement_array, sim_xmax + vane_rad)\n', (17391, 17430), False, 'import bisect\n'), ((27689, 27714), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (27712, 27714), False, 'import datetime\n'), ((27831, 27855), 'h5py.File', 'h5py.File', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (27840, 27855), False, 'import h5py\n'), ((3146, 3234), 'numpy.logical_and', 'np.logical_and', (['(step_zdata > self._zclose - 0.005)', '(step_zdata < self._zclose + 0.005)'], {}), '(step_zdata > self._zclose - 0.005, step_zdata < self._zclose +\n 0.005)\n', (3160, 3234), True, 'import numpy as np\n'), ((3529, 3557), 'numpy.mean', 'np.mean', (['self._velocityarray'], {}), '(self._velocityarray)\n', (3536, 3557), True, 'import numpy as np\n'), ((8010, 8038), 'numpy.mean', 'np.mean', (['self._velocityarray'], {}), '(self._velocityarray)\n', (8017, 8038), True, 'import numpy as np\n'), ((30861, 30886), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (30884, 30886), False, 'import datetime\n'), ((31011, 31035), 'h5py.File', 'h5py.File', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (31020, 31035), False, 'import h5py\n'), ((33084, 33129), 'numpy.concatenate', 
'np.concatenate', (["(_part_data['x'], x_gathered)"], {}), "((_part_data['x'], x_gathered))\n", (33098, 33129), True, 'import numpy as np\n'), ((33164, 33209), 'numpy.concatenate', 'np.concatenate', (["(_part_data['y'], y_gathered)"], {}), "((_part_data['y'], y_gathered))\n", (33178, 33209), True, 'import numpy as np\n'), ((33244, 33289), 'numpy.concatenate', 'np.concatenate', (["(_part_data['z'], z_gathered)"], {}), "((_part_data['z'], z_gathered))\n", (33258, 33289), True, 'import numpy as np\n'), ((33325, 33372), 'numpy.concatenate', 'np.concatenate', (["(_part_data['px'], px_gathered)"], {}), "((_part_data['px'], px_gathered))\n", (33339, 33372), True, 'import numpy as np\n'), ((33418, 33465), 'numpy.concatenate', 'np.concatenate', (["(_part_data['py'], py_gathered)"], {}), "((_part_data['py'], py_gathered))\n", (33432, 33465), True, 'import numpy as np\n'), ((33501, 33548), 'numpy.concatenate', 'np.concatenate', (["(_part_data['pz'], pz_gathered)"], {}), "((_part_data['pz'], pz_gathered))\n", (33515, 33548), True, 'import numpy as np\n'), ((33880, 33927), 'numpy.concatenate', 'np.concatenate', (["(_part_data['vx'], vx_gathered)"], {}), "((_part_data['vx'], vx_gathered))\n", (33894, 33927), True, 'import numpy as np\n'), ((33963, 34010), 'numpy.concatenate', 'np.concatenate', (["(_part_data['vy'], vy_gathered)"], {}), "((_part_data['vy'], vy_gathered))\n", (33977, 34010), True, 'import numpy as np\n'), ((34046, 34093), 'numpy.concatenate', 'np.concatenate', (["(_part_data['vz'], vz_gathered)"], {}), "((_part_data['vz'], vz_gathered))\n", (34060, 34093), True, 'import numpy as np\n'), ((34129, 34176), 'numpy.concatenate', 'np.concatenate', (["(_part_data['id'], id_gathered)"], {}), "((_part_data['id'], id_gathered))\n", (34143, 34176), True, 'import numpy as np\n'), ((4346, 4415), 'numpy.around', 'np.around', (['((self._zfar - self._zclose) / 2 + self._zclose)'], {'decimals': '(3)'}), '((self._zfar - self._zclose) / 2 + self._zclose, decimals=3)\n', (4355, 
4415), True, 'import numpy as np\n'), ((6309, 6354), 'os.path.exists', 'os.path.exists', (["('bunch_particles.%s.dump' % i)"], {}), "('bunch_particles.%s.dump' % i)\n", (6323, 6354), False, 'import os\n'), ((9140, 9209), 'numpy.around', 'np.around', (['((self._zfar - self._zclose) / 2 + self._zclose)'], {'decimals': '(3)'}), '((self._zfar - self._zclose) / 2 + self._zclose, decimals=3)\n', (9149, 9209), True, 'import numpy as np\n'), ((11033, 11078), 'os.path.exists', 'os.path.exists', (["('bunch_particles.%s.dump' % i)"], {}), "('bunch_particles.%s.dump' % i)\n", (11047, 11078), False, 'import os\n'), ((29362, 29384), 'numpy.full', 'np.full', (['_npart', '_mass'], {}), '(_npart, _mass)\n', (29369, 29384), True, 'import numpy as np\n'), ((29450, 29478), 'numpy.full', 'np.full', (['_npart', 'beam.charge'], {}), '(_npart, beam.charge)\n', (29457, 29478), True, 'import numpy as np\n'), ((29554, 29580), 'numpy.full', 'np.full', (['_npart', 'beam.ekin'], {}), '(_npart, beam.ekin)\n', (29561, 29580), True, 'import numpy as np\n'), ((4224, 4278), 'numpy.around', 'np.around', (['((self._zfar + self._zclose) / 2)'], {'decimals': '(3)'}), '((self._zfar + self._zclose) / 2, decimals=3)\n', (4233, 4278), True, 'import numpy as np\n'), ((4308, 4328), 'numpy.mean', 'np.mean', (['z_positions'], {}), '(z_positions)\n', (4315, 4328), True, 'import numpy as np\n'), ((8863, 8883), 'numpy.mean', 'np.mean', (['z_positions'], {}), '(z_positions)\n', (8870, 8883), True, 'import numpy as np\n'), ((9018, 9072), 'numpy.around', 'np.around', (['((self._zfar + self._zclose) / 2)'], {'decimals': '(2)'}), '((self._zfar + self._zclose) / 2, decimals=2)\n', (9027, 9072), True, 'import numpy as np\n'), ((9102, 9122), 'numpy.mean', 'np.mean', (['z_positions'], {}), '(z_positions)\n', (9109, 9122), True, 'import numpy as np\n'), ((32250, 32291), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['x_gathered'], {}), '(x_gathered)\n', (32279, 32291), False, 'import itertools\n'), 
((32337, 32378), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['y_gathered'], {}), '(y_gathered)\n', (32366, 32378), False, 'import itertools\n'), ((32424, 32465), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['z_gathered'], {}), '(z_gathered)\n', (32453, 32465), False, 'import itertools\n'), ((32512, 32554), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['vx_gathered'], {}), '(vx_gathered)\n', (32541, 32554), False, 'import itertools\n'), ((32601, 32643), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['vy_gathered'], {}), '(vy_gathered)\n', (32630, 32643), False, 'import itertools\n'), ((32690, 32732), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['vz_gathered'], {}), '(vz_gathered)\n', (32719, 32732), False, 'import itertools\n'), ((32779, 32821), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['id_gathered'], {}), '(id_gathered)\n', (32808, 32821), False, 'import itertools\n'), ((33616, 33638), 'numpy.full', 'np.full', (['_npart', '_mass'], {}), '(_npart, _mass)\n', (33623, 33638), True, 'import numpy as np\n'), ((33708, 33736), 'numpy.full', 'np.full', (['_npart', 'beam.charge'], {}), '(_npart, beam.charge)\n', (33715, 33736), True, 'import numpy as np\n'), ((33816, 33842), 'numpy.full', 'np.full', (['_npart', 'beam.ekin'], {}), '(_npart, beam.ekin)\n', (33823, 33842), True, 'import numpy as np\n'), ((4189, 4209), 'numpy.mean', 'np.mean', (['z_positions'], {}), '(z_positions)\n', (4196, 4209), True, 'import numpy as np\n'), ((4602, 4668), 'numpy.logical_and', 'np.logical_and', (['(step_zdata > self._zclose)', '(step_zdata < self._zfar)'], {}), '(step_zdata > self._zclose, step_zdata < self._zfar)\n', (4616, 4668), True, 'import numpy as np\n'), ((8983, 9003), 'numpy.mean', 'np.mean', (['z_positions'], {}), '(z_positions)\n', (8990, 9003), True, 'import numpy as np\n')] |
""" Example of ordinary Monte Carlo random sampling a 1-dimensional gaussian model """
import numpy as np
import scipy.stats
from matplotlib.colors import Normalize
from pylab import *; ion()
import probayes as pb
# Settings: 60 observations drawn from N(50, 10); parameter supports below.
rand_size = 60
rand_mean = 50.
rand_stdv = 10.
mu_lims = (40, 60)
sigma_lims = (5, 20.)
n_samples = 5000
# Generate data
data = np.random.normal(loc=rand_mean, scale=rand_stdv, size=rand_size)
# Declare RVs: mu and sigma are the parameters, x is the observable.
mu = pb.RV('mu', vtype=float, vset=mu_lims)
sigma = pb.RV('sigma', vtype=float, vset=sigma_lims)
x = pb.RV('x', vtype=float, vset=[-np.inf, np.inf])
# Set reciprocal prior for sigma (uniform in log sigma)
sigma.set_ufun((np.log, np.exp))
# Set up params and models: a stochastic process over (x | mu, sigma).
paras = pb.RF(mu, sigma)
stats = pb.RF(x)
process = pb.SP(stats, paras)
# Gaussian log-likelihood; 'order' maps RV names to scipy.stats.norm args.
process.set_prob(scipy.stats.norm.logpdf,
order={'x':0, 'mu':'loc', 'sigma':'scale'},
pscale='log')
# SAMPLE AND SUMMARISE
sampler = process.sampler({'mu': {0}, 'sigma': {0}, 'x': data},
iid=True, joint=True, stop=n_samples)
samples = [sample for sample in sampler]
summary = process(samples)
# DETERMINE HAT VALUES (posterior medians via the 0.5 quantile)
inference = summary.rescaled()
mu, sigma, post = inference['mu'], inference['sigma'], inference.prob
mu_sort = inference.sorted('mu')
sigma_sort = inference.sorted('sigma')
hat_mu = mu_sort.quantile(0.5)['mu']
hat_sigma = sigma_sort.quantile(0.5)['sigma']
hat_mu_str = '{:.2f}'.format(hat_mu)
hat_sigma_str = '{:.2f}'.format(hat_sigma)
# Plot posterior: each (mu, sigma) sample colored by its posterior value.
figure()
c_norm = Normalize(vmin=np.min(post), vmax=np.max(post))
c_map = cm.jet(c_norm(post))
scatter(mu, sigma, color=c_map, marker='.', alpha=1.)
xlabel(r'$\mu$')
ylabel(r'$\sigma$')
title(r'$\hat{\mu}=' + hat_mu_str + r',\hat{\sigma}=' + hat_sigma_str + r'$')
# Log scale matches the reciprocal (log-uniform) prior on sigma.
yscale('log')
| [
"probayes.SP",
"probayes.RF",
"numpy.min",
"numpy.max",
"numpy.random.normal",
"probayes.RV"
] | [((355, 419), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'rand_mean', 'scale': 'rand_stdv', 'size': 'rand_size'}), '(loc=rand_mean, scale=rand_stdv, size=rand_size)\n', (371, 419), True, 'import numpy as np\n'), ((440, 478), 'probayes.RV', 'pb.RV', (['"""mu"""'], {'vtype': 'float', 'vset': 'mu_lims'}), "('mu', vtype=float, vset=mu_lims)\n", (445, 478), True, 'import probayes as pb\n'), ((487, 531), 'probayes.RV', 'pb.RV', (['"""sigma"""'], {'vtype': 'float', 'vset': 'sigma_lims'}), "('sigma', vtype=float, vset=sigma_lims)\n", (492, 531), True, 'import probayes as pb\n'), ((536, 583), 'probayes.RV', 'pb.RV', (['"""x"""'], {'vtype': 'float', 'vset': '[-np.inf, np.inf]'}), "('x', vtype=float, vset=[-np.inf, np.inf])\n", (541, 583), True, 'import probayes as pb\n'), ((688, 704), 'probayes.RF', 'pb.RF', (['mu', 'sigma'], {}), '(mu, sigma)\n', (693, 704), True, 'import probayes as pb\n'), ((713, 721), 'probayes.RF', 'pb.RF', (['x'], {}), '(x)\n', (718, 721), True, 'import probayes as pb\n'), ((732, 751), 'probayes.SP', 'pb.SP', (['stats', 'paras'], {}), '(stats, paras)\n', (737, 751), True, 'import probayes as pb\n'), ((1515, 1527), 'numpy.min', 'np.min', (['post'], {}), '(post)\n', (1521, 1527), True, 'import numpy as np\n'), ((1534, 1546), 'numpy.max', 'np.max', (['post'], {}), '(post)\n', (1540, 1546), True, 'import numpy as np\n')] |
# Partially based on codebase by <NAME> (https://github.com/lmcinnes/umap)
from __future__ import print_function
import numpy as np
import numba
import scipy
from scipy.optimize import curve_fit
from sklearn.neighbors import KDTree
from sklearn.metrics import pairwise_distances
import warnings
#INT32_MIN = np.iinfo(np.int32).min + 1
#INT32_MAX = np.iinfo(np.int32).max - 1
from collections import deque, namedtuple
from warnings import warn
import numpy as np
import numba
#from umap.sparse import sparse_mul, sparse_diff, sparse_sum
#from umap.utils import tau_rand_int, norm
import scipy.sparse
import locale
# NOTE(review): presumably required for consistent numeric parsing by the
# compiled (numba) code paths -- confirm before removing.
locale.setlocale(locale.LC_NUMERIC, "C")
# Tolerance below which a norm or hyperplane margin is treated as zero.
EPS = 1e-8
# Node of a random projection tree: either a leaf holding point indices, or
# an internal node holding a splitting hyperplane/offset and two children.
RandomProjectionTreeNode = namedtuple(
    "RandomProjectionTreeNode",
    ["indices", "is_leaf", "hyperplane", "offset", "left_child", "right_child"],
)
# Array-based (flattened) representation of a random projection tree.
FlatTree = namedtuple("FlatTree", ["hyperplanes", "offsets", "children", "indices"])
@numba.njit(fastmath=True)
def angular_random_projection_split(data, indices, rng_state):
    """Split ``indices`` by a random hyperplane under the angular (cosine) metric.

    Picks two distinct random points from ``indices``, normalizes them, and
    uses the difference of the unit vectors as the splitting hyperplane
    (offset is always None for the angular case).  Points are assigned to a
    side by the sign of their dot product with the hyperplane; near-zero
    margins (< EPS) are assigned a side at random for balance.

    Returns (indices_left, indices_right, hyperplane_vector, None).

    NOTE(review): ``tau_rand_int`` and ``norm`` come from umap.utils, whose
    import is commented out at the top of this file -- confirm they are
    defined elsewhere before running.
    """
    dim = data.shape[1]
    # Select two distinct anchor points at random.
    left_index = tau_rand_int(rng_state) % indices.shape[0]
    right_index = tau_rand_int(rng_state) % indices.shape[0]
    right_index += left_index == right_index
    right_index = right_index % indices.shape[0]
    left = indices[left_index]
    right = indices[right_index]
    left_norm = norm(data[left])
    right_norm = norm(data[right])
    # Guard against division by (near-)zero norms.
    if abs(left_norm) < EPS:
        left_norm = 1.0
    if abs(right_norm) < EPS:
        right_norm = 1.0
    # Hyperplane = difference of the two unit vectors.
    hyperplane_vector = np.empty(dim, dtype=np.float32)
    for d in range(dim):
        hyperplane_vector[d] = (data[left, d] / left_norm) - (
            data[right, d] / right_norm
        )
    hyperplane_norm = norm(hyperplane_vector)
    if abs(hyperplane_norm) < EPS:
        hyperplane_norm = 1.0
    for d in range(dim):
        hyperplane_vector[d] = hyperplane_vector[d] / hyperplane_norm
    # First pass: decide a side for every point and count each side.
    n_left = 0
    n_right = 0
    side = np.empty(indices.shape[0], np.int8)
    for i in range(indices.shape[0]):
        margin = 0.0
        for d in range(dim):
            margin += hyperplane_vector[d] * data[indices[i], d]
        if abs(margin) < EPS:
            # On the hyperplane: assign randomly to keep the split balanced.
            side[i] = tau_rand_int(rng_state) % 2
            if side[i] == 0:
                n_left += 1
            else:
                n_right += 1
        elif margin > 0:
            side[i] = 0
            n_left += 1
        else:
            side[i] = 1
            n_right += 1
    # Second pass: fill the pre-sized output index arrays.
    indices_left = np.empty(n_left, dtype=np.int64)
    indices_right = np.empty(n_right, dtype=np.int64)
    n_left = 0
    n_right = 0
    for i in range(side.shape[0]):
        if side[i] == 0:
            indices_left[n_left] = indices[i]
            n_left += 1
        else:
            indices_right[n_right] = indices[i]
            n_right += 1
    return indices_left, indices_right, hyperplane_vector, None
@numba.njit(fastmath=True, nogil=True)
def euclidean_random_projection_split(data, indices, rng_state):
    """Split ``indices`` by a random hyperplane under the Euclidean metric.

    Picks two distinct random points from ``indices`` and splits along the
    hyperplane that perpendicularly bisects them (normal = difference of the
    points, offset chosen so the midpoint lies on the plane).  Near-zero
    margins (< EPS) are assigned a side at random for balance.

    Returns (indices_left, indices_right, hyperplane_vector, hyperplane_offset).

    NOTE(review): ``tau_rand_int`` comes from umap.utils, whose import is
    commented out at the top of this file -- confirm it is defined elsewhere.
    """
    dim = data.shape[1]
    # Select two distinct anchor points at random.
    left_index = tau_rand_int(rng_state) % indices.shape[0]
    right_index = tau_rand_int(rng_state) % indices.shape[0]
    right_index += left_index == right_index
    right_index = right_index % indices.shape[0]
    left = indices[left_index]
    right = indices[right_index]
    # Normal vector = left - right; offset places the midpoint on the plane.
    hyperplane_offset = 0.0
    hyperplane_vector = np.empty(dim, dtype=np.float32)
    for d in range(dim):
        hyperplane_vector[d] = data[left, d] - data[right, d]
        hyperplane_offset -= (
            hyperplane_vector[d] * (data[left, d] + data[right, d]) / 2.0
        )
    # First pass: decide a side for every point and count each side.
    n_left = 0
    n_right = 0
    side = np.empty(indices.shape[0], np.int8)
    for i in range(indices.shape[0]):
        margin = hyperplane_offset
        for d in range(dim):
            margin += hyperplane_vector[d] * data[indices[i], d]
        if abs(margin) < EPS:
            # On the hyperplane: assign randomly to keep the split balanced.
            side[i] = tau_rand_int(rng_state) % 2
            if side[i] == 0:
                n_left += 1
            else:
                n_right += 1
        elif margin > 0:
            side[i] = 0
            n_left += 1
        else:
            side[i] = 1
            n_right += 1
    # Second pass: fill the pre-sized output index arrays.
    indices_left = np.empty(n_left, dtype=np.int64)
    indices_right = np.empty(n_right, dtype=np.int64)
    n_left = 0
    n_right = 0
    for i in range(side.shape[0]):
        if side[i] == 0:
            indices_left[n_left] = indices[i]
            n_left += 1
        else:
            indices_right[n_right] = indices[i]
            n_right += 1
    return indices_left, indices_right, hyperplane_vector, hyperplane_offset
@numba.njit(fastmath=True)
def sparse_angular_random_projection_split(inds, indptr, data, indices, rng_state):
    """Split ``indices`` by a random angular hyperplane over sparse CSR data.

    Picks two random anchor rows, normalises them, and uses the difference
    of the unit vectors as the hyperplane normal (no offset — angular /
    cosine geometry).  Points within EPS of the plane get a random side.

    Returns (left_indices, right_indices, hyperplane stacked as
    (inds, data), None) — the offset slot is always None for angular splits.
    """
    left_index = tau_rand_int(rng_state) % indices.shape[0]
    right_index = tau_rand_int(rng_state) % indices.shape[0]
    right_index += left_index == right_index
    right_index = right_index % indices.shape[0]
    left = indices[left_index]
    right = indices[right_index]
    # CSR slices of the two anchor rows.
    left_inds = inds[indptr[left] : indptr[left + 1]]
    left_data = data[indptr[left] : indptr[left + 1]]
    right_inds = inds[indptr[right] : indptr[right + 1]]
    right_data = data[indptr[right] : indptr[right + 1]]
    # Normalise both anchors (guarding zero norms) so the split is angular.
    left_norm = norm(left_data)
    right_norm = norm(right_data)
    if abs(left_norm) < EPS:
        left_norm = 1.0
    if abs(right_norm) < EPS:
        right_norm = 1.0
    normalized_left_data = left_data / left_norm
    normalized_right_data = right_data / right_norm
    hyperplane_inds, hyperplane_data = sparse_diff(
        left_inds, normalized_left_data, right_inds, normalized_right_data
    )
    # Normalise the hyperplane itself, again guarding a vanishing norm.
    hyperplane_norm = norm(hyperplane_data)
    if abs(hyperplane_norm) < EPS:
        hyperplane_norm = 1.0
    for d in range(hyperplane_data.shape[0]):
        hyperplane_data[d] = hyperplane_data[d] / hyperplane_norm
    # First pass: assign sides via sparse dot products and count sizes.
    n_left = 0
    n_right = 0
    side = np.empty(indices.shape[0], np.int8)
    for i in range(indices.shape[0]):
        margin = 0.0
        i_inds = inds[indptr[indices[i]] : indptr[indices[i] + 1]]
        i_data = data[indptr[indices[i]] : indptr[indices[i] + 1]]
        mul_inds, mul_data = sparse_mul(
            hyperplane_inds, hyperplane_data, i_inds, i_data
        )
        for d in range(mul_data.shape[0]):
            margin += mul_data[d]
        if abs(margin) < EPS:
            # Numerically on the plane: random side avoids degenerate splits.
            side[i] = tau_rand_int(rng_state) % 2
            if side[i] == 0:
                n_left += 1
            else:
                n_right += 1
        elif margin > 0:
            side[i] = 0
            n_left += 1
        else:
            side[i] = 1
            n_right += 1
    # Second pass: scatter indices into the two sides.
    indices_left = np.empty(n_left, dtype=np.int64)
    indices_right = np.empty(n_right, dtype=np.int64)
    n_left = 0
    n_right = 0
    for i in range(side.shape[0]):
        if side[i] == 0:
            indices_left[n_left] = indices[i]
            n_left += 1
        else:
            indices_right[n_right] = indices[i]
            n_right += 1
    hyperplane = np.vstack((hyperplane_inds, hyperplane_data))
    return indices_left, indices_right, hyperplane, None
@numba.njit(fastmath=True)
def sparse_euclidean_random_projection_split(inds, indptr, data, indices, rng_state):
    """Split ``indices`` by a random euclidean hyperplane over sparse CSR data.

    Picks two random anchor rows; the hyperplane normal is their difference
    and the offset puts the anchors' midpoint on the plane.  Points within
    EPS of the plane get a random side.

    Returns (left_indices, right_indices, hyperplane stacked as
    (inds, data), hyperplane_offset).
    """
    left_index = tau_rand_int(rng_state) % indices.shape[0]
    right_index = tau_rand_int(rng_state) % indices.shape[0]
    right_index += left_index == right_index
    right_index = right_index % indices.shape[0]
    left = indices[left_index]
    right = indices[right_index]
    # CSR slices of the two anchor rows.
    left_inds = inds[indptr[left] : indptr[left + 1]]
    left_data = data[indptr[left] : indptr[left + 1]]
    right_inds = inds[indptr[right] : indptr[right + 1]]
    right_data = data[indptr[right] : indptr[right + 1]]
    # Normal = left - right; offset = -<normal, midpoint>.
    hyperplane_offset = 0.0
    hyperplane_inds, hyperplane_data = sparse_diff(
        left_inds, left_data, right_inds, right_data
    )
    offset_inds, offset_data = sparse_sum(left_inds, left_data, right_inds, right_data)
    offset_data = offset_data / 2.0
    offset_inds, offset_data = sparse_mul(
        hyperplane_inds, hyperplane_data, offset_inds, offset_data
    )
    for d in range(offset_data.shape[0]):
        hyperplane_offset -= offset_data[d]
    # First pass: assign sides via sparse dot products and count sizes.
    n_left = 0
    n_right = 0
    side = np.empty(indices.shape[0], np.int8)
    for i in range(indices.shape[0]):
        margin = hyperplane_offset
        i_inds = inds[indptr[indices[i]] : indptr[indices[i] + 1]]
        i_data = data[indptr[indices[i]] : indptr[indices[i] + 1]]
        mul_inds, mul_data = sparse_mul(
            hyperplane_inds, hyperplane_data, i_inds, i_data
        )
        for d in range(mul_data.shape[0]):
            margin += mul_data[d]
        if abs(margin) < EPS:
            # Numerically on the plane: random side avoids degenerate splits.
            side[i] = tau_rand_int(rng_state) % 2
            if side[i] == 0:
                n_left += 1
            else:
                n_right += 1
        elif margin > 0:
            side[i] = 0
            n_left += 1
        else:
            side[i] = 1
            n_right += 1
    # Second pass: scatter indices into the two sides.
    indices_left = np.empty(n_left, dtype=np.int64)
    indices_right = np.empty(n_right, dtype=np.int64)
    n_left = 0
    n_right = 0
    for i in range(side.shape[0]):
        if side[i] == 0:
            indices_left[n_left] = indices[i]
            n_left += 1
        else:
            indices_right[n_right] = indices[i]
            n_right += 1
    hyperplane = np.vstack((hyperplane_inds, hyperplane_data))
    return indices_left, indices_right, hyperplane, hyperplane_offset
def make_euclidean_tree(data, indices, rng_state, leaf_size=30):
    """Recursively build a random projection tree using euclidean splits.

    Index sets of at most ``leaf_size`` entries become leaves; larger sets
    are split by a random hyperplane and recursed on.
    """
    if indices.shape[0] <= leaf_size:
        # Small enough: this set of points becomes a leaf node.
        return RandomProjectionTreeNode(indices, True, None, None, None, None)
    left_indices, right_indices, hyperplane, offset = euclidean_random_projection_split(
        data, indices, rng_state
    )
    left_child = make_euclidean_tree(data, left_indices, rng_state, leaf_size)
    right_child = make_euclidean_tree(data, right_indices, rng_state, leaf_size)
    return RandomProjectionTreeNode(
        None, False, hyperplane, offset, left_child, right_child
    )
def make_angular_tree(data, indices, rng_state, leaf_size=30):
    """Recursively build a random projection tree using angular splits.

    Index sets of at most ``leaf_size`` entries become leaves; larger sets
    are split by a random angular hyperplane and recursed on.
    """
    if indices.shape[0] <= leaf_size:
        # Small enough: this set of points becomes a leaf node.
        return RandomProjectionTreeNode(indices, True, None, None, None, None)
    left_indices, right_indices, hyperplane, offset = angular_random_projection_split(
        data, indices, rng_state
    )
    left_child = make_angular_tree(data, left_indices, rng_state, leaf_size)
    right_child = make_angular_tree(data, right_indices, rng_state, leaf_size)
    return RandomProjectionTreeNode(
        None, False, hyperplane, offset, left_child, right_child
    )
def make_sparse_euclidean_tree(inds, indptr, data, indices, rng_state, leaf_size=30):
    """Recursively build an RP tree over sparse CSR data (euclidean splits)."""
    if indices.shape[0] <= leaf_size:
        # Small enough: this set of rows becomes a leaf node.
        return RandomProjectionTreeNode(indices, True, None, None, None, None)
    left_indices, right_indices, hyperplane, offset = sparse_euclidean_random_projection_split(
        inds, indptr, data, indices, rng_state
    )
    left_child = make_sparse_euclidean_tree(
        inds, indptr, data, left_indices, rng_state, leaf_size
    )
    right_child = make_sparse_euclidean_tree(
        inds, indptr, data, right_indices, rng_state, leaf_size
    )
    return RandomProjectionTreeNode(
        None, False, hyperplane, offset, left_child, right_child
    )
def make_sparse_angular_tree(inds, indptr, data, indices, rng_state, leaf_size=30):
    """Recursively build an RP tree over sparse CSR data (angular splits)."""
    if indices.shape[0] <= leaf_size:
        # Small enough: this set of rows becomes a leaf node.
        return RandomProjectionTreeNode(indices, True, None, None, None, None)
    left_indices, right_indices, hyperplane, offset = sparse_angular_random_projection_split(
        inds, indptr, data, indices, rng_state
    )
    left_child = make_sparse_angular_tree(
        inds, indptr, data, left_indices, rng_state, leaf_size
    )
    right_child = make_sparse_angular_tree(
        inds, indptr, data, right_indices, rng_state, leaf_size
    )
    return RandomProjectionTreeNode(
        None, False, hyperplane, offset, left_child, right_child
    )
def make_tree(data, rng_state, leaf_size=30, angular=False):
    """Construct a random projection tree, dispatching on sparsity and metric.

    ``data`` may be a dense array or a CSR sparse matrix; ``angular``
    selects cosine-style splits instead of euclidean ones.
    """
    indices = np.arange(data.shape[0])
    # NOTE(review): relies on a module-level `import scipy.sparse` that is
    # not visible in this chunk of the file.
    if scipy.sparse.isspmatrix_csr(data):
        if angular:
            builder = make_sparse_angular_tree
        else:
            builder = make_sparse_euclidean_tree
        return builder(
            data.indices, data.indptr, data.data, indices, rng_state, leaf_size
        )
    if angular:
        return make_angular_tree(data, indices, rng_state, leaf_size)
    return make_euclidean_tree(data, indices, rng_state, leaf_size)
def num_nodes(tree):
    """Total number of nodes (internal plus leaf) in the tree."""
    if tree.is_leaf:
        return 1
    return 1 + num_nodes(tree.left_child) + num_nodes(tree.right_child)
def num_leaves(tree):
    """Number of leaf nodes in the tree."""
    if tree.is_leaf:
        return 1
    return num_leaves(tree.left_child) + num_leaves(tree.right_child)
def max_sparse_hyperplane_size(tree):
    """Largest number of stored hyperplane columns across internal nodes.

    Leaves contribute 0; internal nodes contribute
    ``tree.hyperplane.shape[1]``.
    """
    if tree.is_leaf:
        return 0
    child_max = max(
        max_sparse_hyperplane_size(tree.left_child),
        max_sparse_hyperplane_size(tree.right_child),
    )
    return max(tree.hyperplane.shape[1], child_max)
def recursive_flatten(
    tree, hyperplanes, offsets, children, indices, node_num, leaf_num
):
    """Fill the flat-tree arrays by pre-order traversal.

    Leaves store the negated leaf number in ``children[node, 0]`` and copy
    their index list into ``indices``; internal nodes store their
    hyperplane, offset and child node numbers.  Returns the updated
    (node_num, leaf_num) counters so siblings number themselves correctly.
    """
    if tree.is_leaf:
        # A non-positive first-child entry marks a leaf and encodes the
        # row of the indices array holding its members.
        children[node_num, 0] = -leaf_num
        indices[leaf_num, : tree.indices.shape[0]] = tree.indices
        leaf_num += 1
        return node_num, leaf_num
    else:
        if len(tree.hyperplane.shape) > 1:
            # Sparse hyperplane: copy into the left portion of the
            # zero-padded slot.
            hyperplanes[node_num][:, : tree.hyperplane.shape[1]] = tree.hyperplane
        else:
            hyperplanes[node_num] = tree.hyperplane
        offsets[node_num] = tree.offset
        # Pre-order: the left child is the very next node number.
        children[node_num, 0] = node_num + 1
        old_node_num = node_num
        node_num, leaf_num = recursive_flatten(
            tree.left_child,
            hyperplanes,
            offsets,
            children,
            indices,
            node_num + 1,
            leaf_num,
        )
        # The right child starts after the whole left subtree.
        children[old_node_num, 1] = node_num + 1
        node_num, leaf_num = recursive_flatten(
            tree.right_child,
            hyperplanes,
            offsets,
            children,
            indices,
            node_num + 1,
            leaf_num,
        )
        return node_num, leaf_num
def flatten_tree(tree, leaf_size):
    """Flatten a recursive RP-tree into parallel arrays for fast querying.

    Returns a FlatTree of (hyperplanes, offsets, children, indices); -1
    entries pad unused child pointers and leaf slots.
    """
    n_nodes = num_nodes(tree)
    n_leaves = num_leaves(tree)
    is_sparse = len(tree.hyperplane.shape) > 1
    if is_sparse:
        # Sparse hyperplanes store (inds, data) rows padded to the widest node.
        max_hyperplane_nnz = max_sparse_hyperplane_size(tree)
        hyperplanes = np.zeros(
            (n_nodes, tree.hyperplane.shape[0], max_hyperplane_nnz), dtype=np.float32
        )
    else:
        hyperplanes = np.zeros((n_nodes, tree.hyperplane.shape[0]), dtype=np.float32)
    offsets = np.zeros(n_nodes, dtype=np.float32)
    children = -1 * np.ones((n_nodes, 2), dtype=np.int64)
    indices = -1 * np.ones((n_leaves, leaf_size), dtype=np.int64)
    recursive_flatten(tree, hyperplanes, offsets, children, indices, 0, 0)
    return FlatTree(hyperplanes, offsets, children, indices)
@numba.njit()
def select_side(hyperplane, offset, point, rng_state):
    """Return 0 (left) or 1 (right) for ``point`` against a hyperplane.

    Margins within EPS of zero are broken by a random coin flip so
    duplicate points do not all land on one side.
    """
    margin = offset
    for d in range(point.shape[0]):
        margin += hyperplane[d] * point[d]
    if abs(margin) < EPS:
        coin = tau_rand_int(rng_state) % 2
        if coin == 0:
            return 0
        return 1
    if margin > 0:
        return 0
    return 1
@numba.njit()
def search_flat_tree(point, hyperplanes, offsets, children, indices, rng_state):
    """Route ``point`` down a flattened RP-tree and return its leaf's indices.

    Internal nodes have a positive left-child pointer; leaves store the
    negated leaf number in ``children[node, 0]``.
    """
    node = 0
    while children[node, 0] > 0:
        which = select_side(hyperplanes[node], offsets[node], point, rng_state)
        node = children[node, which]
    return indices[-children[node, 0]]
def make_forest(data, n_neighbors, n_trees, rng_state, angular=False):
    """Build ``n_trees`` flattened random projection trees over ``data``.

    On a recursion-related failure the forest is abandoned entirely and an
    empty list is returned (with a warning) rather than a partial forest.
    """
    result = []
    leaf_size = max(10, n_neighbors)
    try:
        built = []
        for _ in range(n_trees):
            tree = make_tree(data, rng_state, leaf_size, angular)
            built.append(flatten_tree(tree, leaf_size))
        result = built
    except (RuntimeError, RecursionError, SystemError):
        warn(
            "Random Projection forest initialisation failed due to recursion"
            "limit being reached. Something is a little strange with your "
            "data, and this may take longer than normal to compute."
        )
    return result
def rptree_leaf_array(rp_forest):
    """Stack the leaf index arrays of every tree in the forest.

    Returns the placeholder ``[[-1]]`` when the forest is empty.
    """
    if not rp_forest:
        return np.array([[-1]])
    return np.vstack([tree.indices for tree in rp_forest])
import numpy as np
import numba
# Default stand-ins for the optional metric parameters used below
# (``vinv`` in mahalanobis, ``sigma`` in standardised_euclidean, ``w`` in
# weighted_minkowski).
_mock_identity = np.eye(2, dtype=np.float64)
_mock_ones = np.ones(2, dtype=np.float64)
@numba.njit(fastmath=True)
def euclidean(x, y):
    """Standard euclidean (l2) distance between vectors ``x`` and ``y``."""
    sq_sum = 0.0
    for d in range(x.shape[0]):
        sq_sum += (x[d] - y[d]) ** 2
    return np.sqrt(sq_sum)
@numba.njit()
def standardised_euclidean(x, y, sigma=_mock_ones):
    """Euclidean distance with each squared difference scaled by ``sigma``."""
    total = 0.0
    for d in range(x.shape[0]):
        total += ((x[d] - y[d]) ** 2) / sigma[d]
    return np.sqrt(total)
@numba.njit()
def manhattan(x, y):
    """Manhattan (l1, taxicab) distance between ``x`` and ``y``."""
    total = 0.0
    for d in range(x.shape[0]):
        total += np.abs(x[d] - y[d])
    return total
@numba.njit()
def chebyshev(x, y):
    """Chebyshev (l-infinity) distance: the largest coordinate difference."""
    best = 0.0
    for d in range(x.shape[0]):
        best = max(best, np.abs(x[d] - y[d]))
    return best
@numba.njit()
def minkowski(x, y, p=2):
    """Minkowski distance with exponent ``p`` (p=2 gives euclidean)."""
    acc = 0.0
    for d in range(x.shape[0]):
        acc += np.abs(x[d] - y[d]) ** p
    return acc ** (1.0 / p)
@numba.njit()
def weighted_minkowski(x, y, w=_mock_ones, p=2):
    """Minkowski distance with per-coordinate weights ``w`` and exponent ``p``."""
    acc = 0.0
    for d in range(x.shape[0]):
        acc += (w[d] * np.abs(x[d] - y[d])) ** p
    return acc ** (1.0 / p)
@numba.njit()
def mahalanobis(x, y, vinv=_mock_identity):
    """Mahalanobis distance under the inverse covariance matrix ``vinv``."""
    dim = x.shape[0]
    delta = np.empty(dim, dtype=np.float64)
    for d in range(dim):
        delta[d] = x[d] - y[d]
    # Quadratic form delta^T * vinv * delta, accumulated row by row.
    acc = 0.0
    for row in range(dim):
        row_sum = 0.0
        for col in range(dim):
            row_sum += vinv[row, col] * delta[col]
        acc += row_sum * delta[row]
    return np.sqrt(acc)
@numba.njit()
def hamming(x, y):
    """Fraction of coordinates on which ``x`` and ``y`` disagree."""
    mismatches = 0.0
    for d in range(x.shape[0]):
        if x[d] != y[d]:
            mismatches += 1.0
    return float(mismatches) / x.shape[0]
@numba.njit()
def canberra(x, y):
    """Canberra distance; coordinates with zero total magnitude are skipped."""
    total = 0.0
    for d in range(x.shape[0]):
        denom = np.abs(x[d]) + np.abs(y[d])
        if denom > 0:
            total += np.abs(x[d] - y[d]) / denom
    return total
@numba.njit()
def bray_curtis(x, y):
    """Bray-Curtis dissimilarity; 0.0 when the denominator vanishes."""
    num = 0.0
    denom = 0.0
    for d in range(x.shape[0]):
        num += np.abs(x[d] - y[d])
        denom += np.abs(x[d] + y[d])
    if denom > 0.0:
        return float(num) / denom
    return 0.0
@numba.njit()
def jaccard(x, y):
    """Jaccard distance on the nonzero supports of ``x`` and ``y``."""
    n_union = 0.0
    n_both = 0.0
    for d in range(x.shape[0]):
        x_nz = x[d] != 0
        y_nz = y[d] != 0
        n_union += x_nz or y_nz
        n_both += x_nz and y_nz
    if n_union == 0.0:
        return 0.0
    return float(n_union - n_both) / n_union
@numba.njit()
def matching(x, y):
    """Fraction of features on which the nonzero patterns of x and y differ."""
    n_neq = 0.0
    for d in range(x.shape[0]):
        n_neq += (x[d] != 0) != (y[d] != 0)
    return float(n_neq) / x.shape[0]
@numba.njit()
def dice(x, y):
    """Dice dissimilarity on the binarised (nonzero) patterns of x and y."""
    n_tt = 0.0
    n_neq = 0.0
    for d in range(x.shape[0]):
        x_nz = x[d] != 0
        y_nz = y[d] != 0
        n_tt += x_nz and y_nz
        n_neq += x_nz != y_nz
    if n_neq == 0.0:
        # Identical patterns: distance zero by convention.
        return 0.0
    return n_neq / (2.0 * n_tt + n_neq)
@numba.njit()
def kulsinski(x, y):
    """Kulsinski dissimilarity on the binarised patterns of x and y."""
    n_tt = 0.0
    n_neq = 0.0
    for d in range(x.shape[0]):
        x_nz = x[d] != 0
        y_nz = y[d] != 0
        n_tt += x_nz and y_nz
        n_neq += x_nz != y_nz
    if n_neq == 0:
        return 0.0
    return float(n_neq - n_tt + x.shape[0]) / (
        n_neq + x.shape[0]
    )
@numba.njit()
def rogers_tanimoto(x, y):
    """Rogers-Tanimoto dissimilarity on binarised patterns."""
    n_neq = 0.0
    for d in range(x.shape[0]):
        n_neq += (x[d] != 0) != (y[d] != 0)
    return (2.0 * n_neq) / (x.shape[0] + n_neq)
@numba.njit()
def russellrao(x, y):
    """Russell-Rao dissimilarity; 0 when both supports are full and equal."""
    n_tt = 0.0
    for d in range(x.shape[0]):
        n_tt += (x[d] != 0) and (y[d] != 0)
    if n_tt == np.sum(x != 0) and n_tt == np.sum(y != 0):
        return 0.0
    return float(x.shape[0] - n_tt) / (x.shape[0])
@numba.njit()
def sokal_michener(x, y):
    """Sokal-Michener dissimilarity (same formula as Rogers-Tanimoto)."""
    n_neq = 0.0
    for d in range(x.shape[0]):
        n_neq += (x[d] != 0) != (y[d] != 0)
    return (2.0 * n_neq) / (x.shape[0] + n_neq)
@numba.njit()
def sokal_sneath(x, y):
    """Sokal-Sneath dissimilarity on binarised patterns."""
    n_tt = 0.0
    n_neq = 0.0
    for d in range(x.shape[0]):
        x_nz = x[d] != 0
        y_nz = y[d] != 0
        n_tt += x_nz and y_nz
        n_neq += x_nz != y_nz
    if n_neq == 0.0:
        return 0.0
    return n_neq / (0.5 * n_tt + n_neq)
@numba.njit()
def haversine(x, y):
    """Haversine (great-circle) distance between two 2-d points in radians."""
    if x.shape[0] != 2:
        raise ValueError("haversine is only defined for 2 dimensional data")
    half_dlat = np.sin(0.5 * (x[0] - y[0]))
    half_dlong = np.sin(0.5 * (x[1] - y[1]))
    root = np.sqrt(half_dlat ** 2 + np.cos(x[0]) * np.cos(y[0]) * half_dlong ** 2)
    return 2.0 * np.arcsin(root)
@numba.njit()
def yule(x, y):
    """Yule dissimilarity on the binarised (nonzero) patterns of x and y."""
    n_tt = 0.0
    n_tf = 0.0
    n_ft = 0.0
    for d in range(x.shape[0]):
        x_nz = x[d] != 0
        y_nz = y[d] != 0
        n_tt += x_nz and y_nz
        n_tf += x_nz and (not y_nz)
        n_ft += (not x_nz) and y_nz
    n_ff = x.shape[0] - n_tt - n_tf - n_ft
    if n_tf == 0.0 or n_ft == 0.0:
        # No asymmetric disagreement: distance is zero.
        return 0.0
    return (2.0 * n_tf * n_ft) / (
        n_tt * n_ff + n_tf * n_ft
    )
@numba.njit()
def cosine(x, y):
    """Cosine distance, with degenerate zero-norm cases handled explicitly."""
    dot = 0.0
    sq_x = 0.0
    sq_y = 0.0
    for d in range(x.shape[0]):
        dot += x[d] * y[d]
        sq_x += x[d] ** 2
        sq_y += y[d] ** 2
    if sq_x == 0.0 and sq_y == 0.0:
        # Both vectors are zero: treat them as identical.
        return 0.0
    if sq_x == 0.0 or sq_y == 0.0:
        # Exactly one zero vector: maximally dissimilar.
        return 1.0
    return 1.0 - (dot / np.sqrt(sq_x * sq_y))
@numba.njit()
def correlation(x, y):
    """Correlation distance: 1 minus the Pearson correlation of x and y."""
    mean_x = 0.0
    mean_y = 0.0
    for d in range(x.shape[0]):
        mean_x += x[d]
        mean_y += y[d]
    mean_x /= x.shape[0]
    mean_y /= x.shape[0]
    var_x = 0.0
    var_y = 0.0
    covar = 0.0
    for d in range(x.shape[0]):
        dev_x = x[d] - mean_x
        dev_y = y[d] - mean_y
        var_x += dev_x ** 2
        var_y += dev_y ** 2
        covar += dev_x * dev_y
    if var_x == 0.0 and var_y == 0.0:
        # Both constant: treat as identical.
        return 0.0
    if covar == 0.0:
        # Uncorrelated (or one side constant): maximal distance.
        return 1.0
    return 1.0 - (covar / np.sqrt(var_x * var_y))
# Registry mapping metric names (including common aliases such as "l1",
# "l2" and "linf") to their numba-compiled implementations above.
named_distances = {
    "euclidean": euclidean,
    "l2": euclidean,
    "manhattan": manhattan,
    "taxicab": manhattan,
    "l1": manhattan,
    "chebyshev": chebyshev,
    "linfinity": chebyshev,
    "linfty": chebyshev,
    "linf": chebyshev,
    "minkowski": minkowski,
    "seuclidean": standardised_euclidean,
    "standardised_euclidean": standardised_euclidean,
    "wminkowski": weighted_minkowski,
    "weighted_minkowski": weighted_minkowski,
    "mahalanobis": mahalanobis,
    "canberra": canberra,
    "cosine": cosine,
    "correlation": correlation,
    "haversine": haversine,
    "braycurtis": bray_curtis,
    "hamming": hamming,
    "jaccard": jaccard,
    "dice": dice,
    "matching": matching,
    "kulsinski": kulsinski,
    "rogerstanimoto": rogers_tanimoto,
    "russellrao": russellrao,
    "sokalsneath": sokal_sneath,
    "sokalmichener": sokal_michener,
    "yule": yule,
}
import time
import numpy as np
import numba
@numba.njit(parallel=True)
def fast_knn_indices(X, n_neighbors):
    """For each row of the pairwise-distance matrix ``X``, return the column
    indices of its ``n_neighbors`` smallest entries (full argsort per row,
    then truncate).

    NOTE(review): indices are stored as int32 — presumably fine for the
    expected data sizes, but would overflow past 2**31 - 1 rows; confirm.
    """
    knn_indices = np.empty(
        (X.shape[0], n_neighbors), dtype=np.int32
    )
    for row in numba.prange(X.shape[0]):
        v = X[row].argsort(kind="quicksort")
        v = v[:n_neighbors]
        knn_indices[row] = v
    return knn_indices
@numba.njit("i4(i8[:])")
def tau_rand_int(state):
    """Advance the three-component shift-register PRNG ``state`` in place
    and return the next 32-bit pseudo-random integer (may be negative).

    Each assignment below is one component step of a combined
    Tausworthe-style generator; the 0xFFFFFFFF masks keep every component
    within 32 bits.
    """
    state[0] = (
        ((state[0] & 4294967294) << 12) & 0xFFFFFFFF
    ) ^ ((((state[0] << 13) & 0xFFFFFFFF) ^ state[0]) >> 19)
    state[1] = (
        ((state[1] & 4294967288) << 4) & 0xFFFFFFFF
    ) ^ ((((state[1] << 2) & 0xFFFFFFFF) ^ state[1]) >> 25)
    state[2] = (
        ((state[2] & 4294967280) << 17) & 0xFFFFFFFF
    ) ^ ((((state[2] << 3) & 0xFFFFFFFF) ^ state[2]) >> 11)
    # Combine the three components into the output value.
    return state[0] ^ state[1] ^ state[2]
@numba.njit("f4(i8[:])")
def tau_rand(state):
    """Draw a pseudo-random float in [0, 1], advancing ``state`` in place."""
    integer = tau_rand_int(state)
    # The raw draw is a signed 32-bit int; abs + scaling maps it to [0, 1].
    return abs(float(integer) / 0x7FFFFFFF)
@numba.njit()
def norm(vec):
    """Euclidean (l2) norm of ``vec``."""
    sq_sum = 0.0
    for d in range(vec.shape[0]):
        sq_sum += vec[d] ** 2
    return np.sqrt(sq_sum)
@numba.njit()
def rejection_sample(n_samples, pool_size, rng_state):
    """Sample ``n_samples`` distinct integers from [0, pool_size) by rejection.

    Assumes ``n_samples <= pool_size``; otherwise the rejection loop can
    never terminate.
    """
    result = np.empty(n_samples, dtype=np.int64)
    for i in range(n_samples):
        reject_sample = True
        while reject_sample:
            j = tau_rand_int(rng_state) % pool_size
            # for/else: the else branch runs only when no earlier draw
            # equals j, i.e. the candidate is accepted.
            for k in range(i):
                if j == result[k]:
                    break
            else:
                reject_sample = False
        result[i] = j
    return result
@numba.njit("f8[:, :, :](i8,i8)")
def make_heap(n_points, size):
    """Create an empty neighbor-candidate heap structure.

    Returns a float64 array of shape (3, n_points, size):
      result[0] -- candidate indices (-1 marks an empty slot)
      result[1] -- candidate weights/distances (inf marks an empty slot)
      result[2] -- "is new" flags used by NN-descent
    """
    result = np.zeros(
        (3, int(n_points), int(size)), dtype=np.float64
    )
    result[0] = -1
    # Use np.inf rather than np.infty: the infty alias was removed in
    # NumPy 2.0 (identical value on older NumPy).
    result[1] = np.inf
    result[2] = 0
    return result
@numba.njit("i8(f8[:,:,:],i8,f8,i8,i8)")
def heap_push(heap, row, weight, index, flag):
    """Push (index, weight, flag) onto row ``row`` of a neighbor heap.

    The heap keeps the smallest weights seen so far with the current
    largest at slot 0.  Returns 1 if the item was inserted, 0 if it was no
    better than the current worst or already present.
    """
    row = int(row)
    indices = heap[0, row]
    weights = heap[1, row]
    is_new = heap[2, row]
    # Reject anything no better than the current worst element.
    if weight >= weights[0]:
        return 0
    # Reject duplicates (linear scan over the row).
    for i in range(indices.shape[0]):
        if index == indices[i]:
            return 0
    # Replace the root, then sift the new value down to its position.
    weights[0] = weight
    indices[0] = index
    is_new[0] = flag
    i = 0
    while True:
        ic1 = 2 * i + 1
        ic2 = ic1 + 1
        if ic1 >= heap.shape[2]:
            # No children: the new value has found its slot.
            break
        elif ic2 >= heap.shape[2]:
            # Only a left child exists.
            if weights[ic1] > weight:
                i_swap = ic1
            else:
                break
        elif weights[ic1] >= weights[ic2]:
            # Left child is the larger one; swap with it if it beats us.
            if weight < weights[ic1]:
                i_swap = ic1
            else:
                break
        else:
            if weight < weights[ic2]:
                i_swap = ic2
            else:
                break
        # Move the larger child up and continue from its slot.
        weights[i] = weights[i_swap]
        indices[i] = indices[i_swap]
        is_new[i] = is_new[i_swap]
        i = i_swap
    weights[i] = weight
    indices[i] = index
    is_new[i] = flag
    return 1
@numba.njit("i8(f8[:,:,:],i8,f8,i8,i8)")
def unchecked_heap_push(heap, row, weight, index, flag):
    """Like heap_push but without the duplicate scan.

    The caller must guarantee ``index`` is not already in the row (e.g. by
    tracking tried candidates separately).  Returns 1 on insertion, 0 when
    the weight is no better than the current worst.
    """
    indices = heap[0, row]
    weights = heap[1, row]
    is_new = heap[2, row]
    # Reject anything no better than the current worst element.
    if weight >= weights[0]:
        return 0
    # Replace the root, then sift the new value down to its position.
    weights[0] = weight
    indices[0] = index
    is_new[0] = flag
    i = 0
    while True:
        ic1 = 2 * i + 1
        ic2 = ic1 + 1
        if ic1 >= heap.shape[2]:
            # No children: the new value has found its slot.
            break
        elif ic2 >= heap.shape[2]:
            # Only a left child exists.
            if weights[ic1] > weight:
                i_swap = ic1
            else:
                break
        elif weights[ic1] >= weights[ic2]:
            # Left child is the larger one; swap with it if it beats us.
            if weight < weights[ic1]:
                i_swap = ic1
            else:
                break
        else:
            if weight < weights[ic2]:
                i_swap = ic2
            else:
                break
        # Move the larger child up and continue from its slot.
        weights[i] = weights[i_swap]
        indices[i] = indices[i_swap]
        is_new[i] = is_new[i_swap]
        i = i_swap
    weights[i] = weight
    indices[i] = index
    is_new[i] = flag
    return 1
@numba.njit()
def siftdown(heap1, heap2, elt):
    """Restore the max-heap property of ``heap1`` downward from ``elt``,
    mirroring every swap into ``heap2`` so the two arrays stay aligned."""
    while elt * 2 + 1 < heap1.shape[0]:
        left_child = elt * 2 + 1
        right_child = left_child + 1
        # Pick the largest of elt and its (up to two) children.
        swap = elt
        if heap1[swap] < heap1[left_child]:
            swap = left_child
        if (
            right_child < heap1.shape[0]
            and heap1[swap] < heap1[right_child]
        ):
            swap = right_child
        if swap == elt:
            # Heap property holds: nothing left to do.
            break
        else:
            heap1[elt], heap1[swap] = (
                heap1[swap],
                heap1[elt],
            )
            heap2[elt], heap2[swap] = (
                heap2[swap],
                heap2[elt],
            )
            elt = swap
@numba.njit()
def deheap_sort(heap):
    """Sort each heap row by ascending weight (in-place heapsort).

    Repeatedly swaps the max root to the back of the active prefix and
    sifts the new root down.  Returns (indices as int64, weights).
    """
    indices = heap[0]
    weights = heap[1]
    for i in range(indices.shape[0]):
        ind_heap = indices[i]
        dist_heap = weights[i]
        for j in range(ind_heap.shape[0] - 1):
            # Swap the current max (slot 0) to the end of the unsorted prefix.
            ind_heap[0], ind_heap[
                ind_heap.shape[0] - j - 1
            ] = (
                ind_heap[ind_heap.shape[0] - j - 1],
                ind_heap[0],
            )
            dist_heap[0], dist_heap[
                dist_heap.shape[0] - j - 1
            ] = (
                dist_heap[dist_heap.shape[0] - j - 1],
                dist_heap[0],
            )
            # Re-heapify the shrunken prefix.
            siftdown(
                dist_heap[: dist_heap.shape[0] - j - 1],
                ind_heap[: ind_heap.shape[0] - j - 1],
                0,
            )
    return indices.astype(np.int64), weights
@numba.njit("i8(f8[:, :, :],i8)")
def smallest_flagged(heap, row):
    """Pop the nearest still-flagged candidate from heap row ``row``.

    Returns its index (clearing the flag so it is not returned again), or
    -1 once no flagged candidates remain.
    """
    ind = heap[0, row]
    dist = heap[1, row]
    flag = heap[2, row]
    min_dist = np.inf
    result_index = -1
    for i in range(ind.shape[0]):
        if flag[i] == 1 and dist[i] < min_dist:
            min_dist = dist[i]
            result_index = i
    if result_index >= 0:
        # Clear the flag so this candidate is only expanded once.
        flag[result_index] = 0.0
        return int(ind[result_index])
    else:
        return -1
@numba.njit(parallel=True)
def build_candidates(
    current_graph,
    n_vertices,
    n_neighbors,
    max_candidates,
    rng_state,
):
    """Build the NN-descent candidate heap for one iteration.

    Each directed edge of ``current_graph`` contributes both endpoints to
    the other's candidate list with a random priority; the edge's "new"
    flag is cleared after processing.
    """
    candidate_neighbors = make_heap(
        n_vertices, max_candidates
    )
    for i in range(n_vertices):
        for j in range(n_neighbors):
            if current_graph[0, i, j] < 0:
                # Empty slot in the neighbor heap.
                continue
            idx = current_graph[0, i, j]
            isn = current_graph[2, i, j]
            # Random priority keeps the retained candidates an unbiased sample.
            d = tau_rand(rng_state)
            heap_push(candidate_neighbors, i, d, idx, isn)
            heap_push(candidate_neighbors, idx, d, i, isn)
            current_graph[2, i, j] = 0
    return candidate_neighbors
@numba.njit(parallel=True)
def new_build_candidates(
    current_graph,
    n_vertices,
    n_neighbors,
    max_candidates,
    rng_state,
    rho=0.5,
):
    """Sampled candidate generation split into "new" and "old" heaps.

    Each edge is considered with probability ``rho``.  Edges flagged as
    new go to the new-candidate heap and have their flag cleared only if
    at least one push succeeded; old edges go to the old-candidate heap.
    """
    new_candidate_neighbors = make_heap(
        n_vertices, max_candidates
    )
    old_candidate_neighbors = make_heap(
        n_vertices, max_candidates
    )
    for i in numba.prange(n_vertices):
        for j in range(n_neighbors):
            if current_graph[0, i, j] < 0:
                # Empty slot in the neighbor heap.
                continue
            idx = current_graph[0, i, j]
            isn = current_graph[2, i, j]
            d = tau_rand(rng_state)
            if tau_rand(rng_state) < rho:
                # c counts successful pushes so the "new" flag is only
                # cleared when the edge actually entered a heap.
                c = 0
                if isn:
                    c += heap_push(
                        new_candidate_neighbors,
                        i,
                        d,
                        idx,
                        isn,
                    )
                    c += heap_push(
                        new_candidate_neighbors,
                        idx,
                        d,
                        i,
                        isn,
                    )
                else:
                    heap_push(
                        old_candidate_neighbors,
                        i,
                        d,
                        idx,
                        isn,
                    )
                    heap_push(
                        old_candidate_neighbors,
                        idx,
                        d,
                        i,
                        isn,
                    )
                if c > 0:
                    current_graph[2, i, j] = 0
    return new_candidate_neighbors, old_candidate_neighbors
@numba.njit(parallel=True)
def submatrix(dmat, indices_col, n_neighbors):
    """Gather, for each row of ``dmat``, the columns named in ``indices_col``."""
    n_rows = dmat.shape[0]
    submat = np.zeros(
        (n_rows, n_neighbors), dtype=dmat.dtype
    )
    for row in numba.prange(n_rows):
        for col in numba.prange(n_neighbors):
            submat[row, col] = dmat[row, indices_col[row, col]]
    return submat
def ts():
    """Return the current wall-clock time as a human-readable ctime string."""
    now = time.time()
    return time.ctime(now)
import numpy as np
import numba
#from umap.rp_tree import search_flat_tree
def make_nn_descent(dist, dist_args):
    """Return a numba-compiled NN-descent routine closed over ``dist``.

    The metric and its extra arguments are baked in via closure so numba
    can specialise the compiled code for them.
    """
    @numba.njit()
    def nn_descent(
        data,
        n_neighbors,
        rng_state,
        max_candidates=50,
        n_iters=10,
        delta=0.001,
        rho=0.5,
        rp_tree_init=True,
        leaf_array=None,
        verbose=False,
    ):
        """Approximate k-NN graph of ``data`` by NN-descent.

        Starts from random neighbors (optionally refined with RP-tree
        leaves), then repeatedly joins sampled candidate pairs until fewer
        than ``delta * n_neighbors * n`` updates occur in an iteration.
        Returns (indices, weights) sorted by ascending distance.
        """
        n_vertices = data.shape[0]
        # Random initialisation: every point gets n_neighbors random edges.
        current_graph = make_heap(data.shape[0], n_neighbors)
        for i in range(data.shape[0]):
            indices = rejection_sample(n_neighbors, data.shape[0], rng_state)
            for j in range(indices.shape[0]):
                d = dist(data[i], data[indices[j]], *dist_args)
                heap_push(current_graph, i, d, indices[j], 1)
                heap_push(current_graph, indices[j], d, i, 1)
        if rp_tree_init:
            # Seed with all pairs that co-occur in an RP-tree leaf
            # (-1 padding marks the end of each leaf's entries).
            for n in range(leaf_array.shape[0]):
                for i in range(leaf_array.shape[1]):
                    if leaf_array[n, i] < 0:
                        break
                    for j in range(i + 1, leaf_array.shape[1]):
                        if leaf_array[n, j] < 0:
                            break
                        d = dist(
                            data[leaf_array[n, i]], data[leaf_array[n, j]], *dist_args
                        )
                        heap_push(
                            current_graph, leaf_array[n, i], d, leaf_array[n, j], 1
                        )
                        heap_push(
                            current_graph, leaf_array[n, j], d, leaf_array[n, i], 1
                        )
        for n in range(n_iters):
            if verbose:
                print("\t", n, " / ", n_iters)
            candidate_neighbors = build_candidates(
                current_graph, n_vertices, n_neighbors, max_candidates, rng_state
            )
            # c counts successful heap updates this iteration.
            c = 0
            for i in range(n_vertices):
                for j in range(max_candidates):
                    p = int(candidate_neighbors[0, i, j])
                    # rho: sampling rate of candidate pairs.
                    if p < 0 or tau_rand(rng_state) < rho:
                        continue
                    for k in range(max_candidates):
                        q = int(candidate_neighbors[0, i, k])
                        # Skip pairs where neither member is "new".
                        if (
                            q < 0
                            or not candidate_neighbors[2, i, j]
                            and not candidate_neighbors[2, i, k]
                        ):
                            continue
                        d = dist(data[p], data[q], *dist_args)
                        c += heap_push(current_graph, p, d, q, 1)
                        c += heap_push(current_graph, q, d, p, 1)
            # Early exit once the graph has (nearly) converged.
            if c <= delta * n_neighbors * data.shape[0]:
                break
        return deheap_sort(current_graph)
    return nn_descent
def make_initialisations(dist, dist_args):
    """Return numba-compiled (init_from_random, init_from_tree) seeders
    specialised (via closure) for the metric ``dist``."""
    @numba.njit(parallel=True)
    def init_from_random(n_neighbors, data, query_points, heap, rng_state):
        """Seed each query's heap with randomly sampled data points."""
        for i in range(query_points.shape[0]):
            indices = rejection_sample(n_neighbors, data.shape[0], rng_state)
            for j in range(indices.shape[0]):
                if indices[j] < 0:
                    continue
                d = dist(data[indices[j]], query_points[i], *dist_args)
                heap_push(heap, i, d, indices[j], 1)
        return
    @numba.njit(parallel=True)
    def init_from_tree(tree, data, query_points, heap, rng_state):
        """Seed each query's heap with the members of its RP-tree leaf."""
        for i in range(query_points.shape[0]):
            indices = search_flat_tree(
                query_points[i],
                tree.hyperplanes,
                tree.offsets,
                tree.children,
                tree.indices,
                rng_state,
            )
            for j in range(indices.shape[0]):
                # -1 entries pad leaves smaller than leaf_size.
                if indices[j] < 0:
                    continue
                d = dist(data[indices[j]], query_points[i], *dist_args)
                heap_push(heap, i, d, indices[j], 1)
        return
    return init_from_random, init_from_tree
def initialise_search(
    forest, data, query_points, n_neighbors, init_from_random, init_from_tree, rng_state
):
    """Seed a candidate heap for each query point.

    Starts from random samples and, when an RP forest is supplied, adds
    the members of each query's leaf in every tree.
    """
    heap = make_heap(query_points.shape[0], n_neighbors)
    init_from_random(n_neighbors, data, query_points, heap, rng_state)
    for tree in (forest or ()):
        init_from_tree(tree, data, query_points, heap, rng_state)
    return heap
def make_initialized_nnd_search(dist, dist_args):
    """Return a numba-compiled greedy graph search specialised for ``dist``."""
    @numba.njit(parallel=True)
    def initialized_nnd_search(data, indptr, indices, initialization, query_points):
        """Refine ``initialization`` by greedy search over the k-NN graph.

        For each query, repeatedly expands the closest unexpanded
        ("flagged") candidate, pushing its unseen graph neighbors onto the
        heap, until no flagged candidates remain.  Returns the heap.
        """
        for i in numba.prange(query_points.shape[0]):
            # Track vertices already pushed so each is evaluated once.
            tried = set(initialization[0, i])
            while True:
                vertex = smallest_flagged(initialization, i)
                if vertex == -1:
                    # Every candidate has been expanded.
                    break
                candidates = indices[indptr[vertex] : indptr[vertex + 1]]
                for j in range(candidates.shape[0]):
                    if (
                        candidates[j] == vertex
                        or candidates[j] == -1
                        or candidates[j] in tried
                    ):
                        continue
                    d = dist(data[candidates[j]], query_points[i], *dist_args)
                    unchecked_heap_push(initialization, i, d, candidates[j], 1)
                    tried.add(candidates[j])
        return initialization
    return initialized_nnd_search
import numpy as np
import numba
import locale
# NOTE(review): forces the "C" numeric locale process-wide at import time;
# presumably to keep numeric parsing/formatting locale-independent -- confirm
# why this module-level side effect is required.
locale.setlocale(locale.LC_NUMERIC, "C")
@numba.njit()
def arr_unique(arr):
    """Return the sorted unique values of ``arr`` (numba-friendly np.unique)."""
    ordered = np.sort(arr)
    # Keep the first element plus every element differing from its predecessor.
    keep = np.concatenate((np.ones(1, dtype=np.bool_), ordered[1:] != ordered[:-1]))
    return ordered[keep]
@numba.njit()
def arr_union(ar1, ar2):
    """Sorted union of two index arrays; empty inputs short-circuit."""
    if ar1.shape[0] == 0:
        return ar2
    if ar2.shape[0] == 0:
        return ar1
    return arr_unique(np.concatenate((ar1, ar2)))
@numba.njit()
def arr_intersect(ar1, ar2):
    """Sorted intersection of two arrays of unique values.

    Relies on each input containing no duplicates: after sorting the
    concatenation, any value appearing twice must occur in both inputs.
    """
    merged = np.concatenate((ar1, ar2))
    merged.sort()
    return merged[:-1][merged[1:] == merged[:-1]]
@numba.njit()
def sparse_sum(ind1, data1, ind2, data2):
    """Merge-add two sparse vectors given as (sorted indices, data) pairs.

    Returns the (indices, data) of the element-wise sum, dropping entries
    that cancel to exactly zero.
    """
    result_ind = arr_union(ind1, ind2)
    result_data = np.zeros(result_ind.shape[0], dtype=np.float32)
    i1 = 0
    i2 = 0
    nnz = 0
    # Merge the two sorted index lists, adding data where indices collide.
    while i1 < ind1.shape[0] and i2 < ind2.shape[0]:
        j1 = ind1[i1]
        j2 = ind2[i2]
        if j1 == j2:
            val = data1[i1] + data2[i2]
            if val != 0:
                result_ind[nnz] = j1
                result_data[nnz] = val
                nnz += 1
            i1 += 1
            i2 += 1
        elif j1 < j2:
            val = data1[i1]
            if val != 0:
                result_ind[nnz] = j1
                result_data[nnz] = val
                nnz += 1
            i1 += 1
        else:
            val = data2[i2]
            if val != 0:
                result_ind[nnz] = j2
                result_data[nnz] = val
                nnz += 1
            i2 += 1
    # Drain whichever tail remains.  BUG FIX: the original stored the loop
    # position (i1/i2) instead of the column index (ind1[i1]/ind2[i2]),
    # inconsistent with the merge loop above.
    while i1 < ind1.shape[0]:
        val = data1[i1]
        if val != 0:
            result_ind[nnz] = ind1[i1]
            result_data[nnz] = val
            nnz += 1
        i1 += 1
    while i2 < ind2.shape[0]:
        val = data2[i2]
        if val != 0:
            result_ind[nnz] = ind2[i2]
            result_data[nnz] = val
            nnz += 1
        i2 += 1
    result_ind = result_ind[:nnz]
    result_data = result_data[:nnz]
    return result_ind, result_data
@numba.njit()
def sparse_diff(ind1, data1, ind2, data2):
    """Element-wise difference of two sparse vectors: sum with the second
    operand negated."""
    neg_data2 = -data2
    return sparse_sum(ind1, data1, ind2, neg_data2)
@numba.njit()
def sparse_mul(ind1, data1, ind2, data2):
    """Element-wise product of two sparse vectors; only shared indices can
    survive, and exact zero products are dropped."""
    result_ind = arr_intersect(ind1, ind2)
    result_data = np.zeros(result_ind.shape[0], dtype=np.float32)
    p1 = 0
    p2 = 0
    nnz = 0
    # Walk both sorted index lists in lockstep, multiplying on collisions.
    while p1 < ind1.shape[0] and p2 < ind2.shape[0]:
        col1 = ind1[p1]
        col2 = ind2[p2]
        if col1 == col2:
            prod = data1[p1] * data2[p2]
            if prod != 0:
                result_ind[nnz] = col1
                result_data[nnz] = prod
                nnz += 1
            p1 += 1
            p2 += 1
        elif col1 < col2:
            p1 += 1
        else:
            p2 += 1
    return result_ind[:nnz], result_data[:nnz]
def make_sparse_nn_descent(sparse_dist, dist_args):
    """Return a numba-compiled NN-descent routine for sparse CSR data,
    specialised (via closure) for the metric ``sparse_dist``."""
    @numba.njit(parallel=True)
    def nn_descent(
        inds,
        indptr,
        data,
        n_vertices,
        n_neighbors,
        rng_state,
        max_candidates=50,
        n_iters=10,
        delta=0.001,
        rho=0.5,
        rp_tree_init=True,
        leaf_array=None,
        verbose=False,
    ):
        """Approximate k-NN graph by NN-descent on CSR (inds, indptr, data).

        Same scheme as the dense version: random initialisation, optional
        RP-tree seeding, then iterative sampled candidate joins with an
        early exit once updates fall below delta * n_neighbors * n_vertices.
        Returns (indices, weights) sorted by ascending distance.
        """
        current_graph = make_heap(n_vertices, n_neighbors)
        # Random initialisation: every row gets n_neighbors random edges.
        for i in range(n_vertices):
            indices = rejection_sample(n_neighbors, n_vertices, rng_state)
            for j in range(indices.shape[0]):
                from_inds = inds[indptr[i] : indptr[i + 1]]
                from_data = data[indptr[i] : indptr[i + 1]]
                to_inds = inds[indptr[indices[j]] : indptr[indices[j] + 1]]
                to_data = data[indptr[indices[j]] : indptr[indices[j] + 1]]
                d = sparse_dist(from_inds, from_data, to_inds, to_data, *dist_args)
                heap_push(current_graph, i, d, indices[j], 1)
                heap_push(current_graph, indices[j], d, i, 1)
        if rp_tree_init:
            # Seed with all pairs that co-occur in an RP-tree leaf
            # (-1 padding marks the end of each leaf's entries).
            for n in range(leaf_array.shape[0]):
                for i in range(leaf_array.shape[1]):
                    if leaf_array[n, i] < 0:
                        break
                    for j in range(i + 1, leaf_array.shape[1]):
                        if leaf_array[n, j] < 0:
                            break
                        from_inds = inds[
                            indptr[leaf_array[n, i]] : indptr[leaf_array[n, i] + 1]
                        ]
                        from_data = data[
                            indptr[leaf_array[n, i]] : indptr[leaf_array[n, i] + 1]
                        ]
                        to_inds = inds[
                            indptr[leaf_array[n, j]] : indptr[leaf_array[n, j] + 1]
                        ]
                        to_data = data[
                            indptr[leaf_array[n, j]] : indptr[leaf_array[n, j] + 1]
                        ]
                        d = sparse_dist(
                            from_inds, from_data, to_inds, to_data, *dist_args
                        )
                        heap_push(
                            current_graph, leaf_array[n, i], d, leaf_array[n, j], 1
                        )
                        heap_push(
                            current_graph, leaf_array[n, j], d, leaf_array[n, i], 1
                        )
        for n in range(n_iters):
            if verbose:
                print("\t", n, " / ", n_iters)
            candidate_neighbors = build_candidates(
                current_graph, n_vertices, n_neighbors, max_candidates, rng_state
            )
            # c counts successful heap updates this iteration.
            c = 0
            for i in range(n_vertices):
                for j in range(max_candidates):
                    p = int(candidate_neighbors[0, i, j])
                    # rho: sampling rate of candidate pairs.
                    if p < 0 or tau_rand(rng_state) < rho:
                        continue
                    for k in range(max_candidates):
                        q = int(candidate_neighbors[0, i, k])
                        # Skip pairs where neither member is "new".
                        if (
                            q < 0
                            or not candidate_neighbors[2, i, j]
                            and not candidate_neighbors[2, i, k]
                        ):
                            continue
                        from_inds = inds[indptr[p] : indptr[p + 1]]
                        from_data = data[indptr[p] : indptr[p + 1]]
                        to_inds = inds[indptr[q] : indptr[q + 1]]
                        to_data = data[indptr[q] : indptr[q + 1]]
                        d = sparse_dist(
                            from_inds, from_data, to_inds, to_data, *dist_args
                        )
                        c += heap_push(current_graph, p, d, q, 1)
                        c += heap_push(current_graph, q, d, p, 1)
            # Early exit once the graph has (nearly) converged.
            if c <= delta * n_neighbors * n_vertices:
                break
        return deheap_sort(current_graph)
    return nn_descent
@numba.njit()
def general_sset_intersection(
    indptr1,
    indices1,
    data1,
    indptr2,
    indices2,
    data2,
    result_row,
    result_col,
    result_val,
    mix_weight=0.5,
):
    """Blend two fuzzy set memberships (CSR) into ``result_val`` in place.

    For each (i, j) pair in result_row/result_col, looks up the membership
    strength in both inputs -- falling back to half the smallest observed
    value when absent -- and combines them with a power weighting
    controlled by ``mix_weight``.
    """
    left_min = max(data1.min() / 2.0, 1.0e-8)
    right_min = max(data2.min() / 2.0, 1.0e-8)
    for idx in range(result_row.shape[0]):
        i = result_row[idx]
        j = result_col[idx]
        # Linear scan of row i for column j; the floor value stands in
        # when the entry is absent.
        left_val = left_min
        for k in range(indptr1[i], indptr1[i + 1]):
            if indices1[k] == j:
                left_val = data1[k]
        right_val = right_min
        for k in range(indptr2[i], indptr2[i + 1]):
            if indices2[k] == j:
                right_val = data2[k]
        # Only update entries present in at least one of the two sets.
        if left_val > left_min or right_val > right_min:
            if mix_weight < 0.5:
                result_val[idx] = left_val * pow(
                    right_val, mix_weight / (1.0 - mix_weight)
                )
            else:
                result_val[idx] = (
                    pow(left_val, (1.0 - mix_weight) / mix_weight) * right_val
                )
    return
@numba.njit()
def sparse_euclidean(ind1, data1, ind2, data2):
    """Euclidean distance between two sparse vectors."""
    diff_inds, diff_data = sparse_diff(ind1, data1, ind2, data2)
    sq_sum = 0.0
    for d in range(diff_data.shape[0]):
        sq_sum += diff_data[d] ** 2
    return np.sqrt(sq_sum)
@numba.njit()
def sparse_manhattan(ind1, data1, ind2, data2):
    """Manhattan (l1) distance between two sparse vectors."""
    diff_inds, diff_data = sparse_diff(ind1, data1, ind2, data2)
    total = 0.0
    for d in range(diff_data.shape[0]):
        total += np.abs(diff_data[d])
    return total
@numba.njit()
def sparse_chebyshev(ind1, data1, ind2, data2):
    """Chebyshev (l-infinity) distance between two sparse vectors."""
    diff_inds, diff_data = sparse_diff(ind1, data1, ind2, data2)
    best = 0.0
    for d in range(diff_data.shape[0]):
        best = max(best, np.abs(diff_data[d]))
    return best
@numba.njit()
def sparse_minkowski(ind1, data1, ind2, data2, p=2.0):
    """Minkowski distance of order p between two sparse vectors."""
    _, diff_vals = sparse_diff(ind1, data1, ind2, data2)
    total = 0.0
    for k in range(diff_vals.shape[0]):
        total += np.abs(diff_vals[k]) ** p
    return total ** (1.0 / p)
@numba.njit()
def sparse_hamming(ind1, data1, ind2, data2, n_features):
    """Hamming distance: fraction of the n_features coordinates that differ."""
    differing = sparse_diff(ind1, data1, ind2, data2)[0]
    return float(differing.shape[0]) / n_features
@numba.njit()
def sparse_canberra(ind1, data1, ind2, data2):
    """Canberra distance: sum over the joint support of |x - y| / (|x| + |y|)."""
    denom_inds, denom_vals = sparse_sum(ind1, np.abs(data1), ind2, np.abs(data2))
    inv_denom = 1.0 / denom_vals
    numer_inds, numer_vals = sparse_diff(ind1, data1, ind2, data2)
    abs_numer = np.abs(numer_vals)
    _, ratio_vals = sparse_mul(numer_inds, abs_numer, denom_inds, inv_denom)
    return np.sum(ratio_vals)
@numba.njit()
def sparse_bray_curtis(ind1, data1, ind2, data2):
    """Bray-Curtis dissimilarity: sum|x - y| / sum(|x| + |y|)."""
    _, denom_vals = sparse_sum(ind1, np.abs(data1), ind2, np.abs(data2))
    # Two all-zero vectors: define the dissimilarity as 0.
    if denom_vals.shape[0] == 0:
        return 0.0
    denominator = np.sum(denom_vals)
    _, numer_vals = sparse_diff(ind1, data1, ind2, data2)
    numerator = np.sum(np.abs(numer_vals))
    return float(numerator) / denominator
@numba.njit()
def sparse_jaccard(ind1, data1, ind2, data2):
    """Jaccard distance computed on the nonzero supports of the two vectors."""
    n_union = arr_union(ind1, ind2).shape[0]
    n_inter = arr_intersect(ind1, ind2).shape[0]
    if n_union == 0:
        # Both supports empty: identical (all-zero) vectors.
        return 0.0
    return float(n_union - n_inter) / n_union
@numba.njit()
def sparse_matching(ind1, data1, ind2, data2, n_features):
    """Matching distance: count of disagreeing supports over n_features."""
    n_inter = arr_intersect(ind1, ind2).shape[0]
    n_union = arr_union(ind1, ind2).shape[0]
    disagreements = n_union - n_inter
    return float(disagreements) / n_features
@numba.njit()
def sparse_dice(ind1, data1, ind2, data2):
    """Dice dissimilarity on the nonzero supports of the two vectors."""
    n_inter = arr_intersect(ind1, ind2).shape[0]
    disagreements = arr_union(ind1, ind2).shape[0] - n_inter
    if disagreements == 0.0:
        return 0.0
    return disagreements / (2.0 * n_inter + disagreements)
@numba.njit()
def sparse_kulsinski(ind1, data1, ind2, data2, n_features):
    """Kulsinski dissimilarity on the nonzero supports, over n_features."""
    n_inter = arr_intersect(ind1, ind2).shape[0]
    disagreements = arr_union(ind1, ind2).shape[0] - n_inter
    if disagreements == 0:
        return 0.0
    return float(disagreements - n_inter + n_features) / (
        disagreements + n_features
    )
@numba.njit()
def sparse_rogers_tanimoto(ind1, data1, ind2, data2, n_features):
    """Rogers-Tanimoto dissimilarity on the nonzero supports."""
    n_inter = arr_intersect(ind1, ind2).shape[0]
    disagreements = arr_union(ind1, ind2).shape[0] - n_inter
    return (2.0 * disagreements) / (n_features + disagreements)
@numba.njit()
def sparse_russellrao(ind1, data1, ind2, data2, n_features):
    """Russell-Rao dissimilarity on the nonzero supports, over n_features.

    Returns 0 when the two vectors share the same support, otherwise
    (n_features - #shared) / n_features.
    """
    # NOTE(review): only the index arrays are compared here, not the stored
    # values — identical supports are treated as identical vectors.
    if ind1.shape[0] == ind2.shape[0] and np.all(ind1 == ind2):
        return 0.0
    num_true_true = arr_intersect(ind1, ind2).shape[0]
    # If every stored entry of both vectors is nonzero and shared, the
    # supports effectively coincide even if the index arrays differ in size.
    if num_true_true == np.sum(data1 != 0) and num_true_true == np.sum(data2 != 0):
        return 0.0
    else:
        return float(n_features - num_true_true) / (n_features)
@numba.njit()
def sparse_sokal_michener(ind1, data1, ind2, data2, n_features):
    """Sokal-Michener dissimilarity on the nonzero supports."""
    n_inter = arr_intersect(ind1, ind2).shape[0]
    disagreements = arr_union(ind1, ind2).shape[0] - n_inter
    return (2.0 * disagreements) / (n_features + disagreements)
@numba.njit()
def sparse_sokal_sneath(ind1, data1, ind2, data2):
    """Sokal-Sneath dissimilarity on the nonzero supports."""
    n_inter = arr_intersect(ind1, ind2).shape[0]
    disagreements = arr_union(ind1, ind2).shape[0] - n_inter
    if disagreements == 0.0:
        return 0.0
    return disagreements / (0.5 * n_inter + disagreements)
@numba.njit()
def sparse_cosine(ind1, data1, ind2, data2):
    """Cosine distance (1 - cosine similarity) between two sparse vectors."""
    _, prod_vals = sparse_mul(ind1, data1, ind2, data2)
    dot = 0.0
    for k in range(prod_vals.shape[0]):
        dot += prod_vals[k]
    norm1 = norm(data1)
    norm2 = norm(data2)
    # Degenerate cases: two zero vectors are identical, one zero vector is
    # maximally distant from any nonzero vector.
    if norm1 == 0.0 and norm2 == 0.0:
        return 0.0
    if norm1 == 0.0 or norm2 == 0.0:
        return 1.0
    return 1.0 - (dot / (norm1 * norm2))
@numba.njit()
def sparse_correlation(ind1, data1, ind2, data2, n_features):
    """Correlation distance (1 - Pearson r) between two sparse vectors.

    Means, norms, and the dot product all account for the implicit zeros
    outside each vector's support, so the result matches the dense
    computation over all ``n_features`` coordinates.
    """
    mu_x = 0.0
    mu_y = 0.0
    dot_product = 0.0
    # Degenerate supports: two empty vectors are identical; one empty vector
    # is maximally distant.
    if ind1.shape[0] == 0 and ind2.shape[0] == 0:
        return 0.0
    elif ind1.shape[0] == 0 or ind2.shape[0] == 0:
        return 1.0
    # Means over all n_features coordinates (implicit zeros contribute 0).
    for i in range(data1.shape[0]):
        mu_x += data1[i]
    for i in range(data2.shape[0]):
        mu_y += data2[i]
    mu_x /= n_features
    mu_y /= n_features
    shifted_data1 = np.empty(data1.shape[0], dtype=np.float32)
    shifted_data2 = np.empty(data2.shape[0], dtype=np.float32)
    for i in range(data1.shape[0]):
        shifted_data1[i] = data1[i] - mu_x
    for i in range(data2.shape[0]):
        shifted_data2[i] = data2[i] - mu_y
    # Centred norms; the second term adds the (n_features - nnz) implicit
    # zeros, each of which becomes -mu after centring.
    norm1 = np.sqrt(
        (norm(shifted_data1) ** 2) + (n_features - ind1.shape[0]) * (mu_x ** 2)
    )
    norm2 = np.sqrt(
        (norm(shifted_data2) ** 2) + (n_features - ind2.shape[0]) * (mu_y ** 2)
    )
    dot_prod_inds, dot_prod_data = sparse_mul(ind1, shifted_data1, ind2, shifted_data2)
    common_indices = set(dot_prod_inds)
    for i in range(dot_prod_data.shape[0]):
        dot_product += dot_prod_data[i]
    # Coordinates stored in only one support: the other side equals -mu.
    for i in range(ind1.shape[0]):
        if ind1[i] not in common_indices:
            dot_product -= shifted_data1[i] * (mu_y)
    for i in range(ind2.shape[0]):
        if ind2[i] not in common_indices:
            dot_product -= shifted_data2[i] * (mu_x)
    all_indices = arr_union(ind1, ind2)
    # Coordinates in neither support contribute (-mu_x) * (-mu_y) each.
    dot_product += mu_x * mu_y * (n_features - all_indices.shape[0])
    if norm1 == 0.0 and norm2 == 0.0:
        return 0.0
    elif dot_product == 0.0:
        return 1.0
    else:
        return 1.0 - (dot_product / (norm1 * norm2))
# Registry mapping metric-name strings (including aliases such as "l1",
# "taxicab" and the various "linf" spellings) to the numba-compiled sparse
# distance functions defined above.
sparse_named_distances = {
    "euclidean": sparse_euclidean,
    "manhattan": sparse_manhattan,
    "l1": sparse_manhattan,
    "taxicab": sparse_manhattan,
    "chebyshev": sparse_chebyshev,
    "linf": sparse_chebyshev,
    "linfty": sparse_chebyshev,
    "linfinity": sparse_chebyshev,
    "minkowski": sparse_minkowski,
    "canberra": sparse_canberra,
    "hamming": sparse_hamming,
    "jaccard": sparse_jaccard,
    "dice": sparse_dice,
    "matching": sparse_matching,
    "kulsinski": sparse_kulsinski,
    "rogerstanimoto": sparse_rogers_tanimoto,
    "russellrao": sparse_russellrao,
    "sokalmichener": sparse_sokal_michener,
    "sokalsneath": sparse_sokal_sneath,
    "cosine": sparse_cosine,
    "correlation": sparse_correlation,
}
# Metrics whose kernels take an extra ``n_features`` argument; callers must
# inject it into the metric kwargs (nearest_neighbors does this for CSR input).
sparse_need_n_features = (
    "hamming",
    "matching",
    "kulsinski",
    "rogerstanimoto",
    "russellrao",
    "sokalmichener",
    "correlation",
)
import numpy as np
import numba
import scipy
from sklearn.metrics import pairwise_distances
from sklearn.utils import check_random_state
from sklearn.neighbors import KDTree
from scipy.spatial import cKDTree
from annoy import AnnoyIndex
try:
import faiss
except ImportError:
pass
#INT32_MIN = np.iinfo(np.int32).min + 1
#INT32_MAX = np.iinfo(np.int32).max - 1
# Convergence tolerance for the per-point bandwidth binary search
# (see smooth_knn_dist).
SMOOTH_K_TOLERANCE = 1e-5
# Lower bound on returned bandwidths, as a fraction of the mean distance.
MIN_K_DIST_SCALE = 1e-3
# Alias used as the initial upper bound of the binary search.
NPY_INFINITY = np.inf
def nearest_neighbors(
    X,
    n_neighbors,
    metric,
    metric_kwds,
    angular,
    random_state,
    verbose=False,
):
    """Find the ``n_neighbors`` nearest neighbours of every sample in X.

    Parameters
    ----------
    X : dense array, CSR sparse matrix, or (for ``metric='precomputed'``)
        a pairwise distance matrix.
    n_neighbors : int, neighbours to return per sample.
    metric : metric name, callable, or 'precomputed'.
    metric_kwds : dict of extra keyword arguments for the metric.
    angular : bool, whether the random-projection forest uses angular splits.
    random_state : ``np.random.RandomState`` seeding the RP forest / NN-descent.
    verbose : bool, print progress messages.

    Returns
    -------
    (knn_indices, knn_dists, rp_forest) — index and distance arrays plus the
    RP forest used (an empty list for the precomputed branch).
    """
    if verbose:
        print("Finding Nearest Neighbors")
    if metric == "precomputed":
        # X already holds pairwise distances: sort each row directly.
        knn_indices = fast_knn_indices(X, n_neighbors)
        knn_dists = X[
            np.arange(X.shape[0])[:, None], knn_indices
        ].copy()
        rp_forest = []
    else:
        if callable(metric):
            distance_func = metric
        elif metric in named_distances:
            distance_func = named_distances[metric]
        else:
            raise ValueError(
                "Metric is neither callable, "
                + "nor a recognised string"
            )
        # These metrics are scale-invariant, so angular RP-tree splits are used.
        if metric in (
            "cosine",
            "correlation",
            "dice",
            "jaccard",
        ):
            angular = True
        # Three int64 seeds for the tau-random PRNG used by NN-descent.
        rng_state = random_state.randint(
            np.iinfo(np.int32).min + 1, np.iinfo(np.int32).max - 1, 3
        ).astype(np.int64)
        if scipy.sparse.isspmatrix_csr(X):
            # Sparse path: swap in the sparse metric kernel and NN-descent.
            if metric in sparse.sparse_named_distances:
                distance_func = sparse.sparse_named_distances[
                    metric
                ]
                if metric in sparse.sparse_need_n_features:
                    metric_kwds["n_features"] = X.shape[1]
            else:
                # NOTE(review): ``.format`` binds only to the "data" literal
                # here, so the {} placeholder is never substituted in the
                # raised message.
                raise ValueError(
                    "Metric {} not supported for sparse "
                    + "data".format(metric)
                )
            metric_nn_descent = sparse.make_sparse_nn_descent(
                distance_func, tuple(metric_kwds.values())
            )
            # Forest size and iteration count scale with dataset size.
            n_trees = 5 + int(
                round((X.shape[0]) ** 0.5 / 20.0)
            )
            n_iters = max(
                5, int(round(np.log2(X.shape[0])))
            )
            if verbose:
                print(
                    "Building RP forest with",
                    str(n_trees),
                    "trees",
                )
            rp_forest = make_forest(
                X, n_neighbors, n_trees, rng_state, angular
            )
            leaf_array = rptree_leaf_array(rp_forest)
            if verbose:
                print(
                    "NN descent for",
                    str(n_iters),
                    "iterations",
                )
            knn_indices, knn_dists = metric_nn_descent(
                X.indices,
                X.indptr,
                X.data,
                X.shape[0],
                n_neighbors,
                rng_state,
                max_candidates=60,
                rp_tree_init=True,
                leaf_array=leaf_array,
                n_iters=n_iters,
                verbose=verbose,
            )
        else:
            # Dense path: standard NN-descent seeded by the RP forest.
            metric_nn_descent = make_nn_descent(
                distance_func, tuple(metric_kwds.values())
            )
            n_trees = 5 + int(
                round((X.shape[0]) ** 0.5 / 20.0)
            )
            n_iters = max(
                5, int(round(np.log2(X.shape[0])))
            )
            if verbose:
                print(
                    "Building RP forest with",
                    str(n_trees),
                    "trees",
                )
            rp_forest = make_forest(
                X, n_neighbors, n_trees, rng_state, angular
            )
            leaf_array = rptree_leaf_array(rp_forest)
            if verbose:
                print(
                    "NN descent for",
                    str(n_iters),
                    "iterations",
                )
            knn_indices, knn_dists = metric_nn_descent(
                X,
                n_neighbors,
                rng_state,
                max_candidates=60,
                rp_tree_init=True,
                leaf_array=leaf_array,
                n_iters=n_iters,
                verbose=verbose,
            )
        # Negative indices mark neighbour slots NN-descent failed to fill.
        # NOTE(review): the adjacent string literals below concatenate
        # without separating spaces in the emitted warning.
        if np.any(knn_indices < 0):
            warn(
                "Failed to correctly find n_neighbors for some samples."
                "Results may be less than ideal. Try re-running with"
                "different parameters."
            )
    if verbose:
        print("Finished Nearest Neighbor Search")
    return knn_indices, knn_dists, rp_forest
@numba.njit(
    fastmath=True
)
def smooth_knn_dist(
    distances,
    k,
    n_iter=64,
    local_connectivity=1.0,
    bandwidth=1.0,
    cardinality=None
):
    """Solve for a per-point bandwidth sigma_i (and connectivity radius rho_i).

    For each row of ``distances`` a binary search finds sigma_i such that
    sum_j exp(-(d_ij - rho_i) / sigma_i) hits the target cardinality
    (``cardinality`` if given, else log2(k) * bandwidth).  rho_i is the
    distance to the ``local_connectivity``-th nearest nonzero neighbour,
    linearly interpolated for fractional values.

    Returns (result, rho): the bandwidths and connectivity radii.
    """
    # Callers in this file always pass ``cardinality`` explicitly (k may be
    # 0), so the log2(k) branch is only used by external callers.
    if cardinality is None:
        target = np.log2(k) * bandwidth
    else:
        target = cardinality
    rho = np.zeros(distances.shape[0])
    result = np.zeros(distances.shape[0])
    mean_distances = np.mean(distances)
    for i in range(distances.shape[0]):
        lo = 0.0
        hi = NPY_INFINITY
        mid = 1.0
        ith_distances = distances[i]
        non_zero_dists = ith_distances[ith_distances > 0.0]
        # rho_i: distance to the local_connectivity-th nonzero neighbour,
        # with linear interpolation for fractional connectivity.
        if non_zero_dists.shape[0] >= local_connectivity:
            index = int(np.floor(local_connectivity))
            interpolation = local_connectivity - index
            if index > 0:
                rho[i] = non_zero_dists[index - 1]
                if interpolation > SMOOTH_K_TOLERANCE:
                    rho[i] += interpolation * (
                        non_zero_dists[index]
                        - non_zero_dists[index - 1]
                    )
            else:
                rho[i] = interpolation * non_zero_dists[0]
        elif non_zero_dists.shape[0] > 0:
            rho[i] = np.max(non_zero_dists)
        # Binary search for sigma_i (stored in ``mid``).
        for n in range(n_iter):
            psum = 0.0
            # j starts at 1: the 0-th column (self / nearest) is skipped.
            for j in range(1, distances.shape[1]):
                d = distances[i, j] - rho[i]
                if d > 0:
                    psum += np.exp(-(d / mid))
                else:
                    psum += 1.0
            if np.fabs(psum - target) < SMOOTH_K_TOLERANCE:
                break
            if psum > target:
                hi = mid
                mid = (lo + hi) / 2.0
            else:
                lo = mid
                # No upper bound established yet: expand geometrically.
                if hi == NPY_INFINITY:
                    mid *= 2
                else:
                    mid = (lo + hi) / 2.0
        result[i] = mid
        # Clamp tiny bandwidths to a fraction of the mean distance so the
        # exponential weights stay numerically meaningful.
        if rho[i] > 0.0:
            mean_ith_distances = np.mean(ith_distances)
            if (
                result[i]
                < MIN_K_DIST_SCALE * mean_ith_distances
            ):
                result[i] = (
                    MIN_K_DIST_SCALE * mean_ith_distances
                )
        else:
            if (
                result[i]
                < MIN_K_DIST_SCALE * mean_distances
            ):
                result[i] = (
                    MIN_K_DIST_SCALE * mean_distances
                )
    return result, rho
@numba.njit(parallel=True, fastmath=True)
def compute_membership_strengths(
    knn_indices, knn_dists, sigmas, rhos
):
    """Convert kNN distances into fuzzy membership strengths.

    Each (i, j) neighbour pair yields exp(-(d_ij - rho_i) / sigma_i):
    1.0 inside the locally connected radius rho_i and 0.0 for self-edges.
    Missing neighbours (index -1) leave zero-valued placeholder entries,
    which downstream eliminate_zeros() removes.

    Returns flat COO-style (rows, cols, vals) arrays of length
    n_samples * n_neighbors.
    """
    n_samples = knn_indices.shape[0]
    n_neighbors = knn_indices.shape[1]
    rows = np.zeros(knn_indices.size, dtype=np.int64)
    cols = np.zeros(knn_indices.size, dtype=np.int64)
    vals = np.zeros(knn_indices.size, dtype=np.float64)
    for i in range(n_samples):
        for j in range(n_neighbors):
            if knn_indices[i, j] == -1:
                continue  # neighbour slot was never filled
            if knn_indices[i, j] == i:
                val = 0.0  # suppress self-edges
            elif knn_dists[i, j] - rhos[i] <= 0.0:
                val = 1.0  # within the locally connected radius
            else:
                val = np.exp(
                    -(
                        (knn_dists[i, j] - rhos[i])
                        / (sigmas[i])
                    )
                )
            rows[i * n_neighbors + j] = i
            cols[i * n_neighbors + j] = knn_indices[i, j]
            vals[i * n_neighbors + j] = val
    return rows, cols, vals
def create_tree(data, metric, approx=True, use_faiss=True, n_trees=10):
    """Build a nearest-neighbour search index over the rows of ``data``.

    Parameters
    ----------
    data : 2-D array of samples (rows).
    metric : metric name for the index.
    approx : if True, build an approximate Annoy index.
    use_faiss : if True (and faiss is importable), prefer a faiss flat-L2
        index for exact euclidean search.
    n_trees : number of Annoy trees (approximate path only).

    Returns
    -------
    The constructed index object (AnnoyIndex, faiss.IndexFlatL2, cKDTree,
    or sklearn KDTree, depending on the branches taken).
    """
    # Bug fix: ``sys`` is used below but never imported at module level in
    # this file's visible import block, which made the exact-euclidean
    # branch raise NameError.
    import sys

    if approx:
        ckd = AnnoyIndex(data.shape[1], metric=metric)
        for i in np.arange(data.shape[0]):
            ckd.add_item(i, data[i, :])
        ckd.build(n_trees)
    elif metric == 'euclidean':
        # Only use faiss when it actually imported successfully.
        if 'faiss' in sys.modules and use_faiss:
            ckd = faiss.IndexFlatL2(data.shape[1])
            ckd.add(data)
        else:
            ckd = cKDTree(data)
    else:
        ckd = KDTree(data, metric=metric)
    return ckd
def query_tree(data, ckd, k, metric, approx=True, use_faiss=True):
    """Query an index built by ``create_tree`` for the k nearest neighbours.

    Parameters mirror ``create_tree``; ``ckd`` is the index it returned.

    Returns
    -------
    (distances, indices): two arrays of shape (n_queries, k).
    """
    # Bug fix: ``sys`` is used below but never imported at module level in
    # this file's visible import block.
    import sys

    if approx:
        ckdo_ind = []
        ckdo_dist = []
        for i in np.arange(data.shape[0]):
            holder = ckd.get_nns_by_vector(data[i, :], k, include_distances=True)
            ckdo_ind.append(holder[0])
            ckdo_dist.append(holder[1])
        ckdout = (np.asarray(ckdo_dist), np.asarray(ckdo_ind))
    elif metric == 'euclidean':
        if 'faiss' in sys.modules and use_faiss:
            D, I = ckd.search(data, k)
            # faiss returns squared L2 and can emit tiny negatives.
            D[D < 0] = 0
            ckdout = (np.sqrt(D), I)
        else:
            # Bug fix: SciPy renamed cKDTree.query's ``n_jobs`` parameter to
            # ``workers`` (``n_jobs`` was removed in SciPy >= 1.8), so the
            # old keyword raises TypeError on current SciPy.
            ckdout = ckd.query(x=data, k=k, workers=-1)
    else:
        ckdout = ckd.query(data, k=k)
    return ckdout
def partitioned_nearest_neighbors(X, Y, k, metric='euclidean'):
    """For each row of X, find its k nearest rows in Y.

    Returns (knn_indices, knn_dists), each of shape (len(X), k).
    """
    index = create_tree(Y, metric)
    dists, inds = query_tree(X, index, k, metric)
    return inds, dists
import numpy as np
import scipy.sparse
import scipy.sparse.csgraph
from sklearn.manifold import SpectralEmbedding
from sklearn.metrics import pairwise_distances
from warnings import warn
def component_layout(
    data, n_components, component_labels, dim, metric="euclidean", metric_kwds=None
):
    """Lay out connected-component centroids via spectral embedding.

    Computes one centroid per component, embeds the centroids with a
    SpectralEmbedding of a Gaussian affinity over their pairwise distances,
    and rescales the result by its maximum.

    Parameters
    ----------
    data : array of all samples.
    n_components : number of connected components.
    component_labels : per-sample component index.
    dim : embedding dimensionality.
    metric, metric_kwds : distance metric for the centroids (``None`` means
        no extra keyword arguments).

    Returns
    -------
    (n_components, dim) array of meta-embedding coordinates.
    """
    # Idiom fix: the original used a mutable ``{}`` default argument; a None
    # sentinel avoids the shared-mutable-default pitfall.
    if metric_kwds is None:
        metric_kwds = {}
    component_centroids = np.empty((n_components, data.shape[1]), dtype=np.float64)
    for label in range(n_components):
        component_centroids[label] = data[component_labels == label].mean(axis=0)
    distance_matrix = pairwise_distances(
        component_centroids, metric=metric, **metric_kwds
    )
    # Gaussian affinity: nearby centroids attract in the spectral embedding.
    affinity_matrix = np.exp(-distance_matrix ** 2)
    component_embedding = SpectralEmbedding(
        n_components=dim, affinity="precomputed"
    ).fit_transform(affinity_matrix)
    component_embedding /= component_embedding.max()
    return component_embedding
def multi_component_layout(
    data,
    graph,
    n_components,
    component_labels,
    dim,
    random_state,
    metric="euclidean",
    metric_kwds={},
):
    """Spectral layout for a graph with multiple connected components.

    Each component is embedded independently (spectrally where possible)
    and positioned around a meta-embedding of the component centroids.

    NOTE(review): ``metric_kwds={}`` is a shared mutable default; it is only
    forwarded, never mutated, so it is safe in practice.

    Returns an (n_samples, dim) float32 array of coordinates.
    """
    result = np.empty((graph.shape[0], dim), dtype=np.float32)
    # Place component anchors: spectrally when there are many components,
    # otherwise on signed unit axes.
    if n_components > 2 * dim:
        meta_embedding = component_layout(
            data,
            n_components,
            component_labels,
            dim,
            metric=metric,
            metric_kwds=metric_kwds,
        )
    else:
        k = int(np.ceil(n_components / 2.0))
        base = np.hstack([np.eye(k), np.zeros((k, dim - k))])
        meta_embedding = np.vstack([base, -base])[:n_components]
    for label in range(n_components):
        component_graph = graph.tocsr()[component_labels == label, :].tocsc()
        component_graph = component_graph[:, component_labels == label].tocoo()
        # Spread scale: half the distance to the nearest other anchor.
        distances = pairwise_distances([meta_embedding[label]], meta_embedding)
        data_range = distances[distances > 0.0].min() / 2.0
        # Tiny components: random placement around the anchor.
        if component_graph.shape[0] < 2 * dim:
            result[component_labels == label] = (
                random_state.uniform(
                    low=-data_range,
                    high=data_range,
                    size=(component_graph.shape[0], dim),
                )
                + meta_embedding[label]
            )
            continue
        # Symmetric normalised Laplacian L = I - D^{-1/2} A D^{-1/2}.
        diag_data = np.asarray(component_graph.sum(axis=0))
        I = scipy.sparse.identity(component_graph.shape[0], dtype=np.float64)
        D = scipy.sparse.spdiags(
            1.0 / np.sqrt(diag_data),
            0,
            component_graph.shape[0],
            component_graph.shape[0],
        )
        L = I - D * component_graph * D
        k = dim + 1
        num_lanczos_vectors = max(2 * k + 1, int(np.sqrt(component_graph.shape[0])))
        try:
            eigenvalues, eigenvectors = scipy.sparse.linalg.eigsh(
                L,
                k,
                which="SM",
                ncv=num_lanczos_vectors,
                tol=1e-4,
                v0=np.ones(L.shape[0]),
                maxiter=graph.shape[0] * 5,
            )
            # Drop the trivial constant eigenvector (smallest eigenvalue).
            order = np.argsort(eigenvalues)[1:k]
            component_embedding = eigenvectors[:, order]
            expansion = data_range / np.max(np.abs(component_embedding))
            component_embedding *= expansion
            result[component_labels == label] = (
                component_embedding + meta_embedding[label]
            )
        except scipy.sparse.linalg.ArpackError:
            # Eigensolver failed: fall back to random placement.
            warn(
                "WARNING: spectral initialisation failed! The eigenvector solver\n"
                "failed. This is likely due to too small an eigengap. Consider\n"
                "adding some noise or jitter to your data.\n\n"
                "Falling back to random initialisation!"
            )
            result[component_labels == label] = (
                random_state.uniform(
                    low=-data_range,
                    high=data_range,
                    size=(component_graph.shape[0], dim),
                )
                + meta_embedding[label]
            )
    return result
def spectral_layout(data, graph, dim, random_state, metric="euclidean", metric_kwds={}):
    """Laplacian-eigenmap initialisation of a ``dim``-dimensional layout.

    Embeds the graph via eigenvectors of its symmetric normalised Laplacian.
    Disconnected graphs are delegated to ``multi_component_layout``; if the
    eigensolver fails, coordinates fall back to uniform random values.

    NOTE(review): ``metric_kwds={}`` is a shared mutable default; it is only
    forwarded, never mutated, so it is safe in practice.
    """
    n_samples = graph.shape[0]
    n_components, labels = scipy.sparse.csgraph.connected_components(graph)
    if n_components > 1:
        warn(
            "Embedding a total of {} separate connected components using meta-embedding (experimental)".format(
                n_components
            )
        )
        return multi_component_layout(
            data,
            graph,
            n_components,
            labels,
            dim,
            random_state,
            metric=metric,
            metric_kwds=metric_kwds,
        )
    # Symmetric normalised Laplacian L = I - D^{-1/2} A D^{-1/2}.
    diag_data = np.asarray(graph.sum(axis=0))
    I = scipy.sparse.identity(graph.shape[0], dtype=np.float64)
    D = scipy.sparse.spdiags(
        1.0 / np.sqrt(diag_data), 0, graph.shape[0], graph.shape[0]
    )
    L = I - D * graph * D
    k = dim + 1
    num_lanczos_vectors = max(2 * k + 1, int(np.sqrt(graph.shape[0])))
    try:
        if L.shape[0] < 2000000:
            eigenvalues, eigenvectors = scipy.sparse.linalg.eigsh(
                L,
                k,
                which="SM",
                ncv=num_lanczos_vectors,
                tol=1e-4,
                v0=np.ones(L.shape[0]),
                maxiter=graph.shape[0] * 5,
            )
        else:
            # Very large graphs: LOBPCG instead of Lanczos.
            eigenvalues, eigenvectors = scipy.sparse.linalg.lobpcg(
                L, random_state.normal(size=(L.shape[0], k)), largest=False, tol=1e-8
            )
        # Skip the trivial constant eigenvector (smallest eigenvalue).
        order = np.argsort(eigenvalues)[1:k]
        return eigenvectors[:, order]
    except scipy.sparse.linalg.ArpackError:
        warn(
            "WARNING: spectral initialisation failed! The eigenvector solver\n"
            "failed. This is likely due to too small an eigengap. Consider\n"
            "adding some noise or jitter to your data.\n\n"
            "Falling back to random initialisation!"
        )
        return random_state.uniform(low=-10.0, high=10.0, size=(graph.shape[0], dim))
import numpy as np
import numba
@numba.njit()
def clip(val):
    """Clamp a gradient value to the interval [-4.0, 4.0]."""
    if val > 4.0:
        return 4.0
    if val < -4.0:
        return -4.0
    return val
@numba.njit(
    "f4(f4[::1],f4[::1])",
    fastmath=True,
    cache=True,
    locals={
        "result": numba.types.float32,
        "diff": numba.types.float32,
        "dim": numba.types.int32,
    },
)
def rdist(x, y):
    """Squared Euclidean distance between two contiguous float32 vectors.

    Note: the njit ``locals`` mapping pins the types (and hence the names)
    of the loop variables, so the variable names below must stay in sync
    with the decorator.
    """
    result = 0.0
    dim = x.shape[0]
    for i in range(dim):
        diff = x[i] - y[i]
        result += diff * diff
    return result
def _optimize_layout_euclidean_single_epoch(
    head_embedding,
    head,
    tail,
    n_vertices,
    epochs_per_sample,
    a,
    b,
    rng_state,
    gamma,
    dim,
    move_other,
    alpha,
    epochs_per_negative_sample,
    epoch_of_next_negative_sample,
    epoch_of_next_sample,
    n,
):
    """One SGD epoch of the UMAP-style euclidean layout optimisation.

    Mutates ``head_embedding`` in place: each edge due for sampling this
    epoch gets an attractive update, followed by ``gamma``-weighted
    repulsive updates against randomly sampled vertices.  This function is
    compiled with ``numba.njit`` (optionally parallel) by ``optimize_layout``,
    which is why ``numba.prange`` appears here.

    Returns the (mutated) head_embedding.
    """
    for i in numba.prange(epochs_per_sample.shape[0]):
        # Sample edge i only when its scheduled epoch has arrived.
        if epoch_of_next_sample[i] <= n:
            j = head[i]
            k = tail[i]
            current = head_embedding[j]
            other = head_embedding[k]
            dist_squared = rdist(current, other)
            # Attractive gradient of the a/b-parameterised membership curve.
            if dist_squared > 0.0:
                grad_coeff = -2.0 * a * b * pow(dist_squared, b - 1.0)
                grad_coeff /= a * pow(dist_squared, b) + 1.0
            else:
                grad_coeff = 0.0
            for d in range(dim):
                grad_d = clip(grad_coeff * (current[d] - other[d]))
                current[d] += grad_d * alpha
                if move_other:
                    other[d] += -grad_d * alpha
            epoch_of_next_sample[i] += epochs_per_sample[i]
            # Number of negative samples owed since the last visit.
            n_neg_samples = int(
                (n - epoch_of_next_negative_sample[i]) / epochs_per_negative_sample[i]
            )
            for p in range(n_neg_samples):
                k = tau_rand_int(rng_state) % n_vertices
                other = head_embedding[k]
                dist_squared = rdist(current, other)
                if dist_squared > 0.0:
                    grad_coeff = 2.0 * gamma * b
                    grad_coeff /= (0.001 + dist_squared) * (
                        a * pow(dist_squared, b) + 1
                    )
                elif j == k:
                    continue  # never repel a point from itself
                else:
                    grad_coeff = 0.0
                for d in range(dim):
                    if grad_coeff > 0.0:
                        grad_d = clip(grad_coeff * (current[d] - other[d]))
                    else:
                        # Coincident points: push apart with the clip bound.
                        grad_d = 4.0
                    current[d] += grad_d * alpha
            epoch_of_next_negative_sample[i] += (
                n_neg_samples * epochs_per_negative_sample[i]
            )
    return head_embedding
def fuzzy_simplicial_set(
    Xs,
    joint,
    joint_idxs,
    weights,
    n_neighbors,
    cardinality,
    metrics,
    metric_kwds,
    joint_metrics,
    angular,
    set_op_mix_ratio,
    local_connectivity,
    n_epochs,
    random_state,
    verbose,
):
    """Build the joint fuzzy simplicial set over several datasets.

    Computes a kNN-based fuzzy graph within each dataset in ``Xs`` and
    across each dataset pair in ``joint``, splitting the ``n_neighbors``
    budget proportionally to dataset sizes, then symmetrises the combined
    graph with ``set_op_mix_ratio`` and prunes edges too weak to ever be
    sampled within ``n_epochs`` epochs.

    Returns
    -------
    (graphs, joint_graphs, full_graph, weights): per-dataset COO blocks,
    cross-dataset COO blocks keyed like ``joint``, the combined sparse
    matrix, and the (possibly rescaled) ``weights`` dict.
    """
    len_Xs = [len(i) for i in Xs]
    rows, cols, vals = np.array([]), np.array([]), np.array([])
    for i in range(len(Xs)):
        # Share the neighbour budget in proportion to dataset size.
        X_n_neighbors = int(round(n_neighbors * len_Xs[i]/sum(len_Xs)))
        if X_n_neighbors < 2:
            # Too few neighbours allotted: clamp to 2 but scale the
            # corresponding weight down to compensate.
            weights[(i,i)] *= X_n_neighbors/2
            X_n_neighbors = 2
        if Xs[i].shape[0] < 4096:
            # Small dataset: exact pairwise distances.
            X = Xs[i]
            if scipy.sparse.issparse(Xs[i]):
                X = Xs[i].toarray()
            # Bug fix: use the densified X here (the original passed Xs[i],
            # discarding the toarray() conversion above).
            dmat = pairwise_distances(X, metric=metrics[i], **metric_kwds[i])
            knn_indices, knn_dists, _ = nearest_neighbors(
                dmat,
                X_n_neighbors,
                'precomputed',
                {},
                angular,
                np.random.RandomState(random_state),
                verbose=verbose,
            )
        else:
            # Large dataset: approximate NN-descent search.
            knn_indices, knn_dists, _ = nearest_neighbors(
                Xs[i],
                X_n_neighbors,
                metrics[i],
                metric_kwds[i],
                angular,
                np.random.RandomState(random_state),
                verbose=verbose,
            )
        sigmas, rhos = smooth_knn_dist(
            knn_dists,
            0,
            local_connectivity=local_connectivity,
            cardinality=cardinality * X_n_neighbors/n_neighbors
        )
        X_rows, X_cols, X_vals = compute_membership_strengths(
            knn_indices, knn_dists, sigmas, rhos
        )
        # Offset local indices into the block-concatenated joint index space.
        rows = np.concatenate([rows, X_rows + sum(len_Xs[:i])])
        cols = np.concatenate([cols, X_cols + sum(len_Xs[:i])])
        vals = np.concatenate([vals, X_vals])
    for k in joint.keys():
        XY = joint[k]
        idxs = joint_idxs[k]
        metric = joint_metrics[k]
        # Cross-dataset budgets, additionally scaled by the fraction of each
        # dataset covered by the joint representation.
        XY_n_neighbors = int(round(n_neighbors * len_Xs[k[1]]/sum(len_Xs) * len(idxs[1])/len_Xs[k[1]]))
        YX_n_neighbors = int(round(n_neighbors * len_Xs[k[0]]/sum(len_Xs) * len(idxs[0])/len_Xs[k[0]]))
        if XY_n_neighbors < 2:
            weights[(k[0],k[1])] *= XY_n_neighbors/2
            XY_n_neighbors = 2
        if YX_n_neighbors < 2:
            weights[(k[1],k[0])] *= YX_n_neighbors/2
            YX_n_neighbors = 2
        if metric == 'precomputed':
            # Bug fix: slice the first n columns ([:, :n]); the original
            # indexed a single column ([:, n]), producing 1-D arrays that
            # break smooth_knn_dist / compute_membership_strengths.
            XY_knn_indices = np.argsort(XY, axis=1)[:, :XY_n_neighbors]
            XY_knn_dists = np.sort(XY, axis=1)[:, :XY_n_neighbors]
            YX_knn_indices = np.argsort(XY.T, axis=1)[:, :YX_n_neighbors]
            YX_knn_dists = np.sort(XY.T, axis=1)[:, :YX_n_neighbors]
        else:
            XY_knn_indices, XY_knn_dists = partitioned_nearest_neighbors(XY[0], XY[1],
                                                                         XY_n_neighbors, metric)
            YX_knn_indices, YX_knn_dists = partitioned_nearest_neighbors(XY[1], XY[0],
                                                                         YX_n_neighbors, metric)
        XY_sigmas, XY_rhos = smooth_knn_dist(
            XY_knn_dists,
            0,
            local_connectivity=local_connectivity,
            cardinality=cardinality * XY_n_neighbors/n_neighbors
        )
        YX_sigmas, YX_rhos = smooth_knn_dist(
            YX_knn_dists,
            0,
            local_connectivity=local_connectivity,
            cardinality=cardinality * YX_n_neighbors/n_neighbors
        )
        XY_rows, XY_cols, XY_vals = compute_membership_strengths(
            XY_knn_indices, XY_knn_dists, XY_sigmas, XY_rhos
        )
        YX_rows, YX_cols, YX_vals = compute_membership_strengths(
            YX_knn_indices, YX_knn_dists, YX_sigmas, YX_rhos
        )
        # Map local row/col indices back through idxs into the joint space.
        rows = np.concatenate([rows, idxs[0][XY_rows] + sum(len_Xs[:k[0]])])
        cols = np.concatenate([cols, idxs[1][XY_cols] + sum(len_Xs[:k[1]])])
        vals = np.concatenate([vals, XY_vals])
        rows = np.concatenate([rows, idxs[1][YX_rows] + sum(len_Xs[:k[1]])])
        cols = np.concatenate([cols, idxs[0][YX_cols] + sum(len_Xs[:k[0]])])
        vals = np.concatenate([vals, YX_vals])
    fs = scipy.sparse.coo_matrix(
        (vals, (rows, cols)), shape=(sum(len_Xs), sum(len_Xs))
    )
    fs.eliminate_zeros()
    # Symmetrise: mix of fuzzy union (probabilistic t-conorm) and
    # intersection, controlled by set_op_mix_ratio.
    transpose = fs.transpose()
    prod_matrix = fs.multiply(transpose)
    fs = (
        set_op_mix_ratio
        * (fs + transpose - prod_matrix)
        + (1.0 - set_op_mix_ratio) * prod_matrix
    )
    fs.sum_duplicates()
    # Drop edges too weak to be sampled even once in n_epochs epochs.
    fs.data[fs.data < (fs.data.max() / float(n_epochs))] = 0.0
    fs.eliminate_zeros()
    full_graph = fs
    # Cut the combined matrix back into per-dataset and cross-dataset blocks.
    graphs = []
    for i in range(len(Xs)):
        graphs += [fs[sum(len_Xs[:i]):sum(len_Xs[:i+1]),
                      sum(len_Xs[:i]):sum(len_Xs[:i+1])].tocoo()]
    joint_graphs = {}
    for k in joint.keys():
        joint_graphs[k] = fs[sum(len_Xs[:k[0]]):sum(len_Xs[:k[0]+1]),
                             sum(len_Xs[:k[1]]):sum(len_Xs[:k[1]+1])].tocoo()
    return graphs, joint_graphs, full_graph, weights
def init_layout(init,
            Xs,
            graphs,
            n_components,
            metrics,
            metric_kwds,
            random_state):
    """Produce initial embedding coordinates for each dataset.

    Parameters
    ----------
    init : 'random', 'spectral', or an (n_total, n_components) coordinate
        array stacked over all datasets.
    Xs : list of datasets (only their lengths are used here).
    graphs : per-dataset fuzzy graphs (used by the spectral branch).
    n_components : embedding dimensionality.
    metrics, metric_kwds : per-dataset metric settings (spectral branch).
    random_state : integer seed for ``np.random.RandomState``.

    Returns
    -------
    List of float32 arrays, one per dataset, each rescaled column-wise to
    the range [0, 10].
    """
    len_Xs = [len(i) for i in Xs]
    if init == 'random':
        embeddings = []
        for i in range(len(Xs)):
            embeddings += [np.random.RandomState(random_state).uniform(
                low=-10.0, high=10.0,
                size=(len_Xs[i], n_components),
            ).astype(np.float32)]
    elif init == 'spectral':
        embeddings = []
        for i in range(len(Xs)):
            try:
                X_embedding = spectral_layout(
                    Xs[i],
                    graphs[i],
                    n_components,
                    np.random.RandomState(random_state),
                    metric=metrics[i],
                    metric_kwds=metric_kwds[i],
                )
                # Expand to the +/-10 range and add a tiny jitter.
                expansion = 10.0 / np.abs(X_embedding).max()
                X_embedding = (X_embedding * expansion).astype(np.float32) + \
                              np.random.RandomState(random_state).normal(
                                  scale=0.0001,
                                  size=[len_Xs[i], n_components]
                              ).astype(np.float32)
            except Exception:
                # Spectral initialisation failed; fall back to random.
                # (Was a bare ``except:``, which also swallowed
                # KeyboardInterrupt/SystemExit.)
                X_embedding = np.random.RandomState(random_state).uniform(
                    low=-10.0, high=10.0,
                    size=(len_Xs[i], n_components),
                ).astype(np.float32)
            embeddings += [X_embedding]
    else:
        # init is a user-supplied coordinate array stacked over all datasets.
        if len(init.shape) == 2:
            if (np.unique(init, axis=0).shape[0] < init.shape[0]):
                # Duplicate rows: jitter by a fraction of the typical
                # nearest-neighbour distance so points are distinguishable.
                # Bug fix: the original referenced the undefined name
                # ``init_data`` here, raising NameError on this path.
                tree = KDTree(init)
                dist, ind = tree.query(init, k=2)
                nndist = np.mean(dist[:, 1])
                embedding = init + np.random.RandomState(random_state).normal(
                    scale=0.001 * nndist,
                    size=init.shape
                ).astype(np.float32)
            else:
                embedding = init
        # NOTE(review): if ``init`` is not 2-D, ``embedding`` is never
        # assigned and the loop below raises NameError (pre-existing
        # behaviour, preserved).
        embeddings = []
        for i in range(len(Xs)):
            embeddings += [embedding[sum(len_Xs[:i]):sum(len_Xs[:i+1])]]
    # Rescale every embedding column-wise to [0, 10].
    for i in range(len(embeddings)):
        embeddings[i] = (10.0 * (embeddings[i] - np.min(embeddings[i], 0))
                         / (np.max(embeddings[i], 0) - np.min(embeddings[i], 0))
                         ).astype(np.float32, order="C")
    return embeddings
def optimize_layout(
    embeddings,
    graphs,
    joint_graphs,
    weights,
    n_epochs,
    a,
    b,
    random_state,
    gamma=1.0,
    initial_alpha=1.0,
    negative_sample_rate=5.0,
    parallel=False,
    verbose=False,
):
    """Run the SGD layout optimisation over all datasets and cross graphs.

    Each epoch optimises every per-dataset graph and every cross-dataset
    graph independently (via the numba-compiled single-epoch kernel) and
    blends the resulting coordinates into ``embeddings`` according to the
    corresponding entries of ``weights``.  The learning rate ``alpha``
    decays linearly to zero over ``n_epochs``.

    Returns the list of optimised embeddings (also mutated in place).
    """
    len_Xs = np.array([len(i) for i in embeddings])
    dim = embeddings[0].shape[1]
    move_other = True
    alpha = initial_alpha
    heads = [i.row for i in graphs]
    tails = [i.col for i in graphs]
    n_vertices = [i.shape[1] for i in graphs]
    epochs_per_sample = [make_epochs_per_sample(i.data, n_epochs) for i in graphs]
    epochs_per_negative_sample = [i/negative_sample_rate for i in epochs_per_sample]
    epoch_of_next_negative_sample = [i.copy() for i in epochs_per_negative_sample]
    epoch_of_next_sample = [i.copy() for i in epochs_per_sample]
    # Cross graphs are optimised over the concatenation of both embeddings,
    # with each edge duplicated in both directions (hence the offsets by
    # len_Xs[k[0]] and the doubled data arrays below).
    joint_heads = {k: np.concatenate([joint_graphs[k].row,
        joint_graphs[k].col + len_Xs[k[0]]]) for k in joint_graphs.keys()}
    joint_tails = {k: np.concatenate([joint_graphs[k].col + len_Xs[k[0]],
        joint_graphs[k].row]) for k in joint_graphs.keys()}
    joint_n_vertices = {k: len_Xs[k[0]] + len_Xs[k[1]] for k in joint_graphs.keys()}
    joint_epochs_per_sample = {k: make_epochs_per_sample(
        np.concatenate([joint_graphs[k].data, joint_graphs[k].data]), n_epochs) for k in joint_graphs.keys()}
    joint_epochs_per_negative_sample = {k: joint_epochs_per_sample[k]/negative_sample_rate for k in joint_graphs.keys()}
    joint_epoch_of_next_negative_sample = {k: np.copy(joint_epochs_per_negative_sample[k]) for k in joint_graphs.keys()}
    joint_epoch_of_next_sample = {k: np.copy(joint_epochs_per_sample[k]) for k in joint_graphs.keys()}
    # Compile the per-epoch kernel once (optionally parallel).
    optimize_fn = numba.njit(
        _optimize_layout_euclidean_single_epoch, fastmath=True, parallel=parallel
    )
    for n in range(n_epochs):
        for i in range(len(embeddings)):
            if weights[(i,i)] != 0:
                # Optimise a copy, then blend towards it by the weight.
                new_embedding = optimize_fn(
                    np.copy(embeddings[i]),
                    heads[i],
                    tails[i],
                    n_vertices[i],
                    epochs_per_sample[i],
                    a,
                    b,
                    np.random.RandomState(random_state).randint(np.iinfo(np.int32).min + 1, np.iinfo(np.int32).max - 1, 3).astype(np.int64),
                    gamma,
                    dim,
                    move_other,
                    alpha,
                    epochs_per_negative_sample[i],
                    epoch_of_next_negative_sample[i],
                    epoch_of_next_sample[i],
                    n,
                )
                embeddings[i] += (new_embedding - embeddings[i]) * weights[(i,i)]
        for k in joint_graphs.keys():
            if weights[(k[0], k[1])] != 0 or weights[(k[1], k[0])] != 0:
                # Optimise the stacked pair, then blend each half separately.
                new_embeddings = optimize_fn(
                    np.concatenate([embeddings[k[0]], embeddings[k[1]]]),
                    joint_heads[k],
                    joint_tails[k],
                    joint_n_vertices[k],
                    joint_epochs_per_sample[k],
                    a,
                    b,
                    np.random.RandomState(random_state).randint(np.iinfo(np.int32).min + 1, np.iinfo(np.int32).max - 1, 3).astype(np.int64),
                    gamma,
                    dim,
                    move_other,
                    alpha,
                    joint_epochs_per_negative_sample[k],
                    joint_epoch_of_next_negative_sample[k],
                    joint_epoch_of_next_sample[k],
                    n,
                )
                embeddings[k[0]] += (new_embeddings[:len(embeddings[k[0]])] - embeddings[k[0]]) * weights[(k[0], k[1])]
                embeddings[k[1]] += (new_embeddings[len(embeddings[k[0]]):] - embeddings[k[1]]) * weights[(k[1], k[0])]
        # Linear learning-rate decay.
        alpha = initial_alpha * (1.0 - (float(n) / float(n_epochs)))
        # NOTE(review): int(n_epochs / 10) is 0 for n_epochs < 10, so the
        # modulo below raises ZeroDivisionError when verbose is set.
        if verbose and n % int(n_epochs / 10) == 0:
            print("\tcompleted ", n, " / ", n_epochs, "epochs")
    return embeddings
def find_ab_params(spread, min_dist):
    """Fit the (a, b) parameters of the embedding membership curve.

    Fits 1 / (1 + a * x^(2b)) by least squares to an idealised curve that is
    1 for x < min_dist and decays exponentially with scale ``spread`` beyond
    it.  Returns the fitted (a, b) pair.
    """
    # Local import: curve_fit is not imported in this module's visible
    # import block, which would make this function raise NameError.
    from scipy.optimize import curve_fit

    def curve(x, a, b):
        return 1.0 / (1.0 + a * x ** (2 * b))

    xv = np.linspace(0, spread * 3, 300)
    yv = np.zeros(xv.shape)
    yv[xv < min_dist] = 1.0
    yv[xv >= min_dist] = np.exp(-(xv[xv >= min_dist] - min_dist) / spread)
    params, covar = curve_fit(curve, xv, yv)
    return params[0], params[1]
def make_epochs_per_sample(weights, n_epochs):
    """Convert edge weights into sampling intervals (in epochs).

    The strongest edge is sampled every epoch (interval 1); an edge of
    weight w gets interval max(weights) / w; zero-weight edges get -1
    (never sampled).
    """
    per_sample = -1.0 * np.ones(weights.shape[0], dtype=np.float64)
    samples_per_epoch = n_epochs * (weights / weights.max())
    positive = samples_per_epoch > 0
    per_sample[positive] = float(n_epochs) / samples_per_epoch[positive]
    return per_sample
def elaborate_relation_dict(dict, list_elems=True):
    """Expand a relation dict so every key is a plain two-element tuple.

    Keys may be (i, j) pairs, pairs containing tuples of indices, or longer
    tuples (expanded to all ordered index pairs i < j).  With list_elems the
    values are split per pair; otherwise the original value is shared.

    NOTE: the parameter name shadows the builtin ``dict``; it is kept for
    backward compatibility with keyword callers.
    """
    expanded = {}
    for key in dict.keys():
        if len(key) == 2 and type(key[0]) != tuple and type(key[1]) != tuple:
            # Already a simple (i, j) key: copy through unchanged.
            expanded[key] = dict[key]
        elif len(key) == 2:
            # One or both sides are tuples of indices: expand the product.
            left = key[0] if type(key[0]) == tuple else (key[0],)
            right = key[1] if type(key[1]) == tuple else (key[1],)
            for a in range(len(left)):
                for b in range(len(right)):
                    if list_elems:
                        expanded[(left[a], right[b])] = [dict[key][0][a], dict[key][1][b]]
                    else:
                        expanded[(left[a], right[b])] = dict[key]
        else:
            # A longer tuple: expand to all ordered pairs (i < j).
            for a in range(len(key)):
                for b in range(a + 1, len(key)):
                    if list_elems:
                        expanded[(key[a], key[b])] = [dict[key][a], dict[key][b]]
                    else:
                        expanded[(key[a], key[b])] = dict[key]
    return expanded
def find_weights(strengths, len_Xs, joint_idxs):
    """Derive the per-(dataset, dataset) blending weights for optimisation.

    ``strengths`` is either an array (per-dataset self-strength in [0, 1],
    cross weights 1 - strength) or a relation dict (missing entries default
    to 1).  Weights are normalised so each row's size-weighted sum matches
    the total sample count, then cross weights are scaled by joint coverage
    or zeroed when no joint representation exists for that pair.
    """
    n = len(len_Xs)
    if type(strengths) != dict:
        strengths = np.clip(strengths, 0, 1)
        weights = {}
        for i in range(n):
            for j in range(n):
                weights[(i, j)] = strengths[i] if i == j else 1 - strengths[i]
    else:
        weights = elaborate_relation_dict(strengths, list_elems=False)
        for i in range(n):
            for j in range(n):
                if (i, j) not in weights.keys():
                    weights[(i, j)] = 1
    # Normalise each row by its size-weighted total.
    row_totals = []
    for i in range(n):
        total = 0
        for j in range(n):
            total += weights[(i, j)] * len_Xs[j]
        row_totals += [total]
    for i in range(n):
        for j in range(n):
            weights[(i, j)] *= sum(len_Xs) / row_totals[i]
    # Scale cross weights by joint coverage; zero them when no joint exists.
    for key in weights.keys():
        if key[0] != key[1]:
            if key in joint_idxs.keys():
                weights[key] *= len(joint_idxs[key][1]) / len_Xs[key[1]]
            elif key[::-1] in joint_idxs.keys():
                weights[key] *= len(joint_idxs[key[::-1]][0]) / len_Xs[key[1]]
            else:
                weights[key] = 0
    return weights
def MultiGraph(**kwds):
    """Run MultiMAP but return only the parameters and neighbour graph.

    Accepts the same keyword arguments as ``MultiMAP`` (except
    ``graph_only``, which is forced to True here).
    """
    return MultiMAP(**kwds, graph_only=True)
def MultiMAP(Xs,
             joint={},
             joint_idxs={},
             metrics=None,
             metric_kwds=None,
             joint_metrics={},
             n_neighbors=None,
             cardinality=None,
             angular=False,
             set_op_mix_ratio=1.0,
             local_connectivity=1.0,
             n_components=2,
             spread=1.0,
             min_dist=None,
             init='spectral',
             n_epochs=None,
             a=None,
             b=None,
             strengths=None,
             random_state=0,
             verbose=False,
             graph_only=False,
             ):
	'''
	Run MultiMAP on a collection of dimensionality reduction matrices. Returns a ``(parameters,
	neighbor_graph, embedding)`` tuple, with the embedding optionally skipped if ``graph_only=True``.

	Input
	-----
	Xs : list of ``np.array``
		The dimensionality reductions of the datasets to integrate, observations as rows.

		>>> Xs = [DR_A, DR_B, DR_C]
	joint : dict of ``np.array``
		The joint dimensionality reductions generated for all pair combinations of the input
		datasets. The keys are to be two-integer tuples, specifying the indices of the two
		datasets in ``Xs``

		>>> joint = {(0,1):DR_AB, (0,2):DR_AC, (1,2):DR_BC}
	graph_only : ``bool``, optional (default: ``False``)
		If ``True``, skip producing the embedding and only return the neighbour graph.

	All other arguments as described in ``MultiMAP.Integration()``.
	'''
	# NOTE(review): joint / joint_idxs / joint_metrics use mutable {} defaults and are
	# written into below; this is safe only if elaborate_relation_dict returns a fresh
	# dict rather than its argument — TODO confirm.
	#turn off warnings if we're not verbose
	if not verbose:
		warnings.simplefilter('ignore')
	#coerce dense inputs to np.array; sparse inputs are left untouched here
	for i in range(len(Xs)):
		if not scipy.sparse.issparse(Xs[i]):
			Xs[i] = np.array(Xs[i])
	#number of observations per dataset, used for defaults and weighting below
	len_Xs = [len(i) for i in Xs]
	#if no joint reductions were provided, register all datasets under one joint key
	if not joint:
		joint = {tuple(range(len(Xs))): Xs}
	#normalise the relation dicts (expands multi-dataset keys; see elaborate_relation_dict)
	joint = elaborate_relation_dict(joint, list_elems=True)
	joint_idxs = elaborate_relation_dict(joint_idxs, list_elems=True)
	joint_metrics = elaborate_relation_dict(joint_metrics, list_elems=False)
	for k in joint.keys():
		#joint reductions are always used as dense arrays downstream
		joint[k] = [i.toarray() if scipy.sparse.issparse(i) else np.array(i) for i in joint[k]]
		#fill in missing index pairs: reuse the reversed key if present,
		#otherwise default to "all observations of both datasets"
		if k not in joint_idxs.keys():
			if k[::-1] in joint_idxs.keys():
				joint_idxs[k] = joint_idxs[k[::-1]]
			else:
				joint_idxs[k] = [np.arange(len_Xs[k[0]]), np.arange(len_Xs[k[1]])]
		#same fill-in logic for per-pair metrics, defaulting to euclidean
		if k not in joint_metrics.keys():
			if k[::-1] in joint_metrics.keys():
				joint_metrics[k] = joint_metrics[k[::-1]]
			else:
				joint_metrics[k] = 'euclidean'
	#per-dataset defaults: euclidean metric with no extra kwds
	if metrics is None:
		metrics = ['euclidean' for i in range(len(Xs))]
	if metric_kwds is None:
		metric_kwds = [{} for i in range(len(Xs))]
	#default neighbour count scales with the number of datasets (15 each)
	if n_neighbors is None:
		n_neighbors = 15 * len(Xs)
	if cardinality is None:
		cardinality = np.log2(n_neighbors)
	#shrink min_dist as the neighbour count grows, anchored at 0.5 for 15 neighbours
	if min_dist is None:
		min_dist = 0.5 * 15/n_neighbors
	#densify an explicit init coordinate matrix.
	#NOTE(review): a string init (e.g. the default 'spectral') also passes through
	#np.array() here, yielding a 0-d string array — presumably init_layout handles
	#that case; confirm against init_layout.
	if scipy.sparse.issparse(init):
		init = init.toarray()
	else:
		init = np.array(init)
	#fewer optimisation epochs for larger total observation counts
	if n_epochs is None:
		if np.sum(len_Xs) <= 10000:
			n_epochs = 500
		else:
			n_epochs = 200
	#derive the a/b embedding curve parameters from spread/min_dist unless both given
	if a is None or b is None:
		a, b = find_ab_params(spread, min_dist)
	#default per-dataset strengths of 0.5, then convert to cross-dataset weights
	if strengths is None:
		strengths = np.ones(len(Xs))*0.5
	weights = find_weights(strengths, len_Xs, joint_idxs)
	if verbose:
		print("Constructing fuzzy simplicial sets ...")
	#build per-dataset, per-pair and combined neighbour graphs
	graphs, joint_graphs, full_graph, weights = fuzzy_simplicial_set(
		Xs,
		joint,
		joint_idxs,
		weights,
		n_neighbors,
		cardinality,
		metrics,
		metric_kwds,
		joint_metrics,
		angular,
		set_op_mix_ratio,
		local_connectivity,
		n_epochs,
		random_state,
		verbose=False
	)
	#set up parameter output (mirrors the effective values used above)
	params = {'n_neighbors': n_neighbors,
	          'metric': metrics[0],
	          'multimap': {'cardinality': cardinality,
	                       'set_op_mix_ratio': set_op_mix_ratio,
	                       'local_connectivity': local_connectivity,
	                       'n_components': n_components,
	                       'spread': spread,
	                       'min_dist': min_dist,
	                       'init': init,
	                       'n_epochs': n_epochs,
	                       'a': a,
	                       'b': b,
	                       'strengths': strengths,
	                       'random_state': random_state}}
	#return parameter and graph tuple
	#TODO: add the distances graph to this once it exists
	if graph_only:
		return (params, full_graph)
	if verbose:
		print("Initializing embedding ...")
	#one initial layout per dataset
	embeddings = init_layout(
		init,
		Xs,
		graphs,
		n_components,
		metrics,
		metric_kwds,
		random_state
	)
	if verbose:
		print("Optimizing embedding ...")
	#jointly optimise all dataset layouts against the graphs
	embeddings = optimize_layout(
		embeddings,
		graphs,
		joint_graphs,
		weights,
		n_epochs,
		a,
		b,
		random_state,
		gamma=1.0,
		initial_alpha=1.0,
		negative_sample_rate=5.0,
		parallel=False,
		verbose=verbose
	)
	#undo warning reset
	if not verbose:
		warnings.resetwarnings()
	#return an embedding/graph/parameters tuple; embeddings are stacked in dataset order
	#TODO: add the distances graph to this once it exists
	return (params, full_graph, np.concatenate(embeddings))
import sklearn
def tfidf(X, n_components, binarize=True, random_state=0):
	'''
	Compute a TF-IDF normalisation followed by a truncated-SVD (LSI) reduction of
	a count matrix, observations as rows. Returns the LSI coordinates with the
	first component removed.

	Input
	-----
	X : count matrix, observations as rows
	n_components : ``int``
		Number of SVD components to compute (one fewer is returned, as the
		first component is dropped).
	binarize : ``bool``, optional (default: ``True``)
		If ``True``, clip all counts of 1 or more to 1 before TF-IDF.
	random_state : ``int``, optional (default: 0)
		Seed passed to ``TruncatedSVD`` for reproducibility.
	'''
	from sklearn.feature_extraction.text import TfidfTransformer
	#work on a copy so the caller's matrix is untouched
	sc_count = np.copy(X)
	if binarize:
		#values below 1 (i.e. zeros) are kept, everything else becomes 1
		sc_count = np.where(sc_count < 1, sc_count, 1)
	#fix: local was named `tfidf`, shadowing this function's own name
	transformer = TfidfTransformer(norm='l2', sublinear_tf=True)
	normed_count = transformer.fit_transform(sc_count)
	lsi = sklearn.decomposition.TruncatedSVD(n_components=n_components, random_state=random_state)
	lsi_r = lsi.fit_transform(normed_count)
	#drop the first LSI component — presumably it tracks total counts/depth,
	#as is standard practice for LSI on count data; TODO confirm
	X_lsi = lsi_r[:,1:]
	return X_lsi
"numpy.abs",
"numpy.sum",
"numpy.empty",
"scipy.sparse.issparse",
"numba.njit",
"numpy.floor",
"numpy.ones",
"numpy.clip",
"numpy.iinfo",
"numpy.argsort",
"numpy.sin",
"numpy.arange",
"numpy.mean",
"numpy.exp",
"numba.prange",
"scipy.sparse.csgraph.connected_components",
"scipy.spati... | [((641, 681), 'locale.setlocale', 'locale.setlocale', (['locale.LC_NUMERIC', '"""C"""'], {}), "(locale.LC_NUMERIC, 'C')\n", (657, 681), False, 'import locale\n'), ((723, 842), 'collections.namedtuple', 'namedtuple', (['"""RandomProjectionTreeNode"""', "['indices', 'is_leaf', 'hyperplane', 'offset', 'left_child', 'right_child']"], {}), "('RandomProjectionTreeNode', ['indices', 'is_leaf', 'hyperplane',\n 'offset', 'left_child', 'right_child'])\n", (733, 842), False, 'from collections import deque, namedtuple\n'), ((862, 935), 'collections.namedtuple', 'namedtuple', (['"""FlatTree"""', "['hyperplanes', 'offsets', 'children', 'indices']"], {}), "('FlatTree', ['hyperplanes', 'offsets', 'children', 'indices'])\n", (872, 935), False, 'from collections import deque, namedtuple\n'), ((939, 964), 'numba.njit', 'numba.njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (949, 964), False, 'import numba\n'), ((2933, 2970), 'numba.njit', 'numba.njit', ([], {'fastmath': '(True)', 'nogil': '(True)'}), '(fastmath=True, nogil=True)\n', (2943, 2970), False, 'import numba\n'), ((4673, 4698), 'numba.njit', 'numba.njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (4683, 4698), False, 'import numba\n'), ((7225, 7250), 'numba.njit', 'numba.njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (7235, 7250), False, 'import numba\n'), ((15624, 15636), 'numba.njit', 'numba.njit', ([], {}), '()\n', (15634, 15636), False, 'import numba\n'), ((16007, 16019), 'numba.njit', 'numba.njit', ([], {}), '()\n', (16017, 16019), False, 'import numba\n'), ((17249, 17276), 'numpy.eye', 'np.eye', (['(2)'], {'dtype': 'np.float64'}), '(2, dtype=np.float64)\n', (17255, 17276), True, 'import numpy as np\n'), ((17290, 17318), 'numpy.ones', 'np.ones', (['(2)'], {'dtype': 'np.float64'}), '(2, dtype=np.float64)\n', (17297, 17318), True, 'import numpy as np\n'), ((17322, 17347), 'numba.njit', 'numba.njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (17332, 17347), False, 
'import numba\n'), ((17486, 17498), 'numba.njit', 'numba.njit', ([], {}), '()\n', (17496, 17498), False, 'import numba\n'), ((17682, 17694), 'numba.njit', 'numba.njit', ([], {}), '()\n', (17692, 17694), False, 'import numba\n'), ((17826, 17838), 'numba.njit', 'numba.njit', ([], {}), '()\n', (17836, 17838), False, 'import numba\n'), ((17982, 17994), 'numba.njit', 'numba.njit', ([], {}), '()\n', (17992, 17994), False, 'import numba\n'), ((18151, 18163), 'numba.njit', 'numba.njit', ([], {}), '()\n', (18161, 18163), False, 'import numba\n'), ((18350, 18362), 'numba.njit', 'numba.njit', ([], {}), '()\n', (18360, 18362), False, 'import numba\n'), ((18728, 18740), 'numba.njit', 'numba.njit', ([], {}), '()\n', (18738, 18740), False, 'import numba\n'), ((18902, 18914), 'numba.njit', 'numba.njit', ([], {}), '()\n', (18912, 18914), False, 'import numba\n'), ((19140, 19152), 'numba.njit', 'numba.njit', ([], {}), '()\n', (19150, 19152), False, 'import numba\n'), ((19439, 19451), 'numba.njit', 'numba.njit', ([], {}), '()\n', (19449, 19451), False, 'import numba\n'), ((19803, 19815), 'numba.njit', 'numba.njit', ([], {}), '()\n', (19813, 19815), False, 'import numba\n'), ((20037, 20049), 'numba.njit', 'numba.njit', ([], {}), '()\n', (20047, 20049), False, 'import numba\n'), ((20416, 20428), 'numba.njit', 'numba.njit', ([], {}), '()\n', (20426, 20428), False, 'import numba\n'), ((20847, 20859), 'numba.njit', 'numba.njit', ([], {}), '()\n', (20857, 20859), False, 'import numba\n'), ((21107, 21119), 'numba.njit', 'numba.njit', ([], {}), '()\n', (21117, 21119), False, 'import numba\n'), ((21468, 21480), 'numba.njit', 'numba.njit', ([], {}), '()\n', (21478, 21480), False, 'import numba\n'), ((21727, 21739), 'numba.njit', 'numba.njit', ([], {}), '()\n', (21737, 21739), False, 'import numba\n'), ((22114, 22126), 'numba.njit', 'numba.njit', ([], {}), '()\n', (22124, 22126), False, 'import numba\n'), ((22453, 22465), 'numba.njit', 'numba.njit', ([], {}), '()\n', (22463, 22465), False, 
'import numba\n'), ((23104, 23116), 'numba.njit', 'numba.njit', ([], {}), '()\n', (23114, 23116), False, 'import numba\n'), ((23494, 23506), 'numba.njit', 'numba.njit', ([], {}), '()\n', (23504, 23506), False, 'import numba\n'), ((25116, 25141), 'numba.njit', 'numba.njit', ([], {'parallel': '(True)'}), '(parallel=True)\n', (25126, 25141), False, 'import numba\n'), ((25434, 25457), 'numba.njit', 'numba.njit', (['"""i4(i8[:])"""'], {}), "('i4(i8[:])')\n", (25444, 25457), False, 'import numba\n'), ((25920, 25943), 'numba.njit', 'numba.njit', (['"""f4(i8[:])"""'], {}), "('f4(i8[:])')\n", (25930, 25943), False, 'import numba\n'), ((26047, 26059), 'numba.njit', 'numba.njit', ([], {}), '()\n', (26057, 26059), False, 'import numba\n'), ((26187, 26199), 'numba.njit', 'numba.njit', ([], {}), '()\n', (26197, 26199), False, 'import numba\n'), ((26637, 26669), 'numba.njit', 'numba.njit', (['"""f8[:, :, :](i8,i8)"""'], {}), "('f8[:, :, :](i8,i8)')\n", (26647, 26669), False, 'import numba\n'), ((26871, 26910), 'numba.njit', 'numba.njit', (['"""i8(f8[:,:,:],i8,f8,i8,i8)"""'], {}), "('i8(f8[:,:,:],i8,f8,i8,i8)')\n", (26881, 26910), False, 'import numba\n'), ((28035, 28074), 'numba.njit', 'numba.njit', (['"""i8(f8[:,:,:],i8,f8,i8,i8)"""'], {}), "('i8(f8[:,:,:],i8,f8,i8,i8)')\n", (28045, 28074), False, 'import numba\n'), ((29093, 29105), 'numba.njit', 'numba.njit', ([], {}), '()\n', (29103, 29105), False, 'import numba\n'), ((29795, 29807), 'numba.njit', 'numba.njit', ([], {}), '()\n', (29805, 29807), False, 'import numba\n'), ((30630, 30662), 'numba.njit', 'numba.njit', (['"""i8(f8[:, :, :],i8)"""'], {}), "('i8(f8[:, :, :],i8)')\n", (30640, 30662), False, 'import numba\n'), ((31085, 31110), 'numba.njit', 'numba.njit', ([], {'parallel': '(True)'}), '(parallel=True)\n', (31095, 31110), False, 'import numba\n'), ((31749, 31774), 'numba.njit', 'numba.njit', ([], {'parallel': '(True)'}), '(parallel=True)\n', (31759, 31774), False, 'import numba\n'), ((33448, 33473), 'numba.njit', 
'numba.njit', ([], {'parallel': '(True)'}), '(parallel=True)\n', (33458, 33473), False, 'import numba\n'), ((39481, 39521), 'locale.setlocale', 'locale.setlocale', (['locale.LC_NUMERIC', '"""C"""'], {}), "(locale.LC_NUMERIC, 'C')\n", (39497, 39521), False, 'import locale\n'), ((39525, 39537), 'numba.njit', 'numba.njit', ([], {}), '()\n', (39535, 39537), False, 'import numba\n'), ((39684, 39696), 'numba.njit', 'numba.njit', ([], {}), '()\n', (39694, 39696), False, 'import numba\n'), ((39883, 39895), 'numba.njit', 'numba.njit', ([], {}), '()\n', (39893, 39895), False, 'import numba\n'), ((40021, 40033), 'numba.njit', 'numba.njit', ([], {}), '()\n', (40031, 40033), False, 'import numba\n'), ((41413, 41425), 'numba.njit', 'numba.njit', ([], {}), '()\n', (41423, 41425), False, 'import numba\n'), ((41521, 41533), 'numba.njit', 'numba.njit', ([], {}), '()\n', (41531, 41533), False, 'import numba\n'), ((46288, 46300), 'numba.njit', 'numba.njit', ([], {}), '()\n', (46298, 46300), False, 'import numba\n'), ((47364, 47376), 'numba.njit', 'numba.njit', ([], {}), '()\n', (47374, 47376), False, 'import numba\n'), ((47609, 47621), 'numba.njit', 'numba.njit', ([], {}), '()\n', (47619, 47621), False, 'import numba\n'), ((47848, 47860), 'numba.njit', 'numba.njit', ([], {}), '()\n', (47858, 47860), False, 'import numba\n'), ((48099, 48111), 'numba.njit', 'numba.njit', ([], {}), '()\n', (48109, 48111), False, 'import numba\n'), ((48363, 48375), 'numba.njit', 'numba.njit', ([], {}), '()\n', (48373, 48375), False, 'import numba\n'), ((48552, 48564), 'numba.njit', 'numba.njit', ([], {}), '()\n', (48562, 48564), False, 'import numba\n'), ((49000, 49012), 'numba.njit', 'numba.njit', ([], {}), '()\n', (49010, 49012), False, 'import numba\n'), ((49476, 49488), 'numba.njit', 'numba.njit', ([], {}), '()\n', (49486, 49488), False, 'import numba\n'), ((49757, 49769), 'numba.njit', 'numba.njit', ([], {}), '()\n', (49767, 49769), False, 'import numba\n'), ((50032, 50044), 'numba.njit', 
'numba.njit', ([], {}), '()\n', (50042, 50044), False, 'import numba\n'), ((50373, 50385), 'numba.njit', 'numba.njit', ([], {}), '()\n', (50383, 50385), False, 'import numba\n'), ((50778, 50790), 'numba.njit', 'numba.njit', ([], {}), '()\n', (50788, 50790), False, 'import numba\n'), ((51079, 51091), 'numba.njit', 'numba.njit', ([], {}), '()\n', (51089, 51091), False, 'import numba\n'), ((51473, 51485), 'numba.njit', 'numba.njit', ([], {}), '()\n', (51483, 51485), False, 'import numba\n'), ((51773, 51785), 'numba.njit', 'numba.njit', ([], {}), '()\n', (51783, 51785), False, 'import numba\n'), ((52122, 52134), 'numba.njit', 'numba.njit', ([], {}), '()\n', (52132, 52134), False, 'import numba\n'), ((52554, 52566), 'numba.njit', 'numba.njit', ([], {}), '()\n', (52564, 52566), False, 'import numba\n'), ((59973, 59998), 'numba.njit', 'numba.njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (59983, 59998), False, 'import numba\n'), ((62425, 62465), 'numba.njit', 'numba.njit', ([], {'parallel': '(True)', 'fastmath': '(True)'}), '(parallel=True, fastmath=True)\n', (62435, 62465), False, 'import numba\n'), ((71037, 71049), 'numba.njit', 'numba.njit', ([], {}), '()\n', (71047, 71049), False, 'import numba\n'), ((71175, 71339), 'numba.njit', 'numba.njit', (['"""f4(f4[::1],f4[::1])"""'], {'fastmath': '(True)', 'cache': '(True)', 'locals': "{'result': numba.types.float32, 'diff': numba.types.float32, 'dim': numba.\n types.int32}"}), "('f4(f4[::1],f4[::1])', fastmath=True, cache=True, locals={\n 'result': numba.types.float32, 'diff': numba.types.float32, 'dim':\n numba.types.int32})\n", (71185, 71339), False, 'import numba\n'), ((1552, 1583), 'numpy.empty', 'np.empty', (['dim'], {'dtype': 'np.float32'}), '(dim, dtype=np.float32)\n', (1560, 1583), True, 'import numpy as np\n'), ((1989, 2024), 'numpy.empty', 'np.empty', (['indices.shape[0]', 'np.int8'], {}), '(indices.shape[0], np.int8)\n', (1997, 2024), True, 'import numpy as np\n'), ((2524, 2556), 'numpy.empty', 
'np.empty', (['n_left'], {'dtype': 'np.int64'}), '(n_left, dtype=np.int64)\n', (2532, 2556), True, 'import numpy as np\n'), ((2577, 2610), 'numpy.empty', 'np.empty', (['n_right'], {'dtype': 'np.int64'}), '(n_right, dtype=np.int64)\n', (2585, 2610), True, 'import numpy as np\n'), ((3409, 3440), 'numpy.empty', 'np.empty', (['dim'], {'dtype': 'np.float32'}), '(dim, dtype=np.float32)\n', (3417, 3440), True, 'import numpy as np\n'), ((3702, 3737), 'numpy.empty', 'np.empty', (['indices.shape[0]', 'np.int8'], {}), '(indices.shape[0], np.int8)\n', (3710, 3737), True, 'import numpy as np\n'), ((4251, 4283), 'numpy.empty', 'np.empty', (['n_left'], {'dtype': 'np.int64'}), '(n_left, dtype=np.int64)\n', (4259, 4283), True, 'import numpy as np\n'), ((4304, 4337), 'numpy.empty', 'np.empty', (['n_right'], {'dtype': 'np.int64'}), '(n_right, dtype=np.int64)\n', (4312, 4337), True, 'import numpy as np\n'), ((5993, 6028), 'numpy.empty', 'np.empty', (['indices.shape[0]', 'np.int8'], {}), '(indices.shape[0], np.int8)\n', (6001, 6028), True, 'import numpy as np\n'), ((6759, 6791), 'numpy.empty', 'np.empty', (['n_left'], {'dtype': 'np.int64'}), '(n_left, dtype=np.int64)\n', (6767, 6791), True, 'import numpy as np\n'), ((6812, 6845), 'numpy.empty', 'np.empty', (['n_right'], {'dtype': 'np.int64'}), '(n_right, dtype=np.int64)\n', (6820, 6845), True, 'import numpy as np\n'), ((7118, 7163), 'numpy.vstack', 'np.vstack', (['(hyperplane_inds, hyperplane_data)'], {}), '((hyperplane_inds, hyperplane_data))\n', (7127, 7163), True, 'import numpy as np\n'), ((8380, 8415), 'numpy.empty', 'np.empty', (['indices.shape[0]', 'np.int8'], {}), '(indices.shape[0], np.int8)\n', (8388, 8415), True, 'import numpy as np\n'), ((9159, 9191), 'numpy.empty', 'np.empty', (['n_left'], {'dtype': 'np.int64'}), '(n_left, dtype=np.int64)\n', (9167, 9191), True, 'import numpy as np\n'), ((9212, 9245), 'numpy.empty', 'np.empty', (['n_right'], {'dtype': 'np.int64'}), '(n_right, dtype=np.int64)\n', (9220, 9245), True, 'import 
numpy as np\n'), ((9518, 9563), 'numpy.vstack', 'np.vstack', (['(hyperplane_inds, hyperplane_data)'], {}), '((hyperplane_inds, hyperplane_data))\n', (9527, 9563), True, 'import numpy as np\n'), ((12499, 12532), 'scipy.sparse.isspmatrix_csr', 'scipy.sparse.isspmatrix_csr', (['data'], {}), '(data)\n', (12526, 12532), False, 'import scipy\n'), ((12547, 12571), 'numpy.arange', 'np.arange', (['data.shape[0]'], {}), '(data.shape[0])\n', (12556, 12571), True, 'import numpy as np\n'), ((15325, 15360), 'numpy.zeros', 'np.zeros', (['n_nodes'], {'dtype': 'np.float32'}), '(n_nodes, dtype=np.float32)\n', (15333, 15360), True, 'import numpy as np\n'), ((17467, 17482), 'numpy.sqrt', 'np.sqrt', (['result'], {}), '(result)\n', (17474, 17482), True, 'import numpy as np\n'), ((17663, 17678), 'numpy.sqrt', 'np.sqrt', (['result'], {}), '(result)\n', (17670, 17678), True, 'import numpy as np\n'), ((18436, 18474), 'numpy.empty', 'np.empty', (['x.shape[0]'], {'dtype': 'np.float64'}), '(x.shape[0], dtype=np.float64)\n', (18444, 18474), True, 'import numpy as np\n'), ((18709, 18724), 'numpy.sqrt', 'np.sqrt', (['result'], {}), '(result)\n', (18716, 18724), True, 'import numpy as np\n'), ((22263, 22290), 'numpy.sin', 'np.sin', (['(0.5 * (x[0] - y[0]))'], {}), '(0.5 * (x[0] - y[0]))\n', (22269, 22290), True, 'import numpy as np\n'), ((22306, 22333), 'numpy.sin', 'np.sin', (['(0.5 * (x[1] - y[1]))'], {}), '(0.5 * (x[1] - y[1]))\n', (22312, 22333), True, 'import numpy as np\n'), ((25199, 25250), 'numpy.empty', 'np.empty', (['(X.shape[0], n_neighbors)'], {'dtype': 'np.int32'}), '((X.shape[0], n_neighbors), dtype=np.int32)\n', (25207, 25250), True, 'import numpy as np\n'), ((25280, 25304), 'numba.prange', 'numba.prange', (['X.shape[0]'], {}), '(X.shape[0])\n', (25292, 25304), False, 'import numba\n'), ((26168, 26183), 'numpy.sqrt', 'np.sqrt', (['result'], {}), '(result)\n', (26175, 26183), True, 'import numpy as np\n'), ((26269, 26304), 'numpy.empty', 'np.empty', (['n_samples'], {'dtype': 
'np.int64'}), '(n_samples, dtype=np.int64)\n', (26277, 26304), True, 'import numpy as np\n'), ((32085, 32109), 'numba.prange', 'numba.prange', (['n_vertices'], {}), '(n_vertices)\n', (32097, 32109), False, 'import numba\n'), ((33587, 33649), 'numpy.zeros', 'np.zeros', (['(n_samples_transform, n_neighbors)'], {'dtype': 'dmat.dtype'}), '((n_samples_transform, n_neighbors), dtype=dmat.dtype)\n', (33595, 33649), True, 'import numpy as np\n'), ((33677, 33710), 'numba.prange', 'numba.prange', (['n_samples_transform'], {}), '(n_samples_transform)\n', (33689, 33710), False, 'import numba\n'), ((34027, 34039), 'numba.njit', 'numba.njit', ([], {}), '()\n', (34037, 34039), False, 'import numba\n'), ((36790, 36815), 'numba.njit', 'numba.njit', ([], {'parallel': '(True)'}), '(parallel=True)\n', (36800, 36815), False, 'import numba\n'), ((37273, 37298), 'numba.njit', 'numba.njit', ([], {'parallel': '(True)'}), '(parallel=True)\n', (37283, 37298), False, 'import numba\n'), ((38405, 38430), 'numba.njit', 'numba.njit', ([], {'parallel': '(True)'}), '(parallel=True)\n', (38415, 38430), False, 'import numba\n'), ((39569, 39581), 'numpy.sort', 'np.sort', (['arr'], {}), '(arr)\n', (39576, 39581), True, 'import numpy as np\n'), ((39935, 39961), 'numpy.concatenate', 'np.concatenate', (['(ar1, ar2)'], {}), '((ar1, ar2))\n', (39949, 39961), True, 'import numpy as np\n'), ((40133, 40180), 'numpy.zeros', 'np.zeros', (['result_ind.shape[0]'], {'dtype': 'np.float32'}), '(result_ind.shape[0], dtype=np.float32)\n', (40141, 40180), True, 'import numpy as np\n'), ((41637, 41684), 'numpy.zeros', 'np.zeros', (['result_ind.shape[0]'], {'dtype': 'np.float32'}), '(result_ind.shape[0], dtype=np.float32)\n', (41645, 41684), True, 'import numpy as np\n'), ((42299, 42324), 'numba.njit', 'numba.njit', ([], {'parallel': '(True)'}), '(parallel=True)\n', (42309, 42324), False, 'import numba\n'), ((47590, 47605), 'numpy.sqrt', 'np.sqrt', (['result'], {}), '(result)\n', (47597, 47605), True, 'import numpy as 
np\n'), ((48628, 48641), 'numpy.abs', 'np.abs', (['data1'], {}), '(data1)\n', (48634, 48641), True, 'import numpy as np\n'), ((48658, 48671), 'numpy.abs', 'np.abs', (['data2'], {}), '(data2)\n', (48664, 48671), True, 'import numpy as np\n'), ((48864, 48882), 'numpy.abs', 'np.abs', (['numer_data'], {}), '(numer_data)\n', (48870, 48882), True, 'import numpy as np\n'), ((48980, 48996), 'numpy.sum', 'np.sum', (['val_data'], {}), '(val_data)\n', (48986, 48996), True, 'import numpy as np\n'), ((49081, 49094), 'numpy.abs', 'np.abs', (['data1'], {}), '(data1)\n', (49087, 49094), True, 'import numpy as np\n'), ((49111, 49124), 'numpy.abs', 'np.abs', (['data2'], {}), '(data2)\n', (49117, 49124), True, 'import numpy as np\n'), ((49271, 49289), 'numpy.sum', 'np.sum', (['denom_data'], {}), '(denom_data)\n', (49277, 49289), True, 'import numpy as np\n'), ((49375, 49393), 'numpy.abs', 'np.abs', (['numer_data'], {}), '(numer_data)\n', (49381, 49393), True, 'import numpy as np\n'), ((49411, 49429), 'numpy.sum', 'np.sum', (['numer_data'], {}), '(numer_data)\n', (49417, 49429), True, 'import numpy as np\n'), ((53013, 53055), 'numpy.empty', 'np.empty', (['data1.shape[0]'], {'dtype': 'np.float32'}), '(data1.shape[0], dtype=np.float32)\n', (53021, 53055), True, 'import numpy as np\n'), ((53076, 53118), 'numpy.empty', 'np.empty', (['data2.shape[0]'], {'dtype': 'np.float32'}), '(data2.shape[0], dtype=np.float32)\n', (53084, 53118), True, 'import numpy as np\n'), ((60254, 60282), 'numpy.zeros', 'np.zeros', (['distances.shape[0]'], {}), '(distances.shape[0])\n', (60262, 60282), True, 'import numpy as np\n'), ((60296, 60324), 'numpy.zeros', 'np.zeros', (['distances.shape[0]'], {}), '(distances.shape[0])\n', (60304, 60324), True, 'import numpy as np\n'), ((60347, 60365), 'numpy.mean', 'np.mean', (['distances'], {}), '(distances)\n', (60354, 60365), True, 'import numpy as np\n'), ((62633, 62675), 'numpy.zeros', 'np.zeros', (['knn_indices.size'], {'dtype': 'np.int64'}), '(knn_indices.size, 
dtype=np.int64)\n', (62641, 62675), True, 'import numpy as np\n'), ((62687, 62729), 'numpy.zeros', 'np.zeros', (['knn_indices.size'], {'dtype': 'np.int64'}), '(knn_indices.size, dtype=np.int64)\n', (62695, 62729), True, 'import numpy as np\n'), ((62741, 62785), 'numpy.zeros', 'np.zeros', (['knn_indices.size'], {'dtype': 'np.float64'}), '(knn_indices.size, dtype=np.float64)\n', (62749, 62785), True, 'import numpy as np\n'), ((65231, 65288), 'numpy.empty', 'np.empty', (['(n_components, data.shape[1])'], {'dtype': 'np.float64'}), '((n_components, data.shape[1]), dtype=np.float64)\n', (65239, 65288), True, 'import numpy as np\n'), ((65433, 65502), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['component_centroids'], {'metric': 'metric'}), '(component_centroids, metric=metric, **metric_kwds)\n', (65451, 65502), False, 'from sklearn.metrics import pairwise_distances\n'), ((65539, 65568), 'numpy.exp', 'np.exp', (['(-distance_matrix ** 2)'], {}), '(-distance_matrix ** 2)\n', (65545, 65568), True, 'import numpy as np\n'), ((65966, 66015), 'numpy.empty', 'np.empty', (['(graph.shape[0], dim)'], {'dtype': 'np.float32'}), '((graph.shape[0], dim), dtype=np.float32)\n', (65974, 66015), True, 'import numpy as np\n'), ((69118, 69166), 'scipy.sparse.csgraph.connected_components', 'scipy.sparse.csgraph.connected_components', (['graph'], {}), '(graph)\n', (69159, 69166), False, 'import scipy\n'), ((69686, 69741), 'scipy.sparse.identity', 'scipy.sparse.identity', (['graph.shape[0]'], {'dtype': 'np.float64'}), '(graph.shape[0], dtype=np.float64)\n', (69707, 69741), False, 'import scipy\n'), ((71858, 71898), 'numba.prange', 'numba.prange', (['epochs_per_sample.shape[0]'], {}), '(epochs_per_sample.shape[0])\n', (71870, 71898), False, 'import numba\n'), ((83182, 83272), 'numba.njit', 'numba.njit', (['_optimize_layout_euclidean_single_epoch'], {'fastmath': '(True)', 'parallel': 'parallel'}), '(_optimize_layout_euclidean_single_epoch, fastmath=True, parallel\n =parallel)\n', 
(83192, 83272), False, 'import numba\n'), ((85663, 85694), 'numpy.linspace', 'np.linspace', (['(0)', '(spread * 3)', '(300)'], {}), '(0, spread * 3, 300)\n', (85674, 85694), True, 'import numpy as np\n'), ((85704, 85722), 'numpy.zeros', 'np.zeros', (['xv.shape'], {}), '(xv.shape)\n', (85712, 85722), True, 'import numpy as np\n'), ((85776, 85825), 'numpy.exp', 'np.exp', (['(-(xv[xv >= min_dist] - min_dist) / spread)'], {}), '(-(xv[xv >= min_dist] - min_dist) / spread)\n', (85782, 85825), True, 'import numpy as np\n'), ((85846, 85870), 'scipy.optimize.curve_fit', 'curve_fit', (['curve', 'xv', 'yv'], {}), '(curve, xv, yv)\n', (85855, 85870), False, 'from scipy.optimize import curve_fit\n'), ((91619, 91646), 'scipy.sparse.issparse', 'scipy.sparse.issparse', (['init'], {}), '(init)\n', (91640, 91646), False, 'import scipy\n'), ((94368, 94378), 'numpy.copy', 'np.copy', (['X'], {}), '(X)\n', (94375, 94378), True, 'import numpy as np\n'), ((94468, 94514), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {'norm': '"""l2"""', 'sublinear_tf': '(True)'}), "(norm='l2', sublinear_tf=True)\n", (94484, 94514), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n'), ((94575, 94668), 'sklearn.decomposition.TruncatedSVD', 'sklearn.decomposition.TruncatedSVD', ([], {'n_components': 'n_components', 'random_state': 'random_state'}), '(n_components=n_components, random_state=\n random_state)\n', (94609, 94668), False, 'import sklearn\n'), ((15108, 15196), 'numpy.zeros', 'np.zeros', (['(n_nodes, tree.hyperplane.shape[0], max_hyperplane_nnz)'], {'dtype': 'np.float32'}), '((n_nodes, tree.hyperplane.shape[0], max_hyperplane_nnz), dtype=np.\n float32)\n', (15116, 15196), True, 'import numpy as np\n'), ((15246, 15309), 'numpy.zeros', 'np.zeros', (['(n_nodes, tree.hyperplane.shape[0])'], {'dtype': 'np.float32'}), '((n_nodes, tree.hyperplane.shape[0]), dtype=np.float32)\n', (15254, 15309), True, 'import numpy as np\n'), ((15381, 15418), 'numpy.ones', 
'np.ones', (['(n_nodes, 2)'], {'dtype': 'np.int64'}), '((n_nodes, 2), dtype=np.int64)\n', (15388, 15418), True, 'import numpy as np\n'), ((15438, 15484), 'numpy.ones', 'np.ones', (['(n_leaves, leaf_size)'], {'dtype': 'np.int64'}), '((n_leaves, leaf_size), dtype=np.int64)\n', (15445, 15484), True, 'import numpy as np\n'), ((17068, 17115), 'numpy.vstack', 'np.vstack', (['[tree.indices for tree in rp_forest]'], {}), '([tree.indices for tree in rp_forest])\n', (17077, 17115), True, 'import numpy as np\n'), ((17147, 17163), 'numpy.array', 'np.array', (['[[-1]]'], {}), '([[-1]])\n', (17155, 17163), True, 'import numpy as np\n'), ((17784, 17803), 'numpy.abs', 'np.abs', (['(x[i] - y[i])'], {}), '(x[i] - y[i])\n', (17790, 17803), True, 'import numpy as np\n'), ((19271, 19290), 'numpy.abs', 'np.abs', (['(x[i] - y[i])'], {}), '(x[i] - y[i])\n', (19277, 19290), True, 'import numpy as np\n'), ((19314, 19333), 'numpy.abs', 'np.abs', (['(x[i] + y[i])'], {}), '(x[i] + y[i])\n', (19320, 19333), True, 'import numpy as np\n'), ((22432, 22449), 'numpy.arcsin', 'np.arcsin', (['result'], {}), '(result)\n', (22441, 22449), True, 'import numpy as np\n'), ((33729, 33754), 'numba.prange', 'numba.prange', (['n_neighbors'], {}), '(n_neighbors)\n', (33741, 33754), False, 'import numba\n'), ((33863, 33874), 'time.time', 'time.time', ([], {}), '()\n', (33872, 33874), False, 'import time\n'), ((38534, 38569), 'numba.prange', 'numba.prange', (['query_points.shape[0]'], {}), '(query_points.shape[0])\n', (38546, 38569), False, 'import numba\n'), ((47807, 47826), 'numpy.abs', 'np.abs', (['aux_data[i]'], {}), '(aux_data[i])\n', (47813, 47826), True, 'import numpy as np\n'), ((51195, 51215), 'numpy.all', 'np.all', (['(ind1 == ind2)'], {}), '(ind1 == ind2)\n', (51201, 51215), True, 'import numpy as np\n'), ((56697, 56727), 'scipy.sparse.isspmatrix_csr', 'scipy.sparse.isspmatrix_csr', (['X'], {}), '(X)\n', (56724, 56727), False, 'import scipy\n'), ((59620, 59643), 'numpy.any', 'np.any', (['(knn_indices < 
0)'], {}), '(knn_indices < 0)\n', (59626, 59643), True, 'import numpy as np\n'), ((63541, 63581), 'annoy.AnnoyIndex', 'AnnoyIndex', (['data.shape[1]'], {'metric': 'metric'}), '(data.shape[1], metric=metric)\n', (63551, 63581), False, 'from annoy import AnnoyIndex\n'), ((63599, 63623), 'numpy.arange', 'np.arange', (['data.shape[0]'], {}), '(data.shape[0])\n', (63608, 63623), True, 'import numpy as np\n'), ((64107, 64131), 'numpy.arange', 'np.arange', (['data.shape[0]'], {}), '(data.shape[0])\n', (64116, 64131), True, 'import numpy as np\n'), ((66656, 66715), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['[meta_embedding[label]]', 'meta_embedding'], {}), '([meta_embedding[label]], meta_embedding)\n', (66674, 66715), False, 'from sklearn.metrics import pairwise_distances\n'), ((67246, 67311), 'scipy.sparse.identity', 'scipy.sparse.identity', (['component_graph.shape[0]'], {'dtype': 'np.float64'}), '(component_graph.shape[0], dtype=np.float64)\n', (67267, 67311), False, 'import scipy\n'), ((74056, 74068), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (74064, 74068), True, 'import numpy as np\n'), ((74070, 74082), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (74078, 74082), True, 'import numpy as np\n'), ((74084, 74096), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (74092, 74096), True, 'import numpy as np\n'), ((75597, 75627), 'numpy.concatenate', 'np.concatenate', (['[vals, X_vals]'], {}), '([vals, X_vals])\n', (75611, 75627), True, 'import numpy as np\n'), ((77759, 77790), 'numpy.concatenate', 'np.concatenate', (['[vals, XY_vals]'], {}), '([vals, XY_vals])\n', (77773, 77790), True, 'import numpy as np\n'), ((77961, 77992), 'numpy.concatenate', 'np.concatenate', (['[vals, YX_vals]'], {}), '([vals, YX_vals])\n', (77975, 77992), True, 'import numpy as np\n'), ((82231, 82304), 'numpy.concatenate', 'np.concatenate', (['[joint_graphs[k].row, joint_graphs[k].col + len_Xs[k[0]]]'], {}), '([joint_graphs[k].row, joint_graphs[k].col + 
len_Xs[k[0]]])\n', (82245, 82304), True, 'import numpy as np\n'), ((82396, 82469), 'numpy.concatenate', 'np.concatenate', (['[joint_graphs[k].col + len_Xs[k[0]], joint_graphs[k].row]'], {}), '([joint_graphs[k].col + len_Xs[k[0]], joint_graphs[k].row])\n', (82410, 82469), True, 'import numpy as np\n'), ((82983, 83027), 'numpy.copy', 'np.copy', (['joint_epochs_per_negative_sample[k]'], {}), '(joint_epochs_per_negative_sample[k])\n', (82990, 83027), True, 'import numpy as np\n'), ((83095, 83130), 'numpy.copy', 'np.copy', (['joint_epochs_per_sample[k]'], {}), '(joint_epochs_per_sample[k])\n', (83102, 83130), True, 'import numpy as np\n'), ((85973, 86016), 'numpy.ones', 'np.ones', (['weights.shape[0]'], {'dtype': 'np.float64'}), '(weights.shape[0], dtype=np.float64)\n', (85980, 86016), True, 'import numpy as np\n'), ((87227, 87251), 'numpy.clip', 'np.clip', (['strengths', '(0)', '(1)'], {}), '(strengths, 0, 1)\n', (87234, 87251), True, 'import numpy as np\n'), ((90201, 90232), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (90222, 90232), False, 'import warnings\n'), ((91525, 91545), 'numpy.log2', 'np.log2', (['n_neighbors'], {}), '(n_neighbors)\n', (91532, 91545), True, 'import numpy as np\n'), ((91703, 91717), 'numpy.array', 'np.array', (['init'], {}), '(init)\n', (91711, 91717), True, 'import numpy as np\n'), ((94011, 94035), 'warnings.resetwarnings', 'warnings.resetwarnings', ([], {}), '()\n', (94033, 94035), False, 'import warnings\n'), ((94179, 94205), 'numpy.concatenate', 'np.concatenate', (['embeddings'], {}), '(embeddings)\n', (94193, 94205), True, 'import numpy as np\n'), ((94415, 94450), 'numpy.where', 'np.where', (['(sc_count < 1)', 'sc_count', '(1)'], {}), '(sc_count < 1, sc_count, 1)\n', (94423, 94450), True, 'import numpy as np\n'), ((16725, 16921), 'warnings.warn', 'warn', (['"""Random Projection forest initialisation failed due to recursionlimit being reached. 
Something is a little strange with your data, and this may take longer than normal to compute."""'], {}), "(\n 'Random Projection forest initialisation failed due to recursionlimit being reached. Something is a little strange with your data, and this may take longer than normal to compute.'\n )\n", (16729, 16921), False, 'from warnings import warn\n'), ((17939, 17958), 'numpy.abs', 'np.abs', (['(x[i] - y[i])'], {}), '(x[i] - y[i])\n', (17945, 17958), True, 'import numpy as np\n'), ((18090, 18109), 'numpy.abs', 'np.abs', (['(x[i] - y[i])'], {}), '(x[i] - y[i])\n', (18096, 18109), True, 'import numpy as np\n'), ((19006, 19018), 'numpy.abs', 'np.abs', (['x[i]'], {}), '(x[i])\n', (19012, 19018), True, 'import numpy as np\n'), ((19021, 19033), 'numpy.abs', 'np.abs', (['y[i]'], {}), '(y[i])\n', (19027, 19033), True, 'import numpy as np\n'), ((21320, 21334), 'numpy.sum', 'np.sum', (['(x != 0)'], {}), '(x != 0)\n', (21326, 21334), True, 'import numpy as np\n'), ((21356, 21370), 'numpy.sum', 'np.sum', (['(y != 0)'], {}), '(y != 0)\n', (21362, 21370), True, 'import numpy as np\n'), ((39609, 39635), 'numpy.ones', 'np.ones', (['(1)'], {'dtype': 'np.bool_'}), '(1, dtype=np.bool_)\n', (39616, 39635), True, 'import numpy as np\n'), ((48057, 48076), 'numpy.abs', 'np.abs', (['aux_data[i]'], {}), '(aux_data[i])\n', (48063, 48076), True, 'import numpy as np\n'), ((48304, 48323), 'numpy.abs', 'np.abs', (['aux_data[i]'], {}), '(aux_data[i])\n', (48310, 48323), True, 'import numpy as np\n'), ((51317, 51335), 'numpy.sum', 'np.sum', (['(data1 != 0)'], {}), '(data1 != 0)\n', (51323, 51335), True, 'import numpy as np\n'), ((51357, 51375), 'numpy.sum', 'np.sum', (['(data2 != 0)'], {}), '(data2 != 0)\n', (51363, 51375), True, 'import numpy as np\n'), ((59657, 59801), 'warnings.warn', 'warn', (['"""Failed to correctly find n_neighbors for some samples.Results may be less than ideal. 
Try re-running withdifferent parameters."""'], {}), "(\n 'Failed to correctly find n_neighbors for some samples.Results may be less than ideal. Try re-running withdifferent parameters.'\n )\n", (59661, 59801), False, 'from warnings import warn\n'), ((60182, 60192), 'numpy.log2', 'np.log2', (['k'], {}), '(k)\n', (60189, 60192), True, 'import numpy as np\n'), ((61930, 61952), 'numpy.mean', 'np.mean', (['ith_distances'], {}), '(ith_distances)\n', (61937, 61952), True, 'import numpy as np\n'), ((63919, 63946), 'sklearn.neighbors.KDTree', 'KDTree', (['data'], {'metric': 'metric'}), '(data, metric=metric)\n', (63925, 63946), False, 'from sklearn.neighbors import KDTree\n'), ((64311, 64332), 'numpy.asarray', 'np.asarray', (['ckdo_dist'], {}), '(ckdo_dist)\n', (64321, 64332), True, 'import numpy as np\n'), ((64334, 64354), 'numpy.asarray', 'np.asarray', (['ckdo_ind'], {}), '(ckdo_ind)\n', (64344, 64354), True, 'import numpy as np\n'), ((65596, 65655), 'sklearn.manifold.SpectralEmbedding', 'SpectralEmbedding', ([], {'n_components': 'dim', 'affinity': '"""precomputed"""'}), "(n_components=dim, affinity='precomputed')\n", (65613, 65655), False, 'from sklearn.manifold import SpectralEmbedding\n'), ((66282, 66309), 'numpy.ceil', 'np.ceil', (['(n_components / 2.0)'], {}), '(n_components / 2.0)\n', (66289, 66309), True, 'import numpy as np\n'), ((66398, 66422), 'numpy.vstack', 'np.vstack', (['[base, -base]'], {}), '([base, -base])\n', (66407, 66422), True, 'import numpy as np\n'), ((69786, 69804), 'numpy.sqrt', 'np.sqrt', (['diag_data'], {}), '(diag_data)\n', (69793, 69804), True, 'import numpy as np\n'), ((69934, 69957), 'numpy.sqrt', 'np.sqrt', (['graph.shape[0]'], {}), '(graph.shape[0])\n', (69941, 69957), True, 'import numpy as np\n'), ((70498, 70521), 'numpy.argsort', 'np.argsort', (['eigenvalues'], {}), '(eigenvalues)\n', (70508, 70521), True, 'import numpy as np\n'), ((70617, 70846), 'warnings.warn', 'warn', (['"""WARNING: spectral initialisation failed! 
The eigenvector solver\nfailed. This is likely due to too small an eigengap. Consider\nadding some noise or jitter to your data.\n\nFalling back to random initialisation!"""'], {}), '(\n """WARNING: spectral initialisation failed! The eigenvector solver\nfailed. This is likely due to too small an eigengap. Consider\nadding some noise or jitter to your data.\n\nFalling back to random initialisation!"""\n )\n', (70621, 70846), False, 'from warnings import warn\n'), ((74378, 74406), 'scipy.sparse.issparse', 'scipy.sparse.issparse', (['Xs[i]'], {}), '(Xs[i])\n', (74399, 74406), False, 'import scipy\n'), ((74463, 74525), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['Xs[i]'], {'metric': 'metrics[i]'}), '(Xs[i], metric=metrics[i], **metric_kwds[i])\n', (74481, 74525), False, 'from sklearn.metrics import pairwise_distances\n'), ((82714, 82774), 'numpy.concatenate', 'np.concatenate', (['[joint_graphs[k].data, joint_graphs[k].data]'], {}), '([joint_graphs[k].data, joint_graphs[k].data])\n', (82728, 82774), True, 'import numpy as np\n'), ((90282, 90310), 'scipy.sparse.issparse', 'scipy.sparse.issparse', (['Xs[i]'], {}), '(Xs[i])\n', (90303, 90310), False, 'import scipy\n'), ((90332, 90347), 'numpy.array', 'np.array', (['Xs[i]'], {}), '(Xs[i])\n', (90340, 90347), True, 'import numpy as np\n'), ((91754, 91768), 'numpy.sum', 'np.sum', (['len_Xs'], {}), '(len_Xs)\n', (91760, 91768), True, 'import numpy as np\n'), ((18289, 18308), 'numpy.abs', 'np.abs', (['(x[i] - y[i])'], {}), '(x[i] - y[i])\n', (18295, 18308), True, 'import numpy as np\n'), ((19084, 19103), 'numpy.abs', 'np.abs', (['(x[i] - y[i])'], {}), '(x[i] - y[i])\n', (19090, 19103), True, 'import numpy as np\n'), ((39850, 39876), 'numpy.concatenate', 'np.concatenate', (['(ar1, ar2)'], {}), '((ar1, ar2))\n', (39864, 39876), True, 'import numpy as np\n'), ((60657, 60685), 'numpy.floor', 'np.floor', (['local_connectivity'], {}), '(local_connectivity)\n', (60665, 60685), True, 'import numpy as np\n'), ((61182, 
61204), 'numpy.max', 'np.max', (['non_zero_dists'], {}), '(non_zero_dists)\n', (61188, 61204), True, 'import numpy as np\n'), ((61501, 61523), 'numpy.fabs', 'np.fabs', (['(psum - target)'], {}), '(psum - target)\n', (61508, 61523), True, 'import numpy as np\n'), ((63790, 63822), 'faiss.IndexFlatL2', 'faiss.IndexFlatL2', (['data.shape[1]'], {}), '(data.shape[1])\n', (63807, 63822), False, 'import faiss\n'), ((63881, 63894), 'scipy.spatial.cKDTree', 'cKDTree', (['data'], {}), '(data)\n', (63888, 63894), False, 'from scipy.spatial import cKDTree\n'), ((66337, 66346), 'numpy.eye', 'np.eye', (['k'], {}), '(k)\n', (66343, 66346), True, 'import numpy as np\n'), ((66348, 66370), 'numpy.zeros', 'np.zeros', (['(k, dim - k)'], {}), '((k, dim - k))\n', (66356, 66370), True, 'import numpy as np\n'), ((67364, 67382), 'numpy.sqrt', 'np.sqrt', (['diag_data'], {}), '(diag_data)\n', (67371, 67382), True, 'import numpy as np\n'), ((67595, 67628), 'numpy.sqrt', 'np.sqrt', (['component_graph.shape[0]'], {}), '(component_graph.shape[0])\n', (67602, 67628), True, 'import numpy as np\n'), ((67962, 67985), 'numpy.argsort', 'np.argsort', (['eigenvalues'], {}), '(eigenvalues)\n', (67972, 67985), True, 'import numpy as np\n'), ((68350, 68579), 'warnings.warn', 'warn', (['"""WARNING: spectral initialisation failed! The eigenvector solver\nfailed. This is likely due to too small an eigengap. Consider\nadding some noise or jitter to your data.\n\nFalling back to random initialisation!"""'], {}), '(\n """WARNING: spectral initialisation failed! The eigenvector solver\nfailed. This is likely due to too small an eigengap. 
Consider\nadding some noise or jitter to your data.\n\nFalling back to random initialisation!"""\n )\n', (68354, 68579), False, 'from warnings import warn\n'), ((74730, 74765), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (74751, 74765), True, 'import numpy as np\n'), ((75042, 75077), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (75063, 75077), True, 'import numpy as np\n'), ((76256, 76278), 'numpy.argsort', 'np.argsort', (['XY'], {'axis': '(1)'}), '(XY, axis=1)\n', (76266, 76278), True, 'import numpy as np\n'), ((76324, 76343), 'numpy.sort', 'np.sort', (['XY'], {'axis': '(1)'}), '(XY, axis=1)\n', (76331, 76343), True, 'import numpy as np\n'), ((76392, 76416), 'numpy.argsort', 'np.argsort', (['XY.T'], {'axis': '(1)'}), '(XY.T, axis=1)\n', (76402, 76416), True, 'import numpy as np\n'), ((76462, 76483), 'numpy.sort', 'np.sort', (['XY.T'], {'axis': '(1)'}), '(XY.T, axis=1)\n', (76469, 76483), True, 'import numpy as np\n'), ((90719, 90743), 'scipy.sparse.issparse', 'scipy.sparse.issparse', (['i'], {}), '(i)\n', (90740, 90743), False, 'import scipy\n'), ((90749, 90760), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (90757, 90760), True, 'import numpy as np\n'), ((22370, 22382), 'numpy.cos', 'np.cos', (['x[0]'], {}), '(x[0])\n', (22376, 22382), True, 'import numpy as np\n'), ((22385, 22397), 'numpy.cos', 'np.cos', (['y[0]'], {}), '(y[0])\n', (22391, 22397), True, 'import numpy as np\n'), ((23465, 23489), 'numpy.sqrt', 'np.sqrt', (['(norm_x * norm_y)'], {}), '(norm_x * norm_y)\n', (23472, 23489), True, 'import numpy as np\n'), ((24100, 24124), 'numpy.sqrt', 'np.sqrt', (['(norm_x * norm_y)'], {}), '(norm_x * norm_y)\n', (24107, 24124), True, 'import numpy as np\n'), ((61412, 61430), 'numpy.exp', 'np.exp', (['(-(d / mid))'], {}), '(-(d / mid))\n', (61418, 61430), True, 'import numpy as np\n'), ((63104, 63154), 'numpy.exp', 'np.exp', (['(-((knn_dists[i, j] - rhos[i]) / 
sigmas[i]))'], {}), '(-((knn_dists[i, j] - rhos[i]) / sigmas[i]))\n', (63110, 63154), True, 'import numpy as np\n'), ((64521, 64531), 'numpy.sqrt', 'np.sqrt', (['D'], {}), '(D)\n', (64528, 64531), True, 'import numpy as np\n'), ((67863, 67882), 'numpy.ones', 'np.ones', (['L.shape[0]'], {}), '(L.shape[0])\n', (67870, 67882), True, 'import numpy as np\n'), ((68092, 68119), 'numpy.abs', 'np.abs', (['component_embedding'], {}), '(component_embedding)\n', (68098, 68119), True, 'import numpy as np\n'), ((70221, 70240), 'numpy.ones', 'np.ones', (['L.shape[0]'], {}), '(L.shape[0])\n', (70228, 70240), True, 'import numpy as np\n'), ((80623, 80640), 'sklearn.neighbors.KDTree', 'KDTree', (['init_data'], {}), '(init_data)\n', (80629, 80640), False, 'from sklearn.neighbors import KDTree\n'), ((80721, 80740), 'numpy.mean', 'np.mean', (['dist[:, 1]'], {}), '(dist[:, 1])\n', (80728, 80740), True, 'import numpy as np\n'), ((83457, 83479), 'numpy.copy', 'np.copy', (['embeddings[i]'], {}), '(embeddings[i])\n', (83464, 83479), True, 'import numpy as np\n'), ((84368, 84420), 'numpy.concatenate', 'np.concatenate', (['[embeddings[k[0]], embeddings[k[1]]]'], {}), '([embeddings[k[0]], embeddings[k[1]]])\n', (84382, 84420), True, 'import numpy as np\n'), ((90970, 90993), 'numpy.arange', 'np.arange', (['len_Xs[k[0]]'], {}), '(len_Xs[k[0]])\n', (90979, 90993), True, 'import numpy as np\n'), ((90995, 91018), 'numpy.arange', 'np.arange', (['len_Xs[k[1]]'], {}), '(len_Xs[k[1]])\n', (91004, 91018), True, 'import numpy as np\n'), ((57485, 57504), 'numpy.log2', 'np.log2', (['X.shape[0]'], {}), '(X.shape[0])\n', (57492, 57504), True, 'import numpy as np\n'), ((58740, 58759), 'numpy.log2', 'np.log2', (['X.shape[0]'], {}), '(X.shape[0])\n', (58747, 58759), True, 'import numpy as np\n'), ((79657, 79692), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (79678, 79692), True, 'import numpy as np\n'), ((81268, 81292), 'numpy.max', 'np.max', 
(['embeddings[i]', '(0)'], {}), '(embeddings[i], 0)\n', (81274, 81292), True, 'import numpy as np\n'), ((81295, 81319), 'numpy.min', 'np.min', (['embeddings[i]', '(0)'], {}), '(embeddings[i], 0)\n', (81301, 81319), True, 'import numpy as np\n'), ((55991, 56012), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (56000, 56012), True, 'import numpy as np\n'), ((56600, 56618), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (56608, 56618), True, 'import numpy as np\n'), ((56628, 56646), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (56636, 56646), True, 'import numpy as np\n'), ((80549, 80572), 'numpy.unique', 'np.unique', (['init'], {'axis': '(0)'}), '(init, axis=0)\n', (80558, 80572), True, 'import numpy as np\n'), ((81214, 81238), 'numpy.min', 'np.min', (['embeddings[i]', '(0)'], {}), '(embeddings[i], 0)\n', (81220, 81238), True, 'import numpy as np\n'), ((79219, 79254), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (79240, 79254), True, 'import numpy as np\n'), ((79834, 79853), 'numpy.abs', 'np.abs', (['X_embedding'], {}), '(X_embedding)\n', (79840, 79853), True, 'import numpy as np\n'), ((83684, 83719), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (83705, 83719), True, 'import numpy as np\n'), ((84649, 84684), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (84670, 84684), True, 'import numpy as np\n'), ((79969, 80004), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (79990, 80004), True, 'import numpy as np\n'), ((80228, 80263), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (80249, 80263), True, 'import numpy as np\n'), ((80775, 80810), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (80796, 80810), True, 'import 
numpy as np\n'), ((83728, 83746), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (83736, 83746), True, 'import numpy as np\n'), ((83756, 83774), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (83764, 83774), True, 'import numpy as np\n'), ((84693, 84711), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (84701, 84711), True, 'import numpy as np\n'), ((84721, 84739), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (84729, 84739), True, 'import numpy as np\n')] |
# /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2017-2018, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
# pylint: disable=too-many-lines
""" Implementation of the SVD model compression technique for TensorFlow """
import os
from functools import reduce
import operator
from enum import Enum
import numpy as np
import tensorflow as tf
from aimet_tensorflow import graph_editor
from aimet_tensorflow.common import core, graph_eval
import libpymo as pymo
from aimet_common import statistics_util as stats_u
from aimet_common.utils import AimetLogger
# Module-level logger for the SVD area of aimet
logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Svd)
# Maps the user-facing svd_type strings to the PyMo SVD-type enum values
_SVD_TYPES = {'svd': pymo.TYPE_SINGLE,
              'ssvd': pymo.TYPE_SUCCESSIVE}
# Maps TF op types to the PyMo layer-type enum values
_SVD_LAYER_TYPES = {'Conv2D': pymo.LAYER_TYPE_CONV,
                    'MatMul': pymo.LAYER_TYPE_FC}
# Layers whose output or (in*kh*kw) dimension is below this are pruned from selection
_MIN_LAYER_DIM_FOR_SVD = 10
# Only these TF op types are considered for SVD compression
_SVD_SUPPORTED_LAYER_TYPES = ['Conv2D', 'MatMul']
class CostMetric(Enum):
    """ Enumeration of metrics to measure cost of a model/layer """
    mac = 1     # cost measured in multiply-accumulate operations
    memory = 2  # cost measured in number of weight parameters
class LayerAttributes:
    """ Per-layer record bundling the data the SVD algorithm tracks """

    def __init__(self, layer_ref, cost, weight_shape):
        """
        Build an attribute record for a single layer.

        :param layer_ref: Handle to the underlying TensorFlow op
        :param cost: (memory, mac) cost tuple computed for the layer
        :param weight_shape: Shape of the layer's weight tensor
        """
        self.weight_shape = weight_shape
        self.cost = cost
        self.layer_ref = layer_ref
class Svd:
"""A class for performing singular value decomposition on a tensorflow model.
The Svd class enables model compression through singular value decomposition (SVD).
It can analyze convolution and fully connected layers and perform
some analysis to find the optimal ranks for balancing compression and the
accuracy of the network.
"""
# pylint: disable=too-many-instance-attributes
def __init__(self, graph, checkpoint, metric, output_file='./svd_graph', svd_type='svd',
num_layers=0, layers=None, layer_ranks=None, num_ranks=20, gpu=True, debug=False, no_evaluation=False,
layer_selection_threshold=0.6):
"""
Constructor for the Svd class
Constructs the Svd class from a set of options passed in at construction. The class takes
a number of named arguments which are detailed below.
:param graph: The file path to the meta graph.
:param checkpoint: The file path to the tensorflow checkpoint file.
:param metric: The metric to use for determining the optimal compression. Either
'mac' for optimizing compression to minimize multiplies and accumulates or 'memory' which
optimizes for overall memory footprint. Defaults to 'memory'
:param output_file: The file path for saving the compressed tensorflow graph.
aimet will save to the directory specified, using output_file as a filename prefix
:param svd_type: Indicates which algorithm should be used, either
'svd' or 'ssvd'. Defaults to 'svd'.
:param num_layers: The number of layers to compress. Defaults to '0' which uses a
heuristic to determine the optimal number of layers to compress.
:param layers: A list of op names to compress. All other layers will be ignored.
Overrides num_layers and sets it to the length of this list.
:param layer_ranks: required only if no_evaluation is set to True. A list of tuples to compress
layers specified in layers argument.
:param num_ranks: The number of ranks (compression_points) to evaluate for compression.
Defaults to 20. Value should be greater than 2.
:param gpu: Indicates if the algorithm should run on GPU or CPU. Defaults to GPU. To
use CPU set to false
:param debug: If true debug messages will be printed. Defaults to False.
:param no_evaluation: If true, ranks will be set manually from user. Defaults to False.
:param layer_selection_threshold: Threshold (0-1) to use to select the top layers in the network
:raises: ValueError: An error occurred processing one of the input parameters.
"""
# pylint: disable=too-many-arguments
self._sanity_check_constructor_parameters(layer_selection_threshold, layers, no_evaluation, num_layers,
num_ranks, svd_type)
self._gpu = gpu
self._debug = debug
self._default_meta_graph = graph
self._default_checkpoint = checkpoint
self._output_file = output_file
self._output_dir = os.path.dirname(output_file)
if not os.path.exists(self._output_dir):
os.makedirs(self._output_dir)
logger.info('Saving SVD output as: %s', output_file)
self.svd_type = _SVD_TYPES[svd_type]
self._metric = metric
self._num_layers = num_layers
self._selected_layers = []
self._networkCost = None
if layers:
logger.debug('Attempting to compress: %s', layers)
self._layers_to_compress = layers
else:
self._layers_to_compress = []
if num_ranks < 0:
raise ValueError("num_ranks must be >= 0")
self._num_ranks = num_ranks
if layer_ranks:
self._layer_ranks = layer_ranks
self._num_layer_ranks = len(layer_ranks)
logger.debug('Attempting to compress model with user provided ranks : %s', layer_ranks)
# Setup the SVD instance and load the graph
self._svd = pymo.GetSVDInstance()
self._no_eval = no_evaluation
self._layer_selection_threshold = layer_selection_threshold
self._model_performance_candidate_ranks = list()
# Todo: Need to look at these attributes and see how to handle them better
# Very likely these attributes don't need to be object attributes
self._generator = None
self._eval_names = None
self._eval_func = None
self._iterations = None
self._run_graph = None
self._baseline_perf = None
self._error_margin = None
self._compressible_ops = None
@staticmethod
def _sanity_check_constructor_parameters(layer_selection_threshold, layers, no_evaluation, num_layers,
num_ranks, svd_type):
if svd_type not in _SVD_TYPES:
raise ValueError('Invalid SVD mode: ' + svd_type)
if no_evaluation:
if not layers:
raise ValueError('Both layers and layer_rank parameters are needed for Manual mode')
if layer_selection_threshold < 0 or layer_selection_threshold > 1:
raise ValueError('Layer selection threshold should be between 0 and 1')
if not no_evaluation:
if num_ranks <= 2:
raise ValueError('Number of ranks should be greater than 2 for auto mode')
if num_layers < 0:
raise ValueError("num_layers must be >= 0")
def _compute_per_layer_compression_ratio(self, split_layers_shape, output_shape, original_layer_shape, op_type):
"""
Updates the per layer statistics
:param orig_layer: The layer before it was split
:param split_layers: List of split layers
:return: The compression ratio of split layers
"""
orig_layer_cost = self._compute_layer_cost(original_layer_shape, output_shape, op_type)
split_layers_mem_cost = 0
split_layers_mac_cost = 0
for layer_shape in split_layers_shape:
mem_cost, mac_cost = self._compute_layer_cost(layer_shape, output_shape, op_type)
if not isinstance(mem_cost, int):
mem_cost = mem_cost.value
if not isinstance(mac_cost, int):
mac_cost = mac_cost.value
split_layers_mem_cost += mem_cost
split_layers_mac_cost += mac_cost
if self._metric is CostMetric.memory:
savings = orig_layer_cost[0] - split_layers_mem_cost
ratio = savings / orig_layer_cost[0]
logger.debug('Original Layer Cost: %s Memory Compression Ratio: %s', orig_layer_cost[0], ratio)
else:
savings = orig_layer_cost[1] - split_layers_mac_cost
ratio = savings / orig_layer_cost[1]
logger.debug('Original Layer Cost: %s MAC Compression Ratio: %s', orig_layer_cost[1], ratio)
return ratio
    @staticmethod
    def _reset_session(sess):
        """
        Reset the given tf.compat.v1.Session

        Clears the TF default graph and then closes the session, releasing its
        resources. Order matters: the graph is reset before the session closes.

        :param sess: tf.compat.v1.Session
        :return: None
        """
        tf.compat.v1.reset_default_graph()
        sess.close()
@staticmethod
def _load_graph(graph, meta_graph, checkpoint):
"""
Load a graph and checkpoint and create a new tf.compat.v1.Session
:param graph: TF graph
:param meta_graph: Meta file
:param checkpoint: Checkpoint file
:return: Newly created session
"""
logger.info('Loading graph: %s', meta_graph)
sess = tf.compat.v1.Session(graph=graph)
# Open the graph and retore the parameters
saver = tf.compat.v1.train.import_meta_graph(meta_graph)
saver.restore(sess, checkpoint)
return sess, saver
@staticmethod
def _get_layer_type(op):
"""
Converts TF layer types into corresponding PyMo layer enumerated values
:param op: TF op
:return: PyMo enumerated value corresponding to the type of op
"""
if op.type in _SVD_LAYER_TYPES:
return _SVD_LAYER_TYPES[op.type]
return pymo.LAYER_TYPE_OTHER
    class LayerSelectionScheme(Enum):
        """ Enumeration of schemes supported to select layers for SVD compression """
        manual = 1         # caller supplies an explicit list of layer names
        top_n_layers = 2   # pick the N most expensive layers
        top_x_percent = 3  # pick layers until a percentage of the total cost is covered
    @staticmethod
    def _pick_compression_layers(sess, cost_metric, layer_select_scheme, **kwargs):
        """
        Pick layers for SVD compression given parameters

        :param sess: tf.compat.v1.Session
        :param cost_metric: Metric to use for evaluating layer cost (either in terms of memory or mac)
        :param layer_select_scheme: Layer selection scheme to use
        :param kwargs: Keyword arguments that depend on which layer selection scheme is specified
               top_n_layers:: num_layers: Number of layers to pick
               top_x_percent:: percent_thresh: Top layers up to this parameter will be selected
               manual:: layers_to_compress: List of layers (names) to compress
        :return: Tuple of (list of selected LayerAttributes, (memory, mac) cost of the whole network)
        :raises TypeError: If cost_metric or layer_select_scheme is of the wrong type
        :raises RuntimeError: If no suitable layer is found
        """
        # pylint: disable=too-many-locals,too-many-branches
        if not isinstance(cost_metric, CostMetric):
            raise TypeError("cost_metric is not of type CostMetric")
        if not isinstance(layer_select_scheme, Svd.LayerSelectionScheme):
            raise TypeError("layer_selection_scheme is not of type Svd.LayerSelectionScheme")
        # Find all compressible ops
        query = core.OpQuery(sess.graph)
        compressible_ops = query.get_weight_ops()
        compressible_ops = [op for op in compressible_ops if op.type in _SVD_SUPPORTED_LAYER_TYPES]
        layer_attributes_list = Svd._create_layer_attributes_list(compressible_ops, sess)
        # Network cost is measured over ALL compressible layers, before any pruning
        network_cost = Svd._compute_network_cost(layer_attributes_list)
        # Heuristic1: Reject any ops whose param shape does not meet a base criterion
        pruned_list = []
        for layer_attributes in layer_attributes_list:
            h, w, n, c = layer_attributes.weight_shape
            if (n >= _MIN_LAYER_DIM_FOR_SVD) and ((c * h * w) >= _MIN_LAYER_DIM_FOR_SVD):
                pruned_list.append(layer_attributes)
            else:
                print("Pruning out {}: shape is {}".format(layer_attributes.layer_ref.name,
                                                           layer_attributes.weight_shape))
        # Reset layer_attributes_list for the next phase
        layer_attributes_list = pruned_list
        pruned_list = []
        # Sort the attribute list based on cost (most expensive first)
        if cost_metric == CostMetric.memory:
            layer_attributes_list.sort(key=lambda x: x.cost[0], reverse=True)
        else:
            layer_attributes_list.sort(key=lambda x: x.cost[1], reverse=True)
        if layer_select_scheme == Svd.LayerSelectionScheme.top_n_layers:
            num_layers = kwargs['num_layers']
            pruned_list = layer_attributes_list[:num_layers]
        elif layer_select_scheme == Svd.LayerSelectionScheme.top_x_percent:
            # Greedily accumulate layers while the running cost stays under the threshold
            percent_thresh = kwargs['percent_thresh']
            accum_cost = 0.
            total_cost = network_cost[0] if (cost_metric == CostMetric.memory) else network_cost[1]
            for layer in layer_attributes_list:
                cost = layer.cost[0] if (cost_metric == CostMetric.memory) else layer.cost[1]
                if (100 * (cost + accum_cost)/total_cost) < percent_thresh:
                    pruned_list.append(layer)
                    accum_cost += cost
        elif layer_select_scheme == Svd.LayerSelectionScheme.manual:
            layers_to_compress = kwargs['layers_to_compress']
            for layer in layer_attributes_list:
                if layer.layer_ref.name in layers_to_compress:
                    pruned_list.append(layer)
        if not pruned_list:
            raise RuntimeError('No suitable layers found in the model.')
        return pruned_list, network_cost
@staticmethod
def _create_layer_attributes_list(ops_to_use, sess):
"""
Creates list of layer attributes given a set of TF ops
:param ops_to_use: TF ops to collect layer attributes for
:param sess: tf.compat.v1.Session to use
:return: Created list of layer attributes
"""
query = core.OpQuery(sess.graph)
layer_attributes_list = []
for op in ops_to_use:
weight_shape = query.get_weights_for_op(op).eval(session=sess).shape
if op.type == 'MatMul':
n, c = weight_shape
weight_shape = (1, 1, n, c)
output_dims = op.outputs[0].shape
cost = Svd._compute_layer_cost(weight_shape, output_dims, op.type)
layer_attributes_list.append(LayerAttributes(op, cost, weight_shape))
return layer_attributes_list
@staticmethod
def _compute_network_cost(layer_attributes_list):
"""
Compute aggregate cost of the layers included in the layer attributes list
:param layer_attributes_list: List of layer attributes
:return: Computed cost
"""
mac_cost = 0
mem_cost = 0
for layer_attributes in layer_attributes_list:
op_mem_cost, op_mac_cost = layer_attributes.cost
mem_cost += op_mem_cost
mac_cost += op_mac_cost
return mem_cost, mac_cost
@staticmethod
def _compute_layer_cost(weights_shape, output_dims, op_type):
"""
Compute cost of a layer
:param weights_shape: Shape of the weights of this layer
:param output_dims: Shape of the output of this layer
:param op_type: Type of this TF op
:return: Computed layer cost
"""
# for outputs, TF uses dims [N,H,W,C]
mem_cost = reduce(operator.mul, weights_shape)
if op_type == 'Conv2D':
mac_cost = mem_cost * int(output_dims[1]) * int(output_dims[2])
elif op_type == 'MatMul':
mac_cost = mem_cost
return mem_cost, mac_cost
    def _compute_compression_ratio(self, sess, cost_metric):
        """
        Compute the network-wide compression ratio achieved so far

        Re-measures the cost of all compressible layers currently in the graph,
        excluding the originally-selected layers, and compares that against the
        baseline network cost stored in self._networkCost.

        :param sess: tf.compat.v1.Session
        :param cost_metric: CostMetric.memory or CostMetric.mac
        :return: Computed compression ratio (fraction of baseline cost saved)
        """
        query = core.OpQuery(sess.graph)
        compressible_ops = query.get_weight_ops()
        compressible_ops = [op for op in compressible_ops if op.type in _SVD_SUPPORTED_LAYER_TYPES]
        layer_attributes_list = Svd._create_layer_attributes_list(compressible_ops, sess)
        # Exclude the originally-selected layers so only the remaining layers are costed
        selected_layers_ops = [layer.layer_ref.name for layer in self._selected_layers]
        layer_attributes_list = [layer for layer in layer_attributes_list if layer.layer_ref.name not in selected_layers_ops]
        compressed_network_cost = Svd._compute_network_cost(layer_attributes_list)
        # Ratio = saved cost / baseline cost, along the requested metric
        if cost_metric is CostMetric.memory:
            savings = self._networkCost[0] - compressed_network_cost[0]
            ratio = savings/self._networkCost[0]
        else:
            savings = self._networkCost[1] - compressed_network_cost[1]
            ratio = savings/self._networkCost[1]
        return ratio
    def _store_net_stats(self, sess):
        """
        Store layer attributes in the PyMo library instance

        Selects the layers to compress (manual list, top-N, or top-X-percent,
        depending on constructor arguments), records the baseline network cost,
        and pushes each selected layer's shape/weights/bias into the PyMo SVD
        instance.

        :param sess: tf.compat.v1.Session
        :return: None
        """
        # pylint: disable=too-many-locals,too-many-branches,too-many-statements
        # Translate the cost metric into its PyMo equivalent
        if self._metric == CostMetric.memory:
            pymo_metric = pymo.COST_TYPE_MEMORY
        else:
            pymo_metric = pymo.COST_TYPE_MAC
        self._svd.SetCostMetric(pymo_metric)
        # Layer-selection: explicit list wins, then top-N, otherwise top-X-percent
        if self._layers_to_compress:
            selected_layers, network_cost = self._pick_compression_layers(sess,
                                                                          self._metric,
                                                                          self.LayerSelectionScheme.manual,
                                                                          layers_to_compress=self._layers_to_compress)
        elif self._num_layers > 0:
            selected_layers, network_cost = self._pick_compression_layers(sess,
                                                                          self._metric,
                                                                          self.LayerSelectionScheme.top_n_layers,
                                                                          num_layers=self._num_layers)
        else:
            percent_thresh = self._layer_selection_threshold * 100
            selected_layers, network_cost = self._pick_compression_layers(sess,
                                                                          self._metric,
                                                                          self.LayerSelectionScheme.top_x_percent,
                                                                          percent_thresh=percent_thresh)
        self._networkCost = network_cost
        print("Selected Layers:")
        for layer in selected_layers:
            print(layer.layer_ref.name)
        self._selected_layers = selected_layers
        # Get the op query module and query for all Conv/FC layers
        query = core.OpQuery(sess.graph)
        self._compressible_ops = query.get_weight_ops()
        # Set up the layer attributes for each Conv/FC layer (this also checks for a
        # trailing bias add)
        for i, op in enumerate(self._compressible_ops):
            # If op is not a selected layer, skip
            if not any(op is layer.layer_ref for layer in selected_layers):
                continue
            attr = pymo.LayerAttributes()
            layerName = op.name
            output_dims = op.outputs[0].shape # TF uses dims [N,H,W,C]
            attr.layerType = self._get_layer_type(op)
            if self.svd_type == pymo.TYPE_SINGLE:
                attr.mode = self._svd.GetCompressionType(attr.layerType, 'single')
            else:
                attr.mode = self._svd.GetCompressionType(attr.layerType, 'successive')
            if op.type == 'Conv2D' or op.type == 'MatMul':
                logger.info('Setting layer attributes for: %s', layerName+'('+op.type+')')
                # Get weights
                weights = query.get_weights_for_op(op).eval(session=sess)
                w_shape = weights.shape
                logger.debug('Got weight shape: %s', w_shape)
                # Check for bias op (the op immediately following the weight op)
                bias = None
                if (i+1) < len(self._compressible_ops):
                    bias = query.get_bias_for_op(self._compressible_ops[i+1])
                    if bias is not None:
                        bias = bias.eval(session=sess)
                        logger.debug('Got %s w/bias. Shape: %s', op.type, str(bias.shape))
                if op.type == 'Conv2D':
                    attr.shape = [w_shape[3], w_shape[2], w_shape[0], w_shape[1]] # TF Conv weight order [KH,KW,ID,OD]
                    attr.activation_dims = (output_dims[1], output_dims[2]) # (H,W)
                    # CONV weights are stored in the order {H,W,I,O} in Tensorflow
                    # Re-order them to the form {O,I,H,W}
                    weights = np.transpose(weights, (3, 2, 0, 1))
                elif op.type == 'MatMul':
                    attr.shape = [w_shape[1], w_shape[0], 1, 1] # TF FC weight order [ID,OD], SVD expects [OD,ID]
                    attr.activation_dims = (1, 1)
                    weights = np.transpose(weights, (1, 0))
                # blobs is a numpy array... add to list then set
                params = [weights.flatten()]
                if bias is not None:
                    params.append(bias.flatten())
                attr.blobs = params
                # Save the attributes for this layer
                self._svd.StoreLayerAttributes(layerName, attr)
def _compute_objective_score(self, model_perf, compression_score):
"""
Compute objective score of a given compression model
:param model_perf: Performance of compressed model
:param compression_score: Compression ratio
:return: Computed objective score
"""
if model_perf + (self._error_margin / 100) >= self._baseline_perf:
objective_score = 1 - model_perf + (1 - compression_score)
else:
objective_score = 1 + (1 - compression_score) # treat lower accuracies as 0
return objective_score
    def _split_conv_layer(self, sess, svd_ranks, attr, op_name, bias_op_name=None):
        """
        Split a given conv layer given a rank

        Builds two (SVD) or three (successive SVD) replacement conv ops with
        zero-initialized placeholders, asks the PyMo instance to fill in the
        split weights/biases, and reroutes consumers of the original op (or its
        bias add) onto the new stack.

        :param sess: tf.compat.v1.Session
        :param svd_ranks: Rank to split the layer with (two ranks in case of SSVD)
        :param attr: Reference to the corresponding layer attribute
        :param op_name: Name of the op to split
        :param bias_op_name: Name of the corresponding bias op (if any)
        :return: Per-layer compression ratio achieved by the split
        """
        # pylint: disable=too-many-statements,too-many-branches,too-many-locals
        logger.info('Splitting conv op: %s', op_name)
        # Retrieve the op(s) from the current graph
        op = sess.graph.get_operation_by_name(op_name)
        bias_op = None
        if bias_op_name:
            bias_op = sess.graph.get_operation_by_name(bias_op_name)
        # Create new 'conv_a' layer
        pad_mode = op.get_attr('padding')
        data_format = op.get_attr('data_format').decode('utf-8')
        strides = op.get_attr('strides')
        # Print current conv weight shape
        query = core.OpQuery(sess.graph)
        w_shape = query.get_weights_for_op(op).get_shape().as_list()
        logger.debug('Original %s weight shape: %s', op.name, str(w_shape))
        split_weights, weight_sizes = [], []
        split_biases, bias_sizes = [], []
        # TF weights are in [H,W,I,O] order. We must reshape the split weights to SVD format [O,I,H,W]
        # and then transpose back
        # Conv a weights are: [1, 1, w_shape[2], svd_ranks[0]]
        split_conv_a_w_shape = (svd_ranks[0], w_shape[2], 1, 1)
        conv_a_weights = np.zeros(split_conv_a_w_shape) # transpose(2,3,1,0)
        split_weights.append(conv_a_weights.flatten().tolist())
        weight_sizes.append(conv_a_weights.size)
        if bias_op:
            conv_a_bias = np.zeros(svd_ranks[0])
            split_biases.append(conv_a_bias.flatten().tolist())
            bias_sizes.append(conv_a_bias.size)
        num_filters = w_shape[3]
        if len(svd_ranks) >= 2 and attr.mode == pymo.TYPE_SUCCESSIVE:
            # Output channels = output_rank (s)
            num_filters = svd_ranks[1]
        # Conv b weights are: [w_shape[0],w_shape[1],svd_ranks[0],num_filters]
        split_conv_b_w_shape = (num_filters, svd_ranks[0], w_shape[0], w_shape[1])
        conv_b_weights = np.zeros(split_conv_b_w_shape)
        conv_b_bias = np.zeros(num_filters)
        split_weights.append(conv_b_weights.flatten().tolist())
        weight_sizes.append(conv_b_weights.size)
        if bias_op:
            split_biases.append(conv_b_bias.flatten().tolist())
            bias_sizes.append(conv_b_bias.size)
        # Only create a third conv layer when performing successive SVD
        if len(svd_ranks) >= 2 and attr.mode == pymo.TYPE_SUCCESSIVE:
            # Conv c weights are: [1,1,num_filters,w_shape[3]]
            split_conv_c_w_shape = (w_shape[3], num_filters, 1, 1)
            conv_c_weights = np.zeros(split_conv_c_w_shape)
            conv_c_bias = np.zeros(w_shape[3])
            split_weights.append(conv_c_weights.flatten().tolist())
            weight_sizes.append(conv_c_weights.size)
            if bias_op:
                split_biases.append(conv_c_bias.flatten().tolist())
                bias_sizes.append(conv_c_bias.size)
        # Split the weights and biases according to the number of layers and ranks
        split_weights = self._svd.SplitLayerWeights(op.name, split_weights, weight_sizes, svd_ranks)
        split_biases = self._svd.SplitLayerBiases(op.name, split_biases, bias_sizes, svd_ranks)
        if split_weights:
            conv_a_name = op.name+'_a'
            conv_a_weights = np.array(split_weights[0]).reshape(split_conv_a_w_shape).transpose(2, 3, 1, 0)
            conv_a_w = tf.Variable(initial_value=conv_a_weights, name=conv_a_name+'_w', dtype=tf.float32)
            logger.debug('%s weight shape: %s', conv_a_name, str(conv_a_weights.shape))
            # Create conv_a using default strides (1,1)
            # pylint: disable=no-member
            conv_acts = tf.nn.conv2d(op.inputs[0], conv_a_w, strides=[1, 1, 1, 1], data_format=data_format,
                                     padding=pad_mode, name=op.name+'_a') # dilation_rate=dilation_rate
            if bias_op:
                conv_a_bias = tf.Variable(initial_value=split_biases[0], name=conv_a_name+'_bias', dtype=tf.float32)
                conv_acts = conv_acts + conv_a_bias # tf.nn.bias_add(conv_acts, split_biases[0])
            if len(split_weights) > 1:
                # Create conv_b
                conv_b_name = op.name+'_b'
                conv_b_weights = np.array(split_weights[1]).reshape(split_conv_b_w_shape).transpose(2, 3, 1, 0)
                conv_b_w = tf.Variable(initial_value=conv_b_weights, name=conv_b_name+'_w', dtype=tf.float32)
                logger.debug('%s weight shape: %s', conv_b_name, str(conv_b_weights.shape))
                # pylint: disable=no-member
                conv_acts = tf.nn.conv2d(conv_acts, conv_b_w, strides=strides, data_format=data_format, padding=pad_mode, name=conv_b_name) #dilation_rate=dilation_rate
                if bias_op:
                    conv_b_bias = tf.Variable(initial_value=split_biases[1], name=conv_b_name+'_bias', dtype=tf.float32)
                    conv_acts = conv_acts + conv_b_bias # tf.nn.bias_add(conv_acts, split_biases[1])
                ratio = self._compute_per_layer_compression_ratio([conv_a_w.shape, conv_b_w.shape], conv_acts.shape, w_shape, "Conv2D")
            # Only create a third conv layer when performing successive SVD
            if len(split_weights) > 2 and len(svd_ranks) >= 2 and attr.mode == pymo.TYPE_SUCCESSIVE:
                # Create conv_c, using default strides (1,1)
                conv_c_name = op.name+'_c'
                conv_c_weights = np.array(split_weights[2]).reshape(split_conv_c_w_shape).transpose(2, 3, 1, 0)
                conv_c_w = tf.Variable(initial_value=conv_c_weights, name=conv_c_name+'_w', dtype=tf.float32)
                logger.debug('%s weight shape: %s', conv_c_name, str(conv_c_weights.shape))
                # pylint: disable=no-member
                conv_acts = tf.nn.conv2d(conv_acts, conv_c_w, strides=[1, 1, 1, 1], data_format=data_format,
                                         padding=pad_mode, name=conv_c_name)
                if bias_op:
                    conv_c_bias = tf.Variable(initial_value=split_biases[2], name=conv_c_name+'_bias', dtype=tf.float32)
                    conv_acts = conv_acts + conv_c_bias # tf.nn.bias_add(conv_acts, split_biases[2])
        # Reroute consumers of the original conv (or its bias add) onto the new stack
        consumers = []
        rerouted_inputs = [bias_op.outputs[0]] if bias_op else [op.outputs[0]]
        for inp in rerouted_inputs:
            for consumer in inp.consumers():
                consumers.append(consumer)
        _ = graph_editor.reroute_ts(conv_acts, rerouted_inputs, can_modify=consumers)
        # NOTE(review): 'ratio' is only assigned when len(split_weights) > 1; an
        # empty or single-way split would raise UnboundLocalError here — confirm intended.
        return ratio
def _split_fc_layer(self, sess, svd_ranks, op_name, bias_op_name=None):
    """
    Split a given fully-connected (MatMul) layer given a rank.

    Builds placeholder weight matrices of the split shapes, hands them to the
    PyMo SVD library to fill in, then creates the replacement MatMul ops in the
    graph and reroutes the original op's consumers onto the new output.

    :param sess: tf.compat.v1.Session
    :param svd_ranks: Rank to split the layer with (two ranks in case of SSVD)
    :param op_name: Name of the op to split
    :param bias_op_name: Name of the corresponding bias op (if any)
    :return: per-layer compression ratio computed for the split layer
    """
    # pylint: disable=too-many-statements, too-many-locals
    logger.info('Splitting fully connected op: %s', op_name)
    # Retrieve the op(s) from the current graph
    op = sess.graph.get_operation_by_name(op_name)
    bias_op = None
    if bias_op_name:
        bias_op = sess.graph.get_operation_by_name(bias_op_name)
    # Print current fc weight shape
    query = core.OpQuery(sess.graph)
    w_shape = query.get_weights_for_op(op).get_shape().as_list()
    logger.debug('Original %s weight shape: %s', op.name, str(w_shape))
    split_weights, weight_sizes = [], []
    split_biases, bias_sizes = [], []
    # FC weights are: [w_shape[2],svd_ranks[0]] in [I,O] order.
    # We must reshape the split weights to SVD format [O,I] and then transpose to NHWC
    split_fc_a_w_shape = (svd_ranks[0], w_shape[0])
    fc_a_weights = np.zeros(split_fc_a_w_shape)
    fc_a_bias = np.zeros(svd_ranks[0])
    # Placeholder buffers; PyMo fills them via SplitLayerWeights/Biases below
    split_weights.append(fc_a_weights.flatten().tolist())
    weight_sizes.append(fc_a_weights.size)
    if bias_op:
        split_biases.append(fc_a_bias.flatten().tolist())
        bias_sizes.append(fc_a_bias.size)
    # FC b weights are: [svd_ranks[0],num_filters] in [H,W,I,O] order.
    # We must reshape the split weights to SVD format [O,I,H,W] and then transpose to NHWC
    split_fc_b_w_shape = (w_shape[1], svd_ranks[0])
    fc_b_weights = np.zeros(split_fc_b_w_shape)
    split_weights.append(fc_b_weights.flatten().tolist())
    weight_sizes.append(fc_b_weights.size)
    if bias_op:
        fc_b_bias = np.zeros(w_shape[1])
        split_biases.append(fc_b_bias.flatten().tolist())
        bias_sizes.append(fc_b_bias.size)
    # Split the weights and biases according to the number of layers and ranks
    split_weights = self._svd.SplitLayerWeights(op.name, split_weights, weight_sizes, svd_ranks)
    split_biases = self._svd.SplitLayerBiases(op.name, split_biases, bias_sizes, svd_ranks)
    if split_weights:
        fc_a_name = op.name+'_a'
        # Reshape to SVD format [O,I] then transpose back to TF's [I,O]
        fc_a_weights = np.array(split_weights[0]).reshape(split_fc_a_w_shape).transpose(1, 0)
        fc_a_w = tf.Variable(initial_value=fc_a_weights, name=fc_a_name+'_w', dtype=tf.float32)
        logger.debug('%s weight shape: %s', fc_a_name, str(fc_a_weights.shape))
        # Create fc_a using default strides (1,1)
        fc_acts = tf.matmul(op.inputs[0], fc_a_w, name=fc_a_name)
        if bias_op:
            fc_a_bias = tf.Variable(initial_value=split_biases[0], name=fc_a_name+'_bias', dtype=tf.float32)
            fc_acts = fc_acts + fc_a_bias
        if len(split_weights) > 1:
            # Create fc_b
            fc_b_name = op.name+'_b'
            fc_b_weights = np.array(split_weights[1]).reshape(split_fc_b_w_shape).transpose(1, 0)
            fc_b_w = tf.Variable(initial_value=fc_b_weights, name=fc_b_name+'_w', dtype=tf.float32)
            logger.debug('%s weight shape: %s', fc_b_name, str(fc_b_weights.shape))
            fc_acts = tf.matmul(fc_acts, fc_b_w, name=fc_b_name)
            if bias_op:
                fc_b_bias = tf.Variable(initial_value=split_biases[1], name=fc_b_name+'_bias', dtype=tf.float32)
                fc_acts = fc_acts + fc_b_bias
            ratio = self._compute_per_layer_compression_ratio([fc_a_w.shape, fc_b_w.shape], fc_acts.shape, w_shape, 'MatMul')
        # Reroute every consumer of the original (bias) output onto the new fc stack
        consumers = []
        rerouted_inputs = [bias_op.outputs[0]] if bias_op else [op.outputs[0]]
        for inp in rerouted_inputs:
            for consumer in inp.consumers():
                consumers.append(consumer)
        _ = graph_editor.reroute_ts(fc_acts, rerouted_inputs, can_modify=consumers)
    return ratio
def _split_layers(self, sess, rank_index, use_best_ranks):
    """
    Split all the selected layers given a rank index.

    :param sess: tf.compat.v1.Session
    :param rank_index: Rank index to use for finding the ranks
    :param use_best_ranks: Use the best rank index (for final compressed network)
    :return: list of per-selected-layer statistics objects
    """
    layer_stats = list()
    for i, op in enumerate(self._compressible_ops):
        # If op is not a selected layer, skip
        if not any(op is layer.layer_ref for layer in self._selected_layers):
            continue
        # Bias is taken care of as part of the Conv/FC op
        if op.type in ['Add', 'BiasAdd']:
            continue
        # Get the stored attributes for this op
        attr = self._svd.GetLayerAttributes(op.name)
        if not attr:
            raise RuntimeError("Layer attributes not available for layer"+op.name)
        if use_best_ranks:
            svd_ranks = attr.bestRanks
        else:
            svd_ranks = self._svd.GetCandidateRanks(op.name, rank_index)
        if svd_ranks:
            # The op immediately following a Conv/FC op is its bias add, if any
            bias_op = None
            if i+1 < len(self._compressible_ops):
                bias_op = self._compressible_ops[i+1]
                bias_op = bias_op.name if bias_op.type in ['Add', 'BiasAdd'] else None
            # NOTE(review): if op.type is neither Conv2D nor MatMul, `ratio`
            # below is unbound (or stale from a previous iteration) — presumably
            # _compressible_ops only ever contains those types; verify upstream.
            if op.type in ['Conv2D']:
                ratio = self._split_conv_layer(sess, svd_ranks, attr, op.name, bias_op)
            elif op.type in ['MatMul']:
                ratio = self._split_fc_layer(sess, svd_ranks, op.name, bias_op)
            per_layer_stats = stats_u.SvdStatistics.PerSelectedLayer(op.name, svd_ranks, ratio)
            layer_stats.append(per_layer_stats)
    return layer_stats
def _create_compressed_network(self, sess, rank_index, use_best_ranks):
    """
    Build the compressed version of the network for one candidate rank index.

    :param sess: tf.compat.v1.Session holding the graph to compress
    :param rank_index: Candidate rank index used to look up the layer ranks
    :param use_best_ranks: If True, use the stored best ranks (final network)
    :return: per-layer statistics produced while splitting the layers
    """
    # Splitting the selected layers also rewires their consumers in the graph.
    return self._split_layers(sess, rank_index, use_best_ranks)
def _perform_rank_selection(self):
    """
    Perform rank selection procedure.

    For every candidate rank index, builds a compressed graph, saves and
    reloads it, evaluates accuracy and compression, and tracks the best
    objective score. Stops early when accuracy drops below the allowed
    error margin from baseline.

    :return: SvdStatistics object for the selected best rank index
    :raises RuntimeError: when no candidate ranks exist or none fit the
        defined error bounds
    """
    # pylint: disable=too-many-locals
    stats_per_rank_index = list()
    self._svd.ComputeNetworkCost()
    self._num_ranks = self._svd.SetCandidateRanks(self._num_ranks)
    if not self._num_ranks:
        raise RuntimeError('No good candidate ranks found for compressing specified layers.')
    # Ranks are in order from least compression to highest
    best_index = -1
    optimal_score = 0.0
    for rank_index in range(self._num_ranks):
        g = tf.Graph()
        with g.as_default():
            # Create a new network for each rank_index
            self._svd.PrintCandidateRanks(rank_index, False)
            # Load the default graph so we are operating on a fresh copy of the original graph
            sess, saver = self._load_graph(g, self._default_meta_graph, self._default_checkpoint)
            per_layer_stats = self._create_compressed_network(sess, rank_index, False)
            # Save the temp model
            output_file = os.path.join(self._output_dir, 'svd_rank_index_' + str(rank_index))
            self._save_graph(sess, saver, output_file)
            # Reset the session and start a new graph for loading the compressed model
            self._reset_session(sess)
        g = tf.Graph()
        with g.as_default():
            # In TF after making changes to the graph you must save and reload, then evaluate
            sess, saver = self._load_graph(g, output_file+'.meta', output_file)
            model_perf = self._run_graph(sess, self._generator, self._eval_names, self._eval_func, self._iterations)
            logger.info('%s performance: %s', output_file, str(model_perf))
            self._model_performance_candidate_ranks.append(model_perf * 100)
            # Estimate relative compression score for this rank_index
            compression_score = self._compute_compression_ratio(sess, self._metric)
            objective_score = self._compute_objective_score(model_perf, compression_score)
            rank_data = stats_u.SvdStatistics.PerRankIndex(rank_index=rank_index, model_accuracy=model_perf,
                                                           model_compression_ratio=compression_score,
                                                           layer_stats_list=per_layer_stats)
            stats_per_rank_index.append(rank_data)
            logger.info('Compressed network with rank_index %i/%i: accuracy = %f percent '
                        'with %f percent compression (%r option) and an objective score of %f',
                        rank_index, self._num_ranks, model_perf * 100, compression_score * 100,
                        self._metric, objective_score)
            if rank_index == 0:
                # First candidate establishes the score to beat
                optimal_score = objective_score
                logger.info('Initializing objective score to %f at rank index %i', optimal_score, rank_index)
            if model_perf + self._error_margin/100 < self._baseline_perf:
                # Accuracy degraded past the allowed margin: stop searching
                logger.info('Model performance %f falls below %f percent of baseline performance %f'
                            ' Ending rank selection', model_perf, self._error_margin, self._baseline_perf)
                break
            else:
                # Lower objective score is better
                if objective_score <= optimal_score:
                    optimal_score = objective_score
                    logger.info('Found a better value for the objective score %f at rank_index %i',
                                optimal_score, rank_index)
                    best_index = rank_index
    if best_index != -1:
        self._svd.StoreBestRanks(best_index)
        # `sess` still holds the last evaluated compressed graph here
        memory_compression_ratio = self._compute_compression_ratio(sess, CostMetric.memory)
        mac_compression_ratio = self._compute_compression_ratio(sess, CostMetric.mac)
        stats = stats_u.SvdStatistics(self._baseline_perf, model_perf, self._metric, best_index,
                                      mem_comp_ratio=memory_compression_ratio, mac_comp_ratio=mac_compression_ratio,
                                      rank_stats_list=stats_per_rank_index)
        # close the session and reset the default graph
        self._reset_session(sess)
        return stats
    # close the session and reset the default graph
    self._reset_session(sess)
    raise RuntimeError('No suitable ranks found to compress model within defined error bounds.')
def manual_rank_svd(self):
    """
    Push the user-provided ranks into the PyMo library and compute statistics.

    :return: compression statistics object
    :raises ValueError: when layer names/ranks are missing or malformed
    """
    # Store total net cost before setting any ranks
    self._svd.ComputeNetworkCost()
    # In no_eval mode the caller must name the layers explicitly
    if not self._layer_ranks:
        raise ValueError('Layer names MUST be specified in no_eval mode.')
    # Ranks must arrive as a list of tuples for both SVD and SSVD
    if not all(isinstance(entry, tuple) for entry in self._layer_ranks):
        raise ValueError('layer_ranks should be in list of tuples format for both SVD and SSVD')
    # One rank tuple is expected per layer selected for compression
    if len(self._layers_to_compress) != self._num_layer_ranks:
        raise ValueError('Number of Input SVD ranks does not match number of layers.')
    for layer_name, rank in zip(self._layers_to_compress, self._layer_ranks):
        # SSVD stores the rank twice (one per split stage); plain SVD once
        rank_list = [rank[1]]
        if self.svd_type == _SVD_TYPES['ssvd']:
            rank_list.append(rank[1])
        self._svd.StoreBestRanks(layer_name, rank_list)
    return self._stats_for_manual_rank_svd()
@staticmethod
def _save_graph(sess, saver, output_graph):
    """
    Persist the session's graph via the given saver.

    :param sess: tf.compat.v1.Session to save
    :param saver: TF saver object
    :param output_graph: Filename and path for saving the output
    :return: None
    """
    logger.info('Saving graph: %s', output_graph)
    saver.save(sess, output_graph)
    # Also emit a summary so the graph can be inspected in TensorBoard
    models_dir = os.path.dirname(output_graph)+"/models"
    _ = tf.compat.v1.summary.FileWriter(models_dir, sess.graph)
def _save_compressed_network(self):
    """
    Create and save the final compressed network using the best ranks.

    :return: per-layer statistics produced while compressing
    """
    logger.info('Saving final compressed network')
    graph = tf.Graph()
    with graph.as_default():
        sess, saver = self._load_graph(graph, self._default_meta_graph, self._default_checkpoint)
        # rank_index 0 with use_best_ranks=True selects the stored best ranks
        stats = self._create_compressed_network(sess, 0, True)
        # Save the final network
        self._save_graph(sess, saver, self._output_file)
        self._reset_session(sess)
    return stats
def _stats_for_manual_rank_svd(self):
    """
    Save the compressed network built from manual ranks, then evaluate it and
    assemble an SvdStatistics object (single rank index 0).

    :return: SvdStatistics for the manually specified ranks
    """
    per_layer_stats = self._save_compressed_network()
    g = tf.Graph()
    with g.as_default():
        # Load and evaluate the final network
        sess, _ = self._load_graph(g, self._output_file+'.meta', self._output_file)
        model_perf = self._run_graph(sess, self._generator, self._eval_names, self._eval_func, self._iterations)
        logger.info('%s performance: %s', self._output_file, str(model_perf))
        # Estimate relative compression score for this rank_index
        self._svd.PrintCandidateRanks(0, True)
        # Estimate relative compression score for this rank_index
        compression_score = self._compute_compression_ratio(sess, self._metric)
        logger.info('Evaluating final model using layer(s): %s. '
                    'Final accuracy = %f percent with %f percent compression (%r option).',
                    self._eval_names, model_perf*100, compression_score*100, self._metric)
        # Compute both cost metrics for the statistics record
        memory_compression_ratio = self._compute_compression_ratio(sess,
                                                                   CostMetric.memory)
        mac_compression_ratio = self._compute_compression_ratio(sess,
                                                                CostMetric.mac)
        rank_data = stats_u.SvdStatistics.PerRankIndex(rank_index=0, model_accuracy=model_perf,
                                                       model_compression_ratio=compression_score,
                                                       layer_stats_list=per_layer_stats)
        rank_data_list = list()
        rank_data_list.append(rank_data)
        stats = stats_u.SvdStatistics(self._baseline_perf, model_perf, self._metric, 0,
                                      mem_comp_ratio=memory_compression_ratio,
                                      mac_comp_ratio=mac_compression_ratio,
                                      rank_stats_list=rank_data_list)
    return stats
def compress_net(self, generator, eval_names=None, run_graph=graph_eval.evaluate_graph,
                 eval_func=graph_eval.default_eval_func, error_margin=2, iterations=100):
    """
    Compresses the network using SVD
    Runs rank selection on the network, and compresses it using the method and parameters
    passed during construction of the Svd object.
    :param generator: The generator which should be used for generating data for quantization
    :param eval_names: The list of names to use for calculating model performance
    :param run_graph: The function to use for running data through the graph and evaluating
    the network's performance. This function must return only a single number representing the
    avg performance of the model over the dataset batches.
    See the 'graph_eval' module's 'evaluate_graph' function for the prototype
    :param eval_func: The function to use for evaluating the network performance. This function should always
    return a single number that can be used for comparing different graph's performance.
    (The default is accuracy)
    :param error_margin: The acceptable degradation in network accuracy from the original.
    1 for 1% drop, etc. Defaults to 2%.
    :param iterations: The number of iterations (data batches) to run through the network for analysis
    :return: An object containing compression statistics
    :raises: - ValueError: An invalid parameter was passed
    - RuntimeError: An error occurred analyzing or compressing the network. The associated error
    and other information will be returned with the error.
    """
    self._generator = generator
    if not eval_names:
        eval_names = ['accuracy']
    self._eval_names = eval_names
    self._run_graph = run_graph
    self._eval_func = eval_func
    # Validate user-supplied knobs before touching any graph state
    if error_margin <= 0:
        raise ValueError('Invalid error_margin: '+str(error_margin)+'. Must pass error_margin > 0')
    self._error_margin = error_margin
    if iterations <= 0:
        raise ValueError('Invalid iterations: '+str(iterations)+'. Number of iterations must be > 0')
    self._iterations = iterations
    # Get baseline accuracy, then store the network stats
    g = tf.Graph()
    with g.as_default():
        sess, _ = self._load_graph(g, self._default_meta_graph, self._default_checkpoint)
        self._baseline_perf = run_graph(sess, generator, eval_names, eval_func, iterations)
        logger.info('Baseline performance: %f', self._baseline_perf)
        self._store_net_stats(sess)
        self._reset_session(sess)
    if self._no_eval:
        # Set Manual rank (manual path also saves the compressed network)
        stats = self.manual_rank_svd()
    else:
        # Perform rank selection
        stats = self._perform_rank_selection()
        self._save_compressed_network()
    return stats
| [
"aimet_common.utils.AimetLogger.get_area_logger",
"tensorflow.matmul",
"aimet_tensorflow.common.core.OpQuery",
"tensorflow.Variable",
"tensorflow.nn.conv2d",
"aimet_common.statistics_util.SvdStatistics.PerRankIndex",
"os.path.dirname",
"os.path.exists",
"numpy.transpose",
"tensorflow.compat.v1.Ses... | [((2354, 2407), 'aimet_common.utils.AimetLogger.get_area_logger', 'AimetLogger.get_area_logger', (['AimetLogger.LogAreas.Svd'], {}), '(AimetLogger.LogAreas.Svd)\n', (2381, 2407), False, 'from aimet_common.utils import AimetLogger\n'), ((6460, 6488), 'os.path.dirname', 'os.path.dirname', (['output_file'], {}), '(output_file)\n', (6475, 6488), False, 'import os\n'), ((7424, 7445), 'libpymo.GetSVDInstance', 'pymo.GetSVDInstance', ([], {}), '()\n', (7443, 7445), True, 'import libpymo as pymo\n'), ((10512, 10546), 'tensorflow.compat.v1.reset_default_graph', 'tf.compat.v1.reset_default_graph', ([], {}), '()\n', (10544, 10546), True, 'import tensorflow as tf\n'), ((10955, 10988), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (10975, 10988), True, 'import tensorflow as tf\n'), ((11057, 11105), 'tensorflow.compat.v1.train.import_meta_graph', 'tf.compat.v1.train.import_meta_graph', (['meta_graph'], {}), '(meta_graph)\n', (11093, 11105), True, 'import tensorflow as tf\n'), ((12885, 12909), 'aimet_tensorflow.common.core.OpQuery', 'core.OpQuery', (['sess.graph'], {}), '(sess.graph)\n', (12897, 12909), False, 'from aimet_tensorflow.common import core, graph_eval\n'), ((15710, 15734), 'aimet_tensorflow.common.core.OpQuery', 'core.OpQuery', (['sess.graph'], {}), '(sess.graph)\n', (15722, 15734), False, 'from aimet_tensorflow.common import core, graph_eval\n'), ((17198, 17233), 'functools.reduce', 'reduce', (['operator.mul', 'weights_shape'], {}), '(operator.mul, weights_shape)\n', (17204, 17233), False, 'from functools import reduce\n'), ((17666, 17690), 'aimet_tensorflow.common.core.OpQuery', 'core.OpQuery', (['sess.graph'], {}), '(sess.graph)\n', (17678, 17690), False, 'from aimet_tensorflow.common import core, graph_eval\n'), ((20657, 20681), 'aimet_tensorflow.common.core.OpQuery', 'core.OpQuery', (['sess.graph'], {}), '(sess.graph)\n', (20669, 20681), False, 'from aimet_tensorflow.common 
import core, graph_eval\n'), ((25014, 25038), 'aimet_tensorflow.common.core.OpQuery', 'core.OpQuery', (['sess.graph'], {}), '(sess.graph)\n', (25026, 25038), False, 'from aimet_tensorflow.common import core, graph_eval\n'), ((25561, 25591), 'numpy.zeros', 'np.zeros', (['split_conv_a_w_shape'], {}), '(split_conv_a_w_shape)\n', (25569, 25591), True, 'import numpy as np\n'), ((26290, 26320), 'numpy.zeros', 'np.zeros', (['split_conv_b_w_shape'], {}), '(split_conv_b_w_shape)\n', (26298, 26320), True, 'import numpy as np\n'), ((26343, 26364), 'numpy.zeros', 'np.zeros', (['num_filters'], {}), '(num_filters)\n', (26351, 26364), True, 'import numpy as np\n'), ((30703, 30776), 'aimet_tensorflow.graph_editor.reroute_ts', 'graph_editor.reroute_ts', (['conv_acts', 'rerouted_inputs'], {'can_modify': 'consumers'}), '(conv_acts, rerouted_inputs, can_modify=consumers)\n', (30726, 30776), False, 'from aimet_tensorflow import graph_editor\n'), ((31626, 31650), 'aimet_tensorflow.common.core.OpQuery', 'core.OpQuery', (['sess.graph'], {}), '(sess.graph)\n', (31638, 31650), False, 'from aimet_tensorflow.common import core, graph_eval\n'), ((32123, 32151), 'numpy.zeros', 'np.zeros', (['split_fc_a_w_shape'], {}), '(split_fc_a_w_shape)\n', (32131, 32151), True, 'import numpy as np\n'), ((32172, 32194), 'numpy.zeros', 'np.zeros', (['svd_ranks[0]'], {}), '(svd_ranks[0])\n', (32180, 32194), True, 'import numpy as np\n'), ((32682, 32710), 'numpy.zeros', 'np.zeros', (['split_fc_b_w_shape'], {}), '(split_fc_b_w_shape)\n', (32690, 32710), True, 'import numpy as np\n'), ((34917, 34988), 'aimet_tensorflow.graph_editor.reroute_ts', 'graph_editor.reroute_ts', (['fc_acts', 'rerouted_inputs'], {'can_modify': 'consumers'}), '(fc_acts, rerouted_inputs, can_modify=consumers)\n', (34940, 34988), False, 'from aimet_tensorflow import graph_editor\n'), ((43989, 43999), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (43997, 43999), True, 'import tensorflow as tf\n'), ((44481, 44491), 'tensorflow.Graph', 
'tf.Graph', ([], {}), '()\n', (44489, 44491), True, 'import tensorflow as tf\n'), ((48882, 48892), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (48890, 48892), True, 'import tensorflow as tf\n'), ((6504, 6536), 'os.path.exists', 'os.path.exists', (['self._output_dir'], {}), '(self._output_dir)\n', (6518, 6536), False, 'import os\n'), ((6550, 6579), 'os.makedirs', 'os.makedirs', (['self._output_dir'], {}), '(self._output_dir)\n', (6561, 6579), False, 'import os\n'), ((21079, 21101), 'libpymo.LayerAttributes', 'pymo.LayerAttributes', ([], {}), '()\n', (21099, 21101), True, 'import libpymo as pymo\n'), ((25776, 25798), 'numpy.zeros', 'np.zeros', (['svd_ranks[0]'], {}), '(svd_ranks[0])\n', (25784, 25798), True, 'import numpy as np\n'), ((26912, 26942), 'numpy.zeros', 'np.zeros', (['split_conv_c_w_shape'], {}), '(split_conv_c_w_shape)\n', (26920, 26942), True, 'import numpy as np\n'), ((26969, 26989), 'numpy.zeros', 'np.zeros', (['w_shape[3]'], {}), '(w_shape[3])\n', (26977, 26989), True, 'import numpy as np\n'), ((27732, 27821), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'conv_a_weights', 'name': "(conv_a_name + '_w')", 'dtype': 'tf.float32'}), "(initial_value=conv_a_weights, name=conv_a_name + '_w', dtype=tf\n .float32)\n", (27743, 27821), True, 'import tensorflow as tf\n'), ((28024, 28151), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['op.inputs[0]', 'conv_a_w'], {'strides': '[1, 1, 1, 1]', 'data_format': 'data_format', 'padding': 'pad_mode', 'name': "(op.name + '_a')"}), "(op.inputs[0], conv_a_w, strides=[1, 1, 1, 1], data_format=\n data_format, padding=pad_mode, name=op.name + '_a')\n", (28036, 28151), True, 'import tensorflow as tf\n'), ((28689, 28778), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'conv_b_weights', 'name': "(conv_b_name + '_w')", 'dtype': 'tf.float32'}), "(initial_value=conv_b_weights, name=conv_b_name + '_w', dtype=tf\n .float32)\n", (28700, 28778), True, 'import tensorflow as tf\n'), ((28925, 29040), 
'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['conv_acts', 'conv_b_w'], {'strides': 'strides', 'data_format': 'data_format', 'padding': 'pad_mode', 'name': 'conv_b_name'}), '(conv_acts, conv_b_w, strides=strides, data_format=data_format,\n padding=pad_mode, name=conv_b_name)\n', (28937, 29040), True, 'import tensorflow as tf\n'), ((29832, 29921), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'conv_c_weights', 'name': "(conv_c_name + '_w')", 'dtype': 'tf.float32'}), "(initial_value=conv_c_weights, name=conv_c_name + '_w', dtype=tf\n .float32)\n", (29843, 29921), True, 'import tensorflow as tf\n'), ((30068, 30189), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['conv_acts', 'conv_c_w'], {'strides': '[1, 1, 1, 1]', 'data_format': 'data_format', 'padding': 'pad_mode', 'name': 'conv_c_name'}), '(conv_acts, conv_c_w, strides=[1, 1, 1, 1], data_format=\n data_format, padding=pad_mode, name=conv_c_name)\n', (30080, 30189), True, 'import tensorflow as tf\n'), ((32864, 32884), 'numpy.zeros', 'np.zeros', (['w_shape[1]'], {}), '(w_shape[1])\n', (32872, 32884), True, 'import numpy as np\n'), ((33457, 33542), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'fc_a_weights', 'name': "(fc_a_name + '_w')", 'dtype': 'tf.float32'}), "(initial_value=fc_a_weights, name=fc_a_name + '_w', dtype=tf.float32\n )\n", (33468, 33542), True, 'import tensorflow as tf\n'), ((33697, 33744), 'tensorflow.matmul', 'tf.matmul', (['op.inputs[0]', 'fc_a_w'], {'name': 'fc_a_name'}), '(op.inputs[0], fc_a_w, name=fc_a_name)\n', (33706, 33744), True, 'import tensorflow as tf\n'), ((34146, 34231), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'fc_b_weights', 'name': "(fc_b_name + '_w')", 'dtype': 'tf.float32'}), "(initial_value=fc_b_weights, name=fc_b_name + '_w', dtype=tf.float32\n )\n", (34157, 34231), True, 'import tensorflow as tf\n'), ((34331, 34373), 'tensorflow.matmul', 'tf.matmul', (['fc_acts', 'fc_b_w'], {'name': 'fc_b_name'}), '(fc_acts, fc_b_w, name=fc_b_name)\n', 
(34340, 34373), True, 'import tensorflow as tf\n'), ((36693, 36758), 'aimet_common.statistics_util.SvdStatistics.PerSelectedLayer', 'stats_u.SvdStatistics.PerSelectedLayer', (['op.name', 'svd_ranks', 'ratio'], {}), '(op.name, svd_ranks, ratio)\n', (36731, 36758), True, 'from aimet_common import statistics_util as stats_u\n'), ((38014, 38024), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (38022, 38024), True, 'import tensorflow as tf\n'), ((38814, 38824), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (38822, 38824), True, 'import tensorflow as tf\n'), ((41467, 41673), 'aimet_common.statistics_util.SvdStatistics', 'stats_u.SvdStatistics', (['self._baseline_perf', 'model_perf', 'self._metric', 'best_index'], {'mem_comp_ratio': 'memory_compression_ratio', 'mac_comp_ratio': 'mac_compression_ratio', 'rank_stats_list': 'stats_per_rank_index'}), '(self._baseline_perf, model_perf, self._metric,\n best_index, mem_comp_ratio=memory_compression_ratio, mac_comp_ratio=\n mac_compression_ratio, rank_stats_list=stats_per_rank_index)\n', (41488, 41673), True, 'from aimet_common import statistics_util as stats_u\n'), ((45745, 45906), 'aimet_common.statistics_util.SvdStatistics.PerRankIndex', 'stats_u.SvdStatistics.PerRankIndex', ([], {'rank_index': '(0)', 'model_accuracy': 'model_perf', 'model_compression_ratio': 'compression_score', 'layer_stats_list': 'per_layer_stats'}), '(rank_index=0, model_accuracy=model_perf,\n model_compression_ratio=compression_score, layer_stats_list=per_layer_stats\n )\n', (45779, 45906), True, 'from aimet_common import statistics_util as stats_u\n'), ((46117, 46308), 'aimet_common.statistics_util.SvdStatistics', 'stats_u.SvdStatistics', (['self._baseline_perf', 'model_perf', 'self._metric', '(0)'], {'mem_comp_ratio': 'memory_compression_ratio', 'mac_comp_ratio': 'mac_compression_ratio', 'rank_stats_list': 'rank_data_list'}), '(self._baseline_perf, model_perf, self._metric, 0,\n mem_comp_ratio=memory_compression_ratio, mac_comp_ratio=\n 
mac_compression_ratio, rank_stats_list=rank_data_list)\n', (46138, 46308), True, 'from aimet_common import statistics_util as stats_u\n'), ((28267, 28359), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'split_biases[0]', 'name': "(conv_a_name + '_bias')", 'dtype': 'tf.float32'}), "(initial_value=split_biases[0], name=conv_a_name + '_bias',\n dtype=tf.float32)\n", (28278, 28359), True, 'import tensorflow as tf\n'), ((29120, 29212), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'split_biases[1]', 'name': "(conv_b_name + '_bias')", 'dtype': 'tf.float32'}), "(initial_value=split_biases[1], name=conv_b_name + '_bias',\n dtype=tf.float32)\n", (29131, 29212), True, 'import tensorflow as tf\n'), ((30276, 30368), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'split_biases[2]', 'name': "(conv_c_name + '_bias')", 'dtype': 'tf.float32'}), "(initial_value=split_biases[2], name=conv_c_name + '_bias',\n dtype=tf.float32)\n", (30287, 30368), True, 'import tensorflow as tf\n'), ((33797, 33888), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'split_biases[0]', 'name': "(fc_a_name + '_bias')", 'dtype': 'tf.float32'}), "(initial_value=split_biases[0], name=fc_a_name + '_bias', dtype=\n tf.float32)\n", (33808, 33888), True, 'import tensorflow as tf\n'), ((34426, 34517), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'split_biases[1]', 'name': "(fc_b_name + '_bias')", 'dtype': 'tf.float32'}), "(initial_value=split_biases[1], name=fc_b_name + '_bias', dtype=\n tf.float32)\n", (34437, 34517), True, 'import tensorflow as tf\n'), ((39609, 39780), 'aimet_common.statistics_util.SvdStatistics.PerRankIndex', 'stats_u.SvdStatistics.PerRankIndex', ([], {'rank_index': 'rank_index', 'model_accuracy': 'model_perf', 'model_compression_ratio': 'compression_score', 'layer_stats_list': 'per_layer_stats'}), '(rank_index=rank_index, model_accuracy=\n model_perf, model_compression_ratio=compression_score, layer_stats_list\n 
=per_layer_stats)\n', (39643, 39780), True, 'from aimet_common import statistics_util as stats_u\n'), ((43708, 43737), 'os.path.dirname', 'os.path.dirname', (['output_graph'], {}), '(output_graph)\n', (43723, 43737), False, 'import os\n'), ((22667, 22702), 'numpy.transpose', 'np.transpose', (['weights', '(3, 2, 0, 1)'], {}), '(weights, (3, 2, 0, 1))\n', (22679, 22702), True, 'import numpy as np\n'), ((22942, 22971), 'numpy.transpose', 'np.transpose', (['weights', '(1, 0)'], {}), '(weights, (1, 0))\n', (22954, 22971), True, 'import numpy as np\n'), ((27630, 27656), 'numpy.array', 'np.array', (['split_weights[0]'], {}), '(split_weights[0])\n', (27638, 27656), True, 'import numpy as np\n'), ((28587, 28613), 'numpy.array', 'np.array', (['split_weights[1]'], {}), '(split_weights[1])\n', (28595, 28613), True, 'import numpy as np\n'), ((29730, 29756), 'numpy.array', 'np.array', (['split_weights[2]'], {}), '(split_weights[2])\n', (29738, 29756), True, 'import numpy as np\n'), ((33365, 33391), 'numpy.array', 'np.array', (['split_weights[0]'], {}), '(split_weights[0])\n', (33373, 33391), True, 'import numpy as np\n'), ((34054, 34080), 'numpy.array', 'np.array', (['split_weights[1]'], {}), '(split_weights[1])\n', (34062, 34080), True, 'import numpy as np\n')] |
# Module that contains the necessary functions to implement the ALCOVE model
# Author: <NAME>
import numpy as np
def hidden_layer_activations(current_stimulus, stimulus_representation, hidden_representation, alpha, r, q, c):
    """
    Function that calculates the hidden layer activations (equation 1 in [Krus92]_)

    Parameters
    ----------
    current_stimulus : list
        Co-ordinates of the presented stimulus in psychological space, as a list of
        length N (number of dimensions). For example, [1, 0, 1, 1]
    stimulus_representation : np.array
        n x N matrix of all stimuli (n stimuli, N dimensions). Only its second
        dimension (N) is used here, to determine the number of dimensions.
    hidden_representation : np.array
        n x N matrix of hidden-node positions in psychological space
    alpha : list
        Attentional weight for each dimension, e.g. [0.25, 0.25, 0.25, 0.25]
    r : int
        Minkowski distance metric: 1 = city-block (separable dimensions),
        2 = Euclidean (integral dimensions)
    q : int
        Similarity gradient: 1 = exponential, 2 = Gaussian
    c : float
        Specificity constant controlling the width of the activation profiles
        (large c -> narrow profiles). A free parameter of the model.

    Returns
    -------
    list
        Activation of each node in the hidden layer
    """
    # The dimension count is taken from the stimulus matrix, exactly as in the
    # original scalar implementation; extra columns/entries beyond it are ignored.
    num_dimensions = np.shape(stimulus_representation)[1]
    hidden = np.asarray(hidden_representation, dtype=float)[:, :num_dimensions]
    stimulus = np.asarray(current_stimulus, dtype=float)[:num_dimensions]
    attention = np.asarray(alpha, dtype=float)[:num_dimensions]
    # Attention-weighted Minkowski distance of every hidden node to the stimulus,
    # vectorized over nodes (replaces the original O(n*N) Python double loop).
    distances = (attention * np.abs(hidden - stimulus) ** r).sum(axis=1)
    # Similarity gradient (q) and specificity (c) applied per equation 1
    activations = np.exp(-c * distances ** (q / r))
    return activations.tolist()
def output_layer_activations(categories, hidden_activations, w):
    """
    Function that calculates the activation of every output category node
    (equation 2 in [Krus92]_). Each category is one node in the output layer.

    Parameters
    ----------
    categories : list
        List indicating which stimulus belongs to which category, e.g.
        [[0, 2, 4], [1, 3]]. Only its length (number of categories) is used here.
    hidden_activations : list
        Activations of all hidden layer nodes
    w : list
        J x K association weights from the J hidden nodes to the K output nodes,
        e.g. [[0.5, 0.2], [0.3, 0.7], [-0.33, -0.77]]

    Returns
    -------
    list
        Activation of each node in the output category layer
    """
    num_hidden = len(hidden_activations)
    # Output node k is the hidden activations weighted by column k of w
    return [
        sum(w[j][k] * hidden_activations[j] for j in range(num_hidden))
        for k in range(len(categories))
    ]
def probability_of_category(K, phi, output_activations):
    """
    Map the output activations onto the probability of assigning the current
    stimulus to category ``K`` via an exponential choice rule (equation 3 in
    [Krus92]_).
    Parameters
    ----------
    K : int
        Category number
    phi : float
        Probability mapping constant; a free parameter of the model.
    output_activations : list
        Activations of each node in the output category layer.
    Returns
    -------
    float
        Probability of categorizing the current stimulus into category K.
    """
    chosen = np.exp(phi * output_activations[K])
    # Normalising constant: accumulate in list order, matching the original
    # index loop exactly.
    total = 0
    for activation in output_activations:
        total += np.exp(phi * activation)
    return chosen / total
def teacher(i, K, categories, output_activations):
    """
    Compute the "humble teacher" feedback value for stimulus ``i`` and
    category ``K`` (equation 4b in [Krus92]_).
    Parameters
    ----------
    i : int
        Stimulus ID or stimulus number.
    K : int
        Category number.
    categories : list
        List of lists mapping stimuli to categories, e.g. [[0, 2, 4], [1, 3]].
    output_activations : list
        Activations of each node in the output category layer.
    Returns
    -------
    float
        Teacher value used during the learning phase: at least +1 when K is
        the correct category for stimulus i, at most -1 otherwise.
    """
    # Locate the category containing stimulus i; like the original loop,
    # the LAST matching category wins and 0 is the default.
    winning = 0
    for idx, members in enumerate(categories):
        if i in members:
            winning = idx
    if winning == K:
        return max(1, output_activations[K])
    return min(-1, output_activations[K])
def find_del_w(lambda_w, output_activations, hidden_activations, categories):
    """
    Compute the weight change to apply to the hidden-to-output association
    weights during learning (equation 5 in [Krus92]_).
    Parameters
    ----------
    lambda_w : float
        Learning rate shared by all association weights; a free parameter.
    output_activations : list
        Activations of each node in the output category layer.
    hidden_activations : list
        Activations of all hidden-layer nodes.
    categories : list
        List of lists mapping stimuli to categories, e.g. [[0, 2, 4], [1, 3]].
    Returns
    -------
    numpy.ndarray
        J x K matrix of weight increments.
    """
    n_out = len(output_activations)
    n_hidden = len(hidden_activations)
    del_w = np.zeros([n_hidden, n_out], dtype=float)
    for node in range(n_hidden):
        for cat in range(n_out):
            # NOTE(review): the hidden-node index is passed to ``teacher`` as
            # the stimulus id; presumably each hidden node corresponds 1:1 to
            # an exemplar stimulus -- confirm against the model setup.
            t_val = teacher(node, cat, categories, output_activations)
            del_w[node][cat] += (lambda_w * (t_val - output_activations[cat])
                                 * hidden_activations[node])
    return del_w
def find_del_alpha(current_stimulus_id, current_stimulus, lambda_alpha, c, hidden_representation,
                   output_activations, hidden_activations, w, categories):
    """
    Compute the change to apply to each dimension's attentional weight during
    learning (equation 6 in [Krus92]_).
    Parameters
    ----------
    current_stimulus_id : int
        Current stimulus ID or stimulus number.
    current_stimulus : list
        Current stimulus representation in the psychological space,
        e.g. [0, 1, 1, 0].
    lambda_alpha : float
        Learning rate shared by all attentional weights; a free parameter.
    c : float
        Specificity constant; a free parameter of the model.
    hidden_representation : np.array
        n x N matrix of hidden-node co-ordinates in the N-dimensional
        psychological space.
    output_activations : list
        Activations of each node in the output category layer.
    hidden_activations : list
        Activations of all hidden-layer nodes.
    w : list
        J x K association-weight matrix from hidden to output nodes.
    categories : list
        List of lists mapping stimuli to categories, e.g. [[0, 2, 4], [1, 3]].
    Returns
    -------
    list
        Increment for each dimension's attentional weight.
    """
    num_categories = len(output_activations)
    num_hidden_layer_nodes = len(hidden_activations)
    num_dimensions = len(current_stimulus)
    # PERF: the teacher value depends only on the category index k, so hoist
    # it out of the dimension/hidden-node loops instead of recomputing it
    # num_dimensions * num_hidden_layer_nodes times (teacher is pure).
    teacher_values = [teacher(current_stimulus_id, k, categories, output_activations)
                      for k in range(num_categories)]
    del_alpha = []
    for i in range(num_dimensions):
        s2 = 0
        for j in range(num_hidden_layer_nodes):
            # Error signal back-propagated through node j ...
            s1 = 0
            for k in range(num_categories):
                s1 += (teacher_values[k] - output_activations[k]) * w[j][k]
            # ... gated by node j's activation and its distance from the
            # stimulus along dimension i.
            s1 *= hidden_activations[j] * c * abs(hidden_representation[j][i] - current_stimulus[i])
            s2 += s1
        del_alpha.append(-lambda_alpha * s2)
    return del_alpha
| [
"numpy.shape",
"numpy.zeros",
"numpy.exp"
] | [((5053, 5088), 'numpy.exp', 'np.exp', (['(phi * output_activations[K])'], {}), '(phi * output_activations[K])\n', (5059, 5088), True, 'import numpy as np\n'), ((7562, 7625), 'numpy.zeros', 'np.zeros', (['[num_hidden_layer_nodes, num_categories]'], {'dtype': 'float'}), '([num_hidden_layer_nodes, num_categories], dtype=float)\n', (7570, 7625), True, 'import numpy as np\n'), ((2504, 2535), 'numpy.shape', 'np.shape', (['hidden_representation'], {}), '(hidden_representation)\n', (2512, 2535), True, 'import numpy as np\n'), ((2560, 2593), 'numpy.shape', 'np.shape', (['stimulus_representation'], {}), '(stimulus_representation)\n', (2568, 2593), True, 'import numpy as np\n'), ((2862, 2871), 'numpy.exp', 'np.exp', (['s'], {}), '(s)\n', (2868, 2871), True, 'import numpy as np\n'), ((5170, 5205), 'numpy.exp', 'np.exp', (['(phi * output_activations[k])'], {}), '(phi * output_activations[k])\n', (5176, 5205), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
class NTM_Memory(nn.Module):
    """External memory bank for a Neural Turing Machine.

    The memory is an ``N x M`` matrix (``address_count`` rows of
    ``address_dimension``-wide cells), learned as a single shared initial
    state and replicated once per batch element.
    """

    def __init__(self, address_count, address_dimension, batch_size):
        super(NTM_Memory, self).__init__()
        # Learned initial memory content, shared across the batch.
        self.initial_memory = nn.Parameter(torch.zeros(1, address_count, address_dimension))
        self.batch_size = batch_size
        self.reset_parameters()
        self.initialize_state()

    def reset_parameters(self):
        """Re-initialise the learned memory uniformly in [-1/sqrt(N+M), 1/sqrt(N+M)]."""
        _, N, M = self.initial_memory.size()
        stdev = 1 / np.sqrt(N + M)
        # BUGFIX: ``nn.init.uniform`` is a deprecated alias; use the in-place
        # ``nn.init.uniform_`` from the current torch API.
        nn.init.uniform_(self.initial_memory, -stdev, stdev)

    def initialize_state(self):
        """Copy the initial memory once per batch element (batch, N, M)."""
        self.memory = self.initial_memory.repeat(self.batch_size, 1, 1)

    def address_memory(self, key_vec, prev_address_vec, β, g, s, γ):
        """Content- and location-based addressing.

        key_vec: (batch, M) lookup key; β: key strength; g: interpolation
        gate; s: (batch, 3) circular-shift weighting; γ: sharpening exponent
        (currently unused -- the sharpening step is commented out).
        Returns the (batch, N) address distribution.
        """
        EPSILON = 1e-16  # avoids 0/0 in the cosine similarity
        # Content addressing: cosine similarity of the key with every row.
        result = F.cosine_similarity((key_vec + EPSILON).unsqueeze(1).expand_as(self.memory),
                                     self.memory + EPSILON, dim=2)
        result = F.softmax(β * result, dim=1)
        # Interpolate with the previous address vector.
        result = g * result + (1 - g) * prev_address_vec
        # Circular convolution with the 3-tap shift kernel s.
        result = torch.cat((result[:, 1:], result[:, :1]), 1) * s[:, 0:1] + result * s[:, 1:2] + \
                 torch.cat((result[:, -1:], result[:, :-1]), 1) * s[:, 2:3]
        # result = result ** γ
        # result = result / (result.sum(1, keepdim=True) + EPSILON)
        return result

    def read_memory(self, address_vec):
        """Return the (batch, M) convex combination of rows weighted by ``address_vec``."""
        return torch.bmm(self.memory.transpose(1, 2), address_vec.unsqueeze(2)).squeeze(2)

    def update_memory(self, address_vec, erase_vec, add_vec):
        """Erase-then-add write to the addressed memory rows."""
        self.memory = self.memory * (1 - torch.bmm(address_vec.unsqueeze(2), erase_vec.unsqueeze(1)))
        self.memory += torch.bmm(address_vec.unsqueeze(2), add_vec.unsqueeze(1))
"torch.cat",
"torch.nn.functional.softmax",
"torch.zeros",
"torch.nn.init.uniform",
"numpy.sqrt"
] | [((546, 597), 'torch.nn.init.uniform', 'nn.init.uniform', (['self.initial_memory', '(-stdev)', 'stdev'], {}), '(self.initial_memory, -stdev, stdev)\n', (561, 597), True, 'import torch.nn as nn\n'), ((975, 1003), 'torch.nn.functional.softmax', 'F.softmax', (['(β * result)'], {'dim': '(1)'}), '(β * result, dim=1)\n', (984, 1003), True, 'import torch.nn.functional as F\n'), ((273, 321), 'torch.zeros', 'torch.zeros', (['(1)', 'address_count', 'address_dimension'], {}), '(1, address_count, address_dimension)\n', (284, 321), False, 'import torch\n'), ((523, 537), 'numpy.sqrt', 'np.sqrt', (['(N + M)'], {}), '(N + M)\n', (530, 537), True, 'import numpy as np\n'), ((1177, 1223), 'torch.cat', 'torch.cat', (['(result[:, -1:], result[:, :-1])', '(1)'], {}), '((result[:, -1:], result[:, :-1]), 1)\n', (1186, 1223), False, 'import torch\n'), ((1078, 1122), 'torch.cat', 'torch.cat', (['(result[:, 1:], result[:, :1])', '(1)'], {}), '((result[:, 1:], result[:, :1]), 1)\n', (1087, 1122), False, 'import torch\n')] |
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
import sys
class Gaussian_Process_Regression():
    """Minimal Gaussian-process regressor with a fixed RBF kernel.

    The kernel hyper-parameters (amplitude ``a1_1``, squared length-scale
    ``a2_1`` and constant offset ``a3_1``) are hard-coded in ``__init__``.
    """

    def __init__(self):
        self.K = None              # last Gram matrix computed by xx2K
        self.kernel_name1 = 'RBF'
        self.a1_1 = 200.0          # kernel amplitude
        self.a2_1 = 20.0           # squared length-scale
        self.a3_1 = 0.0            # constant offset added to every entry

    def xx2K(self, xn, xm):
        """Return the Gram matrix K[i, j] = a1*exp(-(xn_i - xm_j)**2 / a2) + a3.

        PERF/IDIOM: vectorised with broadcasting; the original per-row Python
        loop (and its special case for a single left-hand point) computed
        exactly the same elementwise expression.
        """
        xn = np.asarray(xn, dtype=float)
        xm = np.asarray(xm, dtype=float)
        diff = xn[:, None] - xm[None, :]
        self.K = self.a1_1 * np.exp(-diff ** 2 / self.a2_1) + self.a3_1
        return self.K

    def xsample2meanvariance(self, _xsample, _ysample, _x, eps=1.0e-8):
        """Posterior mean and standard deviation of the GP at query points ``_x``.

        A small jitter ``eps`` is added to the kernel diagonal so the Cholesky
        factorisation stays numerically stable.
        """
        self.K = self.xx2K(_xsample, _xsample) + eps * np.eye(len(_xsample))
        L = np.linalg.cholesky(self.K)
        kast = self.xx2K(_xsample, _x)
        kastast = self.xx2K(_x, _x)
        # Posterior mean: k*^T K^{-1} y, solved through the Cholesky factor.
        w = np.linalg.solve(L, _ysample)
        z = np.linalg.solve(L.T, w)
        mean = np.dot(kast.T, z)
        # Posterior covariance: k** - k*^T K^{-1} k*; only the diagonal
        # (pointwise variances) is kept.
        W = np.linalg.solve(L, kast)
        Z = np.linalg.solve(L.T, W)
        fvariance = np.diag(kastast - np.dot(kast.T, Z))
        std = np.sqrt(fvariance)
        return mean, std
class Bayesian_opt():
    """Acquisition-function helper for Bayesian optimisation.

    ``aqui_name`` selects among probability of improvement ('PI'), expected
    improvement ('EI') and upper confidence bound ('UCB').
    """

    def __init__(self):
        self.aqui_name = 'PI'  # one of 'PI', 'EI', 'UCB'
        self.xi = 0.01         # exploration margin used by PI and EI

    def aqui_PI(self, mean, std, maxval):
        """Probability of improving on the current best value ``maxval``."""
        z_score = (mean - maxval - self.xi) / std
        return norm.cdf(z_score)

    def aqui_EI(self, mean, std, maxval):
        """Expected improvement over the current best value ``maxval``."""
        z_score = (mean - maxval - self.xi) / std
        return (mean - maxval - self.xi) * norm.cdf(z_score) + std * norm.pdf(z_score)

    def aqui_UCB(self, mean, std, maxval):
        """Upper confidence bound with unit exploration weight (maxval is ignored)."""
        return mean + 1.0 * std

    def get_aqui(self, mean, std, maxval):
        """Evaluate the acquisition function selected by ``self.aqui_name``."""
        dispatch = {'PI': self.aqui_PI, 'EI': self.aqui_EI, 'UCB': self.aqui_UCB}
        chosen = dispatch.get(self.aqui_name)
        if chosen is None:
            print('# ERROR: undefined acquisition function called.')
            sys.exit()
        return chosen(mean, std, maxval)
| [
"numpy.outer",
"scipy.stats.norm.pdf",
"sys.exit",
"scipy.stats.norm.cdf",
"numpy.exp",
"numpy.dot",
"numpy.linalg.solve",
"numpy.linalg.cholesky",
"numpy.diag",
"numpy.sqrt"
] | [((751, 777), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['self.K'], {}), '(self.K)\n', (769, 777), True, 'import numpy as np\n'), ((911, 939), 'numpy.linalg.solve', 'np.linalg.solve', (['L', '_ysample'], {}), '(L, _ysample)\n', (926, 939), True, 'import numpy as np\n'), ((952, 975), 'numpy.linalg.solve', 'np.linalg.solve', (['L.T', 'w'], {}), '(L.T, w)\n', (967, 975), True, 'import numpy as np\n'), ((991, 1008), 'numpy.dot', 'np.dot', (['kast.T', 'z'], {}), '(kast.T, z)\n', (997, 1008), True, 'import numpy as np\n'), ((1021, 1045), 'numpy.linalg.solve', 'np.linalg.solve', (['L', 'kast'], {}), '(L, kast)\n', (1036, 1045), True, 'import numpy as np\n'), ((1058, 1081), 'numpy.linalg.solve', 'np.linalg.solve', (['L.T', 'W'], {}), '(L.T, W)\n', (1073, 1081), True, 'import numpy as np\n'), ((1150, 1168), 'numpy.diag', 'np.diag', (['fvariance'], {}), '(fvariance)\n', (1157, 1168), True, 'import numpy as np\n'), ((1183, 1201), 'numpy.sqrt', 'np.sqrt', (['fvariance'], {}), '(fvariance)\n', (1190, 1201), True, 'import numpy as np\n'), ((1456, 1467), 'scipy.stats.norm.cdf', 'norm.cdf', (['Z'], {}), '(Z)\n', (1464, 1467), False, 'from scipy.stats import norm\n'), ((1112, 1129), 'numpy.dot', 'np.dot', (['kast.T', 'Z'], {}), '(kast.T, Z)\n', (1118, 1129), True, 'import numpy as np\n'), ((430, 446), 'numpy.outer', 'np.outer', (['xn', 'xm'], {}), '(xn, xm)\n', (438, 446), True, 'import numpy as np\n'), ((1602, 1613), 'scipy.stats.norm.cdf', 'norm.cdf', (['Z'], {}), '(Z)\n', (1610, 1613), False, 'from scipy.stats import norm\n'), ((1620, 1631), 'scipy.stats.norm.pdf', 'norm.pdf', (['Z'], {}), '(Z)\n', (1628, 1631), False, 'from scipy.stats import norm\n'), ((515, 556), 'numpy.exp', 'np.exp', (['(-(xn[i] - xm[:]) ** 2 / self.a2_1)'], {}), '(-(xn[i] - xm[:]) ** 2 / self.a2_1)\n', (521, 556), True, 'import numpy as np\n'), ((2123, 2133), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2131, 2133), False, 'import sys\n')] |
from __future__ import absolute_import
import os
import numpy as np
import contextlib
import warnings
import tempfile
import shutil
import argparse
import json
@contextlib.contextmanager
def fixed_seed(seed, strict=False):
    """Fix random seed to improve the reproducibility.

    Seeds ``random``, ``numpy`` and ``torch`` (plus CUDA when available).
    With ``strict=True`` cuDNN is switched to deterministic mode for the
    duration of the ``with`` block and restored afterwards.

    Args:
        seed (float): Random seed
        strict (bool, optional): If True, cuDNN works under deterministic mode.
            Defaults to False.
    TODO: Even if `strict` is set to True, the reproducibility cannot be guaranteed under the `MultiprocessIterator`.
    If your dataset has stochastic behavior, such as data augmentation, you should use the `SerialIterator` or `MultithreadIterator`.
    """
    import random
    import torch
    import copy

    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)

    saved_deterministic = None
    saved_benchmark = None
    if strict:
        warnings.warn('Even if `strict` is set to True, the reproducibility cannot be guaranteed under the `MultiprocessIterator`. \
If your dataset has stochastic behavior such as data augmentation, you should use the `SerialIterator` or `MultithreadIterator`.')
        # Remember the cuDNN flags so they can be restored on exit.
        saved_deterministic = copy.copy(torch.backends.cudnn.deterministic)
        saved_benchmark = copy.copy(torch.backends.cudnn.benchmark)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    yield

    if strict:
        torch.backends.cudnn.deterministic = saved_deterministic
        torch.backends.cudnn.benchmark = saved_benchmark
# https://github.com/chainer/chainerui/blob/master/chainerui/utils/tempdir.py
@contextlib.contextmanager
def tempdir(**kwargs):
    """Context manager bounding the lifetime of a temporary directory.

    Keyword arguments are forwarded to ``tempfile.mkdtemp``; the special key
    ``ignore_errors`` (default ``False``) controls whether removal failures
    are suppressed.
    """
    suppress_rm_errors = kwargs.pop('ignore_errors', False)
    created = tempfile.mkdtemp(**kwargs)
    try:
        yield created
    finally:
        # Always clean up, even if the body raised.
        shutil.rmtree(created, ignore_errors=suppress_rm_errors)
# https://github.com/chainer/chainerui/blob/master/chainerui/utils/save_args.py
def convert_dict(conditions):
    """Return ``conditions`` as a plain dict, unwrapping an argparse Namespace.

    Anything that is not a Namespace is returned unchanged.
    """
    if not isinstance(conditions, argparse.Namespace):
        return conditions
    return vars(conditions)
# https://github.com/chainer/chainerui/blob/master/chainerui/utils/save_args.py
def save_args(conditions, out_path):
    """A util function to save experiment condition for job table.

    Serialises ``conditions`` to ``<out_path>/args`` as JSON, writing through
    a temporary directory so a half-written file is never left behind.

    Args:
        conditions (:class:`argparse.Namespace` or dict): Experiment conditions
            to show on a job table. Keys are show as table header and values
            are show at a job row.
        out_path (str): Output directory name to save conditions.
    """
    serialisable = convert_dict(conditions)
    try:
        os.makedirs(out_path)
    except OSError:
        # Directory already exists (or cannot be created); keep the original
        # best-effort behaviour and let a real failure surface on write.
        pass
    with tempdir(prefix='args', dir=out_path) as scratch:
        tmp_file = os.path.join(scratch, 'args.json')
        with open(tmp_file, 'w') as fh:
            json.dump(serialisable, fh, indent=4)
        shutil.move(tmp_file, os.path.join(out_path, 'args'))
# https://github.com/chainer/chainer/blob/v7.1.0/chainer/training/extensions/_snapshot.py
def _find_snapshot_files(fmt, path):
'''Only prefix and suffix match
TODO(kuenishi): currently clean format string such as
"snapshot{.iteration}.npz" can only be parsed, but tricky (or
invalid) formats like "snapshot{{.iteration}}.npz" are hard to
detect and to properly show errors, just ignored or fails so far.
Args:
fmt (str): format string to match with file names of
existing snapshots, where prefix and suffix are
only examined. Also, files' staleness is judged
by timestamps. The default is metime.
path (str): a directory path to search for snapshot files.
Returns:
A sorted list of pair of ``mtime, filename``, whose file
name that matched the format ``fmt`` directly under ``path``.
'''
prefix = fmt.split('{')[0]
suffix = fmt.split('}')[-1]
matched_files = (file for file in os.listdir(path)
if file.startswith(prefix) and file.endswith(suffix))
def _prepend_mtime(f):
t = os.stat(os.path.join(path, f)).st_mtime
return (t, f)
return sorted(_prepend_mtime(file) for file in matched_files)
# https://github.com/chainer/chainer/blob/v7.1.0/chainer/training/extensions/_snapshot.py
def _find_latest_snapshot(fmt, path):
    """Finds the latest snapshot in a directory

    Args:
        fmt (str): format string to match with file names of
            existing snapshots, where prefix and suffix are
            only examined.
        path (str): a directory path to search for snapshot files.
    Returns:
        The file name with the newest ``mtime`` matching ``fmt`` directly
        under ``path``, or ``None`` if no such file exists.
    """
    candidates = _find_snapshot_files(fmt, path)
    if not candidates:
        return None
    # Entries are sorted by (mtime, name); the last one is the newest.
    return candidates[-1][1]
def find_latest_snapshot(fmt, path, return_fullpath=True):
    '''Alias of :func:`_find_latest_snapshot`

    Raises ``FileNotFoundError`` when no snapshot under ``path`` matches
    ``fmt``; otherwise returns the newest match, as a full path when
    ``return_fullpath`` is true.
    '''
    latest = _find_latest_snapshot(fmt, path)
    if latest is None:
        raise FileNotFoundError('cannot find snapshot for <%s>' %
                                os.path.join(path, fmt))
    return os.path.join(path, latest) if return_fullpath else latest
| [
"json.dump",
"numpy.random.seed",
"os.makedirs",
"shutil.rmtree",
"torch.manual_seed",
"torch.cuda.manual_seed",
"copy.copy",
"tempfile.mkdtemp",
"torch.cuda.is_available",
"random.seed",
"shutil.move",
"warnings.warn",
"os.path.join",
"os.listdir"
] | [((761, 778), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (772, 778), False, 'import random\n'), ((783, 803), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (797, 803), True, 'import numpy as np\n'), ((808, 831), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (825, 831), False, 'import torch\n'), ((839, 864), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (862, 864), False, 'import torch\n'), ((1840, 1866), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '(**kwargs)\n', (1856, 1866), False, 'import tempfile\n'), ((874, 902), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (896, 902), False, 'import torch\n'), ((927, 1200), 'warnings.warn', 'warnings.warn', (['"""Even if `strict` is set to True, the reproducibility cannot be guaranteed under the `MultiprocessIterator`. If your dataset has stochastic behavior such as data augmentation, you should use the `SerialIterator` or `MultithreadIterator`."""'], {}), "(\n 'Even if `strict` is set to True, the reproducibility cannot be guaranteed under the `MultiprocessIterator`. 
If your dataset has stochastic behavior such as data augmentation, you should use the `SerialIterator` or `MultithreadIterator`.'\n )\n", (940, 1200), False, 'import warnings\n'), ((1219, 1264), 'copy.copy', 'copy.copy', (['torch.backends.cudnn.deterministic'], {}), '(torch.backends.cudnn.deterministic)\n', (1228, 1264), False, 'import copy\n'), ((1286, 1327), 'copy.copy', 'copy.copy', (['torch.backends.cudnn.benchmark'], {}), '(torch.backends.cudnn.benchmark)\n', (1295, 1327), False, 'import copy\n'), ((1920, 1972), 'shutil.rmtree', 'shutil.rmtree', (['temp_dir'], {'ignore_errors': 'ignore_errors'}), '(temp_dir, ignore_errors=ignore_errors)\n', (1933, 1972), False, 'import shutil\n'), ((2707, 2728), 'os.makedirs', 'os.makedirs', (['out_path'], {}), '(out_path)\n', (2718, 2728), False, 'import os\n'), ((2834, 2866), 'os.path.join', 'os.path.join', (['tempd', '"""args.json"""'], {}), "(tempd, 'args.json')\n", (2846, 2866), False, 'import os\n'), ((2963, 2993), 'os.path.join', 'os.path.join', (['out_path', '"""args"""'], {}), "(out_path, 'args')\n", (2975, 2993), False, 'import os\n'), ((3002, 3029), 'shutil.move', 'shutil.move', (['path', 'new_path'], {}), '(path, new_path)\n', (3013, 3029), False, 'import shutil\n'), ((5492, 5515), 'os.path.join', 'os.path.join', (['path', 'ret'], {}), '(path, ret)\n', (5504, 5515), False, 'import os\n'), ((2914, 2942), 'json.dump', 'json.dump', (['args', 'f'], {'indent': '(4)'}), '(args, f, indent=4)\n', (2923, 2942), False, 'import json\n'), ((4022, 4038), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (4032, 4038), False, 'import os\n'), ((4162, 4183), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (4174, 4183), False, 'import os\n'), ((5427, 5450), 'os.path.join', 'os.path.join', (['path', 'fmt'], {}), '(path, fmt)\n', (5439, 5450), False, 'import os\n')] |
import numpy as np
from sklearn.metrics import normalized_mutual_info_score, adjusted_rand_score
# Short aliases for the sklearn clustering metrics used in this module.
nmi = normalized_mutual_info_score
ari = adjusted_rand_score
def acc(y_true, y_pred):
    """
    Calculate clustering accuracy under the best one-to-one mapping between
    predicted and true cluster labels. Require scikit-learn/scipy installed
    # Arguments
        y_true: true labels, numpy.array with shape `(n_samples,)`
        y_pred: predicted labels, numpy.array with shape `(n_samples,)`
    # Return
        accuracy, in [0,1]
    """
    y_true = y_true.astype(np.int64)
    assert y_pred.size == y_true.size
    # Contingency matrix: w[p, t] counts samples predicted as p with truth t.
    D = max(y_pred.max(), y_true.max()) + 1
    w = np.zeros((D, D), dtype=np.int64)
    for i in range(y_pred.size):
        w[y_pred[i], y_true[i]] += 1
    # BUGFIX: sklearn.utils.linear_assignment_ was removed in scikit-learn
    # 0.23; scipy's linear_sum_assignment is the drop-in replacement (it
    # minimises cost, so the ``w.max() - w`` inversion is unchanged).
    from scipy.optimize import linear_sum_assignment
    row_ind, col_ind = linear_sum_assignment(w.max() - w)
    return w[row_ind, col_ind].sum() * 1.0 / y_pred.size
def cluster_acc(Y_pred, Y):
    """
    Clustering accuracy under the best one-to-one label mapping, plus the
    contingency matrix used to compute it.
    # Arguments
        Y_pred: predicted labels, numpy.array with shape `(n_samples,)`
        Y: true labels, numpy.array with shape `(n_samples,)`
    # Return
        (accuracy in [0,1], w) where w[i, j] counts samples predicted as
        cluster i whose true label is j
    """
    # BUGFIX: sklearn.utils.linear_assignment_ was removed in scikit-learn
    # 0.23; use scipy's linear_sum_assignment instead.
    from scipy.optimize import linear_sum_assignment
    assert Y_pred.size == Y.size
    D = max(Y_pred.max(), Y.max()) + 1
    w = np.zeros((D, D), dtype=np.int64)
    for i in range(Y_pred.size):
        w[Y_pred[i], Y[i]] += 1
    row_ind, col_ind = linear_sum_assignment(w.max() - w)
    return w[row_ind, col_ind].sum() * 1.0 / Y_pred.size, w
"numpy.zeros"
] | [((587, 619), 'numpy.zeros', 'np.zeros', (['(D, D)'], {'dtype': 'np.int64'}), '((D, D), dtype=np.int64)\n', (595, 619), True, 'import numpy as np\n'), ((1026, 1058), 'numpy.zeros', 'np.zeros', (['(D, D)'], {'dtype': 'np.int64'}), '((D, D), dtype=np.int64)\n', (1034, 1058), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from MonotonicTime import monotonic_time
import numpy as np
_current_time = monotonic_time
class PID(object):
    """Three-axis PID velocity controller.

    Keeps independent proportional/integral/derivative error terms for each
    of three dimensions and clamps the control output to [-20, 20].
    """

    def __init__(self, kp=0.3, ki=0.5, kd=0.002):
        # Gains shared by all three axes.
        self.kp = kp
        self.ki = ki
        self.kd = kd
        # Per-axis error accumulators.
        self.p_errors = np.array([0.0, 0.0, 0.0], dtype=float)
        self.i_errors = np.array([0.0, 0.0, 0.0], dtype=float)
        self.d_errors = np.array([0.0, 0.0, 0.0], dtype=float)
        self.last_error = np.array([0.0, 0.0, 0.0], dtype=float)
        self.time = _current_time()      # timestamp of the previous update
        self.current = None              # measured velocity, 3-vector (unset)
        self.set_point = None            # desired velocity, 3-vector (unset)
        self.output = np.array([0.0, 0.0, 0.0], dtype=float)
        self.__min__ = -20               # output clamp (negative = backward)
        self.__max__ = 20                # output clamp (forward)

    def set_value(self, current, set_point):
        """Store the measured and desired velocities as float arrays."""
        self.current = np.array(current, dtype=float)
        self.set_point = np.array(set_point, dtype=float)

    def values(self):
        """Run one control step; remember and return the new velocity."""
        self.current = self.control_loop()
        return self.current

    def calculate_error(self):
        """Return (error vector, elapsed seconds) since the previous call."""
        now = _current_time()
        delta_error = self.set_point - self.current
        elapsed = now - self.time
        if not elapsed:
            elapsed = 1e-16  # avoid division by zero on back-to-back calls
        self.time = now
        return delta_error, elapsed

    def control_loop(self):
        """Classical PID update, vectorised over the three axes."""
        delta_error, delta_time = self.calculate_error()
        self.p_errors = delta_error
        self.i_errors += delta_error * delta_time
        self.d_errors = (delta_error - self.last_error) / delta_time
        self.last_error = delta_error
        raw = self.p_errors * self.kp + self.i_errors * self.ki + self.d_errors * self.kd
        clipped = np.where(raw > self.__max__, self.__max__, raw)      # cap large terms
        return np.where(clipped < self.__min__, self.__min__, clipped)  # cap small terms
| [
"numpy.where",
"numpy.array"
] | [((330, 368), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {'dtype': 'float'}), '([0.0, 0.0, 0.0], dtype=float)\n', (338, 368), True, 'import numpy as np\n'), ((447, 485), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {'dtype': 'float'}), '([0.0, 0.0, 0.0], dtype=float)\n', (455, 485), True, 'import numpy as np\n'), ((560, 598), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {'dtype': 'float'}), '([0.0, 0.0, 0.0], dtype=float)\n', (568, 598), True, 'import numpy as np\n'), ((675, 713), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {'dtype': 'float'}), '([0.0, 0.0, 0.0], dtype=float)\n', (683, 713), True, 'import numpy as np\n'), ((1019, 1057), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {'dtype': 'float'}), '([0.0, 0.0, 0.0], dtype=float)\n', (1027, 1057), True, 'import numpy as np\n'), ((1265, 1295), 'numpy.array', 'np.array', (['current'], {'dtype': 'float'}), '(current, dtype=float)\n', (1273, 1295), True, 'import numpy as np\n'), ((1321, 1353), 'numpy.array', 'np.array', (['set_point'], {'dtype': 'float'}), '(set_point, dtype=float)\n', (1329, 1353), True, 'import numpy as np\n'), ((2437, 2492), 'numpy.where', 'np.where', (['(outputs > self.__max__)', 'self.__max__', 'outputs'], {}), '(outputs > self.__max__, self.__max__, outputs)\n', (2445, 2492), True, 'import numpy as np\n'), ((2538, 2593), 'numpy.where', 'np.where', (['(outputs < self.__min__)', 'self.__min__', 'outputs'], {}), '(outputs < self.__min__, self.__min__, outputs)\n', (2546, 2593), True, 'import numpy as np\n')] |
import time
import cv2
import numpy as np
import tensorflow.compat.v1 as tf
import os
import sys
import argparse
import matplotlib.pyplot as plt
from sys import platform
from scipy.optimize import curve_fit
import json
from math import pi
from ball import balls
tf.disable_v2_behavior()
##### ball detection function is in the one_ball class
##### get openpose data function is independant function, here i just wrote a read json file function
##### player_list should be updated in the ReID process and here we will use the most updated player_list
class player:
    """Tracked basketball player: image/model-court trajectories, pose
    keypoints and simple shooting statistics, updated frame by frame."""

    def __init__(self, person_id):
        self.id = person_id
        # Trajectories: image-pixel path vs. top-view model-court path.
        self.img_path = []
        self.model_path = []
        self.time_frame = []
        self.current_img_position = np.zeros([1, 2], dtype='float32')
        self.current_model_position = np.zeros([1, 2], dtype='float32')
        self.previous_img_position = np.zeros([1, 2], dtype='float32')
        self.previous_model_position = np.zeros([1, 2], dtype='float32')
        # BUGFIX: was ``self.skip_frames = int`` (the *type* object, not a
        # value), which would break ``skip_frames + 1`` if the counter were
        # read before its first assignment. Start at 0 skipped frames.
        self.skip_frames = 0
        self.statistics = {
            'attempts': 0,
            'made': 0,
            'miss': 0,
            'duration': 0,
            'attempt_time': [],
            'made_position': [],
            'miss_position': [],
            'attempt_position': []
        }
        # Pose data: lists over frames (wrists is a [left, right] pair).
        self.wrists_positions = []
        self.nose_position = []
        self.body_center_position = []
        self.current_wrists_positions = []
        self.current_nose_position = []
        self.current_body_center_position = []
        # Relationship with the ball.
        self.ball_in_hand = False
        self.previous_hold_position = []
        self.previous_hold_model_position = []
        self.shooting_now = True
#### create player and player list using ReID and openpose
# here the code is for test
# Hard-coded two-player roster for testing; in production this list is
# built and kept up to date by the ReID pipeline.
player_A = player('player_0')
player_B = player('player_1')
player_list = [player_A, player_B]
#### read openpose data into player class
#### In reality, the cropped openpose body box will be matched with ReID dictionary,
# find the match one and at the same time update the player class
# or create a new one and then update the player list
# always make sure to run openpose and ReID just one time to same computation.
def distance(x, y):
    """Euclidean distance between two coordinate arrays."""
    displacement = x - y
    return np.linalg.norm(displacement)
def read_json_with_video(frame_index, file_path, court_class):
    """Read one frame's OpenPose JSON and update the global ``player_list``.

    For every detected person with a known ``person_id``, computes the feet /
    wrist / nose / body-centre positions, keeps only players whose feet lie
    inside the court polygon, maps the feet to model-court coordinates via the
    homography, and appends the new positions to that player's trajectories
    (rejecting implausible jumps of >= 80 px per frame).

    Parameters:
        frame_index (int): frame number used to build the JSON file name.
        court_class: object exposing ``img_corners`` (quadrilateral corner
            array) and ``H`` (image-to-model homography matrix).
        file_path (str): directory containing the per-frame JSON files.
    """
    global player_list
    # calculate the angle between three points abc
    def calculate_angle(a, b, c):
        ba = a - b
        bc = c - b
        cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
        angle = np.arccos(cosine_angle)
        return angle
    # Point-in-quadrilateral test: the angles subtended by the four edges at
    # an interior point sum to 2*pi (a 5% tolerance band is allowed).
    def determine_if_p_in_quanlilateral(point_array, p):
        angle = 0
        for i in range(point_array.shape[0]):
            if i < 3:
                angle = angle + calculate_angle(point_array[i, :], p, point_array[i + 1, :])
            else:
                angle = angle + calculate_angle(point_array[i, :], p, point_array[0, :])
        lower_bound = 2 * pi * 0.95
        upper_bound = 2 * pi * 1.05
        if (angle > lower_bound) & (angle < upper_bound):
            return True
        else:
            return False
    """
    // {0, "Nose"},
    // {1, "Neck"},
    // {2, "RShoulder"},
    // {3, "RElbow"},
    // {4, "RWrist"},
    // {5, "LShoulder"},
    // {6, "LElbow"},
    // {7, "LWrist"},
    // {8, "MidHip"},
    // {9, "RHip"},
    // {10, "RKnee"},
    // {11, "RAnkle"},
    // {12, "LHip"},
    // {13, "LKnee"},
    // {14, "LAnkle"},
    // {15, "REye"},
    // {16, "LEye"},
    // {17, "REar"},
    // {18, "LEar"},
    // {19, "LBigToe"},
    // {20, "LSmallToe"},
    // {21, "LHeel"},
    // {22, "RBigToe"},
    // {23, "RSmallToe"},
    // {24, "RHeel"},
    // {25, "Background"}
    """
    # Flat-array indices into the (x, y, confidence) keypoint triplets; see
    # the BODY_25 keypoint map above (feet = both ankles and both heels).
    total_frames = 2000
    foot_x_index = [33, 42, 57, 66]
    foot_y_index = [34, 43, 58, 67]
    foot_probability_index = [35, 44, 59, 68]
    left_wrist_xy_index = [12, 13]
    right_wrist_xy_index = [21, 22]
    head_xy_index = [0, 1]
    body_x_index = [3, 24]
    body_y_index = [4, 25]
    counter_json = 0
    time_frame_list = []
    person_list = []
    # generate the json file name
    # (zero-padded: the frame index replaces the tail of the fixed prefix)
    frame_index_str = str(frame_index)
    # part_name = 'v4_000000000000'
    part_name = 'img_0000'
    json_filepath = file_path + "/" + part_name[:-len(frame_index_str)] + frame_index_str + '.json'
    with open(json_filepath) as f:
        person_dict = json.load(f)
    # people = person_dict
    people = person_dict
    # using person_id to get the right person trajectory
    # using person_id to get the right person trajectory
    # using person_id to get the right person trajectory
    if len(people) == 0:
        pass
    else:
        for person in people:
            # find the right player
            this_player_index = next((index for index, player in enumerate(player_list) if player.id == person["person_id"]), None)
            # print(this_player_index)
            if this_player_index == None:
                print("No this player")
            else:
                # print(player_list[this_player_index])
                # NOTE(review): assumes "pose_keypoints_2d" is a list of
                # per-keypoint [x, y, conf] triplets -- flattened here.
                pose_keypoints_2d = person["pose_keypoints_2d"]
                flat_pose_list = [item for sublist in pose_keypoints_2d for item in sublist]
                # feet (mean over ankles and heels)
                feet_position_x = np.mean([flat_pose_list[i] for i in foot_x_index])
                feet_position_y = np.mean([flat_pose_list[j] for j in foot_y_index])
                # feet_position_x = np.mean([pose_keypoints_2d[i] for i in foot_x_index])
                # feet_position_y = np.mean([pose_keypoints_2d[j] for j in foot_y_index])
                feet_position = np.array([[feet_position_x, feet_position_y]], dtype='float32')
                # wrist
                left_wrist_position = np.array([flat_pose_list[i] for i in left_wrist_xy_index])
                right_wrist_position = np.array([flat_pose_list[i] for i in right_wrist_xy_index])
                wrists_positions = [left_wrist_position, right_wrist_position]
                # nose
                nose_position = np.array([flat_pose_list[i] for i in head_xy_index])
                # body (mean of right elbow x / right heel x and their ys)
                body_position_x = np.mean([flat_pose_list[i] for i in body_x_index])
                body_position_y = np.mean([flat_pose_list[j] for j in body_y_index])
                body_position = np.array([[body_position_x, body_position_y]], dtype='float32')
                # print(feet_position[0,:])
                # condition, the feet position should be inside the court
                if determine_if_p_in_quanlilateral(court_class.img_corners, feet_position[0,:]):
                    # print("inside the court")
                    # transformed to model coordinates:
                    feet_image_positions = feet_position[:, np.newaxis, :]
                    # finally, get the mapping
                    feet_model_position = cv2.perspectiveTransform(feet_image_positions, court_class.H)
                    # print(feet_model_position[0, 0, :])
                    # player_list[this_player_index].img_path.append(feet_position[0, :])
                    # player_list[this_player_index].model_path.append(feet_model_position[0, 0, :])
                    # player_list[this_player_index].time_frame.append(frame_index)
                    # Per-frame displacement, averaged over any skipped frames.
                    if len(player_list[this_player_index].time_frame) == 0:
                        player_list[this_player_index].previous_img_position = feet_position[0,:]
                        dist = 0
                    else:
                        dist = distance(feet_position[0,:], player_list[this_player_index].previous_img_position) / (player_list[this_player_index].skip_frames + 1)
                    # print(dist)
                    # Accept the detection only for plausible motion
                    # (< 80 px per frame); otherwise repeat the last position.
                    if dist < 80:
                        # print("added")
                        # update previous_feet_position
                        player_list[this_player_index].previous_img_position = player_list[this_player_index].current_img_position
                        player_list[this_player_index].previous_model_position = player_list[this_player_index].current_model_position
                        # update the player's positions
                        player_list[this_player_index].img_path.append(feet_position[0,:])
                        player_list[this_player_index].model_path.append(feet_model_position[0, 0, :])
                        player_list[this_player_index].time_frame.append(frame_index)
                        player_list[this_player_index].skip_frames = 0
                        player_list[this_player_index].current_img_position = feet_position[0,:]
                        player_list[this_player_index].current_model_position = feet_model_position[0, 0, :]
                        # updata current pose position
                        player_list[this_player_index].current_wrists_positions = wrists_positions
                        player_list[this_player_index].current_nose_position = nose_position
                        player_list[this_player_index].current_body_center_position= body_position
                        player_list[this_player_index].wrists_positions.append(wrists_positions)
                        player_list[this_player_index].nose_position.append(nose_position)
                        player_list[this_player_index].body_center_position.append(body_position)
                    else:
                        player_list[this_player_index].skip_frames = player_list[this_player_index].skip_frames + 1
                        # append the same value as before to path
                        player_list[this_player_index].img_path.append(player_list[this_player_index].previous_img_position)
                        player_list[this_player_index].model_path.append(player_list[this_player_index].previous_model_position)
                        player_list[this_player_index].time_frame.append(frame_index)
                        # append current pose position to path
                        player_list[this_player_index].wrists_positions.append(player_list[this_player_index].current_wrists_positions)
                        player_list[this_player_index].nose_position.append(player_list[this_player_index].current_nose_position)
                        player_list[this_player_index].body_center_position.append(player_list[this_player_index].current_body_center_position)
                        # # update previous_feet_position
                        # player_list[this_player_index].current_img_position = player_list[this_player_index].previous_img_position
                        # player_list[this_player_index].previous_img_position = player_list[this_player_index].previous_img_position
                        # player_list[this_player_index].previous_model_position = player_list[this_player_index].previous_model_position
                counter_json = counter_json + 1
#### use player list and one ball data to judge the gestures
# assume only one ball during the game
# # pose data
# self.wrists_positions = []
# self.nose_position = []
# self.body_center_position = []
#
# # relationship with ball
# self.ball_in_hand = False
# self.previous_hold_position = []
def match_player_with_ball(frame, trace, balls):
    """Decide which player holds the ball, record shot attempts, and annotate.

    Parameters
    ----------
    frame : image (BGR) annotated with live text for the current frame.
    trace : image (BGR) accumulating trajectory/shot annotations.
    balls : ball tracker; provides ``positions_at_frame`` (list of points),
            ``made_or_not_at_frame`` and ``missing_or_not_at_frame`` flags.

    Returns
    -------
    (combined, trace) where ``combined`` is ``frame`` and ``trace`` stacked
    side by side.

    Mutates the module-level ``player_list``: each player's ``ball_in_hand``,
    ``shooting_now``, ``previous_hold_position`` and ``statistics``.
    """
    global player_list
    #### Who is holding the ball?
    # The holder is the player whose wrist is closest to any detected ball
    # position, provided that distance is below the possession threshold.
    # If no ball is detected (or nobody is close), ball_in_hand stays as-is.
    min_ball_hand_distance = 2000  # larger than any plausible hand-ball distance (px)
    ball_player_ID = None          # sentinel: no candidate holder found yet
    if len(balls.positions_at_frame) != 0:
        for one_ball_position in balls.positions_at_frame:
            for one_player in player_list:
                # Reset the per-frame shooting flag.
                one_player.shooting_now = False
                for one_wrist_position in one_player.current_wrists_positions:
                    dist_hand_ball = distance(one_ball_position, one_wrist_position)
                    if dist_hand_ball < min_ball_hand_distance:
                        min_ball_hand_distance = dist_hand_ball
                        ball_player_ID = one_player.id
        # A wrist within 20 px of the ball counts as possession; update all
        # players' ball_in_hand flags accordingly.
        if min_ball_hand_distance < 20:
            for one_player in player_list:
                if one_player.ball_in_hand == False and one_player.id == ball_player_ID:
                    # Player just gained possession.
                    one_player.ball_in_hand = True
                    one_player.previous_hold_position = one_player.current_img_position
                    cv2.putText(frame, str("{} is holding the ball".format(one_player.id)), (50, 50),
                                cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 3)
                elif one_player.ball_in_hand == True and one_player.id != ball_player_ID:
                    # Player just lost possession; remember where it was lost.
                    one_player.previous_hold_position = one_player.previous_img_position
                    one_player.ball_in_hand = False
                    cv2.putText(frame, str("{} stole the ball".format(one_player.id)), (50, 50),
                                cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 3)
                elif one_player.ball_in_hand == True and one_player.id == ball_player_ID:
                    # Continued possession; keep tracking the hold position.
                    one_player.previous_hold_position = one_player.current_img_position
                    cv2.putText(frame, str("* {} is holding the ball".format(one_player.id)), (50, 50),
                                cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 3)
                else:
                    one_player.ball_in_hand = False
    #### Did a shot just resolve? Credit the player currently holding the ball
    # and record the shot location (previous_hold_position).
    if balls.made_or_not_at_frame == True:
        this_player_index = next((index for index, player in enumerate(player_list)
                                  if player.ball_in_hand == True), None)
        # Guard BEFORE indexing: previously player_list[None] raised TypeError
        # when no player held the ball at the moment of the make.
        if this_player_index != None:
            player_list[this_player_index].shooting_now = True
            stats = player_list[this_player_index].statistics
            pos = player_list[this_player_index].previous_hold_position
            stats['made_position'].append(pos)
            stats['attempts'] += 1
            stats['made'] += 1
            stats['attempt_position'].append(pos)
            # cv2 drawing needs integer pixel coordinates.
            x, y = int(pos[0]), int(pos[1])
            cv2.putText(frame, "Shot from here!", (x + 50, y),
                        cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 3)
            cv2.putText(trace, "Shot from here!", (x + 50, y),
                        cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 3)
            cv2.circle(img=frame, center=(x, y), radius=3, color=(0, 255, 255), thickness=3)
            cv2.circle(img=trace, center=(x, y), radius=3, color=(0, 255, 255), thickness=3)
    elif balls.missing_or_not_at_frame == True:
        this_player_index = next((index for index, player in enumerate(player_list)
                                  if player.ball_in_hand == True), None)
        # Same None guard as the made branch (was a crash here too).
        if this_player_index != None:
            player_list[this_player_index].shooting_now = True
            stats = player_list[this_player_index].statistics
            pos = player_list[this_player_index].previous_hold_position
            stats['miss_position'].append(pos)
            stats['attempts'] += 1
            stats['miss'] += 1
            stats['attempt_position'].append(pos)
            # int() added for consistency with the made branch: cv2 rejects
            # float coordinates.
            x, y = int(pos[0]), int(pos[1])
            cv2.putText(frame, "Miss from here!", (x, y),
                        cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 3)
            cv2.putText(trace, "Miss from here!", (x, y),
                        cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 3)
            cv2.circle(img=frame, center=(x, y), radius=3, color=(0, 255, 255), thickness=3)
            cv2.circle(img=trace, center=(x, y), radius=3, color=(0, 255, 255), thickness=3)
    combined = np.concatenate((frame, trace), axis=1)
    return combined, trace
## change all player's ball_in_hand to False
def display_trajectory_on_model(court, balls):
    """Draw each player's position, shot outcomes and movement on the court model.

    Parameters
    ----------
    court : provides ``court_model`` (model image) and
            ``transformed_img_2_model_point`` (image -> model coordinates).
    balls : ball tracker; ``made_or_not_at_frame`` / ``missing_or_not_at_frame``
            flag the current frame's shot outcome.

    Returns
    -------
    The (mutated) court model image.

    Reads the module-level ``player_list``; supports up to two players
    (one colour each).
    """
    global player_list
    colors = [(0, 0, 255), (255, 0, 255)]  # one BGR colour per tracked player
    model_image = court.court_model
    for player, clr in zip(player_list, colors):
        # A position equal to the zero vector means "not detected yet".
        player_model_position = None  # set only when the current position is valid
        if not (player.current_img_position == np.zeros([1, 2], dtype='float32')).all():
            player_model_position = court.transformed_img_2_model_point(player.current_img_position)
            # Current foot point.
            cv2.circle(img=model_image, center=player_model_position, radius=3,
                       color=clr, thickness=3)
            # Shot annotations (only on the frame where the outcome resolves).
            if player.shooting_now == True and balls.made_or_not_at_frame == True:
                player_made_model_position = court.transformed_img_2_model_point(player.previous_hold_position)
                cv2.circle(img=model_image, center=player_made_model_position, radius=3,
                           color=(0, 255, 255), thickness=3)
                cv2.putText(model_image, str("{} Shot from here and made".format(player.id)),
                            (player_made_model_position[0] + 50, player_made_model_position[1]),
                            cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 3)
            elif player.shooting_now == True and balls.missing_or_not_at_frame == True:
                player_missed_model_position = court.transformed_img_2_model_point(player.previous_hold_position)
                cv2.circle(img=model_image, center=player_missed_model_position, radius=3,
                           color=(0, 255, 255), thickness=3)
                cv2.putText(model_image, str("{} Shot from here and missed".format(player.id)),
                            (player_missed_model_position[0] + 50, player_missed_model_position[1]),
                            cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 255), 3)
        # Connect previous and current positions. Requires BOTH to be valid:
        # previously this referenced player_model_position even when the
        # current position was zero, which raised NameError.
        if player_model_position is not None and \
                not (player.previous_img_position == np.zeros([1, 2], dtype='float32')).all():
            player_model_previous_position = court.transformed_img_2_model_point(player.previous_img_position)
            cv2.line(model_image, player_model_position, player_model_previous_position,
                     color=clr, thickness=1, lineType=8)
    return model_image
### TO DO LATER: input a frame, get ReID and openpose results , update player_list and player_dictionary
### TO DO LATER: input a frame, get ReID and openpose results , update player_list and player_dictionary
### TO DO LATER: input a frame, get ReID and openpose results , update player_list and player_dictionary
def openpose_init():
    """Import pyopenpose for the current platform, start an OpenPose wrapper
    and return a ``(datum, wrapper)`` pair ready for pose estimation."""
    try:
        # The bindings live in a platform-specific directory.
        if platform == "win32":
            sys.path.append('./OpenPose/Release')
            import pyopenpose as op
        else:
            sys.path.append('./OpenPose')
            from Release import pyopenpose as op
    except ImportError as e:
        print('Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?')
        raise e
    # Custom Params (refer to include/openpose/flags.hpp for more parameters)
    wrapper = op.WrapperPython()
    wrapper.configure({"model_folder": "./OpenPose/models"})
    wrapper.start()
    return op.Datum(), wrapper
# datum, opWrapper = openpose_init()
def read_openpose_update_player(frame, datum, opWrapper):
def calculateAngle(a, b, c):
ba = a - b
bc = c - b
cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
angle = np.arccos(cosine_angle)
return round(np.degrees(angle), 2)
def get_openpose_data():
# getting openpose keypoints
datum.cvInputData = frame
opWrapper.emplaceAndPop([datum])
try:
headX, headY, headConf = datum.poseKeypoints[0][0]
handX, handY, handConf = datum.poseKeypoints[0][4]
elbowAngle, kneeAngle, elbowCoord, kneeCoord = getAngleFromDatum(datum)
except:
print("Something went wrong with OpenPose")
headX = 0
headY = 0
handX = 0
handY = 0
elbowAngle = 0
kneeAngle = 0
elbowCoord = np.array([0, 0])
kneeCoord = np.array([0, 0])
def getAngleFromDatum(datum):
hipX, hipY, _ = datum.poseKeypoints[0][9]
kneeX, kneeY, _ = datum.poseKeypoints[0][10]
ankleX, ankleY, _ = datum.poseKeypoints[0][11]
shoulderX, shoulderY, _ = datum.poseKeypoints[0][2]
elbowX, elbowY, _ = datum.poseKeypoints[0][3]
wristX, wristY, _ = datum.poseKeypoints[0][4]
kneeAngle = calculateAngle(np.array([hipX, hipY]), np.array([kneeX, kneeY]), np.array([ankleX, ankleY]))
elbowAngle = calculateAngle(np.array([shoulderX, shoulderY]), np.array([elbowX, elbowY]),
np.array([wristX, wristY]))
elbowCoord = np.array([int(elbowX), int(elbowY)])
kneeCoord = np.array([int(kneeX), int(kneeY)])
return elbowAngle, kneeAngle, elbowCoord, kneeCoord | [
"sys.path.append",
"cv2.line",
"json.load",
"cv2.circle",
"numpy.degrees",
"numpy.zeros",
"Release.pyopenpose.WrapperPython",
"numpy.mean",
"numpy.linalg.norm",
"numpy.array",
"tensorflow.compat.v1.disable_v2_behavior",
"numpy.dot",
"cv2.perspectiveTransform",
"numpy.arccos",
"Release.py... | [((265, 289), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (287, 289), True, 'import tensorflow.compat.v1 as tf\n'), ((2312, 2333), 'numpy.linalg.norm', 'np.linalg.norm', (['(x - y)'], {}), '(x - y)\n', (2326, 2333), True, 'import numpy as np\n'), ((19798, 19836), 'numpy.concatenate', 'np.concatenate', (['(frame, trace)'], {'axis': '(1)'}), '((frame, trace), axis=1)\n', (19812, 19836), True, 'import numpy as np\n'), ((23603, 23621), 'Release.pyopenpose.WrapperPython', 'op.WrapperPython', ([], {}), '()\n', (23619, 23621), True, 'from Release import pyopenpose as op\n'), ((23709, 23719), 'Release.pyopenpose.Datum', 'op.Datum', ([], {}), '()\n', (23717, 23719), True, 'from Release import pyopenpose as op\n'), ((756, 789), 'numpy.zeros', 'np.zeros', (['[1, 2]'], {'dtype': '"""float32"""'}), "([1, 2], dtype='float32')\n", (764, 789), True, 'import numpy as np\n'), ((827, 860), 'numpy.zeros', 'np.zeros', (['[1, 2]'], {'dtype': '"""float32"""'}), "([1, 2], dtype='float32')\n", (835, 860), True, 'import numpy as np\n'), ((897, 930), 'numpy.zeros', 'np.zeros', (['[1, 2]'], {'dtype': '"""float32"""'}), "([1, 2], dtype='float32')\n", (905, 930), True, 'import numpy as np\n'), ((969, 1002), 'numpy.zeros', 'np.zeros', (['[1, 2]'], {'dtype': '"""float32"""'}), "([1, 2], dtype='float32')\n", (977, 1002), True, 'import numpy as np\n'), ((2647, 2670), 'numpy.arccos', 'np.arccos', (['cosine_angle'], {}), '(cosine_angle)\n', (2656, 2670), True, 'import numpy as np\n'), ((4492, 4504), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4501, 4504), False, 'import json\n'), ((24018, 24041), 'numpy.arccos', 'np.arccos', (['cosine_angle'], {}), '(cosine_angle)\n', (24027, 24041), True, 'import numpy as np\n'), ((2572, 2586), 'numpy.dot', 'np.dot', (['ba', 'bc'], {}), '(ba, bc)\n', (2578, 2586), True, 'import numpy as np\n'), ((20429, 20462), 'numpy.zeros', 'np.zeros', (['[1, 2]'], {'dtype': '"""float32"""'}), "([1, 2], 
dtype='float32')\n", (20437, 20462), True, 'import numpy as np\n'), ((20676, 20772), 'cv2.circle', 'cv2.circle', ([], {'img': 'model_image', 'center': 'player_model_position', 'radius': '(3)', 'color': 'clr', 'thickness': '(3)'}), '(img=model_image, center=player_model_position, radius=3, color=\n clr, thickness=3)\n', (20686, 20772), False, 'import cv2\n'), ((23041, 23078), 'sys.path.append', 'sys.path.append', (['"""./OpenPose/Release"""'], {}), "('./OpenPose/Release')\n", (23056, 23078), False, 'import sys\n'), ((23141, 23170), 'sys.path.append', 'sys.path.append', (['"""./OpenPose"""'], {}), "('./OpenPose')\n", (23156, 23170), False, 'import sys\n'), ((23943, 23957), 'numpy.dot', 'np.dot', (['ba', 'bc'], {}), '(ba, bc)\n', (23949, 23957), True, 'import numpy as np\n'), ((24063, 24080), 'numpy.degrees', 'np.degrees', (['angle'], {}), '(angle)\n', (24073, 24080), True, 'import numpy as np\n'), ((25147, 25169), 'numpy.array', 'np.array', (['[hipX, hipY]'], {}), '([hipX, hipY])\n', (25155, 25169), True, 'import numpy as np\n'), ((25171, 25195), 'numpy.array', 'np.array', (['[kneeX, kneeY]'], {}), '([kneeX, kneeY])\n', (25179, 25195), True, 'import numpy as np\n'), ((25197, 25223), 'numpy.array', 'np.array', (['[ankleX, ankleY]'], {}), '([ankleX, ankleY])\n', (25205, 25223), True, 'import numpy as np\n'), ((25261, 25293), 'numpy.array', 'np.array', (['[shoulderX, shoulderY]'], {}), '([shoulderX, shoulderY])\n', (25269, 25293), True, 'import numpy as np\n'), ((25295, 25321), 'numpy.array', 'np.array', (['[elbowX, elbowY]'], {}), '([elbowX, elbowY])\n', (25303, 25321), True, 'import numpy as np\n'), ((25359, 25385), 'numpy.array', 'np.array', (['[wristX, wristY]'], {}), '([wristX, wristY])\n', (25367, 25385), True, 'import numpy as np\n'), ((2590, 2608), 'numpy.linalg.norm', 'np.linalg.norm', (['ba'], {}), '(ba)\n', (2604, 2608), True, 'import numpy as np\n'), ((2611, 2629), 'numpy.linalg.norm', 'np.linalg.norm', (['bc'], {}), '(bc)\n', (2625, 2629), True, 'import 
numpy as np\n'), ((19255, 19455), 'cv2.circle', 'cv2.circle', ([], {'img': 'frame', 'center': '(player_list[this_player_index].previous_hold_position[0], player_list[\n this_player_index].previous_hold_position[1])', 'radius': '(3)', 'color': '(0, 255, 255)', 'thickness': '(3)'}), '(img=frame, center=(player_list[this_player_index].\n previous_hold_position[0], player_list[this_player_index].\n previous_hold_position[1]), radius=3, color=(0, 255, 255), thickness=3)\n', (19265, 19455), False, 'import cv2\n'), ((19523, 19723), 'cv2.circle', 'cv2.circle', ([], {'img': 'trace', 'center': '(player_list[this_player_index].previous_hold_position[0], player_list[\n this_player_index].previous_hold_position[1])', 'radius': '(3)', 'color': '(0, 255, 255)', 'thickness': '(3)'}), '(img=trace, center=(player_list[this_player_index].\n previous_hold_position[0], player_list[this_player_index].\n previous_hold_position[1]), radius=3, color=(0, 255, 255), thickness=3)\n', (19533, 19723), False, 'import cv2\n'), ((21082, 21192), 'cv2.circle', 'cv2.circle', ([], {'img': 'model_image', 'center': 'player_made_model_position', 'radius': '(3)', 'color': '(0, 255, 255)', 'thickness': '(3)'}), '(img=model_image, center=player_made_model_position, radius=3,\n color=(0, 255, 255), thickness=3)\n', (21092, 21192), False, 'import cv2\n'), ((22258, 22291), 'numpy.zeros', 'np.zeros', (['[1, 2]'], {'dtype': '"""float32"""'}), "([1, 2], dtype='float32')\n", (22266, 22291), True, 'import numpy as np\n'), ((22500, 22616), 'cv2.line', 'cv2.line', (['model_image', 'player_model_position', 'player_model_previous_position'], {'color': 'clr', 'thickness': '(1)', 'lineType': '(8)'}), '(model_image, player_model_position, player_model_previous_position,\n color=clr, thickness=1, lineType=8)\n', (22508, 22616), False, 'import cv2\n'), ((23961, 23979), 'numpy.linalg.norm', 'np.linalg.norm', (['ba'], {}), '(ba)\n', (23975, 23979), True, 'import numpy as np\n'), ((23982, 24000), 'numpy.linalg.norm', 
'np.linalg.norm', (['bc'], {}), '(bc)\n', (23996, 24000), True, 'import numpy as np\n'), ((24690, 24706), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (24698, 24706), True, 'import numpy as np\n'), ((24731, 24747), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (24739, 24747), True, 'import numpy as np\n'), ((5467, 5517), 'numpy.mean', 'np.mean', (['[flat_pose_list[i] for i in foot_x_index]'], {}), '([flat_pose_list[i] for i in foot_x_index])\n', (5474, 5517), True, 'import numpy as np\n'), ((5556, 5606), 'numpy.mean', 'np.mean', (['[flat_pose_list[j] for j in foot_y_index]'], {}), '([flat_pose_list[j] for j in foot_y_index])\n', (5563, 5606), True, 'import numpy as np\n'), ((5831, 5894), 'numpy.array', 'np.array', (['[[feet_position_x, feet_position_y]]'], {'dtype': '"""float32"""'}), "([[feet_position_x, feet_position_y]], dtype='float32')\n", (5839, 5894), True, 'import numpy as np\n'), ((5966, 6024), 'numpy.array', 'np.array', (['[flat_pose_list[i] for i in left_wrist_xy_index]'], {}), '([flat_pose_list[i] for i in left_wrist_xy_index])\n', (5974, 6024), True, 'import numpy as np\n'), ((6068, 6127), 'numpy.array', 'np.array', (['[flat_pose_list[i] for i in right_wrist_xy_index]'], {}), '([flat_pose_list[i] for i in right_wrist_xy_index])\n', (6076, 6127), True, 'import numpy as np\n'), ((6275, 6327), 'numpy.array', 'np.array', (['[flat_pose_list[i] for i in head_xy_index]'], {}), '([flat_pose_list[i] for i in head_xy_index])\n', (6283, 6327), True, 'import numpy as np\n'), ((6394, 6444), 'numpy.mean', 'np.mean', (['[flat_pose_list[i] for i in body_x_index]'], {}), '([flat_pose_list[i] for i in body_x_index])\n', (6401, 6444), True, 'import numpy as np\n'), ((6483, 6533), 'numpy.mean', 'np.mean', (['[flat_pose_list[j] for j in body_y_index]'], {}), '([flat_pose_list[j] for j in body_y_index])\n', (6490, 6533), True, 'import numpy as np\n'), ((6570, 6633), 'numpy.array', 'np.array', (['[[body_position_x, body_position_y]]'], {'dtype': 
'"""float32"""'}), "([[body_position_x, body_position_y]], dtype='float32')\n", (6578, 6633), True, 'import numpy as np\n'), ((21748, 21860), 'cv2.circle', 'cv2.circle', ([], {'img': 'model_image', 'center': 'player_missed_model_position', 'radius': '(3)', 'color': '(0, 255, 255)', 'thickness': '(3)'}), '(img=model_image, center=player_missed_model_position, radius=3,\n color=(0, 255, 255), thickness=3)\n', (21758, 21860), False, 'import cv2\n'), ((7155, 7216), 'cv2.perspectiveTransform', 'cv2.perspectiveTransform', (['feet_image_positions', 'court_class.H'], {}), '(feet_image_positions, court_class.H)\n', (7179, 7216), False, 'import cv2\n')] |
#!/usr/bin/env python3
import sys
import os
import itertools
import numpy as np
from scipy import signal, constants, fftpack
import pyaudio
from pydub import AudioSegment, exceptions
from pydub.utils import make_chunks
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5 import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from PyQt5.QtCore import Qt, QThread, pyqtSignal, QMutex, QWaitCondition
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QIntValidator
PyAudio = pyaudio.PyAudio
# Development switches and non-exposed settings
MANUAL_CONVOLVE = False # Use manual convolution instead of scipy.signal.fftconvolve
MANUAL_FILTER = False # Use manual filter instead of scipy.signal.lfilter
MANUAL_FILTER_TEST = False # Run test on manual filter if enabled (compare with scipy.signal.lfilter)
class MainWindow(QWidget):
sig_sound_play_at = pyqtSignal(float)
sig_sound_pause = pyqtSignal()
sig_sound_stop = pyqtSignal()
sig_record_stop = pyqtSignal()
def __init__(self):
super().__init__()
self.sound = None
self.signal = None
self.plotbackground = None
self.playing = False
self.sound_paused = False
self.sound_start_at = 0
self.recording = False
self.doppler = False # Doppler simulation
self.initUI()
    def initUI(self):
        """Build the whole window: file row on top, plot canvas in the middle,
        effect/record row and analysis/filter row at the bottom, then show it.

        Creates every widget attribute used by the rest of the class
        (btn_*, cb_*, txt_file, figure, plotnav, ...) and wires their signals.
        """
        spacer = QSpacerItem(50, 0, QSizePolicy.Minimum)
        # File selector
        lbl_file = QLabel("File:")
        self.txt_file = QLineEdit()
        self.txt_file.setPlaceholderText("Select file ...")
        btn_file = QPushButton("Select")
        btn_file.clicked.connect(self.show_open_dialog)
        # Save
        self.btn_save = QPushButton("Save")
        self.btn_save.setDisabled(True)
        self.btn_save.clicked.connect(self.show_save_dialog)
        # Audio controls
        self.btn_pause = QPushButton("Pause")
        self.btn_pause.setDisabled(True)
        self.btn_pause.clicked.connect(self.sound_pause)
        # NOTE(review): presumably used by the playback thread to implement
        # pause/resume — confirm against sound_play/sound_pause.
        self.sound_mutex = QMutex()
        self.sound_pause_cond = QWaitCondition()
        self.btn_play = QPushButton("Play")
        self.btn_play.setDisabled(True)
        self.btn_play.clicked.connect(self.sound_play)
        self.btn_stop = QPushButton("Stop")
        self.btn_stop.setDisabled(True)
        self.btn_stop.clicked.connect(self.sound_stop)
        # Doppler Shift simulation
        self.cb_source_speed = QComboBox()
        self.cb_source_speed.setToolTip("Source speed")
        self.cb_source_speed.addItems(["20 km/h", "50 km/h", "100 km/h", "150 km/h", "200 km/h"])
        self.cb_source_speed.setCurrentIndex(2)
        self.source_speeds = [5.56, 13.89, 27.78, 41.67, 55.56]  # Same indexes as text above (in m/s)
        self.btn_doppler = QPushButton("Simulate Doppler")
        self.btn_doppler.setToolTip("Apply simple Doppler Shift simulation")
        self.btn_doppler.setDisabled(True)
        self.btn_doppler.clicked.connect(self.doppler_simulate)
        # Effects: populate the combo box from bundled impulse-response files.
        self.cb_effect = QComboBox()
        self.cb_effect.setToolTip("Preset effects")
        self.cb_effect.setMaximumWidth(150)
        self.effects = []
        for root, dirs, files in os.walk("resources/impulses"):
            for file in files:
                if file.endswith(".wav") or file.endswith(".mp3"):
                    # Display name without extension; keep the full path in
                    # self.effects at the matching combo-box index.
                    self.cb_effect.addItem(file.split(".")[0])
                    self.effects.append(os.path.join(root, file))
        self.btn_effect = QPushButton("Apply Effect")
        self.btn_effect.clicked.connect(self.effect_apply)
        self.btn_effect_load = QPushButton("Load Effect")
        self.btn_effect_load.clicked.connect(self.effect_load)
        # Recording
        self.cb_sample_rate = QComboBox()
        self.cb_sample_rate.setToolTip("Sampling rate")
        self.cb_sample_rate.addItems(["8.000 Hz", "11.025 Hz", "22.050 Hz", "44.100 Hz"])
        self.cb_sample_rate.setCurrentIndex(3)
        self.sampling_rates = [8000, 11025, 22050, 44100]  # Same indexes as text above
        self.btn_record = QPushButton("Record")
        self.btn_record.setMinimumWidth(100)
        self.btn_record.clicked.connect(self.record)
        self.cb_bit_depth = QComboBox()
        self.cb_bit_depth.setToolTip("Bit depth")
        self.cb_bit_depth.addItems(["8 b", "16 b"])
        self.cb_bit_depth.setCurrentIndex(1)
        self.bit_depths = [pyaudio.paUInt8, pyaudio.paInt16]  # Same indexes as text above
        # Analysis (ST-DFT)
        self.stdft_window = QLineEdit()
        self.stdft_window.setText("256")
        self.stdft_window.setToolTip("Window length")
        self.stdft_window.setMaximumWidth(35)
        self.stdft_window.setValidator(QIntValidator(0, 2147483647))
        self.stdft_noverlap = QLineEdit()
        self.stdft_noverlap.setText("128")
        self.stdft_noverlap.setMaximumWidth(35)
        self.stdft_noverlap.setValidator(QIntValidator(0, 2147483647))
        self.stdft_noverlap.setToolTip("Overlap between windows (must be smaller than window length)")
        self.btn_analyse = QPushButton("Analyse")
        self.btn_analyse.setToolTip("Perform Short Time Discrete Fourier Transform analysis (spectrogram)")
        self.btn_analyse.setDisabled(True)
        self.btn_analyse.clicked.connect(lambda: self.analyse())
        # Filter
        self.filter_order = QLineEdit()
        self.filter_order.setText("5")
        self.filter_order.setToolTip("Filter order")
        self.filter_order.setMaximumWidth(25)
        self.filter_order.setValidator(QIntValidator(0, 100))
        self.filter_cut_low = QLineEdit()
        self.filter_cut_low.setText("500")
        self.filter_cut_low.setToolTip("Low critical frequency")
        self.filter_cut_low.setMaximumWidth(35)
        self.filter_cut_low.setValidator(QIntValidator(0, 2147483647))
        self.filter_cut_high = QLineEdit()
        self.filter_cut_high.setText("5000")
        self.filter_cut_high.setToolTip("High critical frequency")
        self.filter_cut_high.setMaximumWidth(35)
        self.filter_cut_high.setValidator(QIntValidator(0, 2147483647))
        self.btn_filter = QPushButton("Filter")
        self.btn_filter.setToolTip("Filter frequencies")
        self.btn_filter.setDisabled(True)
        self.btn_filter.clicked.connect(self.filter)
        # Graph space
        self.figure = Figure()
        FigureCanvas(self.figure)
        self.figure.canvas.setMinimumHeight(400)
        self.figure.canvas.mpl_connect("button_press_event", self.on_plot_click)
        self.figure.canvas.mpl_connect("motion_notify_event", self.on_plot_over)
        # Graph toolbar
        self.plotnav = NavigationToolbar(self.figure.canvas, self.figure.canvas)
        self.plotnav.setStyleSheet("QToolBar { border: 0px }")
        self.plotnav.setOrientation(Qt.Vertical)
        # Layout
        hbox_top = QHBoxLayout()
        hbox_top.addWidget(lbl_file)
        hbox_top.addWidget(self.txt_file)
        hbox_top.addWidget(btn_file)
        hbox_top.addWidget(self.btn_save)
        hbox_top.addStretch()
        hbox_top.addSpacerItem(spacer)
        hbox_top.addWidget(self.btn_pause)
        hbox_top.addWidget(self.btn_play)
        hbox_top.addWidget(self.btn_stop)
        hbox_bot = QHBoxLayout()
        hbox_bot.addWidget(self.cb_source_speed)
        hbox_bot.addWidget(self.btn_doppler)
        hbox_bot.addStretch()
        hbox_bot.addSpacerItem(spacer)
        hbox_bot.addWidget(self.cb_effect)
        hbox_bot.addWidget(self.btn_effect)
        hbox_bot.addWidget(self.btn_effect_load)
        hbox_bot.addStretch()
        hbox_bot.addSpacerItem(spacer)
        hbox_bot.addWidget(self.cb_sample_rate)
        hbox_bot.addWidget(self.cb_bit_depth)
        hbox_bot.addWidget(self.btn_record)
        hbox_bot2 = QHBoxLayout()
        hbox_bot2.addWidget(self.stdft_window)
        hbox_bot2.addWidget(self.stdft_noverlap)
        hbox_bot2.addWidget(self.btn_analyse)
        hbox_bot2.addStretch()
        hbox_bot2.addWidget(self.filter_order)
        hbox_bot2.addWidget(self.filter_cut_low)
        hbox_bot2.addWidget(self.filter_cut_high)
        hbox_bot2.addWidget(self.btn_filter)
        vbox = QVBoxLayout()
        vbox.addLayout(hbox_top)
        vbox.addWidget(self.figure.canvas)
        vbox.addLayout(hbox_bot)
        vbox.addLayout(hbox_bot2)
        # Window
        self.setLayout(vbox)
        self.setGeometry(300, 300, 1000, 500)
        self.setWindowTitle("Signal Processor - Sound")
        self.show()
# Overriden resize event
def resizeEvent(self, resizeEvent):
if self.is_sound_loaded():
self.on_plot_change(None)
self.plotnav.move(self.width() - 55, 0)
def update_ui(self):
block_general = self.playing or self.sound_paused or self.recording
self.btn_save.setDisabled(not self.is_sound_loaded())
self.btn_pause.setDisabled(not self.playing)
self.btn_pause.setText("Resume" if self.sound_paused else "Pause")
self.btn_play.setDisabled(self.playing or self.recording)
self.btn_stop.setDisabled(not self.playing or self.recording)
self.plotnav.setDisabled(self.playing and not self.sound_paused)
self.btn_doppler.setDisabled(not self.is_sound_loaded() or self.doppler)
self.btn_effect.setDisabled(block_general)
self.btn_effect_load.setDisabled(block_general)
self.btn_record.setDisabled(self.playing or self.sound_paused)
self.btn_record.setText("Stop Recording" if self.recording else "Record")
self.btn_analyse.setDisabled(block_general)
self.btn_filter.setDisabled(block_general)
def show_open_dialog(self):
fname = QFileDialog.getOpenFileName(self, "Open file", filter="Audio (*.wav *.mp3)")
if fname[0] and self.load_sound(fname[0]):
self.txt_file.setText(fname[0])
def show_save_dialog(self):
fname = QFileDialog.getSaveFileName(self, "Save file", filter="Audio (*.wav *.mp3)")
if fname[0] and self.is_sound_loaded():
ext = fname[0].rsplit(".", 1)[-1]
try:
self.sound.export(fname[0], format=ext)
except exceptions.CouldntEncodeError:
print("Failed to save signal!")
else:
self.txt_file.setText(fname[0])
def load_sound(self, file):
self.sound_stop()
self.doppler = False
try:
self.sound = AudioSegment.from_file(file)
self.signal = np.array(self.sound.get_array_of_samples())
except exceptions.CouldntDecodeError:
print("Failed to load sound!")
self.sound = None
self.signal = None
return False
else:
self.update_ui()
self.plot(self.signal, self.sound)
return True
def is_sound_loaded(self):
return self.sound is not None and self.signal is not None
def load_signal(self, data, sample_width, rate, channels):
self.sound = AudioSegment(
data=data,
sample_width=sample_width, # 3 (24-bit) not supported by pydub
frame_rate=rate,
channels=channels)
self.signal = np.array(self.sound.get_array_of_samples())
self.update_ui()
self.plot(self.signal, self.sound)
def effect_load(self):
feffect = self.effects[self.cb_effect.currentIndex()]
if self.load_sound(feffect):
self.txt_file.setText(feffect)
self.plot(self.signal, self.sound)
def effect_apply(self):
if not self.is_sound_loaded():
print("Failed to apply effect! No sound loaded!")
return
if self.sound.channels > 2:
print("Failed to apply effect! Sound has more than 2 channels!")
return
feffect = self.effects[self.cb_effect.currentIndex()]
try:
effect_sound = AudioSegment.from_file(feffect)
effect_signal = np.array(effect_sound.get_array_of_samples())
except exceptions.CouldntDecodeError:
print("Failed to load effect!")
if effect_sound.frame_rate != self.sound.frame_rate:
print("Failed to apply effect! Effect rate ({}) not same as sound rate ({})!"
.format(effect_sound.frame_rate, self.sound.frame_rate))
return
# Create stereo in case original sound is mono
sound_channels = self.sound.channels
if self.sound.channels < 2:
self.sound = AudioSegment.from_mono_audiosegments(self.sound, self.sound)
self.signal = np.array(self.sound.get_array_of_samples())
# Convolve signals using fast fourier transform (into stereo, each channel separately)
step = effect_sound.channels
left = None
right = None
for i in range(0, sound_channels):
if MANUAL_CONVOLVE:
# Manual convolve
n = fftpack.helper.next_fast_len(len(self.signal[i::step]) + len(effect_signal[i::step]) - 1)
x = np.fft.rfft(np.append(self.signal[i::step], np.zeros(len(effect_signal[i::step]) - 1)), n)
y = np.fft.rfft(np.append(effect_signal[i::step], np.zeros(len(self.signal[i::step]) - 1)), n)
ch = np.fft.irfft(x * y)
else:
# SciPy fftconvolve
ch = signal.fftconvolve(self.signal[i::step], effect_signal[i::step])
# Normalize and amplify
ch = np.array(ch / np.linalg.norm(ch))
ch = np.multiply(ch, 65535) # float to int
volume_diff = np.max(self.signal[i::step]) / np.max(ch)
ch = np.multiply(ch, volume_diff)
if i == 0:
left = ch
if sound_channels == 1:
right = left # Mono input, copy channel
else:
right = ch
# Join channels back together and load signal
final = np.empty(left.size + right.size, np.int16)
final[0::step] = left.astype(np.int16)
final[1::step] = right.astype(np.int16)
self.load_signal(b''.join(final), 2, self.sound.frame_rate, effect_sound.channels)
    def doppler_simulate(self):
        """Simulate a Doppler fly-by of the loaded sound.

        The first half of the sound is pitched up (approaching source) and the
        second half down (receding source) by relabeling the frame rate, the
        halves are crossfaded back together, and the volume is attenuated with
        1/distance from the pass-by point.  The modified signal is then
        reloaded and plotted with an extra Doppler visualization subplot.
        """
        self.doppler = True
        self.update_ui()
        speed_source = self.source_speeds[self.cb_source_speed.currentIndex()]
        # Frequency manipulation: classic Doppler formulas for an approaching
        # (freq_in) and receding (freq_out) source at speed_source m/s.
        speed_sound = constants.speed_of_sound
        freq_in = speed_sound / (speed_sound - speed_source) * self.sound.frame_rate
        freq_out = speed_sound / (speed_sound + speed_source) * self.sound.frame_rate
        # Re-wrap each half with the shifted frame rate (resample-by-relabel).
        half1 = self.sound[0:int(len(self.sound) * 0.5)]
        half1 = AudioSegment(
            data=half1.get_array_of_samples(),
            sample_width=self.sound.sample_width,
            frame_rate=int(freq_in),
            channels=self.sound.channels)
        half2 = self.sound[int(len(self.sound) * 0.5):]
        half2 = AudioSegment(
            data=half2.get_array_of_samples(),
            sample_width=self.sound.sample_width,
            frame_rate=int(freq_out),
            channels=self.sound.channels)
        self.sound = half1.append(half2, crossfade=100)
        self.signal = np.array(self.sound.get_array_of_samples())
        # Volume manipulation (decrease with distance)
        half_time = half1.duration_seconds
        dist_max = speed_source * half_time
        print("Maximum distance: {} m".format(dist_max))
        distances = np.linspace(
            0.0, speed_source * (len(self.signal) / self.sound.frame_rate / self.sound.channels),
            num=int(len(self.signal) / self.sound.channels))  # Plot distances
        distances -= dist_max  # Take away maximum distance to get relative from center
        distances = np.absolute(distances)  # Make positive in both directions (_/^\_)
        distances = np.maximum(distances, 1.0)  # Prevent center clipping
        new_volumes = np.power(distances, -1.0)  # Scale volume with distance
        for i in range(0, self.sound.channels):  # Apply to all channels
            self.signal[i::self.sound.channels] = np.multiply(self.signal[i::self.sound.channels], new_volumes)
        self.signal = self.signal.astype(np.int16)
        # Load and plot new signal with doppler and visualization subplot.
        # NOTE(review): b''.join over an int16 array relies on numpy scalars
        # exposing the buffer protocol — confirm on the targeted numpy version.
        self.load_signal(b''.join(self.signal), self.sound.sample_width, self.sound.frame_rate, self.sound.channels)
        self.plot(self.signal, self.sound, doppler_max=half_time)
def analyse(self, filter_w=[], filter_h=[], filter_cl=-1.0, filter_ch=-1.0):
if not self.stdft_window.text() or not self.stdft_noverlap.text():
print("Failed to analyse! Invalid input (must be integers)!")
return
window = int(self.stdft_window.text())
noverlap = int(self.stdft_noverlap.text())
if window <= 0 or noverlap <= 0:
print("Failed to analyse! Invalid input (must be integers greater than 0)!")
return
if noverlap >= window:
print("Failed to analyse! Overlap must be less than window size!")
return
if self.sound.channels > 1:
print("Warning! Analysing only first channel!")
self.plot(self.signal, self.sound, stdft_window=window, stdft_noverlap=noverlap,
filter_w=filter_w, filter_h=filter_h, filter_cl=filter_cl, filter_ch=filter_ch)
    def filter(self):
        """Apply a Butterworth band-stop filter to the loaded signal.

        Reads order and low/high cutoff frequencies (Hz) from the UI, designs
        the filter with scipy, filters every channel, reloads the signal and
        re-runs the analysis with the filter response overlaid.

        NOTE(review): the method name shadows the builtin ``filter``; kept
        as-is because it is wired up as a UI callback elsewhere.
        """
        if not self.filter_order.text() or not self.filter_cut_low.text() or not self.filter_cut_high.text():
            print("Failed to filter! Invalid input (must be integers)!")
            return
        order = int(self.filter_order.text())
        cut_low = int(self.filter_cut_low.text())
        cut_high = int(self.filter_cut_high.text())
        if order < 0 or cut_low < 0 or cut_high < 0:
            print("Failed to filter! Invalid input (must be integers greater or equal 0)!")
            return
        # Normalize critical frequencies (Nyquist as 1)
        cut_low = cut_low / (self.sound.frame_rate * 0.5)
        cut_high = cut_high / (self.sound.frame_rate * 0.5)
        # Design filter
        b, a = signal.butter(order, [cut_low, cut_high], "bandstop")
        w, h = signal.freqz(b, a)
        # Filter each channel
        for i in range(0, self.sound.channels):
            x = np.array(self.signal[i::self.sound.channels])  # Original
            y = np.zeros(len(x))  # Filtered
            if MANUAL_FILTER:
                # Manual filter: direct-form IIR difference equation
                # y[n] = sum(b[k]*x[n-k]) - sum(a[k]*y[n-k]) with a[0] == 1
                for n in range(len(x)):
                    y[n] = 0
                    for k in range(len(b)):
                        if n - k >= 0:
                            y[n] = y[n] + b[k] * x[n - k]
                    for k in range(1, len(a)):
                        if n - k >= 0:
                            y[n] = y[n] - a[k] * y[n - k]
                if MANUAL_FILTER_TEST:
                    # Cross-check the manual result against scipy
                    y_sp = signal.lfilter(b, a, x)
                    if np.allclose(y, y_sp, rtol=1e-02, atol=1e-08):
                        print("Manual filter test passed!")
                    else:
                        print("Manual filter test failed!")
            else:
                # SciPy lfilter
                y = signal.lfilter(b, a, x)
            self.signal[i::self.sound.channels] = y
        # Load and analyse filtered signal
        self.load_signal(b''.join(self.signal), self.sound.sample_width, self.sound.frame_rate, self.sound.channels)
        self.analyse(filter_w=w, filter_h=h, filter_cl=cut_low, filter_ch=cut_high)
    def plot(self, sig, sound, doppler_max=-1.0, stdft_window=-1, stdft_noverlap=-1,
             filter_w=[], filter_h=[], filter_cl=-1.0, filter_ch=-1.0):
        """Rebuild the whole figure for signal ``sig`` of pydub sound ``sound``.

        Sentinel defaults (-1.0 / -1 / empty lists) mean "subplot absent":
        ``doppler_max`` adds a Doppler visualization row, ``stdft_*`` add a
        spectrogram row, and the ``filter_*`` arguments add a filter frequency
        response row.  Also resets the marker-line state and captures the
        background bitmap used for fast blitting in plot_update().

        NOTE(review): mutable defaults ``filter_w=[]``/``filter_h=[]`` are
        shared between calls but only ever read here, so they are harmless.
        """
        self.figure.clear()
        self.subplots = []
        self.lclick = []
        self.lclick_pos = 0
        self.lover = []
        self.lover_pos = 0
        self.lframe = []
        self.lframe_pos = 0
        self.sound_start_at = 0
        # Which optional rows are requested (bools count as 0/1 below)
        doppler = doppler_max != -1.0
        analysis = stdft_window != -1 and stdft_noverlap != -1
        filter_fr = len(filter_w) != 0 and len(filter_h) != 0 and filter_cl != -1.0 and filter_ch != -1.0
        subplots = sound.channels + doppler + analysis + filter_fr + (1 if filter_fr else 0)
        # X axis as time in seconds
        time = np.linspace(0, sound.duration_seconds, num=len(sig))
        for i in range(0, sound.channels):
            ax = self.figure.add_subplot(subplots, 1, i + 1)
            # Plot current channel, slicing it away
            ax.plot(time[i::sound.channels], sig[i::sound.channels])  # [samp1L, samp1R, samp2L, samp2R]
            ax.margins(0)
            # Hide X axis on all but last channel
            if i + 1 < subplots - filter_fr:
                ax.get_xaxis().set_visible(False)
            # Display Y label somewhere in the middle
            if i == max(int(sound.channels / 2) - 1, 0):
                ax.set_ylabel("Amplitude")
            self.subplots.append(ax)
        if doppler:
            # A 1-D strip with a marker at the fly-by time
            ax = self.figure.add_subplot(subplots, 1, sound.channels + analysis + 1)
            ax.margins(0)
            ax.plot(time, sig * [0])
            ax.axhline(0, linewidth=2, color="black")
            ax.axvline(doppler_max, ymin=0.25, ymax=0.75, linewidth=2, color="blue")
            ax.set_ylim([-1, 1])
            ax.get_yaxis().set_ticks([])
            ax.set_ylabel("Doppler Sim")
            self.subplots.append(ax)
        if analysis:
            # Spectrogram of the first channel only
            ax = self.figure.add_subplot(subplots, 1, sound.channels + doppler + 1)
            ax.margins(0)
            ax.specgram(sig[0::sound.channels], Fs=self.sound.frame_rate,
                        NFFT=stdft_window, noverlap=stdft_noverlap)
            ax.set_ylabel("Freq (Hz)")
            self.subplots.append(ax)
            self.figure.subplots_adjust(hspace=0.0)
        ax.set_xlabel("Time (s)")
        if filter_fr:
            # Filter frequency response, converted from rad/sample to Hz
            ax = self.figure.add_subplot(subplots, 1, sound.channels + analysis + 2)
            ax.margins(0, 0.1)
            ax.plot(filter_w / np.pi * self.sound.frame_rate * 0.5, abs(filter_h) * max(sig[0::sound.channels]))
            ax.set_xlabel("Frequency (Hz)")
            ax.set_ylabel("Amplitude")
            ax.axvline(filter_cl, color="green")  # Cutoff frequency start
            ax.axvline(filter_ch, color="green")  # Cutoff frequency stop
            self.subplots.append(ax)
        # Handle zoom/pan events
        for ax in self.subplots:
            ax.callbacks.connect("xlim_changed", self.on_plot_change)
            ax.callbacks.connect("ylim_changed", self.on_plot_change)
        self.figure.canvas.draw()
        # Save background for updating on the fly
        self.plotbackground = self.figure.canvas.copy_from_bbox(self.figure.bbox)
        # Create lines (for later use, hidden until first update)
        for ax in self.subplots:
            line = ax.axvline(0, linewidth=1, color="black")
            self.lclick.append(line)
            line = ax.axvline(0, linewidth=1, color="grey")
            self.lover.append(line)
            line = ax.axvline(0, linewidth=1, color="blue")
            self.lframe.append(line)
def on_plot_change(self, axes):
# Hide all lines to not save them as part of background
for line in itertools.chain(self.lclick, self.lover, self.lframe):
line.set_visible(False)
# Redraw and resave new layout background
self.figure.canvas.draw()
self.plotbackground = self.figure.canvas.copy_from_bbox(self.figure.bbox)
# Reshow all lines
for line in itertools.chain(self.lclick, self.lover, self.lframe):
line.set_visible(True)
def is_plotnav_active(self):
return self.plotnav._active is None
def on_plot_click(self, event):
if not self.is_plotnav_active():
return
if event.xdata is not None and event.ydata is not None:
self.sound_start_at = event.xdata
self.sound_play()
self.update_ui()
# Update lines
self.lclick_pos = event.xdata
self.plot_update()
def on_plot_over(self, event):
if not self.is_plotnav_active():
return
# Update lines
if event.xdata is not None and event.ydata is not None:
self.lover_pos = event.xdata
else:
self.lover_pos = 0
if self.plotbackground is not None:
self.plot_update()
def plot_frame(self, x):
# Update lines
self.lframe_pos = x
self.plot_update()
    def plot_update(self):
        """Blit the click/hover/playback marker lines over the saved background.

        Restores the background captured by plot()/on_plot_change() so only
        the three vertical markers per subplot need redrawing — fast enough
        to run on every mouse-move and playback frame.
        """
        self.figure.canvas.restore_region(self.plotbackground)
        for i, (lclick, lover, lframe) in enumerate(zip(self.lclick, self.lover, self.lframe)):
            lclick.set_xdata([self.lclick_pos])
            lover.set_xdata([self.lover_pos])
            lframe.set_xdata([self.lframe_pos])
            self.subplots[i].draw_artist(lclick)
            self.subplots[i].draw_artist(lover)
            self.subplots[i].draw_artist(lframe)
        self.figure.canvas.blit(self.figure.bbox)
    def sound_play(self):
        """Start playback, or re-seek it to ``self.sound_start_at``.

        If a playback thread is already running, only the play-at signal is
        emitted; otherwise a new SoundThread is created and wired to the
        play/pause/stop signals, the frame-marker callback, and the
        finished handler.
        """
        if self.playing:
            self.sig_sound_play_at.emit(self.sound_start_at)
        elif self.is_sound_loaded():
            self.sound_thread = SoundThread(self.sound, self.sound_start_at, self.sound_mutex, self.sound_pause_cond)
            self.sig_sound_play_at.connect(self.sound_thread.play_at)
            self.sig_sound_pause.connect(self.sound_thread.pause)
            self.sig_sound_stop.connect(self.sound_thread.stop)
            self.sound_thread.sig_frame.connect(self.plot_frame)
            self.sound_thread.finished.connect(self.on_sound_done)
            self.sound_thread.start()
            self.playing = True
        self.update_ui()
    def sound_stop(self):
        """Ask the playback thread to stop, waking it if it is paused.

        The wakeAll() under the mutex releases a thread blocked in
        pause_cond.wait() so it can observe the stop flag and exit.
        """
        self.sig_sound_stop.emit()
        self.sound_mutex.lock()
        self.sound_pause_cond.wakeAll()
        self.sound_mutex.unlock()
def sound_pause(self): # Toggle
if self.sound_paused:
self.sig_sound_pause.emit()
self.sound_mutex.lock()
self.sound_pause_cond.wakeAll()
self.sound_mutex.unlock()
else:
self.sig_sound_pause.emit()
self.sound_paused = not self.sound_paused
self.update_ui()
def on_sound_done(self):
self.playing = False
self.sound_paused = False
self.update_ui()
self.lframe_pos = 0
self.plot_update()
    def record(self):  # Toggle
        """Toggle microphone recording.

        Starting spawns a RecordThread with the UI-selected bit depth and
        sample rate (always 2 channels); stopping just emits the stop signal,
        and on_record_return() receives the finished data.
        """
        if self.recording:
            self.sig_record_stop.emit()
        else:
            self.recording = True
            bit_depth = self.bit_depths[self.cb_bit_depth.currentIndex()]
            rate = self.sampling_rates[self.cb_sample_rate.currentIndex()]
            self.record_thread = RecordThread(bit_depth, rate, 2)  # Always record in stereo (2 channels)
            self.sig_record_stop.connect(self.record_thread.stop)
            self.record_thread.sig_return.connect(self.on_record_return)
            self.record_thread.start()
        self.update_ui()
def on_record_return(self, data, sample_width, rate, channels):
self.load_signal(data, sample_width, rate, channels)
self.recording = False
self.update_ui()
class SoundThread(QThread):
    """Background thread that plays a pydub AudioSegment through PyAudio.

    Emits sig_frame with the current playback time (seconds) roughly every
    50 ms.  Cooperates with the GUI via three flags: ``restart`` (seek to
    ``start_at``), ``paused`` (block on the condition variable) and
    ``running`` (exit the playback loop).
    """
    sig_frame = pyqtSignal(float)
    def __init__(self, sound, start_at, mutex, pause_cond):
        QThread.__init__(self)
        self.sound = sound
        self.start_at = start_at
        self.restart = True  # Start on True for first start
        self.running = True
        self.paused = False
        self.mutex = mutex
        self.pause_cond = pause_cond
    def __del__(self):
        # Block until the thread has finished before tearing it down
        self.wait()
    def run(self):
        """Playback loop: stream 50 ms chunks until stopped."""
        p = PyAudio()
        stream = p.open(
            format=p.get_format_from_width(self.sound.sample_width),
            channels=self.sound.channels,
            rate=self.sound.frame_rate,
            output=True)
        while self.restart:
            self.restart = False
            # Break into 50 ms chunks (pydub slices in milliseconds)
            start = self.start_at * 1000
            current_time = self.start_at
            for chunk in make_chunks(self.sound[start:], 50):
                if not self.running or self.restart:
                    break
                stream.write(chunk._data)
                current_time += 0.05
                self.sig_frame.emit(current_time)
                if self.paused:
                    self.mutex.lock()
                    self.pause_cond.wait(self.mutex)
                    self.mutex.unlock()
                    # Reopen stream due to an issue on Linux
                    # where the stream stops being read by the backend
                    stream = p.open(
                        format=p.get_format_from_width(self.sound.sample_width),
                        channels=self.sound.channels,
                        rate=self.sound.frame_rate,
                        output=True)
        stream.close()
        p.terminate()
    def play_at(self, start_at=0):
        """Seek to ``start_at`` seconds and restart the playback loop."""
        self.start_at = start_at
        self.restart = True
    def pause(self):  # Toggle
        self.paused = not self.paused
    def stop(self):
        self.running = False
class RecordThread(QThread):
    """Background thread that records from the default input via PyAudio.

    Runs until stop() is called, then emits sig_return with the raw bytes,
    sample width, rate and channel count of the recording.
    """
    sig_return = pyqtSignal(bytes, int, int, int)
    def __init__(self, bit_depth, rate, channels):
        QThread.__init__(self)
        self.bit_depth = bit_depth
        self.rate = rate
        self.channels = channels
        self.running = True
    def __del__(self):
        # Block until the thread has finished before tearing it down
        self.wait()
    def run(self):
        """Capture 1024-frame buffers until stop() clears ``running``."""
        p = PyAudio()
        stream = p.open(
            format=self.bit_depth,
            channels=self.channels,
            rate=self.rate,
            input=True,
            frames_per_buffer=1024)
        data = []
        while self.running:
            data.append(stream.read(1024))
        stream.close()
        # Return recording data
        self.sig_return.emit(b''.join(data), p.get_sample_size(self.bit_depth), self.rate, self.channels)
        p.terminate()
    def stop(self):
        self.running = False
if __name__ == "__main__":
    # Create the Qt application and the main window
    app = QApplication(sys.argv)
    # Keep a reference so the window is not garbage-collected
    main_win = MainWindow()
    # Enter the Qt event loop (blocks until the application quits)
    app.exec_()
    sys.exit(0)
| [
"PyQt5.QtCore.pyqtSignal",
"numpy.absolute",
"PyQt5.QtCore.QMutex",
"numpy.maximum",
"numpy.empty",
"numpy.allclose",
"os.walk",
"pydub.utils.make_chunks",
"numpy.linalg.norm",
"scipy.signal.fftconvolve",
"pydub.AudioSegment.from_file",
"PyQt5.QtCore.QWaitCondition",
"os.path.join",
"numpy... | [((958, 975), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['float'], {}), '(float)\n', (968, 975), False, 'from PyQt5.QtCore import Qt, QThread, pyqtSignal, QMutex, QWaitCondition\n'), ((998, 1010), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (1008, 1010), False, 'from PyQt5.QtCore import Qt, QThread, pyqtSignal, QMutex, QWaitCondition\n'), ((1032, 1044), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (1042, 1044), False, 'from PyQt5.QtCore import Qt, QThread, pyqtSignal, QMutex, QWaitCondition\n'), ((1067, 1079), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (1077, 1079), False, 'from PyQt5.QtCore import Qt, QThread, pyqtSignal, QMutex, QWaitCondition\n'), ((27492, 27509), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['float'], {}), '(float)\n', (27502, 27509), False, 'from PyQt5.QtCore import Qt, QThread, pyqtSignal, QMutex, QWaitCondition\n'), ((29453, 29485), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['bytes', 'int', 'int', 'int'], {}), '(bytes, int, int, int)\n', (29463, 29485), False, 'from PyQt5.QtCore import Qt, QThread, pyqtSignal, QMutex, QWaitCondition\n'), ((30476, 30487), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (30484, 30487), False, 'import sys\n'), ((2115, 2123), 'PyQt5.QtCore.QMutex', 'QMutex', ([], {}), '()\n', (2121, 2123), False, 'from PyQt5.QtCore import Qt, QThread, pyqtSignal, QMutex, QWaitCondition\n'), ((2156, 2172), 'PyQt5.QtCore.QWaitCondition', 'QWaitCondition', ([], {}), '()\n', (2170, 2172), False, 'from PyQt5.QtCore import Qt, QThread, pyqtSignal, QMutex, QWaitCondition\n'), ((3291, 3320), 'os.walk', 'os.walk', (['"""resources/impulses"""'], {}), "('resources/impulses')\n", (3298, 3320), False, 'import os\n'), ((6452, 6460), 'matplotlib.figure.Figure', 'Figure', ([], {}), '()\n', (6458, 6460), False, 'from matplotlib.figure import Figure\n'), ((6469, 6494), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg', 'FigureCanvas', (['self.figure'], {}), '(self.figure)\n', (6481, 
6494), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n'), ((6754, 6811), 'matplotlib.backends.backend_qt5.NavigationToolbar2QT', 'NavigationToolbar', (['self.figure.canvas', 'self.figure.canvas'], {}), '(self.figure.canvas, self.figure.canvas)\n', (6771, 6811), True, 'from matplotlib.backends.backend_qt5 import NavigationToolbar2QT as NavigationToolbar\n'), ((11129, 11219), 'pydub.AudioSegment', 'AudioSegment', ([], {'data': 'data', 'sample_width': 'sample_width', 'frame_rate': 'rate', 'channels': 'channels'}), '(data=data, sample_width=sample_width, frame_rate=rate,\n channels=channels)\n', (11141, 11219), False, 'from pydub import AudioSegment, exceptions\n'), ((14095, 14137), 'numpy.empty', 'np.empty', (['(left.size + right.size)', 'np.int16'], {}), '(left.size + right.size, np.int16)\n', (14103, 14137), True, 'import numpy as np\n'), ((15912, 15934), 'numpy.absolute', 'np.absolute', (['distances'], {}), '(distances)\n', (15923, 15934), True, 'import numpy as np\n'), ((15999, 16025), 'numpy.maximum', 'np.maximum', (['distances', '(1.0)'], {}), '(distances, 1.0)\n', (16009, 16025), True, 'import numpy as np\n'), ((16076, 16101), 'numpy.power', 'np.power', (['distances', '(-1.0)'], {}), '(distances, -1.0)\n', (16084, 16101), True, 'import numpy as np\n'), ((18295, 18348), 'scipy.signal.butter', 'signal.butter', (['order', '[cut_low, cut_high]', '"""bandstop"""'], {}), "(order, [cut_low, cut_high], 'bandstop')\n", (18308, 18348), False, 'from scipy import signal, constants, fftpack\n'), ((18364, 18382), 'scipy.signal.freqz', 'signal.freqz', (['b', 'a'], {}), '(b, a)\n', (18376, 18382), False, 'from scipy import signal, constants, fftpack\n'), ((23433, 23486), 'itertools.chain', 'itertools.chain', (['self.lclick', 'self.lover', 'self.lframe'], {}), '(self.lclick, self.lover, self.lframe)\n', (23448, 23486), False, 'import itertools\n'), ((23739, 23792), 'itertools.chain', 'itertools.chain', (['self.lclick', 'self.lover', 
'self.lframe'], {}), '(self.lclick, self.lover, self.lframe)\n', (23754, 23792), False, 'import itertools\n'), ((27579, 27601), 'PyQt5.QtCore.QThread.__init__', 'QThread.__init__', (['self'], {}), '(self)\n', (27595, 27601), False, 'from PyQt5.QtCore import Qt, QThread, pyqtSignal, QMutex, QWaitCondition\n'), ((29546, 29568), 'PyQt5.QtCore.QThread.__init__', 'QThread.__init__', (['self'], {}), '(self)\n', (29562, 29568), False, 'from PyQt5.QtCore import Qt, QThread, pyqtSignal, QMutex, QWaitCondition\n'), ((4801, 4829), 'PyQt5.QtGui.QIntValidator', 'QIntValidator', (['(0)', '(2147483647)'], {}), '(0, 2147483647)\n', (4814, 4829), False, 'from PyQt5.QtGui import QIntValidator\n'), ((5005, 5033), 'PyQt5.QtGui.QIntValidator', 'QIntValidator', (['(0)', '(2147483647)'], {}), '(0, 2147483647)\n', (5018, 5033), False, 'from PyQt5.QtGui import QIntValidator\n'), ((5639, 5660), 'PyQt5.QtGui.QIntValidator', 'QIntValidator', (['(0)', '(100)'], {}), '(0, 100)\n', (5652, 5660), False, 'from PyQt5.QtGui import QIntValidator\n'), ((5901, 5929), 'PyQt5.QtGui.QIntValidator', 'QIntValidator', (['(0)', '(2147483647)'], {}), '(0, 2147483647)\n', (5914, 5929), False, 'from PyQt5.QtGui import QIntValidator\n'), ((6177, 6205), 'PyQt5.QtGui.QIntValidator', 'QIntValidator', (['(0)', '(2147483647)'], {}), '(0, 2147483647)\n', (6190, 6205), False, 'from PyQt5.QtGui import QIntValidator\n'), ((10558, 10586), 'pydub.AudioSegment.from_file', 'AudioSegment.from_file', (['file'], {}), '(file)\n', (10580, 10586), False, 'from pydub import AudioSegment, exceptions\n'), ((12038, 12069), 'pydub.AudioSegment.from_file', 'AudioSegment.from_file', (['feffect'], {}), '(feffect)\n', (12060, 12069), False, 'from pydub import AudioSegment, exceptions\n'), ((12642, 12702), 'pydub.AudioSegment.from_mono_audiosegments', 'AudioSegment.from_mono_audiosegments', (['self.sound', 'self.sound'], {}), '(self.sound, self.sound)\n', (12678, 12702), False, 'from pydub import AudioSegment, exceptions\n'), ((13675, 
13697), 'numpy.multiply', 'np.multiply', (['ch', '(65535)'], {}), '(ch, 65535)\n', (13686, 13697), True, 'import numpy as np\n'), ((13799, 13827), 'numpy.multiply', 'np.multiply', (['ch', 'volume_diff'], {}), '(ch, volume_diff)\n', (13810, 13827), True, 'import numpy as np\n'), ((16255, 16316), 'numpy.multiply', 'np.multiply', (['self.signal[i::self.sound.channels]', 'new_volumes'], {}), '(self.signal[i::self.sound.channels], new_volumes)\n', (16266, 16316), True, 'import numpy as np\n'), ((18478, 18523), 'numpy.array', 'np.array', (['self.signal[i::self.sound.channels]'], {}), '(self.signal[i::self.sound.channels])\n', (18486, 18523), True, 'import numpy as np\n'), ((28345, 28380), 'pydub.utils.make_chunks', 'make_chunks', (['self.sound[start:]', '(50)'], {}), '(self.sound[start:], 50)\n', (28356, 28380), False, 'from pydub.utils import make_chunks\n'), ((13410, 13429), 'numpy.fft.irfft', 'np.fft.irfft', (['(x * y)'], {}), '(x * y)\n', (13422, 13429), True, 'import numpy as np\n'), ((13505, 13569), 'scipy.signal.fftconvolve', 'signal.fftconvolve', (['self.signal[i::step]', 'effect_signal[i::step]'], {}), '(self.signal[i::step], effect_signal[i::step])\n', (13523, 13569), False, 'from scipy import signal, constants, fftpack\n'), ((13740, 13768), 'numpy.max', 'np.max', (['self.signal[i::step]'], {}), '(self.signal[i::step])\n', (13746, 13768), True, 'import numpy as np\n'), ((13771, 13781), 'numpy.max', 'np.max', (['ch'], {}), '(ch)\n', (13777, 13781), True, 'import numpy as np\n'), ((19375, 19398), 'scipy.signal.lfilter', 'signal.lfilter', (['b', 'a', 'x'], {}), '(b, a, x)\n', (19389, 19398), False, 'from scipy import signal, constants, fftpack\n'), ((13638, 13656), 'numpy.linalg.norm', 'np.linalg.norm', (['ch'], {}), '(ch)\n', (13652, 13656), True, 'import numpy as np\n'), ((19066, 19089), 'scipy.signal.lfilter', 'signal.lfilter', (['b', 'a', 'x'], {}), '(b, a, x)\n', (19080, 19089), False, 'from scipy import signal, constants, fftpack\n'), ((19113, 19156), 
'numpy.allclose', 'np.allclose', (['y', 'y_sp'], {'rtol': '(0.01)', 'atol': '(1e-08)'}), '(y, y_sp, rtol=0.01, atol=1e-08)\n', (19124, 19156), True, 'import numpy as np\n'), ((3523, 3547), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (3535, 3547), False, 'import os\n')] |
import numpy as np
def tensorize(x):
    """Coerce *x* to a float ndarray with all singleton dimensions removed.

    Parameters
    ----------
    x : array_like
        Input data (scalar, list, nested list, or ndarray).

    Returns
    -------
    np.ndarray
        Float64 array with every length-1 axis squeezed out (a 0-d array
        for scalar input).

    Notes
    -----
    ``np.asfarray`` was deprecated and removed in NumPy 2.0;
    ``np.asarray(x, dtype=float)`` is the supported equivalent.
    """
    return np.squeeze(np.asarray(x, dtype=float))
# ####### HELPER METHODS #########
# implement Stochastic Gradient Descent to be used by our Network for training
def sgd(net, loss, T, batch_size=1, max_iter=1, learning_rate_init=1e-3,
        tol=1e-6, n_iter_no_change=10):
    """Train *net* with mini-batch stochastic gradient descent.

    T is a dict with arrays 'x' and 'y'; the data is shuffled once, split
    into batches of ``batch_size``, and for each batch the mean gradient
    (obtained from ``net.backprop``) is applied with step size
    ``learning_rate_init``.  Training stops early after
    ``n_iter_no_change`` consecutive epochs whose loss changed by less
    than ``tol``.  Returns the per-epoch mean losses.
    """
    num_samples = len(T['y'])
    num_batches = int(num_samples / batch_size)
    # Shuffle the dataset once by argsorting a vector of random keys
    order = np.argsort(np.random.random(num_samples))
    batches_x = np.split(T['x'][order], num_batches, axis=0)
    batches_y = np.split(T['y'][order], num_batches, axis=0)
    epoch_losses = np.zeros(max_iter)
    stall_count = 0
    for epoch in range(max_iter):
        accumulated_loss = 0
        for batch_x, batch_y in zip(batches_x, batches_y):
            weights = net.getWeights()
            grad_sum = np.zeros(len(weights))
            for x_i, y_i in zip(batch_x, batch_y):
                result = net.backprop(x_i, y_i, loss)
                grad_sum += result[1]
                accumulated_loss += result[0]
            # Step with the batch-mean gradient
            net.setWeights(weights - learning_rate_init * (grad_sum / batch_size))
        epoch_losses[epoch] = accumulated_loss / num_samples
        # Early stopping after n_iter_no_change consecutive stalled epochs
        if epoch > 0 and abs(epoch_losses[epoch - 1] - epoch_losses[epoch]) < tol:
            stall_count += 1
            if stall_count >= n_iter_no_change:
                return epoch_losses[:epoch]
        else:
            stall_count = 0
    return epoch_losses
| [
"numpy.asfarray",
"numpy.random.random",
"numpy.zeros"
] | [((612, 630), 'numpy.zeros', 'np.zeros', (['max_iter'], {}), '(max_iter)\n', (620, 630), True, 'import numpy as np\n'), ((60, 74), 'numpy.asfarray', 'np.asfarray', (['x'], {}), '(x)\n', (71, 74), True, 'import numpy as np\n'), ((346, 365), 'numpy.random.random', 'np.random.random', (['N'], {}), '(N)\n', (362, 365), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Utilities for spectral processing'''
import warnings
import numpy as np
import scipy
import six
from . import time_frequency
from .fft import get_fftlib
from .._cache import cache
from .. import util
from ..util.exceptions import ParameterError
from ..filters import get_window, window_sumsquare
__all__ = ['stft', 'istft', 'magphase',
'power_to_db', 'db_to_power',
'amplitude_to_db', 'db_to_amplitude']
def stft(y, n_fft=2048, hop_length=None, win_length=None, window='hann',
         center=True, dtype=np.complex64, pad_mode='reflect'):
    """Short-time Fourier transform (STFT).

    Computes a complex matrix ``D`` where ``np.abs(D[f, t])`` is the
    magnitude and ``np.angle(D[f, t])`` the phase of frequency bin `f`
    at frame `t`.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)], real-valued
        the input signal (audio time series)
    n_fft : int > 0 [scalar]
        FFT window size
    hop_length : int > 0 [scalar]
        number of audio samples between STFT columns;
        defaults to ``win_length // 4``
    win_length : int <= n_fft [scalar]
        length of the analysis window, zero-padded to ``n_fft``;
        defaults to ``n_fft``
    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        window specification, as accepted by `filters.get_window`
    center : boolean
        if `True`, pad `y` so that frame ``D[:, t]`` is centered at
        ``y[t * hop_length]``; otherwise frames are left-aligned
    dtype : numeric type
        complex dtype of the output matrix (default: 64-bit complex)
    pad_mode : string
        padding mode used at the signal edges when ``center=True``

    Returns
    -------
    D : np.ndarray [shape=(1 + n_fft/2, t), dtype=dtype]
        STFT matrix

    See Also
    --------
    istft : Inverse STFT
    np.pad : array padding
    """
    if win_length is None:
        win_length = n_fft
    if hop_length is None:
        hop_length = int(win_length // 4)

    # Analysis window, zero-padded out to n_fft and reshaped to a column
    # so that it broadcasts over all frames at once.
    analysis_window = get_window(window, win_length, fftbins=True)
    analysis_window = util.pad_center(analysis_window, n_fft).reshape((-1, 1))

    util.valid_audio(y)

    # Center the frames on y[t * hop_length] by padding half a window
    # at each end of the signal.
    if center:
        y = np.pad(y, int(n_fft // 2), mode=pad_mode)

    frames = util.frame(y, frame_length=n_fft, hop_length=hop_length)

    # Column-major output: one column per frame, 1 + n_fft//2 bins each.
    result = np.empty((int(1 + n_fft // 2), frames.shape[1]),
                      dtype=dtype,
                      order='F')

    fft = get_fftlib()

    # Transform in blocks of columns that fit within MAX_MEM_BLOCK.
    block_width = int(util.MAX_MEM_BLOCK / (result.shape[0] *
                                            result.itemsize))
    for start in range(0, result.shape[1], block_width):
        stop = min(start + block_width, result.shape[1])
        result[:, start:stop] = fft.rfft(analysis_window * frames[:, start:stop],
                                         axis=0)

    return result
def istft(stft_matrix, hop_length=None, win_length=None, window='hann',
          center=True, dtype=np.float32, length=None):
    """Inverse short-time Fourier transform (ISTFT).

    Reconstructs a time-domain signal from a complex spectrogram produced
    by `stft`, minimizing the mean squared error between `stft_matrix` and
    the STFT of the result (Griffin & Lim, 1984, up to Section 2).  With
    parameters matching the forward transform and an unmodified
    `stft_matrix`, reconstruction is (near-)perfect.

    Parameters
    ----------
    stft_matrix : np.ndarray [shape=(1 + n_fft/2, t)]
        STFT matrix from `stft`
    hop_length : int > 0 [scalar]
        number of samples between STFT columns;
        defaults to ``win_length // 4``
    win_length : int <= n_fft = 2 * (stft_matrix.shape[0] - 1)
        synthesis window length; defaults to ``n_fft``
    window : string, tuple, number, function, np.ndarray [shape=(n_fft,)]
        window specification, as accepted by `filters.get_window`
    center : boolean
        whether `stft_matrix` was computed with centered frames
    dtype : numeric type
        real dtype of the output signal (default: 32-bit float)
    length : int > 0, optional
        if given, zero-pad or clip the output to exactly `length` samples

    Returns
    -------
    y : np.ndarray [shape=(n,)]
        time-domain signal reconstructed from `stft_matrix`

    See Also
    --------
    stft : Short-time Fourier Transform
    """
    n_fft = 2 * (stft_matrix.shape[0] - 1)

    if win_length is None:
        win_length = n_fft
    if hop_length is None:
        hop_length = int(win_length // 4)

    # Synthesis window as a column vector, zero-padded to n_fft.
    synthesis_window = get_window(window, win_length, fftbins=True)
    synthesis_window = util.pad_center(synthesis_window, n_fft)[:, np.newaxis]

    n_frames = stft_matrix.shape[1]
    y = np.zeros(n_fft + hop_length * (n_frames - 1), dtype=dtype)

    block_width = int(util.MAX_MEM_BLOCK // (stft_matrix.shape[0] *
                                             stft_matrix.itemsize))

    fft = get_fftlib()

    # Invert the spectrogram block-by-block and overlap-add into y.
    frames_done = 0
    for start in range(0, n_frames, block_width):
        stop = min(start + block_width, n_frames)
        block = synthesis_window * fft.irfft(stft_matrix[:, start:stop], axis=0)
        __overlap_add(y[frames_done * hop_length:], block, hop_length)
        frames_done += (stop - start)

    # Compensate for the squared-window envelope wherever it is non-negligible.
    window_envelope = window_sumsquare(window,
                                       n_frames,
                                       win_length=win_length,
                                       n_fft=n_fft,
                                       hop_length=hop_length,
                                       dtype=dtype)
    nonzero = window_envelope > util.tiny(window_envelope)
    y[nonzero] /= window_envelope[nonzero]

    if length is None:
        # No target length: just strip the centering pad, if any.
        if center:
            y = y[int(n_fft // 2):-int(n_fft // 2)]
    else:
        # Crop the centering pad from the front only, then trim/pad to the
        # requested length; keeping the tail lets zero-padded signals decay
        # smoothly under the window.
        start_sample = int(n_fft // 2) if center else 0
        y = util.fix_length(y[start_sample:], length)

    return y
def __overlap_add(y, ytmp, hop_length):
    """Overlap-add windowed ISTFT frames into a pre-allocated buffer.

    Each column of `ytmp` (one time-domain frame) is added in place into
    `y`, with consecutive frames offset by `hop_length` samples.
    """
    frame_length = ytmp.shape[0]
    for col in range(ytmp.shape[1]):
        start = col * hop_length
        y[start:(start + frame_length)] += ytmp[:, col]
def magphase(D, power=1):
    """Decompose a complex-valued spectrogram `D` into magnitude (S)
    and phase (P) components, so that ``D = S * P``.

    Parameters
    ----------
    D : np.ndarray [shape=(d, t), dtype=complex]
        complex-valued spectrogram
    power : float > 0
        Exponent for the magnitude spectrogram,
        e.g., 1 for energy, 2 for power, etc.

    Returns
    -------
    D_mag : np.ndarray [shape=(d, t), dtype=real]
        magnitude of `D`, raised to `power`
    D_phase : np.ndarray [shape=(d, t), dtype=complex]
        ``exp(1.j * phi)`` where ``phi`` is the phase of `D`;
        every entry has unit modulus.

    Examples
    --------
    >>> D = np.array([[3 + 4j]])
    >>> S, P = magphase(D)
    >>> np.allclose(S * P, D)
    True
    """
    # Unit-modulus phasors carrying only the phase of D.
    unit_phase = np.exp(1.j * np.angle(D))
    # Magnitude, raised in place to the requested exponent.
    magnitude = np.abs(D)
    magnitude **= power
    return magnitude, unit_phase
@cache(level=30)
def power_to_db(S, ref=1.0, amin=1e-10, top_db=80.0):
    """Convert a power spectrogram (amplitude squared) to decibel (dB) units

    This computes the scaling ``10 * log10(S / ref)`` in a numerically
    stable way.

    Parameters
    ----------
    S : np.ndarray
        input power
    ref : scalar or callable
        If scalar, the amplitude `abs(S)` is scaled relative to `ref`:
        `10 * log10(S / ref)`.
        Zeros in the output correspond to positions where `S == ref`.
        If callable, the reference value is computed as `ref(S)`.
    amin : float > 0 [scalar]
        minimum threshold for `abs(S)` and `ref`
    top_db : float >= 0 [scalar]
        threshold the output at `top_db` below the peak:
        ``max(10 * log10(S)) - top_db``

    Returns
    -------
    S_db : np.ndarray
        ``S_db ~= 10 * log10(S) - 10 * log10(ref)``

    Raises
    ------
    ParameterError
        if `amin` is not strictly positive, or if `top_db` is negative

    See Also
    --------
    perceptual_weighting
    db_to_power
    amplitude_to_db
    db_to_amplitude

    Examples
    --------
    >>> S = np.abs(stft(y))
    >>> power_to_db(S**2)                # dB relative to ref=1.0
    >>> power_to_db(S**2, ref=np.max)    # dB relative to peak power
    """
    S = np.asarray(S)
    if amin <= 0:
        raise ParameterError('amin must be strictly positive')
    if np.issubdtype(S.dtype, np.complexfloating):
        warnings.warn('power_to_db was called on complex input so phase '
                      'information will be discarded. To suppress this warning, '
                      'call power_to_db(np.abs(D)**2) instead.')
        magnitude = np.abs(S)
    else:
        magnitude = S
    # Built-in callable() replaces the legacy six.callable shim; both exist
    # and behave identically on all supported Python versions.
    if callable(ref):
        # User supplied a function to calculate reference power
        ref_value = ref(magnitude)
    else:
        ref_value = np.abs(ref)
    # Clamp at `amin` before taking the log to avoid log(0) / -inf.
    log_spec = 10.0 * np.log10(np.maximum(amin, magnitude))
    log_spec -= 10.0 * np.log10(np.maximum(amin, ref_value))
    if top_db is not None:
        if top_db < 0:
            raise ParameterError('top_db must be non-negative')
        # Clip everything more than `top_db` below the peak.
        log_spec = np.maximum(log_spec, log_spec.max() - top_db)
    return log_spec
def db_to_power(S_db, ref=1.0):
    '''Convert a dB-scale spectrogram to a power spectrogram.

    This effectively inverts `power_to_db`:

    `db_to_power(S_db) ~= ref * 10.0**(S_db / 10)`

    Parameters
    ----------
    S_db : np.ndarray
        dB-scaled spectrogram
    ref : number > 0
        Reference power: output will be scaled by this value

    Returns
    -------
    S : np.ndarray
        Power spectrogram
    '''
    # Undo the 10*log10 scaling, then re-apply the reference power.
    linear = np.power(10.0, 0.1 * S_db)
    return ref * linear
def amplitude_to_db(S, ref=1.0, amin=1e-5, top_db=80.0):
    '''Convert an amplitude spectrogram to dB-scaled spectrogram.

    This is equivalent to ``power_to_db(S**2)``, but is provided for convenience.

    Parameters
    ----------
    S : np.ndarray
        input amplitude
    ref : scalar or callable
        If scalar, the amplitude `abs(S)` is scaled relative to `ref`:
        `20 * log10(S / ref)`.
        Zeros in the output correspond to positions where `S == ref`.
        If callable, the reference value is computed as `ref(S)`.
    amin : float > 0 [scalar]
        minimum threshold for `S` and `ref`
    top_db : float >= 0 [scalar]
        threshold the output at `top_db` below the peak:
        ``max(20 * log10(S)) - top_db``

    Returns
    -------
    S_db : np.ndarray
        ``S`` measured in dB

    See Also
    --------
    power_to_db, db_to_amplitude
    '''
    S = np.asarray(S)
    if np.issubdtype(S.dtype, np.complexfloating):
        warnings.warn('amplitude_to_db was called on complex input so phase '
                      'information will be discarded. To suppress this warning, '
                      'call amplitude_to_db(np.abs(S)) instead.')
    magnitude = np.abs(S)
    # Built-in callable() replaces the legacy six.callable shim; both exist
    # and behave identically on all supported Python versions.
    if callable(ref):
        # User supplied a function to calculate reference power
        ref_value = ref(magnitude)
    else:
        ref_value = np.abs(ref)
    # Square in place (magnitude is a fresh copy from np.abs), then defer to
    # power_to_db with ref/amin squared so 10*log10(S**2) == 20*log10(S).
    power = np.square(magnitude, out=magnitude)
    return power_to_db(power, ref=ref_value**2, amin=amin**2,
                       top_db=top_db)
def db_to_amplitude(S_db, ref=1.0):
    '''Convert a dB-scaled spectrogram to an amplitude spectrogram.

    This effectively inverts `amplitude_to_db`:

    `db_to_amplitude(S_db) ~= 10.0**(0.5 * (S_db + log10(ref)/10))`

    Parameters
    ----------
    S_db : np.ndarray
        dB-scaled spectrogram
    ref: number > 0
        Optional reference power.

    Returns
    -------
    S : np.ndarray
        Linear magnitude spectrogram
    '''
    # Recover power with a squared reference, then take the square root
    # to get back to the amplitude domain.
    power_spec = db_to_power(S_db, ref=ref**2)
    return power_spec ** 0.5
def _spectrogram(y=None, S=None, n_fft=2048, hop_length=512, power=1,
                 win_length=None, window='hann', center=True, pad_mode='reflect'):
    '''Helper function to retrieve a magnitude spectrogram.

    This is primarily used in feature extraction functions that can operate on
    either audio time-series or spectrogram input: it normalizes the two cases.

    Parameters
    ----------
    y : None or np.ndarray [ndim=1]
        If provided, an audio time series (used only when `S` is None)
    S : None or np.ndarray
        Spectrogram input, optional; returned unchanged when given
    n_fft : int > 0
        STFT window size (inferred from `S` when `S` is provided)
    hop_length : int > 0
        STFT hop length
    power : float > 0
        Exponent for the magnitude spectrogram,
        e.g., 1 for energy, 2 for power, etc.
    win_length : int <= n_fft [scalar]
        Window length; defaults to ``n_fft`` when unspecified.
        Forwarded to `stft`.
    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        Window specification; forwarded to `stft`.
    center : boolean
        Whether frames are centered at `y[t * hop_length]`; forwarded to `stft`.
    pad_mode : string
        Padding mode at the signal edges when `center=True`; forwarded to `stft`.

    Returns
    -------
    S_out : np.ndarray [dtype=np.float32]
        - If `S` is provided as input, then `S_out == S`
        - Else, `S_out = |stft(y, ...)|**power`
    n_fft : int > 0
        - If `S` is provided, then `n_fft` is inferred from `S`
        - Else, copied from input
    '''
    if S is None:
        # No spectrogram supplied: compute one from the waveform.
        stft_matrix = stft(y, n_fft=n_fft, hop_length=hop_length,
                           win_length=win_length, center=center,
                           window=window, pad_mode=pad_mode)
        S = np.abs(stft_matrix) ** power
    else:
        # A spectrogram was supplied: recover n_fft from its row count.
        n_fft = 2 * (S.shape[0] - 1)
    return S, n_fft
| [
"numpy.abs",
"numpy.maximum",
"numpy.power",
"numpy.asarray",
"numpy.square",
"numpy.zeros",
"numpy.angle",
"six.callable",
"warnings.warn",
"numpy.issubdtype"
] | [((8875, 8917), 'numpy.zeros', 'np.zeros', (['expected_signal_len'], {'dtype': 'dtype'}), '(expected_signal_len, dtype=dtype)\n', (8883, 8917), True, 'import numpy as np\n'), ((13171, 13180), 'numpy.abs', 'np.abs', (['D'], {}), '(D)\n', (13177, 13180), True, 'import numpy as np\n'), ((15913, 15926), 'numpy.asarray', 'np.asarray', (['S'], {}), '(S)\n', (15923, 15926), True, 'import numpy as np\n'), ((16017, 16059), 'numpy.issubdtype', 'np.issubdtype', (['S.dtype', 'np.complexfloating'], {}), '(S.dtype, np.complexfloating)\n', (16030, 16059), True, 'import numpy as np\n'), ((16352, 16369), 'six.callable', 'six.callable', (['ref'], {}), '(ref)\n', (16364, 16369), False, 'import six\n'), ((18232, 18245), 'numpy.asarray', 'np.asarray', (['S'], {}), '(S)\n', (18242, 18245), True, 'import numpy as np\n'), ((18254, 18296), 'numpy.issubdtype', 'np.issubdtype', (['S.dtype', 'np.complexfloating'], {}), '(S.dtype, np.complexfloating)\n', (18267, 18296), True, 'import numpy as np\n'), ((18541, 18550), 'numpy.abs', 'np.abs', (['S'], {}), '(S)\n', (18547, 18550), True, 'import numpy as np\n'), ((18559, 18576), 'six.callable', 'six.callable', (['ref'], {}), '(ref)\n', (18571, 18576), False, 'import six\n'), ((18732, 18767), 'numpy.square', 'np.square', (['magnitude'], {'out': 'magnitude'}), '(magnitude, out=magnitude)\n', (18741, 18767), True, 'import numpy as np\n'), ((16069, 16241), 'warnings.warn', 'warnings.warn', (['"""power_to_db was called on complex input so phase information will be discarded. To suppress this warning, call power_to_db(np.abs(D)**2) instead."""'], {}), "(\n 'power_to_db was called on complex input so phase information will be discarded. 
To suppress this warning, call power_to_db(np.abs(D)**2) instead.'\n )\n", (16082, 16241), False, 'import warnings\n'), ((16302, 16311), 'numpy.abs', 'np.abs', (['S'], {}), '(S)\n', (16308, 16311), True, 'import numpy as np\n'), ((16500, 16511), 'numpy.abs', 'np.abs', (['ref'], {}), '(ref)\n', (16506, 16511), True, 'import numpy as np\n'), ((17293, 17319), 'numpy.power', 'np.power', (['(10.0)', '(0.1 * S_db)'], {}), '(10.0, 0.1 * S_db)\n', (17301, 17319), True, 'import numpy as np\n'), ((18306, 18483), 'warnings.warn', 'warnings.warn', (['"""amplitude_to_db was called on complex input so phase information will be discarded. To suppress this warning, call amplitude_to_db(np.abs(S)) instead."""'], {}), "(\n 'amplitude_to_db was called on complex input so phase information will be discarded. To suppress this warning, call amplitude_to_db(np.abs(S)) instead.'\n )\n", (18319, 18483), False, 'import warnings\n'), ((18707, 18718), 'numpy.abs', 'np.abs', (['ref'], {}), '(ref)\n', (18713, 18718), True, 'import numpy as np\n'), ((13224, 13235), 'numpy.angle', 'np.angle', (['D'], {}), '(D)\n', (13232, 13235), True, 'import numpy as np\n'), ((16544, 16571), 'numpy.maximum', 'np.maximum', (['amin', 'magnitude'], {}), '(amin, magnitude)\n', (16554, 16571), True, 'import numpy as np\n'), ((16605, 16632), 'numpy.maximum', 'np.maximum', (['amin', 'ref_value'], {}), '(amin, ref_value)\n', (16615, 16632), True, 'import numpy as np\n')] |
"""
@brief test log(time=1s)
"""
import os
import unittest
import pandas
import numpy
from pyquickhelper.loghelper import fLOG, CustomLog
from pyquickhelper.pycode import get_temp_folder, ExtTestCase
from pyquickhelper.pycode import fix_tkinter_issues_virtualenv
from pyensae.graphhelper import Corrplot
class TestGraph(ExtTestCase):
    """Smoke test for :class:`pyensae.graphhelper.Corrplot` plotting."""

    def test_graph_corrplot(self):
        """Build a random correlation matrix and render it with every
        Corrplot layout, checking that the figure is written to disk."""
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        temp = get_temp_folder(__file__, "temp_corrplot")
        clog = CustomLog(temp)
        clog("fix")
        # Work around tkinter/matplotlib backend issues inside a virtualenv
        # before pyplot is imported.
        fix_tkinter_issues_virtualenv(fLOG=fLOG)
        clog("import")
        from matplotlib import pyplot as plt
        letters = "ABCDEFGHIJKLMNOP"[0:10]
        # Random columns offset by the letter's alphabetical rank, so the
        # resulting correlation matrix is non-trivial.
        df = pandas.DataFrame(
            dict(((k, numpy.random.random(10) + ord(k) - 65) for k in letters)))
        df = df.corr()
        clog("figure")
        subplot_kw = dict(aspect='equal', facecolor='white')
        fig, ax = plt.subplots(1, 1, subplot_kw=subplot_kw)
        clog("corrplot")
        c = Corrplot(df)
        clog("plot")
        # Exercise each supported rendering mode on the same axes; the
        # colorbar is only added once (first mode).
        for up in ['lower', 'upper', 'method', 'both']:
            ax = c.plot(fig=fig, ax=ax, colorbar=up == 'lower')
        clog("save")
        fLOG("save")
        img = os.path.join(temp, "corrplot.png")
        fig.savefig(img)
        fLOG("close")
        clog("close")
        if __name__ == "__main__":
            # Display interactively only when run as a script, not under a runner.
            plt.show()
        plt.close('all')
        clog("end")
        fLOG("end")
        self.assertExists(img)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"matplotlib.pyplot.show",
"os.path.join",
"matplotlib.pyplot.close",
"pyensae.graphhelper.Corrplot",
"pyquickhelper.loghelper.fLOG",
"numpy.random.random",
"pyquickhelper.pycode.fix_tkinter_issues_virtualenv",
"pyquickhelper.loghelper.CustomLog",
"matplotlib.pyplot.subplots",
"p... | [((1586, 1601), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1599, 1601), False, 'import unittest\n'), ((385, 457), 'pyquickhelper.loghelper.fLOG', 'fLOG', (['__file__', 'self._testMethodName'], {'OutputPrint': "(__name__ == '__main__')"}), "(__file__, self._testMethodName, OutputPrint=__name__ == '__main__')\n", (389, 457), False, 'from pyquickhelper.loghelper import fLOG, CustomLog\n'), ((511, 553), 'pyquickhelper.pycode.get_temp_folder', 'get_temp_folder', (['__file__', '"""temp_corrplot"""'], {}), "(__file__, 'temp_corrplot')\n", (526, 553), False, 'from pyquickhelper.pycode import get_temp_folder, ExtTestCase\n'), ((569, 584), 'pyquickhelper.loghelper.CustomLog', 'CustomLog', (['temp'], {}), '(temp)\n', (578, 584), False, 'from pyquickhelper.loghelper import fLOG, CustomLog\n'), ((613, 653), 'pyquickhelper.pycode.fix_tkinter_issues_virtualenv', 'fix_tkinter_issues_virtualenv', ([], {'fLOG': 'fLOG'}), '(fLOG=fLOG)\n', (642, 653), False, 'from pyquickhelper.pycode import fix_tkinter_issues_virtualenv\n'), ((1004, 1045), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'subplot_kw': 'subplot_kw'}), '(1, 1, subplot_kw=subplot_kw)\n', (1016, 1045), True, 'from matplotlib import pyplot as plt\n'), ((1084, 1096), 'pyensae.graphhelper.Corrplot', 'Corrplot', (['df'], {}), '(df)\n', (1092, 1096), False, 'from pyensae.graphhelper import Corrplot\n'), ((1268, 1280), 'pyquickhelper.loghelper.fLOG', 'fLOG', (['"""save"""'], {}), "('save')\n", (1272, 1280), False, 'from pyquickhelper.loghelper import fLOG, CustomLog\n'), ((1295, 1329), 'os.path.join', 'os.path.join', (['temp', '"""corrplot.png"""'], {}), "(temp, 'corrplot.png')\n", (1307, 1329), False, 'import os\n'), ((1363, 1376), 'pyquickhelper.loghelper.fLOG', 'fLOG', (['"""close"""'], {}), "('close')\n", (1367, 1376), False, 'from pyquickhelper.loghelper import fLOG, CustomLog\n'), ((1465, 1481), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1474, 1481), True, 
'from matplotlib import pyplot as plt\n'), ((1510, 1521), 'pyquickhelper.loghelper.fLOG', 'fLOG', (['"""end"""'], {}), "('end')\n", (1514, 1521), False, 'from pyquickhelper.loghelper import fLOG, CustomLog\n'), ((1446, 1456), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1454, 1456), True, 'from matplotlib import pyplot as plt\n'), ((819, 842), 'numpy.random.random', 'numpy.random.random', (['(10)'], {}), '(10)\n', (838, 842), False, 'import numpy\n')] |
import torch
import torch.nn as nn
from torch.optim import SGD, Adam
from torch.optim.lr_scheduler import LambdaLR, StepLR
from pytorch_lightning.core import LightningModule
import MinkowskiEngine as ME
from examples.minkunet_sparse import MinkUNet34C, MinkUNet14A, MinkUNet34CShallow
# from examples.minkunetodd import MinkUNet34C as MinkUNet34Codd
from examples.BaseSegLightning import BaseSegmentationModule
from examples.str2bool import str2bool
from examples.basic_blocks import MLP, norm_layer
from examples.utils import interpolate_grid_feats, interpolate_sparsegrid_feats
import numpy as np
class MinkowskiSegmentationModuleLIG(BaseSegmentationModule):
    """Segmentation module decoding per-point labels from sparse grid features.

    An optional MinkUNet backbone refines sparse latent features; per-point
    logits are then produced by an MLP segmentation head fed with features
    interpolated from the sparse grid (LIG-style decoding).
    Hyper-parameters (``mink_sdf_to_seg``, ``mlp_channels``, ...) are set as
    attributes by ``BaseSegmentationModule`` from ``**kwargs``.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.mink_sdf_to_seg:
            # Backbone mapping input latents to segmentation latents
            # (same channel width in and out).
            self.model = MinkUNet34C(self.feat_channels, self.feat_channels)
        self.mlp_channels = [int(i) for i in self.mlp_channels.split(',')]
        if self.relative_mlp_channels:
            # Channels are interpreted as multipliers of the head input width.
            self.mlp_channels = (self.seg_feat_channels) * np.array(self.mlp_channels)
        else:
            # Channels are absolute; prepend the head input width.
            self.mlp_channels = [self.seg_feat_channels] + self.mlp_channels
        seg_head_list = []
        if self.seg_head_in_bn:
            # Optional input normalization for the segmentation head.
            seg_head_list.append(norm_layer(norm_type='batch', nc=self.mlp_channels[0]))
        seg_head_list += [MLP(self.mlp_channels, dropout=self.seg_head_dropout),
                          nn.Conv1d(self.mlp_channels[-1], self.num_classes, kernel_size=1, bias=True)]
        self.seg_head = nn.Sequential(*seg_head_list)
        if self.pretrained_minkunet_ckpt is not None:
            pretrained_ckpt = torch.load(self.pretrained_minkunet_ckpt)
            if 'state_dict' in pretrained_ckpt:
                pretrained_ckpt = pretrained_ckpt['state_dict']
            # Drop layers whose shapes depend on the pretraining task's
            # input/output channel counts so they are re-initialized here.
            del pretrained_ckpt['conv0p1s1.kernel']
            del pretrained_ckpt['final.kernel']
            del pretrained_ckpt['final.bias']
            self.model.load_state_dict(pretrained_ckpt, strict=False)

    def forward(self, batch):
        """Compute per-point class logits for a batch.

        ``batch`` is a dict with keys 'pts', 'feats', 'coords', 'seg_feats'.
        # assumes 'pts' is a per-sample list of point tensors and
        # 'coords'/'feats' describe a batched sparse field — TODO confirm
        Returns a single tensor of logits concatenated over the batch.
        """
        pts = batch['pts']
        lats = batch['feats']
        coords = batch['coords']
        feats = batch['seg_feats']
        in_field = ME.TensorField(
            features=lats,
            coordinates=coords,
            quantization_mode=ME.SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
            minkowski_algorithm=ME.MinkowskiAlgorithm.SPEED_OPTIMIZED,
            # minkowski_algorithm=ME.MinkowskiAlgorithm.MEMORY_EFFICIENT,
            device=self.device,
        )
        x = in_field.sparse()
        bs = len(pts)
        if self.mink_sdf_to_seg:
            sparse_lats = self.model(x)
        else:
            sparse_lats = x
        logits_list = []
        for i in range(bs):
            lat, xloc, weights = interpolate_sparsegrid_feats(pts[i], sparse_lats.coordinates_at(batch_index=i),
                                                         sparse_lats.features_at(batch_index=i),
                                                         overlap_factor=self.overlap_factor) # (num_pts, 2**dim, c), (num_pts, 2**dim, 3)
            if self.interpolate_grid_feats and self.average_xlocs:
                # Replace per-corner offsets by their mean, broadcast back.
                xloc = xloc.mean(axis=1, keepdim=True).repeat(1, lat.shape[1], 1)
            if feats[i] is not None:
                # Concatenate extra per-point features, repeated per corner.
                seg_occ_in = torch.cat([lat, xloc, feats[i].unsqueeze(1).repeat(1,lat.shape[1],1)], dim=-1)
            else:
                seg_occ_in = torch.cat([lat, xloc], dim=-1)
            weights = weights.unsqueeze(dim=-1)
            seg_occ_in = seg_occ_in.transpose(1,2)
            if self.interpolate_grid_feats:
                # Interpolate features first, then classify.
                weighted_feats = torch.bmm(seg_occ_in, weights) # (num_pts, c + 3, 1)
                logits = self.seg_head(weighted_feats).squeeze(dim=-1) # (num_pts, out_c, 1)
            else:
                # Classify per corner, then blend the predictions.
                seg_probs = self.seg_head(seg_occ_in) # (num_pts, out_c, 2**dim)
                logits = torch.bmm(seg_probs, weights).squeeze(dim=-1) # (num_pts, out_c)
            logits_list.append(logits)
        logits = torch.cat(logits_list, dim=0) # (b x num_pts, out_c)
        return logits

    def convert_sync_batchnorm(self):
        """Swap the backbone's BatchNorm layers for multi-GPU sync variants."""
        if self.mink_sdf_to_seg:
            self.model = ME.MinkowskiSyncBatchNorm.convert_sync_batchnorm(self.model)

    @staticmethod
    def add_argparse_args(parent_parser):
        """Register this module's CLI options on top of the base module's."""
        parent_parser = BaseSegmentationModule.add_argparse_args(parent_parser)
        parser = parent_parser.add_argument_group("MinkSegModelLIG")
        parser.add_argument("--interpolate_grid_feats", type=str2bool, nargs='?', const=True, default=False)
        parser.add_argument("--average_xlocs", type=str2bool, nargs='?', const=True, default=False)
        parser.add_argument("--pretrained_minkunet_ckpt", type=str, default=None)
        parser.add_argument("--shallow_model", type=str2bool, nargs='?', const=True, default=False)
        parser.add_argument("--mink_sdf_to_seg", type=str2bool, nargs='?', const=True, default=True)
        parser.add_argument("--seg_head_in_bn", type=str2bool, nargs='?', const=True, default=False)
        parser.add_argument('--seg_head_dropout', type=float, default=0.3)
        parser.add_argument("--mlp_channels", type=str, default='1,4,8,4')
        parser.add_argument("--relative_mlp_channels", type=str2bool, nargs='?', const=True, default=True)
        parser.add_argument("--mlp_extra_in_channels", type=int, default=3)
        parser.add_argument("--overlap_factor", type=int, default=2)
        return parent_parser
| [
"examples.BaseSegLightning.BaseSegmentationModule.add_argparse_args",
"examples.minkunet_sparse.MinkUNet34C",
"torch.bmm",
"torch.nn.Sequential",
"examples.basic_blocks.norm_layer",
"torch.load",
"torch.nn.Conv1d",
"torch.cat",
"numpy.array",
"examples.basic_blocks.MLP",
"MinkowskiEngine.TensorF... | [((1547, 1576), 'torch.nn.Sequential', 'nn.Sequential', (['*seg_head_list'], {}), '(*seg_head_list)\n', (1560, 1576), True, 'import torch.nn as nn\n'), ((2356, 2564), 'MinkowskiEngine.TensorField', 'ME.TensorField', ([], {'features': 'lats', 'coordinates': 'coords', 'quantization_mode': 'ME.SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE', 'minkowski_algorithm': 'ME.MinkowskiAlgorithm.SPEED_OPTIMIZED', 'device': 'self.device'}), '(features=lats, coordinates=coords, quantization_mode=ME.\n SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE, minkowski_algorithm=ME\n .MinkowskiAlgorithm.SPEED_OPTIMIZED, device=self.device)\n', (2370, 2564), True, 'import MinkowskiEngine as ME\n'), ((4689, 4718), 'torch.cat', 'torch.cat', (['logits_list'], {'dim': '(0)'}), '(logits_list, dim=0)\n', (4698, 4718), False, 'import torch\n'), ((5007, 5062), 'examples.BaseSegLightning.BaseSegmentationModule.add_argparse_args', 'BaseSegmentationModule.add_argparse_args', (['parent_parser'], {}), '(parent_parser)\n', (5047, 5062), False, 'from examples.BaseSegLightning import BaseSegmentationModule\n'), ((790, 841), 'examples.minkunet_sparse.MinkUNet34C', 'MinkUNet34C', (['self.feat_channels', 'self.feat_channels'], {}), '(self.feat_channels, self.feat_channels)\n', (801, 841), False, 'from examples.minkunet_sparse import MinkUNet34C, MinkUNet14A, MinkUNet34CShallow\n'), ((1365, 1418), 'examples.basic_blocks.MLP', 'MLP', (['self.mlp_channels'], {'dropout': 'self.seg_head_dropout'}), '(self.mlp_channels, dropout=self.seg_head_dropout)\n', (1368, 1418), False, 'from examples.basic_blocks import MLP, norm_layer\n'), ((1445, 1521), 'torch.nn.Conv1d', 'nn.Conv1d', (['self.mlp_channels[-1]', 'self.num_classes'], {'kernel_size': '(1)', 'bias': '(True)'}), '(self.mlp_channels[-1], self.num_classes, kernel_size=1, bias=True)\n', (1454, 1521), True, 'import torch.nn as nn\n'), ((1694, 1735), 'torch.load', 'torch.load', (['self.pretrained_minkunet_ckpt'], {}), 
'(self.pretrained_minkunet_ckpt)\n', (1704, 1735), False, 'import torch\n'), ((4861, 4921), 'MinkowskiEngine.MinkowskiSyncBatchNorm.convert_sync_batchnorm', 'ME.MinkowskiSyncBatchNorm.convert_sync_batchnorm', (['self.model'], {}), '(self.model)\n', (4909, 4921), True, 'import MinkowskiEngine as ME\n'), ((1032, 1059), 'numpy.array', 'np.array', (['self.mlp_channels'], {}), '(self.mlp_channels)\n', (1040, 1059), True, 'import numpy as np\n'), ((1283, 1337), 'examples.basic_blocks.norm_layer', 'norm_layer', ([], {'norm_type': '"""batch"""', 'nc': 'self.mlp_channels[0]'}), "(norm_type='batch', nc=self.mlp_channels[0])\n", (1293, 1337), False, 'from examples.basic_blocks import MLP, norm_layer\n'), ((3690, 3720), 'torch.cat', 'torch.cat', (['[lat, xloc]'], {'dim': '(-1)'}), '([lat, xloc], dim=-1)\n', (3699, 3720), False, 'import torch\n'), ((3951, 3981), 'torch.bmm', 'torch.bmm', (['seg_occ_in', 'weights'], {}), '(seg_occ_in, weights)\n', (3960, 3981), False, 'import torch\n'), ((4221, 4250), 'torch.bmm', 'torch.bmm', (['seg_probs', 'weights'], {}), '(seg_probs, weights)\n', (4230, 4250), False, 'import torch\n')] |
import logging
import cv2
import math
import random
import numpy as np
from collections import defaultdict
from itertools import combinations
from opensfm.unionfind import UnionFind
# Module-level logger for the match-filtering diagnostics below.
logger = logging.getLogger(__name__)
def load_pairwise_transforms(dataset, images):
    """Load relative transforms for ordered pairs of the given images.

    For every source image whose transform file can be read, keep the
    transforms toward destination images that are also in ``images``.
    Sources whose transforms are missing (IOError) are silently skipped.

    :param dataset: data source exposing ``load_transforms(image)``
    :param images: collection of image ids to restrict pairs to
    :return: dict mapping ``(im1, im2)`` tuples to transforms
    """
    result = {}
    for src in images:
        try:
            src_transforms = dataset.load_transforms(src)
        except IOError:
            continue
        result.update({(src, dst): src_transforms[dst]
                       for dst in src_transforms if dst in images})
    return result
def triplet_filter(data, images, matches, pairs):
    """
    find all triplets and see if they are valid. a voting scheme is used to find
    the bad edges. the voting scheme follows Cui, et al, "Efficient Large-Scale
    Structure From Motion by Fusing Auxiliary Imaging Information"
    :param data: dataset object; provides config['filtering_triplet_bad_ratio']
    :param images: list of image ids (sequence-numbered shot ids)
    :param matches: dict of pairwise matches keyed by (im1, im2); edges voted
        bad are removed from it in place
    :param pairs: dict of pairwise relative transforms keyed by (im1, im2)
    :return: the (mutated) ``matches`` dict
    """
    logger.debug("triplet filtering start")
    cnt_good = {}
    cnt_bad = {}
    # Only consider anchor pairs (i, j) at most `gap` apart in sequence number.
    gap = 6
    for i in range(len(images)):
        for j in range(i+1, i+1+gap):
            for k in range(len(images)):
                if i != k and j != k:
                    im1 = _int_to_shot_id(i)
                    im2 = _int_to_shot_id(j)
                    im3 = _int_to_shot_id(k)
                    if edge_exists_all([im1, im2, im3], matches):
                        # Each complete triplet casts one vote per edge,
                        # good or bad depending on its consistency check.
                        if is_triplet_valid([im1, im2, im3], pairs):
                            cnt = cnt_good
                        else:
                            cnt = cnt_bad
                        #incr_cnt(pairs, cnt, im1, im2)
                        incr_cnt(pairs, cnt, im2, im3)
                        incr_cnt(pairs, cnt, im3, im1)
    edges_to_remove = []
    for (im1, im2) in matches:
        good = 0
        bad = 0
        if (im1, im2) in cnt_good:
            good = cnt_good[im1, im2]
        if (im1, im2) in cnt_bad:
            bad = cnt_bad[im1, im2]
        # we will not remove any edge with small sequence number difference unless there's
        # stronger evidence.
        if abs(_shot_id_to_int(im1) - _shot_id_to_int(im2)) < gap:
            # Close-in-sequence edge: require some votes AND a high bad ratio.
            if bad+good > 3 and (bad/(bad+good)) > data.config['filtering_triplet_bad_ratio']:
                #logger.debug("removing close edge {} {} good={}, bad={}".format(im1, im2, good, bad))
                edges_to_remove.append((im1, im2))
        else:
            # Far-apart edge: remove when unsupported (no votes) or mostly bad.
            if bad+good == 0 or (bad/(bad+good)) > data.config['filtering_triplet_bad_ratio']:
                #logger.debug("removing {} {} good={}, bad={}".format(im1, im2, good, bad))
                edges_to_remove.append((im1, im2))
    # Removal is deferred until after iteration so `matches` is not mutated
    # while being traversed.
    for edge in sorted(edges_to_remove):
        logger.debug("triplet removing edge {} -{}".format(edge[0], edge[1]))
        matches.pop(edge)
    logger.debug("triplet filtering end, removed {} edges, {:2.1f}% of all".
                 format(len(edges_to_remove), 100*len(edges_to_remove)/len(pairs)))
    return matches
def loop_filter(data, images, features, matches, pairs):
    """
    if there’s an edge between (i, j) where i and j are sequence numbers far apart, check that
    there also exists an edge (i plus/minus k, j plus/minus k), where k is a small integer,
    and that the loop formed by the four nodes pass the multiplying-to-identity check. if so,
    this is a valid "quad".
    we then merge quads into clusters. each cluster is a loop candidate. we perform checks on
    the candidates to filter out bad ones, and remove all edges in them.
    :param data: dataset object; provides config['filtering_common_feature_thresh']
    :param images: list of image ids
    :param features: per-image feature data (used by the candidate checks)
    :param matches: dict of pairwise matches; bad loop edges are popped in place
    :param pairs: dict of pairwise relative transforms
    :return: the (mutated) ``matches`` dict
    """
    logger.debug("loop pass 1 filtering start")
    common_feature_thresh = data.config['filtering_common_feature_thresh']
    # TODO: cren optionize the following thresholds
    gap = 6
    edges_to_remove = []
    all_valid_triplets = []
    # Pass 1: a far-apart edge must be supported by at least one valid
    # triplet, otherwise it is removed immediately.
    for (im1, im2) in matches:
        if abs(_shot_id_to_int(im1) - _shot_id_to_int(im2)) > gap:
            valid_triplets = get_valid_triplets(im1, im2, matches, pairs)
            if valid_triplets:
                all_valid_triplets.extend(valid_triplets)
            else:
                edges_to_remove.append((im1, im2))
    for edge in sorted(edges_to_remove):
        logger.debug("loop pass 1 removing edge {} -{}".format(edge[0], edge[1]))
        matches.pop(edge)
    logger.debug("loop pass 1 filtering end, removed {} edges, {:2.1f}% of all".
                 format(len(edges_to_remove), 100*len(edges_to_remove)/len(pairs)))
    logger.debug("loop pass 2 filtering start")
    radius = gap/2
    # Deduplicate triplets before clustering.
    valid_triplets_set = set(tuple(triplet) for triplet in all_valid_triplets)
    # cluster quads into loop candidates
    loop_candidates = cluster_triplets(valid_triplets_set, radius)
    # apply various checks to figure out bad loop candidates
    bad_candidates = filter_candidates(images, loop_candidates, matches, features, pairs, common_feature_thresh)
    # remove matches in bad loop candidates
    edges_to_remove = set()
    for cand in bad_candidates:
        loop_candidates.remove(cand)
        # Drop every cross-side edge of the bad candidate (in either key
        # order), keeping edges between near-in-sequence images.
        for im1 in cand.get_ids_0():
            for im2 in cand.get_ids_1():
                if abs(_shot_id_to_int(im1) - _shot_id_to_int(im2)) > radius:
                    if (im1, im2) in matches:
                        edges_to_remove.add((im1, im2))
                    elif (im2, im1) in matches:
                        edges_to_remove.add((im2, im1))
    for edge in sorted(edges_to_remove):
        #logger.debug("loop removing edge {} -{}".format(edge[0], edge[1]))
        matches.pop(edge)
    logger.debug("loop pass 2 filtering end, removed {} edges, {:2.1f}% of all".
                 format(len(edges_to_remove), 100*len(edges_to_remove)/len(pairs)))
    return matches #, loop_candidates
def filter_candidates(images, loop_candidates, matches, features, pairs, common_feature_thresh):
    """
    Return the subset of ``loop_candidates`` judged invalid.
    Two screens are applied in order:
    1. rotation consistency (``validate_loop_rotations``); a 'badloop'
       verdict rejects the candidate outright
    2. common-feature overlap (``validate_loop_features``); candidates
       below ``common_feature_thresh`` are rejected
    :param images:
    :param loop_candidates:
    :param matches:
    :param features:
    :param pairs:
    :param common_feature_thresh:
    :return: list of rejected candidates
    """
    finder = PathFinder(images, matches, pairs)
    rejected = []
    for candidate in loop_candidates:
        rotation_verdict = validate_loop_rotations(candidate, matches, pairs, finder)
        if rotation_verdict == 'badloop':
            logger.debug("invalid loop: only bad loops found")
            rejected.append(candidate)
            continue
        # Only candidates passing the rotation screen reach the feature check.
        feature_overlap = validate_loop_features(candidate, matches, features)
        if feature_overlap < common_feature_thresh:
            logger.debug("invalid loop: missing features")
            rejected.append(candidate)
    return rejected
def _best_overlap_ratios(side_a, side_b, matches, features):
    """For each consecutive image pair on ``side_a``, compute the best
    distribution-weighted common-feature ratio against any image of ``side_b``.

    Pairs with fewer than 50 common features are skipped; only strictly
    positive best ratios are reported.

    :param side_a: sorted image ids forming consecutive pairs
    :param side_b: image ids on the opposite side of the loop candidate
    :param matches: dict of pairwise matches
    :param features: per-image feature data
    :return: list of best ratios, one per qualifying pair
    """
    ratios = []
    for a1, a2 in zip(side_a, side_a[1:]):
        ratio_max = 0
        fids_pair = common_fids(a1, a2, matches)
        if len(fids_pair) < 50:
            # Too few shared features to produce a reliable statistic.
            continue
        grid_size = math.sqrt(len(fids_pair))
        fo_pair = feature_occupancy(a1, fids_pair, features, grid_size)
        for b in side_b:
            if edge_exists_all([a1, a2, b], matches):
                ratio, fids_triple = common_ratio(a1, a2, b, matches)
                fo_triple = feature_occupancy(a1, fids_triple, features, grid_size)
                # Weight the raw ratio by how widely the triple's common
                # features are distributed relative to the pair's.
                feature_distribution_ratio = fo_triple / fo_pair
                ratio = ratio * feature_distribution_ratio
                if ratio > ratio_max:
                    ratio_max = ratio
        if ratio_max > 0:
            ratios.append(ratio_max)
    return ratios


def validate_loop_features(loop_candidate, matches, features):
    '''
    take two images n1, n2 from one side of the loop candidate, and one image m from the other side. the two images
    n1 and n2 presumably have a valid match. we compare how much features n1/n2/m have in common vs n1/n2 have in
    common. if this is a true loop, then the ratio of common features should be large. conversely, if a significant
    portion of features is missing, then this is likely to be a false loop. we also take feature distribution into
    consideration - if this is a false loop, then common features n1/n2 should be more evenly distributed in the
    image than the n1/m common features.
    this algorithm is inspired by Zach "What Can Missing Correspondences Tell Us About 3D Structure and Motion",
    although the Bayesian formulation considered there is not really necessary in our case
    :param loop_candidate:
    :param matches:
    :param features:
    :return: average best overlap ratio over both sides (0 when no pair qualifies)
    '''
    ns = sorted(loop_candidate.get_ids_0())
    ms = sorted(loop_candidate.get_ids_1())
    # The check is symmetric: evaluate consecutive pairs on each side
    # against single images from the opposite side.
    common_ratios = _best_overlap_ratios(ns, ms, matches, features)
    common_ratios += _best_overlap_ratios(ms, ns, matches, features)
    avg_common_ratio = 0
    if common_ratios:
        avg_common_ratio = sum(common_ratios) / len(common_ratios)
    logger.debug("average overlap {}".format(avg_common_ratio))
    return avg_common_ratio
def validate_loop_rotations(loop_candidate, matches, pairs, path_finder):
"""
this method returns:
'goodloop' if a valid loop has been found
'badloop' if all we found are invalid loops
'noloop' if there is no loop found (different from bad loop - the loop candidate may still be valid)
:param loop_candidate:
:param matches:
:param pairs:
:param path_finder:
:return:
"""
ret_val = 'noloop'
center_0 = int(loop_candidate.get_center_0())
center_1 = int(loop_candidate.get_center_1())
ids_0 = sorted(loop_candidate.get_ids_0())
ids_1 = sorted(loop_candidate.get_ids_1(), reverse=True)
logger.debug("loop candidate center {:4.1f}-{:4.1f}, "
"members {} - {}".format(center_0, center_1, ids_0, ids_1))
# we make a weak assumption that our path is generally free of cycles, e.g., the path wouldn't loop
# inside an apartment for more than once. this translates into an additional constraint in camera
# orientations. that is, if two images are far apart in index, and their relative rotation is close
# to zero, then their match is regarded as highly suspicious and thus rejected as bad loop.
if center_1 - center_0 > 10:
rs = []
for i in range(-1, 2):
n1 = _int_to_shot_id(center_0 + i)
n2 = _int_to_shot_id(center_1 + i)
if (n1, n2) in matches or (n2, n1) in matches:
r = get_transform(n1, n2, pairs)
rs.append(np.linalg.norm(cv2.Rodrigues(r)[0].ravel()))
#logger.debug("{} - {} rotation {}".format(n1, n2, rs[-1]))
if len(rs) > 0:
avg_rotation = sum(rs) / len(rs)
#logger.debug("average rotation {}".format(avg_rotation))
if avg_rotation < math.pi/9:
return 'badloop'
for start_id in ids_0:
for end_id in ids_1:
if start_id >= end_id:
continue
if (start_id, end_id) in matches or (end_id, start_id) in matches:
max_retries = 100
retries = 0
while retries < max_retries:
result = path_finder.findPath(start_id, end_id)
if result == 'goodloop':
return 'goodloop'
elif result == 'badloop':
# if loop was found but bad, keep retrying. remember we found bad loop
ret_val = 'badloop'
else:
# if no loop is found, break and try different start/end point
break
retries += 1
return ret_val
def common_fids(im1, im2, matches):
fids = []
if (im1, im2) in matches:
for f1, f2 in matches[im1, im2]:
fids.append(f1)
elif (im2, im1) in matches:
for f1, f2 in matches[im2, im1]:
fids.append(f2)
return fids
def feature_occupancy(im, fids, features, grid_size):
occupied = set()
for id in fids:
x, y, s = features[im][id]
occupied.add((int(x*grid_size), int(y*grid_size)))
return len(occupied)
def common_ratio(n1, n2, m, matches):
"""
calculates the ratio of # of common features of the triplet (n1, n2, m) to
# of common features of the pair (n1, n2). the larger the ratio the more
likely m is correctly related to n1, n2.
:param n1:
:param n2:
:param m:
:param matches:
:return:
"""
uf = UnionFind()
if (n1, n2) in matches:
base_cnt = len(matches[n1, n2])
for f1, f2 in matches[n1, n2]:
uf.union((n1, f1), (n2, f2))
else:
base_cnt = len(matches[n2, n1])
for f1, f2 in matches[n2, n1]:
uf.union((n2, f1), (n1, f2))
if (n1, m) in matches:
for f1, f2 in matches[n1, m]:
uf.union((n1, f1), (m, f2))
else:
for f1, f2 in matches[m, n1]:
uf.union((m, f1), (n1, f2))
if (n2, m) in matches:
for f1, f2 in matches[n2, m]:
uf.union((n2, f1), (m, f2))
else:
for f1, f2 in matches[m, n2]:
uf.union((m, f1), (n2, f2))
sets = {}
for i in uf:
p = uf[i]
if p in sets:
sets[p].append(i)
else:
sets[p] = [i]
tracks = [t for t in sets.values() if _good_track(t, 3)]
cnt = 0
fids = []
if (n1, n2) in matches:
for f1, f2 in matches[n1, n2]:
for track in tracks:
if (n1, f1) in track and (n2, f2) in track:
fids.append(f1)
cnt += 1
break
else:
for f1, f2 in matches[n2, n1]:
for track in tracks:
if (n2, f1) in track and (n1, f2) in track:
fids.append(f2)
cnt += 1
break
return cnt/base_cnt, fids
def cluster_triplets(valid_triplets, radius):
"""
merge similar triplets into loop candidates
:param valid_triplets:
:param radius:
:return:
"""
loop_candidates = []
for triplet in sorted(valid_triplets):
added = False
for cand in loop_candidates:
if cand.is_close_to(triplet):
cand.add(triplet)
added = True
#break
if not added:
new_cand = LoopCandidate(radius)
new_cand.add(triplet)
loop_candidates.append(new_cand)
# merge loop candidates that are close together
while True:
can_merge = False
for cand1, cand2 in combinations(loop_candidates, 2):
if cand1.combine(cand2):
loop_candidates.remove(cand2)
can_merge = True
break
if not can_merge:
break
# if the centers are close together, this is really not a 'loop' but a line
remove_candidates = []
for cand in loop_candidates:
if cand.get_center_1() - cand.get_center_0() < 6:
remove_candidates.append(cand)
for cand in remove_candidates:
loop_candidates.remove(cand)
return loop_candidates
def cluster_quads(valid_quads, radius):
"""
merge similar quads into loop candidates
:param valid_quads:
:param radius:
:return:
"""
loop_candidates = []
for quad in sorted(valid_quads):
added = False
for cand in loop_candidates:
if cand.is_close_to(quad):
cand.add(quad)
added = True
#break
if not added:
new_cand = LoopCandidate(radius)
new_cand.add(quad)
loop_candidates.append(new_cand)
# merge loop candidates that are close together
while True:
can_merge = False
for cand1, cand2 in combinations(loop_candidates, 2):
if cand1.combine(cand2):
loop_candidates.remove(cand2)
can_merge = True
break
if not can_merge:
break
# if the centers are close together, this is really not a 'loop' but a line
remove_candidates = []
for cand in loop_candidates:
if cand.get_center_1() - cand.get_center_0() < 6:
remove_candidates.append(cand)
for cand in remove_candidates:
loop_candidates.remove(cand)
return loop_candidates
class PathFinder:
"""
at initialization, we construct a directed graph that consists of
'trusted' edges. an edge is considered trusted if it is part of a
valid triplet.
the main utility of this class is to return a 'path' between any
two images, start_id/end_id. a 'path' is set of images in ascending
order, with a trusted edge between each neighboring pair. each path
generally goes from one image to a close neighbors at each leg, but
(occasionally and at random places) jumps over some images. this is
designed to skip a small subset of images in each path, in the event
they have bad epipolar geometry.
"""
def __init__(self, images, matches, pairs, max_jump=5):
self.path = []
self.numVertices = 0 # No. of vertices
self.start = self.finish = 0
self.pairs = pairs
self.graph = defaultdict(set) # default dictionary to store graph
for im1 in sorted(images):
for i in range(1, max_jump):
im2 = _int_to_shot_id(_shot_id_to_int(im1) + i)
for j in range(1, max_jump):
im3 = _int_to_shot_id(_shot_id_to_int(im2) + j)
if edge_exists_all([im1, im2, im3], matches):
if is_triplet_valid([im1, im2, im3], pairs):
#logger.debug("adding edge {} - {} - {}".format(im1, im2, im3))
self.addEdge(_shot_id_to_int(im1), _shot_id_to_int(im2))
self.addEdge(_shot_id_to_int(im1), _shot_id_to_int(im3))
# function to add an edge to graph
def addEdge(self, v, w):
self.graph[v].add(w) # Add w to v_s list
# A recursive function that uses visited[] to detect valid path
def findPathUtil(self, v, visited, recStack, random_skip):
# push the current node to stack
visited[v-self.start] = True
recStack[v-self.start] = True
'''
curr_path = []
for k, is_in_stack in enumerate(recStack):
if is_in_stack:
curr_path.append(_int_to_shot_id(k + self.start))
logger.debug("on stack {}".format(curr_path))
'''
# Recur until we reach the end_id. if random_skip is true, most of the time we sort the
# neighboring nodes with closest indexed image first, so that we tend to find the longest
# path. however we occasionally flip the sorting order, in order to randomly skip some
# vertices (in case they are bad)
if random_skip:
isReversed = random.choices(population=[True, False], weights=[0.1, 0.9], k=1)[0]
else:
isReversed = False
for i in sorted(self.graph[v], reverse=isReversed):
if i < self.finish:
if not visited[i-self.start]:
if self.findPathUtil(i, visited, recStack, random_skip):
return True
elif i == self.finish:
self.path = []
for j, is_in_stack in enumerate(recStack):
if is_in_stack:
self.path.append(_int_to_shot_id(j+self.start))
self.path.append(_int_to_shot_id(self.finish))
return True
# pop this node from stack
recStack[v-self.start] = False
return False
# Returns true if the graph contains a path from start id to end id, else false.
def findPath(self, start_id, end_id, random_skip=True):
self.start = _shot_id_to_int(start_id)
self.finish = _shot_id_to_int(end_id)
self.numVertices = self.finish - self.start + 1
# Mark all the vertices as not visited
visited = [False] * self.numVertices
recStack = [False] * self.numVertices
# Call the recursive helper function to detect valid path in different DFS trees
if self.findPathUtil(self.start, visited, recStack, random_skip):
#logger.debug("path {}".format(self.path))
if is_loop_valid(self.path, self.pairs):
return 'goodloop'
else:
return 'badloop'
else:
return 'noloop'
class LoopCandidate(object):
"""
Loop candidate
"""
def __init__(self, r):
self.radius = r
self.center_0 = -1
self.center_1 = -1
self.ids_0 = set()
self.ids_1 = set()
def add(self, triplet):
self.ids_0.add(triplet[0])
self.ids_1.add(triplet[2])
i = _shot_id_to_int(triplet[0])
j = _shot_id_to_int(triplet[1])
k = _shot_id_to_int(triplet[2])
if j < (i+k)/2:
self.ids_0.add(triplet[1])
else:
self.ids_1.add(triplet[1])
# update loop center
self.center_0 = self.get_average(self.ids_0)
self.center_1 = self.get_average(self.ids_1)
def get_average(self, ids):
total = 0
for id in ids:
total += _shot_id_to_int(id)
return total/len(ids)
def is_close_to(self, triplet):
return abs(self.center_0 - _shot_id_to_int(triplet[0])) < self.radius and \
abs(self.center_1 - _shot_id_to_int(triplet[2])) < self.radius
def combine(self, another):
if abs(self.get_center_0() - another.get_center_0()) < self.radius and \
abs(self.get_center_1() - another.get_center_1()) < self.radius:
self.ids_0 = self.ids_0 | another.get_ids_0()
self.ids_1 = self.ids_1 | another.get_ids_1()
# update loop center
self.center_0 = self.get_average(self.ids_0)
self.center_1 = self.get_average(self.ids_1)
return True
else:
return False
def get_center_0(self):
return self.center_0
def get_center_1(self):
return self.center_1
def get_ids_0(self):
return self.ids_0
def get_ids_1(self):
return self.ids_1
def get_valid_triplets(im1, im2, matches, pairs):
k = 3
triplets = []
ind_im1 = _shot_id_to_int(im1)
ind_im2 = _shot_id_to_int(im2)
for i in range(-k, k+1):
if i == 0:
continue
im1_neighbor = _int_to_shot_id(ind_im1+i)
im2_neighbor = _int_to_shot_id(ind_im2+i)
if edge_exists_all([im1, im1_neighbor, im2], matches):
if is_triplet_valid([im1, im1_neighbor, im2], pairs):
triplets.append(sorted((im1, im1_neighbor, im2)))
if edge_exists_all([im1, im2_neighbor, im2], matches):
if is_triplet_valid([im1, im2_neighbor, im2], pairs):
triplets.append(sorted((im1, im2_neighbor, im2)))
return triplets
def get_valid_quads(im1, im2, matches, pairs):
k = 3
quads = []
ind_im1 = _shot_id_to_int(im1)
ind_im2 = _shot_id_to_int(im2)
for i in range(ind_im1-k, ind_im1+k+1):
if i == ind_im1:
continue
for j in range(ind_im2 - k, ind_im2 + k + 1):
if j == ind_im2:
continue
im1_neighbor = _int_to_shot_id(i)
im2_neighbor = _int_to_shot_id(j)
if edge_exists_all([im1, im1_neighbor, im2, im2_neighbor], matches):
if is_triplet_valid([im1, im1_neighbor, im2], pairs) and \
is_triplet_valid([im2, im2_neighbor, im1_neighbor], pairs) and \
is_triplet_valid([im1, im1_neighbor, im2_neighbor], pairs) and \
is_triplet_valid([im2, im2_neighbor, im1], pairs):
quads.append(sorted((im1, im1_neighbor, im2, im2_neighbor)))
'''
if edge_exists_all([im1, im1_neighbor, im2], matches) and \
edge_exists_all([im1_neighbor, im2_neighbor, im2], matches):
quads.append(sorted([im1, im1_neighbor, im2, im2_neighbor]))
'''
return quads
def incr_cnt(pairs, cnt, i, j):
if (i, j) in pairs:
if (i, j) in cnt:
cnt[i, j] = cnt[i, j] + 1
else:
cnt[i, j] = 1
else:
if (j, i) in cnt:
cnt[j, i] = cnt[j, i] + 1
else:
cnt[j, i] = 1
def get_transform(i, j, pairs):
R = np.array([])
if (i, j) in pairs:
R = pairs[i, j][:, :3]
elif (j, i) in pairs:
R = pairs[j, i][:, :3].T
return R
def edge_exists(im1, im2, matches):
return (im1, im2) in matches or (im2, im1) in matches
def edge_exists_all(node_list, matches):
for im1, im2 in combinations(node_list, 2):
if not edge_exists(im1, im2, matches):
return False
return True
def is_triplet_valid(triplet, pairs):
'''
Rji = get_transform(i, j, pairs)
Rkj = get_transform(j, k, pairs)
Rik = get_transform(k, i, pairs)
if np.linalg.norm(cv2.Rodrigues(Rik.dot(Rkj.dot(Rji)))[0].ravel()) < math.pi/12:
return True
else:
return False
'''
return is_loop_valid(triplet, pairs, thresh=math.pi/18)
def is_loop_valid(path, pairs, thresh=math.pi/9):
R = np.identity(3, dtype=float)
for n1, n2 in zip(path, path[1:]):
r = get_transform(n1, n2, pairs)
if r.size == 0:
return False
R = r.dot(R)
r = get_transform(path[-1], path[0], pairs)
if r.size == 0:
return False
R = r.dot(R)
#logger.debug("error={} thresh={}".format(np.linalg.norm(cv2.Rodrigues(R)[0].ravel()), thresh))
if np.linalg.norm(cv2.Rodrigues(R)[0].ravel()) < thresh:
return True
else:
return False
def rotation_close_to_preint(im1, im2, T, pdr_shots_dict):
"""
compare relative rotation of robust matching to that of imu gyro preintegration,
if they are not close, it is considered to be an erroneous epipoar geometry
"""
if abs(_shot_id_to_int(im1) - _shot_id_to_int(im2)) >= 5:
# because of drift, we don't perform pre-integration check if im1 and im2 are
# far apart in sequence number
return True
# calculate relative rotation from preintegrated gyro input
preint_im1_rot = cv2.Rodrigues(np.asarray([pdr_shots_dict[im1][7], pdr_shots_dict[im1][8], pdr_shots_dict[im1][9]]))[0]
preint_im2_rot = cv2.Rodrigues(np.asarray([pdr_shots_dict[im2][7], pdr_shots_dict[im2][8], pdr_shots_dict[im2][9]]))[0]
preint_rel_rot = np.dot(preint_im2_rot, preint_im1_rot.T)
# convert this rotation from sensor frame to camera frame
b_to_c = np.asarray([1, 0, 0, 0, 0, -1, 0, 1, 0]).reshape(3, 3)
preint_rel_rot = cv2.Rodrigues(b_to_c.dot(cv2.Rodrigues(preint_rel_rot)[0].ravel()))[0]
# get relative rotation from T obtained from robust matching
robust_match_rel_rot = T[:, :3]
# calculate difference between the two relative rotations. this is the geodesic distance
# see <NAME> "Metrics for 3D Rotations: Comparison and Analysis" equation 23
diff_rot = np.dot(preint_rel_rot, robust_match_rel_rot.T)
geo_diff = np.linalg.norm(cv2.Rodrigues(diff_rot)[0].ravel())
if geo_diff < math.pi/6.0:
logger.debug("{} {} preint/robust geodesic {} within threshold".format(im1, im2, geo_diff))
return True
else:
#logger.debug("preint rel rot axis/angle = {}".format(_get_axis_angle(preint_rel_rot)))
#logger.debug("robust rel rot axis/angle = {}".format(_get_axis_angle(robust_match_rel_rot)))
logger.debug("{} {} preint/robust geodesic {} exceeds threshold".format(im1, im2, geo_diff))
return False
def _get_axis_angle(rot_mat):
axis_angle = cv2.Rodrigues(rot_mat)[0].ravel()
angle = np.linalg.norm(axis_angle)
axis = axis_angle / angle
return axis, angle
def _shot_id_to_int(shot_id):
"""
Returns: shot id to integer
"""
tokens = shot_id.split(".")
return int(tokens[0])
def _int_to_shot_id(shot_int):
"""
Returns: integer to shot id
"""
return str(shot_int).zfill(10) + ".jpg"
def _prev_shot_id(curr_shot_id):
"""
Returns: previous shot id
"""
return _int_to_shot_id(_shot_id_to_int(curr_shot_id) - 1)
def _next_shot_id(curr_shot_id):
"""
Returns: next shot id
"""
return _int_to_shot_id(_shot_id_to_int(curr_shot_id) + 1)
def _good_track(track, min_length):
if len(track) < min_length:
return False
images = [f[0] for f in track]
if len(images) != len(set(images)):
return False
return True
| [
"opensfm.unionfind.UnionFind",
"numpy.asarray",
"random.choices",
"numpy.identity",
"collections.defaultdict",
"itertools.combinations",
"cv2.Rodrigues",
"numpy.array",
"numpy.linalg.norm",
"numpy.dot",
"logging.getLogger"
] | [((195, 222), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (212, 222), False, 'import logging\n'), ((13083, 13094), 'opensfm.unionfind.UnionFind', 'UnionFind', ([], {}), '()\n', (13092, 13094), False, 'from opensfm.unionfind import UnionFind\n'), ((25233, 25245), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (25241, 25245), True, 'import numpy as np\n'), ((25533, 25559), 'itertools.combinations', 'combinations', (['node_list', '(2)'], {}), '(node_list, 2)\n', (25545, 25559), False, 'from itertools import combinations\n'), ((26075, 26102), 'numpy.identity', 'np.identity', (['(3)'], {'dtype': 'float'}), '(3, dtype=float)\n', (26086, 26102), True, 'import numpy as np\n'), ((27356, 27396), 'numpy.dot', 'np.dot', (['preint_im2_rot', 'preint_im1_rot.T'], {}), '(preint_im2_rot, preint_im1_rot.T)\n', (27362, 27396), True, 'import numpy as np\n'), ((27912, 27958), 'numpy.dot', 'np.dot', (['preint_rel_rot', 'robust_match_rel_rot.T'], {}), '(preint_rel_rot, robust_match_rel_rot.T)\n', (27918, 27958), True, 'import numpy as np\n'), ((28602, 28628), 'numpy.linalg.norm', 'np.linalg.norm', (['axis_angle'], {}), '(axis_angle)\n', (28616, 28628), True, 'import numpy as np\n'), ((15205, 15237), 'itertools.combinations', 'combinations', (['loop_candidates', '(2)'], {}), '(loop_candidates, 2)\n', (15217, 15237), False, 'from itertools import combinations\n'), ((16435, 16467), 'itertools.combinations', 'combinations', (['loop_candidates', '(2)'], {}), '(loop_candidates, 2)\n', (16447, 16467), False, 'from itertools import combinations\n'), ((17875, 17891), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (17886, 17891), False, 'from collections import defaultdict\n'), ((27122, 27211), 'numpy.asarray', 'np.asarray', (['[pdr_shots_dict[im1][7], pdr_shots_dict[im1][8], pdr_shots_dict[im1][9]]'], {}), '([pdr_shots_dict[im1][7], pdr_shots_dict[im1][8], pdr_shots_dict[\n im1][9]])\n', (27132, 27211), True, 'import numpy as np\n'), 
((27246, 27335), 'numpy.asarray', 'np.asarray', (['[pdr_shots_dict[im2][7], pdr_shots_dict[im2][8], pdr_shots_dict[im2][9]]'], {}), '([pdr_shots_dict[im2][7], pdr_shots_dict[im2][8], pdr_shots_dict[\n im2][9]])\n', (27256, 27335), True, 'import numpy as np\n'), ((27473, 27513), 'numpy.asarray', 'np.asarray', (['[1, 0, 0, 0, 0, -1, 0, 1, 0]'], {}), '([1, 0, 0, 0, 0, -1, 0, 1, 0])\n', (27483, 27513), True, 'import numpy as np\n'), ((19575, 19640), 'random.choices', 'random.choices', ([], {'population': '[True, False]', 'weights': '[0.1, 0.9]', 'k': '(1)'}), '(population=[True, False], weights=[0.1, 0.9], k=1)\n', (19589, 19640), False, 'import random\n'), ((28556, 28578), 'cv2.Rodrigues', 'cv2.Rodrigues', (['rot_mat'], {}), '(rot_mat)\n', (28569, 28578), False, 'import cv2\n'), ((27989, 28012), 'cv2.Rodrigues', 'cv2.Rodrigues', (['diff_rot'], {}), '(diff_rot)\n', (28002, 28012), False, 'import cv2\n'), ((26483, 26499), 'cv2.Rodrigues', 'cv2.Rodrigues', (['R'], {}), '(R)\n', (26496, 26499), False, 'import cv2\n'), ((27574, 27603), 'cv2.Rodrigues', 'cv2.Rodrigues', (['preint_rel_rot'], {}), '(preint_rel_rot)\n', (27587, 27603), False, 'import cv2\n'), ((11092, 11108), 'cv2.Rodrigues', 'cv2.Rodrigues', (['r'], {}), '(r)\n', (11105, 11108), False, 'import cv2\n')] |
"""
Based on HybridZonotope from DfifAI (https://github.com/eth-sri/diffai/blob/master/ai.py)
"""
import numpy as np
import torch
import torch.nn.functional as F
def clamp_image(x, eps):
min_x = torch.clamp(x-eps, min=0)
max_x = torch.clamp(x+eps, max=1)
x_center = 0.5 * (max_x + min_x)
x_beta = 0.5 * (max_x - min_x)
return x_center, x_beta
def get_new_errs(should_box, newhead, newbeta):
new_err_pos = (should_box.long().sum(dim=0) > 0).nonzero()
num_new_errs = new_err_pos.size()[0]
nnz = should_box.nonzero()
if len(newhead.size()) == 2:
batch_size, n = newhead.size()[0], newhead.size()[1]
ids_mat = torch.zeros(n, dtype=torch.long).to(newhead.device)
ids_mat[new_err_pos[:, 0]] = torch.arange(num_new_errs).to(newhead.device)
beta_values = newbeta[nnz[:, 0], nnz[:, 1]]
new_errs = torch.zeros((num_new_errs, batch_size, n)).to(newhead.device, dtype=newhead.dtype)
err_ids = ids_mat[nnz[:, 1]]
new_errs[err_ids, nnz[:, 0], nnz[:, 1]] = beta_values
else:
batch_size, n_channels, img_dim = newhead.size()[0], newhead.size()[1], newhead.size()[2]
ids_mat = torch.zeros((n_channels, img_dim, img_dim), dtype=torch.long).to(newhead.device)
ids_mat[new_err_pos[:, 0], new_err_pos[:, 1], new_err_pos[:, 2]] = torch.arange(num_new_errs).to(newhead.device)
beta_values = newbeta[nnz[:, 0], nnz[:, 1], nnz[:, 2], nnz[:, 3]]
new_errs = torch.zeros((num_new_errs, batch_size, n_channels, img_dim, img_dim)).to(newhead.device, dtype=newhead.dtype)
err_ids = ids_mat[nnz[:, 1], nnz[:, 2], nnz[:, 3]]
new_errs[err_ids, nnz[:, 0], nnz[:, 1], nnz[:, 2], nnz[:, 3]] = beta_values
return new_errs
class HybridZonotope:
def __init__(self, head, beta, errors, domain):
self.head = head
self.beta = beta
self.errors = errors
self.domain = domain
self.device = self.head.device
assert not torch.isnan(self.head).any()
assert self.beta is None or (not torch.isnan(self.beta).any())
assert self.errors is None or (not torch.isnan(self.errors).any())
@staticmethod
def zonotope_from_noise(x, eps, domain, dtype=torch.float32):
batch_size = x.size()[0]
n_elements = x[0].numel()
ei = torch.eye(n_elements).expand(batch_size, n_elements, n_elements).permute(1, 0, 2).to(x.device)
x_center, x_beta = clamp_image(x, eps)
x_center, x_beta = x_center.to(dtype=dtype), x_beta.to(dtype=dtype)
if len(x.size()) > 2:
ei = ei.contiguous().view(n_elements, *x.size())
return HybridZonotope(x_center, None, ei * x_beta.unsqueeze(0), domain)
@staticmethod
def box_from_noise(x, eps):
x_center, x_beta = clamp_image(x, eps)
return HybridZonotope(x_center, x_beta, None, 'zono')
def size(self):
return self.head.size()
def view(self, size):
return HybridZonotope(self.head.view(*size),
None if self.beta is None else self.beta.view(size),
None if self.errors is None else self.errors.view(self.errors.size()[0], *size),
self.domain)
def normalize(self, mean, sigma):
return (self - mean) / sigma
def __sub__(self, other):
if isinstance(other, torch.Tensor):
return HybridZonotope(self.head - other, self.beta, self.errors, self.domain)
else:
assert False, 'Unknown type of other object'
def __add__(self, other):
if isinstance(other, torch.Tensor):
return HybridZonotope(self.head + other, self.beta, self.errors, self.domain)
else:
assert False, 'Unknown type of other object'
def __truediv__(self, other):
if isinstance(other, torch.Tensor):
return HybridZonotope(self.head / other,
None if self.beta is None else self.beta / abs(other),
None if self.errors is None else self.errors / other,
self.domain)
else:
assert False, 'Unknown type of other object'
def clone(self):
return HybridZonotope(self.head.clone(),
None if self.beta is None else self.beta.clone(),
None if self.errors is None else self.errors.clone(),
self.domain)
def detach(self):
return HybridZonotope(self.head.detach(),
None if self.beta is None else self.beta.detach(),
None if self.errors is None else self.errors.detach(),
self.domain)
def avg_pool2d(self, kernel_size, stride):
new_head = F.avg_pool2d(self.head, kernel_size, stride)
new_beta = None if self.beta is None else F.avg_pool2d(self.beta.view(-1, *self.head.shape[1:]), kernel_size, stride)
new_errors = None if self.errors is None else F.avg_pool2d(self.errors.view(-1, *self.head.shape[1:]), kernel_size, stride)
return HybridZonotope(new_head, new_beta, new_errors, self.domain)
def conv2d(self, weight, bias, stride, padding, dilation, groups):
new_head = F.conv2d(self.head, weight, bias, stride, padding, dilation, groups)
new_beta = None if self.beta is None else F.conv2d(self.beta, weight.abs(), None, stride, padding, dilation, groups)
if self.errors is not None:
errors_resized = self.errors.view(-1, *self.errors.size()[2:])
new_errors = F.conv2d(errors_resized, weight, None, stride, padding, dilation, groups)
new_errors = new_errors.view(self.errors.size()[0], self.errors.size()[1], *new_errors.size()[1:])
else:
new_errors = None
return HybridZonotope(new_head, new_beta, new_errors, self.domain)
def linear(self, weight, bias):
return self.matmul(weight.t()) + bias.unsqueeze(0)
def matmul(self, other):
return HybridZonotope(self.head.matmul(other),
None if self.beta is None else self.beta.matmul(other.abs()),
None if self.errors is None else self.errors.matmul(other),
self.domain)
def relu(self, deepz_lambda, bounds, init_lambda):
if self.errors is None:
min_relu, max_relu = F.relu(self.head - self.beta), F.relu(self.head + self.beta)
return HybridZonotope(0.5 * (max_relu + min_relu), 0.5 * (max_relu - min_relu), None, self.domain)
assert self.beta is None
delta = torch.sum(torch.abs(self.errors), 0)
lb, ub = self.head - delta, self.head + delta
if bounds is not None:
lb_refined, ub_refined = bounds
lb = torch.max(lb_refined, lb)
ub = torch.min(ub_refined, ub)
is_cross = (lb < 0) & (ub > 0)
D = 1e-6
relu_lambda = torch.where(is_cross, ub/(ub-lb+D), (lb >= 0).float())
if self.domain == 'zono_iter':
if init_lambda:
# print(relu_lambda.size())
# print(deepz_lambda.size())
deepz_lambda.data = relu_lambda.data.squeeze(0)
assert (deepz_lambda >= 0).all() and (deepz_lambda <= 1).all()
relu_lambda_cross = deepz_lambda.unsqueeze(0)
relu_mu_cross = torch.where(relu_lambda_cross < relu_lambda, 0.5*ub*(1-relu_lambda_cross), -0.5*relu_lambda_cross*lb)
# relu_lambda_cross = deepz_lambda * relu_lambda
# relu_mu_cross = 0.5*ub*(1-relu_lambda_cross)
# relu_lambda_cross = relu_lambda + (1 - deepz_lambda) * (1 - relu_lambda)
# relu_mu_cross = -0.5*relu_lambda_cross*lb
relu_lambda = torch.where(is_cross, relu_lambda_cross, (lb >= 0).float())
relu_mu = torch.where(is_cross, relu_mu_cross, torch.zeros(lb.size()).to(self.device))
else:
relu_mu = torch.where(is_cross, -0.5*ub*lb/(ub-lb+D), torch.zeros(lb.size()).to(self.device))
assert (not torch.isnan(relu_mu).any()) and (not torch.isnan(relu_lambda).any())
new_head = self.head * relu_lambda + relu_mu
old_errs = self.errors * relu_lambda
new_errs = get_new_errs(is_cross, new_head, relu_mu)
new_errors = torch.cat([old_errs, new_errs], dim=0)
assert (not torch.isnan(new_head).any()) and (not torch.isnan(new_errors).any())
return HybridZonotope(new_head, None, new_errors, self.domain)
def concretize(self):
delta = 0
if self.beta is not None:
delta = delta + self.beta
if self.errors is not None:
delta = delta + self.errors.abs().sum(0)
return self.head - delta, self.head + delta
def avg_width(self):
lb, ub = self.concretize()
return (ub - lb).mean()
def is_greater(self, i, j):
if self.errors is not None:
diff_errors = (self.errors[:, :, i] - self.errors[:, :, j]).abs().sum(dim=0)
diff_head = self.head[:, i] - self.head[:, j]
delta = diff_head - diff_errors
if self.beta is not None:
delta -= self.beta[:, i].abs() + self.beta[:, j].abs()
return delta, delta > 0
else:
diff_head = (self.head[:, i] - self.head[:, j])
diff_beta = (self.beta[:, i] + self.beta[:, j]).abs()
delta = (diff_head - diff_beta)
return delta, delta > 0
def verify(self, targets):
n_class = self.head.size()[1]
verified = torch.zeros(targets.size(), dtype=torch.uint8).to(self.head.device)
verified_corr = torch.zeros(targets.size(), dtype=torch.uint8).to(self.head.device)
for i in range(n_class):
isg = torch.ones(targets.size(), dtype=torch.uint8).to(self.head.device)
for j in range(n_class):
if i != j:
_, ok = self.is_greater(i, j)
isg = isg & ok.byte()
verified = verified | isg
verified_corr = verified_corr | (targets.eq(i).byte() & isg)
return verified, verified_corr
def get_min_diff(self, i, j):
""" returns minimum of logit[i] - logit[j] """
return self.is_greater(i, j)[0]
def get_wc_logits(self, targets):
batch_size = targets.size()[0]
lb, ub = self.concretize()
wc_logits = ub
wc_logits[np.arange(batch_size), targets] = lb[np.arange(batch_size), targets]
return wc_logits
def ce_loss(self, targets):
wc_logits = self.get_wc_logits(targets)
return F.cross_entropy(wc_logits, targets)
| [
"torch.isnan",
"torch.eye",
"torch.where",
"torch.nn.functional.avg_pool2d",
"torch.nn.functional.conv2d",
"torch.nn.functional.cross_entropy",
"torch.cat",
"torch.clamp",
"torch.max",
"torch.arange",
"numpy.arange",
"torch.nn.functional.relu",
"torch.zeros",
"torch.abs",
"torch.min"
] | [((201, 228), 'torch.clamp', 'torch.clamp', (['(x - eps)'], {'min': '(0)'}), '(x - eps, min=0)\n', (212, 228), False, 'import torch\n'), ((239, 266), 'torch.clamp', 'torch.clamp', (['(x + eps)'], {'max': '(1)'}), '(x + eps, max=1)\n', (250, 266), False, 'import torch\n'), ((4848, 4892), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['self.head', 'kernel_size', 'stride'], {}), '(self.head, kernel_size, stride)\n', (4860, 4892), True, 'import torch.nn.functional as F\n'), ((5317, 5385), 'torch.nn.functional.conv2d', 'F.conv2d', (['self.head', 'weight', 'bias', 'stride', 'padding', 'dilation', 'groups'], {}), '(self.head, weight, bias, stride, padding, dilation, groups)\n', (5325, 5385), True, 'import torch.nn.functional as F\n'), ((8413, 8451), 'torch.cat', 'torch.cat', (['[old_errs, new_errs]'], {'dim': '(0)'}), '([old_errs, new_errs], dim=0)\n', (8422, 8451), False, 'import torch\n'), ((10735, 10770), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['wc_logits', 'targets'], {}), '(wc_logits, targets)\n', (10750, 10770), True, 'import torch.nn.functional as F\n'), ((5647, 5720), 'torch.nn.functional.conv2d', 'F.conv2d', (['errors_resized', 'weight', 'None', 'stride', 'padding', 'dilation', 'groups'], {}), '(errors_resized, weight, None, stride, padding, dilation, groups)\n', (5655, 5720), True, 'import torch.nn.functional as F\n'), ((6709, 6731), 'torch.abs', 'torch.abs', (['self.errors'], {}), '(self.errors)\n', (6718, 6731), False, 'import torch\n'), ((6883, 6908), 'torch.max', 'torch.max', (['lb_refined', 'lb'], {}), '(lb_refined, lb)\n', (6892, 6908), False, 'import torch\n'), ((6926, 6951), 'torch.min', 'torch.min', (['ub_refined', 'ub'], {}), '(ub_refined, ub)\n', (6935, 6951), False, 'import torch\n'), ((7469, 7584), 'torch.where', 'torch.where', (['(relu_lambda_cross < relu_lambda)', '(0.5 * ub * (1 - relu_lambda_cross))', '(-0.5 * relu_lambda_cross * lb)'], {}), '(relu_lambda_cross < relu_lambda, 0.5 * ub * (1 -\n relu_lambda_cross), -0.5 
* relu_lambda_cross * lb)\n', (7480, 7584), False, 'import torch\n'), ((662, 694), 'torch.zeros', 'torch.zeros', (['n'], {'dtype': 'torch.long'}), '(n, dtype=torch.long)\n', (673, 694), False, 'import torch\n'), ((751, 777), 'torch.arange', 'torch.arange', (['num_new_errs'], {}), '(num_new_errs)\n', (763, 777), False, 'import torch\n'), ((868, 910), 'torch.zeros', 'torch.zeros', (['(num_new_errs, batch_size, n)'], {}), '((num_new_errs, batch_size, n))\n', (879, 910), False, 'import torch\n'), ((1176, 1237), 'torch.zeros', 'torch.zeros', (['(n_channels, img_dim, img_dim)'], {'dtype': 'torch.long'}), '((n_channels, img_dim, img_dim), dtype=torch.long)\n', (1187, 1237), False, 'import torch\n'), ((1332, 1358), 'torch.arange', 'torch.arange', (['num_new_errs'], {}), '(num_new_errs)\n', (1344, 1358), False, 'import torch\n'), ((1471, 1540), 'torch.zeros', 'torch.zeros', (['(num_new_errs, batch_size, n_channels, img_dim, img_dim)'], {}), '((num_new_errs, batch_size, n_channels, img_dim, img_dim))\n', (1482, 1540), False, 'import torch\n'), ((6478, 6507), 'torch.nn.functional.relu', 'F.relu', (['(self.head - self.beta)'], {}), '(self.head - self.beta)\n', (6484, 6507), True, 'import torch.nn.functional as F\n'), ((6509, 6538), 'torch.nn.functional.relu', 'F.relu', (['(self.head + self.beta)'], {}), '(self.head + self.beta)\n', (6515, 6538), True, 'import torch.nn.functional as F\n'), ((10545, 10566), 'numpy.arange', 'np.arange', (['batch_size'], {}), '(batch_size)\n', (10554, 10566), True, 'import numpy as np\n'), ((10582, 10603), 'numpy.arange', 'np.arange', (['batch_size'], {}), '(batch_size)\n', (10591, 10603), True, 'import numpy as np\n'), ((1987, 2009), 'torch.isnan', 'torch.isnan', (['self.head'], {}), '(self.head)\n', (1998, 2009), False, 'import torch\n'), ((2057, 2079), 'torch.isnan', 'torch.isnan', (['self.beta'], {}), '(self.beta)\n', (2068, 2079), False, 'import torch\n'), ((2130, 2154), 'torch.isnan', 'torch.isnan', (['self.errors'], {}), '(self.errors)\n', 
(2141, 2154), False, 'import torch\n'), ((8163, 8183), 'torch.isnan', 'torch.isnan', (['relu_mu'], {}), '(relu_mu)\n', (8174, 8183), False, 'import torch\n'), ((8200, 8224), 'torch.isnan', 'torch.isnan', (['relu_lambda'], {}), '(relu_lambda)\n', (8211, 8224), False, 'import torch\n'), ((8472, 8493), 'torch.isnan', 'torch.isnan', (['new_head'], {}), '(new_head)\n', (8483, 8493), False, 'import torch\n'), ((8510, 8533), 'torch.isnan', 'torch.isnan', (['new_errors'], {}), '(new_errors)\n', (8521, 8533), False, 'import torch\n'), ((2327, 2348), 'torch.eye', 'torch.eye', (['n_elements'], {}), '(n_elements)\n', (2336, 2348), False, 'import torch\n')] |
# Copyright 2021 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from microtc.textmodel import TextModel
from microtc.params import OPTION_NONE
from glob import glob
from collections import Counter
import numpy as np
from scipy.optimize import minimize
from matplotlib import pylab as plt
from nltk.stem.porter import PorterStemmer
from typing import Callable, Iterable
# Tokenizer that leaves the text essentially untouched: no normalization of
# numbers / user mentions / URLs / emojis / hashtags / entities, original
# case kept, no de-duplication, punctuation and diacritics preserved.
# token_list=[-1] appears to select word-level tokens -- confirm against
# the microtc documentation.
tm = TextModel(num_option=OPTION_NONE,
               usr_option=OPTION_NONE,
               url_option=OPTION_NONE,
               emo_option=OPTION_NONE,
               hashtag_option=OPTION_NONE,
               ent_option=OPTION_NONE,
               lc=False, del_dup=False, del_punc=False,
               del_diac=False, token_list=[-1])
tm.tokenize("Hello good morning")  # quick smoke test of the tokenizer
# Count the number of words
def N_tokens_types(fname: str,
                   counter: Counter,
                   tm: Callable[[str], Iterable[str]]):
    """Tokenize the file *fname*, fold its tokens into *counter*, and
    return the running corpus statistics used for Heaps' law.

    Args:
        fname: Path of the text file to read.
        counter: Running token-frequency counter; updated in place so
            successive calls accumulate over a whole corpus.
        tm: Tokenizer mapping a document string to an iterable of tokens.

    Returns:
        Tuple ``(N, V)``: total number of tokens seen so far and the
        current vocabulary size.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original `open(fname).read()` relied on the garbage collector.
    with open(fname) as fh:
        txt = fh.read()
    counter.update(tm(txt))
    # sum() accepts the values view directly; no intermediate list needed.
    N = sum(counter.values())
    return N, len(counter)
# Heaps'-law data: accumulate (N, |V|) pairs over each book in books/ and
# plot vocabulary size against corpus size.
counter = Counter()
heaps = [N_tokens_types(fname, counter, tm.tokenize)
         for fname in glob("books/*.txt")]
plt.plot([x for x, _ in heaps], [x for _, x in heaps], '*')
plt.grid()
plt.xlabel("N")
plt.ylabel("|V|")
plt.tight_layout()
plt.savefig("heaps-law2.png", dpi=300)
def error(coef):
    """Sum of squared residuals of the Heaps-law model ``V = k * N**b``.

    ``coef`` is ``(k, b)``; the observed ``(N, V)`` pairs are read from the
    module-level ``heaps`` list.
    """
    k, b = coef[0], coef[1]
    n_obs = np.array([n for n, _ in heaps])
    v_obs = [v for _, v in heaps]
    residual = v_obs - k * n_obs ** b
    return (residual ** 2).sum()
# Fit the Heaps-law coefficients (k, b) by least squares and overlay the
# fitted curve on the observed data.
res = minimize(error, [1, 0.7],
               method='nelder-mead',
               options={'disp': True})
plt.plot([x for x, _ in heaps], [x for _, x in heaps], '.')
plt.grid()
x = np.array([x for x, _ in heaps])
hy = res.x[0] * x**res.x[1]
plt.plot(x, hy)
plt.xlabel("N")
plt.ylabel("|V|")
plt.tight_layout()
plt.savefig("heaps-law3.png", dpi=300)
# Stemming demo (unrelated to the fit above).
stemmer = PorterStemmer()
stemmer.stem("playing")
## Using another tokenizer
# Same pipeline, but with lower-casing enabled (lc=True), to see how the
# fitted coefficients change.
tm = TextModel(num_option=OPTION_NONE,
               usr_option=OPTION_NONE,
               url_option=OPTION_NONE,
               emo_option=OPTION_NONE,
               hashtag_option=OPTION_NONE,
               ent_option=OPTION_NONE,
               lc=True, del_dup=False, del_punc=False,
               del_diac=False, token_list=[-1])
counter = Counter()
heaps = [N_tokens_types(fname, counter, tm.tokenize)
         for fname in glob("books/*.txt")]
res = minimize(error, [1, 0.7],
               method='nelder-mead',
               options={'disp': True})
res.x
def n_grams(words: list, n: int):
    """Return the word n-grams of *words*, each joined with '~'.

    >>> n_grams(['a', 'b', 'c'], 2)
    ['a~b', 'b~c']
    """
    shifted = (words[offset:] for offset in range(n))
    return ["~".join(gram) for gram in zip(*shifted)]
words = ['a', 'b', 'c', 'd']
n_grams(words, 2)
# ['a~b', 'b~c', 'c~d']
n_grams(words, 3)
# ['a~b~c', 'b~c~d']
n_grams(words, 4)
# ['a~b~c~d'] | [
"matplotlib.pylab.savefig",
"scipy.optimize.minimize",
"nltk.stem.porter.PorterStemmer",
"matplotlib.pylab.ylabel",
"matplotlib.pylab.plot",
"numpy.array",
"matplotlib.pylab.tight_layout",
"glob.glob",
"collections.Counter",
"matplotlib.pylab.xlabel",
"matplotlib.pylab.grid",
"microtc.textmode... | [((879, 1123), 'microtc.textmodel.TextModel', 'TextModel', ([], {'num_option': 'OPTION_NONE', 'usr_option': 'OPTION_NONE', 'url_option': 'OPTION_NONE', 'emo_option': 'OPTION_NONE', 'hashtag_option': 'OPTION_NONE', 'ent_option': 'OPTION_NONE', 'lc': '(False)', 'del_dup': '(False)', 'del_punc': '(False)', 'del_diac': '(False)', 'token_list': '[-1]'}), '(num_option=OPTION_NONE, usr_option=OPTION_NONE, url_option=\n OPTION_NONE, emo_option=OPTION_NONE, hashtag_option=OPTION_NONE,\n ent_option=OPTION_NONE, lc=False, del_dup=False, del_punc=False,\n del_diac=False, token_list=[-1])\n', (888, 1123), False, 'from microtc.textmodel import TextModel\n'), ((1573, 1582), 'collections.Counter', 'Counter', ([], {}), '()\n', (1580, 1582), False, 'from collections import Counter\n'), ((1681, 1740), 'matplotlib.pylab.plot', 'plt.plot', (['[x for x, _ in heaps]', '[x for _, x in heaps]', '"""*"""'], {}), "([x for x, _ in heaps], [x for _, x in heaps], '*')\n", (1689, 1740), True, 'from matplotlib import pylab as plt\n'), ((1741, 1751), 'matplotlib.pylab.grid', 'plt.grid', ([], {}), '()\n', (1749, 1751), True, 'from matplotlib import pylab as plt\n'), ((1752, 1767), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""N"""'], {}), "('N')\n", (1762, 1767), True, 'from matplotlib import pylab as plt\n'), ((1768, 1785), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""|V|"""'], {}), "('|V|')\n", (1778, 1785), True, 'from matplotlib import pylab as plt\n'), ((1786, 1804), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1802, 1804), True, 'from matplotlib import pylab as plt\n'), ((1805, 1843), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""heaps-law2.png"""'], {'dpi': '(300)'}), "('heaps-law2.png', dpi=300)\n", (1816, 1843), True, 'from matplotlib import pylab as plt\n'), ((2002, 2073), 'scipy.optimize.minimize', 'minimize', (['error', '[1, 0.7]'], {'method': '"""nelder-mead"""', 'options': "{'disp': True}"}), "(error, [1, 0.7], 
method='nelder-mead', options={'disp': True})\n", (2010, 2073), False, 'from scipy.optimize import minimize\n'), ((2107, 2166), 'matplotlib.pylab.plot', 'plt.plot', (['[x for x, _ in heaps]', '[x for _, x in heaps]', '"""."""'], {}), "([x for x, _ in heaps], [x for _, x in heaps], '.')\n", (2115, 2166), True, 'from matplotlib import pylab as plt\n'), ((2167, 2177), 'matplotlib.pylab.grid', 'plt.grid', ([], {}), '()\n', (2175, 2177), True, 'from matplotlib import pylab as plt\n'), ((2182, 2213), 'numpy.array', 'np.array', (['[x for x, _ in heaps]'], {}), '([x for x, _ in heaps])\n', (2190, 2213), True, 'import numpy as np\n'), ((2242, 2257), 'matplotlib.pylab.plot', 'plt.plot', (['x', 'hy'], {}), '(x, hy)\n', (2250, 2257), True, 'from matplotlib import pylab as plt\n'), ((2258, 2273), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""N"""'], {}), "('N')\n", (2268, 2273), True, 'from matplotlib import pylab as plt\n'), ((2274, 2291), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""|V|"""'], {}), "('|V|')\n", (2284, 2291), True, 'from matplotlib import pylab as plt\n'), ((2292, 2310), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2308, 2310), True, 'from matplotlib import pylab as plt\n'), ((2311, 2349), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""heaps-law3.png"""'], {'dpi': '(300)'}), "('heaps-law3.png', dpi=300)\n", (2322, 2349), True, 'from matplotlib import pylab as plt\n'), ((2363, 2378), 'nltk.stem.porter.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (2376, 2378), False, 'from nltk.stem.porter import PorterStemmer\n'), ((2437, 2680), 'microtc.textmodel.TextModel', 'TextModel', ([], {'num_option': 'OPTION_NONE', 'usr_option': 'OPTION_NONE', 'url_option': 'OPTION_NONE', 'emo_option': 'OPTION_NONE', 'hashtag_option': 'OPTION_NONE', 'ent_option': 'OPTION_NONE', 'lc': '(True)', 'del_dup': '(False)', 'del_punc': '(False)', 'del_diac': '(False)', 'token_list': '[-1]'}), '(num_option=OPTION_NONE, usr_option=OPTION_NONE, url_option=\n 
OPTION_NONE, emo_option=OPTION_NONE, hashtag_option=OPTION_NONE,\n ent_option=OPTION_NONE, lc=True, del_dup=False, del_punc=False,\n del_diac=False, token_list=[-1])\n', (2446, 2680), False, 'from microtc.textmodel import TextModel\n'), ((2788, 2797), 'collections.Counter', 'Counter', ([], {}), '()\n', (2795, 2797), False, 'from collections import Counter\n'), ((2901, 2972), 'scipy.optimize.minimize', 'minimize', (['error', '[1, 0.7]'], {'method': '"""nelder-mead"""', 'options': "{'disp': True}"}), "(error, [1, 0.7], method='nelder-mead', options={'disp': True})\n", (2909, 2972), False, 'from scipy.optimize import minimize\n'), ((1901, 1932), 'numpy.array', 'np.array', (['[x for x, _ in heaps]'], {}), '([x for x, _ in heaps])\n', (1909, 1932), True, 'import numpy as np\n'), ((1658, 1677), 'glob.glob', 'glob', (['"""books/*.txt"""'], {}), "('books/*.txt')\n", (1662, 1677), False, 'from glob import glob\n'), ((2873, 2892), 'glob.glob', 'glob', (['"""books/*.txt"""'], {}), "('books/*.txt')\n", (2877, 2892), False, 'from glob import glob\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 27 13:57:14 2021
@author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
from dataclasses import dataclass, field
class Tensor:
    """3x3 tensor (e.g. a stress state) together with its invariants.

    The tensor starts as 3x3 zeros and ``values`` is copied into the
    upper-left corner; a scalar input is taken to be sigma11, and with no
    argument the zero tensor is produced.

    Attributes:
        tensor: 3x3 ndarray holding the components.
        I1, I2, I3: First, second and third invariants.
        p: Hydrostatic (mean) value, I1 / 3.
        q: von Mises equivalent value.
    """
    def __init__(self, values=None):
        self.tensor = np.zeros(shape=[3, 3])
        # BUG FIX: the original crashed on the default ``values=None``
        # (np.array(None) cannot be assigned into a float array).
        if values is not None:
            values = np.asarray(values)
            if values.size > 1:
                if values.ndim == 1:
                    # Generalization: accept a 1-D vector as a single row;
                    # the original raised on unpacking ``values.shape``.
                    values = values.reshape(1, -1)
                r, c = values.shape
                self.tensor[0:r, 0:c] = values
            else:
                self.tensor[0, 0] = values
        self.I1 = self.Inv_1()
        self.I2 = self.Inv_2()
        self.I3 = self.Inv_3()
        self.p = self.calc_p()
        self.q = self.calc_q()
    def Inv_1(self):
        """First invariant: the trace."""
        self.I1 = np.trace(self.tensor)
        return self.I1
    def Inv_2(self):
        """Second invariant: (tr(T)^2 - tr(T @ T)) / 2.

        BUG FIX: the original used ``np.trace(self.tensor**2)``, which squares
        element-wise and therefore ignores all off-diagonal components; the
        invariant needs the trace of the *matrix* square.
        """
        t2 = self.tensor @ self.tensor
        self.I2 = (np.trace(self.tensor)**2 - np.trace(t2))/2
        return self.I2
    def Inv_3(self):
        """Third invariant: the determinant."""
        self.I3 = np.linalg.det(self.tensor)
        return self.I3
    def calc_p(self):
        """Hydrostatic part p = I1 / 3."""
        self.p = self.Inv_1()/3
        return self.p
    def calc_q(self):
        """von Mises equivalent: sqrt(3/2 * (tr(T @ T) - tr(T)^2 / 3)).

        Uses the matrix square as in Inv_2 (same element-wise-power fix).
        """
        t2 = self.tensor @ self.tensor
        self.q = np.sqrt(1.5*(np.trace(t2) - np.trace(self.tensor)**2/3.0))
        return self.q
@dataclass
class DP_cap:
    """Drucker-Prager material with an elliptic cap.

    Fields:
        d: Cohesion intercept of the shear surface.
        beta: Friction angle in degrees.
        R: Cap eccentricity.
        pb: Cap end pressure.
        alpha: Transition-surface parameter.
        k: Flow-stress ratio.
        hardening: Hardening table (list of floats).

    Derived (set in ``__post_init__``):
        br: Friction angle in radians.
        pa: Pressure where the cap meets the shear surface.
    """
    d: float = 10
    beta: float = 15
    R: float = 0.01
    pb: float = 2
    alpha: float = 0.01
    k: float = 1
    hardening: list[float] = field(default_factory=list)

    def __post_init__(self):
        # BUG FIX: ``pa`` and ``br`` were class-level attributes evaluated
        # once at class-definition time from the *field defaults*, so
        # instances constructed with non-default values still saw the
        # default-derived quantities.  Compute them per instance instead.
        self.br = np.deg2rad(self.beta)
        self.pa = (self.pb - self.R*self.d)/(1 + self.R*np.tan(self.br))
class Plot_DP_cap:
    """Plot the Drucker-Prager shear surface and the elliptic cap of
    *material* in (p, q) space on the current matplotlib axes."""
    def __init__(self, material):
        # self.p = np.linspace([0, material.pb])
        # NOTE(review): trans_surf() below reads self.p, which is never set
        # (the line above is commented out) -- calling trans_surf would raise.
        self.dp_surf(material)
        self.cap_surf(material)
    def dp_surf(self,material):
        # Linear shear surface q = p*tan(br) + d, drawn from the q-intercept
        # to the cap junction pa.
        # NOTE(review): the left endpoint divides by br (an angle in
        # radians); -d/np.tan(br) may have been intended -- confirm.
        p = np.array([-material.d/material.br, material.pa])
        DP_s = p*np.tan(material.br)+material.d
        plt.plot(p, DP_s)
        # return DP_s
    def cap_surf(self,material):
        # Elliptic cap sampled between pa and pb.
        p = np.linspace(material.pa,material.pb,20)
        cap_s = (
            (1+material.alpha -material.alpha/np.cos(material.br))/material.R) * np.sqrt(
            ((material.R*(material.d + material.pa*np.tan(material.br)))**2)-(p-material.pa)**2)
        # cte/deltap were apparently used to debug the square-root argument
        # (cap_s is NaN wherever deltap exceeds cte).
        cte = ((material.R*(material.d + material.pa*np.tan(material.br)))**2)
        deltap = (p-material.pa)**2
        cte = np.ones(deltap.shape)*cte
        # plt.figure()
        # plt.plot(cte,'-r')
        # plt.plot(deltap,'-b')
        print(cap_s)
        print(p)
        plt.plot(p, cap_s,'-or')
        return cap_s
    def trans_surf(self,material):
        # Transition surface between the shear surface and the cap.
        # NOTE(review): relies on self.p (never assigned, see __init__), and
        # "(self.p - material.pa**2)" looks like it should be
        # "(self.p - material.pa)**2" -- confirm before using this method.
        trans_s = (1-(material.alpha/np.cos(material.br))) * (material.d + material.pa*
                np.tan(material.br)) + np.sqrt(material.alpha**2 *
                (material.d + material.pa * np.tan(np.deg2rad(material.br)))**2 -
                (self.p - material.pa**2))
        return trans_s
if __name__ == '__main__':
    print('ok')
    # Smoke test: a uniaxial tensor plus one DP-cap material, then plot it.
    t = Tensor(np.array([[2,0],[0,0]]))
    material = DP_cap(0.1, 50, 1, 2.5715, 1.0e-6, 1, [0, 5])
    # ptfe = DP_cap()
    Plot_DP_cap(material)
| [
"numpy.trace",
"matplotlib.pyplot.plot",
"numpy.deg2rad",
"numpy.zeros",
"numpy.ones",
"dataclasses.field",
"numpy.tan",
"numpy.array",
"numpy.linspace",
"numpy.cos",
"numpy.linalg.det"
] | [((1818, 1845), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (1823, 1845), False, 'from dataclasses import dataclass, field\n'), ((1923, 1939), 'numpy.deg2rad', 'np.deg2rad', (['beta'], {}), '(beta)\n', (1933, 1939), True, 'import numpy as np\n'), ((446, 468), 'numpy.zeros', 'np.zeros', ([], {'shape': '[3, 3]'}), '(shape=[3, 3])\n', (454, 468), True, 'import numpy as np\n'), ((1021, 1042), 'numpy.trace', 'np.trace', (['self.tensor'], {}), '(self.tensor)\n', (1029, 1042), True, 'import numpy as np\n'), ((1272, 1298), 'numpy.linalg.det', 'np.linalg.det', (['self.tensor'], {}), '(self.tensor)\n', (1285, 1298), True, 'import numpy as np\n'), ((2217, 2267), 'numpy.array', 'np.array', (['[-material.d / material.br, material.pa]'], {}), '([-material.d / material.br, material.pa])\n', (2225, 2267), True, 'import numpy as np\n'), ((2351, 2368), 'matplotlib.pyplot.plot', 'plt.plot', (['p', 'DP_s'], {}), '(p, DP_s)\n', (2359, 2368), True, 'import matplotlib.pyplot as plt\n'), ((2454, 2495), 'numpy.linspace', 'np.linspace', (['material.pa', 'material.pb', '(20)'], {}), '(material.pa, material.pb, 20)\n', (2465, 2495), True, 'import numpy as np\n'), ((3078, 3103), 'matplotlib.pyplot.plot', 'plt.plot', (['p', 'cap_s', '"""-or"""'], {}), "(p, cap_s, '-or')\n", (3086, 3103), True, 'import matplotlib.pyplot as plt\n'), ((3619, 3645), 'numpy.array', 'np.array', (['[[2, 0], [0, 0]]'], {}), '([[2, 0], [0, 0]])\n', (3627, 3645), True, 'import numpy as np\n'), ((554, 570), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (562, 570), True, 'import numpy as np\n'), ((2858, 2879), 'numpy.ones', 'np.ones', (['deltap.shape'], {}), '(deltap.shape)\n', (2865, 2879), True, 'import numpy as np\n'), ((1168, 1194), 'numpy.trace', 'np.trace', (['(self.tensor ** 2)'], {}), '(self.tensor ** 2)\n', (1176, 1194), True, 'import numpy as np\n'), ((2292, 2311), 'numpy.tan', 'np.tan', (['material.br'], {}), '(material.br)\n', (2298, 2311), True, 
'import numpy as np\n'), ((1140, 1161), 'numpy.trace', 'np.trace', (['self.tensor'], {}), '(self.tensor)\n', (1148, 1161), True, 'import numpy as np\n'), ((1490, 1516), 'numpy.trace', 'np.trace', (['(self.tensor ** 2)'], {}), '(self.tensor ** 2)\n', (1498, 1516), True, 'import numpy as np\n'), ((1889, 1905), 'numpy.deg2rad', 'np.deg2rad', (['beta'], {}), '(beta)\n', (1899, 1905), True, 'import numpy as np\n'), ((2567, 2586), 'numpy.cos', 'np.cos', (['material.br'], {}), '(material.br)\n', (2573, 2586), True, 'import numpy as np\n'), ((2782, 2801), 'numpy.tan', 'np.tan', (['material.br'], {}), '(material.br)\n', (2788, 2801), True, 'import numpy as np\n'), ((3212, 3231), 'numpy.cos', 'np.cos', (['material.br'], {}), '(material.br)\n', (3218, 3231), True, 'import numpy as np\n'), ((3299, 3318), 'numpy.tan', 'np.tan', (['material.br'], {}), '(material.br)\n', (3305, 3318), True, 'import numpy as np\n'), ((1565, 1586), 'numpy.trace', 'np.trace', (['self.tensor'], {}), '(self.tensor)\n', (1573, 1586), True, 'import numpy as np\n'), ((2666, 2685), 'numpy.tan', 'np.tan', (['material.br'], {}), '(material.br)\n', (2672, 2685), True, 'import numpy as np\n'), ((3422, 3445), 'numpy.deg2rad', 'np.deg2rad', (['material.br'], {}), '(material.br)\n', (3432, 3445), True, 'import numpy as np\n')] |
# coding: utf-8
import os
import cv2
import warnings
import numpy as np
from .drawing import cv2WHITE
from ..utils.generic_utils import filenaming
from ..utils._colorings import toBLUE
def cv2paste(bg_img, fg_img, points=(0,0), inplace=False):
    """Pastes ``fg_image`` into ``bg_image``

    Args:
        bg_img (ndarray) : Background Image. shape=(H,W,ch)
        fg_img (ndarray) : Foreground Image. shape=(H,W,ch)
        points (tuple)   : Coordinates ``(x, y)`` of the foreground's top-left
                           corner in background coordinates (may be negative).
        inplace (bool)   : Whether to paste directly into the input ``bg_img``.

    Returns:
        bg_img (ndarray) : pasted image (a copy unless ``inplace=True``).

    Examples:
        >>> import cv2
        >>> from pycharmers.opencv import SAMPLE_LENA_IMG, cv2read_mpl, cv2plot, cv2paste
        >>> bg_img = cv2read_mpl(SAMPLE_LENA_IMG)
        >>> fg_img = cv2.resize(bg_img, dsize=(256,256))
        >>> ax = cv2plot(cv2paste(bg_img, fg_img, points=(128,128)))
    """
    if not inplace:
        bg_img = bg_img.copy()
    x,y = points
    bg_h, bg_w, _ = bg_img.shape
    fg_h, fg_w, _ = fg_img.shape
    # Paste only when the two images actually overlap, clipping the
    # foreground to the part that falls inside the background.
    if ((-fg_w < x < bg_w) and (-fg_h < y < bg_h)):
        # BUG FIX: the original copied ``bg_img`` a second time here, so the
        # non-inplace path did a wasted copy-of-a-copy on every paste.
        bg_img[max(0,y):min(y+fg_h, bg_h), max(0,x):min(x+fg_w, bg_w), :] = fg_img[max(0,0-y):bg_h-y, max(0,0-x):bg_w-x, :]
    return bg_img
def vconcat_resize_min(*images, interpolation=cv2.INTER_CUBIC):
    """Concat vertically while resizing every image to the smallest width.

    Args:
        images (np.ndarray) : OpenCV images
        interpolation (int) : interpolation method, see `OpenCV Documentations #InterpolationFlags <https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga5bb5a1fea74ea38e1a5445ca803ff121>`_

    Examples:
        >>> import cv2
        >>> from pycharmers.opencv import vconcat_resize_min, cv2plot
        >>> images = [cv2.imread(path) for path in os.listdir("images")]
        >>> vconcat_img = vconcat_resize_min(*images)
        >>> ax = cv2plot(vconcat_img)
    """
    target_w = min(img.shape[1] for img in images)
    scaled = []
    for img in images:
        h, w = img.shape[:2]
        # Preserve each image's aspect ratio at the common width.
        scaled.append(cv2.resize(src=img,
                                 dsize=(target_w, int(h*target_w/w)),
                                 interpolation=interpolation))
    return cv2.vconcat(scaled)
def hconcat_resize_min(*images, interpolation=cv2.INTER_CUBIC):
    """Concat horizontally while resizing every image to the smallest height.

    Args:
        images (np.ndarray) : OpenCV images
        interpolation (int) : interpolation method, see `OpenCV Documentations #InterpolationFlags <https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga5bb5a1fea74ea38e1a5445ca803ff121>`_

    Examples:
        >>> import cv2
        >>> from pycharmers.opencv import hconcat_resize_min, cv2plot
        >>> images = [cv2.imread(path) for path in os.listdir("images")]
        >>> hconcat_img = hconcat_resize_min(*images)
        >>> ax = cv2plot(hconcat_img)
    """
    target_h = min(img.shape[0] for img in images)
    scaled = []
    for img in images:
        h, w = img.shape[:2]
        # Preserve each image's aspect ratio at the common height.
        scaled.append(cv2.resize(src=img,
                                 dsize=(int(w*target_h/h), target_h),
                                 interpolation=interpolation))
    return cv2.hconcat(scaled)
def resize_aspect(src, dsize, interpolation=cv2.INTER_AREA):
    """Resize the image while keeping the aspect ratio.

    Args:
        src (np.ndarray) : Input image.
        dsize (tuple) : Output image size ( ``width`` , ``height``)
        interpolation (int) : Interpolation method (default= ``cv2.INTER_AREA`` )

    Returns:
        resized (np.ndarray) : Resized image.

    Examples:
        >>> import numpy as np
        >>> from pycharmers.opencv import resize_aspect
        >>> img = np.random.randint(low=0, high=255, size=(1080, 720, 3), dtype=np.uint8)
        >>> resized = resize_aspect(src=img, dsize=(300, 300))
        >>> resized.shape
        (300, 200, 3)
    """
    src_h, src_w = src.shape[:2]
    dst_w, dst_h = dsize
    # The more constraining axis decides the single scale factor, so the
    # result fits inside dsize without distortion.
    scale = dst_h/src_h if src_h/src_w > dst_h/dst_w else dst_w/src_w
    return cv2.resize(src=src,
                      dsize=(int(scale*src_w), int(scale*src_h)),
                      interpolation=interpolation)
return resized
def transparency(in_path, out_path=None, lower_bgr=cv2WHITE, upper_bgr=cv2WHITE, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_SIMPLE, thresh=None, check_exist=True):
    """Transparency processing: write a ``.png`` copy of the input image whose
    alpha channel keeps only the detected foreground contours.

    Args:
        in_path (str)         : Path to input image.
        out_path (str)        : Path to output image (default: ``<in_path>_transparency.png``).
        lower_bgr (tuple/int) : Lower bound of the BGR range to make transparent.
        upper_bgr (tuple/int) : Upper bound of the BGR range to make transparent.
        mode (int)            : Contour retrieval mode used in ``cv2.findContours`` (default = ``cv2.RETR_EXTERNAL`` )
        method (int)          : Contour approximation method used in ``cv2.findContours`` (default = ``cv2.CHAIN_APPROX_SIMPLE`` )
        thresh (int)          : If given, use grayscale thresholding at this value instead of the BGR range.
        check_exist (bool)    : If ``True``, pass the output path through ``filenaming``
                                (NOTE(review): the original text said this *allows*
                                overwriting; from the code it looks like it picks a
                                fresh name to *avoid* clashing -- confirm ``filenaming``).

    Examples:
        >>> from pycharmers.opencv import transparency, SAMPLE_LENA_IMG
        >>> transparency(SAMPLE_LENA_IMG)
        Saved at /Users/iwasakishuto/.pycharmers/opencv/image/lena_transparency.png
    """
    # Naming the output path.
    if out_path is None:
        root = os.path.splitext(in_path)[0] + "_transparency"
        ext = ".png"
    else:
        root,ext = os.path.splitext(out_path)
    if ext==".jpg":
        # JPEG has no alpha channel, so silently switch to PNG.
        warnings.warn("Since transparent image cannot be created with '.jpg' image, use '.png'.")
        ext = ".png"
    out_path = root + ext
    if check_exist:
        out_path = filenaming(out_path)
    src = cv2.imread(filename=in_path, flags=cv2.IMREAD_UNCHANGED)
    if src.shape[2]==3:
        # Add an (initially zero) alpha channel to BGR-only input.
        src = np.insert(src, 3, values=[0], axis=2)
    if thresh is None:
        # Checks if array elements lie between the elements of two other arrays.
        # Pixels inside [lower_bgr, upper_bgr] become background (alpha 0).
        binary = 255-cv2.inRange(src=src[:,:,:3], lowerb=np.asarray(lower_bgr), upperb=np.asarray(upper_bgr))
    else:
        # Thresholding
        gray = cv2.imread(filename=in_path, flags=cv2.IMREAD_GRAYSCALE)
        binary = cv2.threshold(gray, thresh=thresh, maxval=255, type=cv2.THRESH_BINARY)[1]
    contours, _ = cv2.findContours(image=binary, mode=mode, method=method)
    # Fill the detected contours into a fresh mask and use it as alpha.
    mask = np.zeros_like(binary, dtype=np.uint8)
    src[:,:,3] = cv2.fillPoly(img=mask, pts=contours, color=255)
    if cv2.imwrite(filename=out_path, img=src):
        print(f"Saved at {toBLUE(out_path)}")
def pil2cv(img):
    """Convert ``PIL.Image`` object into ``numpy`` array. (BGR)"""
    rgba = np.asarray(img, dtype=np.uint8)
    return cv2.cvtColor(rgba, cv2.COLOR_RGBA2BGR)
"cv2.resize",
"numpy.zeros_like",
"cv2.imwrite",
"numpy.asarray",
"cv2.threshold",
"cv2.fillPoly",
"numpy.insert",
"cv2.imread",
"os.path.splitext",
"warnings.warn",
"cv2.findContours"
] | [((4151, 4212), 'cv2.resize', 'cv2.resize', ([], {'src': 'src', 'dsize': 'dsize', 'interpolation': 'interpolation'}), '(src=src, dsize=dsize, interpolation=interpolation)\n', (4161, 4212), False, 'import cv2\n'), ((5763, 5819), 'cv2.imread', 'cv2.imread', ([], {'filename': 'in_path', 'flags': 'cv2.IMREAD_UNCHANGED'}), '(filename=in_path, flags=cv2.IMREAD_UNCHANGED)\n', (5773, 5819), False, 'import cv2\n'), ((6329, 6385), 'cv2.findContours', 'cv2.findContours', ([], {'image': 'binary', 'mode': 'mode', 'method': 'method'}), '(image=binary, mode=mode, method=method)\n', (6345, 6385), False, 'import cv2\n'), ((6397, 6434), 'numpy.zeros_like', 'np.zeros_like', (['binary'], {'dtype': 'np.uint8'}), '(binary, dtype=np.uint8)\n', (6410, 6434), True, 'import numpy as np\n'), ((6452, 6499), 'cv2.fillPoly', 'cv2.fillPoly', ([], {'img': 'mask', 'pts': 'contours', 'color': '(255)'}), '(img=mask, pts=contours, color=255)\n', (6464, 6499), False, 'import cv2\n'), ((6507, 6546), 'cv2.imwrite', 'cv2.imwrite', ([], {'filename': 'out_path', 'img': 'src'}), '(filename=out_path, img=src)\n', (6518, 6546), False, 'import cv2\n'), ((5488, 5514), 'os.path.splitext', 'os.path.splitext', (['out_path'], {}), '(out_path)\n', (5504, 5514), False, 'import os\n'), ((5858, 5895), 'numpy.insert', 'np.insert', (['src', '(3)'], {'values': '[0]', 'axis': '(2)'}), '(src, 3, values=[0], axis=2)\n', (5867, 5895), True, 'import numpy as np\n'), ((6163, 6219), 'cv2.imread', 'cv2.imread', ([], {'filename': 'in_path', 'flags': 'cv2.IMREAD_GRAYSCALE'}), '(filename=in_path, flags=cv2.IMREAD_GRAYSCALE)\n', (6173, 6219), False, 'import cv2\n'), ((6703, 6734), 'numpy.asarray', 'np.asarray', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (6713, 6734), True, 'import numpy as np\n'), ((5551, 5645), 'warnings.warn', 'warnings.warn', (['"""Since transparent image cannot be created with \'.jpg\' image, use \'.png\'."""'], {}), '(\n "Since transparent image cannot be created with \'.jpg\' image, use 
\'.png\'.")\n', (5564, 5645), False, 'import warnings\n'), ((6237, 6307), 'cv2.threshold', 'cv2.threshold', (['gray'], {'thresh': 'thresh', 'maxval': '(255)', 'type': 'cv2.THRESH_BINARY'}), '(gray, thresh=thresh, maxval=255, type=cv2.THRESH_BINARY)\n', (6250, 6307), False, 'import cv2\n'), ((5391, 5416), 'os.path.splitext', 'os.path.splitext', (['in_path'], {}), '(in_path)\n', (5407, 5416), False, 'import os\n'), ((6062, 6083), 'numpy.asarray', 'np.asarray', (['lower_bgr'], {}), '(lower_bgr)\n', (6072, 6083), True, 'import numpy as np\n'), ((6092, 6113), 'numpy.asarray', 'np.asarray', (['upper_bgr'], {}), '(upper_bgr)\n', (6102, 6113), True, 'import numpy as np\n')] |
""" Tests for molecule creation and file i/o
"""
import io
import os
import subprocess
from future.utils import PY2, native_str
from builtins import str
import collections
import pathlib
import gzip
import bz2
import pickle
import numpy
import pytest
import moldesign as mdt
# Force containerized execution for any compute backends used by these tests.
mdt.compute.config.engine_type = 'docker'
from moldesign import units as u
from .helpers import get_data_path, native_str_buffer, requires_internet_connection
from .object_fixtures import h2_trajectory, h2_harmonic, h2
# Marker consumed by the project's pytest configuration for this module.
__PYTEST_MARK__ = 'io'
@pytest.fixture
def bipyridine_sdf():
    """Bipyridine read from a local SDF data file."""
    return mdt.read(get_data_path('bipyridine.sdf'))
@pytest.fixture
def bipyridine_xyz():
    """Bipyridine read from a local XYZ data file."""
    return mdt.read(get_data_path('bipyridine.xyz'))
@pytest.fixture
def bipyridine_mol2():
    """Bipyridine read from a local MOL2 data file."""
    return mdt.read(get_data_path('bipyridine.mol2'))
@pytest.fixture
def bipyridine_iupac():
    """Bipyridine built by IUPAC-name lookup."""
    return mdt.from_name('bipyridine')
@pytest.fixture
def bipyridine_inchi():
    """Bipyridine built from its InChI string."""
    return mdt.from_inchi('InChI=1S/C10H8N2/c1-3-7-11-9(5-1)10-6-2-4-8-12-10/h1-8H')
@pytest.fixture
def bipyridine_smiles():
    """Bipyridine built from its SMILES string."""
    return mdt.from_smiles('c1ccnc(c1)c2ccccn2')
# Reference per-element data used by the format round-trip tests below.
ATOMDATA = {  # (symbol, valence, mass)
    1: ('H', 1, 1.008 * u.amu),
    6: ('C', 4, 12.000 * u.amu),
    7: ('N', 3, 14.003 * u.amu),
    8: ('O', 2, 15.995 * u.amu)}
@pytest.mark.parametrize('key', 'iupac smiles inchi xyz sdf'.split())
@pytest.mark.screening
def test_auto_unique_atom_names(key, request):
    """Atoms read from any source should get unique auto-generated names."""
    mol = request.getfixturevalue('bipyridine_'+key)
    atomnames = set(atom.name for atom in mol.atoms)
    assert len(atomnames) == mol.num_atoms
def test_atom_names_preserved_from_input_file_mol2(bipyridine_mol2):
    """MOL2 input should keep its atom names (symbol + index in this file)."""
    mol = bipyridine_mol2
    for atom in mol.atoms:
        assert atom.name == atom.symbol + str(atom.index)
@pytest.fixture
def propane_pdb():
    """Propane read from a PDB file that lacks chain records."""
    return mdt.read(get_data_path('propane.pdb'))
def test_pdb_with_missing_chains(propane_pdb):
    """ In response to an observed bug where various conversions would fail with a PDB file
    that's missing chain data
    """
    mol = propane_pdb
    if not mdt.compute.packages.openbabel.force_remote:
        # The pybel conversion only runs when openbabel is available locally.
        pbmol = mdt.interfaces.mol_to_pybel(mol)
        assert len(pbmol.atoms) == mol.num_atoms
    pmedmol = mdt.interfaces.mol_to_parmed(mol)
    assert len(pmedmol.atoms) == mol.num_atoms
@pytest.mark.parametrize('key', 'mol2 xyz sdf iupac smiles inchi'.split())
@pytest.mark.screening
def test_read_bipyridine_from_format(key, request):
    """Element counts, charge, mass, valences and bonds must agree across formats."""
    mol = request.getfixturevalue('bipyridine_'+key)
    atomcounts = collections.Counter(atom.symbol for atom in mol.atoms)
    assert len(atomcounts) == 3
    assert atomcounts['C'] == 10
    assert atomcounts['N'] == 2
    assert atomcounts['H'] == 8
    assert mol.charge == 0
    assert abs(mol.mass - 156.069*u.amu) < 0.001 * u.amu
    for atom in mol.atoms:
        # Per-atom reference values come from the ATOMDATA table above.
        assert atom.formal_charge == 0.0
        symb, val, mss = ATOMDATA[atom.atnum]
        assert atom.symbol == symb
        assert atom.valence == val
        assert abs(atom.mass - mss) < 0.001 * u.amu
    assert mol.num_bonds == 21
    bondorders = collections.Counter(bond.order for bond in mol.bonds)
    assert bondorders[2] == 6
    assert bondorders[1] == 15
    assert len(bondorders) == 2
@pytest.mark.parametrize('suffix', ['gz','bz2'])
def test_compressed_write(bipyridine_xyz, tmpdir, suffix):
    # Note: compressed read is tested elsewhere when reading test data files
    path = pathlib.Path(native_str(tmpdir))
    dest = path / ('bipyr.xyz.' + suffix)
    bipyridine_xyz.write(dest)
    # don't use MDT's reader here! Need to make sure it's really gzip'd
    if suffix == 'gz':
        opener = gzip.open
    elif suffix == 'bz2':
        opener = bz2.BZ2File
    else:
        raise ValueError('Unrecognized suffix "%s"' % suffix)
    if PY2:
        mode = 'r'
    else:
        # Python 3 reads in text mode; BZ2File doesn't accept 'rt', so
        # switch to bz2.open for the bz2 case.
        mode = 'rt'
        if suffix == 'bz2':
            opener = bz2.open
    with opener(str(dest), mode) as infile:
        content = infile.read()
    mol = mdt.read(content, format='xyz')
    assert mol.num_atoms == bipyridine_xyz.num_atoms
@pytest.fixture
def dna_pdb():
    """ACTG helix read from a PDB file (passed as a pathlib.Path)."""
    return mdt.read(pathlib.Path(get_data_path('ACTG.pdb')))
@pytest.fixture
def dna_mmcif():
    """ACTG helix read from an mmCIF file."""
    return mdt.read(get_data_path('ACTG.cif'))
@pytest.fixture
def dna_sequence():
    """ACTG helix built directly from its base sequence."""
    return mdt.build_bdna('ACTG')
@pytest.fixture
def pdb_1kbu():
    """PDB entry 1KBU read from a bzip2-compressed PDB file."""
    return mdt.read(pathlib.Path(get_data_path('1KBU.pdb.bz2')))
@pytest.fixture
def mmcif_1kbu():
    """PDB entry 1KBU read from a bzip2-compressed mmCIF file."""
    return mdt.read(get_data_path('1KBU.cif.bz2'))
@requires_internet_connection
def test_from_pdb_pdb_format():
    """Downloading 3AID should default to the PDB format."""
    mol = mdt.from_pdb('3aid')
    assert mol.metadata.pdbid == '3aid'
    assert mol.metadata.sourceformat == 'pdb'
    assert mol.num_atoms == 1912
@requires_internet_connection
def test_from_pdb_mmcif_format():
    """usecif=True should fetch and parse the mmCIF version instead."""
    mol = mdt.from_pdb('3aid', usecif=True)
    assert mol.metadata.pdbid == '3aid'
    assert mol.metadata.sourceformat == 'mmcif'
    assert mol.metadata.sourceurl.split('.')[-1] == 'cif'
    assert mol.num_atoms == 1912
@requires_internet_connection
@pytest.mark.skip("Takes over 10 minutes right now ...")
def test_mmcif_fallback_if_no_pdb_file():
    """Entries without a PDB-format file should fall back to mmCIF."""
    mol = mdt.from_pdb('4V5X')
    assert mol.metadata.pdbid.lower() == '4v5x'
    assert mol.metadata.sourceformat == 'mmcif'
    assert mol.metadata.sourceurl.split('.')[-1] == 'cif'
@pytest.mark.parametrize('key', 'pdb mmcif sequence'.split())
def test_read_dna_from_format(key, request):
    """Smoke test: the DNA fixtures should build without raising."""
    if key == 'mmcif':
        pytest.xfail(reason='Known mmcif parser bug, fix this by 0.7.4')
    mol = request.getfixturevalue('dna_'+key)
def test_write_file_to_buffer(bipyridine_smiles):
    """A molecule written to an in-memory buffer should round-trip via PDB."""
    mol = bipyridine_smiles
    buffer = native_str_buffer()
    mol.write(buffer, format='pdb')
    buffer.seek(0)
    newmol = mdt.read(buffer.getvalue(), format='pdb')
    assert mol.num_atoms == newmol.num_atoms
def test_write_pickle_to_buffer(bipyridine_smiles):
    """A pickle round-trip through BytesIO must preserve molecule identity."""
    mol = bipyridine_smiles
    buffer = io.BytesIO()
    mol.write(buffer, format='pkl')
    newmol = pickle.loads(buffer.getvalue())
    assert newmol.is_identical(mol, verbose=True)
def test_read_from_buffer():
    """mdt.read should accept an in-memory text buffer as input."""
    s = native_str("2\nmy xyz file\n H 1.0 1.0 1.0\n H 1.0 2.0 1.0\n")
    buffer = native_str_buffer(s)
    h2 = mdt.read(buffer, format='xyz')
    assert h2.num_atoms == 2
# NOTE(review): '<KEY>' below looks like an anonymization placeholder for the
# original parameter list (presumably 'pdb mmcif') -- confirm upstream.
@pytest.mark.parametrize('key', '<KEY>'.split())
@pytest.mark.screening
def test_1kbu_assembly_data(key, request):
    """Bioassembly metadata: one assembly, two transforms, all chains covered."""
    mol = request.getfixturevalue('%s_1kbu' % key)
    assert len(mol.properties.bioassemblies) == 1
    assert '1' in mol.properties.bioassemblies
    assembly = mol.properties.bioassemblies['1']
    assert len(assembly.transforms) == 2
    assert set(assembly.chains) == set(c.name for c in mol.chains)
    # first transform is identity
    numpy.testing.assert_allclose(assembly.transforms[0],
                                  numpy.identity(4))
    # second transform's rotation is unitary
    rot = assembly.transforms[1][:3,:3]
    numpy.testing.assert_allclose(rot.dot(rot.T),
                                  numpy.identity(3))
@pytest.mark.parametrize('key', '<KEY>'.split())
def test_1kbu_assembly_build(key, request):
    """build_assembly should duplicate the chains and apply transform #2."""
    asym = request.getfixturevalue('%s_1kbu' % key)
    original = mdt.Molecule(asym)
    assembly = asym.properties.bioassemblies['1']
    rot = assembly.transforms[1][:3,:3]
    move = assembly.transforms[1][:3,3] * u.angstrom
    mol = mdt.build_assembly(asym, 1)
    assert mol.num_chains == 2 * asym.num_chains
    # test that original is unaffected
    assert original.is_identical(asym)
    testchain = assembly.chains[0]
    new_chain_pos = mol.chains[testchain].positions.T.ldot(rot).T + move[None, :]
    numpy.testing.assert_allclose(new_chain_pos.defunits_value(),
                                  mol.chains[asym.num_chains].positions.defunits_value())
@pytest.mark.parametrize('fmt', 'smiles pdb mol2 sdf inchi mmcif pkl'.split())
def test_topology_preserved_in_serialization(bipyridine_smiles, fmt):
    """ Test that bond topology is preserved even if it doesn't make sense from distances
    """
    if fmt != 'pkl':
        # Only the pickle round-trip is expected to survive an arbitrary
        # topology edit; all text formats are known-lossy here.
        pytest.xfail("We are currently unable to get an unambiguous representation of a molecular "
                     "sructure with ANY current file formats or parsers.")
    mol = bipyridine_smiles.copy() # don't screw up the fixture object
    # Force a physically implausible triple bond and displace one atom so the
    # topology can't be re-derived from the geometry.
    mol.bond_graph[mol.atoms[3]][mol.atoms[5]] = 3
    mol.bond_graph[mol.atoms[5]][mol.atoms[3]] = 3
    mol.atoms[3].x += 10.0 * u.angstrom
    newmol = mdt.read(mol.write(format=fmt), format=fmt)
    assert mol.same_bonds(newmol, verbose=True)
def test_write_traj(h2_trajectory, tmpdir):
    """A written XYZ trajectory has (num_atoms + 2) lines per frame."""
    path = os.path.join(str(tmpdir), 'traj.xyz')
    h2_trajectory.write(path)
    assert int(subprocess.check_output(['wc', '-l', path]).split()[0]) == (
        (h2_trajectory.mol.num_atoms+2) * h2_trajectory.num_frames)
| [
"moldesign.from_inchi",
"moldesign.interfaces.mol_to_pybel",
"pytest.xfail",
"moldesign.interfaces.mol_to_parmed",
"moldesign.build_assembly",
"pytest.mark.parametrize",
"pytest.mark.skip",
"moldesign.from_name",
"moldesign.Molecule",
"moldesign.read",
"numpy.identity",
"future.utils.native_st... | [((3216, 3264), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""suffix"""', "['gz', 'bz2']"], {}), "('suffix', ['gz', 'bz2'])\n", (3239, 3264), False, 'import pytest\n'), ((5030, 5085), 'pytest.mark.skip', 'pytest.mark.skip', (['"""Takes over 10 minutes right now ..."""'], {}), "('Takes over 10 minutes right now ...')\n", (5046, 5085), False, 'import pytest\n'), ((857, 884), 'moldesign.from_name', 'mdt.from_name', (['"""bipyridine"""'], {}), "('bipyridine')\n", (870, 884), True, 'import moldesign as mdt\n'), ((938, 1011), 'moldesign.from_inchi', 'mdt.from_inchi', (['"""InChI=1S/C10H8N2/c1-3-7-11-9(5-1)10-6-2-4-8-12-10/h1-8H"""'], {}), "('InChI=1S/C10H8N2/c1-3-7-11-9(5-1)10-6-2-4-8-12-10/h1-8H')\n", (952, 1011), True, 'import moldesign as mdt\n'), ((1066, 1103), 'moldesign.from_smiles', 'mdt.from_smiles', (['"""c1ccnc(c1)c2ccccn2"""'], {}), "('c1ccnc(c1)c2ccccn2')\n", (1081, 1103), True, 'import moldesign as mdt\n'), ((2208, 2241), 'moldesign.interfaces.mol_to_parmed', 'mdt.interfaces.mol_to_parmed', (['mol'], {}), '(mol)\n', (2236, 2241), True, 'import moldesign as mdt\n'), ((2512, 2566), 'collections.Counter', 'collections.Counter', (['(atom.symbol for atom in mol.atoms)'], {}), '(atom.symbol for atom in mol.atoms)\n', (2531, 2566), False, 'import collections\n'), ((3066, 3119), 'collections.Counter', 'collections.Counter', (['(bond.order for bond in mol.bonds)'], {}), '(bond.order for bond in mol.bonds)\n', (3085, 3119), False, 'import collections\n'), ((3975, 4006), 'moldesign.read', 'mdt.read', (['content'], {'format': '"""xyz"""'}), "(content, format='xyz')\n", (3983, 4006), True, 'import moldesign as mdt\n'), ((4285, 4307), 'moldesign.build_bdna', 'mdt.build_bdna', (['"""ACTG"""'], {}), "('ACTG')\n", (4299, 4307), True, 'import moldesign as mdt\n'), ((4568, 4588), 'moldesign.from_pdb', 'mdt.from_pdb', (['"""3aid"""'], {}), "('3aid')\n", (4580, 4588), True, 'import moldesign as mdt\n'), ((4784, 4817), 
'moldesign.from_pdb', 'mdt.from_pdb', (['"""3aid"""'], {'usecif': '(True)'}), "('3aid', usecif=True)\n", (4796, 4817), True, 'import moldesign as mdt\n'), ((5138, 5158), 'moldesign.from_pdb', 'mdt.from_pdb', (['"""4V5X"""'], {}), "('4V5X')\n", (5150, 5158), True, 'import moldesign as mdt\n'), ((5928, 5940), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (5938, 5940), False, 'import io\n'), ((6112, 6175), 'future.utils.native_str', 'native_str', (['"""2\nmy xyz file\n H 1.0 1.0 1.0\n H 1.0 2.0 1.0\n"""'], {}), '("""2\nmy xyz file\n H 1.0 1.0 1.0\n H 1.0 2.0 1.0\n""")\n', (6122, 6175), False, 'from future.utils import PY2, native_str\n'), ((6219, 6249), 'moldesign.read', 'mdt.read', (['buffer'], {'format': '"""xyz"""'}), "(buffer, format='xyz')\n", (6227, 6249), True, 'import moldesign as mdt\n'), ((7201, 7219), 'moldesign.Molecule', 'mdt.Molecule', (['asym'], {}), '(asym)\n', (7213, 7219), True, 'import moldesign as mdt\n'), ((7376, 7403), 'moldesign.build_assembly', 'mdt.build_assembly', (['asym', '(1)'], {}), '(asym, 1)\n', (7394, 7403), True, 'import moldesign as mdt\n'), ((2111, 2143), 'moldesign.interfaces.mol_to_pybel', 'mdt.interfaces.mol_to_pybel', (['mol'], {}), '(mol)\n', (2138, 2143), True, 'import moldesign as mdt\n'), ((3424, 3442), 'future.utils.native_str', 'native_str', (['tmpdir'], {}), '(tmpdir)\n', (3434, 3442), False, 'from future.utils import PY2, native_str\n'), ((5453, 5517), 'pytest.xfail', 'pytest.xfail', ([], {'reason': '"""Known mmcif parser bug, fix this by 0.7.4"""'}), "(reason='Known mmcif parser bug, fix this by 0.7.4')\n", (5465, 5517), False, 'import pytest\n'), ((6830, 6847), 'numpy.identity', 'numpy.identity', (['(4)'], {}), '(4)\n', (6844, 6847), False, 'import numpy\n'), ((7019, 7036), 'numpy.identity', 'numpy.identity', (['(3)'], {}), '(3)\n', (7033, 7036), False, 'import numpy\n'), ((8084, 8236), 'pytest.xfail', 'pytest.xfail', (['"""We are currently unable to get an unambiguous representation of a molecular sructure with ANY 
current file formats or parsers."""'], {}), "(\n 'We are currently unable to get an unambiguous representation of a molecular sructure with ANY current file formats or parsers.'\n )\n", (8096, 8236), False, 'import pytest\n'), ((8641, 8652), 'builtins.str', 'str', (['tmpdir'], {}), '(tmpdir)\n', (8644, 8652), False, 'from builtins import str\n'), ((3904, 3913), 'builtins.str', 'str', (['dest'], {}), '(dest)\n', (3907, 3913), False, 'from builtins import str\n'), ((1734, 1749), 'builtins.str', 'str', (['atom.index'], {}), '(atom.index)\n', (1737, 1749), False, 'from builtins import str\n'), ((8713, 8756), 'subprocess.check_output', 'subprocess.check_output', (["['wc', '-l', path]"], {}), "(['wc', '-l', path])\n", (8736, 8756), False, 'import subprocess\n')] |
from qiskit import *
from qiskit.circuit.library.standard_gates import SwapGate,CU1Gate,XGate,U1Gate
from math import pi,sqrt
from qiskit.quantum_info.operators import Operator
import numpy as np
def ini(circ,qr,ipt):
# Input binary form, and append [0] ahead for qr1 block.
for i in range(len(ipt)):
if ipt[len(ipt)-i-1]:
circ.x(qr[i])
return 0
def diffusion(n): # Matrix representation of the diffusion transformation.
N=2**n# for Grover search.
return [N*[2/N] for i in range(N)]-np.identity(N)
def phaseFlip(reg,theta):
# reg is a one qubit register (input a qubit is also OK).
if reg.__len__()!=1:
raise TypeError('The input quantum register contains more than one qubit.')
phaseCirc=QuantumCircuit(reg,name='p\nh\na\ns\ne\n')
phaseCirc.append(U1Gate(theta),reg)
phaseCirc.append(XGate(),reg)
phaseCirc.append(U1Gate(theta),reg)
phaseCirc.append(XGate(),reg)
return phaseCirc.to_instruction()
def CPhaseFlip(qReg,reg,theta):# If all ancilla qubits equal one, flip the phase of querry REG.
# In this place, reg is a one qubit quantum register, and qReg is a n qubits quantum register.
phaseCirc=QuantumCircuit(qReg,reg,name='p\nh\na\ns\ne\n')
num=qReg.__len__()
IN=[qReg[i] for i in range(num)]+[reg[0]]
CU1Gate=U1Gate(theta).control(num)
CXGate=XGate().control(num)
phaseCirc.append(CU1Gate,IN)
phaseCirc.append(CXGate,IN)
phaseCirc.append(CU1Gate,IN)
phaseCirc.append(CXGate,IN)
return phaseCirc.to_instruction()
def amulitpdeAmplification(query,criteria,ancilla,n):# for Grover search.
AACirc=QuantumCircuit(query,criteria,ancilla,name='A\nA\n')
AACirc.h(query)
from qiskit_code.Grover import check
CHECK=check(query,criteria,ancilla,n)
lst=range(n)# This looks rather awkward, I may (but not likely) try to change this later.
AC=[ancilla[i] for i in lst]
QUERY=[query[i] for i in lst]
CHECKIN=QUERY+[criteria[i] for i in lst]+AC
nCP=CPhaseFlip(ancilla,QuantumRegister(1),pi)
nCX=XGate().control(n)# Generate a controlled-X gate with n controls.
D=Operator(diffusion(n))
for i in range(int(pi*sqrt(2**n)/8+1)):#[1] Iteration times.
AACirc.append(CHECK,CHECKIN)
AACirc.append(nCP,AC+[query[0]])
AACirc.append(CHECK,CHECKIN)
AACirc.append(D,query)
# [1]<NAME>, <NAME>, <NAME> & <NAME>, Tight bounds on quantum searching,
#Proceedings, PhysComp 1996
return AACirc.to_instruction()
def bigCSwap(c,t0,t1,l):# Controlled Swap of two qubit blocks.
BCSC=QuantumCircuit(c,t0,t1,name='b\ni\ng\nC\nS\nw\na\np\n')
for i in range(l):
BCSC.cswap(c,t0[i],t1[i])
return BCSC.to_instruction()
def bigSwap(t0,t1,l):# Swap of two qubit blocks.
BSC=QuantumCircuit(t0,t1,name='b\ni\ng\nS\nw\na\np\n')
for i in range(l):
BSC.swap(t0[i],t1[i])
return BSC.to_instruction()
def c_cx(c0,c1,target,n):# CCX where the second control and the target are two blocks
c_cxC=QuantumCircuit(c0,c1,target,name='C\no\nn\nt\nr\no\nl\nl\ne\nd\n-\nC\nX\n')
for i in range(n):
c_cxC.ccx(c0,c1[i],target[i])
return c_cxC.to_instruction()
def bigCCSwap(c0,c1,reg1,reg2,l):# Controlled-Controlled Swap of two qubit blocks.
bigCCSwapC=QuantumCircuit(c0,c1,reg1,reg2,name='C\nC\nS\nw\na\np\n')
ccswap=SwapGate().control(2)
for i in range(l):
bigCCSwapC.append(ccswap,[c0[0],c1[0],reg1[i],reg2[i]])
return bigCCSwapC.to_instruction()
carry_q=QuantumRegister(4)
carryC=QuantumCircuit(carry_q,name='c\na\nr\nr\ny\n')
carryC.ccx(carry_q[1],carry_q[2],carry_q[3])
carryC.cx(carry_q[1],carry_q[2])
carryC.ccx(carry_q[0],carry_q[2],carry_q[3])
CARRY=carryC.to_instruction()
sum_q=QuantumRegister(3)
sumC=QuantumCircuit(sum_q,name='s\nu\nm')
sumC.cx(sum_q[1],sum_q[2])
sumC.cx(sum_q[0],sum_q[2])
SUM=sumC.to_instruction()
def add(q0,q1,q2,l):
# A quantum plain adder, as the main part of the oracle.
# <NAME>., <NAME>. and <NAME>., 1996.
# Quantum networks for elementary arithmetic operations. Physical Review A, 54(1), p.147.
add_circ=QuantumCircuit(q0,q1,q2,name='a\nd\nd')
for i in range(l-1):
add_circ.append(CARRY,[q2[i],q0[i],q1[i],q2[i+1]])
add_circ.append(CARRY,[q2[l-1],q0[l-1],q1[l-1],q1[l]])
add_circ.cx(q0[l-1],q1[l-1])
add_circ.append(SUM,[q2[l-1],q0[l-1],q1[l-1]])
RCARRY=CARRY.reverse_ops()#inverse()
for i in range(l-2,-1,-1):
add_circ.append(RCARRY,[q2[i],q0[i],q1[i],q2[i+1]])
add_circ.append(SUM,[q2[i],q0[i],q1[i]])
return add_circ.to_instruction()
def sub(q0,q1,q2,l):
RCARRY=CARRY.reverse_ops()
sub_circ=QuantumCircuit(q0,q1,q2,name='s\nu\nb')
for i in range(l):
sub_circ.append(SUM,[q2[i],q1[i],q0[i]])
if i==l-1:
sub_circ.cx(q0[i],q1[i])
sub_circ.append(CARRY,[q2[i],q1[i],q0[i],q2[i+1]])
for i in range(l-2,-1,-1):
sub_circ.append(RCARRY,[q2[i],q1[i],q0[i],q2[i+1]])
sub_circ.x(q2[l])
sub_circ.swap(q0,q1)
return sub_circ.to_instruction()
def adderMod(qr0,qr1,ac,Nr,swap_ac,t,l,ADD,SUB):
# 0<=a,b<N
AMC=QuantumCircuit(qr0,qr1,ac,Nr,swap_ac,t,name='a\nd\nd\ne\nr\nM\no\nd\n')
BigCSwap=bigCSwap(t,qr0,swap_ac,l)
BigSwap=bigSwap(qr0,Nr,l)
lst=range(l)
ADDIN=[qr0[i] for i in lst]+[qr1[i] for i in lst]+[ac[i] for i in lst]
BigSwapIN=[qr0[i] for i in lst]+[Nr[i] for i in lst]
BigCSwapIN=[t[0]]+[qr0[i] for i in lst]+[swap_ac[i] for i in lst]
AMC.append(ADD,ADDIN)
AMC.append(BigSwap,BigSwapIN)
AMC.append(SUB,ADDIN)
AMC.x(qr1[l-1])
AMC.cx(qr1[l-1],t)
AMC.x(qr1[l-1])
AMC.append(BigCSwap,BigCSwapIN)
AMC.append(ADD,ADDIN)
AMC.append(BigCSwap,BigCSwapIN)
AMC.append(BigSwap,BigSwapIN)
AMC.append(SUB,ADDIN)
AMC.cx(qr1[l-1],t)
AMC.append(ADD,ADDIN)
return AMC.to_instruction()
def c_mtpMOD(circ,qr0,qr1,qr2,ac,Nr,swap_ac,t,cReg,xReg,l,n):
ADD=add(qr0,qr1,ac,l)
SUB=sub(qr0,qr1,ac,l)
AddMOD=adderMod(qr0,qr1,ac,Nr,swap_ac,t,l,ADD,SUB)
iAddMOD=AddMOD.reverse_ops()
BigCCSwap=bigCCSwap(cReg,t,qr0,swap_ac,l)
CCX=c_cx(cReg,xReg,qr1,n)
lst=range(l)
AddMODIN=[qr0[i] for i in lst]+[qr1[i] for i in lst]+[ac[i] for i in lst]
AddMODIN+=[Nr[i] for i in lst]+[swap_ac[i] for i in lst]+[t[0]]
for i in range(n):
BigCCSwapIN=[cReg,xReg[i]]+[qr0[i] for i in lst]+[qr2[i] for i in lst]
circ.append(BigCCSwap,BigCCSwapIN)
circ.append(AddMOD,AddMODIN)
circ.append(BigCCSwap,BigCCSwapIN)
circ.x(cReg)
CCXIN=[cReg[0]]+[xReg[i] for i in range(n)]+[qr1[i] for i in range(l)]
circ.append(CCX,CCXIN)
circ.x(cReg)
return 0
def expMod(qr0,qr1,ac,circ,N,l):
BigSwap=bigSwap(xReg,qr1,l)
lst=range(l)
BigSwapIN=[xReg[i] for i in lst]+[qr1[i] for i in lst]
C_MtpMOD=c_mtpMOD(qr0,qr1,qr2,ac,Nr,swap_ac,t,cReg,xReg,l,n)
iC_MtpMOD=C_MtpMOD.reverse_ops()
for i in range(m):
MtpMODIN
circ.append(C_MtpMOD,MtpMODIN)
circ.append(BigSwap,BigSwapIN)
circ.append(C_MtpMOD,MtpMODIN)
return None
def qft(qReg):
# <NAME> and <NAME> (2000). Quantum Computation and Quantum
# Information. Cambridge: Cambridge University Press. ISBN 0-521-63503-9.
# OCLC 174527496. P219, section 5.1 The quantum Fourier transform
# https://qiskit.org/documentation/stubs/qiskit.circuit.library.QFT.html
qft_circ=QuantumCircuit(qReg,name='Q\nF\nT\n')
num=qReg.__len__()
for i in range(num-1,-1,-1):
qft_circ.h(qReg[i])
for j in range(i):
qft_circ.append(CU1Gate(pi/2**(i-j)),[qReg[i],qReg[j]])
# Reverse the qubit order
for i in range(int(num/2)):# int(0.5)=0, so odd/even does not matters
qft_circ.swap(qReg[i],qReg[num-1-i])
return qft_circ.to_instruction()
| [
"qiskit.circuit.library.standard_gates.SwapGate",
"qiskit.circuit.library.standard_gates.XGate",
"math.sqrt",
"numpy.identity",
"qiskit.circuit.library.standard_gates.U1Gate",
"qiskit.circuit.library.standard_gates.CU1Gate",
"qiskit_code.Grover.check"
] | [((1794, 1828), 'qiskit_code.Grover.check', 'check', (['query', 'criteria', 'ancilla', 'n'], {}), '(query, criteria, ancilla, n)\n', (1799, 1828), False, 'from qiskit_code.Grover import check\n'), ((538, 552), 'numpy.identity', 'np.identity', (['N'], {}), '(N)\n', (549, 552), True, 'import numpy as np\n'), ((834, 847), 'qiskit.circuit.library.standard_gates.U1Gate', 'U1Gate', (['theta'], {}), '(theta)\n', (840, 847), False, 'from qiskit.circuit.library.standard_gates import SwapGate, CU1Gate, XGate, U1Gate\n'), ((875, 882), 'qiskit.circuit.library.standard_gates.XGate', 'XGate', ([], {}), '()\n', (880, 882), False, 'from qiskit.circuit.library.standard_gates import SwapGate, CU1Gate, XGate, U1Gate\n'), ((910, 923), 'qiskit.circuit.library.standard_gates.U1Gate', 'U1Gate', (['theta'], {}), '(theta)\n', (916, 923), False, 'from qiskit.circuit.library.standard_gates import SwapGate, CU1Gate, XGate, U1Gate\n'), ((951, 958), 'qiskit.circuit.library.standard_gates.XGate', 'XGate', ([], {}), '()\n', (956, 958), False, 'from qiskit.circuit.library.standard_gates import SwapGate, CU1Gate, XGate, U1Gate\n'), ((1347, 1360), 'qiskit.circuit.library.standard_gates.U1Gate', 'U1Gate', (['theta'], {}), '(theta)\n', (1353, 1360), False, 'from qiskit.circuit.library.standard_gates import SwapGate, CU1Gate, XGate, U1Gate\n'), ((1386, 1393), 'qiskit.circuit.library.standard_gates.XGate', 'XGate', ([], {}), '()\n', (1391, 1393), False, 'from qiskit.circuit.library.standard_gates import SwapGate, CU1Gate, XGate, U1Gate\n'), ((2099, 2106), 'qiskit.circuit.library.standard_gates.XGate', 'XGate', ([], {}), '()\n', (2104, 2106), False, 'from qiskit.circuit.library.standard_gates import SwapGate, CU1Gate, XGate, U1Gate\n'), ((3419, 3429), 'qiskit.circuit.library.standard_gates.SwapGate', 'SwapGate', ([], {}), '()\n', (3427, 3429), False, 'from qiskit.circuit.library.standard_gates import SwapGate, CU1Gate, XGate, U1Gate\n'), ((7777, 7803), 'qiskit.circuit.library.standard_gates.CU1Gate', 
'CU1Gate', (['(pi / 2 ** (i - j))'], {}), '(pi / 2 ** (i - j))\n', (7784, 7803), False, 'from qiskit.circuit.library.standard_gates import SwapGate, CU1Gate, XGate, U1Gate\n'), ((2222, 2234), 'math.sqrt', 'sqrt', (['(2 ** n)'], {}), '(2 ** n)\n', (2226, 2234), False, 'from math import pi, sqrt\n')] |
import abc
import typing
import numpy as np
from src.utils.utilities import rolling_window
from src.pose_estimation import PoseEstimation
class Feature(abc.ABC):
"""
Abstract Base Class to define a common interface for classes that implement
one or more related features
"""
# each subclass needs to define this name and feature_names
_name = None
# list of feature names, correspond to columns of feature values
_feature_names = None
# requirements for this feature to be available
_min_pose = 2
_static_objects = []
_SMOOTHING_WINDOW = 5
# _compute_window_feature uses numpy masked arrays, so we
# need to use the np.ma.* versions of these functions
# NOTE: Circular values need to override this as well as the window()
_window_operations = {
"mean": np.ma.mean,
"median": np.ma.median,
"std_dev": np.ma.std,
"max": np.ma.amax,
"min": np.ma.amin
}
def __init__(self, poses: PoseEstimation, pixel_scale: float):
super().__init__()
self._poses = poses
self._pixel_scale = pixel_scale
if self._name is None:
raise NotImplementedError(
"Base class must override _name class member")
if self._feature_names is None:
raise NotImplementedError(
"Base class must override _feature_names class member")
@classmethod
def name(cls) -> str:
""" return a string name of the feature """
return cls._name
@classmethod
def feature_names(cls) -> typing.List[str]:
"""
return a list of strings containing the names of the features for the
feature set
"""
return cls._feature_names
@classmethod
def is_supported(
cls, pose_version: int, static_objects: typing.List[str]) -> bool:
"""
:param pose_version:
:param static_objects:
:return:
"""
# check that the minimum pose version is met
if cls._min_pose > pose_version:
return False
# check that any static objects required by the feature are
# available
for obj in cls._static_objects:
if obj not in static_objects:
return False
return True
@abc.abstractmethod
def per_frame(self, identity: int) -> np.ndarray:
"""
each FeatureSet ubclass will implement this to compute the
features in the set
returns an ndarray containing the feature values.
The feature set could be a single feature, where this would be a 1D
numpy ndarray, or it could be a 2D ndarray for a set of related
features (for example the pairwise point distances, which is a 2D
ndarray where each row corresponds to the frame index, and each column
is one of the pairwise point distances)
"""
pass
def window(self, identity: int, window_size: int,
per_frame_values: np.ndarray) -> typing.Dict:
"""
standard method for computing window feature values
NOTE: some features may need to override this (for example, those with
circular values such as angles)
"""
values = {}
for op in self._window_operations:
values[op] = self._compute_window_feature(
per_frame_values, self._poses.identity_mask(identity),
window_size, self._window_operations[op]
)
return values
def _window_circular(self, identity: int, window_size: int,
per_frame_values: np.ndarray) -> typing.Dict:
values = {}
for op_name, op in self._window_operations.items():
values[op_name] = self._compute_window_features_circular(
per_frame_values, self._poses.identity_mask(identity),
window_size, op, op_name == 'std_dev')
return values
@staticmethod
def window_width(window_size: int) -> int:
return 2 * window_size + 1
def _window_masks(self, frame_mask: np.ndarray, window_size: int) -> np.ndarray:
"""
helper function for generating masks for all of the windows to be used
to compute window feature values
"""
window_width = self.window_width(window_size)
# generate a numpy mask array to mask out invalid frames
mask = np.full(self._poses.num_frames, 1)
mask[frame_mask == 1] = 0
# generate masks for all of the rolling windows
return rolling_window(
np.pad(mask, window_size, 'constant', constant_values=1),
window_width
)
def _compute_window_feature(self, feature_values: np.ndarray,
frame_mask: np.ndarray, window_size: int,
op: typing.Callable) -> np.ndarray:
"""
helper function to compute window feature values
:param feature_values: per frame feature values. Can be a 1D ndarray
for a single feature, or a 2D array for a set of related features
(e.g. pairwise point distances are stored as a 2D array)
:param frame_mask: array indicating which frames are valid for the
current identity
:param window_size: number of frames (in each direction) to include
in the window. The actual number of frames is 2 * window_size + 1
:param op: function to perform the actual computation
:return: numpy nd array containing feature values
"""
window_masks = self._window_masks(frame_mask, window_size)
window_width = self.window_width(window_size)
values = np.zeros_like(feature_values)
if feature_values.ndim == 1:
windows = rolling_window(
np.pad(feature_values, window_size),
window_width
)
mx = np.ma.masked_array(windows, window_masks)
values[:] = op(mx, axis=1)
else:
# if the feature is 2D, for example 'pairwise_distances',
# compute the window features for each column
for j in range(feature_values.shape[1]):
windows = rolling_window(
np.pad(feature_values[:, j], window_size),
window_width
)
mx = np.ma.masked_array(windows, window_masks)
values[:, j] = op(mx, axis=1)
return values
def _compute_window_features_circular(
self, feature_values: np.ndarray, frame_mask: np.ndarray,
window_size: int, op: typing.Callable,
scipy_workaround: bool = False
) -> typing.Dict:
"""
special case compute_window_features for circular measurements
:param feature_values: numpy array containing per-frame feature values
:param frame_mask: numpy array that indicates if the frame is valid or
not for the specific identity we are computing features for
:param window_size:
:param op:
:param scipy_workaround:
# scipy.stats.circstd has a bug that can result in nan
# and a warning message to stderr if passed an array of
# nearly identical values
#
# our work around is to suppress the warning and replace
# the nan with 0
#
# this will be fixed as of scipy 1.6.0, so this work-around can be
# removed once we can upgrade to scipy 1.6.0
:return: numpy nd array with circular feature values
"""
nframes = self._poses.num_frames
values = np.zeros_like(feature_values)
def func_wrapper(_values):
"""
implements work-around described in docstring
:param _values: values to use for computing window feature value
for a single frame
:return: window feature value
"""
with np.errstate(invalid='ignore'):
v = op(_values)
if np.isnan(v):
return 0.0
return v
# unfortunately the scipy.stats.circmean/circstd functions don't work
# with numpy masked arrays, so we need to iterate over each window and
# create a view with only the valid values
for i in range(nframes):
# identity doesn't exist for this frame don't bother to compute
if not frame_mask[i]:
continue
slice_start = max(0, i - window_size)
slice_end = min(i + window_size + 1, nframes)
slice_frames_valid = frame_mask[slice_start:slice_end]
if feature_values.ndim == 1:
window_values = feature_values[slice_start:slice_end][
slice_frames_valid == 1]
if scipy_workaround:
values[i] = func_wrapper(window_values)
else:
values[i] = op(window_values)
else:
for j in range(feature_values.shape[1]):
window_values = feature_values[slice_start:slice_end, j][slice_frames_valid == 1]
if scipy_workaround:
values[i, j] = func_wrapper(window_values)
else:
values[i, j] = op(window_values)
return values
| [
"numpy.full",
"numpy.pad",
"numpy.zeros_like",
"numpy.isnan",
"numpy.errstate",
"numpy.ma.masked_array"
] | [((4442, 4476), 'numpy.full', 'np.full', (['self._poses.num_frames', '(1)'], {}), '(self._poses.num_frames, 1)\n', (4449, 4476), True, 'import numpy as np\n'), ((5720, 5749), 'numpy.zeros_like', 'np.zeros_like', (['feature_values'], {}), '(feature_values)\n', (5733, 5749), True, 'import numpy as np\n'), ((7654, 7683), 'numpy.zeros_like', 'np.zeros_like', (['feature_values'], {}), '(feature_values)\n', (7667, 7683), True, 'import numpy as np\n'), ((4611, 4667), 'numpy.pad', 'np.pad', (['mask', 'window_size', '"""constant"""'], {'constant_values': '(1)'}), "(mask, window_size, 'constant', constant_values=1)\n", (4617, 4667), True, 'import numpy as np\n'), ((5938, 5979), 'numpy.ma.masked_array', 'np.ma.masked_array', (['windows', 'window_masks'], {}), '(windows, window_masks)\n', (5956, 5979), True, 'import numpy as np\n'), ((8055, 8066), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (8063, 8066), True, 'import numpy as np\n'), ((5841, 5876), 'numpy.pad', 'np.pad', (['feature_values', 'window_size'], {}), '(feature_values, window_size)\n', (5847, 5876), True, 'import numpy as np\n'), ((6391, 6432), 'numpy.ma.masked_array', 'np.ma.masked_array', (['windows', 'window_masks'], {}), '(windows, window_masks)\n', (6409, 6432), True, 'import numpy as np\n'), ((7977, 8006), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (7988, 8006), True, 'import numpy as np\n'), ((6276, 6317), 'numpy.pad', 'np.pad', (['feature_values[:, j]', 'window_size'], {}), '(feature_values[:, j], window_size)\n', (6282, 6317), True, 'import numpy as np\n')] |
import numpy as np
from cifar_loader import cifar10
from solver.solvers import CNN
import atexit
import matplotlib.pyplot as plt
def exit_handler():
print("Saving weights...")
print(weights["W1"][0,0,0,0])
np.save('train_weights.npy',weights)
def main():
train = True
# Set weights to the name of the file with compatible weights or None to train from scratch
weights = "trained_weights_val70"
# Load data
cifar10.maybe_download_and_extract()
train_x_raw, train_y, train_y_one_hot = cifar10.load_training_data()
test_x_raw, test_y, test_y_one_hot = cifar10.load_test_data()
classes = cifar10.load_class_names()
# Create validation set
train_x_validation = train_x_raw[:1000,...]
train_y_validation = train_y[:1000,...]
train_y_one_hot_validation = train_y_one_hot[:1000,...]
# Create Test set
train_x_raw = train_x_raw[0:49000,...]
train_y = train_y[0:49000,...]
train_y_one_hot=train_y_one_hot[0:49000,...]
# Normalization stats
train_mean = np.mean(train_x_raw )
train_max = np.max(train_x_raw )
train_min = np.min(train_x_raw )
# Normalize
train_x_raw = (train_x_raw - train_mean)/(train_max-train_min)
train_x_validation = (train_x_validation - train_mean)/(train_max-train_min)
# Initialize CNN
if train:
# Initialize CNN
cnn_1 = CNN(file=weights)
# Create file name to save weights to if model crashes
atexit.register(cnn_1.save_model,"WEIGHT_DUMP")
# Check gradients
grad_approx_inputs={'x':train_x_raw[0:2,...],'y':train_y_one_hot[0:2,...]}
cnn_1.verify_gradients(grad_approx_inputs,True)
# Format data
train_inputs={'x':train_x_raw,'y':train_y_one_hot}
val_inputs={'x':train_x_validation,'y':train_y_one_hot_validation}
cnn_1.train(train_inputs,val_inputs,0.0005,epochs=10,batch_size=32)
else:
cnn_1 = CNN(file="trained_weights_val70")
train_inputs={'x':train_x_raw,'y':train_y_one_hot}
val_inputs={'x':train_x_validation,'y':train_y_one_hot_validation}
validation_accuracy,results = cnn_1.eval(val_inputs)
print("Validation Accuracy: " + str(validation_accuracy) + "%")
train_x_validation = (train_x_validation + train_mean)*(train_max-train_min)
graph_inputs={'x':train_x_validation,'y':train_y_one_hot_validation,'yhat':results}
graph_results(graph_inputs,classes)
# Uncoment for test accuracy (time consuming 10k examples)
"""
test_inputs = {"x":test_x_raw,"y":test_y_one_hot}
print("Testing... This could take a while...")
test_accuracy = cnn_1.eval(test_inputs,batches=50)
print("Test Accuracy: " + str(test_accuracy) + "%")
"""
def graph_results(inputs,classes,num=20):
input_x = inputs['x']
input_y = inputs['y']
yhat = inputs['yhat']
for img in range(num):
fig = plt.figure(1)
fig.add_subplot(121)
plt.imshow(input_x[img,...])
fig.add_subplot(122)
y = yhat[img,...]
x = [0,1,2,3,4,5,6,7,8,9]
plt.yticks(np.arange(10), classes)
plt.barh(x,y)
plt.show()
if __name__ == "__main__":
main() | [
"cifar_loader.cifar10.load_training_data",
"solver.solvers.CNN",
"numpy.save",
"atexit.register",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"cifar_loader.cifar10.load_class_names",
"matplotlib.pyplot.barh",
"numpy.max",
"numpy.mean",
"numpy.min",
"matplotlib.pyplot.figure",
"nump... | [((219, 256), 'numpy.save', 'np.save', (['"""train_weights.npy"""', 'weights'], {}), "('train_weights.npy', weights)\n", (226, 256), True, 'import numpy as np\n'), ((430, 466), 'cifar_loader.cifar10.maybe_download_and_extract', 'cifar10.maybe_download_and_extract', ([], {}), '()\n', (464, 466), False, 'from cifar_loader import cifar10\n'), ((508, 536), 'cifar_loader.cifar10.load_training_data', 'cifar10.load_training_data', ([], {}), '()\n', (534, 536), False, 'from cifar_loader import cifar10\n'), ((575, 599), 'cifar_loader.cifar10.load_test_data', 'cifar10.load_test_data', ([], {}), '()\n', (597, 599), False, 'from cifar_loader import cifar10\n'), ((611, 637), 'cifar_loader.cifar10.load_class_names', 'cifar10.load_class_names', ([], {}), '()\n', (635, 637), False, 'from cifar_loader import cifar10\n'), ((983, 1003), 'numpy.mean', 'np.mean', (['train_x_raw'], {}), '(train_x_raw)\n', (990, 1003), True, 'import numpy as np\n'), ((1018, 1037), 'numpy.max', 'np.max', (['train_x_raw'], {}), '(train_x_raw)\n', (1024, 1037), True, 'import numpy as np\n'), ((1052, 1071), 'numpy.min', 'np.min', (['train_x_raw'], {}), '(train_x_raw)\n', (1058, 1071), True, 'import numpy as np\n'), ((1290, 1307), 'solver.solvers.CNN', 'CNN', ([], {'file': 'weights'}), '(file=weights)\n', (1293, 1307), False, 'from solver.solvers import CNN\n'), ((1368, 1416), 'atexit.register', 'atexit.register', (['cnn_1.save_model', '"""WEIGHT_DUMP"""'], {}), "(cnn_1.save_model, 'WEIGHT_DUMP')\n", (1383, 1416), False, 'import atexit\n'), ((1793, 1826), 'solver.solvers.CNN', 'CNN', ([], {'file': '"""trained_weights_val70"""'}), "(file='trained_weights_val70')\n", (1796, 1826), False, 'from solver.solvers import CNN\n'), ((2706, 2719), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2716, 2719), True, 'import matplotlib.pyplot as plt\n'), ((2745, 2774), 'matplotlib.pyplot.imshow', 'plt.imshow', (['input_x[img, ...]'], {}), '(input_x[img, ...])\n', (2755, 2774), True, 'import 
matplotlib.pyplot as plt\n'), ((2884, 2898), 'matplotlib.pyplot.barh', 'plt.barh', (['x', 'y'], {}), '(x, y)\n', (2892, 2898), True, 'import matplotlib.pyplot as plt\n'), ((2900, 2910), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2908, 2910), True, 'import matplotlib.pyplot as plt\n'), ((2858, 2871), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (2867, 2871), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.