code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
This submodule contains base tools for numerical integration of the Hamilton-
Jacobi-Bellman equation in the Square Root Velocity Framework. The methods
accepts and returns vectorized data only.
"""
# External dependencies:
import numpy as np
class Scheme:
    """
    Base class for the numerical HJB solvers in this module.

    Concrete subclasses provide at least two methods:

    update(u, reg, i, j):
        Compute the value function u[i, j] by approximating local
        solutions of the HJB equation.
    alpha(u, reg, i, j):
        Compute the maximizer alpha corresponding to the solution of
        the approximated HJB equation.
    """
    # Whether the scheme may serve as a component of a Filter combination.
    filterCompatible = False
    # Machine epsilon; used throughout to regularize divisions.
    eps = np.finfo(float).eps
class Filter(Scheme):
    """Filtered scheme: an accurate scheme with a monotone fallback.

    Runs the accurate scheme everywhere, then re-runs the monotone
    scheme at the nodes where the monotone Hamiltonian residual exceeds
    a mesh-dependent threshold of order h**(1 + order).
    """
    def __init__(self,ascheme,mscheme,tol=1,order=.5):
        assert ascheme.type == mscheme.type, "Solvers must be of same type"
        self.type = ascheme.type
        self.ascheme = ascheme   # accurate (possibly non-monotone) scheme
        self.mscheme = mscheme   # monotone fallback scheme
        self.tol = tol           # threshold scale factor
        self.order = order       # exponent controlling the filter threshold
    def update(self,u,hjbdata,i,j):
        # Accurate update first; i and j are index arrays here.
        self.ascheme.update(u,hjbdata,i,j)
        H = self.mscheme.hamiltonian(u,hjbdata,i,j)
        # Force the fallback wherever the accurate value breaks causality
        # (i.e. falls below an upwind neighbor).
        H[u[i,j]<np.maximum(u[i,j-1],u[i-1,j])] = np.inf
        x1,x2 = hjbdata.x1,hjbdata.x2
        # Fallback mask: |H| > tol * (h1*h2)**((1+order)/2), i.e. the
        # threshold scales like h**(1+order) for local mesh size h.
        b = np.abs(H) > self.tol*((x1[i]-x1[i-1])*(x2[j]-x2[j-1]))**(0.5+0.5*self.order)
        self.mscheme.update(u,hjbdata,i[b],j[b])
    def alpha(self,u,hjbdata,i,j):
        # Scalar counterpart of the filter test in update() (same threshold,
        # written via sqrt); assumes i and j are scalars here -- TODO confirm.
        H = self.mscheme.hamiltonian(u,hjbdata,i,j)
        x1,x2 = hjbdata.x1,hjbdata.x2
        if np.abs(H) > self.tol*np.sqrt((x1[i]-x1[i-1])*(x2[j]-x2[j-1]))**(1+self.order):
            return self.mscheme.alpha(u,hjbdata,i,j)
        return self.ascheme.alpha(u,hjbdata,i,j)
class Maximal(Scheme):
    """Pointwise maximum over several component schemes of the same type."""
    def __init__(self, *schemes):
        # All components are assumed to share the type of the first one.
        self.type = schemes[0].type
        self.schemes = schemes
    def update(self, u, hjbdata, i, j):
        """Run every component scheme and keep the largest value at (i, j)."""
        for component in self.schemes:
            previous = u[i, j]
            component.update(u, hjbdata, i, j)
            u[i, j] = np.maximum(previous, u[i, j])
    def alpha(self, u, hjbdata, i, j):
        """Delegate the maximizer to the first component scheme."""
        return self.schemes[0].alpha(u, hjbdata, i, j)
class DDP(Scheme):
    """Discrete dynamic programming update over a finite neighborhood.

    NOTE(review): like the other concrete schemes in this file, the methods
    omit `self`; the classes appear to be used as namespaces of static-like
    functions (called through class attributes, never instantiated) --
    confirm against the call sites.
    """
    type = "u"
    monotone = True
    def update(u,hjbdata,i,j):
        # Clamp predecessor indices at 0 so the stencil never leaves the grid.
        i2 = np.maximum(i[:,None]-hjbdata.nbhd[[0]],0)
        j2 = np.maximum(j[:,None]-hjbdata.nbhd[[1]],0)
        # Bellman update: best predecessor value plus the transition gain.
        u[i,j] = (u[i2,j2] + hjbdata[i[:,None],j[:,None],i2,j2]).max(axis=1)
    def prev(u,hjbdata,i,j):
        # Backtracking step: return the argmax predecessor of scalar (i, j).
        i,j = np.array([i]),np.array([j])
        i2 = np.maximum(i-hjbdata.nbhd[[0]],0)
        j2 = np.maximum(j-hjbdata.nbhd[[1]],0)
        optimal = (u[i2,j2] + hjbdata[i,j,i2,j2]).argmax(axis=1)
        return [i2[np.arange(i.shape[0]),optimal], j2[np.arange(j.shape[0]),optimal]]
class U1(Scheme):
    """First-order monotone upwind scheme for the value function u."""
    type = "u"
    monotone = True
    def __str__():
        return "U1"
    def update(u,hjbdata,i,j):
        """Write the local solution of the upwind quadratic into u[i, j]."""
        u01,u10 = u[i-1,j],u[i,j-1]
        u[i,j] = 0.5*(u01 + u10 + np.sqrt((u01 - u10)**2 + hjbdata[i,j]**2))
    def solve(u,hjbdata,i,j):
        """Return (without storing) the local solution at (i, j)."""
        u01,u10 = u[i-1,j],u[i,j-1]
        return 0.5*(u01 + u10 + np.sqrt((u01 - u10)**2 + hjbdata[i,j]**2))
    def hamiltonian(u,hjbdata,i,j):
        """Residual of the local equation: solve(...) - u[i, j]."""
        u01,u10 = u[i-1,j],u[i,j-1]
        return 0.5*(u01 + u10 + np.sqrt((u01 - u10)**2 + hjbdata[i,j]**2)) - u[i,j]
    def alpha(u,hjbdata,i,j):
        """Return the maximizing pair (alpha1, alpha2) at scalar (i, j)."""
        u01,u10 = u[i-1,j],u[i,j-1]
        fh2 = hjbdata[i,j]**2
        denom2 = (u01 - u10)**2 + fh2
        # BUGFIX: the guard used to read `u10==u10==fh2==0`; its first
        # comparison is always true, so u01 was never checked and the
        # division below could still hit a zero denominator.  Guard exactly
        # the degenerate (zero-denominator) case instead.
        if denom2 == 0:
            return (1,0)
        root = np.sqrt(denom2)
        return (0.5 + 0.5*(u01-u10)/root,
                0.5 + 0.5*(u10-u01)/root)
class UInf(Scheme):
    """Monotone scheme for u based on the sup-norm local solver."""
    type = "u"
    monotone = True
    def __str__():
        return "UInf"
    def update(u,hjbdata,i,j):
        """Write the local sup-norm solution into u[i, j]."""
        u0,u1 = u[i-1,j-1],np.maximum(u[i-1,j],u[i,j-1])
        fh = hjbdata[i,j]
        ufh2 = np.minimum(0.25*fh**2,(u1-u0)**2)
        u[i,j] = np.maximum(u1 + ufh2/(u1-u0+Scheme.eps),u0+fh)
    def solve(u,hjbdata,i,j):
        """Return (without storing) the local solution at (i, j)."""
        u0,u1 = u[i-1,j-1],np.maximum(u[i-1,j],u[i,j-1])
        fh = hjbdata[i,j]
        ufh2 = np.minimum(0.25*fh**2,(u1-u0)**2)
        return np.maximum(u1 + ufh2/(u1-u0+Scheme.eps),u0+fh)
    def hamiltonian(u,hjbdata,i,j):
        """Residual of the local equation: solve(...) - u[i, j]."""
        u0,u1 = u[i-1,j-1],np.maximum(u[i-1,j],u[i,j-1])
        fh = hjbdata[i,j]
        ufh2 = np.minimum(0.25*fh**2,(u1-u0)**2)
        # BUGFIX: the second candidate previously read `u0 + fh**2`, which
        # disagrees with update()/solve() (both use `u0 + fh`), so the
        # residual did not vanish at the solved value.
        return np.maximum(u1 + ufh2/(u1-u0+Scheme.eps),u0+fh) - u[i,j]
    def alpha(u,hjbdata,i,j):
        """Return the normalized maximizing pair (alpha1, alpha2)."""
        u00,u01,u10 = u[i-1,j-1],u[i-1,j],u[i,j-1]
        fh2 = hjbdata[i,j]**2
        if u01>=u10:
            a1,a2 = 1,min(1,fh2/(4*(u01-u00)**2+Scheme.eps))
        else:
            a1,a2 = min(1,fh2/(4*(u10-u00)**2+Scheme.eps)),1
        return a1/(a1+a2), a2/(a1+a2)
class U2(Scheme):
    """Second-order (non-monotone) scheme for the value function u."""
    type = "u"
    monotone = False
    def update(u, hjbdata, i, j):
        """Write the diagonal second-order local solution into u[i, j]."""
        du = u[i - 1, j] - u[i, j - 1]
        u[i, j] = u[i - 1, j - 1] + np.sqrt(du ** 2 + hjbdata[i, j] ** 2)
    def alpha(u, hjbdata, i, j):
        """Return the maximizing pair (alpha1, alpha2) at (i, j)."""
        north = u[i - 1, j]
        west = u[i, j - 1]
        root = np.sqrt((north - west) ** 2 + hjbdata[i, j] ** 2)
        half = 0.5 * (north - west) / root
        return (0.5 + half, 0.5 - half)
class V1(Scheme):
    """First-order monotone scheme for the squared value function v."""
    type = "v"
    monotone = True
    def __str__():
        return "V1"
    def update(v,hjbdata,i,j):
        """Write the local solution of the v-form quadratic into v[i, j]."""
        v01,v10 = v[i-1,j],v[i,j-1]
        fh2 = hjbdata[i,j]**2
        v[i,j] = 0.5*(v01 + v10 + fh2 +
                      np.sqrt((v01 - v10)**2 +
                              2*(v01 + v10)*fh2 +
                              fh2**2))
    def hamiltonian(v,hjbdata,i,j):
        """Residual in u-form after rescaling the v-values by 2*sqrt(v11)."""
        v01,v10,v11 = v[i-1,j],v[i,j-1],v[i,j]
        v01 /= 2*np.sqrt(v11)+Scheme.eps
        v10 /= 2*np.sqrt(v11)+Scheme.eps
        v11 /= 2*np.sqrt(v11)+Scheme.eps
        return 0.5*(v01+v10 + np.sqrt((v01 - v10)**2 + hjbdata[i,j]**2)) - v11
    def alpha(v,hjbdata,i,j):
        """Return the maximizing pair (alpha1, alpha2) at scalar (i, j)."""
        v01 = v[i-1,j]
        v10 = v[i,j-1]
        v11 = v[i,j]
        fh2 = hjbdata[i,j]**2
        denom2 = (v01-v10)**2 + 4*v11*fh2
        # BUGFIX: the guard used to read `v10==v10==fh2==0`; its first
        # comparison is always true, so v01 was never checked.  Guard the
        # exact zero-denominator case instead.
        if denom2 == 0:
            return (1,0)
        root = np.sqrt(denom2)
        return (0.5 + 0.5*(v01-v10)/root,
                0.5 + 0.5*(v10-v01)/root)
class V12(Scheme):
    """Monotone scheme for v using a two-step (i-2, j-2) stencil."""
    type = "v"
    monotone = True
    def __str__():
        return "V12"
    def update(v,hjbdata,i,j):
        """Write the local solution into v[i, j] using the wide stencil."""
        v11 = v[i-1,j-1]
        # Largest of the diagonal value and the two-step axis neighbors
        # (indices clamped at 0 so the stencil stays on the grid).
        v02 = np.maximum.reduce([v11,v[np.maximum(i-2,0),j],v[i,np.maximum(j-2,0)]])
        fh2 = 4*hjbdata[i,j]**2
        v[i,j] = 0.5*(2*v11 + fh2 +
                      np.sqrt(4*(v02 - v11)**2 +
                              4*v11*fh2 +
                              fh2**2))
    def hamiltonian(v,hjbdata,i,j):
        """Residual in u-form after rescaling the v-values by 2*sqrt(v11)."""
        v01,v10,v11 = v[i-1,j],v[i,j-1],v[i,j]
        v01 /= 2*np.sqrt(v11)+Scheme.eps
        v10 /= 2*np.sqrt(v11)+Scheme.eps
        v11 /= 2*np.sqrt(v11)+Scheme.eps
        return 0.5*(v01+v10 + np.sqrt((v01 - v10)**2 + hjbdata[i,j]**2)) - v11
    def alpha(v,hjbdata,i,j):
        """Return the maximizing pair (alpha1, alpha2) at scalar (i, j)."""
        v01 = v[i-1,j]
        v10 = v[i,j-1]
        v11 = v[i,j]
        fh2 = hjbdata[i,j]**2
        denom2 = (v01-v10)**2 + 4*v11*fh2
        # BUGFIX: the guard used to read `v10==v10==fh2==0`; its first
        # comparison is always true, so v01 was never checked.  Guard the
        # exact zero-denominator case instead.
        if denom2 == 0:
            return (1,0)
        root = np.sqrt(denom2)
        return (0.5 + 0.5*(v01-v10)/root,
                0.5 + 0.5*(v10-v01)/root)
class VInf(Scheme):
    """Monotone sup-norm scheme for the squared value function v."""
    type = "v"
    monotone = True
    def __str__():
        return "VInf"
    def solve(solver,i,j):
        # NOTE(review): unlike the other methods in this file, `solve` takes
        # a `solver` object exposing `f(i, j)` and `value` rather than
        # `(v, hjbdata)` -- presumably an older calling convention; confirm
        # against the call sites.
        fh = solver.f(i,j)
        fh2 = fh**2
        v0 = solver.value[i-1,j-1]
        v1 = np.maximum(solver.value[i-1,j],solver.value[i,j-1])
        # Where the rational formula is numerically stable, use it;
        # elsewhere fall back to the closed-form square-root expression.
        stable = v1*fh2 < (v1-v0)*(v1-v0-fh2)
        v11 = (fh + np.sqrt(fh2+v0))**2
        v11[stable] = (v1[stable]*(v1[stable]-v0[stable]))/(v1[stable]-v0[stable]-fh2[stable]+Scheme.eps)
        return np.maximum(v11,v1)
    def hamiltonian(v,hjbdata,i,j):
        # Rescale the v-values by 2*sqrt(v11) to express the residual in
        # u-form, then evaluate a UInf-style residual.
        v0,v1,v11 = v[i-1,j-1],np.maximum(v[i,j-1],v[i-1,j]),v[i,j]
        v0 /= 2*np.sqrt(v11)+Scheme.eps
        v1 /= 2*np.sqrt(v11)+Scheme.eps
        v11 /= 2*np.sqrt(v11)+Scheme.eps
        fh = hjbdata[i,j]
        ufh2 = np.minimum(0.25*fh**2,(v1-v0)**2)
        return np.maximum(v1 + ufh2/(v1-v0+Scheme.eps),v0+fh) - v11
    def alpha(v,hjbdata,i,j):
        # Unnormalized weights a1, a2 for the two upwind directions,
        # normalized at the end so they sum to one.
        v00,v01,v10,v11 = v[i-1,j-1],v[i-1,j],v[i,j-1],v[i,j]
        v00 /= 2*np.sqrt(v11)+Scheme.eps
        v01 /= 2*np.sqrt(v11)+Scheme.eps
        v10 /= 2*np.sqrt(v11)+Scheme.eps
        fh2 = hjbdata[i,j]**2
        if v01>=v10:
            a1,a2 = 1,min(1,fh2/(4*(v01-v00)**2+Scheme.eps))
        else:
            a1,a2 = min(1,fh2/(4*(v10-v00)**2+Scheme.eps)),1
        return a1/(a1+a2), a2/(a1+a2)
class V2b(Scheme):
    """Non-monotone second-order scheme (variant b) for v."""
    type = "v"
    monotone = False
    def update(v,hjbdata,i,j):
        """Write the local candidate solution into v[i, j], clipped from
        below by the two axis neighbors."""
        v00,v01,v10 = v[i-1,j-1],v[i-1,j],v[i,j-1]
        fh2 = hjbdata[i,j]**2
        # FIX: fh2 was previously computed but unused; the expression now
        # reuses it instead of recomputing hjbdata[i,j]**2 (same math).
        v[i,j] = np.maximum.reduce([
            v00 + np.sqrt((v01 - v10)**2 + 2*(v01+v10)*fh2),
            v01,
            v10
            ])
    def alpha(v,hjbdata,i,j):
        """Return the (unnormalized-weight) maximizing pair at (i, j)."""
        v01,v10 = v[i-1,j],v[i,j-1]
        fh2 = 2*(v01+v10)*hjbdata[i,j]**2
        return (1 + (v01-v10)/np.sqrt((v01-v10)**2 + fh2 + Scheme.eps),
                1 + (v10-v01)/np.sqrt((v01-v10)**2 + fh2 + Scheme.eps))
class V2(Scheme):
    """Non-monotone second-order scheme for the squared value function v."""
    type = "v"
    monotone = False
    def update(v, hjbdata, i, j):
        """Write the clipped second-order local solution into v[i, j]."""
        corner = v[i - 1, j - 1]
        north = v[i - 1, j]
        west = v[i, j - 1]
        fh2 = hjbdata[i, j] ** 2
        candidate = corner + 0.5 * fh2 + np.sqrt(
            (north - west) ** 2 + fh2 * (2 * corner + west + north + 0.25 * fh2))
        # Never drop below the two axis neighbors.
        v[i, j] = np.maximum.reduce([candidate, north, west])
    def alpha(v, hjbdata, i, j):
        """Return the (unnormalized-weight) maximizing pair at (i, j)."""
        north = v[i - 1, j]
        west = v[i, j - 1]
        fh2 = 2 * (north + west) * hjbdata[i, j] ** 2
        root = np.sqrt((north - west) ** 2 + fh2 + Scheme.eps)
        return (1 + (north - west) / root, 1 + (west - north) / root)
class Vb24(Scheme):
    """Non-monotone scheme for v: the V2 candidate without neighbor clipping."""
    type = "v"
    monotone = False
    def update(v, hjbdata, i, j):
        """Write the unclipped second-order local solution into v[i, j]."""
        corner = v[i - 1, j - 1]
        north = v[i - 1, j]
        west = v[i, j - 1]
        fh2 = hjbdata[i, j] ** 2
        v[i, j] = corner + 0.5 * fh2 + np.sqrt(
            (north - west) ** 2 + fh2 * (2 * corner + west + north + 0.25 * fh2))
    def alpha(v, hjbdata, i, j):
        """Return the (unnormalized-weight) maximizing pair at (i, j)."""
        north = v[i - 1, j]
        west = v[i, j - 1]
        fh2 = 2 * (north + west) * hjbdata[i, j] ** 2
        root = np.sqrt((north - west) ** 2 + fh2 + Scheme.eps)
        return (1 + (north - west) / root, 1 + (west - north) / root)
class Vb22(Scheme):
    """Non-monotone scheme for v (variant 2.2)."""
    type = "v"
    monotone = False
    def update(v, hjbdata, i, j):
        """Write the local candidate solution into v[i, j]."""
        corner = v[i - 1, j - 1]
        diff = v[i - 1, j] - v[i, j - 1]
        fh2 = hjbdata[i, j] ** 2
        v[i, j] = corner + fh2 + np.sqrt(diff ** 2 + fh2 * (4 * corner + fh2))
    def alpha(v, hjbdata, i, j):
        """Return the (unnormalized-weight) maximizing pair at (i, j)."""
        corner = v[i - 1, j - 1]
        north = v[i - 1, j]
        west = v[i, j - 1]
        here = v[i, j]
        fh2 = 2 * (corner + here) * hjbdata[i, j] ** 2
        root = np.sqrt((north - west) ** 2 + fh2 + Scheme.eps)
        return (1 + (north - west) / root, 1 + (west - north) / root)
class Vb23(Scheme):
    """Non-monotone scheme for v: U2-type update applied to sqrt(v)."""
    type = "v"
    monotone = False
    def update(v, hjbdata, i, j):
        """Apply the U2 update to u = sqrt(v), then square the result back."""
        root_corner = v[i - 1, j - 1] ** .5
        droot = v[i - 1, j] ** .5 - v[i, j - 1] ** .5
        v[i, j] = (root_corner + np.sqrt(droot ** 2 + hjbdata[i, j] ** 2)) ** 2
    def alpha(v, hjbdata, i, j):
        """Return the (unnormalized-weight) maximizing pair at (i, j)."""
        u01 = v[i - 1, j] ** .5
        u10 = v[i, j - 1] ** .5
        fh2 = hjbdata[i, j] ** 2
        root = np.sqrt((u01 - u10) ** 2 + fh2 + Scheme.eps)
        return (1 + (u01 - u10) / root, 1 + (u10 - u01) / root)
| [
"numpy.minimum",
"numpy.maximum",
"numpy.abs",
"numpy.finfo",
"numpy.array",
"numpy.arange",
"numpy.sqrt"
] | [((670, 685), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (678, 685), True, 'import numpy as np\n'), ((2250, 2295), 'numpy.maximum', 'np.maximum', (['(i[:, None] - hjbdata.nbhd[[0]])', '(0)'], {}), '(i[:, None] - hjbdata.nbhd[[0]], 0)\n', (2260, 2295), True, 'import numpy as np\n'), ((2305, 2350), 'numpy.maximum', 'np.maximum', (['(j[:, None] - hjbdata.nbhd[[1]])', '(0)'], {}), '(j[:, None] - hjbdata.nbhd[[1]], 0)\n', (2315, 2350), True, 'import numpy as np\n'), ((2508, 2544), 'numpy.maximum', 'np.maximum', (['(i - hjbdata.nbhd[[0]])', '(0)'], {}), '(i - hjbdata.nbhd[[0]], 0)\n', (2518, 2544), True, 'import numpy as np\n'), ((2555, 2591), 'numpy.maximum', 'np.maximum', (['(j - hjbdata.nbhd[[1]])', '(0)'], {}), '(j - hjbdata.nbhd[[1]], 0)\n', (2565, 2591), True, 'import numpy as np\n'), ((3782, 3824), 'numpy.minimum', 'np.minimum', (['(0.25 * fh ** 2)', '((u1 - u0) ** 2)'], {}), '(0.25 * fh ** 2, (u1 - u0) ** 2)\n', (3792, 3824), True, 'import numpy as np\n'), ((3833, 3888), 'numpy.maximum', 'np.maximum', (['(u1 + ufh2 / (u1 - u0 + Scheme.eps))', '(u0 + fh)'], {}), '(u1 + ufh2 / (u1 - u0 + Scheme.eps), u0 + fh)\n', (3843, 3888), True, 'import numpy as np\n'), ((4008, 4050), 'numpy.minimum', 'np.minimum', (['(0.25 * fh ** 2)', '((u1 - u0) ** 2)'], {}), '(0.25 * fh ** 2, (u1 - u0) ** 2)\n', (4018, 4050), True, 'import numpy as np\n'), ((4057, 4112), 'numpy.maximum', 'np.maximum', (['(u1 + ufh2 / (u1 - u0 + Scheme.eps))', '(u0 + fh)'], {}), '(u1 + ufh2 / (u1 - u0 + Scheme.eps), u0 + fh)\n', (4067, 4112), True, 'import numpy as np\n'), ((4238, 4280), 'numpy.minimum', 'np.minimum', (['(0.25 * fh ** 2)', '((u1 - u0) ** 2)'], {}), '(0.25 * fh ** 2, (u1 - u0) ** 2)\n', (4248, 4280), True, 'import numpy as np\n'), ((7286, 7344), 'numpy.maximum', 'np.maximum', (['solver.value[i - 1, j]', 'solver.value[i, j - 1]'], {}), '(solver.value[i - 1, j], solver.value[i, j - 1])\n', (7296, 7344), True, 'import numpy as np\n'), ((7545, 7564), 'numpy.maximum', 
'np.maximum', (['v11', 'v1'], {}), '(v11, v1)\n', (7555, 7564), True, 'import numpy as np\n'), ((7832, 7874), 'numpy.minimum', 'np.minimum', (['(0.25 * fh ** 2)', '((v1 - v0) ** 2)'], {}), '(0.25 * fh ** 2, (v1 - v0) ** 2)\n', (7842, 7874), True, 'import numpy as np\n'), ((1230, 1239), 'numpy.abs', 'np.abs', (['H'], {}), '(H)\n', (1236, 1239), True, 'import numpy as np\n'), ((1493, 1502), 'numpy.abs', 'np.abs', (['H'], {}), '(H)\n', (1499, 1502), True, 'import numpy as np\n'), ((2038, 2063), 'numpy.maximum', 'np.maximum', (['temp', 'u[i, j]'], {}), '(temp, u[i, j])\n', (2048, 2063), True, 'import numpy as np\n'), ((2467, 2480), 'numpy.array', 'np.array', (['[i]'], {}), '([i])\n', (2475, 2480), True, 'import numpy as np\n'), ((2481, 2494), 'numpy.array', 'np.array', (['[j]'], {}), '([j])\n', (2489, 2494), True, 'import numpy as np\n'), ((3711, 3747), 'numpy.maximum', 'np.maximum', (['u[i - 1, j]', 'u[i, j - 1]'], {}), '(u[i - 1, j], u[i, j - 1])\n', (3721, 3747), True, 'import numpy as np\n'), ((3937, 3973), 'numpy.maximum', 'np.maximum', (['u[i - 1, j]', 'u[i, j - 1]'], {}), '(u[i - 1, j], u[i, j - 1])\n', (3947, 3973), True, 'import numpy as np\n'), ((4167, 4203), 'numpy.maximum', 'np.maximum', (['u[i - 1, j]', 'u[i, j - 1]'], {}), '(u[i - 1, j], u[i, j - 1])\n', (4177, 4203), True, 'import numpy as np\n'), ((4287, 4347), 'numpy.maximum', 'np.maximum', (['(u1 + ufh2 / (u1 - u0 + Scheme.eps))', '(u0 + fh ** 2)'], {}), '(u1 + ufh2 / (u1 - u0 + Scheme.eps), u0 + fh ** 2)\n', (4297, 4347), True, 'import numpy as np\n'), ((4768, 4830), 'numpy.sqrt', 'np.sqrt', (['((u[i - 1, j] - u[i, j - 1]) ** 2 + hjbdata[i, j] ** 2)'], {}), '((u[i - 1, j] - u[i, j - 1]) ** 2 + hjbdata[i, j] ** 2)\n', (4775, 4830), True, 'import numpy as np\n'), ((7633, 7669), 'numpy.maximum', 'np.maximum', (['v[i, j - 1]', 'v[i - 1, j]'], {}), '(v[i, j - 1], v[i - 1, j])\n', (7643, 7669), True, 'import numpy as np\n'), ((7881, 7936), 'numpy.maximum', 'np.maximum', (['(v1 + ufh2 / (v1 - v0 + 
Scheme.eps))', '(v0 + fh)'], {}), '(v1 + ufh2 / (v1 - v0 + Scheme.eps), v0 + fh)\n', (7891, 7936), True, 'import numpy as np\n'), ((9731, 9799), 'numpy.sqrt', 'np.sqrt', (['((v01 - v10) ** 2 + fh2 * (2 * v00 + v10 + v01 + 0.25 * fh2))'], {}), '((v01 - v10) ** 2 + fh2 * (2 * v00 + v10 + v01 + 0.25 * fh2))\n', (9738, 9799), True, 'import numpy as np\n'), ((10228, 10277), 'numpy.sqrt', 'np.sqrt', (['((v01 - v10) ** 2 + fh2 * (4 * v00 + fh2))'], {}), '((v01 - v10) ** 2 + fh2 * (4 * v00 + fh2))\n', (10235, 10277), True, 'import numpy as np\n'), ((1140, 1176), 'numpy.maximum', 'np.maximum', (['u[i, j - 1]', 'u[i - 1, j]'], {}), '(u[i, j - 1], u[i - 1, j])\n', (1150, 1176), True, 'import numpy as np\n'), ((2935, 2981), 'numpy.sqrt', 'np.sqrt', (['((u01 - u10) ** 2 + hjbdata[i, j] ** 2)'], {}), '((u01 - u10) ** 2 + hjbdata[i, j] ** 2)\n', (2942, 2981), True, 'import numpy as np\n'), ((3076, 3122), 'numpy.sqrt', 'np.sqrt', (['((u01 - u10) ** 2 + hjbdata[i, j] ** 2)'], {}), '((u01 - u10) ** 2 + hjbdata[i, j] ** 2)\n', (3083, 3122), True, 'import numpy as np\n'), ((5300, 5360), 'numpy.sqrt', 'np.sqrt', (['((v01 - v10) ** 2 + 2 * (v01 + v10) * fh2 + fh2 ** 2)'], {}), '((v01 - v10) ** 2 + 2 * (v01 + v10) * fh2 + fh2 ** 2)\n', (5307, 5360), True, 'import numpy as np\n'), ((5514, 5526), 'numpy.sqrt', 'np.sqrt', (['v11'], {}), '(v11)\n', (5521, 5526), True, 'import numpy as np\n'), ((5555, 5567), 'numpy.sqrt', 'np.sqrt', (['v11'], {}), '(v11)\n', (5562, 5567), True, 'import numpy as np\n'), ((5596, 5608), 'numpy.sqrt', 'np.sqrt', (['v11'], {}), '(v11)\n', (5603, 5608), True, 'import numpy as np\n'), ((6349, 6405), 'numpy.sqrt', 'np.sqrt', (['(4 * (v02 - v11) ** 2 + 4 * v11 * fh2 + fh2 ** 2)'], {}), '(4 * (v02 - v11) ** 2 + 4 * v11 * fh2 + fh2 ** 2)\n', (6356, 6405), True, 'import numpy as np\n'), ((6557, 6569), 'numpy.sqrt', 'np.sqrt', (['v11'], {}), '(v11)\n', (6564, 6569), True, 'import numpy as np\n'), ((6598, 6610), 'numpy.sqrt', 'np.sqrt', (['v11'], {}), '(v11)\n', (6605, 
6610), True, 'import numpy as np\n'), ((6639, 6651), 'numpy.sqrt', 'np.sqrt', (['v11'], {}), '(v11)\n', (6646, 6651), True, 'import numpy as np\n'), ((7404, 7421), 'numpy.sqrt', 'np.sqrt', (['(fh2 + v0)'], {}), '(fh2 + v0)\n', (7411, 7421), True, 'import numpy as np\n'), ((7686, 7698), 'numpy.sqrt', 'np.sqrt', (['v11'], {}), '(v11)\n', (7693, 7698), True, 'import numpy as np\n'), ((7726, 7738), 'numpy.sqrt', 'np.sqrt', (['v11'], {}), '(v11)\n', (7733, 7738), True, 'import numpy as np\n'), ((7767, 7779), 'numpy.sqrt', 'np.sqrt', (['v11'], {}), '(v11)\n', (7774, 7779), True, 'import numpy as np\n'), ((8045, 8057), 'numpy.sqrt', 'np.sqrt', (['v11'], {}), '(v11)\n', (8052, 8057), True, 'import numpy as np\n'), ((8086, 8098), 'numpy.sqrt', 'np.sqrt', (['v11'], {}), '(v11)\n', (8093, 8098), True, 'import numpy as np\n'), ((8127, 8139), 'numpy.sqrt', 'np.sqrt', (['v11'], {}), '(v11)\n', (8134, 8139), True, 'import numpy as np\n'), ((10667, 10743), 'numpy.sqrt', 'np.sqrt', (['((v[i - 1, j] ** 0.5 - v[i, j - 1] ** 0.5) ** 2 + hjbdata[i, j] ** 2)'], {}), '((v[i - 1, j] ** 0.5 - v[i, j - 1] ** 0.5) ** 2 + hjbdata[i, j] ** 2)\n', (10674, 10743), True, 'import numpy as np\n'), ((1514, 1564), 'numpy.sqrt', 'np.sqrt', (['((x1[i] - x1[i - 1]) * (x2[j] - x2[j - 1]))'], {}), '((x1[i] - x1[i - 1]) * (x2[j] - x2[j - 1]))\n', (1521, 1564), True, 'import numpy as np\n'), ((2673, 2694), 'numpy.arange', 'np.arange', (['i.shape[0]'], {}), '(i.shape[0])\n', (2682, 2694), True, 'import numpy as np\n'), ((2708, 2729), 'numpy.arange', 'np.arange', (['j.shape[0]'], {}), '(j.shape[0])\n', (2717, 2729), True, 'import numpy as np\n'), ((3223, 3269), 'numpy.sqrt', 'np.sqrt', (['((u01 - u10) ** 2 + hjbdata[i, j] ** 2)'], {}), '((u01 - u10) ** 2 + hjbdata[i, j] ** 2)\n', (3230, 3269), True, 'import numpy as np\n'), ((3461, 3492), 'numpy.sqrt', 'np.sqrt', (['((u01 - u10) ** 2 + fh2)'], {}), '((u01 - u10) ** 2 + fh2)\n', (3468, 3492), True, 'import numpy as np\n'), ((3526, 3557), 'numpy.sqrt', 
'np.sqrt', (['((u01 - u10) ** 2 + fh2)'], {}), '((u01 - u10) ** 2 + fh2)\n', (3533, 3557), True, 'import numpy as np\n'), ((4952, 4983), 'numpy.sqrt', 'np.sqrt', (['((u01 - u10) ** 2 + fh2)'], {}), '((u01 - u10) ** 2 + fh2)\n', (4959, 4983), True, 'import numpy as np\n'), ((5017, 5048), 'numpy.sqrt', 'np.sqrt', (['((u01 - u10) ** 2 + fh2)'], {}), '((u01 - u10) ** 2 + fh2)\n', (5024, 5048), True, 'import numpy as np\n'), ((5650, 5696), 'numpy.sqrt', 'np.sqrt', (['((v01 - v10) ** 2 + hjbdata[i, j] ** 2)'], {}), '((v01 - v10) ** 2 + hjbdata[i, j] ** 2)\n', (5657, 5696), True, 'import numpy as np\n'), ((5916, 5957), 'numpy.sqrt', 'np.sqrt', (['((v01 - v10) ** 2 + 4 * v11 * fh2)'], {}), '((v01 - v10) ** 2 + 4 * v11 * fh2)\n', (5923, 5957), True, 'import numpy as np\n'), ((5987, 6028), 'numpy.sqrt', 'np.sqrt', (['((v01 - v10) ** 2 + 4 * v11 * fh2)'], {}), '((v01 - v10) ** 2 + 4 * v11 * fh2)\n', (5994, 6028), True, 'import numpy as np\n'), ((6693, 6739), 'numpy.sqrt', 'np.sqrt', (['((v01 - v10) ** 2 + hjbdata[i, j] ** 2)'], {}), '((v01 - v10) ** 2 + hjbdata[i, j] ** 2)\n', (6700, 6739), True, 'import numpy as np\n'), ((6959, 7000), 'numpy.sqrt', 'np.sqrt', (['((v01 - v10) ** 2 + 4 * v11 * fh2)'], {}), '((v01 - v10) ** 2 + 4 * v11 * fh2)\n', (6966, 7000), True, 'import numpy as np\n'), ((7030, 7071), 'numpy.sqrt', 'np.sqrt', (['((v01 - v10) ** 2 + 4 * v11 * fh2)'], {}), '((v01 - v10) ** 2 + 4 * v11 * fh2)\n', (7037, 7071), True, 'import numpy as np\n'), ((8601, 8665), 'numpy.sqrt', 'np.sqrt', (['((v01 - v10) ** 2 + 2 * (v01 + v10) * hjbdata[i, j] ** 2)'], {}), '((v01 - v10) ** 2 + 2 * (v01 + v10) * hjbdata[i, j] ** 2)\n', (8608, 8665), True, 'import numpy as np\n'), ((8838, 8882), 'numpy.sqrt', 'np.sqrt', (['((v01 - v10) ** 2 + fh2 + Scheme.eps)'], {}), '((v01 - v10) ** 2 + fh2 + Scheme.eps)\n', (8845, 8882), True, 'import numpy as np\n'), ((8910, 8954), 'numpy.sqrt', 'np.sqrt', (['((v01 - v10) ** 2 + fh2 + Scheme.eps)'], {}), '((v01 - v10) ** 2 + fh2 + Scheme.eps)\n', 
(8917, 8954), True, 'import numpy as np\n'), ((9181, 9249), 'numpy.sqrt', 'np.sqrt', (['((v01 - v10) ** 2 + fh2 * (2 * v00 + v10 + v01 + 0.25 * fh2))'], {}), '((v01 - v10) ** 2 + fh2 * (2 * v00 + v10 + v01 + 0.25 * fh2))\n', (9188, 9249), True, 'import numpy as np\n'), ((9415, 9459), 'numpy.sqrt', 'np.sqrt', (['((v01 - v10) ** 2 + fh2 + Scheme.eps)'], {}), '((v01 - v10) ** 2 + fh2 + Scheme.eps)\n', (9422, 9459), True, 'import numpy as np\n'), ((9487, 9531), 'numpy.sqrt', 'np.sqrt', (['((v01 - v10) ** 2 + fh2 + Scheme.eps)'], {}), '((v01 - v10) ** 2 + fh2 + Scheme.eps)\n', (9494, 9531), True, 'import numpy as np\n'), ((9920, 9964), 'numpy.sqrt', 'np.sqrt', (['((v01 - v10) ** 2 + fh2 + Scheme.eps)'], {}), '((v01 - v10) ** 2 + fh2 + Scheme.eps)\n', (9927, 9964), True, 'import numpy as np\n'), ((9992, 10036), 'numpy.sqrt', 'np.sqrt', (['((v01 - v10) ** 2 + fh2 + Scheme.eps)'], {}), '((v01 - v10) ** 2 + fh2 + Scheme.eps)\n', (9999, 10036), True, 'import numpy as np\n'), ((10430, 10474), 'numpy.sqrt', 'np.sqrt', (['((v01 - v10) ** 2 + fh2 + Scheme.eps)'], {}), '((v01 - v10) ** 2 + fh2 + Scheme.eps)\n', (10437, 10474), True, 'import numpy as np\n'), ((10502, 10546), 'numpy.sqrt', 'np.sqrt', (['((v01 - v10) ** 2 + fh2 + Scheme.eps)'], {}), '((v01 - v10) ** 2 + fh2 + Scheme.eps)\n', (10509, 10546), True, 'import numpy as np\n'), ((10865, 10909), 'numpy.sqrt', 'np.sqrt', (['((u01 - u10) ** 2 + fh2 + Scheme.eps)'], {}), '((u01 - u10) ** 2 + fh2 + Scheme.eps)\n', (10872, 10909), True, 'import numpy as np\n'), ((10937, 10981), 'numpy.sqrt', 'np.sqrt', (['((u01 - u10) ** 2 + fh2 + Scheme.eps)'], {}), '((u01 - u10) ** 2 + fh2 + Scheme.eps)\n', (10944, 10981), True, 'import numpy as np\n'), ((6213, 6233), 'numpy.maximum', 'np.maximum', (['(i - 2)', '(0)'], {}), '(i - 2, 0)\n', (6223, 6233), True, 'import numpy as np\n'), ((6238, 6258), 'numpy.maximum', 'np.maximum', (['(j - 2)', '(0)'], {}), '(j - 2, 0)\n', (6248, 6258), True, 'import numpy as np\n')] |
from wordpress_xmlrpc import Client, WordPressPost
from wordpress_xmlrpc.methods.posts import GetPosts, NewPost
from wordpress_xmlrpc.methods.users import GetUserInfo
from wordpress_xmlrpc.methods import media
from wordpress_xmlrpc.compat import xmlrpc_client
import requests
import numpy as np
import random
import time
import string
class WPPost(object):
    """Send GET/POST requests to a WordPress website over XML-RPC.

    Typical use:
        wp = WPPost(conf)
    """
    def __init__(self, conf):
        self.conf = conf
        self._connect()
    def _connect(self):
        # The XML-RPC endpoint and credentials come from the conf object.
        self.wp_client = Client(self.conf.wp_hostxmlrpc,
                                self.conf.wp_user,
                                self.conf.wp_pass)
    #TODO: media directory should be defined in the conf file and a randomly chose media file should be picked
    def post(self):
        """Upload a thumbnail and publish a post with random title/content.

        Returns the new post id from the XML-RPC `NewPost` call.
        """
        filename = "linuxlogo.jpg"
        data = {
            'name': 'linuxlogo.jpg',
            'type': 'image/jpeg',
        }
        with open(filename, 'rb') as img:
            data['bits'] = xmlrpc_client.Binary(img.read())
        r = self.wp_client.call(media.UploadFile(data))
        attachment_id = r['id']
        # Draw random lengths for the body and title, bounded by the conf.
        s = np.random.uniform(0, 1, size=2)
        s1 = int(s[0]*self.conf.post_nchars+1)
        s2 = int(s[1]*self.conf.post_title_nchars+1)
        # BUGFIX: `string.letters` and `xrange` are Python 2 only and raise
        # AttributeError/NameError on Python 3; use `string.ascii_letters`
        # and `range` instead.
        content = "".join([random.choice(string.ascii_letters) for i in range(s1)])
        random_title = "".join([random.choice(string.ascii_letters) for i in range(s2)])
        post = WordPressPost()
        post.title = 'Random title: ' + random_title
        post.content = content
        post.post_status = 'publish'
        post.thumbnail = attachment_id
        post.terms_names = {
            'post_tag': ['test', 'firstpost'],
            'category': ['Uncategorized']
        }
        return self.wp_client.call(NewPost(post))
    #TODO: Should be random get
    def get(self):
        """Issue a plain HTTP GET against the configured WordPress host."""
        return requests.get(self.conf.wp_host)
| [
"numpy.random.uniform",
"wordpress_xmlrpc.WordPressPost",
"random.choice",
"wordpress_xmlrpc.Client",
"wordpress_xmlrpc.methods.posts.NewPost",
"requests.get",
"wordpress_xmlrpc.methods.media.UploadFile"
] | [((623, 692), 'wordpress_xmlrpc.Client', 'Client', (['self.conf.wp_hostxmlrpc', 'self.conf.wp_user', 'self.conf.wp_pass'], {}), '(self.conf.wp_hostxmlrpc, self.conf.wp_user, self.conf.wp_pass)\n', (629, 692), False, 'from wordpress_xmlrpc import Client, WordPressPost\n'), ((1282, 1313), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': '(2)'}), '(0, 1, size=2)\n', (1299, 1313), True, 'import numpy as np\n'), ((1610, 1625), 'wordpress_xmlrpc.WordPressPost', 'WordPressPost', ([], {}), '()\n', (1623, 1625), False, 'from wordpress_xmlrpc import Client, WordPressPost\n'), ((2031, 2062), 'requests.get', 'requests.get', (['self.conf.wp_host'], {}), '(self.conf.wp_host)\n', (2043, 2062), False, 'import requests\n'), ((1157, 1179), 'wordpress_xmlrpc.methods.media.UploadFile', 'media.UploadFile', (['data'], {}), '(data)\n', (1173, 1179), False, 'from wordpress_xmlrpc.methods import media\n'), ((1949, 1962), 'wordpress_xmlrpc.methods.posts.NewPost', 'NewPost', (['post'], {}), '(post)\n', (1956, 1962), False, 'from wordpress_xmlrpc.methods.posts import GetPosts, NewPost\n'), ((1450, 1479), 'random.choice', 'random.choice', (['string.letters'], {}), '(string.letters)\n', (1463, 1479), False, 'import random\n'), ((1534, 1563), 'random.choice', 'random.choice', (['string.letters'], {}), '(string.letters)\n', (1547, 1563), False, 'import random\n')] |
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
import numpy as np
def draw_contours(X, U, centers, cov, pnt_colors, center_color, title = None):
    """Scatter-plot clustered points with Gaussian contour overlays.

    Parameters
    ----------
    X : data points, one row per sample (2-D features are plotted).
    U : membership matrix; each point is assigned to its argmax cluster.
    centers : cluster centers, one row per cluster.
    cov : per-cluster covariance matrices, or None (identity is used,
        e.g. for fuzzy c-means which has no covariances).
    pnt_colors : one color per cluster for the points/contours.
    center_color : color used to mark the cluster centers.
    title : optional figure title.

    Displays the figure via plt.show(); returns nothing.
    """
    cluster_indices = np.argmax(U, axis=1)
    cluster_groups = []
    cluster_cov = []
    # Fixed evaluation grid for the Gaussian density contours.
    x1 = np.linspace(-2, 10, 200)
    x2 = np.linspace(-2, 10, 200)
    M, N = np.meshgrid(x1, x2)
    pos = np.empty(M.shape + (2,))
    pos[:, :, 0] = M
    pos[:, :, 1] = N
    for i in range(centers.shape[0]):
        cluster_groups.append(X[cluster_indices == i, :])
        # in case no covariance matrix was given (which is the case for Fuzzy c mean)
        if cov is None:
            cluster_cov.append(np.identity(centers.shape[1]))
        else:
            cluster_cov.append(cov[i])
    plt.figure(figsize=(17, 10))
    plt.title(title)
    for i in range(centers.shape[0]):
        plt.scatter(cluster_groups[i][:, 0], cluster_groups[i][:, 1], color=pnt_colors[i], marker='o')
        plt.plot(centers[i, 0], centers[i, 1], color=center_color, marker='x', linewidth=7, markersize=19)
        plt.contour(M, N, multivariate_normal(centers[i], cluster_cov[i]).pdf(pos), colors=pnt_colors[i], alpha=0.5)
    plt.grid()
    plt.show()
def segment_image(im, labels, center, verbose):
    """Paint each labelled region with its cluster-center colour.

    For every unique label, the matching pixels receive the center's RGB
    values scaled from [0, 1] to integer 0-255 range.  Returns an int64
    image with the same shape as `im`.
    """
    segs = list(np.unique(labels))
    out = np.zeros_like(im, dtype=np.int64)
    for idx, seg in enumerate(segs):
        if verbose:
            print(idx)
        # Binary mask of the pixels belonging to this segment.
        mask = (labels == seg).astype(np.int64)
        for channel in range(3):
            out[:, :, channel] += mask * int(center[seg, channel] * 256)
    return out
| [
"matplotlib.pyplot.title",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"numpy.zeros_like",
"matplotlib.pyplot.plot",
"numpy.argmax",
"numpy.empty",
"matplotlib.pyplot.scatter",
"numpy.identity",
"scipy.stats.multivariate_normal",
"matplotlib.pyplot.figure",
"numpy.where",
"numpy.linspace",
... | [((198, 218), 'numpy.argmax', 'np.argmax', (['U'], {'axis': '(1)'}), '(U, axis=1)\n', (207, 218), True, 'import numpy as np\n'), ((275, 299), 'numpy.linspace', 'np.linspace', (['(-2)', '(10)', '(200)'], {}), '(-2, 10, 200)\n', (286, 299), True, 'import numpy as np\n'), ((309, 333), 'numpy.linspace', 'np.linspace', (['(-2)', '(10)', '(200)'], {}), '(-2, 10, 200)\n', (320, 333), True, 'import numpy as np\n'), ((346, 365), 'numpy.meshgrid', 'np.meshgrid', (['x1', 'x2'], {}), '(x1, x2)\n', (357, 365), True, 'import numpy as np\n'), ((377, 401), 'numpy.empty', 'np.empty', (['(M.shape + (2,))'], {}), '(M.shape + (2,))\n', (385, 401), True, 'import numpy as np\n'), ((773, 801), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(17, 10)'}), '(figsize=(17, 10))\n', (783, 801), True, 'import matplotlib.pyplot as plt\n'), ((806, 822), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (815, 822), True, 'import matplotlib.pyplot as plt\n'), ((1194, 1204), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1202, 1204), True, 'import matplotlib.pyplot as plt\n'), ((1209, 1219), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1217, 1219), True, 'import matplotlib.pyplot as plt\n'), ((1309, 1342), 'numpy.zeros_like', 'np.zeros_like', (['im'], {'dtype': 'np.int64'}), '(im, dtype=np.int64)\n', (1322, 1342), True, 'import numpy as np\n'), ((870, 969), 'matplotlib.pyplot.scatter', 'plt.scatter', (['cluster_groups[i][:, 0]', 'cluster_groups[i][:, 1]'], {'color': 'pnt_colors[i]', 'marker': '"""o"""'}), "(cluster_groups[i][:, 0], cluster_groups[i][:, 1], color=\n pnt_colors[i], marker='o')\n", (881, 969), True, 'import matplotlib.pyplot as plt\n'), ((973, 1075), 'matplotlib.pyplot.plot', 'plt.plot', (['centers[i, 0]', 'centers[i, 1]'], {'color': 'center_color', 'marker': '"""x"""', 'linewidth': '(7)', 'markersize': '(19)'}), "(centers[i, 0], centers[i, 1], color=center_color, marker='x',\n linewidth=7, markersize=19)\n", (981, 1075), 
True, 'import matplotlib.pyplot as plt\n'), ((1278, 1295), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (1287, 1295), True, 'import numpy as np\n'), ((1358, 1375), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (1367, 1375), True, 'import numpy as np\n'), ((1461, 1488), 'numpy.where', 'np.where', (['(labels == segs[i])'], {}), '(labels == segs[i])\n', (1469, 1488), True, 'import numpy as np\n'), ((1504, 1546), 'numpy.zeros_like', 'np.zeros_like', (['im[:, :, 0]'], {'dtype': 'np.int64'}), '(im[:, :, 0], dtype=np.int64)\n', (1517, 1546), True, 'import numpy as np\n'), ((683, 712), 'numpy.identity', 'np.identity', (['centers.shape[1]'], {}), '(centers.shape[1])\n', (694, 712), True, 'import numpy as np\n'), ((1098, 1145), 'scipy.stats.multivariate_normal', 'multivariate_normal', (['centers[i]', 'cluster_cov[i]'], {}), '(centers[i], cluster_cov[i])\n', (1117, 1145), False, 'from scipy.stats import multivariate_normal\n')] |
"""
Display a lot of line objects. Because of the architecture of wgpu,
this is still performant.
"""
# run_example = false - because it takes too long and times out
import time # noqa
import numpy as np
from wgpu.gui.auto import WgpuCanvas, run
import pygfx as gfx
# Set up the canvas/renderer; max_fps is uncapped so the FPS counter
# reflects raw rendering throughput.
canvas = WgpuCanvas(max_fps=999)
renderer = gfx.WgpuRenderer(canvas, show_fps=True)
scene = gfx.Scene()
# Define number of vertices: a rows x cols grid of lines, each with
# `nvertices` points.
cols = 20
rows = 50
nvertices = 30000
use_thin_lines = True
print(nvertices * rows * cols, "vertices in total")
x = np.linspace(0.05, 0.95, nvertices, dtype=np.float32)
for row in range(rows):
    for col in range(cols):
        # A noisy sine wave; z is zero so the lines lie in the xy-plane.
        y = np.sin(x * 25) * 0.45 + np.random.normal(0, 0.02, len(x)).astype(np.float32)
        positions = np.column_stack([x, y, np.zeros_like(x)])
        geometry = gfx.Geometry(positions=positions)
        if use_thin_lines:
            material = gfx.LineThinMaterial(color=(col / cols, row / rows, 0.5, 1.0))
        else:
            material = gfx.LineMaterial(
                thickness=0.2 + 2 * row / rows, color=(col / cols, row / rows, 0.5, 1.0)
            )
        line = gfx.Line(geometry, material)
        # Place each line at its own grid cell.
        line.position.x = col
        line.position.y = row
        scene.add(line)
# Orthographic camera framing the whole grid; pan/zoom controls attached.
camera = gfx.OrthographicCamera(cols, rows)
camera.maintain_aspect = False
controller = gfx.PanZoomController(camera.position.clone())
controller.pan(gfx.linalg.Vector3(cols / 2, rows / 2, 0))
controller.add_default_event_handlers(renderer, camera)
def animate():
    # Per-frame callback: sync camera, render, and schedule the next frame.
    controller.update_camera(camera)
    t0 = time.perf_counter()  # noqa
    renderer.render(scene, camera)
    # print(time.perf_counter() - t0)
    canvas.request_draw()
if __name__ == "__main__":
    canvas.request_draw(animate)
    run()
| [
"pygfx.linalg.Vector3",
"numpy.zeros_like",
"pygfx.LineThinMaterial",
"pygfx.OrthographicCamera",
"time.perf_counter",
"wgpu.gui.auto.run",
"wgpu.gui.auto.WgpuCanvas",
"numpy.sin",
"pygfx.Scene",
"numpy.linspace",
"pygfx.Geometry",
"pygfx.Line",
"pygfx.WgpuRenderer",
"pygfx.LineMaterial"
] | [((280, 303), 'wgpu.gui.auto.WgpuCanvas', 'WgpuCanvas', ([], {'max_fps': '(999)'}), '(max_fps=999)\n', (290, 303), False, 'from wgpu.gui.auto import WgpuCanvas, run\n'), ((315, 354), 'pygfx.WgpuRenderer', 'gfx.WgpuRenderer', (['canvas'], {'show_fps': '(True)'}), '(canvas, show_fps=True)\n', (331, 354), True, 'import pygfx as gfx\n'), ((364, 375), 'pygfx.Scene', 'gfx.Scene', ([], {}), '()\n', (373, 375), True, 'import pygfx as gfx\n'), ((523, 575), 'numpy.linspace', 'np.linspace', (['(0.05)', '(0.95)', 'nvertices'], {'dtype': 'np.float32'}), '(0.05, 0.95, nvertices, dtype=np.float32)\n', (534, 575), True, 'import numpy as np\n'), ((1242, 1276), 'pygfx.OrthographicCamera', 'gfx.OrthographicCamera', (['cols', 'rows'], {}), '(cols, rows)\n', (1264, 1276), True, 'import pygfx as gfx\n'), ((1383, 1424), 'pygfx.linalg.Vector3', 'gfx.linalg.Vector3', (['(cols / 2)', '(rows / 2)', '(0)'], {}), '(cols / 2, rows / 2, 0)\n', (1401, 1424), True, 'import pygfx as gfx\n'), ((1545, 1564), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1562, 1564), False, 'import time\n'), ((1738, 1743), 'wgpu.gui.auto.run', 'run', ([], {}), '()\n', (1741, 1743), False, 'from wgpu.gui.auto import WgpuCanvas, run\n'), ((799, 832), 'pygfx.Geometry', 'gfx.Geometry', ([], {'positions': 'positions'}), '(positions=positions)\n', (811, 832), True, 'import pygfx as gfx\n'), ((1119, 1147), 'pygfx.Line', 'gfx.Line', (['geometry', 'material'], {}), '(geometry, material)\n', (1127, 1147), True, 'import pygfx as gfx\n'), ((883, 945), 'pygfx.LineThinMaterial', 'gfx.LineThinMaterial', ([], {'color': '(col / cols, row / rows, 0.5, 1.0)'}), '(color=(col / cols, row / rows, 0.5, 1.0))\n', (903, 945), True, 'import pygfx as gfx\n'), ((983, 1077), 'pygfx.LineMaterial', 'gfx.LineMaterial', ([], {'thickness': '(0.2 + 2 * row / rows)', 'color': '(col / cols, row / rows, 0.5, 1.0)'}), '(thickness=0.2 + 2 * row / rows, color=(col / cols, row /\n rows, 0.5, 1.0))\n', (999, 1077), True, 'import pygfx as 
gfx\n'), ((641, 655), 'numpy.sin', 'np.sin', (['(x * 25)'], {}), '(x * 25)\n', (647, 655), True, 'import numpy as np\n'), ((761, 777), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (774, 777), True, 'import numpy as np\n')] |
import argparse
import torch
from solver import Solver
import os
import numpy as np
import scipy.io
from torch.utils.data import DataLoader
from tqdm import tqdm
# Training settings.
# FIX: several --help strings contradicted the actual defaults
# (batch-size said 64 but defaults to 128; lr said 0.0002 but defaults to
# 1e-4) and --checkpoint_dir carried a copy-pasted, unrelated description.
parser = argparse.ArgumentParser(description='PyTorch MCD Implementation')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
                    help='input batch size for training (default: 128)')
parser.add_argument('--checkpoint_dir', type=str, default='checkpoint', metavar='N',
                    help='directory in which checkpoints are stored')
parser.add_argument('--lr', type=float, default=1e-4, metavar='LR',
                    help='learning rate (default: 0.0001)')
parser.add_argument('--max_epoch', type=int, default=120, metavar='N',
                    help='how many epochs')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--num_k', type=int, default=3, metavar='N',
                    help='hyper parameter for generator update')
parser.add_argument('--person', type=int, default=3, metavar='N',
                    help='the person for testing')
parser.add_argument('--one_step', action='store_true', default=False,
                    help='one step training with gradient reversal layer')
parser.add_argument('--optimizer', type=str, default='adam', metavar='N', help='which optimizer')
parser.add_argument('--resume_epoch', type=int, default=100, metavar='N',
                    help='epoch to resume')
parser.add_argument('--save_epoch', type=int, default=10, metavar='N',
                    help='when to restore the model')
parser.add_argument('--save_model', action='store_true', default=False,
                    help='save_model or not')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--use_abs_diff', action='store_true', default=False,
                    help='use absolute difference value as a measurement')
args = parser.parse_args()
# Use CUDA whenever it is available unless explicitly disabled via --no-cuda.
args.cuda = not args.no_cuda and torch.cuda.is_available()
# Seed CPU (and, if used, GPU) RNGs for reproducibility.
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
def dataset_load(person_source=1, person_target=args.person):
    """Load one source-subject and one target-subject .mat split as datasets.

    Reads '../train/<person_source>.mat' and '../test/<10+person_target>.mat'
    (each containing 'de_feature' and 'label' arrays), min-max normalizes the
    features per dimension, and wraps each split as a TensorDataset.

    Note: `person_target=args.person` is evaluated once at import time, which
    preserves the original behavior.

    :param person_source: subject index for the source (training) split.
    :param person_target: subject index offset for the target (test) split.
    :return: (source_dataset, target_dataset) tuple of TensorDatasets.
    """
    def _load_split(path):
        # Load a .mat file once (the original loaded each file twice) and
        # convert features/labels to normalized float / long tensors.
        mat = scipy.io.loadmat(path)
        X = mat['de_feature']
        y = mat['label']
        # Per-dimension min-max scaling to [0, 1]; hoist min so it is
        # computed once instead of twice.
        lo = np.min(X, axis=0)
        hi = np.max(X, axis=0)
        X = (X - lo) / (hi - lo)
        X = torch.from_numpy(X).float()
        y = torch.from_numpy(y).long().squeeze()
        return torch.utils.data.TensorDataset(X, y)

    # The dead `X_source = np.array([])` placeholders from the original were
    # removed: they were overwritten immediately and never used.
    source_dataset = _load_split('../train/%d.mat' % person_source)
    target_dataset = _load_split('../test/%d.mat' % (10 + person_target))
    return source_dataset, target_dataset
def main():
    """Train an ensemble of ten MCD solvers (one per source subject) and
    report the majority-vote accuracy on the target subject every epoch."""
    ensemble = [Solver(args, learning_rate=args.lr, batch_size=args.batch_size,
                       optimizer=args.optimizer, num_k=args.num_k,
                       checkpoint_dir=args.checkpoint_dir,
                       save_epoch=args.save_epoch) for _ in range(10)]
    # One (source, target) dataset pair per source subject 1..10.
    datasets = [dataset_load(person_source=src) for src in range(1, 11)]
    # Ground-truth labels of the target subject, used for voting accuracy.
    y_label = scipy.io.loadmat('../test/%d.mat' % (10 + args.person))['label']
    y_label = torch.from_numpy(y_label).long().squeeze()
    for epoch in range(args.max_epoch):
        all_preds = []
        accs = []
        for idx, solver in tqdm(enumerate(ensemble), total=len(ensemble), leave=False):
            source_dataset, target_dataset = datasets[idx]
            src_loader = DataLoader(source_dataset, batch_size=args.batch_size, shuffle=True, num_workers=1)
            tgt_loader = DataLoader(target_dataset, batch_size=args.batch_size, shuffle=True, num_workers=1)
            solver.train(epoch, src_loader, tgt_loader)
            if epoch % 1 == 0:
                # Evaluate this ensemble member on the (unshuffled) target set.
                eval_loader = DataLoader(target_dataset, batch_size=args.batch_size, num_workers=1)
                pred = solver.test(epoch, eval_loader, save_model=args.save_model)
                all_preds.append(pred)
                pred = torch.tensor(pred, dtype=torch.long)
                member_acc = (y_label == pred).sum().item() / len(pred)
                accs.append(member_acc)
        # Majority vote over the ensemble, assuming 4 emotion classes.
        voted_pred = []
        for j in range(len(y_label)):
            counts = [0] * 4
            for member in all_preds:
                counts[member[j]] += 1
            voted_pred.append(counts.index(max(counts)))
        voted_pred = torch.tensor(voted_pred)
        acc = (y_label == voted_pred).sum().item() / len(voted_pred)
        print("In epoch %d, voted_acc: %.4f" % (epoch, acc))
if __name__ == '__main__':
    main()
"solver.Solver",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"torch.manual_seed",
"torch.cuda.manual_seed",
"numpy.min",
"numpy.max",
"numpy.array",
"torch.cuda.is_available",
"torch.utils.data.TensorDataset",
"torch.tensor",
"torch.from_numpy"
] | [((192, 257), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch MCD Implementation"""'}), "(description='PyTorch MCD Implementation')\n", (215, 257), False, 'import argparse\n'), ((2100, 2128), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2117, 2128), False, 'import torch\n'), ((2074, 2099), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2097, 2099), False, 'import torch\n'), ((2147, 2180), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2169, 2180), False, 'import torch\n'), ((2260, 2272), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2268, 2272), True, 'import numpy as np\n'), ((2288, 2300), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2296, 2300), True, 'import numpy as np\n'), ((2733, 2783), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['X_source', 'y_source'], {}), '(X_source, y_source)\n', (2763, 2783), False, 'import torch\n'), ((3189, 3239), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['X_target', 'y_target'], {}), '(X_target, y_target)\n', (3219, 3239), False, 'import torch\n'), ((3311, 3491), 'solver.Solver', 'Solver', (['args'], {'learning_rate': 'args.lr', 'batch_size': 'args.batch_size', 'optimizer': 'args.optimizer', 'num_k': 'args.num_k', 'checkpoint_dir': 'args.checkpoint_dir', 'save_epoch': 'args.save_epoch'}), '(args, learning_rate=args.lr, batch_size=args.batch_size, optimizer=\n args.optimizer, num_k=args.num_k, checkpoint_dir=args.checkpoint_dir,\n save_epoch=args.save_epoch)\n', (3317, 3491), False, 'from solver import Solver\n'), ((4991, 5015), 'torch.tensor', 'torch.tensor', (['voted_pred'], {}), '(voted_pred)\n', (5003, 5015), False, 'import torch\n'), ((2521, 2545), 'numpy.min', 'np.min', (['X_source'], {'axis': '(0)'}), '(X_source, axis=0)\n', (2527, 2545), True, 'import numpy as np\n'), ((2550, 2574), 'numpy.max', 'np.max', 
(['X_source'], {'axis': '(0)'}), '(X_source, axis=0)\n', (2556, 2574), True, 'import numpy as np\n'), ((2577, 2601), 'numpy.min', 'np.min', (['X_source'], {'axis': '(0)'}), '(X_source, axis=0)\n', (2583, 2601), True, 'import numpy as np\n'), ((2618, 2644), 'torch.from_numpy', 'torch.from_numpy', (['X_source'], {}), '(X_source)\n', (2634, 2644), False, 'import torch\n'), ((2977, 3001), 'numpy.min', 'np.min', (['X_target'], {'axis': '(0)'}), '(X_target, axis=0)\n', (2983, 3001), True, 'import numpy as np\n'), ((3006, 3030), 'numpy.max', 'np.max', (['X_target'], {'axis': '(0)'}), '(X_target, axis=0)\n', (3012, 3030), True, 'import numpy as np\n'), ((3033, 3057), 'numpy.min', 'np.min', (['X_target'], {'axis': '(0)'}), '(X_target, axis=0)\n', (3039, 3057), True, 'import numpy as np\n'), ((3074, 3100), 'torch.from_numpy', 'torch.from_numpy', (['X_target'], {}), '(X_target)\n', (3090, 3100), False, 'import torch\n'), ((4014, 4101), 'torch.utils.data.DataLoader', 'DataLoader', (['source_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': '(1)'}), '(source_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=1)\n', (4024, 4101), False, 'from torch.utils.data import DataLoader\n'), ((4126, 4213), 'torch.utils.data.DataLoader', 'DataLoader', (['target_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': '(1)'}), '(target_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=1)\n', (4136, 4213), False, 'from torch.utils.data import DataLoader\n'), ((4328, 4397), 'torch.utils.data.DataLoader', 'DataLoader', (['target_dataset'], {'batch_size': 'args.batch_size', 'num_workers': '(1)'}), '(target_dataset, batch_size=args.batch_size, num_workers=1)\n', (4338, 4397), False, 'from torch.utils.data import DataLoader\n'), ((4537, 4573), 'torch.tensor', 'torch.tensor', (['pred'], {'dtype': 'torch.long'}), '(pred, dtype=torch.long)\n', (4549, 4573), False, 'import torch\n'), ((2668, 2694), 'torch.from_numpy', 
'torch.from_numpy', (['y_source'], {}), '(y_source)\n', (2684, 2694), False, 'import torch\n'), ((3124, 3150), 'torch.from_numpy', 'torch.from_numpy', (['y_target'], {}), '(y_target)\n', (3140, 3150), False, 'import torch\n'), ((3724, 3749), 'torch.from_numpy', 'torch.from_numpy', (['y_label'], {}), '(y_label)\n', (3740, 3749), False, 'import torch\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import multiprocessing
import numpy as np
def set_paddle_flags(**kwargs):
    """Export the given Paddle flags as environment variables.

    Values already present in the environment are left untouched, so a
    user-exported flag always wins. Must be called before `import paddle`
    (see the NOTE at the call site) for the flags to take effect.

    :param kwargs: flag-name -> value pairs; values are stringified.
    """
    for key, value in kwargs.items():
        # setdefault keeps any pre-existing value, replacing the verbose
        # `if os.environ.get(key, None) is None` check.
        os.environ.setdefault(key, str(value))
# NOTE(paddle-dev): All of these flags should be
# set before `import paddle`. Otherwise, it would
# not take any effect.
# FLAGS_eager_delete_tensor_gb=0 enables eager garbage collection of
# tensors (per the inline comment) so memory is reclaimed promptly.
set_paddle_flags(
    FLAGS_eager_delete_tensor_gb=0, # enable GC to save memory
)
from paddle import fluid
# from ppocr.utils.utility import load_config, merge_config
from ppocr.data.reader_main import test_reader
import program
from paddle import fluid
from ppocr.utils.utility import initial_logger
logger = initial_logger()
from ppocr.data.reader_main import reader_main
from ppocr.utils.save_load import init_model
from ppocr.utils.character import CharacterOps
from ppocr.utils.utility import create_module
logger = initial_logger()
def main():
    """Build the recognition network in test mode, restore its weights, run
    prediction on one batch of images, and export an inference model.

    Reads the module-level FLAGS (config path / overrides) parsed in
    __main__ and writes the exported model to ./output/.
    """
    config = program.load_config(FLAGS.config)
    program.merge_config(FLAGS.opt)
    logger.info(config)
    # Character/label codec; stashed in the config so the model can use it too.
    char_ops = CharacterOps(config['Global'])
    config['Global']['char_ops'] = char_ops
    # check if set use_gpu=True in paddlepaddle cpu version
    use_gpu = config['Global']['use_gpu']
    # check_gpu(use_gpu)
    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    # Instantiate the architecture class named in the config.
    rec_model = create_module(config['Architecture']['function'])(params=config)
    startup_prog = fluid.Program()
    eval_prog = fluid.Program()
    with fluid.program_guard(eval_prog, startup_prog):
        with fluid.unique_name.guard():
            # Build the test-mode graph; `outputs` maps fetch names to vars.
            _, outputs = rec_model(mode="test")
            fetch_name_list = list(outputs.keys())
            fetch_varname_list = [outputs[v].name for v in fetch_name_list]
    eval_prog = eval_prog.clone(for_test=True)
    exe.run(startup_prog)
    # Restore pretrained/checkpoint weights into the eval program.
    init_model(config, eval_prog, exe)
    blobs = reader_main(config, 'test')
    imgs = next(blobs())
    for img in imgs:
        predict = exe.run(program=eval_prog,
                          feed={"image": img},
                          fetch_list=fetch_varname_list,
                          return_numpy=False)
        preds = np.array(predict[0])
        if preds.shape[1] == 1:
            # Single-column output: a flat label sequence decoded directly
            # (presumably a CTC-style head — TODO(review): confirm).
            preds = preds.reshape(-1)
            preds_lod = predict[0].lod()[0]
            preds_text = char_ops.decode(preds)
        else:
            # Multi-column output: row 0 holds the label sequence; label 1
            # appears to act as the end-of-sequence marker — verify against
            # the decoder head implementation.
            end_pos = np.where(preds[0, :] == 1)[0]
            if len(end_pos) <= 1:
                preds_text = preds[0, 1:]
            else:
                preds_text = preds[0, 1:end_pos[1]]
            preds_text = preds_text.reshape(-1)
            preds_text = char_ops.decode(preds_text)
        print(preds)
        print(preds_text)
    # save for inference model
    target_var = []
    for key, values in outputs.items():
        target_var.append(values)
    fluid.io.save_inference_model(
        "./output/",
        feeded_var_names=['image'],
        target_vars=target_var,
        executor=exe,
        main_program=eval_prog,
        model_filename="model",
        params_filename="params")
if __name__ == '__main__':
    # Parse command-line flags into the module-level FLAGS consumed by
    # main(), then run the evaluation/export pipeline.
    parser = program.ArgsParser()
    FLAGS = parser.parse_args()
    main()
| [
"ppocr.utils.character.CharacterOps",
"paddle.fluid.Executor",
"ppocr.utils.utility.initial_logger",
"program.load_config",
"paddle.fluid.io.save_inference_model",
"paddle.fluid.CUDAPlace",
"paddle.fluid.unique_name.guard",
"ppocr.data.reader_main.reader_main",
"paddle.fluid.program_guard",
"ppocr... | [((1382, 1398), 'ppocr.utils.utility.initial_logger', 'initial_logger', ([], {}), '()\n', (1396, 1398), False, 'from ppocr.utils.utility import initial_logger\n'), ((1594, 1610), 'ppocr.utils.utility.initial_logger', 'initial_logger', ([], {}), '()\n', (1608, 1610), False, 'from ppocr.utils.utility import initial_logger\n'), ((1638, 1671), 'program.load_config', 'program.load_config', (['FLAGS.config'], {}), '(FLAGS.config)\n', (1657, 1671), False, 'import program\n'), ((1676, 1707), 'program.merge_config', 'program.merge_config', (['FLAGS.opt'], {}), '(FLAGS.opt)\n', (1696, 1707), False, 'import program\n'), ((1747, 1777), 'ppocr.utils.character.CharacterOps', 'CharacterOps', (["config['Global']"], {}), "(config['Global'])\n", (1759, 1777), False, 'from ppocr.utils.character import CharacterOps\n'), ((2029, 2050), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (2043, 2050), False, 'from paddle import fluid\n'), ((2153, 2168), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (2166, 2168), False, 'from paddle import fluid\n'), ((2185, 2200), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (2198, 2200), False, 'from paddle import fluid\n'), ((2549, 2583), 'ppocr.utils.save_load.init_model', 'init_model', (['config', 'eval_prog', 'exe'], {}), '(config, eval_prog, exe)\n', (2559, 2583), False, 'from ppocr.utils.save_load import init_model\n'), ((2597, 2624), 'ppocr.data.reader_main.reader_main', 'reader_main', (['config', '"""test"""'], {}), "(config, 'test')\n", (2608, 2624), False, 'from ppocr.data.reader_main import reader_main\n'), ((3558, 3748), 'paddle.fluid.io.save_inference_model', 'fluid.io.save_inference_model', (['"""./output/"""'], {'feeded_var_names': "['image']", 'target_vars': 'target_var', 'executor': 'exe', 'main_program': 'eval_prog', 'model_filename': '"""model"""', 'params_filename': '"""params"""'}), "('./output/', feeded_var_names=['image'],\n target_vars=target_var, executor=exe, 
main_program=eval_prog,\n model_filename='model', params_filename='params')\n", (3587, 3748), False, 'from paddle import fluid\n'), ((3840, 3860), 'program.ArgsParser', 'program.ArgsParser', ([], {}), '()\n', (3858, 3860), False, 'import program\n'), ((1967, 1985), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['(0)'], {}), '(0)\n', (1982, 1985), False, 'from paddle import fluid\n'), ((2002, 2018), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (2016, 2018), False, 'from paddle import fluid\n'), ((2068, 2117), 'ppocr.utils.utility.create_module', 'create_module', (["config['Architecture']['function']"], {}), "(config['Architecture']['function'])\n", (2081, 2117), False, 'from ppocr.utils.utility import create_module\n'), ((2210, 2254), 'paddle.fluid.program_guard', 'fluid.program_guard', (['eval_prog', 'startup_prog'], {}), '(eval_prog, startup_prog)\n', (2229, 2254), False, 'from paddle import fluid\n'), ((2883, 2903), 'numpy.array', 'np.array', (['predict[0]'], {}), '(predict[0])\n', (2891, 2903), True, 'import numpy as np\n'), ((868, 893), 'os.environ.get', 'os.environ.get', (['key', 'None'], {}), '(key, None)\n', (882, 893), False, 'import os\n'), ((2269, 2294), 'paddle.fluid.unique_name.guard', 'fluid.unique_name.guard', ([], {}), '()\n', (2292, 2294), False, 'from paddle import fluid\n'), ((3102, 3128), 'numpy.where', 'np.where', (['(preds[0, :] == 1)'], {}), '(preds[0, :] == 1)\n', (3110, 3128), True, 'import numpy as np\n')] |
"""
Example of hyperparameter search in MLflow using Hyperopt.
The run method will instantiate and run Hyperopt optimizer. Each parameter configuration is
evaluated in a new MLflow run invoking main entry point with selected parameters.
The runs are evaluated based on validation set loss. Test set score is calculated to verify the
results.
This example currently does not support parallel execution.
"""
import click
import numpy as np
from hyperopt import fmin, hp, tpe, rand
import mlflow.projects
from mlflow.tracking.client import MlflowClient
_inf = np.finfo(np.float64).max
@click.command(help="Perform hyperparameter search with Hyperopt library."
                    "Optimize dl_train target.")
@click.option("--max-runs", type=click.INT, default=10,
              help="Maximum number of runs to evaluate.")
@click.option("--epochs", type=click.INT, default=500,
              help="Number of epochs")
@click.option("--metric", type=click.STRING, default="rmse",
              help="Metric to optimize on.")
@click.option("--algo", type=click.STRING, default="tpe.suggest",
              help="Optimizer algorithm.")
@click.option("--seed", type=click.INT, default=97531,
              help="Seed for the random generator")
@click.argument("training_data")
def train(training_data, max_runs, epochs, metric, algo, seed):
    """
    Run hyperparameter optimization over (learning_rate, momentum).

    Each Hyperopt trial launches the project's "train" entry point as a
    nested MLflow run; the optimizer minimizes the validation metric, and
    the best child run's metrics are re-logged on the parent run.
    """
    tracking_client = mlflow.tracking.MlflowClient()

    def new_eval(nepochs,
                 experiment_id,
                 null_train_loss,
                 null_valid_loss,
                 null_test_loss,
                 return_all=False):
        """
        Create a new eval function.

        :param nepochs: Number of epochs to train the model.
        :param experiment_id: Experiment id for the training run.
        :param null_train_loss: Loss of a null model on the training dataset.
        :param null_valid_loss: Loss of a null model on the validation dataset.
        :param null_test_loss: Loss of a null model on the test dataset.
        :param return_all: Return train/valid/test losses instead of only
            the validation loss.
        :return: new eval function.
        """
        def eval(params):
            """
            Train Keras model with given parameters by invoking MLflow run.

            Notice we store runUuid and resulting metric in a file. We will
            later use these to pick the best run and to log the runUuids of
            the child runs as an artifact. This is a temporary workaround
            until MLflow offers better mechanism of linking runs together.

            :param params: (learning_rate, momentum) pair to evaluate.
            :return: The metric value evaluated on the validation data.
            """
            import mlflow.tracking
            lr, momentum = params
            with mlflow.start_run(nested=True) as child_run:
                p = mlflow.projects.run(
                    uri=".",
                    entry_point="train",
                    run_id=child_run.info.run_id,
                    parameters={
                        "training_data": training_data,
                        "epochs": str(nepochs),
                        "learning_rate": str(lr),
                        "momentum": str(momentum),
                        "seed": seed},
                    experiment_id=experiment_id,
                    use_conda=False,  # We are already in the environment
                    synchronous=False  # Allow the run to fail if a model is not properly created
                )
                succeeded = p.wait()
            if succeeded:
                training_run = tracking_client.get_run(p.run_id)
                metrics = training_run.data.metrics
                # cap the loss at the loss of the null model
                train_loss = min(null_train_loss,
                                 metrics["train_{}".format(metric)])
                valid_loss = min(null_valid_loss,
                                 metrics["val_{}".format(metric)])
                test_loss = min(null_test_loss,
                                metrics["test_{}".format(metric)])
            else:
                # run failed => return null loss
                tracking_client.set_terminated(p.run_id, "FAILED")
                train_loss = null_train_loss
                valid_loss = null_valid_loss
                test_loss = null_test_loss
            mlflow.log_metrics({
                "train_{}".format(metric): train_loss,
                "val_{}".format(metric): valid_loss,
                "test_{}".format(metric): test_loss
            })
            if return_all:
                return train_loss, valid_loss, test_loss
            else:
                return valid_loss
        return eval

    # Search space over the two SGD hyperparameters.
    space = [
        hp.uniform('lr', 1e-5, 1e-1),
        hp.uniform('momentum', .0, 1.0),
    ]
    with mlflow.start_run() as run:
        experiment_id = run.info.experiment_id
        # Evaluate the null model first: its losses cap every real trial so
        # a diverging run cannot dominate the search.
        train_null_loss, valid_null_loss, test_null_loss = new_eval(0,
                                                                    experiment_id,
                                                                    _inf,
                                                                    _inf,
                                                                    _inf,
                                                                    True)(params=[0, 0])
        best = fmin(fn=new_eval(epochs,
                              experiment_id,
                              train_null_loss,
                              valid_null_loss,
                              test_null_loss),
                    space=space,
                    algo=tpe.suggest if algo == "tpe.suggest" else rand.suggest,
                    max_evals=max_runs)
        mlflow.set_tag("best params", str(best))
        # find the best run, log its metrics as the final metrics of this run.
        client = MlflowClient()
        runs = client.search_runs([experiment_id], "tags.mlflow.parentRunId = '{run_id}' ".format(
            run_id=run.info.run_id
        ))
        best_val_train = _inf
        best_val_valid = _inf
        best_val_test = _inf
        best_run = None
        for r in runs:
            # FIX: select on the metric actually being optimized. The keys
            # were hard-coded to "*_rmse", which raised KeyError for any
            # non-default --metric (child runs log "*_{metric}").
            if r.data.metrics["val_{}".format(metric)] < best_val_valid:
                best_run = r
                best_val_train = r.data.metrics["train_{}".format(metric)]
                best_val_valid = r.data.metrics["val_{}".format(metric)]
                best_val_test = r.data.metrics["test_{}".format(metric)]
        mlflow.set_tag("best_run", best_run.info.run_id)
        mlflow.log_metrics({
            "train_{}".format(metric): best_val_train,
            "val_{}".format(metric): best_val_valid,
            "test_{}".format(metric): best_val_test
        })
if __name__ == '__main__':
    train()
| [
"hyperopt.hp.uniform",
"click.argument",
"mlflow.tracking.client.MlflowClient",
"click.option",
"click.command",
"numpy.finfo"
] | [((593, 702), 'click.command', 'click.command', ([], {'help': '"""Perform hyperparameter search with Hyperopt library.Optimize dl_train target."""'}), "(help=\n 'Perform hyperparameter search with Hyperopt library.Optimize dl_train target.'\n )\n", (606, 702), False, 'import click\n'), ((717, 820), 'click.option', 'click.option', (['"""--max-runs"""'], {'type': 'click.INT', 'default': '(10)', 'help': '"""Maximum number of runs to evaluate."""'}), "('--max-runs', type=click.INT, default=10, help=\n 'Maximum number of runs to evaluate.')\n", (729, 820), False, 'import click\n'), ((831, 909), 'click.option', 'click.option', (['"""--epochs"""'], {'type': 'click.INT', 'default': '(500)', 'help': '"""Number of epochs"""'}), "('--epochs', type=click.INT, default=500, help='Number of epochs')\n", (843, 909), False, 'import click\n'), ((925, 1020), 'click.option', 'click.option', (['"""--metric"""'], {'type': 'click.STRING', 'default': '"""rmse"""', 'help': '"""Metric to optimize on."""'}), "('--metric', type=click.STRING, default='rmse', help=\n 'Metric to optimize on.')\n", (937, 1020), False, 'import click\n'), ((1031, 1129), 'click.option', 'click.option', (['"""--algo"""'], {'type': 'click.STRING', 'default': '"""tpe.suggest"""', 'help': '"""Optimizer algorhitm."""'}), "('--algo', type=click.STRING, default='tpe.suggest', help=\n 'Optimizer algorhitm.')\n", (1043, 1129), False, 'import click\n'), ((1140, 1236), 'click.option', 'click.option', (['"""--seed"""'], {'type': 'click.INT', 'default': '(97531)', 'help': '"""Seed for the random generator"""'}), "('--seed', type=click.INT, default=97531, help=\n 'Seed for the random generator')\n", (1152, 1236), False, 'import click\n'), ((1247, 1278), 'click.argument', 'click.argument', (['"""training_data"""'], {}), "('training_data')\n", (1261, 1278), False, 'import click\n'), ((565, 585), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (573, 585), True, 'import numpy as np\n'), ((4827, 4855), 
'hyperopt.hp.uniform', 'hp.uniform', (['"""lr"""', '(1e-05)', '(0.1)'], {}), "('lr', 1e-05, 0.1)\n", (4837, 4855), False, 'from hyperopt import fmin, hp, tpe, rand\n'), ((4865, 4897), 'hyperopt.hp.uniform', 'hp.uniform', (['"""momentum"""', '(0.0)', '(1.0)'], {}), "('momentum', 0.0, 1.0)\n", (4875, 4897), False, 'from hyperopt import fmin, hp, tpe, rand\n'), ((6023, 6037), 'mlflow.tracking.client.MlflowClient', 'MlflowClient', ([], {}), '()\n', (6035, 6037), False, 'from mlflow.tracking.client import MlflowClient\n')] |
# -*- coding: utf-8 -*-
"""
Created on May 8, 2020
@author: <EMAIL>
"""
import pandas as pd
from bokeh.plotting import figure, output_file, show
from bokeh.layouts import column
from hybrid_index import hybrid_index, cross_corr
import numpy as np
# Load Arkansas precipitation anomalies and PDSI time series.
df = pd.read_csv(r'pcp_pdsi_arkansas.csv')
pcp00 = df.values[:, 2]  # monthly precipitation anomalies (column 2)
pdsi = df.values[:, 3]   # Palmer Drought Severity Index (column 3)
# Rename the year column and build a datetime index. The "day" column is a
# constant placeholder (all ones) required by pd.to_datetime; len(df)
# replaces the original hard-coded row count (833) so the script keeps
# working if the record length changes.
df = df.rename(columns={'%year': 'year'})
df.insert(2, "day", list(np.ones(len(df)).astype(int)), True)
datetime = pd.to_datetime(df[['year', 'month', 'day']])
# Load f^-1 spectrum.
evapo = pd.read_csv(r'evapo.csv')
evapo = evapo.values
# Exponentially weighted index for Arkansas based on the parameters of the
# exponentially weighted averages determined in Appendix B (see also Fig 10)
# of Chelton and Risien (2020).
tau = 5.051
alpha = 0.222
beta = 0.250
lagmax = 30
pcpexp = hybrid_index(pcp00, evapo, tau, lagmax, alpha, beta)
# Lagged correlations of each index against the precipitation anomalies.
lag, R_xy, p_xy_pcpexp = cross_corr(np.array(pcpexp), pcp00, 12, np.nan)
lag, R_xy, p_xy_pdsi = cross_corr(pdsi, pcp00, 12, np.nan)
# Arkansas hybrid precipitation index for different values of tau:
# tau = 3, 10, 20, and 36.
lagmax = 100
alpha = 0.0
beta = 0.0
pcpexp3 = hybrid_index(pcp00, evapo, 3.0, lagmax, alpha, beta)
pcpexp10 = hybrid_index(pcp00, evapo, 10.0, lagmax, alpha, beta)
pcpexp20 = hybrid_index(pcp00, evapo, 20.0, lagmax, alpha, beta)
pcpexp36 = hybrid_index(pcp00, evapo, 36.0, lagmax, alpha, beta)
# Plot the results for Arkansas.
output_file("pcpexp_arkansas_results.html", title="Arkansas")
# Upper panel: lagged correlations.
corr = figure(title="Arkansas", x_axis_label='lag', y_axis_label='Correlation',
             plot_width=800, plot_height=400, x_range=(-12, 12), y_range=(-1, 1))
corr.line(lag, p_xy_pcpexp, legend_label="Exp. Weighted Ave.(t) vs Precip. Anoms. (t+lag)", line_color="red", line_width=1.4)
corr.line(lag, p_xy_pdsi, legend_label="PDSI(t) vs Precip. Anoms. (t+lag)", line_color="black", line_width=2.2)
# Lower panel: index time series for each tau.
ts = figure(x_axis_type='datetime', x_axis_label='time', y_axis_label='index',
           plot_width=800, plot_height=400, y_range=(-4, 4))
ts.line(datetime, pcpexp3, legend_label="Tau03", line_color="black", line_width=2)
ts.line(datetime, pcpexp10, legend_label="Tau10", line_color="blue", line_width=2)
ts.line(datetime, pcpexp20, legend_label="Tau20", line_color="green", line_width=2)
ts.line(datetime, pcpexp36, legend_label="Tau36", line_color="red", line_width=2)
show(column(corr, ts))
| [
"bokeh.plotting.figure",
"hybrid_index.cross_corr",
"hybrid_index.hybrid_index",
"pandas.read_csv",
"numpy.ones",
"bokeh.plotting.output_file",
"pandas.to_datetime",
"numpy.array",
"bokeh.layouts.column"
] | [((313, 349), 'pandas.read_csv', 'pd.read_csv', (['"""pcp_pdsi_arkansas.csv"""'], {}), "('pcp_pdsi_arkansas.csv')\n", (324, 349), True, 'import pandas as pd\n'), ((548, 592), 'pandas.to_datetime', 'pd.to_datetime', (["df[['year', 'month', 'day']]"], {}), "(df[['year', 'month', 'day']])\n", (562, 592), True, 'import pandas as pd\n'), ((620, 644), 'pandas.read_csv', 'pd.read_csv', (['"""evapo.csv"""'], {}), "('evapo.csv')\n", (631, 644), True, 'import pandas as pd\n'), ((924, 976), 'hybrid_index.hybrid_index', 'hybrid_index', (['pcp00', 'evapo', 'tau', 'lagmax', 'alpha', 'beta'], {}), '(pcp00, evapo, tau, lagmax, alpha, beta)\n', (936, 976), False, 'from hybrid_index import hybrid_index, cross_corr\n'), ((1105, 1140), 'hybrid_index.cross_corr', 'cross_corr', (['pdsi', 'pcp00', '(12)', 'np.nan'], {}), '(pdsi, pcp00, 12, np.nan)\n', (1115, 1140), False, 'from hybrid_index import hybrid_index, cross_corr\n'), ((1294, 1346), 'hybrid_index.hybrid_index', 'hybrid_index', (['pcp00', 'evapo', '(3.0)', 'lagmax', 'alpha', 'beta'], {}), '(pcp00, evapo, 3.0, lagmax, alpha, beta)\n', (1306, 1346), False, 'from hybrid_index import hybrid_index, cross_corr\n'), ((1358, 1411), 'hybrid_index.hybrid_index', 'hybrid_index', (['pcp00', 'evapo', '(10.0)', 'lagmax', 'alpha', 'beta'], {}), '(pcp00, evapo, 10.0, lagmax, alpha, beta)\n', (1370, 1411), False, 'from hybrid_index import hybrid_index, cross_corr\n'), ((1423, 1476), 'hybrid_index.hybrid_index', 'hybrid_index', (['pcp00', 'evapo', '(20.0)', 'lagmax', 'alpha', 'beta'], {}), '(pcp00, evapo, 20.0, lagmax, alpha, beta)\n', (1435, 1476), False, 'from hybrid_index import hybrid_index, cross_corr\n'), ((1488, 1541), 'hybrid_index.hybrid_index', 'hybrid_index', (['pcp00', 'evapo', '(36.0)', 'lagmax', 'alpha', 'beta'], {}), '(pcp00, evapo, 36.0, lagmax, alpha, beta)\n', (1500, 1541), False, 'from hybrid_index import hybrid_index, cross_corr\n'), ((1574, 1635), 'bokeh.plotting.output_file', 'output_file', 
(['"""pcpexp_arkansas_results.html"""'], {'title': '"""Arkansas"""'}), "('pcpexp_arkansas_results.html', title='Arkansas')\n", (1585, 1635), False, 'from bokeh.plotting import figure, output_file, show\n'), ((1656, 1801), 'bokeh.plotting.figure', 'figure', ([], {'title': '"""Arkansas"""', 'x_axis_label': '"""lag"""', 'y_axis_label': '"""Correlation"""', 'plot_width': '(800)', 'plot_height': '(400)', 'x_range': '(-12, 12)', 'y_range': '(-1, 1)'}), "(title='Arkansas', x_axis_label='lag', y_axis_label='Correlation',\n plot_width=800, plot_height=400, x_range=(-12, 12), y_range=(-1, 1))\n", (1662, 1801), False, 'from bokeh.plotting import figure, output_file, show\n'), ((2076, 2203), 'bokeh.plotting.figure', 'figure', ([], {'x_axis_type': '"""datetime"""', 'x_axis_label': '"""time"""', 'y_axis_label': '"""index"""', 'plot_width': '(800)', 'plot_height': '(400)', 'y_range': '(-4, 4)'}), "(x_axis_type='datetime', x_axis_label='time', y_axis_label='index',\n plot_width=800, plot_height=400, y_range=(-4, 4))\n", (2082, 2203), False, 'from bokeh.plotting import figure, output_file, show\n'), ((1045, 1061), 'numpy.array', 'np.array', (['pcpexp'], {}), '(pcpexp)\n', (1053, 1061), True, 'import numpy as np\n'), ((2561, 2577), 'bokeh.layouts.column', 'column', (['corr', 'ts'], {}), '(corr, ts)\n', (2567, 2577), False, 'from bokeh.layouts import column\n'), ((505, 517), 'numpy.ones', 'np.ones', (['(833)'], {}), '(833)\n', (512, 517), True, 'import numpy as np\n')] |
import numpy as np
import torch
import torch.optim as optim
from attack_model import \
transform_dataset, \
transform_dataset_census, \
transform_dataset_credit, \
attack_keras_model
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, TensorDataset, DataLoader
from torch.utils.data.dataset import random_split
from torch.optim.lr_scheduler import CosineAnnealingLR, ReduceLROnPlateau
from tqdm import trange
from tqdm.notebook import tnrange
from torch.utils.data.dataset import ConcatDataset
from sklearn import preprocessing
import pandas as pd
import os
import argparse
import logging
from torch.autograd import Function
import matplotlib.pyplot as plt
from bayesian_model import BayesianModel as bm
from pycm import ConfusionMatrix
from secml.array.c_array import CArray
# Configure the root logger once at import time: timestamped records, DEBUG level.
logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.DEBUG)
class GradientReversalFunction(Function):
    """
    Gradient Reversal Layer from:
    Unsupervised Domain Adaptation by Backpropagation (Ganin & Lempitsky, 2015)

    Acts as the identity in the forward pass; in the backward pass the
    upstream gradients are multiplied by -lambda (i.e. the gradient is
    reversed). The lambda argument itself receives no gradient.
    """

    @staticmethod
    def forward(ctx, inputs, coeff):
        # Stash the scale factor for the backward pass.
        ctx.coeff = coeff
        return inputs.clone()

    @staticmethod
    def backward(ctx, grads):
        # Reverse and scale the gradient w.r.t. the input; None for lambda.
        scale = grads.new_tensor(ctx.coeff)
        return -scale * grads, None
class GradientReversal(torch.nn.Module):
    """Module wrapper around GradientReversalFunction: identity on the
    forward pass, gradients multiplied by -lambda_ on the way back."""

    def __init__(self, lambda_=1):
        super(GradientReversal, self).__init__()
        # Strength of the gradient reversal applied in forward().
        self.lambda_ = lambda_

    def forward(self, x):
        # Delegate to the autograd Function that implements the reversal.
        return GradientReversalFunction.apply(x, self.lambda_)
class Net(nn.Module):
    """MLP with an optional adversarial head behind a gradient-reversal layer.

    forward(x) returns the main-task output y (shape (batch, 1)) when
    grl_lambda == 0, otherwise the pair (y, s) where s holds 2-way logits
    produced by the adversarial branch.
    """

    def __init__(self, input_shape, grl_lambda=100):
        super(Net, self).__init__()
        self._grl_lambda = grl_lambda
        self.fc1 = nn.Linear(input_shape, 32)
        # NOTE(review): fc2/fc3 are registered but never used in forward();
        # kept so existing state_dicts still load unchanged.
        self.fc2 = nn.Linear(32, 32)
        self.fc3 = nn.Linear(32, 32)
        self.fc4 = nn.Linear(32, 1)
        if self._grl_lambda != 0:
            # Adversarial branch: gradient reversal then a 2-class head.
            self.grl = GradientReversal(grl_lambda)
            self.fc5 = nn.Linear(32, 2)

    def forward(self, x):
        h = F.dropout(F.relu(self.fc1(x)), 0.1)
        y = self.fc4(h)
        if self._grl_lambda == 0:
            return y
        s = self.fc5(self.grl(h))
        return y, s
def get_metrics(results, args, threshold, fraction):
"Create the metrics from an output df."
# Calculate biases after training
dem_parity = abs(
bm(results).P(pred=lambda x: x > threshold).given(race=0)
- bm(results).P(pred=lambda x: x > threshold).given(
race=1))
eq_op = abs(
bm(results).P(pred=lambda x: x > threshold).given(race=0, compas=True)
- bm(results).P(pred=lambda x: x > threshold).given(race=1, compas=True))
dem_parity_ratio = abs(
bm(results).P(pred=lambda x: x > threshold).given(race=0)
/ bm(results).P(pred=lambda x: x > threshold).given(
race=1))
cm = ConfusionMatrix(actual_vector=(results['true'] == True).values,
predict_vector=(results['pred'] > threshold).values)
if args.dataset == 'compas':
cm_high_risk = ConfusionMatrix(actual_vector=(results['compas'] > 8).values,
predict_vector=(results['pred'] > 8).values)
result = {"DP": dem_parity,
"EO": eq_op,
"DP ratio": dem_parity_ratio,
"acc": cm.Overall_ACC,
"acc_ci_min": cm.CI95[0],
"acc_ci_max": cm.CI95[1],
"f1": cm.F1_Macro,
"acc_high_risk": cm_high_risk.Overall_ACC,
"acc_ci_min_high_risk": cm_high_risk.CI95[0],
"acc_ci_max_high_risk": cm_high_risk.CI95[1],
"f1_high_risk": cm_high_risk.F1_Macro,
"adversarial_fraction": fraction
}
else:
result = {"DP": dem_parity,
"EO": eq_op,
"DP ratio": dem_parity_ratio,
"acc": cm.Overall_ACC,
"acc_ci_min": cm.CI95[0],
"acc_ci_max": cm.CI95[1],
"f1": cm.F1_Macro,
"adversarial_fraction": fraction
}
return result
def train_and_evaluate(train_loader: DataLoader,
val_loader: DataLoader,
test_loader: DataLoader,
device,
args,
input_shape,
grl_lambda=None,
model=None):
"""
:param train_loader: Pytorch-like DataLoader with training data.
:param val_loader: Pytorch-like DataLoader with validation data.
:param test_loader: Pytorch-like DataLoader with testing data.
:param device: The target device for the training.
:return: A tuple: (trained Pytorch-like model, dataframe with results on test set)
"""
torch.manual_seed(0)
grl_lambda = grl_lambda if grl_lambda is not None else args.grl_lambda
if args.reset_attack or model is None:
# Redefine the model
model = Net(input_shape=input_shape, grl_lambda=grl_lambda).to(device)
criterion = nn.MSELoss().to(device)
criterion_bias = nn.CrossEntropyLoss().to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-2)
scheduler = ReduceLROnPlateau(optimizer, threshold=0.3, cooldown=5)
training_losses = []
validation_losses = []
t_prog = trange(args.epochs, desc='Training neural network', leave=False, position=1, mininterval=5)
# t_prog = trange(50)
for epoch in t_prog:
model.train()
batch_losses = []
for x_batch, y_batch, _, s_batch in train_loader:
x_batch = x_batch.to(device)
y_batch = y_batch.to(device)
s_batch = s_batch.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
if grl_lambda is not None and grl_lambda != 0:
outputs, outputs_protected = model(x_batch)
loss = criterion(outputs, y_batch) + criterion_bias(outputs_protected, s_batch.argmax(dim=1))
else:
outputs = model(x_batch)
loss = criterion(outputs, y_batch)
loss.backward()
optimizer.step()
batch_losses.append(loss.item())
training_loss = np.mean(batch_losses)
training_losses.append(training_loss)
with torch.no_grad():
val_losses = []
for x_val, y_val, _, s_val in val_loader:
x_val = x_val.to(device)
y_val = y_val.to(device)
s_val = s_val.to(device)
model.eval()
if grl_lambda is not None and grl_lambda != 0:
yhat, s_hat = model(x_val)
val_loss = (criterion(y_val, yhat) + criterion_bias(s_val, s_hat.argmax(dim=1))).item()
else:
yhat = model(x_val)
val_loss = criterion(y_val, yhat).item()
val_losses.append(val_loss)
validation_loss = np.mean(val_losses)
validation_losses.append(validation_loss)
scheduler.step(val_loss)
t_prog.set_postfix({"epoch": epoch, "training_loss": training_loss,
"validation_loss": validation_loss}, refresh=False) # print last metrics
if args.show_graphs:
plt.plot(range(len(training_losses)), training_losses)
plt.plot(range(len(validation_losses)), validation_losses)
# plt.scatter(x_tensor, y_out.detach().numpy())
plt.ylabel('some numbers')
plt.show()
with torch.no_grad():
test_losses = []
test_results = []
for x_test, y_test, ytrue, s_true in test_loader:
x_test = x_test.to(device)
y_test = y_test.to(device)
s_true = s_true.to(device)
model.eval()
if grl_lambda is not None and grl_lambda != 0:
yhat, s_hat = model(x_test)
test_loss = (criterion(y_test, yhat) + criterion_bias(s_true, s_hat.argmax(dim=1))).item()
test_losses.append(val_loss)
test_results.append({"y_hat": yhat, "y_true": ytrue, "y_compas": y_test, "s": s_true, "s_hat": s_hat})
else:
yhat = model(x_test)
test_loss = (criterion(y_test, yhat)).item()
test_losses.append(val_loss)
test_results.append({"y_hat": yhat, "y_true": ytrue, "y_compas": y_test, "s": s_true})
# print({"Test loss": np.mean(test_losses)})
results = test_results[0]['y_hat']
outcome = test_results[0]['y_true']
compas = test_results[0]['y_compas']
protected_results = test_results[0]['s']
if grl_lambda is not None and grl_lambda != 0:
protected = test_results[0]['s_hat']
for r in test_results[1:]:
results = torch.cat((results, r['y_hat']))
outcome = torch.cat((outcome, r['y_true']))
compas = torch.cat((compas, r['y_compas']))
protected_results = torch.cat((protected_results, r['s']))
if grl_lambda is not None and grl_lambda != 0:
protected = torch.cat((protected, r['s_hat']))
df = pd.DataFrame(data=results.cpu().numpy(), columns=['pred'])
df['true'] = outcome.cpu().numpy()
df['compas'] = compas.cpu().numpy()
df['race'] = protected_results.cpu().numpy()[:, 0]
if grl_lambda is not None and grl_lambda != 0:
df['race_hat'] = protected.cpu().numpy()[:, 0]
return model, df
def main(args):
device = 'cuda' if torch.cuda.is_available() else 'cpu'
logging.debug("using device {} for pytorch.".format(device))
# Make sure entire df is printed
pd.set_option('display.max_colwidth', -1)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
if args.dataset == "compas":
df = pd.read_csv(os.path.join("..", "data", "csv", "scikit",
"compas_recidive_two_years_sanitize_age_category_jail_time_decile_score.csv"))
df_binary, Y, S, Y_true = transform_dataset(df)
Y = Y.to_numpy()
l_tensor = torch.tensor(Y_true.to_numpy().reshape(-1, 1).astype(np.float32))
elif args.dataset == "adult":
##load the census income data set instead of the COMPAS one
df = pd.read_csv(os.path.join("..", "data", "csv", "scikit", "adult.csv"))
df_binary, Y, S, _ = transform_dataset_census(df)
l_tensor = torch.tensor(Y.reshape(-1, 1).astype(np.float32))
elif args.dataset == "german":
##load the census income data set instead of the COMPAS one
df = pd.read_csv(os.path.join("..", "data", "csv", "scikit", "german.data"), header=None, sep="\s")
df_binary, Y, S, _ = transform_dataset_credit(df)
l_tensor = torch.tensor(Y.reshape(-1, 1).astype(np.float32))
else:
raise ValueError(
"The value given to the --dataset parameter is not valid; try --dataset=compas or --dataset=adult")
print(np.mean(Y))
x_tensor = torch.tensor(df_binary.to_numpy().astype(np.float32))
y_tensor = torch.tensor(Y.reshape(-1, 1).astype(np.float32))
s_tensor = torch.tensor(preprocessing.OneHotEncoder().fit_transform(np.array(S).reshape(-1, 1)).toarray())
dataset = TensorDataset(x_tensor, y_tensor, l_tensor, s_tensor) # dataset = CustomDataset(x_tensor, y_tensor)
base_size = len(dataset) // 10
split = [7 * base_size, 1 * base_size, len(dataset) - 8 * base_size] # Train, validation, test
train_dataset, val_dataset, test_dataset = random_split(dataset, split)
train_loader = DataLoader(dataset=train_dataset, batch_size=args.batch_size, shuffle=True)
val_loader = DataLoader(dataset=val_dataset, batch_size=args.batch_size)
test_loader = DataLoader(dataset=test_dataset, batch_size=args.batch_size)
x_train_tensor = train_dataset[:][0]
y_train_tensor = train_dataset[:][1]
l_train_tensor = train_dataset[:][2]
s_train_tensor = train_dataset[:][3]
global_results = []
# get the classification threshold, we use the same scale for compas so 4 instead of 0.5
threshold = 4 if args.dataset == 'compas' else 0.5
_, results = train_and_evaluate(train_loader, val_loader, test_loader, device, args, input_shape=x_tensor.shape[1],
grl_lambda=0)
print(results)
result = get_metrics(results, args, threshold, 0)
global_results.append(result)
df = pd.DataFrame(global_results)
print(df)
t_main = trange(args.iterations, desc="Attack", leave=False, position=0)
# Define model as none, later it will be set and re-attacked
model = None
for i in t_main:
# Train network
model, results = train_and_evaluate(train_loader, val_loader, test_loader, device, args,
input_shape=x_tensor.shape[1], model=model)
result = get_metrics(results, args, threshold, fraction=(i*args.attack_size)/(base_size * 7))
t_main.set_postfix(result)
global_results.append(result)
# Attack
result_pts, result_class, labels = attack_keras_model(
CArray(x_train_tensor),
Y=CArray((y_train_tensor[:, 0] > threshold).int()),
S=s_train_tensor,
nb_attack=args.attack_size)
# incorporate adversarial points
result_pts = torch.tensor(np.around(result_pts.astype(np.float32), decimals=3)).clamp(0.0, 1.0)
result_pts[result_pts != result_pts] = 0.0
result_class[result_class != result_class] = 0.0
x_train_tensor = torch.cat((x_train_tensor, result_pts))
y_train_tensor = torch.cat(
(y_train_tensor, torch.tensor(result_class.reshape(-1, 1).astype(np.float32)).clamp(0, 10)))
l_train_tensor = torch.cat((l_train_tensor, torch.tensor(labels.tondarray().reshape(-1, 1).astype(np.float32))))
s = np.random.randint(2, size=len(result_class))
s_train_tensor = torch.cat((s_train_tensor, torch.tensor(np.array([s, 1 - s]).T.astype(np.float64))))
train_dataset = TensorDataset(x_train_tensor, y_train_tensor, l_train_tensor, s_train_tensor)
train_loader = DataLoader(dataset=train_dataset, batch_size=args.batch_size, shuffle=True)
logging.debug("New training dataset has size {} (original {}).".format(len(train_loader), base_size * 7))
df = pd.DataFrame(global_results)
print(df)
# Finally save experimental data if a save dir is specified
if args.save_dir:
import json
from datetime import datetime
if os.path.isdir(args.save_dir):
timestamp: str = datetime.now().strftime("%Y_%m_%d_%Hh%Mm%Ss")
folder: str = "{}_{}".format(args.dataset, timestamp)
os.mkdir(os.path.join(args.save_dir, folder))
# save history
df.to_csv(os.path.join(args.save_dir, folder, "history.csv"))
# save experiment settings
with open(os.path.join(args.save_dir, folder, "settings.json"), "w") as fp:
json.dump(args.__dict__, fp)
# save latest model
torch.save(model.state_dict(), os.path.join(args.save_dir, folder, "model.pt"))
else:
raise ValueError("Path is not valid.")
if __name__ == '__main__':
# Define arguments for cli and run main function
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', default=1000, type=int)
parser.add_argument('--iterations', help="Number of attack iterations", default=20, type=int)
parser.add_argument('--batch-size', help="Size of each minibatch for the classifier", default=256, type=int)
parser.add_argument('--show-graphs', help="Shows graph of training, etc. if true.", default=True)
parser.add_argument('--grl-lambda', help="Gradient reversal parameter.", default=1, type=int)
parser.add_argument('--attack-size', help="Number of adversarial points for each attack.", default=25, type=int)
parser.add_argument('--reset-attack', help="Reuse the same model if False.", default=False, type=bool)
parser.add_argument('--dataset', help="The data set to use; values: compas or adult", default="compas", type=str)
parser.add_argument('--save-dir', help="Save history and setup if specified.", default=None)
args = parser.parse_args()
main(args)
| [
"argparse.ArgumentParser",
"attack_model.transform_dataset_credit",
"torch.nn.functional.dropout",
"torch.cat",
"numpy.mean",
"torch.utils.data.TensorDataset",
"torch.no_grad",
"pandas.set_option",
"os.path.join",
"pandas.DataFrame",
"torch.nn.MSELoss",
"torch.utils.data.dataset.random_split",... | [((835, 948), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""', 'level': 'logging.DEBUG'}), "(format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.DEBUG\n )\n", (854, 948), False, 'import logging\n'), ((3397, 3517), 'pycm.ConfusionMatrix', 'ConfusionMatrix', ([], {'actual_vector': "(results['true'] == True).values", 'predict_vector': "(results['pred'] > threshold).values"}), "(actual_vector=(results['true'] == True).values,\n predict_vector=(results['pred'] > threshold).values)\n", (3412, 3517), False, 'from pycm import ConfusionMatrix\n'), ((5397, 5417), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (5414, 5417), False, 'import torch\n'), ((5813, 5868), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'ReduceLROnPlateau', (['optimizer'], {'threshold': '(0.3)', 'cooldown': '(5)'}), '(optimizer, threshold=0.3, cooldown=5)\n', (5830, 5868), False, 'from torch.optim.lr_scheduler import CosineAnnealingLR, ReduceLROnPlateau\n'), ((5936, 6031), 'tqdm.trange', 'trange', (['args.epochs'], {'desc': '"""Training neural network"""', 'leave': '(False)', 'position': '(1)', 'mininterval': '(5)'}), "(args.epochs, desc='Training neural network', leave=False, position=1,\n mininterval=5)\n", (5942, 6031), False, 'from tqdm import trange\n'), ((10321, 10362), 'pandas.set_option', 'pd.set_option', (['"""display.max_colwidth"""', '(-1)'], {}), "('display.max_colwidth', -1)\n", (10334, 10362), True, 'import pandas as pd\n'), ((10367, 10409), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', 'None'], {}), "('display.max_columns', None)\n", (10380, 10409), True, 'import pandas as pd\n'), ((10414, 10450), 'pandas.set_option', 'pd.set_option', (['"""display.width"""', 'None'], {}), "('display.width', None)\n", (10427, 10450), True, 'import pandas as pd\n'), ((11919, 11972), 'torch.utils.data.TensorDataset', 
'TensorDataset', (['x_tensor', 'y_tensor', 'l_tensor', 's_tensor'], {}), '(x_tensor, y_tensor, l_tensor, s_tensor)\n', (11932, 11972), False, 'from torch.utils.data import Dataset, TensorDataset, DataLoader\n'), ((12204, 12232), 'torch.utils.data.dataset.random_split', 'random_split', (['dataset', 'split'], {}), '(dataset, split)\n', (12216, 12232), False, 'from torch.utils.data.dataset import random_split\n'), ((12253, 12328), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(dataset=train_dataset, batch_size=args.batch_size, shuffle=True)\n', (12263, 12328), False, 'from torch.utils.data import Dataset, TensorDataset, DataLoader\n'), ((12346, 12405), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'val_dataset', 'batch_size': 'args.batch_size'}), '(dataset=val_dataset, batch_size=args.batch_size)\n', (12356, 12405), False, 'from torch.utils.data import Dataset, TensorDataset, DataLoader\n'), ((12424, 12484), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_dataset', 'batch_size': 'args.batch_size'}), '(dataset=test_dataset, batch_size=args.batch_size)\n', (12434, 12484), False, 'from torch.utils.data import Dataset, TensorDataset, DataLoader\n'), ((13114, 13142), 'pandas.DataFrame', 'pd.DataFrame', (['global_results'], {}), '(global_results)\n', (13126, 13142), True, 'import pandas as pd\n'), ((13172, 13235), 'tqdm.trange', 'trange', (['args.iterations'], {'desc': '"""Attack"""', 'leave': '(False)', 'position': '(0)'}), "(args.iterations, desc='Attack', leave=False, position=0)\n", (13178, 13235), False, 'from tqdm import trange\n'), ((16050, 16075), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (16073, 16075), False, 'import argparse\n'), ((2005, 2031), 'torch.nn.Linear', 'nn.Linear', (['input_shape', '(32)'], {}), '(input_shape, 32)\n', (2014, 2031), True, 'import torch.nn as nn\n'), ((2051, 2068), 'torch.nn.Linear', 
'nn.Linear', (['(32)', '(32)'], {}), '(32, 32)\n', (2060, 2068), True, 'import torch.nn as nn\n'), ((2088, 2105), 'torch.nn.Linear', 'nn.Linear', (['(32)', '(32)'], {}), '(32, 32)\n', (2097, 2105), True, 'import torch.nn as nn\n'), ((2125, 2141), 'torch.nn.Linear', 'nn.Linear', (['(32)', '(1)'], {}), '(32, 1)\n', (2134, 2141), True, 'import torch.nn as nn\n'), ((2384, 2398), 'torch.nn.functional.relu', 'F.relu', (['hidden'], {}), '(hidden)\n', (2390, 2398), True, 'import torch.nn.functional as F\n'), ((2416, 2438), 'torch.nn.functional.dropout', 'F.dropout', (['hidden', '(0.1)'], {}), '(hidden, 0.1)\n', (2425, 2438), True, 'import torch.nn.functional as F\n'), ((3595, 3705), 'pycm.ConfusionMatrix', 'ConfusionMatrix', ([], {'actual_vector': "(results['compas'] > 8).values", 'predict_vector': "(results['pred'] > 8).values"}), "(actual_vector=(results['compas'] > 8).values,\n predict_vector=(results['pred'] > 8).values)\n", (3610, 3705), False, 'from pycm import ConfusionMatrix\n'), ((6900, 6921), 'numpy.mean', 'np.mean', (['batch_losses'], {}), '(batch_losses)\n', (6907, 6921), True, 'import numpy as np\n'), ((8159, 8185), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""some numbers"""'], {}), "('some numbers')\n", (8169, 8185), True, 'import matplotlib.pyplot as plt\n'), ((8194, 8204), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8202, 8204), True, 'import matplotlib.pyplot as plt\n'), ((8215, 8230), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8228, 8230), False, 'import torch\n'), ((9486, 9518), 'torch.cat', 'torch.cat', (["(results, r['y_hat'])"], {}), "((results, r['y_hat']))\n", (9495, 9518), False, 'import torch\n'), ((9537, 9570), 'torch.cat', 'torch.cat', (["(outcome, r['y_true'])"], {}), "((outcome, r['y_true']))\n", (9546, 9570), False, 'import torch\n'), ((9588, 9622), 'torch.cat', 'torch.cat', (["(compas, r['y_compas'])"], {}), "((compas, r['y_compas']))\n", (9597, 9622), False, 'import torch\n'), ((9651, 9689), 'torch.cat', 
'torch.cat', (["(protected_results, r['s'])"], {}), "((protected_results, r['s']))\n", (9660, 9689), False, 'import torch\n'), ((10177, 10202), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10200, 10202), False, 'import torch\n'), ((10705, 10726), 'attack_model.transform_dataset', 'transform_dataset', (['df'], {}), '(df)\n', (10722, 10726), False, 'from attack_model import transform_dataset, transform_dataset_census, transform_dataset_credit, attack_keras_model\n'), ((11646, 11656), 'numpy.mean', 'np.mean', (['Y'], {}), '(Y)\n', (11653, 11656), True, 'import numpy as np\n'), ((14257, 14296), 'torch.cat', 'torch.cat', (['(x_train_tensor, result_pts)'], {}), '((x_train_tensor, result_pts))\n', (14266, 14296), False, 'import torch\n'), ((14751, 14828), 'torch.utils.data.TensorDataset', 'TensorDataset', (['x_train_tensor', 'y_train_tensor', 'l_train_tensor', 's_train_tensor'], {}), '(x_train_tensor, y_train_tensor, l_train_tensor, s_train_tensor)\n', (14764, 14828), False, 'from torch.utils.data import Dataset, TensorDataset, DataLoader\n'), ((14852, 14927), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(dataset=train_dataset, batch_size=args.batch_size, shuffle=True)\n', (14862, 14927), False, 'from torch.utils.data import Dataset, TensorDataset, DataLoader\n'), ((15056, 15084), 'pandas.DataFrame', 'pd.DataFrame', (['global_results'], {}), '(global_results)\n', (15068, 15084), True, 'import pandas as pd\n'), ((15260, 15288), 'os.path.isdir', 'os.path.isdir', (['args.save_dir'], {}), '(args.save_dir)\n', (15273, 15288), False, 'import os\n'), ((2251, 2267), 'torch.nn.Linear', 'nn.Linear', (['(32)', '(2)'], {}), '(32, 2)\n', (2260, 2267), True, 'import torch.nn as nn\n'), ((5663, 5675), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (5673, 5675), True, 'import torch.nn as nn\n'), ((5708, 5729), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], 
{}), '()\n', (5727, 5729), True, 'import torch.nn as nn\n'), ((6982, 6997), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6995, 6997), False, 'import torch\n'), ((7648, 7667), 'numpy.mean', 'np.mean', (['val_losses'], {}), '(val_losses)\n', (7655, 7667), True, 'import numpy as np\n'), ((9769, 9803), 'torch.cat', 'torch.cat', (["(protected, r['s_hat'])"], {}), "((protected, r['s_hat']))\n", (9778, 9803), False, 'import torch\n'), ((10510, 10640), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', '"""csv"""', '"""scikit"""', '"""compas_recidive_two_years_sanitize_age_category_jail_time_decile_score.csv"""'], {}), "('..', 'data', 'csv', 'scikit',\n 'compas_recidive_two_years_sanitize_age_category_jail_time_decile_score.csv'\n )\n", (10522, 10640), False, 'import os\n'), ((11051, 11079), 'attack_model.transform_dataset_census', 'transform_dataset_census', (['df'], {}), '(df)\n', (11075, 11079), False, 'from attack_model import transform_dataset, transform_dataset_census, transform_dataset_credit, attack_keras_model\n'), ((13819, 13841), 'secml.array.c_array.CArray', 'CArray', (['x_train_tensor'], {}), '(x_train_tensor)\n', (13825, 13841), False, 'from secml.array.c_array import CArray\n'), ((10964, 11020), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', '"""csv"""', '"""scikit"""', '"""adult.csv"""'], {}), "('..', 'data', 'csv', 'scikit', 'adult.csv')\n", (10976, 11020), False, 'import os\n'), ((11389, 11417), 'attack_model.transform_dataset_credit', 'transform_dataset_credit', (['df'], {}), '(df)\n', (11413, 11417), False, 'from attack_model import transform_dataset, transform_dataset_census, transform_dataset_credit, attack_keras_model\n'), ((15452, 15487), 'os.path.join', 'os.path.join', (['args.save_dir', 'folder'], {}), '(args.save_dir, folder)\n', (15464, 15487), False, 'import os\n'), ((15539, 15589), 'os.path.join', 'os.path.join', (['args.save_dir', 'folder', '"""history.csv"""'], {}), "(args.save_dir, folder, 'history.csv')\n", 
(15551, 15589), False, 'import os\n'), ((15735, 15763), 'json.dump', 'json.dump', (['args.__dict__', 'fp'], {}), '(args.__dict__, fp)\n', (15744, 15763), False, 'import json\n'), ((15840, 15887), 'os.path.join', 'os.path.join', (['args.save_dir', 'folder', '"""model.pt"""'], {}), "(args.save_dir, folder, 'model.pt')\n", (15852, 15887), False, 'import os\n'), ((11277, 11335), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', '"""csv"""', '"""scikit"""', '"""german.data"""'], {}), "('..', 'data', 'csv', 'scikit', 'german.data')\n", (11289, 11335), False, 'import os\n'), ((15319, 15333), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15331, 15333), False, 'from datetime import datetime\n'), ((15653, 15705), 'os.path.join', 'os.path.join', (['args.save_dir', 'folder', '"""settings.json"""'], {}), "(args.save_dir, folder, 'settings.json')\n", (15665, 15705), False, 'import os\n'), ((11821, 11850), 'sklearn.preprocessing.OneHotEncoder', 'preprocessing.OneHotEncoder', ([], {}), '()\n', (11848, 11850), False, 'from sklearn import preprocessing\n'), ((2891, 2902), 'bayesian_model.BayesianModel', 'bm', (['results'], {}), '(results)\n', (2893, 2902), True, 'from bayesian_model import BayesianModel as bm\n'), ((2959, 2970), 'bayesian_model.BayesianModel', 'bm', (['results'], {}), '(results)\n', (2961, 2970), True, 'from bayesian_model import BayesianModel as bm\n'), ((3057, 3068), 'bayesian_model.BayesianModel', 'bm', (['results'], {}), '(results)\n', (3059, 3068), True, 'from bayesian_model import BayesianModel as bm\n'), ((3138, 3149), 'bayesian_model.BayesianModel', 'bm', (['results'], {}), '(results)\n', (3140, 3149), True, 'from bayesian_model import BayesianModel as bm\n'), ((3247, 3258), 'bayesian_model.BayesianModel', 'bm', (['results'], {}), '(results)\n', (3249, 3258), True, 'from bayesian_model import BayesianModel as bm\n'), ((3315, 3326), 'bayesian_model.BayesianModel', 'bm', (['results'], {}), '(results)\n', (3317, 3326), True, 'from 
bayesian_model import BayesianModel as bm\n'), ((11865, 11876), 'numpy.array', 'np.array', (['S'], {}), '(S)\n', (11873, 11876), True, 'import numpy as np\n'), ((14681, 14701), 'numpy.array', 'np.array', (['[s, 1 - s]'], {}), '([s, 1 - s])\n', (14689, 14701), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collaborative Filtering meetup dataset pre-processing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v2 as tf
from hyperbolic.utils.preprocess import process_dataset
from hyperbolic.utils.preprocess import save_as_pickle
FLAGS = flags.FLAGS
flags.DEFINE_string(
'dataset_path',
default='data/meetup/',
help='Path to raw dataset dir')
flags.DEFINE_string(
'save_dir_path',
default='data/meetup20_nrand/',
help='Path to saving directory')
def read_event_times(dataset_path):
"""Maps events times to a dictonary."""
event_times = {}
for split in ['train', 'test']:
path = os.path.join(dataset_path, 'NYC', split, 'events.txt')
with tf.gfile.Open(path, 'r') as lines:
for line in lines:
line = line.strip('\n').split(' ')
event = line[0]
timestamp = int(line[2])
event_times[event] = timestamp
return event_times
def to_np_new_ids(examples):
"""Creates new ids to a user-events dict. Casts new values as Numpy arrays."""
user_id = {user: i for i, user in enumerate(examples.keys())}
all_events = set().union(*examples.values())
event_id = {event: i for i, event in enumerate(all_events)}
examples_new_ids = {}
for user in examples:
events = [event_id[event] for event in examples[user]]
examples_new_ids[user_id[user]] = np.array(events)
return examples_new_ids
def meetup_to_dict(dataset_path, min_interaction=20):
"""Maps raw dataset file to a Dictonary.
Args:
dataset_path: Path to directory so that:
dataset_file/NYC/train/event_users.txt and
dataset_file/NYC/test/event_users.txt
both have format of
event_id user_id user_id ... user_id
dataset_file/NYC/train/events.txt and
dataset_file/NYC/test/events.txt
both have format of
Event_id Venue_id Time Group_id
where the format of Time is YYYYMMDDhhmmss.
min_interaction: number of minimal interactions per user to filter on.
Returns:
Dictionary containing users as keys, and a numpy array of events the user
interacted with, sorted by the time of interaction.
"""
# create user to event dict
all_examples = {}
for split in ['train', 'test']:
path = os.path.join(dataset_path, 'NYC', split, 'event_users.txt')
with tf.gfile.Open(path, 'r') as lines:
for line in lines:
line = line.strip('\n').split(' ')
event = line[0]
for user in line[1:]:
if user in all_examples:
all_examples[user].append(event)
else:
all_examples[user] = [event]
# filter on users with enough events and sort events by time
event_times = read_event_times(dataset_path)
for user in list(all_examples):
if len(all_examples[user]) >= min_interaction:
all_examples[user] = sorted(
all_examples[user],
key=lambda event: event_times[event] if event in event_times else 0)
else:
del all_examples[user]
return to_np_new_ids(all_examples)
def main(_):
dataset_path = FLAGS.dataset_path
save_path = FLAGS.save_dir_path
sorted_dict = meetup_to_dict(dataset_path)
dataset_examples = process_dataset(sorted_dict, random=False)
save_as_pickle(save_path, dataset_examples)
if __name__ == '__main__':
app.run(main)
| [
"os.path.join",
"absl.flags.DEFINE_string",
"hyperbolic.utils.preprocess.process_dataset",
"absl.app.run",
"numpy.array",
"tensorflow.compat.v2.gfile.Open",
"hyperbolic.utils.preprocess.save_as_pickle"
] | [((1597, 1693), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""dataset_path"""'], {'default': '"""data/meetup/"""', 'help': '"""Path to raw dataset dir"""'}), "('dataset_path', default='data/meetup/', help=\n 'Path to raw dataset dir')\n", (1616, 1693), False, 'from absl import flags\n'), ((1702, 1808), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""save_dir_path"""'], {'default': '"""data/meetup20_nrand/"""', 'help': '"""Path to saving directory"""'}), "('save_dir_path', default='data/meetup20_nrand/', help=\n 'Path to saving directory')\n", (1721, 1808), False, 'from absl import flags\n'), ((4482, 4524), 'hyperbolic.utils.preprocess.process_dataset', 'process_dataset', (['sorted_dict'], {'random': '(False)'}), '(sorted_dict, random=False)\n', (4497, 4524), False, 'from hyperbolic.utils.preprocess import process_dataset\n'), ((4527, 4570), 'hyperbolic.utils.preprocess.save_as_pickle', 'save_as_pickle', (['save_path', 'dataset_examples'], {}), '(save_path, dataset_examples)\n', (4541, 4570), False, 'from hyperbolic.utils.preprocess import save_as_pickle\n'), ((4602, 4615), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (4609, 4615), False, 'from absl import app\n'), ((1961, 2015), 'os.path.join', 'os.path.join', (['dataset_path', '"""NYC"""', 'split', '"""events.txt"""'], {}), "(dataset_path, 'NYC', split, 'events.txt')\n", (1973, 2015), False, 'import os\n'), ((2675, 2691), 'numpy.array', 'np.array', (['events'], {}), '(events)\n', (2683, 2691), True, 'import numpy as np\n'), ((3553, 3612), 'os.path.join', 'os.path.join', (['dataset_path', '"""NYC"""', 'split', '"""event_users.txt"""'], {}), "(dataset_path, 'NYC', split, 'event_users.txt')\n", (3565, 3612), False, 'import os\n'), ((2025, 2049), 'tensorflow.compat.v2.gfile.Open', 'tf.gfile.Open', (['path', '"""r"""'], {}), "(path, 'r')\n", (2038, 2049), True, 'import tensorflow.compat.v2 as tf\n'), ((3622, 3646), 'tensorflow.compat.v2.gfile.Open', 'tf.gfile.Open', (['path', 
'"""r"""'], {}), "(path, 'r')\n", (3635, 3646), True, 'import tensorflow.compat.v2 as tf\n')] |
import numpy as np
from scipy import stats
try:
from matplotlib import pyplot as plt
except ModuleNotFoundError:
import sys
sys.stderr.write("matplotlib was not found, plotting would raise an exception.\n")
plt = None
class NormalNormalKnownVar:
    """Conjugate Bayesian model: Normal likelihood with known variance and a
    Normal prior/posterior over the unknown mean.

    Attributes:
        mean: prior/posterior mean of the unknown mean.
        var: prior/posterior variance of the unknown mean.
        known_var: fixed, known variance of the data likelihood.
    """
    __slots__ = ["mean", "var", "known_var"]

    def __init__(self, known_var, prior_mean=0, prior_var=1):
        self.mean = prior_mean
        self.var = prior_var
        self.known_var = known_var

    def update(self, data):
        """Return a new model holding the posterior after observing `data`.

        The current object is not modified.  (The original also computed
        np.var/np.mean of the data without using them; those dead passes
        are removed.)
        """
        n = len(data)
        # Precision (1/variance) form of the conjugate update: precisions add,
        # and the posterior mean is the precision-weighted average.
        denom = (1.0 / self.var + n / self.known_var)
        return NormalNormalKnownVar(self.known_var,
                                    (self.mean / self.var + sum(data) / self.known_var) / denom,
                                    1.0 / denom)

    def pdf(self, x):
        """Posterior density of the unknown mean at `x`."""
        return stats.norm.pdf(x, self.mean, np.sqrt(self.var))

    def cdf(self, x):
        """Posterior CDF of the unknown mean at `x`."""
        return stats.norm.cdf(x, self.mean, np.sqrt(self.var))

    def posterior(self, l, u):
        """Posterior probability that the mean lies in [l, u]; 0.0 if l > u."""
        if l > u:
            return 0.0
        return self.cdf(u) - self.cdf(l)

    def plot(self, l=0.0, u=1.0):
        """Plot the normalized posterior over [l, u].

        NOTE(review): if matplotlib was not importable, module-level `plt`
        is None and this raises AttributeError.
        """
        x = np.linspace(u, l, 1001)
        y = stats.norm.pdf(x, self.mean, np.sqrt(self.var))
        y = y / y.sum()
        plt.plot(x, y)
        plt.xlim((l, u))

    def predict(self, x):
        """Posterior-predictive CDF at `x`; variance widens by known_var."""
        return stats.norm.cdf(x, self.mean, np.sqrt(self.var + self.known_var))

    def sample(self):
        """Draw one sample from the posterior-predictive distribution."""
        return np.random.normal(self.mean, np.sqrt(self.var + self.known_var))
class NormalLogNormalKnownVar(NormalNormalKnownVar):
    """Conjugate model for log-normal data: log(data) is Normal with known
    variance, with a Normal prior/posterior over its mean."""

    def update(self, data):
        """Return the posterior after observing log-normal `data`.

        Observations are log-transformed, then the standard Normal-Normal
        conjugate update is applied.
        """
        data = np.log(data)
        n = len(data)
        denom = (1.0 / self.var + n / self.known_var)
        return NormalLogNormalKnownVar(self.known_var,
                                       (self.mean / self.var + sum(data) / self.known_var) / denom,
                                       1.0 / denom)

    def predict(self, x):
        # BUG FIX: `raise NotImplemented(...)` raised a TypeError because
        # NotImplemented is a sentinel value, not an exception type.
        raise NotImplementedError("No posterior predictive")

    def sample(self):
        # BUG FIX: `raise np.log(np.random.normal(...))` raised a TypeError
        # (a float cannot be raised). Since log(data) ~ Normal, a predictive
        # sample of the data itself is exp() of a draw from the Normal
        # predictive over the log-data.
        return np.exp(np.random.normal(self.mean, np.sqrt(self.var + self.known_var)))
| [
"matplotlib.pyplot.xlim",
"numpy.log",
"matplotlib.pyplot.plot",
"numpy.mean",
"numpy.linspace",
"sys.stderr.write",
"numpy.var",
"numpy.sqrt"
] | [((138, 225), 'sys.stderr.write', 'sys.stderr.write', (['"""matplotlib was not found, plotting would raise an exception.\n"""'], {}), "(\n 'matplotlib was not found, plotting would raise an exception.\\n')\n", (154, 225), False, 'import sys\n'), ((512, 524), 'numpy.var', 'np.var', (['data'], {}), '(data)\n', (518, 524), True, 'import numpy as np\n'), ((540, 553), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (547, 553), True, 'import numpy as np\n'), ((1125, 1148), 'numpy.linspace', 'np.linspace', (['u', 'l', '(1001)'], {}), '(u, l, 1001)\n', (1136, 1148), True, 'import numpy as np\n'), ((1241, 1255), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (1249, 1255), True, 'from matplotlib import pyplot as plt\n'), ((1264, 1280), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(l, u)'], {}), '((l, u))\n', (1272, 1280), True, 'from matplotlib import pyplot as plt\n'), ((1588, 1600), 'numpy.log', 'np.log', (['data'], {}), '(data)\n', (1594, 1600), True, 'import numpy as np\n'), ((1615, 1627), 'numpy.var', 'np.var', (['data'], {}), '(data)\n', (1621, 1627), True, 'import numpy as np\n'), ((1643, 1656), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (1650, 1656), True, 'import numpy as np\n'), ((859, 876), 'numpy.sqrt', 'np.sqrt', (['self.var'], {}), '(self.var)\n', (866, 876), True, 'import numpy as np\n'), ((945, 962), 'numpy.sqrt', 'np.sqrt', (['self.var'], {}), '(self.var)\n', (952, 962), True, 'import numpy as np\n'), ((1190, 1207), 'numpy.sqrt', 'np.sqrt', (['self.var'], {}), '(self.var)\n', (1197, 1207), True, 'import numpy as np\n'), ((1352, 1386), 'numpy.sqrt', 'np.sqrt', (['(self.var + self.known_var)'], {}), '(self.var + self.known_var)\n', (1359, 1386), True, 'import numpy as np\n'), ((1454, 1488), 'numpy.sqrt', 'np.sqrt', (['(self.var + self.known_var)'], {}), '(self.var + self.known_var)\n', (1461, 1488), True, 'import numpy as np\n'), ((2056, 2090), 'numpy.sqrt', 'np.sqrt', (['(self.var + self.known_var)'], {}), '(self.var + 
self.known_var)\n', (2063, 2090), True, 'import numpy as np\n')] |
# ==============================================================================
# Copyright 2018-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""nGraph TensorFlow bridge split operation test
"""
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
import numpy as np
import pytest
from common import NgraphTest
class TestLogSoftmaxOperations(NgraphTest):
    """Log-softmax operation test executed through the nGraph TF bridge."""

    def test_logsoftmax(self):
        # Renamed from `type`/`max`: the originals shadowed Python builtins.
        dtype = np.float32
        dtype_max = np.finfo(dtype).max
        # float32 max in the second row saturates the softmax, so that row's
        # expected log-softmax is [0, -max, -max, -max].
        features = np.array([[1., 1., 1., 1.], [dtype_max, 1., 2., 3.]]).astype(dtype)
        logsoftmax = tf.nn.log_softmax(features)
        sess_fn = lambda sess: sess.run([logsoftmax])
        out = self.with_ngraph(sess_fn)
        assert np.allclose(
            np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
                      [0, -dtype_max, -dtype_max, -dtype_max]]),
            out,
            rtol=1.e-5,
            atol=1.e-5)
| [
"tensorflow.compat.v1.disable_eager_execution",
"numpy.finfo",
"tensorflow.nn.log_softmax",
"numpy.array"
] | [((837, 875), 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), '()\n', (873, 875), True, 'import tensorflow as tf\n'), ((1178, 1205), 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['features'], {}), '(features)\n', (1195, 1205), True, 'import tensorflow as tf\n'), ((1058, 1072), 'numpy.finfo', 'np.finfo', (['type'], {}), '(type)\n', (1066, 1072), True, 'import numpy as np\n'), ((1340, 1419), 'numpy.array', 'np.array', (['[[-1.386294, -1.386294, -1.386294, -1.386294], [0, -max, -max, -max]]'], {}), '([[-1.386294, -1.386294, -1.386294, -1.386294], [0, -max, -max, -max]])\n', (1348, 1419), True, 'import numpy as np\n'), ((1096, 1150), 'numpy.array', 'np.array', (['[[1.0, 1.0, 1.0, 1.0], [max, 1.0, 2.0, 3.0]]'], {}), '([[1.0, 1.0, 1.0, 1.0], [max, 1.0, 2.0, 3.0]])\n', (1104, 1150), True, 'import numpy as np\n')] |
"""
run the below command under 'activity_recognition' folder:
PYTHONPATH=../:./ python3 models/classical/detector_feature_mean_concatenate.py >log.txt 2>&1 &
Note:
>& is the syntax to redirect a stream to another file descriptor - 0 is stdin, 1 is stdout, and 2 is stderr.
https://stackoverflow.com/questions/876239/how-to-redirect-and-append-both-stdout-and-stderr-to-a-file-with-bash
"""
import os
import shutil
import time
from collections import Counter
from datetime import datetime
from logging import error
from shutil import copyfile
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
from matplotlib.colors import ListedColormap
from mpl_toolkits.mplot3d import Axes3D
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.manifold import TSNE
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KernelDensity
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
from features.feature import extract_feature_average, extract_feature_sampling, load_data, dump_data, \
extract_feature_sampling_mean, extract_feature_fixed_segments
from features.video.model_tf import CNN_tf
from features.video.utils import load_video
from features.video.video import trim
from features.video_info import get_info
def _extract_video_feature(model, in_file, out_dir):
# in_file = 'data/data-clean/refrigerator/open_close_fridge/1/open_close_fridge_3_1615392727_2.mkv'
video_name = os.path.splitext(os.path.basename(in_file))[0]
out_file = os.path.join(out_dir, '{}_{}.npy'.format(video_name, model.net_name))
if os.path.exists(out_file):
return out_file
if not os.path.exists(out_dir):
os.makedirs(out_dir)
batch_sz = 32
# sampling: only extract the first frame in each second from the video.
video_tensor = load_video(in_file, model.desired_size)
# extract features
features = model.extract(video_tensor, batch_sz)
# save features
np.save(os.path.splitext(out_file)[0], features)
return out_file
def _mirror_video(in_file, out_dir):
    """Write a horizontally flipped ('mirrored') copy of `in_file` into
    `out_dir` and return its path.

    The output is named '<stem>-mirrored.<ext>'; if it already exists the
    existing file is reused and no decoding happens.

    References:
    https://stackoverflow.com/questions/29317262/opencv-video-saving-in-python
    https://docs.opencv.org/4.5.2/dd/d43/tutorial_py_video_display.html
    https://stackoverflow.com/questions/61659346/how-to-get-4-character-codec-code-for-videocapture-object-in-opencv
    Parameters
    ----------
    in_file
    out_dir
    Returns
    -------
    in_file = 'out/data/data-clean/refrigerator/open_close_fridge/1/trim-open_close_fridge_10_1615393352_2.mkv'
    """
    _, file_name = os.path.split(in_file)
    # if not os.path.exists(out_dir):
    #     os.makedirs(out_dir)
    # copyfile(in_file, os.path.join(out_dir, file_name))
    # NOTE(review): split('.') assumes exactly one dot in the file name;
    # a stem containing '.' would produce a ValueError here.
    file_name, ent = file_name.split('.')
    out_file = os.path.join(out_dir, file_name + '-mirrored.' + ent)
    if os.path.exists(out_file):
        return out_file
    # capture video
    try:
        # in_file = 'out/data/data-clean/refrigerator/open_close_fridge/1/trim-open_close_fridge_10_1615393352_2.mkv'
        if not os.path.exists(in_file):
            error(f'not exist error: {in_file}')
        cap = cv2.VideoCapture(in_file)
        # Get the Default resolutions
        # fourcc = int(cap.get(cv2.CAP_PROP_FOURCC))
        # fourcc = cv2.VideoWriter_fourcc(*'XVID'.lower())
        fourcc = cv2.VideoWriter_fourcc(*'mp4v'.lower())
        # fourcc = cv2.VideoWriter_fourcc(
        #     *f"{fourcc & 255:c},{(fourcc >> 8) & 255:c}, {(fourcc >> 16) & 255:c}, {(fourcc >> 24) & 255:c}")
        # Decode the packed fourcc int back into its 4-character code and
        # re-encode it (round-trips the codec identifier).
        fourcc = cv2.VideoWriter_fourcc(*(chr(fourcc & 0xff) + chr((fourcc >> 8) & 0xff) + chr((fourcc >> 16) & 0xff)
                                          + chr((fourcc >> 24) & 0xff)))
        # print(fourcc)
        frame_width = int(cap.get(3))
        frame_height = int(cap.get(4))
        # Define the codec and filename.
        fps = cap.get(cv2.CAP_PROP_FPS)
        # out = cv2.VideoWriter(out_file, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), fps, (frame_width, frame_height))
        out = cv2.VideoWriter(out_file, fourcc, fps, (frame_width, frame_height), isColor=True)
        # Copy the stream frame by frame, flipping each frame horizontally.
        while True:
            ret, img = cap.read()
            # print(ret)
            if ret:
                # cv2.imshow('Original Video',img)
                # flip for truning(fliping) frames of video
                img2 = cv2.flip(img, 1)  # Horizontal
                # cv2.imshow('Flipped video',img2)
                out.write(img2)
            else:
                break
        cap.release()
        out.release()
        cv2.destroyAllWindows()
    except cv2.error as e:
        error(f'Error: {e} on {in_file}')
    print(f'_mirror_video: {out_file}')
    return out_file
class AnomalyDetector:
    """Thin wrapper that builds one of several sklearn estimators behind a
    single fit/predict interface.

    With 'DT'/'RF'/'SVM'/'OvRLogReg' it acts as a plain supervised
    classifier; 'KDE' is used as a density model together with
    get_threshold()/predict() for anomaly detection.
    """

    def __init__(self, model_name='GMM', model_parameters=None, random_state=42):
        """
        Parameters
        ----------
        model_name: one of 'KDE', 'GMM', 'DT', 'RF', 'SVM', 'OvRLogReg'.
        model_parameters: model-specific settings, e.g. {'n_estimators': ...}
            for 'RF', {'kernel': ...} for 'SVM', {'C': ...} for 'OvRLogReg'.
        random_state: seed forwarded to the underlying sklearn estimator.
        """
        self.model_name = model_name
        self.random_state = random_state
        # BUG FIX: avoid a shared mutable default argument ({}).
        self.model_parameters = {} if model_parameters is None else model_parameters

    def fit(self, X_train, y_train=None):
        """Instantiate the configured estimator and fit it on the data.

        NOTE(review): 'GMM' is a no-op, so self.model stays unset and a later
        call to get_threshold/predict would raise AttributeError.
        """
        if self.model_name == 'KDE':
            self.model = KernelDensity(kernel='gaussian', bandwidth=0.5)
            self.model.fit(X_train)
        elif self.model_name == 'GMM':
            pass
        elif self.model_name == 'DT':
            self.model = DecisionTreeClassifier(random_state=self.random_state)
            self.model.fit(X_train, y_train)
        elif self.model_name == 'RF':
            n_estimators = self.model_parameters['n_estimators']
            self.model = RandomForestClassifier(n_estimators, random_state=self.random_state)
            self.model.fit(X_train, y_train)
        elif self.model_name == 'SVM':
            kernel = self.model_parameters['kernel']
            self.model = sklearn.svm.SVC(kernel=kernel, random_state=self.random_state)
            self.model.fit(X_train, y_train)
        elif self.model_name == 'OvRLogReg':
            C = self.model_parameters['C']
            self.model = OneVsRestClassifier(
                LogisticRegression(C=C, random_state=self.random_state, solver='liblinear'))
            self.model.fit(X_train, y_train)

    def get_threshold(self, X_train, q=0.95):
        """Set the anomaly threshold `self.thres` at quantile q of the
        training-set densities (meaningful for density models such as KDE)."""
        log_dens = self.model.score_samples(X_train)
        self.thres = np.quantile(np.exp(log_dens), q=q)

    def predict_prob(self, X):
        """Return the model densities exp(score_samples(X))."""
        log_dens = self.model.score_samples(X)
        return np.exp(log_dens)

    def predict(self, X):
        """Label each point: 1 (anomaly) when its density is below
        self.thres, else 0 (normal)."""
        dens = self.predict_prob(X)
        # BUG FIX: the original assigned 1 to low-density entries and then a
        # second mask (dens >= thres) flipped those 1s back to 0 whenever
        # thres <= 1. Compute the decision in one pass instead.
        return np.where(dens < self.thres, 1.0, 0.0)
def get_X_y(Xs, ys):
    """Build a feature matrix and label vector from cached CNN feature files.

    Parameters
    ----------
    Xs: list of .npy feature-file paths.
    ys: labels aligned with Xs.

    Returns
    -------
    (X, Y): numpy arrays of per-video averaged features and their labels.
    """
    X = []
    Y = []
    for f, y in zip(Xs, ys):
        x = extract_feature_average(f)
        X.extend(x)
        Y.append(y)
    # BUG FIX: this previously returned the 3-tuple (Xs, X, Y), but both call
    # sites unpack two values (`X, y = get_X_y(...)`), which raised
    # "too many values to unpack". Return exactly the (X, Y) pair.
    return np.asarray(X), np.asarray(Y)
def split_train_test_npy(meta, test_size=0.3, is_mirror_test_set=False, random_state=42):
    """Split cached .npy features into train/test sets.

    Original and mirrored feature files are split separately but with the
    same `random_state` and in the same order, so a mirrored clip lands in
    the same fold as its source clip. Mirrored features are always added to
    the training set; they are added to the test set only when
    `is_mirror_test_set` is True.

    Returns (X_train, X_test, y_train, y_test).
    """
    X = []  # doesn't include 'mirrored' npy
    y = []  # doesn't include 'mirrored' npy
    X_mirrored = []
    y_mirrored = []
    for x_, y_ in zip(meta['X'], meta['y']):
        if 'mirrored_vgg.npy' not in x_:
            X.append(x_)
            y.append(y_)
            # to make X and X_mirriored have the same order.
            ent = '_vgg.npy'
            new_x_ = x_[:-len(ent)] + '-mirrored' + ent
            # print(x_, new_x_)
            X_mirrored.append(new_x_)
            y_mirrored.append(y_)
    X, y = get_X_y(X, y)  # extract features from 'npy' files
    # print(meta['in_dir'], ', its shape:', meta['shape'])
    # print(f'mapping-(activity:(label, cnt)): ', '\n\t' + '\n\t'.join([f'{k}:{v}' for k, v in meta['labels'].items()]))
    # mp = {v[0]: k for k, v in meta['labels'].items()}  # idx -> activity name
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)
    X_mirrored, y_mirrored = get_X_y(X_mirrored, y_mirrored)  # extract features from 'npy' files
    # Same random_state/order as above keeps mirrored pairs in matching folds.
    X_mirrored_train, X_mirrored_test, y_mirrored_train, y_mirrored_test = \
        train_test_split(X_mirrored, y_mirrored, test_size=test_size, random_state=random_state)
    X_train = np.concatenate([X_train, X_mirrored_train], axis=0)
    y_train = np.concatenate([y_train, y_mirrored_train], axis=0)
    if is_mirror_test_set:
        X_test = np.concatenate([X_test, X_mirrored_test], axis=0)
        y_test = np.concatenate([y_test, y_mirrored_test], axis=0)
    print(f'X_train: {X_train.shape}\nX_test: {X_test.shape}')
    print(f'X_train: {X_train.shape}, y_train: {sorted(Counter(y_train).items(), key=lambda x: x[0])}')
    print(f'X_train: {X_test.shape}, y_test: {sorted(Counter(y_test).items(), key=lambda x: x[0])}')
    return X_train, X_test, y_train, y_test
def augment_train(train_meta, augment_type='camera_1+camera_2+camera_3', is_mirror=False):
    """Assemble the training matrix from per-camera metadata.

    `train_meta` maps a camera name to a list of
    (video_path, cnn_feature, y_label, y_idx, x) tuples, where `x` holds the
    per-video feature rows. `augment_type` selects which cameras are pooled,
    and mirrored clips are kept only when `is_mirror` is True.

    Returns (X_meta, X, Y): per-row video paths, the stacked feature matrix
    and the label vector.
    """
    # Camera-selection predicate; mirrors the original branch structure.
    if augment_type == 'camera_1+camera_2+camera_3':
        keep = lambda name: True
    elif augment_type == 'camera_1+camera_2':
        keep = lambda name: name != 'camera_3'
    elif augment_type in ('camera_1', 'camera_2', 'camera_3'):
        keep = lambda name: name == augment_type
    else:
        raise ValueError(augment_type)

    X_meta, X, Y = [], [], []
    for name, rows in train_meta.items():
        if not keep(name):
            continue
        cnt = 0
        for video_path, cnn_feature, y_label, y_idx, x in rows:
            if len(x) == 0:
                continue
            if not is_mirror and '-mirrored' in video_path:
                continue
            X.extend(x)
            Y.extend(len(x) * [y_idx])
            X_meta.extend(len(x) * [video_path])
            cnt += len(x)
        print(f'{name}_train: {cnt}')
    return X_meta, np.asarray(X), np.asarray(Y)
def augment_test(test_meta, augment_type='camera_1+camera_2+camera_3', is_mirror=False):
    """Assemble the evaluation matrix from per-camera metadata.

    Counterpart of augment_train: pools the feature rows of the selected
    cameras (all three, or a single camera), optionally keeping mirrored
    clips. Returns (X_meta, X, Y): per-row video paths, the stacked feature
    matrix and the label vector.
    """
    # Camera-selection predicate; unlike augment_train, 'camera_1+camera_2'
    # is not a supported option here.
    if augment_type == 'camera_1+camera_2+camera_3':
        keep = lambda name: True
    elif augment_type in ('camera_1', 'camera_2', 'camera_3'):
        keep = lambda name: name == augment_type
    else:
        raise ValueError(augment_type)

    X_meta, X, Y = [], [], []
    for name, rows in test_meta.items():
        if not keep(name):
            continue
        cnt = 0
        for video_path, cnn_feature, y_label, y_idx, x in rows:
            if len(x) == 0:
                continue
            if not is_mirror and '-mirrored' in video_path:
                continue
            X.extend(x)
            Y.extend(len(x) * [y_idx])
            X_meta.extend(len(x) * [video_path])
            cnt += len(x)
        print(f'{name}_test: {cnt}')
    return X_meta, np.asarray(X), np.asarray(Y)
def tsne_plot(X, y, y_label, random_state=42, title=None, out_dir='.'):
    """Visualize the feature matrix with t-SNE: a 2-D scatter plot, a
    per-class FacetGrid, and a 3-D scatter plot saved to `out_dir/title`.

    Example:
        X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
        tsne_plot(X[:, :2], X[:, 2], X[:, 2])

    Parameters
    ----------
    X: feature matrix to embed.
    y: numeric class indices (one per row of X).
    y_label: class names aligned with y (used for legends/facets).
    random_state: seed passed to TSNE.
    title: used as the output file name for the 3-D figure.
    out_dir: directory where the 3-D figure is saved.
    """
    X_embedded = TSNE(n_components=2, random_state=random_state).fit_transform(X)
    # df = pd.DataFrame(np.concatenate([X_embedded, np.reshape(y, (-1, 1))], axis=1), columns=['x1', 'x2', 'y'])
    # print(df.head(5))
    # g = sns.scatterplot(data=df, x="x1", y="x2", hue="y", palette="deep")
    # # g.set(xticklabels=[])
    # # g.set(yticklabels=[])
    # plt.show()
    df = pd.DataFrame(np.concatenate([X_embedded, np.reshape(y, (-1, 1)), np.reshape(y_label, (-1, 1))], axis=1),
                      columns=['x1', 'x2', 'y', 'y_label'])
    # concatenate() produced an object array; restore numeric/str dtypes.
    df = df.astype({"x1": float, "x2": float, 'y': int, 'y_label': str})
    print(df.info())
    print(df.head(5))
    print(df.describe())
    g = sns.scatterplot(data=df, x="x1", y="x2", hue="y_label", palette='deep', s=50, alpha=0.3)
    g.set_title('Refrigerator')
    # Put the legend out of the figure
    # Note that the (1.05, 1) coordinates correspond to the (x, y) coordinates where the legend should be placed
    # and the borderaxespad specifies the padding between the axes and the border legend.
    # bbox (x, y, width, height)
    g.legend(loc='upper left', bbox_to_anchor=(1.05, 1.0), ncol=1, borderaxespad=0,
             fancybox=False, shadow=False, fontsize=8, title='classes')
    # g.legend(loc='center left', bbox_to_anchor=(1.25, 1), ncol=1, borderaxespad=0.)
    plt.tight_layout()
    plt.show()

    ### FacetGrid: one panel per class, classes in sorted order.
    grid = sns.FacetGrid(df, col="y_label", hue="y_label", hue_order=list(sorted(set(y_label))), col_wrap=3)
    grid.map(sns.scatterplot, "x1", "x2", s=100, alpha=0.3)
    grid.add_legend()
    plt.show()

    ### 3D: re-embed with three t-SNE components.
    X_embedded = TSNE(n_components=3, random_state=random_state).fit_transform(X)
    df = pd.DataFrame(np.concatenate([X_embedded, np.reshape(y, (-1, 1)), np.reshape(y_label, (-1, 1))], axis=1),
                      columns=['x1', 'x2', 'x3', 'y', 'y_label'])
    df = df.astype({"x1": float, "x2": float, "x3": float, 'y': int, 'y_label': str})
    sns.set(style="white")
    # fig = plt.figure()
    # ax = fig.add_subplot(111, projection = '3d')
    # ax.scatter(df['x1'], df['x2'], df['x3'])
    # plt.show()
    # axes instance
    fig = plt.figure(figsize=(5, 5))
    ax = Axes3D(fig, auto_add_to_figure=False)
    fig.add_axes(ax)
    # plot
    # get colormap from seaborn
    cmap = ListedColormap(sns.color_palette("deep", 5).as_hex())
    sc = ax.scatter(df['x1'], df['x2'], df['x3'], s=40, c=df['y'].values, marker='o', cmap=cmap, alpha=1)
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    # legend
    plt.legend(*sc.legend_elements(), bbox_to_anchor=(1.05, 1), loc='upper left')
    # plt.legend(*sc.legend_elements())
    # plt.tight_layout(pad = 2.0, rect=(0.3, 0.3, 0.8, 0.8))  # not works with axes. savefig works.
    # save
    out_file = os.path.join(out_dir, f'{title}')
    plt.savefig(out_file, bbox_inches='tight', dpi=300)
    plt.show()
def split_train_test_video(meta, video_type='_1.mp4', test_size=0.3, random_state=42):
    """Split videos into train/test per camera, keyed on camera-1 clips.

    The camera-1 clips are split first; mirrored camera-1 clips and the
    camera-2/camera-3 counterparts of each clip are then routed to the same
    split, so the same recording never appears in both train and test.

    Returns (train_meta, test_meta), each a dict
    {'camera_1': [...], 'camera_2': [...], 'camera_3': [...]} of
    (video_path, feature_path, y_label) tuples.
    """
    # only get camera_1 data (_1.mp4)
    camera_1 = [(f, feat, y) for f, feat, y in meta['data'] if video_type in f]  # '_1.mp4'
    camera_2 = [(f, feat, y) for f, feat, y in meta['data'] if '_2.mkv' in f]
    camera_3 = [(f, feat, y) for f, feat, y in meta['data'] if '_3.mp4' in f]
    print(f'camera_1 ({len(camera_1)}): {Counter([y for f, feat, y in camera_1])}')
    print(f'camera_2 ({len(camera_2)}): {Counter([y for f, feat, y in camera_2])}')
    print(f'camera_3 ({len(camera_3)}): {Counter([y for f, feat, y in camera_3])}')
    train1, test1 = train_test_split(camera_1, test_size=test_size, random_state=random_state)
    # add '-mirror' into train1 and test1
    # camera_1: append each clip's mirrored variant to the same split.
    new_train1 = []
    for (f, feat, y) in train1:
        f = f.replace('_1.mp4', '_1-mirrored.mp4')
        feat = feat.replace('_1_vgg.npy', '_1-mirrored_vgg.npy')
        if not os.path.exists(f) or not os.path.exists(feat):
            print(f'train1: {f} does not exist')
            continue
        new_train1.append((f, feat, y))
    new_test1 = []
    for (f, feat, y) in test1:
        f = f.replace('_1.mp4', '_1-mirrored.mp4')
        feat = feat.replace('_1_vgg.npy', '_1-mirrored_vgg.npy')
        if not os.path.exists(f) or not os.path.exists(feat):
            print(f'test1: {f} does not exist')
            continue
        new_test1.append((f, feat, y))
    train1.extend(new_train1)
    test1.extend(new_test1)
    # camera_2: map each (possibly mirrored) camera-1 path to its camera-2
    # counterpart and keep it only if that clip really exists in camera_2.
    train2 = []
    for (f, feat, y) in train1:
        if '-mirrored.mp4' not in f:
            f = f.replace('_1.mp4', '_2.mkv')
            feat = feat.replace('_1_vgg.npy', '_2_vgg.npy')
        else:
            f = f.replace('_1-mirrored.mp4', '_2-mirrored.mkv')
            feat = feat.replace('_1-mirrored_vgg.npy', '_2-mirrored_vgg.npy')
        if f not in [v1_ for v1_, v2_, v3_ in camera_2]:  # not os.path.exists(f) or not os.path.exists(feat):
            print(f'train2: {f} does not exist')
            continue
        train2.append((f, feat, y))
    test2 = []
    for (f, feat, y) in test1:
        if '-mirrored.mp4' not in f:
            f = f.replace('_1.mp4', '_2.mkv')
            feat = feat.replace('_1_vgg.npy', '_2_vgg.npy')
        else:
            f = f.replace('_1-mirrored.mp4', '_2-mirrored.mkv')
            feat = feat.replace('_1-mirrored_vgg.npy', '_2-mirrored_vgg.npy')
        if f not in [v1_ for v1_, v2_, v3_ in camera_2]:  # not os.path.exists(f) or not os.path.exists(feat):
            print(f'test2: {f} does not exist')
            continue
        test2.append((f, feat, y))
    # camera_3: same mapping for the third camera (missing clips are skipped
    # silently).
    train3 = []
    for (f, feat, y) in train1:
        if '-mirrored.mp4' not in f:
            f = f.replace('_1.mp4', '_3.mp4')
            feat = feat.replace('_1_vgg.npy', '_3_vgg.npy')
        else:
            f = f.replace('_1-mirrored.mp4', '_3-mirrored.mp4')
            feat = feat.replace('_1-mirrored_vgg.npy', '_3-mirrored_vgg.npy')
        if f not in [v1_ for v1_, v2_, v3_ in camera_3]:  # if not os.path.exists(f) or not os.path.exists(feat):
            # print(f'{f} or {feat} does not exist')
            continue
        train3.append((f, feat, y))
    test3 = []
    for (f, feat, y) in test1:
        if '-mirrored.mp4' not in f:
            f = f.replace('_1.mp4', '_3.mp4')
            feat = feat.replace('_1_vgg.npy', '_3_vgg.npy')
        else:
            f = f.replace('_1-mirrored.mp4', '_3-mirrored.mp4')
            feat = feat.replace('_1-mirrored_vgg.npy', '_3-mirrored_vgg.npy')
        if f not in [v1_ for v1_, v2_, v3_ in camera_3]:  # if not os.path.exists(f) or not os.path.exists(feat):
            # print(f'{f} or {feat} does not exist')
            continue
        test3.append((f, feat, y))
    # # camera_2
    # train2 = []
    # for (f, feat, y) in train1:
    #     f = f.replace('_1.mp4', '_2.mkv')
    #     feat = feat.replace('_1_vgg.npy', '_2_vgg.npy')
    #     if not os.path.exists(f) or not os.path.exists(feat): continue
    #     train2.append((f, feat, y))
    # test2 = []
    # for (f, feat, y) in test1:
    #     f = f.replace('_1.mp4', '_2.mkv')
    #     feat = feat.replace('_1_vgg.npy', '_2_vgg.npy')
    #     if not os.path.exists(f) or not os.path.exists(feat): continue
    #     test2.append((f, feat, y))
    #
    # # camera_3
    # train3 = []
    # for (f, feat, y) in train1:
    #     f = f.replace('_1.mp4', '_2.mkv')
    #     feat = feat.replace('_1_vgg.npy', '_2_vgg.npy')
    #     if not os.path.exists(f) or not os.path.exists(feat): continue
    #     train3.append((f, feat, y))
    # test3 = []
    # for (f, feat, y) in test1:
    #     f = f.replace('_1.mp4', '_3.mp4')
    #     feat = feat.replace('_1_vgg.npy', '_3_vgg.npy')
    #     if not os.path.exists(f) or not os.path.exists(feat): continue
    #     test3.append((f, feat, y))
    train_meta = {'camera_1': train1, 'camera_2': train2, 'camera_3': train3}
    test_meta = {'camera_1': test1, 'camera_2': test2, 'camera_3': test3}
    return train_meta, test_meta
def get_activity_info(in_file, video_logs=None):
    """Look up the [start_time, end_time] offsets (in seconds) of the
    activity recorded in `in_file` from the parsed log dictionary.

    Because different languages were used during collection, the timestamp
    embedded in the filename can drift by a few seconds relative to the
    logs, so nearby timestamps (+/-1, 2, 5 s) are also tried.

    Parameters
    ----------
    in_file: video path used as (or close to) a key of video_logs.
    video_logs: dict mapping video path -> [start_time, end_time, ...].

    Returns
    -------
    (start_time, end_time); (0, 0) when no log entry is found (the failure
    is logged), and a negative start_time is clamped to 0.
    """
    # BUG FIX: mutable default argument replaced by a None sentinel.
    if video_logs is None:
        video_logs = {}
    start_time, end_time = 0, 0
    try:
        if in_file in video_logs.keys():
            line = video_logs[in_file]
        else:
            # Try filenames whose embedded timestamp differs by a few seconds.
            dir_tmp, f = os.path.split(in_file)
            arr_tmp = f.split('_')
            video_start_time = arr_tmp[-2]  # timestamp
            for i in [-5, -2, -1, 1, 2, 5]:
                tmp = str(int(video_start_time) + i)
                arr_tmp[-2] = tmp
                tmp = '_'.join(arr_tmp)
                in_file_tmp = os.path.join(dir_tmp, tmp)
                if in_file_tmp in video_logs.keys():
                    line = video_logs[in_file_tmp]
                    break
        # NOTE: if no candidate matched, `line` is unbound and the NameError
        # is caught below, preserving the original (0, 0) fallback.
        start_time = line[0]
        end_time = line[1]
    except Exception as e:
        video_start_time = int(in_file.split('/')[-1].split('_')[-2])
        video_start_time = datetime.utcfromtimestamp(video_start_time).strftime('%Y-%m-%d %H:%M:%S')
        error(f'get_activity_info () Error: {e}. {in_file}, video_start_time: {video_start_time}')
    if start_time < 0:
        start_time = 0
    return start_time, end_time
# def time_diff(time_str, video_start_time):
# # if you encounter a "year is out of range" error the timestamp
# # may be in milliseconds, try `ts /= 1000` in that case
# # print(datetime.utcfromtimestamp(time_str).strftime('%Y-%m-%d %H:%M:%S'))
#
# time_tmp = datetime.utcfromtimestamp(time_str).strftime('%Y-%m-%d %H:%M:%S')
# video_start_time = datetime.utcfromtimestamp(video_start_time).strftime('%Y-%m-%d %H:%M:%S')
# return time_tmp - video_start_time
def parse_logs(in_dir='data/data-clean/log'):
    """Parse the collection log CSVs into a per-video timing dictionary.

    Concatenates all 'log_*' files under `in_dir`, writes the combined CSV
    next to the data, then builds, for every start/end record pair, entries
    for all three camera files of the same recording:
        {video_path: [start_time, end_time, video_start_time, key, activity]}
    Start times get a -5s margin and end times a +5s margin so the activity
    is guaranteed to lie inside [start_time, end_time].
    """
    root_dir = 'data/data-clean'
    # combine all files in the list
    df = pd.concat([pd.read_csv(os.path.join(in_dir, f)) for f in
                    sorted(os.listdir(in_dir)) if f.startswith('log_')])
    df.dropna(thresh=9, inplace=True)  # each row should at least have 9 values
    n = len(df.values)
    df['idx'] = np.asarray(list(range(0, n, 1)))
    df.set_index('idx')
    # change the order of columns
    cols = df.columns.tolist()
    cols = [cols[-1]] + cols[:-1]
    df = df[cols]
    # export to csv
    df.to_csv(f"{root_dir}/~combined_csv.csv", index=True, encoding='utf-8-sig')
    video_logs = {}  # only includes camera 1 data
    data = df.values
    camera_counts = {'camera1': 0, 'camera2': 0, 'camera3': 0}
    for i in range(n):
        # data[i][5]: activity
        if data[i][5].strip() not in ['no_interaction', 'open_close_fridge', 'put_back_item',
                                      'screen_interaction', 'take_out_item']:
            continue
        line = data[i]
        # Normalize every cell to a stripped string (numbers via int()).
        idx, timestamp, start_str, device_name, ID, activity, repetition, camera1, camera2, pcap, audio = \
            [v.strip() if type(v) == str else str(int(v)) for v in list(line)]
        # Note: all three cameras have the same timestamp
        key = f'{root_dir}/{device_name}/{activity}/{ID}/{camera1}'
        key2 = f'{root_dir}/{device_name}/{activity}/{ID}/{camera2}'
        camera3 = camera1.replace('_1.mp4', '_3.mp4')
        key3 = f'{root_dir}/{device_name}/{activity}/{ID}/{camera3}'
        # if camera2 == 'no_interaction_10_1614039254_2.mkv': # for testing purpose.
        #     print(i, line)
        # Note: the following method requires the start record should appear before the end record
        if start_str == 'start':
            # get the statistical information of each video
            if '1.mp4' in camera1:
                camera_counts['camera1'] += 1
            if '2.mkv' in camera2:
                camera_counts['camera2'] += 1
            if '3.mp4' in camera1 or '3.mp4' in camera2:
                camera_counts['camera3'] += 1
            # For each activity, the starting time add -5s and ending time add +5s to make sure
            # the activity in [start_time, end_time].
            video_start_time = int(camera1.split('_')[-2])
            start_time = int(timestamp) - video_start_time - 5
            video_logs[key] = [start_time, '', video_start_time, key, activity]
            video_logs[key2] = [start_time, '', video_start_time, key2, activity]
            video_logs[key3] = [start_time, '', video_start_time, key3, activity]
        elif start_str == 'end':
            if key in video_logs.keys():
                end_time = int(timestamp) - video_logs[key][2] + 5  # here +5 avoid to loss activity information
                video_logs[key][1] = end_time
                video_logs[key2][1] = end_time
                video_logs[key3][1] = end_time
            else:
                error(f'Error happens at line:{i}, {line}')
    print(f'camera_counts without manually recording: {camera_counts.items()}')
    # #########################################################################################
    # # parse the description.xlsx (we manually label the starting and ending time for each video)
    # xlsx_file = f'{root_dir}/refrigerator/description.xlsx'
    # xls = pd.ExcelFile(xlsx_file)
    # # to read all sheets to a map
    # df_mp = {}
    # del line
    # for sheet_name in xls.sheet_names:
    #     df_mp[sheet_name] = xls.parse(sheet_name)
    #     for line in df_mp[sheet_name].values.tolist():
    #         try:
    #             # print(line)
    #             if not line or str(line[0]) == 'nan' or line[0].startswith('full'): continue
    #             if '-trim_' in line[0]: continue
    #
    #             key = os.path.join(root_dir, line[0])
    #             # print(line, video_path)
    #             if key in video_logs.keys():
    #                 warning(f'duplicate log: {key}')
    #             else:
    #                 start_time = int(line[1])
    #                 end_time = int(line[2])
    #                 camera_counts['camera1'] += 1
    #                 camera_counts['camera2'] += 1
    #                 video_start_time = int(key.split('_')[-2])
    #                 activity = line[3]
    #                 video_logs[key] = [start_time, end_time, video_start_time, key, activity]
    #                 key2 = key.replace('1.mp4', '2.mkv')
    #                 video_logs[key2] = [start_time, end_time, video_start_time, key2, activity]
    #         except Exception as e:
    #             warning(f"{line}, {e}")
    print(f'camera_counts: {camera_counts.items()}')
    print(f'Total number of videos (3 cameras): {len(video_logs)}, in which, '
          f'{Counter([v[-1] for v in video_logs.values()])}')
    return video_logs
def change_label2idx(train_meta, label2idx=None):
    """Append the numeric class index to every video tuple, in place.

    Each (video_path, feature, y_label) tuple in `train_meta` becomes
    (video_path, feature, y_label, label2idx[y_label]).

    Parameters
    ----------
    train_meta: dict mapping camera name -> list of video tuples.
    label2idx: dict mapping activity label -> integer index.

    Returns
    -------
    The same `train_meta` dict, mutated in place.
    """
    # BUG FIX: mutable default argument replaced by a None sentinel; an empty
    # dict keeps the old behavior (lookup raises KeyError, as before).
    if label2idx is None:
        label2idx = {}
    for name, train in train_meta.items():
        for i, vs in enumerate(train):
            train_meta[name][i] = (vs[0], vs[1], vs[2], label2idx[vs[2]])  # (video_path, feature, y_label, y_idx)
    return train_meta
def cnn_feature2final_feature(train_meta, feature_type='mean', window_size=5, is_test=False):
    """Turn each cached CNN feature file into a fixed-size feature vector.

    For every (video_path, feature_file, y_label, y_idx) tuple the feature
    file is reduced with the chosen strategy ('mean', 'sampling' or
    'fixed_segments') and the result is appended as a fifth element. When a
    feature file is missing, the empty string '' is stored instead.

    Parameters
    ----------
    train_meta: dict mapping camera name -> list of video tuples (mutated
        in place and returned).
    feature_type: reduction strategy ('mean', 'sampling', 'fixed_segments').
    window_size: window used by the 'sampling' reduction.
    is_test: selects the test-time branch (currently identical to the
        train-time branch except for a commented-out alternative).
    """
    tmp_len_lst = []  # retained for the commented-out length statistics below
    for name, train in train_meta.items():
        for i, vs in enumerate(train):
            f = vs[1]  # (video_path, feature, y_label, y_idx )
            if not os.path.exists(f):
                x = ''
                train_meta[name][i] = (vs[0], vs[1], vs[2], vs[3], x)  # (video_path, feature, y_label, y_idx, X)
                continue
            if not is_test:
                if feature_type == 'mean':
                    x = extract_feature_average(f)
                elif feature_type == 'sampling':
                    x = extract_feature_sampling_mean(f, window_size)
                elif feature_type == 'fixed_segments':
                    x = extract_feature_fixed_segments(f, dim=34)
            elif is_test:
                if feature_type == 'mean':
                    x = extract_feature_average(f)
                elif feature_type == 'sampling':
                    x = extract_feature_sampling_mean(f, window_size)
                    # x = extract_feature_sampling(f, steps=[1, 2, 3, 4, 5])
                elif feature_type == 'fixed_segments':
                    x = extract_feature_fixed_segments(f, dim=34)
            # NOTE(review): an unknown feature_type leaves `x` unbound here
            # and raises NameError — confirm the accepted values upstream.
            train_meta[name][i] = (vs[0], vs[1], vs[2], vs[3], x)  # (video_path, feature_file, y_label, y_idx, X)
    # tmp_len_lst.append(x.shape[1]//4096*5)
    # qs = [0, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99, 1]
    # dims = [f"{int(_v)}({int(_v)} frames, q={_q})" for _v, _q in zip(np.ceil(np.quantile(tmp_len_lst, q=qs)), qs)]
    # print(f'before sampling, dims: {dims} when q={qs}.')
    return train_meta
def gen_Xy(in_dir, out_dir, is_subclip=True, is_mirror=True, is_cnn_feature=True, device_type='refrigerator'):
    """ preprocessing the videos:
            e.g., trim and mirror videos, extract features by CNN

    Walks in_dir as <device>/<activity>/<participant>/<video> and, per video:
    optionally trims it to the logged activity window (is_subclip), extracts
    CNN features (is_cnn_feature), and adds a horizontally mirrored copy
    (is_mirror). Videos listed in issued_videos.csv and '_3 2.mp4' backups
    are skipped.

    Parameters
    ----------
    in_dir: ['data/data-clean/refrigerator]
    out_dir:
    is_subclip: cut video
    is_mirror
    is_cnn_feature
    Returns
    -------
    meta: dictionary with 'data' = [(video_path, cnn_feature, y_label)]
    """
    if is_cnn_feature:
        # deep neural network model
        model_file = './features/video/slim/vgg_16.ckpt'
        model = CNN_tf('vgg', model_file)
    else:
        model = None
    video_logs = parse_logs(in_dir='data/data-clean/log')
    # Last CSV column holds the (partial) names of videos to exclude.
    issued_videos = pd.read_csv(os.path.join(in_dir[0], 'issued_videos.csv'), header=None).values[:, -1].tolist()
    data = []  # [(video_path, cnn_feature, y)]
    durations = {'camera1': [], 'camera2': [], 'camera3': []}
    # list device folders (e.g., refrigerator or camera)
    i = 0
    cnt_3 = 0  # camera_3
    cnt_32 = 0  # camera_32: backup
    for device_dir in sorted(in_dir):
        out_dir_sub = ''
        if device_type not in device_dir: continue
        # list activity folders (e.g., open_close or take_out )
        for activity_dir in sorted(os.listdir(device_dir)):
            activity_label = activity_dir
            out_dir_activity = activity_dir
            activity_dir = os.path.join(device_dir, activity_dir)
            if not os.path.exists(activity_dir) or '.DS_Store' in activity_dir or not os.path.isdir(
                    activity_dir): continue
            # list participant folders (e.g., participant 1 or participant 2)
            for participant_dir in sorted(os.listdir(activity_dir)):
                out_dir_participant = participant_dir
                out_dir_sub = os.path.join(participant_dir)
                participant_dir = os.path.join(activity_dir, participant_dir)
                if not os.path.exists(participant_dir) or '.DS_Store' in participant_dir: continue
                # print(participant_dir)
                # list videos (e.g., 'no_interaction_1_1614038765_1.mp4')
                for f in sorted(os.listdir(participant_dir)):
                    if f.startswith('.'): continue
                    if ('mp4' not in f) and ('mkv' not in f): continue  # only process video file.
                    # Skip any video flagged in issued_videos.csv.
                    issued_flg = False
                    for _issued_f in issued_videos:
                        if f in _issued_f:
                            issued_flg = True
                            break
                    if issued_flg:
                        continue  # issued videos, skip
                    x = os.path.join(participant_dir, f)
                    if '_3.mp4' in f: cnt_3 += 1
                    if '_3 2.mp4' in f:  # ignore _3 2.mp4 data.
                        cnt_32 += 1
                        continue
                    print(f'i: {i}, {x}')
                    try:
                        # vd_info = get_info(x)
                        out_dir_tmp = os.path.join(out_dir, out_dir_activity, out_dir_participant)
                        if is_subclip:
                            start_time, end_time = get_activity_info(x, video_logs)
                            # end_time == 0 means no log entry was found.
                            if end_time == 0: continue
                            x = trim(x, start_time=start_time, end_time=end_time, out_dir=out_dir_tmp)
                        # if '1.mp4' in x:
                        #     durations['camera1'].append((end_time-start_time, vd_info['fps'], vd_info['duration']))
                        # elif '2.mkv' in x:
                        #     durations['camera2'].append((end_time-start_time, vd_info['fps'], vd_info['duration']))
                        # elif '3.mp4' in x:
                        #     durations['camera3'].append((end_time-start_time, vd_info['fps'], vd_info['duration']))
                        if is_cnn_feature:
                            x_feat = _extract_video_feature(model, x, out_dir=out_dir_tmp)
                        else:
                            x_feat = ''
                        data.append((x, x_feat, activity_label))
                        if is_mirror:
                            mirrored_x = _mirror_video(x, out_dir=out_dir_tmp)
                            if is_cnn_feature:
                                mirrored_x_feat = _extract_video_feature(model, mirrored_x, out_dir=out_dir_tmp)
                            else:
                                mirrored_x_feat = ''
                            data.append((mirrored_x, mirrored_x_feat, activity_label))
                        # if '_3.mp4' in x or '_3 2.mp4' in x_feat:
                        #     print(f'---{i}, {x}')
                    except Exception as e:
                        msg = f'error: {e} on {x}'
                        raise ValueError(msg)
                    i += 1
    # for key, vs in durations.items():
    #     vs, fps, dura = zip(*vs)
    #     print(f'key -> fps: {set(fps)}')
    #     fps = fps[0]
    #     qs = [0, 0.25, 0.5, 0.75, 0.9, 0.95, 1.0]
    #     print(key, f'fps: {fps}. before trimming', [f'{int(v_)}s({q_})' for v_, q_ in zip(np.quantile(dura, q=qs), qs)])
    #     print(key, f'fps: {fps}', [f'{int(v_)}s({q_})' for v_, q_ in zip(np.quantile(vs, q=qs), qs)])
    #     print(key, f'fps: {fps}',[f'{int(v_ * fps)}({q_})' for v_, q_ in zip(np.quantile(vs, q=qs), qs)])
    #     print(key, f'fps: {fps}', [f'{int(v_*fps)} frames, when q = {q_}' for v_, q_ in zip(np.quantile(vs, q=qs), qs)])
    # print(f'camera_3: {cnt_3}, camera_32 (backup): {cnt_32}')
    meta = {'data': data, 'is_mirror': is_mirror, 'is_cnn_feature': is_cnn_feature}
    return meta
def get_dim(X, q=0.9):
    """Choose a common feature dimension from a collection of variable-length vectors.

    The dimension is the q-th quantile of the per-vector lengths, rounded up.
    A small table of candidate dimensions at several quantiles is printed for
    inspection (the "// 4096" conversion reports the equivalent frame count,
    one 4096-dim CNN feature per frame).

    Parameters
    ----------
    X : iterable of sized sequences (per-video feature vectors).
    q : float in [0, 1], quantile used for the returned dimension.

    Returns
    -------
    int : the chosen dimension.
    """
    X = [len(v) for v in X]
    qs = [0, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99, 1]
    # Compute all candidate quantiles in one call, then format the report.
    candidates = np.ceil(np.quantile(X, q=qs))
    dims = []
    for _v, _q in zip(candidates, qs):
        dims.append(f"{int(_v)}({int(_v) // 4096} frames, q={_q})")
    print(f'dims: {dims} when q={qs}.')
    dim = int(np.ceil(np.quantile(X, q=q)))
    print(f'dim: {dim} when q={q}. It is around {dim // 4096} frames for each video')
    return dim
def fix_dim(X, dim=10):
    """Force every vector in X to exactly `dim` entries.

    Short vectors are right-padded with zeros, long ones truncated; vectors
    already of length `dim` pass through unchanged.

    Returns
    -------
    numpy.ndarray of shape (len(X), dim).
    """
    def _resize(v):
        # One vector: pad with trailing zeros, truncate, or keep as-is.
        m = len(v)
        if m < dim:
            v = np.asarray(list(v) + [0] * (dim - m))
        elif m > dim:
            v = v[:dim]
        return v
    return np.asarray([_resize(v) for v in X])
def make_confusion_matrix(cf,
                          group_names=None,
                          categories='auto',
                          count=True,
                          percent=True,
                          cbar=True,
                          xyticks=True,
                          xyplotlabels=True,
                          sum_stats=True,
                          figsize=None,
                          cmap='Blues',
                          title=None,
                          out_dir='.'):
    '''
    This function will make a pretty plot of an sklearn Confusion Matrix cm using a Seaborn heatmap visualization.
    Arguments
    ---------
    cf: confusion matrix to be passed in
    group_names: List of strings that represent the labels row by row to be shown in each square.
    categories: List of strings containing the categories to be displayed on the x,y axis. Default is 'auto'
    count: If True, show the raw number in the confusion matrix. Default is True.
    percent: If True, show the row-wise proportions for each category. Default is True.
    cbar: If True, show the color bar. The cbar values are based off the values in the confusion matrix.
          Default is True.
    xyticks: If True, show x and y ticks. Default is True.
    xyplotlabels: If True, show 'True Label' and 'Predicted Label' on the figure. Default is True.
    sum_stats: If True, display summary statistics below the figure. Default is True.
    figsize: Tuple representing the figure size. Default will be the matplotlib rcParams value.
    cmap: Colormap of the values displayed from matplotlib.pyplot.cm. Default is 'Blues'
          See http://matplotlib.org/examples/color/colormaps_reference.html
    title: Title for the heatmap. Default is None.
    out_dir: Directory the figure is written into.

    Returns
    -------
    out_file: str, path (without extension) the figure was saved to.
    '''
    # CODE TO GENERATE TEXT INSIDE EACH SQUARE
    blanks = ['' for i in range(cf.size)]
    if group_names and len(group_names) == cf.size:
        group_labels = ["{}\n".format(value) for value in group_names]
    else:
        group_labels = blanks
    if count:
        group_counts = ["{0:0.0f}\n".format(value) for value in cf.flatten()]
    else:
        group_counts = blanks
    if percent:
        # Each cell is divided by its own row total so the percentages in a
        # row (one true label) add up to 100%.
        row_sum = np.sum(cf, axis=1)
        cf_row_sum = np.array([[value] * len(row_sum) for value in row_sum]).flatten()
        group_percentages = ["({0:.2%})".format(value) for value in cf.flatten() / cf_row_sum]
    else:
        group_percentages = blanks
    box_labels = [f"{v1}{v2}{v3}".strip() for v1, v2, v3 in zip(group_labels, group_counts, group_percentages)]
    box_labels = np.asarray(box_labels).reshape(cf.shape[0], cf.shape[1])
    # CODE TO GENERATE SUMMARY STATISTICS & TEXT FOR SUMMARY STATS
    if sum_stats:
        # Accuracy is sum of diagonal divided by total observations
        accuracy = np.trace(cf) / float(np.sum(cf))
        # if it is a binary confusion matrix, show some more stats
        if len(cf) == 2:
            # Metrics for Binary Confusion Matrices
            precision = cf[1, 1] / sum(cf[:, 1])
            recall = cf[1, 1] / sum(cf[1, :])
            f1_score = 2 * precision * recall / (precision + recall)
            stats_text = "\n\nAccuracy={:0.3f}\nPrecision={:0.3f}\nRecall={:0.3f}\nF1 Score={:0.3f}".format(
                accuracy, precision, recall, f1_score)
        else:
            stats_text = "\n\nAccuracy={:0.2f}".format(accuracy)
    else:
        stats_text = ""
    print(stats_text)
    # SET FIGURE PARAMETERS ACCORDING TO OTHER ARGUMENTS
    if figsize is None:
        # Get default figure size if not set
        figsize = plt.rcParams.get('figure.figsize')
    if not xyticks:
        # Do not show categories if xyticks is False
        categories = False
    # MAKE THE HEATMAP VISUALIZATION
    plt.figure(figsize=figsize)
    # BUG FIX: use keepdims=True so the division normalizes each ROW by its own
    # total. Without it, np.sum(cf, axis=1) has shape (n,) and broadcasting
    # divides each COLUMN j by row_sum[j], producing wrong heatmap shading.
    sns.heatmap(cf / np.sum(cf, axis=1, keepdims=True), annot=box_labels, fmt="", cmap=cmap, cbar=cbar,
                xticklabels=categories, yticklabels=categories)
    if xyplotlabels:
        plt.ylabel('True label')
        plt.xlabel('Predicted label' + stats_text)
    else:
        plt.xlabel(stats_text)
    if title:
        plt.title(title)
    out_file = os.path.join(out_dir, f'{title}-confusion_matrix')
    plt.savefig(out_file, bbox_inches='tight', dpi=300)
    plt.show()
    # BUG FIX: return the save path; the caller assigns and prints this value,
    # which was previously always None because the function returned nothing.
    return out_file
def main(random_state=42):
    """End-to-end pipeline for the refrigerator activity-recognition experiment.

    Steps: (1) reset the cached train/test split file, (2) extract CNN features
    from the raw videos if not cached, (3) split videos by camera-1 clips into
    train/test and map labels to indices, (4) reload the split, (5) convert CNN
    features to fixed-size vectors, (6) augment with all three cameras,
    standardize, optionally t-SNE-plot, then train/evaluate several classifiers
    and copy misclassified test videos to 'out/misclassified'.

    Parameters
    ----------
    random_state : int
        Seed forwarded to the train/test split and to each classifier.
    """
    ###############################################################################################################
    # Step 1. check if the Xy.dat exists. Xy.dat includes the train and test set.
    in_dir = 'out-raw/data/data-clean/refrigerator'
    Xy_train_test_file = f'{in_dir}/Xy_train_test.dat'
    # NOTE(review): the cached split is deleted unconditionally here, so the
    # split is rebuilt on every run — presumably intentional; confirm.
    if os.path.exists(Xy_train_test_file):
        os.remove(Xy_train_test_file)
    if not os.path.exists(Xy_train_test_file):
        ###############################################################################################################
        # Step 2. Get all cnn_features from videos
        Xy_cnn_features_file = f'{in_dir}/Xy_cnn_features.dat'
        if os.path.exists(Xy_cnn_features_file): os.remove(Xy_cnn_features_file)
        if not os.path.exists(Xy_cnn_features_file):
            in_raw_dir = 'data/data-clean/refrigerator'
            # Here we preprocessing all the videos (such as, trim and mirror), but if uses all of them can be seen
            # in the following part. Also, extract the features by CNN
            meta = gen_Xy([in_raw_dir], out_dir=in_dir, is_subclip=False, is_mirror=False, is_cnn_feature=True)
            dump_data(meta, out_file=Xy_cnn_features_file)
        ###############################################################################################################
        # Step 3. Split the features to train and test set according to camera 1 (i.e., front view-'_1.mp4') .
        if not os.path.exists(Xy_train_test_file):
            meta = load_data(Xy_cnn_features_file)
            # video_type = {'_1.mp4': front view,'_3.mp4': side view and mirrored (up to down) view, 'mkv': side view}
            video_type = '_1.mp4'  # split the train and test based on 'camera_1' (i.e, '_1.mp4')
            test_size = 0.3
            label2idx = {'no_interaction': 0, 'open_close_fridge': 1, 'put_back_item': 2, 'screen_interaction': 3,
                         'take_out_item': 4}
            idx2label = {0: 'no_interaction', 1: 'open_close_fridge', 2: 'put_back_item', 3: 'screen_interaction',
                         4: 'take_out_item'}
            ###############################################################################################################
            # Step 3.1. Split the videos capured by camera 1 (i.e., front view-'_1.mp4') to train and test set.
            train_meta, test_meta = split_train_test_video(meta, video_type, test_size=test_size,
                                                           random_state=random_state)
            ###############################################################################################################
            # Step 3.2. change label to idx
            train_meta = change_label2idx(train_meta, label2idx)
            test_meta = change_label2idx(test_meta, label2idx)
            ###############################################################################################################
            # Step 3.3. dump all data to disk
            meta = {'train_meta': train_meta, 'test_meta': test_meta, 'test_size': test_size,
                    'label2idx': label2idx, 'idx2label': idx2label}
            dump_data(meta, out_file=Xy_train_test_file)
    ###############################################################################################################
    # Step 4. load Xy_train_test.dat
    meta = load_data(Xy_train_test_file)
    train_meta = meta['train_meta']
    test_meta = meta['test_meta']
    test_size = meta['test_size']
    ###############################################################################################################
    # Step 5. obtain final feature data (X_train and X_test) from CNN features with different methods
    train_meta = cnn_feature2final_feature(train_meta, feature_type='fixed_segments', is_test=False)
    test_meta = cnn_feature2final_feature(test_meta, feature_type='fixed_segments', is_test=False)
    ###############################################################################################################
    # Step 6. if augment data or not
    # X_train_meta, X_train, y_train = augment_train(train_meta, augment_type='camera_1',
    #                                                is_mirror=False)
    X_train_meta, X_train, y_train = augment_train(train_meta, augment_type='camera_1+camera_2+camera_3',
                                                   # +camera_2+camera_3
                                                   is_mirror=False)
    # dim = get_dim(X_train, q= 0.9)  # get maximum
    # X_train = fix_dim(X_train, dim)
    # X_train = X_train[:100, :]  # for debugging
    # y_train = y_train[:100]  # for debugging
    X_test_meta, X_test, y_test = augment_test(test_meta, augment_type='camera_1+camera_2+camera_3',
                                               is_mirror=False)
    # X_test = fix_dim(X_test, dim)
    print(f'X_train: {X_train.shape}\nX_test: {X_test.shape}')
    print(f'X_train: {X_train.shape}, y_train: {sorted(Counter(y_train).items(), key=lambda x: x[0])}')
    print(f'X_test: {X_test.shape}, y_test: {sorted(Counter(y_test).items(), key=lambda x: x[0])}')
    # print(X_train[:10])
    # scaler = MinMaxScaler()
    # Standardize features; the scaler is fit on the training set only and
    # applied to both splits to avoid test-set leakage.
    scaler = StandardScaler()
    # X = np.concatenate(X, axis=0)
    scaler.fit(X_train)
    X_train = scaler.transform(X_train)
    X_test = scaler.transform(X_test)
    is_plot = True
    if is_plot:
        start_time = time.time()
        tsne_plot(X_train, y_train, [idx2label[i] for i in y_train], title='refrigerator', out_dir=in_dir)
        end_time = time.time()
        print(f'Plot takes {end_time - start_time:.0f} seconds.')
    res = []
    # Train and evaluate each classifier on the standardized features.
    for model_name in ['OvRLogReg', 'SVM(linear)', 'RF']:  # ['OvRLogReg', 'SVM(linear)', 'RF']
        print(f'\n\n***{model_name}')
        start_time = time.time()
        if model_name == 'OvRLogReg':
            detector = AnomalyDetector(model_name='OvRLogReg', model_parameters={'C': 1}, random_state=random_state)
        elif model_name == 'SVM(linear)':
            # detector = AnomalyDetector(model_name='SVM', model_parameters={'kernel':'rbf'}, random_state=random_state)
            detector = AnomalyDetector(model_name='SVM', model_parameters={'kernel': 'linear'},
                                       random_state=random_state)
        elif model_name == 'RF':
            detector = AnomalyDetector(model_name='RF', model_parameters={'n_estimators': 100},
                                       random_state=random_state)
        else:
            # 2. build the kde models
            # detector = AnomalyDetector(model_name='KDE', model_parameters = {'bandwidth': 0.1, 'kernel': 'gussisan'})
            detector = AnomalyDetector(model_name='DT', model_parameters={}, random_state=random_state)
        detector.fit(X_train, y_train)
        #
        # # 3. compute the threshold
        # detector.get_threshold(X_train, q=0.01)
        # # print(detector.predict_prob(X_train))
        # 4. evaulation
        y_preds = detector.model.predict(X_test)
        print('y_test (label, cnt): ', sorted(Counter(y_test).items(), key=lambda x: x[0]))
        acc = sklearn.metrics.accuracy_score(y_test, y_preds)
        print(f'accuracy: {acc}')
        # res.append((acc, n_estimators))
        cm = sklearn.metrics.confusion_matrix(y_test, y_preds)
        # print(cm)
        # labels = list(mp.keys())
        w = 15  # width
        # print()
        # labels = [f'{v[:w]:<{w}}' for k, v in mp.items()]
        # for v in zip_longest(*labels, fillvalue=' '):
        #     print(' '.join(v))
        # print(' '* 15 + ' '.join(labels)+f'(predicted)')
        print(' ' * 40 + '(predicted)')
        # print(' ' * (w + 1) + '\t' + '\t\t'.join([f'({k})' for k, v in mp.items()]))
        for i, vs in enumerate(list(cm)):
            print(f'{idx2label[i][:w]:<{w}} ({i})\t', '\t\t'.join([f'{v}' for v in list(vs)]))
        cm_file = make_confusion_matrix(cm, categories=sorted(label2idx.keys()), title=model_name, out_dir=in_dir)
        print(f'confusion_matrix: {cm_file}')
        # 5 get misclassification
        err_mp = {}
        misclassified_dir = 'out/misclassified'
        if os.path.exists(misclassified_dir):
            shutil.rmtree(misclassified_dir)
        # Copy each misclassified test video into a per-model, per-true-label
        # subdirectory for manual inspection.
        for x_test_, y_t, y_p in zip(X_test_meta, y_test, y_preds):
            if y_t != y_p:
                name = f'{idx2label[y_t]}({y_t}):{x_test_}'
                if name not in err_mp.keys():
                    err_mp[name] = [f'{idx2label[y_p]}({y_p})']
                else:
                    err_mp[name].append(f'{idx2label[y_p]}({y_p})')
                # copy misclassified videos to dst
                # print(x_test_)
                tmp_out_dir = os.path.join(misclassified_dir, model_name, idx2label[y_t] + '->')
                if not os.path.exists(tmp_out_dir):
                    os.makedirs(tmp_out_dir)
                copyfile(x_test_, os.path.join(tmp_out_dir, os.path.split(x_test_)[-1]))
                # print(f'{mp[y_t]} -> {mp[y_p]}')
        # print(f'***misclassified classes: {len(err_mp.keys())}')
        # print('\t' + '\n\t'.join([f'{k}->{Counter(vs)}' for k, vs in sorted(err_mp.items())]))
        # for label_ in y_test:
        #     label_ = idx2label[label_]
        # for k, vs in sorted(err_mp.items()):
        #     print('\t' + '\n\t'.join([f'{k}->{vs}']))
        end_time = time.time()
        print(f'{model_name} takes {end_time - start_time:.0f} seconds.')
    # NOTE(review): `res` is never appended to (the append is commented out
    # above), so these prints always show an empty list — looks like leftovers
    # from an n_estimators sweep; confirm whether they should be removed.
    print('\n\n', res)
    print(sorted(res, key=lambda x: x[0], reverse=True))
if __name__ == '__main__':
    # Script entry point: run the full pipeline and report total wall-clock time.
    start_time = time.time()
    main()
    end_time = time.time()
    print(f'Total time is {end_time - start_time:.0f} seconds!')
| [
"matplotlib.pyplot.title",
"os.remove",
"numpy.trace",
"sklearn.preprocessing.StandardScaler",
"numpy.sum",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score",
"sklearn.tree.DecisionTreeClassifier",
"matplotlib.pyplot.figure",
"features.video.utils.load_video",
"feature... | [((1812, 1836), 'os.path.exists', 'os.path.exists', (['out_file'], {}), '(out_file)\n', (1826, 1836), False, 'import os\n'), ((2042, 2081), 'features.video.utils.load_video', 'load_video', (['in_file', 'model.desired_size'], {}), '(in_file, model.desired_size)\n', (2052, 2081), False, 'from features.video.utils import load_video\n'), ((2786, 2808), 'os.path.split', 'os.path.split', (['in_file'], {}), '(in_file)\n', (2799, 2808), False, 'import os\n'), ((2993, 3046), 'os.path.join', 'os.path.join', (['out_dir', "(file_name + '-mirrored.' + ent)"], {}), "(out_dir, file_name + '-mirrored.' + ent)\n", (3005, 3046), False, 'import os\n'), ((3054, 3078), 'os.path.exists', 'os.path.exists', (['out_file'], {}), '(out_file)\n', (3068, 3078), False, 'import os\n'), ((7986, 8056), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_size', 'random_state': 'random_state'}), '(X, y, test_size=test_size, random_state=random_state)\n', (8002, 8056), False, 'from sklearn.model_selection import train_test_split\n'), ((8241, 8334), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_mirrored', 'y_mirrored'], {'test_size': 'test_size', 'random_state': 'random_state'}), '(X_mirrored, y_mirrored, test_size=test_size, random_state=\n random_state)\n', (8257, 8334), False, 'from sklearn.model_selection import train_test_split\n'), ((8345, 8396), 'numpy.concatenate', 'np.concatenate', (['[X_train, X_mirrored_train]'], {'axis': '(0)'}), '([X_train, X_mirrored_train], axis=0)\n', (8359, 8396), True, 'import numpy as np\n'), ((8411, 8462), 'numpy.concatenate', 'np.concatenate', (['[y_train, y_mirrored_train]'], {'axis': '(0)'}), '([y_train, y_mirrored_train], axis=0)\n', (8425, 8462), True, 'import numpy as np\n'), ((13458, 13551), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'df', 'x': '"""x1"""', 'y': '"""x2"""', 'hue': '"""y_label"""', 'palette': '"""deep"""', 's': '(50)', 'alpha': '(0.3)'}), "(data=df, 
x='x1', y='x2', hue='y_label', palette='deep', s=\n 50, alpha=0.3)\n", (13473, 13551), True, 'import seaborn as sns\n'), ((14101, 14119), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14117, 14119), True, 'import matplotlib.pyplot as plt\n'), ((14124, 14134), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14132, 14134), True, 'import matplotlib.pyplot as plt\n'), ((14349, 14359), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14357, 14359), True, 'import matplotlib.pyplot as plt\n'), ((14724, 14746), 'seaborn.set', 'sns.set', ([], {'style': '"""white"""'}), "(style='white')\n", (14731, 14746), True, 'import seaborn as sns\n'), ((14917, 14943), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (14927, 14943), True, 'import matplotlib.pyplot as plt\n'), ((14953, 14990), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {'auto_add_to_figure': '(False)'}), '(fig, auto_add_to_figure=False)\n', (14959, 14990), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((15558, 15591), 'os.path.join', 'os.path.join', (['out_dir', 'f"""{title}"""'], {}), "(out_dir, f'{title}')\n", (15570, 15591), False, 'import os\n'), ((15596, 15647), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_file'], {'bbox_inches': '"""tight"""', 'dpi': '(300)'}), "(out_file, bbox_inches='tight', dpi=300)\n", (15607, 15647), True, 'import matplotlib.pyplot as plt\n'), ((15652, 15662), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15660, 15662), True, 'import matplotlib.pyplot as plt\n'), ((16312, 16386), 'sklearn.model_selection.train_test_split', 'train_test_split', (['camera_1'], {'test_size': 'test_size', 'random_state': 'random_state'}), '(camera_1, test_size=test_size, random_state=random_state)\n', (16328, 16386), False, 'from sklearn.model_selection import train_test_split\n'), ((36077, 36094), 'numpy.asarray', 'np.asarray', (['new_X'], {}), '(new_X)\n', (36087, 36094), True, 'import numpy as 
np\n'), ((39996, 40023), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (40006, 40023), True, 'import matplotlib.pyplot as plt\n'), ((40379, 40429), 'os.path.join', 'os.path.join', (['out_dir', 'f"""{title}-confusion_matrix"""'], {}), "(out_dir, f'{title}-confusion_matrix')\n", (40391, 40429), False, 'import os\n'), ((40434, 40485), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_file'], {'bbox_inches': '"""tight"""', 'dpi': '(300)'}), "(out_file, bbox_inches='tight', dpi=300)\n", (40445, 40485), True, 'import matplotlib.pyplot as plt\n'), ((40490, 40500), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (40498, 40500), True, 'import matplotlib.pyplot as plt\n'), ((40842, 40876), 'os.path.exists', 'os.path.exists', (['Xy_train_test_file'], {}), '(Xy_train_test_file)\n', (40856, 40876), False, 'import os\n'), ((43916, 43945), 'features.feature.load_data', 'load_data', (['Xy_train_test_file'], {}), '(Xy_train_test_file)\n', (43925, 43945), False, 'from features.feature import extract_feature_average, extract_feature_sampling, load_data, dump_data, extract_feature_sampling_mean, extract_feature_fixed_segments\n'), ((45760, 45776), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (45774, 45776), False, 'from sklearn.preprocessing import StandardScaler\n'), ((50144, 50155), 'time.time', 'time.time', ([], {}), '()\n', (50153, 50155), False, 'import time\n'), ((50182, 50193), 'time.time', 'time.time', ([], {}), '()\n', (50191, 50193), False, 'import time\n'), ((1874, 1897), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (1888, 1897), False, 'import os\n'), ((1907, 1927), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (1918, 1927), False, 'import os\n'), ((3355, 3380), 'cv2.VideoCapture', 'cv2.VideoCapture', (['in_file'], {}), '(in_file)\n', (3371, 3380), False, 'import cv2\n'), ((4250, 4336), 'cv2.VideoWriter', 'cv2.VideoWriter', (['out_file', 
'fourcc', 'fps', '(frame_width, frame_height)'], {'isColor': '(True)'}), '(out_file, fourcc, fps, (frame_width, frame_height), isColor\n =True)\n', (4265, 4336), False, 'import cv2\n'), ((4773, 4796), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4794, 4796), False, 'import cv2\n'), ((6644, 6660), 'numpy.exp', 'np.exp', (['log_dens'], {}), '(log_dens)\n', (6650, 6660), True, 'import numpy as np\n'), ((6904, 6930), 'features.feature.extract_feature_average', 'extract_feature_average', (['f'], {}), '(f)\n', (6927, 6930), False, 'from features.feature import extract_feature_average, extract_feature_sampling, load_data, dump_data, extract_feature_sampling_mean, extract_feature_fixed_segments\n'), ((6987, 7000), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (6997, 7000), True, 'import numpy as np\n'), ((7002, 7015), 'numpy.asarray', 'np.asarray', (['Y'], {}), '(Y)\n', (7012, 7015), True, 'import numpy as np\n'), ((8508, 8557), 'numpy.concatenate', 'np.concatenate', (['[X_test, X_mirrored_test]'], {'axis': '(0)'}), '([X_test, X_mirrored_test], axis=0)\n', (8522, 8557), True, 'import numpy as np\n'), ((8575, 8624), 'numpy.concatenate', 'np.concatenate', (['[y_test, y_mirrored_test]'], {'axis': '(0)'}), '([y_test, y_mirrored_test], axis=0)\n', (8589, 8624), True, 'import numpy as np\n'), ((11008, 11021), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (11018, 11021), True, 'import numpy as np\n'), ((11023, 11036), 'numpy.asarray', 'np.asarray', (['Y'], {}), '(Y)\n', (11033, 11036), True, 'import numpy as np\n'), ((12475, 12488), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (12485, 12488), True, 'import numpy as np\n'), ((12490, 12503), 'numpy.asarray', 'np.asarray', (['Y'], {}), '(Y)\n', (12500, 12503), True, 'import numpy as np\n'), ((30268, 30293), 'features.video.model_tf.CNN_tf', 'CNN_tf', (['"""vgg"""', 'model_file'], {}), "('vgg', model_file)\n", (30274, 30293), False, 'from features.video.model_tf import CNN_tf\n'), 
((38386, 38404), 'numpy.sum', 'np.sum', (['cf'], {'axis': '(1)'}), '(cf, axis=1)\n', (38392, 38404), True, 'import numpy as np\n'), ((39813, 39847), 'matplotlib.pyplot.rcParams.get', 'plt.rcParams.get', (['"""figure.figsize"""'], {}), "('figure.figsize')\n", (39829, 39847), True, 'import matplotlib.pyplot as plt\n'), ((40207, 40231), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (40217, 40231), True, 'import matplotlib.pyplot as plt\n'), ((40240, 40282), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Predicted label' + stats_text)"], {}), "('Predicted label' + stats_text)\n", (40250, 40282), True, 'import matplotlib.pyplot as plt\n'), ((40301, 40323), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['stats_text'], {}), '(stats_text)\n', (40311, 40323), True, 'import matplotlib.pyplot as plt\n'), ((40347, 40363), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (40356, 40363), True, 'import matplotlib.pyplot as plt\n'), ((40886, 40915), 'os.remove', 'os.remove', (['Xy_train_test_file'], {}), '(Xy_train_test_file)\n', (40895, 40915), False, 'import os\n'), ((40928, 40962), 'os.path.exists', 'os.path.exists', (['Xy_train_test_file'], {}), '(Xy_train_test_file)\n', (40942, 40962), False, 'import os\n'), ((41209, 41245), 'os.path.exists', 'os.path.exists', (['Xy_cnn_features_file'], {}), '(Xy_cnn_features_file)\n', (41223, 41245), False, 'import os\n'), ((45972, 45983), 'time.time', 'time.time', ([], {}), '()\n', (45981, 45983), False, 'import time\n'), ((46110, 46121), 'time.time', 'time.time', ([], {}), '()\n', (46119, 46121), False, 'import time\n'), ((46356, 46367), 'time.time', 'time.time', ([], {}), '()\n', (46365, 46367), False, 'import time\n'), ((47687, 47734), 'sklearn.metrics.accuracy_score', 'sklearn.metrics.accuracy_score', (['y_test', 'y_preds'], {}), '(y_test, y_preds)\n', (47717, 47734), False, 'import sklearn\n'), ((47824, 47873), 'sklearn.metrics.confusion_matrix', 
'sklearn.metrics.confusion_matrix', (['y_test', 'y_preds'], {}), '(y_test, y_preds)\n', (47856, 47873), False, 'import sklearn\n'), ((48719, 48752), 'os.path.exists', 'os.path.exists', (['misclassified_dir'], {}), '(misclassified_dir)\n', (48733, 48752), False, 'import os\n'), ((49932, 49943), 'time.time', 'time.time', ([], {}), '()\n', (49941, 49943), False, 'import time\n'), ((1690, 1715), 'os.path.basename', 'os.path.basename', (['in_file'], {}), '(in_file)\n', (1706, 1715), False, 'import os\n'), ((2190, 2216), 'os.path.splitext', 'os.path.splitext', (['out_file'], {}), '(out_file)\n', (2206, 2216), False, 'import os\n'), ((3267, 3290), 'os.path.exists', 'os.path.exists', (['in_file'], {}), '(in_file)\n', (3281, 3290), False, 'import os\n'), ((3304, 3340), 'logging.error', 'error', (['f"""not exist error: {in_file}"""'], {}), "(f'not exist error: {in_file}')\n", (3309, 3340), False, 'from logging import error\n'), ((4832, 4865), 'logging.error', 'error', (['f"""Error: {e} on {in_file}"""'], {}), "(f'Error: {e} on {in_file}')\n", (4837, 4865), False, 'from logging import error\n'), ((5317, 5364), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'kernel': '"""gaussian"""', 'bandwidth': '(0.5)'}), "(kernel='gaussian', bandwidth=0.5)\n", (5330, 5364), False, 'from sklearn.neighbors import KernelDensity\n'), ((6526, 6542), 'numpy.exp', 'np.exp', (['log_dens'], {}), '(log_dens)\n', (6532, 6542), True, 'import numpy as np\n'), ((12779, 12826), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'random_state': 'random_state'}), '(n_components=2, random_state=random_state)\n', (12783, 12826), False, 'from sklearn.manifold import TSNE\n'), ((14389, 14436), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(3)', 'random_state': 'random_state'}), '(n_components=3, random_state=random_state)\n', (14393, 14436), False, 'from sklearn.manifold import TSNE\n'), ((21195, 21217), 'os.path.split', 'os.path.split', (['in_file'], {}), '(in_file)\n', (21208, 
21217), False, 'import os\n'), ((21952, 22052), 'logging.error', 'error', (['f"""get_activity_info () Error: {e}. {in_file}, video_start_time: {video_start_time}"""'], {}), "(\n f'get_activity_info () Error: {e}. {in_file}, video_start_time: {video_start_time}'\n )\n", (21957, 22052), False, 'from logging import error\n'), ((30952, 30974), 'os.listdir', 'os.listdir', (['device_dir'], {}), '(device_dir)\n', (30962, 30974), False, 'import os\n'), ((31090, 31128), 'os.path.join', 'os.path.join', (['device_dir', 'activity_dir'], {}), '(device_dir, activity_dir)\n', (31102, 31128), False, 'import os\n'), ((35692, 35711), 'numpy.quantile', 'np.quantile', (['X'], {'q': 'q'}), '(X, q=q)\n', (35703, 35711), True, 'import numpy as np\n'), ((38798, 38820), 'numpy.asarray', 'np.asarray', (['box_labels'], {}), '(box_labels)\n', (38808, 38820), True, 'import numpy as np\n'), ((39028, 39040), 'numpy.trace', 'np.trace', (['cf'], {}), '(cf)\n', (39036, 39040), True, 'import numpy as np\n'), ((40045, 40063), 'numpy.sum', 'np.sum', (['cf'], {'axis': '(1)'}), '(cf, axis=1)\n', (40051, 40063), True, 'import numpy as np\n'), ((41247, 41278), 'os.remove', 'os.remove', (['Xy_cnn_features_file'], {}), '(Xy_cnn_features_file)\n', (41256, 41278), False, 'import os\n'), ((41294, 41330), 'os.path.exists', 'os.path.exists', (['Xy_cnn_features_file'], {}), '(Xy_cnn_features_file)\n', (41308, 41330), False, 'import os\n'), ((41698, 41744), 'features.feature.dump_data', 'dump_data', (['meta'], {'out_file': 'Xy_cnn_features_file'}), '(meta, out_file=Xy_cnn_features_file)\n', (41707, 41744), False, 'from features.feature import extract_feature_average, extract_feature_sampling, load_data, dump_data, extract_feature_sampling_mean, extract_feature_fixed_segments\n'), ((41992, 42026), 'os.path.exists', 'os.path.exists', (['Xy_train_test_file'], {}), '(Xy_train_test_file)\n', (42006, 42026), False, 'import os\n'), ((42047, 42078), 'features.feature.load_data', 'load_data', (['Xy_cnn_features_file'], 
{}), '(Xy_cnn_features_file)\n', (42056, 42078), False, 'from features.feature import extract_feature_average, extract_feature_sampling, load_data, dump_data, extract_feature_sampling_mean, extract_feature_fixed_segments\n'), ((43706, 43750), 'features.feature.dump_data', 'dump_data', (['meta'], {'out_file': 'Xy_train_test_file'}), '(meta, out_file=Xy_train_test_file)\n', (43715, 43750), False, 'from features.feature import extract_feature_average, extract_feature_sampling, load_data, dump_data, extract_feature_sampling_mean, extract_feature_fixed_segments\n'), ((48766, 48798), 'shutil.rmtree', 'shutil.rmtree', (['misclassified_dir'], {}), '(misclassified_dir)\n', (48779, 48798), False, 'import shutil\n'), ((4566, 4582), 'cv2.flip', 'cv2.flip', (['img', '(1)'], {}), '(img, 1)\n', (4574, 4582), False, 'import cv2\n'), ((13185, 13207), 'numpy.reshape', 'np.reshape', (['y', '(-1, 1)'], {}), '(y, (-1, 1))\n', (13195, 13207), True, 'import numpy as np\n'), ((13209, 13237), 'numpy.reshape', 'np.reshape', (['y_label', '(-1, 1)'], {}), '(y_label, (-1, 1))\n', (13219, 13237), True, 'import numpy as np\n'), ((14504, 14526), 'numpy.reshape', 'np.reshape', (['y', '(-1, 1)'], {}), '(y, (-1, 1))\n', (14514, 14526), True, 'import numpy as np\n'), ((14528, 14556), 'numpy.reshape', 'np.reshape', (['y_label', '(-1, 1)'], {}), '(y_label, (-1, 1))\n', (14538, 14556), True, 'import numpy as np\n'), ((15081, 15109), 'seaborn.color_palette', 'sns.color_palette', (['"""deep"""', '(5)'], {}), "('deep', 5)\n", (15098, 15109), True, 'import seaborn as sns\n'), ((16080, 16119), 'collections.Counter', 'Counter', (['[y for f, feat, y in camera_1]'], {}), '([y for f, feat, y in camera_1])\n', (16087, 16119), False, 'from collections import Counter\n'), ((16164, 16203), 'collections.Counter', 'Counter', (['[y for f, feat, y in camera_2]'], {}), '([y for f, feat, y in camera_2])\n', (16171, 16203), False, 'from collections import Counter\n'), ((16248, 16287), 'collections.Counter', 'Counter', 
(['[y for f, feat, y in camera_3]'], {}), '([y for f, feat, y in camera_3])\n', (16255, 16287), False, 'from collections import Counter\n'), ((16628, 16645), 'os.path.exists', 'os.path.exists', (['f'], {}), '(f)\n', (16642, 16645), False, 'import os\n'), ((16653, 16673), 'os.path.exists', 'os.path.exists', (['feat'], {}), '(feat)\n', (16667, 16673), False, 'import os\n'), ((16966, 16983), 'os.path.exists', 'os.path.exists', (['f'], {}), '(f)\n', (16980, 16983), False, 'import os\n'), ((16991, 17011), 'os.path.exists', 'os.path.exists', (['feat'], {}), '(feat)\n', (17005, 17011), False, 'import os\n'), ((21510, 21536), 'os.path.join', 'os.path.join', (['dir_tmp', 'tmp'], {}), '(dir_tmp, tmp)\n', (21522, 21536), False, 'import os\n'), ((22756, 22779), 'os.path.join', 'os.path.join', (['in_dir', 'f'], {}), '(in_dir, f)\n', (22768, 22779), False, 'import os\n'), ((28318, 28335), 'os.path.exists', 'os.path.exists', (['f'], {}), '(f)\n', (28332, 28335), False, 'import os\n'), ((31394, 31418), 'os.listdir', 'os.listdir', (['activity_dir'], {}), '(activity_dir)\n', (31404, 31418), False, 'import os\n'), ((31505, 31534), 'os.path.join', 'os.path.join', (['participant_dir'], {}), '(participant_dir)\n', (31517, 31534), False, 'import os\n'), ((31569, 31612), 'os.path.join', 'os.path.join', (['activity_dir', 'participant_dir'], {}), '(activity_dir, participant_dir)\n', (31581, 31612), False, 'import os\n'), ((39049, 39059), 'numpy.sum', 'np.sum', (['cf'], {}), '(cf)\n', (39055, 39059), True, 'import numpy as np\n'), ((49268, 49334), 'os.path.join', 'os.path.join', (['misclassified_dir', 'model_name', "(idx2label[y_t] + '->')"], {}), "(misclassified_dir, model_name, idx2label[y_t] + '->')\n", (49280, 49334), False, 'import os\n'), ((5520, 5574), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': 'self.random_state'}), '(random_state=self.random_state)\n', (5542, 5574), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((21870, 
21913), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['video_start_time'], {}), '(video_start_time)\n', (21895, 21913), False, 'from datetime import datetime\n'), ((22817, 22835), 'os.listdir', 'os.listdir', (['in_dir'], {}), '(in_dir)\n', (22827, 22835), False, 'import os\n'), ((25619, 25662), 'logging.error', 'error', (['f"""Error happens at line:{i}, {line}"""'], {}), "(f'Error happens at line:{i}, {line}')\n", (25624, 25662), False, 'from logging import error\n'), ((28594, 28620), 'features.feature.extract_feature_average', 'extract_feature_average', (['f'], {}), '(f)\n', (28617, 28620), False, 'from features.feature import extract_feature_average, extract_feature_sampling, load_data, dump_data, extract_feature_sampling_mean, extract_feature_fixed_segments\n'), ((31148, 31176), 'os.path.exists', 'os.path.exists', (['activity_dir'], {}), '(activity_dir)\n', (31162, 31176), False, 'import os\n'), ((31215, 31242), 'os.path.isdir', 'os.path.isdir', (['activity_dir'], {}), '(activity_dir)\n', (31228, 31242), False, 'import os\n'), ((31859, 31886), 'os.listdir', 'os.listdir', (['participant_dir'], {}), '(participant_dir)\n', (31869, 31886), False, 'import os\n'), ((32368, 32400), 'os.path.join', 'os.path.join', (['participant_dir', 'f'], {}), '(participant_dir, f)\n', (32380, 32400), False, 'import os\n'), ((35602, 35622), 'numpy.quantile', 'np.quantile', (['X'], {'q': 'qs'}), '(X, q=qs)\n', (35613, 35622), True, 'import numpy as np\n'), ((49358, 49385), 'os.path.exists', 'os.path.exists', (['tmp_out_dir'], {}), '(tmp_out_dir)\n', (49372, 49385), False, 'import os\n'), ((49407, 49431), 'os.makedirs', 'os.makedirs', (['tmp_out_dir'], {}), '(tmp_out_dir)\n', (49418, 49431), False, 'import os\n'), ((5748, 5816), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', (['n_estimators'], {'random_state': 'self.random_state'}), '(n_estimators, random_state=self.random_state)\n', (5770, 5816), False, 'from sklearn.ensemble import 
RandomForestClassifier\n'), ((28694, 28739), 'features.feature.extract_feature_sampling_mean', 'extract_feature_sampling_mean', (['f', 'window_size'], {}), '(f, window_size)\n', (28723, 28739), False, 'from features.feature import extract_feature_average, extract_feature_sampling, load_data, dump_data, extract_feature_sampling_mean, extract_feature_fixed_segments\n'), ((28954, 28980), 'features.feature.extract_feature_average', 'extract_feature_average', (['f'], {}), '(f)\n', (28977, 28980), False, 'from features.feature import extract_feature_average, extract_feature_sampling, load_data, dump_data, extract_feature_sampling_mean, extract_feature_fixed_segments\n'), ((30417, 30461), 'os.path.join', 'os.path.join', (['in_dir[0]', '"""issued_videos.csv"""'], {}), "(in_dir[0], 'issued_videos.csv')\n", (30429, 30461), False, 'import os\n'), ((31636, 31667), 'os.path.exists', 'os.path.exists', (['participant_dir'], {}), '(participant_dir)\n', (31650, 31667), False, 'import os\n'), ((32737, 32797), 'os.path.join', 'os.path.join', (['out_dir', 'out_dir_activity', 'out_dir_participant'], {}), '(out_dir, out_dir_activity, out_dir_participant)\n', (32749, 32797), False, 'import os\n'), ((47627, 47642), 'collections.Counter', 'Counter', (['y_test'], {}), '(y_test)\n', (47634, 47642), False, 'from collections import Counter\n'), ((5979, 6041), 'sklearn.svm.SVC', 'sklearn.svm.SVC', ([], {'kernel': 'kernel', 'random_state': 'self.random_state'}), '(kernel=kernel, random_state=self.random_state)\n', (5994, 6041), False, 'import sklearn\n'), ((8744, 8760), 'collections.Counter', 'Counter', (['y_train'], {}), '(y_train)\n', (8751, 8760), False, 'from collections import Counter\n'), ((8846, 8861), 'collections.Counter', 'Counter', (['y_test'], {}), '(y_test)\n', (8853, 8861), False, 'from collections import Counter\n'), ((28819, 28860), 'features.feature.extract_feature_fixed_segments', 'extract_feature_fixed_segments', (['f'], {'dim': '(34)'}), '(f, dim=34)\n', (28849, 28860), 
False, 'from features.feature import extract_feature_average, extract_feature_sampling, load_data, dump_data, extract_feature_sampling_mean, extract_feature_fixed_segments\n'), ((29054, 29099), 'features.feature.extract_feature_sampling_mean', 'extract_feature_sampling_mean', (['f', 'window_size'], {}), '(f, window_size)\n', (29083, 29099), False, 'from features.feature import extract_feature_average, extract_feature_sampling, load_data, dump_data, extract_feature_sampling_mean, extract_feature_fixed_segments\n'), ((33008, 33078), 'features.video.video.trim', 'trim', (['x'], {'start_time': 'start_time', 'end_time': 'end_time', 'out_dir': 'out_dir_tmp'}), '(x, start_time=start_time, end_time=end_time, out_dir=out_dir_tmp)\n', (33012, 33078), False, 'from features.video.video import trim\n'), ((45541, 45557), 'collections.Counter', 'Counter', (['y_train'], {}), '(y_train)\n', (45548, 45557), False, 'from collections import Counter\n'), ((45642, 45657), 'collections.Counter', 'Counter', (['y_test'], {}), '(y_test)\n', (45649, 45657), False, 'from collections import Counter\n'), ((49492, 49514), 'os.path.split', 'os.path.split', (['x_test_'], {}), '(x_test_)\n', (49505, 49514), False, 'import os\n'), ((29256, 29297), 'features.feature.extract_feature_fixed_segments', 'extract_feature_fixed_segments', (['f'], {'dim': '(34)'}), '(f, dim=34)\n', (29286, 29297), False, 'from features.feature import extract_feature_average, extract_feature_sampling, load_data, dump_data, extract_feature_sampling_mean, extract_feature_fixed_segments\n'), ((6237, 6312), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': 'C', 'random_state': 'self.random_state', 'solver': '"""liblinear"""'}), "(C=C, random_state=self.random_state, solver='liblinear')\n", (6255, 6312), False, 'from sklearn.linear_model import LogisticRegression\n')] |
import numpy as np
import random as rd
from random import randint
import matplotlib.pyplot as plt
import io
import base64
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
def to_base64(fig):
# Convert plot to PNG image
pngImage = io.BytesIO()
FigureCanvas(fig).print_png(pngImage)
# Encode PNG image to base64 string
pngImageB64String = "data:image/png;base64,"
pngImageB64String += base64.b64encode(pngImage.getvalue()).decode('utf8')
return pngImageB64String
class Knapsack_Class_GA:
maxx_val = 0
def __init__(self,weight_list,value_list,knapsack_value,gene_count=8,gen_count=50,crossover_rate=0.8,mutation_rate=0.4):
self.item_number = np.arange(1,len(weight_list)+1)
self.weight = np.array(weight_list)
self.value = np.array(value_list)
self.knapsack_threshold = knapsack_value
print('\nThe list is as follows:')
print('Item No. Weight Value')
for i in range(self.item_number.shape[0]):
print('{0} {1} {2}\n'.format(i, self.weight[i], self.value[i]))
self.solutions_per_pop = gene_count
self.pop_size = (self.solutions_per_pop, self.item_number.shape[0])
print('Population size = {}'.format(self.pop_size))
initial_population = np.random.randint(2, size = self.pop_size)
self.initial_population = initial_population.astype(int)
self.num_generations = gen_count
print('Initial population: \n{}'.format(initial_population))
self.crossover_rate = crossover_rate
self.mutation_rate = mutation_rate
def cal_fitness(self):
fitness = np.empty(self.initial_population.shape[0])
for i in range(self.initial_population.shape[0]):
S1 = np.sum(self.initial_population[i] * self.value)
S2 = np.sum(self.initial_population[i] * self.weight)
if S2 <= self.knapsack_threshold:
fitness[i] = S1
else :
fitness[i] = 0
return fitness.astype(int)
def selection(self,fitness, num_parents):
fitness = list(fitness)
parents = np.empty((num_parents, self.initial_population.shape[1]))
for i in range(num_parents):
max_fitness_idx = np.where(fitness == np.max(fitness))
parents[i,:] = self.initial_population[max_fitness_idx[0][0], :]
fitness[max_fitness_idx[0][0]] = -999999
return parents
def crossover(self,parents, num_offsprings):
offsprings = np.empty((num_offsprings, parents.shape[1]))
crossover_point = int(parents.shape[1]/2)
i=0
while (parents.shape[0] < num_offsprings):
parent1_index = i%parents.shape[0]
parent2_index = (i+1)%parents.shape[0]
x = rd.random()
if x > self.crossover_rate:
continue
parent1_index = i%parents.shape[0]
parent2_index = (i+1)%parents.shape[0]
offsprings[i,0:crossover_point] = parents[parent1_index,0:crossover_point]
offsprings[i,crossover_point:] = parents[parent2_index,crossover_point:]
i=+1
return offsprings
def mutation(self,offsprings):
mutants = np.empty((offsprings.shape))
for i in range(mutants.shape[0]):
random_value = rd.random()
mutants[i,:] = offsprings[i,:]
if random_value > self.mutation_rate:
continue
int_random_value = randint(0,offsprings.shape[1]-1)
if mutants[i,int_random_value] == 0 :
mutants[i,int_random_value] = 1
else :
mutants[i,int_random_value] = 0
return mutants
def optimize(self):
parameters, fitness_history = [], []
num_parents = int(self.pop_size[0]/2)
num_offsprings = self.pop_size[0] - num_parents
for _ in range(self.num_generations):
fitness = self.cal_fitness()
fitness_history.append(fitness)
parents = self.selection(fitness, num_parents)
offsprings = self.crossover(parents, num_offsprings)
mutants = self.mutation(offsprings)
self.initial_population[0:parents.shape[0], :] = parents
self.initial_population[parents.shape[0]:, :] = mutants
print('Last generation: \n{}\n'.format(self.initial_population))
fitness_last_gen = self.cal_fitness()
print('Fitness of the last generation: \n{}\n'.format(fitness_last_gen))
max_fitness = np.where(fitness_last_gen == np.max(fitness_last_gen))
parameters.append(self.initial_population[max_fitness[0][0],:])
return (parameters, fitness_history)
def get_solution_ga(self):
parameters, self.fitness_history = self.optimize()
print('The optimized parameters for the given inputs are: \n{}'.format(parameters))
selected_items = self.item_number * parameters
print('\nSelected items that will maximize the knapsack without breaking it:')
for i in range(selected_items.shape[1]):
if selected_items[0][i] != 0:
print('{}\n'.format(selected_items[0][i]))
for i in range(selected_items.shape[1]):
if selected_items[0][i] != 0:
self.maxx_val += self.value[i]
print("maxx_val is : ",self.maxx_val)
return self.maxx_val
def get_graph(self):
fitness_history_mean = [np.mean(fitness) for fitness in self.fitness_history]
fitness_history_max = [np.max(fitness) for fitness in self.fitness_history]
fig = plt.figure()
plt.plot(list(range(self.num_generations)), fitness_history_mean, label = 'Mean Fitness')
plt.plot(list(range(self.num_generations)), fitness_history_max, label = 'Max Fitness')
plt.legend()
plt.title('Fitness through the generations')
plt.xlabel('Generations')
plt.ylabel('Fitness')
#plt.show()
print(np.asarray(self.fitness_history).shape)
return to_base64(fig)
def demo():
session_knapsack = Knapsack_Class_GA([10,20,30],[60,100,120],50)#,8,50,0.8,0.4)
session_knapsack.get_solution_ga()
#session_knapsack.get_graph()
#will not work now as its returning a base64 string
if __name__ == '__main__':
demo() | [
"matplotlib.pyplot.title",
"io.BytesIO",
"numpy.sum",
"random.randint",
"matplotlib.backends.backend_agg.FigureCanvasAgg",
"numpy.empty",
"matplotlib.pyplot.legend",
"numpy.asarray",
"random.random",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"numpy.array",
"numpy.mean",
"numpy.ma... | [((303, 315), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (313, 315), False, 'import io\n'), ((803, 824), 'numpy.array', 'np.array', (['weight_list'], {}), '(weight_list)\n', (811, 824), True, 'import numpy as np\n'), ((846, 866), 'numpy.array', 'np.array', (['value_list'], {}), '(value_list)\n', (854, 866), True, 'import numpy as np\n'), ((1355, 1395), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': 'self.pop_size'}), '(2, size=self.pop_size)\n', (1372, 1395), True, 'import numpy as np\n'), ((1707, 1749), 'numpy.empty', 'np.empty', (['self.initial_population.shape[0]'], {}), '(self.initial_population.shape[0])\n', (1715, 1749), True, 'import numpy as np\n'), ((2200, 2257), 'numpy.empty', 'np.empty', (['(num_parents, self.initial_population.shape[1])'], {}), '((num_parents, self.initial_population.shape[1]))\n', (2208, 2257), True, 'import numpy as np\n'), ((2586, 2630), 'numpy.empty', 'np.empty', (['(num_offsprings, parents.shape[1])'], {}), '((num_offsprings, parents.shape[1]))\n', (2594, 2630), True, 'import numpy as np\n'), ((3306, 3332), 'numpy.empty', 'np.empty', (['offsprings.shape'], {}), '(offsprings.shape)\n', (3314, 3332), True, 'import numpy as np\n'), ((5752, 5764), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5762, 5764), True, 'import matplotlib.pyplot as plt\n'), ((5967, 5979), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5977, 5979), True, 'import matplotlib.pyplot as plt\n'), ((5988, 6032), 'matplotlib.pyplot.title', 'plt.title', (['"""Fitness through the generations"""'], {}), "('Fitness through the generations')\n", (5997, 6032), True, 'import matplotlib.pyplot as plt\n'), ((6041, 6066), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Generations"""'], {}), "('Generations')\n", (6051, 6066), True, 'import matplotlib.pyplot as plt\n'), ((6075, 6096), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Fitness"""'], {}), "('Fitness')\n", (6085, 6096), True, 'import matplotlib.pyplot as 
plt\n'), ((320, 337), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (332, 337), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((1825, 1872), 'numpy.sum', 'np.sum', (['(self.initial_population[i] * self.value)'], {}), '(self.initial_population[i] * self.value)\n', (1831, 1872), True, 'import numpy as np\n'), ((1890, 1938), 'numpy.sum', 'np.sum', (['(self.initial_population[i] * self.weight)'], {}), '(self.initial_population[i] * self.weight)\n', (1896, 1938), True, 'import numpy as np\n'), ((2858, 2869), 'random.random', 'rd.random', ([], {}), '()\n', (2867, 2869), True, 'import random as rd\n'), ((3404, 3415), 'random.random', 'rd.random', ([], {}), '()\n', (3413, 3415), True, 'import random as rd\n'), ((3565, 3600), 'random.randint', 'randint', (['(0)', '(offsprings.shape[1] - 1)'], {}), '(0, offsprings.shape[1] - 1)\n', (3572, 3600), False, 'from random import randint\n'), ((5600, 5616), 'numpy.mean', 'np.mean', (['fitness'], {}), '(fitness)\n', (5607, 5616), True, 'import numpy as np\n'), ((5685, 5700), 'numpy.max', 'np.max', (['fitness'], {}), '(fitness)\n', (5691, 5700), True, 'import numpy as np\n'), ((4677, 4701), 'numpy.max', 'np.max', (['fitness_last_gen'], {}), '(fitness_last_gen)\n', (4683, 4701), True, 'import numpy as np\n'), ((6131, 6163), 'numpy.asarray', 'np.asarray', (['self.fitness_history'], {}), '(self.fitness_history)\n', (6141, 6163), True, 'import numpy as np\n'), ((2345, 2360), 'numpy.max', 'np.max', (['fitness'], {}), '(fitness)\n', (2351, 2360), True, 'import numpy as np\n')] |
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from mo.graph.graph import Node
from mo.ops.power import Power
from mo.utils.unittest.graph import build_graph
class TestPowerOp(unittest.TestCase):
@staticmethod
def create_graph(single_input=True):
nodes_attributes = {
'input1': {
'kind': 'data',
'shape': np.array([1, 3, 224, 224]),
'value': None,
},
'input2': {
'kind': 'data',
'shape': np.array([]),
'value': np.array(1.0),
},
'power': {
'kind': 'op',
'shape': np.array([1, 3, 224, 224]),
},
'power_data': {
'kind': 'data',
'shape': None,
},
}
if single_input:
return build_graph(nodes_attributes,
[
('input1', 'power'),
('power', 'power_data')
])
else:
return build_graph(nodes_attributes,
[
('input1', 'power'),
('input2', 'power'),
('power', 'power_data')
])
def test_power_single_input_infer1(self):
graph = self.create_graph(single_input=True)
graph.graph['layout'] = 'NCHW'
power_node = Node(graph, 'power')
power_node['power'] = 1.0
Power.infer(power_node)
self.assertTrue(np.array_equal(power_node.out_node().shape, power_node.in_node(0).shape))
def test_power_two_input_infer1(self):
graph = self.create_graph(single_input=False)
graph.graph['layout'] = 'NCHW'
power_node = Node(graph, 'power')
Power.infer(power_node)
self.assertTrue(np.array_equal(power_node.out_node().shape, power_node.in_node(0).shape))
def test_power_two_input_infer2(self):
graph = self.create_graph(single_input=False)
power_node = Node(graph, 'power')
input2 = Node(graph, 'input2')
input2.value = np.ones((1, 2, 3))
Power.infer(power_node)
self.assertIsNone(power_node.out_node().shape)
def test_power_two_input_infer3(self):
graph = self.create_graph(single_input=False)
power_node = Node(graph, 'power')
input2 = Node(graph, 'input2')
input2.value = None
Power.infer(power_node)
self.assertIsNone(power_node.out_node().shape)
| [
"mo.ops.power.Power.infer",
"numpy.ones",
"mo.graph.graph.Node",
"mo.utils.unittest.graph.build_graph",
"numpy.array"
] | [((2117, 2137), 'mo.graph.graph.Node', 'Node', (['graph', '"""power"""'], {}), "(graph, 'power')\n", (2121, 2137), False, 'from mo.graph.graph import Node\n'), ((2181, 2204), 'mo.ops.power.Power.infer', 'Power.infer', (['power_node'], {}), '(power_node)\n', (2192, 2204), False, 'from mo.ops.power import Power\n'), ((2462, 2482), 'mo.graph.graph.Node', 'Node', (['graph', '"""power"""'], {}), "(graph, 'power')\n", (2466, 2482), False, 'from mo.graph.graph import Node\n'), ((2492, 2515), 'mo.ops.power.Power.infer', 'Power.infer', (['power_node'], {}), '(power_node)\n', (2503, 2515), False, 'from mo.ops.power import Power\n'), ((2734, 2754), 'mo.graph.graph.Node', 'Node', (['graph', '"""power"""'], {}), "(graph, 'power')\n", (2738, 2754), False, 'from mo.graph.graph import Node\n'), ((2772, 2793), 'mo.graph.graph.Node', 'Node', (['graph', '"""input2"""'], {}), "(graph, 'input2')\n", (2776, 2793), False, 'from mo.graph.graph import Node\n'), ((2817, 2835), 'numpy.ones', 'np.ones', (['(1, 2, 3)'], {}), '((1, 2, 3))\n', (2824, 2835), True, 'import numpy as np\n'), ((2845, 2868), 'mo.ops.power.Power.infer', 'Power.infer', (['power_node'], {}), '(power_node)\n', (2856, 2868), False, 'from mo.ops.power import Power\n'), ((3044, 3064), 'mo.graph.graph.Node', 'Node', (['graph', '"""power"""'], {}), "(graph, 'power')\n", (3048, 3064), False, 'from mo.graph.graph import Node\n'), ((3082, 3103), 'mo.graph.graph.Node', 'Node', (['graph', '"""input2"""'], {}), "(graph, 'input2')\n", (3086, 3103), False, 'from mo.graph.graph import Node\n'), ((3141, 3164), 'mo.ops.power.Power.infer', 'Power.infer', (['power_node'], {}), '(power_node)\n', (3152, 3164), False, 'from mo.ops.power import Power\n'), ((1444, 1521), 'mo.utils.unittest.graph.build_graph', 'build_graph', (['nodes_attributes', "[('input1', 'power'), ('power', 'power_data')]"], {}), "(nodes_attributes, [('input1', 'power'), ('power', 'power_data')])\n", (1455, 1521), False, 'from mo.utils.unittest.graph import 
build_graph\n'), ((1689, 1792), 'mo.utils.unittest.graph.build_graph', 'build_graph', (['nodes_attributes', "[('input1', 'power'), ('input2', 'power'), ('power', 'power_data')]"], {}), "(nodes_attributes, [('input1', 'power'), ('input2', 'power'), (\n 'power', 'power_data')])\n", (1700, 1792), False, 'from mo.utils.unittest.graph import build_graph\n'), ((939, 965), 'numpy.array', 'np.array', (['[1, 3, 224, 224]'], {}), '([1, 3, 224, 224])\n', (947, 965), True, 'import numpy as np\n'), ((1094, 1106), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1102, 1106), True, 'import numpy as np\n'), ((1133, 1146), 'numpy.array', 'np.array', (['(1.0)'], {}), '(1.0)\n', (1141, 1146), True, 'import numpy as np\n'), ((1241, 1267), 'numpy.array', 'np.array', (['[1, 3, 224, 224]'], {}), '([1, 3, 224, 224])\n', (1249, 1267), True, 'import numpy as np\n')] |
import numpy as np
from functools import partial
import scipy
from pyapprox.random_variable_algebra import invert_monotone_function
def value_at_risk(samples, alpha, weights=None, samples_sorted=False):
"""
Compute the value at risk of a variable Y using a set of samples.
Parameters
----------
samples : np.ndarray (num_samples)
Samples of the random variable Y
alpha : integer
The superquantile parameter
weights : np.ndarray (num_samples)
Importance weights associated with each sample. If samples Y are drawn
from biasing distribution g(y) but we wish to compute VaR with respect
to measure f(y) then weights are the ratio f(y_i)/g(y_i) for
i=1,...,num_samples.
Returns
-------
var : float
The value at risk of the random variable Y
"""
assert alpha >= 0 and alpha < 1
assert samples.ndim == 1
num_samples = samples.shape[0]
if weights is None:
weights = np.ones(num_samples)/num_samples
assert np.allclose(weights.sum(), 1)
assert weights.ndim == 1 or weights.shape[1] == 1
assert samples.ndim == 1 or samples.shape[1] == 1
if not samples_sorted:
# TODO only need to find largest k entries. k is determined by
# ecdf>=alpha
II = np.argsort(samples)
xx, ww = samples[II], weights[II]
else:
xx, ww = samples, weights
ecdf = ww.cumsum()
index = np.arange(num_samples)[ecdf >= alpha][0]
VaR = xx[index]
if not samples_sorted:
index = II[index]
# assert samples[index]==VaR
return VaR, index
def conditional_value_at_risk(samples, alpha, weights=None,
samples_sorted=False, return_var=False):
"""
Compute conditional value at risk of a variable Y using a set of samples.
Note accuracy of Monte Carlo Estimate of CVaR is dependent on alpha.
As alpha increases more samples will be needed to achieve a fixed
level of accruracy.
Parameters
----------
samples : np.ndarray (num_samples)
Samples of the random variable Y
alpha : integer
The superquantile parameter
weights : np.ndarray (num_samples)
Importance weights associated with each sample. If samples Y are drawn
from biasing distribution g(y) but we wish to compute VaR with respect
to measure f(y) then weights are the ratio f(y_i)/g(y_i) for
i=1,...,num_samples.
Returns
-------
cvar : float
The conditional value at risk of the random variable Y
"""
assert samples.ndim == 1 or samples.shape[1] == 1
samples = samples.squeeze()
num_samples = samples.shape[0]
if weights is None:
weights = np.ones(num_samples)/num_samples
assert np.allclose(weights.sum(), 1), (weights.sum())
assert weights.ndim == 1 or weights.shape[1] == 1
if not samples_sorted:
II = np.argsort(samples)
xx, ww = samples[II], weights[II]
else:
xx, ww = samples, weights
VaR, index = value_at_risk(xx, alpha, ww, samples_sorted=True)
CVaR = VaR+1/((1-alpha))*np.sum((xx[index+1:]-VaR)*ww[index+1:])
# The above one line can be used instead of the following
# # number of support points above VaR
# n_plus = num_samples-index-1
# if n_plus==0:
# CVaR=VaR
# else:
# # evaluate CDF at VaR
# cdf_at_var = (index+1)/num_samples
# lamda = (cdf_at_var-alpha)/(1-alpha)
# # Compute E[X|X>VaR(beta)]
# CVaR_plus = xx[index+1:].dot(ww[index+1:])/n_plus
# CVaR=lamda*VaR+(1-lamda)*CVaR_plus
if not return_var:
return CVaR
else:
return CVaR, VaR
def cvar_importance_sampling_biasing_density(pdf, function, beta, VaR, tau, x):
"""
Evalute the biasing density used to compute CVaR of the variable
Y=f(X), for some function f, vector X and scalar Y.
The PDF of the biasing density is
q(x) = [ beta/alpha p(x) if f(x)>=VaR
[ (1-beta)/(1-alpha) p(x) otherwise
See https://link.springer.com/article/10.1007/s10287-014-0225-7
Parameters
==========
pdf: callable
The probability density function p(x) of x
function : callable
Call signature f(x), where x is a 1D np.ndarray.
VaR : float
The value-at-risk associated above which to compute conditional value
at risk
tau : float
The quantile of interest. 100*tau% percent of data will fall below this
value
beta: float
Tunable parameter that controls shape of biasing density. As beta=0
all samples will have values above VaR. If beta=tau, then biasing
density will just be density of X p(x).
x : np.ndarray (nsamples)
The samples used to evaluate the biasing density.
Returns
=======
vals: np.ndarray (nsamples)
The values of the biasing density at x
"""
if np.isscalar(x):
x = np.array([[x]])
assert x.ndim == 2
vals = np.atleast_1d(pdf(x))
assert vals.ndim == 1 or vals.shape[1] == 1
y = function(x)
assert y.ndim == 1 or y.shape[1] == 1
II = np.where(y < VaR)[0]
JJ = np.where(y >= VaR)[0]
vals[II] *= beta/tau
vals[JJ] *= (1-beta)/(1-tau)
return vals
def generate_samples_from_cvar_importance_sampling_biasing_density(
function, beta, VaR, generate_candidate_samples, nsamples):
"""
Draw samples from the biasing density used to compute CVaR of the variable
Y=f(X), for some function f, vector X and scalar Y.
The PDF of the biasing density is
q(x) = [ beta/alpha p(x) if f(x)>=VaR
[ (1-beta)/(1-alpha) p(x) otherwise
See https://link.springer.com/article/10.1007/s10287-014-0225-7
Parameters
==========
function : callable
Call signature f(x), where x is a 1D np.ndarray.
beta: float
Tunable parameter that controls shape of biasing density. As beta=0
all samples will have values above VaR. If beta=tau, then biasing
density will just be density of X p(x). Best value of beta is problem
dependent, but 0.2 has often been a reasonable choice.
VaR : float
The value-at-risk associated above which to compute conditional value
at risk
generate_candidate_samples : callable
Function used to draw samples of X from pdf(x)
Callable signature generate_canidate_samples(n) for some integer n
nsamples : integer
The numebr of samples desired
Returns
=======
samples: np.ndarray (nvars,nsamples)
Samples from the biasing density
"""
candidate_samples = generate_candidate_samples(nsamples)
nvars = candidate_samples.shape[0]
samples = np.empty((nvars, nsamples))
r = np.random.uniform(0, 1, nsamples)
Ir = np.where(r < beta)[0]
Jr = np.where(r >= beta)[0]
Icnt = 0
Jcnt = 0
while True:
vals = function(candidate_samples)
assert vals.ndim == 1 or vals.shape[1] == 1
II = np.where(vals < VaR)[0]
JJ = np.where(vals >= VaR)[0]
Iend = min(II.shape[0], Ir.shape[0]-Icnt)
Jend = min(JJ.shape[0], Jr.shape[0]-Jcnt)
samples[:, Ir[Icnt:Icnt+Iend]] = candidate_samples[:, II[:Iend]]
samples[:, Jr[Jcnt:Jcnt+Jend]] = candidate_samples[:, JJ[:Jend]]
Icnt += Iend
Jcnt += Jend
if Icnt == Ir.shape[0] and Jcnt == Jr.shape[0]:
break
candidate_samples = generate_candidate_samples(nsamples)
assert Icnt+Jcnt == nsamples
return samples
def compute_conditional_expectations(
eta, samples, disutility_formulation=True):
r"""
Compute the conditional expectation of :math:`Y`
.. math::
\mathbb{E}\left[\max(0,\eta-Y)\right]
or of :math:`-Y` (disutility form)
.. math::
\mathbb{E}\left[\max(0,Y-\eta)\right]
where \math:`\eta\in Y' in the domain of :math:`Y'
The conditional expectation is convex non-negative and non-decreasing.
Parameters
==========
eta : np.ndarray (num_eta)
values of :math:`\eta`
samples : np.ndarray (nsamples)
The samples of :math:`Y`
disutility_formulation : boolean
True - evaluate \mathbb{E}\left[\max(0,\eta-Y)\right]
False - evaluate \mathbb{E}\left[\max(0,Y-\eta)\right]
Returns
=======
values : np.ndarray (num_eta)
The conditional expectations
"""
assert samples.ndim == 1
assert eta.ndim == 1
if disutility_formulation:
values = np.maximum(
0, samples[:, np.newaxis]+eta[np.newaxis, :]).mean(axis=0)
else:
values = np.maximum(
0, eta[np.newaxis, :]-samples[:, np.newaxis]).mean(axis=0)
return values
def univariate_cdf_continuous_variable(pdf, lb, ub, x, quad_opts={}):
x = np.atleast_1d(x)
assert x.ndim == 1
assert x.min() >= lb and x.max() <= ub
vals = np.empty_like(x, dtype=float)
for jj in range(x.shape[0]):
integral, err = scipy.integrate.quad(pdf, lb, x[jj], **quad_opts)
vals[jj] = integral
if vals[jj] > 1 and vals[jj]-1 < quad_opts.get("epsabs", 1.49e-8):
vals[jj] = 1.
return vals
def univariate_quantile_continuous_variable(pdf, bounds, beta, opt_tol=1e-8,
quad_opts={}):
if quad_opts.get("epsabs", 1.49e-8) > opt_tol:
raise ValueError("epsabs must be smaller than opt_tol")
func = partial(univariate_cdf_continuous_variable,
pdf, bounds[0], bounds[1], quad_opts=quad_opts)
method = 'bisect'
quantile = invert_monotone_function(
func, bounds, np.array([beta]), method, opt_tol)
return quantile
def univariate_cvar_continuous_variable(pdf, bounds, beta, opt_tol=1e-8,
quad_opts={}):
quantile = univariate_quantile_continuous_variable(
pdf, bounds, beta, opt_tol, quad_opts)
def integrand(x): return x*pdf(x)
return 1/(1-beta)*scipy.integrate.quad(
integrand, quantile, bounds[1], **quad_opts)[0]
| [
"numpy.random.uniform",
"functools.partial",
"numpy.sum",
"numpy.maximum",
"scipy.integrate.quad",
"numpy.isscalar",
"numpy.empty",
"numpy.empty_like",
"numpy.ones",
"numpy.argsort",
"numpy.where",
"numpy.array",
"numpy.arange",
"numpy.atleast_1d"
] | [((4958, 4972), 'numpy.isscalar', 'np.isscalar', (['x'], {}), '(x)\n', (4969, 4972), True, 'import numpy as np\n'), ((6786, 6813), 'numpy.empty', 'np.empty', (['(nvars, nsamples)'], {}), '((nvars, nsamples))\n', (6794, 6813), True, 'import numpy as np\n'), ((6822, 6855), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'nsamples'], {}), '(0, 1, nsamples)\n', (6839, 6855), True, 'import numpy as np\n'), ((8876, 8892), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (8889, 8892), True, 'import numpy as np\n'), ((8970, 8999), 'numpy.empty_like', 'np.empty_like', (['x'], {'dtype': 'float'}), '(x, dtype=float)\n', (8983, 8999), True, 'import numpy as np\n'), ((9516, 9611), 'functools.partial', 'partial', (['univariate_cdf_continuous_variable', 'pdf', 'bounds[0]', 'bounds[1]'], {'quad_opts': 'quad_opts'}), '(univariate_cdf_continuous_variable, pdf, bounds[0], bounds[1],\n quad_opts=quad_opts)\n', (9523, 9611), False, 'from functools import partial\n'), ((1305, 1324), 'numpy.argsort', 'np.argsort', (['samples'], {}), '(samples)\n', (1315, 1324), True, 'import numpy as np\n'), ((2931, 2950), 'numpy.argsort', 'np.argsort', (['samples'], {}), '(samples)\n', (2941, 2950), True, 'import numpy as np\n'), ((4986, 5001), 'numpy.array', 'np.array', (['[[x]]'], {}), '([[x]])\n', (4994, 5001), True, 'import numpy as np\n'), ((5177, 5194), 'numpy.where', 'np.where', (['(y < VaR)'], {}), '(y < VaR)\n', (5185, 5194), True, 'import numpy as np\n'), ((5207, 5225), 'numpy.where', 'np.where', (['(y >= VaR)'], {}), '(y >= VaR)\n', (5215, 5225), True, 'import numpy as np\n'), ((6865, 6883), 'numpy.where', 'np.where', (['(r < beta)'], {}), '(r < beta)\n', (6873, 6883), True, 'import numpy as np\n'), ((6896, 6915), 'numpy.where', 'np.where', (['(r >= beta)'], {}), '(r >= beta)\n', (6904, 6915), True, 'import numpy as np\n'), ((9057, 9106), 'scipy.integrate.quad', 'scipy.integrate.quad', (['pdf', 'lb', 'x[jj]'], {}), '(pdf, lb, x[jj], **quad_opts)\n', (9077, 9106), 
False, 'import scipy\n'), ((9712, 9728), 'numpy.array', 'np.array', (['[beta]'], {}), '([beta])\n', (9720, 9728), True, 'import numpy as np\n'), ((990, 1010), 'numpy.ones', 'np.ones', (['num_samples'], {}), '(num_samples)\n', (997, 1010), True, 'import numpy as np\n'), ((1446, 1468), 'numpy.arange', 'np.arange', (['num_samples'], {}), '(num_samples)\n', (1455, 1468), True, 'import numpy as np\n'), ((2746, 2766), 'numpy.ones', 'np.ones', (['num_samples'], {}), '(num_samples)\n', (2753, 2766), True, 'import numpy as np\n'), ((3133, 3180), 'numpy.sum', 'np.sum', (['((xx[index + 1:] - VaR) * ww[index + 1:])'], {}), '((xx[index + 1:] - VaR) * ww[index + 1:])\n', (3139, 3180), True, 'import numpy as np\n'), ((7069, 7089), 'numpy.where', 'np.where', (['(vals < VaR)'], {}), '(vals < VaR)\n', (7077, 7089), True, 'import numpy as np\n'), ((7106, 7127), 'numpy.where', 'np.where', (['(vals >= VaR)'], {}), '(vals >= VaR)\n', (7114, 7127), True, 'import numpy as np\n'), ((10061, 10126), 'scipy.integrate.quad', 'scipy.integrate.quad', (['integrand', 'quantile', 'bounds[1]'], {}), '(integrand, quantile, bounds[1], **quad_opts)\n', (10081, 10126), False, 'import scipy\n'), ((8585, 8643), 'numpy.maximum', 'np.maximum', (['(0)', '(samples[:, np.newaxis] + eta[np.newaxis, :])'], {}), '(0, samples[:, np.newaxis] + eta[np.newaxis, :])\n', (8595, 8643), True, 'import numpy as np\n'), ((8695, 8753), 'numpy.maximum', 'np.maximum', (['(0)', '(eta[np.newaxis, :] - samples[:, np.newaxis])'], {}), '(0, eta[np.newaxis, :] - samples[:, np.newaxis])\n', (8705, 8753), True, 'import numpy as np\n')] |
from typing import Tuple, Union, List
import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.linear_model import LogisticRegression
XY = Tuple[np.ndarray, np.ndarray]
Dataset = Tuple[XY, XY]
LogRegParams = Union[XY, Tuple[np.ndarray]]
XYList = List[XY]
# The get_model_parameters function returns the model parameters. These are found in the coef_ and intercept_ attributes for LogisticRegression .
def get_model_parameters(model):
"""Returns the paramters of a sklearn LogisticRegression model"""
if model.fit_intercept:
params = (model.coef_, model.intercept_)
else:
params = (model.coef_,)
return params
# The set_model_params function sets/updates the model's parameters. Here care needs to be taken to set the parameters using the same order/index in
# which they were returned by get_model_parameters.
def set_model_params(
model: LogisticRegression, params: LogRegParams
) -> LogisticRegression:
"""Sets the parameters of a sklean LogisticRegression model"""
model.coef_ = params[0]
if model.fit_intercept:
model.intercept_ = params[1]
return model
def set_initial_params(model: LogisticRegression):
"""
Sets initial parameters as zeros
"""
n_classes = 11 # threat types
n_features = 33 # Number of features in dataset
model.classes_ = np.array([i for i in range(11)])
model.coef_ = np.zeros((n_classes, n_features))
if model.fit_intercept:
model.intercept_ = np.zeros((n_classes,))
def partition(X: np.ndarray, y: np.ndarray, num_partitions: int) -> XYList: # returns list of Xy (read more about function annotations)
"""Split X and y into a number of partitions."""
sss = StratifiedShuffleSplit(n_splits=num_partitions, test_size=0.001, random_state=0)
for train_index, test_index in sss.split(X, y):
# print("TRAIN:", train_index, "TEST:", test_index)
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
# print(f'Unique classes are before zip.. ', len(np.unique(y_train)))
# print(np.array_split(y_train, num_partitions))
return list(
zip(np.array_split(X_train, num_partitions),
np.array_split(y_train, num_partitions))
)
| [
"sklearn.model_selection.StratifiedShuffleSplit",
"numpy.zeros",
"numpy.array_split"
] | [((1424, 1457), 'numpy.zeros', 'np.zeros', (['(n_classes, n_features)'], {}), '((n_classes, n_features))\n', (1432, 1457), True, 'import numpy as np\n'), ((1737, 1822), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': 'num_partitions', 'test_size': '(0.001)', 'random_state': '(0)'}), '(n_splits=num_partitions, test_size=0.001, random_state=0\n )\n', (1759, 1822), False, 'from sklearn.model_selection import StratifiedShuffleSplit\n'), ((1513, 1535), 'numpy.zeros', 'np.zeros', (['(n_classes,)'], {}), '((n_classes,))\n', (1521, 1535), True, 'import numpy as np\n'), ((2213, 2252), 'numpy.array_split', 'np.array_split', (['X_train', 'num_partitions'], {}), '(X_train, num_partitions)\n', (2227, 2252), True, 'import numpy as np\n'), ((2263, 2302), 'numpy.array_split', 'np.array_split', (['y_train', 'num_partitions'], {}), '(y_train, num_partitions)\n', (2277, 2302), True, 'import numpy as np\n')] |
"""
@file
@brief Helpers to validate python code.
"""
import pickle
import numpy
from scipy.spatial.distance import cdist # pylint: disable=E0611
from scipy.special import expit, erf # pylint: disable=E0611
from scipy.linalg import solve # pylint: disable=E0611
from ...tools.code_helper import make_callable
def _make_callable(fct, obj, code, gl, debug):
"""
Same function as @see fn make_callable but deals with
function which an undefined number of arguments.
"""
def pyrt_Concat_(*inputs, axis=0):
return numpy.concatenate(inputs, axis=axis)
if fct == "pyrt_Concat":
return pyrt_Concat_
return make_callable(fct, obj, code, gl, debug)
def validate_python_inference(oinf, inputs, tolerance=0.):
"""
Validates the code produced by method :meth:`to_python
<mlprodict.onnxrt.onnx_inference_exports.OnnxInferenceExport.to_python>`.
The function compiles and executes the code
given as an argument and compares the results to
what *oinf* returns. This function is mostly used for
unit testing purpose but it is not robust enough
to handle all cases.
@param oinf @see cl OnnxInference
@param inputs inputs as dictionary
@param tolerance discrepencies must be below or equal to
this theshold
The function fails if the expected output are not the same.
"""
from ..ops_cpu.op_argmax import _argmax
from ..ops_cpu.op_argmin import _argmin
from ..ops_cpu.op_celu import _vcelu1
cd = oinf.to_python()
code = cd['onnx_pyrt_main.py']
exp = oinf.run(inputs)
if not isinstance(exp, dict):
raise TypeError("exp is not a dictionary by '{}'.".format(type(exp)))
if len(exp) == 0:
raise ValueError("No result to compare.")
inps = ['{0}={0}'.format(k) for k in sorted(inputs)]
code += "\n".join(['', '', 'opi = OnnxPythonInference()',
'res = opi.run(%s)' % ', '.join(inps)])
cp = compile(code, "<string>", mode='exec')
pyrt_fcts = [_ for _ in cp.co_names if _.startswith("pyrt_")]
fcts_local = {}
gl = {'numpy': numpy, 'pickle': pickle,
'expit': expit, 'erf': erf, 'cdist': cdist,
'_argmax': _argmax, '_argmin': _argmin,
'_vcelu1': _vcelu1, 'solve': solve}
for fct in pyrt_fcts:
for obj in cp.co_consts:
if isinstance(obj, str):
continue
sobj = str(obj)
if '<string>' in sobj and fct in sobj:
fcts_local[fct] = _make_callable(fct, obj, code, gl, False)
gl.update(fcts_local)
loc = inputs
try:
exec(cp, gl, loc) # pylint: disable=W0122
except (NameError, TypeError, SyntaxError) as e:
raise RuntimeError(
"Unable to execute code\n-----\n{}".format(code)) from e
got = loc['res']
keys = list(sorted(exp))
if isinstance(got, numpy.ndarray) and len(keys) == 1:
got = {keys[0]: got}
if not isinstance(got, dict):
raise TypeError("got is not a dictionary by '{}'.".format(type(got)))
if len(got) != len(exp):
raise RuntimeError(
"Different number of results.\nexp: {}\ngot: {}".format(
", ".join(sorted(exp)), ", ".join(sorted(got))))
if keys != list(sorted(got)):
raise RuntimeError(
"Different result names.\nexp: {}\ngot: {}".format(
", ".join(sorted(exp)), ", ".join(sorted(got))))
for k in keys:
e = exp[k]
g = got[k]
if isinstance(e, numpy.ndarray):
if e.shape != g.shape:
raise ValueError(
"Shapes are different {} != {}.".format(e.shape, g.shape))
diff = 0
for a, b in zip(e.ravel(), g.ravel()):
if a == b:
continue
if (isinstance(a, float) and isinstance(b, float) and
numpy.isnan(a) and numpy.isnan(b)):
continue
diff = max(diff, abs(a - b))
if diff > tolerance:
raise ValueError(
"Values are different (max diff={}>{})\n--EXP--\n{}\n--GOT--"
"\n{}\n--\n{}".format(diff, tolerance, e, g, code))
else:
raise NotImplementedError(
"Unable to compare values of type '{}'.".format(type(e)))
| [
"numpy.isnan",
"numpy.concatenate"
] | [((542, 578), 'numpy.concatenate', 'numpy.concatenate', (['inputs'], {'axis': 'axis'}), '(inputs, axis=axis)\n', (559, 578), False, 'import numpy\n'), ((3965, 3979), 'numpy.isnan', 'numpy.isnan', (['a'], {}), '(a)\n', (3976, 3979), False, 'import numpy\n'), ((3984, 3998), 'numpy.isnan', 'numpy.isnan', (['b'], {}), '(b)\n', (3995, 3998), False, 'import numpy\n')] |
import numpy as np
import os
import sys
sys.path.append("../src")
import localmodule
# Define constants.
aug_kinds = ["all", "all-but-noise", "none"]
units = localmodule.get_units()
script_name = "045_evaluate-add-convnet-full-audio.py"
script_path = os.path.join("..", "..", "..", "src", script_name)
n_trials = 10
# Define thresholds.
icassp_thresholds = 1.0 - np.linspace(0.0, 1.0, 201)[:-1]
n_thresholds = len(icassp_thresholds)
# Create folder.
script_dir = script_name[:-3]
os.makedirs(script_dir, exist_ok=True)
sbatch_dir = os.path.join(script_dir, "sbatch")
os.makedirs(sbatch_dir, exist_ok=True)
slurm_dir = os.path.join(script_dir, "slurm")
os.makedirs(slurm_dir, exist_ok=True)
# Loop over kinds of data augmentation.
for aug_kind_str in aug_kinds:
# Loop over test units.
for test_unit_str in units:
# Retrieve fold such that unit_str is in the test set.
folds = localmodule.fold_units()
fold = [f for f in folds if test_unit_str in f[0]][0]
test_units = fold[0]
training_units = fold[1]
validation_units = fold[2]
predict_units = test_units + validation_units
# Loop over prediction units.
for predict_unit_str in predict_units:
# Loop over trials.
for trial_id in range(n_trials):
# Define job name.
job_name = "_".join([
script_name[:3],
"aug-" + aug_kind_str,
"test-" + test_unit_str,
"predict-" + predict_unit_str,
"trial-" + str(trial_id)])
# Define file path.
file_name = job_name + ".sbatch"
file_path = os.path.join(sbatch_dir, file_name)
# Define script path with arguments.
script_list = [
script_path, aug_kind_str,
test_unit_str, predict_unit_str,
str(trial_id)]
script_path_with_args = " ".join(script_list)
# Define slurm path.
slurm_path = os.path.join("..", "slurm",
"slurm_" + job_name + "_%j.out")
# Write sbatch file.
with open(file_path, "w") as f:
f.write("#!/bin/bash\n")
f.write("\n")
f.write("#BATCH --job-name=" + job_name + "\n")
f.write("#SBATCH --nodes=1\n")
f.write("#SBATCH --tasks-per-node=1\n")
f.write("#SBATCH --cpus-per-task=1\n")
f.write("#SBATCH --time=24:00:00\n")
f.write("#SBATCH --mem=1GB\n")
f.write("#SBATCH --output=" + slurm_path + "\n")
f.write("\n")
f.write("module purge\n")
f.write("\n")
f.write("# The first argument is the kind of data augmentation.\n")
f.write("# The second argument is the test unit.\n")
f.write("# The third argument is the prediction unit.\n")
f.write("# The fourth argument is the trial index.\n")
f.write("python " + script_path_with_args)
| [
"sys.path.append",
"os.makedirs",
"localmodule.get_units",
"numpy.linspace",
"localmodule.fold_units",
"os.path.join"
] | [((41, 66), 'sys.path.append', 'sys.path.append', (['"""../src"""'], {}), "('../src')\n", (56, 66), False, 'import sys\n'), ((161, 184), 'localmodule.get_units', 'localmodule.get_units', ([], {}), '()\n', (182, 184), False, 'import localmodule\n'), ((254, 304), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '""".."""', '"""src"""', 'script_name'], {}), "('..', '..', '..', 'src', script_name)\n", (266, 304), False, 'import os\n'), ((487, 525), 'os.makedirs', 'os.makedirs', (['script_dir'], {'exist_ok': '(True)'}), '(script_dir, exist_ok=True)\n', (498, 525), False, 'import os\n'), ((539, 573), 'os.path.join', 'os.path.join', (['script_dir', '"""sbatch"""'], {}), "(script_dir, 'sbatch')\n", (551, 573), False, 'import os\n'), ((574, 612), 'os.makedirs', 'os.makedirs', (['sbatch_dir'], {'exist_ok': '(True)'}), '(sbatch_dir, exist_ok=True)\n', (585, 612), False, 'import os\n'), ((625, 658), 'os.path.join', 'os.path.join', (['script_dir', '"""slurm"""'], {}), "(script_dir, 'slurm')\n", (637, 658), False, 'import os\n'), ((659, 696), 'os.makedirs', 'os.makedirs', (['slurm_dir'], {'exist_ok': '(True)'}), '(slurm_dir, exist_ok=True)\n', (670, 696), False, 'import os\n'), ((368, 394), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(201)'], {}), '(0.0, 1.0, 201)\n', (379, 394), True, 'import numpy as np\n'), ((911, 935), 'localmodule.fold_units', 'localmodule.fold_units', ([], {}), '()\n', (933, 935), False, 'import localmodule\n'), ((1724, 1759), 'os.path.join', 'os.path.join', (['sbatch_dir', 'file_name'], {}), '(sbatch_dir, file_name)\n', (1736, 1759), False, 'import os\n'), ((2110, 2170), 'os.path.join', 'os.path.join', (['""".."""', '"""slurm"""', "('slurm_' + job_name + '_%j.out')"], {}), "('..', 'slurm', 'slurm_' + job_name + '_%j.out')\n", (2122, 2170), False, 'import os\n')] |
import unittest
import numpy as np
from unittest.mock import patch
import a3 as my
tolerance = 0
class TestPolygon(unittest.TestCase):
# P1 is the square and P2 a pentagon
inp = np.array([[1.0, 1.0, 1.0], [1.0, 5.0, 1.0], [5.0, 5.0, 1.0], [5.0, 1.0, 1.0]])
P1 = my.Polygon(inp)
inp2 = np.array([[2.0, 1.0, 1.0], [4.0, 1.0, 1.0], [5.0, 2.0, 1.0], [3.0, 3.0, 1.0], [1.0, 2.0, 1.0]])
P2 = my.Polygon(inp2)
inp3 = np.array([[0.0, 0.0, 1.0], [4.0, 0.0, 1.0], [4.0, 4.0, 1.0], [0.0, 4.0, 1.0]])
P3 = my.Polygon(inp3)
def test_1(self):
user_output = self.P1.rotate(90)
exp_output = (np.array([1.0, 5.0, 5.0, 1.0]), np.array([-1.0, -1.0, -5.0, -5.0]))
np.testing.assert_allclose(exp_output, user_output, atol=tolerance)
def test_2(self):
user_output = self.P1.translate(2, 2)
exp_output = (np.array([3.0, 7.0, 7.0, 3.0]), np.array([1.0, 1.0, -3.0, -3.0]))
np.testing.assert_allclose(exp_output, user_output, atol=tolerance)
def test_3(self):
user_output = self.P1.scale(3, 2)
exp_output = (np.array([-1.0, 11.0, 11.0, -1.0]), np.array([3.0, 3.0, -5.0, -5.0]))
np.testing.assert_allclose(exp_output, user_output, atol=tolerance)
def test_4(self):
user_output = self.P2.scale(-2, -2)
exp_output = (np.array([5.0, 1.0, -1.0, 3.0, 7.0]), np.array([3.4, 3.4, 1.4, -0.6, 1.4]))
np.testing.assert_allclose(exp_output, user_output, atol=tolerance)
def test_5(self):
user_output = self.P2.rotate(-45)
exp_output = (np.array([1.13, -1.7, -1.7, 2.55, 3.96]), np.array([5.94, 3.11, 0.28, 1.7, 5.94]))
np.testing.assert_allclose(exp_output, user_output, atol=tolerance)
def test_6(self):
user_output = self.P2.scale(0.5, 0.3)
exp_output = (np.array([0.99, -0.43, -0.43, 1.7, 2.4]), np.array([4.16, 3.31, 2.46, 2.89, 4.16]))
np.testing.assert_allclose(exp_output, user_output, atol=tolerance)
def test_7(self):
user_output = self.P3.rotate(45, 2, 2)
exp_output = (np.array([-0.83, 2.0, 4.83, 2.0]), np.array([2.0, -0.83, 2.0, 4.83]))
np.testing.assert_allclose(exp_output, user_output, atol=tolerance)
class TestCircle(unittest.TestCase):
C1 = my.Circle(2.0, 2.0, 3.0) # 2,2 is center and 3 is radius
C2 = my.Circle(2.0, 2.0, 3.0) # 2,2 is center and 3 is radius
def test_1(self):
user_output = self.C1.rotate(45)
print(user_output)
exp_output = (2.83, 0.0, 3)
np.testing.assert_allclose(exp_output, user_output, atol=tolerance)
def test_2(self):
user_output = self.C1.scale(0.5)
print(user_output)
exp_output = (2.83, 0.0, 1.5)
np.testing.assert_allclose(exp_output, user_output, atol=tolerance)
def test_3(self):
user_output = self.C1.translate(-3, 3)
print(user_output)
exp_output = (-0.17, 3.0, 1.5)
np.testing.assert_allclose(exp_output, user_output, atol=tolerance)
def test_4(self):
user_output = self.C2.rotate(45, 4, 4)
exp_output = (1.17, 4, 3)
print(user_output)
np.testing.assert_allclose(exp_output, user_output, atol=tolerance)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"a3.Polygon",
"a3.Circle",
"numpy.array",
"numpy.testing.assert_allclose"
] | [((182, 260), 'numpy.array', 'np.array', (['[[1.0, 1.0, 1.0], [1.0, 5.0, 1.0], [5.0, 5.0, 1.0], [5.0, 1.0, 1.0]]'], {}), '([[1.0, 1.0, 1.0], [1.0, 5.0, 1.0], [5.0, 5.0, 1.0], [5.0, 1.0, 1.0]])\n', (190, 260), True, 'import numpy as np\n'), ((267, 282), 'a3.Polygon', 'my.Polygon', (['inp'], {}), '(inp)\n', (277, 282), True, 'import a3 as my\n'), ((291, 391), 'numpy.array', 'np.array', (['[[2.0, 1.0, 1.0], [4.0, 1.0, 1.0], [5.0, 2.0, 1.0], [3.0, 3.0, 1.0], [1.0, \n 2.0, 1.0]]'], {}), '([[2.0, 1.0, 1.0], [4.0, 1.0, 1.0], [5.0, 2.0, 1.0], [3.0, 3.0, 1.0\n ], [1.0, 2.0, 1.0]])\n', (299, 391), True, 'import numpy as np\n'), ((393, 409), 'a3.Polygon', 'my.Polygon', (['inp2'], {}), '(inp2)\n', (403, 409), True, 'import a3 as my\n'), ((418, 496), 'numpy.array', 'np.array', (['[[0.0, 0.0, 1.0], [4.0, 0.0, 1.0], [4.0, 4.0, 1.0], [0.0, 4.0, 1.0]]'], {}), '([[0.0, 0.0, 1.0], [4.0, 0.0, 1.0], [4.0, 4.0, 1.0], [0.0, 4.0, 1.0]])\n', (426, 496), True, 'import numpy as np\n'), ((503, 519), 'a3.Polygon', 'my.Polygon', (['inp3'], {}), '(inp3)\n', (513, 519), True, 'import a3 as my\n'), ((2097, 2121), 'a3.Circle', 'my.Circle', (['(2.0)', '(2.0)', '(3.0)'], {}), '(2.0, 2.0, 3.0)\n', (2106, 2121), True, 'import a3 as my\n'), ((2160, 2184), 'a3.Circle', 'my.Circle', (['(2.0)', '(2.0)', '(3.0)'], {}), '(2.0, 2.0, 3.0)\n', (2169, 2184), True, 'import a3 as my\n'), ((2974, 2989), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2987, 2989), False, 'import unittest\n'), ((665, 732), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['exp_output', 'user_output'], {'atol': 'tolerance'}), '(exp_output, user_output, atol=tolerance)\n', (691, 732), True, 'import numpy as np\n'), ((878, 945), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['exp_output', 'user_output'], {'atol': 'tolerance'}), '(exp_output, user_output, atol=tolerance)\n', (904, 945), True, 'import numpy as np\n'), ((1091, 1158), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', 
(['exp_output', 'user_output'], {'atol': 'tolerance'}), '(exp_output, user_output, atol=tolerance)\n', (1117, 1158), True, 'import numpy as np\n'), ((1311, 1378), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['exp_output', 'user_output'], {'atol': 'tolerance'}), '(exp_output, user_output, atol=tolerance)\n', (1337, 1378), True, 'import numpy as np\n'), ((1536, 1603), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['exp_output', 'user_output'], {'atol': 'tolerance'}), '(exp_output, user_output, atol=tolerance)\n', (1562, 1603), True, 'import numpy as np\n'), ((1766, 1833), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['exp_output', 'user_output'], {'atol': 'tolerance'}), '(exp_output, user_output, atol=tolerance)\n', (1792, 1833), True, 'import numpy as np\n'), ((1984, 2051), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['exp_output', 'user_output'], {'atol': 'tolerance'}), '(exp_output, user_output, atol=tolerance)\n', (2010, 2051), True, 'import numpy as np\n'), ((2329, 2396), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['exp_output', 'user_output'], {'atol': 'tolerance'}), '(exp_output, user_output, atol=tolerance)\n', (2355, 2396), True, 'import numpy as np\n'), ((2508, 2575), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['exp_output', 'user_output'], {'atol': 'tolerance'}), '(exp_output, user_output, atol=tolerance)\n', (2534, 2575), True, 'import numpy as np\n'), ((2694, 2761), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['exp_output', 'user_output'], {'atol': 'tolerance'}), '(exp_output, user_output, atol=tolerance)\n', (2720, 2761), True, 'import numpy as np\n'), ((2877, 2944), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['exp_output', 'user_output'], {'atol': 'tolerance'}), '(exp_output, user_output, atol=tolerance)\n', (2903, 2944), True, 'import numpy as np\n'), ((595, 625), 'numpy.array', 'np.array', 
(['[1.0, 5.0, 5.0, 1.0]'], {}), '([1.0, 5.0, 5.0, 1.0])\n', (603, 625), True, 'import numpy as np\n'), ((627, 661), 'numpy.array', 'np.array', (['[-1.0, -1.0, -5.0, -5.0]'], {}), '([-1.0, -1.0, -5.0, -5.0])\n', (635, 661), True, 'import numpy as np\n'), ((810, 840), 'numpy.array', 'np.array', (['[3.0, 7.0, 7.0, 3.0]'], {}), '([3.0, 7.0, 7.0, 3.0])\n', (818, 840), True, 'import numpy as np\n'), ((842, 874), 'numpy.array', 'np.array', (['[1.0, 1.0, -3.0, -3.0]'], {}), '([1.0, 1.0, -3.0, -3.0])\n', (850, 874), True, 'import numpy as np\n'), ((1019, 1053), 'numpy.array', 'np.array', (['[-1.0, 11.0, 11.0, -1.0]'], {}), '([-1.0, 11.0, 11.0, -1.0])\n', (1027, 1053), True, 'import numpy as np\n'), ((1055, 1087), 'numpy.array', 'np.array', (['[3.0, 3.0, -5.0, -5.0]'], {}), '([3.0, 3.0, -5.0, -5.0])\n', (1063, 1087), True, 'import numpy as np\n'), ((1233, 1269), 'numpy.array', 'np.array', (['[5.0, 1.0, -1.0, 3.0, 7.0]'], {}), '([5.0, 1.0, -1.0, 3.0, 7.0])\n', (1241, 1269), True, 'import numpy as np\n'), ((1271, 1307), 'numpy.array', 'np.array', (['[3.4, 3.4, 1.4, -0.6, 1.4]'], {}), '([3.4, 3.4, 1.4, -0.6, 1.4])\n', (1279, 1307), True, 'import numpy as np\n'), ((1451, 1491), 'numpy.array', 'np.array', (['[1.13, -1.7, -1.7, 2.55, 3.96]'], {}), '([1.13, -1.7, -1.7, 2.55, 3.96])\n', (1459, 1491), True, 'import numpy as np\n'), ((1493, 1532), 'numpy.array', 'np.array', (['[5.94, 3.11, 0.28, 1.7, 5.94]'], {}), '([5.94, 3.11, 0.28, 1.7, 5.94])\n', (1501, 1532), True, 'import numpy as np\n'), ((1680, 1720), 'numpy.array', 'np.array', (['[0.99, -0.43, -0.43, 1.7, 2.4]'], {}), '([0.99, -0.43, -0.43, 1.7, 2.4])\n', (1688, 1720), True, 'import numpy as np\n'), ((1722, 1762), 'numpy.array', 'np.array', (['[4.16, 3.31, 2.46, 2.89, 4.16]'], {}), '([4.16, 3.31, 2.46, 2.89, 4.16])\n', (1730, 1762), True, 'import numpy as np\n'), ((1912, 1945), 'numpy.array', 'np.array', (['[-0.83, 2.0, 4.83, 2.0]'], {}), '([-0.83, 2.0, 4.83, 2.0])\n', (1920, 1945), True, 'import numpy as np\n'), ((1947, 
1980), 'numpy.array', 'np.array', (['[2.0, -0.83, 2.0, 4.83]'], {}), '([2.0, -0.83, 2.0, 4.83])\n', (1955, 1980), True, 'import numpy as np\n')] |
import numpy as np
import random
class ReplayMemory:
def __init__(self, memory_size=5000):
self.tree = SumTree(memory_size)
self.alpha = 0.6
def add(self, error, observation):
priority = self._get_priority(error)
self.tree.add(priority, observation)
def sample(self, quantity):
samples = list()
segment = self.tree.total() / quantity
for i in range(quantity):
s = random.uniform(a=segment * i, b=segment * (i + 1))
index, priority, observation = self.tree.get(s)
samples.append((index, observation))
return samples
def update(self, index, error):
priority = self._get_priority(error)
self.tree.update(index, priority)
def _get_priority(self, error):
return (error + 0.01) ** self.alpha
class SumTree:
def __init__(self, size=5000):
self.position = 0
self.size = size
self.tree = np.zeros(2 * size - 1)
self.data = np.zeros(size, dtype=object)
def total(self):
return self.tree[0]
def add(self, priority, data):
index = self.position + self.size - 1
self.data[self.position] = data
self.update(index, priority)
self.position += 1
if self.position >= self.size:
self.position = 0
def update(self, index, priority):
delta = priority - self.tree[index]
self.tree[index] = priority
self._propagate(index, delta)
def get(self, s):
index = self._retrieve(0, s)
data_index = index - self.size + 1
return index, self.tree[index], self.data[data_index]
def _propagate(self, index, delta):
parent = (index - 1) // 2
self.tree[parent] += delta
if parent != 0:
self._propagate(parent, delta)
def _retrieve(self, index, s):
left = 2 * index + 1
right = left + 1
if left >= len(self.tree):
return index
if s <= self.tree[left]:
return self._retrieve(left, s)
else:
return self._retrieve(right, s-self.tree[left])
| [
"numpy.zeros",
"random.uniform"
] | [((963, 985), 'numpy.zeros', 'np.zeros', (['(2 * size - 1)'], {}), '(2 * size - 1)\n', (971, 985), True, 'import numpy as np\n'), ((1006, 1034), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'object'}), '(size, dtype=object)\n', (1014, 1034), True, 'import numpy as np\n'), ((449, 499), 'random.uniform', 'random.uniform', ([], {'a': '(segment * i)', 'b': '(segment * (i + 1))'}), '(a=segment * i, b=segment * (i + 1))\n', (463, 499), False, 'import random\n')] |
#!/bin/python
# -*- coding: utf-8 -*-
import os
import numpy as np
import pandas as pd
from .stats import gfevd, mbcs_index, nhd, mdd
from .mcmc import mcmc
from .filtering import *
from .tools import *
from .mpile import *
from .estimation import *
import emcwrap as ew
class DSGE_RAW(dict):
pass
def vix(self, variables, dontfail=False):
"""Returns the indices of a list of variables"""
if isinstance(variables, str):
variables = [variables]
res = []
for v in variables:
try:
res.append(list(self.vv).index(v))
except ValueError:
if not dontfail:
raise
return res
def oix(self, observables):
"""Returns the indices of a list of observables"""
if isinstance(observables, str):
observables = [observables]
return [list(self.observables).index(v) for v in observables]
@property
def get_tune(self):
if hasattr(self, "tune"):
return self.tune
else:
return self.fdict["tune"]
def get_chain(
self,
get_acceptance_fraction=False,
get_log_prob=False,
backend_file=None,
flat=None,
):
if not backend_file:
if hasattr(self, "sampler"):
reader = self.sampler
elif "backend_file" in self.fdict.keys():
backend_file = str(self.fdict["backend_file"])
else:
backend_file = os.path.join(self.path, self.name + "_sampler.h5")
if backend_file:
if not os.path.exists(backend_file):
raise NameError(
"A backend file named `%s` could not be found." % backend_file
)
import emcee
reader = emcee.backends.HDFBackend(backend_file, read_only=True)
if get_acceptance_fraction:
try:
return reader.acceptance_fraction
except:
return reader.accepted / reader.iteration
if get_log_prob:
return reader.get_log_prob(flat=flat)
chain = reader.get_chain(flat=flat)
return chain
def get_log_prob(self, **args):
"""Get the log likelihoods in the chain"""
# just a wrapper
return get_chain(self, get_log_prob=True, **args)
def write_yaml(self, filename):
if filename[-5:] != ".yaml":
filename = filename + ".yaml"
f = open(filename, "w+")
f.write(self.raw_yaml)
f.close()
print("Model written to '%s.'" % filename)
return
def save_meta(self, filename=None, verbose=True):
import os
filename = filename or os.path.join(self.path, self.name + "_meta")
objs = "description", "backend_file", "tune", "name"
for o in objs:
if hasattr(self, o):
exec("self.fdict[o] = self." + str(o))
if hasattr(self, "filter"):
try:
self.fdict["filter_R"] = self.filter.R
self.fdict["filter_P"] = self.filter.P
except:
pass
np.savez_compressed(filename, **self.fdict)
if verbose:
print("[save_meta:]".ljust(15, " ") +
" Metadata saved as '%s'" % filename)
return
def save_rdict(self, rdict, path=None, suffix="", verbose=True):
"""Save dictionary of results
The idea is to keep meta data (the model, setup, ...) and the results obtained (chains, smoothed residuals, ...) separate.
"""
if not isinstance(path, str):
path = self.name + "_res"
if path[-4:] == ".npz":
path = path[:-4]
if not os.path.isabs(path):
path = os.path.join(self.path, path)
np.savez_compressed(path + suffix, **rdict)
if verbose:
print(
"[save_rdict:]".ljust(15, " ") +
" Results saved as '%s'" % (path + suffix)
)
return
def load_rdict(self, path=None, suffix=""):
"""Load stored dictionary of results
The idea is to keep meta data (the model, setup, ...) and the results obtained (chains, smoothed residuals, ...) separate. `save_rdict` suggests some standard conventions.
"""
if path is None:
path = self.name + "_res"
path += suffix
if path[-4] != ".npz":
path += ".npz"
if not os.path.isabs(path):
path = os.path.join(self.path, path)
return dict(np.load(path, allow_pickle=True))
def traceplot_m(self, chain=None, prior_names=None, **args):
if chain is None:
chain = self.get_chain()
args["tune"] = self.get_tune
if prior_names is None:
prior_names = self.fdict["prior_names"]
return ew.traceplot(chain, varnames=prior_names, **args)
def posteriorplot_m(self, **args):
tune = self.get_tune
return ew.posteriorplot(
self.get_chain(), varnames=self.fdict["prior_names"], tune=tune, **args
)
def mcmc_summary(
self,
chain=None,
tune=None,
**args
):
try:
chain = self.get_chain() if chain is None else chain
except AttributeError:
raise AttributeError("[summary:]".ljust(
15, " ") + "No chain to be found...")
tune = tune or self.get_tune
lprobs = self.get_log_prob()
transformed_chain = self.bptrans(
chain[-tune:]) if self.bptrans else chain[-tune:]
return ew.mcmc_summary(transformed_chain, lprobs[-tune:], priors=self.prior, acceptance_fraction=self.get_chain(get_acceptance_fraction=True), **args)
def posterior2csv(self, path=None, tune=None, **args):
tune = tune or self.get_tune
path = path or os.path.join(self.path, self.name + "_posterior.csv")
chain = self.get_chain()
post = chain[-tune:].reshape(-1, chain.shape[-1])
vd = pd.DataFrame(post.T, index=self.prior_names)
vd.to_csv(path)
return
def info_m(self, verbose=True, **args):
try:
name = self.name
except AttributeError:
name = self.fdict["name"]
try:
description = self.description
except AttributeError:
description = self.fdict["description"]
try:
dtime = str(self.fdict["datetime"])
except KeyError:
dtime = ""
res = "Title: %s\n" % name
res += "Date: %s\n" % dtime if dtime else ""
res += "Description: %s\n" % description
try:
cshp = self.get_chain().shape
tune = self.get_tune
res += "Parameters: %s\n" % cshp[2]
res += "Chains: %s\n" % cshp[1]
res += "Last %s of %s samples\n" % (tune, cshp[0])
except (AttributeError, KeyError):
pass
if verbose:
print(res)
return res
def load_data(self, df, start=None, end=None):
"""Load and prepare data
...
This function takes a provided `pandas.DataFrame`, reads out the observables as they are defined in the YAML-file, and ajusts it regarding the `start` and `end` keywords. Using a `pandas.DatetimeIndex` as index of the DataFrame is strongly encuraged as it can be very powerful, but not necessary.
Parameters
----------
df : pandas.DataFrame
start : index (optional)
end : index (optional)
Returns
-------
pandas.DataFrame
"""
import cloudpickle as cpickle
if not isinstance(df, pd.DataFrame):
raise TypeError("Type of input data must be a `pandas.DataFrame`.")
if self is not None:
for o in self.observables:
if str(o) not in df.keys():
raise KeyError("%s is not in the data!" % o)
d = df[self.observables]
if start is not None:
start = str(start)
if end is not None:
end = str(end)
d = d.loc[start:end]
if np.any(d.isna()):
raise Exception("Data must not contain `NaN`s.")
self.data = d
self.fdict["data"] = cpickle.dumps(d, protocol=4)
self.fdict["obs"] = self.observables
return d
def get_sample(self, size, chain=None):
"""Get a (preferably recent) sample from the chain"""
chain = None or self.get_chain()
clen, nwalks, npar = chain.shape
recent = int(np.ceil(60 / nwalks))
if recent > clen:
raise Exception("Requested sample size is larger than chain")
sample = chain[:, -recent, :].reshape(-1, npar)
res = np.random.choice(np.arange(recent * nwalks), size, False)
return sample[res]
DSGE_RAW.vix = vix
DSGE_RAW.oix = oix
DSGE_RAW.get_tune = get_tune
DSGE_RAW.save = save_meta
DSGE_RAW.mapper = mapper
DSGE_RAW.mcmc_summary = mcmc_summary
DSGE_RAW.info = info_m
DSGE_RAW.mdd = mdd
DSGE_RAW.get_data = load_data
DSGE_RAW.load_data = load_data
DSGE_RAW.get_sample = get_sample
DSGE_RAW.create_pool = create_pool
DSGE_RAW.posterior2csv = posterior2csv
# from mpile
DSGE_RAW.prior_sampler = prior_sampler
DSGE_RAW.get_par = get_par
DSGE_RAW.gp = get_par
DSGE_RAW.get_cov = get_cov
DSGE_RAW.set_par = set_par
DSGE_RAW.box_check = box_check
# from tools
DSGE_RAW.t_func = t_func
DSGE_RAW.o_func = o_func
DSGE_RAW.irfs = irfs
DSGE_RAW.simulate = simulate
DSGE_RAW.shock2state = shock2state
DSGE_RAW.obs = o_func
DSGE_RAW.get_eps_lin = get_eps_lin
DSGE_RAW.k_map = k_map
DSGE_RAW.traj = traj
# from mcmc
DSGE_RAW.mcmc = mcmc
# from estimation
DSGE_RAW.prep_estim = prep_estim
DSGE_RAW.load_estim = prep_estim
# DSGE_RAW.lprob = lprob
# from filter
DSGE_RAW.create_filter = create_filter
DSGE_RAW.get_p_init_lyapunov = get_p_init_lyapunov
DSGE_RAW.run_filter = run_filter
DSGE_RAW.get_ll = get_ll
# from plot
DSGE_RAW.traceplot = traceplot_m
DSGE_RAW.posteriorplot = posteriorplot_m
# from misc
DSGE_RAW.get_chain = get_chain
DSGE_RAW.get_log_prob = get_log_prob
DSGE_RAW.extract = extract
DSGE_RAW.create_obs_cov = create_obs_cov
DSGE_RAW.mask = mask
DSGE_RAW.load_rdict = load_rdict
DSGE_RAW.save_rdict = save_rdict
DSGE_RAW.gfevd = gfevd
DSGE_RAW.mbcs_index = mbcs_index
DSGE_RAW.nhd = nhd
| [
"pandas.DataFrame",
"os.path.isabs",
"numpy.load",
"numpy.ceil",
"cloudpickle.dumps",
"emcee.backends.HDFBackend",
"os.path.exists",
"numpy.savez_compressed",
"numpy.arange",
"emcwrap.traceplot",
"os.path.join"
] | [((2897, 2940), 'numpy.savez_compressed', 'np.savez_compressed', (['filename'], {}), '(filename, **self.fdict)\n', (2916, 2940), True, 'import numpy as np\n'), ((3511, 3554), 'numpy.savez_compressed', 'np.savez_compressed', (['(path + suffix)'], {}), '(path + suffix, **rdict)\n', (3530, 3554), True, 'import numpy as np\n'), ((4481, 4530), 'emcwrap.traceplot', 'ew.traceplot', (['chain'], {'varnames': 'prior_names'}), '(chain, varnames=prior_names, **args)\n', (4493, 4530), True, 'import emcwrap as ew\n'), ((5559, 5603), 'pandas.DataFrame', 'pd.DataFrame', (['post.T'], {'index': 'self.prior_names'}), '(post.T, index=self.prior_names)\n', (5571, 5603), True, 'import pandas as pd\n'), ((7601, 7629), 'cloudpickle.dumps', 'cpickle.dumps', (['d'], {'protocol': '(4)'}), '(d, protocol=4)\n', (7614, 7629), True, 'import cloudpickle as cpickle\n'), ((1674, 1729), 'emcee.backends.HDFBackend', 'emcee.backends.HDFBackend', (['backend_file'], {'read_only': '(True)'}), '(backend_file, read_only=True)\n', (1699, 1729), False, 'import emcee\n'), ((2508, 2552), 'os.path.join', 'os.path.join', (['self.path', "(self.name + '_meta')"], {}), "(self.path, self.name + '_meta')\n", (2520, 2552), False, 'import os\n'), ((3440, 3459), 'os.path.isabs', 'os.path.isabs', (['path'], {}), '(path)\n', (3453, 3459), False, 'import os\n'), ((3476, 3505), 'os.path.join', 'os.path.join', (['self.path', 'path'], {}), '(self.path, path)\n', (3488, 3505), False, 'import os\n'), ((4119, 4138), 'os.path.isabs', 'os.path.isabs', (['path'], {}), '(path)\n', (4132, 4138), False, 'import os\n'), ((4155, 4184), 'os.path.join', 'os.path.join', (['self.path', 'path'], {}), '(self.path, path)\n', (4167, 4184), False, 'import os\n'), ((4202, 4234), 'numpy.load', 'np.load', (['path'], {'allow_pickle': '(True)'}), '(path, allow_pickle=True)\n', (4209, 4234), True, 'import numpy as np\n'), ((5411, 5464), 'os.path.join', 'os.path.join', (['self.path', "(self.name + '_posterior.csv')"], {}), "(self.path, self.name + 
'_posterior.csv')\n", (5423, 5464), False, 'import os\n'), ((7877, 7897), 'numpy.ceil', 'np.ceil', (['(60 / nwalks)'], {}), '(60 / nwalks)\n', (7884, 7897), True, 'import numpy as np\n'), ((8072, 8098), 'numpy.arange', 'np.arange', (['(recent * nwalks)'], {}), '(recent * nwalks)\n', (8081, 8098), True, 'import numpy as np\n'), ((1482, 1510), 'os.path.exists', 'os.path.exists', (['backend_file'], {}), '(backend_file)\n', (1496, 1510), False, 'import os\n'), ((1394, 1444), 'os.path.join', 'os.path.join', (['self.path', "(self.name + '_sampler.h5')"], {}), "(self.path, self.name + '_sampler.h5')\n", (1406, 1444), False, 'import os\n')] |
import numpy as np
import os
class DataReaderRL:
    """Reads raw chart-state files and per-day return files for the RL pipeline.

    File layouts (as consumed by the two readers below):
      * X files: companies separated by a line containing only 'F', days within
        a company separated by a line containing only 'E', matrix rows on
        separate lines with space-separated integer values.
      * Y files: one float per line, N * Days lines in total.
    """

    def __init__(self):
        # Kept for backward compatibility; not used by the readers themselves.
        self.a = 0

    def get_filelist(self, rootpath):
        """Return the sorted list of '<rootpath><country>/<date>/' directories.

        Parameters
        ----------
        rootpath : str
            Directory containing one subdirectory per country, each of which
            contains one subdirectory per date interval.
        """
        pathlist = list()
        # Two-level walk: rootpath/<country>/<date>/
        country_dirs = [rootpath + str(name) + '/' for name in os.listdir(rootpath)]
        for country_dir in country_dirs:
            for date_dir in os.listdir(country_dir):
                pathlist.append(country_dir + date_dir + '/')
        pathlist.sort()
        print('Number of all dates intervals : ', len(pathlist))
        return pathlist

    def readRaw_generate_X(self, filepath, height, width):
        """Parse a raw chart file into per-company sequences of input matrices.

        Returns a list of N company entries; each entry is a list of `Days`
        numpy arrays of shape (height, width) holding the chart-image pixels.
        """
        # Fixed: the file handle was previously opened and never closed.
        with open(filepath, 'r') as f:
            rawdata = f.read()
        companies = rawdata.split('\nF\n')
        DataX = list()
        # The text after the final 'F' separator is not a company record.
        N = len(companies) - 1
        Days = len(companies[0].split('\nE\n'))
        for c in range(N):
            state_seq = companies[c].split('\nE\n')
            # Matrix sequence (one per day) for company c.
            matrix_seq = list()
            for t in range(Days):
                matrix = np.zeros((height, width))
                rows = state_seq[t].split('\n')
                for r in range(height):
                    tokens = rows[r].split(' ')
                    # Only the first `width` tokens of each row are used.
                    matrix[r, :] = [int(tok) for tok in tokens[:width]]
                matrix_seq.append(matrix)
            DataX.append(matrix_seq)
        return DataX

    def readRaw_generate_Y(self, filepath, N, Days):
        """Parse per-day price changes into N sequences of Days floats each.

        Prints a warning (but still proceeds, matching prior behavior) when
        the line count does not match N * Days.
        """
        # Fixed: the file handle was previously opened and never closed.
        with open(filepath, 'r') as f:
            rawdata = f.read()
        lines = rawdata.split('\n')
        DataY = list()
        # The trailing newline produces one empty final element; ignore it.
        if (len(lines) - 1) != (N * Days):
            print('number of input data is invalid')
        cnt = 0
        for c in range(N):
            return_seq = list()
            for t in range(Days):
                return_seq.append(float(lines[cnt]))
                cnt = cnt + 1
            DataY.append(return_seq)
        return DataY
| [
"numpy.zeros",
"os.listdir"
] | [((182, 202), 'os.listdir', 'os.listdir', (['rootpath'], {}), '(rootpath)\n', (192, 202), False, 'import os\n'), ((387, 409), 'os.listdir', 'os.listdir', (['country[i]'], {}), '(country[i])\n', (397, 409), False, 'import os\n'), ((1282, 1307), 'numpy.zeros', 'np.zeros', (['(height, width)'], {}), '((height, width))\n', (1290, 1307), True, 'import numpy as np\n')] |
"""
Module implementing pixel-based classifier
"""
import numpy as np
from lightgbm import Booster
class PixelClassifier:
    """
    Pixel classifier extends a receptive field of a classifier over an entire image.
    The classifier's receptive field is in case of PixelClassifier a pixel (i.e, it
    has dimension of (1,1))
    Pixel classifier divides the image into individual pixels, runs classifier over
    them, and finally produces a classification mask of the same size as image.
    The classifier can be of a type that is explicitly supported (e.g. lightgbm.Booster) or of any type as long as
    it has the following two methods implemented:
        - predict(X)
        - predict_proba(X)
    This is true for all classifiers that follow scikit-learn's API.
    The APIs of scikit-learn's objects is described
    at: http://scikit-learn.org/stable/developers/contributing.html#apis-of-scikit-learn-objects.
    """
    # pylint: disable=invalid-name
    def __init__(self, classifier):
        """
        :param classifier: An instance of trained classifier that will be executed over an entire image
        :type classifier: Booster or object that implements methods predict and predict_proba
        """
        self._check_classifier(classifier)
        self.classifier = classifier

    @staticmethod
    def _check_classifier(classifier):
        """
        Checks if the classifier is of correct type or if it implements predict and predict_proba methods
        """
        if isinstance(classifier, Booster):
            return
        # Duck-typed classifiers must expose both scikit-learn-style methods.
        for required in ('predict', 'predict_proba'):
            if not callable(getattr(classifier, required, None)):
                raise ValueError('Classifier does not have a {} method!'.format(required))

    @staticmethod
    def extract_pixels(X):
        """ Extracts pixels from array X
        :param X: Array of images to be classified.
        :type X: numpy array, shape = [n_images, n_pixels_y, n_pixels_x, n_bands]
        :return: Reshaped 2D array
        :rtype: numpy array, [n_samples*n_pixels_y*n_pixels_x,n_bands]
        :raises: ValueError is input array has wrong dimensions
        """
        if len(X.shape) != 4:
            raise ValueError('Array of input images has to be a 4-dimensional array of shape'
                             '[n_images, n_pixels_y, n_pixels_x, n_bands]')
        n_images, n_y, n_x, n_bands = X.shape
        return X.reshape(n_images * n_y * n_x, n_bands)

    def image_predict(self, X, **kwargs):
        """
        Predicts class labels for the entire image.
        :param X: Array of images to be classified.
        :type X: numpy array, shape = [n_images, n_pixels_y, n_pixels_x, n_bands]
        :param kwargs: Any keyword arguments that will be passed to the classifier's prediction method
        :return: raster classification map
        :rtype: numpy array, [n_samples, n_pixels_y, n_pixels_x]
        """
        pixels = self.extract_pixels(X)
        if isinstance(self.classifier, Booster):
            raise NotImplementedError('An instance of lightgbm.Booster can only return prediction probabilities, '
                                      'use PixelClassifier.image_predict_proba instead')
        labels = self.classifier.predict(pixels, **kwargs)
        # Restore the spatial layout of the input batch.
        return labels.reshape(X.shape[:3])

    def image_predict_proba(self, X, **kwargs):
        """
        Predicts class probabilities for the entire image.
        :param X: Array of images to be classified.
        :type X: numpy array, shape = [n_images, n_pixels_y, n_pixels_x, n_bands]
        :param kwargs: Any keyword arguments that will be passed to the classifier's prediction method
        :return: classification probability map
        :rtype: numpy array, [n_samples, n_pixels_y, n_pixels_x]
        """
        pixels = self.extract_pixels(X)
        if isinstance(self.classifier, Booster):
            # Booster returns only the positive-class probability; build the
            # two-column [P(neg), P(pos)] matrix expected downstream.
            positive = self.classifier.predict(pixels, **kwargs)
            probabilities = np.column_stack((1. - positive, positive))
        else:
            probabilities = self.classifier.predict_proba(pixels, **kwargs)
        return probabilities.reshape(X.shape[0], X.shape[1], X.shape[2], probabilities.shape[1])
| [
"numpy.vstack"
] | [((4215, 4262), 'numpy.vstack', 'np.vstack', (['(1.0 - probabilities, probabilities)'], {}), '((1.0 - probabilities, probabilities))\n', (4224, 4262), True, 'import numpy as np\n')] |
import os
import dataset
import engine
import torch
import pandas as pd
import numpy as np
import random
import config
from tqdm import tqdm
from model import TransforomerModel
import warnings
warnings.filterwarnings('ignore')
from sklearn.model_selection import StratifiedKFold
from sklearn import metrics
from transformers import AdamW
from transformers import get_linear_schedule_with_warmup
from transformers import logging
logging.set_verbosity_error()
def run(df_train, df_val, max_len, task, transformer, batch_size, drop_out, lr, df_results):
    """Train a transformer classifier on one fold and log per-epoch metrics.

    Trains for config.EPOCHS epochs on df_train, evaluates on df_val after
    each epoch, appends one metrics row per epoch to df_results and returns
    the extended DataFrame.
    """
    def _loader(frame, workers):
        # Build a DataLoader over the processed text / task labels of `frame`.
        data = dataset.TransformerDataset(
            text=frame[config.DATASET_TEXT_PROCESSED].values,
            target=frame[task].values,
            max_len=max_len,
            transformer=transformer
        )
        return torch.utils.data.DataLoader(
            dataset=data,
            batch_size=batch_size,
            num_workers=workers
        )

    train_data_loader = _loader(df_train, config.TRAIN_WORKERS)
    val_data_loader = _loader(df_val, config.VAL_WORKERS)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = TransforomerModel(transformer, drop_out, number_of_classes=df_train[task].max() + 1)
    model.to(device)

    # Weight decay is disabled for biases and LayerNorm parameters.
    no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
    named_params = list(model.named_parameters())
    decayed = [p for n, p in named_params if not any(nd in n for nd in no_decay)]
    undecayed = [p for n, p in named_params if any(nd in n for nd in no_decay)]
    optimizer = AdamW(
        [{"params": decayed, "weight_decay": 0.001},
         {"params": undecayed, "weight_decay": 0.0}],
        lr=lr
    )
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=0,
        num_training_steps=int(len(df_train) / batch_size * config.EPOCHS)
    )

    for epoch in range(1, config.EPOCHS + 1):
        pred_train, targ_train, loss_train = engine.train_fn(train_data_loader, model, optimizer, device, scheduler)
        f1_train = metrics.f1_score(targ_train, pred_train, average='macro')
        acc_train = metrics.accuracy_score(targ_train, pred_train)

        pred_val, targ_val, loss_val = engine.eval_fn(val_data_loader, model, device)
        f1_val = metrics.f1_score(targ_val, pred_val, average='macro')
        acc_val = metrics.accuracy_score(targ_val, pred_val)

        row = pd.DataFrame({'task': task,
                           'epoch': epoch,
                           'transformer': transformer,
                           'max_len': max_len,
                           'batch_size': batch_size,
                           'lr': lr,
                           'dropout': drop_out,
                           'accuracy_train': acc_train,
                           'f1-macro_train': f1_train,
                           'loss_train': loss_train,
                           'accuracy_val': acc_val,
                           'f1-macro_val': f1_val,
                           'loss_val': loss_val
                           }, index=[0])
        df_results = pd.concat([df_results, row], ignore_index=True)

        tqdm.write("Epoch {}/{} f1-macro_training = {:.3f} accuracy_training = {:.3f} loss_training = {:.3f} f1-macro_val = {:.3f} accuracy_val = {:.3f} loss_val = {:.3f}".format(epoch, config.EPOCHS, f1_train, acc_train, loss_train, f1_val, acc_val, loss_val))

    return df_results
if __name__ == "__main__":
    # BUG FIX: DOMAIN_GRID_SEARCH was previously an undefined name, causing a
    # NameError when saving results at the end of the sweep.
    # NOTE(review): confirm whether this should instead come from config.
    DOMAIN_GRID_SEARCH = 'domain_grid_search'

    # Seed all RNGs so splits and training are reproducible.
    random.seed(config.SEED)
    np.random.seed(config.SEED)
    torch.manual_seed(config.SEED)
    torch.cuda.manual_seed_all(config.SEED)

    dfx = pd.read_csv(config.DATA_PATH + '/' + config.DATASET_TRAIN, sep='\t', nrows=config.N_ROWS).fillna("none")

    skf = StratifiedKFold(n_splits=config.SPLITS, shuffle=True, random_state=config.SEED)

    # Hyper-parameter columns identify a configuration; metric columns are
    # averaged over folds at the end.
    hyper_columns = ['task', 'epoch', 'transformer', 'max_len', 'batch_size', 'lr', 'dropout']
    metric_columns = ['accuracy_train', 'f1-macro_train', 'loss_train',
                      'accuracy_val', 'f1-macro_val', 'loss_val']
    df_results = pd.DataFrame(columns=hyper_columns + metric_columns)

    inter = len(config.LABELS) * len(config.TRANSFORMERS) * len(config.MAX_LEN) * len(config.BATCH_SIZE) * len(config.DROPOUT) * len(config.LR) * config.SPLITS
    grid_search_bar = tqdm(total=inter, desc='GRID SEARCH', position=2)

    for task in tqdm(config.LABELS, desc='TASKS', position=1):
        # Keep only rows labeled for this task (negative labels are excluded).
        df_grid_search = dfx.loc[dfx[task] >= 0].reset_index(drop=True)
        for transformer in tqdm(config.TRANSFORMERS, desc='TRANSFOMERS', position=0):
            for max_len in config.MAX_LEN:
                for batch_size in config.BATCH_SIZE:
                    for drop_out in config.DROPOUT:
                        for lr in config.LR:
                            for fold, (train_index, val_index) in enumerate(skf.split(df_grid_search[config.DATASET_TEXT_PROCESSED], df_grid_search[task])):
                                df_train = df_grid_search.loc[train_index]
                                df_val = df_grid_search.loc[val_index]
                                # BUG FIX: 'Max_len' used to print a hard-coded literal
                                # instead of the swept max_len value.
                                tqdm.write(f'\nTask: {task} Transfomer: {transformer.split("/")[-1]} Max_len: {max_len} Batch_size: {batch_size} Dropout: {drop_out} lr: {lr} Fold: {fold+1}/{config.SPLITS}')
                                df_results = run(df_train,
                                                 df_val,
                                                 max_len,
                                                 task,
                                                 transformer,
                                                 batch_size,
                                                 drop_out,
                                                 lr,
                                                 df_results
                                                 )
                                grid_search_bar.update(1)

    # Average fold-level metrics per hyper-parameter configuration.
    # BUG FIX: the column selection after groupby previously used a bare tuple
    # of column names, which modern pandas rejects; a list selection is the
    # supported form.
    df_results = df_results.groupby(hyper_columns, as_index=False, sort=False)[metric_columns].mean()

    df_results.to_csv(config.LOGS_PATH + '/' + DOMAIN_GRID_SEARCH + '.csv', index=False)
"numpy.random.seed",
"pandas.read_csv",
"engine.train_fn",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.f1_score",
"pandas.DataFrame",
"torch.utils.data.DataLoader",
"random.seed",
"pandas.concat",
"dataset.TransformerDataset",
"tqdm.tqdm",
"torch.manual_seed",
"engine.eval_fn",
"tra... | [((194, 227), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (217, 227), False, 'import warnings\n'), ((430, 459), 'transformers.logging.set_verbosity_error', 'logging.set_verbosity_error', ([], {}), '()\n', (457, 459), False, 'from transformers import logging\n'), ((580, 741), 'dataset.TransformerDataset', 'dataset.TransformerDataset', ([], {'text': 'df_train[config.DATASET_TEXT_PROCESSED].values', 'target': 'df_train[task].values', 'max_len': 'max_len', 'transformer': 'transformer'}), '(text=df_train[config.DATASET_TEXT_PROCESSED].\n values, target=df_train[task].values, max_len=max_len, transformer=\n transformer)\n', (606, 741), False, 'import dataset\n'), ((795, 906), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'batch_size', 'num_workers': 'config.TRAIN_WORKERS'}), '(dataset=train_dataset, batch_size=batch_size,\n num_workers=config.TRAIN_WORKERS)\n', (822, 906), False, 'import torch\n'), ((956, 1113), 'dataset.TransformerDataset', 'dataset.TransformerDataset', ([], {'text': 'df_val[config.DATASET_TEXT_PROCESSED].values', 'target': 'df_val[task].values', 'max_len': 'max_len', 'transformer': 'transformer'}), '(text=df_val[config.DATASET_TEXT_PROCESSED].\n values, target=df_val[task].values, max_len=max_len, transformer=\n transformer)\n', (982, 1113), False, 'import dataset\n'), ((1165, 1272), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'val_dataset', 'batch_size': 'batch_size', 'num_workers': 'config.VAL_WORKERS'}), '(dataset=val_dataset, batch_size=batch_size,\n num_workers=config.VAL_WORKERS)\n', (1192, 1272), False, 'import torch\n'), ((2088, 2122), 'transformers.AdamW', 'AdamW', (['optimizer_parameters'], {'lr': 'lr'}), '(optimizer_parameters, lr=lr)\n', (2093, 2122), False, 'from transformers import AdamW\n'), ((2139, 2241), 'transformers.get_linear_schedule_with_warmup', 
'get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': '(0)', 'num_training_steps': 'num_train_steps'}), '(optimizer, num_warmup_steps=0,\n num_training_steps=num_train_steps)\n', (2170, 2241), False, 'from transformers import get_linear_schedule_with_warmup\n'), ((3906, 3930), 'random.seed', 'random.seed', (['config.SEED'], {}), '(config.SEED)\n', (3917, 3930), False, 'import random\n'), ((3935, 3962), 'numpy.random.seed', 'np.random.seed', (['config.SEED'], {}), '(config.SEED)\n', (3949, 3962), True, 'import numpy as np\n'), ((3967, 3997), 'torch.manual_seed', 'torch.manual_seed', (['config.SEED'], {}), '(config.SEED)\n', (3984, 3997), False, 'import torch\n'), ((4002, 4041), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['config.SEED'], {}), '(config.SEED)\n', (4028, 4041), False, 'import torch\n'), ((4168, 4247), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'config.SPLITS', 'shuffle': '(True)', 'random_state': 'config.SEED'}), '(n_splits=config.SPLITS, shuffle=True, random_state=config.SEED)\n', (4183, 4247), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((4266, 4464), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['task', 'epoch', 'transformer', 'max_len', 'batch_size', 'lr', 'dropout',\n 'accuracy_train', 'f1-macro_train', 'loss_train', 'accuracy_val',\n 'f1-macro_val', 'loss_val']"}), "(columns=['task', 'epoch', 'transformer', 'max_len',\n 'batch_size', 'lr', 'dropout', 'accuracy_train', 'f1-macro_train',\n 'loss_train', 'accuracy_val', 'f1-macro_val', 'loss_val'])\n", (4278, 4464), True, 'import pandas as pd\n'), ((5142, 5191), 'tqdm.tqdm', 'tqdm', ([], {'total': 'inter', 'desc': '"""GRID SEARCH"""', 'position': '(2)'}), "(total=inter, desc='GRID SEARCH', position=2)\n", (5146, 5191), False, 'from tqdm import tqdm\n'), ((5213, 5258), 'tqdm.tqdm', 'tqdm', (['config.LABELS'], {'desc': '"""TASKS"""', 'position': '(1)'}), "(config.LABELS, desc='TASKS', position=1)\n", 
(5217, 5258), False, 'from tqdm import tqdm\n'), ((2346, 2417), 'engine.train_fn', 'engine.train_fn', (['train_data_loader', 'model', 'optimizer', 'device', 'scheduler'], {}), '(train_data_loader, model, optimizer, device, scheduler)\n', (2361, 2417), False, 'import engine\n'), ((2437, 2494), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['targ_train', 'pred_train'], {'average': '"""macro"""'}), "(targ_train, pred_train, average='macro')\n", (2453, 2494), False, 'from sklearn import metrics\n'), ((2515, 2561), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['targ_train', 'pred_train'], {}), '(targ_train, pred_train)\n', (2537, 2561), False, 'from sklearn import metrics\n'), ((2610, 2656), 'engine.eval_fn', 'engine.eval_fn', (['val_data_loader', 'model', 'device'], {}), '(val_data_loader, model, device)\n', (2624, 2656), False, 'import engine\n'), ((2674, 2727), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['targ_val', 'pred_val'], {'average': '"""macro"""'}), "(targ_val, pred_val, average='macro')\n", (2690, 2727), False, 'from sklearn import metrics\n'), ((2746, 2788), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['targ_val', 'pred_val'], {}), '(targ_val, pred_val)\n', (2768, 2788), False, 'from sklearn import metrics\n'), ((2823, 3153), 'pandas.DataFrame', 'pd.DataFrame', (["{'task': task, 'epoch': epoch, 'transformer': transformer, 'max_len':\n max_len, 'batch_size': batch_size, 'lr': lr, 'dropout': drop_out,\n 'accuracy_train': acc_train, 'f1-macro_train': f1_train, 'loss_train':\n loss_train, 'accuracy_val': acc_val, 'f1-macro_val': f1_val, 'loss_val':\n loss_val}"], {'index': '[0]'}), "({'task': task, 'epoch': epoch, 'transformer': transformer,\n 'max_len': max_len, 'batch_size': batch_size, 'lr': lr, 'dropout':\n drop_out, 'accuracy_train': acc_train, 'f1-macro_train': f1_train,\n 'loss_train': loss_train, 'accuracy_val': acc_val, 'f1-macro_val':\n f1_val, 'loss_val': loss_val}, index=[0])\n", (2835, 3153), True, 'import 
pandas as pd\n'), ((3517, 3575), 'pandas.concat', 'pd.concat', (['[df_results, df_new_results]'], {'ignore_index': '(True)'}), '([df_results, df_new_results], ignore_index=True)\n', (3526, 3575), True, 'import pandas as pd\n'), ((5357, 5414), 'tqdm.tqdm', 'tqdm', (['config.TRANSFORMERS'], {'desc': '"""TRANSFOMERS"""', 'position': '(0)'}), "(config.TRANSFORMERS, desc='TRANSFOMERS', position=0)\n", (5361, 5414), False, 'from tqdm import tqdm\n'), ((1338, 1363), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1361, 1363), False, 'import torch\n'), ((4053, 4147), 'pandas.read_csv', 'pd.read_csv', (["(config.DATA_PATH + '/' + config.DATASET_TRAIN)"], {'sep': '"""\t"""', 'nrows': 'config.N_ROWS'}), "(config.DATA_PATH + '/' + config.DATASET_TRAIN, sep='\\t', nrows=\n config.N_ROWS)\n", (4064, 4147), True, 'import pandas as pd\n')] |
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
import math
def predict_using_sklean():
    """Fit cs ~ math with sklearn's LinearRegression; return (coef_, intercept_)."""
    frame = pd.read_csv("exe_gradent_decent.csv")
    model = LinearRegression()
    model.fit(frame[['math']], frame.cs)
    return model.coef_, model.intercept_
def gradient_descent(x, y, iterations=1000000, learning_rate=0.0002):
    """Fit y ~ m*x + b by batch gradient descent on mean squared error.

    Parameters
    ----------
    x, y : numpy arrays of equal length (features and targets).
    iterations : int, optional
        Maximum number of update steps. Default preserves the original
        hard-coded behavior.
    learning_rate : float, optional
        Step size for both parameters. Default preserves original behavior.

    Returns
    -------
    (m, b) : fitted slope and intercept.
    """
    m_curr = 0
    b_curr = 0
    n = len(x)
    cost_previous = 0
    for i in range(iterations):
        y_predicted = m_curr * x + b_curr
        residual = y - y_predicted
        # Mean squared error, vectorized instead of a Python-level sum()
        # over a temporary list.
        cost = (1 / n) * np.sum(residual ** 2)
        # Partial derivatives of the MSE w.r.t. slope and intercept.
        md = -(2 / n) * np.sum(x * residual)
        bd = -(2 / n) * np.sum(residual)
        m_curr = m_curr - learning_rate * md
        b_curr = b_curr - learning_rate * bd
        # rel_tol=1e-20 is below double precision, so this effectively stops
        # only once the cost is bit-for-bit unchanged between iterations.
        if math.isclose(cost, cost_previous, rel_tol=1e-20):
            break
        cost_previous = cost
        print("m {}, b {}, cost {}, iteration {}".format(m_curr, b_curr, cost, i))
    return m_curr, b_curr
if __name__ == "__main__":
    # Fit the same data with both the hand-rolled gradient descent and
    # sklearn, then compare the fitted coefficients.
    frame = pd.read_csv("exe_gradent_decent.csv")
    m, b = gradient_descent(np.array(frame.math), np.array(frame.cs))
    print(f"Using gradient descent function: Coef {m} Intercept {b}")
    m_sklearn, b_sklearn = predict_using_sklean()
    print(f"Using sklearn: Coef {m_sklearn} Intercept {b_sklearn}")
"pandas.read_csv",
"numpy.array",
"math.isclose",
"sklearn.linear_model.LinearRegression"
] | [((139, 176), 'pandas.read_csv', 'pd.read_csv', (['"""exe_gradent_decent.csv"""'], {}), "('exe_gradent_decent.csv')\n", (150, 176), True, 'import pandas as pd\n'), ((185, 203), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (201, 203), False, 'from sklearn.linear_model import LinearRegression\n'), ((982, 1019), 'pandas.read_csv', 'pd.read_csv', (['"""exe_gradent_decent.csv"""'], {}), "('exe_gradent_decent.csv')\n", (993, 1019), True, 'import pandas as pd\n'), ((1028, 1045), 'numpy.array', 'np.array', (['df.math'], {}), '(df.math)\n', (1036, 1045), True, 'import numpy as np\n'), ((1054, 1069), 'numpy.array', 'np.array', (['df.cs'], {}), '(df.cs)\n', (1062, 1069), True, 'import numpy as np\n'), ((739, 787), 'math.isclose', 'math.isclose', (['cost', 'cost_previous'], {'rel_tol': '(1e-20)'}), '(cost, cost_previous, rel_tol=1e-20)\n', (751, 787), False, 'import math\n')] |
# nasty hack to deal with issue #46
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
import pytest
import numpy as np
import time
from rl_coach.memories.non_episodic.differentiable_neural_dictionary import QDND
import tensorflow as tf
# Test dimensions. DND_SIZE is far larger than NUM_DND_ENTRIES_TO_ADD, so no
# entries are evicted while the test inserts random embeddings.
NUM_ACTIONS = 3
NUM_DND_ENTRIES_TO_ADD = 10000
EMBEDDING_SIZE = 512
NUM_SAMPLED_EMBEDDINGS = 500
NUM_NEIGHBORS = 10
DND_SIZE = 500000
@pytest.fixture()
def dnd():
    """Build a fresh QDND sized by the module constants above."""
    return QDND(
        DND_SIZE,
        EMBEDDING_SIZE,
        NUM_ACTIONS,
        0.1,
        key_error_threshold=0,
        learning_rate=0.0001,
        num_neighbors=NUM_NEIGHBORS
    )
@pytest.mark.unit_test
def test_random_sample_from_dnd(dnd: QDND):
    # Fill the DND with random embeddings/actions/values, then compare two
    # TF graph formulations of the pairwise squared-distance sum that feeds
    # the normalization factor.
    # store single non terminal transition
    embeddings = [np.random.rand(EMBEDDING_SIZE) for j in range(NUM_DND_ENTRIES_TO_ADD)]
    actions = [np.random.randint(NUM_ACTIONS) for j in range(NUM_DND_ENTRIES_TO_ADD)]
    values = [np.random.rand() for j in range(NUM_DND_ENTRIES_TO_ADD)]
    dnd.add(embeddings, actions, values)

    dnd_embeddings, dnd_values, dnd_indices = dnd.query(embeddings[0:10], 0, NUM_NEIGHBORS)

    # calculate_normalization_factor
    sampled_embeddings = dnd.sample_embeddings(NUM_SAMPLED_EMBEDDINGS)
    coefficient = 1/(NUM_SAMPLED_EMBEDDINGS * (NUM_SAMPLED_EMBEDDINGS - 1.0))

    # Method 1: one graph evaluation per sampled embedding — sums squared
    # distances from a single embedding to all others.
    tf_current_embedding = tf.placeholder(tf.float32, shape=(EMBEDDING_SIZE), name='current_embedding')
    tf_other_embeddings = tf.placeholder(tf.float32, shape=(NUM_SAMPLED_EMBEDDINGS - 1, EMBEDDING_SIZE), name='other_embeddings')
    sub = tf_current_embedding - tf_other_embeddings
    square = tf.square(sub)
    result = tf.reduce_sum(square)

    ###########################
    # more efficient method
    ###########################
    # Method 2: a single evaluation — tile the sample against its transpose so
    # every pairwise difference is computed in one pass. Self-pairs contribute
    # zero, so the total equals method 1's accumulated sum.
    sampled_embeddings_expanded = tf.placeholder(
        tf.float32, shape=(1, NUM_SAMPLED_EMBEDDINGS, EMBEDDING_SIZE), name='sampled_embeddings_expanded')
    sampled_embeddings_tiled = tf.tile(sampled_embeddings_expanded, (sampled_embeddings_expanded.shape[1], 1, 1))
    sampled_embeddings_transposed = tf.transpose(sampled_embeddings_tiled, (1, 0, 2))
    sub2 = sampled_embeddings_tiled - sampled_embeddings_transposed
    square2 = tf.square(sub2)
    result2 = tf.reduce_sum(square2)

    config = tf.ConfigProto()
    config.allow_soft_placement = True  # allow placing ops on cpu if they are not fit for gpu
    config.gpu_options.allow_growth = True  # allow the gpu memory allocated for the worker to grow if needed
    sess = tf.Session(config=config)

    sum1 = 0
    start = time.time()
    for i in range(NUM_SAMPLED_EMBEDDINGS):
        curr_sampled_embedding = sampled_embeddings[i]
        other_embeddings = np.delete(sampled_embeddings, i, 0)
        sum1 += sess.run(result, feed_dict={tf_current_embedding: curr_sampled_embedding, tf_other_embeddings: other_embeddings})
    print("1st method: {} sec".format(time.time()-start))

    start = time.time()
    sum2 = sess.run(result2, feed_dict={sampled_embeddings_expanded: np.expand_dims(sampled_embeddings,0)})
    print("2nd method: {} sec".format(time.time()-start))

    # validate that results are equal
    # NOTE(review): equality is only printed, not asserted — the test cannot
    # actually fail on a mismatch; confirm whether an assert was intended.
    print("sum1 = {}, sum2 = {}".format(sum1, sum2))

    norm_factor = -0.5/(coefficient * sum2)
if __name__ == '__main__':
    # Run the test directly, constructing the fixture by hand (bypasses
    # pytest's fixture injection).
    test_random_sample_from_dnd(dnd())
| [
"tensorflow.reduce_sum",
"rl_coach.memories.non_episodic.differentiable_neural_dictionary.QDND",
"os.path.dirname",
"pytest.fixture",
"tensorflow.Session",
"numpy.expand_dims",
"tensorflow.transpose",
"time.time",
"tensorflow.placeholder",
"tensorflow.tile",
"tensorflow.ConfigProto",
"numpy.ra... | [((424, 440), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (438, 440), False, 'import pytest\n'), ((463, 589), 'rl_coach.memories.non_episodic.differentiable_neural_dictionary.QDND', 'QDND', (['DND_SIZE', 'EMBEDDING_SIZE', 'NUM_ACTIONS', '(0.1)'], {'key_error_threshold': '(0)', 'learning_rate': '(0.0001)', 'num_neighbors': 'NUM_NEIGHBORS'}), '(DND_SIZE, EMBEDDING_SIZE, NUM_ACTIONS, 0.1, key_error_threshold=0,\n learning_rate=0.0001, num_neighbors=NUM_NEIGHBORS)\n', (467, 589), False, 'from rl_coach.memories.non_episodic.differentiable_neural_dictionary import QDND\n'), ((1421, 1495), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'EMBEDDING_SIZE', 'name': '"""current_embedding"""'}), "(tf.float32, shape=EMBEDDING_SIZE, name='current_embedding')\n", (1435, 1495), True, 'import tensorflow as tf\n'), ((1524, 1631), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(NUM_SAMPLED_EMBEDDINGS - 1, EMBEDDING_SIZE)', 'name': '"""other_embeddings"""'}), "(tf.float32, shape=(NUM_SAMPLED_EMBEDDINGS - 1,\n EMBEDDING_SIZE), name='other_embeddings')\n", (1538, 1631), True, 'import tensorflow as tf\n'), ((1695, 1709), 'tensorflow.square', 'tf.square', (['sub'], {}), '(sub)\n', (1704, 1709), True, 'import tensorflow as tf\n'), ((1723, 1744), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['square'], {}), '(square)\n', (1736, 1744), True, 'import tensorflow as tf\n'), ((1874, 1992), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(1, NUM_SAMPLED_EMBEDDINGS, EMBEDDING_SIZE)', 'name': '"""sampled_embeddings_expanded"""'}), "(tf.float32, shape=(1, NUM_SAMPLED_EMBEDDINGS, EMBEDDING_SIZE\n ), name='sampled_embeddings_expanded')\n", (1888, 1992), True, 'import tensorflow as tf\n'), ((2028, 2114), 'tensorflow.tile', 'tf.tile', (['sampled_embeddings_expanded', '(sampled_embeddings_expanded.shape[1], 1, 1)'], {}), '(sampled_embeddings_expanded, (sampled_embeddings_expanded.shape[1],\n 1, 1))\n', (2035, 
2114), True, 'import tensorflow as tf\n'), ((2147, 2196), 'tensorflow.transpose', 'tf.transpose', (['sampled_embeddings_tiled', '(1, 0, 2)'], {}), '(sampled_embeddings_tiled, (1, 0, 2))\n', (2159, 2196), True, 'import tensorflow as tf\n'), ((2279, 2294), 'tensorflow.square', 'tf.square', (['sub2'], {}), '(sub2)\n', (2288, 2294), True, 'import tensorflow as tf\n'), ((2309, 2331), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['square2'], {}), '(square2)\n', (2322, 2331), True, 'import tensorflow as tf\n'), ((2346, 2362), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (2360, 2362), True, 'import tensorflow as tf\n'), ((2580, 2605), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (2590, 2605), True, 'import tensorflow as tf\n'), ((2632, 2643), 'time.time', 'time.time', ([], {}), '()\n', (2641, 2643), False, 'import time\n'), ((3007, 3018), 'time.time', 'time.time', ([], {}), '()\n', (3016, 3018), False, 'import time\n'), ((846, 876), 'numpy.random.rand', 'np.random.rand', (['EMBEDDING_SIZE'], {}), '(EMBEDDING_SIZE)\n', (860, 876), True, 'import numpy as np\n'), ((932, 962), 'numpy.random.randint', 'np.random.randint', (['NUM_ACTIONS'], {}), '(NUM_ACTIONS)\n', (949, 962), True, 'import numpy as np\n'), ((1017, 1033), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1031, 1033), True, 'import numpy as np\n'), ((2770, 2805), 'numpy.delete', 'np.delete', (['sampled_embeddings', 'i', '(0)'], {}), '(sampled_embeddings, i, 0)\n', (2779, 2805), True, 'import numpy as np\n'), ((106, 131), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (121, 131), False, 'import os\n'), ((2974, 2985), 'time.time', 'time.time', ([], {}), '()\n', (2983, 2985), False, 'import time\n'), ((3088, 3125), 'numpy.expand_dims', 'np.expand_dims', (['sampled_embeddings', '(0)'], {}), '(sampled_embeddings, 0)\n', (3102, 3125), True, 'import numpy as np\n'), ((3165, 3176), 'time.time', 'time.time', ([], {}), '()\n', (3174, 
3176), False, 'import time\n')] |
import argparse
import os
import tensorflow as tf
import numpy as np
import models
from libs.inputs import (
get_filename_queue,
get_input_image, get_input_cifar10,
create_batch
)
from train import train
from utils import pp
# Command-line interface for the GAN training script. Grouped as: network
# architecture, training/optimization, and data/logging options.
parser = argparse.ArgumentParser(description='Train and run a GAN.')

# Architecture
parser.add_argument('--image-size', default=128, type=int, help='Size of image crops.')
parser.add_argument('--output-size', default=64, type=int, help='Size of samples.')
parser.add_argument('--c-dim', default=3, type=int, help='Number of channels.')
parser.add_argument('--z-dim', default=512, type=int, help='Dimensionality of the latent space.')
parser.add_argument('--gf-dim', default=64, type=int, help='Number of filters to use for generator.')
parser.add_argument('--df-dim', default=64, type=int, help='Number of filters to use for discriminator.')
parser.add_argument('--reg-param', default=10., type=float, help='Regularization parameter.')
parser.add_argument('--g-architecture', default='conv4', type=str, help='Architecture for generator.')
parser.add_argument('--d-architecture', default='conv4', type=str, help='Architecture for discriminator.')
parser.add_argument('--gan-type', default='standard', type=str, help='Which type of GAN to use.')

# Training
parser.add_argument('--seed', default=124, type=int, help='let numpy.random and tf.random keep the same seed')
parser.add_argument('--optimizer', default='jare', type=str, help='Which optimizer to use.')
parser.add_argument('--opt-type', default='rmsprop', type=str, help='Which optimizer type to use.')
parser.add_argument('--altgd-gsteps', default='1', type=int, help='How many training steps to use for generator.')
parser.add_argument('--altgd-dsteps', default='1', type=int, help='How many training steps to use for discriminator.')
parser.add_argument('--beta1', default='0.9', type=float, help='beta1 for adam optimizer')
parser.add_argument('--beta2', default='0.999', type=float, help='beta2 for adam optimizer')
parser.add_argument('--nsteps', default=200000, type=int, help='Number of steps to run training.')
parser.add_argument('--ntest', default=500, type=int, help='How often to run tests.')
parser.add_argument('--learning-rate', default=1e-4, type=float, help='Learning rate for the model.')
parser.add_argument('--batch-size', default=64, type=int, help='Batchsize for training.')
parser.add_argument('--log-dir', default='./logs', type=str, help='Where to store log and checkpoint files.')
parser.add_argument('--sample-dir', default='./samples', type=str, help='Where to put samples during training.')
parser.add_argument('--is-inception-scores', default=False, action='store_true',
                    help='Whether to compute inception scores.')
parser.add_argument('--fid-type', default=0, type=int,
                    help='How to compute fid [0: No calculation, 1: without pre-stats, 2: with pre-stats]')
parser.add_argument('--inception-dir', default='./inception', type=str, help='Where to put inception network.')
parser.add_argument('--dataset', default='cifar-10', type=str, help='Which data set to use.')
parser.add_argument('--data-dir', default='./data', type=str, help='Where data data is stored..')
parser.add_argument('--split', default='train', type=str, help='Which split to use.')
def main():
    """Parse CLI arguments, build the input pipeline and models, and train.

    For cifar-10 the image geometry is fixed by the data itself
    (32x32, 3 channels), so the dataset-resolved ``output_size``/``c_dim``
    locals are used when constructing the models instead of the raw CLI
    values (the original code computed them and then ignored them).
    """
    args = parser.parse_args()
    pp.pprint(vars(args))
    # seed numpy and tensorflow so runs are reproducible
    np.random.seed(args.seed)
    tf.set_random_seed(args.seed)
    # Data
    filename_queue = get_filename_queue(
        split_file=os.path.join(args.data_dir, 'splits', args.dataset, args.split + '.lst'),
        data_dir=os.path.join(args.data_dir, args.dataset)
    )
    if args.dataset == "cifar-10":
        image, label = get_input_cifar10(filename_queue)
        output_size = 32
        c_dim = 3
    else:
        image = get_input_image(filename_queue,
            output_size=args.output_size, image_size=args.image_size, c_dim=args.c_dim
        )
        output_size = args.output_size
        c_dim = args.c_dim
    image_batch = create_batch([image], batch_size=args.batch_size,
        num_preprocess_threads=16, min_queue_examples=10000)
    config = vars(args)
    # Fix: use the dataset-resolved geometry; the originals read args.* here,
    # silently discarding the cifar-10 overrides computed above.
    generator = models.get_generator(args.g_architecture,
        output_size=output_size, c_dim=c_dim, f_dim=args.gf_dim)
    discriminator = models.get_discriminator(args.d_architecture,
        output_size=output_size, c_dim=c_dim, f_dim=args.df_dim)
    train(generator, discriminator, image_batch, config)
# Script entry point: only run training when executed directly.
if __name__ == '__main__':
    main()
| [
"numpy.random.seed",
"libs.inputs.create_batch",
"argparse.ArgumentParser",
"libs.inputs.get_input_image",
"models.get_generator",
"tensorflow.set_random_seed",
"models.get_discriminator",
"train.train",
"os.path.join",
"libs.inputs.get_input_cifar10"
] | [((248, 307), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train and run a GAN."""'}), "(description='Train and run a GAN.')\n", (271, 307), False, 'import argparse\n'), ((3404, 3429), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (3418, 3429), True, 'import numpy as np\n'), ((3434, 3463), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['args.seed'], {}), '(args.seed)\n', (3452, 3463), True, 'import tensorflow as tf\n'), ((4095, 4201), 'libs.inputs.create_batch', 'create_batch', (['[image]'], {'batch_size': 'args.batch_size', 'num_preprocess_threads': '(16)', 'min_queue_examples': '(10000)'}), '([image], batch_size=args.batch_size, num_preprocess_threads=16,\n min_queue_examples=10000)\n', (4107, 4201), False, 'from libs.inputs import get_filename_queue, get_input_image, get_input_cifar10, create_batch\n'), ((4271, 4383), 'models.get_generator', 'models.get_generator', (['args.g_architecture'], {'output_size': 'args.output_size', 'c_dim': 'args.c_dim', 'f_dim': 'args.gf_dim'}), '(args.g_architecture, output_size=args.output_size,\n c_dim=args.c_dim, f_dim=args.gf_dim)\n', (4291, 4383), False, 'import models\n'), ((4438, 4554), 'models.get_discriminator', 'models.get_discriminator', (['args.d_architecture'], {'output_size': 'args.output_size', 'c_dim': 'args.c_dim', 'f_dim': 'args.df_dim'}), '(args.d_architecture, output_size=args.output_size,\n c_dim=args.c_dim, f_dim=args.df_dim)\n', (4462, 4554), False, 'import models\n'), ((4601, 4653), 'train.train', 'train', (['generator', 'discriminator', 'image_batch', 'config'], {}), '(generator, discriminator, image_batch, config)\n', (4606, 4653), False, 'from train import train\n'), ((3734, 3767), 'libs.inputs.get_input_cifar10', 'get_input_cifar10', (['filename_queue'], {}), '(filename_queue)\n', (3751, 3767), False, 'from libs.inputs import get_filename_queue, get_input_image, get_input_cifar10, create_batch\n'), ((3837, 3949), 
'libs.inputs.get_input_image', 'get_input_image', (['filename_queue'], {'output_size': 'args.output_size', 'image_size': 'args.image_size', 'c_dim': 'args.c_dim'}), '(filename_queue, output_size=args.output_size, image_size=\n args.image_size, c_dim=args.c_dim)\n', (3852, 3949), False, 'from libs.inputs import get_filename_queue, get_input_image, get_input_cifar10, create_batch\n'), ((3536, 3608), 'os.path.join', 'os.path.join', (['args.data_dir', '"""splits"""', 'args.dataset', "(args.split + '.lst')"], {}), "(args.data_dir, 'splits', args.dataset, args.split + '.lst')\n", (3548, 3608), False, 'import os\n'), ((3627, 3668), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.dataset'], {}), '(args.data_dir, args.dataset)\n', (3639, 3668), False, 'import os\n')] |
import numpy as np
import cv2
import time
# from simple_pid import PID
# Open the default local camera (index 0).
cap = cv2.VideoCapture(0)
# cap = cv2.VideoCapture('http://192.168.55.6:8080/video')
# set res of camera
# Runtime configuration for the line-follower pipeline.
settings = {
    "window_x": 320,
    "window_y": 240,
    "crop_window_height": 80,
    "contrast_high": 255,
    "contrast_low": 160,
    "contrast_auto": True,
    "debug_mode": True,
    "display_on_screen": False,
    "follow_nearest_to_center": True
}
# Shared result buffer: [x of top crop, midpoint x, x of bottom crop, fps].
data = np.zeros(4, dtype=int)
# contrast_pid = PID(1, .1, .1, setpoint=1)
# Report whether OpenCV was built with optimizations enabled.
print(cv2.useOptimized())
def nothing(x):
    """No-op callback placeholder (e.g. for cv2 trackbars); ignores ``x``."""
    return None
# Property ids 3/4 are CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT.
cap.set(3, settings['window_x'])
cap.set(4, settings['window_y'])
# Give the camera a moment to initialize before grabbing frames.
time.sleep(2)
# create variables from settings needed at runtime.
contrast_low = settings['contrast_low']
box_2_position = settings['window_y'] - 80
# variables for the frame counter
frame_counter: int = 0
start_time = time.time()
fps: int = 0
def set_contrast_low(new_value):
    """Shift the global binary-threshold floor by ``new_value`` (coerced to int).

    Prints the value in effect before the adjustment. When the threshold
    drops to 20 or below it wraps back to 255 so the auto-contrast search
    can sweep down from the top again.
    """
    global contrast_low
    print('contrast low: {}'.format(contrast_low))
    contrast_low += int(new_value)
    # Bottom of the useful range reached: restart the sweep from the top.
    if contrast_low <= 20:
        contrast_low = 255
def create_crop_box(position):
    """Threshold one horizontal strip of the global ``frame`` and locate the line.

    Crops an 80-px-tall band starting at row ``position``, binarizes it with
    the global ``contrast_low`` threshold, finds contours, and (for 1-2
    contours) draws bounding boxes and returns the chosen contour's center.
    With 3+ contours the threshold is raised; with none it is dropped sharply
    so the auto-contrast loop can recover.

    Returns:
        (crop_color, center, contore_count): the binary mask, the chosen
        center ((x, y) tuple from draw_rectangles, or 0 if none found), and
        the number of contours detected.

    NOTE(review): reads module globals ``frame``, ``contrast_low``,
    ``settings``; assumes cv2 3.x findContours (3-value return) -- confirm
    against the installed OpenCV version.
    """
    b = position + 80
    c = 0
    d = 360
    center = 0
    contore_count = 0
    cropped_frame = frame[position:b, c:d]
    # add the filters
    gray = cv2.cvtColor(cropped_frame, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    # convert to a binary filter
    ret, processed_cropped_image = cv2.threshold(
        blur, contrast_low, settings['contrast_high'], cv2.THRESH_BINARY)
    kernel = np.ones((5, 5), np.uint8)
    crop_color = cv2.morphologyEx(
        processed_cropped_image, cv2.MORPH_OPEN, kernel)
    # black out top and bottom bands so we get a nice square to process against.
    cv2.rectangle(crop_color, (0, 0), (d, 10), (0, 0, 0), -1)
    cv2.rectangle(crop_color, (0, 70), (d, b), (0, 0, 0), -1)
    im2, contours, hierarchy = cv2.findContours(
        crop_color, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contore_count = len(contours)
    if 1 <= contore_count <= 2:
        ###################
        # TODO: find largest contour and follow it
        # TODO: T junction
        ###################
        ##################################################
        # find the contour nearest to center and return it
        ##################################################
        # only if there are two contours
        if contore_count >= 2:
            if settings['follow_nearest_to_center']:
                center_0 = find_center_of_contour(contours[0])
                center_1 = find_center_of_contour(contours[1])
                # distances from the image center (kept non-negative)
                width = settings['window_x']/2
                c_0 = 0
                if center_0 < width:
                    c_0 = width-center_0
                else:
                    c_0 = center_0 - width
                # find the nearest to the center.
                c_1 = 0
                if center_1 < width:
                    c_1 = width-center_1
                else:
                    c_1 = center_1 - width
                # and draw the color rectangles around them
                # (green marks the one being followed).
                if c_0 <= c_1:
                    center = draw_rectangles(
                        contours[0], cropped_frame, center, 'green')
                    draw_rectangles(
                        contours[1], cropped_frame, center)
                else:
                    draw_rectangles(
                        contours[0], cropped_frame, center)
                    center = draw_rectangles(
                        contours[1], cropped_frame, center, 'green')
        # we only have one so it's green
        else:
            center = draw_rectangles(
                contours[0], cropped_frame, center, 'green')
        # area = cv2.contourArea(cnt)
        # print('\n\narea\n')
        # print(area)
    ##################################
    # we have too many contours so adjust the contrast
    elif len(contours) >= 3:
        set_contrast_low(5)
    else:
        # we have no contours: pull the threshold down a lot,
        # then let it increase slowly back up
        set_contrast_low(-30)
    return crop_color, center, contore_count
def find_center_of_contour(contour):
    """Return the x coordinate (int) of ``contour``'s minimal enclosing circle.

    Args:
        contour: an OpenCV contour (as returned by cv2.findContours).

    Returns:
        int: the x position of the circle center, truncated to int.
    """
    # (Dead locals `img_center`/`center` from the original removed; only
    # the x coordinate was ever returned.)
    (x, y), radius = cv2.minEnclosingCircle(contour)
    return int(x)
def draw_rectangles(cnt, cropped_frame, center, color='red'):
    '''
    Draws the bounding box of ``cnt`` on ``cropped_frame`` (green when
    ``color == 'green'``, red otherwise), marks the center of the contour's
    minimal enclosing circle, and returns that center.

    Args:
        cnt: an OpenCV contour.
        cropped_frame: BGR image to draw on (modified in place).
        center: kept for interface compatibility; the incoming value is
            never read (it is overwritten before use).
        color (str): 'green' for the followed contour, anything else -> red.

    Returns:
        tuple: (x, y) int center of the minimal enclosing circle.
    '''
    r_x, r_y, r_w, r_h = cv2.boundingRect(cnt)
    # Single rectangle call; only the BGR color differed between branches.
    box_color = (0, 255, 0) if color == 'green' else (0, 0, 255)
    cv2.rectangle(cropped_frame, (r_x, r_y),
                  (r_x + r_w, r_y + r_h), box_color, 2)
    # add center point to image
    (x, y), radius = cv2.minEnclosingCircle(cnt)
    center = (int(x), int(y))
    cv2.circle(cropped_frame, center, 1, (67, 95, 0), 2)
    return center
def print_fps(frame, fps):
    """Compose the FPS overlay label for ``frame``.

    The on-screen drawing (cv2.putText) is currently disabled, so this
    only builds the string and returns None.
    """
    label = 'fps:{}'.format(fps)
    # cv2.putText(frame, label, (5, 15), font,
    #             1, (255, 255, 255), 2, cv2.LINE_AA)
# read 45 frames and throw them away
# just lets the camera settle before we
# start to do any work with it
for x in range(45):
    ret, frame = cap.read()
# Main capture/processing loop: grab a frame, locate the line in a top and
# a bottom crop band, mark the midpoint, and (optionally) display results.
while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    frame_counter = frame_counter + 1
    # create a crop box at the top of the frame to work from
    crop_mask_1, center_1, contore_count = create_crop_box(
        0)
    # get current positions of four trackbars
    # the settings may have been updated in the previous call.
    if 1 <= contore_count <= 2:
        # second crop band near the bottom of the frame
        crop_mask_2, center_2, contore_count_2 = create_crop_box(
            box_2_position)
        if settings['display_on_screen']:
            cv2.imshow('crop_mask_2', crop_mask_2)
        ######################
        try:
            # centers are (x, y) tuples when a contour was found,
            # 0 otherwise -- the unpack below raises on 0 and lands
            # in the except branch.
            x_1, y_1 = center_1
            x_2, y_2 = center_2
            # Draw a line between the two points.
            # cv2.line(frame, center_1, (x_2, y_2+box_2_position),
            #          (0, 255, 255), 1)
            # midpoint between the two band centers (in full-frame coords)
            c_x = int(40 + (box_2_position/2))
            c_y = 0
            if x_1 >= x_2:
                c_y = x_1 - x_2
                c_y = int(x_2 + (c_y/2))
            else:
                c_y = x_2 - x_1
                c_y = int(x_1 + (c_y/2))
            cv2.circle(frame, (c_y, c_x), 10, (0, 255, 0), 2)
            data[0] = x_1
            data[1] = c_y
            data[2] = x_2
            print(data)
        # NOTE(review): bare except hides real errors (typo: "someting");
        # narrow to TypeError in a fix pass.
        except:
            print('someting bad happened')
    ##################################
    # drive the bot
    # TODO FROM HERE
    ##################################
    # frame counter: refresh the fps figure once per second
    if (time.time() - start_time) >= 1:
        fps = frame_counter
        start_time = time.time()
        data[3] = frame_counter
        frame_counter = 0
    print_fps(frame, fps)
    if settings['display_on_screen']:
        cv2.imshow('frame', frame)
        cv2.imshow('crop_mask_1', crop_mask_1)
    # quit on 'q'
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# release camera and windows on exit
cap.release()
cv2.destroyAllWindows()
| [
"cv2.GaussianBlur",
"cv2.boundingRect",
"cv2.circle",
"cv2.minEnclosingCircle",
"cv2.cvtColor",
"cv2.morphologyEx",
"cv2.threshold",
"cv2.waitKey",
"numpy.zeros",
"numpy.ones",
"cv2.imshow",
"time.time",
"cv2.VideoCapture",
"time.sleep",
"cv2.useOptimized",
"cv2.rectangle",
"cv2.dest... | [((78, 97), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (94, 97), False, 'import cv2\n'), ((445, 467), 'numpy.zeros', 'np.zeros', (['(4)'], {'dtype': 'int'}), '(4, dtype=int)\n', (453, 467), True, 'import numpy as np\n'), ((681, 694), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (691, 694), False, 'import time\n'), ((903, 914), 'time.time', 'time.time', ([], {}), '()\n', (912, 914), False, 'import time\n'), ((7557, 7580), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7578, 7580), False, 'import cv2\n'), ((519, 537), 'cv2.useOptimized', 'cv2.useOptimized', ([], {}), '()\n', (535, 537), False, 'import cv2\n'), ((1398, 1445), 'cv2.cvtColor', 'cv2.cvtColor', (['cropped_frame', 'cv2.COLOR_BGR2GRAY'], {}), '(cropped_frame, cv2.COLOR_BGR2GRAY)\n', (1410, 1445), False, 'import cv2\n'), ((1457, 1490), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', '(5, 5)', '(0)'], {}), '(gray, (5, 5), 0)\n', (1473, 1490), False, 'import cv2\n'), ((1560, 1639), 'cv2.threshold', 'cv2.threshold', (['blur', 'contrast_low', "settings['contrast_high']", 'cv2.THRESH_BINARY'], {}), "(blur, contrast_low, settings['contrast_high'], cv2.THRESH_BINARY)\n", (1573, 1639), False, 'import cv2\n'), ((1663, 1688), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (1670, 1688), True, 'import numpy as np\n'), ((1706, 1771), 'cv2.morphologyEx', 'cv2.morphologyEx', (['processed_cropped_image', 'cv2.MORPH_OPEN', 'kernel'], {}), '(processed_cropped_image, cv2.MORPH_OPEN, kernel)\n', (1722, 1771), False, 'import cv2\n'), ((1866, 1923), 'cv2.rectangle', 'cv2.rectangle', (['crop_color', '(0, 0)', '(d, 10)', '(0, 0, 0)', '(-1)'], {}), '(crop_color, (0, 0), (d, 10), (0, 0, 0), -1)\n', (1879, 1923), False, 'import cv2\n'), ((1928, 1985), 'cv2.rectangle', 'cv2.rectangle', (['crop_color', '(0, 70)', '(d, b)', '(0, 0, 0)', '(-1)'], {}), '(crop_color, (0, 70), (d, b), (0, 0, 0), -1)\n', (1941, 1985), False, 'import cv2\n'), 
((2017, 2085), 'cv2.findContours', 'cv2.findContours', (['crop_color', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(crop_color, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (2033, 2085), False, 'import cv2\n'), ((4393, 4424), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['contour'], {}), '(contour)\n', (4415, 4424), False, 'import cv2\n'), ((4710, 4731), 'cv2.boundingRect', 'cv2.boundingRect', (['cnt'], {}), '(cnt)\n', (4726, 4731), False, 'import cv2\n'), ((5035, 5062), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['cnt'], {}), '(cnt)\n', (5057, 5062), False, 'import cv2\n'), ((5097, 5149), 'cv2.circle', 'cv2.circle', (['cropped_frame', 'center', '(1)', '(67, 95, 0)', '(2)'], {}), '(cropped_frame, center, 1, (67, 95, 0), 2)\n', (5107, 5149), False, 'import cv2\n'), ((4765, 4850), 'cv2.rectangle', 'cv2.rectangle', (['cropped_frame', '(r_x, r_y)', '(r_x + r_w, r_y + r_h)', '(0, 255, 0)', '(2)'], {}), '(cropped_frame, (r_x, r_y), (r_x + r_w, r_y + r_h), (0, 255, 0), 2\n )\n', (4778, 4850), False, 'import cv2\n'), ((4882, 4967), 'cv2.rectangle', 'cv2.rectangle', (['cropped_frame', '(r_x, r_y)', '(r_x + r_w, r_y + r_h)', '(0, 0, 255)', '(2)'], {}), '(cropped_frame, (r_x, r_y), (r_x + r_w, r_y + r_h), (0, 0, 255), 2\n )\n', (4895, 4967), False, 'import cv2\n'), ((7267, 7278), 'time.time', 'time.time', ([], {}), '()\n', (7276, 7278), False, 'import time\n'), ((7410, 7436), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (7420, 7436), False, 'import cv2\n'), ((7445, 7483), 'cv2.imshow', 'cv2.imshow', (['"""crop_mask_1"""', 'crop_mask_1'], {}), "('crop_mask_1', crop_mask_1)\n", (7455, 7483), False, 'import cv2\n'), ((6252, 6290), 'cv2.imshow', 'cv2.imshow', (['"""crop_mask_2"""', 'crop_mask_2'], {}), "('crop_mask_2', crop_mask_2)\n", (6262, 6290), False, 'import cv2\n'), ((6825, 6874), 'cv2.circle', 'cv2.circle', (['frame', '(c_y, c_x)', '(10)', '(0, 255, 0)', '(2)'], {}), '(frame, (c_y, c_x), 10, (0, 255, 0), 2)\n', 
(6835, 6874), False, 'import cv2\n'), ((7186, 7197), 'time.time', 'time.time', ([], {}), '()\n', (7195, 7197), False, 'import time\n'), ((7492, 7506), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (7503, 7506), False, 'import cv2\n')] |
#!/opt/local/bin/python
__author__ = "<NAME>"
__date__ = "7 May 2014"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
""" This script analyzes linescans and extracts cortex thickness and density from actin/membrane linescan pairs.
The script can be run in a 'pair' mode (to analyze a single linescan pair)
or 'batch' mode (to analyze multiple directories full of linescan pairs).
The mode can be specified at the bottom ("main" function).
For batch mode:
Your parent directory should contain a file called 'dir_list.dat'
with the following information in row/column form, with only space as delimiters:
sub_dir px_size category ch_actin sigma_actin
stk_1 0.05 control 1 0.119
stk_2 0.04 siRNA 2 0.220
...
The first row must contain the column headers as shown
Definitions of input parameters:
sub_dir: The name of the sub-directory containing the linescan pairs (linescan pairs must end in '...average.dat')
px_size: The pixel size for the linescans in the given sub_dir
category: The category of the experiment in each sub_dir (can be used for plotting later)
ch_actin: The actin channel (either '1' or '2'; used for extracting cortex thickness/i_c)
sigma_actin: The sigma of the point spread function for the actin channel (used for extracting h/i_c)
Note: For the sub_dir entries in the dir_list, only those directories NOT appearing in 'completed_list_v4_1.dat' will be analyzed
Output:
In each sub-directory, a list called '.../ls_data/ls_fit_data.dat' will be created containing linescan and thickness data
-The columns are labeled according to channel number (ch1/ch2)
-delta is always the position of the peak intensity of channel 2 (ch2.x_peak) minus ch1.x_peak
In each sub-directory, plots of the linescans and the linescans with fits (if applicable) will be saved in '.../ls_plots/'
At the end, a master list of all of the data combined is be created in the parent_directory
For 'manual' mode:
When running the script, windows will pop up sequentially to request the following information:
-Channel 1 average linescan file
-Channel 2 average linescan file
-Pixel Size
-Actin Channel
-Sigma (Actin)
These parameters are defined above.
"""
import os
import math
from copy import deepcopy
from tkinter import *
from tkinter.filedialog import *
from tkinter.simpledialog import *
root = Tk()
import scipy
from scipy import optimize, stats
import pylab
import numpy as np
import utility_functions as uf
def gauss_func(p, x):
    """Gaussian function used to fit linescan peaks.

    Args:
        p (sequence of float): [a, sigma, mu, c] -- amplitude factor,
            standard deviation, mean, and constant offset.
        x (float or ndarray): position(s) at which to evaluate.

    Returns:
        float or ndarray: a / (sigma*sqrt(2*pi)) * exp(-(x-mu)**2/(2*sigma**2)) + c
    """
    a, sigma, mu, c = p #unpacks p (for readability)
    # np.exp: the scipy.exp alias was deprecated and removed in SciPy >= 1.3.
    g = a / (sigma * math.sqrt(2 * math.pi)) * np.exp(-(x - mu)**2 / (2 * sigma**2)) + c
    return g
def convolved(p, x):
    """PSF-convolved model linescan.

    Args:
        p (sequence of float): [i_in, i_c, i_out, h, x_c, sigma] --
            inner/cortex/outer intensities, cortex thickness, cortex
            center position, and PSF sigma.
        x (float or ndarray): position(s) at which intensity is evaluated.

    Returns:
        float or ndarray: intensity at x.
    """
    i_in, i_c, i_out, h, x_c, sigma = p
    # Step edges smoothed by the Gaussian PSF via the normal CDF.
    rise = (i_c - i_in) * stats.norm.cdf(x - x_c + h / 2., 0., sigma)
    fall = (i_out - i_c) * stats.norm.cdf(x - x_c - h / 2., 0., sigma)
    return i_in + rise + fall
def unconvolved(p, x):
    """Ideal (unconvolved) step-profile linescan.

    Args:
        p (sequence of float): [i_in, i_c, i_out, h, x_c].
        x (iterable of float): positions at which intensity is evaluated.

    Returns:
        ndarray: i_in left of the cortex, i_c inside
        [x_c - h/2, x_c + h/2), i_out to the right.
    """
    i_in, i_c, i_out, h, x_c = p
    pos = np.asarray(x, dtype='float')
    lo = x_c - h / 2.
    hi = x_c + h / 2.
    # Vectorized equivalent of the three-branch per-element assignment.
    return np.where(pos < lo, i_in, np.where(pos < hi, i_c, i_out))
def sort_ls_list(list):
    """Sorts a list of linescan filenames in place by their frame number.

    Filenames must contain a 'frame_<N>_' token; sorting is numeric on N.

    Args:
        list (list of str): linescan filenames to sort.
            (Parameter name kept for interface compatibility even though
            it shadows the builtin.)

    Returns:
        list: the same list object, sorted by frame number.
    """
    # Fix: `re` was never explicitly imported at module level; it only
    # happened to leak in via `from tkinter import *` on some versions.
    import re

    def find_key(line):
        return int(re.search('frame_([0-9]+)_', line).group(1))

    list.sort(key=find_key)
    return list
class Linescan():
    """Linescan object with methods to extract important parameters
    from linescans (peak position/intensity, plateau intensities, FWHM).
    """
    def __init__(self,x,i):
        """Initializes linescan and immediately extracts its parameters.
        Args:
            x (list of numbers): the position values
            i (list of numbers): the intensity values
        """
        #populate linescan position/intensity
        self.x = np.array(x,dtype='float') #position list as NumPy array of floats
        self.i = np.array(i,dtype='float') #intensity list as NumPy array of floats
        #determine a few easy parameters from position/intensity
        self.H = self.x[-1] - self.x[0]
        self.i_tot = np.trapz(self.i,self.x)
        #populate other attributes
        self.dist_to_x_in_out = 1. #specifies how far away x_in is from the peak (in um)
        self.gauss_params = None #parameter list from Gaussian fit to find peak
        self.x_peak = None #linescan peak position
        self.i_peak = None #linescan peak intensity
        self.i_in = None #intracellular intensity
        self.i_out = None #extracellular intensity
        self.max_idx = None #index of point near linescan center with highest intensity
        self.x_fit = None #position list used for peak fitting
        self.i_fit = None #intensity list used for peak fitting
        self.i_in_x_list = None #position list used to determine self.i_in
        self.i_in_i_list = None #intensity list used to determine self.i_in
        self.i_out_x_list = None #position list used to determine self.i_out
        self.i_out_i_list = None #intensity list used to determine self.i_out
        self.x_in_upper_index = None #the index at the upper end of the region where x_in is calculated
        self.x_out_lower_index = None #the index at the lower end of the region where x_out is calculated
        self.fwhm = None #full width at half-max
        #initializes linescans and determines linescan parameters
        self.extract_ls_parameters()
    def convert_px_to_um(self):
        """Multiplies list of coordinates by pixel_size.
        NOTE(review): reads self.px_size, which is never assigned anywhere
        in this class -- callers must set it before use; confirm.
        """
        self.x = np.array([a * self.px_size for a in self.x])
    def extract_ls_parameters(self):
        """Extracts intensity and position information from linescan"""
        self.get_peak()
        self.get_i_in_out()
        self.get_fwhm()
    def get_peak(self):
        """Finds the peak position and intensity of a linescan by fitting
        a Gaussian near the peak.
        """
        #restricts fitting to near the center of the linescan
        #(search window: 6 points left to 20 points right of the midpoint)
        self.max_idx = int(np.argmax(self.i[int(len(self.i)/2-6):int(len(self.i)/2+20)]) + len(self.i)/2-6)
        self.x_fit = self.x[int(self.max_idx-2):int(self.max_idx+3)]
        self.i_fit = self.i[int(self.max_idx-2):int(self.max_idx+3)]
        #picks reasonable starting values for fit
        self.i_in_guess = np.mean(self.i[:int(self.max_idx-14)])
        a = (self.i[self.max_idx] - self.i_in_guess) / 2.4
        sigma = 0.170
        mu = self.x[self.max_idx]
        b = self.i_in_guess
        #perform fit with starting values
        p0 = [a, sigma, mu, b]
        p1, success = optimize.leastsq(self.residuals_gauss,p0,
                                        args=(self.x_fit, self.i_fit),
                                        maxfev = 1000000)
        self.gauss_params = p1
        self.x_peak = p1[2]
        self.i_peak = gauss_func(p1, self.x_peak)
    def get_i_in_out(self):
        """Gets values for intracellular intensity (self.i_in) and
        extracellular intensity (self.i_out). The left of the linescan
        (nearer zero) is always assumed to be the intracellular side.
        Note: the i_in and i_out values are calculated to be the average value
        of the ten points out from the distance between the peak and position x away
        from the peak, where x is given by self.dist_to_x_in_out (defined in __init__).
        """
        x_in_upper = self.x_peak - self.dist_to_x_in_out
        x_in_upper_index = np.argmin(abs(self.x - x_in_upper))
        self.x_in_upper_index = x_in_upper_index #for use in finding total intensity for density calculation
        self.i_in_x_list = self.x[x_in_upper_index-10:x_in_upper_index]
        self.i_in_i_list = self.i[x_in_upper_index-10:x_in_upper_index]
        self.i_in = np.mean(self.i_in_i_list)
        x_out_lower = self.x_peak + self.dist_to_x_in_out
        x_out_lower_index = np.argmin(abs(self.x - x_out_lower))
        self.x_out_lower_index = x_out_lower_index #for use in finding total intensity for density calculation
        self.i_out_x_list = self.x[x_out_lower_index:x_out_lower_index+10]
        self.i_out_i_list = self.i[x_out_lower_index:x_out_lower_index+10]
        self.i_out = np.mean(self.i_out_i_list)
    def residuals_gauss(self,p,x,x_data):
        """Returns residuals for Gaussian fit of the intensity peak.
        Possible values for fit parameters are constrained to avoid
        overestimation of peak intensity.
        Args:
            p (list): fit parameters, [a, sigma, mu, c]
            x (list): position values
            x_data (list): intensity values
        Returns:
            residuals (list): residuals for fit
            -or-
            fail_array (list): in place of residuals if the fit fails
        """
        a, sigma, mu, c = p #unpacks p (for readability)
        i_peak_guess = gauss_func(p, mu)
        fail_array = np.ones(len(x)) * 99999.
        #reject sigma below the PSF scale and peaks >50% off the data maximum
        if all([sigma >= 0.1,
                abs(i_peak_guess - self.i[self.max_idx]) < 0.5 * self.i[self.max_idx]]):
            residuals = gauss_func(p,x) - x_data
            return residuals
        else:
            return fail_array
    def get_fwhm(self):
        """Calculates the full-width at half maximum (FWHM) of the linescan peak"""
        #determines half-max (between intracellular plateau and peak)
        hm = (self.i_in + self.i_peak) / 2.
        # print(hm)
        # finds points closest to hm to the left of the peak
        search = self.i[:self.max_idx]
        self.left_index = (np.abs(search - hm)).argmin()
        #bracket hm between two neighboring samples
        if hm > self.i[self.left_index]:
            self.left_index_left = deepcopy(self.left_index)
            self.left_index_right = self.left_index_left + 1
        else:
            self.left_index_right = deepcopy(self.left_index)
            self.left_index_left = self.left_index_right - 1
        #gets interpolated position (linear interpolation between 2 surrounding points)
        m_left = (self.i[self.left_index_right] - self.i[self.left_index_left]) / (self.x[self.left_index_right] - self.x[self.left_index_left])
        b_left = self.i[self.left_index_right] - m_left * self.x[self.left_index_right]
        x_fwhm_left = (hm - b_left) / m_left
        self.fwhm_left = [x_fwhm_left,hm]
        #finds point closest to hm to the right of the peak
        search = self.i[self.max_idx:]
        self.right_index = (np.abs(search - hm)).argmin() + self.max_idx
        if hm < self.i[self.right_index]:
            self.right_index_left = deepcopy(self.right_index)
            self.right_index_right = self.right_index_left + 1
        else:
            self.right_index_right = deepcopy(self.right_index)
            self.right_index_left = self.right_index_right - 1
        #gets interpolated position (linear interpolation between 2 surrounding points)
        m_right = (self.i[self.right_index_right] - self.i[self.right_index_left]) / (self.x[self.right_index_right] - self.x[self.right_index_left])
        b_right = self.i[self.right_index_right] - m_right * self.x[self.right_index_right]
        x_fwhm_right = (hm - b_right) / m_right
        self.fwhm_right = [x_fwhm_right,hm]
        self.fwhm = x_fwhm_right - x_fwhm_left
class Cortex():
    """A Class for a cortex, with actin and membrane linescans and
    methods to determine cortex thickness and density.
    """
    def __init__(self,ch1,ch2,sigma_actin,ch_actin=1):
        """Initializes linescan pairs and remaining attributes.
        Args:
            ch1 (Linescan class): the ch1 linescan
            ch2 (Linescan class): the ch2 linescan
            sigma_actin (float): the sigma of the PSF for the actin channel
        Kwargs:
            ch_actin (int): says which channel is actin (1 or 2)
        """
        self.ch1 = ch1
        self.ch2 = ch2
        self.sigma_actin = sigma_actin
        self.ch_actin = ch_actin
        self.delta = self.ch2.x_peak - self.ch1.x_peak #separation between ch2 and ch1 peaks
        #assign actin/membrane roles from ch_actin (int comparison only;
        #string "1"/"2" would fall through to None here)
        if self.ch_actin==1:
            self.actin = self.ch1
            self.memb = self.ch2
        elif self.ch_actin==2:
            self.actin = self.ch2
            self.memb = self.ch1
        else:
            self.actin = None
            self.memb = None
        self.h_max = 1. #maximum cortex thickness (for constraining fit)
        self.i_c_max = 500. #maximum cortex intensity (for constraining fit)
        self.h = None #cortex thickness (from fit)
        self.i_c = None #cortical actin intensity (from fit)
        self.density = None #cortical actin density
        self.X_c = None #background-independent center position of the cortical actin (from fit)
        self.solution = None #solution from actin cortex thickness fit
    def get_h_i_c(self):
        """ Performs fit to get cortex thickness, h, and cortex intensity, i_c
        Note: density is calculated as the difference between fitted cortex intensity
        and intracellular background, normalized by the intensity from the beginning
        of the linescan to end of the i_out calculation region
        """
        delta = abs(self.delta)
        #SET STARTING VALUES FOR ROOTS AND SOLUTIONS
        self.solution = 2e+20
        #only try fitting if the peak is higher than both i_in and i_out
        if ((self.actin.i_out - self.actin.i_peak) /
            (self.actin.i_in - self.actin.i_peak))>=0:
            #loops through several different starting values for i_c and h
            #and keeps the fit with the smallest residual norm
            for i_c_factor in np.arange(2.,3.1,0.2):
                for h_factor in np.arange(0.5, 2.1, 0.2):
                    i_c_start = self.actin.i_peak * i_c_factor
                    # NOTE(review): `/ delta*2` parses as (sigma**2/delta)*2;
                    # if sigma**2/(2*delta) was intended, parenthesize.
                    delta_start = ((self.sigma_actin**2 / delta*2) *
                                   np.log((self.actin.i_out - i_c_start) /
                                          (self.actin.i_in - i_c_start)))
                    h_start = 2 * (delta - delta_start) * h_factor
                    #performs fit
                    p0 = [h_start, i_c_start]
                    try:
                        result = optimize.leastsq(self.residuals, p0,
                                                  maxfev=100000, full_output=1)
                        solution_temp = np.sum([x**2 for x in result[2]['fvec']])
                        if solution_temp < self.solution:
                            self.solution = deepcopy(solution_temp)
                            p1 = result[0]
                    except TypeError:
                        pass
            #controls for bad fits (residual too large or fit pinned at bounds)
            #NOTE(review): p1 is unbound here if every leastsq call raised
            #TypeError -- would raise NameError; confirm intended.
            if any([self.solution>0.01,
                    p1[0] >= self.h_max - 0.001,
                    p1[1] >= self.i_c_max - 1.]):
                p1 = [None, None]
                self.h = None
                self.i_c = None
                self.density = None
                self.X_c = None
                self.solution = None
            else:
                self.h, self.i_c = p1
                #density = (fitted cortex intensity - cytoplasmic background),
                #normalized by the mean intensity up to the i_out region
                actin_ls_mean = np.mean(self.actin.i[:self.actin.x_out_lower_index+10])
                self.density = (self.i_c - self.actin.i_in) / actin_ls_mean
                self.X_c = self.memb.x_peak - self.h / 2.
    def residuals(self,p):
        """Calculates residuals for cortex linescan fit to extract cortex
        thickness and intensity values
        Args:
            p (list of floats): [thickness, cortex_intensity]
        Returns:
            residuals (list of floats): [residual1, residual2]
            -or-
            fail_array (list of floats): [1000000., 1000000.]
            (returned only if fitting fails)
        """
        fail_array = [1000000., 1000000.]
        #constrains fit and ensures log term is positive
        if all([self.h_max>p[0]>0,
                self.i_c_max>p[1]>self.actin.i_in,
                (self.actin.i_out - p[1]) / (self.actin.i_in - p[1]) > 0]):
            #X_c is the position of the center of the cortex
            #x_c is the position of the cortex peak
            X_c_try = self.memb.x_peak - p[0] / 2.
            delta_try = (self.sigma_actin**2 / p[0]) * np.log((self.actin.i_out - p[1]) / (self.actin.i_in - p[1]))
            x_c_try = X_c_try - delta_try
            i_peak_try = convolved([self.actin.i_in, p[1], self.actin.i_out, p[0], X_c_try, self.sigma_actin], x_c_try)
            #residuals are difference between calculated peak position/intensity and values from data
            residuals = [x_c_try - self.actin.x_peak, i_peak_try - self.actin.i_peak]
            return residuals
        else:
            return fail_array
    def plot_lss(self):
        """Plots linescans (raw data, plateau regions, FWHM and peak fits)"""
        fig = pylab.figure()
        ax = fig.add_subplot(1,1,1)
        #plots raw data
        pylab.plot(self.ch1.x,self.ch1.i,'go',label="Ch. 1")
        pylab.plot(self.ch2.x,self.ch2.i,'ro',label="Ch. 2")
        #plots points used for determining i_in and i_out
        pylab.plot(self.ch1.i_in_x_list,self.ch1.i_in_i_list,'yo',label=r"$i_{\rm{in}}$, $i_{\rm{out}}$")
        pylab.plot(self.ch2.i_in_x_list,self.ch2.i_in_i_list,'yo')
        pylab.plot(self.ch1.i_out_x_list,self.ch1.i_out_i_list,'yo')
        pylab.plot(self.ch2.i_out_x_list,self.ch2.i_out_i_list,'yo')
        #plots points used to calculate fwhm and shows the fwhm
        # pylab.plot(self.ch1.x[self.ch1.left_index_left],self.ch1.i[self.ch1.left_index_left],'ko',label="fwhm points")
        # pylab.plot(self.ch1.x[self.ch1.left_index_left],self.ch1.i[self.ch1.left_index_left],'ko')
        # pylab.plot(self.ch1.x[self.ch1.left_index_right],self.ch1.i[self.ch1.left_index_right],'ko')
        # pylab.plot(self.ch1.x[self.ch1.right_index_left],self.ch1.i[self.ch1.right_index_left],'ko')
        # pylab.plot(self.ch1.x[self.ch1.right_index_right],self.ch1.i[self.ch1.right_index_right],'ko')
        #
        # pylab.plot(self.ch2.x[self.ch2.left_index_left],self.ch2.i[self.ch2.left_index_left],'ko')
        # pylab.plot(self.ch2.x[self.ch2.left_index_right],self.ch2.i[self.ch2.left_index_right],'ko')
        # pylab.plot(self.ch2.x[self.ch2.right_index_left],self.ch2.i[self.ch2.right_index_left],'ko')
        # pylab.plot(self.ch2.x[self.ch2.right_index_right],self.ch2.i[self.ch2.right_index_right],'ko')
        x_fwhm1, i_fwhm1 = zip(self.ch1.fwhm_left,self.ch1.fwhm_right)
        x_fwhm2, i_fwhm2 = zip(self.ch2.fwhm_left,self.ch2.fwhm_right)
        pylab.plot(x_fwhm1, i_fwhm1,'g',ls='-',marker='x',label="fwhm")
        pylab.plot(x_fwhm2, i_fwhm2,'r',ls='-',marker='x',label='fwhm')
        # x_fwhm1 = [self.ch1.x[self.ch1.left_index],self.ch1.x[self.ch1.right_index]]
        # y_fwhm1 = (self.ch1.i[self.ch1.left_index] + self.ch1.i[self.ch1.right_index]) / 2.
        # i_fwhm1 = [y_fwhm1,y_fwhm1]
        # pylab.plot(x_fwhm1,i_fwhm1,'g-',label="fwhm")
        #
        # x_fwhm2 = [self.ch2.x[self.ch2.left_index],self.ch2.x[self.ch2.right_index]]
        # y_fwhm2 = (self.ch2.i[self.ch2.left_index] + self.ch2.i[self.ch2.right_index]) / 2.
        # i_fwhm2 = [y_fwhm2,y_fwhm2]
        # pylab.plot(x_fwhm2,i_fwhm2,'r-',label="fwhm")
        #plots gaussian fit curve
        x_gauss_fit_ch1 = np.linspace(self.ch1.x_fit[0],self.ch1.x_fit[-1],100)
        i_gauss_fit_ch1 = gauss_func(self.ch1.gauss_params,x_gauss_fit_ch1)
        pylab.plot(x_gauss_fit_ch1,i_gauss_fit_ch1,'b',label="Peak fit")
        x_gauss_fit_ch2 = np.linspace(self.ch2.x_fit[0],self.ch2.x_fit[-1],100)
        i_gauss_fit_ch2 = gauss_func(self.ch2.gauss_params,x_gauss_fit_ch2)
        pylab.plot(x_gauss_fit_ch2,i_gauss_fit_ch2,'b')
        #finish plot
        y_min, y_max = ax.get_ylim()
        #NOTE(review): this rebinds the pylab.ylim function instead of
        #calling pylab.ylim(0, y_max) -- the limit is never applied.
        pylab.ylim = (0,y_max)
        pylab.xlabel("Position ($\mu$m)")
        pylab.ylabel("Intensity (AU)")
        pylab.legend(loc='upper right')
        pylab.gcf().subplots_adjust(bottom=0.15)
    def plot_fits(self):
        """Plots linescan pair with fitted cortex thickness"""
        fig = pylab.figure()
        ax = fig.add_subplot(1,1,1)
        #color channels by role; here ch_actin may be int or string
        if self.ch_actin==1 or self.ch_actin=="1":
            color_actin = 'g'
            color_memb = 'r'
        elif self.ch_actin==2 or self.ch_actin=="2":
            color_actin = 'r'
            color_memb = 'g'
        else:
            raise ValueError("Please specify ch_actin as <<1>>, <<2>> for plotting fit!")
        #plots raw data
        pylab.plot(self.memb.x,self.memb.i,'o',color=color_memb,label="Memb. (raw)")
        pylab.plot(self.actin.x,self.actin.i,'o',color=color_actin,label="Actin (raw)")
        #plots unconvolved and extracted actin linescans from fits
        x_actin_hd = np.linspace(self.actin.x[0],self.actin.x[-1],1000)
        i_actin_unconv = unconvolved([self.actin.i_in, self.i_c,
                                      self.actin.i_out, self.h, self.X_c],
                                      x_actin_hd)
        i_actin_conv = convolved([self.actin.i_in, self.i_c,
                                  self.actin.i_out, self.h, self.X_c, self.sigma_actin],
                                  x_actin_hd)
        pylab.plot(x_actin_hd,i_actin_unconv,ls='-',color=color_actin, label='fit')
        pylab.plot(x_actin_hd,i_actin_conv,ls='--',color=color_actin, label='fit (conv.)')
        pylab.axvline(x=self.memb.x_peak, color=color_memb, ls='--', label="Memb. (peak)")
        #finishes plot
        y_min, y_max = ax.get_ylim()
        #NOTE(review): rebinds pylab.ylim instead of calling it (see plot_lss).
        pylab.ylim = (0,y_max)
        pylab.xlabel("Position ($\mu$m)")
        pylab.ylabel("Intensity (AU)")
        pylab.legend(loc='upper right')
        pylab.gcf().subplots_adjust(bottom=0.15)
def write_master_list(parent_dir, version):
    """Write a master data list in the parent directory for batch mode.

    Concatenates each subdirectory's ``ls_data/ls_data.dat`` (listed in
    ``dir_list.dat``) into one file, keeping the header row of the first
    file only.

    Args:
        parent_dir (string): path of the parent directory
        version (string): the version of the software (for naming output file)
    """
    dir_list_path = parent_dir + '/dir_list.dat'
    # First row of dir_list.dat is a header -> skip it.
    subdir_list = [_[0] for _ in uf.read_file(dir_list_path)][1:]
    master_data = []
    for idx, subdir in enumerate(subdir_list):
        data_dir = parent_dir + '/' + subdir
        data = uf.read_file(data_dir + '/ls_data/ls_data.dat')
        # Keep the per-file header row only once (from the first file).
        master_data.extend(data if idx == 0 else data[1:])
    uf.save_data_array(master_data, parent_dir + '/master_list_v%s.dat' % version)
def load_ls(ls_path, px_size=1.):
    """Load an average linescan file.

    Args:
        ls_path (str): path of the average linescan file to be loaded
        px_size (float): pixel size in microns
    Returns:
        x (numpy array): the positions (in microns)
        i (numpy array): the intensities
    """
    rows = uf.read_file(ls_path)
    # Column 0 holds pixel positions (scaled to microns), column 1 intensities.
    positions = np.array([float(row[0]) for row in rows]) * px_size
    intensities = np.array([float(row[1]) for row in rows])
    return positions, intensities
def analyze_cortex(file_ch1, file_ch2, px_size, ch_actin, sigma_actin):
    """Extract linescan parameters and cortex thickness/density
    for a pair of linescans.

    Args:
        file_ch1 (str): the filepath for the first linescan
        file_ch2 (str): the filepath for the second linescan
        px_size (float): the pixel size for the linescans (for the whole directory)
        ch_actin (int): the channel of the actin linescan (1 or 2)
        sigma_actin (float): the sigma of the PSF for the actin channel
    Returns:
        cortex (Cortex class): the cortex with associated attributes
    """
    x_ch1, i_ch1 = load_ls(file_ch1, px_size=px_size)
    x_ch2, i_ch2 = load_ls(file_ch2, px_size=px_size)
    # The x values should be the same for both linescans, so reuse channel 1's grid.
    x = deepcopy(x_ch1)
    basename = file_ch1.split('/')[-1][:-4]
    print('Analyzing file pair for:', basename)
    # Build linescan objects and the cortex model from them.
    actin = Linescan(x, i_ch1)
    memb = Linescan(x, i_ch2)
    cortex = Cortex(actin, memb, sigma_actin, ch_actin=ch_actin)
    if ch_actin in (1, 2):
        cortex.get_h_i_c()
    elif ch_actin == "None":
        pass
    else:
        raise ValueError("Please specify ch_actin as <<1>> or <<2>> for %s!" % file_ch1)
    print('h =', cortex.h)
    return cortex
def analyze_ls_pair(file_ch1, file_ch2, px_size, ch_actin, sigma_actin, version):
    """Analyze linescans to extract cortex thickness/density
    for a single linescan pair. Data and plots are generated and saved
    to a new folder with same name as file_ch1.

    Args:
        file_ch1 (str): the filepath for the first linescan
        file_ch2 (str): the filepath for the second linescan
        px_size (float): the pixel size for the linescans (for the whole directory)
        ch_actin (int): the channel of the actin linescan (1 or 2)
        sigma_actin (float): the sigma of the PSF for the actin channel
        version (str): version number (kept for interface compatibility)
    """
    from operator import attrgetter  # resolves dotted names like 'ch1.i_tot'
    # Makes directory next to file_ch1 for saving.
    save_dir = file_ch1[:-4] + '_ls_data'
    uf.make_dir(save_dir)
    # Parameters to extract from cortex data; first row doubles as the output header.
    data_to_write = [['basename', 'category',
                      'delta', 'h', 'i_c', 'density', 'X_c', 'solution',
                      'ch1.i_tot', 'ch1.H', 'ch1.x_peak', 'ch1.i_peak', 'ch1.i_in', 'ch1.i_out', 'ch1.fwhm',
                      'ch2.i_tot', 'ch2.H', 'ch2.x_peak', 'ch2.i_peak', 'ch2.i_in', 'ch2.i_out', 'ch2.fwhm'
                      ]]
    basename = file_ch1.split('/')[-1][:-4]
    category = 'pair'
    # Gets cortex and linescan data.
    cortex = analyze_cortex(file_ch1, file_ch2, px_size, ch_actin, sigma_actin)
    # Plots raw linescans.
    cortex.plot_lss()
    pylab.savefig(save_dir + "/" + basename + ".png")
    pylab.close()
    # Plots linescans with h fits, only when a thickness was actually fitted.
    if cortex.h is not None:
        cortex.plot_fits()
        pylab.savefig(save_dir + "/" + basename + "_fit.png")
        pylab.close()
    # Gets extracted linescan data; attrgetter replaces the old eval() call
    # (safer and handles the dotted attribute paths in the header row).
    data_temp = [basename, category]
    for param in data_to_write[0][2:]:
        data_temp.append(attrgetter(param)(cortex))
    data_to_write.append(data_temp)
    uf.save_data_array(data_to_write, save_dir + "/ls_data.dat")
def analyze_dir(data_dir, px_size, category, ch_actin, sigma_actin, version):
    """Analyze all linescan pairs in a directory full of linescans.

    Args:
        data_dir (str): the directory containing the linescans
        px_size (float): the pixel size for the linescans (for the whole directory)
        category (str): the category for the experiment
        ch_actin (int): the channel of the actin linescan (1 or 2)
        sigma_actin (float): the sigma of the PSF for the actin channel
        version (str): version number (for output filenames)
    """
    from operator import attrgetter  # resolves dotted names like 'ch1.i_tot'
    # Makes necessary directories in data_dir for saving.
    save_dir = data_dir + '/ls_data'
    uf.make_dir(save_dir)
    # Parameters to extract from cortex data; first row doubles as the output header.
    data_to_write = [['basename', 'category',
                      'delta', 'h', 'i_c', 'density', 'X_c', 'solution',
                      'ch1.i_tot', 'ch1.H', 'ch1.x_peak', 'ch1.i_peak', 'ch1.i_in', 'ch1.i_out', 'ch1.fwhm',
                      'ch2.i_tot', 'ch2.H', 'ch2.x_peak', 'ch2.i_peak', 'ch2.i_in', 'ch2.i_out', 'ch2.fwhm'
                      ]]
    # Gets and sorts list of average linescans.
    linescan_list = [x for x in os.listdir(data_dir) if 'average.dat' in x]
    for _ in linescan_list:
        print(_)
        print(re.search('frame' + '_([0-9]+)_', _).group(1))
    linescan_list = sort_ls_list(linescan_list)
    # After sorting, consecutive files form (ch1, ch2) pairs.
    for i in range(len(linescan_list) // 2):
        file_ch1 = data_dir + '/' + linescan_list[2 * i]
        file_ch2 = data_dir + '/' + linescan_list[2 * i + 1]
        basename = file_ch1.split('/')[-1][:-4]
        cortex = analyze_cortex(file_ch1, file_ch2, px_size, ch_actin, sigma_actin)
        # Plots raw linescans.
        cortex.plot_lss()
        pylab.savefig(save_dir + "/" + basename + ".png")
        pylab.close()
        # Plots linescans with h fits, only when a thickness was actually fitted.
        if cortex.h is not None:
            cortex.plot_fits()
            pylab.savefig(save_dir + "/" + basename + "_fit.png")
            pylab.close()
        # Gets extracted linescan data; attrgetter replaces the old eval() call.
        data_temp = [basename, category]
        for param in data_to_write[0][2:]:
            data_temp.append(attrgetter(param)(cortex))
        data_to_write.append(data_temp)
    uf.save_data_array(data_to_write, save_dir + "/ls_data.dat")
def main():
    """Entry point: interactively run pairwise or batch linescan analysis."""
    version = '5'
    # Hide the Tk root window; only the dialogs below are shown.
    root.withdraw()
    # Ask which analysis mode to run (1 = single pair, 2 = batch over dir_list.dat).
    mode = askinteger(title="Analysis Mode Selection",
                      prompt="Please enter:\n1 for pairwise analysis or\n2 for batch analysis",
                      minvalue=1, maxvalue=2)
    if mode == 1:
        ch1_path = askopenfilename(title='Select an average linescan file for channel 1',
                                    filetypes=[("dat", "*.dat")],
                                    initialdir='.',
                                    initialfile="")
        # Start channel-2 browsing next to the channel-1 selection.
        ch2_path = askopenfilename(title='Select an average linescan file for channel 2',
                                    filetypes=[("dat", "*.dat")],
                                    initialdir='/'.join(ch1_path.split('/')[:-1]),
                                    initialfile=ch1_path.split('/')[-1])
        px_size = askfloat(title='Pixel Size', prompt='Please enter your pixel size')
        ch_actin = askinteger(title='Actin Channel', prompt='Please enter the actin channel',
                              minvalue=1, maxvalue=2)
        sigma_actin = askfloat(title='Actin Sigma', prompt='Please enter the sigma value\nfor the PSF for the actin channel\n(in microns)')
        analyze_ls_pair(ch1_path, ch2_path, px_size, ch_actin, sigma_actin, version)
    elif mode == 2:
        parent_dir = askdirectory(title='Select the parent directory (be sure it contains dir_list.dat!)',
                                  initialdir=os.path.split(os.path.realpath(__file__))[0])
        dir_list = uf.get_dict_list(uf.read_file(parent_dir + '/dir_list.dat'))
        # Each row of dir_list.dat describes one subdirectory's settings.
        for entry in dir_list:
            sub_dir = entry['sub_dir']
            px_size = float(entry['px_size'])
            category = entry['category']
            ch_actin = int(entry['ch_actin'])
            sigma_actin = float(entry['sigma_actin'])
            data_dir = parent_dir + '/' + sub_dir
            print(data_dir)
            analyze_dir(data_dir, px_size, category, ch_actin, sigma_actin, version)
        write_master_list(parent_dir, version)
# Run the interactive analysis only when executed as a script.
if __name__ == '__main__':
    main()
| [
"pylab.close",
"numpy.abs",
"numpy.sum",
"utility_functions.save_data_array",
"scipy.optimize.leastsq",
"numpy.mean",
"pylab.figure",
"numpy.arange",
"pylab.gcf",
"scipy.exp",
"pylab.ylabel",
"scipy.stats.norm.cdf",
"utility_functions.read_file",
"numpy.linspace",
"pylab.xlabel",
"pyla... | [((23141, 23219), 'utility_functions.save_data_array', 'uf.save_data_array', (['master_data', "(parent_dir + '/master_list_v%s.dat' % version)"], {}), "(master_data, parent_dir + '/master_list_v%s.dat' % version)\n", (23159, 23219), True, 'import utility_functions as uf\n'), ((23540, 23561), 'utility_functions.read_file', 'uf.read_file', (['ls_path'], {}), '(ls_path)\n', (23552, 23561), True, 'import utility_functions as uf\n'), ((24514, 24529), 'copy.deepcopy', 'deepcopy', (['x_ch1'], {}), '(x_ch1)\n', (24522, 24529), False, 'from copy import deepcopy\n'), ((25783, 25804), 'utility_functions.make_dir', 'uf.make_dir', (['save_dir'], {}), '(save_dir)\n', (25794, 25804), True, 'import utility_functions as uf\n'), ((26464, 26513), 'pylab.savefig', 'pylab.savefig', (["(save_dir + '/' + basename + '.png')"], {}), "(save_dir + '/' + basename + '.png')\n", (26477, 26513), False, 'import pylab\n'), ((26518, 26531), 'pylab.close', 'pylab.close', ([], {}), '()\n', (26529, 26531), False, 'import pylab\n'), ((26934, 26994), 'utility_functions.save_data_array', 'uf.save_data_array', (['data_to_write', "(save_dir + '/ls_data.dat')"], {}), "(data_to_write, save_dir + '/ls_data.dat')\n", (26952, 26994), True, 'import utility_functions as uf\n'), ((27587, 27608), 'utility_functions.make_dir', 'uf.make_dir', (['save_dir'], {}), '(save_dir)\n', (27598, 27608), True, 'import utility_functions as uf\n'), ((29221, 29281), 'utility_functions.save_data_array', 'uf.save_data_array', (['data_to_write', "(save_dir + '/ls_data.dat')"], {}), "(data_to_write, save_dir + '/ls_data.dat')\n", (29239, 29281), True, 'import utility_functions as uf\n'), ((4676, 4702), 'numpy.array', 'np.array', (['x'], {'dtype': '"""float"""'}), "(x, dtype='float')\n", (4684, 4702), True, 'import numpy as np\n'), ((4759, 4785), 'numpy.array', 'np.array', (['i'], {'dtype': '"""float"""'}), "(i, dtype='float')\n", (4767, 4785), True, 'import numpy as np\n'), ((4953, 4977), 'numpy.trapz', 'np.trapz', 
(['self.i', 'self.x'], {}), '(self.i, self.x)\n', (4961, 4977), True, 'import numpy as np\n'), ((6381, 6427), 'numpy.array', 'np.array', (['[(a * self.px_size) for a in self.x]'], {}), '([(a * self.px_size) for a in self.x])\n', (6389, 6427), True, 'import numpy as np\n'), ((7424, 7517), 'scipy.optimize.leastsq', 'optimize.leastsq', (['self.residuals_gauss', 'p0'], {'args': '(self.x_fit, self.i_fit)', 'maxfev': '(1000000)'}), '(self.residuals_gauss, p0, args=(self.x_fit, self.i_fit),\n maxfev=1000000)\n', (7440, 7517), False, 'from scipy import optimize, stats\n'), ((8599, 8624), 'numpy.mean', 'np.mean', (['self.i_in_i_list'], {}), '(self.i_in_i_list)\n', (8606, 8624), True, 'import numpy as np\n'), ((9031, 9057), 'numpy.mean', 'np.mean', (['self.i_out_i_list'], {}), '(self.i_out_i_list)\n', (9038, 9057), True, 'import numpy as np\n'), ((17456, 17470), 'pylab.figure', 'pylab.figure', ([], {}), '()\n', (17468, 17470), False, 'import pylab\n'), ((17540, 17595), 'pylab.plot', 'pylab.plot', (['self.ch1.x', 'self.ch1.i', '"""go"""'], {'label': '"""Ch. 1"""'}), "(self.ch1.x, self.ch1.i, 'go', label='Ch. 1')\n", (17550, 17595), False, 'import pylab\n'), ((17601, 17656), 'pylab.plot', 'pylab.plot', (['self.ch2.x', 'self.ch2.i', '"""ro"""'], {'label': '"""Ch. 2"""'}), "(self.ch2.x, self.ch2.i, 'ro', label='Ch. 
2')\n", (17611, 17656), False, 'import pylab\n'), ((17721, 17827), 'pylab.plot', 'pylab.plot', (['self.ch1.i_in_x_list', 'self.ch1.i_in_i_list', '"""yo"""'], {'label': '"""$i_{\\\\rm{in}}$, $i_{\\\\rm{out}}$"""'}), "(self.ch1.i_in_x_list, self.ch1.i_in_i_list, 'yo', label=\n '$i_{\\\\rm{in}}$, $i_{\\\\rm{out}}$')\n", (17731, 17827), False, 'import pylab\n'), ((17827, 17887), 'pylab.plot', 'pylab.plot', (['self.ch2.i_in_x_list', 'self.ch2.i_in_i_list', '"""yo"""'], {}), "(self.ch2.i_in_x_list, self.ch2.i_in_i_list, 'yo')\n", (17837, 17887), False, 'import pylab\n'), ((17894, 17956), 'pylab.plot', 'pylab.plot', (['self.ch1.i_out_x_list', 'self.ch1.i_out_i_list', '"""yo"""'], {}), "(self.ch1.i_out_x_list, self.ch1.i_out_i_list, 'yo')\n", (17904, 17956), False, 'import pylab\n'), ((17963, 18025), 'pylab.plot', 'pylab.plot', (['self.ch2.i_out_x_list', 'self.ch2.i_out_i_list', '"""yo"""'], {}), "(self.ch2.i_out_x_list, self.ch2.i_out_i_list, 'yo')\n", (17973, 18025), False, 'import pylab\n'), ((19196, 19263), 'pylab.plot', 'pylab.plot', (['x_fwhm1', 'i_fwhm1', '"""g"""'], {'ls': '"""-"""', 'marker': '"""x"""', 'label': '"""fwhm"""'}), "(x_fwhm1, i_fwhm1, 'g', ls='-', marker='x', label='fwhm')\n", (19206, 19263), False, 'import pylab\n'), ((19268, 19335), 'pylab.plot', 'pylab.plot', (['x_fwhm2', 'i_fwhm2', '"""r"""'], {'ls': '"""-"""', 'marker': '"""x"""', 'label': '"""fwhm"""'}), "(x_fwhm2, i_fwhm2, 'r', ls='-', marker='x', label='fwhm')\n", (19278, 19335), False, 'import pylab\n'), ((19954, 20009), 'numpy.linspace', 'np.linspace', (['self.ch1.x_fit[0]', 'self.ch1.x_fit[-1]', '(100)'], {}), '(self.ch1.x_fit[0], self.ch1.x_fit[-1], 100)\n', (19965, 20009), True, 'import numpy as np\n'), ((20092, 20159), 'pylab.plot', 'pylab.plot', (['x_gauss_fit_ch1', 'i_gauss_fit_ch1', '"""b"""'], {'label': '"""Peak fit"""'}), "(x_gauss_fit_ch1, i_gauss_fit_ch1, 'b', label='Peak fit')\n", (20102, 20159), False, 'import pylab\n'), ((20184, 20239), 'numpy.linspace', 'np.linspace', 
(['self.ch2.x_fit[0]', 'self.ch2.x_fit[-1]', '(100)'], {}), '(self.ch2.x_fit[0], self.ch2.x_fit[-1], 100)\n', (20195, 20239), True, 'import numpy as np\n'), ((20322, 20371), 'pylab.plot', 'pylab.plot', (['x_gauss_fit_ch2', 'i_gauss_fit_ch2', '"""b"""'], {}), "(x_gauss_fit_ch2, i_gauss_fit_ch2, 'b')\n", (20332, 20371), False, 'import pylab\n'), ((20469, 20503), 'pylab.xlabel', 'pylab.xlabel', (['"""Position ($\\\\mu$m)"""'], {}), "('Position ($\\\\mu$m)')\n", (20481, 20503), False, 'import pylab\n'), ((20511, 20541), 'pylab.ylabel', 'pylab.ylabel', (['"""Intensity (AU)"""'], {}), "('Intensity (AU)')\n", (20523, 20541), False, 'import pylab\n'), ((20550, 20581), 'pylab.legend', 'pylab.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (20562, 20581), False, 'import pylab\n'), ((20735, 20749), 'pylab.figure', 'pylab.figure', ([], {}), '()\n', (20747, 20749), False, 'import pylab\n'), ((21146, 21231), 'pylab.plot', 'pylab.plot', (['self.memb.x', 'self.memb.i', '"""o"""'], {'color': 'color_memb', 'label': '"""Memb. (raw)"""'}), "(self.memb.x, self.memb.i, 'o', color=color_memb, label='Memb. 
(raw)'\n )\n", (21156, 21231), False, 'import pylab\n'), ((21231, 21319), 'pylab.plot', 'pylab.plot', (['self.actin.x', 'self.actin.i', '"""o"""'], {'color': 'color_actin', 'label': '"""Actin (raw)"""'}), "(self.actin.x, self.actin.i, 'o', color=color_actin, label=\n 'Actin (raw)')\n", (21241, 21319), False, 'import pylab\n'), ((21400, 21452), 'numpy.linspace', 'np.linspace', (['self.actin.x[0]', 'self.actin.x[-1]', '(1000)'], {}), '(self.actin.x[0], self.actin.x[-1], 1000)\n', (21411, 21452), True, 'import numpy as np\n'), ((21848, 21926), 'pylab.plot', 'pylab.plot', (['x_actin_hd', 'i_actin_unconv'], {'ls': '"""-"""', 'color': 'color_actin', 'label': '"""fit"""'}), "(x_actin_hd, i_actin_unconv, ls='-', color=color_actin, label='fit')\n", (21858, 21926), False, 'import pylab\n'), ((21932, 22022), 'pylab.plot', 'pylab.plot', (['x_actin_hd', 'i_actin_conv'], {'ls': '"""--"""', 'color': 'color_actin', 'label': '"""fit (conv.)"""'}), "(x_actin_hd, i_actin_conv, ls='--', color=color_actin, label=\n 'fit (conv.)')\n", (21942, 22022), False, 'import pylab\n'), ((22024, 22111), 'pylab.axvline', 'pylab.axvline', ([], {'x': 'self.memb.x_peak', 'color': 'color_memb', 'ls': '"""--"""', 'label': '"""Memb. (peak)"""'}), "(x=self.memb.x_peak, color=color_memb, ls='--', label=\n 'Memb. 
(peak)')\n", (22037, 22111), False, 'import pylab\n'), ((22208, 22242), 'pylab.xlabel', 'pylab.xlabel', (['"""Position ($\\\\mu$m)"""'], {}), "('Position ($\\\\mu$m)')\n", (22220, 22242), False, 'import pylab\n'), ((22250, 22280), 'pylab.ylabel', 'pylab.ylabel', (['"""Intensity (AU)"""'], {}), "('Intensity (AU)')\n", (22262, 22280), False, 'import pylab\n'), ((22289, 22320), 'pylab.legend', 'pylab.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (22301, 22320), False, 'import pylab\n'), ((22887, 22934), 'utility_functions.read_file', 'uf.read_file', (["(data_dir + '/ls_data/ls_data.dat')"], {}), "(data_dir + '/ls_data/ls_data.dat')\n", (22899, 22934), True, 'import utility_functions as uf\n'), ((26627, 26680), 'pylab.savefig', 'pylab.savefig', (["(save_dir + '/' + basename + '_fit.png')"], {}), "(save_dir + '/' + basename + '_fit.png')\n", (26640, 26680), False, 'import pylab\n'), ((26689, 26702), 'pylab.close', 'pylab.close', ([], {}), '()\n', (26700, 26702), False, 'import pylab\n'), ((28710, 28759), 'pylab.savefig', 'pylab.savefig', (["(save_dir + '/' + basename + '.png')"], {}), "(save_dir + '/' + basename + '.png')\n", (28723, 28759), False, 'import pylab\n'), ((28768, 28781), 'pylab.close', 'pylab.close', ([], {}), '()\n', (28779, 28781), False, 'import pylab\n'), ((2703, 2747), 'scipy.exp', 'scipy.exp', (['(-(x - mu) ** 2 / (2 * sigma ** 2))'], {}), '(-(x - mu) ** 2 / (2 * sigma ** 2))\n', (2712, 2747), False, 'import scipy\n'), ((3234, 3279), 'scipy.stats.norm.cdf', 'stats.norm.cdf', (['(x - x_c - h / 2.0)', '(0.0)', 'sigma'], {}), '(x - x_c - h / 2.0, 0.0, sigma)\n', (3248, 3279), False, 'from scipy import optimize, stats\n'), ((10427, 10452), 'copy.deepcopy', 'deepcopy', (['self.left_index'], {}), '(self.left_index)\n', (10435, 10452), False, 'from copy import deepcopy\n'), ((10564, 10589), 'copy.deepcopy', 'deepcopy', (['self.left_index'], {}), '(self.left_index)\n', (10572, 10589), False, 'from copy import deepcopy\n'), ((11312, 
11338), 'copy.deepcopy', 'deepcopy', (['self.right_index'], {}), '(self.right_index)\n', (11320, 11338), False, 'from copy import deepcopy\n'), ((11453, 11479), 'copy.deepcopy', 'deepcopy', (['self.right_index'], {}), '(self.right_index)\n', (11461, 11479), False, 'from copy import deepcopy\n'), ((14314, 14338), 'numpy.arange', 'np.arange', (['(2.0)', '(3.1)', '(0.2)'], {}), '(2.0, 3.1, 0.2)\n', (14323, 14338), True, 'import numpy as np\n'), ((28097, 28117), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (28107, 28117), False, 'import os\n'), ((28893, 28946), 'pylab.savefig', 'pylab.savefig', (["(save_dir + '/' + basename + '_fit.png')"], {}), "(save_dir + '/' + basename + '_fit.png')\n", (28906, 28946), False, 'import pylab\n'), ((28959, 28972), 'pylab.close', 'pylab.close', ([], {}), '()\n', (28970, 28972), False, 'import pylab\n'), ((31000, 31042), 'utility_functions.read_file', 'uf.read_file', (["(parent_dir + '/dir_list.dat')"], {}), "(parent_dir + '/dir_list.dat')\n", (31012, 31042), True, 'import utility_functions as uf\n'), ((3161, 3206), 'scipy.stats.norm.cdf', 'stats.norm.cdf', (['(x - x_c + h / 2.0)', '(0.0)', 'sigma'], {}), '(x - x_c + h / 2.0, 0.0, sigma)\n', (3175, 3206), False, 'from scipy import optimize, stats\n'), ((10321, 10340), 'numpy.abs', 'np.abs', (['(search - hm)'], {}), '(search - hm)\n', (10327, 10340), True, 'import numpy as np\n'), ((14369, 14393), 'numpy.arange', 'np.arange', (['(0.5)', '(2.1)', '(0.2)'], {}), '(0.5, 2.1, 0.2)\n', (14378, 14393), True, 'import numpy as np\n'), ((15792, 15849), 'numpy.mean', 'np.mean', (['self.actin.i[:self.actin.x_out_lower_index + 10]'], {}), '(self.actin.i[:self.actin.x_out_lower_index + 10])\n', (15799, 15849), True, 'import numpy as np\n'), ((16900, 16960), 'numpy.log', 'np.log', (['((self.actin.i_out - p[1]) / (self.actin.i_in - p[1]))'], {}), '((self.actin.i_out - p[1]) / (self.actin.i_in - p[1]))\n', (16906, 16960), True, 'import numpy as np\n'), ((20590, 20601), 'pylab.gcf', 
'pylab.gcf', ([], {}), '()\n', (20599, 20601), False, 'import pylab\n'), ((22329, 22340), 'pylab.gcf', 'pylab.gcf', ([], {}), '()\n', (22338, 22340), False, 'import pylab\n'), ((22726, 22753), 'utility_functions.read_file', 'uf.read_file', (['dir_list_path'], {}), '(dir_list_path)\n', (22738, 22753), True, 'import utility_functions as uf\n'), ((2677, 2699), 'math.sqrt', 'math.sqrt', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (2686, 2699), False, 'import math\n'), ((11189, 11208), 'numpy.abs', 'np.abs', (['(search - hm)'], {}), '(search - hm)\n', (11195, 11208), True, 'import numpy as np\n'), ((14563, 14633), 'numpy.log', 'np.log', (['((self.actin.i_out - i_c_start) / (self.actin.i_in - i_c_start))'], {}), '((self.actin.i_out - i_c_start) / (self.actin.i_in - i_c_start))\n', (14569, 14633), True, 'import numpy as np\n'), ((14884, 14950), 'scipy.optimize.leastsq', 'optimize.leastsq', (['self.residuals', 'p0'], {'maxfev': '(100000)', 'full_output': '(1)'}), '(self.residuals, p0, maxfev=100000, full_output=1)\n', (14900, 14950), False, 'from scipy import optimize, stats\n'), ((15042, 15087), 'numpy.sum', 'np.sum', (["[(x ** 2) for x in result[2]['fvec']]"], {}), "([(x ** 2) for x in result[2]['fvec']])\n", (15048, 15087), True, 'import numpy as np\n'), ((30895, 30921), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (30911, 30921), False, 'import os\n'), ((15187, 15210), 'copy.deepcopy', 'deepcopy', (['solution_temp'], {}), '(solution_temp)\n', (15195, 15210), False, 'from copy import deepcopy\n')] |
"""
Implement Pruners here.
"""
import numpy as np
from policies.policy import PolicyBase
from utils import (get_total_sparsity,
recompute_bn_stats,
percentile,
get_prunable_children)
import torch
import torch.optim as optim
from torch.utils.data.sampler import SubsetRandomSampler
import logging
from typing import List, Dict
from copy import deepcopy
from tqdm import tqdm
from torch.utils.data import DataLoader
import torch.nn.functional as F
import pdb
def build_pruner_from_config(model, pruner_config):
    """
    Instantiate a single pruner from its config dict (as dispatched by
    build_pruners_from_config). The 'class' entry names a pruner class in
    this module's globals; every other entry is forwarded as a keyword
    argument, so each pruner owns one parameter group sharing sparsity
    levels and pruning schedules. The suggested .yaml layout is documented
    in build_pruners_from_config.
    """
    class_name = pruner_config['class']
    # Forward everything except the class selector as constructor kwargs.
    ctor_kwargs = dict(pruner_config)
    del ctor_kwargs['class']
    return globals()[class_name](model, **ctor_kwargs)
def build_pruners_from_config(model, config):
    """
    Build every pruner declared in the run's *general* config file.

    Each entry under config['pruners'] is passed to
    build_pruner_from_config; returns [] when no 'pruners' key exists.

    Example config.yaml structure:
    >>> pruners:
    >>>   pruner_1:
    >>>     class: MagnitudePruner
    >>>     epochs: [0,2,4]        # [start, freq, end]
    >>>     weight_only: True      # prune only *.weight in listed layers
    >>>     initial_sparsity: 0.05 # starting sparsity level
    >>>     target_sparsity: 0.7   # sparsity at the end of pruning
    >>>     modules: [net.0]       # prunable module names (nn.Conv2d, nn.Linear, ...)
    >>>     keep_pruned: False     # optional; store pruned weight values
    >>>     degree: 3              # optional polynomial-schedule degree
    >>>   pruner_2:
    >>>     class: MagnitudePruner
    >>>     epochs: [0,2,4]
    >>>     weight_only: True
    >>>     initial_sparsity: 0.05
    >>>     target_sparsity: 0.8
    >>>     modules: [net.2]
    >>>     keep_pruned: False

    Optional argument:
        keep_pruned: whether pruned weight values should be stored;
        recommended value is false unless reintroduction with previous
        magnitudes is wanted.
    """
    if 'pruners' not in config:
        return []
    return [build_pruner_from_config(model, pruner_cfg)
            for pruner_cfg in config['pruners'].values()]
class Pruner(PolicyBase):
    """
    Base class for pruning policies.

    Subclasses must set ``self._modules`` (the bound prunable layers) and
    ``self._module_names`` before this constructor runs, since the methods
    below iterate over both.
    """
    def __init__(self, *args, **kwargs):
        # TODO: figure out a better initialization strategy so that we make sure these attributes are present in all descendant objects,
        # as well as a method to check that the supplied modules comply with our assumptions. Maybe it is fine this way, too.
        # the following asserts are needed because the base class relies on these attributes:
        assert hasattr(self, '_modules'), "@Pruner: make sure any Pruner has 'modules' and 'module_names' attribute"
        assert hasattr(self, '_module_names'), "@Pruner: make sure any Pruner has 'modules' and 'module_names' attribute"
        # this is needed because after_parameter_optimization method assumes this:
        # NOTE(review): `is_wrapped_layer` is not among this file's visible imports —
        # confirm it is provided elsewhere before instantiating Pruner directly.
        assert all([is_wrapped_layer(_module) for _module in self._modules]), \
            "@Pruner: currently the code assumes that you supply prunable layers' names directly in the config"
    def on_epoch_end(self, **kwargs):
        # Included for completeness, but there is nothing to close out here.
        pass
    def measure_sparsity(self, **kwargs):
        # Returns {module_name: (num_zero_params, total_params)} for every bound module.
        sparsity_dict = {}
        for _name, _module in zip(self._module_names, self._modules):
            num_zeros, num_params = get_total_sparsity(_module)
            sparsity_dict[_name] = (num_zeros, num_params)
        return sparsity_dict
    def after_parameter_optimization(self, model, **kwargs):
        """
        Currently this stage is used to mask pruned neurons within the layer's data.
        TODO: think if this is general enough to be all Pruners' method, or
        it is GradualPruners' method only.
        """
        for _module in self._modules:
            _module.apply_masks_to_data()
class GradualPruner(Pruner):
    def __init__(self, model, **kwargs):
        """
        Arguments:
            model {nn.Module}: network with wrapped modules to bound pruner
        Key arguments:
            kwargs['initial_sparsity']: initial layer sparsity
            kwargs['target_sparsity']: target sparsity for pruning end
            kwargs['weight_only']: bool, if only weights are pruned
            kwargs['epochs']: list, [start_epoch, pruning_freq, end_epoch]
            kwargs['modules']: list of module names to be pruned
            kwargs['keep_pruned']: bool, optional, store pruned values (default False)
            kwargs['degree']: float/int, optional degree to use in polynomial schedule,
                              degree == 1 stands for uniform schedule (default 3)
        """
        self._start, self._freq, self._end = kwargs['epochs']
        self._weight_only = kwargs['weight_only']
        self._initial_sparsity = kwargs['initial_sparsity']
        self._target_sparsity = kwargs['target_sparsity']
        # dict.get with defaults replaces the old `'key' in kwargs` ternaries.
        self._keep_pruned = kwargs.get('keep_pruned', False)
        self._degree = kwargs.get('degree', 3)
        self._model = model
        modules_dict = dict(self._model.named_modules())
        prefix = ''
        if isinstance(self._model, torch.nn.DataParallel):
            # DataParallel prefixes every submodule name with 'module.'.
            prefix = 'module.'
        # Unwrap user-specified modules to prune into lowest-level prunables:
        self._module_names = [prefix + _name for _name in kwargs['modules']]
        # self._module_names = [prefix + _name for _name in get_prunable_children(self._model, kwargs['modules'])]
        self._modules = [
            modules_dict[module_name] for module_name in self._module_names
        ]
        if self._keep_pruned:
            for module in self._modules:
                module.copy_pruned(True)
        logging.debug(f'Constructed {self.__class__.__name__} with config:')
        logging.debug('\n'.join([f'    -{k}:{v}' for k, v in kwargs.items()]) + '\n')

    def update_initial_sparsity(self):
        """Reset the schedule's initial sparsity to the modules' current mean sparsity."""
        parameter_sparsities = []
        for module in self._modules:
            w_sparsity, b_sparsity = module.weight_sparsity, module.bias_sparsity
            parameter_sparsities.append(w_sparsity)
            if b_sparsity is not None:
                parameter_sparsities.append(b_sparsity)
        self._initial_sparsity = np.mean(parameter_sparsities)

    @staticmethod
    def _get_param_stat(param):
        raise NotImplementedError("Implement in child class.")

    def _polynomial_schedule(self, curr_epoch):
        # Polynomial ramp: target - (target - initial) * (1 - progress)^degree.
        scale = self._target_sparsity - self._initial_sparsity
        span = self._end - self._start
        # BUG FIX: guard the degenerate one-shot schedule (start == end), which
        # previously raised ZeroDivisionError; jump straight to the target level.
        if span <= 0:
            return self._target_sparsity
        progress = min(float(curr_epoch - self._start) / span, 1.0)
        remaining_progress = (1.0 - progress) ** self._degree
        return self._target_sparsity - scale * remaining_progress

    def _required_sparsity(self, curr_epoch):
        return self._polynomial_schedule(curr_epoch)

    def _pruner_not_active(self, epoch_num):
        # Active only on every `freq`-th epoch within [start, end].
        return ((epoch_num - self._start) % self._freq != 0 or epoch_num > self._end or epoch_num < self._start)

    @staticmethod
    def _get_pruning_mask(param_stats, sparsity=None, threshold=None):
        """Return a {0,1} float mask keeping entries whose statistic exceeds the
        threshold; when `threshold` is not given it is taken as the `sparsity`
        percentile of `param_stats`."""
        if param_stats is None:
            return None
        if sparsity is None and threshold is None:
            return None
        if threshold is None:
            threshold = percentile(param_stats, sparsity)
        return (param_stats > threshold).float()
class MagnitudePruner(GradualPruner):
    """Gradual pruner that thresholds each module's weights by absolute magnitude."""

    def __init__(self, model, **kwargs):
        super(MagnitudePruner, self).__init__(model, **kwargs)

    @staticmethod
    def _get_param_stat(param, param_mask):
        # Statistic is |w| + eps, zeroed where already masked, so previously
        # pruned entries always rank below surviving ones.
        if param is None or param_mask is None:
            return None
        return (param.abs() + 1e-4) * param_mask

    def on_epoch_begin(self, epoch_num, **kwargs):
        if self._pruner_not_active(epoch_num):
            return False, {}
        # FIX: the level is identical for every module — compute it once before
        # the loop (the old code also hit a NameError on the return below
        # whenever self._modules was empty).
        level = self._required_sparsity(epoch_num)
        for module in self._modules:
            w_stat, b_stat = self._get_param_stat(module.weight, module.weight_mask),\
                             self._get_param_stat(module.bias, module.bias_mask)
            module.weight_mask, module.bias_mask = self._get_pruning_mask(w_stat, level),\
                self._get_pruning_mask(None if self._weight_only else b_stat, level)
        return True, {"level": level}
class UnstructuredMagnitudePruner(GradualPruner):
    """Gradual pruner applying ONE global magnitude threshold across all modules."""

    def __init__(self, model, **kwargs):
        super(UnstructuredMagnitudePruner, self).__init__(model, **kwargs)

    @staticmethod
    def _get_param_stat(param, param_mask):
        # Statistic is |w| + eps, zeroed where already masked.
        if param is None or param_mask is None:
            return None
        return (param.abs() + 1e-4) * param_mask

    def on_epoch_begin(self, epoch_num, device, **kwargs):
        if self._pruner_not_active(epoch_num):
            return False, {}
        level = self._required_sparsity(epoch_num)
        # BUG FIX: logging uses %-style lazy formatting; the old call
        # logging.debug("Desired sparsity level is ", level) passed `level` as a
        # format arg for a message with no placeholder.
        logging.debug("Desired sparsity level is %s", level)
        if level == 0:
            return False, {}
        # Gather the statistics of all prunable parameters into one flat vector
        # so a single global percentile threshold can be computed.
        # (torch.zeros(0, device=device) generalizes the old cuda-only branch.)
        weights = torch.zeros(0, device=device)
        for module in self._modules:
            weights = torch.cat((weights, self._get_param_stat(module.weight, module.weight_mask).view(-1)))
            if not self._weight_only and module.bias is not None:
                weights = torch.cat((weights, self._get_param_stat(module.bias, module.bias_mask).view(-1)))
        threshold = percentile(weights, level)
        for module in self._modules:
            w_stat, b_stat = self._get_param_stat(module.weight, module.weight_mask),\
                             self._get_param_stat(module.bias, module.bias_mask)
            module.weight_mask, module.bias_mask = self._get_pruning_mask(w_stat, threshold=threshold),\
                self._get_pruning_mask(None if self._weight_only else b_stat,
                                       threshold=threshold)
        return True, {"level": level}
# Implements N:M pruner for structured sparsity, as in here: https://github.com/NM-sparsity/NM-sparsity
# Paper: https://openreview.net/pdf?id=K9bw7vqp_s
class MagnitudeNMPruner(Pruner):
    """
    N:M structured-sparsity pruner: in every group of M consecutive weights the
    N largest-magnitude entries are kept and the rest are masked to zero.
    Based on https://github.com/NM-sparsity/NM-sparsity
    (paper: https://openreview.net/pdf?id=K9bw7vqp_s).
    """
    def __init__(self, model, **kwargs):
        # [start_epoch, pruning_freq, end_epoch]
        self._start, self._freq, self._end = kwargs['epochs']
        self._weight_only = kwargs['weight_only']
        # Keep N out of every M weights per group.
        self._N = kwargs['N']
        self._M = kwargs['M']
        self._model = model
        modules_dict = dict(self._model.named_modules())
        prefix = ''
        if isinstance(self._model, torch.nn.DataParallel):
            # DataParallel prefixes every submodule name with 'module.'.
            prefix = 'module.'
        # Unwrap user-specified modules to prune into lowest-level prunables:
        self._module_names = [prefix + _name for _name in kwargs['modules']]
        # self._module_names = [prefix + _name for _name in get_prunable_children(self._model, kwargs['modules'])]
        self._modules = [
            modules_dict[module_name] for module_name in self._module_names
        ]
        logging.debug(f'Constructed {self.__class__.__name__} with config:')
        logging.debug('\n'.join([f'    -{k}:{v}' for k, v in kwargs.items()]) + '\n')
    def _pruner_not_active(self, epoch_num):
        # Active only on every `freq`-th epoch within [start, end].
        return ((epoch_num - self._start) % self._freq != 0 or epoch_num > self._end or epoch_num < self._start)
    def on_epoch_begin(self, epoch_num, device, **kwargs):
        """Recompute each module's N:M weight mask; biases are left unpruned."""
        if self._pruner_not_active(epoch_num):
            return False, {}
        # Effective sparsity level reported to the caller is N/M.
        level = self._N / self._M
        for module in self._modules:
            module.bias_mask = None
            cloned_weight = module.weight.clone()
            elem_w = module.weight.numel()
            group_w = int(elem_w / self._M)
            if len(module.weight.shape)==4:
                # N:M sparsity for convolutional layers
                # Channels are permuted last before grouping, so each group of M
                # spans the input-channel dimension — presumably to match the
                # NM-sparsity reference implementation; TODO confirm.
                weight_temp = module.weight.detach().abs().permute(0, 2, 3, 1).reshape(group_w, self._M)
                # Indices of the (M - N) smallest-magnitude entries per group.
                idxs = torch.argsort(weight_temp, dim=1)[:, :int(self._M - self._N)]
                w_b = torch.ones(weight_temp.shape, device=weight_temp.device)
                # Zero the selected entries, then restore the original layout.
                w_b = w_b.scatter_(dim=1, index=idxs, value=0).reshape(cloned_weight.permute(0, 2, 3, 1).shape)
                module.weight_mask = w_b.permute(0, 3, 1, 2)
            elif len(module.weight.shape)==2:
                # N:M sparsity for linear layers
                weight_temp = module.weight.detach().abs().reshape(group_w, self._M)
                idxs = torch.argsort(weight_temp, dim=1)[:, :int(self._M - self._N)]
                w_b = torch.ones(weight_temp.shape, device=weight_temp.device)
                module.weight_mask = w_b.scatter_(dim=1, index=idxs, value=0).reshape(module.weight.shape)
            else:
                raise NotImplementedError("Only support layers of dimension 2 or 4")
        return True, {"level": level}
class TrustRegionMagnitudePruner(GradualPruner):
    """Magnitude pruner that additionally records 'trust-region' diagnostics:
    the norm of the smallest-magnitude tail of each layer's surviving weights.
    """
    def __init__(self, model, **kwargs):
        super(TrustRegionMagnitudePruner, self).__init__(model, **kwargs)

    @staticmethod
    def _get_param_stat(param, param_mask):
        """Masked |w| saliency; the eps keeps unpruned zeros above pruned entries."""
        if param is None or param_mask is None: return None
        return (param.abs() + 1e-4) * param_mask

    def _get_meta(self):
        """Per-layer diagnostics: norm of the bottom-`sp` fraction of surviving
        |w| for several sp values, plus the masked weights themselves."""
        meta = {'bottom magnitudes': {}, 'weights': {}}
        for idx, module in enumerate(self._modules):
            # .bool() instead of the deprecated uint8 (.byte()) mask indexing.
            weight = module.weight[module.weight_mask.bool()].abs()
            for sp in [0.05, 0.1, 0.2, 0.3, 0.4, 0.5]:
                threshold = percentile(weight, sp)
                val = (weight * (weight <= threshold).float()).norm()
                meta['bottom magnitudes'][self._module_names[idx] + f'_{sp}'] = val
            meta['weights'][self._module_names[idx]] = module.weight * module.weight_mask
        return meta

    def on_epoch_begin(self, epoch_num, **kwargs):
        """Recompute magnitude masks; diagnostics are returned even when inactive."""
        meta = self._get_meta()
        level = self._required_sparsity(epoch_num)
        if self._pruner_not_active(epoch_num):
            return False, meta
        for idx, module in enumerate(self._modules):
            w_stat, b_stat = self._get_param_stat(module.weight, module.weight_mask),\
                             self._get_param_stat(module.bias, module.bias_mask)
            module.weight_mask, module.bias_mask = self._get_pruning_mask(w_stat, level),\
                self._get_pruning_mask(None if self._weight_only else b_stat, level)
        return True, meta
class FisherPruner(GradualPruner):
    """Prunes by an empirical-Fisher saliency grad^2 * w^2, where the squared
    gradients are accumulated into .grad via temporary backward hooks.
    """
    def __init__(self, model, **kwargs):
        super(FisherPruner, self).__init__(model, **kwargs)
    @staticmethod
    def _get_param_stat(param, param_mask):
        # `param.grad` holds the accumulated squared gradients installed by the
        # hooks in _compute_avg_sum_grad_squared; eps keeps survivors positive.
        if param is None or param_mask is None: return None
        return (param.grad * param ** 2 + 1e-4) * param_mask
    def _release_grads(self):
        optim.SGD(self._model.parameters(), lr=1e-10).zero_grad() # Yeah, I know but don't want to do it manually
    def _compute_avg_sum_grad_squared(self, dset, subset_inds, device, num_workers):
        """Accumulate grad^2/(2N) into each parameter's .grad over the subset.

        The hooks square every incoming gradient before autograd adds it to
        .grad, so after the loop .grad holds the averaged squared gradient.
        Hooks are removed again before returning.
        """
        self._release_grads()
        tmp_hooks, N = [], len(subset_inds) #len(dset)
        for module in self._modules:
            tmp_hooks.append(module.weight.register_hook(lambda grad: grad ** 2 / (2 * N)))
            if module.bias is not None:
                tmp_hooks.append(module.bias.register_hook(lambda grad: grad ** 2 / (2 * N)))
        # batch_size=1: per-example gradients, matching the Fisher definition.
        dummy_loader = torch.utils.data.DataLoader(dset, batch_size=1, num_workers=num_workers,
                                                   sampler=SubsetRandomSampler(subset_inds))
        for in_tensor, target in dummy_loader:
            in_tensor, target = in_tensor.to(device), target.to(device)
            output = self._model(in_tensor)
            loss = torch.nn.functional.cross_entropy(output, target)
            loss.backward()
        for hook in tmp_hooks:
            hook.remove()
    def on_epoch_begin(self, dset, subset_inds, device, num_workers, epoch_num, **kwargs):
        """Recompute masks from the Fisher statistic; returns (changed?, meta)."""
        meta = {}
        if self._pruner_not_active(epoch_num):
            return False, {}
        self._compute_avg_sum_grad_squared(dset, subset_inds, device, num_workers)
        for module in self._modules:
            level = self._required_sparsity(epoch_num)
            w_stat, b_stat = self._get_param_stat(module.weight, module.weight_mask),\
                             self._get_param_stat(module.bias, module.bias_mask)
            module.weight_mask, module.bias_mask = self._get_pruning_mask(w_stat, level),\
                                   self._get_pruning_mask(None if self._weight_only else b_stat, level)
        self._release_grads()
        return True, meta
class SNIPPruner(GradualPruner):
    """SNIP-style pruner: saliency is the mask gradient scaled by the
    normalized weight magnitude, computed from a forward/backward pass over a
    data subset.
    """
    def __init__(self, model, **kwargs):
        super(SNIPPruner, self).__init__(model, **kwargs)

    @staticmethod
    def _get_param_stat(param, param_mask):
        """Normalized-magnitude saliency, masked; None if either input is missing.

        Bug fix: the original tested `param is None and param_mask is None`,
        which crashes on `param.abs()` when only `param` is None; every sibling
        pruner uses `or`.
        """
        if param is None or param_mask is None: return None
        return (param.abs() / param.abs().sum() + 1e-4) * param_mask

    def _release_grads(self):
        # Cheap way to zero all gradients without holding a real optimizer.
        optim.SGD(self._model.parameters(), lr=1e-10).zero_grad()

    def _compute_mask_grads(self, dset, subset_inds, device, num_workers, batch_size):
        """Accumulate gradients (including mask gradients) over the subset."""
        self._release_grads()
        dummy_loader = torch.utils.data.DataLoader(dset, batch_size=batch_size, num_workers=num_workers,
                                                   sampler=SubsetRandomSampler(subset_inds))
        for in_tensor, target in dummy_loader:
            in_tensor, target = in_tensor.to(device), target.to(device)
            output = self._model(in_tensor)
            loss = torch.nn.functional.cross_entropy(output, target)
            loss.backward()

    def on_epoch_begin(self, dset, subset_inds, device, num_workers, batch_size, epoch_num, **kwargs):
        """Recompute masks from the mask gradients; returns (changed?, meta)."""
        meta = {}
        if self._pruner_not_active(epoch_num):
            return False, {}
        self._compute_mask_grads(dset, subset_inds, device, num_workers, batch_size)
        for module in self._modules:
            level = self._required_sparsity(epoch_num)
            w_stat, b_stat = self._get_param_stat(module.weight_mask_grad, module.weight_mask),\
                             self._get_param_stat(module.bias_mask_grad, module.bias_mask)
            module.weight_mask, module.bias_mask = self._get_pruning_mask(w_stat, level),\
                self._get_pruning_mask(None if self._weight_only else b_stat, level)
        self._release_grads()
        return True, meta
class NaiveHessianPruner(GradualPruner):
    """Prunes by a naive second-order saliency w^2 * H_ii, where H_ii is the
    exact diagonal of the loss Hessian computed one coordinate at a time
    (O(#params) backward passes — only practical for tiny models / analysis).
    """
    def __init__(self, model, **kwargs):
        super(NaiveHessianPruner, self).__init__(model, **kwargs)

    @staticmethod
    def _get_param_stat(param, param_mask):
        """Masked w^2 * H_diag, shifted so every entry is strictly positive."""
        if param is None or param_mask is None: return None
        # Statistic can be negative and zeros break the sparsity-level
        # computation, so subtract (min - eps) before masking.
        param_stat = param.pow(2).mul(param.hess_diag.view_as(param))
        return (param_stat - param_stat.min() + 1e-8) * param_mask
        # param_stat = param.pow(2).mul(param.hess_diag).abs()
        # return (param_stat + 1e-4) * param_mask

    def _release_grads(self):
        optim.SGD(self._model.parameters(), lr=1e-10).zero_grad()  # Yeah, I know but don't want to do it manually

    def _add_hess_attr(self):
        self._release_grads()
        for param in self._model.parameters():
            # Allocate on the parameter's device: the per-coordinate updates in
            # _compute_second_derivatives are produced on-device, and the
            # original CPU allocation fails for CUDA models.
            setattr(param, 'hess_diag', torch.zeros(param.numel(), device=param.device))

    def _del_hess_attr(self):
        self._release_grads()
        for param in self._model.parameters():
            delattr(param, 'hess_diag')

    def _compute_second_derivatives(self):
        # One autograd.grad call per scalar parameter: exact but extremely slow.
        for module in self._modules:
            for param in module.parameters():
                for i in tqdm(range(param.grad.numel())):
                    param.hess_diag[i] += torch.autograd.grad(param.grad.view(-1)[i], param,
                                                            retain_graph=True)[0].view(-1)[i]

    def _compute_diag_hessian(self, dset, subset_inds, device, num_workers, batch_size):
        """Build the averaged loss with create_graph=True, then extract H_ii."""
        dummy_loader = torch.utils.data.DataLoader(dset, batch_size=batch_size, num_workers=num_workers,
                                                   sampler=SubsetRandomSampler(subset_inds))
        loss = 0.
        for in_tensor, target in tqdm(dummy_loader):
            in_tensor, target = in_tensor.to(device), target.to(device)
            output = self._model(in_tensor)
            loss += torch.nn.functional.cross_entropy(output, target, reduction='sum') / len(dummy_loader.dataset)
        # Single backward over the accumulated loss; create_graph keeps the
        # graph alive for the second-derivative passes.
        loss.backward(create_graph=True)
        self._compute_second_derivatives()
        self._release_grads()

    def on_epoch_begin(self, dset, subset_inds, device, num_workers, batch_size, epoch_num, **kwargs):
        """Recompute masks from w^2 * H_ii.

        Meta reports, per module, the fraction of negative Hessian-diagonal
        entries (a non-convexity indicator) as (negatives, total) pairs.
        """
        meta = {
            'hess_diag_negatives': {}
        }
        if self._pruner_not_active(epoch_num):
            return False, {}
        self._add_hess_attr()
        self._compute_diag_hessian(dset, subset_inds, device, num_workers, batch_size)
        for idx, module in enumerate(self._modules):
            level = self._required_sparsity(epoch_num)
            w_stat, b_stat = self._get_param_stat(module.weight, module.weight_mask),\
                             self._get_param_stat(module.bias, module.bias_mask)
            module.weight_mask, module.bias_mask = self._get_pruning_mask(w_stat, level),\
                self._get_pruning_mask(None if self._weight_only else b_stat, level)
            # Proportion of negatives in the diagonal Hessian for this module.
            total_negatives, total = (module.weight.hess_diag < 0).sum().int(),\
                                     module.weight.numel()
            if module.bias_mask is not None:
                total_negatives += (module.bias.hess_diag < 0).sum().int()
                total += (module.bias.numel())
            meta['hess_diag_negatives'][self._module_names[idx]] = (total_negatives, total)
        self._del_hess_attr()
        return True, meta
class SignSwitchPruner(GradualPruner):
    """Prunes weights whose sign flipped since the previous pruning step,
    comparing against a deep-copied snapshot of the modules.
    """
    def __init__(self, model, **kwargs):
        super(SignSwitchPruner, self).__init__(model, **kwargs)
        self._update_old_modules()

    def _update_old_modules(self):
        """Snapshot the current modules to compare signs against next time."""
        self._old_modules = []
        for module in self._modules:
            self._old_modules.append(deepcopy(module))

    @staticmethod
    def _get_pruning_mask(param_stats):
        if param_stats is None: return None
        return (param_stats > 0.).float()

    @staticmethod
    def _get_param_stat(param, old_param, param_mask):
        """1.0 for entries whose sign is unchanged (and unmasked), else 0.0."""
        if param is None or param_mask is None: return None
        # 2 where signs agree, 0 where they flipped, 1 where either is zero.
        param_stat = 1. + torch.sign(param) * torch.sign(old_param)
        # Lazy logging replaces the original bare print() debug statements.
        logging.debug('sign-switch stats: %s kept of %s', param_stat.sum() / 2, param.numel())
        return (param_stat * param_mask > 0).float()

    def on_epoch_begin(self, dset, subset_inds, device, num_workers,
                       batch_size, epoch_num, **kwargs):
        """Drop sign-flipped weights from the masks, then refresh the snapshot."""
        meta = {}
        if self._pruner_not_active(epoch_num):
            return False, {}
        for idx, module in enumerate(self._modules):
            old_module = self._old_modules[idx]
            w_stat, b_stat = self._get_param_stat(module.weight, old_module.weight,
                                                  module.weight_mask),\
                             self._get_param_stat(module.bias, old_module.bias,
                                                  module.bias_mask)
            module.weight_mask, module.bias_mask = self._get_pruning_mask(w_stat),\
                self._get_pruning_mask(None if self._weight_only else b_stat)
        self._update_old_modules()
        return True, meta
class AdjustedTaylorPruner(GradualPruner):
    """Second-order Taylor pruner: saliency 0.5*w^2*H_diag - w*grad, the
    Taylor estimate of the loss change when a weight is removed.
    """
    def __init__(self, model, **kwargs):
        super(AdjustedTaylorPruner, self).__init__(model, **kwargs)
    @staticmethod
    def _get_param_stat(param, param_mask):
        if param is None or param_mask is None: return None
        # Raw Taylor statistic can be negative; shift so every surviving entry
        # is strictly positive before masking.
        param_stat = (
            param.pow(2).mul(0.5).mul(param.hess_diag)
            - param.mul(param.grad_tmp)
        )
        return (param_stat - param_stat.min() + 1e-10) * param_mask
    def _release_grads(self):
        optim.SGD(self._model.parameters(), lr=1e-10).zero_grad()
    def _add_attrs(self):
        # Scratch accumulators: grad_tmp (first derivative) and hess_diag.
        self._release_grads()
        for param in self._model.parameters():
            setattr(param, 'hess_diag', 0)
            setattr(param, 'grad_tmp', 0)
    def _del_attrs(self):
        self._release_grads()
        for param in self._model.parameters():
            delattr(param, 'hess_diag')
            delattr(param, 'grad_tmp')
    def _compute_first_second_derivatives(self):
        # NOTE(review): grad-of-grad with grad_outputs=ones is a Hessian-vector
        # product with the all-ones vector (row sums), not the true diagonal —
        # exact only for a diagonal Hessian; confirm this is intended.
        for module in self._modules:
            for param in module.parameters():
                param.grad_tmp += param.grad.data
                param.hess_diag += torch.autograd.grad(param.grad, param, grad_outputs=torch.ones_like(param),
                                                    retain_graph=True)[0]
    def _compute_derivatives(self, dset, subset_inds, device, num_workers, batch_size):
        dummy_loader = torch.utils.data.DataLoader(dset, batch_size=batch_size, num_workers=num_workers,
                                                   sampler=SubsetRandomSampler(subset_inds))
        for in_tensor, target in dummy_loader:
            in_tensor, target = in_tensor.to(device), target.to(device)
            output = self._model(in_tensor)
            loss = torch.nn.functional.cross_entropy(output, target)
        # NOTE(review): backward sits OUTSIDE the loop, so only the final
        # batch's loss is differentiated; earlier batches only move data.
        # Confirm this single-batch estimate is intended.
        loss.backward(create_graph=True)
        self._compute_first_second_derivatives()
        self._release_grads()
    def on_epoch_begin(self, dset, subset_inds, device, num_workers, batch_size, epoch_num, **kwargs):
        """Recompute masks from the adjusted Taylor statistic, apply them to the
        weight data, and clean up the scratch attributes."""
        meta = {}
        if self._pruner_not_active(epoch_num):
            return False, {}
        self._add_attrs()
        self._compute_derivatives(dset, subset_inds, device, num_workers, batch_size)
        for idx, module in enumerate(self._modules):
            level = self._required_sparsity(epoch_num)
            w_stat, b_stat = self._get_param_stat(module.weight, module.weight_mask),\
                             self._get_param_stat(module.bias, module.bias_mask)
            module.weight_mask, module.bias_mask = self._get_pruning_mask(w_stat, level),\
                                   self._get_pruning_mask(None if self._weight_only else b_stat, level)
        for _module in self._modules:
            _module.apply_masks_to_data()
        self._del_attrs()
        return True, meta
# Import-only module: no CLI behavior is defined.
if __name__ == '__main__':
    pass
| [
"utils.get_total_sparsity",
"torch.ones",
"tqdm.tqdm",
"torch.utils.data.sampler.SubsetRandomSampler",
"logging.debug",
"copy.deepcopy",
"torch.ones_like",
"torch.argsort",
"torch.nn.functional.cross_entropy",
"torch.sign",
"numpy.mean",
"torch.zeros",
"utils.percentile"
] | [((6416, 6484), 'logging.debug', 'logging.debug', (['f"""Constructed {self.__class__.__name__} with config:"""'], {}), "(f'Constructed {self.__class__.__name__} with config:')\n", (6429, 6484), False, 'import logging\n'), ((6927, 6956), 'numpy.mean', 'np.mean', (['parameter_sparsities'], {}), '(parameter_sparsities)\n', (6934, 6956), True, 'import numpy as np\n'), ((9486, 9536), 'logging.debug', 'logging.debug', (['"""Desired sparsity level is """', 'level'], {}), "('Desired sparsity level is ', level)\n", (9499, 9536), False, 'import logging\n'), ((9607, 9621), 'torch.zeros', 'torch.zeros', (['(0)'], {}), '(0)\n', (9618, 9621), False, 'import torch\n'), ((10079, 10105), 'utils.percentile', 'percentile', (['weights', 'level'], {}), '(weights, level)\n', (10089, 10105), False, 'from utils import get_total_sparsity, recompute_bn_stats, percentile, get_prunable_children\n'), ((11655, 11723), 'logging.debug', 'logging.debug', (['f"""Constructed {self.__class__.__name__} with config:"""'], {}), "(f'Constructed {self.__class__.__name__} with config:')\n", (11668, 11723), False, 'import logging\n'), ((20866, 20884), 'tqdm.tqdm', 'tqdm', (['dummy_loader'], {}), '(dummy_loader)\n', (20870, 20884), False, 'from tqdm import tqdm\n'), ((4125, 4152), 'utils.get_total_sparsity', 'get_total_sparsity', (['_module'], {}), '(_module)\n', (4143, 4152), False, 'from utils import get_total_sparsity, recompute_bn_stats, percentile, get_prunable_children\n'), ((7911, 7944), 'utils.percentile', 'percentile', (['param_stats', 'sparsity'], {}), '(param_stats, sparsity)\n', (7921, 7944), False, 'from utils import get_total_sparsity, recompute_bn_stats, percentile, get_prunable_children\n'), ((16345, 16394), 'torch.nn.functional.cross_entropy', 'torch.nn.functional.cross_entropy', (['output', 'target'], {}), '(output, target)\n', (16378, 16394), False, 'import torch\n'), ((18201, 18250), 'torch.nn.functional.cross_entropy', 'torch.nn.functional.cross_entropy', (['output', 'target'], {}), 
'(output, target)\n', (18234, 18250), False, 'import torch\n'), ((26341, 26390), 'torch.nn.functional.cross_entropy', 'torch.nn.functional.cross_entropy', (['output', 'target'], {}), '(output, target)\n', (26374, 26390), False, 'import torch\n'), ((12667, 12723), 'torch.ones', 'torch.ones', (['weight_temp.shape'], {'device': 'weight_temp.device'}), '(weight_temp.shape, device=weight_temp.device)\n', (12677, 12723), False, 'import torch\n'), ((14110, 14132), 'utils.percentile', 'percentile', (['weight', 'sp'], {}), '(weight, sp)\n', (14120, 14132), False, 'from utils import get_total_sparsity, recompute_bn_stats, percentile, get_prunable_children\n'), ((16129, 16161), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['subset_inds'], {}), '(subset_inds)\n', (16148, 16161), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((17985, 18017), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['subset_inds'], {}), '(subset_inds)\n', (18004, 18017), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((20781, 20813), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['subset_inds'], {}), '(subset_inds)\n', (20800, 20813), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((21022, 21088), 'torch.nn.functional.cross_entropy', 'torch.nn.functional.cross_entropy', (['output', 'target'], {'reduction': '"""sum"""'}), "(output, target, reduction='sum')\n", (21055, 21088), False, 'import torch\n'), ((23176, 23192), 'copy.deepcopy', 'deepcopy', (['module'], {}), '(module)\n', (23184, 23192), False, 'from copy import deepcopy\n'), ((23499, 23516), 'torch.sign', 'torch.sign', (['param'], {}), '(param)\n', (23509, 23516), False, 'import torch\n'), ((23519, 23540), 'torch.sign', 'torch.sign', (['old_param'], {}), '(old_param)\n', (23529, 23540), False, 'import torch\n'), ((26124, 26156), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', 
(['subset_inds'], {}), '(subset_inds)\n', (26143, 26156), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((12583, 12616), 'torch.argsort', 'torch.argsort', (['weight_temp'], {'dim': '(1)'}), '(weight_temp, dim=1)\n', (12596, 12616), False, 'import torch\n'), ((13184, 13240), 'torch.ones', 'torch.ones', (['weight_temp.shape'], {'device': 'weight_temp.device'}), '(weight_temp.shape, device=weight_temp.device)\n', (13194, 13240), False, 'import torch\n'), ((13100, 13133), 'torch.argsort', 'torch.argsort', (['weight_temp'], {'dim': '(1)'}), '(weight_temp, dim=1)\n', (13113, 13133), False, 'import torch\n'), ((25769, 25791), 'torch.ones_like', 'torch.ones_like', (['param'], {}), '(param)\n', (25784, 25791), False, 'import torch\n')] |
import numpy as np
import cv2
# CamShift colour tracker: builds a hue histogram from a hardcoded ROI in an
# early frame, then tracks it through the clip via back projection.
cap = cv2.VideoCapture('biler/rød-bil-fra-venstre.mp4')

# Skip ahead: use the 10th frame so the clip has settled before initialising.
for i in range(10):
    ret, frame = cap.read()

# Initial location of the tracking window (hardcoded for this clip):
# row, height, column, width.
r, h, c, w = 300, 90, 0, 125
track_window = (c, r, w, h)

# Build the colour model from the region of interest.
roi = roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)  # hue 0-180, sat/val 0-255
mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((150., 100., 150.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

# Terminate CamShift after 10 iterations or a shift of less than 1 px.
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

while 1:
    ret, frame = cap.read()
    if ret == True:
        # The histogram was built on the hue channel, so the back projection
        # must run on an HSV frame too (the original converted to grayscale
        # here, which breaks the colour model).
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
        # Apply CamShift to get the new location.
        ret, track_window = cv2.CamShift(dst, track_window, term_crit)
        # Draw the rotated tracking box on the back-projection image.
        pts = cv2.boxPoints(ret)
        pts = np.int0(pts)
        img2 = cv2.polylines(dst, [pts], True, 255, 2)
        #img2 = cv2.polylines(frame,[pts],True, 255,2)
        cv2.imshow('img2', img2)
        k = cv2.waitKey(60) & 0xff
        if k == 27:  # Esc quits
            break
        elif k != 255:
            # Save a snapshot only when an actual key was pressed; waitKey
            # returns -1 (-> 255 after masking) when no key is down, and the
            # original wrote a 'ÿ.jpg' file on every single frame.
            cv2.imwrite(chr(k) + ".jpg", img2)
    else:
        break
cv2.destroyAllWindows()
cap.release()
| [
"numpy.int0",
"cv2.polylines",
"cv2.cvtColor",
"cv2.calcHist",
"cv2.waitKey",
"cv2.imshow",
"cv2.CamShift",
"cv2.VideoCapture",
"cv2.boxPoints",
"numpy.array",
"cv2.calcBackProject",
"cv2.normalize",
"cv2.destroyAllWindows"
] | [((37, 86), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""biler/rød-bil-fra-venstre.mp4"""'], {}), "('biler/rød-bil-fra-venstre.mp4')\n", (53, 86), False, 'import cv2\n'), ((350, 386), 'cv2.cvtColor', 'cv2.cvtColor', (['roi', 'cv2.COLOR_BGR2HSV'], {}), '(roi, cv2.COLOR_BGR2HSV)\n', (362, 386), False, 'import cv2\n'), ((508, 559), 'cv2.calcHist', 'cv2.calcHist', (['[hsv_roi]', '[0]', 'mask', '[180]', '[0, 180]'], {}), '([hsv_roi], [0], mask, [180], [0, 180])\n', (520, 559), False, 'import cv2\n'), ((555, 613), 'cv2.normalize', 'cv2.normalize', (['roi_hist', 'roi_hist', '(0)', '(255)', 'cv2.NORM_MINMAX'], {}), '(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)\n', (568, 613), False, 'import cv2\n'), ((1513, 1536), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1534, 1536), False, 'import cv2\n'), ((444, 471), 'numpy.array', 'np.array', (['(0.0, 60.0, 32.0)'], {}), '((0.0, 60.0, 32.0))\n', (452, 471), True, 'import numpy as np\n'), ((469, 500), 'numpy.array', 'np.array', (['(150.0, 100.0, 150.0)'], {}), '((150.0, 100.0, 150.0))\n', (477, 500), True, 'import numpy as np\n'), ((885, 924), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (897, 924), False, 'import cv2\n'), ((939, 993), 'cv2.calcBackProject', 'cv2.calcBackProject', (['[hsv]', '[0]', 'roi_hist', '[0, 180]', '(1)'], {}), '([hsv], [0], roi_hist, [0, 180], 1)\n', (958, 993), False, 'import cv2\n'), ((1068, 1110), 'cv2.CamShift', 'cv2.CamShift', (['dst', 'track_window', 'term_crit'], {}), '(dst, track_window, term_crit)\n', (1080, 1110), False, 'import cv2\n'), ((1153, 1171), 'cv2.boxPoints', 'cv2.boxPoints', (['ret'], {}), '(ret)\n', (1166, 1171), False, 'import cv2\n'), ((1186, 1198), 'numpy.int0', 'np.int0', (['pts'], {}), '(pts)\n', (1193, 1198), True, 'import numpy as np\n'), ((1223, 1262), 'cv2.polylines', 'cv2.polylines', (['dst', '[pts]', '(True)', '(255)', '(2)'], {}), '(dst, [pts], True, 255, 2)\n', (1236, 1262), False, 
'import cv2\n'), ((1323, 1347), 'cv2.imshow', 'cv2.imshow', (['"""img2"""', 'img2'], {}), "('img2', img2)\n", (1333, 1347), False, 'import cv2\n'), ((1368, 1383), 'cv2.waitKey', 'cv2.waitKey', (['(60)'], {}), '(60)\n', (1379, 1383), False, 'import cv2\n')] |
from torch.utils.data import IterableDataset, Dataset, get_worker_info
import gc
import numpy as np
from typing import List, Optional, Dict
from core.utils import chunk_examples_with_degree, chunk_to_len_batch
import pandas as pd
import os
import torch
import subprocess
from time import time
from itertools import cycle, chain, islice, repeat
from math import ceil
from collections import Counter
class PunctuationDomainDataset(IterableDataset):
    """Streams batches of punctuation-restoration examples from one domain's
    csv file.

    Each ``__next__`` reads ``num_samples`` raw lines from the working copy of
    the csv, optionally augments them with strided sub-windows, chunks them to
    ``max_seq_length`` subtokens and returns a dict of batched tensors tagged
    with this dataset's ``domain`` index.
    """
    def __init__(self,
                 csv_file:str,
                 tokenizer,
                 num_samples:int=256,
                 max_seq_length:int=256,
                 degree=0,
                 punct_label_ids: Dict[str, int] = None,
                 label_map:Dict[str,str] = None,
                 domain=0,
                 labelled=True,
                 randomize=True,
                 target_file='',
                 tmp_path='~/data/tmp',
                 start=0,
                 end=-1,
                 attach_label_to_end=None,
                 no_space_label=None,
                 manual_len=0,
                 pad_start=0,
                 alpha_sub=0.4,
                 alpha_del=0.4,
                 alpha_ins=0.4,
                 alpha_swp=0,
                 alpha_spl=0.4,
                 stride=0,
                 ):
        # NOTE(review): `start`/`end` are currently unused; kept for interface
        # compatibility with existing callers.
        if not os.path.exists(csv_file):
            raise FileNotFoundError(
                f'{csv_file} not found. The 2nd column of the file contains the transcripts.'
            )
        filename = os.path.basename(csv_file)
        if not filename.endswith('.csv'):
            # Fixed: the original message was not an f-string and printed the
            # literal text "{text_file}".
            raise ValueError(f"{csv_file} should have extension .csv")
        self.csv_file = csv_file
        self.max_seq_length = max_seq_length
        self.manual_len = manual_len
        self.domain = domain
        self.punct_label_ids = punct_label_ids
        self.label_map = label_map
        self.labelled = labelled
        self.tokenizer = tokenizer
        self.degree = degree
        self.randomize = randomize
        self.target_file = target_file
        self.tmp_path = tmp_path
        self.attach_label_to_end = attach_label_to_end
        self.no_space_label = no_space_label
        self.pad_start = pad_start
        self.alpha_sub = alpha_sub
        self.alpha_del = alpha_del
        self.alpha_ins = alpha_ins
        self.alpha_swp = alpha_swp
        self.alpha_spl = alpha_spl
        self.stride = stride
        if not os.path.exists(self.target_file):
            # Copy the csv minus its header row into the shuffleable working file.
            # NOTE(review): os.system with an interpolated path — a path with
            # shell metacharacters would break/inject; confirm inputs are trusted.
            os.system(f"sed '1d' {self.csv_file} > {self.target_file}")
        self.set_num_samples(self.target_file, num_samples, manual_len)

    def __iter__(self):
        """Open a fresh chunked reader over the working file."""
        self.dataset = iter(pd.read_csv(
            self.target_file,
            skiprows=(0 % self.len)*self.num_samples,
            header=None,
            dtype=str,
            chunksize=self.num_samples,
        ))
        return self

    def __next__(self):
        """Read the next chunk of lines and return a dict of batched tensors."""
        batch = next(self.dataset)[1]
        complete = batch
        if self.stride > 0:
            # Augment with overlapping windows: for each stride multiple i,
            # drop the first stride*i words of every line so that chunk
            # boundaries vary across epochs.
            for i in range(1, self.max_seq_length // self.stride):
                l = batch.str.split().map(len).values
                a = self.stride * i * np.ones_like(l)
                b = l
                complete = complete.append(pd.DataFrame({'t': batch, 'a': a, 'b': b}).apply(
                    lambda row: ' '.join(row.t.split()[row.a:row.b]), axis=1))
            batch = complete
        chunked = chunk_examples_with_degree(
            self.degree, self.punct_label_ids, self.label_map, self.tokenizer,
            self.alpha_sub, self.alpha_del, self.alpha_ins, self.alpha_swp, self.alpha_spl)(batch)
        batched = chunk_to_len_batch(
            self.max_seq_length, self.tokenizer, chunked['texts'], chunked['tags'],
            self.labelled, attach_label_to_end=self.attach_label_to_end,
            no_space_label=self.no_space_label, pad_start=self.pad_start)
        num_samples = batched['labels'].shape[0]
        # Tag every row with this dataset's domain index.
        batched['domain'] = self.domain * torch.ones(num_samples, 1, dtype=torch.long)
        gc.collect()
        if self.randomize:
            rand = torch.randperm(num_samples)
            return {k: v[rand] for k, v in batched.items()}
        return batched

    def set_num_samples(self, csv_file, num_samples, manual_len):
        """Derive total/per-batch sample counts from the file's line count.

        ``manual_len > 0`` caps the total; ``num_samples`` is clamped so a
        single batch never exceeds the file, and ``len`` is the batch count.
        """
        self.num_samples = num_samples
        self.total_samples = int(subprocess.Popen(
            ['wc', '-l', csv_file],
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0].split()[0])
        if manual_len > 0:
            self.total_samples = min(manual_len, self.total_samples)
        self.num_samples = min(self.num_samples, self.total_samples)
        self.len = max(1, int(self.total_samples / self.num_samples))

    def __len__(self):
        # Fixed: the original returned pp(self.len) where `pp` is undefined,
        # so len(dataset) raised NameError.
        return self.len

    def shuffle(self, randomize=True, seed=42):
        """Shuffle the working file on disk, then reopen the chunked reader.

        (Two value-discarding ``wc -l`` calls around the shell command in the
        original were no-ops and have been removed.)
        """
        os.system('bash data/shuffle.sh -i {} -o {} -a {} -s {} -m {} -t {}'.format(
            self.target_file, self.target_file, ['true', 'false'][randomize],
            seed, '100M', self.tmp_path))
        self.dataset = iter(pd.read_csv(
            self.target_file,
            skiprows=(0 % self.len)*self.num_samples,
            header=None,
            dtype=str,
            chunksize=self.num_samples,
        ))

    def determine_class_weights(self):
        """Estimate label frequencies over up to 20 batches; returns the
        normalized count vector (one entry per punctuation label)."""
        it = iter(self)
        ct = torch.zeros(len(self.punct_label_ids))
        for _ in range(min(20, self.len)):
            print('.', end='')
            ni = next(it)
            ct += torch.bincount(ni['labels'].view(-1), minlength=len(self.punct_label_ids))
        return ct / sum(ct)
class PunctuationDomainDatasets(IterableDataset):
def __init__(self,
split:str,
num_samples:int,
max_seq_length:int,
punct_label_ids: Dict[str, int],
label_map:Dict[str,str],
labelled: List[str],
unlabelled: List[str],
tokenizer,
randomize:bool=True,
data_id='',
tmp_path='~/data/tmp',
attach_label_to_end=None,
manual_len:int=0,
no_space_label:int=None,
pad_start:int=0,
low_resource_labelled_count:int = 0,
alpha_sub=0,
alpha_del=0,
alpha_ins=0,
alpha_swp=0,
alpha_spl=0,
stride=0,
):
worker_info = get_worker_info()
self.num_workers=1 if worker_info is None else worker_info.num_workers
self.num_labelled=len(labelled)
self.datasets = []
self.iterables=[]
self.randomize=randomize
self.punct_label_ids=punct_label_ids
self.label_map=label_map
self.ds_lengths=[]
self.labelled=labelled
self.stride=stride
for path in labelled:
if manual_len>0:
self.ds_lengths.append(min(manual_len,int(subprocess.Popen(['wc', '-l', f'{path}.{split}.csv'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0].split()[0])))
else:
self.ds_lengths.append(int(subprocess.Popen(['wc', '-l', f'{path}.{split}.csv'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0].split()[0]))
for path in unlabelled:
if split=='train' and low_resource_labelled_count>0:
if manual_len>0:
self.ds_lengths.append(min(manual_len,int(subprocess.Popen(['wc', '-l', f'{path}.labelled.{split}.csv'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0].split()[0])))
self.ds_lengths.append(min(manual_len,int(subprocess.Popen(['wc', '-l', f'{path}.unlabelled.{split}.csv'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0].split()[0])))
else:
self.ds_lengths.append(int(subprocess.Popen(['wc', '-l', f'{path}.labelled.{split}.csv'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0].split()[0]))
self.ds_lengths.append(int(subprocess.Popen(['wc', '-l', f'{path}.unlabelled.{split}.csv'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0].split()[0]))
else:
if manual_len>0:
self.ds_lengths.append(min(manual_len,int(subprocess.Popen(['wc', '-l', f'{path}.{split}.csv'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0].split()[0])))
else:
self.ds_lengths.append(int(subprocess.Popen(['wc', '-l', f'{path}.{split}.csv'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0].split()[0]))
self.max_length=max(self.ds_lengths)
self.per_worker=int(self.max_length/self.num_workers)
self.len=max(1,ceil(self.per_worker/num_samples))
self.class_weights=None
self.alpha_sub=alpha_sub
self.alpha_del=alpha_del
self.alpha_ins=alpha_ins
self.alpha_swp=alpha_swp
self.alpha_spl=alpha_spl
self.stride=stride
for i,path in enumerate(labelled):
target=os.path.join(tmp_path,os.path.split(path)[1])
dataset=PunctuationDomainDataset(
csv_file=f'{path}.{split}.csv', tokenizer=tokenizer,
num_samples=num_samples,max_seq_length=max_seq_length,
punct_label_ids=punct_label_ids,
label_map=label_map,
domain=i,labelled=True,
randomize=randomize,
target_file=f'{target}.{split}.{data_id}.csv',
tmp_path=tmp_path,
attach_label_to_end=attach_label_to_end,
no_space_label=no_space_label,
manual_len=manual_len,
pad_start=pad_start,
alpha_sub=self.alpha_sub,
alpha_del=self.alpha_del,
alpha_ins=self.alpha_ins,
alpha_swp=self.alpha_swp,
alpha_spl=self.alpha_spl,
stride=self.stride,)
self.datasets.append(dataset)
self.iterables.append(cycle(dataset))
for i,path in enumerate(unlabelled):
target=os.path.join(tmp_path,os.path.split(path)[1])
if split=='train' and low_resource_labelled_count>0:
dataset=PunctuationDomainDataset(
csv_file=f'{path}.unlabelled.{split}.csv', tokenizer=tokenizer,
num_samples=num_samples,max_seq_length=max_seq_length,
punct_label_ids=punct_label_ids,
label_map=label_map,domain=len(labelled)+i,labelled=False,
randomize=randomize,
target_file=f'{target}.unlabelled.{split}.{data_id}.csv',
tmp_path=tmp_path,
attach_label_to_end=attach_label_to_end,
no_space_label=no_space_label,
manual_len=manual_len,
pad_start=pad_start,
alpha_sub=self.alpha_sub,
alpha_del=self.alpha_del,
alpha_ins=self.alpha_ins,
alpha_swp=self.alpha_swp,
alpha_spl=self.alpha_spl,
stride=self.stride,)
self.datasets.append(dataset)
self.iterables.append(cycle(dataset))
dataset=PunctuationDomainDataset(
csv_file=f'{path}.labelled.{split}.csv', tokenizer=tokenizer,
num_samples=num_samples,max_seq_length=max_seq_length,
punct_label_ids=punct_label_ids,
label_map=label_map,domain=len(labelled)+i,labelled=True,
randomize=randomize,
target_file=f'{target}.labelled.{split}.{data_id}.csv',
tmp_path=tmp_path,
attach_label_to_end=attach_label_to_end,
no_space_label=no_space_label,
manual_len=manual_len,
pad_start=pad_start,
alpha_sub=self.alpha_sub,
alpha_del=self.alpha_del,
alpha_ins=self.alpha_ins,
alpha_swp=self.alpha_swp,
alpha_spl=self.alpha_spl,
stride=self.stride,)
self.datasets.append(dataset)
self.iterables.append(cycle(dataset))
else:
dataset=PunctuationDomainDataset(
csv_file=f'{path}.{split}.csv', tokenizer=tokenizer,
num_samples=num_samples,max_seq_length=max_seq_length,
punct_label_ids=punct_label_ids,
label_map=label_map,domain=len(labelled)+i,labelled=False,
randomize=randomize,
target_file=f'{target}.{split}.{data_id}.csv',
tmp_path=tmp_path,
attach_label_to_end=attach_label_to_end,
no_space_label=no_space_label,
manual_len=manual_len,
pad_start=pad_start,
alpha_sub=self.alpha_sub,
alpha_del=self.alpha_del,
alpha_ins=self.alpha_ins,
alpha_swp=self.alpha_swp,
alpha_spl=self.alpha_spl,
stride=self.stride,
)
self.datasets.append(dataset)
self.iterables.append(cycle(dataset))
def __iter__(self):
worker_info = get_worker_info()
worker_id = 0 if worker_info is None else worker_info.id
self.iterables=[]
for ds_length, dataset in zip(self.ds_lengths,self.datasets):
start = (worker_id*self.per_worker)%ds_length
self.iterables.append(cycle(chain(islice(iter(dataset),start,None),islice(iter(dataset),start))))
return self
def __next__(self):
ds=[next(d) for d in self.iterables]
if self.randomize:
min_batch=1000000
for d in ds:
size=d['domain'].shape[0]
if size<min_batch:
min_batch=size
#Ensure all domains are evenly represented
b={k:torch.cat([torch.repeat_interleave(d[k],max(1,min_batch/d[k].shape[0]),dim=0)[:min_batch] for d in ds], dim=0) for k in ['input_ids','attention_mask','subtoken_mask','labels','domain']}
rand=torch.randperm(b['labels'].shape[0])
return {k:v[rand] for k,v in b.items()}
else:
return {k:torch.cat([d[k] for d in ds], dim=0) for k in ['input_ids','attention_mask','subtoken_mask','labels','domain']}
    def __len__(self):
        # Total number of batches; self.len is assigned elsewhere in the
        # class (not visible in this view).
        return self.len
def shuffle(self, randomize=True, seed=42):
worker_info = get_worker_info()
worker_id = 0 if worker_info is None else worker_info.id
if worker_id==0:
for _ in self.datasets:
print(f"shuffling {_}")
_.shuffle(randomize,seed)
def determine_class_weights(self):
if self.class_weights is None:
ct=torch.zeros(len(self.punct_label_ids))
for _ in range(self.num_labelled):
ct+=self.datasets[_].determine_class_weights()
self.class_weights=self.num_labelled/ct
return self.class_weights
class PunctuationInferenceDataset(Dataset):
    """
    Dataset used during inference for punctuation and capitalization tasks
    with a pretrained model. For the training-time dataset (with labels),
    see BertPunctuationCapitalizationDataset.
    Args:
        tokenizer: tokenizer such as AutoTokenizer
        queries: input sequences; each entry is a sentence, no header
        max_seq_length: max sequence length minus 2 for [CLS] and [SEP]
        punct_label_ids: mapping from punctuation label string to integer id
        label_map: label-normalization mapping, passed to the chunker
        num_samples: divisor used by __len__ to derive the batch count
        degree: passed through to chunk_examples_with_degree
        attach_label_to_end: passed through to chunk_to_len_batch
        no_space_label: passed through to chunk_to_len_batch
        pad_start: passed through to chunk_to_len_batch
    """
    def __init__(self,
                 tokenizer,
                 queries: List[str],
                 max_seq_length: int,
                 punct_label_ids:Dict[str,int],
                 label_map:Dict[str,str],
                 num_samples:int=256,
                 degree:int = 0,
                 attach_label_to_end:bool=None,
                 no_space_label=None,
                 pad_start:int=0,
                 ):
        """ Initializes BertPunctuationInferDataset. """
        self.degree=degree
        self.punct_label_ids=punct_label_ids
        self.label_map = label_map
        # chunk the raw queries, then tokenize/pad them as a single batch
        chunked=chunk_examples_with_degree(self.degree, self.punct_label_ids, self.label_map,)(queries)
        self.features = chunk_to_len_batch(max_seq_length, tokenizer,chunked['texts'],chunked['tags'],attach_label_to_end=attach_label_to_end,no_space_label=no_space_label,pad_start=pad_start)
        self.attach_label_to_end=attach_label_to_end
        self.num_samples=num_samples
    def __len__(self):
        # NOTE(review): self.all_input_ids is never assigned anywhere in this
        # class (__init__ only sets self.features), so calling len() raises
        # AttributeError; the intended length is presumably derived from
        # self.features — confirm against chunk_to_len_batch's return value.
        return math.ceil(len(self.all_input_ids)/self.num_samples)
    def __getitem__(self, idx):
        # NOTE(review): idx is ignored — every call returns a shallow copy of
        # the entire feature dict rather than a single sample.
        return {k:v for k,v in self.features.items()}
| [
"pandas.DataFrame",
"torch.ones",
"torch.utils.data.get_worker_info",
"core.utils.chunk_to_len_batch",
"numpy.ones_like",
"subprocess.Popen",
"os.path.basename",
"pandas.read_csv",
"math.ceil",
"os.path.dirname",
"core.utils.chunk_examples_with_degree",
"os.path.exists",
"os.system",
"torc... | [((1288, 1313), 'os.path.dirname', 'os.path.dirname', (['csv_file'], {}), '(csv_file)\n', (1303, 1313), False, 'import os\n'), ((1333, 1359), 'os.path.basename', 'os.path.basename', (['csv_file'], {}), '(csv_file)\n', (1349, 1359), False, 'import os\n'), ((3437, 3664), 'core.utils.chunk_to_len_batch', 'chunk_to_len_batch', (['self.max_seq_length', 'self.tokenizer', "chunked['texts']", "chunked['tags']", 'self.labelled'], {'attach_label_to_end': 'self.attach_label_to_end', 'no_space_label': 'self.no_space_label', 'pad_start': 'self.pad_start'}), "(self.max_seq_length, self.tokenizer, chunked['texts'],\n chunked['tags'], self.labelled, attach_label_to_end=self.\n attach_label_to_end, no_space_label=self.no_space_label, pad_start=self\n .pad_start)\n", (3455, 3664), False, 'from core.utils import chunk_examples_with_degree, chunk_to_len_batch\n'), ((3781, 3793), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3791, 3793), False, 'import gc\n'), ((6507, 6524), 'torch.utils.data.get_worker_info', 'get_worker_info', ([], {}), '()\n', (6522, 6524), False, 'from torch.utils.data import IterableDataset, Dataset, get_worker_info\n'), ((13930, 13947), 'torch.utils.data.get_worker_info', 'get_worker_info', ([], {}), '()\n', (13945, 13947), False, 'from torch.utils.data import IterableDataset, Dataset, get_worker_info\n'), ((15192, 15209), 'torch.utils.data.get_worker_info', 'get_worker_info', ([], {}), '()\n', (15207, 15209), False, 'from torch.utils.data import IterableDataset, Dataset, get_worker_info\n'), ((16840, 17023), 'core.utils.chunk_to_len_batch', 'chunk_to_len_batch', (['max_seq_length', 'tokenizer', "chunked['texts']", "chunked['tags']"], {'attach_label_to_end': 'attach_label_to_end', 'no_space_label': 'no_space_label', 'pad_start': 'pad_start'}), "(max_seq_length, tokenizer, chunked['texts'], chunked[\n 'tags'], attach_label_to_end=attach_label_to_end, no_space_label=\n no_space_label, pad_start=pad_start)\n", (16858, 17023), False, 'from core.utils 
import chunk_examples_with_degree, chunk_to_len_batch\n'), ((1096, 1120), 'os.path.exists', 'os.path.exists', (['csv_file'], {}), '(csv_file)\n', (1110, 1120), False, 'import os\n'), ((2239, 2271), 'os.path.exists', 'os.path.exists', (['self.target_file'], {}), '(self.target_file)\n', (2253, 2271), False, 'import os\n'), ((2286, 2345), 'os.system', 'os.system', (['f"""sed \'1d\' {self.csv_file} > {self.target_file}"""'], {}), '(f"sed \'1d\' {self.csv_file} > {self.target_file}")\n', (2295, 2345), False, 'import os\n'), ((2468, 2595), 'pandas.read_csv', 'pd.read_csv', (['self.target_file'], {'skiprows': '(0 % self.len * self.num_samples)', 'header': 'None', 'dtype': 'str', 'chunksize': 'self.num_samples'}), '(self.target_file, skiprows=0 % self.len * self.num_samples,\n header=None, dtype=str, chunksize=self.num_samples)\n', (2479, 2595), True, 'import pandas as pd\n'), ((3244, 3427), 'core.utils.chunk_examples_with_degree', 'chunk_examples_with_degree', (['self.degree', 'self.punct_label_ids', 'self.label_map', 'self.tokenizer', 'self.alpha_sub', 'self.alpha_del', 'self.alpha_ins', 'self.alpha_swp', 'self.alpha_spl'], {}), '(self.degree, self.punct_label_ids, self.\n label_map, self.tokenizer, self.alpha_sub, self.alpha_del, self.\n alpha_ins, self.alpha_swp, self.alpha_spl)\n', (3270, 3427), False, 'from core.utils import chunk_examples_with_degree, chunk_to_len_batch\n'), ((3730, 3774), 'torch.ones', 'torch.ones', (['num_samples', '(1)'], {'dtype': 'torch.long'}), '(num_samples, 1, dtype=torch.long)\n', (3740, 3774), False, 'import torch\n'), ((3838, 3865), 'torch.randperm', 'torch.randperm', (['num_samples'], {}), '(num_samples)\n', (3852, 3865), False, 'import torch\n'), ((5045, 5172), 'pandas.read_csv', 'pd.read_csv', (['self.target_file'], {'skiprows': '(0 % self.len * self.num_samples)', 'header': 'None', 'dtype': 'str', 'chunksize': 'self.num_samples'}), '(self.target_file, skiprows=0 % self.len * self.num_samples,\n header=None, dtype=str, 
chunksize=self.num_samples)\n', (5056, 5172), True, 'import pandas as pd\n'), ((8870, 8905), 'math.ceil', 'ceil', (['(self.per_worker / num_samples)'], {}), '(self.per_worker / num_samples)\n', (8874, 8905), False, 'from math import ceil\n'), ((14836, 14872), 'torch.randperm', 'torch.randperm', (["b['labels'].shape[0]"], {}), "(b['labels'].shape[0])\n", (14850, 14872), False, 'import torch\n'), ((16728, 16805), 'core.utils.chunk_examples_with_degree', 'chunk_examples_with_degree', (['self.degree', 'self.punct_label_ids', 'self.label_map'], {}), '(self.degree, self.punct_label_ids, self.label_map)\n', (16754, 16805), False, 'from core.utils import chunk_examples_with_degree, chunk_to_len_batch\n'), ((10261, 10275), 'itertools.cycle', 'cycle', (['dataset'], {}), '(dataset)\n', (10266, 10275), False, 'from itertools import cycle, chain, islice, repeat\n'), ((14961, 14997), 'torch.cat', 'torch.cat', (['[d[k] for d in ds]'], {'dim': '(0)'}), '([d[k] for d in ds], dim=0)\n', (14970, 14997), False, 'import torch\n'), ((2981, 2996), 'numpy.ones_like', 'np.ones_like', (['l'], {}), '(l)\n', (2993, 2996), True, 'import numpy as np\n'), ((9215, 9234), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (9228, 9234), False, 'import os\n'), ((10376, 10395), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (10389, 10395), False, 'import os\n'), ((11583, 11597), 'itertools.cycle', 'cycle', (['dataset'], {}), '(dataset)\n', (11588, 11597), False, 'from itertools import cycle, chain, islice, repeat\n'), ((12712, 12726), 'itertools.cycle', 'cycle', (['dataset'], {}), '(dataset)\n', (12717, 12726), False, 'from itertools import cycle, chain, islice, repeat\n'), ((13867, 13881), 'itertools.cycle', 'cycle', (['dataset'], {}), '(dataset)\n', (13872, 13881), False, 'from itertools import cycle, chain, islice, repeat\n'), ((3058, 3100), 'pandas.DataFrame', 'pd.DataFrame', (["{'t': batch, 'a': a, 'b': b}"], {}), "({'t': batch, 'a': a, 'b': b})\n", (3070, 3100), True, 
'import pandas as pd\n'), ((4100, 4195), 'subprocess.Popen', 'subprocess.Popen', (["['wc', '-l', csv_file]"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), "(['wc', '-l', csv_file], stdout=subprocess.PIPE, stderr=\n subprocess.STDOUT)\n", (4116, 4195), False, 'import subprocess\n'), ((4573, 4675), 'subprocess.Popen', 'subprocess.Popen', (["['wc', '-l', self.target_file]"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), "(['wc', '-l', self.target_file], stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n", (4589, 4675), False, 'import subprocess\n'), ((4891, 4993), 'subprocess.Popen', 'subprocess.Popen', (["['wc', '-l', self.target_file]"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), "(['wc', '-l', self.target_file], stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n", (4907, 4993), False, 'import subprocess\n'), ((7206, 7314), 'subprocess.Popen', 'subprocess.Popen', (["['wc', '-l', f'{path}.{split}.csv']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), "(['wc', '-l', f'{path}.{split}.csv'], stdout=subprocess.\n PIPE, stderr=subprocess.STDOUT)\n", (7222, 7314), False, 'import subprocess\n'), ((7010, 7118), 'subprocess.Popen', 'subprocess.Popen', (["['wc', '-l', f'{path}.{split}.csv']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), "(['wc', '-l', f'{path}.{split}.csv'], stdout=subprocess.\n PIPE, stderr=subprocess.STDOUT)\n", (7026, 7118), False, 'import subprocess\n'), ((7953, 8070), 'subprocess.Popen', 'subprocess.Popen', (["['wc', '-l', f'{path}.labelled.{split}.csv']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), "(['wc', '-l', f'{path}.labelled.{split}.csv'], stdout=\n subprocess.PIPE, stderr=subprocess.STDOUT)\n", (7969, 8070), False, 'import subprocess\n'), ((8143, 8262), 'subprocess.Popen', 'subprocess.Popen', (["['wc', '-l', f'{path}.unlabelled.{split}.csv']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), "(['wc', '-l', 
f'{path}.unlabelled.{split}.csv'], stdout=\n subprocess.PIPE, stderr=subprocess.STDOUT)\n", (8159, 8262), False, 'import subprocess\n'), ((8605, 8713), 'subprocess.Popen', 'subprocess.Popen', (["['wc', '-l', f'{path}.{split}.csv']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), "(['wc', '-l', f'{path}.{split}.csv'], stdout=subprocess.\n PIPE, stderr=subprocess.STDOUT)\n", (8621, 8713), False, 'import subprocess\n'), ((7532, 7649), 'subprocess.Popen', 'subprocess.Popen', (["['wc', '-l', f'{path}.labelled.{split}.csv']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), "(['wc', '-l', f'{path}.labelled.{split}.csv'], stdout=\n subprocess.PIPE, stderr=subprocess.STDOUT)\n", (7548, 7649), False, 'import subprocess\n'), ((7738, 7857), 'subprocess.Popen', 'subprocess.Popen', (["['wc', '-l', f'{path}.unlabelled.{split}.csv']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), "(['wc', '-l', f'{path}.unlabelled.{split}.csv'], stdout=\n subprocess.PIPE, stderr=subprocess.STDOUT)\n", (7754, 7857), False, 'import subprocess\n'), ((8401, 8509), 'subprocess.Popen', 'subprocess.Popen', (["['wc', '-l', f'{path}.{split}.csv']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), "(['wc', '-l', f'{path}.{split}.csv'], stdout=subprocess.\n PIPE, stderr=subprocess.STDOUT)\n", (8417, 8509), False, 'import subprocess\n')] |
#!/usr/bin/python3
# coding: UTF-8
# Copyright <NAME>.
# Distributed under the Boost Software License, Version 1.0.
# See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt
import os
import sys
import math
import numpy
import random
import argparse
import multiprocessing
# ------------------------------------------------------------------------------
def mix(b, i, f):
    """Linear interpolation: returns b when f == 0 and i when f == 1."""
    return f*i + (1.0-f)*b
# ------------------------------------------------------------------------------
def inverse_logistic(x):
    """Logit of x, with both log arguments clamped away from zero."""
    floor = 0.001
    numerator = max(x, floor)
    denominator = max(1.0 - x, floor)
    return math.log(numerator) - math.log(denominator)
# ------------------------------------------------------------------------------
def logistic(x):
    """Standard logistic sigmoid 1 / (1 + e^-x)."""
    e = math.exp(-x)
    return 1.0 / (1.0 + e)
# ------------------------------------------------------------------------------
def sigmoid(x, c):
    """Sharpen (c > 1) or flatten (c < 1) x within [0, 1] via the logit domain."""
    return logistic(inverse_logistic(x) * c)
# ------------------------------------------------------------------------------
def perpendicular(v1):
    """Return v1 rotated 90 degrees counter-clockwise (same dtype/shape)."""
    rotated = numpy.empty_like(v1)
    rotated[0] = -v1[1]
    rotated[1] = v1[0]
    return rotated
# ------------------------------------------------------------------------------
def set_center(points):
    """Arithmetic mean (centroid) of a non-empty sequence of points."""
    total = sum(points)
    return total / len(points)
# ------------------------------------------------------------------------------
def segment_point(p1, p2, c):
    """Point on the segment p1->p2 at parameter c (0 gives p1, 1 gives p2)."""
    return p1*(1-c) + p2*c
# ------------------------------------------------------------------------------
def segment_midpoint(p1, p2):
    """Midpoint of the segment between p1 and p2."""
    return 0.5*(p1+p2)
# ------------------------------------------------------------------------------
def segment_normal(p1, p2):
    """Normal of the segment p1->p2: its direction rotated 90 degrees CCW."""
    direction = p2 - p1
    return perpendicular(direction)
# ------------------------------------------------------------------------------
def line_intersect_param(l1, l2):
    """Parameter t such that l1[0] + t*l1[1] lies on line l2.

    Each line is a (point, direction) pair; returns None when the lines are
    (nearly) parallel.
    """
    d1 = l1[1]
    d2 = l2[1]
    d2p = perpendicular(d2)
    delta = l2[0] - l1[0]
    den = numpy.dot(d2p, d1)
    if abs(den) <= 0.00001:
        return None
    return numpy.dot(d2p, delta) / den
# ------------------------------------------------------------------------------
class ImageSampler(object):
    """Samples RGB pixels from a raster image, normalized to [0, 1] floats."""
    # --------------------------------------------------------------------------
    def __init__(self, image, width, height):
        self._im = image
        self._w = width
        self._h = height
    # --------------------------------------------------------------------------
    @classmethod
    def from_file(cls, path, width, height):
        """Load an image file as RGB; missing dimensions default to the
        image's own size; resizes (bicubic) when the sizes differ."""
        import PIL.Image
        image = PIL.Image.open(path).convert("RGB")
        im_w, im_h = image.size
        if width is None:
            width = im_w
        if height is None:
            height = im_h
        if (width, height) != image.size:
            image = image.resize((width, height), PIL.Image.BICUBIC)
        return cls(image, width, height)
    # --------------------------------------------------------------------------
    def width(self):
        """Sampled image width in pixels."""
        return self._w
    # --------------------------------------------------------------------------
    def height(self):
        """Sampled image height in pixels."""
        return self._h
    # --------------------------------------------------------------------------
    def get_pixel(self, x, y):
        """Pixel at (x, y), clamped to the image bounds, as floats in [0, 1]."""
        cx = min(max(x, 0), self._w - 1)
        cy = min(max(y, 0), self._h - 1)
        r, g, b = self._im.getpixel((cx, cy))
        return (r/255.0, g/255.0, b/255.0)
    # --------------------------------------------------------------------------
    def converted(self, mode):
        """New sampler over this image converted to another color mode."""
        return ImageSampler(self._im.convert(mode), self._w, self._h)
# ------------------------------------------------------------------------------
class NoImageSampler(object):
    """Null sampler used when no image is given: behaves as an all-black image."""
    # --------------------------------------------------------------------------
    def __init__(self):
        pass
    # --------------------------------------------------------------------------
    def get_pixel(self, x, y):
        """Always black, regardless of coordinates."""
        return (0.0, 0.0, 0.0)
    # --------------------------------------------------------------------------
    def converted(self, mode):
        """Color-mode conversion is a no-op for the null sampler."""
        return self
# ------------------------------------------------------------------------------
class RandomGenerator(object):
    """Pairs a main RNG with an optional rim RNG.

    Rim cells draw from the rim RNG when it is usable; everything else (and
    any failure of the rim RNG, e.g. when it is None) falls back to the
    main RNG.
    """
    # --------------------------------------------------------------------------
    def __init__(self, mrg, rrg):
        self._mrg = mrg
        self._rrg = rrg
    # --------------------------------------------------------------------------
    def get(self, rim):
        """One random draw in [0, 1); prefers the rim RNG for rim cells."""
        if not rim:
            return self._mrg.random()
        try:
            return self._rrg.random()
        except:
            return self._mrg.random()
# ------------------------------------------------------------------------------
class Randomized(object):
    """Base class supplying seeded (or system-entropy) random generators.

    Options provide two seeds: ``seed`` drives the main generators so runs
    are reproducible, ``rim_seed`` drives a separate generator used for the
    rim of the cell grid.
    """
    # --------------------------------------------------------------------------
    def __init__(self, options):
        self._mid_seed = options.seed
        self._rim_seed = options.rim_seed
    # --------------------------------------------------------------------------
    def _get_rng0(self):
        """Master RNG, lazily created from the mid seed on first use."""
        if not hasattr(self, "rng0"):
            self.rng0 = random.Random(self._mid_seed)
        return self.rng0
    # --------------------------------------------------------------------------
    def _mid_rng(self):
        import random
        if self._mid_seed is not None:
            # derive a fresh but reproducible generator from the master RNG
            return random.Random(self._get_rng0().randrange(0, sys.maxsize))
        import time
        try:
            return random.SystemRandom()
        except:
            return random.Random(time.time())
    # --------------------------------------------------------------------------
    def _rim_rng(self):
        """Rim RNG when a rim seed was given, otherwise None."""
        if self._rim_seed is None:
            return None
        return random.Random(self._rim_seed)
    # --------------------------------------------------------------------------
    def get_rng(self):
        """New RandomGenerator combining the mid and rim generators."""
        return RandomGenerator(self._mid_rng(), self._rim_rng())
# ------------------------------------------------------------------------------
class RandomCellValues(Randomized):
    """Grid of per-cell random scalar values.

    In transformable mode the rim of the grid is overwritten with values
    symmetric under flips/rotations so tiles can be transformed seamlessly.
    """
    # --------------------------------------------------------------------------
    def _gen_values(self, w, h, transformable):
        """Draw an h-by-w grid of values; symmetrize the rim if requested."""
        rc = self.get_rng()
        # comprehension order (rows outer, columns inner) preserves the RNG
        # draw order of the equivalent nested loops
        cell_data = [
            [rc.get(x <= 0 or y <= 0 or x+1 >= w or y+1 >= h) for x in range(w)]
            for y in range(h)
        ]
        if transformable:
            indices = range(int(w/2)+1)
            rim_draws = [rc.get(True) for _ in indices]
            for i in indices:
                # pull rim values toward 0.5 and mirror them 8 ways
                v = 0.5 + (rim_draws[i]-0.5)*0.75
                cell_data[i][0] = v
                cell_data[h-i-1][0] = v
                cell_data[i][w-1] = v
                cell_data[h-i-1][w-1] = v
                cell_data[0][i] = v
                cell_data[0][w-i-1] = v
                cell_data[h-1][i] = v
                cell_data[h-1][w-i-1] = v
        return cell_data
    # --------------------------------------------------------------------------
    def __init__(self, options, w, h):
        Randomized.__init__(self, options)
        self._values = self._gen_values(w, h, options.transformable)
    # --------------------------------------------------------------------------
    def get(self, x, y):
        """Value of cell (x, y)."""
        return self._values[y][x]
# ------------------------------------------------------------------------------
class RandomCellOffsets(Randomized):
    """Grid of per-cell random (x, y) offsets.

    In transformable mode the rim offsets are overwritten with mirrored
    values so tiles remain seamless under flips/rotations.
    """
    # --------------------------------------------------------------------------
    def _gen_offsets(self, w, h, transformable):
        """Draw an h-by-w grid of offset pairs; symmetrize the rim if requested."""
        rx = self.get_rng()
        ry = self.get_rng()
        def _is_rim(x, y):
            return x <= 0 or y <= 0 or x+1 >= w or y+1 >= h
        # tuple elements evaluate left-to-right, preserving the original
        # rx-then-ry draw order per cell
        cell_data = [
            [(rx.get(_is_rim(x, y)), ry.get(_is_rim(x, y))) for x in range(w)]
            for y in range(h)
        ]
        if transformable:
            indices = range(int(w/2)+1)
            rim_draws = [(rx.get(True), ry.get(True)) for _ in indices]
            l = 0.8
            for i in indices:
                xo, yo = rim_draws[i]
                cell_data[i][0] = (l*xo, yo)
                cell_data[h-i-1][0] = (l*xo, 1.0-yo)
                cell_data[i][w-1] = (1.0-l*xo, 1.0-yo)
                cell_data[h-i-1][w-1] = (1.0-l*xo, yo)
                cell_data[0][i] = (xo, l*yo)
                cell_data[0][w-i-1] = (1.0-xo, l*yo)
                cell_data[h-1][i] = (1.0-xo, 1.0-l*yo)
                cell_data[h-1][w-i-1] = (xo, 1.0-l*yo)
        return cell_data
    # --------------------------------------------------------------------------
    def __init__(self, options, w, h):
        Randomized.__init__(self, options)
        self._offsets = self._gen_offsets(w, h, options.transformable)
    # --------------------------------------------------------------------------
    def get(self, x, y):
        """Offset pair of cell (x, y)."""
        return self._offsets[y][x]
# ------------------------------------------------------------------------------
class ImageContourCellOffsets(object):
    """Cell offsets pulled toward strong color edges of a guide image.

    For every cell, HSV differences to the 8 neighbors are folded into a
    pseudo-gradient; where that gradient is strong, the cell offset is
    blended from the background offset toward the gradient direction.
    """
    # --------------------------------------------------------------------------
    def _gen_offsets(self, im, bg, w, h):
        """Build an h-by-w grid of (x, y) offsets from image im over fallback bg."""
        def _hue_dist(a, b):
            # hue wraps around at 1.0, so use the circular distance
            d = abs(a - b)
            return d if d < 0.5 else 1.0-d
        kernel = [
            (-1, -1),
            ( 0, -1),
            ( 1, -1),
            (-1,  0),
            ( 1,  0),
            (-1,  1),
            ( 0,  1),
            ( 1,  1)
        ]
        kn = 1.0/(len(kernel))
        cell_data = list()
        for y in range(h):
            row = list()
            for x in range(w):
                nx = 0.0
                ny = 0.0
                dispx = 0.0
                dispy = 0.0
                # fixed: renamed pixel components — the original assigned to
                # `h`, clobbering the height parameter, and to `os`, shadowing
                # the os module inside this function
                hue, sat, val = im.get_pixel(x, y)
                for ox, oy in kernel:
                    o_hue, o_sat, o_val = im.get_pixel(x+ox, y+oy)
                    dh = _hue_dist(hue, o_hue)
                    ds = sat - o_sat
                    dv = val - o_val
                    adh = abs(dh)
                    ads = abs(ds)
                    adv = abs(dv)
                    # the channel with the largest difference drives the weight
                    dw = dv if adv > ads else ds if ads > adh else dh
                    vx, vy = ox, oy
                    vl = math.sqrt(vx*vx + vy*vy)
                    vx /= vl
                    vy /= vl
                    nx += vx*dw
                    ny += vy*dw
                    # NOTE(review): accumulates squares of the *running* sums,
                    # not of the per-neighbor contributions; preserved as-is
                    dispx += nx*nx
                    dispy += ny*ny
                nx = nx*kn
                ny = ny*kn
                dispx = math.sqrt(dispx)*kn
                dispy = math.sqrt(dispy)*kn
                dispw = sigmoid(
                    math.sqrt(
                        max(abs(nx), abs(ny), abs(dispx-dispy))
                    ),
                    2.5
                )
                nx = 0.5 + 0.5*nx
                ny = 0.5 + 0.5*ny
                bx, by = bg.get(x, y)
                row.append((mix(bx, nx, dispw), mix(by, ny, dispw)))
            cell_data.append(row)
        return cell_data
    # --------------------------------------------------------------------------
    def __init__(self, options, bg, w, h):
        self._offsets = self._gen_offsets(
            options.image.converted("HSV"),
            bg,
            w, h)
    # --------------------------------------------------------------------------
    def get(self, x, y):
        """Offset pair of cell (x, y)."""
        return self._offsets[y][x]
# ------------------------------------------------------------------------------
class HoneycombXCellOffsets(object):
    """Pulls cell offsets toward a honeycomb layout with staggered columns."""
    # --------------------------------------------------------------------------
    def __init__(self, options, bg, w, h):
        self._fact_x = 0.8
        self._fact_y = 0.9
        self._bg = bg
    # --------------------------------------------------------------------------
    def get(self, x, y):
        """Honeycomb target offset blended with the background offset."""
        target_x = 0.5
        target_y = 0.0 if x % 2 == 0 else 0.5
        base_x, base_y = self._bg.get(x, y)
        return (
            mix(base_x, target_x, self._fact_x),
            mix(base_y, target_y, self._fact_y),
        )
# ------------------------------------------------------------------------------
class HoneycombYCellOffsets(object):
    """Pulls cell offsets toward a honeycomb layout with staggered rows."""
    # --------------------------------------------------------------------------
    def __init__(self, options, bg, w, h):
        self._fact_x = 0.9
        self._fact_y = 0.8
        self._bg = bg
    # --------------------------------------------------------------------------
    def get(self, x, y):
        """Honeycomb target offset blended with the background offset."""
        target_x = 0.0 if y % 2 == 0 else 0.5
        target_y = 0.5
        base_x, base_y = self._bg.get(x, y)
        return (
            mix(base_x, target_x, self._fact_x),
            mix(base_y, target_y, self._fact_y),
        )
# ------------------------------------------------------------------------------
class VoronoiArgumentParser(argparse.ArgumentParser):
    """Command-line argument parser for the voronoi-svg generator."""
    # --------------------------------------------------------------------------
    def _nonnegative_int(self, x):
        """Argument type: strictly positive int; exits with a parser error otherwise.

        Fixed: validation previously relied on `assert`, which is stripped
        under `python -O` (silently accepting non-positive values); use an
        explicit check instead.
        """
        try:
            value = int(x)
        except (TypeError, ValueError):
            self.error("`%s' is not a positive integer value" % str(x))
        if value <= 0:
            self.error("`%s' is not a positive integer value" % str(x))
        return value
    # --------------------------------------------------------------------------
    def __init__(self, **kw):
        argparse.ArgumentParser.__init__(self, **kw)
        self.add_argument(
            'output',
            nargs='?',
            type=argparse.FileType('w'),
            default=sys.stdout
        )
        self.add_argument(
            '--log', '-l',
            type=argparse.FileType('w'),
            default=sys.stderr
        )
        self.add_argument(
            '--jobs', '-j',
            dest="job_count",
            type=self._nonnegative_int,
            action="store",
            default=multiprocessing.cpu_count()
        )
        self.add_argument(
            '--x-cells', '-X',
            type=self._nonnegative_int,
            action="store",
            default=None
        )
        self.add_argument(
            '--y-cells', '-Y',
            type=self._nonnegative_int,
            action="store",
            default=None
        )
        self.add_argument(
            '--width', '-W',
            type=self._nonnegative_int,
            action="store",
            default=512
        )
        self.add_argument(
            '--height', '-H',
            type=self._nonnegative_int,
            action="store",
            default=512
        )
        self.add_argument(
            '--units', '-U',
            action="store",
            default="px"
        )
        self.add_argument(
            '--stroke-width', '-s',
            type=float,
            action="store",
            default=0.5
        )
        self.add_argument(
            '--value-low', '-vl',
            type=float,
            action="store",
            default=0.05
        )
        self.add_argument(
            '--value-high', '-vh',
            type=float,
            action="store",
            default=0.95
        )
        self.add_argument(
            '--cell-z-coord', '-cz',
            type=float,
            action="store",
            default=0.0
        )
        self.add_argument(
            '--scale', '-S',
            type=float,
            action="store",
            default=0.9
        )
        self.add_argument(
            '--scale-mode', '-Q',
            type=str,
            choices=["constant", "linear", "sqrt", "pow2", "exp", "sigmoid"],
            action="store",
            default="constant"
        )
        self.add_argument(
            '--seed', '-rs',
            type=float,
            action="store",
            default=None
        )
        self.add_argument(
            '--rim-seed', '-Rs',
            type=float,
            action="store",
            default=None
        )
        self.add_argument(
            '--transformable', '-T',
            action="store_true",
            default=False
        )
        self.add_argument(
            '--color-mode', '-M',
            type=str,
            choices=["grayscale", "cell-coord", "image-rgb"],
            action="store",
            default="grayscale"
        )
        self.add_argument(
            '--cell-mode', '-C',
            type=str,
            choices=["full", "scaled", "flagstone","pebble", "worley"],
            action="store",
            default="full"
        )
        self.add_argument(
            '--offs-mode', '-O',
            type=str,
            choices=["default", "honeycomb-x", "honeycomb-y"],
            action="store",
            default="default"
        )
        self.add_argument(
            '--image', '-i',
            dest="image_path",
            type=os.path.realpath,
            action="store",
            default=None
        )
        self.add_argument(
            '--verbose', '-v',
            action="store_true",
            default=False
        )
    # --------------------------------------------------------------------------
    def process_parsed_options(self, options):
        """Validate and post-process parsed options.

        Loads the optional guide image, derives missing cell counts from it
        (or falls back to 32x32), and configures worley-mode requirements.
        """
        if options.transformable:
            # transformable tiles must be square in both pixels and cells
            if options.width != options.height:
                self.error("width and height must be the same in transformable mode")
            if options.x_cells != options.y_cells:
                self.error("X-cells and Y-cells must be the same in transformable mode")
        if options.image_path is not None:
            options.image = ImageSampler.from_file(
                options.image_path,
                options.x_cells,
                options.y_cells
            )
            if options.x_cells is None:
                options.x_cells = options.image.width()
            if options.y_cells is None:
                options.y_cells = options.image.height()
        else:
            options.image = NoImageSampler()
            if options.x_cells is None:
                options.x_cells = 32
            if options.y_cells is None:
                options.y_cells = 32
        if options.cell_mode in ["worley"]:
            # worley rendering needs neighbor info and is not parallelizable
            options.need_neighbors = True
            options.job_count = 1
        else:
            options.need_neighbors = False
        return options
    # --------------------------------------------------------------------------
    def parse_args(self):
        """Parse sys.argv and return post-processed options."""
        return self.process_parsed_options(
            argparse.ArgumentParser.parse_args(self)
        )
# ------------------------------------------------------------------------------
def make_argument_parser():
    """Construct the command-line parser for this script.

    Fixed: the --help description previously read "Utility annotating lines
    read from standard input" — a copy-paste leftover from another tool that
    misdescribed this SVG generator to users.
    """
    return VoronoiArgumentParser(
        prog="voronoi-svg",
        description="""
            Generates SVG images with Voronoi-diagram-based patterns
        """
    )
# ------------------------------------------------------------------------------
class Renderer(object):
    """Renders a randomized Voronoi mesh as SVG path elements.

    All configuration comes from command-line options (see
    VoronoiArgumentParser); every option becomes an attribute in __init__,
    which also selects the color, cell and offset strategies.
    """
    # --------------------------------------------------------------------------
    def grayscale_color_str(self, v):
        """Map v in [0, 1] to an SVG gray color string '#rrggbb'."""
        c = "%02x" % int(255*v)
        return "#"+3*c
    # --------------------------------------------------------------------------
    def cell_offset(self, x, y):
        """Offset of cell (x, y); indices wrap around the grid."""
        cy = (y+self.y_cells)%self.y_cells
        cx = (x+self.x_cells)%self.x_cells
        return self.cell_offsets.get(cx, cy)
    # --------------------------------------------------------------------------
    def cell_value(self, x, y):
        """Random value of cell (x, y); indices wrap around the grid."""
        cy = (y+self.y_cells)%self.y_cells
        cx = (x+self.x_cells)%self.x_cells
        return self.cell_values.get(cx, cy)
    # --------------------------------------------------------------------------
    def cell_grayscale_color(self, x, y):
        """Cell color from its random value, mapped into [value_low, value_high]."""
        cv = self.cell_value(x, y)
        v = self.value_low + cv*(self.value_high-self.value_low)
        return self.grayscale_color_str(v)
    # --------------------------------------------------------------------------
    def cell_coord_color(self, x, y):
        """Encode the wrapped cell coordinates and z-coordinate as an RGB color."""
        x = (x + self.x_cells) % self.x_cells
        y = (y + self.y_cells) % self.y_cells
        r = int((256*x)/self.x_cells)
        g = int((256*y)/self.y_cells)
        b = int((256*self.cell_z_coord))
        return "#%02x%02x%02x" % (r, g, b)
    # --------------------------------------------------------------------------
    def cell_image_color(self, x, y):
        """Cell color sampled from the guide image."""
        r, g, b = self.image.get_pixel(x, y)
        return "#%02x%02x%02x" % (int(r*255), int(g*255), int(b*255))
    # --------------------------------------------------------------------------
    def cell_gradient_id(self, x, y, i, j):
        """Unique id of the gradient between cell (x, y) and its (i, j) neighbor."""
        s = "grad%d_%d" % (
            (y+3) * (self.x_cells + 6) + (x+3),
            (y+j+3) * (self.x_cells + 6) + (x+i+3)
        )
        return s
    # --------------------------------------------------------------------------
    def cell_scale(self, x, y):
        """Scale factor of cell (x, y) according to the selected scale mode."""
        coef = 1.0
        if self.scale_mode == "linear":
            coef = self.cell_value(x, y)
        elif self.scale_mode == "sqrt":
            coef = math.sqrt(self.cell_value(x, y))
        elif self.scale_mode == "pow2":
            coef = math.pow(self.cell_value(x, y), 2)
        elif self.scale_mode == "exp":
            coef = math.exp(self.cell_value(x, y)) / math.exp(1)
        elif self.scale_mode == "sigmoid":
            coef = 0.5 - 0.5*math.cos(self.cell_value(x, y)*math.pi)
        return self.scale * coef
    # --------------------------------------------------------------------------
    def full_cell_element_str(self, x, y, unused, corners, offs):
        """Yield the SVG path element for a full (unscaled) cell polygon."""
        clist = ["%.3f %.3f" % (c[0], c[1]) for c in corners]
        pathstr = "M"+" L".join(clist)+" Z"
        yield """
<path d="%(def)s" stroke="%(color)s" fill="%(color)s"/>\n""" % {
            "def": pathstr,
            "color": self.cell_color(x, y)
        }
    # --------------------------------------------------------------------------
    def scaled_cell_element_str(self, x, y, center, corners, offs):
        """Yield the SVG path for a cell shrunk toward its centroid."""
        m = set_center(corners)
        newcorners = [segment_point(m, c, self.cell_scale(x, y)) for c in corners]
        # fixed: the delegate call was missing the `offs` argument (TypeError
        # when iterated) and yielded the generator object itself instead of
        # delegating to it — use `yield from` with the full argument list
        yield from self.full_cell_element_str(x, y, center, newcorners, offs)
    # --------------------------------------------------------------------------
    def flagstone_cell_element_str(self, x, y, center, corners, offs):
        """Yield the SVG path for a flagstone-style (rotated, scaled) cell."""
        zcorners = zip(corners, corners[1:] + [corners[0]])
        c = self.cell_value(x, y)
        newcorners = [segment_point(a, b, c) for (a, b) in zcorners]
        # fixed: same delegation bug as scaled_cell_element_str (missing `offs`,
        # generator yielded instead of its items)
        yield from self.scaled_cell_element_str(x, y, center, newcorners, offs)
    # --------------------------------------------------------------------------
    def pebble_cell_element_str(self, x, y, center, corners, offs):
        """Yield the SVG path for a rounded, pebble-like cell (quadratic Beziers)."""
        m = set_center(corners)
        apoints = [segment_point(m, c, self.cell_scale(x, y)) for c in corners]
        bpoints = apoints[1:] + [apoints[0]]
        c = self.cell_value(x, y)
        zpoints = zip(apoints, bpoints)
        cpoints = [segment_point(a, b, c) for (a, b) in zpoints]
        dpoints = cpoints[1:] + [cpoints[0]]
        zpoints = zip(bpoints, dpoints)
        cfmt = lambda c : "%.3f %.3f" % (c[0], c[1])
        clist = ["%s, %s" % (cfmt(b), cfmt(d)) for (b, d) in zpoints]
        pathstr = "M%s Q" % cfmt(cpoints[0])+" Q".join(clist)+" Z"
        yield """<path d="%(def)s" stroke="%(color)s" fill="%(color)s"/>\n""" % {
            "def": pathstr,
            "color": self.cell_color(x, y)
        }
    # --------------------------------------------------------------------------
    def worley_cell_element_str(self, x, y, center, corners, offs):
        """Yield one gradient-filled triangle per cell corner (Worley-style)."""
        n = len(corners)
        for t in range(n):
            i, j = offs[t]
            verts = (center, corners[t], corners[(t+1)%n])
            clist = ["%.3f %.3f" % (v[0], v[1]) for v in verts]
            pathstr = "M"+" L".join(clist)+" Z"
            yield """<path d="%(def)s" stroke="url(#%(gref)s)" fill="url(#%(gref)s)"/>\n""" % {
                "def": pathstr,
                "gref": self.cell_gradient_id(x, y, i, j)
            }
    # --------------------------------------------------------------------------
    def __init__(self):
        """Parse command-line options and select rendering strategies."""
        useropts = make_argument_parser().parse_args()
        # every command-line option becomes an attribute of the renderer
        for k, v in useropts.__dict__.items():
            self.__dict__[k] = v
        if self.color_mode == "grayscale":
            self.cell_color = lambda x, y: self.cell_grayscale_color(x, y)
        elif self.color_mode == "cell-coord":
            self.cell_color = lambda x, y: self.cell_coord_color(x, y)
        elif self.color_mode == "image-rgb":
            self.cell_color = lambda x, y: self.cell_image_color(x, y)
        if self.cell_mode == "full":
            self.cell_element_str = self.full_cell_element_str
        elif self.cell_mode == "scaled":
            self.cell_element_str = self.scaled_cell_element_str
        elif self.cell_mode == "flagstone":
            self.cell_element_str = self.flagstone_cell_element_str
        elif self.cell_mode == "pebble":
            self.cell_element_str = self.pebble_cell_element_str
        elif self.cell_mode == "worley":
            self.cell_element_str = self.worley_cell_element_str
        self.cell_values = RandomCellValues(
            self,
            self.x_cells,
            self.y_cells
        )
        rco = RandomCellOffsets(
            self,
            self.x_cells,
            self.y_cells
        )
        if self.offs_mode == "honeycomb-x":
            self.cell_offsets = HoneycombXCellOffsets(
                self,
                rco,
                self.x_cells,
                self.y_cells
            )
        elif self.offs_mode == "honeycomb-y":
            self.cell_offsets = HoneycombYCellOffsets(
                self,
                rco,
                self.x_cells,
                self.y_cells
            )
        else:
            # default mode follows image contours; with a NoImageSampler the
            # uniform image leaves the random offsets largely unchanged
            self.cell_offsets = ImageContourCellOffsets(
                self,
                rco,
                self.x_cells,
                self.y_cells
            )
        self.values = dict()
        self.values["width"] = self.width
        self.values["height"] = self.height
        self.values["wunit"] = self.units
        self.values["hunit"] = self.units
        # fixed-width "x y" format sized to the largest cell index
        self.cell_fmt = "%%%dd %%%dd\n" % (
            int(math.log10(self.x_cells)+1),
            int(math.log10(self.y_cells)+1)
        )
# ------------------------------------------------------------------------------
def cell_world_coord(renderer, x, y):
    """Map grid cell (x, y), displaced by its random offset, to world space."""
    off = renderer.cell_offset(x, y)
    cell_w = renderer.width / renderer.x_cells
    cell_h = renderer.height / renderer.y_cells
    return numpy.array(((x + off[0]) * cell_w, (y + off[1]) * cell_h))
# ------------------------------------------------------------------------------
def cell_value(renderer, x, y):
    """Look up the renderer's stored per-cell random value for (x, y)."""
    return renderer.get_value(x, y)
# ------------------------------------------------------------------------------
def cell_color(renderer, x, y):
    """Grayscale color string for the cell's value, rescaled into
    [value_low, value_high]."""
    span = renderer.value_high - renderer.value_low
    level = renderer.value_low + cell_value(renderer, x, y) * span
    return grayscalestr(level)
# ------------------------------------------------------------------------------
def offs_cell_world_coord(renderer, x, y, o):
    """World coordinate of the cell displaced from (x, y) by grid offset *o*."""
    di, dj = o[0], o[1]
    return cell_world_coord(renderer, x + di, y + dj)
# ------------------------------------------------------------------------------
def make_cell(renderer, x, y):
    """Build the Voronoi-style polygon for grid cell (x, y).

    Returns (owc, corners, neighbors): the cell's world-space center, its
    polygon corner points sorted by angle around that center, and -- only
    when renderer.need_neighbors is set -- the grid offset of the neighbor
    sharing each polygon edge (otherwise neighbors stays empty).
    """
    owc = cell_world_coord(renderer, x, y)
    # The 24 neighbor offsets in the surrounding 5x5 block, excluding (0, 0).
    offsets = []
    for j in range(-2, 3):
        for i in range(-2, 3):
            if j != 0 or i != 0:
                offsets.append((i, j))
    loffs = len(offsets)
    # One "cut" per neighbor: the bisecting line of the segment from this
    # cell's center to the neighbor's center, stored as (midpoint, normal).
    cuts = []
    for o in offsets:
        cwc = offs_cell_world_coord(renderer, x, y, o)
        sm = segment_midpoint(owc, cwc)
        sn = segment_normal(owc, cwc)
        cuts.append((sm, sn))
    assert loffs == len(cuts)
    # Candidate corners: pairwise intersections of the cut lines; each entry
    # remembers the pair of cut indices that produced it.
    intersections = []
    for cj in range(loffs):
        for ci in range(cj+1, loffs):
            t = line_intersect_param(cuts[cj], cuts[ci])
            if t is not None:
                intersections.append((cuts[cj][0]+cuts[cj][1]*t, set([ci, cj])))
    # Keep only candidates whose segment back to the center is not crossed
    # by any cut, i.e. points on the boundary of the innermost region.
    corners_and_cuts = []
    for isc, cus in intersections:
        seg = (owc, isc-owc)
        eps = 0.001  # tolerance so a corner is not rejected by its own cuts
        skip = False
        for cut in cuts:
            t = line_intersect_param(seg, cut)
            if t is not None and t >= 0 and t < 1-eps:
                skip = True
                break
        if not skip:
            corners_and_cuts.append((isc, cus))
    def corner_angle(p):
        # Sort key: angle of the corner point around the cell center.
        v = p[0] - owc
        return math.atan2(v[1], v[0])
    sorted_corners_and_cuts = sorted(corners_and_cuts, key=corner_angle)
    corners = []
    neighbors = []
    caclen = len(sorted_corners_and_cuts)
    for c in range(caclen):
        co0, cu0 = sorted_corners_and_cuts[c]
        co1, cu1 = sorted_corners_and_cuts[(c+1)%caclen]
        # Two angularly adjacent corners share exactly one cut; that cut's
        # neighbor offset is the cell across the polygon edge between them.
        cu = cu0.intersection(cu1)
        corners.append(co0)
        if renderer.need_neighbors:
            assert len(cu) == 1
            neighbors.append(offsets[cu.pop()])
    if renderer.need_neighbors:
        assert len(corners) == len(neighbors)
    return owc, corners, neighbors
# ------------------------------------------------------------------------------
def do_make_cell(renderer, job, output_lock):
    """Worker body: render every job_count-th cell, starting at index *job*.

    Cells are enumerated over the grid extended by a one-cell border on each
    side (indices map to x in [-1, x_cells] and y in [-1, y_cells]). SVG
    fragments are buffered and written to the shared output stream under
    *output_lock* so concurrent workers cannot interleave partial elements.
    """
    w = renderer.x_cells + 2
    h = renderer.y_cells + 2
    k = job
    n = w * h
    res = []
    log = []
    def _flush(res, log):
        # Write the buffered SVG (and, when verbose, the progress log) in a
        # single locked burst, then hand back fresh empty buffers.
        r = str().join(res)
        if renderer.verbose:
            l = str().join(log)
        try:
            output_lock.acquire()
            renderer.output.write(r)
            if renderer.verbose:
                renderer.log.write(l)
        finally:
            output_lock.release()
        return ([], [])
    try:
        while k < n:
            y = int(k / w) - 1
            x = int(k % w) - 1
            center, corners, offs = make_cell(renderer, x, y)
            for svg_str in renderer.cell_element_str(x, y, center, corners, offs):
                res.append(svg_str)
            if renderer.verbose:
                log.append(renderer.cell_fmt % (x, y))
            else:
                # Placeholder only; log is never joined when not verbose.
                log.append(None)
            if len(res) >= renderer.job_count:
                # NOTE(review): job_count doubles as the flush batch size
                # here -- presumably just a convenient threshold; confirm.
                res, log = _flush(res, log)
            k += renderer.job_count
    except KeyboardInterrupt:
        pass
    # Final flush writes whatever remains, even after an interrupt.
    _flush(res, log)
# ------------------------------------------------------------------------------
def make_gradients(renderer):
    """Write one SVG <linearGradient> per (cell, neighbor-offset) pair.

    Each gradient runs from a cell's world-space center to a neighbor's
    center. In "worley" cell mode two color stops are emitted that encode
    the wrapped cell coordinates and the cell value in the RGBA channels;
    otherwise the gradient element is left without stops.
    """
    w = renderer.x_cells
    h = renderer.y_cells
    grad_fmt = """<linearGradient gradientUnits="userSpaceOnUse" id="%(gref)s" """+\
        """x1="%(x1)f" y1="%(y1)f" x2="%(x2)f" y2="%(y2)f">\n"""
    stop_fmt = """<stop offset="%(soffs)d%%" style="stop-color:%(color)s"/>\n"""
    color_fmt = "#%(r)02x%(g)02x%(b)02x%(a)02x"
    # The 24 neighbor offsets in the surrounding 5x5 block, excluding (0, 0).
    offsets = [(i, j) for j in range(-2, 3) for i in range(-2, 3) if (i, j) != (0, 0)]
    for y in range(-1, h+2):
        for x in range(-1, w+2):
            for i, j in offsets:
                here_wc = cell_world_coord(renderer, x, y)
                there_wc = cell_world_coord(renderer, x+i, y+j)
                renderer.output.write(grad_fmt % {
                    "gref": renderer.cell_gradient_id(x, y, i, j),
                    "x1": here_wc[0],
                    "y1": here_wc[1],
                    "x2": there_wc[0],
                    "y2": there_wc[1]
                })
                if renderer.cell_mode == "worley":
                    # Opaque stop at the center, transparent-blue stop at 50%.
                    for soffs, blue in ((0.0, 255), (50.0, 0)):
                        renderer.output.write(stop_fmt % {
                            "soffs": soffs,
                            "color": color_fmt % {
                                "r": int(255*float((x+w) % w)/w),
                                "g": int(255*float((y+h) % h)/h),
                                "a": int(255*renderer.cell_value(x, y)),
                                "b": blue
                            }
                        })
                renderer.output.write("""</linearGradient>\n""")
# ------------------------------------------------------------------------------
def print_svg(renderer):
    """Emit the complete SVG document, parallelizing cell generation.

    Writes the document header, optional gradient definitions, then spawns
    renderer.job_count worker processes that each write their share of cell
    elements (serialized through a shared lock). Returns 0 on success, 1 if
    any worker exited with a non-zero code.
    """
    renderer.output.write("""<?xml version="1.0" encoding="utf8"?>\n""")
    renderer.output.write("""<svg xmlns="http://www.w3.org/2000/svg"
    xmlns:svg="http://www.w3.org/2000/svg"
    width="%(width)s%(wunit)s" height="%(height)s%(hunit)s"
    viewBox="0 0 %(width)s %(height)s"
    version="1.1"
    contentScriptType="text/ecmascript"
    contentStyleType="text/css"\n>\n""" % renderer.values)
    renderer.output.write(
        """<g class="voronoi" stroke-width="%(stroke_width)f">\n""" % {
            "stroke_width": renderer.stroke_width
        }
    )
    renderer.output.write("<defs>\n")
    # Gradient definitions are only needed by the worley cell mode.
    if renderer.cell_mode in ["worley"]:
        make_gradients(renderer)
    renderer.output.write("</defs>\n")
    # Flush the header before forking so the children's writes land after it.
    # NOTE(review): children inherit renderer.output; this presumably relies
    # on a fork-style start method -- confirm behavior on spawn platforms.
    renderer.output.flush()
    try:
        output_lock = multiprocessing.Lock()
        def call_do_make_cell(renderer, job, output_lock):
            # Turn any worker failure into a non-zero exit code that the
            # parent can detect through Process.exitcode.
            try:
                do_make_cell(renderer, job, output_lock)
            except Exception:
                sys.stderr.write("failed to generate SVG, please retry\n")
                raise SystemExit
        tasks = []
        for job in range(renderer.job_count):
            t = multiprocessing.Process(
                target=call_do_make_cell,
                args=(renderer, job, output_lock)
            )
            t.start()
            tasks.append(t)
        for t in tasks:
            t.join()
            if t.exitcode is not None and t.exitcode != 0:
                return 1
    except KeyboardInterrupt:
        pass
    renderer.output.write("""\n""")
    renderer.output.write("""</g>\n""")
    renderer.output.write("""</svg>\n""")
    return 0
# ------------------------------------------------------------------------------
def main():
    """Build a renderer from the command line and emit the SVG document."""
    sys.exit(print_svg(Renderer()))
# ------------------------------------------------------------------------------
if __name__ == "__main__":
    main()
| [
"math.exp",
"random.SystemRandom",
"multiprocessing.Lock",
"math.atan2",
"math.sqrt",
"random.Random",
"numpy.empty_like",
"time.time",
"argparse.ArgumentParser.parse_args",
"sys.stderr.write",
"numpy.array",
"math.log10",
"numpy.dot",
"multiprocessing.Process",
"argparse.ArgumentParser.... | [((1014, 1034), 'numpy.empty_like', 'numpy.empty_like', (['v1'], {}), '(v1)\n', (1030, 1034), False, 'import numpy\n'), ((1851, 1869), 'numpy.dot', 'numpy.dot', (['d2p', 'dp'], {}), '(d2p, dp)\n', (1860, 1869), False, 'import numpy\n'), ((1880, 1898), 'numpy.dot', 'numpy.dot', (['d2p', 'd1'], {}), '(d2p, d1)\n', (1889, 1898), False, 'import numpy\n'), ((26133, 26251), 'numpy.array', 'numpy.array', (['((x + c[0]) * (renderer.width / renderer.x_cells), (y + c[1]) * (renderer.\n height / renderer.y_cells))'], {}), '(((x + c[0]) * (renderer.width / renderer.x_cells), (y + c[1]) *\n (renderer.height / renderer.y_cells)))\n', (26144, 26251), False, 'import numpy\n'), ((13000, 13044), 'argparse.ArgumentParser.__init__', 'argparse.ArgumentParser.__init__', (['self'], {}), '(self, **kw)\n', (13032, 13044), False, 'import argparse\n'), ((28117, 28139), 'math.atan2', 'math.atan2', (['v[1]', 'v[0]'], {}), '(v[1], v[0])\n', (28127, 28139), False, 'import math\n'), ((32884, 32906), 'multiprocessing.Lock', 'multiprocessing.Lock', ([], {}), '()\n', (32904, 32906), False, 'import multiprocessing\n'), ((742, 754), 'math.exp', 'math.exp', (['(-x)'], {}), '(-x)\n', (750, 754), False, 'import math\n'), ((5443, 5472), 'random.Random', 'random.Random', (['self._rim_seed'], {}), '(self._rim_seed)\n', (5456, 5472), False, 'import random\n'), ((18095, 18135), 'argparse.ArgumentParser.parse_args', 'argparse.ArgumentParser.parse_args', (['self'], {}), '(self)\n', (18129, 18135), False, 'import argparse\n'), ((33261, 33349), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'call_do_make_cell', 'args': '(renderer, job, output_lock)'}), '(target=call_do_make_cell, args=(renderer, job,\n output_lock))\n', (33284, 33349), False, 'import multiprocessing\n'), ((4841, 4870), 'random.Random', 'random.Random', (['self._mid_seed'], {}), '(self._mid_seed)\n', (4854, 4870), False, 'import random\n'), ((5112, 5133), 'random.SystemRandom', 
'random.SystemRandom', ([], {}), '()\n', (5131, 5133), False, 'import random\n'), ((13135, 13157), 'argparse.FileType', 'argparse.FileType', (['"""w"""'], {}), "('w')\n", (13152, 13157), False, 'import argparse\n'), ((13272, 13294), 'argparse.FileType', 'argparse.FileType', (['"""w"""'], {}), "('w')\n", (13289, 13294), False, 'import argparse\n'), ((13511, 13538), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (13536, 13538), False, 'import multiprocessing\n'), ((10122, 10150), 'math.sqrt', 'math.sqrt', (['(vx * vx + vy * vy)'], {}), '(vx * vx + vy * vy)\n', (10131, 10150), False, 'import math\n'), ((10418, 10434), 'math.sqrt', 'math.sqrt', (['dispx'], {}), '(dispx)\n', (10427, 10434), False, 'import math\n'), ((10462, 10478), 'math.sqrt', 'math.sqrt', (['dispy'], {}), '(dispy)\n', (10471, 10478), False, 'import math\n'), ((33087, 33145), 'sys.stderr.write', 'sys.stderr.write', (['"""failed to generate SVG, please retry\n"""'], {}), "('failed to generate SVG, please retry\\n')\n", (33103, 33145), False, 'import sys\n'), ((5175, 5186), 'time.time', 'time.time', ([], {}), '()\n', (5184, 5186), False, 'import time\n'), ((25883, 25907), 'math.log10', 'math.log10', (['self.x_cells'], {}), '(self.x_cells)\n', (25893, 25907), False, 'import math\n'), ((25928, 25952), 'math.log10', 'math.log10', (['self.y_cells'], {}), '(self.y_cells)\n', (25938, 25952), False, 'import math\n'), ((20849, 20860), 'math.exp', 'math.exp', (['(1)'], {}), '(1)\n', (20857, 20860), False, 'import math\n')] |
import bisect
from typing import List, Tuple, NewType, TypeVar
import numpy as np
import pickle
from sklearn.linear_model import LogisticRegression
# Define data types.
Data = List[Tuple[float, float]] # List of (predicted_probability, true_label).
Bins = List[float] # List of bin boundaries, excluding 0.0, but including 1.0.
BinnedData = List[Data] # binned_data[i] contains the data in bin i.
T = TypeVar('T')
eps = 1e-6
# Functions the produce bins from data.
def split(sequence: List[T], parts: int) -> List[List[T]]:
    """Partition *sequence* into *parts* contiguous chunks of near-equal size."""
    assert parts <= len(sequence)
    chunk = int(np.ceil(len(sequence) * 1.0 / parts))
    # Sanity: the chunk size is the smallest one that covers the sequence.
    assert chunk * parts >= len(sequence)
    assert (chunk - 1) * parts < len(sequence)
    return [sequence[start:start + chunk]
            for start in range(0, len(sequence), chunk)]
def get_equal_bins(probs: List[float], num_bins: int=10) -> Bins:
    """Get bins that contain approximately an equal number of data points.

    Each boundary is the midpoint between the largest value of one chunk and
    the smallest value of the next; 1.0 always closes the last bin, and
    duplicate boundaries are collapsed.
    """
    chunks = split(sorted(probs), num_bins)
    boundaries: Bins = []
    for left, right in zip(chunks, chunks[1:]):
        boundaries.append((left[-1] + right[0]) / 2.0)
    boundaries.append(1.0)
    return sorted(set(boundaries))
def get_equal_prob_bins(probs: List[float], num_bins: int=10) -> Bins:
    """Bin boundaries of equal probability width: [1/n, 2/n, ..., 1.0]."""
    edges = []
    for i in range(1, num_bins + 1):
        edges.append(i * 1.0 / num_bins)
    return edges
def get_discrete_bins(data: List[float]) -> Bins:
    """Boundaries halfway between consecutive distinct values, ending at 1.0."""
    values = sorted(np.unique(data))
    mids = [(lo + hi) / 2.0 for lo, hi in zip(values, values[1:])]
    mids.append(1.0)
    return mids
# User facing functions to measure calibration error.
def get_top_calibration_error_uncertainties(probs, labels, p=2, alpha=0.1):
    """Bootstrap confidence interval for the top-label calibration error."""
    return get_calibration_error_uncertainties(
        probs, labels, p, alpha, mode='top-label')
def get_calibration_error_uncertainties(probs, labels, p=2, alpha=0.1, mode='marginal'):
    """Bootstrap a 1 - alpha confidence interval for the calibration error.

    Args:
        probs: (n,) binary confidences or (n, k) per-class probabilities.
        labels: (n,) integer labels (0/1 in the binary setting, 0..k-1 else).
        p: order of the lp calibration error, p >= 1.
        mode: 'marginal' (average per-class error) or 'top-label'.

    Returns:
        [lower, mid, upper]: bootstrap resampling interval; mid is the median
        of the bootstrap estimates and can serve as a debiased point estimate
        when p != 2 (e.g. the ECE with p = 1).
    """
    paired = list(zip(probs, labels))
    def _estimate_ce(samples):
        sample_probs, sample_labels = zip(*samples)
        return get_calibration_error(sample_probs, sample_labels, p,
                                     debias=False, mode=mode)
    lower, mid, upper = bootstrap_uncertainty(
        paired, _estimate_ce, num_samples=100, alpha=alpha)
    return [lower, mid, upper]
def get_top_calibration_error(probs, labels, p=2, debias=True):
    """Calibration error of the model's most confident prediction."""
    return get_calibration_error(probs, labels, p, debias, mode='top-label')
def get_calibration_error(probs, labels, p=2, debias=True, mode='marginal'):
    """Estimate the lp calibration error, auto-detecting the model type.

    Args:
        probs: (n,) binary confidences or (n, k) per-class probabilities.
        labels: (n,) integer labels (0/1 in the binary setting, 0..k-1 else).
        p: order of the lp calibration error, p >= 1.
        debias: apply the debiased estimator (provably better samples for p=2).
        mode: 'marginal' (average per-class error) or 'top-label'.

    Returns:
        Estimated calibration error as a float.

    A heuristic checks whether the probabilities have few enough distinct
    values to have come from a binning method; otherwise they are treated as
    a scaling method, for which only a lower bound can be estimated. Use
    get_binning_ce or lower_bound_scaling_ce directly for explicit control.
    """
    if is_discrete(probs):
        return get_binning_ce(probs, labels, p, debias, mode=mode)
    return lower_bound_scaling_ce(probs, labels, p, debias, mode=mode)
def lower_bound_scaling_top_ce(probs, labels, p=2, debias=True, num_bins=15,
                               binning_scheme=get_equal_bins):
    """Lower bound on the top-label calibration error of a scaling method."""
    return lower_bound_scaling_ce(probs, labels, p, debias, num_bins,
                                  binning_scheme, mode='top-label')
def lower_bound_scaling_ce(probs, labels, p=2, debias=True, num_bins=15,
                           binning_scheme=get_equal_bins, mode='marginal'):
    """Lower bound the lp calibration error of a model with continuous output.

    Args:
        probs: (n,) binary confidences or (n, k) per-class probabilities.
        labels: (n,) integer labels (0/1 in the binary setting, 0..k-1 else).
        p: order of the lp calibration error, p >= 1.
        debias: apply the debiased estimator (provably better samples for p=2).
        num_bins: number of bins used to discretize the probabilities.
        binning_scheme: maps (probs, num_bins) to bin boundaries, e.g.
            get_equal_bins or get_equal_prob_bins.
        mode: 'marginal' (average per-class error) or 'top-label'.

    Returns:
        Estimated lower bound for the calibration error as a float. For
        scaling methods the true error cannot be estimated, only bounded
        from below.
    """
    return _get_ce(probs, labels, p, debias, num_bins, binning_scheme, mode=mode)
def get_binning_top_ce(probs, labels, p=2, debias=True, mode='marginal'):
    """Top-label calibration error of a binned model.

    NOTE(review): the `mode` parameter is accepted but ignored -- this
    wrapper always forwards mode='top-label'. It appears to exist only for
    signature parity with get_binning_ce; confirm before removing it.
    """
    return get_binning_ce(probs, labels, p, debias, mode='top-label')
def get_binning_ce(probs, labels, p=2, debias=True, mode='marginal'):
    """Calibration error of a binned model (finitely many output values).

    Args:
        probs: (n,) binary confidences or (n, k) per-class probabilities.
        labels: (n,) integer labels (0/1 in the binary setting, 0..k-1 else).
        p: order of the lp calibration error, p >= 1.
        debias: apply the debiased estimator (provably better samples for p=2).
        mode: 'marginal' (average per-class error) or 'top-label'.

    Returns:
        Estimated calibration error as a float. The bins are recovered
        directly from the distinct probability values, so no bin count is
        needed.
    """
    return _get_ce(probs, labels, p, debias, None,
                   binning_scheme=get_discrete_bins, mode=mode)
def get_ece(probs, labels, debias=False, num_bins=15, mode='top-label'):
    """The standard ECE: l1 error over equal-probability-width bins."""
    return lower_bound_scaling_ce(probs, labels, p=1, debias=debias,
                                  num_bins=num_bins,
                                  binning_scheme=get_equal_prob_bins,
                                  mode=mode)
def _get_ce(probs, labels, p, debias, num_bins, binning_scheme, mode='marginal'):
    """Shared implementation behind the public calibration-error functions.

    Validates inputs, then measures the lp calibration error either per
    class ('marginal', averaged over classes in the lp sense) or for the
    argmax prediction ('top-label').
    """
    def ce_1d(probs, labels):
        # Calibration error for a single binary problem (1D probs/labels).
        assert probs.shape == labels.shape
        assert len(probs.shape) == 1
        data = list(zip(probs, labels))
        if binning_scheme == get_discrete_bins:
            # Discrete (already-binned) outputs: bins come from the distinct
            # values themselves, so num_bins must not be supplied.
            assert(num_bins is None)
            bins = binning_scheme(probs)
        else:
            bins = binning_scheme(probs, num_bins=num_bins)
        if p == 2 and debias:
            return unbiased_l2_ce(bin(data, bins))
        elif debias:
            return normal_debiased_ce(bin(data, bins), power=p)
        else:
            return plugin_ce(bin(data, bins), power=p)
    if mode != 'marginal' and mode != 'top-label':
        raise ValueError("mode must be 'marginal' or 'top-label'.")
    probs = np.array(probs)
    labels = np.array(labels)
    if not(np.issubdtype(labels.dtype, np.integer)):
        # NOTE(review): message has a typo ("should an"); left unchanged to
        # keep behavior byte-identical.
        raise ValueError('labels should an integer numpy array.')
    if len(labels.shape) != 1:
        raise ValueError('labels should be a 1D numpy array.')
    if probs.shape[0] != labels.shape[0]:
        raise ValueError('labels and probs should have the same number of entries.')
    if len(probs.shape) == 1:
        # If 1D (2-class setting), compute the regular calibration error.
        if np.min(labels) != 0 or np.max(labels) != 1:
            raise ValueError('If probs is 1D, each label should be 0 or 1.')
        return ce_1d(probs, labels)
    elif len(probs.shape) == 2:
        if np.min(labels) < 0 or np.max(labels) > probs.shape[1] - 1:
            raise ValueError('labels should be between 0 and num_classes - 1.')
        if mode == 'marginal':
            # Average per-class errors in the lp sense: mean of p-th powers,
            # then the p-th root.
            labels_one_hot = get_labels_one_hot(labels, k=probs.shape[1])
            assert probs.shape == labels_one_hot.shape
            marginal_ces = []
            for k in range(probs.shape[1]):
                cur_probs = probs[:, k]
                cur_labels = labels_one_hot[:, k]
                marginal_ces.append(ce_1d(cur_probs, cur_labels) ** p)
            return np.mean(marginal_ces) ** (1.0 / p)
        elif mode == 'top-label':
            # Calibration of the model's most confident prediction: its
            # confidence versus whether the argmax was correct.
            preds = get_top_predictions(probs)
            correct = (preds == labels).astype(probs.dtype)
            confidences = get_top_probs(probs)
            return ce_1d(confidences, correct)
    else:
        raise ValueError('probs should be a 1D or 2D numpy array.')
def is_discrete(probs):
    """Heuristic: do the probabilities look like the output of a binning
    method (few distinct values per class)?"""
    probs = np.array(probs)
    if probs.ndim == 1:
        return enough_duplicates(probs)
    if probs.ndim == 2:
        return all(enough_duplicates(probs[:, k])
                   for k in range(probs.shape[1]))
    raise ValueError('probs must be a 1D or 2D numpy array.')
def enough_duplicates(array):
    """True if the array has few enough distinct values to look binned."""
    # TODO: instead check that we have at least 2 values in each bin.
    distinct_bins = get_discrete_bins(array)
    return len(distinct_bins) < array.shape[0] / 4.0
# Functions that bin data.
def get_bin(pred_prob: float, bins: List[float]) -> int:
    """Get the index of the bin that pred_prob belongs in.

    Equivalent to bisect.bisect_left: the leftmost bin whose upper boundary
    is >= pred_prob.
    """
    assert 0.0 <= pred_prob <= 1.0
    assert bins[-1] == 1.0
    lo, hi = 0, len(bins)
    while lo < hi:
        mid = (lo + hi) // 2
        if bins[mid] < pred_prob:
            lo = mid + 1
        else:
            hi = mid
    return lo
# NOTE(review): shadows the builtin `bin`; renaming would break callers.
def bin(data: Data, bins: Bins):
    """Partition (prob, label) pairs into the given bins."""
    return fast_bin(data, bins)
def fast_bin(data, bins):
    """Vectorized binning: split (prob, label) rows by which bin each
    probability falls into, returning one 2D array per bin."""
    rows = np.array(data)
    indices = np.searchsorted(bins, rows[:, 0])
    order = np.argsort(indices)
    ordered_indices = indices[order]
    # Positions where the bin index first reaches 1, 2, ... len(bins)-1.
    cut_points = np.searchsorted(ordered_indices, list(range(1, len(bins))))
    return np.split(rows[order], cut_points)
def equal_bin(data: Data, num_bins : int) -> BinnedData:
    """Split the sorted data into num_bins bins of near-equal size."""
    return split(sorted(data), num_bins)
# Calibration error estimators.
def difference_mean(data : Data) -> float:
    """Returns average pred_prob - average label."""
    arr = np.array(data)
    return np.mean(arr[:, 0]) - np.mean(arr[:, 1])
def get_bin_probs(binned_data: BinnedData) -> List[float]:
    """Fraction of all data points that landed in each bin."""
    sizes = [len(b) for b in binned_data]
    total = sum(sizes)
    probs = [s * 1.0 / total for s in sizes]
    # The fractions must sum to one (up to the module-level tolerance).
    assert(abs(sum(probs) - 1.0) < eps)
    return list(probs)
def plugin_ce(binned_data: BinnedData, power=2) -> float:
    """The plug-in estimator of the lp calibration error."""
    def per_bin_error(bin_points: Data):
        # Empty bins contribute nothing.
        if len(bin_points) == 0:
            return 0.0
        return abs(difference_mean(bin_points)) ** power
    probs = get_bin_probs(binned_data)
    errors = [per_bin_error(b) for b in binned_data]
    return np.dot(probs, errors) ** (1.0 / power)
def unbiased_square_ce(binned_data: BinnedData) -> float:
    # Note, this is not the l2 CE. It does not take the square root.
    def debiased_bin_error(bin_points: Data):
        # Bins with fewer than 2 points cannot be debiased; count them as 0.
        if len(bin_points) < 2:
            return 0.0
        plugin = abs(difference_mean(bin_points)) ** 2
        mean_label = np.mean([label for _, label in bin_points])
        # Subtract the (unbiased) variance of the bin's label mean.
        correction = mean_label * (1.0 - mean_label) / (len(bin_points) - 1.0)
        return plugin - correction
    probs = get_bin_probs(binned_data)
    errors = [debiased_bin_error(b) for b in binned_data]
    return np.dot(probs, errors)
def unbiased_l2_ce(binned_data: BinnedData) -> float:
    """Square root of the debiased squared CE, clamped below at zero."""
    squared = unbiased_square_ce(binned_data)
    return max(squared, 0.0) ** 0.5
def normal_debiased_ce(binned_data : BinnedData, power=1, resamples=1000) -> float:
    """Debias the plug-in lp CE by resampling each bin's label mean from a
    normal approximation of its sampling noise."""
    bin_sizes = np.array([len(b) for b in binned_data])
    if np.min(bin_sizes) <= 1:
        raise ValueError('Every bin must have at least 2 points for debiased estimator. '
                         'Try adding the argument debias=False to your function call.')
    label_means = np.array([np.mean([lab for _, lab in b]) for b in binned_data])
    label_stddev = np.sqrt(label_means * (1 - label_means) / bin_sizes)
    model_vals = np.array([np.mean([pred for pred, _ in b]) for b in binned_data])
    assert label_means.shape == (len(binned_data),)
    assert model_vals.shape == (len(binned_data),)
    ce = plugin_ce(binned_data, power=power)
    bin_probs = get_bin_probs(binned_data)
    resampled = []
    for _ in range(resamples):
        # One normal draw per bin, approximating the label-mean noise.
        # TODO: we can also correct the bias for the model_vals, although
        # this is smaller.
        sampled_means = np.random.normal(loc=label_means, scale=label_stddev)
        diffs = np.power(np.abs(sampled_means - model_vals), power)
        resampled.append(np.power(np.dot(bin_probs, diffs), 1.0 / power))
    # Bias correction: 2 * plug-in estimate - mean of resampled estimates.
    return 2 * ce - np.mean(resampled)
# MSE Estimators.
def eval_top_mse(calibrated_probs, probs, labels):
    """Mean squared error between calibrated top-label confidence and
    whether the argmax prediction was correct."""
    is_correct = (get_top_predictions(probs) == labels)
    residual = calibrated_probs - is_correct
    return np.mean(residual ** 2)
def eval_marginal_mse(calibrated_probs, probs, labels):
    """MSE against one-hot labels, scaled by k/2 so binary and multi-class
    values are comparable."""
    assert calibrated_probs.shape == probs.shape
    num_classes = probs.shape[1]
    one_hot = get_labels_one_hot(np.array(labels), num_classes)
    residual = calibrated_probs - one_hot
    return np.mean(residual ** 2) * calibrated_probs.shape[1] / 2.0
# Bootstrap utilities.
def resample(data: List[T]) -> List[T]:
    """Draw len(data) items from *data* uniformly with replacement."""
    n = len(data)
    picks = np.random.choice(list(range(n)), size=n, replace=True)
    return [data[i] for i in picks]
def bootstrap_uncertainty(data: List[T], functional, estimator=None, alpha=10.0,
                          num_samples=1000) -> Tuple[float, float]:
    """Bias-corrected bootstrap interval for 1 - alpha percent confidence.

    Each returned value is (plugin + estimate) minus a percentile of the
    bootstrap distribution: (lower, median-corrected, upper).
    """
    if estimator is None:
        estimator = functional
    estimate = estimator(data)
    plugin = functional(data)
    boot = [estimator(resample(data)) for _ in range(num_samples)]
    return (plugin + estimate - np.percentile(boot, 100 - alpha / 2.0),
            plugin + estimate - np.percentile(boot, 50),
            plugin + estimate - np.percentile(boot, alpha / 2.0))
def precentile_bootstrap_uncertainty(data: List[T], functional, estimator=None, alpha=10.0,
                                     num_samples=1000) -> Tuple[float, float]:
    """Percentile bootstrap interval for 1 - alpha percent confidence.

    NOTE: the misspelled name ("precentile") is kept for compatibility with
    existing callers.
    """
    if estimator is None:
        estimator = functional
    plugin = functional(data)
    estimate = estimator(data)
    boot = [estimator(resample(data)) for _ in range(num_samples)]
    # Shift all percentiles by the estimated bias of the bootstrap median.
    bias = 2 * np.percentile(boot, 50) - plugin - estimate
    return (np.percentile(boot, alpha / 2.0) - bias,
            np.percentile(boot, 50) - bias,
            np.percentile(boot, 100 - alpha / 2.0) - bias)
def bootstrap_std(data: List[T], estimator=None, num_samples=100) -> float:
    """Return the bootstrap estimate of the standard deviation of *estimator*.

    Args:
        data: Dataset to resample from (with replacement).
        estimator: Functional applied to each bootstrap resample. Required;
            the None default is kept only for signature compatibility.
        num_samples: Number of bootstrap resamples.

    Returns:
        Standard deviation of the estimator over the bootstrap resamples.

    Raises:
        ValueError: If estimator is None. (The original docstring was
        copy-pasted from the confidence-interval helpers, and the None
        default previously crashed with an opaque "'NoneType' is not
        callable" TypeError.)
    """
    if estimator is None:
        raise ValueError('bootstrap_std requires an estimator.')
    bootstrap_estimates = [estimator(resample(data)) for _ in range(num_samples)]
    return np.std(bootstrap_estimates)
# Re-Calibration utilities.
def get_platt_scaler(model_probs, labels):
    """Fit Platt scaling: logistic regression on the log-odds of the probs.

    Returns a calibrator function mapping raw probabilities to calibrated
    ones via sigmoid(a * logit(p) + b).
    """
    # Effectively-unregularized logistic regression (huge C).
    classifier = LogisticRegression(C=1e10, solver='lbfgs')
    tiny = 1e-12  # clip so the logit stays finite at 0 and 1
    logits = model_probs.astype(dtype=np.float64)
    logits = np.expand_dims(logits, axis=-1)
    logits = np.clip(logits, tiny, 1 - tiny)
    logits = np.log(logits / (1 - logits))
    classifier.fit(logits, labels)
    def calibrator(probs):
        x = np.array(probs, dtype=np.float64)
        x = np.clip(x, tiny, 1 - tiny)
        x = np.log(x / (1 - x))
        x = x * classifier.coef_[0] + classifier.intercept_
        return 1 / (1 + np.exp(-x))
    return calibrator
def get_histogram_calibrator(model_probs, values, bins):
    """Histogram calibration: map each probability to its bin's mean value."""
    per_bin = [[] for _ in range(len(bins))]
    for prob, value in zip(model_probs, values):
        per_bin[get_bin(prob, bins)].append(float(value))
    def bin_center(idx):
        # Fallback for empty bins: the midpoint of the bin's interval.
        if idx == 0:
            return float(bins[0]) / 2.0
        return float(bins[idx] + bins[idx - 1]) / 2.0
    bin_means = np.array([
        np.mean(vals) if len(vals) else bin_center(idx)
        for idx, vals in enumerate(per_bin)])
    def calibrator(probs):
        return bin_means[np.searchsorted(bins, probs)]
    return calibrator
def get_discrete_calibrator(model_probs, bins):
    """Discrete calibration is histogram calibration with the probabilities
    used as the values themselves."""
    return get_histogram_calibrator(model_probs, model_probs, bins)
# Utils to load and save files.
def save_test_probs_labels(dataset, model, filename):
    """Compute model predictions on the test split and pickle (probs, labels).

    Args:
        dataset: Object with a Keras-style load_data() returning
            ((x_train, y_train), (x_test, y_test)).
        model: Object with a predict(x) method.
        filename: Destination path for the pickled (probs, y_test) tuple.

    Fix: the original leaked the file handle (open() with no close); a
    context manager now guarantees it is closed even if pickling fails.
    """
    (x_train, y_train), (x_test, y_test) = dataset.load_data()
    probs = model.predict(x_test)
    with open(filename, "wb") as f:
        pickle.dump((probs, y_test), f)
def load_test_probs_labels(filename):
    """Load pickled (probs, labels), flatten labels, and shuffle jointly.

    Labels stored as 2D arrays (e.g. Keras (n, 1) shapes) are reduced to
    their first column. Probabilities and labels are then permuted with the
    same random order so their pairing is preserved.

    Fix: the original leaked the file handle (pickle.load(open(...)) with
    no close); a context manager now guarantees it is closed.
    """
    with open(filename, "rb") as f:
        probs, labels = pickle.load(f)
    if len(labels.shape) > 1:
        labels = labels[:, 0]
    indices = np.random.choice(list(range(len(probs))), size=len(probs), replace=False)
    probs = np.array([probs[i] for i in indices])
    labels = np.array([labels[i] for i in indices])
    return probs, labels
def get_top_predictions(probs):
    """Index of the highest-probability class in each row of probs."""
    return np.argmax(probs, axis=1)
def get_top_probs(probs):
    """Highest class probability in each row of probs."""
    return np.max(probs, axis=1)
def get_accuracy(probs, labels):
    """Top-1 accuracy of the argmax predictions in probs against labels.

    Args:
        probs: (n, k) numpy array of per-class probabilities.
        labels: (n,) numpy array of integer labels.

    Returns:
        Fraction of rows whose argmax matches the label.

    Fix: the original referenced an undefined name `predictions`, raising
    NameError on every call; it is now derived from probs via argmax.
    """
    predictions = np.argmax(probs, 1)
    return sum(labels == predictions) * 1.0 / len(labels)
def get_labels_one_hot(labels, k):
    """One-hot encode integer labels spanning exactly 0..k-1 into (n, k)."""
    assert np.min(labels) == 0
    assert np.max(labels) == k - 1
    n = labels.shape[0]
    one_hot = np.zeros((n, k))
    one_hot[np.arange(n), labels] = 1
    return one_hot
| [
"numpy.abs",
"numpy.argmax",
"numpy.clip",
"numpy.argsort",
"numpy.mean",
"numpy.arange",
"numpy.exp",
"numpy.random.normal",
"numpy.unique",
"numpy.std",
"numpy.max",
"typing.TypeVar",
"numpy.square",
"numpy.percentile",
"sklearn.linear_model.LogisticRegression",
"numpy.min",
"numpy... | [((407, 419), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (414, 419), False, 'from typing import List, Tuple, NewType, TypeVar\n'), ((10205, 10220), 'numpy.array', 'np.array', (['probs'], {}), '(probs)\n', (10213, 10220), True, 'import numpy as np\n'), ((10234, 10250), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (10242, 10250), True, 'import numpy as np\n'), ((11845, 11860), 'numpy.array', 'np.array', (['probs'], {}), '(probs)\n', (11853, 11860), True, 'import numpy as np\n'), ((12625, 12660), 'bisect.bisect_left', 'bisect.bisect_left', (['bins', 'pred_prob'], {}), '(bins, pred_prob)\n', (12643, 12660), False, 'import bisect\n'), ((12773, 12787), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (12781, 12787), True, 'import numpy as np\n'), ((12806, 12845), 'numpy.searchsorted', 'np.searchsorted', (['bins', 'prob_label[:, 0]'], {}), '(bins, prob_label[:, 0])\n', (12821, 12845), True, 'import numpy as np\n'), ((12869, 12892), 'numpy.argsort', 'np.argsort', (['bin_indices'], {}), '(bin_indices)\n', (12879, 12892), True, 'import numpy as np\n'), ((13028, 13074), 'numpy.split', 'np.split', (['prob_label[bin_sort_indices]', 'splits'], {}), '(prob_label[bin_sort_indices], splits)\n', (13036, 13074), True, 'import numpy as np\n'), ((13372, 13386), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (13380, 13386), True, 'import numpy as np\n'), ((13407, 13426), 'numpy.mean', 'np.mean', (['data[:, 0]'], {}), '(data[:, 0])\n', (13414, 13426), True, 'import numpy as np\n'), ((13443, 13462), 'numpy.mean', 'np.mean', (['data[:, 1]'], {}), '(data[:, 1])\n', (13450, 13462), True, 'import numpy as np\n'), ((14790, 14819), 'numpy.dot', 'np.dot', (['bin_probs', 'bin_errors'], {}), '(bin_probs, bin_errors)\n', (14796, 14819), True, 'import numpy as np\n'), ((15393, 15445), 'numpy.sqrt', 'np.sqrt', (['(label_means * (1 - label_means) / bin_sizes)'], {}), '(label_means * (1 - label_means) / bin_sizes)\n', (15400, 15445), True, 'import 
numpy as np\n'), ((16151, 16173), 'numpy.mean', 'np.mean', (['resampled_ces'], {}), '(resampled_ces)\n', (16158, 16173), True, 'import numpy as np\n'), ((18707, 18734), 'numpy.std', 'np.std', (['bootstrap_estimates'], {}), '(bootstrap_estimates)\n', (18713, 18734), True, 'import numpy as np\n'), ((18819, 18870), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(10000000000.0)', 'solver': '"""lbfgs"""'}), "(C=10000000000.0, solver='lbfgs')\n", (18837, 18870), False, 'from sklearn.linear_model import LogisticRegression\n'), ((18951, 18987), 'numpy.expand_dims', 'np.expand_dims', (['model_probs'], {'axis': '(-1)'}), '(model_probs, axis=-1)\n', (18965, 18987), True, 'import numpy as np\n'), ((19006, 19040), 'numpy.clip', 'np.clip', (['model_probs', 'eps', '(1 - eps)'], {}), '(model_probs, eps, 1 - eps)\n', (19013, 19040), True, 'import numpy as np\n'), ((19059, 19098), 'numpy.log', 'np.log', (['(model_probs / (1 - model_probs))'], {}), '(model_probs / (1 - model_probs))\n', (19065, 19098), True, 'import numpy as np\n'), ((20001, 20020), 'numpy.array', 'np.array', (['bin_means'], {}), '(bin_means)\n', (20009, 20020), True, 'import numpy as np\n'), ((20765, 20802), 'numpy.array', 'np.array', (['[probs[i] for i in indices]'], {}), '([probs[i] for i in indices])\n', (20773, 20802), True, 'import numpy as np\n'), ((20816, 20854), 'numpy.array', 'np.array', (['[labels[i] for i in indices]'], {}), '([labels[i] for i in indices])\n', (20824, 20854), True, 'import numpy as np\n'), ((20925, 20944), 'numpy.argmax', 'np.argmax', (['probs', '(1)'], {}), '(probs, 1)\n', (20934, 20944), True, 'import numpy as np\n'), ((20984, 21000), 'numpy.max', 'np.max', (['probs', '(1)'], {}), '(probs, 1)\n', (20990, 21000), True, 'import numpy as np\n'), ((21251, 21276), 'numpy.zeros', 'np.zeros', (['(num_labels, k)'], {}), '((num_labels, k))\n', (21259, 21276), True, 'import numpy as np\n'), ((1527, 1542), 'numpy.unique', 'np.unique', (['data'], {}), '(data)\n', 
(1536, 1542), True, 'import numpy as np\n'), ((10262, 10301), 'numpy.issubdtype', 'np.issubdtype', (['labels.dtype', 'np.integer'], {}), '(labels.dtype, np.integer)\n', (10275, 10301), True, 'import numpy as np\n'), ((14070, 14099), 'numpy.dot', 'np.dot', (['bin_probs', 'bin_errors'], {}), '(bin_probs, bin_errors)\n', (14076, 14099), True, 'import numpy as np\n'), ((14550, 14571), 'numpy.mean', 'np.mean', (['label_values'], {}), '(label_values)\n', (14557, 14571), True, 'import numpy as np\n'), ((15083, 15100), 'numpy.min', 'np.min', (['bin_sizes'], {}), '(bin_sizes)\n', (15089, 15100), True, 'import numpy as np\n'), ((15805, 15858), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'label_means', 'scale': 'label_stddev'}), '(loc=label_means, scale=label_stddev)\n', (15821, 15858), True, 'import numpy as np\n'), ((16395, 16432), 'numpy.square', 'np.square', (['(calibrated_probs - correct)'], {}), '(calibrated_probs - correct)\n', (16404, 16432), True, 'import numpy as np\n'), ((16604, 16620), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (16612, 16620), True, 'import numpy as np\n'), ((19171, 19204), 'numpy.array', 'np.array', (['probs'], {'dtype': 'np.float64'}), '(probs, dtype=np.float64)\n', (19179, 19204), True, 'import numpy as np\n'), ((19217, 19241), 'numpy.clip', 'np.clip', (['x', 'eps', '(1 - eps)'], {}), '(x, eps, 1 - eps)\n', (19224, 19241), True, 'import numpy as np\n'), ((19254, 19273), 'numpy.log', 'np.log', (['(x / (1 - x))'], {}), '(x / (1 - x))\n', (19260, 19273), True, 'import numpy as np\n'), ((19870, 19885), 'numpy.mean', 'np.mean', (['values'], {}), '(values)\n', (19877, 19885), True, 'import numpy as np\n'), ((20066, 20094), 'numpy.searchsorted', 'np.searchsorted', (['bins', 'probs'], {}), '(bins, probs)\n', (20081, 20094), True, 'import numpy as np\n'), ((21142, 21156), 'numpy.min', 'np.min', (['labels'], {}), '(labels)\n', (21148, 21156), True, 'import numpy as np\n'), ((21173, 21187), 'numpy.max', 'np.max', (['labels'], 
{}), '(labels)\n', (21179, 21187), True, 'import numpy as np\n'), ((15985, 16019), 'numpy.abs', 'np.abs', (['(label_samples - model_vals)'], {}), '(label_samples - model_vals)\n', (15991, 16019), True, 'import numpy as np\n'), ((16054, 16078), 'numpy.dot', 'np.dot', (['bin_probs', 'diffs'], {}), '(bin_probs, diffs)\n', (16060, 16078), True, 'import numpy as np\n'), ((17420, 17473), 'numpy.percentile', 'np.percentile', (['bootstrap_estimates', '(100 - alpha / 2.0)'], {}), '(bootstrap_estimates, 100 - alpha / 2.0)\n', (17433, 17473), True, 'import numpy as np\n'), ((17507, 17545), 'numpy.percentile', 'np.percentile', (['bootstrap_estimates', '(50)'], {}), '(bootstrap_estimates, 50)\n', (17520, 17545), True, 'import numpy as np\n'), ((17579, 17626), 'numpy.percentile', 'np.percentile', (['bootstrap_estimates', '(alpha / 2.0)'], {}), '(bootstrap_estimates, alpha / 2.0)\n', (17592, 17626), True, 'import numpy as np\n'), ((18210, 18257), 'numpy.percentile', 'np.percentile', (['bootstrap_estimates', '(alpha / 2.0)'], {}), '(bootstrap_estimates, alpha / 2.0)\n', (18223, 18257), True, 'import numpy as np\n'), ((18278, 18316), 'numpy.percentile', 'np.percentile', (['bootstrap_estimates', '(50)'], {}), '(bootstrap_estimates, 50)\n', (18291, 18316), True, 'import numpy as np\n'), ((18337, 18390), 'numpy.percentile', 'np.percentile', (['bootstrap_estimates', '(100 - alpha / 2.0)'], {}), '(bootstrap_estimates, 100 - alpha / 2.0)\n', (18350, 18390), True, 'import numpy as np\n'), ((21296, 21317), 'numpy.arange', 'np.arange', (['num_labels'], {}), '(num_labels)\n', (21305, 21317), True, 'import numpy as np\n'), ((10706, 10720), 'numpy.min', 'np.min', (['labels'], {}), '(labels)\n', (10712, 10720), True, 'import numpy as np\n'), ((10729, 10743), 'numpy.max', 'np.max', (['labels'], {}), '(labels)\n', (10735, 10743), True, 'import numpy as np\n'), ((16644, 16688), 'numpy.square', 'np.square', (['(calibrated_probs - labels_one_hot)'], {}), '(calibrated_probs - labels_one_hot)\n', 
(16653, 16688), True, 'import numpy as np\n'), ((18139, 18177), 'numpy.percentile', 'np.percentile', (['bootstrap_estimates', '(50)'], {}), '(bootstrap_estimates, 50)\n', (18152, 18177), True, 'import numpy as np\n'), ((19346, 19356), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (19352, 19356), True, 'import numpy as np\n'), ((10906, 10920), 'numpy.min', 'np.min', (['labels'], {}), '(labels)\n', (10912, 10920), True, 'import numpy as np\n'), ((10928, 10942), 'numpy.max', 'np.max', (['labels'], {}), '(labels)\n', (10934, 10942), True, 'import numpy as np\n'), ((11459, 11480), 'numpy.mean', 'np.mean', (['marginal_ces'], {}), '(marginal_ces)\n', (11466, 11480), True, 'import numpy as np\n'), ((15331, 15357), 'numpy.mean', 'np.mean', (['[b for a, b in l]'], {}), '([b for a, b in l])\n', (15338, 15357), True, 'import numpy as np\n'), ((15491, 15517), 'numpy.mean', 'np.mean', (['[a for a, b in l]'], {}), '([a for a, b in l])\n', (15498, 15517), True, 'import numpy as np\n')] |
from skorecard import datasets
from skorecard.bucketers import DecisionTreeBucketer, EqualFrequencyBucketer, EqualWidthBucketer, OptimalBucketer
import numpy as np
import pytest
@pytest.fixture()
def df():
    """Return the UCI credit card dataset as a pandas DataFrame."""
    data = datasets.load_uci_credit_card(as_frame=True)
    return data
def test_specials_tree_bucketer(df):
    """Check DecisionTreeBucketer puts special values into dedicated bins.

    Each special-value list must end up in its own bucket, and a ValueError
    must be raised when the specials leave too few leaves for the tree or
    reference a feature unknown to the bucketer.
    """
    X = df[["LIMIT_BAL", "BILL_AMT1"]]
    y = df["default"]
    specials = {"LIMIT_BAL": {"=50000": [50000], "in [20001,30000]": [20000, 30000]}}

    # With 2 special buckets and max_n_bins=3 the underlying decision tree
    # would be fitted with max_leaf_nodes=1, which sklearn rejects; skorecard
    # must surface this as a ValueError when fit is called.
    bucketer = DecisionTreeBucketer(variables=["LIMIT_BAL", "BILL_AMT1"], random_state=1, max_n_bins=3, specials=specials)
    with pytest.raises(ValueError):
        bucketer.fit_transform(X, y)

    bucketer = DecisionTreeBucketer(variables=["LIMIT_BAL", "BILL_AMT1"], random_state=1, max_n_bins=5, specials=specials)
    X_bins = bucketer.fit_transform(X, y)

    assert X_bins["BILL_AMT1"].nunique() == 5
    assert X_bins["LIMIT_BAL"].nunique() == 5
    assert X_bins[X["LIMIT_BAL"] == 50000]["LIMIT_BAL"].unique() == np.array(4)

    # BILL_AMT1 has no specials, so no extra bins are appended.
    bill_labels = bucketer.features_bucket_mapping_["BILL_AMT1"].labels
    assert len(bill_labels) == 6
    assert bill_labels[0].startswith("(-inf")
    assert bill_labels[4].endswith("inf)")

    # LIMIT_BAL has 2 specials, so 2 extra bins are appended whose labels
    # carry the keys of the specials dictionary.
    limit_labels = bucketer.features_bucket_mapping_["LIMIT_BAL"].labels
    assert len(limit_labels) == 6
    special_keys = list(specials["LIMIT_BAL"])
    assert limit_labels[4].endswith(special_keys[0])
    assert limit_labels[5].endswith(special_keys[1])

    # Specials referencing a feature the bucketer does not know must raise.
    specials = {"LIMIT_BAL": {"=50000": [50000], "in [20001,30000]": [20000, 30000]}, "Undefinedfeature": {"1": [2]}}
    with pytest.raises(ValueError):
        DecisionTreeBucketer(variables=["LIMIT_BAL", "BILL_AMT1"], random_state=1, max_n_bins=3, specials=specials).fit_transform(X, y)
def test_specials_equal_width_bucketer(df):
    """Check EqualWidthBucketer puts special values into dedicated bins.

    Each special-value list must get its own bucket appended after the
    regular equal-width buckets.
    """
    X = df[["LIMIT_BAL", "BILL_AMT1"]]
    y = df["default"]
    specials = {"LIMIT_BAL": {"=50000": [50000], "in [20001,30000]": [20000, 30000]}}

    bucketer = EqualWidthBucketer(variables=["LIMIT_BAL", "BILL_AMT1"], n_bins=3, specials=specials)
    X_bins = bucketer.fit_transform(X, y)

    assert X_bins["BILL_AMT1"].nunique() == 3
    # n_bins plus the 2 extra buckets coming from the specials
    assert X_bins["LIMIT_BAL"].nunique() == 5
    assert X_bins[X["LIMIT_BAL"] == 50000]["LIMIT_BAL"].unique() == np.array(4)

    # BILL_AMT1 has no specials, hence no extra bins.
    bill_labels = bucketer.features_bucket_mapping_["BILL_AMT1"].labels
    assert len(bill_labels) == 4
    assert bill_labels[0].startswith("(-inf")
    assert bill_labels[2].endswith("inf)")

    # LIMIT_BAL gets 2 extra bins whose labels carry the specials keys.
    limit_labels = bucketer.features_bucket_mapping_["LIMIT_BAL"].labels
    assert len(limit_labels) == 6
    special_keys = list(specials["LIMIT_BAL"])
    assert limit_labels[4].endswith(special_keys[0])
    assert limit_labels[5].endswith(special_keys[1])

    # Specials referencing a feature the bucketer does not know must raise.
    specials = {"LIMIT_BAL": {"=50000": [50000], "in [20001,30000]": [20000, 30000]}, "Undefinedfeature": {"1": [2]}}
    with pytest.raises(ValueError):
        EqualWidthBucketer(variables=["LIMIT_BAL", "BILL_AMT1"], n_bins=3, specials=specials).fit_transform(X, y)
def test_specials_equal_frequency_bucketer(df):
    """Check EqualFrequencyBucketer puts special values into dedicated bins.

    Each special-value list must get its own bucket appended after the
    regular equal-frequency buckets.
    """
    X = df[["LIMIT_BAL", "BILL_AMT1"]]
    y = df["default"]
    specials = {"LIMIT_BAL": {"=50000": [50000], "in [20001,30000]": [20000, 30000]}}

    bucketer = EqualFrequencyBucketer(variables=["LIMIT_BAL", "BILL_AMT1"], n_bins=3, specials=specials)
    X_bins = bucketer.fit_transform(X, y)

    assert X_bins["BILL_AMT1"].nunique() == 3
    # n_bins plus the 2 extra buckets coming from the specials
    assert X_bins["LIMIT_BAL"].nunique() == 5
    assert X_bins[X["LIMIT_BAL"] == 50000]["LIMIT_BAL"].unique() == np.array(4)

    # BILL_AMT1 has no specials, hence no extra bins.
    bill_labels = bucketer.features_bucket_mapping_["BILL_AMT1"].labels
    assert len(bill_labels) == 4
    assert bill_labels[0].startswith("(-inf")
    assert bill_labels[2].endswith("inf)")

    # LIMIT_BAL gets 2 extra bins whose labels carry the specials keys.
    limit_labels = bucketer.features_bucket_mapping_["LIMIT_BAL"].labels
    assert len(limit_labels) == 6
    special_keys = list(specials["LIMIT_BAL"])
    assert limit_labels[4].endswith(special_keys[0])
    assert limit_labels[5].endswith(special_keys[1])

    # Specials referencing a feature the bucketer does not know must raise.
    specials = {"LIMIT_BAL": {"=50000": [50000], "in [20001,30000]": [20000, 30000]}, "Undefinedfeature": {"1": [2]}}
    with pytest.raises(ValueError):
        EqualFrequencyBucketer(variables=["LIMIT_BAL", "BILL_AMT1"], n_bins=3, specials=specials).fit_transform(X, y)
def _test_specials_optimal_bucketer(df):
    """Check OptimalBucketer puts special values into dedicated bins.

    Currently disabled: the leading underscore keeps pytest from
    collecting it.
    """
    X = df[["LIMIT_BAL", "BILL_AMT1"]]
    y = df["default"]
    specials = {"LIMIT_BAL": {"=50000": [50000], "in [20001,30000]": [20000, 30000]}}

    bucketer = OptimalBucketer(variables=["LIMIT_BAL", "BILL_AMT1"], random_state=1, max_n_bins=3, specials=specials)
    X_bins = bucketer.fit_transform(X, y)

    assert X_bins["BILL_AMT1"].nunique() == 3
    # max_n_bins plus the 2 extra buckets coming from the specials
    assert X_bins["LIMIT_BAL"].nunique() == 5
    assert X_bins[X["LIMIT_BAL"] == 50000]["LIMIT_BAL"].unique() == np.array(3)

    # BILL_AMT1 has no specials, hence no extra bins.
    bill_labels = bucketer.features_bucket_mapping_["BILL_AMT1"].labels
    assert len(bill_labels) == 3
    assert bill_labels[0].startswith("(-inf")
    assert bill_labels[2].endswith("inf)")

    # LIMIT_BAL gets 2 extra bins whose labels carry the specials keys.
    limit_labels = bucketer.features_bucket_mapping_["LIMIT_BAL"].labels
    assert len(limit_labels) == 5
    special_keys = list(specials["LIMIT_BAL"])
    assert limit_labels[3].endswith(special_keys[0])
    assert limit_labels[4].endswith(special_keys[1])
| [
"skorecard.bucketers.EqualFrequencyBucketer",
"skorecard.bucketers.OptimalBucketer",
"pytest.fixture",
"skorecard.datasets.load_uci_credit_card",
"pytest.raises",
"numpy.array",
"skorecard.bucketers.DecisionTreeBucketer",
"skorecard.bucketers.EqualWidthBucketer"
] | [((182, 198), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (196, 198), False, 'import pytest\n'), ((250, 294), 'skorecard.datasets.load_uci_credit_card', 'datasets.load_uci_credit_card', ([], {'as_frame': '(True)'}), '(as_frame=True)\n', (279, 294), False, 'from skorecard import datasets\n'), ((920, 1031), 'skorecard.bucketers.DecisionTreeBucketer', 'DecisionTreeBucketer', ([], {'variables': "['LIMIT_BAL', 'BILL_AMT1']", 'random_state': '(1)', 'max_n_bins': '(3)', 'specials': 'specials'}), "(variables=['LIMIT_BAL', 'BILL_AMT1'], random_state=1,\n max_n_bins=3, specials=specials)\n", (940, 1031), False, 'from skorecard.bucketers import DecisionTreeBucketer, EqualFrequencyBucketer, EqualWidthBucketer, OptimalBucketer\n'), ((1107, 1218), 'skorecard.bucketers.DecisionTreeBucketer', 'DecisionTreeBucketer', ([], {'variables': "['LIMIT_BAL', 'BILL_AMT1']", 'random_state': '(1)', 'max_n_bins': '(5)', 'specials': 'specials'}), "(variables=['LIMIT_BAL', 'BILL_AMT1'], random_state=1,\n max_n_bins=5, specials=specials)\n", (1127, 1218), False, 'from skorecard.bucketers import DecisionTreeBucketer, EqualFrequencyBucketer, EqualWidthBucketer, OptimalBucketer\n'), ((3113, 3203), 'skorecard.bucketers.EqualWidthBucketer', 'EqualWidthBucketer', ([], {'variables': "['LIMIT_BAL', 'BILL_AMT1']", 'n_bins': '(3)', 'specials': 'specials'}), "(variables=['LIMIT_BAL', 'BILL_AMT1'], n_bins=3, specials\n =specials)\n", (3131, 3203), False, 'from skorecard.bucketers import DecisionTreeBucketer, EqualFrequencyBucketer, EqualWidthBucketer, OptimalBucketer\n'), ((5125, 5218), 'skorecard.bucketers.EqualFrequencyBucketer', 'EqualFrequencyBucketer', ([], {'variables': "['LIMIT_BAL', 'BILL_AMT1']", 'n_bins': '(3)', 'specials': 'specials'}), "(variables=['LIMIT_BAL', 'BILL_AMT1'], n_bins=3,\n specials=specials)\n", (5147, 5218), False, 'from skorecard.bucketers import DecisionTreeBucketer, EqualFrequencyBucketer, EqualWidthBucketer, OptimalBucketer\n'), ((7138, 7244), 
'skorecard.bucketers.OptimalBucketer', 'OptimalBucketer', ([], {'variables': "['LIMIT_BAL', 'BILL_AMT1']", 'random_state': '(1)', 'max_n_bins': '(3)', 'specials': 'specials'}), "(variables=['LIMIT_BAL', 'BILL_AMT1'], random_state=1,\n max_n_bins=3, specials=specials)\n", (7153, 7244), False, 'from skorecard.bucketers import DecisionTreeBucketer, EqualFrequencyBucketer, EqualWidthBucketer, OptimalBucketer\n'), ((1037, 1062), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1050, 1062), False, 'import pytest\n'), ((1414, 1425), 'numpy.array', 'np.array', (['(4)'], {}), '(4)\n', (1422, 1425), True, 'import numpy as np\n'), ((2602, 2627), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2615, 2627), False, 'import pytest\n'), ((3444, 3455), 'numpy.array', 'np.array', (['(4)'], {}), '(4)\n', (3452, 3455), True, 'import numpy as np\n'), ((4632, 4657), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4645, 4657), False, 'import pytest\n'), ((5460, 5471), 'numpy.array', 'np.array', (['(4)'], {}), '(4)\n', (5468, 5471), True, 'import numpy as np\n'), ((6648, 6673), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6661, 6673), False, 'import pytest\n'), ((7486, 7497), 'numpy.array', 'np.array', (['(3)'], {}), '(3)\n', (7494, 7497), True, 'import numpy as np\n'), ((2637, 2748), 'skorecard.bucketers.DecisionTreeBucketer', 'DecisionTreeBucketer', ([], {'variables': "['LIMIT_BAL', 'BILL_AMT1']", 'random_state': '(1)', 'max_n_bins': '(3)', 'specials': 'specials'}), "(variables=['LIMIT_BAL', 'BILL_AMT1'], random_state=1,\n max_n_bins=3, specials=specials)\n", (2657, 2748), False, 'from skorecard.bucketers import DecisionTreeBucketer, EqualFrequencyBucketer, EqualWidthBucketer, OptimalBucketer\n'), ((4667, 4757), 'skorecard.bucketers.EqualWidthBucketer', 'EqualWidthBucketer', ([], {'variables': "['LIMIT_BAL', 'BILL_AMT1']", 'n_bins': '(3)', 'specials': 'specials'}), 
"(variables=['LIMIT_BAL', 'BILL_AMT1'], n_bins=3, specials\n =specials)\n", (4685, 4757), False, 'from skorecard.bucketers import DecisionTreeBucketer, EqualFrequencyBucketer, EqualWidthBucketer, OptimalBucketer\n'), ((6683, 6776), 'skorecard.bucketers.EqualFrequencyBucketer', 'EqualFrequencyBucketer', ([], {'variables': "['LIMIT_BAL', 'BILL_AMT1']", 'n_bins': '(3)', 'specials': 'specials'}), "(variables=['LIMIT_BAL', 'BILL_AMT1'], n_bins=3,\n specials=specials)\n", (6705, 6776), False, 'from skorecard.bucketers import DecisionTreeBucketer, EqualFrequencyBucketer, EqualWidthBucketer, OptimalBucketer\n')] |
"""Auxiliary **connector** and **selector** functions to create edges.
This module provides auxiliary **connector** and **selector** functions
for the ``dg.DeepGraph.create_edges`` and
``dg.DeepGraph.create_ft_edges`` methods.
They are described in their corresponding docstrings.
"""
from __future__ import print_function, division, absolute_import
# Copyright (C) 2017-2020 by
# <NAME> <<EMAIL>>
# All rights reserved.
# BSD license.
# py2/3 compatibility
# On Python 2 rebind ``range`` to the lazy ``xrange``; on Python 3 the
# name ``xrange`` does not exist, the NameError is swallowed, and the
# built-in ``range`` is kept as-is.
try:
    range = xrange
except NameError:
    pass
import numpy as np
# Public connector functions exported by this module.
__all__ = ['great_circle_dist',
           'cp_node_intersection',
           'cp_intersection_strength',
           'hypergeometric_p_value',
           ]
# ============================================================================
# CONNECTORS
# ============================================================================
def great_circle_dist(lat_s, lat_t, lon_s, lon_t):
"""Return the great circle distance between nodes.
The latitude and longitude values in the node table have to be in signed
decimal degrees without compass direction (the sign indicates west/south).
The great circle distance is calculated using the spherical law of cosines.
"""
# dtypes
lat_s = np.array(lat_s, dtype=float)
lat_t = np.array(lat_t, dtype=float)
lon_s = np.array(lon_s, dtype=float)
lon_t = np.array(lon_t, dtype=float)
# select by event_indices
phi_i = np.radians(lat_s)
phi_j = np.radians(lat_t)
delta_alpha = np.radians(lon_t) - np.radians(lon_s)
# earth's radius
R = 6371
# spatial distance of nodes
gcd = np.arccos(np.sin(phi_i) * np.sin(phi_j) +
np.cos(phi_i) * np.cos(phi_j) *
np.cos(delta_alpha)) * R
# for 0 gcd, there might be nans, convert to 0.
gcd = np.nan_to_num(gcd)
return gcd
def cp_node_intersection(supernode_ids, sources, targets):
    """Work in progress!

    For every edge (source, target), compute the intersection of the node
    sets stored in ``supernode_ids``.

    Parameters
    ----------
    supernode_ids : np.ndarray (dtype=object)
        Array of ``set`` objects, one per supernode.
    sources, targets : np.ndarray
        Integer index arrays selecting each edge's source/target supernode.

    Returns
    -------
    intsec : np.ndarray (dtype=object)
        Per-edge intersection sets.
    intsec_card : np.ndarray (dtype=int)
        Per-edge intersection cardinalities.
    identical_nodes : np.ndarray (dtype=bool)
        Whether the source and target node sets compare equal.
    """
    nodess = supernode_ids[sources]
    nodest = supernode_ids[targets]
    identical_nodes = (nodess == nodest)

    intsec = np.zeros(len(sources), dtype=object)
    # ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin ``int`` is the equivalent, portable spelling.
    intsec_card = np.zeros(len(sources), dtype=int)

    for i in range(len(sources)):
        intsec[i] = nodess[i].intersection(nodest[i])
        intsec_card[i] = len(intsec[i])

    return intsec, intsec_card, identical_nodes
def cp_intersection_strength(n_unique_nodes, intsec_card, sources, targets):
    """Work in progress!

    Ratio of each edge's intersection cardinality to the smaller of the
    two supernodes' unique-node counts.
    """
    size_s = n_unique_nodes[sources]
    size_t = n_unique_nodes[targets]

    # cardinality of the smaller supernode, as float for the division
    min_card = np.minimum(size_s, size_t).astype(np.float64)

    # intersection strength
    return intsec_card / min_card
def hypergeometric_p_value(n_unique_nodes, intsec_card, sources, targets):
    """Work in progress!

    Survival-function p-value of the hypergeometric distribution for each
    edge's observed intersection cardinality.
    """
    from scipy.stats import hypergeom

    size_s = n_unique_nodes[sources]
    size_t = n_unique_nodes[targets]

    # population size (hard-coded 220 x 220; deliberately not a parameter,
    # since connector argument names are mapped to node-table columns)
    M = 220*220
    # number of success states in the population: the larger supernode
    n = np.maximum(size_s, size_t)
    # total draws: the smaller supernode
    N = np.minimum(size_s, size_t)
    # observed successes
    x = intsec_card

    hg_p = np.zeros(len(sources))
    for i, xi in enumerate(x):
        hg_p[i] = hypergeom.sf(xi, M, n[i], N[i])
    return hg_p
# ============================================================================
# Selectors
# ============================================================================
| [
"scipy.stats.hypergeom.sf",
"numpy.radians",
"numpy.nan_to_num",
"numpy.sin",
"numpy.array",
"numpy.cos",
"numpy.vstack"
] | [((1244, 1272), 'numpy.array', 'np.array', (['lat_s'], {'dtype': 'float'}), '(lat_s, dtype=float)\n', (1252, 1272), True, 'import numpy as np\n'), ((1285, 1313), 'numpy.array', 'np.array', (['lat_t'], {'dtype': 'float'}), '(lat_t, dtype=float)\n', (1293, 1313), True, 'import numpy as np\n'), ((1326, 1354), 'numpy.array', 'np.array', (['lon_s'], {'dtype': 'float'}), '(lon_s, dtype=float)\n', (1334, 1354), True, 'import numpy as np\n'), ((1367, 1395), 'numpy.array', 'np.array', (['lon_t'], {'dtype': 'float'}), '(lon_t, dtype=float)\n', (1375, 1395), True, 'import numpy as np\n'), ((1439, 1456), 'numpy.radians', 'np.radians', (['lat_s'], {}), '(lat_s)\n', (1449, 1456), True, 'import numpy as np\n'), ((1469, 1486), 'numpy.radians', 'np.radians', (['lat_t'], {}), '(lat_t)\n', (1479, 1486), True, 'import numpy as np\n'), ((1824, 1842), 'numpy.nan_to_num', 'np.nan_to_num', (['gcd'], {}), '(gcd)\n', (1837, 1842), True, 'import numpy as np\n'), ((1506, 1523), 'numpy.radians', 'np.radians', (['lon_t'], {}), '(lon_t)\n', (1516, 1523), True, 'import numpy as np\n'), ((1526, 1543), 'numpy.radians', 'np.radians', (['lon_s'], {}), '(lon_s)\n', (1536, 1543), True, 'import numpy as np\n'), ((3252, 3285), 'scipy.stats.hypergeom.sf', 'hypergeom.sf', (['x[i]', 'M', 'n[i]', 'N[i]'], {}), '(x[i], M, n[i], N[i])\n', (3264, 3285), False, 'from scipy.stats import hypergeom\n'), ((3039, 3058), 'numpy.vstack', 'np.vstack', (['(us, ut)'], {}), '((us, ut))\n', (3048, 3058), True, 'import numpy as np\n'), ((3097, 3116), 'numpy.vstack', 'np.vstack', (['(us, ut)'], {}), '((us, ut))\n', (3106, 3116), True, 'import numpy as np\n'), ((2578, 2597), 'numpy.vstack', 'np.vstack', (['(us, ut)'], {}), '((us, ut))\n', (2587, 2597), True, 'import numpy as np\n'), ((1632, 1645), 'numpy.sin', 'np.sin', (['phi_i'], {}), '(phi_i)\n', (1638, 1645), True, 'import numpy as np\n'), ((1648, 1661), 'numpy.sin', 'np.sin', (['phi_j'], {}), '(phi_j)\n', (1654, 1661), True, 'import numpy as np\n'), ((1736, 1755), 
'numpy.cos', 'np.cos', (['delta_alpha'], {}), '(delta_alpha)\n', (1742, 1755), True, 'import numpy as np\n'), ((1684, 1697), 'numpy.cos', 'np.cos', (['phi_i'], {}), '(phi_i)\n', (1690, 1697), True, 'import numpy as np\n'), ((1700, 1713), 'numpy.cos', 'np.cos', (['phi_j'], {}), '(phi_j)\n', (1706, 1713), True, 'import numpy as np\n')] |
# Thanks <NAME> for his: https://github.com/informramiz/opencv-face-recognition-python
from IPython.display import display
from PIL import Image
import sys, os
import numpy as np
import cv2
class FaceClassifier:
    """Face recognition helper built on OpenCV's Fisherfaces recognizer.

    Training data layout: one sub-directory per subject below the training
    path; each directory name is recorded in ``subjects`` and used as the
    human-readable label for predictions.
    """
    # Square side length faces are resized to after detection; -1 disables resizing.
    faceSize = -1
    # cv2 face recognizer instance; (re)created in __init__.
    face_recognizer = None
    # Verbose/debug flag (stored but not read within this class).
    debug = False
    # Subject (directory) names; recognizer label N maps to subjects[N-1].
    subjects = []
    def __init__(self, debug=False, face_resize=-1):
        # Create a Fisherfaces recognizer (LBPH alternative left commented out).
        #self.face_recognizer = cv2.face.LBPHFaceRecognizer_create()
        self.face_recognizer = cv2.face.FisherFaceRecognizer_create()
        self.debug = debug
        self.faceSize = face_resize
    def train(self, train_path, delete_originals=False, show_train_images=False):
        # Prepare (face, label) pairs from the directory tree and train the
        # recognizer on them.
        # NOTE(review): delete_originals is accepted but never used here.
        print("Preparing data...")
        faces, labels = self.prepare_training_data(train_path, show_train_images=show_train_images)
        print("Data prepared")
        #print total faces and labels
        print("Total faces: ", len(faces))
        print("Labels tagged: ", len(labels), "with", len(np.unique(labels)), "labels")
        print("Training...")
        self.face_recognizer.train(faces, np.array(labels))
        print("We are ready!")
    def predict(self, test_path):
        # Detect all faces in the image at test_path and return the subject
        # name predicted for each one.
        # NOTE(review): when any detection lacks a face this returns the
        # plain string "No_face" instead of a list — callers must handle
        # both return types.
        ret = []
        detections = self.detect_face(test_path);
        for detection in detections:
            face, rect = detection[0], detection[1]
            if face is None:
                return "No_face"
            #predict the image using our face recognizer
            label = self.face_recognizer.predict(face)
            #get name of respective label returned by face recognizer
            ret.append(self.subjects[label[0]-1])
        return ret
    def detect_face(self, img, grayscale_output=True):
        # Read the image at path ``img`` and return a list of
        # [face_image, rect] pairs, or [[None, None]] when the file cannot
        # be read or no face is found.
        ret = []
        img = cv2.imread(img)
        if img is None:
            ret.append([None, None])
            return ret
        #convert the test image to gray image as opencv face detector expects gray images
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        #load OpenCV face detector, I am using LBP which is fast
        #there is also a more accurate but slow Haar classifier
        face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
        #face_cascade = cv2.CascadeClassifier('lbpcascade_frontalface.xml')
        #let's detect multiscale (some images may be closer to camera than others) images
        #result is a list of faces
        faces = face_cascade.detectMultiScale(gray)#, scaleFactor=1.2, minNeighbors=5);
        #if no faces are detected then return original img
        if (len(faces) == 0):
            ret.append([None, None])
            return ret
        i = 0
        for face in faces:
            (x, y, w, h) = face
            #return only the face part of the image
            # NOTE(review): the slice uses y:y+w, x:x+h — width/height look
            # swapped relative to the (x, y, w, h) rect; confirm intended.
            if (grayscale_output):
                img_face = gray[y:y+w, x:x+h]
            else:
                img_face = img[y:y+w, x:x+h]
            # NOTE(review): COLOR_BGR2RGB is applied unconditionally, i.e.
            # also to the single-channel grayscale crop — confirm cv2
            # accepts this for the inputs actually used.
            img_face = cv2.cvtColor(img_face, cv2.COLOR_BGR2RGB)
            if self.faceSize > 0:
                img_face = cv2.resize(img_face, (self.faceSize, self.faceSize), interpolation = cv2.INTER_AREA)
            # NOTE(review): ``i`` is never incremented, so every entry is
            # paired with faces[0] rather than its own rect.
            ret.append([img_face, faces[i]])
        return ret
    def prepare_training_data(self, train_path, show_train_images=False):
        # Walk one sub-directory per subject and collect (face, label) pairs;
        # also repopulates self.subjects with the directory names.
        #------STEP-1--------
        #get the directories (one directory for each subject) in data folder
        dirs = os.listdir(train_path)
        #list to hold all subject faces
        faces = []
        #list to hold labels for all subjects
        labels = []
        self.subjects = []
        #let's go through each directory and read images within it
        label = 0
        for dir_name in dirs:
            #------STEP-2--------
            #extract label number of subject from dir_name
            #format of dir name = slabel
            #, so removing letter 's' from dir_name will give us label
            label += 1
            self.subjects.append(dir_name)
            print (dir_name, "label = ", label)
            #build path of directory containin images for current subject subject
            #sample subject_dir_path = "training-data/s1"
            subject_dir_path = train_path + "/" + dir_name
            #get the images names that are inside the given subject directory
            subject_images_names = os.listdir(subject_dir_path)
            #------STEP-3--------
            #go through each image name, read image,
            #detect face and add face to list of faces
            for image_name in subject_images_names:
                #ignore system files like .DS_Store
                if image_name.startswith("."):
                    continue;
                #build image path
                #sample image path = training-data/s1/1.pgm
                image_path = subject_dir_path + "/" + image_name
                #detect face (only the first detection is used per image)
                detections = self.detect_face(image_path);
                face, rect = detections[0][0], detections[0][1]
                #------STEP-4--------
                #for the purpose of this tutorial
                #we will ignore faces that are not detected
                if face is not None:
                    if show_train_images:
                        print ("Image: ", image_path)
                        display(Image.fromarray(face))
                    faces.append(face)
                    labels.append(label)
        cv2.destroyAllWindows()
        cv2.waitKey(1)
        cv2.destroyAllWindows()
return faces, labels | [
"cv2.resize",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imread",
"numpy.array",
"cv2.CascadeClassifier",
"PIL.Image.fromarray",
"cv2.face.FisherFaceRecognizer_create",
"cv2.destroyAllWindows",
"os.listdir",
"numpy.unique"
] | [((453, 491), 'cv2.face.FisherFaceRecognizer_create', 'cv2.face.FisherFaceRecognizer_create', ([], {}), '()\n', (489, 491), False, 'import cv2\n'), ((1711, 1726), 'cv2.imread', 'cv2.imread', (['img'], {}), '(img)\n', (1721, 1726), False, 'import cv2\n'), ((1925, 1962), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1937, 1962), False, 'import cv2\n'), ((2116, 2172), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_frontalface_alt.xml"""'], {}), "('haarcascade_frontalface_alt.xml')\n", (2137, 2172), False, 'import cv2\n'), ((3397, 3419), 'os.listdir', 'os.listdir', (['train_path'], {}), '(train_path)\n', (3407, 3419), False, 'import sys, os\n'), ((5437, 5460), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5458, 5460), False, 'import cv2\n'), ((5469, 5483), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5480, 5483), False, 'import cv2\n'), ((5492, 5515), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5513, 5515), False, 'import cv2\n'), ((1053, 1069), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1061, 1069), True, 'import numpy as np\n'), ((4330, 4358), 'os.listdir', 'os.listdir', (['subject_dir_path'], {}), '(subject_dir_path)\n', (4340, 4358), False, 'import sys, os\n'), ((952, 969), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (961, 969), True, 'import numpy as np\n'), ((2931, 2972), 'cv2.cvtColor', 'cv2.cvtColor', (['img_face', 'cv2.COLOR_BGR2RGB'], {}), '(img_face, cv2.COLOR_BGR2RGB)\n', (2943, 2972), False, 'import cv2\n'), ((3035, 3122), 'cv2.resize', 'cv2.resize', (['img_face', '(self.faceSize, self.faceSize)'], {'interpolation': 'cv2.INTER_AREA'}), '(img_face, (self.faceSize, self.faceSize), interpolation=cv2.\n INTER_AREA)\n', (3045, 3122), False, 'import cv2\n'), ((5312, 5333), 'PIL.Image.fromarray', 'Image.fromarray', (['face'], {}), '(face)\n', (5327, 5333), False, 'from PIL import Image\n')] |
#TA 2 BASE
import pdb
import numpy as np
import gym
from gym import make
import cProfile
import re
import uuid
import os
import random
import time
from utils import rollout
import time
import pickle
#import cv2
import PIL
import torch
import json
import argparse
from collections import OrderedDict
from functools import partial
from torch import Tensor
import torch.multiprocessing as mp
from my_lib import *
from vast.opensetAlgos.EVM import EVM_Training , EVM_Inference, EVM_Inference_simple_cpu
from vast import activations
from statistics import mean
import gc
import random
import csv
import UCCS_TA2_helper
from UCCS_TA2_helper import UCCSTA2
# Global TA2 agent instance driven by testrun() below.
UCCS = UCCSTA2()
# Configure torch multiprocessing; both calls can legitimately raise
# RuntimeError (e.g. when already configured), in which case they are skipped.
try:
    torch.multiprocessing.set_sharing_strategy('file_system')
except RuntimeError:
    pass
try:
    mp.set_start_method('spawn', force=True)
except RuntimeError:
    pass
number_of_classes = 2
# Use ~80% of the available CPU cores.
n_cpu = int(os.cpu_count()*0.8)
# Seed every RNG source for reproducibility.
SEED = 1
os.environ['PYTHONHASHSEED']=str(SEED)
os.environ['TF_CUDNN_DETERMINISTIC'] = '1'
random.seed(SEED)
np.random.seed(SEED)
#tf.random.set_seed(SEED)
env_to_use = 'CartPole-v0'
print('ENV TO USE', env_to_use)
env = gym.make(env_to_use)
# Module-level accumulators shared with testrun().
noveltyStdMn = []
state_and_dec = []
X = []; Y = [];
x = env.reset()
numSteps = 0
KLlist = []
currentRunProb = []
def testrun():
    """Run ``nruns`` CartPole episodes, injecting novelty changes mid-episode.

    On step 4 of episodes k == 2/3/4 the environment's masspole / length /
    gravity parameter is modified (simulated novelty); k == 5 restores the
    nominal parameters.  When an episode ends, the KL divergence between
    the episode's probability trace and the training distribution is
    appended to the module-level ``KLlist``, and "given" episodes dump
    their state history to a CSV file.
    """
    UCCS.debug = True
    nruns = 6
    for k in range(nruns):
        actual_state = env.reset()
        # NOTE(review): numSteps / state_and_dec / currentRunProb are
        # initialized here but never updated or read afterwards.
        numSteps = 0
        state_and_dec = []
        currentRunProb = []
        for i in range(200):
            # Predict steps
            action = UCCS.process_instance(actual_state)
            # if(UCCS.debug and UCCS.cnt < 25): print(UCCS.debugstring)
            # Inject a world change on the 4th step of selected episodes.
            if (UCCS.cnt == 4 and k==2):
                env.modify("masspole",.1001)
                print("Modified mass pole .1001")
                UCCS.given=True
                UCCS.trial=1
                UCCS.episode=2
            elif (UCCS.cnt == 4 and k==3):
                env.modify("length",.5001)
                env.modify("masspole",.1)
                print("Modified Lenth to .50001" )
                UCCS.given=True
                UCCS.trial=1
                UCCS.episode=3
            elif (UCCS.cnt == 4 and k==4):
                env.modify("length",.5)
                env.modify("gravity",9.80001)
                print("Modified gravity" )
                UCCS.given=True
                UCCS.trial=1
                UCCS.episode=4
            elif (UCCS.cnt == 4 and k==5):
                env.modify("length",.5)
                env.modify("gravity",9.8)
                UCCS.given=False
                print("Reutrn to normal")
            actual_state, r, done, _ = env.step(action) # Take the predicted best action to get next actual state
            if done:
                if(UCCS.cnt < 199): print("!!!!!!!!!!!!!!!!!!!!!Steps only:", numSteps)
                # print (UCCS.problist)
                # Summarize this episode's probability trace (skipping the
                # first 3 warm-up entries) and compare it against the
                # training distribution via KL divergence.
                mu = np.mean(UCCS.problist[3:])
                sigma = np.std(UCCS.problist[3:])
                # print(mu,sigma)
                kl = UCCS.kullback_leibler( mu, sigma,UCCS.mean_train, UCCS.stdev_train)
                KLlist.append(float(kl))
                UCCS.episode += 1
                print("Steps, KL/WC",UCCS.cnt,kl, UCCS.world_change_prob())
                if(UCCS.given) :
                    # Dump the state history of "given novelty" episodes to CSV.
                    fname = 'Given-History-{}-{}-{}.csv'.format(UCCS.trial,UCCS.episode,uuid.uuid4().hex)
                    with open(fname, "w", newline="") as f:
                        writer = csv.writer(f)
                        writer.writerows(UCCS.statelist)
                    f.close()
                # Reset per-episode bookkeeping before the next run.
                UCCS.cnt=0
                UCCS.problist=[]
                UCCS.statelist=[]
                UCCS.reset(0)
                break
        # KLlist.append(currentRunProb)
    #pdb.set_trace()
# Profile the whole experiment so per-call hotspots are visible.
cProfile.run('testrun()')
# Summary statistics over the per-episode KL divergences collected above.
print("mean/stdev KL", np.mean(KLlist),np.std(KLlist))
| [
"uuid.uuid4",
"numpy.random.seed",
"gym.make",
"torch.multiprocessing.set_sharing_strategy",
"csv.writer",
"numpy.std",
"os.cpu_count",
"torch.multiprocessing.set_start_method",
"numpy.mean",
"random.seed",
"UCCS_TA2_helper.UCCSTA2",
"cProfile.run"
] | [((663, 672), 'UCCS_TA2_helper.UCCSTA2', 'UCCSTA2', ([], {}), '()\n', (670, 672), False, 'from UCCS_TA2_helper import UCCSTA2\n'), ((995, 1012), 'random.seed', 'random.seed', (['SEED'], {}), '(SEED)\n', (1006, 1012), False, 'import random\n'), ((1013, 1033), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (1027, 1033), True, 'import numpy as np\n'), ((1128, 1148), 'gym.make', 'gym.make', (['env_to_use'], {}), '(env_to_use)\n', (1136, 1148), False, 'import gym\n'), ((3735, 3760), 'cProfile.run', 'cProfile.run', (['"""testrun()"""'], {}), "('testrun()')\n", (3747, 3760), False, 'import cProfile\n'), ((682, 739), 'torch.multiprocessing.set_sharing_strategy', 'torch.multiprocessing.set_sharing_strategy', (['"""file_system"""'], {}), "('file_system')\n", (724, 739), False, 'import torch\n'), ((777, 817), 'torch.multiprocessing.set_start_method', 'mp.set_start_method', (['"""spawn"""'], {'force': '(True)'}), "('spawn', force=True)\n", (796, 817), True, 'import torch.multiprocessing as mp\n'), ((3786, 3801), 'numpy.mean', 'np.mean', (['KLlist'], {}), '(KLlist)\n', (3793, 3801), True, 'import numpy as np\n'), ((3802, 3816), 'numpy.std', 'np.std', (['KLlist'], {}), '(KLlist)\n', (3808, 3816), True, 'import numpy as np\n'), ((882, 896), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (894, 896), False, 'import os\n'), ((2907, 2933), 'numpy.mean', 'np.mean', (['UCCS.problist[3:]'], {}), '(UCCS.problist[3:])\n', (2914, 2933), True, 'import numpy as np\n'), ((2954, 2979), 'numpy.std', 'np.std', (['UCCS.problist[3:]'], {}), '(UCCS.problist[3:])\n', (2960, 2979), True, 'import numpy as np\n'), ((3439, 3452), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (3449, 3452), False, 'import csv\n'), ((3343, 3355), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3353, 3355), False, 'import uuid\n')] |
import numpy as np
from mlagents.trainers.buffer import (
AgentBuffer,
AgentBufferField,
BufferKey,
ObservationKeyPrefix,
RewardSignalKeyPrefix,
)
from mlagents.trainers.trajectory import ObsUtil
def assert_array(a, b):
    """Assert that two numpy arrays have identical shape and identical elements."""
    assert a.shape == b.shape
    flat_a = list(a.flatten())
    flat_b = list(b.flatten())
    # Shapes match, so the flattened lengths match and zip covers every pair.
    for lhs, rhs in zip(flat_a, flat_b):
        assert lhs == rhs
def construct_fake_buffer(fake_agent_id):
    """Create an AgentBuffer with 9 deterministic steps for one agent.

    Step ``s`` for agent ``a`` stores the observation
    [100a+10s+1, 100a+10s+2, 100a+10s+3], the continuous action
    [100a+10s+4, 100a+10s+5], and that same action repeated for a group of
    3 agents, so tests can assert exact values.
    """
    buf = AgentBuffer()
    for step in range(9):
        base = 100 * fake_agent_id + 10 * step
        buf[ObsUtil.get_name_at(0)].append(
            np.array([base + 1, base + 2, base + 3], dtype=np.float32)
        )
        buf[BufferKey.CONTINUOUS_ACTION].append(
            np.array([base + 4, base + 5], dtype=np.float32)
        )
        # Group entry: the same action array repeated for a 3-agent group.
        buf[BufferKey.GROUP_CONTINUOUS_ACTION].append(
            [np.array([base + 4, base + 5], dtype=np.float32)] * 3
        )
    return buf
def test_buffer():
    """End-to-end checks of AgentBuffer: get_batch slicing and padding, group
    entries, reset_agent, resequence_and_append and make_mini_batch."""
    agent_1_buffer = construct_fake_buffer(1)
    agent_2_buffer = construct_fake_buffer(2)
    agent_3_buffer = construct_fake_buffer(3)
    # Test get_batch
    a = agent_1_buffer[ObsUtil.get_name_at(0)].get_batch(
        batch_size=2, training_length=1, sequential=True
    )
    assert_array(
        np.array(a), np.array([[171, 172, 173], [181, 182, 183]], dtype=np.float32)
    )
    # Test get_batch
    a = agent_2_buffer[ObsUtil.get_name_at(0)].get_batch(
        batch_size=2, training_length=3, sequential=True
    )
    # Sequential: 2 sequences x length 3 = the last 6 observations (steps 3-8).
    assert_array(
        np.array(a),
        np.array(
            [
                [231, 232, 233],
                [241, 242, 243],
                [251, 252, 253],
                [261, 262, 263],
                [271, 272, 273],
                [281, 282, 283],
            ],
            dtype=np.float32,
        ),
    )
    a = agent_2_buffer[ObsUtil.get_name_at(0)].get_batch(
        batch_size=2, training_length=3, sequential=False
    )
    # Non-sequential: sequences may overlap (steps 5-7 and 6-8 here).
    assert_array(
        np.array(a),
        np.array(
            [
                [251, 252, 253],
                [261, 262, 263],
                [271, 272, 273],
                [261, 262, 263],
                [271, 272, 273],
                [281, 282, 283],
            ]
        ),
    )
    # Test padding
    a = agent_2_buffer[ObsUtil.get_name_at(0)].get_batch(
        batch_size=None, training_length=4, sequential=True
    )
    # 9 steps are zero-padded up to 12, a multiple of training_length=4.
    assert_array(
        np.array(a),
        np.array(
            [
                [201, 202, 203],
                [211, 212, 213],
                [221, 222, 223],
                [231, 232, 233],
                [241, 242, 243],
                [251, 252, 253],
                [261, 262, 263],
                [271, 272, 273],
                [281, 282, 283],
                [0, 0, 0],
                [0, 0, 0],
                [0, 0, 0],
            ]
        ),
    )
    # Test group entries return Lists of Lists. Make sure to pad properly!
    a = agent_2_buffer[BufferKey.GROUP_CONTINUOUS_ACTION].get_batch(
        batch_size=None, training_length=4, sequential=True
    )
    for _group_entry in a[:-3]:
        assert len(_group_entry) == 3
    for _group_entry in a[-3:]:
        assert len(_group_entry) == 0
    agent_1_buffer.reset_agent()
    assert agent_1_buffer.num_experiences == 0
    update_buffer = AgentBuffer()
    agent_2_buffer.resequence_and_append(
        update_buffer, batch_size=None, training_length=2
    )
    agent_3_buffer.resequence_and_append(
        update_buffer, batch_size=None, training_length=2
    )
    # Two 9-step buffers padded to sequences of length 2 -> 10 steps each.
    assert len(update_buffer[BufferKey.CONTINUOUS_ACTION]) == 20
    assert np.array(update_buffer[BufferKey.CONTINUOUS_ACTION]).shape == (20, 2)
    c = update_buffer.make_mini_batch(start=0, end=1)
    assert c.keys() == update_buffer.keys()
    # Make sure the values of c are AgentBufferField
    for val in c.values():
        assert isinstance(val, AgentBufferField)
    assert np.array(c[BufferKey.CONTINUOUS_ACTION]).shape == (1, 2)
def test_agentbufferfield():
    """AgentBufferField construction, indexing, slicing and padding checks."""
    # Constructor and iteration: values equal their own indices.
    field = AgentBufferField([0, 1, 2])
    for idx, val in enumerate(field):
        assert val == idx
        # Indexing agrees with iteration order.
        assert field[idx] == val
    # Slicing preserves both the contents and the AgentBufferField type.
    sliced = field[1:3]
    assert sliced == [1, 2]
    assert isinstance(sliced, AgentBufferField)
    # Padding of ragged group entries.
    ragged = AgentBufferField()
    ragged.append([np.array(1), np.array(2)])
    ragged.append([np.array(1), np.array(2)])
    ragged.append([np.array(1)])
    ragged.append([np.array(1)])
    padded = ragged.padded_to_batch(pad_value=3)
    assert np.array_equal(padded[0], np.array([1, 1, 1, 1]))
    assert np.array_equal(padded[1], np.array([2, 2, 3, 3]))
    # Padding must also work when the field holds plain (non-list) entries.
    padded_flat = field.padded_to_batch()
    assert np.array_equal(padded_flat, field)
def fakerandint(values):
    """Deterministic stand-in for a random-integer draw; ignores its input
    and always yields 19 so tests are reproducible."""
    return 19
def test_buffer_sample():
    """sample_mini_batch returns correctly shaped batches for both the
    feed-forward and LSTM (sequence) cases."""
    buf_a = construct_fake_buffer(1)
    buf_b = construct_fake_buffer(2)
    combined = AgentBuffer()
    buf_a.resequence_and_append(combined, batch_size=None, training_length=2)
    buf_b.resequence_and_append(combined, batch_size=None, training_length=2)
    # Feed-forward case: sequence_length == 1.
    batch = combined.sample_mini_batch(batch_size=4, sequence_length=1)
    assert batch.keys() == combined.keys()
    assert np.array(batch[BufferKey.CONTINUOUS_ACTION]).shape == (4, 2)
    # LSTM case: with 20 experiences, a length-19 sequence can only start in
    # one place, maximizing the chance of hitting a sequence break.
    batch = combined.sample_mini_batch(batch_size=20, sequence_length=19)
    assert batch.keys() == combined.keys()
    # Should only return one sequence.
    assert np.array(batch[BufferKey.CONTINUOUS_ACTION]).shape == (19, 2)
def test_num_experiences():
    """num_experiences must track the number of appended (resequenced) steps."""
    buf_a = construct_fake_buffer(1)
    buf_b = construct_fake_buffer(2)
    combined = AgentBuffer()
    # A fresh buffer is empty.
    assert len(combined[BufferKey.CONTINUOUS_ACTION]) == 0
    assert combined.num_experiences == 0
    buf_a.resequence_and_append(combined, batch_size=None, training_length=2)
    buf_b.resequence_and_append(combined, batch_size=None, training_length=2)
    # Two 9-step buffers padded to length-2 sequences -> 10 steps each.
    assert len(combined[BufferKey.CONTINUOUS_ACTION]) == 20
    assert combined.num_experiences == 20
def test_buffer_truncate():
    """truncate() trims to the requested size, rounding down to a whole number
    of sequences when sequence_length is given."""
    buf_a = construct_fake_buffer(1)
    buf_b = construct_fake_buffer(2)
    combined = AgentBuffer()
    buf_a.resequence_and_append(combined, batch_size=None, training_length=2)
    buf_b.resequence_and_append(combined, batch_size=None, training_length=2)
    # Non-LSTM: exact truncation.
    combined.truncate(2)
    assert combined.num_experiences == 2
    buf_a.resequence_and_append(combined, batch_size=None, training_length=2)
    buf_b.resequence_and_append(combined, batch_size=None, training_length=2)
    # LSTM: result must be a multiple of sequence_length (largest <= 4 is 3).
    combined.truncate(4, sequence_length=3)
    assert combined.num_experiences == 3
    # Truncation must preserve the field types.
    for buffer_field in combined.values():
        assert isinstance(buffer_field, AgentBufferField)
def test_key_encode_decode():
    """Encoding then decoding any buffer key must round-trip to the original."""
    all_keys = (
        list(BufferKey)
        + [(prefix, 42) for prefix in ObservationKeyPrefix]
        + [(prefix, "gail") for prefix in RewardSignalKeyPrefix]
    )
    for key in all_keys:
        assert AgentBuffer._decode_key(AgentBuffer._encode_key(key)) == key
def test_buffer_save_load():
    """A buffer serialized to a byte stream must deserialize with identical
    length and numerically identical contents."""
    import io

    source = construct_fake_buffer(3)
    stream = io.BytesIO()
    source.save_to_file(stream)
    restored = AgentBuffer()
    restored.load_from_file(stream)
    assert len(source) == len(restored)
    for key in source.keys():
        assert np.allclose(source[key], restored[key])
| [
"io.BytesIO",
"mlagents.trainers.buffer.AgentBuffer",
"numpy.allclose",
"mlagents.trainers.buffer.AgentBuffer._encode_key",
"mlagents.trainers.buffer.AgentBufferField",
"numpy.array",
"numpy.array_equal",
"mlagents.trainers.trajectory.ObsUtil.get_name_at"
] | [((437, 450), 'mlagents.trainers.buffer.AgentBuffer', 'AgentBuffer', ([], {}), '()\n', (448, 450), False, 'from mlagents.trainers.buffer import AgentBuffer, AgentBufferField, BufferKey, ObservationKeyPrefix, RewardSignalKeyPrefix\n'), ((3835, 3848), 'mlagents.trainers.buffer.AgentBuffer', 'AgentBuffer', ([], {}), '()\n', (3846, 3848), False, 'from mlagents.trainers.buffer import AgentBuffer, AgentBufferField, BufferKey, ObservationKeyPrefix, RewardSignalKeyPrefix\n'), ((4566, 4593), 'mlagents.trainers.buffer.AgentBufferField', 'AgentBufferField', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (4582, 4593), False, 'from mlagents.trainers.buffer import AgentBuffer, AgentBufferField, BufferKey, ObservationKeyPrefix, RewardSignalKeyPrefix\n'), ((4830, 4848), 'mlagents.trainers.buffer.AgentBufferField', 'AgentBufferField', ([], {}), '()\n', (4846, 4848), False, 'from mlagents.trainers.buffer import AgentBuffer, AgentBufferField, BufferKey, ObservationKeyPrefix, RewardSignalKeyPrefix\n'), ((5247, 5274), 'numpy.array_equal', 'np.array_equal', (['padded_a', 'a'], {}), '(padded_a, a)\n', (5261, 5274), True, 'import numpy as np\n'), ((5456, 5469), 'mlagents.trainers.buffer.AgentBuffer', 'AgentBuffer', ([], {}), '()\n', (5467, 5469), False, 'from mlagents.trainers.buffer import AgentBuffer, AgentBufferField, BufferKey, ObservationKeyPrefix, RewardSignalKeyPrefix\n'), ((6370, 6383), 'mlagents.trainers.buffer.AgentBuffer', 'AgentBuffer', ([], {}), '()\n', (6381, 6383), False, 'from mlagents.trainers.buffer import AgentBuffer, AgentBufferField, BufferKey, ObservationKeyPrefix, RewardSignalKeyPrefix\n'), ((6962, 6975), 'mlagents.trainers.buffer.AgentBuffer', 'AgentBuffer', ([], {}), '()\n', (6973, 6975), False, 'from mlagents.trainers.buffer import AgentBuffer, AgentBufferField, BufferKey, ObservationKeyPrefix, RewardSignalKeyPrefix\n'), ((8143, 8155), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (8153, 8155), False, 'import io\n'), ((8210, 8223), 
'mlagents.trainers.buffer.AgentBuffer', 'AgentBuffer', ([], {}), '()\n', (8221, 8223), False, 'from mlagents.trainers.buffer import AgentBuffer, AgentBufferField, BufferKey, ObservationKeyPrefix, RewardSignalKeyPrefix\n'), ((1782, 1793), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (1790, 1793), True, 'import numpy as np\n'), ((1795, 1857), 'numpy.array', 'np.array', (['[[171, 172, 173], [181, 182, 183]]'], {'dtype': 'np.float32'}), '([[171, 172, 173], [181, 182, 183]], dtype=np.float32)\n', (1803, 1857), True, 'import numpy as np\n'), ((2033, 2044), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (2041, 2044), True, 'import numpy as np\n'), ((2054, 2189), 'numpy.array', 'np.array', (['[[231, 232, 233], [241, 242, 243], [251, 252, 253], [261, 262, 263], [271, \n 272, 273], [281, 282, 283]]'], {'dtype': 'np.float32'}), '([[231, 232, 233], [241, 242, 243], [251, 252, 253], [261, 262, 263\n ], [271, 272, 273], [281, 282, 283]], dtype=np.float32)\n', (2062, 2189), True, 'import numpy as np\n'), ((2486, 2497), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (2494, 2497), True, 'import numpy as np\n'), ((2507, 2624), 'numpy.array', 'np.array', (['[[251, 252, 253], [261, 262, 263], [271, 272, 273], [261, 262, 263], [271, \n 272, 273], [281, 282, 283]]'], {}), '([[251, 252, 253], [261, 262, 263], [271, 272, 273], [261, 262, 263\n ], [271, 272, 273], [281, 282, 283]])\n', (2515, 2624), True, 'import numpy as np\n'), ((2930, 2941), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (2938, 2941), True, 'import numpy as np\n'), ((2951, 3156), 'numpy.array', 'np.array', (['[[201, 202, 203], [211, 212, 213], [221, 222, 223], [231, 232, 233], [241, \n 242, 243], [251, 252, 253], [261, 262, 263], [271, 272, 273], [281, 282,\n 283], [0, 0, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[201, 202, 203], [211, 212, 213], [221, 222, 223], [231, 232, 233\n ], [241, 242, 243], [251, 252, 253], [261, 262, 263], [271, 272, 273],\n [281, 282, 283], [0, 0, 0], [0, 0, 0], [0, 0, 0]])\n', (2959, 
3156), True, 'import numpy as np\n'), ((5055, 5077), 'numpy.array', 'np.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (5063, 5077), True, 'import numpy as np\n'), ((5116, 5138), 'numpy.array', 'np.array', (['[2, 2, 3, 3]'], {}), '([2, 2, 3, 3])\n', (5124, 5138), True, 'import numpy as np\n'), ((8350, 8385), 'numpy.allclose', 'np.allclose', (['original[k]', 'loaded[k]'], {}), '(original[k], loaded[k])\n', (8361, 8385), True, 'import numpy as np\n'), ((531, 674), 'numpy.array', 'np.array', (['[100 * fake_agent_id + 10 * step + 1, 100 * fake_agent_id + 10 * step + 2, \n 100 * fake_agent_id + 10 * step + 3]'], {'dtype': 'np.float32'}), '([100 * fake_agent_id + 10 * step + 1, 100 * fake_agent_id + 10 *\n step + 2, 100 * fake_agent_id + 10 * step + 3], dtype=np.float32)\n', (539, 674), True, 'import numpy as np\n'), ((866, 972), 'numpy.array', 'np.array', (['[100 * fake_agent_id + 10 * step + 4, 100 * fake_agent_id + 10 * step + 5]'], {'dtype': 'np.float32'}), '([100 * fake_agent_id + 10 * step + 4, 100 * fake_agent_id + 10 *\n step + 5], dtype=np.float32)\n', (874, 972), True, 'import numpy as np\n'), ((4138, 4190), 'numpy.array', 'np.array', (['update_buffer[BufferKey.CONTINUOUS_ACTION]'], {}), '(update_buffer[BufferKey.CONTINUOUS_ACTION])\n', (4146, 4190), True, 'import numpy as np\n'), ((4447, 4487), 'numpy.array', 'np.array', (['c[BufferKey.CONTINUOUS_ACTION]'], {}), '(c[BufferKey.CONTINUOUS_ACTION])\n', (4455, 4487), True, 'import numpy as np\n'), ((5832, 5873), 'numpy.array', 'np.array', (['mb[BufferKey.CONTINUOUS_ACTION]'], {}), '(mb[BufferKey.CONTINUOUS_ACTION])\n', (5840, 5873), True, 'import numpy as np\n'), ((6169, 6210), 'numpy.array', 'np.array', (['mb[BufferKey.CONTINUOUS_ACTION]'], {}), '(mb[BufferKey.CONTINUOUS_ACTION])\n', (6177, 6210), True, 'import numpy as np\n'), ((1658, 1680), 'mlagents.trainers.trajectory.ObsUtil.get_name_at', 'ObsUtil.get_name_at', (['(0)'], {}), '(0)\n', (1677, 1680), False, 'from mlagents.trainers.trajectory import 
ObsUtil\n'), ((1909, 1931), 'mlagents.trainers.trajectory.ObsUtil.get_name_at', 'ObsUtil.get_name_at', (['(0)'], {}), '(0)\n', (1928, 1931), False, 'from mlagents.trainers.trajectory import ObsUtil\n'), ((2361, 2383), 'mlagents.trainers.trajectory.ObsUtil.get_name_at', 'ObsUtil.get_name_at', (['(0)'], {}), '(0)\n', (2380, 2383), False, 'from mlagents.trainers.trajectory import ObsUtil\n'), ((2803, 2825), 'mlagents.trainers.trajectory.ObsUtil.get_name_at', 'ObsUtil.get_name_at', (['(0)'], {}), '(0)\n', (2822, 2825), False, 'from mlagents.trainers.trajectory import ObsUtil\n'), ((4890, 4901), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (4898, 4901), True, 'import numpy as np\n'), ((4903, 4914), 'numpy.array', 'np.array', (['(2)'], {}), '(2)\n', (4911, 4914), True, 'import numpy as np\n'), ((4959, 4970), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (4967, 4970), True, 'import numpy as np\n'), ((8010, 8036), 'mlagents.trainers.buffer.AgentBuffer._encode_key', 'AgentBuffer._encode_key', (['k'], {}), '(k)\n', (8033, 8036), False, 'from mlagents.trainers.buffer import AgentBuffer, AgentBufferField, BufferKey, ObservationKeyPrefix, RewardSignalKeyPrefix\n'), ((487, 509), 'mlagents.trainers.trajectory.ObsUtil.get_name_at', 'ObsUtil.get_name_at', (['(0)'], {}), '(0)\n', (506, 509), False, 'from mlagents.trainers.trajectory import ObsUtil\n'), ((1168, 1274), 'numpy.array', 'np.array', (['[100 * fake_agent_id + 10 * step + 4, 100 * fake_agent_id + 10 * step + 5]'], {'dtype': 'np.float32'}), '([100 * fake_agent_id + 10 * step + 4, 100 * fake_agent_id + 10 *\n step + 5], dtype=np.float32)\n', (1176, 1274), True, 'import numpy as np\n')] |
import numpy as np
#
#
#
def _siesta2blanko_denvec(orb2m, vec, orb_sc2orb_uc=None):
n,nreim = vec.shape
if orb_sc2orb_uc is None:
orb_sc2m = orb2m
else:
orb_sc2m = np.zeros_like(orb_sc2orb_uc)
for orb_sc,orb_uc in enumerate(orb_sc2orb_uc): orb_sc2m[orb_sc] = orb2m[orb_uc]
orb2ph = (-1.0)**orb_sc2m
if(nreim==1):
vec[:,0] = vec[:,0]*orb2ph[:]
elif(nreim==2):
#print(vec[0:3,:], ' vec')
cvec = vec.view(dtype=np.complex64)
#print(cvec[0:3], 'cvec', cvec.shape) # I expected cvec.shape = (n), but got (n,1)...
cvec[:,0] = cvec[:,0] * orb2ph
#print(cvec[0:3], ' cvec2')
vec = cvec.view(dtype=np.float32)
#print(vec[0:3], ' vec2')
#raise RuntimeError('debug')
else:
raise SystemError('!nreim')
return(0)
| [
"numpy.zeros_like"
] | [((181, 209), 'numpy.zeros_like', 'np.zeros_like', (['orb_sc2orb_uc'], {}), '(orb_sc2orb_uc)\n', (194, 209), True, 'import numpy as np\n')] |
from .RainbowSim import RainbowSim
import numpy as np
class RbCartesianSumsEq(RainbowSim):
    """
    Rainbow-coloring search over the M x N cartesian grid for the equation
    a1*x1 + ... + ak*xk = b, where grid cells are numbered 1..M*N row-major
    and each xi is a grid point.
    """

    def __init__(self, m, n, a, b=[0, 0]):
        # NOTE: the mutable default for ``b`` is kept for interface
        # compatibility; it is only read, never mutated.
        super(RbCartesianSumsEq, self).__init__(m * n, a, Point(b[0], b[1]), False)
        self.M = m  # rows
        self.N = n  # columns
        self.sums = self.sets  # alias: the solution sets of the equation
        self.__generate_sums()

    def get_equation(self):
        """Return a human-readable description of the equation and grid size."""
        eq = ""
        for i in self.a:
            eq += str(i) + "x + "
        eq = eq[:-2] + "= " + str(self.b) + ", M = " + str(self.M) + ", N =" + str(self.N)
        return eq

    def __generate_sums(self):
        # Enumerate all (k-1)-element index combinations; the k-th point is
        # solved for algebraically in __recur_gen_sums.
        values = list(range(1, self.k))
        self.__recur_gen_sums(values, 0)

    def __recur_gen_sums(self, values, loop):
        """Recursively advance position ``loop`` of ``values`` through all
        increasing index combinations, recording each valid solution set."""
        k = self.k - 1
        if loop == k:
            return
        stop_case = self.n - k + loop
        while values[loop] <= stop_case:
            self.__recur_gen_sums(values, loop + 1)
            if loop == k - 1:
                # Innermost level: accumulate the weighted sum of the first
                # k-1 points, then solve for the last point.
                total = Point(0, 0)  # renamed from ``sum`` (shadowed builtin)
                out = [0 for _ in range(self.k)]
                for i in range(len(values)):
                    total = total + self.a[i] * self.__translate(values[i])
                    out[i] = values[i]
                valid = True
                total = (total - self.b) / -self.a[k]
                # The solved point must be integral and lie inside the grid.
                if total.x == int(total.x) and total.y == int(total.y) and total.x <= self.M and total.y <= self.N:
                    out[k] = self.__point_to_i(total)
                    valid = self._set_leq_n(out, valid)
                    valid = self._is_distinct(out, valid)
                    out = self._decrement_if_not_mod(out, valid)
                    self._add_set(out, valid)
            values[loop] = values[loop] + 1
            # Keep the remaining positions strictly increasing.
            for lp in range(loop + 1, k):
                values[lp] = values[lp - 1] + 1

    def __translate(self, n):
        """Map a 1-based cell number to its (row, column) grid Point."""
        x = (n + self.N - 1) // self.N
        y = 1 + ((n - 1) % self.N)
        return Point(x, y)

    def __point_to_i(self, p):
        """Map a grid Point back to its 1-based cell number (inverse of __translate)."""
        return self.N * (p.x - 1) + p.y

    def print_extreme_matrices(self, quantity=-1):
        """Print each recorded extreme coloring reshaped as an M x N matrix."""
        if self.start != -1:
            temp = self.colorings.head
            while temp.next is not None:
                matrix = [temp.data[i * self.N:(i + 1) * self.N] for i in range((len(temp.data) + self.N - 1) // self.N )]
                print(np.matrix(matrix), "\n")
                temp = temp.next
            print()

    def print_set_matrices(self):
        """Print each solution set as an M x N matrix, marking members with '0'."""
        node = self.sums[self.n].head.next  # renamed from ``sum`` (shadowed builtin)
        while node.next is not None:
            matrix = [["*" for _ in range(self.N)] for _ in range(self.M)]
            for i in node.data:
                p = self.__translate(i + 1)
                matrix[p.x - 1][p.y - 1] = "0"
            print(np.matrix(matrix), "\n")
            node = node.next
        return

    def print_sets(self, nums=-1):
        """Print the generated solution sets for the requested indices
        (all of them when ``nums`` is -1), translating cell numbers to grid
        points in the non-modular case."""
        print('Sets Generated:', end='')
        # BUG FIX: identity comparison ``nums is -1`` replaced with equality
        # (``is`` on int literals is a CPython implementation accident and a
        # SyntaxWarning on Python >= 3.8).
        if nums == -1 and self.mod:
            nums = list(range(self.n))
        elif nums == -1 and not self.mod:
            nums = list(range(1, self.n + 1))
        for n in nums:
            if self.mod:
                temp = self.sets[n].head.next
            else:
                temp = self.sets[n - 1].head.next
            if self.mod:
                print('\n', n, ':', temp, end='')
            else:
                if temp is not None:
                    print('\n', n, ':',
                          '[%s]' % ', '.join(map(str, [self.__translate(i + 1) for i in temp.data])), end='')
                else:
                    print('\n', n, ':', temp, end='')
            if temp is not None:
                temp = temp.next
            while temp is not None:
                if self.mod:
                    print(',', temp, end='')
                else:
                    print(',', '[%s]' % ', '.join(map(str, [self.__translate(i + 1) for i in temp.data])),
                          end='')
                temp = temp.next
        print("\n")
class Point:
    """A 2-D grid point with integer coordinates.

    Supports component-wise addition and subtraction, left scalar
    multiplication (``k * p``), scalar division, and a ``"[x, y]"`` string
    form. Construction rejects any coordinate that is not integer-valued.
    """

    def __init__(self, x, y):
        # Reject non-integral coordinates (e.g. 1.5) up front.
        if x != int(x):
            raise TypeError("Points cannot have parameter of type double: x")
        if y != int(y):
            raise TypeError("Points cannot have parameter of type double: y")
        self.x = int(x)
        self.y = int(y)

    def __add__(self, other):
        """Component-wise sum."""
        return Point(self.x + other.x, self.y + other.y)

    def __sub__(self, other):
        """Component-wise difference."""
        dx = self.x - other.x
        dy = self.y - other.y
        return Point(dx, dy)

    def __rmul__(self, other):
        """Left scalar multiplication: ``k * p``."""
        return Point(other * self.x, other * self.y)

    def __truediv__(self, other):
        """Scalar division; raises TypeError if the result is non-integral."""
        return Point(self.x / other, self.y / other)

    def __str__(self):
        return "[{}, {}]".format(self.x, self.y)
| [
"numpy.matrix"
] | [((2681, 2698), 'numpy.matrix', 'np.matrix', (['matrix'], {}), '(matrix)\n', (2690, 2698), True, 'import numpy as np\n'), ((2279, 2296), 'numpy.matrix', 'np.matrix', (['matrix'], {}), '(matrix)\n', (2288, 2296), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import sys,time
import rospy
import roslib
import numpy as np
from cv_bridge import CvBridge, CvBridgeError
import cv2
import message_filters
from sensor_msgs.msg import Image, CameraInfo
from std_msgs.msg import Header
class CutterNode:
    """
    ROS node that combines a grabcut segmentation mask with an aligned depth
    image and republishes the (masked) depth on /cutter_node_align_depth.
    """

    def __init__(self):
        # Latest grabcut mask (mono8), aligned depth (16UC1) and output frame.
        self.image = np.zeros([480, 640])
        self.depth = np.zeros([480, 640])
        self.depth_output = np.zeros([480, 640])
        self.depth_timestamp = 0
        self.header = Header()  # header of the most recent depth message
        self.debbbug = Image()  # raw copy of the last depth message, kept for debugging
        '''Initialize ros publisher, ros subscriber'''
        # topic where we publish
        self.depth_aligned_pub = rospy.Publisher("/cutter_node_align_depth", Image, queue_size=10)
        # cv bridge
        self.cv_bridge = CvBridge()
        # subscribed Topics
        self.image_subscriber = rospy.Subscriber("/grabcut", Image, self.callback_image, queue_size=1)
        self.depth_subscriber = rospy.Subscriber("/align_depth", Image, self.callback_depth, queue_size=1)

    def callback_image(self, data):
        """Grabcut-mask callback: binarize the mask and publish the current
        aligned depth frame, then show both in the debug windows."""
        try:
            cv_image = self.cv_bridge.imgmsg_to_cv2(data, "mono8")
            self.image = cv_image
        except CvBridgeError as e:
            print(e)
        self.depth_output = np.array(np.zeros([480, 640]), dtype=np.dtype('f4'))
        # Binarize the mask: values > 10 become foreground (255), then 1.
        ret, thresh1 = cv2.threshold(cv_image, 10.0, 255.0, cv2.THRESH_BINARY)
        thresh1_norm = cv2.normalize(thresh1, thresh1, 0, 1, cv2.NORM_MINMAX)
        # NOTE(review): masking (thresh1_norm * depth) is currently disabled;
        # the raw aligned depth is passed through unchanged.
        self.depth_output = self.depth
        print("x")
        try:
            self.align_message = self.cv_bridge.cv2_to_imgmsg(self.depth_output, "16UC1")
            # BUG FIX: stamp/frame_id used to be written *before* the whole
            # header was reassigned, so they were silently discarded. Copy the
            # depth header first (carries the depth timestamp), then override
            # the frame id.
            self.align_message.header = self.header
            self.align_message.header.frame_id = "map"
            self.depth_aligned_pub.publish(self.align_message)
        except CvBridgeError as e:
            print(e)
        cv2.imshow("cutter_node_depth_output", self.depth_output)
        cv2.imshow("cutter_node_mask", thresh1_norm)
        cv2.waitKey(3)

    def callback_depth(self, data):
        """Aligned-depth callback: cache the frame, its header and timestamp."""
        try:
            self.debbbug = data
            cv_image = self.cv_bridge.imgmsg_to_cv2(data, "16UC1")
            self.depth_timestamp = data.header.stamp
            self.header = data.header
            self.depth = cv_image
        except CvBridgeError as e:
            print("cv bridge: ", e)

    def cameras_callback(self, camera_msg, depth_msg, image_msg):
        """Synchronized callback (for use with message_filters): normalize the
        depth, mask it with the grabcut image and publish the result."""
        timestamp_depth = depth_msg.header.stamp
        timestamp_camera = camera_msg.header.stamp
        timestamp_image = image_msg.header.stamp
        # Depth: convert to float32 and normalize into [0, 1].
        try:
            depth_cv_image = self.cv_bridge.imgmsg_to_cv2(depth_msg, "32FC1")
            depth_cv_image_array = np.array(depth_cv_image, dtype=np.dtype('f4'))
            depth_cv_image_norm = cv2.normalize(depth_cv_image_array, depth_cv_image_array, 0, 1, cv2.NORM_MINMAX)
            depth_cv_image_norm = np.float32(depth_cv_image_norm)
            self.depth = depth_cv_image_norm
        except CvBridgeError as e:
            print("cv bridge: ", e)
        # Image: decode the grayscale grabcut mask.
        try:
            # BUG FIX: was ``data`` (undefined in this scope -> NameError);
            # the synchronized image message is ``image_msg``.
            image_cv_image = self.cv_bridge.imgmsg_to_cv2(image_msg, "mono8")
            self.image = image_cv_image
        except CvBridgeError as e:
            print(e)
        # Mask the normalized depth with the binarized grabcut image.
        self.depth_output = np.array(np.zeros([480, 640]), dtype=np.dtype('f4'))
        ret, thresh1 = cv2.threshold(image_cv_image, 10.0, 255.0, cv2.THRESH_BINARY)
        depth_out = thresh1 * self.depth
        depth_out = np.float32(depth_out)
        self.depth_output = cv2.normalize(depth_out, depth_out, 0, 1, cv2.NORM_MINMAX)
        self.depth_output = np.float32(self.depth_output)
        try:
            # BUG FIX: the stamp used to be written to the *previous* message
            # before this reassignment, so it was lost. Build the message
            # first, then stamp it.
            self.align_message = self.cv_bridge.cv2_to_imgmsg(self.depth_output, "32FC1")
            self.align_message.header.stamp = timestamp_camera
            self.align_message.header.frame_id = "map"
            self.depth_aligned_pub.publish(self.align_message)
        except CvBridgeError as e:
            print(e)
def main(args):
    """Start the cutter node and spin until interrupted, then tear down the
    debug windows."""
    # Debug windows for visual inspection of the mask and the output depth.
    cv2.namedWindow('cutter_node_depth_output')
    cv2.namedWindow('cutter_node_mask')
    rospy.init_node('cutter_node', anonymous=True)
    node = CutterNode()  # constructing the node registers all subscribers
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down ROS cutter node")
    cv2.destroyAllWindows()
if __name__ == "__main__":
main(sys.argv) | [
"sensor_msgs.msg.Image",
"cv_bridge.CvBridge",
"rospy.Subscriber",
"std_msgs.msg.Header",
"cv2.waitKey",
"cv2.threshold",
"numpy.float32",
"numpy.zeros",
"rospy.Publisher",
"cv2.imshow",
"numpy.dtype",
"rospy.init_node",
"cv2.normalize",
"rospy.spin",
"cv2.destroyAllWindows",
"cv2.name... | [((6360, 6403), 'cv2.namedWindow', 'cv2.namedWindow', (['"""cutter_node_depth_output"""'], {}), "('cutter_node_depth_output')\n", (6375, 6403), False, 'import cv2\n'), ((6408, 6443), 'cv2.namedWindow', 'cv2.namedWindow', (['"""cutter_node_mask"""'], {}), "('cutter_node_mask')\n", (6423, 6443), False, 'import cv2\n'), ((6448, 6494), 'rospy.init_node', 'rospy.init_node', (['"""cutter_node"""'], {'anonymous': '(True)'}), "('cutter_node', anonymous=True)\n", (6463, 6494), False, 'import rospy\n'), ((6640, 6663), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (6661, 6663), False, 'import cv2\n'), ((312, 332), 'numpy.zeros', 'np.zeros', (['[480, 640]'], {}), '([480, 640])\n', (320, 332), True, 'import numpy as np\n'), ((353, 373), 'numpy.zeros', 'np.zeros', (['[480, 640]'], {}), '([480, 640])\n', (361, 373), True, 'import numpy as np\n'), ((401, 421), 'numpy.zeros', 'np.zeros', (['[480, 640]'], {}), '([480, 640])\n', (409, 421), True, 'import numpy as np\n'), ((477, 485), 'std_msgs.msg.Header', 'Header', ([], {}), '()\n', (483, 485), False, 'from std_msgs.msg import Header\n'), ((509, 516), 'sensor_msgs.msg.Image', 'Image', ([], {}), '()\n', (514, 516), False, 'from sensor_msgs.msg import Image, CameraInfo\n'), ((639, 704), 'rospy.Publisher', 'rospy.Publisher', (['"""/cutter_node_align_depth"""', 'Image'], {'queue_size': '(10)'}), "('/cutter_node_align_depth', Image, queue_size=10)\n", (654, 704), False, 'import rospy\n'), ((749, 759), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (757, 759), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((819, 889), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/grabcut"""', 'Image', 'self.callback_image'], {'queue_size': '(1)'}), "('/grabcut', Image, self.callback_image, queue_size=1)\n", (835, 889), False, 'import rospy\n'), ((920, 994), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/align_depth"""', 'Image', 'self.callback_depth'], {'queue_size': '(1)'}), "('/align_depth', Image, 
self.callback_depth, queue_size=1)\n", (936, 994), False, 'import rospy\n'), ((1431, 1486), 'cv2.threshold', 'cv2.threshold', (['cv_image', '(10.0)', '(255.0)', 'cv2.THRESH_BINARY'], {}), '(cv_image, 10.0, 255.0, cv2.THRESH_BINARY)\n', (1444, 1486), False, 'import cv2\n'), ((1507, 1561), 'cv2.normalize', 'cv2.normalize', (['thresh1', 'thresh1', '(0)', '(1)', 'cv2.NORM_MINMAX'], {}), '(thresh1, thresh1, 0, 1, cv2.NORM_MINMAX)\n', (1520, 1561), False, 'import cv2\n'), ((2762, 2819), 'cv2.imshow', 'cv2.imshow', (['"""cutter_node_depth_output"""', 'self.depth_output'], {}), "('cutter_node_depth_output', self.depth_output)\n", (2772, 2819), False, 'import cv2\n'), ((2828, 2872), 'cv2.imshow', 'cv2.imshow', (['"""cutter_node_mask"""', 'thresh1_norm'], {}), "('cutter_node_mask', thresh1_norm)\n", (2838, 2872), False, 'import cv2\n'), ((2881, 2895), 'cv2.waitKey', 'cv2.waitKey', (['(3)'], {}), '(3)\n', (2892, 2895), False, 'import cv2\n'), ((5565, 5626), 'cv2.threshold', 'cv2.threshold', (['image_cv_image', '(10.0)', '(255.0)', 'cv2.THRESH_BINARY'], {}), '(image_cv_image, 10.0, 255.0, cv2.THRESH_BINARY)\n', (5578, 5626), False, 'import cv2\n'), ((5697, 5718), 'numpy.float32', 'np.float32', (['depth_out'], {}), '(depth_out)\n', (5707, 5718), True, 'import numpy as np\n'), ((5748, 5806), 'cv2.normalize', 'cv2.normalize', (['depth_out', 'depth_out', '(0)', '(1)', 'cv2.NORM_MINMAX'], {}), '(depth_out, depth_out, 0, 1, cv2.NORM_MINMAX)\n', (5761, 5806), False, 'import cv2\n'), ((5836, 5865), 'numpy.float32', 'np.float32', (['self.depth_output'], {}), '(self.depth_output)\n', (5846, 5865), True, 'import numpy as np\n'), ((6533, 6545), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (6543, 6545), False, 'import rospy\n'), ((1364, 1384), 'numpy.zeros', 'np.zeros', (['[480, 640]'], {}), '([480, 640])\n', (1372, 1384), True, 'import numpy as np\n'), ((4929, 5014), 'cv2.normalize', 'cv2.normalize', (['depth_cv_image_array', 'depth_cv_image_array', '(0)', '(1)', 'cv2.NORM_MINMAX'], {}), 
'(depth_cv_image_array, depth_cv_image_array, 0, 1, cv2.NORM_MINMAX\n )\n', (4942, 5014), False, 'import cv2\n'), ((5044, 5075), 'numpy.float32', 'np.float32', (['depth_cv_image_norm'], {}), '(depth_cv_image_norm)\n', (5054, 5075), True, 'import numpy as np\n'), ((5497, 5517), 'numpy.zeros', 'np.zeros', (['[480, 640]'], {}), '([480, 640])\n', (5505, 5517), True, 'import numpy as np\n'), ((1393, 1407), 'numpy.dtype', 'np.dtype', (['"""f4"""'], {}), "('f4')\n", (1401, 1407), True, 'import numpy as np\n'), ((5526, 5540), 'numpy.dtype', 'np.dtype', (['"""f4"""'], {}), "('f4')\n", (5534, 5540), True, 'import numpy as np\n'), ((4872, 4886), 'numpy.dtype', 'np.dtype', (['"""f4"""'], {}), "('f4')\n", (4880, 4886), True, 'import numpy as np\n')] |
from __future__ import division
from numpy.testing import assert_almost_equal
from statsmodels.emplike.originregress import ELOriginRegress
from statsmodels.datasets import cancer
from .results.el_results import OriginResults
import numpy as np
class GenRes(object):
    """
    Fixture base class: fits an empirical-likelihood origin regression on the
    cancer dataset and stores the reference results alongside it.
    """
    def __init__(self):
        dataset = cancer.load()
        self.res1 = ELOriginRegress(dataset.endog, dataset.exog).fit()
        self.res2 = OriginResults()
self.res2 = OriginResults()
class TestOrigin(GenRes):
    """
    Compares the fitted EL origin regression against precomputed reference
    values; see OriginResults for how those values were generated.
    """
    def __init__(self):
        super(TestOrigin, self).__init__()

    def test_params(self):
        assert_almost_equal(self.res1.params, self.res2.test_params, 4)

    def test_llf(self):
        assert_almost_equal(self.res1.llf_el, self.res2.test_llf_hat, 4)

    def test_hypothesis_beta1(self):
        llr = self.res1.el_test([.0034], [1])[0]
        assert_almost_equal(llr, self.res2.test_llf_hypoth, 4)

    def test_ci_beta(self):
        # Log-likelihood evaluated at either confidence bound must match the
        # reference confidence-level log-likelihood.
        ci = self.res1.conf_int_el(1)
        for endpoint in (ci[0], ci[1]):
            weights = self.res1.el_test([endpoint], [1], return_weights=1)[2]
            assert_almost_equal(np.sum(np.log(weights)),
                                self.res2.test_llf_conf, 4)
| [
"statsmodels.datasets.cancer.load",
"statsmodels.emplike.originregress.ELOriginRegress",
"numpy.testing.assert_almost_equal"
] | [((381, 394), 'statsmodels.datasets.cancer.load', 'cancer.load', ([], {}), '()\n', (392, 394), False, 'from statsmodels.datasets import cancer\n'), ((704, 767), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['self.res1.params', 'self.res2.test_params', '(4)'], {}), '(self.res1.params, self.res2.test_params, 4)\n', (723, 767), False, 'from numpy.testing import assert_almost_equal\n'), ((801, 865), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['self.res1.llf_el', 'self.res2.test_llf_hat', '(4)'], {}), '(self.res1.llf_el, self.res2.test_llf_hat, 4)\n', (820, 865), False, 'from numpy.testing import assert_almost_equal\n'), ((1304, 1360), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['llf_low', 'self.res2.test_llf_conf', '(4)'], {}), '(llf_low, self.res2.test_llf_conf, 4)\n', (1323, 1360), False, 'from numpy.testing import assert_almost_equal\n'), ((1369, 1426), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['llf_high', 'self.res2.test_llf_conf', '(4)'], {}), '(llf_high, self.res2.test_llf_conf, 4)\n', (1388, 1426), False, 'from numpy.testing import assert_almost_equal\n'), ((415, 453), 'statsmodels.emplike.originregress.ELOriginRegress', 'ELOriginRegress', (['data.endog', 'data.exog'], {}), '(data.endog, data.exog)\n', (430, 453), False, 'from statsmodels.emplike.originregress import ELOriginRegress\n')] |
import os
import SimpleITK as sitk
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
import torch.nn.functional as F
from sklearn.model_selection import train_test_split
from torch import nn
from torch.nn import Module, Conv3d, ConvTranspose3d, Linear, ReLU, Sequential, Linear, Flatten, L1Loss, BatchNorm3d, \
Dropout, BatchNorm1d
from torch.optim import Adam, lr_scheduler
from torch.utils.data import Dataset, DataLoader
from ..utils.utils import plot_preds
from ..utils.models import ImageSegmentationDataset, Part3, resample_image, PrintTensor
import os.path as osp
PATH_TO_VOLUME3D = osp.join(osp.dirname(osp.realpath(__file__)), '..') + '/'
def save_graphs_train(fn, num_epochs, training_loss, val_loss_epoch5):
    '''
    Plots the per-epoch training loss and the validation loss (recorded every
    5 epochs) on one figure and saves it as ``graph.png`` inside ``fn``.
    :param fn: folder (resolved relative to the package root) to save into
    :param num_epochs: total number of training epochs (x-axis extent)
    :param training_loss: list of per-epoch training losses
    :param val_loss_epoch5: list of validation losses, one entry per 5 epochs
    :return: None
    '''
    # Resolve the output folder relative to this file's parent package.
    path = osp.join(osp.dirname(osp.realpath(__file__)), '..', f'{fn}/')
    plt.plot([epoch for epoch in range(num_epochs)], training_loss, color='b', label='Train')
    # Validation ran every 5th epoch, so space its x values accordingly.
    plt.plot([5 * i for i in range(len(val_loss_epoch5))], val_loss_epoch5, color='r', label='Val')
    plt.title("Loss")
    plt.xlabel("Number of Epochs")
    plt.ylabel("Loss")
    plt.ylim(0, 5)  # clamp the y-axis so early outliers do not hide the trend
    plt.xlim(-5, num_epochs + 5)
    plt.legend()
    plt.savefig(path + f'graph.png')
    plt.close()
def save_to_log(model, params, fn, final_MAE, num_epochs, batch_size, lr, feats, gamma, smoothen, edgen, dropout_p, img_spacing, img_size, scheduler_freq):
    '''
    Append a summary of the run (hyper-parameters, model architecture and
    final validation MAE) to ``fn/log.txt``, persist the model to
    ``fn/model.pth`` and add a one-line entry to the shared ``all_log.txt``
    one directory above ``fn``.
    '''
    print(f"Average Loss on whole val set: {final_MAE}")
    summary = f"""
    ########################################################################
    ***** Score = {final_MAE} *****
    2. Number of epochs:
    num_epochs = {num_epochs}
    Batch size during training
    batch_size = {batch_size}
    Learning rate for optimizers
    lr = {lr}
    Size of feature amplifier
    Feature Amplifier: {feats}
    Gamma (using sched)
    Gamma: {gamma}
    Frequency of step: {scheduler_freq}
    7. Image spacing and size
    img_spacing = {img_spacing}
    img_size = {img_size}
    Smooth:
    smoothen = {smoothen}
    Edgen:
    edgen = {edgen}
    Amount of dropout:
    dropout_p = {dropout_p}
    Total number of parameters is: {params}
    Model:
    {model.__str__()}
    ########################################################################
    """
    # Append the run summary, then serialise the trained model next to it.
    with open(f'{fn}/log.txt', 'a+') as log:
        log.write('\n' + summary + '\n')
        torch.save(model, f'{fn}/model.pth')
    # One-line entry in the aggregate log shared across subjects.
    path = osp.join(fn, '../')
    with open(path + 'all_log.txt', 'a+') as log:
        log.write('\n' + f'SUBJECT #{fn[-1]}: Validation = {final_MAE}, ')
def train_validate(lr, feats, num_epochs, gamma, batch_size, dropout_p, dataset_train, dataset_val, fn, number_here, scheduler_freq, writer):
    '''
    Main train-val loop. Train on training data and evaluate on validation data.
    :param lr: learning rate
    :param feats: feature amplifier (multiplier of the number of parameters in the CNN)
    :param num_epochs: number of training epochs
    :param gamma: scheduler gamma (multiplicative LR decay factor)
    :param batch_size: batch size for both loaders
    :param dropout_p: dropout proba
    :param dataset_train: training Dataset
    :param dataset_val: validation Dataset
    :param fn: saving folder
    :param number_here: label printed in the per-epoch progress line
    :param scheduler_freq: step the LR scheduler every this many epochs
    :param writer: tensorboard SummaryWriter
    :return: model, params (trainable parameter count), final_MAE
    '''
    # 1. Display GPU Settings:
    cuda_dev = '0' # GPU device 0 (can be changed if multiple GPUs are available)
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:" + cuda_dev if use_cuda else "cpu")
    print('Device: ' + str(device))
    if use_cuda:
        print('GPU: ' + str(torch.cuda.get_device_name(int(cuda_dev))))
    # 2. Define loss function (mean absolute error)
    loss_function = L1Loss()
    # 3. Print parameters
    print(f"Learning Rate: {lr} and Feature Amplifier: {feats}, Num_epochs: {num_epochs}, Gamma: {gamma}")
    # 4. Define collector lists
    folds_val_scores = []
    training_loss = []
    val_loss_epoch5 = []
    i_fold_val_scores = []
    # 5. Create data loaders
    train_loader = DataLoader(dataset_train, batch_size=batch_size)
    val_loader = DataLoader(dataset_val, batch_size=batch_size)
    # 6. Define a model
    model = Part3(feats, dropout_p).to(device=device)
    # 7. Print parameters (trainable only)
    params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"Total Params: {params}")
    # 8. Create an optimizer + LR scheduler
    optimizer = Adam(model.parameters(), lr, weight_decay=0.005)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=1, gamma=gamma, last_epoch=-1)
    # 9. Proceed to train
    for epoch in range(num_epochs):
        model.train()
        epoch_loss = []
        for batch_data, batch_labels in train_loader:
            batch_labels = batch_labels.to(device=device)
            batch_data = batch_data.to(device=device) # move to device, e.g. GPU
            batch_preds = model(batch_data)
            loss = loss_function(batch_preds, batch_labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            epoch_loss.append(loss.item())
        training_MAE = np.mean(epoch_loss)
        training_loss.append(training_MAE)
        writer.add_scalar('MAE Loss/train', training_MAE, epoch)
        # NOTE(review): this condition also fires at epoch 0, so the LR is
        # decayed once before a full scheduler interval has elapsed.
        if epoch % scheduler_freq == 0:
            scheduler.step()
        # 10. Validate every N epochs
        if (epoch % 5 == 0):
            val_loss = []
            model.eval()
            pred_ages = []
            actual_ages = []
            with torch.no_grad():
                for batch_data, batch_labels in val_loader:
                    batch_data = batch_data.to(device=device) # move to device, e.g. GPU
                    batch_labels = batch_labels.to(device=device)
                    batch_preds = model(batch_data)
                    pred_ages.append([batch_preds[i].item() for i in range(len(batch_preds))])
                    actual_ages.append([batch_labels[i].item() for i in range(len(batch_labels))])
                    loss = loss_function(batch_preds, batch_labels)
                    val_loss.append(loss.item())
            mean_val_error5 = np.mean(val_loss)
            val_loss_epoch5.append(mean_val_error5)
            plot_preds(pred_ages, actual_ages, writer, epoch, test=False)
            print(f"Epoch: {epoch}:: Learning Rate: {scheduler.get_lr()[0]}")
            print(
                f"{number_here}:: Maxiumum Age Error: {np.round(np.max(epoch_loss))} Average Age Error: {training_MAE}, MAE Validation: {mean_val_error5}")
            writer.add_scalar('Max Age Error/validate', np.round(np.max(epoch_loss)), epoch)
            writer.add_scalar('MAE Loss/validate', mean_val_error5, epoch)
    # 11. Validate the last time
    model.eval()
    pred_ages = []
    actual_ages = []
    with torch.no_grad():
        for batch_data, batch_labels in val_loader:
            batch_data = batch_data.to(device=device) # move to device, e.g. GPU
            batch_labels = batch_labels.to(device=device)
            batch_preds = model(batch_data)
            pred_ages.append([batch_preds[i].item() for i in range(len(batch_preds))])
            actual_ages.append([batch_labels[i].item() for i in range(len(batch_labels))])
            loss = loss_function(batch_preds, batch_labels)
            i_fold_val_scores.append(loss.item())
    plot_preds(pred_ages, actual_ages, writer, epoch, test=False)
    # 12. Summarise the results
    mean_fold_score = np.mean(i_fold_val_scores)
    val_loss_epoch5.append(mean_fold_score)
    print(f"Mean Age Error: {mean_fold_score}")
    folds_val_scores.append(mean_fold_score)
    # NOTE(review): folds_val_scores only ever holds this single entry, so
    # final_MAE equals mean_fold_score; looks like a leftover of a k-fold loop.
    final_MAE = np.mean(folds_val_scores)
    save_graphs_train(fn, num_epochs, training_loss, val_loss_epoch5)
    return model, params, final_MAE
"matplotlib.pyplot.title",
"torch.optim.lr_scheduler.StepLR",
"numpy.mean",
"torch.device",
"torch.no_grad",
"os.path.join",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.close",
"numpy.max",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"os.path.realpath",
"torch.cuda.is_availab... | [((1314, 1331), 'matplotlib.pyplot.title', 'plt.title', (['"""Loss"""'], {}), "('Loss')\n", (1323, 1331), True, 'import matplotlib.pyplot as plt\n'), ((1336, 1366), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Epochs"""'], {}), "('Number of Epochs')\n", (1346, 1366), True, 'import matplotlib.pyplot as plt\n'), ((1371, 1389), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (1381, 1389), True, 'import matplotlib.pyplot as plt\n'), ((1394, 1408), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(5)'], {}), '(0, 5)\n', (1402, 1408), True, 'import matplotlib.pyplot as plt\n'), ((1413, 1441), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-5)', '(num_epochs + 5)'], {}), '(-5, num_epochs + 5)\n', (1421, 1441), True, 'import matplotlib.pyplot as plt\n'), ((1446, 1458), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1456, 1458), True, 'import matplotlib.pyplot as plt\n'), ((1463, 1495), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path + f'graph.png')"], {}), "(path + f'graph.png')\n", (1474, 1495), True, 'import matplotlib.pyplot as plt\n'), ((1501, 1512), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1510, 1512), True, 'import matplotlib.pyplot as plt\n'), ((2916, 2935), 'os.path.join', 'osp.join', (['fn', '"""../"""'], {}), "(fn, '../')\n", (2924, 2935), True, 'import os.path as osp\n'), ((3860, 3885), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3883, 3885), False, 'import torch\n'), ((3899, 3954), 'torch.device', 'torch.device', (["('cuda:' + cuda_dev if use_cuda else 'cpu')"], {}), "('cuda:' + cuda_dev if use_cuda else 'cpu')\n", (3911, 3954), False, 'import torch\n'), ((4131, 4139), 'torch.nn.L1Loss', 'L1Loss', ([], {}), '()\n', (4137, 4139), False, 'from torch.nn import Module, Conv3d, ConvTranspose3d, Linear, ReLU, Sequential, Linear, Flatten, L1Loss, BatchNorm3d, Dropout, BatchNorm1d\n'), ((4457, 4505), 'torch.utils.data.DataLoader', 
'DataLoader', (['dataset_train'], {'batch_size': 'batch_size'}), '(dataset_train, batch_size=batch_size)\n', (4467, 4505), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((4523, 4569), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_val'], {'batch_size': 'batch_size'}), '(dataset_val, batch_size=batch_size)\n', (4533, 4569), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((4914, 4985), 'torch.optim.lr_scheduler.StepLR', 'lr_scheduler.StepLR', (['optimizer'], {'step_size': '(1)', 'gamma': 'gamma', 'last_epoch': '(-1)'}), '(optimizer, step_size=1, gamma=gamma, last_epoch=-1)\n', (4933, 4985), False, 'from torch.optim import Adam, lr_scheduler\n'), ((7915, 7941), 'numpy.mean', 'np.mean', (['i_fold_val_scores'], {}), '(i_fold_val_scores)\n', (7922, 7941), True, 'import numpy as np\n'), ((8097, 8122), 'numpy.mean', 'np.mean', (['folds_val_scores'], {}), '(folds_val_scores)\n', (8104, 8122), True, 'import numpy as np\n'), ((2867, 2903), 'torch.save', 'torch.save', (['model', 'f"""{fn}/model.pth"""'], {}), "(model, f'{fn}/model.pth')\n", (2877, 2903), False, 'import torch\n'), ((5552, 5571), 'numpy.mean', 'np.mean', (['epoch_loss'], {}), '(epoch_loss)\n', (5559, 5571), True, 'import numpy as np\n'), ((7250, 7265), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7263, 7265), False, 'import torch\n'), ((671, 693), 'os.path.realpath', 'osp.realpath', (['__file__'], {}), '(__file__)\n', (683, 693), True, 'import os.path as osp\n'), ((1073, 1095), 'os.path.realpath', 'osp.realpath', (['__file__'], {}), '(__file__)\n', (1085, 1095), True, 'import os.path as osp\n'), ((5944, 5959), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5957, 5959), False, 'import torch\n'), ((6577, 6594), 'numpy.mean', 'np.mean', (['val_loss'], {}), '(val_loss)\n', (6584, 6594), True, 'import numpy as np\n'), ((7046, 7064), 'numpy.max', 'np.max', (['epoch_loss'], {}), '(epoch_loss)\n', (7052, 7064), True, 'import numpy as np\n'), ((6888, 6906), 
'numpy.max', 'np.max', (['epoch_loss'], {}), '(epoch_loss)\n', (6894, 6906), True, 'import numpy as np\n')] |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from akg.ops.nn import sparse_softmax_cross_entropy_with_logits
from gen_random import random_gaussian
from base import get_rtol_atol
def np_sparse_softmax_cross_entropy_with_logits(shape1, dtype1, shape2, dtype2, reduction="mean", scale=1.0):
    """
    Numpy reference: draws random logits and integer labels, then computes the
    sparse softmax cross-entropy loss and its gradient w.r.t. the logits.

    Returns:
        tuple: (labels, logits, loss, bp) where bp is the scaled gradient.
    """
    logits = random_gaussian(shape2, miu=0, sigma=1).astype(dtype2)
    num_class = logits.shape[1]
    labels = np.random.randint(low=0, high=num_class, size=shape1).astype(dtype1)
    batch_size = logits.shape[0]
    # Numerically stable softmax along the class axis (axis 1).
    shifted = logits - np.amax(logits, axis=1, keepdims=True)
    e = np.exp(shifted)
    probs = e / np.sum(e, axis=1, keepdims=True)
    # One-hot encode the labels in the probability dtype.
    one_hot = np.zeros_like(probs).astype(probs.dtype)
    one_hot[np.arange(batch_size), labels] = 1.0
    bp = probs - one_hot
    # Small epsilon keeps log() finite for near-zero probabilities.
    cost = -np.sum(one_hot * np.log(probs + 1.0e-20), axis=1)
    if not reduction or reduction.lower() == "none":
        loss = cost
    elif reduction.lower() == "mean":
        loss = np.mean(cost)
        # Gradient of the mean is the per-sample gradient over the count.
        bp = np.divide(bp, cost.size)
    elif reduction.lower() == "sum":
        loss = np.sum(cost)
    else:
        raise ValueError("reduction method for {} is not supported")
    bp = np.multiply(bp, scale)
    return labels, logits, loss, bp
def sparse_softmax_cross_entropy_with_logits_run(shape1, dtype1, shape2, dtype2, reduction, kernel_name, attrs):
    """Build (and optionally launch) the AKG kernel and compare against numpy.

    When 'tuning' is present in ``attrs``, only the compiled module is
    returned (plus the generated test data if the tuning flag is truthy).
    Otherwise the kernel is executed and its output checked against the
    numpy reference within dtype-dependent tolerances.
    """
    op_attrs = [reduction]
    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(sparse_softmax_cross_entropy_with_logits.sparse_softmax_cross_entropy_with_logits,
                                  [shape1, shape2], [dtype1, dtype2], op_attrs=op_attrs,
                                  kernel_name=kernel_name, attrs=attrs, tuning=t)
        if t:
            expect, labels, logits, output = gen_data(dtype1, dtype2, reduction, shape1, shape2)
            return mod, expect, (labels, logits, output)
        else:
            return mod
    else:
        mod = utils.op_build_test(sparse_softmax_cross_entropy_with_logits.sparse_softmax_cross_entropy_with_logits,
                                  [shape1, shape2], [dtype1, dtype2], op_attrs=op_attrs,
                                  kernel_name=kernel_name, attrs=attrs)
        expect, labels, logits, output = gen_data(dtype1, dtype2, reduction, shape1, shape2)
        # Launch the compiled kernel; `output` is the NaN-filled buffer it fills.
        output = utils.mod_launch(mod, (labels, logits, output), expect=expect)
        rtol, atol = get_rtol_atol("sparse_softmax_cross_entropy_with_logits", dtype2)
        compare_res = compare_tensor(output, expect, rtol=rtol, atol=atol)
        return (labels, logits), output, expect, compare_res
def gen_data(dtype1, dtype2, reduction, shape1, shape2):
    """
    Build reference inputs/outputs for the kernel test: random labels and
    logits, the expected loss, and a NaN-prefilled output buffer.
    """
    labels, logits, loss_res, bp_res = np_sparse_softmax_cross_entropy_with_logits(
        shape1, dtype1, shape2, dtype2, reduction)
    expect = loss_res
    if reduction and reduction.lower() != "none":
        # Any reduction collapses the loss to a scalar held in a length-1 array.
        output_shape = (1,)
        expect = [expect]
    else:
        output_shape = expect.shape
    output = np.full(output_shape, np.nan, dtype2)
    return expect, labels, logits, output
| [
"numpy.full",
"numpy.divide",
"tensorio.compare_tensor",
"numpy.zeros_like",
"numpy.multiply",
"numpy.sum",
"numpy.log",
"numpy.amax",
"akg.utils.kernel_exec.op_build_test",
"numpy.random.randint",
"numpy.arange",
"gen_random.random_gaussian",
"base.get_rtol_atol",
"numpy.mean",
"akg.uti... | [((2070, 2092), 'numpy.multiply', 'np.multiply', (['bp', 'scale'], {}), '(bp, scale)\n', (2081, 2092), True, 'import numpy as np\n'), ((4014, 4051), 'numpy.full', 'np.full', (['output_shape', 'np.nan', 'dtype2'], {}), '(output_shape, np.nan, dtype2)\n', (4021, 4051), True, 'import numpy as np\n'), ((2412, 2626), 'akg.utils.kernel_exec.op_build_test', 'utils.op_build_test', (['sparse_softmax_cross_entropy_with_logits.sparse_softmax_cross_entropy_with_logits', '[shape1, shape2]', '[dtype1, dtype2]'], {'op_attrs': 'op_attrs', 'kernel_name': 'kernel_name', 'attrs': 'attrs', 'tuning': 't'}), '(sparse_softmax_cross_entropy_with_logits.\n sparse_softmax_cross_entropy_with_logits, [shape1, shape2], [dtype1,\n dtype2], op_attrs=op_attrs, kernel_name=kernel_name, attrs=attrs, tuning=t)\n', (2431, 2626), True, 'from akg.utils import kernel_exec as utils\n'), ((2915, 3119), 'akg.utils.kernel_exec.op_build_test', 'utils.op_build_test', (['sparse_softmax_cross_entropy_with_logits.sparse_softmax_cross_entropy_with_logits', '[shape1, shape2]', '[dtype1, dtype2]'], {'op_attrs': 'op_attrs', 'kernel_name': 'kernel_name', 'attrs': 'attrs'}), '(sparse_softmax_cross_entropy_with_logits.\n sparse_softmax_cross_entropy_with_logits, [shape1, shape2], [dtype1,\n dtype2], op_attrs=op_attrs, kernel_name=kernel_name, attrs=attrs)\n', (2934, 3119), True, 'from akg.utils import kernel_exec as utils\n'), ((3289, 3351), 'akg.utils.kernel_exec.mod_launch', 'utils.mod_launch', (['mod', '(labels, logits, output)'], {'expect': 'expect'}), '(mod, (labels, logits, output), expect=expect)\n', (3305, 3351), True, 'from akg.utils import kernel_exec as utils\n'), ((3373, 3438), 'base.get_rtol_atol', 'get_rtol_atol', (['"""sparse_softmax_cross_entropy_with_logits"""', 'dtype2'], {}), "('sparse_softmax_cross_entropy_with_logits', dtype2)\n", (3386, 3438), False, 'from base import get_rtol_atol\n'), ((3461, 3513), 'tensorio.compare_tensor', 'compare_tensor', (['output', 'expect'], {'rtol': 
'rtol', 'atol': 'atol'}), '(output, expect, rtol=rtol, atol=atol)\n', (3475, 3513), False, 'from tensorio import compare_tensor\n'), ((946, 985), 'gen_random.random_gaussian', 'random_gaussian', (['shape2'], {'miu': '(0)', 'sigma': '(1)'}), '(shape2, miu=0, sigma=1)\n', (961, 985), False, 'from gen_random import random_gaussian\n'), ((1046, 1099), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'num_class', 'size': 'shape1'}), '(low=0, high=num_class, size=shape1)\n', (1063, 1099), True, 'import numpy as np\n'), ((1327, 1352), 'numpy.sum', 'np.sum', (['e'], {'axis': 'class_dim'}), '(e, axis=class_dim)\n', (1333, 1352), True, 'import numpy as np\n'), ((1392, 1412), 'numpy.zeros_like', 'np.zeros_like', (['probs'], {}), '(probs)\n', (1405, 1412), True, 'import numpy as np\n'), ((1452, 1473), 'numpy.arange', 'np.arange', (['batch_size'], {}), '(batch_size)\n', (1461, 1473), True, 'import numpy as np\n'), ((1718, 1731), 'numpy.mean', 'np.mean', (['cost'], {}), '(cost)\n', (1725, 1731), True, 'import numpy as np\n'), ((1845, 1868), 'numpy.divide', 'np.divide', (['bp', 'cost_num'], {}), '(bp, cost_num)\n', (1854, 1868), True, 'import numpy as np\n'), ((1236, 1267), 'numpy.amax', 'np.amax', (['logits'], {'axis': 'class_dim'}), '(logits, axis=class_dim)\n', (1243, 1267), True, 'import numpy as np\n'), ((1559, 1580), 'numpy.log', 'np.log', (['(probs + 1e-20)'], {}), '(probs + 1e-20)\n', (1565, 1580), True, 'import numpy as np\n'), ((1921, 1933), 'numpy.sum', 'np.sum', (['cost'], {}), '(cost)\n', (1927, 1933), True, 'import numpy as np\n')] |
"""
Benchmarks for argument dispatching and call overhead of ``@jit`` functions.
"""
import numpy as np
# Structured dtype used to exercise dispatching on record/structured arrays.
rec_dtype = np.dtype([('a', np.float64),
                      ('b', np.int32),
                      ('c', np.complex64),
                      ])

# One representative value per argument type whose dispatch cost we measure.
samples = {
    'bool': True,
    'int': 100000,
    'float': 0.5,
    'complex': 0.5 + 1.0j,
    'array_1d': np.zeros(10, dtype=np.int64),
    'array_3d': np.zeros(20, dtype=np.float64).reshape(2, 2, 5),
    'array_records': np.zeros(10, dtype=rec_dtype),
    'recarray': np.recarray(10, dtype=rec_dtype),
    'tuple': (0.5, 1.0j, ()),
    'record': np.empty(1, dtype=rec_dtype)[0],
    'bytearray': bytearray(3),
}
def setup():
    """
    Precompile jitted functions. This will register many specializations
    to choose from.

    Installs ``binary``, ``binary_pyobj`` and ``unary_default`` as module
    globals so the benchmark classes below can call them.
    """
    from numba import jit
    global binary, binary_pyobj, unary_default
    @jit(nopython=True)
    def binary(x, y):
        pass
    @jit(forceobj=True)
    def binary_pyobj(x, y):
        pass
    @jit(nopython=True)
    def unary_default(x=None):
        pass
    # Calling binary once per sample type compiles one specialization per
    # argument-type pair, so later dispatch has many candidates to pick from.
    for tp in samples.values():
        binary(tp, tp)
    binary_pyobj(object(), object())
    unary_default()
class NoPythonDispatch:
    """
    Time dispatching to a jitted function's specializations based on argument
    types.
    This stresses two things:
    - the typing of arguments (from argument value to typecode)
    - the selection of the best specialization amongst all the known ones
    """
    # We repeat 1000 times so as to make the overhead of benchmark launching
    # negligible.
    @classmethod
    def generate_benchmarks(cls, names):
        # Dynamically attach one time_dispatch_<name> method per sample type.
        for name in names:
            # The sample is bound as a default argument so each generated
            # benchmark captures its own value (avoids late-binding closures).
            def timefunc(self, arg=samples[name]):
                func = binary
                for i in range(1000):
                    func(arg, arg)
            timefunc.__name__ = "time_dispatch_" + name
            setattr(cls, timefunc.__name__, timefunc)
    def time_dispatch_defaults(self):
        unary_default()
# Materialize one benchmark method per entry in `samples` at import time.
NoPythonDispatch.generate_benchmarks(samples.keys())
class PyObjectDispatch:
    """Time dispatch of the object-mode (forceobj) jitted function."""
    def time_dispatch_pyobject(self):
        arg = object()
        for _ in range(1000):
            binary_pyobj(arg, arg)
| [
"numpy.empty",
"numpy.dtype",
"numpy.zeros",
"numpy.recarray",
"numba.jit"
] | [((120, 187), 'numpy.dtype', 'np.dtype', (["[('a', np.float64), ('b', np.int32), ('c', np.complex64)]"], {}), "([('a', np.float64), ('b', np.int32), ('c', np.complex64)])\n", (128, 187), True, 'import numpy as np\n'), ((368, 396), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': 'np.int64'}), '(10, dtype=np.int64)\n', (376, 396), True, 'import numpy as np\n'), ((484, 513), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': 'rec_dtype'}), '(10, dtype=rec_dtype)\n', (492, 513), True, 'import numpy as np\n'), ((531, 563), 'numpy.recarray', 'np.recarray', (['(10)'], {'dtype': 'rec_dtype'}), '(10, dtype=rec_dtype)\n', (542, 563), True, 'import numpy as np\n'), ((883, 901), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (886, 901), False, 'from numba import jit\n'), ((943, 961), 'numba.jit', 'jit', ([], {'forceobj': '(True)'}), '(forceobj=True)\n', (946, 961), False, 'from numba import jit\n'), ((1009, 1027), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (1012, 1027), False, 'from numba import jit\n'), ((609, 637), 'numpy.empty', 'np.empty', (['(1)'], {'dtype': 'rec_dtype'}), '(1, dtype=rec_dtype)\n', (617, 637), True, 'import numpy as np\n'), ((414, 444), 'numpy.zeros', 'np.zeros', (['(20)'], {'dtype': 'np.float64'}), '(20, dtype=np.float64)\n', (422, 444), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 <NAME> (Nagoya University)
# based on PyTorch implementation for WaveNet vocoder by <NAME> (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import division
from __future__ import print_function
import argparse
import multiprocessing as mp
import os
import sys
from distutils.util import strtobool
import logging
import numpy as np
from numpy.matlib import repmat
from scipy.interpolate import interp1d
#from scipy.io import wavfile
from scipy.signal import firwin
from scipy.signal import lfilter
from utils import find_files
from utils import read_txt
from utils import write_hdf5, read_hdf5
from multiprocessing import Array
import pysptk as ps
import pyworld as pw
#import librosa
import soundfile as sf
# Print full arrays when debugging feature values.
np.set_printoptions(threshold=np.inf)

# Default analysis settings (overridable via CLI flags below).
# NOTE(review): the first FS / MCEP_ALPHA assignments are immediately
# overridden by the second ones; only the last value of each takes effect.
FS = 22050
FS = 24000
#FS = 44100
#FS = 48000
SHIFTMS = 5.0       # frame shift in msec
MINF0 = 40          # F0 search floor in Hz
MAXF0 = 700         # F0 search ceiling in Hz
#MCEP_DIM = 34
MCEP_DIM = 49       # mel-cepstrum order
MCEP_ALPHA = 0.455
MCEP_ALPHA = 0.466  # frequency-warping factor for mel-cepstral analysis
#MCEP_ALPHA = 0.544
#MCEP_ALPHA = 0.554
FFTL = 1024         # FFT length for spectral analysis
LOWPASS_CUTOFF = 20   # Hz, for smoothing the continuous F0 contour
HIGHPASS_CUTOFF = 70  # Hz, for removing DC / rumble from input waveforms
OVERWRITE = True
def low_cut_filter(x, fs, cutoff=HIGHPASS_CUTOFF):
    """Apply a high-pass (low-cut) FIR filter to a waveform.

    Args:
        x (ndarray): waveform sequence
        fs (int): sampling frequency in Hz
        cutoff (float): cutoff frequency of the low-cut filter in Hz

    Return:
        (ndarray): low-cut filtered waveform sequence, same length as ``x``
    """
    # Normalise the cutoff by the Nyquist frequency for firwin.
    norm_cutoff = cutoff / (fs // 2)
    taps = firwin(255, norm_cutoff, pass_zero=False)
    return lfilter(taps, 1, x)
def analyze(wav, fs=FS, minf0=MINF0, maxf0=MAXF0, fperiod=SHIFTMS, fftl=FFTL, f0=None, time_axis=None):
    """WORLD analysis with a fixed (wide) F0 floor of 60 Hz.

    NOTE(review): ``minf0``/``maxf0`` are accepted but not used here --
    harvest is called with f0_floor=60.0 only; presumably kept for signature
    parity with ``analyze_range``. Confirm before relying on them.

    If ``f0`` and ``time_axis`` are supplied, the F0 estimation step is
    skipped and they are used directly.

    Returns:
        tuple: (time_axis, f0, spectral envelope, aperiodicity)
    """
    #f0_flr = pw.get_cheaptrick_f0_floor(fs, fftl)
    #logging.info(f0_flr)
    #fft_size = pw.get_cheaptrick_fft_size(fs, f0_flr)
    #logging.info(fft_size)
    #f0_flr = pw.get_cheaptrick_f0_floor(fs, fft_size)
    #logging.info(f0_flr)
    if f0 is None or time_axis is None:
        # Coarse F0 track from harvest, then refined by stonemask.
        _f0, time_axis = pw.harvest(wav, fs, f0_floor=60.0, frame_period=fperiod)
        f0 = pw.stonemask(wav, _f0, time_axis, fs)
    sp = pw.cheaptrick(wav, f0, time_axis, fs, fft_size=fftl)
    ap = pw.d4c(wav, f0, time_axis, fs, fft_size=fftl)
    return time_axis, f0, sp, ap
def analyze_range(wav, fs=FS, minf0=MINF0, maxf0=MAXF0, fperiod=SHIFTMS, fftl=FFTL, f0=None, time_axis=None):
    """WORLD analysis with an explicit F0 search range.

    Estimates F0 (harvest, refined by stonemask), the spectral envelope
    (cheaptrick) and aperiodicity (d4c). If ``f0`` and ``time_axis`` are
    supplied, the F0 estimation step is skipped.

    Returns:
        tuple: (time_axis, f0, spectral envelope, aperiodicity)
    """
    if f0 is None or time_axis is None:
        raw_f0, time_axis = pw.harvest(wav, fs, f0_floor=minf0, f0_ceil=maxf0,
                                       frame_period=fperiod)
        f0 = pw.stonemask(wav, raw_f0, time_axis, fs)
    envelope = pw.cheaptrick(wav, f0, time_axis, fs, fft_size=fftl)
    aperiodicity = pw.d4c(wav, f0, time_axis, fs, fft_size=fftl)
    return time_axis, f0, envelope, aperiodicity
#def read_wav(wav_file, cutoff=HIGHPASS_CUTOFF, fftl_ns=None):
def read_wav(wav_file, cutoff=HIGHPASS_CUTOFF):
    """Load a waveform and optionally remove low-frequency content.

    Args:
        wav_file (str): path to the wav file
        cutoff (int): low-cut cutoff frequency in Hz; 0 disables filtering

    Return:
        tuple: (sampling frequency, waveform ndarray)
    """
    waveform, fs = sf.read(wav_file)
    if cutoff != 0:
        waveform = low_cut_filter(waveform, fs, cutoff)
    return fs, waveform
def low_pass_filter(x, fs, cutoff=LOWPASS_CUTOFF, padding=True):
    """Apply a low-pass FIR filter with edge padding.

    Args:
        x (ndarray): waveform sequence
        fs (int): sampling frequency in Hz
        cutoff (float): cutoff frequency of the low-pass filter in Hz
        padding: unused -- edge padding is always applied; kept for
            backward compatibility (TODO confirm with callers)

    Return:
        (ndarray): low-pass filtered sequence, same length as ``x``
    """
    numtaps = 255
    # Normalise the cutoff by the Nyquist frequency for firwin.
    norm_cutoff = cutoff / (fs // 2)
    taps = firwin(numtaps, norm_cutoff)
    # Edge-pad so the filter transient does not distort the signal ends,
    # then trim back to the original length compensating the group delay.
    padded = np.pad(x, (numtaps, numtaps), 'edge')
    filtered = lfilter(taps, 1, padded)
    return filtered[numtaps + numtaps // 2: -numtaps // 2]
def convert_continuos_f0(f0):
    """CONVERT F0 TO CONTINUOUS F0

    Linearly interpolates over unvoiced (zero) frames so the returned F0
    contour is continuous, and returns the voiced/unvoiced mask.

    Fixes over the original: the caller's array is no longer mutated in
    place, and an all-unvoiced input returns a zero contour instead of
    raising IndexError on the empty voiced-frame index.

    Args:
        f0 (ndarray): original f0 sequence with the shape (T)

    Return:
        (ndarray): binary voiced(1)/unvoiced(0) mask with the shape (T)
        (ndarray): continuous f0 with the shape (T)
    """
    # get uv information as binary (1 = voiced frame)
    uv = np.float32(f0 != 0)
    # all-unvoiced input: nothing to interpolate
    if not np.any(f0 != 0):
        return uv, np.zeros_like(f0)
    # work on a copy so the head/tail padding does not mutate the caller's array
    f0 = np.array(f0, copy=True)
    # get start and end of f0
    start_f0 = f0[f0 != 0][0]
    end_f0 = f0[f0 != 0][-1]
    # padding start and end of f0 sequence with the first/last voiced value
    # so the interpolation domain covers the full frame range
    start_idx = np.where(f0 == start_f0)[0][0]
    end_idx = np.where(f0 == end_f0)[0][-1]
    f0[:start_idx] = start_f0
    f0[end_idx:] = end_f0
    # get non-zero frame index
    nz_frames = np.where(f0 != 0)[0]
    # perform linear interpolation over the remaining unvoiced frames
    f = interp1d(nz_frames, f0[nz_frames])
    cont_f0 = f(np.arange(0, f0.shape[0]))
    return uv, cont_f0
def main():
parser = argparse.ArgumentParser(
description="making feature file argsurations.")
parser.add_argument("--expdir", required=True,
type=str, help="directory to save the log")
parser.add_argument(
"--waveforms", default=None,
help="directory or list of filename of input wavfile")
parser.add_argument(
"--hdf5dir", default=None,
help="directory to save hdf5")
parser.add_argument(
"--wavdir", default=None,
help="directory to save of preprocessed wav file")
parser.add_argument(
"--wavanasyndir", default=None,
help="directory to save of preprocessed wav file")
parser.add_argument(
"--fs", default=FS,
type=int, help="Sampling frequency")
parser.add_argument(
"--shiftms", default=SHIFTMS,
type=float, help="Frame shift in msec")
parser.add_argument(
"--minf0", default=MINF0,
type=int, help="minimum f0")
parser.add_argument(
"--maxf0", default=MAXF0,
type=int, help="maximum f0")
parser.add_argument(
"--mcep_dim", default=MCEP_DIM,
type=int, help="Dimension of mel cepstrum")
parser.add_argument(
"--mcep_alpha", default=MCEP_ALPHA,
type=float, help="Alpha of mel cepstrum")
parser.add_argument(
"--fftl", default=FFTL,
type=int, help="FFT length")
parser.add_argument(
"--fftl_ns", default=None,
type=int, help="FFT length for noise shaped waveforms")
parser.add_argument(
"--highpass_cutoff", default=HIGHPASS_CUTOFF,
type=int, help="Cut off frequency in lowpass filter")
parser.add_argument("--init", default=False,
type=strtobool, help="flag for computing stats of dtw-ed feature")
parser.add_argument(
"--n_jobs", default=10,
type=int, help="number of parallel jobs")
parser.add_argument(
"--verbose", default=1,
type=int, help="log message level")
args = parser.parse_args()
# set log level
if args.verbose == 1:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
filename=args.expdir + "/feature_extract.log")
logging.getLogger().addHandler(logging.StreamHandler())
elif args.verbose > 1:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
filename=args.expdir + "/feature_extract.log")
logging.getLogger().addHandler(logging.StreamHandler())
else:
logging.basicConfig(level=logging.WARN,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
filename=args.expdir + "/feature_extract.log")
logging.getLogger().addHandler(logging.StreamHandler())
logging.warn("logging is disabled.")
# read list
if os.path.isdir(args.waveforms):
file_list = sorted(find_files(args.waveforms, "*.wav"))
else:
file_list = read_txt(args.waveforms)
# check directory existence
if (args.wavdir is not None) and (not os.path.exists(args.wavdir)):
os.makedirs(args.wavdir)
if (args.wavanasyndir is not None) and (not os.path.exists(args.wavanasyndir)):
os.makedirs(args.wavanasyndir)
if not os.path.exists(args.hdf5dir):
os.makedirs(args.hdf5dir)
def feature_extract(wav_list, arr):
n_wav = len(wav_list)
n_sample = 0
n_frame = 0
count = 1
max_frame = 0
for wav_name in wav_list:
# load wavfile and highpass-filter
fs, x = read_wav(wav_name, cutoff=args.highpass_cutoff)
n_sample += x.shape[0]
logging.info(wav_name+" "+str(x.shape[0])+" "+str(n_sample)+" "+str(count))
# check sampling frequency
if not fs == args.fs:
logging.debug("ERROR: sampling frequency is not matched.")
sys.exit(1)
hdf5name = args.hdf5dir + "/" + os.path.basename(wav_name).replace(".wav", ".h5")
logging.info(hdf5name)
if not args.init:
_, f0, spc, ap = analyze_range(x, fs=fs, minf0=args.minf0, maxf0=args.maxf0, \
fperiod=args.shiftms, fftl=args.fftl)
# concatenate
uv, cont_f0 = convert_continuos_f0(np.array(f0))
cont_f0_lpf = low_pass_filter(cont_f0, int(1.0 / (args.shiftms * 0.001)), cutoff=20)
codeap = pw.code_aperiodicity(ap, fs)
#logging.info(codeap)
logging.info(codeap.shape)
mcep = ps.sp2mc(spc, args.mcep_dim, args.mcep_alpha)
cont_f0_lpf = np.expand_dims(cont_f0_lpf, axis=-1)
uv = np.expand_dims(uv, axis=-1)
log_contf0_lpf = np.log(cont_f0_lpf)
feats_lf0 = np.concatenate([uv, log_contf0_lpf, codeap, mcep], axis=1)
logging.info(feats_lf0.shape)
write_hdf5(hdf5name, "/feat_org_lf0", feats_lf0)
n_frame += feats_lf0.shape[0]
if max_frame < feats_lf0.shape[0]:
max_frame = feats_lf0.shape[0]
# overwrite wav file
if args.highpass_cutoff != 0:
#wavfile.write(args.wavdir + "/" + os.path.basename(wav_name), fs, np.int16(x))
sf.write(args.wavdir + "/" + os.path.basename(wav_name), x, fs, 'PCM_16')
wavpath = args.wavanasyndir + "/" + os.path.basename(wav_name)
logging.info(wavpath)
sp_rec = ps.mc2sp(mcep, args.mcep_alpha, args.fftl)
#wav = np.clip(pw.synthesize(f0, sp_rec, ap, fs, frame_period=args.shiftms), -32768, 32767)
wav = np.clip(pw.synthesize(f0, sp_rec, ap, fs, frame_period=args.shiftms), -1, 1)
#wavfile.write(wavpath, fs, np.int16(wav))
sf.write(wavpath, wav, fs, 'PCM_16')
else:
_, f0, _, _ = analyze(x, fs=fs, fperiod=args.shiftms, fftl=args.fftl)
write_hdf5(hdf5name, "/f0", f0)
n_frame += f0.shape[0]
if max_frame < f0.shape[0]:
max_frame = f0.shape[0]
count += 1
arr[0] += n_wav
arr[1] += n_sample
arr[2] += n_frame
if (n_wav > 0):
logging.info(str(arr[0])+" "+str(n_wav)+" "+str(arr[1])+" "+str(n_sample/n_wav)+" "+str(arr[2])\
+" "+str(n_frame/n_wav)+" max_frame = "+str(max_frame))
# divie list
file_lists = np.array_split(file_list, args.n_jobs)
file_lists = [f_list.tolist() for f_list in file_lists]
# multi processing
processes = []
arr = mp.Array('d', 3)
#logging.info(arr[:])
for f in file_lists:
p = mp.Process(target=feature_extract, args=(f,arr))
p.start()
processes.append(p)
# wait for all process
for p in processes:
p.join()
logging.info(str(arr[0])+" "+str(arr[1])+" "+str(arr[1]/arr[0])+" "+str(arr[2])+" "+str(arr[2]/arr[0]))
if __name__ == "__main__":
main()
| [
"utils.write_hdf5",
"argparse.ArgumentParser",
"utils.find_files",
"scipy.signal.firwin",
"numpy.arange",
"scipy.interpolate.interp1d",
"numpy.pad",
"numpy.set_printoptions",
"pyworld.synthesize",
"scipy.signal.lfilter",
"os.path.exists",
"soundfile.write",
"pyworld.d4c",
"soundfile.read",... | [((825, 862), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (844, 862), True, 'import numpy as np\n'), ((1528, 1569), 'scipy.signal.firwin', 'firwin', (['(255)', 'norm_cutoff'], {'pass_zero': '(False)'}), '(255, norm_cutoff, pass_zero=False)\n', (1534, 1569), False, 'from scipy.signal import firwin\n'), ((1582, 1600), 'scipy.signal.lfilter', 'lfilter', (['fil', '(1)', 'x'], {}), '(fil, 1, x)\n', (1589, 1600), False, 'from scipy.signal import lfilter\n'), ((2148, 2200), 'pyworld.cheaptrick', 'pw.cheaptrick', (['wav', 'f0', 'time_axis', 'fs'], {'fft_size': 'fftl'}), '(wav, f0, time_axis, fs, fft_size=fftl)\n', (2161, 2200), True, 'import pyworld as pw\n'), ((2210, 2255), 'pyworld.d4c', 'pw.d4c', (['wav', 'f0', 'time_axis', 'fs'], {'fft_size': 'fftl'}), '(wav, f0, time_axis, fs, fft_size=fftl)\n', (2216, 2255), True, 'import pyworld as pw\n'), ((3197, 3249), 'pyworld.cheaptrick', 'pw.cheaptrick', (['wav', 'f0', 'time_axis', 'fs'], {'fft_size': 'fftl'}), '(wav, f0, time_axis, fs, fft_size=fftl)\n', (3210, 3249), True, 'import pyworld as pw\n'), ((3282, 3327), 'pyworld.d4c', 'pw.d4c', (['wav', 'f0', 'time_axis', 'fs'], {'fft_size': 'fftl'}), '(wav, f0, time_axis, fs, fft_size=fftl)\n', (3288, 3327), True, 'import pyworld as pw\n'), ((3605, 3622), 'soundfile.read', 'sf.read', (['wav_file'], {}), '(wav_file)\n', (3612, 3622), True, 'import soundfile as sf\n'), ((4181, 4209), 'scipy.signal.firwin', 'firwin', (['numtaps', 'norm_cutoff'], {}), '(numtaps, norm_cutoff)\n', (4187, 4209), False, 'from scipy.signal import firwin\n'), ((4222, 4259), 'numpy.pad', 'np.pad', (['x', '(numtaps, numtaps)', '"""edge"""'], {}), "(x, (numtaps, numtaps), 'edge')\n", (4228, 4259), True, 'import numpy as np\n'), ((4272, 4294), 'scipy.signal.lfilter', 'lfilter', (['fil', '(1)', 'x_pad'], {}), '(fil, 1, x_pad)\n', (4279, 4294), False, 'from scipy.signal import lfilter\n'), ((4627, 4646), 'numpy.float32', 
'np.float32', (['(f0 != 0)'], {}), '(f0 != 0)\n', (4637, 4646), True, 'import numpy as np\n'), ((5041, 5075), 'scipy.interpolate.interp1d', 'interp1d', (['nz_frames', 'f0[nz_frames]'], {}), '(nz_frames, f0[nz_frames])\n', (5049, 5075), False, 'from scipy.interpolate import interp1d\n'), ((5170, 5242), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""making feature file argsurations."""'}), "(description='making feature file argsurations.')\n", (5193, 5242), False, 'import argparse\n'), ((8371, 8400), 'os.path.isdir', 'os.path.isdir', (['args.waveforms'], {}), '(args.waveforms)\n', (8384, 8400), False, 'import os\n'), ((12126, 12164), 'numpy.array_split', 'np.array_split', (['file_list', 'args.n_jobs'], {}), '(file_list, args.n_jobs)\n', (12140, 12164), True, 'import numpy as np\n'), ((12278, 12294), 'multiprocessing.Array', 'mp.Array', (['"""d"""', '(3)'], {}), "('d', 3)\n", (12286, 12294), True, 'import multiprocessing as mp\n'), ((2031, 2087), 'pyworld.harvest', 'pw.harvest', (['wav', 'fs'], {'f0_floor': '(60.0)', 'frame_period': 'fperiod'}), '(wav, fs, f0_floor=60.0, frame_period=fperiod)\n', (2041, 2087), True, 'import pyworld as pw\n'), ((2101, 2138), 'pyworld.stonemask', 'pw.stonemask', (['wav', '_f0', 'time_axis', 'fs'], {}), '(wav, _f0, time_axis, fs)\n', (2113, 2138), True, 'import pyworld as pw\n'), ((2567, 2639), 'pyworld.harvest', 'pw.harvest', (['wav', 'fs'], {'f0_floor': 'minf0', 'f0_ceil': 'maxf0', 'frame_period': 'fperiod'}), '(wav, fs, f0_floor=minf0, f0_ceil=maxf0, frame_period=fperiod)\n', (2577, 2639), True, 'import pyworld as pw\n'), ((3025, 3062), 'pyworld.stonemask', 'pw.stonemask', (['wav', '_f0', 'time_axis', 'fs'], {}), '(wav, _f0, time_axis, fs)\n', (3037, 3062), True, 'import pyworld as pw\n'), ((4976, 4993), 'numpy.where', 'np.where', (['(f0 != 0)'], {}), '(f0 != 0)\n', (4984, 4993), True, 'import numpy as np\n'), ((5092, 5117), 'numpy.arange', 'np.arange', (['(0)', 'f0.shape[0]'], {}), '(0, 
f0.shape[0])\n', (5101, 5117), True, 'import numpy as np\n'), ((7237, 7434), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"""', 'datefmt': '"""%m/%d/%Y %I:%M:%S"""', 'filename': "(args.expdir + '/feature_extract.log')"}), "(level=logging.INFO, format=\n '%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S', filename=args.expdir + '/feature_extract.log')\n", (7256, 7434), False, 'import logging\n'), ((8496, 8520), 'utils.read_txt', 'read_txt', (['args.waveforms'], {}), '(args.waveforms)\n', (8504, 8520), False, 'from utils import read_txt\n'), ((8634, 8658), 'os.makedirs', 'os.makedirs', (['args.wavdir'], {}), '(args.wavdir)\n', (8645, 8658), False, 'import os\n'), ((8751, 8781), 'os.makedirs', 'os.makedirs', (['args.wavanasyndir'], {}), '(args.wavanasyndir)\n', (8762, 8781), False, 'import os\n'), ((8793, 8821), 'os.path.exists', 'os.path.exists', (['args.hdf5dir'], {}), '(args.hdf5dir)\n', (8807, 8821), False, 'import os\n'), ((8831, 8856), 'os.makedirs', 'os.makedirs', (['args.hdf5dir'], {}), '(args.hdf5dir)\n', (8842, 8856), False, 'import os\n'), ((12358, 12407), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'feature_extract', 'args': '(f, arr)'}), '(target=feature_extract, args=(f, arr))\n', (12368, 12407), True, 'import multiprocessing as mp\n'), ((4797, 4821), 'numpy.where', 'np.where', (['(f0 == start_f0)'], {}), '(f0 == start_f0)\n', (4805, 4821), True, 'import numpy as np\n'), ((4842, 4864), 'numpy.where', 'np.where', (['(f0 == end_f0)'], {}), '(f0 == end_f0)\n', (4850, 4864), True, 'import numpy as np\n'), ((7549, 7572), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (7570, 7572), False, 'import logging\n'), ((7609, 7807), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: 
%(message)s"""', 'datefmt': '"""%m/%d/%Y %I:%M:%S"""', 'filename': "(args.expdir + '/feature_extract.log')"}), "(level=logging.DEBUG, format=\n '%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S', filename=args.expdir + '/feature_extract.log')\n", (7628, 7807), False, 'import logging\n'), ((7965, 8162), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.WARN', 'format': '"""%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"""', 'datefmt': '"""%m/%d/%Y %I:%M:%S"""', 'filename': "(args.expdir + '/feature_extract.log')"}), "(level=logging.WARN, format=\n '%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S', filename=args.expdir + '/feature_extract.log')\n", (7984, 8162), False, 'import logging\n'), ((8310, 8346), 'logging.warn', 'logging.warn', (['"""logging is disabled."""'], {}), "('logging is disabled.')\n", (8322, 8346), False, 'import logging\n'), ((8429, 8464), 'utils.find_files', 'find_files', (['args.waveforms', '"""*.wav"""'], {}), "(args.waveforms, '*.wav')\n", (8439, 8464), False, 'from utils import find_files\n'), ((8596, 8623), 'os.path.exists', 'os.path.exists', (['args.wavdir'], {}), '(args.wavdir)\n', (8610, 8623), False, 'import os\n'), ((8707, 8740), 'os.path.exists', 'os.path.exists', (['args.wavanasyndir'], {}), '(args.wavanasyndir)\n', (8721, 8740), False, 'import os\n'), ((9565, 9587), 'logging.info', 'logging.info', (['hdf5name'], {}), '(hdf5name)\n', (9577, 9587), False, 'import logging\n'), ((7518, 7537), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (7535, 7537), False, 'import logging\n'), ((7922, 7945), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (7943, 7945), False, 'import logging\n'), ((8277, 8300), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (8298, 8300), False, 'import logging\n'), ((9371, 9429), 'logging.debug', 'logging.debug', (['"""ERROR: sampling 
frequency is not matched."""'], {}), "('ERROR: sampling frequency is not matched.')\n", (9384, 9429), False, 'import logging\n'), ((9446, 9457), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9454, 9457), False, 'import sys\n'), ((10017, 10045), 'pyworld.code_aperiodicity', 'pw.code_aperiodicity', (['ap', 'fs'], {}), '(ap, fs)\n', (10037, 10045), True, 'import pyworld as pw\n'), ((10100, 10126), 'logging.info', 'logging.info', (['codeap.shape'], {}), '(codeap.shape)\n', (10112, 10126), False, 'import logging\n'), ((10150, 10195), 'pysptk.sp2mc', 'ps.sp2mc', (['spc', 'args.mcep_dim', 'args.mcep_alpha'], {}), '(spc, args.mcep_dim, args.mcep_alpha)\n', (10158, 10195), True, 'import pysptk as ps\n'), ((10226, 10262), 'numpy.expand_dims', 'np.expand_dims', (['cont_f0_lpf'], {'axis': '(-1)'}), '(cont_f0_lpf, axis=-1)\n', (10240, 10262), True, 'import numpy as np\n'), ((10284, 10311), 'numpy.expand_dims', 'np.expand_dims', (['uv'], {'axis': '(-1)'}), '(uv, axis=-1)\n', (10298, 10311), True, 'import numpy as np\n'), ((10345, 10364), 'numpy.log', 'np.log', (['cont_f0_lpf'], {}), '(cont_f0_lpf)\n', (10351, 10364), True, 'import numpy as np\n'), ((10393, 10451), 'numpy.concatenate', 'np.concatenate', (['[uv, log_contf0_lpf, codeap, mcep]'], {'axis': '(1)'}), '([uv, log_contf0_lpf, codeap, mcep], axis=1)\n', (10407, 10451), True, 'import numpy as np\n'), ((10468, 10497), 'logging.info', 'logging.info', (['feats_lf0.shape'], {}), '(feats_lf0.shape)\n', (10480, 10497), False, 'import logging\n'), ((10515, 10563), 'utils.write_hdf5', 'write_hdf5', (['hdf5name', '"""/feat_org_lf0"""', 'feats_lf0'], {}), "(hdf5name, '/feat_org_lf0', feats_lf0)\n", (10525, 10563), False, 'from utils import write_hdf5, read_hdf5\n'), ((11085, 11106), 'logging.info', 'logging.info', (['wavpath'], {}), '(wavpath)\n', (11097, 11106), False, 'import logging\n'), ((11132, 11174), 'pysptk.mc2sp', 'ps.mc2sp', (['mcep', 'args.mcep_alpha', 'args.fftl'], {}), '(mcep, args.mcep_alpha, args.fftl)\n', (11140, 
11174), True, 'import pysptk as ps\n'), ((11457, 11493), 'soundfile.write', 'sf.write', (['wavpath', 'wav', 'fs', '"""PCM_16"""'], {}), "(wavpath, wav, fs, 'PCM_16')\n", (11465, 11493), True, 'import soundfile as sf\n'), ((11614, 11645), 'utils.write_hdf5', 'write_hdf5', (['hdf5name', '"""/f0"""', 'f0'], {}), "(hdf5name, '/f0', f0)\n", (11624, 11645), False, 'from utils import write_hdf5, read_hdf5\n'), ((7891, 7910), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (7908, 7910), False, 'import logging\n'), ((8246, 8265), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (8263, 8265), False, 'import logging\n'), ((9877, 9889), 'numpy.array', 'np.array', (['f0'], {}), '(f0)\n', (9885, 9889), True, 'import numpy as np\n'), ((11042, 11068), 'os.path.basename', 'os.path.basename', (['wav_name'], {}), '(wav_name)\n', (11058, 11068), False, 'import os\n'), ((11313, 11373), 'pyworld.synthesize', 'pw.synthesize', (['f0', 'sp_rec', 'ap', 'fs'], {'frame_period': 'args.shiftms'}), '(f0, sp_rec, ap, fs, frame_period=args.shiftms)\n', (11326, 11373), True, 'import pyworld as pw\n'), ((9503, 9529), 'os.path.basename', 'os.path.basename', (['wav_name'], {}), '(wav_name)\n', (9519, 9529), False, 'import os\n'), ((10945, 10971), 'os.path.basename', 'os.path.basename', (['wav_name'], {}), '(wav_name)\n', (10961, 10971), False, 'import os\n')] |
from __future__ import print_function
import sys, os
sys.path.append('../')
import torch.utils.data as data
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import pickle
import settings
import time
dates = [];
dates.append('2012-01-08')
dates.append('2012-01-15')
dates.append('2012-01-22')
dates.append('2012-02-02')
dates.append('2012-02-04')
dates.append('2012-02-05')
dates.append('2012-02-12')
dates.append('2012-02-18')
dates.append('2012-02-19')
dates.append('2012-03-17')
dates.append('2012-03-25')
dates.append('2012-03-31')
dates.append('2012-04-29')
dates.append('2012-05-11')
dates.append('2012-05-26')
dates.append('2012-06-15')
dates.append('2012-08-04')
dates.append('2012-08-20')
dates.append('2012-09-28')
dates.append('2012-10-28')
dates.append('2012-11-04')
dates.append('2012-11-16')
dates.append('2012-11-17')
dates.append('2012-12-01')
dates.append('2013-01-10')
dates.append('2013-02-23')
dates.append('2013-04-05')
dates = ['2012-01-22']
path_gps = "data/nclt/sensor_data/%s/gps.csv"
path_gps_rtk = "data/nclt/sensor_data/%s/gps_rtk.csv"
path_gps_rtk_err = "data/nclt/sensor_data/%s/gps_rtk_err.csv"
path_gt = "data/nclt/ground_truth/groundtruth_%s.csv"
compact_path = "temp/nclt_%s.pickle"
class NCLT(data.Dataset):
def __init__(self, date, partition='train', ratio=1.0):
self.partition = partition
self.ratio = ratio
if not os.path.exists(compact_path % date):
print("Loading NCLT dataset ...")
self.gps, self.gps_rtk, self.gps_rtk_err, self.gt = self.__load_data(date)
self.__process_data()
self.dump(compact_path % date, [self.gps, self.gps_rtk, self.gps_rtk_err, self.gt])
else:
[self.gps, self.gps_rtk, self.gps_rtk_err, self.gt] = self.load(compact_path % date)
if self.partition == 'train':
indexes = [1, 3]
elif self.partition == 'val':
indexes = [0, 2]
elif self.partition == 'test':
indexes = [4, 5, 6]
else:
raise Exception('Wrong partition')
self.gps = [self.gps[i].astype(np.float32) for i in indexes]
self.gps_rtk = [self.gps_rtk[i].astype(np.float32) for i in indexes]
self.gt = [self.gt[i].astype(np.float32) for i in indexes]
self.cut_data()
print("NCLT %s loaded: %d samples " % (partition, sum([x.shape[0] for x in self.gps_rtk])))
self.operators_b = [self.__buildoperators_sparse(self.gps[i].shape[0]) for i in range(len(self.gps))]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (state, meas) where target is index of the target class.
"""
x0, P0 = self.__pos2x0(self.gps_rtk[index][0, 1:].astype(np.float32))
return self.gt[index][:, 0], self.gt[index][:, 1:], self.gps_rtk[index][:, 1:], x0, P0, self.operators_b[index]
def cut_data(self):
self.gps = [cut_array(e, self.ratio) for e in self.gps]
self.gps_rtk = [cut_array(e, self.ratio) for e in self.gps_rtk]
self.gt = [cut_array(e, self.ratio) for e in self.gt]
def __pos2x0(self, pos):
if settings.x0_v.shape[0] == 4:
x0 = np.zeros(4).astype(np.float32)
x0[0] = pos[0]
x0[2] = pos[1]
P0 = np.eye(4)*1
else:
x0 = np.zeros(6).astype(np.float32)
x0[0] = pos[0]
x0[3] = pos[1]
P0 = np.eye(6)*1
return x0, P0
def dump(self, path, object):
if not os.path.exists('temp'):
os.makedirs('temp')
with open(path, 'wb') as f:
# Pickle the 'data' dictionary using the highest protocol available.
pickle.dump(object, f, pickle.HIGHEST_PROTOCOL)
def load(self, path):
with open(path, 'rb') as f:
# The protocol version used is detected automatically, so we do not
# have to specify it.
return pickle.load(f)
def __len__(self):
return len(self.gt)
def total_len(self):
total = 0
for arr in self.gt:
total += arr.shape[0]
return total
def _generate_sample(self, seed):
np.random.seed(seed)
if self.acceleration:
return simulate_system(create_model_parameters_a, K=self.K, x0=self.x0)
else:
return simulate_system(create_model_parameters_v, K=self.K, x0=self.x0)
def __buildoperators_sparse_old(self, nn=20):
# Identity
i = torch.LongTensor([[i, i] for i in range(nn)])
v = torch.FloatTensor([1 for i in range(nn)])
I = torch.sparse.FloatTensor(i.t(), v)
#Message right
i = torch.LongTensor([[i, i+1] for i in range(nn-1)] + [[nn-1, nn-1]])
v = torch.FloatTensor([1 for i in range(nn-1)] + [0])
mr = torch.sparse.FloatTensor(i.t(), v)
#Message left
i = torch.LongTensor([[0, nn-1]] + [[i+1, i] for i in range(nn-1)])
v = torch.FloatTensor([0] + [1 for i in range(nn-1)])
ml = torch.sparse.FloatTensor(i.t(), v)
return [I, mr, ml]
def __buildoperators_sparse(self, nn=20):
# Message right to left
m_left_r = []
m_left_c = []
m_right_r = []
m_right_c = []
m_up_r = []
m_up_c = []
for i in range(nn - 1):
m_left_r.append(i)
m_left_c.append((i + 1))
m_right_r.append(i + 1)
m_right_c.append((i))
for i in range(nn):
m_up_r.append(i)
m_up_c.append(i + nn)
m_left = [torch.LongTensor(m_left_r), torch.LongTensor(m_left_c)]
m_right = [torch.LongTensor(m_right_r), torch.LongTensor(m_right_c)]
m_up = [torch.LongTensor(m_up_r), torch.LongTensor(m_up_c)]
return {"m_left": m_left, "m_right": m_right, "m_up": m_up}
def __load_gps(self, path, date):
df = pd.read_csv(path % date)
df = df.iloc[:, [0, 3, 4]]
return df.values
def __load_gps_err(self, date):
df = pd.read_csv(path_gps % date)
df = df.iloc[:, 6]
return df.values
def __load_gt(self, date):
df = pd.read_csv(path_gt % date)
gt = df.iloc[:, [0, 2, 1]].values
gt_err = df.iloc[:, [5, 4]].values
return gt, gt_err
def __load_gps_rtk_err(self, date):
df = pd.read_csv(path_gps_rtk_err % date)
return df.values
def __compute_gps_err(self, gps, gt):
return np.mean(np.square(gps - gt), axis=1)
def __load_data(self, date):
"We use the timestamp of gps_rtk which has the lowest frequency 1 Hz"
gps = self.__load_gps(path_gps, date)
gps_rtk = self.__load_gps(path_gps_rtk, date)
gps_rtk_err = self.__load_gps_rtk_err(date)
gt, _ = self.__load_gt(date)
self.lat0 = gps_rtk[0, 1]
self.lng0 = gps_rtk[0, 2]
self.bias = [gt[0, 1], gt[0, 2]]
gps_rtk_dec = self.__decompose(gps_rtk, date)
gps_rtk_err_dec = self.__decompose(gps_rtk_err, date)
gps_ar = []
gt_ar = []
gps_rtk_ar, gps_rtk_err_ar = [], []
for gps_rtk_i, gps_rtk_err_i in zip(gps_rtk_dec, gps_rtk_err_dec):
idxs = self.__filer_freq(gps_rtk_i[:, 0], f=1.)
gps_rtk_ar.append(gps_rtk_i[idxs, :])
gps_rtk_err_ar.append(gps_rtk_err_i[idxs, :])
#Matching with GT
idxs_gt = self.__match_tt(gps_rtk_ar[-1][:, 0], gt[:, 0])
gt_ar.append(gt[idxs_gt, :])
#Matching with gps
idxs = self.__match_tt(gps_rtk_ar[-1][:, 0], gps[:, 0])
gps_ar.append(gps[idxs, :])
return gps_ar, gps_rtk_ar, gps_rtk_err_ar, gt_ar
def __decompose(self, data, date):
if date == '2012-01-22':
return [data[100:2054], data[2054:4009], data[4147:6400], data[6400:8890], data[9103:10856], data[11113:12608],
data[12733:13525]]#, [0, 4147, 9103, 11113, 12733]
else:
return data
def concatenate(self, arrays):
return np.concatenate(arrays, axis=0)
def __process_data(self):
'''
lat0 = self.gps_rtk[0][0, 1]
lng0 = self.gps_rtk[0][0, 2]
bias = [self.gt[0][0, 1], self.gt[0][0, 2]]
'''
for i in range(len(self.gps_rtk)):
self.gps_rtk[i][:, 1:] = polar2cartesian(self.gps_rtk[i][:, 1], self.gps_rtk[i][:, 2], self.lat0,
self.lng0)
self.gps[i][:, 1:] = polar2cartesian(self.gps[i][:, 1], self.gps[i][:, 2], self.lat0,
self.lng0)
self.gt[i][:, 1:] = remove_bias(self.gt[i][:, 1:], self.bias)
def __match_tt(self, tt1, tt2):
print("\tMatching gps and gt timestamps")
arr_idx = []
for i, ti in enumerate(tt1):
diff = np.abs(tt2 - ti)
min_idx = np.argmin(diff)
arr_idx.append(min_idx)
return arr_idx
def _match_gt_step1(self, gps, gps_err, gt, margin=5):
gt_aux = gt.copy()
min_err = 1e10
min_x, min_y = 0, 0
for x in np.linspace(-margin, margin, 200):
for y in np.linspace(-margin, margin, 200):
gt_aux[:, 0] = gt[:, 0] + x
gt_aux[:, 1] = gt[:, 1] + y
err = mse(gps, gps_err, gt_aux)
if err < min_err:
min_err = err
min_x = x
min_y = y
#print("x: %.4f \t y:%.4f \t err:%.4f" % (min_x, min_y, err))
print(err)
print("Fixing GT bias x: %.4f \t y:%.4f \t error:%.4f" % (min_x, min_y, min_err))
return (min_x, min_y)
def _match_gt_step2(self, gt, err):
(min_x, min_y) = err
gt[:, 0] = gt[:, 0] + min_x
gt[:, 1] = gt[:, 1] + min_y
return gt
def __filer_freq(self, ts, f=1., window=5):
arr_idx = []
last_id = 0
arr_idx.append(last_id)
check = False
while last_id < len(ts) - window:
rel_j = []
for j in range(1, window):
rel_j.append(np.abs(f - (ts[last_id+j] - ts[last_id])/1000000))
last_id = last_id + 1 + np.argmin(rel_j)
min_val = np.min(rel_j)
if min_val > 0.05:
check = True
arr_idx.append(last_id)
if check:
print("\tWarning: Not all frequencies are %.3fHz" % f)
print("\tFiltering finished!")
return arr_idx
def mse(gps, gps_err, gt, th=2):
error = np.mean(np.square(gps - gt), axis=1)
mapping = (gps_err < th).astype(np.float32)
return np.mean(error*mapping)
def polar2cartesian(lat, lng, lat0, lng0):
dLat = lat - lat0
dLng = lng - lng0
r = 6400000 # approx. radius of earth (m)
x = r * np.cos(lat0) * np.sin(dLng)
y = r * np.sin(dLat)
return np.concatenate((np.expand_dims(x, 1), np.expand_dims(y, 1)), 1)
def remove_bias(vector, bias):
for i in range(vector.shape[1]):
vector[:, i] = vector[:, i] - bias[i]
return vector
if __name__ == '__main__':
for date in dates:
dataset = NCLT('2012-01-22', partition='train')
dataset = NCLT('2012-01-22', partition='val')
dataset = NCLT('2012-01-22', partition='test')
def cut_array(array, ratio):
length = len(array)
return array[0:int(round(ratio*length))] | [
"pickle.dump",
"numpy.random.seed",
"numpy.abs",
"pandas.read_csv",
"numpy.argmin",
"numpy.mean",
"numpy.sin",
"pickle.load",
"sys.path.append",
"os.path.exists",
"numpy.linspace",
"numpy.square",
"numpy.min",
"numpy.cos",
"numpy.concatenate",
"os.makedirs",
"torch.LongTensor",
"nu... | [((53, 75), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (68, 75), False, 'import sys, os\n'), ((10785, 10809), 'numpy.mean', 'np.mean', (['(error * mapping)'], {}), '(error * mapping)\n', (10792, 10809), True, 'import numpy as np\n'), ((4254, 4274), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4268, 4274), True, 'import numpy as np\n'), ((5983, 6007), 'pandas.read_csv', 'pd.read_csv', (['(path % date)'], {}), '(path % date)\n', (5994, 6007), True, 'import pandas as pd\n'), ((6118, 6146), 'pandas.read_csv', 'pd.read_csv', (['(path_gps % date)'], {}), '(path_gps % date)\n', (6129, 6146), True, 'import pandas as pd\n'), ((6244, 6271), 'pandas.read_csv', 'pd.read_csv', (['(path_gt % date)'], {}), '(path_gt % date)\n', (6255, 6271), True, 'import pandas as pd\n'), ((6437, 6473), 'pandas.read_csv', 'pd.read_csv', (['(path_gps_rtk_err % date)'], {}), '(path_gps_rtk_err % date)\n', (6448, 6473), True, 'import pandas as pd\n'), ((8148, 8178), 'numpy.concatenate', 'np.concatenate', (['arrays'], {'axis': '(0)'}), '(arrays, axis=0)\n', (8162, 8178), True, 'import numpy as np\n'), ((9244, 9277), 'numpy.linspace', 'np.linspace', (['(-margin)', 'margin', '(200)'], {}), '(-margin, margin, 200)\n', (9255, 9277), True, 'import numpy as np\n'), ((10697, 10716), 'numpy.square', 'np.square', (['(gps - gt)'], {}), '(gps - gt)\n', (10706, 10716), True, 'import numpy as np\n'), ((10971, 10983), 'numpy.sin', 'np.sin', (['dLng'], {}), '(dLng)\n', (10977, 10983), True, 'import numpy as np\n'), ((10996, 11008), 'numpy.sin', 'np.sin', (['dLat'], {}), '(dLat)\n', (11002, 11008), True, 'import numpy as np\n'), ((1415, 1450), 'os.path.exists', 'os.path.exists', (['(compact_path % date)'], {}), '(compact_path % date)\n', (1429, 1450), False, 'import sys, os\n'), ((3584, 3606), 'os.path.exists', 'os.path.exists', (['"""temp"""'], {}), "('temp')\n", (3598, 3606), False, 'import sys, os\n'), ((3620, 3639), 'os.makedirs', 'os.makedirs', 
(['"""temp"""'], {}), "('temp')\n", (3631, 3639), False, 'import sys, os\n'), ((3769, 3816), 'pickle.dump', 'pickle.dump', (['object', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(object, f, pickle.HIGHEST_PROTOCOL)\n', (3780, 3816), False, 'import pickle\n'), ((4013, 4027), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4024, 4027), False, 'import pickle\n'), ((5661, 5687), 'torch.LongTensor', 'torch.LongTensor', (['m_left_r'], {}), '(m_left_r)\n', (5677, 5687), False, 'import torch\n'), ((5689, 5715), 'torch.LongTensor', 'torch.LongTensor', (['m_left_c'], {}), '(m_left_c)\n', (5705, 5715), False, 'import torch\n'), ((5736, 5763), 'torch.LongTensor', 'torch.LongTensor', (['m_right_r'], {}), '(m_right_r)\n', (5752, 5763), False, 'import torch\n'), ((5765, 5792), 'torch.LongTensor', 'torch.LongTensor', (['m_right_c'], {}), '(m_right_c)\n', (5781, 5792), False, 'import torch\n'), ((5810, 5834), 'torch.LongTensor', 'torch.LongTensor', (['m_up_r'], {}), '(m_up_r)\n', (5826, 5834), False, 'import torch\n'), ((5836, 5860), 'torch.LongTensor', 'torch.LongTensor', (['m_up_c'], {}), '(m_up_c)\n', (5852, 5860), False, 'import torch\n'), ((6565, 6584), 'numpy.square', 'np.square', (['(gps - gt)'], {}), '(gps - gt)\n', (6574, 6584), True, 'import numpy as np\n'), ((8975, 8991), 'numpy.abs', 'np.abs', (['(tt2 - ti)'], {}), '(tt2 - ti)\n', (8981, 8991), True, 'import numpy as np\n'), ((9014, 9029), 'numpy.argmin', 'np.argmin', (['diff'], {}), '(diff)\n', (9023, 9029), True, 'import numpy as np\n'), ((9300, 9333), 'numpy.linspace', 'np.linspace', (['(-margin)', 'margin', '(200)'], {}), '(-margin, margin, 200)\n', (9311, 9333), True, 'import numpy as np\n'), ((10385, 10398), 'numpy.min', 'np.min', (['rel_j'], {}), '(rel_j)\n', (10391, 10398), True, 'import numpy as np\n'), ((10956, 10968), 'numpy.cos', 'np.cos', (['lat0'], {}), '(lat0)\n', (10962, 10968), True, 'import numpy as np\n'), ((11036, 11056), 'numpy.expand_dims', 'np.expand_dims', (['x', '(1)'], {}), '(x, 1)\n', (11050, 
11056), True, 'import numpy as np\n'), ((11058, 11078), 'numpy.expand_dims', 'np.expand_dims', (['y', '(1)'], {}), '(y, 1)\n', (11072, 11078), True, 'import numpy as np\n'), ((3355, 3364), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (3361, 3364), True, 'import numpy as np\n'), ((3500, 3509), 'numpy.eye', 'np.eye', (['(6)'], {}), '(6)\n', (3506, 3509), True, 'import numpy as np\n'), ((10345, 10361), 'numpy.argmin', 'np.argmin', (['rel_j'], {}), '(rel_j)\n', (10354, 10361), True, 'import numpy as np\n'), ((3253, 3264), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (3261, 3264), True, 'import numpy as np\n'), ((3398, 3409), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (3406, 3409), True, 'import numpy as np\n'), ((10258, 10311), 'numpy.abs', 'np.abs', (['(f - (ts[last_id + j] - ts[last_id]) / 1000000)'], {}), '(f - (ts[last_id + j] - ts[last_id]) / 1000000)\n', (10264, 10311), True, 'import numpy as np\n')] |
"""
RANSAC for Similarity Transformation Estimation
Modified from https://github.com/hughw19/NOCS_CVPR2019
Originally Written by <NAME>
"""
import time
import numpy as np
def estimateSimilarityUmeyama(SourceHom, TargetHom):
# Copy of original paper is at: http://web.stanford.edu/class/cs273/refs/umeyama.pdf
SourceCentroid = np.mean(SourceHom[:3, :], axis=1)
TargetCentroid = np.mean(TargetHom[:3, :], axis=1)
nPoints = SourceHom.shape[1]
CenteredSource = SourceHom[:3, :] - np.tile(SourceCentroid, (nPoints, 1)).transpose()
CenteredTarget = TargetHom[:3, :] - np.tile(TargetCentroid, (nPoints, 1)).transpose()
CovMatrix = np.matmul(CenteredTarget, np.transpose(CenteredSource)) / nPoints
if np.isnan(CovMatrix).any():
print('nPoints:', nPoints)
print(SourceHom.shape)
print(TargetHom.shape)
raise RuntimeError('There are NANs in the input.')
U, D, Vh = np.linalg.svd(CovMatrix, full_matrices=True)
d = (np.linalg.det(U) * np.linalg.det(Vh)) < 0.0
if d:
D[-1] = -D[-1]
U[:, -1] = -U[:, -1]
# rotation
Rotation = np.matmul(U, Vh)
# scale
varP = np.var(SourceHom[:3, :], axis=1).sum()
Scale = 1 / varP * np.sum(D)
# translation
Translation = TargetHom[:3, :].mean(axis=1) - SourceHom[:3, :].mean(axis=1).dot(Scale*Rotation.T)
# transformation matrix
OutTransform = np.identity(4)
OutTransform[:3, :3] = Scale * Rotation
OutTransform[:3, 3] = Translation
return Scale, Rotation, Translation, OutTransform
def estimateSimilarityTransform(source: np.array, target: np.array, verbose=False):
""" Add RANSAC algorithm to account for outliers.
"""
assert source.shape[0] == target.shape[0], 'Source and Target must have same number of points.'
SourceHom = np.transpose(np.hstack([source, np.ones([source.shape[0], 1])]))
TargetHom = np.transpose(np.hstack([target, np.ones([target.shape[0], 1])]))
# Auto-parameter selection based on source heuristics
# Assume source is object model or gt nocs map, which is of high quality
SourceCentroid = np.mean(SourceHom[:3, :], axis=1)
nPoints = SourceHom.shape[1]
CenteredSource = SourceHom[:3, :] - np.tile(SourceCentroid, (nPoints, 1)).transpose()
SourceDiameter = 2 * np.amax(np.linalg.norm(CenteredSource, axis=0))
InlierT = SourceDiameter / 10.0 # 0.1 of source diameter
maxIter = 128
confidence = 0.99
if verbose:
print('Inlier threshold: ', InlierT)
print('Max number of iterations: ', maxIter)
BestInlierRatio = 0
BestInlierIdx = np.arange(nPoints)
for i in range(0, maxIter):
# Pick 5 random (but corresponding) points from source and target
RandIdx = np.random.randint(nPoints, size=5)
Scale, _, _, OutTransform = estimateSimilarityUmeyama(SourceHom[:, RandIdx], TargetHom[:, RandIdx])
PassThreshold = Scale * InlierT # propagate inlier threshold to target scale
Diff = TargetHom - np.matmul(OutTransform, SourceHom)
ResidualVec = np.linalg.norm(Diff[:3, :], axis=0)
InlierIdx = np.where(ResidualVec < PassThreshold)[0]
nInliers = InlierIdx.shape[0]
InlierRatio = nInliers / nPoints
# update best hypothesis
if InlierRatio > BestInlierRatio:
BestInlierRatio = InlierRatio
BestInlierIdx = InlierIdx
if verbose:
print('Iteration: ', i)
print('Inlier ratio: ', BestInlierRatio)
# early break
if (1 - (1 - BestInlierRatio ** 5) ** i) > confidence:
break
if(BestInlierRatio < 0.1):
print('[ WARN ] - Something is wrong. Small BestInlierRatio: ', BestInlierRatio)
return None, None, None, None
SourceInliersHom = SourceHom[:, BestInlierIdx]
TargetInliersHom = TargetHom[:, BestInlierIdx]
Scale, Rotation, Translation, OutTransform = estimateSimilarityUmeyama(SourceInliersHom, TargetInliersHom)
if verbose:
print('BestInlierRatio:', BestInlierRatio)
print('Rotation:\n', Rotation)
print('Translation:\n', Translation)
print('Scale:', Scale)
return Scale, Rotation, Translation, OutTransform
def backproject(depth, intrinsics, instance_mask):
    """Back-project masked depth pixels into 3-D camera coordinates.

    Uses the OpenCV camera convention: x right, y down, z forward.

    Args:
        depth: (H, W) depth image; zero-depth pixels are ignored.
        intrinsics: 3x3 camera matrix (fx, fy on the diagonal, cx, cy in
            the last column).
        instance_mask: (H, W) boolean/int mask selecting one instance.

    Returns:
        pts: (N, 3) array of back-projected points.
        idxs: the np.where index tuple of the selected pixels.
    """
    fx, fy = intrinsics[0, 0], intrinsics[1, 1]
    cx, cy = intrinsics[0, 2], intrinsics[1, 2]
    # Keep only pixels that belong to the instance AND carry valid depth.
    valid = np.logical_and(instance_mask, depth > 0)
    idxs = np.where(valid)
    rows, cols = idxs[0], idxs[1]
    z = depth[rows, cols]
    # Pinhole back-projection: pixel offset from principal point, scaled by z.
    x = (cols - cx) * z / fx
    y = (rows - cy) * z / fy
    pts = np.stack((x, y, z), axis=1)
    return pts, idxs
def align_nocs_to_depth(masks, coords, depth, intrinsics, instance_ids, img_path, verbose=False):
    """Estimate a similarity transform per instance aligning NOCS coords to depth.

    For each instance, back-projects its depth pixels, pairs them with the
    corresponding NOCS map values (shifted by -0.5 to center), and runs RANSAC
    similarity estimation.  Failed instances fall back to identity and the
    error is recorded in the returned message string.

    Returns:
        scales, rotations, translations (per-instance arrays; scale and
        translation are divided by 1000), error_messages (str), elapses (list
        of per-instance timings in seconds).
    """
    n_instances = len(instance_ids)
    scales = np.zeros(n_instances)
    rotations = np.zeros((n_instances, 3, 3))
    translations = np.zeros((n_instances, 3))
    error_messages = ''
    elapses = []
    for idx in range(n_instances):
        instance_mask = masks[:, :, idx]
        nocs_map = coords[:, :, idx, :]
        pts, pixel_idxs = backproject(depth, intrinsics, instance_mask)
        # NOCS values are stored in [0, 1]; recenter to [-0.5, 0.5].
        nocs_pts = nocs_map[pixel_idxs[0], pixel_idxs[1], :] - 0.5
        try:
            tic = time.time()
            s, R, T, outtransform = estimateSimilarityTransform(nocs_pts, pts, False)
            elapsed = time.time() - tic
            if verbose:
                print('elapsed: ', elapsed)
            elapses.append(elapsed)
        except Exception as e:
            # Keep going on failure: report, then use an identity fallback.
            message = '[ Error ] aligning instance {} in {} fails. Message: {}.'.format(instance_ids[idx], img_path, str(e))
            print(message)
            error_messages += message + '\n'
            s = 1.0
            R = np.eye(3)
            T = np.zeros(3)
            outtransform = np.identity(4, dtype=np.float32)
        # Scale/translation are converted from millimeters to meters.
        scales[idx] = s / 1000.0
        rotations[idx, :, :] = R
        translations[idx, :] = T / 1000.0
    return scales, rotations, translations, error_messages, elapses
| [
"numpy.sum",
"numpy.ones",
"numpy.isnan",
"numpy.linalg.svd",
"numpy.mean",
"numpy.arange",
"numpy.random.randint",
"numpy.linalg.norm",
"numpy.tile",
"numpy.transpose",
"numpy.identity",
"numpy.linalg.det",
"numpy.var",
"numpy.stack",
"numpy.logical_and",
"numpy.zeros",
"time.time",... | [((348, 381), 'numpy.mean', 'np.mean', (['SourceHom[:3, :]'], {'axis': '(1)'}), '(SourceHom[:3, :], axis=1)\n', (355, 381), True, 'import numpy as np\n'), ((403, 436), 'numpy.mean', 'np.mean', (['TargetHom[:3, :]'], {'axis': '(1)'}), '(TargetHom[:3, :], axis=1)\n', (410, 436), True, 'import numpy as np\n'), ((938, 982), 'numpy.linalg.svd', 'np.linalg.svd', (['CovMatrix'], {'full_matrices': '(True)'}), '(CovMatrix, full_matrices=True)\n', (951, 982), True, 'import numpy as np\n'), ((1128, 1144), 'numpy.matmul', 'np.matmul', (['U', 'Vh'], {}), '(U, Vh)\n', (1137, 1144), True, 'import numpy as np\n'), ((1407, 1421), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (1418, 1421), True, 'import numpy as np\n'), ((2126, 2159), 'numpy.mean', 'np.mean', (['SourceHom[:3, :]'], {'axis': '(1)'}), '(SourceHom[:3, :], axis=1)\n', (2133, 2159), True, 'import numpy as np\n'), ((2618, 2636), 'numpy.arange', 'np.arange', (['nPoints'], {}), '(nPoints)\n', (2627, 2636), True, 'import numpy as np\n'), ((4532, 4576), 'numpy.logical_and', 'np.logical_and', (['instance_mask', 'non_zero_mask'], {}), '(instance_mask, non_zero_mask)\n', (4546, 4576), True, 'import numpy as np\n'), ((4588, 4617), 'numpy.where', 'np.where', (['final_instance_mask'], {}), '(final_instance_mask)\n', (4596, 4617), True, 'import numpy as np\n'), ((4741, 4768), 'numpy.stack', 'np.stack', (['(x, y, z)'], {'axis': '(1)'}), '((x, y, z), axis=1)\n', (4749, 4768), True, 'import numpy as np\n'), ((4983, 5006), 'numpy.zeros', 'np.zeros', (['num_instances'], {}), '(num_instances)\n', (4991, 5006), True, 'import numpy as np\n'), ((5023, 5054), 'numpy.zeros', 'np.zeros', (['(num_instances, 3, 3)'], {}), '((num_instances, 3, 3))\n', (5031, 5054), True, 'import numpy as np\n'), ((5074, 5102), 'numpy.zeros', 'np.zeros', (['(num_instances, 3)'], {}), '((num_instances, 3))\n', (5082, 5102), True, 'import numpy as np\n'), ((1230, 1239), 'numpy.sum', 'np.sum', (['D'], {}), '(D)\n', (1236, 1239), True, 
'import numpy as np\n'), ((2761, 2795), 'numpy.random.randint', 'np.random.randint', (['nPoints'], {'size': '(5)'}), '(nPoints, size=5)\n', (2778, 2795), True, 'import numpy as np\n'), ((3076, 3111), 'numpy.linalg.norm', 'np.linalg.norm', (['Diff[:3, :]'], {'axis': '(0)'}), '(Diff[:3, :], axis=0)\n', (3090, 3111), True, 'import numpy as np\n'), ((692, 720), 'numpy.transpose', 'np.transpose', (['CenteredSource'], {}), '(CenteredSource)\n', (704, 720), True, 'import numpy as np\n'), ((739, 758), 'numpy.isnan', 'np.isnan', (['CovMatrix'], {}), '(CovMatrix)\n', (747, 758), True, 'import numpy as np\n'), ((992, 1008), 'numpy.linalg.det', 'np.linalg.det', (['U'], {}), '(U)\n', (1005, 1008), True, 'import numpy as np\n'), ((1011, 1028), 'numpy.linalg.det', 'np.linalg.det', (['Vh'], {}), '(Vh)\n', (1024, 1028), True, 'import numpy as np\n'), ((1168, 1200), 'numpy.var', 'np.var', (['SourceHom[:3, :]'], {'axis': '(1)'}), '(SourceHom[:3, :], axis=1)\n', (1174, 1200), True, 'import numpy as np\n'), ((2316, 2354), 'numpy.linalg.norm', 'np.linalg.norm', (['CenteredSource'], {'axis': '(0)'}), '(CenteredSource, axis=0)\n', (2330, 2354), True, 'import numpy as np\n'), ((3019, 3053), 'numpy.matmul', 'np.matmul', (['OutTransform', 'SourceHom'], {}), '(OutTransform, SourceHom)\n', (3028, 3053), True, 'import numpy as np\n'), ((3132, 3169), 'numpy.where', 'np.where', (['(ResidualVec < PassThreshold)'], {}), '(ResidualVec < PassThreshold)\n', (3140, 3169), True, 'import numpy as np\n'), ((5347, 5358), 'time.time', 'time.time', ([], {}), '()\n', (5356, 5358), False, 'import time\n'), ((510, 547), 'numpy.tile', 'np.tile', (['SourceCentroid', '(nPoints, 1)'], {}), '(SourceCentroid, (nPoints, 1))\n', (517, 547), True, 'import numpy as np\n'), ((600, 637), 'numpy.tile', 'np.tile', (['TargetCentroid', '(nPoints, 1)'], {}), '(TargetCentroid, (nPoints, 1))\n', (607, 637), True, 'import numpy as np\n'), ((1856, 1885), 'numpy.ones', 'np.ones', (['[source.shape[0], 1]'], {}), '([source.shape[0], 
1])\n', (1863, 1885), True, 'import numpy as np\n'), ((1937, 1966), 'numpy.ones', 'np.ones', (['[target.shape[0], 1]'], {}), '([target.shape[0], 1])\n', (1944, 1966), True, 'import numpy as np\n'), ((2233, 2270), 'numpy.tile', 'np.tile', (['SourceCentroid', '(nPoints, 1)'], {}), '(SourceCentroid, (nPoints, 1))\n', (2240, 2270), True, 'import numpy as np\n'), ((5468, 5479), 'time.time', 'time.time', ([], {}), '()\n', (5477, 5479), False, 'import time\n'), ((5854, 5863), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (5860, 5863), True, 'import numpy as np\n'), ((5880, 5891), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (5888, 5891), True, 'import numpy as np\n'), ((5919, 5951), 'numpy.identity', 'np.identity', (['(4)'], {'dtype': 'np.float32'}), '(4, dtype=np.float32)\n', (5930, 5951), True, 'import numpy as np\n')] |
# Filesystem path to the local 20NAR1 reasoner checkout; its Python binding
# (Binding.py) is loaded dynamically from there below.
pathTo20Nar = "/home/r0b3/dev/rust/20mlish6"
# load module from path for python 3.5+
# from https://stackoverflow.com/a/67692/388614
import importlib.util
spec = importlib.util.spec_from_file_location("module.Binding", pathTo20Nar+"/Binding.py")
Binding = importlib.util.module_from_spec(spec)
spec.loader.exec_module(Binding)
b = Binding.Binding(pathTo20Nar) # instantiate python binding for 20NAR1
import math
import numpy as np
import pybullet as p
import time
import pybullet_data
# --- pybullet scene setup: ground plane, an R2D2 robot, and one sphere
# --- target the robot will be steered toward.
physicsClient = p.connect(p.GUI)#or p.DIRECT for non-graphical version
p.setAdditionalSearchPath(pybullet_data.getDataPath()) #optionally
p.setGravity(0,0,-10)
planeId = p.loadURDF("plane.urdf")
cubeStartPos = [0,0,1]
cubeStartOrientation = p.getQuaternionFromEuler([0,0,0])
robotId = p.loadURDF("r2d2.urdf",cubeStartPos, cubeStartOrientation)
mass = 1.0
sphereRadius = 0.05
colSphereId = p.createCollisionShape(p.GEOM_SPHERE, radius=sphereRadius)
colBoxId = p.createCollisionShape(p.GEOM_BOX,
                      halfExtents=[sphereRadius, sphereRadius, sphereRadius])
mass = 1
visualShapeId = -1
basePosition = [1.0, 0.0, 3.0]
baseOrientation = [0, 0, 0, 1]
# The sphere target the robot tracks; dropped from above the scene.
phyObjUid = p.createMultiBody(mass, colSphereId, visualShapeId, basePosition,
                              baseOrientation)
# Low friction/damping so the target can be nudged around easily.
p.changeDynamics(phyObjUid,
                 -1,
                 spinningFriction=0.001,
                 rollingFriction=0.001,
                 linearDamping=0.0)
# register ops
#b.i("!por NOP ^left")
#b.i("!por NOP ^right")
#b.i("!por NOP ^forward")
#b.i("!por NOP ^backward")
# op to set distance to 2
b.i("!por NOP ^setDist2")
# op to set distance to 4
b.i("!por NOP ^setDist4")
# set motor velocity for testing
maxForce = 100.0
targetVel = 15.0
# interpret robot command and set controls for physics
def roboCmd(code):
    """Apply a steering command to the R2D2 wheel motors.

    Codes: "l"/"r" spin the robot left/right in place, "f"/"f2" drive
    forward and "b"/"b2" backward (both at reduced force and velocity so
    the robot does not topple).  Unknown codes leave all motors untouched.
    """
    right_wheels = [2, 3]  # right front and back wheel joints
    left_wheels = [6, 7]   # left front and back wheel joints
    if code == "l":
        # Turn left: left wheels forward, right wheels backward.
        forward_joints, backward_joints = left_wheels, right_wheels
    elif code == "r":
        forward_joints, backward_joints = right_wheels, left_wheels
    elif code == "f" or code == "f2":  # forward
        forward_joints, backward_joints = right_wheels + left_wheels, []
    elif code == "b" or code == "b2":  # backward
        forward_joints, backward_joints = [], right_wheels + left_wheels
    else:
        forward_joints, backward_joints = [], []
    force = maxForce
    velocity = targetVel
    if code in ("f", "f2", "b", "b2"):
        # Drive gently along the heading axis to avoid tipping over.
        force = maxForce * 0.05
        velocity = targetVel * 0.5
    for joint in forward_joints:
        p.setJointMotorControl2(bodyUniqueId=robotId,
                                jointIndex=joint,
                                controlMode=p.VELOCITY_CONTROL,
                                targetVelocity=velocity,
                                force=force)
    for joint in backward_joints:
        p.setJointMotorControl2(bodyUniqueId=robotId,
                                jointIndex=joint,
                                controlMode=p.VELOCITY_CONTROL,
                                targetVelocity=-velocity,
                                force=force)
#roboCmd("l") # for testing
# Dump info for the first 15 joints so the wheel joint indices can be
# verified (roboCmd hardcodes 2/3 = right wheels, 6/7 = left wheels).
for idx in range(15):
    print(idx)
    print(
        p.getJointInfo(
            bodyUniqueId = robotId,
            jointIndex = idx))
def normalize(vec):
    """Return a float64 copy of ``vec`` scaled by the Euclidean norm of its
    first three components (callers pass 3-D direction vectors).

    Bug fix: the original built the working array with the input's own dtype,
    so integer input (e.g. ``normalize([3, 4, 0])``) made the in-place
    division ``vec2 /= len2`` raise a casting ``TypeError``.  Forcing a
    float64 copy up front accepts any numeric sequence and never mutates the
    caller's data.

    Note: a zero vector still produces a divide-by-zero warning and
    inf/nan output, exactly as before.
    """
    out = np.array(vec, dtype=np.float64)  # float copy, input left untouched
    length = np.linalg.norm(out[:3])  # norm of the 3-D part, as in the original formula
    out /= length
    return out
def dot(a, b):
    """Return the 3-D dot product of the first three components of a and b."""
    total = a[0] * b[0]
    for axis in (1, 2):
        total += a[axis] * b[axis]
    return total
distToTargetGoal = 2.0 # goal distance to target
oldState = ""
# Main control loop: step the physics, derive the robot's heading relative
# to the target, run a hardcoded low-level steering controller, and exchange
# distance events/goals with the NAR reasoner via the binding `b`.
for i in range(100000000):
    p.stepSimulation()
    p.stepSimulation()
    time.sleep(1./120.)
    robotPos, robotOrn = p.getBasePositionAndOrientation(robotId)
    targetPos, targetOrn = p.getBasePositionAndOrientation(phyObjUid)
    r2d2ornEuler = p.getEulerFromQuaternion(robotOrn)
    yaw = r2d2ornEuler[2]
    #yaw += 3.141*2.0 # correct by rotating 90 degree
    yaw -= 3.141*0.5
    # this is a "basic rotation around Z
    # see https://en.wikipedia.org/wiki/Rotation_matrix "Basic Rotations"
    robotDir = np.array([math.cos(-yaw), -math.sin(-yaw), 0])
    # rotated by 90 degrees because we care only about side
    robotDirZ90 = np.array([math.cos(-(yaw-3.141*0.5)), -math.sin(-(yaw-3.141*0.5)), 0])
    diffRobotToTarget = np.array([(robotPos[0]-targetPos[0]),(robotPos[1]-targetPos[1]),(robotPos[2]-targetPos[2])])
    normalizedDiffRobotToTarget = normalize(diffRobotToTarget)
    # compute dot product to get direction dot vector
    # sideDot: how far the target lies to the robot's side;
    # dirDot: how well the robot is pointing at the target.
    sideDot = dot(robotDirZ90, normalizedDiffRobotToTarget)
    dirDot = dot(robotDir, normalizedDiffRobotToTarget)
    dirDotNotNormalized = dot(robotDir, diffRobotToTarget)
    if False: # debug dir and dist etc
        print("[d] dirDot"+str(dirDot))
        print("[d] robo dir"+str(robotDir))
        print("[d] diff  "+str(diffRobotToTarget[0])+","  +str(diffRobotToTarget[1])+","+str(diffRobotToTarget[2]))
        print("[d] dirDotNotNormalized "+str(dirDotNotNormalized))
    distToTarget = dirDotNotNormalized # distance to target is the dot product
    #if i > 100:
    #    break
    state2 = "" # more detailed state
    if i % 1 == 0: # send state to NAR?
        # Classify the bearing to the target into coarse symbolic states:
        # c = centered, l/l2 = target to the left (soft/hard),
        # r/r2 = target to the right, back = target behind the robot.
        state = ""
        if dirDot > 0.0: # is robot pointing to target?
            if np.abs(sideDot) < 0.3:
                state = "c"
                state2 = "c"
            elif sideDot > 0.8:
                state = "l2"
                state2 = "l"
            elif sideDot > 0.0:
                state = "l"
                state2 = "l"
            elif sideDot < -0.8:
                state = "r2"
                state2 = "r"
            else:
                state = "r"
                state2 = "r"
        else:
            state = "back"
            state2 = "back"
        #print(state)
        if state != oldState:
            #b.i(state+". :|:") # send current state
            #print(state)
            oldState = state
    #print(state2)
    distToTargetDiff = None
    # hardcoded low level control
    if True:
        # First align the heading, then regulate the distance.
        if state2 == "back":
            roboCmd("l")
        if state2 == "r":
            roboCmd("l")
        elif state2 == "l":
            roboCmd("r")
        elif state2 == "c":
            pass # do nothing
        # we can now adjust the distance to the target
        #distToTargetGoal = 2.0 # goal distance to target
        distToTargetDiff = distToTargetGoal - distToTarget
        #print(distToTargetDiff)
        if np.abs(distToTargetDiff) < 0.3:
            pass # don't do anything to not topple robot over
        elif distToTargetDiff > 0.0:
            if distToTargetDiff > 0.8:
                roboCmd("f2") # soft forward to not topple robot over
            else:
                roboCmd("f")
        else:
            if distToTargetDiff > -0.8:
                roboCmd("b")
            else:
                roboCmd("b2")
    if i % 40 == 0:
        # Periodically report the (bucketed) distance to NAR as an event.
        if distToTarget == None:
            pass
        elif distToTarget < 2.0:
            b.i("db2. :|:")
        elif distToTarget > 2.0 and distToTarget < 3.0:
            b.i("d2. :|:")
        elif distToTarget > 3.0 and distToTarget < 4.0:
            b.i("d3. :|:")
        elif distToTarget > 4.0 and distToTarget < 5.0:
            b.i("da4. :|:")
    # NOTE(review): `i % 40*2` parses as `(i % 40) * 2`, which is zero exactly
    # when `i % 40 == 0` — same cadence as the event above.  If a slower
    # refresh was intended this should read `i % (40*2)`; confirm with author.
    if i % 40*2 == 0: # refresh goal
        b.i("d2! :|:")
        pass
    # procedural step for NAR
    if i % 40 == 0:
        b.sp()
    # Drain all pending output from NAR and execute any ops it decided on.
    while True:
        narLine = b.tryRead() # try to read from binding to NAR
        if narLine == None:
            #print("NOTHING", flush=True)
            break
        if narLine: # was something returned?
            trimmedNarLine = narLine.rstrip()
            if trimmedNarLine[0] != "!": # is it not a command?
                print("[d] NAR returned:"+trimmedNarLine, flush=True) # for debugging
                pass
            if len(trimmedNarLine) > 0 and trimmedNarLine[-1] == "!": # is a execution?
                if trimmedNarLine.find("^left") != -1: # left op
                    print("OP left", flush=True)
                    roboCmd("l")
                elif trimmedNarLine.find("^right") != -1: # right op
                    print("OP right", flush=True)
                    roboCmd("r")
                elif trimmedNarLine.find("^setDist2") != -1:
                    distToTargetGoal = 2.0
                elif trimmedNarLine.find("^setDist4") != -1:
                    distToTargetGoal = 4.0
# Report the final robot pose, then shut down the simulation.
# Bug fix: the original queried `boxId`, a name that is never defined in this
# script (left over from the pybullet hello-world example) and raised a
# NameError; the robot body loaded above is tracked under `robotId`.
cubePos, cubeOrn = p.getBasePositionAndOrientation(robotId)
print(cubePos, cubeOrn)
p.disconnect()
| [
"numpy.abs",
"pybullet.setJointMotorControl2",
"pybullet.connect",
"pybullet.createCollisionShape",
"pybullet.getQuaternionFromEuler",
"pybullet.setGravity",
"math.cos",
"pybullet.getEulerFromQuaternion",
"pybullet.getJointInfo",
"math.sqrt",
"pybullet.changeDynamics",
"math.sin",
"time.slee... | [((503, 519), 'pybullet.connect', 'p.connect', (['p.GUI'], {}), '(p.GUI)\n', (512, 519), True, 'import pybullet as p\n'), ((625, 648), 'pybullet.setGravity', 'p.setGravity', (['(0)', '(0)', '(-10)'], {}), '(0, 0, -10)\n', (637, 648), True, 'import pybullet as p\n'), ((657, 681), 'pybullet.loadURDF', 'p.loadURDF', (['"""plane.urdf"""'], {}), "('plane.urdf')\n", (667, 681), True, 'import pybullet as p\n'), ((728, 763), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (752, 763), True, 'import pybullet as p\n'), ((772, 831), 'pybullet.loadURDF', 'p.loadURDF', (['"""r2d2.urdf"""', 'cubeStartPos', 'cubeStartOrientation'], {}), "('r2d2.urdf', cubeStartPos, cubeStartOrientation)\n", (782, 831), True, 'import pybullet as p\n'), ((880, 938), 'pybullet.createCollisionShape', 'p.createCollisionShape', (['p.GEOM_SPHERE'], {'radius': 'sphereRadius'}), '(p.GEOM_SPHERE, radius=sphereRadius)\n', (902, 938), True, 'import pybullet as p\n'), ((950, 1044), 'pybullet.createCollisionShape', 'p.createCollisionShape', (['p.GEOM_BOX'], {'halfExtents': '[sphereRadius, sphereRadius, sphereRadius]'}), '(p.GEOM_BOX, halfExtents=[sphereRadius, sphereRadius,\n sphereRadius])\n', (972, 1044), True, 'import pybullet as p\n'), ((1180, 1266), 'pybullet.createMultiBody', 'p.createMultiBody', (['mass', 'colSphereId', 'visualShapeId', 'basePosition', 'baseOrientation'], {}), '(mass, colSphereId, visualShapeId, basePosition,\n baseOrientation)\n', (1197, 1266), True, 'import pybullet as p\n'), ((1302, 1404), 'pybullet.changeDynamics', 'p.changeDynamics', (['phyObjUid', '(-1)'], {'spinningFriction': '(0.001)', 'rollingFriction': '(0.001)', 'linearDamping': '(0.0)'}), '(phyObjUid, -1, spinningFriction=0.001, rollingFriction=\n 0.001, linearDamping=0.0)\n', (1318, 1404), True, 'import pybullet as p\n'), ((8769, 8807), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['boxId'], {}), '(boxId)\n', (8800, 8807), 
True, 'import pybullet as p\n'), ((8989, 9003), 'pybullet.disconnect', 'p.disconnect', ([], {}), '()\n', (9001, 9003), True, 'import pybullet as p\n'), ((584, 611), 'pybullet_data.getDataPath', 'pybullet_data.getDataPath', ([], {}), '()\n', (609, 611), False, 'import pybullet_data\n'), ((3458, 3474), 'numpy.array', 'np.array', (['vec[:]'], {}), '(vec[:])\n', (3466, 3474), True, 'import numpy as np\n'), ((3486, 3545), 'math.sqrt', 'math.sqrt', (['(vec2[0] ** 2.0 + vec2[1] ** 2.0 + vec2[2] ** 2.0)'], {}), '(vec2[0] ** 2.0 + vec2[1] ** 2.0 + vec2[2] ** 2.0)\n', (3495, 3545), False, 'import math\n'), ((3729, 3747), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (3745, 3747), True, 'import pybullet as p\n'), ((3752, 3770), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (3768, 3770), True, 'import pybullet as p\n'), ((3775, 3798), 'time.sleep', 'time.sleep', (['(1.0 / 120.0)'], {}), '(1.0 / 120.0)\n', (3785, 3798), False, 'import time\n'), ((3821, 3861), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['robotId'], {}), '(robotId)\n', (3852, 3861), True, 'import pybullet as p\n'), ((3889, 3931), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['phyObjUid'], {}), '(phyObjUid)\n', (3920, 3931), True, 'import pybullet as p\n'), ((3952, 3986), 'pybullet.getEulerFromQuaternion', 'p.getEulerFromQuaternion', (['robotOrn'], {}), '(robotOrn)\n', (3976, 3986), True, 'import pybullet as p\n'), ((4439, 4538), 'numpy.array', 'np.array', (['[robotPos[0] - targetPos[0], robotPos[1] - targetPos[1], robotPos[2] -\n targetPos[2]]'], {}), '([robotPos[0] - targetPos[0], robotPos[1] - targetPos[1], robotPos[\n 2] - targetPos[2]])\n', (4447, 4538), True, 'import numpy as np\n'), ((2803, 2961), 'pybullet.setJointMotorControl2', 'p.setJointMotorControl2', ([], {'bodyUniqueId': 'robotId', 'jointIndex': 'iJointIdx', 'controlMode': 'p.VELOCITY_CONTROL', 'targetVelocity': 'thisTargetVel', 'force': 
'thisMaxForce'}), '(bodyUniqueId=robotId, jointIndex=iJointIdx,\n controlMode=p.VELOCITY_CONTROL, targetVelocity=thisTargetVel, force=\n thisMaxForce)\n', (2826, 2961), True, 'import pybullet as p\n'), ((3053, 3212), 'pybullet.setJointMotorControl2', 'p.setJointMotorControl2', ([], {'bodyUniqueId': 'robotId', 'jointIndex': 'iJointIdx', 'controlMode': 'p.VELOCITY_CONTROL', 'targetVelocity': '(-thisTargetVel)', 'force': 'thisMaxForce'}), '(bodyUniqueId=robotId, jointIndex=iJointIdx,\n controlMode=p.VELOCITY_CONTROL, targetVelocity=-thisTargetVel, force=\n thisMaxForce)\n', (3076, 3212), True, 'import pybullet as p\n'), ((3342, 3394), 'pybullet.getJointInfo', 'p.getJointInfo', ([], {'bodyUniqueId': 'robotId', 'jointIndex': 'idx'}), '(bodyUniqueId=robotId, jointIndex=idx)\n', (3356, 3394), True, 'import pybullet as p\n'), ((4229, 4243), 'math.cos', 'math.cos', (['(-yaw)'], {}), '(-yaw)\n', (4237, 4243), False, 'import math\n'), ((4354, 4384), 'math.cos', 'math.cos', (['(-(yaw - 3.141 * 0.5))'], {}), '(-(yaw - 3.141 * 0.5))\n', (4362, 4384), False, 'import math\n'), ((4246, 4260), 'math.sin', 'math.sin', (['(-yaw)'], {}), '(-yaw)\n', (4254, 4260), False, 'import math\n'), ((4383, 4413), 'math.sin', 'math.sin', (['(-(yaw - 3.141 * 0.5))'], {}), '(-(yaw - 3.141 * 0.5))\n', (4391, 4413), False, 'import math\n'), ((5412, 5427), 'numpy.abs', 'np.abs', (['sideDot'], {}), '(sideDot)\n', (5418, 5427), True, 'import numpy as np\n'), ((6657, 6681), 'numpy.abs', 'np.abs', (['distToTargetDiff'], {}), '(distToTargetDiff)\n', (6663, 6681), True, 'import numpy as np\n')] |
import unittest
from dedupe.distance.haversine import compareLatLong
import numpy
class TestHaversine(unittest.TestCase):
    """Sanity checks for the haversine great-circle distance helper."""

    def setUp(self):
        # Airport coordinates: San Francisco (SFO) and Chicago O'Hare (ORD).
        self.sfo = (37.619105, -122.375236)
        self.ord = (41.981649, -87.906670)

    def test_haversine_equal(self):
        # SFO -> ORD is roughly 2964 km (checked to the nearest 10 km).
        self.assertAlmostEqual(compareLatLong(self.sfo, self.ord), 2964, -1)

    def test_haversine_zero(self):
        # A point is at distance zero from itself.
        self.assertAlmostEqual(compareLatLong(self.ord, self.ord), 0.0, 0)

    def test_haversine_na(self):
        # (0.0, 0.0) is treated as a missing coordinate: NaN either way round.
        self.assertTrue(numpy.isnan(compareLatLong((0.0, 0.0), (1.0, 2.0))))
        self.assertTrue(numpy.isnan(compareLatLong((1.0, 2.0), (0.0, 0.0))))
        # A coordinate merely containing a 0.0 component is still valid.
        self.assertAlmostEqual(compareLatLong((0.0, 1.0), (1.0, 2.0)), 157, -1)
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| [
"unittest.main",
"dedupe.distance.haversine.compareLatLong",
"numpy.isnan"
] | [((917, 932), 'unittest.main', 'unittest.main', ([], {}), '()\n', (930, 932), False, 'import unittest\n'), ((290, 324), 'dedupe.distance.haversine.compareLatLong', 'compareLatLong', (['self.sfo', 'self.ord'], {}), '(self.sfo, self.ord)\n', (304, 324), False, 'from dedupe.distance.haversine import compareLatLong\n'), ((439, 473), 'dedupe.distance.haversine.compareLatLong', 'compareLatLong', (['self.ord', 'self.ord'], {}), '(self.ord, self.ord)\n', (453, 473), False, 'from dedupe.distance.haversine import compareLatLong\n'), ((582, 620), 'dedupe.distance.haversine.compareLatLong', 'compareLatLong', (['(0.0, 0.0)', '(1.0, 2.0)'], {}), '((0.0, 0.0), (1.0, 2.0))\n', (596, 620), False, 'from dedupe.distance.haversine import compareLatLong\n'), ((636, 659), 'numpy.isnan', 'numpy.isnan', (['km_dist_na'], {}), '(km_dist_na)\n', (647, 659), False, 'import numpy\n'), ((681, 719), 'dedupe.distance.haversine.compareLatLong', 'compareLatLong', (['(1.0, 2.0)', '(0.0, 0.0)'], {}), '((1.0, 2.0), (0.0, 0.0))\n', (695, 719), False, 'from dedupe.distance.haversine import compareLatLong\n'), ((735, 758), 'numpy.isnan', 'numpy.isnan', (['km_dist_na'], {}), '(km_dist_na)\n', (746, 758), False, 'import numpy\n'), ((782, 820), 'dedupe.distance.haversine.compareLatLong', 'compareLatLong', (['(0.0, 1.0)', '(1.0, 2.0)'], {}), '((0.0, 1.0), (1.0, 2.0))\n', (796, 820), False, 'from dedupe.distance.haversine import compareLatLong\n')] |
import itertools
import os
import more_itertools
import torch
import numpy as np
from bpe.functional import utils
from collections import namedtuple
import multiprocessing
# Named 5-tuple grouping the per-body-part channels used throughout the
# model; field order is relied on elsewhere, so do not reorder.
BodyPart = namedtuple('BodyPart', [
    'right_arm',
    'left_arm',
    'right_leg',
    'left_leg',
    'torso',  # 6 joints + velocity
])
class Config:
    """Central experiment configuration for the body-part-embedding model.

    Class attributes hold defaults; ``__init__`` overrides them from the
    parsed CLI ``args`` and derives the encoder/decoder channel layouts
    (per ``BodyPart``) plus the experiment/log/model directories.
    """
    name = None
    device = None
    # data paths
    data_dir = None
    meanpose_path = None
    stdpose_path = None
    meanpose_rc_path = None
    stdpose_rc_path = None
    # training paths
    save_dir = './train_log'
    exp_dir = None
    log_dir = None
    model_dir = None
    # data info
    img_size = (512, 512)
    unit = 128 # TODO: more descriptive variable name
    unique_nr_joints = 15
    # Grid of (pitch, yaw, roll) camera angles used for view augmentation.
    view_angles = [(np.pi * pitch_ang / 8.0, np.pi * yaw_ang / 2.0, 0)
                   for pitch_ang in np.arange(-0.5, 0.5001, 0.5) for yaw_ang in np.arange(-1.0, 1.001, 0.25)]
    # order of names is important, modify at your own risk
    num_of_motions = 3 # positive, semi_positive, negative
    num_of_skeletons = 2
    num_of_views = 2
    length_of_frames_train = 32
    length_of_frames_test = 32
    # inputs idx for view embedding learning : e.g. combination of positive, first skeleton idx, first view idx
    quadruplet_inputs_name_for_view_learning = ["p_1_1", "p_1_2", "n_2_1", "n_2_2"]
    nr_body_parts = len(BodyPart._fields)
    # Joint counts per body part; torso carries one extra slot for velocity.
    _nr_joints = BodyPart(3, 3, 3, 3, 7) # BodyPartWithVelocity(3, 3, 3, 3, 6, 1)
    velocity_xy = 2
    # training settings
    L2regular = True
    Batchnorm = True
    invisibility_augmentation = False
    num_of_max_invis_joints = 3
    triplet_distance = 'cosine'
    similarity_distance_metric = 'cosine'
    use_all_joints_on_each_bp = False
    action_category_balancing = True
    recon_weight = 1.0
    triplet_margin = 0.3 # TODO: Increase (up to 1.0)
    triplet_weight = 0.7
    quadruplet_margin = 0.5 # TODO: Increase (up to 1.0)
    quadruplet_weight = 1.0
    quadruplet_sim_weight = 1.0
    variation_control_param = 0.2
    use_footvel_loss = False
    foot_idx = None # idx of foot in right_leg of left_leg
    footvel_loss_weight = 0.0
    motion_embedding_l2reg = True
    joint_noise_level = 0.05 # 0 -> disabled
    nr_epochs = 70
    batch_size = 2048
    num_workers = min(multiprocessing.cpu_count() - 1, 20)
    lr = 1e-3
    lr_decay_rate = 0.98
    weight_decay = 1e-2
    save_frequency = 1
    val_frequency = 8
    lr_update_frequency_per_epoch = 3
    def generate_joints_parts_idxs(self, num_channels, invis_aug=False, entire_body=False):
        """Return (per-part channel lengths, per-part channel index ranges).

        ``num_channels`` is the channels per joint (2 = x,y; 3 adds a
        visibility flag).  With ``invis_aug`` the velocity slot loses its
        visibility channel; with ``entire_body`` every part uses the full
        joint set instead of only its own joints.
        """
        len_joints = BodyPart(*(np.asarray(self._nr_joints_entire_body) * num_channels)) if entire_body \
            else BodyPart(*(np.asarray(self._nr_joints) * num_channels))
        if invis_aug:
            len_joints = BodyPart(*(list(len_joints[:-1]) + [len_joints[-1] - 1])) # remove visibility on velocity
        # BodyPartWithVelocity idxs for coordinates + (opt. visibility)
        body_parts = BodyPart(
            *more_itertools.split_before(range(sum(len_joints)), lambda i: i in list(itertools.accumulate(len_joints)))
        )
        return len_joints, body_parts
    def __init__(self, args):
        # Override defaults from CLI args; optional flags are guarded with
        # hasattr so older callers keep working.
        self.name = args.name
        self.data_dir = args.data_dir
        self.use_footvel_loss = args.use_footvel_loss if hasattr(args, 'use_footvel_loss') else False
        self.invisibility_augmentation = args.use_invisibility_aug if hasattr(args, 'use_invisibility_aug') else False
        if hasattr(args, "triplet_distance"):
            self.triplet_distance = args.triplet_distance
            self.similarity_distance_metric = args.similarity_distance_metric
        if hasattr(args, "sim_loss_weight") and args.sim_loss_weight is not None:
            self.quadruplet_sim_weight = args.sim_loss_weight
        if hasattr(args, 'norecon') and args.norecon:
            self.recon_weight = 0.0
        self.foot_idx = [4, 5]
        self.unit = 64
        len_joints, self.body_parts = self.generate_joints_parts_idxs(2)
        len_joints_decoder = len_joints # decoder should output same #channels as without visibility aug
        self.default_body_parts = self.body_parts
        # x, y, (visibility)
        if self.invisibility_augmentation:
            len_joints, self.body_parts_invis = self.generate_joints_parts_idxs(3, invis_aug=True)
            self.default_body_parts = self.body_parts_invis
        self.use_all_joints_on_each_bp = \
            args.use_all_joints_on_each_bp if hasattr(args, 'use_all_joints_on_each_bp') else False
        # Mean/std pose statistics; the sim_test + all-joints combination uses
        # the unit-128 files, everything else the unit-64 files.
        if self.name == 'sim_test' and args.use_all_joints_on_each_bp:
            self.meanpose_rc_path = os.path.join(self.data_dir, "meanpose_rc_all_joints_on_each_bp_unit128.npy")
            self.stdpose_rc_path = os.path.join(self.data_dir, "stdpose_rc_all_joints_on_each_bp_unit128.npy")
        else:
            self.meanpose_rc_path = os.path.join(self.data_dir, "meanpose_rc_with_view_unit64.npy")
            self.stdpose_rc_path = os.path.join(self.data_dir, "stdpose_rc_with_view_unit64.npy")
        if self.use_all_joints_on_each_bp:
            if not self.name == 'sim_test':
                self.meanpose_rc_all_joints_on_each_bp_path = \
                    os.path.join(args.data_dir, 'meanpose_rc_all_joints_on_each_bp_unit64.npy')
                self.stdpose_rc_all_joints_on_each_bp_path = \
                    os.path.join(args.data_dir, 'stdpose_rc_all_joints_on_each_bp_unit64.npy')
            # Every part sees all joints (+1 velocity slot on the torso).
            self._nr_joints_entire_body = BodyPart(self.unique_nr_joints, self.unique_nr_joints, self.unique_nr_joints,
                                                  self.unique_nr_joints, self.unique_nr_joints + 1)
            len_joints_entire_body, self.body_parts_entire_body = self.generate_joints_parts_idxs(2, entire_body=True)
            self.default_body_parts = self.body_parts_entire_body
            if self.invisibility_augmentation:
                len_joints_entire_body, self.body_parts_invis_entire_body = \
                    self.generate_joints_parts_idxs(3, invis_aug=True, entire_body=True)
                self.default_body_parts = self.body_parts_invis_entire_body
        velocity_xy = 2
        self.body_part_names = ['ra', 'la', 'rl', 'll', 'torso']
        # Channel widths for the motion/body/view encoders and the decoder,
        # all expressed as multiples of base_channels.
        base_channels = 16
        mot_en_arm_leg_layer2_ch = 1 * base_channels
        mot_en_arm_leg_layer3_ch = 2 * base_channels
        mot_en_arm_leg_layer4_ch = 4 * base_channels
        mot_en_torso_layer2_ch = 2 * base_channels
        mot_en_torso_layer3_ch = 4 * base_channels
        mot_en_torso_layer4_ch = 8 * base_channels
        body_en_arm_leg_layer2_ch = base_channels
        body_en_arm_leg_layer3_ch = 2 * base_channels
        body_en_arm_leg_layer4_ch = 4 * base_channels
        body_en_arm_leg_layer5_ch = base_channels
        body_en_torso_layer2_ch = base_channels
        body_en_torso_layer3_ch = 2 * base_channels
        body_en_torso_layer4_ch = 4 * base_channels
        body_en_torso_layer5_ch = 2 * base_channels
        view_en_layer2_ch = 2 * base_channels
        view_en_layer3_ch = 3 * base_channels
        view_en_layer4_ch = 4 * base_channels
        de_layer2_ch = 4 * base_channels
        de_layer3_ch = 2 * base_channels
        # View encoder consumes the whole pose minus the velocity channels.
        self.view_en_channels = [sum(len_joints) - velocity_xy, view_en_layer2_ch, view_en_layer3_ch, view_en_layer4_ch]
        if self.use_all_joints_on_each_bp:
            body_en_layer2_ch = 4 * base_channels
            body_en_layer3_ch = 6 * base_channels
            body_en_layer4_ch = 8 * base_channels
            self.mot_en_channels = BodyPart(
                [len_joints_entire_body.right_arm, mot_en_arm_leg_layer2_ch, mot_en_arm_leg_layer3_ch,
                 mot_en_arm_leg_layer4_ch],
                [len_joints_entire_body.left_arm, mot_en_arm_leg_layer2_ch, mot_en_arm_leg_layer3_ch,
                 mot_en_arm_leg_layer4_ch],
                [len_joints_entire_body.right_leg, mot_en_arm_leg_layer2_ch, mot_en_arm_leg_layer3_ch,
                 mot_en_arm_leg_layer4_ch],
                [len_joints_entire_body.left_leg, mot_en_arm_leg_layer2_ch, mot_en_arm_leg_layer3_ch,
                 mot_en_arm_leg_layer4_ch],
                [len_joints_entire_body.torso, mot_en_torso_layer2_ch, mot_en_torso_layer3_ch, mot_en_torso_layer4_ch])
            # Single shared body encoder over the whole pose.
            self.body_en_channels = [sum(len_joints) - velocity_xy, body_en_layer2_ch, body_en_layer3_ch,
                                     body_en_layer4_ch]
            # Decoder input = concat of motion + body + view embedding widths.
            self.de_channels = BodyPart(
                *[(mot_en_item[-1] + self.body_en_channels[-1] + self.view_en_channels[-1], de_layer2_ch, de_layer3_ch,
                   x_len_joints)
                  for mot_en_item, x_len_joints in
                  zip(self.mot_en_channels, len_joints_decoder)])
        else:
            self.mot_en_channels = BodyPart(
                [len_joints.right_arm, mot_en_arm_leg_layer2_ch, mot_en_arm_leg_layer3_ch, mot_en_arm_leg_layer4_ch],
                [len_joints.left_arm, mot_en_arm_leg_layer2_ch, mot_en_arm_leg_layer3_ch, mot_en_arm_leg_layer4_ch],
                [len_joints.right_leg, mot_en_arm_leg_layer2_ch, mot_en_arm_leg_layer3_ch, mot_en_arm_leg_layer4_ch],
                [len_joints.left_leg, mot_en_arm_leg_layer2_ch, mot_en_arm_leg_layer3_ch, mot_en_arm_leg_layer4_ch],
                [len_joints.torso, mot_en_torso_layer2_ch, mot_en_torso_layer3_ch, mot_en_torso_layer4_ch])
            # Per-part body encoders (torso excludes the velocity channels).
            self.body_en_channels = BodyPart(
                [len_joints.right_arm, body_en_arm_leg_layer2_ch, body_en_arm_leg_layer3_ch, body_en_arm_leg_layer4_ch,
                 body_en_arm_leg_layer5_ch],
                [len_joints.left_arm, body_en_arm_leg_layer2_ch, body_en_arm_leg_layer3_ch, body_en_arm_leg_layer4_ch,
                 body_en_arm_leg_layer5_ch],
                [len_joints.right_leg, body_en_arm_leg_layer2_ch, body_en_arm_leg_layer3_ch, body_en_arm_leg_layer4_ch,
                 body_en_arm_leg_layer5_ch],
                [len_joints.left_leg, body_en_arm_leg_layer2_ch, body_en_arm_leg_layer3_ch, body_en_arm_leg_layer4_ch,
                 body_en_arm_leg_layer5_ch],
                [len_joints.torso - velocity_xy, body_en_torso_layer2_ch, body_en_torso_layer3_ch,
                 body_en_torso_layer4_ch, body_en_torso_layer5_ch])
            self.de_channels = BodyPart(
                *[(mot_en_item[-1] + body_en_item[-1] + self.view_en_channels[-1], de_layer2_ch, de_layer3_ch,
                   x_len_joints)
                  for mot_en_item, body_en_item, x_len_joints in
                  zip(self.mot_en_channels, self.body_en_channels, len_joints_decoder)])
        # Device selection and experiment directory layout.
        os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_ids)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        if "logdir" in args and args.logdir:
            self.save_dir = args.logdir
        self.exp_dir = os.path.join(self.save_dir, 'exp_' + self.name)
        self.log_dir = os.path.join(self.exp_dir, 'log/')
        self.model_dir = os.path.join(self.exp_dir, 'model/')
        utils.ensure_dirs([self.log_dir, self.model_dir])
| [
"numpy.asarray",
"bpe.functional.utils.ensure_dirs",
"itertools.accumulate",
"numpy.arange",
"collections.namedtuple",
"torch.cuda.is_available",
"os.path.join",
"multiprocessing.cpu_count"
] | [((187, 274), 'collections.namedtuple', 'namedtuple', (['"""BodyPart"""', "['right_arm', 'left_arm', 'right_leg', 'left_leg', 'torso']"], {}), "('BodyPart', ['right_arm', 'left_arm', 'right_leg', 'left_leg',\n 'torso'])\n", (197, 274), False, 'from collections import namedtuple\n'), ((10897, 10944), 'os.path.join', 'os.path.join', (['self.save_dir', "('exp_' + self.name)"], {}), "(self.save_dir, 'exp_' + self.name)\n", (10909, 10944), False, 'import os\n'), ((10968, 11002), 'os.path.join', 'os.path.join', (['self.exp_dir', '"""log/"""'], {}), "(self.exp_dir, 'log/')\n", (10980, 11002), False, 'import os\n'), ((11028, 11064), 'os.path.join', 'os.path.join', (['self.exp_dir', '"""model/"""'], {}), "(self.exp_dir, 'model/')\n", (11040, 11064), False, 'import os\n'), ((11073, 11122), 'bpe.functional.utils.ensure_dirs', 'utils.ensure_dirs', (['[self.log_dir, self.model_dir]'], {}), '([self.log_dir, self.model_dir])\n', (11090, 11122), False, 'from bpe.functional import utils\n'), ((850, 878), 'numpy.arange', 'np.arange', (['(-0.5)', '(0.5001)', '(0.5)'], {}), '(-0.5, 0.5001, 0.5)\n', (859, 878), True, 'import numpy as np\n'), ((894, 922), 'numpy.arange', 'np.arange', (['(-1.0)', '(1.001)', '(0.25)'], {}), '(-1.0, 1.001, 0.25)\n', (903, 922), True, 'import numpy as np\n'), ((2305, 2332), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (2330, 2332), False, 'import multiprocessing\n'), ((4682, 4758), 'os.path.join', 'os.path.join', (['self.data_dir', '"""meanpose_rc_all_joints_on_each_bp_unit128.npy"""'], {}), "(self.data_dir, 'meanpose_rc_all_joints_on_each_bp_unit128.npy')\n", (4694, 4758), False, 'import os\n'), ((4794, 4869), 'os.path.join', 'os.path.join', (['self.data_dir', '"""stdpose_rc_all_joints_on_each_bp_unit128.npy"""'], {}), "(self.data_dir, 'stdpose_rc_all_joints_on_each_bp_unit128.npy')\n", (4806, 4869), False, 'import os\n'), ((4920, 4983), 'os.path.join', 'os.path.join', (['self.data_dir', 
'"""meanpose_rc_with_view_unit64.npy"""'], {}), "(self.data_dir, 'meanpose_rc_with_view_unit64.npy')\n", (4932, 4983), False, 'import os\n'), ((5019, 5081), 'os.path.join', 'os.path.join', (['self.data_dir', '"""stdpose_rc_with_view_unit64.npy"""'], {}), "(self.data_dir, 'stdpose_rc_with_view_unit64.npy')\n", (5031, 5081), False, 'import os\n'), ((5254, 5329), 'os.path.join', 'os.path.join', (['args.data_dir', '"""meanpose_rc_all_joints_on_each_bp_unit64.npy"""'], {}), "(args.data_dir, 'meanpose_rc_all_joints_on_each_bp_unit64.npy')\n", (5266, 5329), False, 'import os\n'), ((5413, 5487), 'os.path.join', 'os.path.join', (['args.data_dir', '"""stdpose_rc_all_joints_on_each_bp_unit64.npy"""'], {}), "(args.data_dir, 'stdpose_rc_all_joints_on_each_bp_unit64.npy')\n", (5425, 5487), False, 'import os\n'), ((10750, 10775), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10773, 10775), False, 'import torch\n'), ((2615, 2654), 'numpy.asarray', 'np.asarray', (['self._nr_joints_entire_body'], {}), '(self._nr_joints_entire_body)\n', (2625, 2654), True, 'import numpy as np\n'), ((2717, 2744), 'numpy.asarray', 'np.asarray', (['self._nr_joints'], {}), '(self._nr_joints)\n', (2727, 2744), True, 'import numpy as np\n'), ((3089, 3121), 'itertools.accumulate', 'itertools.accumulate', (['len_joints'], {}), '(len_joints)\n', (3109, 3121), False, 'import itertools\n')] |
import collections
import collections.abc
import json
import os
import os.path as osp
import xml.etree.ElementTree as ET

import mmcv
import numpy as np
import torch
import torch.nn.functional as F
import tqdm
from mmcv.parallel import DataContainer as DC
from torch.utils.data import Dataset
from torch.utils.data.dataloader import default_collate

from .transforms import (ImageTransform, BboxTransform, MaskTransform,
                         SegMapTransform, Numpy2Tensor)
from .utils import to_tensor, random_scale
from .extra_aug import ExtraAugmentation
class Bbox(object):
    """A single bounding-box annotation attached to one video frame.

    Coordinates are rounded to 2 decimals on construction.  An all-``None``
    box (see :meth:`is_none`) is a placeholder marking a frame that carries
    no annotation.
    """

    def __init__(self, x1, x2, y1, y2, tag, frame):
        if x1 is not None:
            self.x1 = round(x1, 2)
            self.x2 = round(x2, 2)
            self.y1 = round(y1, 2)
            self.y2 = round(y2, 2)
        else:
            # Placeholder box: frame exists but has no annotation.
            self.x1 = x1
            self.x2 = x2
            self.y1 = y1
            self.y2 = y2
        self.tag = tag
        self.frame = frame
        assert isinstance(frame, int) and frame >= 0

    def get_box(self):
        """Return the box as an ``(x1, y1, x2, y2)`` tuple."""
        return (self.x1, self.y1, self.x2, self.y2)

    def get_frame(self):
        """Return the frame index this box belongs to."""
        return self.frame

    def get_tag(self):
        """Return the track tag (identity) of this box."""
        return self.tag

    def __repr__(self):
        if self.is_none():
            return 'frame:{},tag:{}'.format(self.frame, self.tag)
        return 'frame:{},tag:{},box:{}'.format(self.frame, self.tag, self.get_box())

    # __str__ used to duplicate __repr__ line for line; alias it instead.
    __str__ = __repr__

    def is_none(self):
        """True when any coordinate or the tag is missing (placeholder box)."""
        return any(v is None for v in (self.x1, self.x2, self.y1, self.y2, self.tag))
class CHIMP(Dataset):
CLASSES = ['Chimp']
def __init__(self,
ann_file,
img_prefix,
img_scale,
img_norm_cfg,
multiscale_mode='value',
size_divisor=None,
proposal_file=None,
num_max_proposals=1000,
flip_ratio=0,
with_mask=False,
with_crowd=False,
with_label=True,
with_semantic_seg=False,
seg_prefix=None,
seg_scale_factor=1,
extra_aug=None,
aug_prob=[],
aug_p=0,
resize_keep_ratio=True,
test_mode=False,
min_val=False,
min_seed=1011729,
snip_frame=8,
how_sparse=3,
debug=False,
repeat_mode=False):
self.cat2label = {cat: i + 1 for i, cat in enumerate(self.CLASSES)}
self.snip_frames=snip_frame
self.how_sparse=how_sparse
self.test_mode = test_mode
self.min_val=min_val
self.repeat_mode=repeat_mode
self.ann_file=ann_file
if repeat_mode:
assert self.snip_frames>2 and self.snip_frames%2==1, 'snip frame should be odd number and larger than 2'
self.debug=debug
if self.test_mode:
self.how_sparse=1
else:
self.min_val=False
############################################
self.img_prefix = img_prefix
self.img_infos,self.box_infos = self.load_annotations(ann_file)
if self.min_val:
np.random.seed(min_seed)
np.random.shuffle(self.img_infos)
self.img_infos=self.img_infos[::1]
if self.debug:
np.random.shuffle(self.img_infos)
self.img_infos=self.img_infos[:16] # for testing
if proposal_file is not None:
self.proposals = self.load_proposals(proposal_file)
else:
self.proposals = None
# filter images with no annotation during training
if not test_mode:
valid_inds = self._filter_imgs()
self.img_infos = [self.img_infos[i] for i in valid_inds]
if self.proposals is not None:
self.proposals = [self.proposals[i] for i in valid_inds]
# (long_edge, short_edge) or [(long1, short1), (long2, short2), ...]
self.img_scales = img_scale if isinstance(img_scale,
list) else [img_scale]
assert mmcv.is_list_of(self.img_scales, tuple)
# normalization configs
self.img_norm_cfg = img_norm_cfg
# multi-scale mode (only applicable for multi-scale training)
self.multiscale_mode = multiscale_mode
assert multiscale_mode in ['value', 'range']
# max proposals per image
self.num_max_proposals = num_max_proposals
# flip ratio
self.flip_ratio = flip_ratio
assert flip_ratio >= 0 and flip_ratio <= 1
# padding border to ensure the image size can be divided by
# size_divisor (used for FPN)
self.size_divisor = size_divisor
# with mask or not (reserved field, takes no effect)
self.with_mask = with_mask
# some datasets provide bbox annotations as ignore/crowd/difficult,
# if `with_crowd` is True, then these info is returned.
self.with_crowd = with_crowd
# with label is False for RPN
self.with_label = with_label
# with semantic segmentation (stuff) annotation or not
self.with_seg = with_semantic_seg
# prefix of semantic segmentation map path
self.seg_prefix = seg_prefix
# rescale factor for segmentation maps
self.seg_scale_factor = seg_scale_factor
# in test mode or not
# set group flag for the sampler
if not self.test_mode:
self._set_group_flag()
# transforms
self.img_transform = ImageTransform(
size_divisor=self.size_divisor, **self.img_norm_cfg)
self.bbox_transform = BboxTransform()
self.mask_transform = MaskTransform()
self.seg_transform = SegMapTransform(self.size_divisor)
self.numpy2tensor = Numpy2Tensor()
# if use extra augmentation
self.aug_p=aug_p
if extra_aug is not None:
self.extra_aug = ExtraAugmentation(**extra_aug)
self.aug_prob=aug_prob
else:
self.extra_aug = None
# image rescale if keep ratio
self.resize_keep_ratio = resize_keep_ratio
    def __len__(self):
        """Number of snippet candidates in the dataset."""
        return len(self.img_infos)
def _rand_another(self, idx):
pool = np.where(self.flag == self.flag[idx])[0]
return np.random.choice(pool)
    def __getitem__(self, idx):
        """Return a test sample directly; in train mode keep resampling
        indices from the same flag group until ``prepare_train_img`` yields a
        usable sample (it returns ``None`` when a snippet has no valid gt)."""
        if self.test_mode:
            return self.prepare_test_img(idx)
        while True:
            data = self.prepare_train_img(idx)
            if data is None:
                idx = self._rand_another(idx)
                continue
            return data
    def load_proposals(self, proposal_file):
        """Load pre-computed proposals from ``proposal_file`` via ``mmcv.load``."""
        return mmcv.load(proposal_file)
def all_bbox_to_tubes(self,all_bbox):
tube_names=set(i.get_tag() for i in all_bbox)
tubes={}
for tube_name in tube_names:
tubes[tube_name]=[]
for bbox in all_bbox:
tubes[bbox.get_tag()].append(bbox)
return tubes
def get_cands(self,l):
p=0
f=0
l=sorted(l)
a=[]
for i,j in enumerate(l[:-1]):
if j+1==l[i+1]:
continue
else:
p=j
f=l[i+1]
if (f-p)<20 and (f-p)>0:
a.append((p,f))
return a
def get_bbox(self,f,tubes):
for i in tubes:
if i.get_frame()==f:
return i
def interplote(self,bbox1,bbox2):
assert bbox2.get_frame()-bbox1.get_frame()>1
assert bbox2.get_tag()==bbox1.get_tag()
f1=bbox1.get_frame()
f2=bbox2.get_frame()
inter_num=f2-f1-1
assert inter_num>0
box1=bbox1.get_box()
box2=bbox2.get_box()
diff=[None,None,None,None]
each_delta=[None,None,None,None]
for i in range(4):
diff[i]=box2[i]-box1[i]
each_delta[i]=diff[i]/(1+inter_num)
inter_boxes=[]
for j in range(inter_num):
box=[None,None,None,None]
for i in range(4):
box[i]=box1[i]+each_delta[i]*(j+1)
f=f1+j+1
bbox=Bbox(x1=box[0],y1=box[1],x2=box[2],y2=box[3],tag=bbox1.get_tag(),frame=f)
inter_boxes.append(bbox)
return inter_boxes
def tubes2bboxes(self,tubes):
out={}
for tube in tubes:
if tube is not None:
for bbox in tubes[tube]:
f=bbox.get_frame()
if f not in out:
out[f]=[]
out[f].append(bbox)
else:
out[f].append(bbox)
return out
def is_continuous(self,l):
l=sorted(l)
return len(l)==(max(l)-min(l)+1)
def process_imgid(self,frame_id,basename,shape):
filename =basename+'_frame_{}'.format(frame_id)+'.jpg'
return dict(id=frame_id, filename=filename, video_prefix=basename,width=shape[1],height=shape[0])
    def get_snippet(self, basename, len_of_video, num_snippets,f_num,shape):
        '''Group snippets.

        Splits the video's annotated frames into ``num_snippets`` windows of
        ``snip_frames`` frames sampled every ``how_sparse`` frames; each
        window yields ``how_sparse`` candidates, one per temporal offset.
        Returns:
            grouped_snippet_frame: (list) [[cand1,cand2,...],.....] cand:[filename]*#snip_frames
            grouped_snippet_label: (list) [[cand1,cand2,...],....] cand:[filename]*#num_snip_frames
        '''
        def index2meta(cand,basename,f_num,shape):
            # Map positional indices to per-frame metadata dicts (real frame
            # id from f_num plus the derived filename).
            video_data_cand=[]
            for each in cand:
                frame_id=f_num[each]
                filename =basename+'_frame_{}'.format(frame_id)+'.jpg'
                video_data_cand.append(dict(id=frame_id, filename=filename, video_prefix=basename,width=shape[1],height=shape[0]))
            return video_data_cand
        frames=[i for i in range(len_of_video)]
        grouped_snippet_frame=[]
        for i in range(num_snippets):
            cands=[]
            for j in range(self.how_sparse):
                # Every how_sparse-th frame at offset j within window i.
                cand=frames[j+i*self.snip_frames*self.how_sparse:j+(i+1)*self.how_sparse*self.snip_frames:self.how_sparse]
                if len(cand)!=self.snip_frames and i!=0:
                    # Tail window came up short: shift the window back so it
                    # still covers snip_frames frames.
                    diff=self.snip_frames-len(cand)
                    cand=frames[j+i*self.snip_frames*self.how_sparse-self.how_sparse*diff:j+(i+1)*self.how_sparse*self.snip_frames-self.how_sparse*diff:self.how_sparse]
                if len(cand)!=self.snip_frames:
                    # Still short (very short video): pad by repeating the
                    # last frame index.
                    diff=self.snip_frames-len(cand)
                    cand=cand+[frames[-1]]*diff
                cand=index2meta(cand,basename,f_num,shape)
                cands.append(cand)
            grouped_snippet_frame.append(cands)
        return grouped_snippet_frame
    def load_annotations(self, ann_file):
        """Scan ``ann_file`` for per-video frame folders and JSON box
        annotations, interpolate short gaps inside each track, and group the
        frames into snippet candidates.

        Returns:
            tuple: (snip_cand_infos, box_infos) where snip_cand_infos is a
            list of snippet candidates and box_infos maps video basename ->
            {frame_id: [Bbox, ...]}.
        """
        sort_=lambda x:int(x.split('.')[0])  # NOTE(review): unused
        sort_bboxes=lambda b :sorted(b,key=lambda x:x.get_frame())
        snip_cand_infos = []
        box_infos={}
        videos= os.listdir(ann_file)
        videos=[i for i in videos if 'output' in i]
        videos=sorted(videos)
        num_videos=len(videos)
        #split train and val with random seed
        # Seed 10 makes the shuffle (and hence the split) reproducible; the
        # global RNG is re-seeded from entropy right after.
        a=np.arange(len(videos))
        np.random.seed(10)
        np.random.shuffle(a)
        np.random.seed()
        val_index=a[:int(0.1*num_videos)]
        train_index=a[int(0.1*num_videos):]
        train_videos=[videos[i] for i in train_index]
        val_videos=[videos[i] for i in val_index]
        if self.test_mode:
            target_videos=val_videos
        else:
            target_videos=train_videos
        damage_videos=0
        print('Loading to memory ...')
        for video in tqdm.tqdm(target_videos): # video folder named '<basename>_output'
            video_prefix=os.path.join(ann_file,video)
            basename=video.split('_')[0]
            annot_path=os.path.join(ann_file,basename+'.mp4.json')
            assert os.path.isfile(annot_path),'error annot path : '+annot_path
            assert 'JPEGImages' in os.listdir(video_prefix),'no jpeg folder : {}'.format(basename)
            frames= os.listdir(os.path.join(video_prefix,'JPEGImages'))
            frames=sorted(frames,key=lambda x:int(x.split('_')[-1].split('.')[0])) # start with frame 1
            frames_num=[int(i.split('_')[-1].split('.')[0]) for i in frames]  # NOTE(review): unused
            # interpolation
            with open(annot_path) as file:
                data=json.load(file)
            all_bboxes=[]
            # Collect every annotated box; empty frames become placeholder
            # Bbox objects (all-None coordinates).
            for f in data['frames']:
                list_bboxes=data['frames'][f]
                if not len(list_bboxes)==0: # if this frame is not empty
                    for each_box in list_bboxes:
                        tag=each_box['tags'][0]
                        x1=each_box['box']['x1']
                        x2=each_box['box']['x2']
                        y1=each_box['box']['y1']
                        y2=each_box['box']['y2']
                        orig_h=each_box['height']
                        orig_w=each_box['width']
                        bbox=Bbox(x1,x2,y1,y2,tag,int(f))
                        all_bboxes.append(bbox)
                else:
                    bbox=Bbox(x1=None,x2=None,y1=None,y2=None,tag=None,frame=int(f))
                    all_bboxes.append(bbox)
            # NOTE(review): orig_h/orig_w stay unbound if a video has only
            # empty frames — presumably never happens; verify.
            tubes=self.all_bbox_to_tubes(all_bboxes)
            # Fill short gaps (< 20 frames) inside each track by linear
            # interpolation, then sort each tube by frame index.
            for tube in tubes:
                if tube is None:
                    continue
                bboxes=tubes[tube]
                fs=sorted([i.get_frame() for i in bboxes])
                pairs=self.get_cands(fs)
                for pair in pairs:
                    bbox1=self.get_bbox(pair[0],bboxes)
                    bbox2=self.get_bbox(pair[1],bboxes)
                    tubes[tube]+=self.interplote(bbox1,bbox2)
                tubes[tube]=sort_bboxes(tubes[tube])
            dic_bboxes=self.tubes2bboxes(tubes)
            box_infos[f'{basename}']=dic_bboxes
            # if not self.is_continuous(list(dic_bboxes.keys())):
            #     print('not continus:', list(dic_bboxes.keys())) # make sure it is continuous
            f_num=sorted(list(dic_bboxes.keys()))
            len_video=len(f_num)
            if not self.repeat_mode:
                # Windowed snippets: keep a partial tail window only if it
                # covers more than 40% of a full window.
                num_snippets, remain =divmod(len_video,self.snip_frames*self.how_sparse)
                if remain/(self.snip_frames*self.how_sparse)>0.4:
                    num_snippets+=1
                # exclude the no one
                if num_snippets==0:
                    damage_videos+=1
                    continue
                cand_snippets=self.get_snippet(basename,len_video,num_snippets,f_num,(orig_h,orig_w))
            else:
                # Repeat mode: one snippet centred on every segment, with
                # mirror-padding at the head and tail.
                num_seg, remain =divmod(len_video,self.how_sparse)
                if remain/self.how_sparse>0.6:
                    num_seg+=1
                    f_num=f_num+[f_num[-1]]*(self.how_sparse-remain) # make img_ids can be divided by how_sparse
                if num_seg==0:
                    damage_videos+=1
                    continue
                if len(f_num)<self.how_sparse*self.snip_frames:
                    damage_videos+=1
                    continue
                grouped=[]
                for i in range(num_seg):
                    togroup=f_num[i*self.how_sparse:(i+1)*self.how_sparse]
                    togroup=[self.process_imgid(i,basename,(orig_h,orig_w)) for i in togroup]
                    grouped.append(togroup)
                if num_seg < self.snip_frames:
                    damage_videos+=1
                    continue
                assert num_seg>=self.snip_frames,'num seg not enough got {}'.format(num_seg)
                cand_snippets=[]
                radius=(self.snip_frames-1)//2
                for i in range(num_seg):
                    region = min(i,num_seg-i-1)
                    if region < radius: #head and tail frame need special
                        head = True if i< num_seg-i-1 else False
                        diff=radius-region
                        if head:
                            # Mirror segments before the start of the video.
                            cand_snippet=grouped[diff:0:-1]+grouped[:i+radius+1]
                        else:
                            # Mirror segments past the end of the video.
                            cand_snippet=grouped[i-radius:]+grouped[-2:-2-diff:-1]
                        assert len(cand_snippet) == self.snip_frames, 'cand_snip in head tail special region fail'
                    else:
                        cand_snippet=grouped[i-radius:i+radius+1]
                        assert len(cand_snippet) == self.snip_frames, '333cand_snip in head tail'
                    cand_snippet=list(zip(*cand_snippet))
                    assert len(cand_snippet[0]) == self.snip_frames, '222cand_snip in head tail special region fail {} vs {}'.format(len(cand_snippet[0]),self.snip_frames)
                    cand_snippets.append(cand_snippet)
            snip_cand_infos+=cand_snippets
        print('damage videos',damage_videos)
        return snip_cand_infos,box_infos
    def get_ann_info(self, idx):
        """Randomly pick one of the ``how_sparse`` temporal-offset candidates
        of snippet ``idx`` and build per-frame annotation dicts for it.

        Returns:
            tuple: (anns, rand_num) — one ann dict per snippet frame and the
            chosen candidate index.
        """
        assert len(self.img_infos[0]) == self.how_sparse
        rand_num=np.random.randint(0,len(self.img_infos[0]))
        #print(rand_num,self.img_infos[idx][rand_num][0]['video_prefix']+':'+self.img_infos[idx][rand_num][0]['id'])
        anns=[]
        for i in range(self.snip_frames):
            img_id = self.img_infos[idx][rand_num][i]['id']
            img_name= self.img_infos[idx][rand_num][i]['filename']  # NOTE(review): unused
            basename=self.img_infos[idx][rand_num][i]['video_prefix']
            bboxes = []
            labels = []
            # bboxes_ignore/labels_ignore are never populated here — they
            # always end up as empty arrays.
            bboxes_ignore = []
            labels_ignore = []
            list_of_boxes=self.box_infos[basename][img_id]
            for box in list_of_boxes:
                if box.is_none():
                    continue
                else:
                    xmin,ymin,xmax,ymax=box.get_box()
                    bbox=[xmin,ymin,xmax,ymax]
                    bboxes.append(bbox)
                    # Single foreground class ('Chimp') -> label 1.
                    labels.append(1)
            if not bboxes:
                bboxes = np.zeros((0, 4))
                labels = np.zeros((0, ))
            else:
                # -1: convert 1-based (VOC-style) coordinates to 0-based —
                # presumably; verify against the annotation convention.
                bboxes = np.array(bboxes, ndmin=2) - 1
                labels = np.array(labels)
            if not bboxes_ignore:
                bboxes_ignore = np.zeros((0, 4))
                labels_ignore = np.zeros((0, ))
            else:
                bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1
                labels_ignore = np.array(labels_ignore)
            ann = dict(
                bboxes=bboxes.astype(np.float32),
                labels=labels.astype(np.int64),
                bboxes_ignore=bboxes_ignore.astype(np.float32),
                labels_ignore=labels_ignore.astype(np.int64))
            anns.append(ann)
        return anns,rand_num
def _filter_imgs(self, min_size=32):
"""Filter images too small."""
valid_inds = list(range(len(self.img_infos)))
return valid_inds
def _set_group_flag(self):
"""Set flag according to image aspect ratio.
Images with aspect ratio greater than 1 will be set as group 1,
otherwise group 0.
"""
self.flag = np.zeros(len(self), dtype=np.uint8)
for i in range(len(self)):
img_info = self.img_infos[i]
img_info=img_info[0][0]
if img_info['width'] / img_info['height'] > 1:
self.flag[i] = 1
    def prepare_train_img(self, idx):
        """Assemble one training sample (images, gt boxes/labels, meta) for
        snippet ``idx``, or return ``None`` when any frame lacks a valid gt
        box (the caller resamples another index)."""
        anns,rand_num = self.get_ann_info(idx)
        #print('rand_num',rand_num)
        img_info = self.img_infos[idx][rand_num]
        # load image
        imgs=[]
        #infos=[]
        for each_img_info in img_info:
            img = mmcv.imread(osp.join(self.ann_file,each_img_info['video_prefix']+'_output','JPEGImages' ,each_img_info['filename']))
            imgs.append(img)
            #infos.append({'video_id':each_img_info['video_prefix'],'frame_id':each_img_info['filename']})
        # load proposals if necessary
        orig_h,orig_w,_=imgs[0].shape
        if self.proposals is not None:
            proposals = self.proposals[idx][:self.num_max_proposals]
            # TODO: Handle empty proposals properly. Currently images with
            # no proposals are just ignored, but they can be used for
            # training in concept.
            if len(proposals) == 0:
                return None
            if not (proposals.shape[1] == 4 or proposals.shape[1] == 5):
                raise AssertionError(
                    'proposals should have shapes (n, 4) or (n, 5), '
                    'but found {}'.format(proposals.shape))
            if proposals.shape[1] == 5:
                scores = proposals[:, 4, None]
                proposals = proposals[:, :4]
            else:
                scores = None
        gt_bboxes = []
        gt_labels = []
        for ann in anns:
            gt_bboxes.append(ann['bboxes'])
            gt_labels.append(ann['labels'])
        # skip the image if there is no valid gt bbox
        if any([len(i)==0 for i in gt_bboxes]):
            return None
        # extra augmentation
        np.random.seed()
        if self.extra_aug is not None and np.random.rand()<self.aug_p:
            # Same aug decisions/seeds for every frame so the snippet stays
            # temporally consistent.
            aug_status=[True if np.random.rand() < aug_pro else False for aug_pro in self.aug_prob]
            # NOTE(review): np.random.randint(1e16) passes a float bound —
            # deprecated/rejected by newer NumPy; should be int(1e16).
            seeds=[np.random.randint(1e16) for aug_pro in self.aug_prob]
            for i in range(len(imgs)):
                g_b=gt_bboxes[i].clip(0.)
                g_l=gt_labels[i]
                assert len(g_b)==len(g_l)
                aug_input={'image':imgs[i],'bboxes':g_b.tolist(),'category_id':g_l.tolist(),'each_force_apply':aug_status,'seeds':seeds}
                aug_out = self.extra_aug(aug_input)
                imgs[i]=aug_out['image'].copy()
                auged_box=aug_out['bboxes']
                auged_label=aug_out['category_id']
                auged_box=np.array(auged_box).astype(np.float32)
                auged_label=np.array(auged_label).astype(np.int64)
                gt_bboxes[i]=auged_box.copy()
                gt_labels[i]=auged_label.copy()
            if any([len(i)==0 for i in gt_bboxes]):
                return None
        # apply transforms
        flip = True if np.random.rand() < self.flip_ratio else False
        # randomly sample a scale
        img_scale = random_scale(self.img_scales, self.multiscale_mode)
        imgs_=[]
        gt_bboxes_=[]
        for i,img in enumerate(imgs):
            img, img_shape, pad_shape, scale_factor = self.img_transform(
                img, img_scale, flip, keep_ratio=self.resize_keep_ratio)
            img = img.copy()
            if self.proposals is not None:
                proposals = self.bbox_transform(proposals, img_shape, scale_factor,
                                                flip)
                proposals = np.hstack(
                    [proposals, scores]) if scores is not None else proposals
            gt_bbox = self.bbox_transform(gt_bboxes[i], img_shape, scale_factor,
                                           flip)
            gt_bbox=gt_bbox.copy()
            if self.with_crowd:
                # NOTE(review): gt_bboxes_ignore is never defined before this
                # use — with_crowd=True would raise NameError here; verify.
                gt_bboxes_ignore = self.bbox_transform(gt_bboxes_ignore, img_shape,
                                                       scale_factor, flip)
            if self.with_mask:
                # NOTE(review): ``ann`` is the leftover loop variable (last
                # frame's ann) and has no 'masks' key — with_mask=True would
                # raise; verify.
                gt_masks = self.mask_transform(ann['masks'], pad_shape,
                                               scale_factor, flip)
            imgs_.append(img)
            gt_bboxes_.append(gt_bbox)
        # Re-run the transform on the first frame only to recover the shared
        # shape/scale meta values.
        _, img_shape, pad_shape, scale_factor = self.img_transform(
            imgs[0], img_scale, flip, keep_ratio=self.resize_keep_ratio)
        ori_shape = (orig_h, orig_w, 3)
        img_meta = dict(
            ori_shape=ori_shape,
            img_shape=img_shape,
            pad_shape=pad_shape,
            scale_factor=scale_factor,
            flip=flip,
            videoframe=os.path.basename(img_info[0]['video_prefix'])+'+'+str([img_info[i]['id'] for i in range(len(imgs))]))
        data = dict(
            img=DC(to_tensor(imgs_), stack=True),
            img_meta=DC(img_meta, cpu_only=True),
            gt_bboxes=DC([to_tensor(gt_bbox) for gt_bbox in gt_bboxes_]))
        if self.proposals is not None:
            data['proposals'] = DC(to_tensor(proposals))
        if self.with_label:
            data['gt_labels'] = DC([to_tensor(gt_label) for gt_label in gt_labels])
        if self.with_crowd:
            data['gt_bboxes_ignore'] = DC(to_tensor(gt_bboxes_ignore))
        if self.with_mask:
            data['gt_masks'] = DC(gt_masks, cpu_only=True)
        if self.with_seg:
            data['gt_semantic_seg'] = DC(to_tensor(gt_seg), stack=True)
        return data
    def prepare_test_img(self, idx):
        """Prepare an image for testing (multi-scale and flipping).

        Uses the first (and, in test mode, only) temporal-offset candidate
        of snippet ``idx``; builds one entry per scale in ``img_scales``.
        """
        img_info = self.img_infos[idx][0]
        def prepare_single(imgs, scale, flip, orig_shape,proposal=None):
            # Transform every frame of the snippet at the given scale/flip
            # and build the shared meta dict.
            _imgs=[]
            for img in imgs:
                _img, img_shape, pad_shape, scale_factor = self.img_transform(
                    img, scale, flip, keep_ratio=self.resize_keep_ratio)
                _imgs.append(_img)
            frame_ids = [int(i['id']) for i in img_info]
            if self.repeat_mode:
                # Repeat mode evaluates only the centre frame of the snippet.
                frame_ids = frame_ids[len(imgs)//2:len(imgs)//2+1]
            if proposal is not None:
                if proposal.shape[1] == 5:
                    score = proposal[:, 4, None]
                    proposal = proposal[:, :4]
                else:
                    score = None
                _proposal = self.bbox_transform(proposal, img_shape,
                                                scale_factor, flip)
                _proposal = np.hstack(
                    [_proposal, score]) if score is not None else _proposal
                _proposal = to_tensor(_proposal)
            else:
                _proposal = None
            _img_meta = dict(
                ori_shape=(orig_shape[0], orig_shape[1], 3),
                img_shape=img_shape,
                pad_shape=pad_shape,
                scale_factor=scale_factor,
                flip=flip,
                #meta about val frame id
                frame_ids=frame_ids
                )
            return to_tensor(_imgs), _img_meta, _proposal
        imgs=[]
        for each_img_info in img_info:
            img = mmcv.imread(osp.join(self.ann_file,each_img_info['video_prefix']+'_output','JPEGImages', each_img_info['filename']))
            imgs.append(img)
        orig_h,orig_w,_=imgs[0].shape
        if self.proposals is not None:
            proposal = self.proposals[idx][:self.num_max_proposals]
            if not (proposal.shape[1] == 4 or proposal.shape[1] == 5):
                raise AssertionError(
                    'proposals should have shapes (n, 4) or (n, 5), '
                    'but found {}'.format(proposal.shape))
        else:
            proposal = None
        images=[]
        img_metas = []
        proposals = []
        for scale in self.img_scales:
            _imgs, _img_meta, _proposal = prepare_single(
                imgs, scale, False, (orig_h,orig_w),proposal)
            images.append(_imgs)
            img_metas.append(DC(_img_meta, cpu_only=True))
            proposals.append(_proposal)
        if self.flip_ratio > 0:
            # NOTE(review): this flip branch looks broken — it passes the
            # single last ``img`` (not ``imgs``), reuses ``scale`` left over
            # from the loop, and appends to ``imgs`` instead of ``images``;
            # verify before enabling flip at test time.
            _img, _img_meta, _proposal = prepare_single(
                img, scale, True,(orig_h,orig_w),proposal)
            imgs.append(_img)
            img_metas.append(DC(_img_meta, cpu_only=True))
            proposals.append(_proposal)
        data = dict(img=images, img_meta=img_metas)
        if self.proposals is not None:
            data['proposals'] = proposals
        return data
def collate_fn(self, batch, samples_per_gpu=1):
"""Puts each data field into a tensor/DataContainer with outer dimension
batch size.
Extend default_collate to add support for
:type:`~mmcv.parallel.DataContainer`. There are 3 cases.
1. cpu_only = True, e.g., meta data
2. cpu_only = False, stack = True, e.g., images tensors
3. cpu_only = False, stack = False, e.g., gt bboxes
"""
if not isinstance(batch, collections.Sequence):
raise TypeError("{} is not supported.".format(batch.dtype))
if isinstance(batch[0], DC):
assert len(batch) % samples_per_gpu == 0
stacked = []
if batch[0].cpu_only:
for i in range(0, len(batch), samples_per_gpu):
stacked.append(
[sample.data for sample in batch[i:i + samples_per_gpu]])
return DC(
stacked, batch[0].stack, batch[0].padding_value, cpu_only=True)
elif batch[0].stack:
for i in range(0, len(batch), samples_per_gpu):
assert isinstance(batch[i].data, torch.Tensor)
# TODO: handle tensors other than 3d
assert batch[i].dim() == 4
s,c, h, w = batch[i].size()
for sample in batch[i:i + samples_per_gpu]:
assert s == sample.size(0)
h = max(h, sample.size(2))
w = max(w, sample.size(3))
padded_samples = [
F.pad(
sample.data,
(0, w - sample.size(3), 0, h - sample.size(2)),
value=sample.padding_value)
for sample in batch[i:i + samples_per_gpu]
]
stacked.append(default_collate(padded_samples))
else:
for i in range(0, len(batch), samples_per_gpu):
stacked.append(
[sample.data for sample in batch[i:i + samples_per_gpu]])
return DC(stacked, batch[0].stack, batch[0].padding_value)
elif isinstance(batch[0], collections.Sequence):
transposed = zip(*batch)
return [self.collate_fn(samples, samples_per_gpu) for samples in transposed]
elif isinstance(batch[0], collections.Mapping):
return {
key: self.collate_fn([d[key] for d in batch], samples_per_gpu)
for key in batch[0]
}
else:
return default_collate(batch)
| [
"numpy.random.seed",
"os.path.isfile",
"numpy.random.randint",
"mmcv.parallel.DataContainer",
"os.path.join",
"numpy.random.choice",
"numpy.random.shuffle",
"tqdm.tqdm",
"torch.utils.data.dataloader.default_collate",
"mmcv.is_list_of",
"os.path.basename",
"numpy.hstack",
"os.listdir",
"jso... | [((4454, 4493), 'mmcv.is_list_of', 'mmcv.is_list_of', (['self.img_scales', 'tuple'], {}), '(self.img_scales, tuple)\n', (4469, 4493), False, 'import mmcv\n'), ((6677, 6699), 'numpy.random.choice', 'np.random.choice', (['pool'], {}), '(pool)\n', (6693, 6699), True, 'import numpy as np\n'), ((7056, 7080), 'mmcv.load', 'mmcv.load', (['proposal_file'], {}), '(proposal_file)\n', (7065, 7080), False, 'import mmcv\n'), ((11214, 11234), 'os.listdir', 'os.listdir', (['ann_file'], {}), '(ann_file)\n', (11224, 11234), False, 'import os\n'), ((11435, 11453), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (11449, 11453), True, 'import numpy as np\n'), ((11462, 11482), 'numpy.random.shuffle', 'np.random.shuffle', (['a'], {}), '(a)\n', (11479, 11482), True, 'import numpy as np\n'), ((11491, 11507), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (11505, 11507), True, 'import numpy as np\n'), ((11900, 11924), 'tqdm.tqdm', 'tqdm.tqdm', (['target_videos'], {}), '(target_videos)\n', (11909, 11924), False, 'import tqdm\n'), ((21260, 21276), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (21274, 21276), True, 'import numpy as np\n'), ((3513, 3537), 'numpy.random.seed', 'np.random.seed', (['min_seed'], {}), '(min_seed)\n', (3527, 3537), True, 'import numpy as np\n'), ((3550, 3583), 'numpy.random.shuffle', 'np.random.shuffle', (['self.img_infos'], {}), '(self.img_infos)\n', (3567, 3583), True, 'import numpy as np\n'), ((3666, 3699), 'numpy.random.shuffle', 'np.random.shuffle', (['self.img_infos'], {}), '(self.img_infos)\n', (3683, 3699), True, 'import numpy as np\n'), ((6621, 6658), 'numpy.where', 'np.where', (['(self.flag == self.flag[idx])'], {}), '(self.flag == self.flag[idx])\n', (6629, 6658), True, 'import numpy as np\n'), ((11985, 12014), 'os.path.join', 'os.path.join', (['ann_file', 'video'], {}), '(ann_file, video)\n', (11997, 12014), False, 'import os\n'), ((12078, 12124), 'os.path.join', 'os.path.join', (['ann_file', "(basename + 
'.mp4.json')"], {}), "(ann_file, basename + '.mp4.json')\n", (12090, 12124), False, 'import os\n'), ((12141, 12167), 'os.path.isfile', 'os.path.isfile', (['annot_path'], {}), '(annot_path)\n', (12155, 12167), False, 'import os\n'), ((24720, 24747), 'mmcv.parallel.DataContainer', 'DC', (['gt_masks'], {'cpu_only': '(True)'}), '(gt_masks, cpu_only=True)\n', (24722, 24747), True, 'from mmcv.parallel import DataContainer as DC\n'), ((30078, 30129), 'mmcv.parallel.DataContainer', 'DC', (['stacked', 'batch[0].stack', 'batch[0].padding_value'], {}), '(stacked, batch[0].stack, batch[0].padding_value)\n', (30080, 30129), True, 'from mmcv.parallel import DataContainer as DC\n'), ((12236, 12260), 'os.listdir', 'os.listdir', (['video_prefix'], {}), '(video_prefix)\n', (12246, 12260), False, 'import os\n'), ((12331, 12371), 'os.path.join', 'os.path.join', (['video_prefix', '"""JPEGImages"""'], {}), "(video_prefix, 'JPEGImages')\n", (12343, 12371), False, 'import os\n'), ((12645, 12660), 'json.load', 'json.load', (['file'], {}), '(file)\n', (12654, 12660), False, 'import json\n'), ((18183, 18199), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {}), '((0, 4))\n', (18191, 18199), True, 'import numpy as np\n'), ((18225, 18239), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (18233, 18239), True, 'import numpy as np\n'), ((18339, 18355), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (18347, 18355), True, 'import numpy as np\n'), ((18422, 18438), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {}), '((0, 4))\n', (18430, 18438), True, 'import numpy as np\n'), ((18471, 18485), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (18479, 18485), True, 'import numpy as np\n'), ((18606, 18629), 'numpy.array', 'np.array', (['labels_ignore'], {}), '(labels_ignore)\n', (18614, 18629), True, 'import numpy as np\n'), ((19846, 19957), 'os.path.join', 'osp.join', (['self.ann_file', "(each_img_info['video_prefix'] + '_output')", '"""JPEGImages"""', "each_img_info['filename']"], {}), 
"(self.ann_file, each_img_info['video_prefix'] + '_output',\n 'JPEGImages', each_img_info['filename'])\n", (19854, 19957), True, 'import os.path as osp\n'), ((21319, 21335), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (21333, 21335), True, 'import numpy as np\n'), ((21467, 21491), 'numpy.random.randint', 'np.random.randint', (['(1e+16)'], {}), '(1e+16)\n', (21484, 21491), True, 'import numpy as np\n'), ((22369, 22385), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (22383, 22385), True, 'import numpy as np\n'), ((24252, 24279), 'mmcv.parallel.DataContainer', 'DC', (['img_meta'], {'cpu_only': '(True)'}), '(img_meta, cpu_only=True)\n', (24254, 24279), True, 'from mmcv.parallel import DataContainer as DC\n'), ((26539, 26650), 'os.path.join', 'osp.join', (['self.ann_file', "(each_img_info['video_prefix'] + '_output')", '"""JPEGImages"""', "each_img_info['filename']"], {}), "(self.ann_file, each_img_info['video_prefix'] + '_output',\n 'JPEGImages', each_img_info['filename'])\n", (26547, 26650), True, 'import os.path as osp\n'), ((27382, 27410), 'mmcv.parallel.DataContainer', 'DC', (['_img_meta'], {'cpu_only': '(True)'}), '(_img_meta, cpu_only=True)\n', (27384, 27410), True, 'from mmcv.parallel import DataContainer as DC\n'), ((28838, 28904), 'mmcv.parallel.DataContainer', 'DC', (['stacked', 'batch[0].stack', 'batch[0].padding_value'], {'cpu_only': '(True)'}), '(stacked, batch[0].stack, batch[0].padding_value, cpu_only=True)\n', (28840, 28904), True, 'from mmcv.parallel import DataContainer as DC\n'), ((18284, 18309), 'numpy.array', 'np.array', (['bboxes'], {'ndmin': '(2)'}), '(bboxes, ndmin=2)\n', (18292, 18309), True, 'import numpy as np\n'), ((18537, 18569), 'numpy.array', 'np.array', (['bboxes_ignore'], {'ndmin': '(2)'}), '(bboxes_ignore, ndmin=2)\n', (18545, 18569), True, 'import numpy as np\n'), ((22983, 23013), 'numpy.hstack', 'np.hstack', (['[proposals, scores]'], {}), '([proposals, scores])\n', (22992, 23013), True, 'import numpy as 
np\n'), ((25879, 25908), 'numpy.hstack', 'np.hstack', (['[_proposal, score]'], {}), '([_proposal, score])\n', (25888, 25908), True, 'import numpy as np\n'), ((27679, 27707), 'mmcv.parallel.DataContainer', 'DC', (['_img_meta'], {'cpu_only': '(True)'}), '(_img_meta, cpu_only=True)\n', (27681, 27707), True, 'from mmcv.parallel import DataContainer as DC\n'), ((30552, 30574), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['batch'], {}), '(batch)\n', (30567, 30574), False, 'from torch.utils.data.dataloader import default_collate\n'), ((21380, 21396), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (21394, 21396), True, 'import numpy as np\n'), ((22037, 22056), 'numpy.array', 'np.array', (['auged_box'], {}), '(auged_box)\n', (22045, 22056), True, 'import numpy as np\n'), ((22104, 22125), 'numpy.array', 'np.array', (['auged_label'], {}), '(auged_label)\n', (22112, 22125), True, 'import numpy as np\n'), ((24057, 24102), 'os.path.basename', 'os.path.basename', (["img_info[0]['video_prefix']"], {}), "(img_info[0]['video_prefix'])\n", (24073, 24102), False, 'import os\n'), ((29826, 29857), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['padded_samples'], {}), '(padded_samples)\n', (29841, 29857), False, 'from torch.utils.data.dataloader import default_collate\n')] |
# Plotting: exploratory visualisation of the "Give Me Some Credit"
# (cs-training.csv) dataset -- outlier cleaning, log transforms, binning,
# and a correlation heatmap.  Every figure is written to a numbered PNG.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.transforms as trs
import math
import seaborn
# Load the training data (first CSV column is the row index) and drop
# exact duplicate rows in place.
data = pd.read_csv('cs-training.csv', index_col=0)
data.drop_duplicates(inplace=True)
# Boxplot of the three past-due counters before outlier cleaning.
data.boxplot(column=['NumberOfTime30-59DaysPastDueNotWorse',
             'NumberOfTime60-89DaysPastDueNotWorse', 'NumberOfTimes90DaysLate'])
plt.title('Before Cleaning', fontproperties='Times New Roman', size=16)
plt.xticks([1, 2, 3], ['30-59Days', '60-89Days', '90Days'],
           fontproperties='Times New Roman', size=14)
plt.yticks(fontproperties='Times New Roman', size=14)
plt.savefig('001_NumberOfTime_before_cleaning.png', dpi=1200)
plt.close()
# Keep only rows whose past-due counters are below 80 (the larger values
# are presumably sentinel/data-entry outliers -- TODO confirm against the
# competition's data dictionary).
data = data[(data['NumberOfTime30-59DaysPastDueNotWorse'] < 80) &
            (data['NumberOfTime60-89DaysPastDueNotWorse'] < 80) & (data['NumberOfTimes90DaysLate'] < 80)]
# Same boxplot after cleaning, for side-by-side comparison.
data.boxplot(column=['NumberOfTime30-59DaysPastDueNotWorse',
             'NumberOfTime60-89DaysPastDueNotWorse', 'NumberOfTimes90DaysLate'])
plt.title('After Cleaning', fontproperties='Times New Roman', size=16)
plt.xticks([1, 2, 3], ['30-59Days', '60-89Days', '90Days'],
           fontproperties='Times New Roman', size=14)
plt.yticks(fontproperties='Times New Roman', size=14)
plt.savefig('002_NumberOfTime_after_cleaning.png', dpi=1200)
plt.close()
# Age: boxplot, then drop the impossible age == 0 rows, then a histogram
# with one bin per year of age.
data.boxplot(column='age', figsize=(5, 5))
plt.title('Age', fontproperties='Times New Roman', size=16)
plt.xticks(fontproperties='Times New Roman', size=14)
plt.yticks(fontproperties='Times New Roman', size=14)
plt.savefig('003_Age_box.png', dpi=1200)
plt.close()
data = data[data['age'] != 0]
data.hist(column='age', bins=data['age'].max()-data['age'].min())
plt.title('Age', fontproperties='Times New Roman', size=16)
plt.xticks(fontproperties='Times New Roman', size=14)
plt.yticks(fontproperties='Times New Roman', size=14)
plt.savefig('004_Age_hist.png', dpi=1200)
plt.close()
# Log-transform DebtRatio (zero maps to NaN since log(0) is undefined) and
# draw the original vs. logarithmic boxplots side by side.
data['LogDebtRatio'] = data['DebtRatio'].apply(
    lambda x: math.log(x) if x else np.nan)
plt.subplots(figsize=(8, 5))
plt.subplot(121)
data.boxplot(column='DebtRatio')
plt.xticks([1], ['Original'], fontproperties='Times New Roman', size=14)
plt.yticks(fontproperties='Times New Roman', size=14)
plt.subplot(122)
data.boxplot(column='LogDebtRatio')
plt.xticks([1], ['Logarithmic'], fontproperties='Times New Roman', size=14)
plt.yticks(fontproperties='Times New Roman', size=14)
plt.suptitle('Before and After Logarithmic Comparison\n(DebtRatio)',
             fontproperties='Times New Roman', size=16)
plt.savefig('005_DebtRatio_box_Logarithmic_Comparison.png', dpi=1200)
plt.close()
# Same original-vs-log comparison for RevolvingUtilizationOfUnsecuredLines.
data['LogRevolvingUtilizationOfUnsecuredLines'] = data['RevolvingUtilizationOfUnsecuredLines'].apply(
    lambda x: math.log(x) if x else np.nan)
plt.subplots(figsize=(8, 5))
plt.subplot(121)
data.boxplot(column='RevolvingUtilizationOfUnsecuredLines')
plt.xticks([1], ['Original'], fontproperties='Times New Roman', size=14)
plt.yticks(fontproperties='Times New Roman', size=14)
plt.subplot(122)
data.boxplot(column='LogRevolvingUtilizationOfUnsecuredLines')
plt.xticks([1], ['Logarithmic'], fontproperties='Times New Roman', size=14)
plt.yticks(fontproperties='Times New Roman', size=14)
plt.suptitle('Before and After Logarithmic Comparison\n(RevolvingUtilizationOfUnsecuredLines)',
             fontproperties='Times New Roman', size=16)
plt.savefig('006_RUOUL_box_Logarithmic_Comparison.png', dpi=1200)
plt.close()
# Same original-vs-log comparison for MonthlyIncome.
data['LogMonthlyIncome'] = data['MonthlyIncome'].apply(
    lambda x: math.log(x) if x else np.nan)
plt.subplots(figsize=(8, 5))
plt.subplot(121)
data.boxplot(column='MonthlyIncome')
plt.xticks([1], ['Original'], fontproperties='Times New Roman', size=14)
plt.yticks(fontproperties='Times New Roman', size=14)
plt.subplot(122)
data.boxplot(column='LogMonthlyIncome')
plt.xticks([1], ['Logarithmic'], fontproperties='Times New Roman', size=14)
plt.yticks(fontproperties='Times New Roman', size=14)
plt.suptitle('Before and After Logarithmic Comparison\n(MonthlyIncome)',
             fontproperties='Times New Roman', size=16)
plt.savefig('007_MonthlyIncome_box_Logarithmic_Comparison.png', dpi=1200)
plt.close()
# Histograms of the three log-transformed features.
data['LogDebtRatio'].hist(bins=100)
plt.title('Logarithmic DebtRatio', fontproperties='Times New Roman', size=16)
plt.xticks(fontproperties='Times New Roman', size=14)
plt.yticks(fontproperties='Times New Roman', size=14)
plt.savefig('008_Logarithmic_DebtRatio_hist.png', dpi=1200)
plt.close()
data['LogRevolvingUtilizationOfUnsecuredLines'].hist(bins=100)
plt.title('Logarithmic RevolvingUtilizationOfUnsecuredLines',
          fontproperties='Times New Roman', size=14)
plt.xticks(fontproperties='Times New Roman', size=14)
plt.yticks(fontproperties='Times New Roman', size=14)
plt.savefig('009_Logarithmic_RUOUL_hist.png', dpi=1200)
plt.close()
data['LogMonthlyIncome'].hist(bins=100)
plt.title('Logarithmic MonthlyIncome',
          fontproperties='Times New Roman', size=16)
plt.xticks(fontproperties='Times New Roman', size=14)
plt.yticks(fontproperties='Times New Roman', size=14)
plt.savefig('010_Logarithmic_MonthlyIncome_hist.png', dpi=1200)
plt.close()
# The above is data preprocessing plus direct visualisation.
# Binning: flag low-income rows (monthly income below 180 -- threshold
# chosen by the author; TODO confirm rationale) and plot the bin counts.
data['LowIncome'] = data['MonthlyIncome'] < 180
data['NormalIncome'] = data['MonthlyIncome'] >= 180
data[['LowIncome', 'NormalIncome']].sum().plot(kind='bar')
plt.xticks(rotation=0, fontproperties='Times New Roman', size=14)
plt.yticks(fontproperties='Times New Roman', size=14)
plt.title('MonthlyIncome',
          fontproperties='Times New Roman', size=16)
plt.savefig('011_Income_binning.png', dpi=1200)
plt.close()
# Binning: young (< 24), normal (24-65, inclusive) and old (> 65) groups.
data['YoungAge'] = data['age'] < 24
data['OldAge'] = data['age'] > 65
data['NormalAge'] = (data['age'] <= 65) & (data['age'] >= 24)
data[['YoungAge', 'NormalAge', 'OldAge']].sum().plot(kind='bar')
plt.xticks(rotation=0, fontproperties='Times New Roman', size=14)
plt.yticks(fontproperties='Times New Roman', size=14)
plt.title('Age',
          fontproperties='Times New Roman', size=16)
plt.savefig('012_Age_binning.png', dpi=1200)
plt.close()
# A LogAge feature produced too discrete an image, so it was abandoned.
# data=data[data['age']!=0]
#data['LogAge'] = np.log(data['age'])
# data.hist(column='LogAge',bins=100)
# plt.show()
# Derived feature: log income per household member; +/-inf values (e.g.
# from a zero dependent count) are replaced with NaN before plotting.
data['LogIncomePerPerson'] = data['LogMonthlyIncome'] / \
    data['NumberOfDependents']
data.loc[~np.isfinite(data['LogIncomePerPerson']),
         'LogIncomePerPerson'] = np.nan
data['LogIncomePerPerson'].hist(bins=100)
plt.title('LogIncomePerPerson',
          fontproperties='Times New Roman', size=16)
plt.xticks(fontproperties='Times New Roman', size=14)
plt.yticks(fontproperties='Times New Roman', size=14)
plt.savefig('013_LogIncomePerPerson.png', dpi=1200)
plt.close()
# Derived feature: log of DebtRatio * LogMonthlyIncome.
# NOTE(review): multiplying DebtRatio by the *log* of income looks odd;
# log(DebtRatio * MonthlyIncome) may have been intended -- confirm with
# the author before reusing this feature.
data['LogDebt'] = np.log(data['DebtRatio'] * data['LogMonthlyIncome'])
data.loc[~np.isfinite(data['LogDebt']), 'LogDebt'] = np.nan
data['LogDebt'].hist(bins=100)
plt.title('LogDebt',
          fontproperties='Times New Roman', size=16)
plt.xticks(fontproperties='Times New Roman', size=14)
plt.yticks(fontproperties='Times New Roman', size=14)
plt.savefig('014_LogDebt.png', dpi=1200)
plt.close()
# The above adds the engineered features.
# Correlation heatmap on freshly loaded data with only the basic cleaning
# (age != 0, 30-59-days counter < 80) reapplied.
original_data = pd.read_csv('cs-training.csv', index_col=0)
original_data = original_data[original_data['age'] != 0]
original_data = original_data[original_data['NumberOfTime30-59DaysPastDueNotWorse'] < 80]
plt.subplots(figsize=(15, 15))
seaborn.heatmap(original_data.corr(), annot=True,
                vmax=1, square=True, cmap='Blues')
plt.title('Heatmap', size=20)
# Oversized bounding box so the long axis labels are not clipped on save.
plt.savefig('015_Heatmap.png', dpi=1200,bbox_inches=trs.Bbox([[-2,-1],[13,14]]))
plt.close()
# The above shows the correlations between variables.
print('done')
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"numpy.log",
"matplotlib.transforms.Bbox",
"pandas.read_csv",
"matplotlib.pyplot.close",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.yticks",
"numpy.isfinite",
"math.log",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"... | [((148, 191), 'pandas.read_csv', 'pd.read_csv', (['"""cs-training.csv"""'], {'index_col': '(0)'}), "('cs-training.csv', index_col=0)\n", (159, 191), True, 'import pandas as pd\n'), ((378, 449), 'matplotlib.pyplot.title', 'plt.title', (['"""Before Cleaning"""'], {'fontproperties': '"""Times New Roman"""', 'size': '(16)'}), "('Before Cleaning', fontproperties='Times New Roman', size=16)\n", (387, 449), True, 'import matplotlib.pyplot as plt\n'), ((450, 557), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[1, 2, 3]', "['30-59Days', '60-89Days', '90Days']"], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "([1, 2, 3], ['30-59Days', '60-89Days', '90Days'], fontproperties=\n 'Times New Roman', size=14)\n", (460, 557), True, 'import matplotlib.pyplot as plt\n'), ((564, 617), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "(fontproperties='Times New Roman', size=14)\n", (574, 617), True, 'import matplotlib.pyplot as plt\n'), ((618, 679), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""001_NumberOfTime_before_cleaning.png"""'], {'dpi': '(1200)'}), "('001_NumberOfTime_before_cleaning.png', dpi=1200)\n", (629, 679), True, 'import matplotlib.pyplot as plt\n'), ((680, 691), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (689, 691), True, 'import matplotlib.pyplot as plt\n'), ((1016, 1086), 'matplotlib.pyplot.title', 'plt.title', (['"""After Cleaning"""'], {'fontproperties': '"""Times New Roman"""', 'size': '(16)'}), "('After Cleaning', fontproperties='Times New Roman', size=16)\n", (1025, 1086), True, 'import matplotlib.pyplot as plt\n'), ((1087, 1194), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[1, 2, 3]', "['30-59Days', '60-89Days', '90Days']"], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "([1, 2, 3], ['30-59Days', '60-89Days', '90Days'], fontproperties=\n 'Times New Roman', size=14)\n", (1097, 1194), True, 'import matplotlib.pyplot as plt\n'), ((1201, 1254), 
'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "(fontproperties='Times New Roman', size=14)\n", (1211, 1254), True, 'import matplotlib.pyplot as plt\n'), ((1255, 1315), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""002_NumberOfTime_after_cleaning.png"""'], {'dpi': '(1200)'}), "('002_NumberOfTime_after_cleaning.png', dpi=1200)\n", (1266, 1315), True, 'import matplotlib.pyplot as plt\n'), ((1316, 1327), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1325, 1327), True, 'import matplotlib.pyplot as plt\n'), ((1372, 1431), 'matplotlib.pyplot.title', 'plt.title', (['"""Age"""'], {'fontproperties': '"""Times New Roman"""', 'size': '(16)'}), "('Age', fontproperties='Times New Roman', size=16)\n", (1381, 1431), True, 'import matplotlib.pyplot as plt\n'), ((1432, 1485), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "(fontproperties='Times New Roman', size=14)\n", (1442, 1485), True, 'import matplotlib.pyplot as plt\n'), ((1486, 1539), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "(fontproperties='Times New Roman', size=14)\n", (1496, 1539), True, 'import matplotlib.pyplot as plt\n'), ((1540, 1580), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""003_Age_box.png"""'], {'dpi': '(1200)'}), "('003_Age_box.png', dpi=1200)\n", (1551, 1580), True, 'import matplotlib.pyplot as plt\n'), ((1581, 1592), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1590, 1592), True, 'import matplotlib.pyplot as plt\n'), ((1690, 1749), 'matplotlib.pyplot.title', 'plt.title', (['"""Age"""'], {'fontproperties': '"""Times New Roman"""', 'size': '(16)'}), "('Age', fontproperties='Times New Roman', size=16)\n", (1699, 1749), True, 'import matplotlib.pyplot as plt\n'), ((1750, 1803), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), 
"(fontproperties='Times New Roman', size=14)\n", (1760, 1803), True, 'import matplotlib.pyplot as plt\n'), ((1804, 1857), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "(fontproperties='Times New Roman', size=14)\n", (1814, 1857), True, 'import matplotlib.pyplot as plt\n'), ((1858, 1899), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""004_Age_hist.png"""'], {'dpi': '(1200)'}), "('004_Age_hist.png', dpi=1200)\n", (1869, 1899), True, 'import matplotlib.pyplot as plt\n'), ((1900, 1911), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1909, 1911), True, 'import matplotlib.pyplot as plt\n'), ((2005, 2033), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (2017, 2033), True, 'import matplotlib.pyplot as plt\n'), ((2034, 2050), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (2045, 2050), True, 'import matplotlib.pyplot as plt\n'), ((2084, 2156), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[1]', "['Original']"], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "([1], ['Original'], fontproperties='Times New Roman', size=14)\n", (2094, 2156), True, 'import matplotlib.pyplot as plt\n'), ((2157, 2210), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "(fontproperties='Times New Roman', size=14)\n", (2167, 2210), True, 'import matplotlib.pyplot as plt\n'), ((2211, 2227), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (2222, 2227), True, 'import matplotlib.pyplot as plt\n'), ((2264, 2339), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[1]', "['Logarithmic']"], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "([1], ['Logarithmic'], fontproperties='Times New Roman', size=14)\n", (2274, 2339), True, 'import matplotlib.pyplot as plt\n'), ((2340, 2393), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontproperties': 
'"""Times New Roman"""', 'size': '(14)'}), "(fontproperties='Times New Roman', size=14)\n", (2350, 2393), True, 'import matplotlib.pyplot as plt\n'), ((2394, 2512), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Before and After Logarithmic Comparison\n(DebtRatio)"""'], {'fontproperties': '"""Times New Roman"""', 'size': '(16)'}), '("""Before and After Logarithmic Comparison\n(DebtRatio)""",\n fontproperties=\'Times New Roman\', size=16)\n', (2406, 2512), True, 'import matplotlib.pyplot as plt\n'), ((2519, 2588), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""005_DebtRatio_box_Logarithmic_Comparison.png"""'], {'dpi': '(1200)'}), "('005_DebtRatio_box_Logarithmic_Comparison.png', dpi=1200)\n", (2530, 2588), True, 'import matplotlib.pyplot as plt\n'), ((2589, 2600), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2598, 2600), True, 'import matplotlib.pyplot as plt\n'), ((2748, 2776), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (2760, 2776), True, 'import matplotlib.pyplot as plt\n'), ((2777, 2793), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (2788, 2793), True, 'import matplotlib.pyplot as plt\n'), ((2854, 2926), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[1]', "['Original']"], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "([1], ['Original'], fontproperties='Times New Roman', size=14)\n", (2864, 2926), True, 'import matplotlib.pyplot as plt\n'), ((2927, 2980), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "(fontproperties='Times New Roman', size=14)\n", (2937, 2980), True, 'import matplotlib.pyplot as plt\n'), ((2981, 2997), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (2992, 2997), True, 'import matplotlib.pyplot as plt\n'), ((3061, 3136), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[1]', "['Logarithmic']"], {'fontproperties': '"""Times New Roman"""', 'size': 
'(14)'}), "([1], ['Logarithmic'], fontproperties='Times New Roman', size=14)\n", (3071, 3136), True, 'import matplotlib.pyplot as plt\n'), ((3137, 3190), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "(fontproperties='Times New Roman', size=14)\n", (3147, 3190), True, 'import matplotlib.pyplot as plt\n'), ((3191, 3342), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Before and After Logarithmic Comparison\n(RevolvingUtilizationOfUnsecuredLines)"""'], {'fontproperties': '"""Times New Roman"""', 'size': '(16)'}), '(\n """Before and After Logarithmic Comparison\n(RevolvingUtilizationOfUnsecuredLines)"""\n , fontproperties=\'Times New Roman\', size=16)\n', (3203, 3342), True, 'import matplotlib.pyplot as plt\n'), ((3343, 3408), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""006_RUOUL_box_Logarithmic_Comparison.png"""'], {'dpi': '(1200)'}), "('006_RUOUL_box_Logarithmic_Comparison.png', dpi=1200)\n", (3354, 3408), True, 'import matplotlib.pyplot as plt\n'), ((3409, 3420), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3418, 3420), True, 'import matplotlib.pyplot as plt\n'), ((3522, 3550), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (3534, 3550), True, 'import matplotlib.pyplot as plt\n'), ((3551, 3567), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (3562, 3567), True, 'import matplotlib.pyplot as plt\n'), ((3605, 3677), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[1]', "['Original']"], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "([1], ['Original'], fontproperties='Times New Roman', size=14)\n", (3615, 3677), True, 'import matplotlib.pyplot as plt\n'), ((3678, 3731), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "(fontproperties='Times New Roman', size=14)\n", (3688, 3731), True, 'import matplotlib.pyplot as plt\n'), ((3732, 3748), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (3743, 3748), True, 'import matplotlib.pyplot as plt\n'), ((3789, 3864), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[1]', "['Logarithmic']"], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "([1], ['Logarithmic'], fontproperties='Times New Roman', size=14)\n", (3799, 3864), True, 'import matplotlib.pyplot as plt\n'), ((3865, 3918), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "(fontproperties='Times New Roman', size=14)\n", (3875, 3918), True, 'import matplotlib.pyplot as plt\n'), ((3919, 4041), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Before and After Logarithmic Comparison\n(MonthlyIncome)"""'], {'fontproperties': '"""Times New Roman"""', 'size': '(16)'}), '("""Before and After Logarithmic Comparison\n(MonthlyIncome)""",\n fontproperties=\'Times New Roman\', size=16)\n', (3931, 4041), True, 'import matplotlib.pyplot as plt\n'), ((4048, 4121), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""007_MonthlyIncome_box_Logarithmic_Comparison.png"""'], {'dpi': '(1200)'}), "('007_MonthlyIncome_box_Logarithmic_Comparison.png', dpi=1200)\n", (4059, 4121), True, 'import matplotlib.pyplot as plt\n'), ((4122, 4133), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4131, 4133), True, 'import matplotlib.pyplot as plt\n'), ((4171, 4248), 'matplotlib.pyplot.title', 'plt.title', (['"""Logarithmic DebtRatio"""'], {'fontproperties': '"""Times New Roman"""', 'size': '(16)'}), "('Logarithmic DebtRatio', fontproperties='Times New Roman', size=16)\n", (4180, 4248), True, 'import matplotlib.pyplot as plt\n'), ((4249, 4302), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "(fontproperties='Times New Roman', size=14)\n", (4259, 4302), True, 'import matplotlib.pyplot as plt\n'), ((4303, 4356), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontproperties': 
'"""Times New Roman"""', 'size': '(14)'}), "(fontproperties='Times New Roman', size=14)\n", (4313, 4356), True, 'import matplotlib.pyplot as plt\n'), ((4357, 4416), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""008_Logarithmic_DebtRatio_hist.png"""'], {'dpi': '(1200)'}), "('008_Logarithmic_DebtRatio_hist.png', dpi=1200)\n", (4368, 4416), True, 'import matplotlib.pyplot as plt\n'), ((4417, 4428), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4426, 4428), True, 'import matplotlib.pyplot as plt\n'), ((4493, 4601), 'matplotlib.pyplot.title', 'plt.title', (['"""Logarithmic RevolvingUtilizationOfUnsecuredLines"""'], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "('Logarithmic RevolvingUtilizationOfUnsecuredLines',\n fontproperties='Times New Roman', size=14)\n", (4502, 4601), True, 'import matplotlib.pyplot as plt\n'), ((4608, 4661), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "(fontproperties='Times New Roman', size=14)\n", (4618, 4661), True, 'import matplotlib.pyplot as plt\n'), ((4662, 4715), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "(fontproperties='Times New Roman', size=14)\n", (4672, 4715), True, 'import matplotlib.pyplot as plt\n'), ((4716, 4771), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""009_Logarithmic_RUOUL_hist.png"""'], {'dpi': '(1200)'}), "('009_Logarithmic_RUOUL_hist.png', dpi=1200)\n", (4727, 4771), True, 'import matplotlib.pyplot as plt\n'), ((4772, 4783), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4781, 4783), True, 'import matplotlib.pyplot as plt\n'), ((4825, 4910), 'matplotlib.pyplot.title', 'plt.title', (['"""Logarithmic MonthlyIncome"""'], {'fontproperties': '"""Times New Roman"""', 'size': '(16)'}), "('Logarithmic MonthlyIncome', fontproperties='Times New Roman',\n size=16)\n", (4834, 4910), True, 'import matplotlib.pyplot as plt\n'), ((4917, 4970), 
'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "(fontproperties='Times New Roman', size=14)\n", (4927, 4970), True, 'import matplotlib.pyplot as plt\n'), ((4971, 5024), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "(fontproperties='Times New Roman', size=14)\n", (4981, 5024), True, 'import matplotlib.pyplot as plt\n'), ((5025, 5088), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""010_Logarithmic_MonthlyIncome_hist.png"""'], {'dpi': '(1200)'}), "('010_Logarithmic_MonthlyIncome_hist.png', dpi=1200)\n", (5036, 5088), True, 'import matplotlib.pyplot as plt\n'), ((5089, 5100), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5098, 5100), True, 'import matplotlib.pyplot as plt\n'), ((5280, 5345), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(0)', 'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "(rotation=0, fontproperties='Times New Roman', size=14)\n", (5290, 5345), True, 'import matplotlib.pyplot as plt\n'), ((5346, 5399), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "(fontproperties='Times New Roman', size=14)\n", (5356, 5399), True, 'import matplotlib.pyplot as plt\n'), ((5400, 5469), 'matplotlib.pyplot.title', 'plt.title', (['"""MonthlyIncome"""'], {'fontproperties': '"""Times New Roman"""', 'size': '(16)'}), "('MonthlyIncome', fontproperties='Times New Roman', size=16)\n", (5409, 5469), True, 'import matplotlib.pyplot as plt\n'), ((5480, 5527), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""011_Income_binning.png"""'], {'dpi': '(1200)'}), "('011_Income_binning.png', dpi=1200)\n", (5491, 5527), True, 'import matplotlib.pyplot as plt\n'), ((5528, 5539), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5537, 5539), True, 'import matplotlib.pyplot as plt\n'), ((5738, 5803), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], 
{'rotation': '(0)', 'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "(rotation=0, fontproperties='Times New Roman', size=14)\n", (5748, 5803), True, 'import matplotlib.pyplot as plt\n'), ((5804, 5857), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "(fontproperties='Times New Roman', size=14)\n", (5814, 5857), True, 'import matplotlib.pyplot as plt\n'), ((5858, 5917), 'matplotlib.pyplot.title', 'plt.title', (['"""Age"""'], {'fontproperties': '"""Times New Roman"""', 'size': '(16)'}), "('Age', fontproperties='Times New Roman', size=16)\n", (5867, 5917), True, 'import matplotlib.pyplot as plt\n'), ((5928, 5972), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""012_Age_binning.png"""'], {'dpi': '(1200)'}), "('012_Age_binning.png', dpi=1200)\n", (5939, 5972), True, 'import matplotlib.pyplot as plt\n'), ((5973, 5984), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5982, 5984), True, 'import matplotlib.pyplot as plt\n'), ((6344, 6418), 'matplotlib.pyplot.title', 'plt.title', (['"""LogIncomePerPerson"""'], {'fontproperties': '"""Times New Roman"""', 'size': '(16)'}), "('LogIncomePerPerson', fontproperties='Times New Roman', size=16)\n", (6353, 6418), True, 'import matplotlib.pyplot as plt\n'), ((6429, 6482), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "(fontproperties='Times New Roman', size=14)\n", (6439, 6482), True, 'import matplotlib.pyplot as plt\n'), ((6483, 6536), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "(fontproperties='Times New Roman', size=14)\n", (6493, 6536), True, 'import matplotlib.pyplot as plt\n'), ((6537, 6588), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""013_LogIncomePerPerson.png"""'], {'dpi': '(1200)'}), "('013_LogIncomePerPerson.png', dpi=1200)\n", (6548, 6588), True, 'import matplotlib.pyplot as plt\n'), ((6589, 6600), 
'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6598, 6600), True, 'import matplotlib.pyplot as plt\n'), ((6620, 6672), 'numpy.log', 'np.log', (["(data['DebtRatio'] * data['LogMonthlyIncome'])"], {}), "(data['DebtRatio'] * data['LogMonthlyIncome'])\n", (6626, 6672), True, 'import numpy as np\n'), ((6764, 6827), 'matplotlib.pyplot.title', 'plt.title', (['"""LogDebt"""'], {'fontproperties': '"""Times New Roman"""', 'size': '(16)'}), "('LogDebt', fontproperties='Times New Roman', size=16)\n", (6773, 6827), True, 'import matplotlib.pyplot as plt\n'), ((6838, 6891), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "(fontproperties='Times New Roman', size=14)\n", (6848, 6891), True, 'import matplotlib.pyplot as plt\n'), ((6892, 6945), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontproperties': '"""Times New Roman"""', 'size': '(14)'}), "(fontproperties='Times New Roman', size=14)\n", (6902, 6945), True, 'import matplotlib.pyplot as plt\n'), ((6946, 6986), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""014_LogDebt.png"""'], {'dpi': '(1200)'}), "('014_LogDebt.png', dpi=1200)\n", (6957, 6986), True, 'import matplotlib.pyplot as plt\n'), ((6987, 6998), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6996, 6998), True, 'import matplotlib.pyplot as plt\n'), ((7027, 7070), 'pandas.read_csv', 'pd.read_csv', (['"""cs-training.csv"""'], {'index_col': '(0)'}), "('cs-training.csv', index_col=0)\n", (7038, 7070), True, 'import pandas as pd\n'), ((7218, 7248), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (7230, 7248), True, 'import matplotlib.pyplot as plt\n'), ((7350, 7379), 'matplotlib.pyplot.title', 'plt.title', (['"""Heatmap"""'], {'size': '(20)'}), "('Heatmap', size=20)\n", (7359, 7379), True, 'import matplotlib.pyplot as plt\n'), ((7461, 7472), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7470, 7472), True, 'import 
matplotlib.pyplot as plt\n'), ((7432, 7462), 'matplotlib.transforms.Bbox', 'trs.Bbox', (['[[-2, -1], [13, 14]]'], {}), '([[-2, -1], [13, 14]])\n', (7440, 7462), True, 'import matplotlib.transforms as trs\n'), ((1975, 1986), 'math.log', 'math.log', (['x'], {}), '(x)\n', (1983, 1986), False, 'import math\n'), ((2718, 2729), 'math.log', 'math.log', (['x'], {}), '(x)\n', (2726, 2729), False, 'import math\n'), ((3492, 3503), 'math.log', 'math.log', (['x'], {}), '(x)\n', (3500, 3503), False, 'import math\n'), ((6221, 6260), 'numpy.isfinite', 'np.isfinite', (["data['LogIncomePerPerson']"], {}), "(data['LogIncomePerPerson'])\n", (6232, 6260), True, 'import numpy as np\n'), ((6683, 6711), 'numpy.isfinite', 'np.isfinite', (["data['LogDebt']"], {}), "(data['LogDebt'])\n", (6694, 6711), True, 'import numpy as np\n')] |
import numpy as np


def baap_baap(n, index):
    """Build the weighted adjacency matrix of an n x n grid world.

    Cells are numbered row-major ((r, c) -> r * n + c).  Horizontally and
    vertically adjacent cells are linked with weight 100; edges whose
    1-based counter value appears in the true_hoc/weapons cell list get
    weight 100000, and counters appearing in the jail cell list reset
    both directions to 0 (the counter is compared against 0-based cell
    ids, exactly as in the original -- the off-by-one is preserved).
    Finally every non-zero entry in the columns listed in *index* is
    collapsed to 1.  Returns the matrix as a nested Python list.
    """
    print("Running Graph_Gen.baap_baap")
    # Load the four map layers from the working directory (death_e is
    # loaded for parity with the original but never used below).
    true_hoc = np.load("true_hoc.npy")
    weapons = np.load("weapons.npy")
    death_e = np.load("death_e.npy")
    jail = np.load("jail.npy")
    graph = np.zeros([n ** 2, n ** 2], dtype=int)
    # Cell ids flagged by true_hoc or weapons, and the jail-flagged cells.
    blocked = [n * r + c for r in range(n) for c in range(n)
               if true_hoc[r][c]]
    jail_cells = [n * r + c for r in range(n) for c in range(n)
                  if jail[r][c] != 0]
    blocked.extend(n * r + c for r in range(n) for c in range(n)
                   if weapons[r][c])
    # Horizontal neighbours: counter k runs 1..n^2 and links k-1 <-> k.
    # The three ifs are independent on purpose (matches the original).
    for k in range(1, n ** 2 + 1):
        if k % n != 0:
            graph[k - 1][k] = 100
            graph[k][k - 1] = 100
        if k in blocked:
            graph[k - 1][k] = 100000
            graph[k][k - 1] = 100000
        if k in jail_cells:
            graph[k - 1][k] = 0
            graph[k][k - 1] = 0
    # Vertical neighbours: counter k links k-1 <-> k+n-1.
    for k in range(1, n * (n - 1) + 1):
        graph[k - 1][k + n - 1] = 100
        graph[k + n - 1][k - 1] = 100
        if k in blocked:
            graph[k - 1][k + n - 1] = 100000
            graph[k + n - 1][k - 1] = 100000
        if k in jail_cells:
            graph[k - 1][k + n - 1] = 0
            graph[k + n - 1][k - 1] = 0
    # Collapse every non-zero entry in the selected columns to weight 1.
    for col in index:
        for row in range(n ** 2):
            if graph[row][col] != 0:
                graph[row][col] = 1
    graph = graph.tolist()
    print("Graph:\n")
    print(graph)
    return graph
| [
"numpy.load",
"numpy.zeros"
] | [((107, 130), 'numpy.load', 'np.load', (['"""true_hoc.npy"""'], {}), "('true_hoc.npy')\n", (114, 130), True, 'import numpy as np\n'), ((146, 168), 'numpy.load', 'np.load', (['"""weapons.npy"""'], {}), "('weapons.npy')\n", (153, 168), True, 'import numpy as np\n'), ((184, 206), 'numpy.load', 'np.load', (['"""death_e.npy"""'], {}), "('death_e.npy')\n", (191, 206), True, 'import numpy as np\n'), ((219, 238), 'numpy.load', 'np.load', (['"""jail.npy"""'], {}), "('jail.npy')\n", (226, 238), True, 'import numpy as np\n'), ((252, 289), 'numpy.zeros', 'np.zeros', (['[n ** 2, n ** 2]'], {'dtype': 'int'}), '([n ** 2, n ** 2], dtype=int)\n', (260, 289), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Demonstrates several imgaug augmentation methods on a few labeled mouse
images (data in folder mouse_m7s3 from Mathis, A., et al. DeepLabCut:
markerless pose estimation of user-defined body parts with deep learning.
Nat Neurosci 21, 1281–1289 (2018). https://doi.org/10.1038/s41593-018-0209-y)

For "A Primer on Motion Capture with Deep Learning: Principles, Pitfalls
and Perspectives" by <NAME>, <NAME>, <NAME>, and <NAME>

Uses Imgaug:
Code: https://github.com/aleju/imgaug
Docs: https://imgaug.readthedocs.io/en/latest/index.html
"""
import pandas as pd
import os
import numpy as np
import imgaug as ia
import imgaug.augmenters as iaa
from imgaug.augmentables import Keypoint, KeypointsOnImage
import imageio
from deeplabcut.utils.auxfun_videos import imread, imresize

scale = .4

##########################
## Loading data
##########################
imfolder = 'mouse_m7s3'
Dataframe = pd.read_hdf(os.path.join(imfolder, "CollectedData_Pranav.h5"))
scorer = Dataframe.columns.get_level_values(0)[0]
bodyparts = Dataframe.columns.get_level_values(1)

ia.seed(1)

# keypoint-drawing parameters
color = (200, 0, 0)
size = 17
alpha = .4

# (name, augmenter) pairs in the order they appear in the output strip
Augmentations = [
    ('invert', iaa.Sequential([iaa.Invert(1, per_channel=0.5)])),
    ('coarsedropout',
     iaa.Sequential([iaa.CoarseDropout(0.02, size_percent=0.15, per_channel=0.5)])),
    ('jpegcompression',
     iaa.Sequential([iaa.JpegCompression(compression=(70, 99))])),
    ('motionblur', iaa.Sequential([iaa.MotionBlur(k=30)])),
    ('edgedetect', iaa.Sequential([iaa.EdgeDetect(alpha=(0.8, 1.0))])),
    ('flipud', iaa.Sequential([iaa.Flipud(1)])),
    ('fliplr', iaa.Sequential([iaa.Fliplr(1)])),
]

for row_idx, imname in enumerate(Dataframe.index):
    image = imresize(imread(os.path.join(imfolder, imname)), size=scale)
    ny, nx, nc = np.shape(image)
    # collect the (scaled) finite ground-truth keypoints for this frame
    kpts = []
    for bp in bodyparts:
        x = Dataframe.iloc[row_idx][scorer][bp]['x']
        y = Dataframe.iloc[row_idx][scorer][bp]['y']
        if np.isfinite(x) and np.isfinite(y):
            kpts.append(Keypoint(x=x * scale, y=y * scale))
    kps = KeypointsOnImage(kpts, shape=image.shape)

    # first panel: the unaugmented image with its keypoints drawn on it
    cells = [kps.draw_on_image(image, color=color, size=size, alpha=alpha)]
    for name, seq in Augmentations:
        image_aug, kps_aug = seq(image=image, keypoints=kps)
        drawn = kps_aug.draw_on_image(
            image_aug, color=color, size=size, alpha=alpha)
        cells.append(drawn[:ny, :nx, :nc])
    grid_image = np.hstack(cells)  # stack the panels horizontally
    outname = 'augmentationexamples/' + \
        str(imfolder) + '_' + imname.split('.png')[0] + '_joint.jpg'
    imageio.imwrite(outname, grid_image)
| [
"imgaug.augmentables.KeypointsOnImage",
"imgaug.augmenters.JpegCompression",
"imgaug.augmenters.CoarseDropout",
"imgaug.augmenters.Invert",
"imgaug.seed",
"numpy.hstack",
"numpy.shape",
"imgaug.augmenters.MotionBlur",
"imgaug.augmenters.Fliplr",
"imgaug.augmenters.Flipud",
"numpy.isfinite",
"i... | [((1124, 1134), 'imgaug.seed', 'ia.seed', (['(1)'], {}), '(1)\n', (1131, 1134), True, 'import imgaug as ia\n'), ((976, 1025), 'os.path.join', 'os.path.join', (['imfolder', '"""CollectedData_Pranav.h5"""'], {}), "(imfolder, 'CollectedData_Pranav.h5')\n", (988, 1025), False, 'import os\n'), ((2163, 2178), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (2171, 2178), True, 'import numpy as np\n'), ((2436, 2477), 'imgaug.augmentables.KeypointsOnImage', 'KeypointsOnImage', (['kpts'], {'shape': 'image.shape'}), '(kpts, shape=image.shape)\n', (2452, 2477), False, 'from imgaug.augmentables import Keypoint, KeypointsOnImage\n'), ((2940, 2956), 'numpy.hstack', 'np.hstack', (['cells'], {}), '(cells)\n', (2949, 2956), True, 'import numpy as np\n'), ((1279, 1309), 'imgaug.augmenters.Invert', 'iaa.Invert', (['(1)'], {'per_channel': '(0.5)'}), '(1, per_channel=0.5)\n', (1289, 1309), True, 'import imgaug.augmenters as iaa\n'), ((1395, 1454), 'imgaug.augmenters.CoarseDropout', 'iaa.CoarseDropout', (['(0.02)'], {'size_percent': '(0.15)', 'per_channel': '(0.5)'}), '(0.02, size_percent=0.15, per_channel=0.5)\n', (1412, 1454), True, 'import imgaug.augmenters as iaa\n'), ((1542, 1583), 'imgaug.augmenters.JpegCompression', 'iaa.JpegCompression', ([], {'compression': '(70, 99)'}), '(compression=(70, 99))\n', (1561, 1583), True, 'import imgaug.augmenters as iaa\n'), ((1666, 1686), 'imgaug.augmenters.MotionBlur', 'iaa.MotionBlur', ([], {'k': '(30)'}), '(k=30)\n', (1680, 1686), True, 'import imgaug.augmenters as iaa\n'), ((1769, 1801), 'imgaug.augmenters.EdgeDetect', 'iaa.EdgeDetect', ([], {'alpha': '(0.8, 1.0)'}), '(alpha=(0.8, 1.0))\n', (1783, 1801), True, 'import imgaug.augmenters as iaa\n'), ((1880, 1893), 'imgaug.augmenters.Flipud', 'iaa.Flipud', (['(1)'], {}), '(1)\n', (1890, 1893), True, 'import imgaug.augmenters as iaa\n'), ((1972, 1985), 'imgaug.augmenters.Fliplr', 'iaa.Fliplr', (['(1)'], {}), '(1)\n', (1982, 1985), True, 'import imgaug.augmenters as iaa\n'), 
((2103, 2133), 'os.path.join', 'os.path.join', (['imfolder', 'imname'], {}), '(imfolder, imname)\n', (2115, 2133), False, 'import os\n'), ((2329, 2343), 'numpy.isfinite', 'np.isfinite', (['x'], {}), '(x)\n', (2340, 2343), True, 'import numpy as np\n'), ((2348, 2362), 'numpy.isfinite', 'np.isfinite', (['y'], {}), '(y)\n', (2359, 2362), True, 'import numpy as np\n'), ((2392, 2426), 'imgaug.augmentables.Keypoint', 'Keypoint', ([], {'x': '(x * scale)', 'y': '(y * scale)'}), '(x=x * scale, y=y * scale)\n', (2400, 2426), False, 'from imgaug.augmentables import Keypoint, KeypointsOnImage\n')] |
# Copyright (C) 2020 TU Dresden
# Licensed under the ISC license (see LICENSE.txt)
#
# Authors: <NAME>
import random
import numpy as np
import tqdm
from hydra.utils import instantiate
from mocasin.util import logging
from mocasin.representations import MappingRepresentation
from mocasin.mapper.random import RandomPartialMapper
from mocasin.mapper.utils import SimulationManager
from mocasin.mapper.utils import Statistics
# Module-level logger for this mapper (mocasin's logging wrapper, keyed by module name).
log = logging.getLogger(__name__)
class SimulatedAnnealingMapper(object):
    """Generates a full mapping by using a simulated annealing algorithm from:
    <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2007).
    Automated memory-aware application distribution for multi-processor system-on-chips.
    Journal of Systems Architecture, 53(11), 795-815.e.
    """

    def __init__(
        self,
        graph,
        platform,
        trace,
        representation,
        random_seed=42,
        record_statistics=False,
        initial_temperature=1.0,
        final_temperature=0.1,
        temperature_proportionality_constant=0.5,
        radius=3.0,
        dump_cache=False,
        chunk_size=10,
        progress=False,
        parallel=False,
        jobs=1,
    ):
        """Generates a full mapping for a given platform and dataflow application.

        :param graph: a dataflow graph
        :type graph: DataflowGraph
        :param platform: a platform
        :type platform: Platform
        :param trace: a trace generator
        :type trace: TraceGenerator
        :param representation: a mapping representation object
        :type representation: MappingRepresentation
        :param random_seed: A random seed for the RNG
        :type random_seed: int
        :param initial_temperature: Initial temperature for simmulated annealing
        :type initial_temperature: float
        :param final_temperature: Final temperature for simmulated annealing
        :type final_temperature: float
        :param temperature_proportionality_constant: Temperature prop. constant for simmulated annealing
        :type temperature_proportionality_constant: float
        :param radius: Radius for search when moving
        :type radius: int
        :param record_statistics: Record statistics on mappings evaluated?
        :type record_statistics: bool
        :param dump_cache: Dump the mapping cache?
        :type dump_cache: bool
        :param chunk_size: Size of chunks for parallel simulation
        :type chunk_size: int
        :param progress: Display simulation progress visually?
        :type progress: bool
        :param parallel: Execute simulations in parallel?
        :type parallel: bool
        :param jobs: Number of jobs for parallel simulation
        :type jobs: int
        """
        # Seed both RNGs so runs are reproducible.
        random.seed(random_seed)
        np.random.seed(random_seed)
        self.full_mapper = True  # flag indicating the mapper type
        self.graph = graph
        self.platform = platform
        self.random_mapper = RandomPartialMapper(
            self.graph, self.platform, seed=None
        )
        self.statistics = Statistics(
            log, len(self.graph.processes()), record_statistics
        )
        self.initial_temperature = initial_temperature
        self.final_temperature = final_temperature
        # R_max = L (number of processes times alternative processors),
        # per the referenced paper.
        self.max_rejections = len(self.graph.processes()) * (
            len(self.platform.processors()) - 1
        )  # R_max = L
        self.p = temperature_proportionality_constant
        self.radius = radius
        self.progress = progress
        self.dump_cache = dump_cache

        if not (1 > self.p > 0):
            log.error(
                f"Temperature proportionality constant {self.p} not suitable, "
                f"it should be close to, but smaller than 1 (algorithm probably won't terminate)."
            )

        # This is a workaround until Hydra 1.1 (with recursive instantiaton!)
        if not issubclass(type(type(representation)), MappingRepresentation):
            representation = instantiate(representation, graph, platform)
        self.representation = representation

        self.simulation_manager = SimulationManager(
            self.representation,
            trace,
            jobs,
            parallel,
            progress,
            chunk_size,
            record_statistics,
        )

    def temperature_cooling(self, temperature, iter):
        """Return the annealing temperature for the given iteration.

        Implements the stepwise geometric schedule
        T = T0 * p^floor(iter / R_max). Note that the `temperature`
        argument is not used by this schedule; both parameter names
        (including `iter`, which shadows the builtin) are kept
        unchanged for interface stability.
        """
        return self.initial_temperature * self.p ** np.floor(
            iter / self.max_rejections
        )

    def query_accept(self, time, temperature):
        """Return the Metropolis acceptance probability for a cost increase.

        :param time: execution-time increase of the candidate mapping
        :param temperature: current annealing temperature
        """
        # Guard against overflow in exp() for large cost increases:
        # an overflowing exponent means the probability is effectively 0.
        with np.errstate(over="raise"):
            try:
                normalized_probability = 1 / (
                    np.exp(time / (0.5 * temperature * self.initial_cost))
                )
            except FloatingPointError:
                normalized_probability = 0

        return normalized_probability

    def move(self, mapping, temperature):
        """Return a random neighboring mapping that differs from `mapping`.

        Samples candidates from a ball around the current mapping,
        enlarging the radius by 10% whenever all candidates coincide
        with the current mapping.

        :param temperature: unused here; kept for interface stability.
        :raises RuntimeError: if no distinct candidate is found before
            the radius exceeds 10000x the configured radius.
        """
        radius = self.radius
        while True:
            candidates = self.representation._uniformFromBall(
                mapping, radius, 20
            )
            for candidate in candidates:
                if list(candidate) != list(mapping):
                    return candidate
            # All candidates equal the current mapping: widen the search.
            radius *= 1.1
            if radius > 10000 * self.radius:
                log.error("Could not mutate mapping")
                raise RuntimeError("Could not mutate mapping")

    def generate_mapping(self):
        """Generates a full mapping using simulated anealing"""
        mapping_obj = self.random_mapper.generate_mapping()
        if (
            hasattr(self.representation, "canonical_operations")
            and not self.representation.canonical_operations
        ):
            mapping = self.representation.toRepresentationNoncanonical(
                mapping_obj
            )
        else:
            mapping = self.representation.toRepresentation(mapping_obj)

        # Simulate the initial random mapping; its execution time also
        # normalizes the acceptance probability (see query_accept).
        last_mapping = mapping
        last_simres = self.simulation_manager.simulate([mapping])[0]
        last_exec_time = last_simres.exec_time
        self.initial_cost = last_exec_time
        best_mapping = mapping
        best_exec_time = last_exec_time
        rejections = 0

        iteration = 0  # renamed from `iter`, which shadowed the builtin
        temperature = self.initial_temperature
        if self.progress:
            pbar = tqdm.tqdm(total=self.max_rejections * 20)

        while rejections < self.max_rejections:
            temperature = self.temperature_cooling(temperature, iteration)
            log.info(f"Current temperature {temperature}")
            mapping = self.move(last_mapping, temperature)
            cur_simres = self.simulation_manager.simulate([mapping])[0]
            cur_exec_time = cur_simres.exec_time
            faster = cur_exec_time < last_exec_time
            if not faster and cur_exec_time != last_exec_time:
                # Worse mapping: accept with Metropolis probability.
                prob = self.query_accept(
                    cur_exec_time - last_exec_time, temperature
                )
                rand = random.random()
                accept_randomly = prob > rand
            else:
                accept_randomly = False  # don't accept if no movement.

            if faster or accept_randomly:
                # accept
                if cur_exec_time < best_exec_time:
                    best_exec_time = cur_exec_time
                    best_mapping = mapping
                last_mapping = mapping
                last_exec_time = cur_exec_time
                # Fixed log message: this branch *accepts* the candidate
                # (the old code logged "Rejected" here); the count is the
                # rejection streak that this acceptance ends.
                log.info(f"Accepted (after {rejections} rejections)")
                rejections = 0
            else:
                # reject
                if temperature <= self.final_temperature:
                    rejections += 1
                # Cooling only advances on rejections (matches original).
                iteration += 1
                if self.progress:
                    pbar.update(1)

        if self.progress:
            pbar.update(self.max_rejections * 20 - iteration)
            pbar.close()

        self.simulation_manager.statistics.log_statistics()
        self.simulation_manager.statistics.to_file()
        if self.dump_cache:
            self.simulation_manager.dump("mapping_cache.csv")

        return self.representation.fromRepresentation(best_mapping)
| [
"tqdm.tqdm",
"numpy.random.seed",
"hydra.utils.instantiate",
"numpy.floor",
"mocasin.mapper.random.RandomPartialMapper",
"numpy.errstate",
"random.random",
"random.seed",
"numpy.exp",
"mocasin.mapper.utils.SimulationManager",
"mocasin.util.logging.getLogger"
] | [((434, 461), 'mocasin.util.logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (451, 461), False, 'from mocasin.util import logging\n'), ((2767, 2791), 'random.seed', 'random.seed', (['random_seed'], {}), '(random_seed)\n', (2778, 2791), False, 'import random\n'), ((2800, 2827), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (2814, 2827), True, 'import numpy as np\n'), ((2984, 3041), 'mocasin.mapper.random.RandomPartialMapper', 'RandomPartialMapper', (['self.graph', 'self.platform'], {'seed': 'None'}), '(self.graph, self.platform, seed=None)\n', (3003, 3041), False, 'from mocasin.mapper.random import RandomPartialMapper\n'), ((4129, 4235), 'mocasin.mapper.utils.SimulationManager', 'SimulationManager', (['self.representation', 'trace', 'jobs', 'parallel', 'progress', 'chunk_size', 'record_statistics'], {}), '(self.representation, trace, jobs, parallel, progress,\n chunk_size, record_statistics)\n', (4146, 4235), False, 'from mocasin.mapper.utils import SimulationManager\n'), ((4004, 4048), 'hydra.utils.instantiate', 'instantiate', (['representation', 'graph', 'platform'], {}), '(representation, graph, platform)\n', (4015, 4048), False, 'from hydra.utils import instantiate\n'), ((4554, 4579), 'numpy.errstate', 'np.errstate', ([], {'over': '"""raise"""'}), "(over='raise')\n", (4565, 4579), True, 'import numpy as np\n'), ((6262, 6303), 'tqdm.tqdm', 'tqdm.tqdm', ([], {'total': '(self.max_rejections * 20)'}), '(total=self.max_rejections * 20)\n', (6271, 6303), False, 'import tqdm\n'), ((4434, 4470), 'numpy.floor', 'np.floor', (['(iter / self.max_rejections)'], {}), '(iter / self.max_rejections)\n', (4442, 4470), True, 'import numpy as np\n'), ((6924, 6939), 'random.random', 'random.random', ([], {}), '()\n', (6937, 6939), False, 'import random\n'), ((4665, 4719), 'numpy.exp', 'np.exp', (['(time / (0.5 * temperature * self.initial_cost))'], {}), '(time / (0.5 * temperature * self.initial_cost))\n', (4671, 
4719), True, 'import numpy as np\n')] |
import scipy.io
import glob
import numpy as np
def get_data(path, *, debug=False):
    """Load EEG .mat samples matching *path* and build a labeled data set.

    :param path: glob pattern for the .mat files, e.g. "data/*.mat".
        File names are expected to look like "<dir>/<subject>_<class>.mat"
        with '/' separators (relative paths — TODO confirm on Windows).
    :param debug: drop into the debugger and print per-file info.
    :return: tuple ``(X, y, classes)`` — ``X`` holds one flattened sample
        per row, ``y`` the matching one-hot labels, ``classes`` the class
        names in label order.
    """
    data = {
        'A': [],
        'AA': [],
        'Ae': [],
        'AeA': [],
        'EE': [],
        'OO': [],
        'UU': []
    }
    # Channels of interest (1-based) converted to 0-based column indices.
    indices = np.array([1, 2, 7, 8, 9, 10, 19]) - 1
    if debug:
        breakpoint()
    # Non-data keys that scipy.io.loadmat always adds to its result dict.
    avoiders = ('__header__', '__version__', '__globals__')
    for mat in glob.glob(path):
        tmp = scipy.io.loadmat(mat)
        # "<dir>/<subject>_<class>.mat" -> "<class>"
        classname = mat.split('/')[1].split('_')[1].split('.')[0]
        key = [k for k in tmp.keys() if k not in avoiders][0]
        data[classname].append(tmp[key])
        if debug:
            print(f"{classname} :: {key}")

    X_train = []
    y_train = []
    # One-hot labels follow the insertion order of `data`'s keys; this
    # replaces the former 7-branch if/elif chain.
    one_hot = np.eye(len(data), dtype=int)
    for class_index, (category, samples) in enumerate(data.items()):
        label = one_hot[class_index]
        for sample in samples:
            # Keep only the channels of interest, then flatten row-major;
            # reshape(-1) generalizes the former hard-coded 2500-row shape.
            X_train.append(sample[:, indices].reshape(-1))
            y_train.append(label)
    return np.array(X_train), np.array(y_train), data.keys()
"numpy.array",
"glob.glob"
] | [((333, 348), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (342, 348), False, 'import glob\n'), ((243, 276), 'numpy.array', 'np.array', (['[1, 2, 7, 8, 9, 10, 19]'], {}), '([1, 2, 7, 8, 9, 10, 19])\n', (251, 276), True, 'import numpy as np\n'), ((1270, 1285), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (1278, 1285), True, 'import numpy as np\n'), ((1503, 1520), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (1511, 1520), True, 'import numpy as np\n'), ((1522, 1539), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (1530, 1539), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright 2019- <NAME>, MIT license
"""
Utility functions and classes for Auto and Cross Correlogram calculation.
"""
import scipy.signal
import numpy as np
import collections
from obspy import UTCDateTime
from obspy import read
def smooth(x, window_len=None, window='flat', method='zeros'):
    """Smooth a 1-D signal by convolving it with a scaled window.

    :param x: input signal (1-D numpy array)
    :param window_len: size of the smoothing window (odd integer
        recommended); ``None`` or a value below 3 returns ``x`` unchanged
    :param window: window type: 'flat' (moving average), 'hanning',
        'hamming', 'bartlett' or 'blackman'
    :param method: how to treat the signal borders:\n
        'zeros': zero padding on both ends (output length == len(x))\n
        'reflect': pad reflected signal on both ends (same)\n
        'clip': pad with the first/last valid value on both ends (same)\n
        None: no padding
        (output length == len(x) - window_len + 1)
    """
    if window_len is None:
        return x
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        return x
    if window not in ('flat', 'hanning', 'hamming', 'bartlett', 'blackman'):
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming',"
                         "'bartlett', 'blackman'")

    pad_left = (window_len - 1) // 2
    pad_right = window_len // 2
    if method == 'zeros':
        padded = np.concatenate((np.zeros(pad_left), x, np.zeros(pad_right)))
    elif method == 'reflect':
        padded = np.concatenate((x[pad_left:0:-1], x,
                                 x[-1:-(window_len + 1) // 2:-1]))
    elif method == 'clip':
        padded = np.concatenate((x[0] * np.ones(pad_left), x,
                                 x[-1] * np.ones(pad_right)))
    else:
        padded = x

    # Build the window taps; normalization to unit sum happens below.
    if window == 'flat':
        taps = np.ones(window_len, 'd')
    else:
        taps = getattr(np, window)(window_len)
    return scipy.signal.fftconvolve(taps / taps.sum(), padded, mode='valid')
class IterMultipleComponents(object):
    """
    Iterable that yields substreams of associated components.

    :param stream: Stream with different, possibly many traces. It is
        split into substreams with the same seed id (only last character
        i.e. component may vary)
    :type key: str or None
    :param key: Additionally, the stream is grouped by the values of
        the given stats entry to differentiate between e.g. different events
        (for example key='starttime', key='onset')
    :type number_components: int, tuple of ints or None
    :param number_components: Only iterate through substreams with
        matching number of components.
    """

    def __init__(self, stream, key=None, number_components=None):
        grouped = collections.defaultdict(stream.__class__)
        for trace in stream:
            event_tag = str(trace.stats[key]) if key is not None else None
            grouped[(trace.id[:-1], event_tag)].append(trace)

        def _wanted(sub):
            # Keep all groups, or only those with a matching trace count.
            if number_components is None:
                return True
            if isinstance(number_components, int):
                return len(sub) == number_components
            return len(sub) in number_components

        self.substreams = [sub for _, sub in sorted(grouped.items())
                           if _wanted(sub)]

    def __len__(self):
        return len(self.substreams)

    def __iter__(self):
        yield from self.substreams
def iter_time(tr, length=3600, overlap=1800):
    """
    Build a list of [start, end] time windows covering a trace.

    :param tr: trace whose stats (starttime/endtime) define the span
    :param length: window length in seconds
    :param overlap: step between consecutive window starts, in seconds
    :return: list of ``[UTCDateTime, UTCDateTime]`` pairs
    """
    # Only the trace header is needed — read tr.stats directly instead
    # of copying the whole trace (the old tr.copy() duplicated the
    # waveform data just to read two timestamps).
    starttime = int(tr.stats.starttime.timestamp / length) * length
    endtime = int(tr.stats.endtime.timestamp / length) * length
    time_series = []
    for t in range(starttime, endtime, overlap):
        # Windows are aligned to multiples of `length` and may overlap
        # when overlap < length.
        t1 = UTCDateTime(t)
        t2 = UTCDateTime(t1 + length)
        time_series.append([t1, t2])
    return time_series
def pkl2sac1(directory, suffix="pkl", fmt="SAC"):
    """
    Convert files from Pickle format (suffix ``pkl``) to SAC/MSEED format.

    :param directory: the directory contains files to be converted.
    :param suffix: in this case, it should be "pkl".
    :param fmt: the target format to be converted. Support SAC, MSEED.
    :raises ValueError: if ``fmt`` is not supported.
    Example: /the/path/hello.pkl to /the/path_SAC/hello.sac
    """
    import os
    import glob
    if fmt not in ("SAC", "MSEED"):
        # Raise instead of os._exit(0), which killed the whole
        # interpreter with a *success* exit code.
        raise ValueError("format should be 'SAC' or 'MSEED'.")
    files = glob.glob(directory + "/*." + suffix)
    for file in files:
        tr = read(file)[0]
        # Output directory derives from fmt (previously hard-coded
        # "_SAC" even when fmt was "MSEED").
        savepath = os.path.dirname(file) + "_" + fmt
        bn = os.path.basename(file)
        bn = os.path.splitext(bn)[0]
        bn = ".".join([bn, fmt.lower()])
        # exist_ok replaces the former bare try/except around makedirs.
        os.makedirs(savepath, exist_ok=True)
        fn = savepath + "/" + bn
        print(fn)
        tr.write(fn, format=fmt)
| [
"os.makedirs",
"os.path.basename",
"os.path.dirname",
"numpy.zeros",
"numpy.ones",
"collections.defaultdict",
"os._exit",
"obspy.UTCDateTime",
"os.path.splitext",
"glob.glob",
"obspy.read"
] | [((4378, 4415), 'glob.glob', 'glob.glob', (["(directory + '/*.' + suffix)"], {}), "(directory + '/*.' + suffix)\n", (4387, 4415), False, 'import glob\n'), ((2068, 2092), 'numpy.ones', 'np.ones', (['window_len', '"""d"""'], {}), "(window_len, 'd')\n", (2075, 2092), True, 'import numpy as np\n'), ((2995, 3036), 'collections.defaultdict', 'collections.defaultdict', (['stream.__class__'], {}), '(stream.__class__)\n', (3018, 3036), False, 'import collections\n'), ((3829, 3843), 'obspy.UTCDateTime', 'UTCDateTime', (['t'], {}), '(t)\n', (3840, 3843), False, 'from obspy import UTCDateTime\n'), ((3857, 3881), 'obspy.UTCDateTime', 'UTCDateTime', (['(t1 + length)'], {}), '(t1 + length)\n', (3868, 3881), False, 'from obspy import UTCDateTime\n'), ((4513, 4524), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (4521, 4524), False, 'import os\n'), ((4639, 4661), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (4655, 4661), False, 'import os\n'), ((4562, 4572), 'obspy.read', 'read', (['file'], {}), '(file)\n', (4566, 4572), False, 'from obspy import read\n'), ((4595, 4616), 'os.path.dirname', 'os.path.dirname', (['file'], {}), '(file)\n', (4610, 4616), False, 'import os\n'), ((4675, 4695), 'os.path.splitext', 'os.path.splitext', (['bn'], {}), '(bn)\n', (4691, 4695), False, 'import os\n'), ((4765, 4786), 'os.makedirs', 'os.makedirs', (['savepath'], {}), '(savepath)\n', (4776, 4786), False, 'import os\n'), ((1653, 1684), 'numpy.zeros', 'np.zeros', (['((window_len - 1) // 2)'], {}), '((window_len - 1) // 2)\n', (1661, 1684), True, 'import numpy as np\n'), ((1707, 1732), 'numpy.zeros', 'np.zeros', (['(window_len // 2)'], {}), '(window_len // 2)\n', (1715, 1732), True, 'import numpy as np\n'), ((1919, 1949), 'numpy.ones', 'np.ones', (['((window_len - 1) // 2)'], {}), '((window_len - 1) // 2)\n', (1926, 1949), True, 'import numpy as np\n'), ((1980, 2004), 'numpy.ones', 'np.ones', (['(window_len // 2)'], {}), '(window_len // 2)\n', (1987, 2004), True, 'import 
numpy as np\n')] |
# Copyright 2018 Owkin, inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import numpy as np
from sklearn.model_selection import KFold
N_FOLDS = 4
current_directory = os.path.dirname(__file__)
assets_keys_path = os.path.join(current_directory, '../../titanic/assets_keys.json')
print(f'Loading existing asset keys from {os.path.abspath(assets_keys_path)}...')
with open(assets_keys_path, 'r') as f:
assets_keys = json.load(f)
train_data_sample_keys = assets_keys['train_data_sample_keys']
print('Generating folds...')
X = np.array(train_data_sample_keys)
kf = KFold(n_splits=N_FOLDS, shuffle=True)
folds = [
{
'train_data_sample_keys': list(X[train_index]),
'test_data_sample_keys': list(X[test_index])
} for train_index, test_index in kf.split(X)
]
with open(os.path.join(current_directory, '../folds_keys.json'), 'w') as f:
json.dump({'folds': folds}, f, indent=2)
print(f'Folds keys have been saved to {os.path.abspath(assets_keys_path)}')
| [
"json.dump",
"os.path.abspath",
"json.load",
"os.path.dirname",
"sklearn.model_selection.KFold",
"numpy.array",
"os.path.join"
] | [((695, 720), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (710, 720), False, 'import os\n'), ((740, 805), 'os.path.join', 'os.path.join', (['current_directory', '"""../../titanic/assets_keys.json"""'], {}), "(current_directory, '../../titanic/assets_keys.json')\n", (752, 805), False, 'import os\n'), ((1056, 1088), 'numpy.array', 'np.array', (['train_data_sample_keys'], {}), '(train_data_sample_keys)\n', (1064, 1088), True, 'import numpy as np\n'), ((1094, 1131), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'N_FOLDS', 'shuffle': '(True)'}), '(n_splits=N_FOLDS, shuffle=True)\n', (1099, 1131), False, 'from sklearn.model_selection import KFold\n'), ((946, 958), 'json.load', 'json.load', (['f'], {}), '(f)\n', (955, 958), False, 'import json\n'), ((1390, 1430), 'json.dump', 'json.dump', (["{'folds': folds}", 'f'], {'indent': '(2)'}), "({'folds': folds}, f, indent=2)\n", (1399, 1430), False, 'import json\n'), ((1320, 1373), 'os.path.join', 'os.path.join', (['current_directory', '"""../folds_keys.json"""'], {}), "(current_directory, '../folds_keys.json')\n", (1332, 1373), False, 'import os\n'), ((849, 882), 'os.path.abspath', 'os.path.abspath', (['assets_keys_path'], {}), '(assets_keys_path)\n', (864, 882), False, 'import os\n'), ((1471, 1504), 'os.path.abspath', 'os.path.abspath', (['assets_keys_path'], {}), '(assets_keys_path)\n', (1486, 1504), False, 'import os\n')] |
""" Display_Widget module. """
# ISC License
#
# Copyright (c) 2020–2022, <NAME>, <NAME>. <<EMAIL>>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import annotations
import numpy as np
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QCheckBox, QComboBox, QHBoxLayout, QVBoxLayout, QMessageBox, QSizePolicy
from magneticalc.QMessageBox2 import QMessageBox2
from magneticalc.Debug import Debug
from magneticalc.QtWidgets2.QGroupBox2 import QGroupBox2
from magneticalc.QtWidgets2.QHLine import QHLine
from magneticalc.QtWidgets2.QIconLabel import QIconLabel
from magneticalc.QtWidgets2.QLabel2 import QLabel2
from magneticalc.QtWidgets2.QSliderFloat import QSliderFloat
from magneticalc.SamplingVolume_Widget import SamplingVolume_Widget
from magneticalc.Theme import Theme
from magneticalc.VisPyCanvas import VisPyCanvas
class Display_Widget(QGroupBox2):
""" Display_Widget class. """
# Slider limits
FieldArrowHeadScaleMin = 0
FieldArrowHeadScaleMax = 1
FieldArrowHeadScaleStep = 1 / VisPyCanvas.FieldArrowHeadSize
FieldArrowLineScaleMin = 0
FieldArrowLineScaleMax = 1
FieldArrowLineScaleStep = 1 / 10
FieldPointScaleMin = 0
FieldPointScaleMax = 1
FieldPointScaleStep = 1 / VisPyCanvas.FieldPointSize
FieldBoostMin = 0
FieldBoostMax = 1
FieldBoostStep = 1 / 20
# Warn about displaying an excessive number of field labels
ExcessiveFieldLabelsThreshold = 250
def __init__(
self,
gui: GUI # type: ignore
) -> None:
"""
Populates the widget.
@param gui: GUI
"""
QGroupBox2.__init__(self, "Display", color=Theme.DarkColor)
Debug(self, ": Init", init=True)
self.gui = gui
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
self.addLayout(QIconLabel("Point Scale", "fa.circle", color=Theme.DarkColor))
self.field_point_scale_slider = QSliderFloat(
orientation=Qt.Horizontal,
minimum=self.FieldPointScaleMin,
maximum=self.FieldPointScaleMax,
step=self.FieldPointScaleStep
)
self.field_point_scale_slider.valueChanged.connect( # type: ignore
lambda: self.set_field_point_scale(self.field_point_scale_slider.get_value())
)
self.addWidget(self.field_point_scale_slider)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
self.addWidget(QHLine())
self.addLayout(QIconLabel("Arrow Scale", "fa.arrow-right", color=Theme.DarkColor))
field_arrow_scale_layout_left = QVBoxLayout()
field_arrow_scale_layout_right = QVBoxLayout()
field_arrow_scale_layout_left.addWidget(QLabel2("Head:", expand=False))
self.field_arrow_head_scale_slider = QSliderFloat(
orientation=Qt.Horizontal,
minimum=self.FieldArrowHeadScaleMin,
maximum=self.FieldArrowHeadScaleMax,
step=self.FieldArrowHeadScaleStep
)
self.field_arrow_head_scale_slider.valueChanged.connect( # type: ignore
lambda: self.set_field_arrow_head_scale(self.field_arrow_head_scale_slider.get_value())
)
field_arrow_scale_layout_right.addWidget(self.field_arrow_head_scale_slider)
field_arrow_scale_layout_left.addWidget(QLabel2("Line:", expand=False))
self.field_arrow_line_scale_slider = QSliderFloat(
orientation=Qt.Horizontal,
minimum=self.FieldArrowLineScaleMin,
maximum=self.FieldArrowLineScaleMax,
step=self.FieldArrowLineScaleStep
)
self.field_arrow_line_scale_slider.valueChanged.connect( # type: ignore
lambda: self.set_field_arrow_line_scale(self.field_arrow_line_scale_slider.get_value())
)
field_arrow_scale_layout_right.addWidget(self.field_arrow_line_scale_slider)
field_arrow_scale_layout = QHBoxLayout()
field_arrow_scale_layout.addLayout(field_arrow_scale_layout_left)
field_arrow_scale_layout.addLayout(field_arrow_scale_layout_right)
self.addLayout(field_arrow_scale_layout)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
self.addWidget(QHLine())
self.addLayout(QIconLabel("Field Boost", "fa.adjust", color=Theme.DarkColor))
self.field_boost_slider = QSliderFloat(
orientation=Qt.Horizontal,
minimum=self.FieldBoostMin,
maximum=self.FieldBoostMax,
step=self.FieldBoostStep
)
self.field_boost_slider.valueChanged.connect( # type: ignore
lambda: self.set_field_boost(self.field_boost_slider.get_value())
)
self.addWidget(self.field_boost_slider)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
self.addWidget(QHLine())
self.addLayout(QIconLabel("Field Labels", "fa.tags", color=Theme.DarkColor))
self.display_field_magnitude_labels_checkbox = QCheckBox(" Display Magnitude")
self.display_field_magnitude_labels_checkbox.toggled.connect( # type: ignore
lambda: self.set_display_field_magnitude_labels(self.display_field_magnitude_labels_checkbox.isChecked())
)
self.addWidget(self.display_field_magnitude_labels_checkbox)
self.field_label_resolution_combobox = QComboBox()
self.field_label_resolution_combobox.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Minimum)
self.field_label_resolution_combobox_connection = None
field_label_resolution_layout = QHBoxLayout()
field_label_resolution_layout.addWidget(self.field_label_resolution_combobox)
field_label_resolution_layout.addWidget(QLabel2(" Labels / cm", expand=False))
self.addLayout(field_label_resolution_layout)
total_labels_layout = QHBoxLayout()
self.total_labels_label = QLabel2("N/A", color=Theme.MainColor, align_right=True)
total_labels_layout.addWidget(QLabel2("Total labels:", italic=True, color=Theme.LiteColor))
total_labels_layout.addWidget(self.total_labels_label)
self.addLayout(total_labels_layout)
def reload(self) -> None:
"""
Reloads the widget.
"""
Debug(self, ".reload()", refresh=True)
self.blockSignals(True)
self.field_point_scale_slider.setValue(self.gui.project.get_float("field_point_scale"))
self.field_arrow_head_scale_slider.setValue(self.gui.project.get_float("field_arrow_head_scale"))
self.field_arrow_line_scale_slider.setValue(self.gui.project.get_float("field_arrow_line_scale"))
self.field_boost_slider.setValue(self.gui.project.get_float("field_boost"))
self.display_field_magnitude_labels_checkbox.setChecked(
self.gui.project.get_bool("display_field_magnitude_labels")
)
self.blockSignals(False)
self.update()
def update(self) -> None:
"""
Updates this widget.
"""
Debug(self, ".update()", refresh=True)
self.update_labels()
self.update_controls()
def update_labels(self) -> None:
"""
Updates the labels.
"""
Debug(self, ".update_labels()", refresh=True)
if self.gui.model.sampling_volume.valid:
n = self.gui.model.sampling_volume.labels_count
color = Theme.FailureColor if n > self.ExcessiveFieldLabelsThreshold else Theme.MainColor
self.total_labels_label.setText(str(n))
self.total_labels_label.setStyleSheet(f"color: {color}; font-style: italic;")
else:
self.total_labels_label.setText("N/A")
self.total_labels_label.setStyleSheet(f"color: {Theme.LiteColor}; font-style: italic;")
    def update_controls(self) -> None:
        """
        Updates the field label resolution combobox.

        Rebuilds the list of selectable resolutions (restricted to those not
        exceeding the sampling volume resolution), reconnects the change
        handler, and re-selects the project's stored resolution exponent,
        falling back to the first available option if it became invalid.
        """
        Debug(self, ".update_controls()", refresh=True)
        # Possible field label resolution values: Less than or equal to sampling volume resolution
        sampling_volume_resolution = self.gui.model.sampling_volume.resolution
        label_resolution_options_dict = {
            key: value for key, value in SamplingVolume_Widget.ResolutionOptionsDict.items()
            if np.power(2.0, value) <= sampling_volume_resolution
        }
        self.blockSignals(True)
        # Repopulate field label resolution combobox.
        # Disconnect the previous handler first so clear()/addItem() don't fire it.
        if self.field_label_resolution_combobox_connection is not None:
            self.field_label_resolution_combobox.currentIndexChanged.disconnect(  # type: ignore
                self.field_label_resolution_combobox_connection
            )
        self.field_label_resolution_combobox.clear()
        for i, value in enumerate(label_resolution_options_dict.keys()):
            self.field_label_resolution_combobox.addItem(str(value))
        def connection():
            """ Sets field label resolution. """
            self.set_field_label_resolution(
                label_resolution_options_dict.get(
                    self.field_label_resolution_combobox.currentText(),
                    0
                )
            )
        self.field_label_resolution_combobox_connection = connection
        self.field_label_resolution_combobox.currentIndexChanged.connect(connection)  # type: ignore
        # Set default field label resolution if it is not available anymore
        target = self.gui.project.get_int("sampling_volume_label_resolution_exponent")
        if target not in label_resolution_options_dict.values():
            Debug(self, f": WARNING: Invalid: sampling_volume_label_resolution_exponent = {target}", warning=True)
            # NOTE(review): if label_resolution_options_dict is empty this
            # next(iter(...)) raises StopIteration — presumably at least one
            # option always qualifies; confirm against SamplingVolume_Widget.
            self.gui.project.set_int(
                "sampling_volume_label_resolution_exponent",
                next(iter(label_resolution_options_dict.items()))[1]  # First value from combobox
            )
        # Select the field label resolution
        target = self.gui.project.get_int("sampling_volume_label_resolution_exponent")
        for i, value in enumerate(label_resolution_options_dict.values()):
            if value == target:
                self.field_label_resolution_combobox.setCurrentIndex(i)
        self.blockSignals(False)
# ------------------------------------------------------------------------------------------------------------------
def set_field_point_scale(self, value: float) -> None:
"""
Sets field point scale.
@param value: Value
"""
if self.signalsBlocked():
return
self.gui.project.set_float("field_point_scale", value)
self.gui.redraw()
def set_field_arrow_head_scale(self, value: float) -> None:
"""
Sets field arrow head scale.
@param value: Value
"""
if self.signalsBlocked():
return
self.gui.project.set_float("field_arrow_head_scale", value)
self.gui.redraw()
def set_field_arrow_line_scale(self, value: float) -> None:
"""
Sets field arrow line scale.
@param value: Value
"""
if self.signalsBlocked():
return
self.gui.project.set_float("field_arrow_line_scale", value)
self.gui.redraw()
def set_field_boost(self, value: float) -> None:
"""
Sets field boost value.
@param value: Value
"""
if self.signalsBlocked():
return
self.gui.project.set_float("field_boost", value)
self.gui.redraw()
# ------------------------------------------------------------------------------------------------------------------
def set_display_field_magnitude_labels(self, value: bool) -> None:
"""
Sets field label "Display Magnitude" value.
@param value: Value
"""
if self.signalsBlocked():
return
self.gui.project.set_bool("display_field_magnitude_labels", value)
self.prevent_excessive_field_labels(choice=True)
self.update()
self.gui.redraw()
def set_field_label_resolution(self, value: int) -> None:
"""
Sets field label resolution exponent.
@param value: Value
"""
if self.signalsBlocked():
return
self.gui.sidebar_left.sampling_volume_widget.set_sampling_volume(_label_resolution_exponent_=value)
# Note: "prevent_excessive_field_labels()" will be called by "Model.on_sampling_volume_valid()".
self.gui.redraw()
def disable_field_labels(self) -> None:
"""
Disables field labels.
"""
Debug(self, ".disable_field_labels()")
self.gui.project.set_bool("display_field_magnitude_labels", False)
previous_signals_blocked = self.signalsBlocked()
self.blockSignals(True)
self.display_field_magnitude_labels_checkbox.setChecked(False)
self.blockSignals(previous_signals_blocked)
def prevent_excessive_field_labels(self, choice: bool) -> None:
"""
Prevents displaying an excessive number of field labels.
@param choice: True lets the user choose; False disables field labels if there is an excessive number of them
"""
Debug(self, f".prevent_excessive_field_labels(choice={choice})")
if not self.gui.project.get_bool("display_field_magnitude_labels"):
return
if not self.gui.model.sampling_volume.valid:
return
if not self.gui.model.sampling_volume.labels_count > self.ExcessiveFieldLabelsThreshold:
return
if choice:
text = (
"You are about to display an excessive number of field labels.\n"
"This will be very slow and cannot be interrupted.\n\n"
"DO YOU WANT TO DISPLAY FIELD LABELS ANYWAY?\n"
"Choosing 'No' will disable field labels immediately.\n\n"
"Please consider the following before choosing 'Yes':\n"
"– Save your work first.\n"
"– Decrease sampling volume resolution.\n"
"– Decrease field label resolution."
)
messagebox = QMessageBox2(
title="Excessive Number Of Field Labels",
text=text,
icon=QMessageBox.Question,
buttons=QMessageBox.Yes | QMessageBox.No,
default_button=QMessageBox.No
)
if not messagebox.user_accepted or messagebox.choice == QMessageBox.No:
self.disable_field_labels()
else:
self.disable_field_labels()
text = (
"Field labels were disabled automatically because\n"
"an excessive number of field labels was detected.\n\n"
"You can manually re-enable field labels\n"
"after all calculations have finished."
)
QMessageBox2(
title="Excessive Number Of Field Labels",
text=text,
icon=QMessageBox.Information,
buttons=QMessageBox.Ok,
default_button=QMessageBox.Ok
)
| [
"PyQt5.QtWidgets.QComboBox",
"magneticalc.QtWidgets2.QIconLabel.QIconLabel",
"magneticalc.Debug.Debug",
"numpy.power",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtWidgets.QCheckBox",
"magneticalc.QtWidgets2.QGroupBox2.QGroupBox2.__init__",
"PyQt5.QtWidgets.QVBoxLayout",
"magneticalc.QtWidgets2.QHLine.QH... | [((2311, 2370), 'magneticalc.QtWidgets2.QGroupBox2.QGroupBox2.__init__', 'QGroupBox2.__init__', (['self', '"""Display"""'], {'color': 'Theme.DarkColor'}), "(self, 'Display', color=Theme.DarkColor)\n", (2330, 2370), False, 'from magneticalc.QtWidgets2.QGroupBox2 import QGroupBox2\n'), ((2379, 2411), 'magneticalc.Debug.Debug', 'Debug', (['self', '""": Init"""'], {'init': '(True)'}), "(self, ': Init', init=True)\n", (2384, 2411), False, 'from magneticalc.Debug import Debug\n'), ((2683, 2823), 'magneticalc.QtWidgets2.QSliderFloat.QSliderFloat', 'QSliderFloat', ([], {'orientation': 'Qt.Horizontal', 'minimum': 'self.FieldPointScaleMin', 'maximum': 'self.FieldPointScaleMax', 'step': 'self.FieldPointScaleStep'}), '(orientation=Qt.Horizontal, minimum=self.FieldPointScaleMin,\n maximum=self.FieldPointScaleMax, step=self.FieldPointScaleStep)\n', (2695, 2823), False, 'from magneticalc.QtWidgets2.QSliderFloat import QSliderFloat\n'), ((3396, 3409), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (3407, 3409), False, 'from PyQt5.QtWidgets import QCheckBox, QComboBox, QHBoxLayout, QVBoxLayout, QMessageBox, QSizePolicy\n'), ((3451, 3464), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (3462, 3464), False, 'from PyQt5.QtWidgets import QCheckBox, QComboBox, QHBoxLayout, QVBoxLayout, QMessageBox, QSizePolicy\n'), ((3591, 3743), 'magneticalc.QtWidgets2.QSliderFloat.QSliderFloat', 'QSliderFloat', ([], {'orientation': 'Qt.Horizontal', 'minimum': 'self.FieldArrowHeadScaleMin', 'maximum': 'self.FieldArrowHeadScaleMax', 'step': 'self.FieldArrowHeadScaleStep'}), '(orientation=Qt.Horizontal, minimum=self.FieldArrowHeadScaleMin,\n maximum=self.FieldArrowHeadScaleMax, step=self.FieldArrowHeadScaleStep)\n', (3603, 3743), False, 'from magneticalc.QtWidgets2.QSliderFloat import QSliderFloat\n'), ((4200, 4352), 'magneticalc.QtWidgets2.QSliderFloat.QSliderFloat', 'QSliderFloat', ([], {'orientation': 'Qt.Horizontal', 'minimum': 
'self.FieldArrowLineScaleMin', 'maximum': 'self.FieldArrowLineScaleMax', 'step': 'self.FieldArrowLineScaleStep'}), '(orientation=Qt.Horizontal, minimum=self.FieldArrowLineScaleMin,\n maximum=self.FieldArrowLineScaleMax, step=self.FieldArrowLineScaleStep)\n', (4212, 4352), False, 'from magneticalc.QtWidgets2.QSliderFloat import QSliderFloat\n'), ((4719, 4732), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (4730, 4732), False, 'from PyQt5.QtWidgets import QCheckBox, QComboBox, QHBoxLayout, QVBoxLayout, QMessageBox, QSizePolicy\n'), ((5207, 5333), 'magneticalc.QtWidgets2.QSliderFloat.QSliderFloat', 'QSliderFloat', ([], {'orientation': 'Qt.Horizontal', 'minimum': 'self.FieldBoostMin', 'maximum': 'self.FieldBoostMax', 'step': 'self.FieldBoostStep'}), '(orientation=Qt.Horizontal, minimum=self.FieldBoostMin, maximum\n =self.FieldBoostMax, step=self.FieldBoostStep)\n', (5219, 5333), False, 'from magneticalc.QtWidgets2.QSliderFloat import QSliderFloat\n'), ((5889, 5920), 'PyQt5.QtWidgets.QCheckBox', 'QCheckBox', (['""" Display Magnitude"""'], {}), "(' Display Magnitude')\n", (5898, 5920), False, 'from PyQt5.QtWidgets import QCheckBox, QComboBox, QHBoxLayout, QVBoxLayout, QMessageBox, QSizePolicy\n'), ((6252, 6263), 'PyQt5.QtWidgets.QComboBox', 'QComboBox', ([], {}), '()\n', (6261, 6263), False, 'from PyQt5.QtWidgets import QCheckBox, QComboBox, QHBoxLayout, QVBoxLayout, QMessageBox, QSizePolicy\n'), ((6470, 6483), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (6481, 6483), False, 'from PyQt5.QtWidgets import QCheckBox, QComboBox, QHBoxLayout, QVBoxLayout, QMessageBox, QSizePolicy\n'), ((6742, 6755), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (6753, 6755), False, 'from PyQt5.QtWidgets import QCheckBox, QComboBox, QHBoxLayout, QVBoxLayout, QMessageBox, QSizePolicy\n'), ((6790, 6845), 'magneticalc.QtWidgets2.QLabel2.QLabel2', 'QLabel2', (['"""N/A"""'], {'color': 'Theme.MainColor', 'align_right': '(True)'}), "('N/A', 
color=Theme.MainColor, align_right=True)\n", (6797, 6845), False, 'from magneticalc.QtWidgets2.QLabel2 import QLabel2\n'), ((7144, 7182), 'magneticalc.Debug.Debug', 'Debug', (['self', '""".reload()"""'], {'refresh': '(True)'}), "(self, '.reload()', refresh=True)\n", (7149, 7182), False, 'from magneticalc.Debug import Debug\n'), ((7905, 7943), 'magneticalc.Debug.Debug', 'Debug', (['self', '""".update()"""'], {'refresh': '(True)'}), "(self, '.update()', refresh=True)\n", (7910, 7943), False, 'from magneticalc.Debug import Debug\n'), ((8103, 8148), 'magneticalc.Debug.Debug', 'Debug', (['self', '""".update_labels()"""'], {'refresh': '(True)'}), "(self, '.update_labels()', refresh=True)\n", (8108, 8148), False, 'from magneticalc.Debug import Debug\n'), ((8793, 8840), 'magneticalc.Debug.Debug', 'Debug', (['self', '""".update_controls()"""'], {'refresh': '(True)'}), "(self, '.update_controls()', refresh=True)\n", (8798, 8840), False, 'from magneticalc.Debug import Debug\n'), ((13521, 13559), 'magneticalc.Debug.Debug', 'Debug', (['self', '""".disable_field_labels()"""'], {}), "(self, '.disable_field_labels()')\n", (13526, 13559), False, 'from magneticalc.Debug import Debug\n'), ((14134, 14198), 'magneticalc.Debug.Debug', 'Debug', (['self', 'f""".prevent_excessive_field_labels(choice={choice})"""'], {}), "(self, f'.prevent_excessive_field_labels(choice={choice})')\n", (14139, 14198), False, 'from magneticalc.Debug import Debug\n'), ((2580, 2641), 'magneticalc.QtWidgets2.QIconLabel.QIconLabel', 'QIconLabel', (['"""Point Scale"""', '"""fa.circle"""'], {'color': 'Theme.DarkColor'}), "('Point Scale', 'fa.circle', color=Theme.DarkColor)\n", (2590, 2641), False, 'from magneticalc.QtWidgets2.QIconLabel import QIconLabel\n'), ((3253, 3261), 'magneticalc.QtWidgets2.QHLine.QHLine', 'QHLine', ([], {}), '()\n', (3259, 3261), False, 'from magneticalc.QtWidgets2.QHLine import QHLine\n'), ((3287, 3353), 'magneticalc.QtWidgets2.QIconLabel.QIconLabel', 'QIconLabel', (['"""Arrow Scale"""', 
'"""fa.arrow-right"""'], {'color': 'Theme.DarkColor'}), "('Arrow Scale', 'fa.arrow-right', color=Theme.DarkColor)\n", (3297, 3353), False, 'from magneticalc.QtWidgets2.QIconLabel import QIconLabel\n'), ((3514, 3544), 'magneticalc.QtWidgets2.QLabel2.QLabel2', 'QLabel2', (['"""Head:"""'], {'expand': '(False)'}), "('Head:', expand=False)\n", (3521, 3544), False, 'from magneticalc.QtWidgets2.QLabel2 import QLabel2\n'), ((4123, 4153), 'magneticalc.QtWidgets2.QLabel2.QLabel2', 'QLabel2', (['"""Line:"""'], {'expand': '(False)'}), "('Line:', expand=False)\n", (4130, 4153), False, 'from magneticalc.QtWidgets2.QLabel2 import QLabel2\n'), ((5076, 5084), 'magneticalc.QtWidgets2.QHLine.QHLine', 'QHLine', ([], {}), '()\n', (5082, 5084), False, 'from magneticalc.QtWidgets2.QHLine import QHLine\n'), ((5110, 5171), 'magneticalc.QtWidgets2.QIconLabel.QIconLabel', 'QIconLabel', (['"""Field Boost"""', '"""fa.adjust"""'], {'color': 'Theme.DarkColor'}), "('Field Boost', 'fa.adjust', color=Theme.DarkColor)\n", (5120, 5171), False, 'from magneticalc.QtWidgets2.QIconLabel import QIconLabel\n'), ((5738, 5746), 'magneticalc.QtWidgets2.QHLine.QHLine', 'QHLine', ([], {}), '()\n', (5744, 5746), False, 'from magneticalc.QtWidgets2.QHLine import QHLine\n'), ((5772, 5832), 'magneticalc.QtWidgets2.QIconLabel.QIconLabel', 'QIconLabel', (['"""Field Labels"""', '"""fa.tags"""'], {'color': 'Theme.DarkColor'}), "('Field Labels', 'fa.tags', color=Theme.DarkColor)\n", (5782, 5832), False, 'from magneticalc.QtWidgets2.QIconLabel import QIconLabel\n'), ((6618, 6655), 'magneticalc.QtWidgets2.QLabel2.QLabel2', 'QLabel2', (['""" Labels / cm"""'], {'expand': '(False)'}), "(' Labels / cm', expand=False)\n", (6625, 6655), False, 'from magneticalc.QtWidgets2.QLabel2 import QLabel2\n'), ((6884, 6944), 'magneticalc.QtWidgets2.QLabel2.QLabel2', 'QLabel2', (['"""Total labels:"""'], {'italic': '(True)', 'color': 'Theme.LiteColor'}), "('Total labels:', italic=True, color=Theme.LiteColor)\n", (6891, 6944), False, 'from 
magneticalc.QtWidgets2.QLabel2 import QLabel2\n'), ((10469, 10580), 'magneticalc.Debug.Debug', 'Debug', (['self', 'f""": WARNING: Invalid: sampling_volume_label_resolution_exponent = {target}"""'], {'warning': '(True)'}), "(self,\n f': WARNING: Invalid: sampling_volume_label_resolution_exponent = {target}'\n , warning=True)\n", (10474, 10580), False, 'from magneticalc.Debug import Debug\n'), ((15089, 15263), 'magneticalc.QMessageBox2.QMessageBox2', 'QMessageBox2', ([], {'title': '"""Excessive Number Of Field Labels"""', 'text': 'text', 'icon': 'QMessageBox.Question', 'buttons': '(QMessageBox.Yes | QMessageBox.No)', 'default_button': 'QMessageBox.No'}), "(title='Excessive Number Of Field Labels', text=text, icon=\n QMessageBox.Question, buttons=QMessageBox.Yes | QMessageBox.No,\n default_button=QMessageBox.No)\n", (15101, 15263), False, 'from magneticalc.QMessageBox2 import QMessageBox2\n'), ((15839, 15999), 'magneticalc.QMessageBox2.QMessageBox2', 'QMessageBox2', ([], {'title': '"""Excessive Number Of Field Labels"""', 'text': 'text', 'icon': 'QMessageBox.Information', 'buttons': 'QMessageBox.Ok', 'default_button': 'QMessageBox.Ok'}), "(title='Excessive Number Of Field Labels', text=text, icon=\n QMessageBox.Information, buttons=QMessageBox.Ok, default_button=\n QMessageBox.Ok)\n", (15851, 15999), False, 'from magneticalc.QMessageBox2 import QMessageBox2\n'), ((9103, 9154), 'magneticalc.SamplingVolume_Widget.SamplingVolume_Widget.ResolutionOptionsDict.items', 'SamplingVolume_Widget.ResolutionOptionsDict.items', ([], {}), '()\n', (9152, 9154), False, 'from magneticalc.SamplingVolume_Widget import SamplingVolume_Widget\n'), ((9170, 9190), 'numpy.power', 'np.power', (['(2.0)', 'value'], {}), '(2.0, value)\n', (9178, 9190), True, 'import numpy as np\n')] |
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import cupy as cp
import numpy as np
import cudf
import pickle
from copy import deepcopy
from numba import cuda
from cudf.core.buffer import Buffer
from cuml.common.array import CumlArray
from cuml.utils.memory_utils import _get_size_from_shape
from rmm import DeviceBuffer
# Input container types accepted by CumlArray; None exercises the
# raw-device-pointer construction path.
test_input_types = [
    'numpy', 'numba', 'cupy', 'series', None
]
# Expected result class per requested output type; None entries ('numba',
# 'cudf') are checked with dedicated logic in test_output.
test_output_types = {
    'numpy': np.ndarray,
    'cupy': cp.ndarray,
    'numba': None,
    'series': cudf.Series,
    'dataframe': cudf.DataFrame,
    'cudf': None
}
# Dtypes accepted for construction: numpy scalar types plus their string
# aliases.
test_dtypes_all = [
    np.float16, np.float32, np.float64,
    np.int8, np.int16, np.int32, np.int64,
    np.uint8, np.uint16, np.uint32, np.uint64,
    "float", "float32", "double", "float64",
    "int8", "short", "int16", "int", "int32", "long", "int64",
]
# Dtypes used when converting to the various output containers.
test_dtypes_output = [
    np.float16, np.float32, np.float64,
    np.int8, np.int16, np.int32, np.int64,
    np.uint8, np.uint16, np.uint32, np.uint64
]
# Bare int, 1-D and 2-D shapes, including single-row/single-column cases.
test_shapes = [10, (10,), (10, 1), (10, 5), (1, 10)]
# Symbolic slice cases resolved inside test_get_set_item.
test_slices = [0, 5, 'left', 'right', 'both', 'bool_op']
# Dtypes that cuDF containers cannot represent.
unsupported_cudf_dtypes = [np.uint8, np.uint16, np.uint32, np.uint64,
                           np.float16]
@pytest.mark.parametrize('input_type', test_input_types)
@pytest.mark.parametrize('dtype', test_dtypes_all)
@pytest.mark.parametrize('shape', test_shapes)
@pytest.mark.parametrize('order', ['F', 'C'])
def test_array_init(input_type, dtype, shape, order):
    """
    CumlArray construction from every supported input container.

    Verifies shape/dtype propagation, ownership semantics, and — for the
    raw-pointer path — a full data round-trip back to numpy.
    """
    if input_type == 'series':
        if dtype in unsupported_cudf_dtypes or \
                shape in [(10, 5), (1, 10)]:
            pytest.skip("Unsupported cuDF Series parameter")

    if input_type is not None:
        inp = create_input(input_type, dtype, shape, order)
        ary = CumlArray(data=inp)
    else:
        # Raw-pointer path: construct from a device pointer plus explicit
        # metadata, keeping the cupy array alive as the owner.
        inp = create_input('cupy', dtype, shape, order)
        ptr = inp.__cuda_array_interface__['data'][0]
        ary = CumlArray(data=ptr, owner=inp, dtype=inp.dtype, shape=inp.shape,
                        order=order)

    if shape == (10, 5):
        assert ary.order == order

    if shape == 10:
        assert ary.shape == (10,)
        # Bug fix: this was a bare comparison (a no-op) instead of an assert.
        assert len(ary) == 10
    elif input_type == 'series':
        # cudf Series make their shape (10,) from (10, 1)
        if shape == (10, 1):
            assert ary.shape == (10,)
        else:
            assert ary.shape == shape

    assert ary.dtype == np.dtype(dtype)

    if input_type == 'numpy':
        assert isinstance(ary._owner, DeviceBuffer)
    elif input_type in ['cupy', 'numba', 'series']:
        assert ary._owner is inp
        inp_copy = deepcopy(cp.asarray(inp))
        # testing owner reference keeps data of ary alive
        del inp
        assert cp.all(cp.asarray(ary._owner) == cp.asarray(inp_copy))
    else:
        assert isinstance(ary._owner, cp.ndarray)
        truth = cp.asnumpy(inp)
        del inp
        assert ary.ptr == ptr
        data = ary.to_output('numpy')
        assert np.array_equal(truth, data)
    # No return value: pytest warns on test functions returning non-None.
@pytest.mark.parametrize('data_type', [bytes, bytearray, memoryview])
@pytest.mark.parametrize('dtype', test_dtypes_all)
@pytest.mark.parametrize('shape', test_shapes)
@pytest.mark.parametrize('order', ['F', 'C'])
def test_array_init_from_bytes(data_type, dtype, shape, order):
    """
    CumlArray construction from host byte buffers (bytes/bytearray/memoryview).

    The buffer is all zero bytes, so the resulting array must compare equal
    to a cupy zeros array of the same shape and dtype.
    """
    dtype = np.dtype(dtype)
    bts = bytes(_get_size_from_shape(shape, dtype)[0])

    if data_type != bytes:
        bts = data_type(bts)

    ary = CumlArray(bts, dtype=dtype, shape=shape, order=order)

    if shape == (10, 5):
        assert ary.order == order

    if shape == 10:
        assert ary.shape == (10,)
    else:
        assert ary.shape == shape

    assert ary.dtype == dtype

    cp_ary = cp.zeros(shape, dtype=dtype)
    # Bug fix: this previously compared cp_ary against itself (always true);
    # the array under test must be compared against the zeros reference.
    assert cp.all(cp.asarray(ary) == cp_ary)
@pytest.mark.parametrize('slice', test_slices)
@pytest.mark.parametrize('order', ['C', 'F'])
def test_get_set_item(slice, order):
    """
    __getitem__/__setitem__ behave like the equivalent numpy operations.
    """
    inp = create_input('numpy', 'float32', (10, 10), order)
    ary = CumlArray(data=inp)

    if slice == 'bool_op':
        pytest.skip("not implemented logical indexing, unless we need it")

    # Resolve the symbolic cases into concrete index expressions; integer
    # cases index a single row directly.  (np.s_ builds the slice object,
    # since the parameter name shadows the builtin.)
    bounds = {'left': (5, None), 'right': (None, 5), 'both': (None, None)}
    if isinstance(slice, int):
        idx = slice
    else:
        lo, hi = bounds[slice]
        idx = np.s_[lo:hi]

    assert np.array_equal(inp[idx], ary[idx].to_output('numpy'))
    inp[idx] = 1.0
    ary[idx] = 1.0

    assert np.array_equal(inp, ary.to_output('numpy'))
@pytest.mark.parametrize('shape', test_shapes)
@pytest.mark.parametrize('dtype', test_dtypes_all)
@pytest.mark.parametrize('order', ['C', 'F'])
def test_create_empty(shape, dtype, order):
    """
    CumlArray.empty allocates device memory with the requested spec.
    """
    ary = CumlArray.empty(shape=shape, dtype=dtype, order=order)
    assert isinstance(ary.ptr, int)
    # A bare int shape is normalized to a 1-tuple.
    expected_shape = (shape,) if shape == 10 else shape
    assert ary.shape == expected_shape
    assert ary.dtype == np.dtype(dtype)
    assert isinstance(ary._owner, DeviceBuffer)
@pytest.mark.parametrize('shape', test_shapes)
@pytest.mark.parametrize('dtype', test_dtypes_all)
@pytest.mark.parametrize('order', ['F', 'C'])
def test_create_zeros(shape, dtype, order):
    """
    CumlArray.zeros matches a cupy zeros array of the same spec.
    """
    ary = CumlArray.zeros(shape=shape, dtype=dtype, order=order)
    reference = cp.zeros(shape).astype(dtype)
    assert cp.all(cp.asarray(ary) == reference)
@pytest.mark.parametrize('shape', test_shapes)
@pytest.mark.parametrize('dtype', test_dtypes_all)
@pytest.mark.parametrize('order', ['F', 'C'])
def test_create_ones(shape, dtype, order):
    """
    CumlArray.ones matches a cupy ones array of the same spec.
    """
    ary = CumlArray.ones(shape=shape, dtype=dtype, order=order)
    reference = cp.ones(shape).astype(dtype)
    assert cp.all(cp.asarray(ary) == reference)
@pytest.mark.parametrize('shape', test_shapes)
@pytest.mark.parametrize('dtype', test_dtypes_all)
@pytest.mark.parametrize('order', ['F', 'C'])
def test_create_full(shape, dtype, order):
    """
    CumlArray.full fills every element with the requested value.
    """
    fill = cp.array([cp.random.randint(100)]).astype(dtype)
    ary = CumlArray.full(value=fill[0], shape=shape, dtype=dtype, order=order)
    reference = cp.zeros(shape).astype(dtype) + fill[0]
    assert cp.all(cp.asarray(ary) == reference)
@pytest.mark.parametrize('output_type', test_output_types)
@pytest.mark.parametrize('dtype', test_dtypes_output)
@pytest.mark.parametrize('order', ['F', 'C'])
@pytest.mark.parametrize('shape', test_shapes)
def test_output(output_type, dtype, order, shape):
    """
    to_output() converts to each supported container, rejects unsupported
    dtype/shape combinations with ValueError, and round-trips back through
    CumlArray without data loss.
    """
    inp = create_input('numpy', dtype, shape, order)
    ary = CumlArray(inp)

    # cuDF containers cannot represent some dtypes, and a Series cannot hold
    # multi-column data; both cases must raise.
    if dtype in unsupported_cudf_dtypes and \
            output_type in ['series', 'dataframe', 'cudf']:
        with pytest.raises(ValueError):
            res = ary.to_output(output_type)
    elif shape in [(10, 5), (1, 10)] and output_type == 'series':
        with pytest.raises(ValueError):
            res = ary.to_output(output_type)
    else:
        res = ary.to_output(output_type)

        # using correct numba ndarray check
        if output_type == 'numba':
            assert cuda.devicearray.is_cuda_ndarray(res)
        elif output_type == 'cudf':
            # 'cudf' auto-selects DataFrame for 2-D data, Series otherwise.
            if shape in [(10, 5), (1, 10)]:
                assert isinstance(res, cudf.DataFrame)
            else:
                assert isinstance(res, cudf.Series)
        else:
            assert isinstance(res, test_output_types[output_type])

        # Per-container data equality against the original numpy input.
        if output_type == 'numpy':
            assert np.all(inp == ary.to_output('numpy'))
        elif output_type == 'cupy':
            assert cp.all(cp.asarray(inp) == ary.to_output('cupy'))
        elif output_type == 'numba':
            assert cp.all(cp.asarray(cuda.to_device(inp)) == cp.asarray(res))
        elif output_type == 'series':
            comp = cudf.Series(inp) == res
            assert np.all(comp.to_array())
        elif output_type == 'dataframe':
            mat = cuda.to_device(inp)
            if len(mat.shape) == 1:
                mat = mat.reshape(mat.shape[0], 1)
            comp = cudf.DataFrame.from_gpu_matrix(mat)
            comp = comp == res
            assert np.all(comp.as_gpu_matrix().copy_to_host())

        # check for e2e cartesian product:
        if output_type not in ['dataframe', 'cudf']:
            res2 = CumlArray(res)
            res2 = res2.to_output('numpy')
            # A (10, 1) input converted to Series comes back as 1-D data.
            if output_type == 'series' and shape == (10, 1):
                assert np.all(inp.reshape((1, 10)) == res2)
            else:
                assert np.all(inp == res2)
@pytest.mark.parametrize('dtype', test_dtypes_all)
@pytest.mark.parametrize('shape', test_shapes)
@pytest.mark.parametrize('order', ['F', 'C'])
def test_cuda_array_interface(dtype, shape, order):
    """
    __cuda_array_interface__ mirrors the wrapped numba device array.

    Checks shape, strides, typestr, data pointer and protocol version, then
    consumes the interface via cupy to confirm interoperability.
    """
    # Bug fix: 'order' was previously hard-coded to 'F', so the 'C'
    # parametrization exercised nothing new.
    inp = create_input('numba', dtype, shape, order)
    ary = CumlArray(inp)

    if isinstance(shape, tuple):
        assert ary.__cuda_array_interface__['shape'] == shape
    else:
        assert ary.__cuda_array_interface__['shape'] == (shape,)

    assert ary.__cuda_array_interface__['strides'] == inp.strides
    assert ary.__cuda_array_interface__['typestr'] == inp.dtype.str
    assert ary.__cuda_array_interface__['data'] == \
        (inp.device_ctypes_pointer.value, False)
    assert ary.__cuda_array_interface__['version'] == 2

    # since our test array is small, its faster to transfer it to numpy and
    # take the square root there rather than with a numba cuda kernel
    truth = np.sqrt(inp.copy_to_host())
    result = cp.sqrt(ary)
    assert np.all(truth == cp.asnumpy(result))
    # No return value: pytest warns on test functions returning non-None.
@pytest.mark.parametrize('input_type', test_input_types)
def test_serialize(input_type):
    """
    serialize()/deserialize() round-trips data and array-interface metadata.
    """
    # Series inputs only support 1-D data; all others use a 2-D F-order array.
    shape, order = ((10, 1), 'C') if input_type == 'series' else ((10, 5), 'F')
    inp = create_input(input_type, np.float32, shape, order)
    ary = CumlArray(data=inp)
    header, frames = ary.serialize()
    ary2 = CumlArray.deserialize(header, frames)

    assert pickle.loads(header['type-serialized']) is CumlArray
    assert all(isinstance(f, Buffer) for f in frames)

    if input_type == 'numpy':
        assert np.all(inp == ary2.to_output('numpy'))
    elif input_type == 'series':
        assert np.all(inp == ary2.to_output('series'))
    else:
        assert cp.all(inp == cp.asarray(ary2))

    # CUDA array interface metadata must survive the round-trip.
    for key in ('shape', 'strides', 'typestr'):
        assert ary.__cuda_array_interface__[key] == \
            ary2.__cuda_array_interface__[key]

    if input_type != 'series':
        # skipping one dimensional ary order test
        assert ary.order == ary2.order
@pytest.mark.parametrize('input_type', test_input_types)
def test_pickle(input_type):
    """
    pickle.dumps()/loads() round-trips data and array-interface metadata.
    """
    # Series inputs only support 1-D data; all others use a 2-D F-order array.
    shape, order = ((10, 1), 'C') if input_type == 'series' else ((10, 5), 'F')
    inp = create_input(input_type, np.float32, shape, order)
    ary = CumlArray(data=inp)
    restored = pickle.loads(pickle.dumps(ary))

    if input_type == 'numpy':
        assert np.all(inp == restored.to_output('numpy'))
    elif input_type == 'series':
        assert np.all(inp == restored.to_output('series'))
    else:
        assert cp.all(inp == cp.asarray(restored))

    # CUDA array interface metadata must survive the round-trip.
    for key in ('shape', 'strides', 'typestr'):
        assert ary.__cuda_array_interface__[key] == \
            restored.__cuda_array_interface__[key]

    if input_type != 'series':
        # skipping one dimensional ary order test
        assert ary.order == restored.order
@pytest.mark.parametrize('input_type', test_input_types)
def test_deepcopy(input_type):
    """
    copy.deepcopy() yields an equal array backed by distinct device memory.
    """
    # Series inputs only support 1-D data; all others use a 2-D F-order array.
    shape, order = ((10, 1), 'C') if input_type == 'series' else ((10, 5), 'F')
    inp = create_input(input_type, np.float32, shape, order)
    ary = CumlArray(data=inp)
    clone = deepcopy(ary)

    if input_type == 'numpy':
        assert np.all(inp == clone.to_output('numpy'))
    elif input_type == 'series':
        assert np.all(inp == clone.to_output('series'))
    else:
        assert cp.all(inp == cp.asarray(clone))

    # The copy must not alias the original's device buffer.
    assert ary.ptr != clone.ptr

    # CUDA array interface metadata must survive the copy.
    for key in ('shape', 'strides', 'typestr'):
        assert ary.__cuda_array_interface__[key] == \
            clone.__cuda_array_interface__[key]

    if input_type != 'series':
        # skipping one dimensional ary order test
        assert ary.order == clone.order
def create_input(input_type, dtype, shape, order):
    """
    Builds a random test array in the requested container type.

    Floating dtypes draw from np.random.random; integer dtypes from
    cp.random.randint.  The data always lives on the device first and is
    then wrapped/converted into the requested container.
    """
    if dtype in (np.float16, np.float32, np.float64):
        raw = np.random.random(shape)
    else:
        raw = cp.random.randint(100, size=shape)

    device_ary = cp.array(raw, dtype=dtype, order=order)

    if input_type == 'numpy':
        return np.array(cp.asnumpy(device_ary), dtype=dtype, order=order)
    if input_type == 'numba':
        return cuda.as_cuda_array(device_ary)
    if input_type == 'series':
        return cudf.Series(cuda.as_cuda_array(device_ary))
    return device_ary
| [
"cupy.array",
"cuml.common.array.CumlArray.full",
"cuml.common.array.CumlArray",
"pytest.mark.parametrize",
"pytest.raises",
"cudf.DataFrame.from_gpu_matrix",
"cupy.asnumpy",
"cupy.sqrt",
"pickle.dumps",
"pickle.loads",
"copy.deepcopy",
"cuml.common.array.CumlArray.empty",
"cupy.zeros",
"c... | [((1763, 1818), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_type"""', 'test_input_types'], {}), "('input_type', test_input_types)\n", (1786, 1818), False, 'import pytest\n'), ((1820, 1869), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', 'test_dtypes_all'], {}), "('dtype', test_dtypes_all)\n", (1843, 1869), False, 'import pytest\n'), ((1871, 1916), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', 'test_shapes'], {}), "('shape', test_shapes)\n", (1894, 1916), False, 'import pytest\n'), ((1918, 1962), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""order"""', "['F', 'C']"], {}), "('order', ['F', 'C'])\n", (1941, 1962), False, 'import pytest\n'), ((3547, 3615), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data_type"""', '[bytes, bytearray, memoryview]'], {}), "('data_type', [bytes, bytearray, memoryview])\n", (3570, 3615), False, 'import pytest\n'), ((3617, 3666), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', 'test_dtypes_all'], {}), "('dtype', test_dtypes_all)\n", (3640, 3666), False, 'import pytest\n'), ((3668, 3713), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', 'test_shapes'], {}), "('shape', test_shapes)\n", (3691, 3713), False, 'import pytest\n'), ((3715, 3759), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""order"""', "['F', 'C']"], {}), "('order', ['F', 'C'])\n", (3738, 3759), False, 'import pytest\n'), ((4314, 4359), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""slice"""', 'test_slices'], {}), "('slice', test_slices)\n", (4337, 4359), False, 'import pytest\n'), ((4361, 4405), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""order"""', "['C', 'F']"], {}), "('order', ['C', 'F'])\n", (4384, 4405), False, 'import pytest\n'), ((5244, 5289), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', 'test_shapes'], {}), "('shape', test_shapes)\n", (5267, 5289), 
False, 'import pytest\n'), ((5291, 5340), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', 'test_dtypes_all'], {}), "('dtype', test_dtypes_all)\n", (5314, 5340), False, 'import pytest\n'), ((5342, 5386), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""order"""', "['C', 'F']"], {}), "('order', ['C', 'F'])\n", (5365, 5386), False, 'import pytest\n'), ((5724, 5769), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', 'test_shapes'], {}), "('shape', test_shapes)\n", (5747, 5769), False, 'import pytest\n'), ((5771, 5820), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', 'test_dtypes_all'], {}), "('dtype', test_dtypes_all)\n", (5794, 5820), False, 'import pytest\n'), ((5822, 5866), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""order"""', "['F', 'C']"], {}), "('order', ['F', 'C'])\n", (5845, 5866), False, 'import pytest\n'), ((6063, 6108), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', 'test_shapes'], {}), "('shape', test_shapes)\n", (6086, 6108), False, 'import pytest\n'), ((6110, 6159), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', 'test_dtypes_all'], {}), "('dtype', test_dtypes_all)\n", (6133, 6159), False, 'import pytest\n'), ((6161, 6205), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""order"""', "['F', 'C']"], {}), "('order', ['F', 'C'])\n", (6184, 6205), False, 'import pytest\n'), ((6399, 6444), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', 'test_shapes'], {}), "('shape', test_shapes)\n", (6422, 6444), False, 'import pytest\n'), ((6446, 6495), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', 'test_dtypes_all'], {}), "('dtype', test_dtypes_all)\n", (6469, 6495), False, 'import pytest\n'), ((6497, 6541), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""order"""', "['F', 'C']"], {}), "('order', ['F', 'C'])\n", (6520, 6541), False, 'import pytest\n'), ((6824, 
6881), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""output_type"""', 'test_output_types'], {}), "('output_type', test_output_types)\n", (6847, 6881), False, 'import pytest\n'), ((6883, 6935), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', 'test_dtypes_output'], {}), "('dtype', test_dtypes_output)\n", (6906, 6935), False, 'import pytest\n'), ((6937, 6981), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""order"""', "['F', 'C']"], {}), "('order', ['F', 'C'])\n", (6960, 6981), False, 'import pytest\n'), ((6983, 7028), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', 'test_shapes'], {}), "('shape', test_shapes)\n", (7006, 7028), False, 'import pytest\n'), ((9089, 9138), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', 'test_dtypes_all'], {}), "('dtype', test_dtypes_all)\n", (9112, 9138), False, 'import pytest\n'), ((9140, 9185), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', 'test_shapes'], {}), "('shape', test_shapes)\n", (9163, 9185), False, 'import pytest\n'), ((9187, 9231), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""order"""', "['F', 'C']"], {}), "('order', ['F', 'C'])\n", (9210, 9231), False, 'import pytest\n'), ((10080, 10135), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_type"""', 'test_input_types'], {}), "('input_type', test_input_types)\n", (10103, 10135), False, 'import pytest\n'), ((11240, 11295), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_type"""', 'test_input_types'], {}), "('input_type', test_input_types)\n", (11263, 11295), False, 'import pytest\n'), ((12220, 12275), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_type"""', 'test_input_types'], {}), "('input_type', test_input_types)\n", (12243, 12275), False, 'import pytest\n'), ((3836, 3851), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (3844, 3851), True, 'import numpy as np\n'), ((3975, 4028), 
'cuml.common.array.CumlArray', 'CumlArray', (['bts'], {'dtype': 'dtype', 'shape': 'shape', 'order': 'order'}), '(bts, dtype=dtype, shape=shape, order=order)\n', (3984, 4028), False, 'from cuml.common.array import CumlArray\n'), ((4233, 4261), 'cupy.zeros', 'cp.zeros', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (4241, 4261), True, 'import cupy as cp\n'), ((4513, 4532), 'cuml.common.array.CumlArray', 'CumlArray', ([], {'data': 'inp'}), '(data=inp)\n', (4522, 4532), False, 'from cuml.common.array import CumlArray\n'), ((5441, 5495), 'cuml.common.array.CumlArray.empty', 'CumlArray.empty', ([], {'shape': 'shape', 'dtype': 'dtype', 'order': 'order'}), '(shape=shape, dtype=dtype, order=order)\n', (5456, 5495), False, 'from cuml.common.array import CumlArray\n'), ((5921, 5975), 'cuml.common.array.CumlArray.zeros', 'CumlArray.zeros', ([], {'shape': 'shape', 'dtype': 'dtype', 'order': 'order'}), '(shape=shape, dtype=dtype, order=order)\n', (5936, 5975), False, 'from cuml.common.array import CumlArray\n'), ((6259, 6312), 'cuml.common.array.CumlArray.ones', 'CumlArray.ones', ([], {'shape': 'shape', 'dtype': 'dtype', 'order': 'order'}), '(shape=shape, dtype=dtype, order=order)\n', (6273, 6312), False, 'from cuml.common.array import CumlArray\n'), ((6656, 6725), 'cuml.common.array.CumlArray.full', 'CumlArray.full', ([], {'value': 'value[0]', 'shape': 'shape', 'dtype': 'dtype', 'order': 'order'}), '(value=value[0], shape=shape, dtype=dtype, order=order)\n', (6670, 6725), False, 'from cuml.common.array import CumlArray\n'), ((7143, 7157), 'cuml.common.array.CumlArray', 'CumlArray', (['inp'], {}), '(inp)\n', (7152, 7157), False, 'from cuml.common.array import CumlArray\n'), ((9345, 9359), 'cuml.common.array.CumlArray', 'CumlArray', (['inp'], {}), '(inp)\n', (9354, 9359), False, 'from cuml.common.array import CumlArray\n'), ((9999, 10011), 'cupy.sqrt', 'cp.sqrt', (['ary'], {}), '(ary)\n', (10006, 10011), True, 'import cupy as cp\n'), ((10349, 10368), 
'cuml.common.array.CumlArray', 'CumlArray', ([], {'data': 'inp'}), '(data=inp)\n', (10358, 10368), False, 'from cuml.common.array import CumlArray\n'), ((10417, 10454), 'cuml.common.array.CumlArray.deserialize', 'CumlArray.deserialize', (['header', 'frames'], {}), '(header, frames)\n', (10438, 10454), False, 'from cuml.common.array import CumlArray\n'), ((11506, 11525), 'cuml.common.array.CumlArray', 'CumlArray', ([], {'data': 'inp'}), '(data=inp)\n', (11515, 11525), False, 'from cuml.common.array import CumlArray\n'), ((11534, 11551), 'pickle.dumps', 'pickle.dumps', (['ary'], {}), '(ary)\n', (11546, 11551), False, 'import pickle\n'), ((11560, 11575), 'pickle.loads', 'pickle.loads', (['a'], {}), '(a)\n', (11572, 11575), False, 'import pickle\n'), ((12488, 12507), 'cuml.common.array.CumlArray', 'CumlArray', ([], {'data': 'inp'}), '(data=inp)\n', (12497, 12507), False, 'from cuml.common.array import CumlArray\n'), ((12516, 12529), 'copy.deepcopy', 'deepcopy', (['ary'], {}), '(ary)\n', (12524, 12529), False, 'from copy import deepcopy\n'), ((13462, 13506), 'cupy.array', 'cp.array', (['rand_ary'], {'dtype': 'dtype', 'order': 'order'}), '(rand_ary, dtype=dtype, order=order)\n', (13470, 13506), True, 'import cupy as cp\n'), ((2309, 2328), 'cuml.common.array.CumlArray', 'CumlArray', ([], {'data': 'inp'}), '(data=inp)\n', (2318, 2328), False, 'from cuml.common.array import CumlArray\n'), ((2463, 2540), 'cuml.common.array.CumlArray', 'CumlArray', ([], {'data': 'ptr', 'owner': 'inp', 'dtype': 'inp.dtype', 'shape': 'inp.shape', 'order': 'order'}), '(data=ptr, owner=inp, dtype=inp.dtype, shape=inp.shape, order=order)\n', (2472, 2540), False, 'from cuml.common.array import CumlArray\n'), ((2930, 2945), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (2938, 2945), True, 'import numpy as np\n'), ((5657, 5672), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (5665, 5672), True, 'import numpy as np\n'), ((10467, 10506), 'pickle.loads', 'pickle.loads', 
(["header['type-serialized']"], {}), "(header['type-serialized'])\n", (10479, 10506), False, 'import pickle\n'), ((13358, 13381), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (13374, 13381), True, 'import numpy as np\n'), ((13411, 13445), 'cupy.random.randint', 'cp.random.randint', (['(100)'], {'size': 'shape'}), '(100, size=shape)\n', (13428, 13445), True, 'import cupy as cp\n'), ((2154, 2202), 'pytest.skip', 'pytest.skip', (['"""Unsupported cuDF Series parameter"""'], {}), "('Unsupported cuDF Series parameter')\n", (2165, 2202), False, 'import pytest\n'), ((3382, 3397), 'cupy.asnumpy', 'cp.asnumpy', (['inp'], {}), '(inp)\n', (3392, 3397), True, 'import cupy as cp\n'), ((3499, 3526), 'numpy.array_equal', 'np.array_equal', (['truth', 'data'], {}), '(truth, data)\n', (3513, 3526), True, 'import numpy as np\n'), ((3868, 3902), 'cuml.utils.memory_utils._get_size_from_shape', '_get_size_from_shape', (['shape', 'dtype'], {}), '(shape, dtype)\n', (3888, 3902), False, 'from cuml.utils.memory_utils import _get_size_from_shape\n'), ((4281, 4299), 'cupy.asarray', 'cp.asarray', (['cp_ary'], {}), '(cp_ary)\n', (4291, 4299), True, 'import cupy as cp\n'), ((5987, 6002), 'cupy.zeros', 'cp.zeros', (['shape'], {}), '(shape)\n', (5995, 6002), True, 'import cupy as cp\n'), ((6043, 6058), 'cupy.asarray', 'cp.asarray', (['ary'], {}), '(ary)\n', (6053, 6058), True, 'import cupy as cp\n'), ((6324, 6338), 'cupy.ones', 'cp.ones', (['shape'], {}), '(shape)\n', (6331, 6338), True, 'import cupy as cp\n'), ((6379, 6394), 'cupy.asarray', 'cp.asarray', (['ary'], {}), '(ary)\n', (6389, 6394), True, 'import cupy as cp\n'), ((6804, 6819), 'cupy.asarray', 'cp.asarray', (['ary'], {}), '(ary)\n', (6814, 6819), True, 'import cupy as cp\n'), ((7278, 7303), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7291, 7303), False, 'import pytest\n'), ((10040, 10058), 'cupy.asnumpy', 'cp.asnumpy', (['result'], {}), '(result)\n', (10050, 10058), True, 'import 
cupy as cp\n'), ((13562, 13582), 'cupy.asnumpy', 'cp.asnumpy', (['rand_ary'], {}), '(rand_ary)\n', (13572, 13582), True, 'import cupy as cp\n'), ((13658, 13686), 'numba.cuda.as_cuda_array', 'cuda.as_cuda_array', (['rand_ary'], {}), '(rand_ary)\n', (13676, 13686), False, 'from numba import cuda\n'), ((3142, 3157), 'cupy.asarray', 'cp.asarray', (['inp'], {}), '(inp)\n', (3152, 3157), True, 'import cupy as cp\n'), ((6737, 6752), 'cupy.zeros', 'cp.zeros', (['shape'], {}), '(shape)\n', (6745, 6752), True, 'import cupy as cp\n'), ((7429, 7454), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7442, 7454), False, 'import pytest\n'), ((7651, 7688), 'numba.cuda.devicearray.is_cuda_ndarray', 'cuda.devicearray.is_cuda_ndarray', (['res'], {}), '(res)\n', (7683, 7688), False, 'from numba import cuda\n'), ((8846, 8860), 'cuml.common.array.CumlArray', 'CumlArray', (['res'], {}), '(res)\n', (8855, 8860), False, 'from cuml.common.array import CumlArray\n'), ((3256, 3278), 'cupy.asarray', 'cp.asarray', (['ary._owner'], {}), '(ary._owner)\n', (3266, 3278), True, 'import cupy as cp\n'), ((3282, 3302), 'cupy.asarray', 'cp.asarray', (['inp_copy'], {}), '(inp_copy)\n', (3292, 3302), True, 'import cupy as cp\n'), ((5118, 5184), 'pytest.skip', 'pytest.skip', (['"""not implemented logical indexing, unless we need it"""'], {}), "('not implemented logical indexing, unless we need it')\n", (5129, 5184), False, 'import pytest\n'), ((6607, 6629), 'cupy.random.randint', 'cp.random.randint', (['(100)'], {}), '(100)\n', (6624, 6629), True, 'import cupy as cp\n'), ((9066, 9085), 'numpy.all', 'np.all', (['(inp == res2)'], {}), '(inp == res2)\n', (9072, 9085), True, 'import numpy as np\n'), ((10786, 10802), 'cupy.asarray', 'cp.asarray', (['ary2'], {}), '(ary2)\n', (10796, 10802), True, 'import cupy as cp\n'), ((11781, 11794), 'cupy.asarray', 'cp.asarray', (['b'], {}), '(b)\n', (11791, 11794), True, 'import cupy as cp\n'), ((12735, 12748), 'cupy.asarray', 'cp.asarray', (['b'], 
{}), '(b)\n', (12745, 12748), True, 'import cupy as cp\n'), ((13748, 13776), 'numba.cuda.as_cuda_array', 'cuda.as_cuda_array', (['rand_ary'], {}), '(rand_ary)\n', (13766, 13776), False, 'from numba import cuda\n'), ((8131, 8146), 'cupy.asarray', 'cp.asarray', (['inp'], {}), '(inp)\n', (8141, 8146), True, 'import cupy as cp\n'), ((8272, 8287), 'cupy.asarray', 'cp.asarray', (['res'], {}), '(res)\n', (8282, 8287), True, 'import cupy as cp\n'), ((8347, 8363), 'cudf.Series', 'cudf.Series', (['inp'], {}), '(inp)\n', (8358, 8363), False, 'import cudf\n'), ((8474, 8493), 'numba.cuda.to_device', 'cuda.to_device', (['inp'], {}), '(inp)\n', (8488, 8493), False, 'from numba import cuda\n'), ((8600, 8635), 'cudf.DataFrame.from_gpu_matrix', 'cudf.DataFrame.from_gpu_matrix', (['mat'], {}), '(mat)\n', (8630, 8635), False, 'import cudf\n'), ((8248, 8267), 'numba.cuda.to_device', 'cuda.to_device', (['inp'], {}), '(inp)\n', (8262, 8267), False, 'from numba import cuda\n')] |
import os
import argparse
import collections
import random
import time
import sys
import shutil
import numpy as np
import pandas as pd
import tqdm
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import transforms
from retinanet import model, coco_eval, csv_eval
from retinanet.dataloader import CocoDataset, CSVDataset, collater, Resizer, \
AspectRatioBasedSampler, Augmenter, Normalizer
# Fail fast if an incompatible PyTorch major version is installed: this
# script targets the 1.x API.
# NOTE(review): `assert` is stripped when Python runs with -O; an explicit
# raise would be more robust -- confirm before changing the exception type.
assert torch.__version__.split('.')[0] == '1'
def worker_init_fn(worker_id):
    """Seed NumPy's and Python's RNGs inside a DataLoader worker process.

    The seed is derived from the worker's torch seed, so each worker (and
    each epoch) draws from a distinct but reproducible random stream.
    """
    seed = torch.initial_seed() % 2 ** 32
    for seed_rng in (np.random.seed, random.seed):
        seed_rng(seed)
def val_worker_init_fn(worker_id):
    """Give each validation DataLoader worker a fixed seed equal to its id,
    so validation batches are identical across runs."""
    for seed_rng in (np.random.seed, random.seed):
        seed_rng(worker_id)
def main(parser):
    """Train a RetinaNet detector on a COCO- or CSV-format dataset.

    Args:
        parser: argparse.Namespace produced by the __main__ block (dataset
            paths, backbone depth, epochs, learning rate, patience, etc.).

    Side effects: deletes and recreates a run directory under
    parser.save_dir, appends per-epoch metrics to CSV logs there, saves
    per-epoch checkpoints under 'model_states/', and writes the final
    model as 'model_final.pt'.
    """
    # Set seeds for reproducibility: https://pytorch.org/docs/stable/notes/randomness.html
    torch.manual_seed(parser.seed)
    np.random.seed(parser.seed)
    random.seed(parser.seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    # Set current day string
    timestr = time.strftime("%Y%m%d")
    print(f'CUDA available: {torch.cuda.is_available()}')
    # Create folder to save model states to if it doesn't exist.
    # The run name encodes the date plus every hyperparameter that matters.
    MODEL_NAME = f'{timestr}'
    MODEL_NAME += str(parser.preprocessing) if parser.preprocessing != '' else ''
    MODEL_NAME += f'_FiLM-resnet{parser.depth}' if parser.metadata_path != '' else f'_resnet{parser.depth}'
    MODEL_NAME += '_pretrained' if parser.pretrained else ''
    MODEL_NAME += f'_{parser.epochs}epoch'
    MODEL_NAME += '_no-norm' if parser.no_normalize else ''
    MODEL_NAME += '_aug' if parser.augment else ''
    MODEL_NAME += f'_lr-{parser.lr}'
    MODEL_NAME += f'_bs-{parser.batch}'
    MODEL_NAME += f'_patience-{parser.lr_patience}-{parser.patience}' if parser.patience != 0 else f'_patience-{parser.lr_patience}'
    MODEL_NAME += f'_seed-{parser.seed}'
    save_dir = os.path.join(parser.save_dir, MODEL_NAME)
    # NOTE(review): an existing run directory with the same name is deleted
    # wholesale before training starts -- confirm this is intended when
    # re-running an identical configuration.
    if os.path.exists(save_dir):
        shutil.rmtree(save_dir)
    os.mkdir(save_dir)
    os.mkdir(os.path.join(save_dir, 'model_states'))
    # Create csv files for logging training metrics
    train_history = pd.DataFrame({'epoch': [], 'loss': [], 'cls_loss': [], 'bbox_loss': [], 'time': []})
    # NOTE(review): 'max_F_pr' looks like a typo for 'max_F2_pr' (every other
    # F2 column carries the '2'); the same key is used when appending rows
    # below, so the CSV stays internally consistent either way.
    val_history = pd.DataFrame({'epoch': [], 'val_metric': [], 'mAP': [], 'max_F1': [], 'max_F1_pr': [], 'max_F1_re': [], 'max_F2': [], 'max_F_pr': [], 'max_F2_re': []})
    train_history.to_csv(os.path.join(save_dir, 'train_history.csv'), index=False)
    val_history.to_csv(os.path.join(save_dir, 'val_history.csv'), index=False)
    # Create the data loaders
    if parser.dataset == 'coco':
        if parser.coco_path is None:
            raise ValueError('Must provide --coco_path when training on COCO,')
        dataset_train = CocoDataset(parser.coco_path, set_name='train2017',
                                    transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))
        dataset_val = CocoDataset(parser.coco_path, set_name='val2017',
                                  transform=transforms.Compose([Normalizer(), Resizer()]))
    elif parser.dataset == 'csv':
        if parser.csv_train is None:
            raise ValueError('Must provide --csv_train when training on CSV.')
        if parser.csv_classes is None:
            raise ValueError('Must provide --csv_classes when training on CSV.')
        # Training set: augmentation is optional; FiLM metadata flows through
        # each transform when a metadata file was supplied.
        dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes, preprocessing=parser.preprocessing, seg_dir=parser.seg_dir, metadata_file=parser.metadata_path,
                                 transform=transforms.Compose([
                                     Augmenter(augment=parser.augment, metadata=parser.metadata_path != ''),
                                     Normalizer(no_normalize=parser.no_normalize, metadata=parser.metadata_path != ''),
                                     Resizer(metadata=parser.metadata_path != '')
                                 ])
                                 )
        if parser.csv_val is None:
            dataset_val = None
            print('No validation annotations provided.')
        else:
            # Validation set: never augmented (augment=False).
            dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes, preprocessing=parser.preprocessing, seg_dir=parser.seg_dir, metadata_file=parser.metadata_path,
                                    transform=transforms.Compose([
                                        Augmenter(augment=False, metadata=parser.metadata_path != ''),
                                        Normalizer(no_normalize=parser.no_normalize, metadata=parser.metadata_path != ''),
                                        Resizer(metadata=parser.metadata_path != '')
                                    ])
                                    )
    else:
        raise ValueError('Dataset type not understood (must be csv or coco), exiting.')
    sampler = AspectRatioBasedSampler(dataset_train, batch_size=parser.batch, drop_last=False)
    dataloader_train = DataLoader(dataset_train, num_workers=8, collate_fn=collater, batch_sampler=sampler, worker_init_fn=worker_init_fn, pin_memory=True)
    if dataset_val is not None:
        sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False)
        # NOTE(review): dataloader_val is never read below -- evaluation calls
        # take dataset_val directly. Confirm whether this loader is dead code.
        dataloader_val = DataLoader(dataset_val, num_workers=3, collate_fn=collater, batch_sampler=sampler_val, worker_init_fn=val_worker_init_fn, pin_memory=True)
    # Create the model (backbone depth selects the ResNet variant; FiLMed
    # conditioning is enabled whenever a metadata path was given).
    if parser.depth == 18:
        retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=parser.pretrained, FiLMed=parser.metadata_path != '')
    elif parser.depth == 34:
        retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=parser.pretrained, FiLMed=parser.metadata_path != '')
    elif parser.depth == 50:
        retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=parser.pretrained, FiLMed=parser.metadata_path != '')
    elif parser.depth == 101:
        retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=parser.pretrained, FiLMed=parser.metadata_path != '')
    elif parser.depth == 152:
        retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=parser.pretrained, FiLMed=parser.metadata_path != '')
    else:
        raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')
    # Move model to cuda if GPU is available
    if torch.cuda.is_available():
        retinanet = retinanet.cuda()
        retinanet = torch.nn.DataParallel(retinanet).cuda()
    else:
        retinanet = torch.nn.DataParallel(retinanet)
    # Verify device the model is on
    print(f'Model Device: {next(retinanet.parameters()).device}')
    retinanet.training = True
    optimizer = optim.Adam(retinanet.parameters(), lr=parser.lr)
    # LR drops when the (maximized) validation metric plateaus.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=parser.lr_patience, mode='max', verbose=True)
    retinanet.train()
    retinanet.module.freeze_bn()
    print()
    print(f'Num training images: {len(dataset_train)}')
    if parser.csv_val:
        print(f'Num validation images: {len(dataset_val)}')
    print(f'Epochs: {parser.epochs}')
    print(f'Batch size: {parser.batch}')
    print(f'Backbone: ResNet{parser.depth}')
    if parser.metadata_path != '':
        print(f'Using FiLMed RetinaNet')
    if parser.preprocessing != '':
        print(f'Using "{parser.preprocessing}" preprocessing method')
    print()
    print(retinanet)
    best_epoch = 0
    best_val_metric = 0.
    for epoch_num in range(1, parser.epochs+1):
        epoch_start = time.perf_counter()
        retinanet.train()
        retinanet.module.freeze_bn()
        running_loss = 0.
        running_cls_loss = 0.
        running_bbox_loss = 0.
        pbar = tqdm.tqdm(enumerate(dataloader_train), total=len(dataloader_train), desc=f'Epoch {epoch_num}')
        for iter_num, data in pbar:
            # if iter_num == 0:
            #     import matplotlib.pyplot as plt
            #     from matplotlib.patches import Rectangle
            #     import sys
            #     images = data['img'].detach().cpu().numpy()
            #     bboxes = data['annot'].detach().cpu().numpy()
            #     print(images.shape, bboxes.shape)
            #     print(bboxes)
            #     # bboxes = bboxes.round(0).astype(np.int)
            #     for i, (img, bbox) in enumerate(zip(images, bboxes)):
            #         print(img.min(), img.max())
            #         fig, ax = plt.subplots(1, 1, figsize=(6, 6))
            #         ax.imshow(img.transpose((1, 2, 0)), cmap='gray')
            #         for bb in bbox:
            #             if bb[0] == -1:
            #                 continue
            #             ax.add_patch(Rectangle((bb[0], bb[1]), bb[2]-bb[0], bb[3]-bb[1], edgecolor='r', facecolor='none'))
            #         fig.tight_layout()
            #         fig.savefig(f'IMG-AUG-{i+1}.png' if parser.augment else f'IMG-{i+1}.png', bbox_inches='tight', dpi=150)
            #     sys.exit()
            try:
                optimizer.zero_grad()
                if torch.cuda.is_available():
                    classification_loss, regression_loss = retinanet([data['img'].cuda().float(), data['annot']])
                else:
                    classification_loss, regression_loss = retinanet([data['img'].float(), data['annot']])
                classification_loss = classification_loss.mean()
                regression_loss = regression_loss.mean()
                loss = classification_loss + regression_loss
                # Skip backward pass when the batch contributes no loss.
                if bool(loss == 0):
                    continue
                loss.backward()
                # Gradient clipping stabilizes early focal-loss training.
                torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1)
                optimizer.step()
                running_loss += loss.item()
                running_cls_loss += classification_loss.item()
                running_bbox_loss += regression_loss.item()
                pbar.set_postfix({'loss': running_loss/(iter_num+1), 'cls_loss': running_cls_loss/(iter_num+1), 'bbox_loss': running_bbox_loss/(iter_num+1)})
                del classification_loss
                del regression_loss
            # NOTE(review): this broad catch-and-continue hides any error in a
            # batch (including OOM); consider narrowing or logging a traceback.
            except Exception as e:
                print(e)
                continue
        # Epoch-average losses. Relies on iter_num leaking from the loop, so
        # an empty dataloader would raise NameError here.
        l, cls_l, box_l, t = running_loss/(iter_num+1), running_cls_loss/(iter_num+1), running_bbox_loss/(iter_num+1), time.perf_counter()-epoch_start
        pbar.set_postfix({'loss': l, 'cls_loss': cls_l, 'bbox_loss': box_l, 'time': t})
        # Save metrics to csv
        current_metrics = pd.DataFrame({'epoch': [epoch_num], 'loss': [l], 'cls_loss': [cls_l], 'bbox_loss': [box_l], 'time': [t]})
        current_metrics.to_csv(os.path.join(save_dir, 'train_history.csv'), mode='a', header=False, index=False)
        if parser.dataset == 'coco':
            print('Evaluating dataset')
            coco_eval.evaluate_coco(dataset_val, retinanet)
        elif parser.dataset == 'csv' and parser.csv_val is not None:
            # print('Evaluating validation dataset')
            print(f'\n{"-"*10} EPOCH {epoch_num} VALIDATION {"-"*10}')
            mAP, max_F1, max_F1_pr, max_F1_re, max_F2, max_F2_pr, max_F2_re = csv_eval.evaluate(dataset_val, retinanet, save_path=save_dir)
            # Validation metric is the mean of mAP and the best F2 score.
            val_metric = (mAP+max_F2)/2
            if parser.patience != 0:
                # Early-stopping mode: checkpoint only on improvement.
                if val_metric > best_val_metric:
                    print(f'\nEARLY STOPPING: Validation metric has improved from {best_val_metric} to {val_metric}.\n')
                    best_val_metric = val_metric
                    best_epoch = epoch_num
                    torch.save(retinanet.module, os.path.join(save_dir, 'model_states', f'{parser.dataset}_retinanet_{epoch_num}.pt'))
                else:
                    print(f'\nEARLY STOPPING: Validation metric has not improved from {best_val_metric} (for {epoch_num-best_epoch} epochs).\n')
            else:
                # No early stopping: checkpoint every epoch.
                # NOTE(review): best_val_metric/best_epoch are never updated in
                # this branch, so the "has improved" message always compares
                # against 0 -- confirm whether that is intended.
                if val_metric > best_val_metric:
                    print(f'\nValidation metric has improved from {best_val_metric} to {val_metric}.\n')
                else:
                    print(f'\nValidation metric has not improved from {best_val_metric} (for {epoch_num-best_epoch} epochs).\n')
                torch.save(retinanet.module, os.path.join(save_dir, 'model_states', f'{parser.dataset}_retinanet_{epoch_num}.pt'))
            print(f'Time for epoch {epoch_num}: {round(time.perf_counter() - epoch_start, 2)} seconds.')
            print(f'{"-"*10} END EPOCH {epoch_num} VALIDATION {"-"*10}\n')
            # Save val metrics to csv
            current_metrics = pd.DataFrame({'epoch': [epoch_num], 'val_metric': [val_metric], 'mAP': [mAP], 'max_F1': [max_F1], 'max_F1_pr': [max_F1_pr],
                                          'max_F1_re': [max_F1_re], 'max_F2': [max_F2], 'max_F_pr': [max_F2_pr], 'max_F2_re': [max_F2_re]})
            current_metrics.to_csv(os.path.join(save_dir, 'val_history.csv'), mode='a', header=False, index=False)
            # Scheduler only steps when CSV validation ran this epoch.
            scheduler.step(val_metric)
        # Stop once the metric has stagnated for more than `patience` epochs
        # (the chained comparison also requires patience > 0).
        if epoch_num - best_epoch > parser.patience > 0:
            print(f'TERMINATING TRAINING AT EPOCH {epoch_num}. BEST VALIDATION METRIC WAS {best_val_metric}.')
            break
    retinanet.eval()
    torch.save(retinanet, os.path.join(save_dir, 'model_final.pt'))
if __name__ == '__main__':
    # Build the command-line interface, validate the arguments, then hand the
    # parsed namespace to the training entry point.
    cli = argparse.ArgumentParser(description='Simple script for training a RetinaNet network.')
    cli.add_argument('--dataset', required=True, help='Dataset type, must be one of csv or coco.')
    cli.add_argument('--coco_path', help='Path to COCO directory')
    cli.add_argument('--csv_train', help='Path to file containing training annotations (see readme)')
    cli.add_argument('--csv_classes', help='Path to file containing class list (see readme)')
    cli.add_argument('--csv_val', help='Path to file containing validation annotations (optional, see readme)')
    cli.add_argument('--metadata_path', type=str, default='', help='Path to metadata csv file')
    cli.add_argument('--seg_dir', type=str, default='', help='Path to directory containing segmentations for each image')
    cli.add_argument('--preprocessing', type=str, default='', help='Image preprocessing method (one of "", "three-filters", or "rib-seg")')
    cli.add_argument('--depth', type=int, default=50, help='Resnet depth, must be one of 18, 34, 50, 101, 152.')
    cli.add_argument('--epochs', type=int, default=100, help='Number of epochs to train for.')
    cli.add_argument('--batch', type=int, default=2, help='Batch size for training dataset.')
    cli.add_argument('--lr', type=float, default=1e-5, help='Initial learning rate for Adam optimizer.')
    cli.add_argument('--save_dir', type=str, default='/mnt/research/midi_lab/burkowjo_data/model_training_and_eval/pytorch-retinanet/', help='Path to log metrics and model states to.')
    cli.add_argument('--pretrained', action='store_true', help='Determines whether to start with randomized or pre-trained weights.')
    cli.add_argument('--no_normalize', action='store_true', help='Determine whether to apply ImageNet mean/std normalization.')
    cli.add_argument('--augment', action='store_true', help='Determines whether to augment training images.')
    cli.add_argument('--seed', type=int, default=0, help='Random seed for reproducibility.')
    cli.add_argument('--patience', type=int, default=20, help='Number of epochs of no improvement in validation metric before stopping training early.')
    cli.add_argument('--lr_patience', type=int, default=4, help='Number of epochs of no improvement in validation metric before decreasing learning rate tenfold.')
    args = cli.parse_args()
    assert args.preprocessing in ('', 'three-filters', 'rib-seg'), "--preprocessing must be one of ['', 'three-filters', 'rib-seg']"
    if not args.pretrained:
        # Randomly initialized weights should not get ImageNet normalization.
        args.no_normalize = True
    main(args)
| [
"os.mkdir",
"numpy.random.seed",
"argparse.ArgumentParser",
"time.strftime",
"shutil.rmtree",
"os.path.join",
"retinanet.dataloader.Normalizer",
"pandas.DataFrame",
"retinanet.dataloader.Augmenter",
"torch.utils.data.DataLoader",
"torch.__version__.split",
"retinanet.dataloader.Resizer",
"os... | [((604, 631), 'numpy.random.seed', 'np.random.seed', (['worker_seed'], {}), '(worker_seed)\n', (618, 631), True, 'import numpy as np\n'), ((636, 660), 'random.seed', 'random.seed', (['worker_seed'], {}), '(worker_seed)\n', (647, 660), False, 'import random\n'), ((701, 726), 'numpy.random.seed', 'np.random.seed', (['worker_id'], {}), '(worker_id)\n', (715, 726), True, 'import numpy as np\n'), ((731, 753), 'random.seed', 'random.seed', (['worker_id'], {}), '(worker_id)\n', (742, 753), False, 'import random\n'), ((892, 922), 'torch.manual_seed', 'torch.manual_seed', (['parser.seed'], {}), '(parser.seed)\n', (909, 922), False, 'import torch\n'), ((927, 954), 'numpy.random.seed', 'np.random.seed', (['parser.seed'], {}), '(parser.seed)\n', (941, 954), True, 'import numpy as np\n'), ((959, 983), 'random.seed', 'random.seed', (['parser.seed'], {}), '(parser.seed)\n', (970, 983), False, 'import random\n'), ((1117, 1140), 'time.strftime', 'time.strftime', (['"""%Y%m%d"""'], {}), "('%Y%m%d')\n", (1130, 1140), False, 'import time\n'), ((1967, 2008), 'os.path.join', 'os.path.join', (['parser.save_dir', 'MODEL_NAME'], {}), '(parser.save_dir, MODEL_NAME)\n', (1979, 2008), False, 'import os\n'), ((2016, 2040), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (2030, 2040), False, 'import os\n'), ((2078, 2096), 'os.mkdir', 'os.mkdir', (['save_dir'], {}), '(save_dir)\n', (2086, 2096), False, 'import os\n'), ((2223, 2311), 'pandas.DataFrame', 'pd.DataFrame', (["{'epoch': [], 'loss': [], 'cls_loss': [], 'bbox_loss': [], 'time': []}"], {}), "({'epoch': [], 'loss': [], 'cls_loss': [], 'bbox_loss': [],\n 'time': []})\n", (2235, 2311), True, 'import pandas as pd\n'), ((2328, 2487), 'pandas.DataFrame', 'pd.DataFrame', (["{'epoch': [], 'val_metric': [], 'mAP': [], 'max_F1': [], 'max_F1_pr': [],\n 'max_F1_re': [], 'max_F2': [], 'max_F_pr': [], 'max_F2_re': []}"], {}), "({'epoch': [], 'val_metric': [], 'mAP': [], 'max_F1': [],\n 'max_F1_pr': [], 'max_F1_re': [], 
'max_F2': [], 'max_F_pr': [],\n 'max_F2_re': []})\n", (2340, 2487), True, 'import pandas as pd\n'), ((4996, 5081), 'retinanet.dataloader.AspectRatioBasedSampler', 'AspectRatioBasedSampler', (['dataset_train'], {'batch_size': 'parser.batch', 'drop_last': '(False)'}), '(dataset_train, batch_size=parser.batch, drop_last=False\n )\n', (5019, 5081), False, 'from retinanet.dataloader import CocoDataset, CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, Normalizer\n'), ((5100, 5237), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_train'], {'num_workers': '(8)', 'collate_fn': 'collater', 'batch_sampler': 'sampler', 'worker_init_fn': 'worker_init_fn', 'pin_memory': '(True)'}), '(dataset_train, num_workers=8, collate_fn=collater, batch_sampler\n =sampler, worker_init_fn=worker_init_fn, pin_memory=True)\n', (5110, 5237), False, 'from torch.utils.data import DataLoader\n'), ((6548, 6573), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6571, 6573), False, 'import torch\n'), ((6952, 7058), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {'patience': 'parser.lr_patience', 'mode': '"""max"""', 'verbose': '(True)'}), "(optimizer, patience=parser.lr_patience,\n mode='max', verbose=True)\n", (6988, 7058), True, 'import torch.optim as optim\n'), ((13425, 13516), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Simple script for training a RetinaNet network."""'}), "(description=\n 'Simple script for training a RetinaNet network.')\n", (13448, 13516), False, 'import argparse\n'), ((482, 510), 'torch.__version__.split', 'torch.__version__.split', (['"""."""'], {}), "('.')\n", (505, 510), False, 'import torch\n'), ((571, 591), 'torch.initial_seed', 'torch.initial_seed', ([], {}), '()\n', (589, 591), False, 'import torch\n'), ((2050, 2073), 'shutil.rmtree', 'shutil.rmtree', (['save_dir'], {}), '(save_dir)\n', (2063, 2073), False, 'import shutil\n'), 
((2110, 2148), 'os.path.join', 'os.path.join', (['save_dir', '"""model_states"""'], {}), "(save_dir, 'model_states')\n", (2122, 2148), False, 'import os\n'), ((2505, 2548), 'os.path.join', 'os.path.join', (['save_dir', '"""train_history.csv"""'], {}), "(save_dir, 'train_history.csv')\n", (2517, 2548), False, 'import os\n'), ((2586, 2627), 'os.path.join', 'os.path.join', (['save_dir', '"""val_history.csv"""'], {}), "(save_dir, 'val_history.csv')\n", (2598, 2627), False, 'import os\n'), ((5288, 5355), 'retinanet.dataloader.AspectRatioBasedSampler', 'AspectRatioBasedSampler', (['dataset_val'], {'batch_size': '(1)', 'drop_last': '(False)'}), '(dataset_val, batch_size=1, drop_last=False)\n', (5311, 5355), False, 'from retinanet.dataloader import CocoDataset, CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, Normalizer\n'), ((5381, 5524), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_val'], {'num_workers': '(3)', 'collate_fn': 'collater', 'batch_sampler': 'sampler_val', 'worker_init_fn': 'val_worker_init_fn', 'pin_memory': '(True)'}), '(dataset_val, num_workers=3, collate_fn=collater, batch_sampler=\n sampler_val, worker_init_fn=val_worker_init_fn, pin_memory=True)\n', (5391, 5524), False, 'from torch.utils.data import DataLoader\n'), ((6702, 6734), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['retinanet'], {}), '(retinanet)\n', (6723, 6734), False, 'import torch\n'), ((7717, 7736), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (7734, 7736), False, 'import time\n'), ((10727, 10836), 'pandas.DataFrame', 'pd.DataFrame', (["{'epoch': [epoch_num], 'loss': [l], 'cls_loss': [cls_l], 'bbox_loss': [\n box_l], 'time': [t]}"], {}), "({'epoch': [epoch_num], 'loss': [l], 'cls_loss': [cls_l],\n 'bbox_loss': [box_l], 'time': [t]})\n", (10739, 10836), True, 'import pandas as pd\n'), ((12695, 12931), 'pandas.DataFrame', 'pd.DataFrame', (["{'epoch': [epoch_num], 'val_metric': [val_metric], 'mAP': [mAP], 'max_F1':\n [max_F1], 'max_F1_pr': 
[max_F1_pr], 'max_F1_re': [max_F1_re], 'max_F2':\n [max_F2], 'max_F_pr': [max_F2_pr], 'max_F2_re': [max_F2_re]}"], {}), "({'epoch': [epoch_num], 'val_metric': [val_metric], 'mAP': [mAP\n ], 'max_F1': [max_F1], 'max_F1_pr': [max_F1_pr], 'max_F1_re': [\n max_F1_re], 'max_F2': [max_F2], 'max_F_pr': [max_F2_pr], 'max_F2_re': [\n max_F2_re]})\n", (12707, 12931), True, 'import pandas as pd\n'), ((13340, 13380), 'os.path.join', 'os.path.join', (['save_dir', '"""model_final.pt"""'], {}), "(save_dir, 'model_final.pt')\n", (13352, 13380), False, 'import os\n'), ((10864, 10907), 'os.path.join', 'os.path.join', (['save_dir', '"""train_history.csv"""'], {}), "(save_dir, 'train_history.csv')\n", (10876, 10907), False, 'import os\n'), ((11038, 11085), 'retinanet.coco_eval.evaluate_coco', 'coco_eval.evaluate_coco', (['dataset_val', 'retinanet'], {}), '(dataset_val, retinanet)\n', (11061, 11085), False, 'from retinanet import model, coco_eval, csv_eval\n'), ((12988, 13029), 'os.path.join', 'os.path.join', (['save_dir', '"""val_history.csv"""'], {}), "(save_dir, 'val_history.csv')\n", (13000, 13029), False, 'import os\n'), ((1171, 1196), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1194, 1196), False, 'import torch\n'), ((6632, 6664), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['retinanet'], {}), '(retinanet)\n', (6653, 6664), False, 'import torch\n'), ((9256, 9281), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9279, 9281), False, 'import torch\n'), ((10550, 10569), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (10567, 10569), False, 'import time\n'), ((11360, 11421), 'retinanet.csv_eval.evaluate', 'csv_eval.evaluate', (['dataset_val', 'retinanet'], {'save_path': 'save_dir'}), '(dataset_val, retinanet, save_path=save_dir)\n', (11377, 11421), False, 'from retinanet import model, coco_eval, csv_eval\n'), ((12375, 12463), 'os.path.join', 'os.path.join', (['save_dir', '"""model_states"""', 
'f"""{parser.dataset}_retinanet_{epoch_num}.pt"""'], {}), "(save_dir, 'model_states',\n f'{parser.dataset}_retinanet_{epoch_num}.pt')\n", (12387, 12463), False, 'import os\n'), ((11785, 11873), 'os.path.join', 'os.path.join', (['save_dir', '"""model_states"""', 'f"""{parser.dataset}_retinanet_{epoch_num}.pt"""'], {}), "(save_dir, 'model_states',\n f'{parser.dataset}_retinanet_{epoch_num}.pt')\n", (11797, 11873), False, 'import os\n'), ((2967, 2979), 'retinanet.dataloader.Normalizer', 'Normalizer', ([], {}), '()\n', (2977, 2979), False, 'from retinanet.dataloader import CocoDataset, CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, Normalizer\n'), ((2981, 2992), 'retinanet.dataloader.Augmenter', 'Augmenter', ([], {}), '()\n', (2990, 2992), False, 'from retinanet.dataloader import CocoDataset, CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, Normalizer\n'), ((2994, 3003), 'retinanet.dataloader.Resizer', 'Resizer', ([], {}), '()\n', (3001, 3003), False, 'from retinanet.dataloader import CocoDataset, CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, Normalizer\n'), ((3143, 3155), 'retinanet.dataloader.Normalizer', 'Normalizer', ([], {}), '()\n', (3153, 3155), False, 'from retinanet.dataloader import CocoDataset, CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, Normalizer\n'), ((3157, 3166), 'retinanet.dataloader.Resizer', 'Resizer', ([], {}), '()\n', (3164, 3166), False, 'from retinanet.dataloader import CocoDataset, CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, Normalizer\n'), ((3740, 3810), 'retinanet.dataloader.Augmenter', 'Augmenter', ([], {'augment': 'parser.augment', 'metadata': "(parser.metadata_path != '')"}), "(augment=parser.augment, metadata=parser.metadata_path != '')\n", (3749, 3810), False, 'from retinanet.dataloader import CocoDataset, CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, Normalizer\n'), ((3851, 3936), 
'retinanet.dataloader.Normalizer', 'Normalizer', ([], {'no_normalize': 'parser.no_normalize', 'metadata': "(parser.metadata_path != '')"}), "(no_normalize=parser.no_normalize, metadata=parser.metadata_path !=\n '')\n", (3861, 3936), False, 'from retinanet.dataloader import CocoDataset, CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, Normalizer\n'), ((3973, 4017), 'retinanet.dataloader.Resizer', 'Resizer', ([], {'metadata': "(parser.metadata_path != '')"}), "(metadata=parser.metadata_path != '')\n", (3980, 4017), False, 'from retinanet.dataloader import CocoDataset, CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, Normalizer\n'), ((12513, 12532), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (12530, 12532), False, 'import time\n'), ((4529, 4590), 'retinanet.dataloader.Augmenter', 'Augmenter', ([], {'augment': '(False)', 'metadata': "(parser.metadata_path != '')"}), "(augment=False, metadata=parser.metadata_path != '')\n", (4538, 4590), False, 'from retinanet.dataloader import CocoDataset, CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, Normalizer\n'), ((4632, 4717), 'retinanet.dataloader.Normalizer', 'Normalizer', ([], {'no_normalize': 'parser.no_normalize', 'metadata': "(parser.metadata_path != '')"}), "(no_normalize=parser.no_normalize, metadata=parser.metadata_path !=\n '')\n", (4642, 4717), False, 'from retinanet.dataloader import CocoDataset, CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, Normalizer\n'), ((4755, 4799), 'retinanet.dataloader.Resizer', 'Resizer', ([], {'metadata': "(parser.metadata_path != '')"}), "(metadata=parser.metadata_path != '')\n", (4762, 4799), False, 'from retinanet.dataloader import CocoDataset, CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, Normalizer\n')] |
from __future__ import division
import numpy as np
from PIL import Image
from io import BytesIO
def pil2bio(im, fmt='PNG'):
    """Serialize a PIL Image into an in-memory BytesIO buffer, rewound to 0."""
    # Saving CMYK directly causes an error, so convert to RGB first.
    if im.mode == 'CMYK':
        im = im.convert('RGB')
    buf = BytesIO()
    im.save(buf, format=fmt)
    buf.seek(0)
    return buf
def bio2pil(bioimage):
    """Reverse of pil2bio: decode raw image bytes into a PIL Image."""
    # BytesIO(initial_bytes) starts positioned at offset 0, so no
    # explicit write/seek dance is needed.
    return Image.open(BytesIO(bioimage))
def scale_pad_to_square(im, square_size, pad_colour = 255, pad_mode = 'constant'):
    """
    This function applies the following pre-processing steps on the image:
    1. Resize the image so its larger dimension equals square_size (keeping
       the aspect ratio).
    2. Pad the smaller dimension up to square_size.
    Args:
        im : image (PIL image)
        square_size: target side length in pixels of the square output
        pad_colour: dictionary with 'top', 'bottom', 'left', 'right' RGB
            colours, or a single int used for all four sides
        pad_mode: any numpy pad mode, plus 'edge_mean' (pads each side with
            the mean colour of that edge row/column)
    Returns:
        PIL image with the size of [square_size, square_size, 3]
    """
    # TODO: working on a common RGB mode, as lazy to do the logic for other
    # array layouts every time
    if im.mode != 'RGB':
        im = im.convert('RGB')
    # Expand a scalar pad colour into the per-side dictionary form.
    if type(pad_colour) == int and pad_mode == 'constant':
        c = pad_colour
        pad_colour = {'top' : [c]*3, 'left' : [c]*3, 'right' : [c]*3, 'bottom' : [c]*3}
    # 'edge_mean' is implemented as constant padding with per-edge mean colours.
    if pad_mode == 'edge_mean':
        pad_mode = 'constant'
        im_arr = np.asarray(im)
        pad_colour = {'top': np.mean(im_arr[0], axis=0), 'bottom': np.mean(im_arr[-1], axis=0), 'left': np.mean(im_arr[:,0], axis=0), 'right': np.mean(im_arr[:,-1], axis=0)}
    w = im.size[0]
    h = im.size[1]
    assert w != 0 and h != 0, "input image resulted in size: %dx%d" % (w, h)
    npad = []
    if (w > h):
        # Landscape: width fills the square, pad top/bottom.
        w1 = int(square_size)
        h1 = int(square_size/w * h)
        pad0 = (square_size - h1) // 2
        pad1 = (square_size - h1) - pad0
        npad = ((pad0, pad1), (0,0))
        if pad_mode =='constant':
            pad_clr = [pad_colour['top'], pad_colour['bottom']]
    elif (w < h) :
        # Portrait: height fills the square, pad left/right.
        h1 = int(square_size)
        w1 = int(square_size/h * w)
        pad0 = (square_size - w1) // 2
        pad1 = (square_size - w1) - pad0
        npad = ((0, 0), (pad0, pad1))
        if pad_mode == 'constant':
            pad_clr = [pad_colour['left'], pad_colour['right']]
    else:
        # Already square: resize only, no padding needed.
        padded_im = im
        w1 = square_size
        h1 = square_size
    assert w1 != 0 and h1 != 0, "scaled image resulted in size: %dx%d" % (w1, h1)
    im = im.resize((w1, h1), Image.ANTIALIAS)
    # if image needs to be padded
    if len(npad) > 0:
        im = np.asarray(im)
        padded_im = np.zeros(shape = (square_size, square_size,3), dtype= np.uint8)
        # Pad each colour channel independently so per-side colours apply.
        for i in range(3):
            if pad_mode == 'constant':
                padded_im[:,:,i] = np.pad(im[:,:,i], pad_width=npad, mode='constant', constant_values= (pad_clr[0][i],pad_clr[1][i]))
            else:
                padded_im[:,:,i] = np.pad(im[:,:,i], pad_width=npad, mode=pad_mode)
        output_im = Image.fromarray(padded_im, 'RGB')
    else:
        output_im = im
    wh = output_im.size
    assert wh[0] == square_size and wh[1] == square_size, "scaled image resulted in size: %dx%d" % (wh[0], wh[1])
    return output_im
def undo_scale_pad_to_square(im, orig_size, bboxes=[]):
    """Reverses the scale_pad_to_square function, i.e. crop then resize.
    Returns the corrected image and bboxes.
    im -- PIL image (square)
    orig_size -- (width, height)
    bboxes -- list of bboxes from dabbox.py to correct
    """
    square_size = im.size[0]
    w = orig_size[0]
    h = orig_size[1]
    # Crop window defaults to the full square image.
    left = 0
    upper = 0
    right = im.size[0]
    lower = im.size[1]
    if w > h:
        # Landscape originals were padded top/bottom; crop that padding away.
        w1 = int(square_size)
        h1 = int(square_size / w * h)
        pad = square_size - h1
        upper = pad // 2
        lower -= pad - upper
    else:
        # Portrait (and square) originals were padded left/right.
        h1 = int(square_size)
        w1 = int(square_size / h * w)
        pad = square_size - w1
        left = pad // 2
        right -= pad - left
    im = im.crop((left, upper, right, lower))
    im = im.resize(orig_size, Image.ANTIALIAS)
    # Shift each bbox by the removed padding, then renormalize to the
    # cropped (pre-resize) dimensions.
    bboxes_fixed = [
        [
            (int(round(box[0] * square_size)) - left) / float(w1),
            (int(round(box[1] * square_size)) - upper) / float(h1),
            (int(round(box[2] * square_size)) - left) / float(w1),
            (int(round(box[3] * square_size)) - upper) / float(h1),
        ]
        for box in bboxes
    ]
    return im, bboxes_fixed
"numpy.pad",
"io.BytesIO",
"numpy.asarray",
"numpy.zeros",
"PIL.Image.open",
"numpy.mean",
"PIL.Image.fromarray"
] | [((268, 277), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (275, 277), False, 'from io import BytesIO\n'), ((403, 412), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (410, 412), False, 'from io import BytesIO\n'), ((465, 480), 'PIL.Image.open', 'Image.open', (['bio'], {}), '(bio)\n', (475, 480), False, 'from PIL import Image\n'), ((1463, 1477), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (1473, 1477), True, 'import numpy as np\n'), ((2669, 2683), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (2679, 2683), True, 'import numpy as np\n'), ((2704, 2765), 'numpy.zeros', 'np.zeros', ([], {'shape': '(square_size, square_size, 3)', 'dtype': 'np.uint8'}), '(shape=(square_size, square_size, 3), dtype=np.uint8)\n', (2712, 2765), True, 'import numpy as np\n'), ((3091, 3124), 'PIL.Image.fromarray', 'Image.fromarray', (['padded_im', '"""RGB"""'], {}), "(padded_im, 'RGB')\n", (3106, 3124), False, 'from PIL import Image\n'), ((1507, 1533), 'numpy.mean', 'np.mean', (['im_arr[0]'], {'axis': '(0)'}), '(im_arr[0], axis=0)\n', (1514, 1533), True, 'import numpy as np\n'), ((1545, 1572), 'numpy.mean', 'np.mean', (['im_arr[-1]'], {'axis': '(0)'}), '(im_arr[-1], axis=0)\n', (1552, 1572), True, 'import numpy as np\n'), ((1582, 1611), 'numpy.mean', 'np.mean', (['im_arr[:, 0]'], {'axis': '(0)'}), '(im_arr[:, 0], axis=0)\n', (1589, 1611), True, 'import numpy as np\n'), ((1621, 1651), 'numpy.mean', 'np.mean', (['im_arr[:, -1]'], {'axis': '(0)'}), '(im_arr[:, -1], axis=0)\n', (1628, 1651), True, 'import numpy as np\n'), ((2869, 2974), 'numpy.pad', 'np.pad', (['im[:, :, i]'], {'pad_width': 'npad', 'mode': '"""constant"""', 'constant_values': '(pad_clr[0][i], pad_clr[1][i])'}), "(im[:, :, i], pad_width=npad, mode='constant', constant_values=(\n pad_clr[0][i], pad_clr[1][i]))\n", (2875, 2974), True, 'import numpy as np\n'), ((3021, 3071), 'numpy.pad', 'np.pad', (['im[:, :, i]'], {'pad_width': 'npad', 'mode': 'pad_mode'}), '(im[:, :, i], pad_width=npad, mode=pad_mode)\n', (3027, 
3071), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
def glorot(shape, dtype=tf.float32, scope='default'):
    """Create a variable with Glorot/Xavier-style uniform initialization."""
    with tf.variable_scope(scope):
        # Uniform limit sqrt(6 / (fan_in + fan_out)).
        limit = np.sqrt(6.0 / (shape[0] + shape[1]))
        initial = tf.random_uniform(
            shape, minval=-limit, maxval=limit, dtype=dtype)
        return tf.Variable(initial)
def zeros(shape, dtype=tf.float32, scope='default'):
    """Create a zero-initialized variable of the given shape."""
    with tf.variable_scope(scope):
        return tf.Variable(tf.zeros(shape, dtype=dtype))
class GraphCNN(object):
    """
    Graph convolutional network built from three multi-layer perceptrons:
    a preparation MLP that lifts the input features to output_dim, then
    max_depth rounds of process/aggregate MLPs whose results are
    accumulated back into the node representation.
    """

    def __init__(self, inputs, input_dim, hid_dims, output_dim,
                 max_depth, act_fn, scope='gcn'):
        self.inputs = inputs
        self.input_dim = input_dim
        self.hid_dims = hid_dims
        self.output_dim = output_dim
        self.max_depth = max_depth
        self.act_fn = act_fn
        self.scope = scope

        # initialize message passing transformation parameters
        self.prep_weights, self.prep_bias = \
            self.init(self.input_dim, self.hid_dims, self.output_dim)
        self.proc_weights, self.proc_bias = \
            self.init(self.output_dim, self.hid_dims, self.output_dim)
        self.agg_weights, self.agg_bias = \
            self.init(self.output_dim, self.hid_dims, self.output_dim)

        self.outputs = self.forward()

    def init(self, input_dim, hid_dims, output_dim):
        """Create weight/bias lists for an MLP input_dim -> hid_dims -> output_dim."""
        layer_dims = list(hid_dims) + [output_dim]
        weights = []
        bias = []
        prev_dim = input_dim
        for dim in layer_dims:
            weights.append(glorot([prev_dim, dim], scope=self.scope))
            bias.append(zeros([dim], scope=self.scope))
            prev_dim = dim
        return weights, bias

    def _mlp(self, x, weights, bias):
        """Apply a dense stack, activating after every layer (including the last)."""
        for w, b in zip(weights, bias):
            x = self.act_fn(tf.matmul(x, w) + b)
        return x

    def forward(self):
        # Raise the inputs into the higher (output) dimension.
        x = self._mlp(self.inputs, self.prep_weights, self.prep_bias)
        for _ in range(self.max_depth):
            # Process the features, then aggregate them.
            y = self._mlp(x, self.proc_weights, self.proc_bias)
            y = self._mlp(y, self.agg_weights, self.agg_bias)
            # assemble neighboring information
            x = x + y
        return x
| [
"tensorflow.random_uniform",
"tensorflow.variable_scope",
"tensorflow.matmul",
"tensorflow.Variable",
"tensorflow.zeros",
"numpy.sqrt"
] | [((108, 132), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (125, 132), True, 'import tensorflow as tf\n'), ((155, 191), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (shape[0] + shape[1]))'], {}), '(6.0 / (shape[0] + shape[1]))\n', (162, 191), True, 'import numpy as np\n'), ((207, 283), 'tensorflow.random_uniform', 'tf.random_uniform', (['shape'], {'minval': '(-init_range)', 'maxval': 'init_range', 'dtype': 'dtype'}), '(shape, minval=-init_range, maxval=init_range, dtype=dtype)\n', (224, 283), True, 'import tensorflow as tf\n'), ((312, 329), 'tensorflow.Variable', 'tf.Variable', (['init'], {}), '(init)\n', (323, 329), True, 'import tensorflow as tf\n'), ((394, 418), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (411, 418), True, 'import tensorflow as tf\n'), ((435, 463), 'tensorflow.zeros', 'tf.zeros', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (443, 463), True, 'import tensorflow as tf\n'), ((479, 496), 'tensorflow.Variable', 'tf.Variable', (['init'], {}), '(init)\n', (490, 496), True, 'import tensorflow as tf\n'), ((2077, 2111), 'tensorflow.matmul', 'tf.matmul', (['x', 'self.prep_weights[l]'], {}), '(x, self.prep_weights[l])\n', (2086, 2111), True, 'import tensorflow as tf\n'), ((2343, 2377), 'tensorflow.matmul', 'tf.matmul', (['y', 'self.proc_weights[l]'], {}), '(y, self.proc_weights[l])\n', (2352, 2377), True, 'import tensorflow as tf\n'), ((2556, 2589), 'tensorflow.matmul', 'tf.matmul', (['y', 'self.agg_weights[l]'], {}), '(y, self.agg_weights[l])\n', (2565, 2589), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
# encoding: utf-8
# Created by <NAME> on 2016-02-18 18:04:14
# Licensed under a 3-clause BSD license.
# Revision History:
# Initial Version: 2016-02-18 18:04:14 by <NAME>
# Last Modified On: 2016-02-18 18:04:14 by Brian
from __future__ import print_function
from __future__ import division
import numpy as np
import flask
import jinja2
# If the filter is to return HTML code and you don't want it autmatically
# escaped, return the value as "return Markup(value)".
# Blueprint under which every custom Jinja filter in this module is registered
# via the @jinjablue.app_template_filter() decorators below.
jinjablue = flask.Blueprint('jinja_filters', __name__)
# Ref: http://stackoverflow.com/questions/12288454/how-to-import-custom-jinja2-filters-from-another-file-and-using-flask
@jinja2.contextfilter
@jinjablue.app_template_filter()
def make_token(context, value, group):
    ''' Make a keyword string for the query parameter dropdown live search '''
    return ', '.join([group, value._joinedname])
@jinja2.contextfilter
@jinjablue.app_template_filter()
def filtergaltype(context, value):
    ''' Render a plateifu/mangaid identifier in display form (None otherwise) '''
    labels = {'plateifu': 'Plate-IFU', 'mangaid': 'MaNGA-ID'}
    return labels.get(value)
@jinja2.contextfilter
@jinjablue.app_template_filter()
def filternsa(context, value):
    ''' Replace NSA absolute-magnitude column names with display labels '''
    replacements = (
        ('elpetro_absmag_g_r', 'Abs. g-r'),
        ('elpetro_absmag_u_r', 'Abs. u-r'),
        ('elpetro_absmag_i_z', 'Abs. i-z'),
    )
    for old, new in replacements:
        value = value.replace(old, new)
    return value
@jinja2.contextfilter
@jinjablue.app_template_filter()
def filternsaval(context, value, key):
    ''' Round an NSA value (scalar or list) to 4 decimal places for display '''
    if type(value) is list:
        return ', '.join(str(np.round(item, 4)) for item in value)
    if isinstance(value, (float, np.floating)):
        return np.round(value, 4)
    return value
@jinja2.contextfilter
@jinjablue.app_template_filter()
def allclose(context, value, newvalue):
    ''' Numerically compare two values with numpy allclose; False on any error '''
    # NOTE(review): the third positional argument to np.allclose is rtol;
    # confirm a relative (not absolute) tolerance of 1e-7 is intended.
    try:
        close = np.allclose(float(value), float(newvalue), 1e-7)
    except Exception:
        return False
    return close
@jinja2.contextfilter
@jinjablue.app_template_filter()
def prettyFlag(context, value):
    ''' Pretty print a (name, bit, flags) maskbit triple '''
    name, bit, flags = value
    flaglist = ', '.join(flags)
    return '{0}: {1} - {2}'.format(name, bit, flaglist)
@jinja2.contextfilter
@jinjablue.app_template_filter()
def qaclass(context, value):
    ''' Map quality flags to an (alert class, label) pair '''
    name, bit, flags = value
    # Good means either no flags at all or only VALIDFILE is set.
    if flags in (['VALIDFILE'], []):
        return 'success', 'Good'
    if 'CRITICAL' in flags:
        return 'danger', 'DO NOT USE'
    return 'warning', 'Warning'
@jinja2.contextfilter
@jinjablue.app_template_filter()
def targtype(context, value):
    ''' Return the MaNGA target type based on which bit digit is present '''
    if '1' in value:
        return 'Galaxy'
    if '3' in value:
        return 'Ancillary'
    return 'Stellar'
@jinja2.contextfilter
@jinjablue.app_template_filter()
def split(context, value, delim=None):
    '''Split a string on a delimiter (a single space when none is given)'''
    if not value:
        return None
    return value.split(delim or ' ')
@jinja2.contextfilter
@jinjablue.app_template_filter()
def striprelease(context, value):
    '''Normalize a release string: trimmed, dashes removed, lowercased'''
    return value.strip().replace('-', '').lower()
| [
"numpy.round",
"flask.Blueprint"
] | [((513, 555), 'flask.Blueprint', 'flask.Blueprint', (['"""jinja_filters"""', '__name__'], {}), "('jinja_filters', __name__)\n", (528, 555), False, 'import flask\n'), ((1828, 1846), 'numpy.round', 'np.round', (['value', '(4)'], {}), '(value, 4)\n', (1836, 1846), True, 'import numpy as np\n'), ((1726, 1740), 'numpy.round', 'np.round', (['v', '(4)'], {}), '(v, 4)\n', (1734, 1740), True, 'import numpy as np\n')] |
from sklearn.metrics import confusion_matrix
import tensorflow as tf
import math
import numpy as np
def multi_focal_loss(gamma, alpha, y_true, y_pred):
epsilon = 1.e-7
#gamma=5.
alpha = tf.constant(alpha, dtype=tf.float32)
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.clip_by_value(y_pred, epsilon, 1. - epsilon)
alpha_t = y_true*alpha + (tf.ones_like(y_true)-y_true)*(1-alpha)
y_t = tf.multiply(y_true, y_pred) + tf.multiply(1-y_true, 1-y_pred)
ce = -tf.log(y_t)
weight = tf.pow(tf.subtract(1., y_t), gamma)
focal_loss = tf.multiply(tf.multiply(weight, ce), alpha_t)
#print('f1: ', fl.shape)
focal_loss = tf.reduce_mean(focal_loss, axis=1)
return focal_loss
def confusion(ground_truth, predictions):
np.seterr(invalid='ignore')
tn, fp, fn, tp = confusion_matrix(ground_truth, predictions).ravel()
accuracy = (tp+tn)/(tp+fp+tn+fn)
sensitivity = (tp)/(tp+fn)
specificty = (tn)/(tn+fp)
if math.isnan(accuracy) == True: accuracy = 0
if math.isnan(sensitivity) == True: sensitivity = 0
if math.isnan(specificty) == True: specificty = 0
return accuracy,sensitivity,specificty | [
"math.isnan",
"tensorflow.clip_by_value",
"tensorflow.subtract",
"numpy.seterr",
"tensorflow.reduce_mean",
"tensorflow.constant",
"tensorflow.ones_like",
"tensorflow.cast",
"tensorflow.multiply",
"tensorflow.log",
"sklearn.metrics.confusion_matrix"
] | [((201, 237), 'tensorflow.constant', 'tf.constant', (['alpha'], {'dtype': 'tf.float32'}), '(alpha, dtype=tf.float32)\n', (212, 237), True, 'import tensorflow as tf\n'), ((252, 279), 'tensorflow.cast', 'tf.cast', (['y_true', 'tf.float32'], {}), '(y_true, tf.float32)\n', (259, 279), True, 'import tensorflow as tf\n'), ((293, 341), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['y_pred', 'epsilon', '(1.0 - epsilon)'], {}), '(y_pred, epsilon, 1.0 - epsilon)\n', (309, 341), True, 'import tensorflow as tf\n'), ((666, 700), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['focal_loss'], {'axis': '(1)'}), '(focal_loss, axis=1)\n', (680, 700), True, 'import tensorflow as tf\n'), ((770, 797), 'numpy.seterr', 'np.seterr', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (779, 797), True, 'import numpy as np\n'), ((421, 448), 'tensorflow.multiply', 'tf.multiply', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (432, 448), True, 'import tensorflow as tf\n'), ((451, 486), 'tensorflow.multiply', 'tf.multiply', (['(1 - y_true)', '(1 - y_pred)'], {}), '(1 - y_true, 1 - y_pred)\n', (462, 486), True, 'import tensorflow as tf\n'), ((493, 504), 'tensorflow.log', 'tf.log', (['y_t'], {}), '(y_t)\n', (499, 504), True, 'import tensorflow as tf\n'), ((525, 546), 'tensorflow.subtract', 'tf.subtract', (['(1.0)', 'y_t'], {}), '(1.0, y_t)\n', (536, 546), True, 'import tensorflow as tf\n'), ((583, 606), 'tensorflow.multiply', 'tf.multiply', (['weight', 'ce'], {}), '(weight, ce)\n', (594, 606), True, 'import tensorflow as tf\n'), ((977, 997), 'math.isnan', 'math.isnan', (['accuracy'], {}), '(accuracy)\n', (987, 997), False, 'import math\n'), ((1027, 1050), 'math.isnan', 'math.isnan', (['sensitivity'], {}), '(sensitivity)\n', (1037, 1050), False, 'import math\n'), ((1083, 1105), 'math.isnan', 'math.isnan', (['specificty'], {}), '(specificty)\n', (1093, 1105), False, 'import math\n'), ((819, 862), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['ground_truth', 
'predictions'], {}), '(ground_truth, predictions)\n', (835, 862), False, 'from sklearn.metrics import confusion_matrix\n'), ((372, 392), 'tensorflow.ones_like', 'tf.ones_like', (['y_true'], {}), '(y_true)\n', (384, 392), True, 'import tensorflow as tf\n')] |
"""
Connections are periods of continuous lock (and therefore carrier phase offsets)
between satellites and ground stations.
Things to manage those are stored here
"""
from __future__ import annotations # defer type annotations due to circular stuff
import collections
import collections.abc
from functools import cached_property
from typing import (
TYPE_CHECKING,
Any,
Callable,
Iterable,
Iterator,
Optional,
Sequence,
Set,
Tuple,
Union,
)
import numpy
from laika.lib import coordinates
from tid import tec, types, util
# deal with circular type definitions for Scenario
if TYPE_CHECKING:
from tid.scenario import Scenario
class Connection:
    """
    Each time a receiver acquires a lock on a GNSS satellite,
    some random error of an unknown wavelengths are accumulated
    in the phase measurements. A period of continuous lock
    is referred to as a "connection"

    Therefore, each connection needs to be established so that
    we can solve for the unknown wavelength difference.

    The longer the connection, the more data and better job
    we can do. However, if we mess up, we can introduce extra
    noise.
    """

    def __init__(
        self,
        scenario: Scenario,
        station: str,
        prn: str,
        idx_start: int,
        idx_end: int,
    ) -> None:
        """
        Args:
            scenario: scenario to which this connection belongs
            station: station name
            prn: satellite svid
            idx_start: first data index in scenario data
            idx_end: last data index in scenario data (inclusive)
        """
        self.scenario = scenario
        self.station = station
        self.prn = prn
        self.idx_start = idx_start
        self.idx_end = idx_end
        # tick numbers bracketing this connection, taken from the raw data
        self.tick_start = scenario.station_data[station][prn][idx_start]["tick"]
        self.tick_end = scenario.station_data[station][prn][idx_end]["tick"]
        # ticks within [tick_start, tick_end] that have no observation data
        self.missing_ticks: Set[int] = set()
        self._init_missing_ticks()

        # integer ambiguities, the phase correction information
        # that is the goal of this whole connections stuff
        self.n_chan1 = None
        self.n_chan2 = None
        # "raw" offset: the difference from the code phase tec values
        self.offset = None # this value has units of Meters
        self.offset_error = None

    def _init_missing_ticks(self) -> None:
        """
        Fill out missing ticks table, used when trying to query some data
        from connections
        """
        # indices where consecutive observations jump by more than one tick
        gap_idxs = numpy.where(numpy.diff(self.observations["tick"]) > 1)[0]
        for gap_idx in gap_idxs:
            first, last = self.observations["tick"][gap_idx : gap_idx + 2]
            # everything strictly between the two observed ticks is missing
            self.missing_ticks |= set(range(first + 1, last))

    def tick_idx(self, tick) -> Optional[int]:
        """
        Because we might miss ticks in our observations, this has a helpful mapping
        from tick to idx in this data struct.

        Args:
            tick: the tick number to look up, must be in [tick_start, tick_end]

        Returns:
            idx where that tick is found, or None if it doesn't exist
        """
        if tick in self.missing_ticks:
            return None
        # start from the dense offset, then subtract one for every missing
        # tick that precedes the requested tick
        guess = tick - self.tick_start
        for missing in self.missing_ticks:
            if tick > missing:
                guess -= 1
        return guess

    @property
    def is_glonass(self) -> bool:
        """
        Is this a GLONASS satellite?

        Returns:
            boolean indicating glonass or not
        """
        return self.prn.startswith("R")

    @cached_property
    def glonass_chan(self) -> int:
        """
        The channel that GLONASS is using.

        Returns:
            the integer channel GLONASS is using, or 0 if it is not using GLONASS
        """
        if not self.is_glonass:
            return 0
        chan = self.scenario.get_glonass_chan(self.prn, self.observations)
        # can't have gotten None, or we'd not have gotten it in our connection
        assert chan is not None
        return chan

    @cached_property
    def frequencies(self) -> Tuple[float, float]:
        """
        The (chan1, chan2) frequencies in Hz that correspond to this connection
        """
        frequencies = self.scenario.get_frequencies(self.prn, self.observations)
        assert frequencies is not None, "Unknown frequencies INSIDE connection object"
        return frequencies

    @property
    def ticks(self) -> numpy.ndarray:
        """
        Numpy array of ticks from tick_start to tick_end (inclusive), for convenience
        """
        return numpy.arange(self.tick_start, self.tick_end + 1)

    @property
    def observations(self) -> types.Observations:
        """
        Convenience function: returns the numpy arrays for the raw observations
        corresponding to this connection
        """
        # note: don't use self.ticks, `range` vs `slice` is a lot slower
        assert self.scenario.station_data
        return self.scenario.station_data[self.station][self.prn][
            self.idx_start : self.idx_end + 1
        ]

    def elevation(
        self, sat_pos: Union[types.ECEF_XYZ, types.ECEF_XYZ_LIST]
    ) -> Union[types.ECEF_XYZ, types.ECEF_XYZ_LIST]:
        """
        Convenience wrapper around scenario.station_el, but specifically
        for the station that this connection uses.

        sat_pos: numpy array of XYZ ECEF satellite positions in meters
            must have shape (?, 3)

        Returns:
            elevation in radians (will have same length as sat_pos)
        """
        return self.scenario.station_el(self.station, sat_pos)

    def __contains__(self, tick: int) -> bool:
        """
        Convenience function: `tick in connection` is true iff tick is in
        the range [self.tick_start, self.tick_end]

        Args:
            tick: the tick to check

        Returns:
            boolean whether or not the tick is included
        """
        return self.tick_start <= tick <= self.tick_end

    def _correct_ambiguities_avg(self) -> None:
        """
        Code phase smoothing for carrier phase offsets

        This is the simplest method: use the average difference between
        the code and carrier phases.
        """
        f1, f2 = self.frequencies
        # sign reversal here is correct: ionospheric effect is opposite for code phase
        code_phase_diffs = self.observations["C2C"] - self.observations["C1C"]
        carrier_phase_diffs = tec.C * (
            self.observations["L1C"] / f1 - self.observations["L2C"] / f2
        )

        difference = code_phase_diffs - carrier_phase_diffs
        # assert abs(numpy.mean(difference)) < 100
        self.offset = numpy.mean(difference)
        self.offset_error = numpy.std(difference)

    def correct_ambiguities(self) -> None:
        """
        Attempt to calculate the offsets from L1C to L2C

        In the complex case by using integer ambiguities
        In the simple case by code phase smoothing
        """
        self._correct_ambiguities_avg()

    @property
    def carrier_correction_meters(self) -> float:
        """
        Returns the correction factor for the chan1 chan2 difference

        This may be calculated with integer ambiguity corrections, or
        using code-phase smoothing

        Note: This could be cached, but this calculation is too simple to be worth it
        """
        # if we have integer ambiguity data, use that
        if self.n_chan1 is not None and self.n_chan2 is not None:
            f1, f2 = self.frequencies
            return tec.C * (self.n_chan2 / f2 - self.n_chan1 / f1)

        # otherwise use the code-phase smoothed difference values
        if self.offset is not None:
            return self.offset

        assert False, "carrier correction attempted with no correction mechanism"

    @property
    def ipps(self) -> types.ECEF_XYZ_LIST:
        """
        The locations where the signals associated with this connection
        penetrate the ionosphere.

        Returns:
            numpy array of XYZ ECEF coordinates in meters of the IPPs
        """
        return tec.ion_locs(
            self.scenario.station_locs[self.station], self.observations["sat_pos"]
        )

    @property
    def vtecs(self) -> numpy.ndarray:
        """
        The vtec values associated with this connection

        Returns:
            numpy array of (
                vtec value in TECu,
                unitless slant_to_vertical factor
            )
        """
        return tec.calculate_vtecs(self)
class SparseList(collections.abc.Sequence):
    """
    Helper to represent data from connections where we may be missing stuff
    Don't store all those 0s!

    Presents a dense sequence of length ``max_end_tick + 1`` while only
    storing data for the covered tick ranges; any other index reads as
    ``default``.

    BUG FIX: subclass ``collections.abc.Sequence`` instead of the
    ``collections.Sequence`` alias, which was removed in Python 3.10.
    """

    def __init__(
        self,
        index_ranges: Sequence[Tuple[int, int]],
        data: Iterable[Union[Sequence, numpy.ndarray]],
        tick_lookup: Iterable[Callable[[int], Optional[int]]],
        default: Any = 0.0,
    ):
        """
        Args:
            index_ranges: inclusive (start_tick, end_tick) pairs, parallel to data
            data: dense data arrays, one per range
            tick_lookup: per-range functions mapping a tick to an index into
                the matching datum, or None if the tick is missing
            default: value returned for ticks with no data
        """
        self.ranges = index_ranges
        self.data = data
        self.tick_lookup = tick_lookup
        self.default = default
        self.max = 0 if len(index_ranges) == 0 else max(i[1] for i in index_ranges)

    def __len__(self) -> int:
        """
        Returns the total number of ticks available to be fetched
        (so this matches a Sequence type)

        Returns:
            the integer length
        """
        return self.max + 1

    def __iter__(self) -> Iterator[Any]:
        for i in range(self.max + 1):
            yield self[i]

    def __getitem__(self, tick: Any) -> Any:
        """
        Fetch the given tick data

        Args:
            tick: the tick number to fetch

        Returns:
            the data associated with that tick, or the default value if it was not found
        """
        if isinstance(tick, slice):
            return [self[i] for i in range(*tick.indices(len(self)))]
        if not isinstance(tick, int):
            raise IndexError
        # scan the (few) ranges for one covering this tick
        for data_range, datum, tick_lookup in zip(
            self.ranges, self.data, self.tick_lookup
        ):
            if data_range[0] <= tick <= data_range[1]:
                idx = tick_lookup(tick)
                if idx is None:
                    # tick is inside the range but has no observation
                    return self.default
                return datum[idx]
        return self.default
class ConnTickMap:
    """
    Simple helper class to efficiently convert a tick number back
    into a connection, and to extract per-tick data across a set of
    connections as SparseLists.
    """

    def __init__(self, connections: Iterable[Connection]) -> None:
        self.connections = connections

    def __getitem__(self, tick: int) -> Connection:
        """
        Get the connection covering this tick

        Args:
            tick: the tick to fetch a connection for

        Raises KeyError if tick is not in any of the connections
        """
        for con in self.connections:
            if tick in con:
                return con
        raise KeyError

    def __contains__(self, tick: int) -> bool:
        """
        Do we have data for the tick

        Args:
            tick: the tick to check

        Returns:
            True iff we have data for the tick
        """
        for con in self.connections:
            if tick in con:
                return True
        return False

    def get_vtecs(self) -> Sequence[float]:
        """
        Get vtec data for this set of connections

        Returns:
            SparseList of raw VTEC TECu values, one per tick, 0.0 if unknown
        """
        return SparseList(
            [(con.tick_start, con.tick_end) for con in self.connections],
            [con.vtecs[0] for con in self.connections],
            [con.tick_idx for con in self.connections],
        )

    def get_filtered_vtecs(self) -> Sequence[float]:
        """
        Get bandpass filtered vtec data for this set of connections

        Returns:
            SparseList of 2nd order butterworth bandpass filtered VTEC TECu values,
            one per tick, 0.0 if unknown
        """
        index_ranges = []
        data = []
        tick_lookup = []
        for con in self.connections:
            if con.idx_end - con.idx_start < util.BUTTER_MIN_LENGTH:
                # not enough data to filter
                continue
            filtered = util.bpfilter(con.vtecs[0])
            if filtered is None:
                continue
            # BUG FIX: only record the range once the filter has succeeded.
            # Previously the range was appended before the None check, so a
            # failed filter left `index_ranges` one entry longer than `data`
            # and `tick_lookup`, misaligning every later connection's data.
            index_ranges.append((con.tick_start, con.tick_end))
            data.append(filtered)
            tick_lookup.append(con.tick_idx)
        return SparseList(index_ranges, data, tick_lookup)

    def get_ipps(self) -> Sequence[Optional[types.ECEF_XYZ]]:
        """
        Get the ionospheric pierce points for each tick in this set of connections.

        Returns:
            SparseList of (
                ECEF XYZ coordinates in meters, or None if there is no
                data for that tick
            )
        """
        return SparseList(
            [(con.tick_start, con.tick_end) for con in self.connections],
            [con.ipps for con in self.connections],
            [con.tick_idx for con in self.connections],
            default=None,
        )

    def get_ipps_latlon(self) -> Sequence[Optional[Tuple[float, float]]]:
        """
        Get the ionospheric pierce points for each tick in this set of connections.

        Returns:
            SparseList of (
                lat, lon values, or None if there is no data for that tick
            )
        """
        return SparseList(
            [(con.tick_start, con.tick_end) for con in self.connections],
            [coordinates.ecef2geodetic(con.ipps)[..., 0:2] for con in self.connections],
            [con.tick_idx for con in self.connections],
            default=None,
        )
| [
"numpy.std",
"laika.lib.coordinates.ecef2geodetic",
"numpy.mean",
"numpy.arange",
"tid.util.bpfilter",
"numpy.diff",
"tid.tec.calculate_vtecs",
"tid.tec.ion_locs"
] | [((4734, 4782), 'numpy.arange', 'numpy.arange', (['self.tick_start', '(self.tick_end + 1)'], {}), '(self.tick_start, self.tick_end + 1)\n', (4746, 4782), False, 'import numpy\n'), ((6848, 6870), 'numpy.mean', 'numpy.mean', (['difference'], {}), '(difference)\n', (6858, 6870), False, 'import numpy\n'), ((6899, 6920), 'numpy.std', 'numpy.std', (['difference'], {}), '(difference)\n', (6908, 6920), False, 'import numpy\n'), ((8277, 8366), 'tid.tec.ion_locs', 'tec.ion_locs', (['self.scenario.station_locs[self.station]', "self.observations['sat_pos']"], {}), "(self.scenario.station_locs[self.station], self.observations[\n 'sat_pos'])\n", (8289, 8366), False, 'from tid import tec, types, util\n'), ((8679, 8704), 'tid.tec.calculate_vtecs', 'tec.calculate_vtecs', (['self'], {}), '(self)\n', (8698, 8704), False, 'from tid import tec, types, util\n'), ((12444, 12471), 'tid.util.bpfilter', 'util.bpfilter', (['con.vtecs[0]'], {}), '(con.vtecs[0])\n', (12457, 12471), False, 'from tid import tec, types, util\n'), ((2684, 2721), 'numpy.diff', 'numpy.diff', (["self.observations['tick']"], {}), "(self.observations['tick'])\n", (2694, 2721), False, 'import numpy\n'), ((13682, 13717), 'laika.lib.coordinates.ecef2geodetic', 'coordinates.ecef2geodetic', (['con.ipps'], {}), '(con.ipps)\n', (13707, 13717), False, 'from laika.lib import coordinates\n')] |
# Third-party
import numpy as np
from scipy.optimize import minimize
# Project
from ..utils import gaussian_constant
__all__ = ['fit_emission_line']
def _errfunc(p, pix, flux, flux_ivar):
    """
    Weighted least-squares objective for the Gaussian-plus-constant line model.

    Parameters
    ----------
    p : sequence
        Model parameters ``(amp, centroid, sigma, offset)`` forwarded to
        ``gaussian_constant``.
    pix : array_like
        Pixel grid on which the model is evaluated.
    flux : array_like
        Observed flux, same shape as ``pix``.
    flux_ivar : array_like or numeric
        Inverse-variance weights for ``flux`` (``1.`` gives an unweighted fit).

    Returns
    -------
    float
        Weighted sum of squared residuals, or ``np.inf`` when the amplitude
        or width is negative (unphysical).
    """
    # Reject negative amplitude (p[0]) or width (p[2]) outright.
    if p[0] < 0 or p[2] < 0:
        return np.inf
    # BUG FIX: flux_ivar was accepted but never used, so every fit was
    # effectively unweighted. Weight residuals by the inverse variance
    # (the default flux_ivar=1. preserves the old unweighted behavior).
    return np.sum(flux_ivar * (gaussian_constant(pix, *p) - flux)**2)
def fit_emission_line(pix_grid, flux, flux_ivar=None,
                      centroid0=None, sigma0=None, amp0=None, offset0=None):
    """
    Fit a Gaussian-plus-constant model to an emission line.

    Parameters
    ----------
    pix_grid : array_like
        Must be the same shape as ``flux``.
    flux : array_like
        Must be the same shape as ``pix_grid``.
    flux_ivar : array_like (optional)
        Inverse variance of ``flux``; defaults to uniform weighting.
    centroid0 : numeric (optional)
        Initial guess for line centroid.
    sigma0 : numeric (optional)
        Initial guess for line width.
    amp0 : numeric (optional)
        Initial guess for line amplitude.
    offset0 : numeric (optional)
        Initial guess for line offset above continuum.

    Returns
    -------
    dict
        Best-fit parameters: keys ``amp``, ``centroid``, ``stddev``, ``const``.

    Raises
    ------
    ValueError
        If the fitted centroid lies outside ``pix_grid`` or the fitted width
        is unphysical.
    """
    if centroid0 is None:  # then estimate the initial guess for the centroid
        centroid0 = np.argmax(flux)
    int_ctrd0 = int(round(centroid0))
    if amp0 is None:  # then estimate the initial guess for amplitude
        amp0 = flux[int_ctrd0]  # flux at initial guess
    if offset0 is None:  # then estimate the initial guess for offset
        # TODO / MAGIC NUMBER: buffer hard-coded to 16??
        offset0 = flux[int_ctrd0-16:int_ctrd0+16].min()
    if sigma0 is None:
        sigma0 = 4.  # MAGIC NUMBER
    if flux_ivar is None:
        flux_ivar = 1.
    p0 = (amp0, centroid0, sigma0, offset0)
    res = minimize(_errfunc, x0=p0, args=(pix_grid, flux, flux_ivar))
    p = res.x
    fail_msg = "Fitting spectral line in comp lamp spectrum failed. {msg}"
    if p[1] < min(pix_grid) or p[1] > max(pix_grid):
        # BUG FIX: report the centroid p[1] in the message, not the amplitude p[0].
        raise ValueError(fail_msg.format(msg="Unphysical peak centroid: {:.3f}".format(p[1])))
    elif p[2] < 0.1 or p[2] > 10.:
        raise ValueError(fail_msg.format(msg="Unphysical line width: {:.3f}".format(p[2])))
    return dict(amp=p[0], centroid=p[1], stddev=p[2], const=p[3])
| [
"scipy.optimize.minimize",
"numpy.argmax"
] | [((1490, 1549), 'scipy.optimize.minimize', 'minimize', (['_errfunc'], {'x0': 'p0', 'args': '(pix_grid, flux, flux_ivar)'}), '(_errfunc, x0=p0, args=(pix_grid, flux, flux_ivar))\n', (1498, 1549), False, 'from scipy.optimize import minimize\n'), ((964, 979), 'numpy.argmax', 'np.argmax', (['flux'], {}), '(flux)\n', (973, 979), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Training and using a KNN for 1D data interpolation and extrapolation.
Comparison of training methods, EKF vs SGD.
"""
# Dependencies
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import kalmann
# Get some noisy training data, a fun compact function:
# a Gaussian bump at 0 plus a half-height Gaussian bump at 3, with
# additive Gaussian noise of standard deviation `stdev`.
stdev = 0.05
U = np.arange(-10, 10, 0.2)
Y = np.exp(-U**2) + 0.5*np.exp(-(U-3)**2) + np.random.normal(0, stdev, len(U))
# Create two identical KNN's that will be trained differently
# (1 input, 1 output, 10 hidden logistic neurons each).
knn_ekf = kalmann.KNN(nu=1, ny=1, nl=10, neuron='logistic')
knn_sgd = kalmann.KNN(nu=1, ny=1, nl=10, neuron='logistic')
# Train: one network with the extended Kalman filter, one with plain SGD.
# NOTE(review): P/Q/R presumably are the EKF's initial covariance, process
# noise, and measurement noise (R tracks the data noise 0.1+stdev**2) --
# confirm against the kalmann documentation.
nepochs_ekf = 100
nepochs_sgd = 400
knn_ekf.train(nepochs=nepochs_ekf, U=U, Y=Y, method='ekf', P=0.5, Q=0, R=0.1+stdev**2, pulse_T=0.75)
knn_sgd.train(nepochs=nepochs_sgd, U=U, Y=Y, method='sgd', step=0.05, pulse_T=0.5)
# Evaluation: predict on a grid wider than the training range [-10, 10)
# so the plot also shows extrapolation behavior.
X = np.arange(-15, 15, 0.01)
plt.suptitle("Data Fit", fontsize=22)
plt.scatter(U, Y, c='b', s=5)
plt.plot(X, knn_ekf.feedforward(X), c='g', lw=3, label='EKF: {} epochs'.format(nepochs_ekf))
plt.plot(X, knn_sgd.feedforward(X), c='k', ls=':', lw=2, label='SGD: {} epochs'.format(nepochs_sgd))
plt.grid(True)
plt.legend(fontsize=22)
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"numpy.arange",
"numpy.exp",
"kalmann.KNN",
"matplotlib.pyplot.grid"
] | [((331, 354), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(0.2)'], {}), '(-10, 10, 0.2)\n', (340, 354), True, 'import numpy as np\n'), ((507, 556), 'kalmann.KNN', 'kalmann.KNN', ([], {'nu': '(1)', 'ny': '(1)', 'nl': '(10)', 'neuron': '"""logistic"""'}), "(nu=1, ny=1, nl=10, neuron='logistic')\n", (518, 556), False, 'import kalmann\n'), ((567, 616), 'kalmann.KNN', 'kalmann.KNN', ([], {'nu': '(1)', 'ny': '(1)', 'nl': '(10)', 'neuron': '"""logistic"""'}), "(nu=1, ny=1, nl=10, neuron='logistic')\n", (578, 616), False, 'import kalmann\n'), ((864, 888), 'numpy.arange', 'np.arange', (['(-15)', '(15)', '(0.01)'], {}), '(-15, 15, 0.01)\n', (873, 888), True, 'import numpy as np\n'), ((889, 926), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Data Fit"""'], {'fontsize': '(22)'}), "('Data Fit', fontsize=22)\n", (901, 926), True, 'import matplotlib.pyplot as plt\n'), ((927, 956), 'matplotlib.pyplot.scatter', 'plt.scatter', (['U', 'Y'], {'c': '"""b"""', 's': '(5)'}), "(U, Y, c='b', s=5)\n", (938, 956), True, 'import matplotlib.pyplot as plt\n'), ((1151, 1165), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1159, 1165), True, 'import matplotlib.pyplot as plt\n'), ((1166, 1189), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(22)'}), '(fontsize=22)\n', (1176, 1189), True, 'import matplotlib.pyplot as plt\n'), ((1190, 1200), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1198, 1200), True, 'import matplotlib.pyplot as plt\n'), ((359, 374), 'numpy.exp', 'np.exp', (['(-U ** 2)'], {}), '(-U ** 2)\n', (365, 374), True, 'import numpy as np\n'), ((379, 400), 'numpy.exp', 'np.exp', (['(-(U - 3) ** 2)'], {}), '(-(U - 3) ** 2)\n', (385, 400), True, 'import numpy as np\n')] |
# beautified version of the code found in jupyter notebook
# for more comments and experiments look at notebook.ipynb
import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.callbacks import EarlyStopping
from keras.optimizers import RMSprop, SGD, Adam
# One-hot label column order used by fizz_buzz() and decode().
columns = ['Other', 'Fizz', 'Buzz', 'FizzBuzz']
# Map from optimizer name (as passed to get_model) to its Keras class.
optimizers = {
    'SGD': SGD,
    'RMSprop': RMSprop,
    'Adam': Adam
}
input_size = 10   # number of binary digits used to encode each input number
output_size = 4   # one output unit per label in `columns`
# Stop training once validation loss has not improved for 100 epochs.
earlystopping_cb = EarlyStopping(monitor='val_loss', verbose=0, patience=100, mode='min')
def decode(x):
    """Map each one-hot (or score) row of ``x`` to its label string from `columns`."""
    labels = []
    for best in np.argmax(x, axis=1):
        labels.append(columns[best])
    return labels
def fizz_buzz(x):
    """Vectorized fizzBuzz: one-hot DataFrame (Other/Fizz/Buzz/FizzBuzz) indexed by x."""
    div3 = x % 3 == 0
    div5 = x % 5 == 0
    # Avoid shadowing this function's own name with a local (the original did).
    both = div3 & div5
    neither = ~(div3 | div5)
    frame = pd.DataFrame({
        'Other': neither,
        'Fizz': div3,
        'Buzz': div5,
        'FizzBuzz': both,
    }, columns=columns, index=x).astype('uint8')
    # Make the encoding strictly one-hot: a FizzBuzz row must not also
    # flag Fizz or Buzz.
    frame['Fizz'] -= frame['FizzBuzz']
    frame['Buzz'] -= frame['FizzBuzz']
    return frame
def enocode(x, dim=10):
    """Encode each integer in ``x`` as its ``dim`` lowest binary digits, LSB first."""
    rows = []
    for value in x:
        rows.append([(value >> bit) & 1 for bit in range(dim)])
    return np.array(rows)
def get_train_test(train_num, test_num):
    """Build in-memory train/test splits: binary-encoded inputs, one-hot labels.

    (As our datasets are small, everything is kept in memory.)
    """
    return {
        'X_tr': enocode(train_num),
        'y_tr': fizz_buzz(train_num),
        'X_ts': enocode(test_num),
        'y_ts': fizz_buzz(test_num),
    }
def get_model(hyper_params, optimizer='RMSprop', optimizer_params=None):
    """
    Build and compile a feed-forward classifier from a hyperparameter dict.

    Parameters
    ----------
    hyper_params : dict
        Must contain 'num_hidden', 'hidden_layer_nodes' (one size per hidden
        layer), 'activation', and 'drop_out'.
    optimizer : str
        Key into the module-level `optimizers` mapping.
    optimizer_params : dict, optional
        Keyword arguments for the optimizer constructor; defaults to
        ``{'lr': 0.01}``.

    Returns
    -------
    Compiled ``keras.models.Sequential`` model.
    """
    # BUG FIX: the old signature used a mutable default argument
    # (optimizer_params={'lr': 0.01}) shared across all calls; use a
    # None sentinel instead. Default behavior is unchanged.
    if optimizer_params is None:
        optimizer_params = {'lr': 0.01}
    assert len(hyper_params['hidden_layer_nodes']) == hyper_params['num_hidden'], "specify layer size for each hidden layer"
    model = Sequential()
    # First hidden layer also declares the input dimension.
    model.add(Dense(hyper_params['hidden_layer_nodes'][0], input_dim=input_size))
    model.add(Activation(hyper_params['activation']))
    model.add(Dropout(hyper_params['drop_out']))
    for i in range(1, hyper_params['num_hidden']):
        model.add(Dense(hyper_params['hidden_layer_nodes'][i]))
        model.add(Activation(hyper_params['activation']))
        model.add(Dropout(hyper_params['drop_out']))
    # Softmax output over the 4 fizzbuzz classes.
    model.add(Dense(output_size))
    model.add(Activation('softmax'))
    optimizer = optimizers[optimizer](**optimizer_params)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
def fit_model(model, data, num_epochs=1000, batch_size=32):
    """Train `model` on `data` with early stopping; 20% of rows held out for validation."""
    fit_kwargs = dict(
        validation_split=0.2,
        epochs=num_epochs,
        batch_size=batch_size,
        callbacks=[earlystopping_cb],
        verbose=False,
    )
    model.fit(data['X_tr'], data['y_tr'], **fit_kwargs)
def perform_experiment(model):
    """Predict fizzbuzz labels for 1..100 and build the submission table."""
    cols = ['input', 'label', 'predicted_label']
    test_vals = np.arange(1, 101)
    data = get_train_test(np.array([]), test_vals)
    true_labels = decode(data['y_ts'].values)
    predicted_labels = decode(model.predict(data['X_ts']))
    # Two fixed identification rows required at the top of the output file.
    header = pd.DataFrame({
        'input': ['UBID', 'personNumber'],
        'label': ['vliunda', '50291163'],
        'predicted_label': ['', '']
    }, columns=cols)
    body = pd.DataFrame({
        'input': test_vals,
        'label': true_labels,
        'predicted_label': predicted_labels
    }, columns=cols)
    return pd.concat([header, body]).reset_index(drop=True)
if __name__ == '__main__':
    # hyperparameters optimized with Bayesian optimization
    # to see the experiment see jupyterNotebook
    hyper_params = {
        'drop_out': 0.339,
        'hidden_layer_nodes': [245, 154, 67],
        'num_hidden': 3,
        'activation': 'tanh',
    }
    optimizer_params = {
        'lr': 0.00815
    }
    # Train on 101..1000, hold out 1..100 for the final prediction table.
    data = get_train_test(np.arange(101,1001), np.arange(1,101))
    model = get_model(hyper_params, optimizer_params=optimizer_params)
    fit_model(model, data)
    # Write the header rows plus predictions for 1..100 to output.csv.
    perform_experiment(model).to_csv('./output.csv')
| [
"pandas.DataFrame",
"numpy.argmax",
"keras.layers.Activation",
"keras.layers.Dropout",
"keras.callbacks.EarlyStopping",
"numpy.arange",
"keras.layers.Dense",
"numpy.array",
"keras.models.Sequential",
"pandas.concat"
] | [((505, 575), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'verbose': '(0)', 'patience': '(100)', 'mode': '"""min"""'}), "(monitor='val_loss', verbose=0, patience=100, mode='min')\n", (518, 575), False, 'from keras.callbacks import EarlyStopping\n'), ((651, 671), 'numpy.argmax', 'np.argmax', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (660, 671), True, 'import numpy as np\n'), ((1845, 1857), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1855, 1857), False, 'from keras.models import Sequential\n'), ((2911, 2928), 'numpy.arange', 'np.arange', (['(1)', '(101)'], {}), '(1, 101)\n', (2920, 2928), True, 'import numpy as np\n'), ((3088, 3255), 'pandas.DataFrame', 'pd.DataFrame', (["{'input': ['UBID', 'personNumber'], 'label': ['vliunda', '50291163'],\n 'predicted_label': ['', '']}"], {'columns': "['input', 'label', 'predicted_label']"}), "({'input': ['UBID', 'personNumber'], 'label': ['vliunda',\n '50291163'], 'predicted_label': ['', '']}, columns=['input', 'label',\n 'predicted_label'])\n", (3100, 3255), True, 'import pandas as pd\n'), ((3267, 3399), 'pandas.DataFrame', 'pd.DataFrame', (["{'input': test_vals, 'label': label, 'predicted_label': pred_label}"], {'columns': "['input', 'label', 'predicted_label']"}), "({'input': test_vals, 'label': label, 'predicted_label':\n pred_label}, columns=['input', 'label', 'predicted_label'])\n", (3279, 3399), True, 'import pandas as pd\n'), ((1871, 1937), 'keras.layers.Dense', 'Dense', (["hyper_params['hidden_layer_nodes'][0]"], {'input_dim': 'input_size'}), "(hyper_params['hidden_layer_nodes'][0], input_dim=input_size)\n", (1876, 1937), False, 'from keras.layers import Dense, Activation, Dropout\n'), ((1950, 1988), 'keras.layers.Activation', 'Activation', (["hyper_params['activation']"], {}), "(hyper_params['activation'])\n", (1960, 1988), False, 'from keras.layers import Dense, Activation, Dropout\n'), ((2001, 2034), 'keras.layers.Dropout', 'Dropout', 
(["hyper_params['drop_out']"], {}), "(hyper_params['drop_out'])\n", (2008, 2034), False, 'from keras.layers import Dense, Activation, Dropout\n'), ((2255, 2273), 'keras.layers.Dense', 'Dense', (['output_size'], {}), '(output_size)\n', (2260, 2273), False, 'from keras.layers import Dense, Activation, Dropout\n'), ((2286, 2307), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (2296, 2307), False, 'from keras.layers import Dense, Activation, Dropout\n'), ((2951, 2963), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2959, 2963), True, 'import numpy as np\n'), ((3789, 3809), 'numpy.arange', 'np.arange', (['(101)', '(1001)'], {}), '(101, 1001)\n', (3798, 3809), True, 'import numpy as np\n'), ((3810, 3827), 'numpy.arange', 'np.arange', (['(1)', '(101)'], {}), '(1, 101)\n', (3819, 3827), True, 'import numpy as np\n'), ((861, 972), 'pandas.DataFrame', 'pd.DataFrame', (["{'Other': other, 'Fizz': fizz, 'Buzz': buzz, 'FizzBuzz': fizz_buzz}"], {'columns': 'columns', 'index': 'x'}), "({'Other': other, 'Fizz': fizz, 'Buzz': buzz, 'FizzBuzz':\n fizz_buzz}, columns=columns, index=x)\n", (873, 972), True, 'import pandas as pd\n'), ((2097, 2141), 'keras.layers.Dense', 'Dense', (["hyper_params['hidden_layer_nodes'][i]"], {}), "(hyper_params['hidden_layer_nodes'][i])\n", (2102, 2141), False, 'from keras.layers import Dense, Activation, Dropout\n'), ((2155, 2193), 'keras.layers.Activation', 'Activation', (["hyper_params['activation']"], {}), "(hyper_params['activation'])\n", (2165, 2193), False, 'from keras.layers import Dense, Activation, Dropout\n'), ((2207, 2240), 'keras.layers.Dropout', 'Dropout', (["hyper_params['drop_out']"], {}), "(hyper_params['drop_out'])\n", (2214, 2240), False, 'from keras.layers import Dense, Activation, Dropout\n'), ((3414, 3443), 'pandas.concat', 'pd.concat', (['[header, test_res]'], {}), '([header, test_res])\n', (3423, 3443), True, 'import pandas as pd\n')] |
# Malimg malware-image dataset: count samples per family, extract GIST
# features for each image, then cluster the features with MeanShift.
import os,glob,numpy
os.chdir('/Desktop/malimg_dataset') # the parent folder with sub-folders
list_fams = os.listdir(os.getcwd()) # vector of strings with family names
no_imgs = [] # No. of samples per family
for i in range(len(list_fams)):
    os.chdir(list_fams[i])
    len1 = len(glob.glob('*.png')) # assuming the images are stored as 'png'
    no_imgs.append(len1)
    os.chdir('..')
total = sum(no_imgs) # total number of all samples
y = numpy.zeros(total) # label vector
# Build per-sample family labels: samples are stored contiguously per
# family, so family jj occupies y[temp2:temp2+no_imgs[jj]].
temp1 = numpy.zeros(len(no_imgs)+1)
temp1[1:len(temp1)] = no_imgs
temp2 = int(temp1[0]) # now temp2 is [0 no_imgs]
for jj in range(len(no_imgs)):
    temp3 = temp2 + int(temp1[jj+1])
    for ii in range(temp2, temp3):
        y[ii] = jj
    temp2 = temp2 + int(temp1[jj+1])
# NOTE(review): bare `import Image` is the Python-2-era PIL spelling; on
# Python 3 this would be `from PIL import Image`.
import Image, leargist
X = numpy.zeros((sum(no_imgs), 320)) # Feature Matrix (320 GIST features per image)
cnt = 0
for i in range(len(list_fams)):
    os.chdir(list_fams[i])
    img_list = glob.glob('*.png') # Getting only 'png' files in a folder
    for j in range(len(img_list)):
        im = Image.open(img_list[j])
        im1 = im.resize((64, 64), Image.ANTIALIAS)  # for faster computation
        des = leargist.color_gist(im1)
        X[cnt] = des[0:320]
        cnt = cnt + 1
    os.chdir('..')
import random
from sklearn.cross_validation import StratifiedKFold
from sklearn.utils import shuffle
n_samples, n_features = X.shape
# BUG FIX: range() is not shuffleable in place on Python 3; materialize it.
p = list(range(n_samples)) # an index array, 0:n_samples
random.seed(random.random())
random.shuffle(p) # the index array is now shuffled
X, y = X[p], y[p] # both the arrays are now shuffled
import time
conf_mat = numpy.zeros((len(no_imgs), len(no_imgs))) # Initializing the Confusion Matrix
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using
bandwidth = estimate_bandwidth(X, quantile=0.3)
ms = MeanShift(bandwidth=bandwidth)
ms.fit(X)
# BUG FIX: the fitted sklearn attributes are `labels_` and
# `cluster_centers_` (trailing underscore), not `labelsy`/`cluster_centersy`.
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
    my_members = labels == k
    cluster_center = cluster_centers[k]
    # Plot only the first two GIST dimensions of each cluster.
    plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
    plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
             markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"os.getcwd",
"random.shuffle",
"sklearn.cluster.MeanShift",
"numpy.zeros",
"leargist.color_gist",
"sklearn.cluster.estimate_bandwidth",
"matplotlib.pyplot.figure",
"random.random",
"glob.g... | [((21, 56), 'os.chdir', 'os.chdir', (['"""/Desktop/malimg_dataset"""'], {}), "('/Desktop/malimg_dataset')\n", (29, 56), False, 'import os, glob, numpy\n'), ((436, 454), 'numpy.zeros', 'numpy.zeros', (['total'], {}), '(total)\n', (447, 454), False, 'import os, glob, numpy\n'), ((1434, 1451), 'random.shuffle', 'random.shuffle', (['p'], {}), '(p)\n', (1448, 1451), False, 'import random\n'), ((1836, 1871), 'sklearn.cluster.estimate_bandwidth', 'estimate_bandwidth', (['X'], {'quantile': '(0.3)'}), '(X, quantile=0.3)\n', (1854, 1871), False, 'from sklearn.cluster import MeanShift, estimate_bandwidth\n'), ((1878, 1908), 'sklearn.cluster.MeanShift', 'MeanShift', ([], {'bandwidth': 'bandwidth'}), '(bandwidth=bandwidth)\n', (1887, 1908), False, 'from sklearn.cluster import MeanShift, estimate_bandwidth\n'), ((1995, 2012), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (2004, 2012), True, 'import numpy as np\n'), ((2260, 2273), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2270, 2273), True, 'import matplotlib.pyplot as plt\n'), ((2274, 2283), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2281, 2283), True, 'import matplotlib.pyplot as plt\n'), ((2294, 2331), 'itertools.cycle', 'cycle', (['"""bgrcmykbgrcmykbgrcmykbgrcmyk"""'], {}), "('bgrcmykbgrcmykbgrcmykbgrcmyk')\n", (2299, 2331), False, 'from itertools import cycle\n'), ((2634, 2693), 'matplotlib.pyplot.title', 'plt.title', (["('Estimated number of clusters: %d' % n_clusters_)"], {}), "('Estimated number of clusters: %d' % n_clusters_)\n", (2643, 2693), True, 'import matplotlib.pyplot as plt\n'), ((2694, 2704), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2702, 2704), True, 'import matplotlib.pyplot as plt\n'), ((118, 129), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (127, 129), False, 'import os, glob, numpy\n'), ((245, 267), 'os.chdir', 'os.chdir', (['list_fams[i]'], {}), '(list_fams[i])\n', (253, 267), False, 'import os, glob, numpy\n'), ((365, 379), 
'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (373, 379), False, 'import os, glob, numpy\n'), ((864, 886), 'os.chdir', 'os.chdir', (['list_fams[i]'], {}), '(list_fams[i])\n', (872, 886), False, 'import os, glob, numpy\n'), ((902, 920), 'glob.glob', 'glob.glob', (['"""*.png"""'], {}), "('*.png')\n", (911, 920), False, 'import os, glob, numpy\n'), ((1204, 1218), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (1212, 1218), False, 'import os, glob, numpy\n'), ((1417, 1432), 'random.random', 'random.random', ([], {}), '()\n', (1430, 1432), False, 'import random\n'), ((2452, 2507), 'matplotlib.pyplot.plot', 'plt.plot', (['X[my_members, 0]', 'X[my_members, 1]', "(col + '.')"], {}), "(X[my_members, 0], X[my_members, 1], col + '.')\n", (2460, 2507), True, 'import matplotlib.pyplot as plt\n'), ((2512, 2624), 'matplotlib.pyplot.plot', 'plt.plot', (['cluster_center[0]', 'cluster_center[1]', '"""o"""'], {'markerfacecolor': 'col', 'markeredgecolor': '"""k"""', 'markersize': '(14)'}), "(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,\n markeredgecolor='k', markersize=14)\n", (2520, 2624), True, 'import matplotlib.pyplot as plt\n'), ((280, 298), 'glob.glob', 'glob.glob', (['"""*.png"""'], {}), "('*.png')\n", (289, 298), False, 'import os, glob, numpy\n'), ((1009, 1032), 'Image.open', 'Image.open', (['img_list[j]'], {}), '(img_list[j])\n', (1019, 1032), False, 'import Image, leargist\n'), ((1125, 1149), 'leargist.color_gist', 'leargist.color_gist', (['im1'], {}), '(im1)\n', (1144, 1149), False, 'import Image, leargist\n')] |
import scipy.linalg
import numpy as np
from tqdm import tqdm
from scipy.stats import norm
from sepia.SepiaDistCov import SepiaDistCov
class SepiaPrediction():
    """
    Base class inherited for predictions. Contains:

    :var sepia.SepiaModel model: SepiaModel instance
    :var numpy.ndarray xpred: x values for which to predict, shape (npred, p) matrix, on original untransformed scale
    :var numpy.ndarray/NoneType t_pred: t values for which to predict, shape (npred, q) matrix, optional for full model
        (if not provided, `theta` values from posterior samples will be used) but required for emulator.
    :var dict samples: from `SepiaModel.get_samples()`
    :var bool addResidVar: add the posterior residual variability to the samples?
    :var bool storeRlz: make and store a process realizations for each x_pred / sample combination?
    :var bool storeMuSigma: store the mean and sigma for the GP posterior for each x_pred / sample combination?
    :var bool do_call: call wPred/uvPred upon initialization?
    :var numpy.ndarray/NoneType w: simulation predictions on PCA weight space, shape (#samples, #x_pred, pu)
    :var numpy.ndarray/NoneType u: observation predictions on PCA weight space, shape (#samples, #x_pred, pu)
    :var numpy.ndarray/NoneType v: observation predictions on D weight space, shape (#samples, #x_pred, pv)
    :var numpy.ndarray/NoneType mu: posterior mean, shape (#samples, #x_pred)
    :var numpy.ndarray/NoneType sigma: posterior sigma, shape (#samples, #x_pred, #x_pred)
    """

    def __init__(self, x_pred=None, samples=None, model=None, t_pred=None,
                 addResidVar=False, storeRlz=True, storeMuSigma=False, do_call=True):
        """
        Instantiate SepiaPredict object (usually not called directly, but by subclass __init__).

        :param numpy.ndarray x_pred: x values for which to predict, shape (npred, p) matrix, on original untransformed scale
        :param dict samples: from `SepiaModel.get_samples()`
        :param sepia.SepiaModel model: the SepiaModel object
        :param numpy.ndarray/NoneType t_pred: t values for which to predict, shape (npred, q) matrix, optional for full model
            (if not provided, `theta` values from posterior samples will be used) but required for emulator.
        :param bool addResidVar: add the posterior residual variability to the samples?
        :param bool storeRlz: make and store a process realizations for each x_pred / sample combination?
        :param bool storeMuSigma: store the mean and sigma for the GP posterior for each x_pred / sample combination?
        :param bool do_call: call wPred/uvPred upon initialization?
        :raises TypeError: if inputs are not expected types
        :raises ValueError: if inputs are not expected shapes
        """
        # Input type validation: x_pred is required (2D) unless the model has
        # a dummy x, in which case x_pred must NOT be supplied.
        if not model.data.dummy_x:
            if not isinstance(x_pred,np.ndarray) or len(x_pred.shape)!=2:
                raise TypeError('x_pred is not a 2D numpy ndarray')
        else:
            if x_pred is not None:
                raise TypeError('specified x_pred, but this is a no-x model')
        if t_pred is not None and (not isinstance(t_pred,np.ndarray) or len(t_pred.shape)!=2):
            raise TypeError('t_pred is not a 2D numpy ndarray')
        # Specify the x_pred for a no-x model with a dummy-x field if needed
        if model.data.dummy_x:
            if t_pred is None:
                x_pred=np.array([0.5]).reshape((1,1)) # 1x1 x, to use for predictions of sampled thetas
            else:
                # one dummy-x row (0.5) per requested t_pred row
                x_pred=0.5*np.ones((t_pred.shape[0],1))
        # Validation of input sizes
        if x_pred.shape[1] != model.num.p:
            raise ValueError('x_pred number of columns %d is not the same as model defined p = %d'%\
                             (x_pred.shape[1],model.num.p))
        if t_pred is not None:
            if t_pred.shape[1] != model.num.q:
                raise ValueError('t_pred number of columns %d is not the same as model defined q = %d'%\
                                 (t_pred.shape[1],model.num.q))
            if x_pred.shape[0] != t_pred.shape[0]:
                raise ValueError('x_pred and t_pred have different number of rows: %d vs %d resp.'%\
                                 (x_pred.shape[0],t_pred.shape[0]))
        # transform x and/or t from native scale to emulator scale
        if t_pred is None:
            x_pred,_ = model.data.transform_xt(x=x_pred)
        else:
            x_pred,t_pred = model.data.transform_xt(x=x_pred,t=t_pred)
        # Store inputs and initialize (empty) result containers; subclasses
        # fill w/u/v (and optionally mu/sigma) via wPred/uvPred.
        self.model=model
        self.xpred=x_pred
        self.t_pred=t_pred
        self.samples=samples
        self.addResidVar=addResidVar
        self.storeRlz=storeRlz
        self.storeMuSigma=storeMuSigma
        self.do_call = do_call
        self.w=[]
        self.u=[]
        self.v=[]
        self.mu=[]
        self.sigma=[]
class SepiaEmulatorPrediction(SepiaPrediction):
    """
    Make predictions of the emulator ('eta') component of the model. This functions with an emulator-only model
    or a full model, but predicts the posterior simulation estimates.

    Predictions are performed on init and stored in the object for access methods.
    """

    def __init__(self,*args,**kwrds):
        """
        Instantiate SepiaEmulatorPrediction object.

        :param numpy.ndarray x_pred: x values for which to predict, shape (npred, p) matrix, on original untransformed scale
        :param dict samples: from `SepiaModel.get_samples()`
        :param sepia.SepiaModel model: the SepiaModel object
        :param numpy.ndarray t_pred: t values for which to predict, shape (npred, q) matrix, required.
        :param bool addResidVar: add the posterior residual variability to the samples?
        :param bool storeRlz: make and store a process realizations for each x_pred / sample combination?
        :param bool storeMuSigma: store the mean and sigma for the GP posterior for each x_pred / sample combination?
        :param bool do_call: call wPred upon initialization?
        :raises TypeError: if inputs are not expected types
        :raises ValueError: if inputs are not expected shapes
        """
        super(SepiaEmulatorPrediction,self).__init__(*args,**kwrds)
        # prediction is samples x prediction points (xpreds) x pu (basis)
        if self.model.data.sep_design:
            print('Separable Design model. Separable designs for emulator predictions are not currently implemented')
            print(' Work around is to set this problem up as a full (calibration) model, and use SepiaFullPrediction')
            print(' to get predictions that include those from the emulator. ')
            raise ValueError('Separable design for emulator predictions is not currently implemented')
        if self.do_call:
            # Compute predictions immediately; results land in self.w
            # (and self.mu/self.sigma when storeMuSigma is set).
            wPred(self)

    def get_w(self):
        """
        Returns predictions that were made on init in PCA weight space.

        :return: predictions of w, (#samples x #x_pred x pu) numpy.ndarray
        """
        return self.w

    def get_y(self, std=False):
        """
        Project w through the K basis to provide predictions of y on native (or standardized) scale.
        (standardized refers to the mean=0 and sd=1 standardization process in model setup).

        :param bool std: return standardized (True) or native (default, False) scaling of predictions
        :return: predictions of y, (#samples x #x_pred x py) tensor
        """
        if std:
            return self.get_y_standardized()
        else:
            return self.get_y_native()

    def get_mu_sigma(self):
        """
        Returns the stored (if requested on init) mean (vector) and sigma (matrix) of the posterior process for each sample

        :return: tuple: posterior mean (#samples x #x_pred), sigma (#samples x #x_pred x #x_pred x )
        """
        return self.mu,self.sigma

    def get_y_standardized(self):
        #
        # used by get_y, not called by user
        #
        # Scalar-output models have no K basis: w already is y (standardized).
        if self.model.num.scalar_out:
            return self.w
        else:
            # Contract the pu axis of w against the rows of K: (samples, npred, pu) x (pu, py) -> (samples, npred, py)
            return np.tensordot(self.w,self.model.data.sim_data.K,axes=[[2],[0]])

    def get_y_native(self):
        #
        # used by get_y, not called by user
        #
        # Undo the y standardization: tile sd/mean out to the prediction shape
        # (samples, npred, py), then rescale and shift.
        wshape=self.w.shape
        if isinstance(self.model.data.sim_data.orig_y_sd,np.ndarray):
            ysd_inpredshape = np.tile(self.model.data.sim_data.orig_y_sd, (wshape[0], wshape[1], 1))
        else:
            # cheating a bit, if it's scalar it doesn't have to be tiled out
            ysd_inpredshape=self.model.data.sim_data.orig_y_sd
        ymean_inpredshape = np.tile(self.model.data.sim_data.orig_y_mean, (wshape[0], wshape[1], 1))
        return self.get_y_standardized()*ysd_inpredshape+ymean_inpredshape
class SepiaXvalEmulatorPrediction(SepiaEmulatorPrediction):
    """
    Cross-validated predictions from the emulator.
    """

    def __init__(self, leave_out_inds=None, model=None, *args, **kwrds):
        """
        Instantiate SepiaXvalEmulatorPrediction object.

        :param list/NoneType leave_out_inds: optional, list of lists of indices to leave out in each fold; defaults to leave-one-out
        :param numpy.ndarray x_pred: x values for which to predict, shape (npred, p) matrix, on original untransformed scale
        :param dict samples: from `SepiaModel.get_samples()`
        :param sepia.SepiaModel model: the SepiaModel object
        :param numpy.ndarray t_pred: t values for which to predict, shape (npred, q) matrix, required.
        :param bool addResidVar: add the posterior residual variability to the samples?
        :param bool storeRlz: make and store a process realizations for each x_pred / sample combination?
        :param bool storeMuSigma: store the mean and sigma for the GP posterior for each x_pred / sample combination?
        :param bool do_call: call wPred upon initialization?
        :raises TypeError: if inputs are not expected types
        :raises ValueError: if inputs are not expected shapes
        """
        import copy
        # Initialize the base prediction object over ALL design points, but
        # with do_call=False so no prediction happens yet -- each fold below
        # predicts only its held-out points from a refit-free sub-model.
        if model.data.dummy_x:
            super(SepiaXvalEmulatorPrediction, self).__init__(do_call=False, t_pred=model.data.sim_data.t_trans, model=model, *args, **kwrds)
        else:
            super(SepiaXvalEmulatorPrediction, self).__init__(do_call=False, x_pred=model.data.sim_data.x_trans,
                                                              t_pred=model.data.sim_data.t_trans, model=model, *args, **kwrds)
        m = self.model.num.m
        pu = self.model.num.pu
        # Keep a pristine copy: the loop mutates self.model per fold.
        orig_model = copy.deepcopy(self.model)
        # By default, leave out inds is just each simulation in turn; it is a list of lists
        if leave_out_inds is None:
            leave_out_inds = [[i] for i in np.arange(m)]
        w_cv = []
        x_cv = []
        t_cv = []
        for li in tqdm(leave_out_inds, desc='Cross validation...', mininterval=0.5):
            fit_inds = [i for i in np.arange(m) if i not in li]
            sub_model = copy.deepcopy(orig_model)
            # Subset zt to fit inds, update ztDist
            sub_model.data.zt = sub_model.data.zt[fit_inds, :]
            sub_model.num.m = len(fit_inds)
            sub_model.num.ztDist = SepiaDistCov(sub_model.data.zt, cat_ind=np.concatenate([sub_model.data.x_cat_ind, sub_model.data.t_cat_ind]))
            # Subset x/t to predict inds (check if None)
            if sub_model.data.sim_data.x_trans is None:
                #self.xpred = np.array([[0.5]])
                self.xpred = None
            else:
                self.xpred = sub_model.data.sim_data.x_trans[li, :]
            if sub_model.data.sim_data.t_trans is None:
                self.t_pred = np.array([[]])
            else:
                self.t_pred = sub_model.data.sim_data.t_trans[li, :]
            # Subset w's -- need to index for each pu
            # (w is stacked by basis component, so the fit mask is tiled pu times)
            w_inds = np.zeros(m)
            w_inds[fit_inds] = 1
            w_inds = np.tile(w_inds, pu)
            sub_model.num.w = sub_model.num.w[w_inds == 1, :]
            # Set up sub model and call wPred
            self.model = sub_model
            wPred(self)
            w_cv.append(self.w)
            x_cv.append(self.xpred)
            t_cv.append(self.t_pred)
        # Stitch the per-fold predictions back together in fold order.
        self.w = np.concatenate(w_cv, axis=1)
        self.xpred = np.concatenate(x_cv, axis=0)
        self.t_pred = np.concatenate(t_cv, axis=0)
        self.leave_out_inds = leave_out_inds
class SepiaFullPrediction(SepiaPrediction):
"""
Make predictions of the full model: both emulator ('eta') and discrepancy ('delta') == (u,v)
Predictions are performed on init and stored in the object for access by methods.
"""
    def __init__(self,mode='Sep',*args,**kwrds):
        """
        Instantiate SepiaFullPrediction object.

        :param numpy.ndarray x_pred: x values for which to predict, shape (npred, p) matrix, on original untransformed scale
        :param dict samples: from `SepiaModel.get_samples()`
        :param sepia.SepiaModel model: the SepiaModel object
        :param numpy.ndarray t_pred: t values for which to predict, shape (npred, q) matrix, optional (can take from theta samples).
        :param bool addResidVar: add the posterior residual variability to the samples?
        :param bool storeRlz: make and store a process realizations for each x_pred / sample combination?
        :param bool storeMuSigma: store the mean and sigma for the GP posterior for each x_pred / sample combination?
        :raises TypeError: if inputs are not expected types
        :raises ValueError: if inputs are not expected shapes
        """
        super(SepiaFullPrediction,self).__init__(*args,**kwrds)
        # prediction is samples x prediction points x pu or pv (basis)
        # TODO remove notSep option (from dev debug) now that Sep is vetted
        if mode=='notSep':
            # legacy non-separable code path, kept only for debugging
            uvPred(self)
        else:
            uvPredSep(self)
    def get_u_v(self):
        """
        Returns predictions that were made on init.

        :return: tuple: predictions of u (#samples x #x_pred x pu) , v (#samples x #x_pred x pv)
        """
        return self.u, self.v
def get_ysim(self, as_obs=False, std=False, obs_ref=0):
"""
Project u through the K basis to provide predictions of ysim on the native scale.
(native refers to not the mean=0 and sd=1 standardization process in model setup)
:param bool as_obs: provide ysim predictions at obs locations (defaults to sim locations)
:param bool std: provide ysim predictions on standardized scale (defaults to native scale)
:param int obs_ref: if this is a ragged_obs problem, selects the reference observation index
to use for transformation parameters; default index 0
:return: predictions of native ysim, (#samples x #x_pred x py_sim(or py_obs)) or (#samples x py_sim(or py_obs)) if ragged and obs_ref is specified
"""
if std:
if self.model.num.scalar_out:
return self.u
else:
if as_obs:
if self.model.data.ragged_obs:
K = self.model.data.obs_data.K[obs_ref]
return np.tensordot(self.u,K,axes=[[2],[0]])[:,obs_ref,:]
else:
K = self.model.data.obs_data.K
return np.tensordot(self.u,K,axes=[[2],[0]])
else:
return np.tensordot(self.u,self.model.data.sim_data.K,axes=[[2],[0]])
else:
if self.model.num.scalar_out:
return self.u*self.model.data.sim_data.orig_y_sd + self.model.data.sim_data.orig_y_mean
else:
if as_obs:
if self.model.data.ragged_obs:
K = self.model.data.obs_data.K[obs_ref]
ysd_inpredshape, ymean_inpredshape = self.calc_obs_standardizations_inpredshape(obs_ref=obs_ref)
return (np.tensordot(self.u,K,axes=[[2],[0]])*ysd_inpredshape+ymean_inpredshape)[:,obs_ref,:]
else:
K = self.model.data.obs_data.K
ysd_inpredshape, ymean_inpredshape = self.calc_obs_standardizations_inpredshape(obs_ref=obs_ref)
return np.tensordot(self.u,K,axes=[[2],[0]])*ysd_inpredshape+ymean_inpredshape
else:
ysd_inpredshape, ymean_inpredshape = self.calc_sim_standardizations_inpredshape()
return np.tensordot(self.u,self.model.data.sim_data.K,axes=[[2],[0]])*ysd_inpredshape+ymean_inpredshape
def get_discrepancy(self, as_obs=False, std=False, obs_ref=0):
"""
return Dsim*v to provide predictions of discrepancy on the native scale at sim locations.
(native refers to not the sd=1 standardization process in model setup)
:param bool as_obs: provide discrepancy predictions at obs locations (defaults to sim locations)
:param bool std: provide discrepancy predictions on standardized scale (defaults to native scale)
:param int obs_ref: if this is a ragged_obs problem, selects the reference observation index
to use for transformation parameters; default index 0
:return: predictions of native discrepancy, (#samples x #x_pred x py_sim(or py_obs)) or (#samples x py_sim(or py_obs)) if ragged and obs_ref is specified
"""
if self.model.num.pv==0: # no-discrepancy model
raise TypeError('discrepancy requested from a no-discrepancy model')
if std:
if as_obs:
if self.model.data.ragged_obs:
D = self.model.data.obs_data.D[obs_ref]
return np.tensordot(self.v,D,axes=[[2],[0]])[:,obs_ref,:]
else:
D = self.model.data.obs_data.D
return np.tensordot(self.v,D,axes=[[2],[0]])
else:
return np.tensordot(self.v,self.model.data.sim_data.D,axes=[[2],[0]])
else:
ysd_inpredshape,_ = self.calc_obs_standardizations_inpredshape(obs_ref=obs_ref)
if as_obs:
if self.model.data.ragged_obs:
D = self.model.data.obs_data.D[obs_ref]
return (np.tensordot(self.v,D,axes=[[2],[0]])*ysd_inpredshape)[:,obs_ref,:]
else:
D = self.model.data.obs_data.D
return np.tensordot(self.v,D,axes=[[2],[0]])*ysd_inpredshape
else:
return np.tensordot(self.v,self.model.data.sim_data.D,axes=[[2],[0]])*ysd_inpredshape
def get_yobs(self, as_obs=False, std=False, obs_ref=0):
"""
return y=Ksim*u+Dsim*v to provide predictions of y on the native scale at sim locations.
(native refers to not the mean=0 and sd=1 standardization process in model setup)
:param bool as_obs: provide discrepancy predictions at obs locations (defaults to sim locations)
:param bool std: provide discrepancy predictions on standardized scale (defaults to native scale)
:param int obs_ref: if this is a ragged_obs problem, selects the reference observation index
to use for transformation parameters; default index 0
:return: predictions of native y (Emulator+Discrepancy), (#samples x #x_pred x py_sim(or py_obs)) or (#samples x py_sim(or py_obs)) if ragged and obs_ref is specified
"""
if self.model.num.pv==0: #means it's a no-discrepancy model
return self.get_ysim(as_obs=as_obs, std=std, obs_ref=obs_ref)
else:
return self.get_ysim(as_obs=as_obs,std=std,obs_ref=obs_ref) + \
self.get_discrepancy(as_obs=as_obs,std=std,obs_ref=obs_ref)
def get_mu_sigma(self):
"""
Returns the stored (if requested on init) mean (vector) and sigma (matrix) of the posterior process for each sample
:return: tuple: posterior mean (#samples x #x_pred), sigma (#samples x #x_pred x #x_pred x )
"""
return self.mu,self.sigma
def calc_sim_standardizations_inpredshape(self):
# internal function, calculate the ysd and ymean arrays
# tile out the standardization vectors to the full prediction shape (is this this only way?!?)
ushape=self.u.shape
if isinstance(self.model.data.sim_data.orig_y_sd,np.ndarray):
ysd_inpredshape = np.tile(self.model.data.sim_data.orig_y_sd, (ushape[0], ushape[1], 1))
else:
# cheating a bit, if it's scalar it doesn't have to be tiled out
ysd_inpredshape=self.model.data.sim_data.orig_y_sd
ymean_inpredshape = np.tile(self.model.data.sim_data.orig_y_mean, (ushape[0], ushape[1], 1))
return ysd_inpredshape, ymean_inpredshape
def calc_obs_standardizations_inpredshape(self,obs_ref):
# internal function, calculate the ysd and ymean arrays
# tile out the standardization vectors to the full prediction shape (is this this only way?!?)
if self.model.data.ragged_obs:
if obs_ref<0 or obs_ref>len(self.model.data.obs_data.orig_y_sd):
raise ValueError('obs_ref index specified in predictions is not within obs_data size')
orig_y_sd = self.model.data.obs_data.orig_y_sd[obs_ref]
orig_y_mean = self.model.data.obs_data.orig_y_mean[obs_ref]
else:
orig_y_sd = self.model.data.obs_data.orig_y_sd
orig_y_mean = self.model.data.obs_data.orig_y_mean
ushape=self.u.shape
if isinstance(orig_y_sd,np.ndarray):
ysd_inpredshape = np.tile(orig_y_sd, (ushape[0], ushape[1], 1))
else:
# cheating a bit, if it's scalar it doesn't have to be tiled out
ysd_inpredshape=orig_y_sd
ymean_inpredshape = np.tile(orig_y_mean, (ushape[0], ushape[1], 1))
return ysd_inpredshape, ymean_inpredshape
# """
# So much for the sugar, here's the medicine...
# """
def rmultnorm(n, mu, sigma, dev=True):
    """
    Draw realization(s) from a multivariate normal with mean `mu` (column vector)
    and covariance `sigma`.

    The default dev=True path samples via an SVD of sigma so that the random
    stream matches gpmsa/matlab for testing purposes; numpy's builtin mvn uses a
    different stream. The native (dev=False) path supports single draws only.
    """
    if dev:
        # development path: SVD-based sampling, consuming randoms in matlab order
        left, svals, _ = np.linalg.svd(sigma, full_matrices=False)
        std_normals = norm.ppf(np.random.rand(np.shape(mu)[0], n))
        draws = np.tile(mu, (1, n)) + left @ np.diag(np.sqrt(svals)) @ std_normals
        return draws.squeeze()
    # native path: only ever asked for one realization at a time
    if n != 1:
        raise ValueError('Internal error: native method should have only been asked to produce single realizations')
    return np.random.multivariate_normal(mu.squeeze(), sigma)
def wPred(pred):
    """
    Emulator-only ('eta') prediction: compute the GP posterior of the w basis
    weights at the prediction inputs for each posterior sample, storing results
    on the `pred` object.

    Populates (depending on flags set on `pred`):
      - pred.w     : (nsamp, npred, pu) process realizations, if pred.storeRlz
      - pred.mu    : (nsamp, npred*pu) posterior means, if pred.storeMuSigma
      - pred.sigma : (nsamp, npred*pu, npred*pu) posterior covariances, if pred.storeMuSigma
    """
    # some shorthand references from the pred object
    xpred=pred.xpred
    samples=pred.samples
    num=pred.model.num
    data=pred.model.data
    theta_pred=pred.t_pred
    # n: #obs, m: #sim, p: #x dims, q: #t dims, pu: #principal components
    n=num.n; m=num.m; p=num.p; q=num.q; pu=num.pu
    npred=np.shape(xpred)[0]
    nsamp=samples['lamWs'].shape[0]
    #allocate results containers if needed
    if pred.storeRlz:
        tpred = np.zeros((nsamp, npred * pu))
    if pred.storeMuSigma:
        pred.mu=np.empty((nsamp,npred*pu))
        pred.sigma=np.empty((nsamp,npred*pu,npred*pu))
    # loop over posterior samples; each iteration builds a GP posterior from that sample's params
    for ii in range(nsamp):
        if not num.sim_only:
            theta=samples['theta'][ii:ii+1,:]
        # betaU stored flattened Fortran-order: reshape to (p+q, pu)
        betaU=samples['betaU'][ii,:]
        betaU=np.reshape(betaU,(p+q,pu),order='F')
        lamUz=samples['lamUz'][ii:ii+1,:]
        lamWs=samples['lamWs'][ii:ii+1,:]
        lamWOs=samples['lamWOs'][ii:ii+1,:]
        if data.mean_basis is not None:
            gamma=samples['gamma'][ii:ii+1,:].reshape((-1,1))
        # assemble prediction inputs: append supplied t_pred, sampled theta, or nothing (sim-only)
        if theta_pred is not None:
            xpredt = np.concatenate((xpred,theta_pred),axis=1)
            cat_ind = np.concatenate([data.x_cat_ind, data.t_cat_ind])
        elif not num.sim_only:
            xpredt = np.concatenate( ( xpred,np.tile(theta,(npred, 1)) ),axis=1)
            cat_ind = np.concatenate([data.x_cat_ind, data.t_cat_ind])
        else:
            xpredt=xpred
            cat_ind = data.x_cat_ind
        # distance/covariance structures: pred-pred and design-pred
        xpredDist=SepiaDistCov(xpredt, cat_ind=cat_ind)
        zxpredDist=SepiaDistCov(data.zt,xpredt, cat_ind=cat_ind)
        Myhat=np.zeros((npred*pu,1))
        Syhat=np.zeros((npred*pu,npred*pu))
        # NOTE(review): this truthiness test is inconsistent with the
        # `data.mean_basis is not None` checks above/below -- confirm mean_basis
        # is always None or a truthy object here.
        if not data.mean_basis:
            w = num.w
        else:
            # remove the mean-basis contribution before conditioning
            w = num.w - data.sim_data.H @ gamma
        # per-basis-component GP posterior (components are independent given params)
        for jj in range(pu):
            SigW = num.ztDist.compute_cov_mat(betaU[:, jj], lamUz[0, jj])
            # nugget terms: simulation noise (LamSim*lamWOs) and weight precision (lamWs)
            np.fill_diagonal(SigW,SigW.diagonal() + 1/(num.LamSim[jj]*lamWOs) + 1/lamWs[0,jj] )
            SigWp = xpredDist.compute_cov_mat(betaU[:, jj], lamUz[0, jj])
            diagAdd=np.reciprocal(lamWs[0,jj])
            if pred.addResidVar:
                # optionally include posterior residual variability in predictions
                diagAdd += 1/(num.LamSim[jj]*lamWOs)
            np.fill_diagonal(SigWp, SigWp.diagonal() + diagAdd )
            SigWWp = zxpredDist.compute_cov_mat(betaU[:, jj], lamUz[0, jj])
            SigData=SigW
            SigPred=SigWp
            SigCross=SigWWp
            # Get posterior parameters
            # NOTE(review): sym_pos was removed from scipy.linalg.solve in SciPy 1.11
            # (use assume_a='pos' there) -- confirm the pinned SciPy version.
            W=scipy.linalg.solve(SigData,SigCross,sym_pos=True)
            # standard GP conditioning: mean = Sig_cross' inv(Sig_data) w
            Myhat[jj*npred:(jj+1)*npred] = W.T @ w[jj*m:(jj+1)*m,0:1]
            if data.mean_basis is not None:
                # add mean-basis contribution back at the prediction locations
                if data.dummy_x or pred.xpred is None:
                    pred_mb_dat=pred.t_pred
                elif pred.t_pred is None:
                    pred_mb_dat=pred.xpred
                else:
                    pred_mb_dat=np.hstack( (pred.xpred,pred.t_pred) )
                H_pred=data.make_mean_basis(pred_mb_dat)
                Myhat[jj*npred:(jj+1)*npred] += H_pred @ gamma
            # covariance = Sig_pred - Sig_cross' inv(Sig_data) Sig_cross
            Syhat[jj*npred:(jj+1)*npred,jj*npred:(jj+1)*npred] = SigPred - W.T @ SigCross
        if pred.storeRlz:
            # Record a realization
            tpred[ii,:]=rmultnorm(1, Myhat, Syhat)
        if pred.storeMuSigma:
            pred.mu[ii,:]=np.squeeze(Myhat)
            pred.sigma[ii,:,:]=Syhat
    if pred.storeRlz:
        #% Reshape the pred matrix to 3D:
        #% first dim - (number of realizations == samples)
        #% second dim - (number of prediction points n = number of rows of [x,theta])
        #% third dim - (number of basis elements in K = pu)
        pred.w=np.zeros((nsamp,npred,pu))
        for ii in range(pu):
            pred.w[:,:,ii]=tpred[:,ii*npred:(ii+1)*npred]
# and at the end, everything should be stored back in the prediction object.
#
# This version should not normally be called; should be using uvPredSep
# - which is the block-wise prediction calculations, supporting the separable design schema
#
def uvPred(pred):
    """
    Deprecated non-separable (u,v) prediction path.

    Superseded by uvPredSep, which performs the equivalent block-wise prediction
    calculations while also supporting the separable design schema. This function
    raised unconditionally on its first statement, so its former body (the
    pre-separable reference implementation) was unreachable dead code and has been
    removed; it remains available in version-control history.

    :param pred: SepiaFullPrediction object (unused; kept for interface compatibility)
    :raises RuntimeError: always -- callers must use uvPredSep instead
    """
    raise RuntimeError('uvPred should not normally be invoked; uvPredSep is the correct method')
def uvPredSep(pred):
    """
    Full-model (u,v) prediction supporting the separable (Kronecker) design schema.

    For each posterior sample, builds the joint GP posterior over the discrepancy
    weights v and emulator weights u at the prediction inputs, exploiting the
    Kronecker structure of a separable simulation design for the w-block solves.

    Populates (depending on flags set on `pred`):
      - pred.v, pred.u : (nsamp, npred, pv) and (nsamp, npred, pu) realizations, if pred.storeRlz
      - pred.mu        : (nsamp, npred*(pv+pu)) posterior means, if pred.storeMuSigma
      - pred.sigma     : (nsamp, npred*(pv+pu), npred*(pv+pu)) posterior covariances, if pred.storeMuSigma
    """
    # calculate the equivalent quadratic form of kron separable data
    def sepQuadFormCalc(V,zp):
        """Apply inv(kron(V[0],...,V[-1])) to each column of zp via per-factor solves."""
        # calculate right side of the kronecker quadratic form solve
        dlen,mlen=zp.shape
        zpo=np.empty((dlen,mlen))
        for jj in range(mlen):
            zt=zp[:,jj]
            # solve against each Kronecker factor in turn, reshaping between factors
            for ii in range(len(V)-1,-1,-1):
                Vsize=V[ii].shape[1]
                zt=scipy.linalg.solve(V[ii],zt.reshape((Vsize,int(dlen/Vsize)),order='F') ).T
            zpo[:,jj]=zt.reshape(-1,order='F')
        return zpo
    def sepCalc(W,S,nugget,m):
        """Kron-structured quadratic forms: T = W' inv(kron(S)+nugget*I) W, r = W' inv(kron(S)+nugget*I) m."""
        # use kron form to compute:
        #   T = W' * inv( kron(S) + nugget*I ) * W
        #   r = W' * inv( kron(S) + nugget*I ) * m
        # for matrix W, covariance S, scalar nugget, and vector m
        # where S is a cell array of sub-covariances to be composed by kron
        # eigen decomposition of the blocks
        V=[None]*len(S)
        D=[None]*len(S)
        for ii in range(len(S)):
            D[ii], V[ii] = np.linalg.eig(S[ii])
        # compose eigenvalues
        dkron=D[-1]
        for ii in range(len(D)-2,-1,-1):
            dkron=np.kron(D[ii],dkron)
        # inverse sqrt of the D diagonal augmented with the nugget
        Dki2=(1/np.sqrt(dkron + nugget)).reshape((-1,1))
        # Put the parts together for W'inv(S)W
        zp=sepQuadFormCalc(V,W)
        zp2T=zp * np.tile(Dki2,(1,zp.shape[1]) )
        T=zp2T.T @ zp2T
        # Put the parts together for W'inv(S)m
        zp=sepQuadFormCalc(V,m)
        zp2r=zp * Dki2
        r=zp2T.T @ zp2r
        return T,r
    # some shorthand references from the pred object
    xpred=pred.xpred
    samples=pred.samples
    num=pred.model.num
    data=pred.model.data
    theta_pred=pred.t_pred
    # n: #obs, m: #sim, p/q: #x/#t dims, pu/pv: #emulator/#discrepancy basis components
    n=num.n; m=num.m; p=num.p; q=num.q; pu=num.pu; pv=num.pv
    lamVzGnum=num.lamVzGnum; lamVzGroup=num.lamVzGroup
    npred = np.shape(xpred)[0]
    nsamp = samples['lamWs'].shape[0]
    x0Dist = num.x0Dist
    xpred0Dist=SepiaDistCov(xpred, cat_ind=data.x_cat_ind)
    xxpred0Dist=SepiaDistCov(data.x, xpred, cat_ind=data.x_cat_ind)
    if data.sep_design:
        ztSep=data.ztSep
        ztSepDist=num.ztSepDist
    # allocate result containers if requested
    if pred.storeRlz:
        tpred = np.empty((nsamp, npred*(pv+pu) ))
    if pred.storeMuSigma:
        pred.mu=np.empty((nsamp,npred*(pv+pu) ))
        pred.sigma=np.empty((nsamp,npred*(pv+pu),npred*(pv+pu) ))
    # loop over posterior samples
    for ii in range(nsamp):
        theta = samples['theta'][ii:ii + 1, :]
        # betaU/betaV stored flattened Fortran-order
        betaU = samples['betaU'][ii, :]
        betaU = np.reshape(betaU, (p+q, pu), order='F')
        if pv>0:
            betaV = samples['betaV'][ii, :]
            betaV = np.reshape(betaV, (p, lamVzGnum), order='F')
            lamVz = samples['lamVz'][ii:ii + 1, :]
            no_D=False
        else:
            # pv == 0: no-discrepancy model
            no_D=True
        lamUz = samples['lamUz'][ii:ii + 1, :]
        lamWs = samples['lamWs'][ii:ii + 1, :]
        lamWOs = samples['lamWOs'][ii:ii + 1, :]
        lamOs = samples['lamOs'][ii:ii + 1, :]
        if data.mean_basis is not None:
            gamma=samples['gamma'][ii:ii+1,:].reshape((-1,1))
        # prediction inputs: supplied t_pred, or the sampled theta tiled to each x_pred
        if theta_pred is not None:
            xpredt = np.concatenate((xpred, theta_pred), axis=1)
        else:
            xpredt = np.concatenate((xpred, np.tile(theta, (npred, 1))), axis=1)
        xtheta=np.concatenate((data.x,np.tile(theta, (n, 1))),axis=1)
        # distance/covariance structures among obs inputs, design, and prediction inputs
        xDist=SepiaDistCov(xtheta, cat_ind=np.concatenate([data.x_cat_ind, data.t_cat_ind]))
        xzDist=SepiaDistCov(xtheta,data.zt, cat_ind=np.concatenate([data.x_cat_ind, data.t_cat_ind]))
        xpredDist=SepiaDistCov(xpredt, cat_ind=np.concatenate([data.x_cat_ind, data.t_cat_ind]))
        xxpredDist=SepiaDistCov(xtheta,xpredt, cat_ind=np.concatenate([data.x_cat_ind, data.t_cat_ind]))
        zxpredDist=SepiaDistCov(data.zt,xpredt, cat_ind=np.concatenate([data.x_cat_ind, data.t_cat_ind]))
        # SigData
        # Generate the part of the matrix related to the data
        # Four parts to compute: Sig_v, Sig_u, Sig_w, and the Sig_uw crossterm
        SigV = np.zeros((n * pv, n * pv))  # for no_D model, pv=0
        if not no_D:
            vCov=[]
            for jj in range(lamVzGnum):
                vCov.append(x0Dist.compute_cov_mat(betaV[:, jj], lamVz[0,jj]))
            for jj in range(pv):
                SigV[jj*n:(jj+1)*n,jj*n:(jj+1)*n]=vCov[lamVzGroup[jj]]
        SigU=np.zeros((n*pu,n*pu))
        for jj in range(pu):
            SigU[jj*n:(jj+1)*n,jj*n:(jj+1)*n]=xDist.compute_cov_mat(betaU[:, jj], lamUz[0, jj])
        np.fill_diagonal(SigU, SigU.diagonal() + np.repeat(np.reciprocal(lamWs), n))
        #SigW = np.zeros((m * pu, m * pu))
        #for jj in range(pu):
        #    SigW[jj * m:(jj + 1) * m, jj * m:(jj + 1) * m] = num.ztDist.compute_cov_mat(betaU[:, jj], lamUz[0, jj])
        #np.fill_diagonal(SigW, SigW.diagonal() +
        #                 np.repeat(np.reciprocal(num.LamSim * lamWOs), m) + np.repeat(np.reciprocal(lamWs), m))
        #SigUW=np.zeros((n*pu,m*pu))
        #for jj in range(pu):
        #    SigUW[jj*n:(jj+1)*n,jj*m:(jj+1)*m]=xzDist.compute_cov_mat(betaU[:, jj], lamUz[0, jj])
        # w-block covariance kept per basis component (and per Kronecker factor if separable)
        SigWb = [None]*pu
        if not data.sep_design:
            for jj in range(pu):
                SigWb[jj]= num.ztDist.compute_cov_mat(betaU[:, jj], lamUz[0, jj])
                # a scalar added to diagonal for each block
                np.fill_diagonal(SigWb[jj], SigWb[jj].diagonal() + 1/(num.LamSim[jj] * lamWOs) + 1/lamWs[0,jj] )
        else:
            # separable design: store covariance factors plus the scalar nugget for sepCalc
            SigWbDiag = [None]*pu
            for jj in range(pu):
                segVarStart=0
                SigWb[jj] = [None]*len(ztSep)
                for kk in range(len(ztSep)):
                    segInds = np.arange(segVarStart,segVarStart+ztSepDist[kk].p)
                    segVarStart = segVarStart + ztSepDist[kk].p
                    if kk==0: # count lamUz once while composing the block from sep design
                        SigWb[jj][kk] = ztSepDist[kk].compute_cov_mat(betaU[segInds, jj], lamUz[0, jj])
                    else:
                        SigWb[jj][kk] = ztSepDist[kk].compute_cov_mat(betaU[segInds, jj], 1)
                SigWbDiag[jj]=1/(num.LamSim[jj] * lamWOs) + 1/lamWs[0,jj] # should be scalar
        # u-w cross covariance per component, zero-padded into the full u block layout
        SigUWb = [None]*pu
        for jj in range(pu):
            SigUWb[jj] = np.vstack( ( np.zeros((jj*n,m)),
                                      xzDist.compute_cov_mat(betaU[:, jj], lamUz[0, jj]),
                                      np.zeros(((pu-jj-1)*n,m)) ) )
        # augmented for separable...
        #if num.scalar_out:
        #    #SigData=[ SigU+SigV +SigObs/lamOs   SigUW; ...
        #    #                SigUW'              SigW ];
        #    if not no_D:
        #        SigUplusVpart=SigU+SigV + num.SigObs * 1/lamOs
        #    else:
        #        SigUplusVpart = SigU + num.SigObs * 1 / lamOs
        #    SigData=np.block([[SigUplusVpart,SigUW],[SigUW.T,SigW]])
        #else:
        #    #SigData=[SigV 0
        #    #          0  [ SigU SigUW; ...
        #    #               SigUW' SigW ] ];
        #    #SigData(1:n*(pv+pu),1:n*(pv+pu)) += model.SigObs*1/lamOs;
        #    SigSubmat=np.block([[SigU,SigUW],[SigUW.T,SigW]])
        #    sddim=n*pv+(n+m)*pu
        #    SigData=np.zeros((sddim,sddim))
        #    SigData[:n*pv,:n*pv] = SigV
        #    SigData[n*pv:,n*pv:] = SigSubmat
        #    SigData[:n*(pv+pu),:n*(pv+pu)] += num.SigObs*1/lamOs
        # SigPred
        # Generate the part of the matrix related to the predictors
        # Parts to compute: Sig_vpred, Sig_upred
        SigVp=np.zeros((npred*pv,npred*pv))
        if not no_D:
            vpCov=[]
            for jj in range(lamVzGnum):
                vpCov.append(xpred0Dist.compute_cov_mat(betaV[:, jj], lamVz[0,jj]))
            for jj in range(pv):
                SigVp[jj*npred:(jj+1)*npred,jj*npred:(jj+1)*npred]=vpCov[lamVzGroup[jj]]
        SigUp=np.zeros((npred*pu,npred*pu))
        for jj in range(pu):
            SigUp[jj*npred:(jj+1)*npred,jj*npred:(jj+1)*npred] = \
                xpredDist.compute_cov_mat(betaU[:, jj], lamUz[0, jj])
        np.fill_diagonal(SigUp, SigUp.diagonal() + np.repeat(np.reciprocal(lamWs), npred))
        if pred.addResidVar:
            # optionally include posterior residual variability in predictions
            np.fill_diagonal(SigUp, SigUp.diagonal() + np.repeat(np.reciprocal(num.LamSim * lamWOs), npred))
        #SigPred=[SigVp 0
        #          0    SigUp ]
        SigPred=np.zeros( (npred*(pu+pv),npred*(pu+pv)) )
        SigPred[:npred*pv,:npred*pv]=SigVp
        SigPred[npred*pv:,npred*pv:]=SigUp
        # SigCross
        SigVVx=np.zeros((n*pv,npred*pv))
        if not no_D:
            vvCov=[]
            for jj in range(lamVzGnum):
                vvCov.append(xxpred0Dist.compute_cov_mat(betaV[:, jj], lamVz[0,jj]))
            for jj in range(pv):
                SigVVx[jj*n:(jj+1)*n,jj*npred:(jj+1)*npred]=vvCov[lamVzGroup[jj]]
        SigUUx=np.zeros((n*pu,npred*pu))
        for jj in range(pu):
            SigUUx[jj*n:(jj+1)*n,jj*npred:(jj+1)*npred]=xxpredDist.compute_cov_mat(betaU[:, jj], lamUz[0, jj])
        # w-to-upred cross covariance per component, zero-padded into full layout
        SigWUxb=[None]*pu
        for jj in range(pu):
            SigWUxb[jj] = np.vstack( ( np.zeros((jj*npred,m)),
                                       zxpredDist.compute_cov_mat(betaU[:, jj], lamUz[0, jj]).T,
                                       np.zeros(((pu-jj-1)*npred,m)) ) )
        # augmented for separable
        #if num.scalar_out:
        #    if not no_D:
        #        #SigCross=[SigVVx               SigUUx; ...
        #        #          zeros(m*pu,npred*pv) SigWUx];
        #        SigCross=np.zeros( ( n*pv+m*pu, npred*(pv+pu) ) )
        #        SigCross[:n*pv,:npred*pv]=SigVVx
        #        SigCross[n*pv:,:npred*pv]=SigUUx
        #        SigCross[n*pv:,npred*pv:]=SigWUx
        #    else: # no Discrepancy model
        #        # SigCross=[SigUUx;
        #        #           SigWUx]
        #        SigCross=np.vstack((SigUUx,SigWUx))
        #else:
        #    #SigCross=[SigVVx                zeros(n*pv,npred*pu); ...
        #    #          zeros(n*pu,npred*pv)  SigUUx; ...
        #    #          zeros(m*pu,npred*pv)  SigWUx];
        #    SigCross=np.zeros( ( n*pv+(n+m)*pu, npred*(pv+pu) ) )
        #    SigCross[:n*pv, :npred*pv]=SigVVx
        #    SigCross[n*pv:n*(pv+pu),npred*pv:]=SigUUx
        #    SigCross[n*(pv+pu):, npred*pv:]=SigWUx
        # Get posterior parameters
        # pre-ksep
        #W = scipy.linalg.solve(SigData, SigCross, sym_pos=True)
        #if num.scalar_out:
        #    Myhat = W.T @ num.uw
        #else:
        #    Myhat = W.T @ num.vuw
        #Syhat = SigPred - W.T @ SigCross
        # joint covariance over (v,u) at data and prediction points, excluding the w block
        SigVUo = np.zeros( (n*(pv+pu),n*(pv+pu)) )
        SigVUo[:n*pv,:n*pv]=SigV
        SigVUo[n*pv:,n*pv:]=SigU
        SigVUo = SigVUo + num.SigObs * 1/lamOs
        SigVUx = np.zeros( (n*(pv+pu), npred*(pv+pu)) )
        SigVUx[:n*pv,:npred*pv] = SigVVx
        SigVUx[n*pv:,npred*pv:] = SigUUx
        SignoW = np.block([[SigVUo, SigVUx], [SigVUx.T, SigPred]])
        # NOTE(review): truthiness test is inconsistent with the `is not None`
        # checks elsewhere -- confirm mean_basis is always None or truthy.
        if not data.mean_basis:
            w = num.w
        else:
            w = num.w - data.sim_data.H @ gamma
        # condition out the w block one basis component at a time
        SigWsb=[None]*pu
        mugWsb=[None]*pu
        for jj in range(pu):
            SigWcrossb=np.zeros( ( (n+npred)*(pv+pu), m ) )
            SigWcrossb[n*pv:n*(pv+pu),:]=SigUWb[jj]
            SigWcrossb[(n*(pv+pu)+npred*pv):((n+npred)*(pv+pu)),:]=SigWUxb[jj]
            SigWcrossb=SigWcrossb.T
            if not data.sep_design:
                # dense solve against the w-block covariance
                # NOTE(review): sym_pos was removed in SciPy 1.11 (use assume_a='pos') --
                # confirm the pinned SciPy version.
                Tb=scipy.linalg.solve(SigWb[jj],SigWcrossb, sym_pos=True).T
                SigWsb[jj] = Tb @ SigWcrossb
                mugWsb[jj] = (Tb @ w[jj*m:(jj+1)*m,0] ).reshape(-1,1)
            else:
                # Kronecker-structured solve via eigendecomposition of the factors
                SigWsb[jj],mugWsb[jj]=sepCalc(SigWcrossb,SigWb[jj],SigWbDiag[jj],w[jj*m:(jj+1)*m,:])
        # accumulate the per-component corrections
        SiggWb=SignoW
        mugWb=np.zeros(mugWsb[0].shape)
        for jj in range(pu):
            SiggWb=SiggWb - SigWsb[jj]
            mugWb=mugWb + mugWsb[jj]
        # partition the conditioned system into data (1) and prediction (2) blocks
        SiggW11=SiggWb[:n*(pv+pu),:n*(pv+pu)]
        SiggW22=SiggWb[n*(pv+pu):,n*(pv+pu):]
        SiggW12=SiggWb[:n*(pv+pu):,n*(pv+pu):]
        mugW1=mugWb[:n*(pv+pu)]
        mugW2=mugWb[n*(pv+pu):]
        # subtract from the u part of vu
        # currently assumes scalar output if we got here.
        if not data.mean_basis:
            vu = num.vu
        else:
            # remove the mean-basis contribution from u before conditioning
            if data.dummy_x:
                mb_dat=xtheta[:,1:]
            else:
                mb_dat=xtheta
            H_x = data.make_mean_basis(mb_dat)
            u = num.u - H_x @ gamma
            vu = np.concatenate((num.v,u))
        # final GP conditioning on the observed (v,u)
        T=scipy.linalg.solve(SiggW11,SiggW12).T
        Syhat=SiggW22 - T@SiggW12
        Myhat=mugW2 + T @ (vu-mugW1)
        if data.mean_basis is not None:
            # add the mean-basis contribution back at the prediction locations (u part only)
            if data.dummy_x:
                pred_mb_dat=xpredt[:,1:]
            else:
                pred_mb_dat=xpredt
            H_pred=data.make_mean_basis(pred_mb_dat)
            Myhat[n*pv:] += H_pred @ gamma
        if pred.storeRlz:
            # Record a realization
            tpred[ii, :] = rmultnorm(1, Myhat, Syhat)
            # testing speed of built in
            #tpred[ii, :] = np.random.multivariate_normal(Myhat.squeeze(), Syhat)
        if pred.storeMuSigma:
            # add the distribution params to the return
            pred.mu[ii, :] = np.squeeze(Myhat)
            pred.sigma[ii, :, :] = Syhat
    if pred.storeRlz:
        # Reshape the pred matrix to 3D, for each component:
        #   first dim  - (number of realizations [pvals])
        #   second dim - (number of points [x,theta]s)
        #   third dim  - (number of principal components)
        pred.v=np.zeros( (nsamp,npred, pv) )
        pred.u=np.zeros( (nsamp,npred, pu) )
        for ii in range(pv):
            pred.v[:,:,ii]=tpred[:,ii*npred:(ii+1)*npred]
        for ii in range(pu):
            pred.u[:,:,ii]=tpred[:,pv*npred+ii*npred:pv*npred+(ii+1)*npred]
    # and at the end, everything should be stored back in the prediction object.
| [
"numpy.empty",
"numpy.reciprocal",
"numpy.ones",
"numpy.shape",
"numpy.linalg.svd",
"numpy.arange",
"numpy.tile",
"numpy.linalg.eig",
"numpy.reshape",
"numpy.kron",
"sepia.SepiaDistCov.SepiaDistCov",
"tqdm.tqdm",
"copy.deepcopy",
"numpy.tensordot",
"numpy.hstack",
"numpy.squeeze",
"n... | [((27466, 27509), 'sepia.SepiaDistCov.SepiaDistCov', 'SepiaDistCov', (['xpred'], {'cat_ind': 'data.x_cat_ind'}), '(xpred, cat_ind=data.x_cat_ind)\n', (27478, 27509), False, 'from sepia.SepiaDistCov import SepiaDistCov\n'), ((27526, 27577), 'sepia.SepiaDistCov.SepiaDistCov', 'SepiaDistCov', (['data.x', 'xpred'], {'cat_ind': 'data.x_cat_ind'}), '(data.x, xpred, cat_ind=data.x_cat_ind)\n', (27538, 27577), False, 'from sepia.SepiaDistCov import SepiaDistCov\n'), ((37403, 37446), 'sepia.SepiaDistCov.SepiaDistCov', 'SepiaDistCov', (['xpred'], {'cat_ind': 'data.x_cat_ind'}), '(xpred, cat_ind=data.x_cat_ind)\n', (37415, 37446), False, 'from sepia.SepiaDistCov import SepiaDistCov\n'), ((37463, 37514), 'sepia.SepiaDistCov.SepiaDistCov', 'SepiaDistCov', (['data.x', 'xpred'], {'cat_ind': 'data.x_cat_ind'}), '(data.x, xpred, cat_ind=data.x_cat_ind)\n', (37475, 37514), False, 'from sepia.SepiaDistCov import SepiaDistCov\n'), ((8778, 8850), 'numpy.tile', 'np.tile', (['self.model.data.sim_data.orig_y_mean', '(wshape[0], wshape[1], 1)'], {}), '(self.model.data.sim_data.orig_y_mean, (wshape[0], wshape[1], 1))\n', (8785, 8850), True, 'import numpy as np\n'), ((10718, 10743), 'copy.deepcopy', 'copy.deepcopy', (['self.model'], {}), '(self.model)\n', (10731, 10743), False, 'import copy\n'), ((11000, 11065), 'tqdm.tqdm', 'tqdm', (['leave_out_inds'], {'desc': '"""Cross validation..."""', 'mininterval': '(0.5)'}), "(leave_out_inds, desc='Cross validation...', mininterval=0.5)\n", (11004, 11065), False, 'from tqdm import tqdm\n'), ((12403, 12431), 'numpy.concatenate', 'np.concatenate', (['w_cv'], {'axis': '(1)'}), '(w_cv, axis=1)\n', (12417, 12431), True, 'import numpy as np\n'), ((12453, 12481), 'numpy.concatenate', 'np.concatenate', (['x_cv'], {'axis': '(0)'}), '(x_cv, axis=0)\n', (12467, 12481), True, 'import numpy as np\n'), ((12504, 12532), 'numpy.concatenate', 'np.concatenate', (['t_cv'], {'axis': '(0)'}), '(t_cv, axis=0)\n', (12518, 12532), True, 'import numpy as np\n'), 
((20883, 20955), 'numpy.tile', 'np.tile', (['self.model.data.sim_data.orig_y_mean', '(ushape[0], ushape[1], 1)'], {}), '(self.model.data.sim_data.orig_y_mean, (ushape[0], ushape[1], 1))\n', (20890, 20955), True, 'import numpy as np\n'), ((22036, 22083), 'numpy.tile', 'np.tile', (['orig_y_mean', '(ushape[0], ushape[1], 1)'], {}), '(orig_y_mean, (ushape[0], ushape[1], 1))\n', (22043, 22083), True, 'import numpy as np\n'), ((22796, 22837), 'numpy.linalg.svd', 'np.linalg.svd', (['sigma'], {'full_matrices': '(False)'}), '(sigma, full_matrices=False)\n', (22809, 22837), True, 'import numpy as np\n'), ((23259, 23274), 'numpy.shape', 'np.shape', (['xpred'], {}), '(xpred)\n', (23267, 23274), True, 'import numpy as np\n'), ((23396, 23425), 'numpy.zeros', 'np.zeros', (['(nsamp, npred * pu)'], {}), '((nsamp, npred * pu))\n', (23404, 23425), True, 'import numpy as np\n'), ((23468, 23497), 'numpy.empty', 'np.empty', (['(nsamp, npred * pu)'], {}), '((nsamp, npred * pu))\n', (23476, 23497), True, 'import numpy as np\n'), ((23514, 23555), 'numpy.empty', 'np.empty', (['(nsamp, npred * pu, npred * pu)'], {}), '((nsamp, npred * pu, npred * pu))\n', (23522, 23555), True, 'import numpy as np\n'), ((23705, 23746), 'numpy.reshape', 'np.reshape', (['betaU', '(p + q, pu)'], {'order': '"""F"""'}), "(betaU, (p + q, pu), order='F')\n", (23715, 23746), True, 'import numpy as np\n'), ((24420, 24457), 'sepia.SepiaDistCov.SepiaDistCov', 'SepiaDistCov', (['xpredt'], {'cat_ind': 'cat_ind'}), '(xpredt, cat_ind=cat_ind)\n', (24432, 24457), False, 'from sepia.SepiaDistCov import SepiaDistCov\n'), ((24477, 24523), 'sepia.SepiaDistCov.SepiaDistCov', 'SepiaDistCov', (['data.zt', 'xpredt'], {'cat_ind': 'cat_ind'}), '(data.zt, xpredt, cat_ind=cat_ind)\n', (24489, 24523), False, 'from sepia.SepiaDistCov import SepiaDistCov\n'), ((24538, 24563), 'numpy.zeros', 'np.zeros', (['(npred * pu, 1)'], {}), '((npred * pu, 1))\n', (24546, 24563), True, 'import numpy as np\n'), ((24575, 24609), 'numpy.zeros', 
'np.zeros', (['(npred * pu, npred * pu)'], {}), '((npred * pu, npred * pu))\n', (24583, 24609), True, 'import numpy as np\n'), ((26580, 26608), 'numpy.zeros', 'np.zeros', (['(nsamp, npred, pu)'], {}), '((nsamp, npred, pu))\n', (26588, 26608), True, 'import numpy as np\n'), ((27369, 27384), 'numpy.shape', 'np.shape', (['xpred'], {}), '(xpred)\n', (27377, 27384), True, 'import numpy as np\n'), ((27617, 27653), 'numpy.empty', 'np.empty', (['(nsamp, npred * (pv + pu))'], {}), '((nsamp, npred * (pv + pu)))\n', (27625, 27653), True, 'import numpy as np\n'), ((27693, 27729), 'numpy.empty', 'np.empty', (['(nsamp, npred * (pv + pu))'], {}), '((nsamp, npred * (pv + pu)))\n', (27701, 27729), True, 'import numpy as np\n'), ((27745, 27800), 'numpy.empty', 'np.empty', (['(nsamp, npred * (pv + pu), npred * (pv + pu))'], {}), '((nsamp, npred * (pv + pu), npred * (pv + pu)))\n', (27753, 27800), True, 'import numpy as np\n'), ((27924, 27965), 'numpy.reshape', 'np.reshape', (['betaU', '(p + q, pu)'], {'order': '"""F"""'}), "(betaU, (p + q, pu), order='F')\n", (27934, 27965), True, 'import numpy as np\n'), ((29335, 29361), 'numpy.zeros', 'np.zeros', (['(n * pv, n * pv)'], {}), '((n * pv, n * pv))\n', (29343, 29361), True, 'import numpy as np\n'), ((29663, 29689), 'numpy.zeros', 'np.zeros', (['(n * pu, n * pu)'], {}), '((n * pu, n * pu))\n', (29671, 29689), True, 'import numpy as np\n'), ((29911, 29937), 'numpy.zeros', 'np.zeros', (['(m * pu, m * pu)'], {}), '((m * pu, m * pu))\n', (29919, 29937), True, 'import numpy as np\n'), ((30259, 30285), 'numpy.zeros', 'np.zeros', (['(n * pu, m * pu)'], {}), '((n * pu, m * pu))\n', (30267, 30285), True, 'import numpy as np\n'), ((31487, 31521), 'numpy.zeros', 'np.zeros', (['(npred * pv, npred * pv)'], {}), '((npred * pv, npred * pv))\n', (31495, 31521), True, 'import numpy as np\n'), ((31820, 31854), 'numpy.zeros', 'np.zeros', (['(npred * pu, npred * pu)'], {}), '((npred * pu, npred * pu))\n', (31828, 31854), True, 'import numpy as np\n'), 
((32331, 32379), 'numpy.zeros', 'np.zeros', (['(npred * (pu + pv), npred * (pu + pv))'], {}), '((npred * (pu + pv), npred * (pu + pv)))\n', (32339, 32379), True, 'import numpy as np\n'), ((32494, 32524), 'numpy.zeros', 'np.zeros', (['(n * pv, npred * pv)'], {}), '((n * pv, npred * pv))\n', (32502, 32524), True, 'import numpy as np\n'), ((32818, 32848), 'numpy.zeros', 'np.zeros', (['(n * pu, npred * pu)'], {}), '((n * pu, npred * pu))\n', (32826, 32848), True, 'import numpy as np\n'), ((33000, 33030), 'numpy.zeros', 'np.zeros', (['(m * pu, npred * pu)'], {}), '((m * pu, npred * pu))\n', (33008, 33030), True, 'import numpy as np\n'), ((35075, 35103), 'numpy.zeros', 'np.zeros', (['(nsamp, npred, pv)'], {}), '((nsamp, npred, pv))\n', (35083, 35103), True, 'import numpy as np\n'), ((35120, 35148), 'numpy.zeros', 'np.zeros', (['(nsamp, npred, pu)'], {}), '((nsamp, npred, pu))\n', (35128, 35148), True, 'import numpy as np\n'), ((35651, 35673), 'numpy.empty', 'np.empty', (['(dlen, mlen)'], {}), '((dlen, mlen))\n', (35659, 35673), True, 'import numpy as np\n'), ((37306, 37321), 'numpy.shape', 'np.shape', (['xpred'], {}), '(xpred)\n', (37314, 37321), True, 'import numpy as np\n'), ((37635, 37671), 'numpy.empty', 'np.empty', (['(nsamp, npred * (pv + pu))'], {}), '((nsamp, npred * (pv + pu)))\n', (37643, 37671), True, 'import numpy as np\n'), ((37711, 37747), 'numpy.empty', 'np.empty', (['(nsamp, npred * (pv + pu))'], {}), '((nsamp, npred * (pv + pu)))\n', (37719, 37747), True, 'import numpy as np\n'), ((37763, 37818), 'numpy.empty', 'np.empty', (['(nsamp, npred * (pv + pu), npred * (pv + pu))'], {}), '((nsamp, npred * (pv + pu), npred * (pv + pu)))\n', (37771, 37818), True, 'import numpy as np\n'), ((37942, 37983), 'numpy.reshape', 'np.reshape', (['betaU', '(p + q, pu)'], {'order': '"""F"""'}), "(betaU, (p + q, pu), order='F')\n", (37952, 37983), True, 'import numpy as np\n'), ((39455, 39481), 'numpy.zeros', 'np.zeros', (['(n * pv, n * pv)'], {}), '((n * pv, n * pv))\n', 
(39463, 39481), True, 'import numpy as np\n'), ((39783, 39809), 'numpy.zeros', 'np.zeros', (['(n * pu, n * pu)'], {}), '((n * pu, n * pu))\n', (39791, 39809), True, 'import numpy as np\n'), ((43044, 43078), 'numpy.zeros', 'np.zeros', (['(npred * pv, npred * pv)'], {}), '((npred * pv, npred * pv))\n', (43052, 43078), True, 'import numpy as np\n'), ((43377, 43411), 'numpy.zeros', 'np.zeros', (['(npred * pu, npred * pu)'], {}), '((npred * pu, npred * pu))\n', (43385, 43411), True, 'import numpy as np\n'), ((43888, 43936), 'numpy.zeros', 'np.zeros', (['(npred * (pu + pv), npred * (pu + pv))'], {}), '((npred * (pu + pv), npred * (pu + pv)))\n', (43896, 43936), True, 'import numpy as np\n'), ((44051, 44081), 'numpy.zeros', 'np.zeros', (['(n * pv, npred * pv)'], {}), '((n * pv, npred * pv))\n', (44059, 44081), True, 'import numpy as np\n'), ((44375, 44405), 'numpy.zeros', 'np.zeros', (['(n * pu, npred * pu)'], {}), '((n * pu, npred * pu))\n', (44383, 44405), True, 'import numpy as np\n'), ((46155, 46195), 'numpy.zeros', 'np.zeros', (['(n * (pv + pu), n * (pv + pu))'], {}), '((n * (pv + pu), n * (pv + pu)))\n', (46163, 46195), True, 'import numpy as np\n'), ((46320, 46364), 'numpy.zeros', 'np.zeros', (['(n * (pv + pu), npred * (pv + pu))'], {}), '((n * (pv + pu), npred * (pv + pu)))\n', (46328, 46364), True, 'import numpy as np\n'), ((46459, 46508), 'numpy.block', 'np.block', (['[[SigVUo, SigVUx], [SigVUx.T, SigPred]]'], {}), '([[SigVUo, SigVUx], [SigVUx.T, SigPred]])\n', (46467, 46508), True, 'import numpy as np\n'), ((47316, 47341), 'numpy.zeros', 'np.zeros', (['mugWsb[0].shape'], {}), '(mugWsb[0].shape)\n', (47324, 47341), True, 'import numpy as np\n'), ((49121, 49149), 'numpy.zeros', 'np.zeros', (['(nsamp, npred, pv)'], {}), '((nsamp, npred, pv))\n', (49129, 49149), True, 'import numpy as np\n'), ((49166, 49194), 'numpy.zeros', 'np.zeros', (['(nsamp, npred, pu)'], {}), '((nsamp, npred, pu))\n', (49174, 49194), True, 'import numpy as np\n'), ((8241, 8306), 
'numpy.tensordot', 'np.tensordot', (['self.w', 'self.model.data.sim_data.K'], {'axes': '[[2], [0]]'}), '(self.w, self.model.data.sim_data.K, axes=[[2], [0]])\n', (8253, 8306), True, 'import numpy as np\n'), ((8525, 8595), 'numpy.tile', 'np.tile', (['self.model.data.sim_data.orig_y_sd', '(wshape[0], wshape[1], 1)'], {}), '(self.model.data.sim_data.orig_y_sd, (wshape[0], wshape[1], 1))\n', (8532, 8595), True, 'import numpy as np\n'), ((11155, 11180), 'copy.deepcopy', 'copy.deepcopy', (['orig_model'], {}), '(orig_model)\n', (11168, 11180), False, 'import copy\n'), ((12028, 12039), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (12036, 12039), True, 'import numpy as np\n'), ((12094, 12113), 'numpy.tile', 'np.tile', (['w_inds', 'pu'], {}), '(w_inds, pu)\n', (12101, 12113), True, 'import numpy as np\n'), ((20630, 20700), 'numpy.tile', 'np.tile', (['self.model.data.sim_data.orig_y_sd', '(ushape[0], ushape[1], 1)'], {}), '(self.model.data.sim_data.orig_y_sd, (ushape[0], ushape[1], 1))\n', (20637, 20700), True, 'import numpy as np\n'), ((21833, 21878), 'numpy.tile', 'np.tile', (['orig_y_sd', '(ushape[0], ushape[1], 1)'], {}), '(orig_y_sd, (ushape[0], ushape[1], 1))\n', (21840, 21878), True, 'import numpy as np\n'), ((22916, 22935), 'numpy.tile', 'np.tile', (['mu', '(1, n)'], {}), '(mu, (1, n))\n', (22923, 22935), True, 'import numpy as np\n'), ((24029, 24072), 'numpy.concatenate', 'np.concatenate', (['(xpred, theta_pred)'], {'axis': '(1)'}), '((xpred, theta_pred), axis=1)\n', (24043, 24072), True, 'import numpy as np\n'), ((24093, 24141), 'numpy.concatenate', 'np.concatenate', (['[data.x_cat_ind, data.t_cat_ind]'], {}), '([data.x_cat_ind, data.t_cat_ind])\n', (24107, 24141), True, 'import numpy as np\n'), ((25034, 25061), 'numpy.reciprocal', 'np.reciprocal', (['lamWs[0, jj]'], {}), '(lamWs[0, jj])\n', (25047, 25061), True, 'import numpy as np\n'), ((26237, 26254), 'numpy.squeeze', 'np.squeeze', (['Myhat'], {}), '(Myhat)\n', (26247, 26254), True, 'import numpy as np\n'), 
((28045, 28089), 'numpy.reshape', 'np.reshape', (['betaV', '(p, lamVzGnum)'], {'order': '"""F"""'}), "(betaV, (p, lamVzGnum), order='F')\n", (28055, 28089), True, 'import numpy as np\n'), ((28447, 28490), 'numpy.concatenate', 'np.concatenate', (['(xpred, theta_pred)'], {'axis': '(1)'}), '((xpred, theta_pred), axis=1)\n', (28461, 28490), True, 'import numpy as np\n'), ((30743, 30794), 'numpy.block', 'np.block', (['[[SigUplusVpart, SigUW], [SigUW.T, SigW]]'], {}), '([[SigUplusVpart, SigUW], [SigUW.T, SigW]])\n', (30751, 30794), True, 'import numpy as np\n'), ((31071, 31113), 'numpy.block', 'np.block', (['[[SigU, SigUW], [SigUW.T, SigW]]'], {}), '([[SigU, SigUW], [SigUW.T, SigW]])\n', (31079, 31113), True, 'import numpy as np\n'), ((31163, 31187), 'numpy.zeros', 'np.zeros', (['(sddim, sddim)'], {}), '((sddim, sddim))\n', (31171, 31187), True, 'import numpy as np\n'), ((33939, 33991), 'numpy.zeros', 'np.zeros', (['(n * pv + (n + m) * pu, npred * (pv + pu))'], {}), '((n * pv + (n + m) * pu, npred * (pv + pu)))\n', (33947, 33991), True, 'import numpy as np\n'), ((34749, 34766), 'numpy.squeeze', 'np.squeeze', (['Myhat'], {}), '(Myhat)\n', (34759, 34766), True, 'import numpy as np\n'), ((36423, 36443), 'numpy.linalg.eig', 'np.linalg.eig', (['S[ii]'], {}), '(S[ii])\n', (36436, 36443), True, 'import numpy as np\n'), ((36554, 36575), 'numpy.kron', 'np.kron', (['D[ii]', 'dkron'], {}), '(D[ii], dkron)\n', (36561, 36575), True, 'import numpy as np\n'), ((36798, 36829), 'numpy.tile', 'np.tile', (['Dki2', '(1, zp.shape[1])'], {}), '(Dki2, (1, zp.shape[1]))\n', (36805, 36829), True, 'import numpy as np\n'), ((38063, 38107), 'numpy.reshape', 'np.reshape', (['betaV', '(p, lamVzGnum)'], {'order': '"""F"""'}), "(betaV, (p, lamVzGnum), order='F')\n", (38073, 38107), True, 'import numpy as np\n'), ((38567, 38610), 'numpy.concatenate', 'np.concatenate', (['(xpred, theta_pred)'], {'axis': '(1)'}), '((xpred, theta_pred), axis=1)\n', (38581, 38610), True, 'import numpy as np\n'), ((46729, 
46767), 'numpy.zeros', 'np.zeros', (['((n + npred) * (pv + pu), m)'], {}), '(((n + npred) * (pv + pu), m))\n', (46737, 46767), True, 'import numpy as np\n'), ((48036, 48062), 'numpy.concatenate', 'np.concatenate', (['(num.v, u)'], {}), '((num.v, u))\n', (48050, 48062), True, 'import numpy as np\n'), ((48795, 48812), 'numpy.squeeze', 'np.squeeze', (['Myhat'], {}), '(Myhat)\n', (48805, 48812), True, 'import numpy as np\n'), ((11851, 11865), 'numpy.array', 'np.array', (['[[]]'], {}), '([[]])\n', (11859, 11865), True, 'import numpy as np\n'), ((18162, 18227), 'numpy.tensordot', 'np.tensordot', (['self.v', 'self.model.data.sim_data.D'], {'axes': '[[2], [0]]'}), '(self.v, self.model.data.sim_data.D, axes=[[2], [0]])\n', (18174, 18227), True, 'import numpy as np\n'), ((24276, 24324), 'numpy.concatenate', 'np.concatenate', (['[data.x_cat_ind, data.t_cat_ind]'], {}), '([data.x_cat_ind, data.t_cat_ind])\n', (24290, 24324), True, 'import numpy as np\n'), ((28625, 28647), 'numpy.tile', 'np.tile', (['theta', '(n, 1)'], {}), '(theta, (n, 1))\n', (28632, 28647), True, 'import numpy as np\n'), ((28700, 28748), 'numpy.concatenate', 'np.concatenate', (['[data.x_cat_ind, data.t_cat_ind]'], {}), '([data.x_cat_ind, data.t_cat_ind])\n', (28714, 28748), True, 'import numpy as np\n'), ((28802, 28850), 'numpy.concatenate', 'np.concatenate', (['[data.x_cat_ind, data.t_cat_ind]'], {}), '([data.x_cat_ind, data.t_cat_ind])\n', (28816, 28850), True, 'import numpy as np\n'), ((28899, 28947), 'numpy.concatenate', 'np.concatenate', (['[data.x_cat_ind, data.t_cat_ind]'], {}), '([data.x_cat_ind, data.t_cat_ind])\n', (28913, 28947), True, 'import numpy as np\n'), ((29004, 29052), 'numpy.concatenate', 'np.concatenate', (['[data.x_cat_ind, data.t_cat_ind]'], {}), '([data.x_cat_ind, data.t_cat_ind])\n', (29018, 29052), True, 'import numpy as np\n'), ((29110, 29158), 'numpy.concatenate', 'np.concatenate', (['[data.x_cat_ind, data.t_cat_ind]'], {}), '([data.x_cat_ind, data.t_cat_ind])\n', (29124, 29158), 
True, 'import numpy as np\n'), ((33365, 33411), 'numpy.zeros', 'np.zeros', (['(n * pv + m * pu, npred * (pv + pu))'], {}), '((n * pv + m * pu, npred * (pv + pu)))\n', (33373, 33411), True, 'import numpy as np\n'), ((33692, 33719), 'numpy.vstack', 'np.vstack', (['(SigUUx, SigWUx)'], {}), '((SigUUx, SigWUx))\n', (33701, 33719), True, 'import numpy as np\n'), ((38745, 38767), 'numpy.tile', 'np.tile', (['theta', '(n, 1)'], {}), '(theta, (n, 1))\n', (38752, 38767), True, 'import numpy as np\n'), ((38820, 38868), 'numpy.concatenate', 'np.concatenate', (['[data.x_cat_ind, data.t_cat_ind]'], {}), '([data.x_cat_ind, data.t_cat_ind])\n', (38834, 38868), True, 'import numpy as np\n'), ((38922, 38970), 'numpy.concatenate', 'np.concatenate', (['[data.x_cat_ind, data.t_cat_ind]'], {}), '([data.x_cat_ind, data.t_cat_ind])\n', (38936, 38970), True, 'import numpy as np\n'), ((39019, 39067), 'numpy.concatenate', 'np.concatenate', (['[data.x_cat_ind, data.t_cat_ind]'], {}), '([data.x_cat_ind, data.t_cat_ind])\n', (39033, 39067), True, 'import numpy as np\n'), ((39124, 39172), 'numpy.concatenate', 'np.concatenate', (['[data.x_cat_ind, data.t_cat_ind]'], {}), '([data.x_cat_ind, data.t_cat_ind])\n', (39138, 39172), True, 'import numpy as np\n'), ((39230, 39278), 'numpy.concatenate', 'np.concatenate', (['[data.x_cat_ind, data.t_cat_ind]'], {}), '([data.x_cat_ind, data.t_cat_ind])\n', (39244, 39278), True, 'import numpy as np\n'), ((3710, 3739), 'numpy.ones', 'np.ones', (['(t_pred.shape[0], 1)'], {}), '((t_pred.shape[0], 1))\n', (3717, 3739), True, 'import numpy as np\n'), ((10914, 10926), 'numpy.arange', 'np.arange', (['m'], {}), '(m)\n', (10923, 10926), True, 'import numpy as np\n'), ((11102, 11114), 'numpy.arange', 'np.arange', (['m'], {}), '(m)\n', (11111, 11114), True, 'import numpy as np\n'), ((11414, 11482), 'numpy.concatenate', 'np.concatenate', (['[sub_model.data.x_cat_ind, sub_model.data.t_cat_ind]'], {}), '([sub_model.data.x_cat_ind, sub_model.data.t_cat_ind])\n', (11428, 
11482), True, 'import numpy as np\n'), ((15628, 15693), 'numpy.tensordot', 'np.tensordot', (['self.u', 'self.model.data.sim_data.K'], {'axes': '[[2], [0]]'}), '(self.u, self.model.data.sim_data.K, axes=[[2], [0]])\n', (15640, 15693), True, 'import numpy as np\n'), ((18082, 18122), 'numpy.tensordot', 'np.tensordot', (['self.v', 'D'], {'axes': '[[2], [0]]'}), '(self.v, D, axes=[[2], [0]])\n', (18094, 18122), True, 'import numpy as np\n'), ((18754, 18819), 'numpy.tensordot', 'np.tensordot', (['self.v', 'self.model.data.sim_data.D'], {'axes': '[[2], [0]]'}), '(self.v, self.model.data.sim_data.D, axes=[[2], [0]])\n', (18766, 18819), True, 'import numpy as np\n'), ((22882, 22894), 'numpy.shape', 'np.shape', (['mu'], {}), '(mu)\n', (22890, 22894), True, 'import numpy as np\n'), ((28549, 28575), 'numpy.tile', 'np.tile', (['theta', '(npred, 1)'], {}), '(theta, (npred, 1))\n', (28556, 28575), True, 'import numpy as np\n'), ((29869, 29889), 'numpy.reciprocal', 'np.reciprocal', (['lamWs'], {}), '(lamWs)\n', (29882, 29889), True, 'import numpy as np\n'), ((30218, 30238), 'numpy.reciprocal', 'np.reciprocal', (['lamWs'], {}), '(lamWs)\n', (30231, 30238), True, 'import numpy as np\n'), ((32087, 32107), 'numpy.reciprocal', 'np.reciprocal', (['lamWs'], {}), '(lamWs)\n', (32100, 32107), True, 'import numpy as np\n'), ((36659, 36682), 'numpy.sqrt', 'np.sqrt', (['(dkron + nugget)'], {}), '(dkron + nugget)\n', (36666, 36682), True, 'import numpy as np\n'), ((38669, 38695), 'numpy.tile', 'np.tile', (['theta', '(npred, 1)'], {}), '(theta, (npred, 1))\n', (38676, 38695), True, 'import numpy as np\n'), ((39989, 40009), 'numpy.reciprocal', 'np.reciprocal', (['lamWs'], {}), '(lamWs)\n', (40002, 40009), True, 'import numpy as np\n'), ((41114, 41167), 'numpy.arange', 'np.arange', (['segVarStart', '(segVarStart + ztSepDist[kk].p)'], {}), '(segVarStart, segVarStart + ztSepDist[kk].p)\n', (41123, 41167), True, 'import numpy as np\n'), ((41731, 41752), 'numpy.zeros', 'np.zeros', (['(jj * n, m)'], 
{}), '((jj * n, m))\n', (41739, 41752), True, 'import numpy as np\n'), ((41879, 41911), 'numpy.zeros', 'np.zeros', (['((pu - jj - 1) * n, m)'], {}), '(((pu - jj - 1) * n, m))\n', (41887, 41911), True, 'import numpy as np\n'), ((43644, 43664), 'numpy.reciprocal', 'np.reciprocal', (['lamWs'], {}), '(lamWs)\n', (43657, 43664), True, 'import numpy as np\n'), ((44636, 44661), 'numpy.zeros', 'np.zeros', (['(jj * npred, m)'], {}), '((jj * npred, m))\n', (44644, 44661), True, 'import numpy as np\n'), ((44794, 44830), 'numpy.zeros', 'np.zeros', (['((pu - jj - 1) * npred, m)'], {}), '(((pu - jj - 1) * npred, m))\n', (44802, 44830), True, 'import numpy as np\n'), ((3584, 3599), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (3592, 3599), True, 'import numpy as np\n'), ((15541, 15581), 'numpy.tensordot', 'np.tensordot', (['self.u', 'K'], {'axes': '[[2], [0]]'}), '(self.u, K, axes=[[2], [0]])\n', (15553, 15581), True, 'import numpy as np\n'), ((17931, 17971), 'numpy.tensordot', 'np.tensordot', (['self.v', 'D'], {'axes': '[[2], [0]]'}), '(self.v, D, axes=[[2], [0]])\n', (17943, 17971), True, 'import numpy as np\n'), ((18659, 18699), 'numpy.tensordot', 'np.tensordot', (['self.v', 'D'], {'axes': '[[2], [0]]'}), '(self.v, D, axes=[[2], [0]])\n', (18671, 18699), True, 'import numpy as np\n'), ((22948, 22958), 'numpy.sqrt', 'np.sqrt', (['s'], {}), '(s)\n', (22955, 22958), True, 'import numpy as np\n'), ((24218, 24244), 'numpy.tile', 'np.tile', (['theta', '(npred, 1)'], {}), '(theta, (npred, 1))\n', (24225, 24244), True, 'import numpy as np\n'), ((25825, 25861), 'numpy.hstack', 'np.hstack', (['(pred.xpred, pred.t_pred)'], {}), '((pred.xpred, pred.t_pred))\n', (25834, 25861), True, 'import numpy as np\n'), ((30167, 30201), 'numpy.reciprocal', 'np.reciprocal', (['(num.LamSim * lamWOs)'], {}), '(num.LamSim * lamWOs)\n', (30180, 30201), True, 'import numpy as np\n'), ((32211, 32245), 'numpy.reciprocal', 'np.reciprocal', (['(num.LamSim * lamWOs)'], {}), '(num.LamSim * lamWOs)\n', 
(32224, 32245), True, 'import numpy as np\n'), ((43768, 43802), 'numpy.reciprocal', 'np.reciprocal', (['(num.LamSim * lamWOs)'], {}), '(num.LamSim * lamWOs)\n', (43781, 43802), True, 'import numpy as np\n'), ((15366, 15406), 'numpy.tensordot', 'np.tensordot', (['self.u', 'K'], {'axes': '[[2], [0]]'}), '(self.u, K, axes=[[2], [0]])\n', (15378, 15406), True, 'import numpy as np\n'), ((16706, 16771), 'numpy.tensordot', 'np.tensordot', (['self.u', 'self.model.data.sim_data.K'], {'axes': '[[2], [0]]'}), '(self.u, self.model.data.sim_data.K, axes=[[2], [0]])\n', (16718, 16771), True, 'import numpy as np\n'), ((18491, 18531), 'numpy.tensordot', 'np.tensordot', (['self.v', 'D'], {'axes': '[[2], [0]]'}), '(self.v, D, axes=[[2], [0]])\n', (18503, 18531), True, 'import numpy as np\n'), ((16483, 16523), 'numpy.tensordot', 'np.tensordot', (['self.u', 'K'], {'axes': '[[2], [0]]'}), '(self.u, K, axes=[[2], [0]])\n', (16495, 16523), True, 'import numpy as np\n'), ((16164, 16204), 'numpy.tensordot', 'np.tensordot', (['self.u', 'K'], {'axes': '[[2], [0]]'}), '(self.u, K, axes=[[2], [0]])\n', (16176, 16204), True, 'import numpy as np\n')] |
import numpy as np
from random import shuffle
def svm_loss_naive(W, X, y, reg):
  """
  Structured SVM loss function, naive implementation (with loops).
  Inputs have dimension D, there are C classes, and we operate on minibatches
  of N examples.
  Inputs:
  - W: A numpy array of shape (D, C) containing weights.
  - X: A numpy array of shape (N, D) containing a minibatch of data.
  - y: A numpy array of shape (N,) containing training labels; y[i] = c means
    that X[i] has label c, where 0 <= c < C.
  - reg: (float) regularization strength
  Returns a tuple of:
  - loss as single float
  - gradient with respect to weights W; an array of same shape as W
  """
  dW = np.zeros(W.shape)  # initialize the gradient as zero

  num_classes = W.shape[1]
  num_train = X.shape[0]
  loss = 0.0
  for i in range(num_train):
    scores = X[i].dot(W)
    correct_class_score = scores[y[i]]
    for j in range(num_classes):
      if j == y[i]:
        continue
      margin = scores[j] - correct_class_score + 1  # note delta = 1
      if margin > 0:
        loss += margin
        # A positive margin pushes the wrong class up and the correct
        # class down by X[i].
        dW[:, j] += X[i]
        dW[:, y[i]] -= X[i]

  # Average over the minibatch.
  loss /= num_train
  dW /= num_train

  # Regularization. Use 0.5 * reg * ||W||^2 so that its gradient is exactly
  # reg * W, keeping the returned loss and gradient consistent with each
  # other (and with svm_loss_vectorized, which uses the same convention).
  loss += 0.5 * reg * np.sum(W * W)
  dW += reg * W

  return loss, dW
def svm_loss_vectorized(W, X, y, reg):
  """
  Structured SVM loss function, vectorized implementation.
  Inputs and outputs are the same as svm_loss_naive.
  """
  num_train = X.shape[0]

  # Scores for every (example, class) pair, shape (N, C).
  scores = X.dot(W)
  # Fancy-index pair selecting each example's correct-class score.
  correct_idx = (np.arange(num_train), y)
  correct_scores = scores[correct_idx].reshape(-1, 1)

  # Hinge margins max(0, s_j - s_y + 1); zero out the correct class so it
  # contributes nothing to the loss.
  margins = np.maximum(0.0, scores - correct_scores + 1.0)
  margins[correct_idx] = 0.0

  loss = margins.sum() / num_train
  loss += 0.5 * reg * np.sum(W * W)

  # Gradient: every positive margin contributes +X[i] to column j and
  # -X[i] to the correct column y[i]; encode this as an indicator matrix
  # and push the sum into a single matrix product.
  indicator = (margins > 0).astype(float)
  indicator[correct_idx] = -indicator.sum(axis=1)
  dW = X.T.dot(indicator) / num_train
  dW += reg * W

  return loss, dW
| [
"numpy.nonzero",
"numpy.zeros",
"numpy.sum",
"numpy.reshape"
] | [((680, 697), 'numpy.zeros', 'np.zeros', (['W.shape'], {}), '(W.shape)\n', (688, 697), True, 'import numpy as np\n'), ((2299, 2316), 'numpy.zeros', 'np.zeros', (['W.shape'], {}), '(W.shape)\n', (2307, 2316), True, 'import numpy as np\n'), ((3424, 3451), 'numpy.nonzero', 'np.nonzero', (['(scores_diff < 0)'], {}), '(scores_diff < 0)\n', (3434, 3451), True, 'import numpy as np\n'), ((1397, 1410), 'numpy.sum', 'np.sum', (['(W * W)'], {}), '(W * W)\n', (1403, 1410), True, 'import numpy as np\n'), ((3121, 3162), 'numpy.reshape', 'np.reshape', (['correct_label_scores', '(-1, 1)'], {}), '(correct_label_scores, (-1, 1))\n', (3131, 3162), True, 'import numpy as np\n'), ((3567, 3580), 'numpy.sum', 'np.sum', (['(W * W)'], {}), '(W * W)\n', (3573, 3580), True, 'import numpy as np\n')] |
import time
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import ccobra
import onehot
class RNN(nn.Module):
def __init__(self, input_size=12, hidden_size=64, output_size=9):
super(RNN, self).__init__()
self.lstm = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=2,
dropout=0.2)
self.h2o = nn.Linear(hidden_size, 9)
def forward(self, input, hidden):
output, hidden = self.lstm(input, hidden)
output = self.h2o(output)
return output, hidden
class RNNModel(ccobra.CCobraModel):
    """CCOBRA model predicting syllogistic single-choice responses with an LSTM.

    The network is pre-trained on the whole training set (sequences of 64
    onehot-encoded tasks per subject, per the encoding in ``pre_train``) and
    then queried one task at a time in ``predict``.
    """
    def __init__(self, name='RNN'):
        """Set up the network, optimizer, and loss.

        Parameters
        ----------
        name : str
            Identifier reported to the CCOBRA framework.
        """
        super(RNNModel, self).__init__(
            name, ['syllogistic'], ['single-choice'])
        self.net = RNN()
        # LSTM hidden state carried across predict() calls; it is never
        # reset here, so predictions condition on the query history.
        self.hidden = None
        # Training parameters
        self.n_epochs = 13
        # Training algorithms
        self.optimizer = optim.Adam(self.net.parameters())
        self.criterion = nn.CrossEntropyLoss()
    def pre_train(self, dataset):
        """Encode the training data and fit the network.

        ``dataset`` is assumed to be a list of subjects, each a list of task
        dicts with 'item' and 'response' keys (64 tasks per subject, judging
        by the 64 x n_subj x 12 comment below — TODO confirm).
        """
        # Prepare the data for training by converting it into a 64 x n_subj x 12
        train_x = []
        train_y = []
        for subj_data in dataset:
            subj_train_x = []
            subj_train_y = []
            for task_data in subj_data:
                syllogism = ccobra.syllogistic.Syllogism(task_data['item'])
                # Onehot encodings
                onehot_task = onehot.onehot_syllogism_content(syllogism.encoded_task)
                onehot_response = onehot.onehot_response(
                    syllogism.encode_response(task_data['response']))
                subj_train_x.append(onehot_task)
                subj_train_y.append(onehot_response)
            train_x.append(subj_train_x)
            train_y.append(subj_train_y)
        # Keep the tensors around for the post-training accuracy check.
        self.train_x = torch.from_numpy(np.array(train_x)).float()
        self.train_y = torch.from_numpy(np.array(train_y)).float()
        self.train_network(self.train_x, self.train_y, self.n_epochs, verbose=True)
    def train_network(self, train_x, train_y, n_epochs, verbose=False):
        """Optimize the network on (train_x, train_y).

        NOTE(review): the loop runs over ``self.n_epochs`` and uses the
        ``n_epochs`` argument only in the progress message; ``verbose`` is
        never checked (printing is unconditional). Presumably both should be
        honored — confirm before relying on these parameters.
        """
        print('Starting training...')
        for epoch in range(self.n_epochs):
            start_time = time.time()
            # Shuffle the training data
            perm_idxs = np.random.permutation(np.arange(len(train_x)))
            train_x = train_x[perm_idxs]
            train_y = train_y[perm_idxs]
            # Loop over the training instances
            losses = []
            for idx in range(len(train_x)):
                cur_x = train_x[idx]
                cur_y = train_y[idx]
                # One subject's full sequence: (seq_len=64, batch=1, features).
                input = cur_x.view(64, 1, -1)
                outputs, _ = self.net(input, None)
                # Backpropagation and parameter optimization
                loss = self.criterion(outputs.view(64, -1), cur_y.argmax(1))
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                losses.append(loss.item())
            # Print statistics
            print('Epoch {}/{} ({:.2f}s): {:.4f} ({:.4f})'.format(
                epoch + 1, n_epochs, time.time() - start_time, np.mean(losses), np.std(losses)))
        # Test the predictive accuracy
        accs = []
        for subj_idx in range(len(self.train_x)):
            pred, _ = self.net(self.train_x[subj_idx].view(64, 1, -1), None)
            pred_max = pred.view(64, -1).argmax(1)
            truth = self.train_y[subj_idx].argmax(1)
            acc = torch.mean((pred_max == truth).float()).item()
            accs.append(acc)
        print('   acc mean: {:.2f}'.format(np.mean(accs)))
        print('   acc std : {:.2f}'.format(np.std(accs)))
        # input = torch.from_numpy(onehot.onehot_syllogism_content('AA1')).float().view(1, -1)
        # print('    AA1:', self.net(input, self.net.initHidden()))
        # Switch to eval mode so dropout is disabled during prediction.
        self.net.eval()
    def predict(self, item, **kwargs):
        """Predict the response for a single syllogistic item."""
        syllogism = ccobra.syllogistic.Syllogism(item)
        # Obtain the prediction
        input = torch.from_numpy(onehot.onehot_syllogism_content(syllogism.encoded_task)).float()
        output, self.hidden = self.net(input.view(1, 1, -1), self.hidden)
        # Return maximum response
        response = output.argmax().item()
        enc_response = ccobra.syllogistic.RESPONSES[response]
        return syllogism.decode_response(enc_response)
| [
"ccobra.syllogistic.Syllogism",
"numpy.std",
"torch.nn.CrossEntropyLoss",
"time.time",
"numpy.mean",
"onehot.onehot_syllogism_content",
"numpy.array",
"torch.nn.Linear",
"torch.nn.LSTM"
] | [((275, 361), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'input_size', 'hidden_size': 'hidden_size', 'num_layers': '(2)', 'dropout': '(0.2)'}), '(input_size=input_size, hidden_size=hidden_size, num_layers=2,\n dropout=0.2)\n', (282, 361), True, 'import torch.nn as nn\n'), ((426, 451), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(9)'], {}), '(hidden_size, 9)\n', (435, 451), True, 'import torch.nn as nn\n'), ((998, 1019), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1017, 1019), True, 'import torch.nn as nn\n'), ((4013, 4047), 'ccobra.syllogistic.Syllogism', 'ccobra.syllogistic.Syllogism', (['item'], {}), '(item)\n', (4041, 4047), False, 'import ccobra\n'), ((2225, 2236), 'time.time', 'time.time', ([], {}), '()\n', (2234, 2236), False, 'import time\n'), ((1342, 1389), 'ccobra.syllogistic.Syllogism', 'ccobra.syllogistic.Syllogism', (["task_data['item']"], {}), "(task_data['item'])\n", (1370, 1389), False, 'import ccobra\n'), ((1456, 1511), 'onehot.onehot_syllogism_content', 'onehot.onehot_syllogism_content', (['syllogism.encoded_task'], {}), '(syllogism.encoded_task)\n', (1487, 1511), False, 'import onehot\n'), ((1867, 1884), 'numpy.array', 'np.array', (['train_x'], {}), '(train_x)\n', (1875, 1884), True, 'import numpy as np\n'), ((1934, 1951), 'numpy.array', 'np.array', (['train_y'], {}), '(train_y)\n', (1942, 1951), True, 'import numpy as np\n'), ((3177, 3192), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (3184, 3192), True, 'import numpy as np\n'), ((3194, 3208), 'numpy.std', 'np.std', (['losses'], {}), '(losses)\n', (3200, 3208), True, 'import numpy as np\n'), ((3675, 3688), 'numpy.mean', 'np.mean', (['accs'], {}), '(accs)\n', (3682, 3688), True, 'import numpy as np\n'), ((3738, 3750), 'numpy.std', 'np.std', (['accs'], {}), '(accs)\n', (3744, 3750), True, 'import numpy as np\n'), ((4114, 4169), 'onehot.onehot_syllogism_content', 'onehot.onehot_syllogism_content', (['syllogism.encoded_task'], {}), 
'(syllogism.encoded_task)\n', (4145, 4169), False, 'import onehot\n'), ((3151, 3162), 'time.time', 'time.time', ([], {}), '()\n', (3160, 3162), False, 'import time\n')] |
import functools
import urllib
from io import BytesIO

import numpy as np
import pandas as pd
import sqlalchemy as sqa
from sqlalchemy.pool import NullPool
from sqlalchemy.schema import MetaData
from sqlalchemy.sql.expression import func
# mysql://ekwan16:h9#Li48Z#hY$b@J8@SG-nmrdatabase-2962-master.servers.mongodirector.com/pbe0
# status code meanings:
# 0 - "not started" meaning only one jiggle is present
# 1 - "complete" meaning 0th entry in data column is stationary, 1st entry is jiggle
# 2 - "pending" meaning the stationary is currently being computed
# 3 - "error" meaning something went wrong and this row is considered dead
def connect(method):
    """Decorator for Database methods that require a live connection.

    If the instance has no open connection, one is opened before the call
    and closed afterwards; an already-open connection is left untouched.
    (Renamed the parameter from ``func``, which shadowed the SQLAlchemy
    ``func`` imported above.)
    """
    @functools.wraps(method)
    def wrapped(self, *args, **kwargs):
        # Only manage the connection if the caller has not opened one.
        opened_here = self.connection is None
        if opened_here:
            self.__enter__()
        result = method(self, *args, **kwargs)
        if opened_here:
            self.__exit__(None, None, None)
        return result
    return wrapped
# converts a byte representation of a numpy array to an actual numpy array
def unpack_bytes(arr_bytes):
    """Deserialize a numpy array from its raw byte representation.

    NOTE(security): ``allow_pickle=True`` can execute arbitrary code if the
    bytes come from an untrusted source; here they originate from the
    project database.
    """
    buffer = BytesIO(arr_bytes)
    return np.load(buffer, allow_pickle=True)
class Database:
    """Connection manager for the NMR shielding MySQL database.

    Connections are short-lived: methods decorated with ``connect`` open one
    on entry and close it on exit unless the instance is used as a context
    manager. Status codes in the status table: 0 "not started", 1 "complete",
    2 "pending", 3 "error".
    """

    def __init__(self, host, user, passwd=None, db="pbe0", table="data_new", status=1):
        """Build the DSN pieces and reflect the table schemas.

        Credentials are URL-quoted so special characters survive the DSN.
        """
        self.host = urllib.parse.quote(host)
        self.user = urllib.parse.quote(user, safe="")
        # An empty password omits the ":passwd" DSN segment entirely.
        self.passwd = "" if passwd is None else ":" + urllib.parse.quote(passwd, safe="")
        self.db = db
        self.dialect = "mysql+pymysql"
        self.status = status
        self.metadata = MetaData()
        self.connection = None
        self.engine = None
        # Open a temporary connection just to reflect the table metadata.
        self.__enter__()
        # The status table only accompanies the default data table.
        self.status_table = \
            sqa.Table('status_new', self.metadata, autoload=True, autoload_with=self.engine) \
            if table == "data_new" else None
        self.data_table = sqa.Table(table, self.metadata, autoload=True, autoload_with=self.engine)
        self.__exit__(None, None, None)

    def __enter__(self):
        """Open an engine/connection pair; no-op if already connected."""
        if self.connection is not None:
            return
        self.engine = sqa.create_engine(
            f"{self.dialect}://{self.user}{self.passwd}@{self.host}/{self.db}",
            connect_args={'ssl': {'ssl': {}}}, poolclass=NullPool)
        self.connection = self.engine.connect()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Close the connection and dispose of the engine; no-op if closed."""
        if self.connection is None:
            return
        self.connection.close()
        self.connection = None
        self.engine.dispose()
        self.engine = None

    @connect
    def fetch_ids(self, number=None, requested_status=None, increment=10000, verbose=False):
        """Return row ids, optionally limited and filtered by status.

        When a status table exists, ids are filtered by ``requested_status``
        (defaulting to ``self.status``); otherwise all data-table ids are
        returned. Rows are streamed in batches of ``increment``.
        """
        if self.status_table is not None:
            if requested_status is None:
                requested_status = self.status
            query = sqa.select([self.status_table.columns.id]).where(self.status_table.columns.status == requested_status)
        else:
            query = sqa.select([self.data_table.columns.id])
        if number:
            query = query.limit(number)
            num = f"{number:,d}"
        else:
            num = "ALL"
        if verbose: print(f"Fetching {num} IDs" + ("" if requested_status is None else f" with status {requested_status}") + ".", flush=True)
        # Stream results so huge id lists do not load into memory at once.
        result = self.connection.execution_options(stream_results=True).execute(query)
        rows = []
        while True:
            batch = result.fetchmany(increment)
            if not batch: break
            rows += batch
            if verbose: print(".", end="", flush=True)
        if verbose: print(" Done.")
        return np.array([int(r[0]) for r in rows])

    @connect
    def read_rows(self, ids, columns=('id', 'atomic_numbers', 'geometries_and_shieldings', 'compound_type', 'weights'), randomize=False):
        """Fetch the given ids as a DataFrame indexed by id.

        Binary numpy columns are decoded back to arrays. ``columns`` now
        defaults to a tuple (the old mutable list default was never mutated,
        so this is behavior-compatible).
        """
        query = sqa.select((getattr(self.data_table.columns, c) for c in columns))
        if hasattr(ids, 'tolist'):
            ids = ids.tolist()
        query = query.where(self.data_table.columns.id.in_(ids))
        if randomize:
            query = query.order_by(func.rand())
        query_df = pd.read_sql_query(query, self.engine, index_col="id")
        # Close eagerly; the decorator's exit then becomes a no-op.
        self.__exit__(None, None, None)
        # Convert the raw byte columns back to numpy arrays.
        query_df.atomic_numbers = query_df.atomic_numbers.apply(unpack_bytes)
        query_df.geometries_and_shieldings = query_df.geometries_and_shieldings.apply(unpack_bytes)
        query_df.weights = query_df.weights.apply(unpack_bytes)
        return query_df

    @connect
    def set_status(self, id, status):
        """Set the status column of row ``id`` in the status table.

        Fixes two defects in the previous version: it referenced the
        undefined name ``ID`` (NameError) and hard-coded ``status=0``,
        ignoring the ``status`` argument.
        """
        update = sqa.update(self.status_table) \
            .where(self.status_table.columns.id == id) \
            .values(status=status)
        self.connection.execute(update)
if __name__ == '__main__':
    # Smoke test: read DB credentials from an INI file, fetch ten ids with
    # status 1 ("complete"), and print the corresponding rows.
    from configparser import ConfigParser
    parser = ConfigParser()
    parser.read('connect_params.ini')
    # Section keys must match the Database() keyword arguments.
    connect_params = parser['connect_params']
    db = Database(**connect_params)
    ids = db.fetch_ids(10, requested_status=1)
    print('\n', list(ids))
    print("\nRetrieving IDs as rows...\n")
    data = db.read_rows(ids)
    print(data)
    for id, row in data.iterrows():
        print(id, row)
| [
"sqlalchemy.schema.MetaData",
"io.BytesIO",
"numpy.load",
"sqlalchemy.sql.expression.func.rand",
"sqlalchemy.select",
"sqlalchemy.sql.expression.func",
"urllib.parse.quote",
"sqlalchemy.Table",
"sqlalchemy.update",
"pandas.read_sql_query",
"sqlalchemy.create_engine",
"configparser.ConfigParser... | [((1033, 1051), 'io.BytesIO', 'BytesIO', (['arr_bytes'], {}), '(arr_bytes)\n', (1040, 1051), False, 'from io import BytesIO\n'), ((1068, 1106), 'numpy.load', 'np.load', (['load_bytes'], {'allow_pickle': '(True)'}), '(load_bytes, allow_pickle=True)\n', (1075, 1106), True, 'import numpy as np\n'), ((4948, 4962), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (4960, 4962), False, 'from configparser import ConfigParser\n'), ((791, 818), 'sqlalchemy.sql.expression.func', 'func', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (795, 818), False, 'from sqlalchemy.sql.expression import func\n'), ((1258, 1282), 'urllib.parse.quote', 'urllib.parse.quote', (['host'], {}), '(host)\n', (1276, 1282), False, 'import urllib\n'), ((1303, 1336), 'urllib.parse.quote', 'urllib.parse.quote', (['user'], {'safe': '""""""'}), "(user, safe='')\n", (1321, 1336), False, 'import urllib\n'), ((1548, 1558), 'sqlalchemy.schema.MetaData', 'MetaData', ([], {}), '()\n', (1556, 1558), False, 'from sqlalchemy.schema import MetaData\n'), ((1854, 1927), 'sqlalchemy.Table', 'sqa.Table', (['table', 'self.metadata'], {'autoload': '(True)', 'autoload_with': 'self.engine'}), '(table, self.metadata, autoload=True, autoload_with=self.engine)\n', (1863, 1927), True, 'import sqlalchemy as sqa\n'), ((2098, 2247), 'sqlalchemy.create_engine', 'sqa.create_engine', (['f"""{self.dialect}://{self.user}{self.passwd}@{self.host}/{self.db}"""'], {'connect_args': "{'ssl': {'ssl': {}}}", 'poolclass': 'NullPool'}), "(\n f'{self.dialect}://{self.user}{self.passwd}@{self.host}/{self.db}',\n connect_args={'ssl': {'ssl': {}}}, poolclass=NullPool)\n", (2115, 2247), True, 'import sqlalchemy as sqa\n'), ((4157, 4210), 'pandas.read_sql_query', 'pd.read_sql_query', (['query', 'self.engine'], {'index_col': '"""id"""'}), "(query, self.engine, index_col='id')\n", (4174, 4210), True, 'import pandas as pd\n'), ((1702, 1787), 'sqlalchemy.Table', 'sqa.Table', (['"""status_new"""', 
'self.metadata'], {'autoload': '(True)', 'autoload_with': 'self.engine'}), "('status_new', self.metadata, autoload=True, autoload_with=self.engine\n )\n", (1711, 1787), True, 'import sqlalchemy as sqa\n'), ((2951, 2991), 'sqlalchemy.select', 'sqa.select', (['[self.data_table.columns.id]'], {}), '([self.data_table.columns.id])\n', (2961, 2991), True, 'import sqlalchemy as sqa\n'), ((1391, 1426), 'urllib.parse.quote', 'urllib.parse.quote', (['passwd'], {'safe': '""""""'}), "(passwd, safe='')\n", (1409, 1426), False, 'import urllib\n'), ((4116, 4127), 'sqlalchemy.sql.expression.func.rand', 'func.rand', ([], {}), '()\n', (4125, 4127), False, 'from sqlalchemy.sql.expression import func\n'), ((2814, 2856), 'sqlalchemy.select', 'sqa.select', (['[self.status_table.columns.id]'], {}), '([self.status_table.columns.id])\n', (2824, 2856), True, 'import sqlalchemy as sqa\n'), ((4735, 4764), 'sqlalchemy.update', 'sqa.update', (['self.status_table'], {}), '(self.status_table)\n', (4745, 4764), True, 'import sqlalchemy as sqa\n')] |
# Function generate_exposure()
# The function makes use of several helper functions, which can be found below the function definition.
# Dependencies
import os
import numpy as np
from astropy.io import fits
import astropy.units as u
from datetime import datetime
# Main definition
def generate_exposure(target, telescope, detector, DIT, number_frames=1, filename=None, time_stamp='end', maximum_number_frames_per_file=100, verbose=0, **kwargs):
    """
    Simulate 'number_frames' exposures and write them to one or more FITS files.

    Central entry point of the VEGAPy package: it combines a vegapy.Target,
    a vegapy.Telescope and a vegapy.Detector to generate 'number_frames'
    exposures of integration time 'DIT' and writes them to the FITS file
    'filename'. The filename automatically obtains a time stamp, as long as
    the 'time_stamp' argument is set to 'start' or to the default 'end'.

    To distribute the virtual exposures to multiple files, for instance if
    the size of a single file would become too large, set
    'maximum_number_frames_per_file' to a smaller value (default is 100).
    The last file may contain empty (all-zero) frames.

    Parameters
    ----------
    target : vegapy.Target
        Source scene; its 'data', 'resolution' and 'FoV' attributes are used.
    telescope : vegapy.Telescope
        Callable that convolves the target with the instrument PSF.
    detector : vegapy.Detector
        Callable that converts a photon rate density into detector counts.
    DIT : astropy Quantity
        Detector integration time of a single frame.
    number_frames : int
        Number of frames to simulate.
    filename : str or None
        Output file name; defaults to 'exposure.fits'.
    time_stamp : 'start', 'end' or None
        Where to insert a 'YYYYMMDD_HHMMSS' stamp into the file name.
    maximum_number_frames_per_file : int
        Frames per output file before the output is split into indexed files.
    verbose : int
        Verbosity flag passed on to the telescope call.
    **kwargs
        Optional 'readout_time' used to skip PSF planes between two readouts.
    """
    # Adapt file name: insert the time stamp before the file extension.
    if filename is None:
        filename = 'exposure.fits'
    if time_stamp == 'end':
        try:
            generic, ext = filename.split('.')
            filename = generic + '_' + _make_time_stamp() + '.' + ext
        except ValueError as e:
            # The name contains a path (or extra dots): stamp only the
            # basename and splice it back into the full path.
            path = filename
            filename = filename.split('/')[-1]
            generic, ext = filename.split('.')
            filename = path.replace(filename, generic + '_' + _make_time_stamp() + '.' + ext)
    elif time_stamp == 'start':
        filename = _make_time_stamp() + '_' + filename
    elif time_stamp is None:
        pass
    # Initialize fits header
    hdu = fits.PrimaryHDU()
    hdu.header.set('NAXIS', 2)
    hdu.header.set('NAXIS1', detector.shape[0])
    hdu.header.set('NAXIS2', detector.shape[1])
    if number_frames > 1:
        # In case of multiple frames, update 'NAXIS'
        hdu.header.set('NAXIS', 3, 'number of array dimensions')
        hdu.header.set('NAXIS3', number_frames)
        hdu.data = np.zeros( (number_frames, detector.shape[0], detector.shape[1]) )
    else:
        hdu.data = np.zeros(detector.shape)
    # NOTE(review): DIT.unit is passed as the header comment; presumably
    # astropy stringifies the unit -- confirm.
    hdu.header.set('DIT', DIT.value, DIT.unit)
    _add_attributes_to_header(hdu, target, skip_attributes=['shape', 'data', 'stars'], object_name='TARGET')
    _add_attributes_to_header(hdu, telescope, skip_attributes=['psf'], object_name='TELESCOP')
    _add_attributes_to_header(hdu, detector, skip_attributes=['shape', 'array'], object_name='DETECTOR')
    hdu.header.set('DATE', str(datetime.now()))
    # Write header to one or more files, depending on 'number_frames' and 'maximum_number_frames_per_file'
    if number_frames <= maximum_number_frames_per_file:
        multiple_files = False
        print("Writing file {}.".format(filename))
        hdu.writeto(filename, overwrite=True)
    else:
        multiple_files = True
        number_full_files = number_frames // maximum_number_frames_per_file
        number_leftover_frames = number_frames % maximum_number_frames_per_file
        if number_leftover_frames != 0:
            print("Writing {} files, where the last file contains only {} valid frames.".format(number_full_files + 1, number_leftover_frames))
            # Writing files with the maximum number of frames
            for i in range(number_full_files):
                hdu.header.set('NAXIS3', maximum_number_frames_per_file)
                hdu.writeto(_make_filename(filename, i, add_index=multiple_files), overwrite=True)
            # The last file shall contain only fewer frames
            # NOTE(review): the data array still holds number_frames frames;
            # on write the NAXIS3 card is presumably overridden by the data
            # shape -- confirm.
            hdu.header.set('NAXIS3', number_leftover_frames)
            hdu.writeto(_make_filename(filename, i+1, add_index=multiple_files), overwrite=True)
        else:
            print("Writing {} files.".format(number_full_files))
            # Writing files with the maximum number of frames
            for i in range(number_full_files):
                hdu.header.set('NAXIS3', maximum_number_frames_per_file)
                hdu.writeto(_make_filename(filename, i, add_index=multiple_files), overwrite=True)
    # Initialize parameters for frame computation: number of PSF planes to
    # skip between two integrations, emulating the detector readout time.
    if ('readout_time' in kwargs):
        skip_frames = int( kwargs['readout_time'] / telescope.psf_timestep )
    else:
        skip_frames = 0
    # Computation of frames
    for dt in range(number_frames):
        print("\rExposure {:4}/{:4}".format(dt+1, number_frames), end='')
        imaged = telescope(target.data, target.resolution, integration_time=DIT, verbose=verbose)
        detected = detector(photon_rate_density_array=imaged, integration_time=DIT, target_FoV=target.FoV)
        detected = detected.decompose()
        # Write the frame into the file it belongs to, updating it in place.
        with fits.open(_make_filename(filename, dt // maximum_number_frames_per_file, add_index=multiple_files), mode='update') as hdulist:
            if number_frames == 1:
                hdulist[0].data = detected.value
            else:
                if multiple_files:
                    hdulist[0].data[dt % maximum_number_frames_per_file] = detected.value
                else:
                    hdulist[0].data[dt] = detected.value
            hdulist.flush()
        # Skip psf frames, to account for time between two readouts
        try:
            telescope.psf_plane += skip_frames
        except TypeError:
            # psf_plane may be non-numeric for static PSFs -- TODO confirm
            pass
    print("")
# Helper functions
def _make_time_stamp():
    """Return the current local time formatted as 'YYYYMMDD_HHMMSS'.

    IMPROVEMENT: strftime replaces the original str()/split()/replace()
    chain and produces exactly the same 15-character stamp.
    """
    return datetime.now().strftime('%Y%m%d_%H%M%S')
def _add_attributes_to_header(hdu_object, object, skip_attributes=(), prefix='HIERARCH VEGAPY ', object_name='New object'):
    """Format the attributes of `object` into FITS header cards on `hdu_object`.

    For distinguishing the attributes of different objects, each card name is
    composed as prefix + object_name + ' ' + attribute; with the default
    'HIERARCH ' prefix this produces HIERARCH cards.

    FIXES: the local variable `dict` shadowed the builtin (renamed to
    `attributes`), and the mutable default `skip_attributes=[]` is now an
    immutable tuple (membership tests behave identically).
    NOTE: the parameter name `object` also shadows a builtin, but it is kept
    for backward compatibility with keyword callers.

    Parameters
    ----------
    hdu_object : fits HDU
        Target HDU whose header receives the cards.
    object : any
        Object whose __dict__ attributes are written out.
    skip_attributes : iterable of str
        Attribute names to omit (e.g. large arrays).
    prefix, object_name : str
        Card-name prefix pieces.
    """
    attributes = object.__dict__
    for key in attributes:
        # Ability to skip for instance arrays
        if key in skip_attributes:
            continue
        # Appending the unit of a u.Quantity to the comment
        if isinstance(attributes[key], u.Quantity):
            hdu_object.header.set(prefix + object_name + ' ' + key, attributes[key].value, attributes[key].unit)
        # Suppress (long) relative paths
        elif isinstance(attributes[key], str):
            if len(attributes[key]) > 20:
                path, file = os.path.split(attributes[key])
                hdu_object.header.set(prefix + object_name + ' ' + key, file)
            else:
                hdu_object.header.set(prefix + object_name + ' ' + key, attributes[key])
        # Separating tuple attributes into two header cards
        elif isinstance(attributes[key], tuple):
            hdu_object.header.set(prefix + object_name + ' ' + key + '[0]', attributes[key][0].value, attributes[key][0].unit)
            hdu_object.header.set(prefix + object_name + ' ' + key + '[1]', attributes[key][1].value, attributes[key][1].unit)
        # Add all other types
        else:
            hdu_object.header.set(prefix + object_name + ' ' + key, attributes[key])
def _make_filename(filename, index, add_index):
    """Return `filename` with '_<index>' inserted before the extension.

    When `add_index` is False the name is returned unchanged.

    BUG FIX: the original used split('.'), which raised ValueError for names
    containing more than one dot (e.g. 'run.2021.fits'); rsplit('.', 1)
    splits only at the last dot.
    """
    if not add_index:
        return filename
    generic, extension = filename.rsplit('.', 1)
    return "{}_{}.{}".format(generic, index, extension)
| [
"astropy.io.fits.PrimaryHDU",
"numpy.zeros",
"datetime.datetime.now",
"os.path.split"
] | [((1822, 1839), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {}), '()\n', (1837, 1839), False, 'from astropy.io import fits\n'), ((2178, 2241), 'numpy.zeros', 'np.zeros', (['(number_frames, detector.shape[0], detector.shape[1])'], {}), '((number_frames, detector.shape[0], detector.shape[1]))\n', (2186, 2241), True, 'import numpy as np\n'), ((2273, 2297), 'numpy.zeros', 'np.zeros', (['detector.shape'], {}), '(detector.shape)\n', (2281, 2297), True, 'import numpy as np\n'), ((5662, 5676), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5674, 5676), False, 'from datetime import datetime\n'), ((2685, 2699), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2697, 2699), False, 'from datetime import datetime\n'), ((6848, 6872), 'os.path.split', 'os.path.split', (['dict[key]'], {}), '(dict[key])\n', (6861, 6872), False, 'import os\n')] |
# Copyright (c) 2021 VISTEC - Vidyasirimedhi Institute of Science and Technology
# Distribute under MIT License
# Authors:
# - <NAME> <suttisak.w_s19[-at-]vistec.ac.th>
# - <NAME> <pakkapon.p_s19[-at-]vistec.ac.th>
# - <NAME> <jiraphony_pro[-at-]vistec.ac.th>
# - <NAME> <supasorn.s[-at-]vistec.ac.th>
from __future__ import absolute_import, division, print_function
import http.server
import json
import math
import os
import re
import socketserver
import sys
import time
import traceback
import webbrowser
from threading import Timer
import numpy as np
import torch as pt
from skimage import io
from torch.utils.data import DataLoader, SubsetRandomSampler
def is_deepview(dpath):
    """Return True when the dataset at `dpath` is a DeepView dataset.

    DeepView datasets are recognized by the presence of a models.json file.
    """
    marker = dpath + "/models.json"
    return os.path.exists(marker)
def is_llff(dpath):
    """Return True when the dataset at `dpath` is an LLFF dataset.

    LLFF datasets are recognized by the presence of a poses_bounds.npy file.
    """
    marker = dpath + "/poses_bounds.npy"
    return os.path.exists(marker)
def getDatasetScale(dpath, deepview_width, llff_width):
    """Return the ratio between the requested width and the dataset's native width.

    Uses `deepview_width` for DeepView datasets (native width read from
    models.json) and `llff_width` for LLFF datasets (native width read from
    the first image on disk); falls back to 1 for unknown dataset types.
    """
    if is_deepview(dpath):
        with open(os.path.join(dpath, "models.json"), "r") as fi:
            js = json.load(fi)
        native_width = js[0][0]["width"]
        return float(deepview_width / native_width)
    if is_llff(dpath):
        image_dir = os.path.join(dpath, "images")
        candidates = [
            os.path.join(image_dir, f)
            for f in sorted(os.listdir(image_dir))
            if f.endswith("JPG") or f.endswith("jpg") or f.endswith("png")
        ]
        first_shape = io.imread(candidates[0]).shape
        return float(llff_width / first_shape[1])
    return 1
def generateSubsetSamplers(dataset_size, ratio=0.8, random_seed=0):
    """Split dataset indices into shuffled train/validation SubsetRandomSamplers.

    The first `ratio` fraction of a seeded random permutation becomes the
    training subset, the remainder the validation subset.
    """
    np.random.seed(random_seed)
    order = list(range(dataset_size))
    np.random.shuffle(order)
    cut = int(np.round(ratio * dataset_size))
    return SubsetRandomSampler(order[:cut]), SubsetRandomSampler(order[cut:])
def prepareDataloaders(dataset, dpath, random_split=False, train_ratio=1, num_workers=8):
    """Build train/validation samplers and dataloaders for `dataset`.

    The split is taken, in order of precedence, from: a random split
    (when `random_split` is True), an LLFF-style every-8th-image split
    (poses_bounds.npy present), a NeRF-synthetic split stored on
    dataset.sfm (transforms_train.json present), or explicit
    train_image.txt / val_image.txt file lists.

    Parameters
    ----------
    dataset : torch Dataset exposing `imgs` and `sfm` attributes.
    dpath : str
        Dataset directory used to detect the split convention.
    random_split : bool
        If True, split indices randomly using `train_ratio`.
    train_ratio : float
        Training fraction for the random split.
    num_workers : int
        Worker processes for both dataloaders.

    Returns
    -------
    (sampler_train, sampler_val, dataloader_train, dataloader_val)
    """
    if random_split:
        # BUG FIX: generateSubsetSamplers() takes no num_workers argument;
        # the original call raised a TypeError whenever random_split was True.
        sampler_train, sampler_val = generateSubsetSamplers(len(dataset), ratio=train_ratio)
        # num_workers is now also applied here, consistent with the branch below.
        dataloader_train = DataLoader(dataset, batch_size=1, sampler=sampler_train, num_workers=num_workers)
        dataloader_val = DataLoader(dataset, batch_size=1, sampler=sampler_val, num_workers=num_workers)
        print("TRAINING IMAGES: {}".format(len(dataloader_train)))
        print("VALIDATE IMAGES: {}".format(len(dataloader_val)))
    else:
        def get_indices(ty):
            # Map each file name listed in '<ty>_image.txt' to its index
            # within dataset.imgs.
            if os.path.exists(dpath + "/{}_image.txt".format(ty)):
                data = []
                with open(dpath + "/{}_image.txt".format(ty), "r") as fi:
                    for line in fi.readlines():
                        count = 0
                        for img in dataset.imgs:
                            if line.strip() in img["path"]:
                                data.append(count)
                                break
                            count += 1
                return data
            else:
                # BUG FIX: the original `raise ("...")` raised a plain string,
                # which is itself a TypeError; raise a real exception instead.
                raise FileNotFoundError("No CONFIG TRAINING FILE")
        # NOTE: an unused clean_path() helper from the original was removed.
        if os.path.exists(os.path.join(dpath, "poses_bounds.npy")):
            # LLFF dataset convention: every 8th image is validation data.
            indices_total = list(range(len(dataset.imgs)))
            indices_val = indices_total[::8]
            indices_train = list(filter(lambda x: x not in indices_val, indices_total))
        elif os.path.exists(os.path.join(dpath, "transforms_train.json")):
            indices_train = dataset.sfm.index_split[0]
            indices_val = dataset.sfm.index_split[1]
        else:
            indices_train = get_indices("train")
            indices_val = get_indices("val")
        # save indices to sfm for render purposes
        dataset.sfm.index_split = [indices_train, indices_val]
        # set cam rotation and translation for kalantari-style datasets
        ref_camtxt = os.path.join(dpath, "ref_cameramatrix.txt")
        if os.path.exists(ref_camtxt):
            cam_matrix = np.zeros((4, 4), np.float32)
            with open(ref_camtxt) as fi:
                lines = fi.readlines()
                for i in range(4):
                    line = lines[i].strip().split(" ")
                    for j in range(4):
                        cam_matrix[i, j] = float(line[j])
            dataset.sfm.ref_rT = pt.from_numpy(cam_matrix[:3, :3]).t()
            dataset.sfm.ref_t = pt.from_numpy(cam_matrix[:3, 3:4])
        sampler_train = SubsetRandomSampler(indices_train)
        sampler_val = SubsetRandomSampler(indices_val)
        dataloader_train = DataLoader(dataset, batch_size=1, sampler=sampler_train, num_workers=num_workers)
        dataloader_val = DataLoader(dataset, batch_size=1, sampler=sampler_val, num_workers=num_workers)
        print("TRAINING IMAGES: {}".format(len(dataloader_train)))
        print("VALIDATE IMAGES: {}".format(len(dataloader_val)))
    return sampler_train, sampler_val, dataloader_train, dataloader_val
def drawBottomBar(status):
    """Render `status` as a colored bar pinned to the bottom of the terminal."""
    def write_at(row, col, text):
        # Save the cursor, jump to (row, col), write, then restore the cursor.
        sys.stdout.write("\x1b7\x1b[%d;%df%s\x1b8" % (row, col, text))
        sys.stdout.flush()
    def park_cursor(row, col):
        print("\033[%d;%dH" % (row, col))
    columns, rows = os.get_terminal_size()
    # Pad the status text to a whole number of terminal rows.
    pad = (columns - (len(status) % columns)) % columns
    status += " " * pad
    lines = int(len(status) / columns)
    print("\n" * lines, end="")
    write_at(rows - lines, 0, " " * columns)
    write_at(rows - lines + 1, 0, "\33[38;5;72m\33[48;5;234m%s\33[0m" % status)
    park_cursor(rows - lines - 1, 0)
class TrainingStatus:
    """Tracks per-step timing of a training run and formats progress/ETA messages."""
    def __init__(self, num_steps, eta_interval=25, statusbar=""):
        # NOTE: `statusbar` is accepted but ignored; the displayed command is
        # always rebuilt from sys.argv (behaviour preserved from the original).
        self.eta_interval = eta_interval
        self.num_steps = num_steps
        self.etaCount = 0
        self.etaStart = time.time()
        self.duration = 0
        self.statusbar = " ".join(sys.argv)
    def tic(self):
        """Mark the beginning of one training step."""
        self.start = time.time()
    def toc(self, iter, loss):
        """Mark the end of one step and return a formatted progress message."""
        self.end = time.time()
        self.etaCount += 1
        # Re-estimate the per-interval duration every `eta_interval` steps.
        if self.etaCount % self.eta_interval == 0:
            self.duration = time.time() - self.etaStart
            self.etaStart = time.time()
        remaining = float(self.num_steps - iter) / self.eta_interval * self.duration
        minutes, seconds = divmod(remaining, 60)
        hours, minutes = divmod(minutes, 60)
        eta_text = "%d:%02d:%02d" % (hours, minutes, seconds)
        msg = "%.2f%% (%d/%d): %.3e t %.3f @ %s (%s)" % (
            iter * 100.0 / self.num_steps,
            iter,
            self.num_steps,
            loss,
            self.end - self.start,
            time.strftime("%a %d %H:%M:%S", time.localtime(time.time() + remaining)),
            eta_text,
        )
        if "CUDA_VISIBLE_DEVICES" in os.environ:
            bar_text = "Command: CUDA_VISIBLE_DEVICES=%s python %s" % (os.environ["CUDA_VISIBLE_DEVICES"], self.statusbar)
        else:
            bar_text = "Command: python %s" % (self.statusbar)
        try:
            drawBottomBar(bar_text)
        except:
            pass  # skip the bottom bar when there is no usable terminal
        return msg
# use in render_depth
def Rainbow(val):
    """Map `val` in [0, 1] to an RGB color by interpolating a rainbow palette.

    Returns a length-3 numpy array obtained by linear interpolation between
    the two palette entries bracketing `val`.
    """
    rainbow = [
        [0.18995, 0.07176, 0.23217],
        [0.22500, 0.16354, 0.45096],
        [0.25107, 0.25237, 0.63374],
        [0.26816, 0.33825, 0.78050],
        [0.27628, 0.42118, 0.89123],
        [0.27543, 0.50115, 0.96594],
        [0.25862, 0.57958, 0.99876],
        [0.21382, 0.65886, 0.97959],
        [0.15844, 0.73551, 0.92305],
        [0.11167, 0.80569, 0.84525],
        [0.09267, 0.86554, 0.76230],
        [0.12014, 0.91193, 0.68660],
        [0.19659, 0.94901, 0.59466],
        [0.30513, 0.97697, 0.48987],
        [0.42778, 0.99419, 0.38575],
        [0.54658, 0.99907, 0.29581],
        [0.64362, 0.98999, 0.23356],
        [0.72596, 0.96470, 0.20640],
        [0.80473, 0.92452, 0.20459],
        [0.87530, 0.87267, 0.21555],
        [0.93301, 0.81236, 0.22667],
        [0.97323, 0.74682, 0.22536],
        [0.99314, 0.67408, 0.20348],
        [0.99593, 0.58703, 0.16899],
        [0.98360, 0.49291, 0.12849],
        [0.95801, 0.39958, 0.08831],
        [0.92105, 0.31489, 0.05475],
        [0.87422, 0.24526, 0.03297],
        [0.81608, 0.18462, 0.01809],
        [0.74617, 0.13098, 0.00851],
        [0.66449, 0.08436, 0.00424],
        [0.47960, 0.01583, 0.01055],
    ]
    position = val * (len(rainbow) - 1)
    lower = int(position)
    upper = min(lower + 1, len(rainbow) - 1)
    frac = position - lower
    color_lo = np.array(rainbow[lower])
    color_hi = np.array(rainbow[upper])
    return color_lo * (1 - frac) + color_hi * frac
def colored_hook(home_dir):
    """Create an excepthook replacement that prints colorized tracebacks.

    Args:
        home_dir: directory containing your own code; frames from files under
            it (or files given without a path) are highlighted.
    Returns:
        A callable with the (type, value, traceback) excepthook signature.
    """
    def hook(type_, value, tb):
        def colorize(text, color, own=0):
            """Wrap `text` in ANSI escape codes; the `_own` variants are brighter."""
            endcolor = "\x1b[0m"
            codes = {
                "green": "\x1b[0;32m",
                "green_own": "\x1b[1;32;40m",
                "red": "\x1b[0;31m",
                "red_own": "\x1b[1;31m",
                "yellow": "\x1b[0;33m",
                "yellow_own": "\x1b[1;33m",
                "black": "\x1b[0;90m",
                "black_own": "\x1b[1;90m",
                "cyan": "\033[1;36m",
            }
            selector = color + ("_own" if own else "")
            return codes[selector] + text + endcolor
        for frame_file, frame_line, frame_func, frame_text in traceback.extract_tb(tb):
            short_name = os.path.basename(frame_file)
            own = (home_dir in frame_file) or ("/" not in frame_file)
            header = colorize('"' + short_name + '"', "green", own) + " in " + frame_func
            print(header)
            print("%s: %s" % (colorize("%5d" % frame_line, "red", own), colorize(frame_text, "yellow", own)))
            print("  %s" % colorize(frame_file, "black", own))
        print(colorize("%s: %s" % (type_.__name__, value), "cyan"))
    return hook
class ServeFilesHandler(http.server.SimpleHTTPRequestHandler):
    """SimpleHTTPRequestHandler that adds a permissive CORS header to every response.

    CLEANUP: the original defined an __init__ that only forwarded all
    arguments to super(); it was redundant and has been removed.
    """
    def end_headers(self):
        # Allow the viewer (served from another origin) to fetch local files.
        self.send_header("Access-Control-Allow-Origin", "*")
        http.server.SimpleHTTPRequestHandler.end_headers(self)
    @classmethod
    def Creator(cls, *args, **kwargs):
        """Return a handler factory that forwards extra args (e.g. directory=...) to the class."""
        def _HandlerCreator(request, client_address, server):
            cls(request, client_address, server, *args, **kwargs)
        return _HandlerCreator
def open_webgl_on_nexmpi(address, port, model_dir):
    """Open the hosted NeX viewer in a browser, pointed at the locally served model."""
    url = "https://nex-mpi.github.io/viewer/viewer.html?scene=http://{}:{}/{}".format(address, port, model_dir)
    webbrowser.open_new(url)
def serve_files(model_dir="", web_path="runs/html/"):
    """Serve `web_path` over HTTP on a random free port and open the web viewer.

    Blocks forever in serve_forever(); intended as the final call of a demo.

    BUG FIX: the original bound the server instance to the name `http`,
    shadowing the imported `http` module for the rest of the function;
    renamed to `httpd`.
    """
    print(web_path)
    with socketserver.TCPServer(("localhost", 0), ServeFilesHandler.Creator(directory=web_path)) as httpd:
        address = httpd.server_address[0]
        port = httpd.server_address[1]
        print("serving real-time demo at http://{}:{}/{}".format(address, port, model_dir))
        # Delay the browser launch until the server is actually listening.
        Timer(2.0, open_webgl_on_nexmpi, (address, port, model_dir)).start()
        httpd.serve_forever()
| [
"sys.stdout.write",
"os.get_terminal_size",
"json.load",
"numpy.random.seed",
"os.path.join",
"torch.utils.data.DataLoader",
"os.path.basename",
"skimage.io.imread",
"threading.Timer",
"os.path.exists",
"numpy.zeros",
"time.time",
"sys.stdout.flush",
"torch.utils.data.SubsetRandomSampler",... | [((745, 783), 'os.path.exists', 'os.path.exists', (["(dpath + '/models.json')"], {}), "(dpath + '/models.json')\n", (759, 783), False, 'import os\n'), ((856, 899), 'os.path.exists', 'os.path.exists', (["(dpath + '/poses_bounds.npy')"], {}), "(dpath + '/poses_bounds.npy')\n", (870, 899), False, 'import os\n'), ((1602, 1629), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (1616, 1629), True, 'import numpy as np\n'), ((1634, 1660), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (1651, 1660), True, 'import numpy as np\n'), ((5276, 5298), 'os.get_terminal_size', 'os.get_terminal_size', ([], {}), '()\n', (5296, 5298), False, 'import os\n'), ((1678, 1708), 'numpy.round', 'np.round', (['(ratio * dataset_size)'], {}), '(ratio * dataset_size)\n', (1686, 1708), True, 'import numpy as np\n'), ((1788, 1822), 'torch.utils.data.SubsetRandomSampler', 'SubsetRandomSampler', (['train_indices'], {}), '(train_indices)\n', (1807, 1822), False, 'from torch.utils.data import DataLoader, SubsetRandomSampler\n'), ((1824, 1856), 'torch.utils.data.SubsetRandomSampler', 'SubsetRandomSampler', (['val_indices'], {}), '(val_indices)\n', (1843, 1856), False, 'from torch.utils.data import DataLoader, SubsetRandomSampler\n'), ((2115, 2171), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(1)', 'sampler': 'sampler_train'}), '(dataset, batch_size=1, sampler=sampler_train)\n', (2125, 2171), False, 'from torch.utils.data import DataLoader, SubsetRandomSampler\n'), ((2197, 2251), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(1)', 'sampler': 'sampler_val'}), '(dataset, batch_size=1, sampler=sampler_val)\n', (2207, 2251), False, 'from torch.utils.data import DataLoader, SubsetRandomSampler\n'), ((3958, 4001), 'os.path.join', 'os.path.join', (['dpath', '"""ref_cameramatrix.txt"""'], {}), "(dpath, 'ref_cameramatrix.txt')\n", (3970, 4001), 
False, 'import os\n'), ((4013, 4039), 'os.path.exists', 'os.path.exists', (['ref_camtxt'], {}), '(ref_camtxt)\n', (4027, 4039), False, 'import os\n'), ((4532, 4566), 'torch.utils.data.SubsetRandomSampler', 'SubsetRandomSampler', (['indices_train'], {}), '(indices_train)\n', (4551, 4566), False, 'from torch.utils.data import DataLoader, SubsetRandomSampler\n'), ((4589, 4621), 'torch.utils.data.SubsetRandomSampler', 'SubsetRandomSampler', (['indices_val'], {}), '(indices_val)\n', (4608, 4621), False, 'from torch.utils.data import DataLoader, SubsetRandomSampler\n'), ((4649, 4735), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(1)', 'sampler': 'sampler_train', 'num_workers': 'num_workers'}), '(dataset, batch_size=1, sampler=sampler_train, num_workers=\n num_workers)\n', (4659, 4735), False, 'from torch.utils.data import DataLoader, SubsetRandomSampler\n'), ((4756, 4835), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(1)', 'sampler': 'sampler_val', 'num_workers': 'num_workers'}), '(dataset, batch_size=1, sampler=sampler_val, num_workers=num_workers)\n', (4766, 4835), False, 'from torch.utils.data import DataLoader, SubsetRandomSampler\n'), ((5110, 5168), 'sys.stdout.write', 'sys.stdout.write', (["('\\x1b7\\x1b[%d;%df%s\\x1b8' % (x, y, text))"], {}), "('\\x1b7\\x1b[%d;%df%s\\x1b8' % (x, y, text))\n", (5126, 5168), False, 'import sys\n'), ((5177, 5195), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5193, 5195), False, 'import sys\n'), ((5880, 5891), 'time.time', 'time.time', ([], {}), '()\n', (5889, 5891), False, 'import time\n'), ((6004, 6015), 'time.time', 'time.time', ([], {}), '()\n', (6013, 6015), False, 'import time\n'), ((6067, 6078), 'time.time', 'time.time', ([], {}), '()\n', (6076, 6078), False, 'import time\n'), ((9516, 9540), 'traceback.extract_tb', 'traceback.extract_tb', (['tb'], {}), '(tb)\n', (9536, 9540), False, 'import traceback\n'), ((1068, 1081), 'json.load', 'json.load', (['fi'], 
{}), '(fi)\n', (1077, 1081), False, 'import json\n'), ((3159, 3198), 'os.path.join', 'os.path.join', (['dpath', '"""poses_bounds.npy"""'], {}), "(dpath, 'poses_bounds.npy')\n", (3171, 3198), False, 'import os\n'), ((4066, 4094), 'numpy.zeros', 'np.zeros', (['(4, 4)', 'np.float32'], {}), '((4, 4), np.float32)\n', (4074, 4094), True, 'import numpy as np\n'), ((6242, 6253), 'time.time', 'time.time', ([], {}), '()\n', (6251, 6253), False, 'import time\n'), ((9565, 9591), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (9581, 9591), False, 'import os\n'), ((1003, 1037), 'os.path.join', 'os.path.join', (['dpath', '"""models.json"""'], {}), "(dpath, 'models.json')\n", (1015, 1037), False, 'import os\n'), ((1398, 1413), 'skimage.io.imread', 'io.imread', (['img0'], {}), '(img0)\n', (1407, 1413), False, 'from skimage import io\n'), ((3496, 3540), 'os.path.join', 'os.path.join', (['dpath', '"""transforms_train.json"""'], {}), "(dpath, 'transforms_train.json')\n", (3508, 3540), False, 'import os\n'), ((4473, 4507), 'torch.from_numpy', 'pt.from_numpy', (['cam_matrix[:3, 3:4]'], {}), '(cam_matrix[:3, 3:4])\n', (4486, 4507), True, 'import torch as pt\n'), ((6186, 6197), 'time.time', 'time.time', ([], {}), '()\n', (6195, 6197), False, 'import time\n'), ((11109, 11169), 'threading.Timer', 'Timer', (['(2.0)', 'open_webgl_on_nexmpi', '(address, port, model_dir)'], {}), '(2.0, open_webgl_on_nexmpi, (address, port, model_dir))\n', (11114, 11169), False, 'from threading import Timer\n'), ((1193, 1225), 'os.path.join', 'os.path.join', (['dpath', '"""images"""', 'f'], {}), "(dpath, 'images', f)\n", (1205, 1225), False, 'import os\n'), ((4399, 4432), 'torch.from_numpy', 'pt.from_numpy', (['cam_matrix[:3, :3]'], {}), '(cam_matrix[:3, :3])\n', (4412, 4432), True, 'import torch as pt\n'), ((6710, 6721), 'time.time', 'time.time', ([], {}), '()\n', (6719, 6721), False, 'import time\n'), ((1265, 1294), 'os.path.join', 'os.path.join', (['dpath', '"""images"""'], {}), 
"(dpath, 'images')\n", (1277, 1294), False, 'import os\n'), ((3087, 3106), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (3103, 3106), False, 'import os\n')] |
#!/usr/bin/env python
from ast import literal_eval
from fenics import *
from itertools import combinations_with_replacement
from math import floor, ceil
import numpy as np
import matplotlib.pyplot as plt
def update_params(argv, params):
    """Merge a literal-eval'd dict from the first CLI argument into `params`.

    `params` is updated in place and also returned; with no extra CLI
    argument the dictionary is returned unchanged.
    """
    if len(argv) > 1:
        overrides = literal_eval(argv[1])
        params.update(overrides)
    return params
def matrix_sqrt(mat):
    """Compute the square root of a symmetric PSD matrix via eigendecomposition.

    Negative eigenvalues (numerical noise) are clipped to zero, with a
    printed warning.
    """
    eigvals, eigvecs = np.linalg.eigh(mat)
    if np.min(eigvals) < 0:
        print('Smallest eigenvalue: ', np.min(eigvals))
        print('Warning negative eigenvalues set to Zero')
        scale = np.diag(np.sqrt(np.maximum(0, eigvals)))
    else:
        scale = np.diag(np.sqrt(eigvals))
    return np.linalg.multi_dot([eigvecs, scale, eigvecs.T])
def sqrt_gram_matrix(V):
    """Assemble the H1 Gram matrix on function space V and return its square root.

    The Gram matrix is the sum of the mass matrix and the stiffness matrix.
    """
    u = TrialFunction(V)
    v = TestFunction(V)
    mass = assemble(u*v*dx).array()
    stiffness = assemble(dot(grad(u), grad(v))*dx).array()
    gram = mass + stiffness
    return matrix_sqrt(gram.astype(np.float32))
def get_polynomial(num_var, max_deg):
    """Return all monomials in `num_var` variables up to degree `max_deg`.

    Monomials are rendered as C-style expression strings such as
    'x[0]*x[1]'. A dummy symbol 'c' pads the degree so that all lower-degree
    monomials are generated as well; the all-'c' combination is the constant
    term '1'.
    """
    symbols = ['c'] + ['x[' + str(i) + ']' for i in range(num_var)]
    monomial_list = []
    for combo in combinations_with_replacement(symbols, max_deg):
        factors = [s for s in combo if s != 'c']
        monomial_list.append('*'.join(factors))
    monomial_list[0] = '1'  # fix constant term
    return monomial_list
def trigonometric_basis(n, p, mu=1, sigma=-1):
    """Sample `n` random coefficient fields in a trigonometric basis (T1).

    n: number of samples
    p: number of basis functions
    mu: shift
    sigma: decay rate of the basis terms
    Returns a list of C-style expression strings and the (n, p) coefficient array.
    """
    coeff = np.random.uniform(0, 1., (n, p))
    template_a = "+(1+{}*{}*(1+{}))"
    template_sin = "sin({}*3.14159*x[0])*sin({}*3.14159*x[1])"
    expr_list = []
    for row in range(n):
        parts = [str(mu)]
        for term in range(1, p + 1):
            sin = template_sin.format(floor((term + 2) / 2), ceil((term + 2) / 2))
            parts.append(template_a.format(term ** sigma, str(coeff[row, term - 1]), sin))
        expr_list.append(''.join(parts))
    return expr_list, coeff
def squares_basis(n, s, mu=0.1):
    """Sample `n` random piecewise-constant ('chessboard') fields with s*s squares (T2).

    n: number of samples
    s: number of squares per row, so p = s^2 basis functions
    mu: shift
    Warning: only valid on the unit square (otherwise adjust the scaling).
    """
    num_terms = s**2
    coeff = np.random.uniform(0.0, 1., (n, num_terms))
    step = 1 / s
    # indicator fct. for x[0] in interval [low, up]
    indicator_x0 = "*ceil(fmax(x[0]-{low},0))*ceil(fmax({up}-x[0],0))"
    # indicator fct. for x[1] in interval [low, up]
    indicator_x1 = "*ceil(fmax(x[1]-{low},0))*ceil(fmax({up}-x[1],0))"
    expr_list = []
    for i in range(n):
        pieces = [str(mu)]
        count = 0
        for col in range(s):
            for row in range(s):
                box_x0 = indicator_x0.format(low=col*step, up=(col+1)*step)
                box_x1 = indicator_x1.format(low=row*step, up=(row+1)*step)
                pieces.append('+' + str(coeff[i, count]) + box_x0 + box_x1)
                count += 1
        expr_list.append(''.join(pieces))
    return expr_list, coeff
def cookies_basis(n, s, mu=0.1, mode='var'):
    """Sample `n` random 'cookie' fields: s*s disks on a regular grid (T3).

    n: number of samples
    s: number of disks per row, so p = s^2 basis functions
    mu: shift
    mode: 'var' draws a random radius per disk; any other value fixes it.
    Warning: only valid on the unit square (otherwise adjust the scaling).
    """
    num_terms = s**2
    coeff = np.random.uniform(0.0, 1., (n, num_terms))
    if mode == "var":
        coeff_radii = np.random.uniform(0.5, 0.9, (n, num_terms))
        coeff = np.concatenate((coeff, coeff_radii), axis=1)
    step = 1 / s
    template_dist = 'sqrt(pow(x[0]-{c_x0},2)+pow(x[1]-{c_x1},2))'
    template_cookie = '+{}*ceil(fmax({radius}-{dist},0))'
    expr_list = []
    for i in range(n):
        pieces = [str(mu)]
        count = 0
        for col in range(s):
            for row in range(s):
                if mode == "var":
                    radius = coeff[i, count + num_terms] / (2*s)
                else:
                    radius = 0.8 / (2*s)
                dist = template_dist.format(c_x0=1/(2*s)+col*step, c_x1=1/(2*s)+row*step)
                pieces.append(template_cookie.format(coeff[i, count], radius=radius, dist=dist))
                count += 1
        expr_list.append(''.join(pieces))
    return expr_list, coeff
def polynomial_basis(n, k, mu=0.1):
    """Sample `n` random polynomial fields of max degree k, clipped below at mu (T4).

    n: number of samples
    k: maximal polynomial degree, so p = (k+2 choose 2) basis functions
    mu: lower clipping value applied via fmax
    """
    poly = get_polynomial(2, k)
    num_terms = len(poly)
    coeff = np.random.uniform(-1., 1., (n, num_terms))
    expr_list = []
    for i in range(n):
        terms = [str(coeff[i, j]) + '*' + poly[j] for j in range(num_terms)]
        expr_list.append("fmax({},{})".format(mu, '+'.join(terms)))
    return expr_list, coeff
if __name__ == '__main__':
    # Demo: sample one random field in a chosen basis and visualize it on
    # the unit square with FEniCS (swap the commented lines to try T2-T4).
    # plot some examples
    expr, coeff = trigonometric_basis(1,10, sigma=1)
    #expr, coeff = squares_basis(1,4)
    #expr, coeff = cookies_basis(1,3,mode='var')
    #expr, coeff = polynomial_basis(1,9)
    mesh = UnitSquareMesh(100, 100)
    V = FunctionSpace(mesh, 'P', 1)
    # Interpolate the generated C-style expression onto a P1 function space.
    u = interpolate(Expression(expr[0], degree=1), V)
    fig = plot(u)
    plt.colorbar(fig)
    plt.show()
| [
"numpy.random.uniform",
"matplotlib.pyplot.show",
"numpy.maximum",
"math.ceil",
"math.floor",
"matplotlib.pyplot.colorbar",
"itertools.combinations_with_replacement",
"numpy.linalg.eigh",
"numpy.min",
"ast.literal_eval",
"numpy.sqrt",
"numpy.concatenate",
"numpy.linalg.multi_dot"
] | [((479, 498), 'numpy.linalg.eigh', 'np.linalg.eigh', (['mat'], {}), '(mat)\n', (493, 498), True, 'import numpy as np\n'), ((719, 758), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['[V, diag_mat, V.T]'], {}), '([V, diag_mat, V.T])\n', (738, 758), True, 'import numpy as np\n'), ((1354, 1397), 'itertools.combinations_with_replacement', 'combinations_with_replacement', (['var', 'max_deg'], {}), '(var, max_deg)\n', (1383, 1397), False, 'from itertools import combinations_with_replacement\n'), ((1847, 1888), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1.0)', '(n, num_terms)'], {}), '(0, 1.0, (n, num_terms))\n', (1864, 1888), True, 'import numpy as np\n'), ((2577, 2620), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)', '(n, num_terms)'], {}), '(0.0, 1.0, (n, num_terms))\n', (2594, 2620), True, 'import numpy as np\n'), ((3628, 3671), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)', '(n, num_terms)'], {}), '(0.0, 1.0, (n, num_terms))\n', (3645, 3671), True, 'import numpy as np\n'), ((4662, 4706), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)', '(n, num_terms)'], {}), '(-1.0, 1.0, (n, num_terms))\n', (4679, 4706), True, 'import numpy as np\n'), ((5296, 5313), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['fig'], {}), '(fig)\n', (5308, 5313), True, 'import matplotlib.pyplot as plt\n'), ((5315, 5325), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5323, 5325), True, 'import matplotlib.pyplot as plt\n'), ((505, 514), 'numpy.min', 'np.min', (['w'], {}), '(w)\n', (511, 514), True, 'import numpy as np\n'), ((3707, 3750), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(0.9)', '(n, num_terms)'], {}), '(0.5, 0.9, (n, num_terms))\n', (3724, 3750), True, 'import numpy as np\n'), ((3761, 3805), 'numpy.concatenate', 'np.concatenate', (['(coeff, coeff_radii)'], {'axis': '(1)'}), '((coeff, coeff_radii), axis=1)\n', (3775, 3805), True, 'import numpy as np\n'), ((333, 354), 'ast.literal_eval', 
'literal_eval', (['argv[1]'], {}), '(argv[1])\n', (345, 354), False, 'from ast import literal_eval\n'), ((553, 562), 'numpy.min', 'np.min', (['w'], {}), '(w)\n', (559, 562), True, 'import numpy as np\n'), ((693, 703), 'numpy.sqrt', 'np.sqrt', (['w'], {}), '(w)\n', (700, 703), True, 'import numpy as np\n'), ((645, 661), 'numpy.maximum', 'np.maximum', (['(0)', 'w'], {}), '(0, w)\n', (655, 661), True, 'import numpy as np\n'), ((2103, 2121), 'math.floor', 'floor', (['((j + 2) / 2)'], {}), '((j + 2) / 2)\n', (2108, 2121), False, 'from math import floor, ceil\n'), ((2119, 2136), 'math.ceil', 'ceil', (['((j + 2) / 2)'], {}), '((j + 2) / 2)\n', (2123, 2136), False, 'from math import floor, ceil\n')] |
#!/usr.bin/env python
#<NAME> - 1/13/14
"""This code is from the IDL Astronomy Users Library"""
import numpy as np
import dao_value
import pyfits
def rdpsf(psfname):
    """Read the FITS file created by GETPSF in the DAOPHOT sequence.

    Combines the analytic Gaussian core with the residual image to create
    the output PSF array.

    psf, hpsf = rdpsf.rdpsf(PSFname)

    Parameters
    ----------
    psfname : str
        Name of the FITS file containing the PSF residuals.

    Returns
    -------
    psf : ndarray
        Array containing the actual PSF.
    hpsf : FITS header
        Header associated with psf (NAXIS1/NAXIS2 updated to the PSF width).

    Procedures called: DAO_VALUE()

    Revision history: written December 1988 (IDL); checked for IDL v2
    December 1990; converted to IDL V5.0 September 1997; converted to
    Python January 2014.
    """
    resid = pyfits.getdata(psfname)
    hpsf = pyfits.getheader(psfname)
    # The five parameters of the analytic Gaussian core.
    gauss1 = hpsf['GAUSS1']
    gauss2 = hpsf['GAUSS2']
    gauss3 = hpsf['GAUSS3']
    gauss4 = hpsf['GAUSS4']
    gauss5 = hpsf['GAUSS5']
    gauss = [gauss1, gauss2, gauss3, gauss4, gauss5]
    psfrad = hpsf['PSFRAD']  # PSF radius
    # Width of output array containing the PSF; forced to int so it can be
    # used as an array dimension even if PSFRAD is stored as a float in the
    # header (np.zeros rejects float sizes on modern numpy).
    npsf = int(2 * psfrad + 1)
    psf = np.zeros([npsf, npsf])
    dx = np.arange(npsf, dtype='int') - psfrad  # X distance from array centre
    dy = np.arange(npsf, dtype='int') - psfrad  # ditto for Y
    ny = len(dy)
    nx = len(dx)
    dx = dx.reshape(1, nx)
    dy = dy.reshape(ny, 1)
    dx = rebin(dx, [ny, nx])
    dy = rebin(dy, [ny, nx])
    # DAOPHOT PSF value (Gaussian core + interpolated residuals) per point.
    psf = psf + dao_value.dao_value(dx, dy, gauss, resid, deriv=False)
    hpsf['NAXIS1'] = npsf
    hpsf['NAXIS2'] = npsf
    return (psf, hpsf)
def rebin(a, new_shape):
    """Resample the 2-D array `a` to `new_shape` (list/tuple (m, n)).

    Downsampling (m < M) block-averages; upsampling repeats elements.
    Shapes are assumed to be integer multiples of each other.
    """
    M, N = a.shape
    m, n = new_shape
    if m < M:
        # Python 3 fix: '/' yields floats, which reshape() rejects — the
        # block factors must be computed with integer division.
        return a.reshape((m, M // m, n, N // n)).mean(3).mean(1)
    else:
        # Upsample: repeat counts must likewise be integers.
        return np.repeat(np.repeat(a, m // M, axis=0), n // N, axis=1)
| [
"dao_value.dao_value",
"numpy.zeros",
"pyfits.getdata",
"pyfits.getheader",
"numpy.arange",
"numpy.repeat"
] | [((996, 1019), 'pyfits.getdata', 'pyfits.getdata', (['psfname'], {}), '(psfname)\n', (1010, 1019), False, 'import pyfits\n'), ((1031, 1056), 'pyfits.getheader', 'pyfits.getheader', (['psfname'], {}), '(psfname)\n', (1047, 1056), False, 'import pyfits\n'), ((1433, 1455), 'numpy.zeros', 'np.zeros', (['[npsf, npsf]'], {}), '([npsf, npsf])\n', (1441, 1455), True, 'import numpy as np\n'), ((1492, 1520), 'numpy.arange', 'np.arange', (['npsf'], {'dtype': '"""int"""'}), "(npsf, dtype='int')\n", (1501, 1520), True, 'import numpy as np\n'), ((1588, 1616), 'numpy.arange', 'np.arange', (['npsf'], {'dtype': '"""int"""'}), "(npsf, dtype='int')\n", (1597, 1616), True, 'import numpy as np\n'), ((1826, 1880), 'dao_value.dao_value', 'dao_value.dao_value', (['dx', 'dy', 'gauss', 'resid'], {'deriv': '(False)'}), '(dx, dy, gauss, resid, deriv=False)\n', (1845, 1880), False, 'import dao_value\n'), ((2159, 2186), 'numpy.repeat', 'np.repeat', (['a', '(m / M)'], {'axis': '(0)'}), '(a, m / M, axis=0)\n', (2168, 2186), True, 'import numpy as np\n')] |
import numpy as np
def sigmoid(Z):
    """Sigmoid activation.

    Returns (A, cache) where A = 1 / (1 + exp(-Z)) and cache is Z itself,
    kept for the backward pass.
    """
    A = 1 / (1 + np.exp(-Z))
    cache = Z
    return A, cache
def relu(Z):
    """ReLU activation: elementwise max(0, Z).

    Returns (A, cache) where cache is Z, kept for the backward pass.
    """
    A = np.maximum(0, Z)
    assert (A.shape == Z.shape)
    cache = Z
    return A, cache
def sigmoid_derivative(dA, cache):
    """Backward pass of sigmoid: dZ = dA * s * (1 - s), s = sigmoid(cache)."""
    Z = cache
    s = 1 / (1 + np.exp(-Z))
    dZ = dA * s * (1 - s)
    assert (dZ.shape == Z.shape)
    return dZ
def relu_derivative(dA, cache):
    """Backward pass of ReLU: passes dA through where cache > 0, else zero."""
    Z = cache
    dZ = np.array(dA, copy=True)
    dZ[Z <= 0] = 0
    assert (dZ.shape == Z.shape)
    return dZ
"numpy.array",
"numpy.maximum",
"numpy.exp"
] | [((123, 139), 'numpy.maximum', 'np.maximum', (['(0)', 'Z'], {}), '(0, Z)\n', (133, 139), True, 'import numpy as np\n'), ((421, 444), 'numpy.array', 'np.array', (['dA'], {'copy': '(True)'}), '(dA, copy=True)\n', (429, 444), True, 'import numpy as np\n'), ((54, 64), 'numpy.exp', 'np.exp', (['(-Z)'], {}), '(-Z)\n', (60, 64), True, 'import numpy as np\n'), ((276, 286), 'numpy.exp', 'np.exp', (['(-Z)'], {}), '(-Z)\n', (282, 286), True, 'import numpy as np\n')] |
"""
Do not change the input and output format.
If our script cannot run your code or the format is improper, your code will not be graded.
"""
import numpy as np
import dnn_misc
# Sanity-check script for the dnn_misc layer implementations: runs each
# layer's forward/backward on fixed random data (seed 123) and compares
# against precomputed ground-truth values. NOTE: the sequence of
# np.random calls must not be reordered — the ground truths depend on it.
np.random.seed(123)
# example data
X = np.random.normal(0, 1, (5, 3))
# example modules
check_linear = dnn_misc.linear_layer(input_D = 3, output_D = 2)
check_relu = dnn_misc.relu()
check_dropout = dnn_misc.dropout(r = 0.5)
# check_linear.forward
hat_X = check_linear.forward(X)
ground_hat_X = np.array([[ 0.42525407, -0.2120611 ],
                         [ 0.15174804, -0.36218431],
                         [ 0.20957104, -0.57861084],
                         [ 0.03460477, -0.35992763],
                         [-0.07256568,  0.1385197 ]])
if (hat_X.shape[0] != 5) or (hat_X.shape[1] != 2):
    print('Wrong output dimension of linear.forward')
else:
    max_relative_diff = np.amax(np.abs(ground_hat_X - hat_X) / (ground_hat_X + 1e-8))
    print('max_diff_output: ' + str(max_relative_diff))
    if max_relative_diff >= 1e-7:
        print('linear.forward might be wrong')
    else:
        print('linear.forward should be correct')
print('##########################')
# check_linear.backward
grad_hat_X = np.random.normal(0, 1, (5, 2))
grad_X = check_linear.backward(X, grad_hat_X)
ground_grad_X = np.array([[-0.32766959,  0.13123228, -0.0470483 ],
                          [ 0.22780188, -0.04838436,  0.04225799],
                          [ 0.03115675, -0.32648556, -0.06550193],
                          [-0.01895741, -0.21411292, -0.05212837],
                          [-0.26923074, -0.78986304, -0.23870499]])
ground_grad_W = np.array([[-0.27579345, -2.08570514],
                          [ 4.52754775, -0.40995374],
                          [-1.2049515,  1.77662551]])
ground_grad_b = np.array([[-4.55094716, -2.51399667]])
if (grad_X.shape[0] != 5) or (grad_X.shape[1] != 3):
    print('Wrong output dimension of linear.backward')
else:
    max_relative_diff_X = np.amax(np.abs(ground_grad_X - grad_X) / (ground_grad_X + 1e-8))
    print('max_diff_grad_X: ' + str(max_relative_diff_X))
    max_relative_diff_W = np.amax(np.abs(ground_grad_W - check_linear.gradient['W']) / (ground_grad_W + 1e-8))
    print('max_diff_grad_W: ' + str(max_relative_diff_W))
    max_relative_diff_b = np.amax(np.abs(ground_grad_b - check_linear.gradient['b']) / (ground_grad_b + 1e-8))
    print('max_diff_grad_b: ' + str(max_relative_diff_b))
    if (max_relative_diff_X >= 1e-7) or (max_relative_diff_W >= 1e-7) or (max_relative_diff_b >= 1e-7):
        print('linear.backward might be wrong')
    else:
        print('linear.backward should be correct')
print('##########################')
# check_relu.forward
hat_X = check_relu.forward(X)
ground_hat_X = np.array([[ 0., 0.99734545, 0.2829785 ],
                         [ 0., 0., 1.65143654],
                         [ 0., 0., 1.26593626],
                         [ 0., 0., 0. ],
                         [ 1.49138963, 0., 0. ]])
if (hat_X.shape[0] != 5) or (hat_X.shape[1] != 3):
    print('Wrong output dimension of relu.forward')
else:
    max_relative_diff = np.amax(np.abs(ground_hat_X - hat_X) / (ground_hat_X + 1e-8))
    print('max_diff_output: ' + str(max_relative_diff))
    if max_relative_diff >= 1e-7:
        print('relu.forward might be wrong')
    else:
        print('relu.forward should be correct')
print('##########################')
# check_relu.backward
grad_hat_X = np.random.normal(0, 1, (5, 3))
grad_X = check_relu.backward(X, grad_hat_X)
ground_grad_X = np.array([[-0., 0.92746243, -0.17363568],
                          [ 0., 0., -0.87953634],
                          [ 0., -0., -1.72766949],
                          [-0., 0., 0. ],
                          [-0.01183049, 0., 0. ]])
if (grad_X.shape[0] != 5) or (grad_X.shape[1] != 3):
    print('Wrong output dimension of relu.backward')
else:
    max_relative_diff_X = np.amax(np.abs(ground_grad_X - grad_X) / (ground_grad_X + 1e-8))
    print('max_diff_grad_X: ' + str(max_relative_diff_X))
    if (max_relative_diff_X >= 1e-7):
        print('relu.backward might be wrong')
    else:
        print('relu.backward should be correct')
print('##########################')
# check_dropout.forward
hat_X = check_dropout.forward(X, is_train = True)
# check_dropout.backward
grad_hat_X = np.random.normal(0, 1, (5, 3))
grad_X = check_dropout.backward(X, grad_hat_X)
ground_grad_X = np.array([[ 0., -0.39530184, -1.45606984],
                          [-1.22062684, -0., 0. ],
                          [ 0., 1.7354356, 2.53503582],
                          [ 4.21567995, -0.4721789, -0.46416366],
                          [-2.15627882, 2.32636907, 1.04498015]])
if (grad_X.shape[0] != 5) or (grad_X.shape[1] != 3):
    print('Wrong output dimension of dropout.backward')
else:
    # note: normalised by grad_X here (not by the ground truth), as in the
    # original script.
    max_relative_diff_X = np.amax(np.abs(ground_grad_X - grad_X) / (grad_X + 1e-8))
    print('max_diff_grad_X: ' + str(max_relative_diff_X))
    if (max_relative_diff_X >= 1e-7):
        print('dropout.backward might be wrong')
    else:
        print('dropout.backward should be correct')
print('##########################')
| [
"numpy.random.seed",
"numpy.abs",
"dnn_misc.relu",
"numpy.array",
"numpy.random.normal",
"dnn_misc.dropout",
"dnn_misc.linear_layer"
] | [((181, 200), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (195, 200), True, 'import numpy as np\n'), ((221, 251), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(5, 3)'], {}), '(0, 1, (5, 3))\n', (237, 251), True, 'import numpy as np\n'), ((287, 331), 'dnn_misc.linear_layer', 'dnn_misc.linear_layer', ([], {'input_D': '(3)', 'output_D': '(2)'}), '(input_D=3, output_D=2)\n', (308, 331), False, 'import dnn_misc\n'), ((349, 364), 'dnn_misc.relu', 'dnn_misc.relu', ([], {}), '()\n', (362, 364), False, 'import dnn_misc\n'), ((381, 404), 'dnn_misc.dropout', 'dnn_misc.dropout', ([], {'r': '(0.5)'}), '(r=0.5)\n', (397, 404), False, 'import dnn_misc\n'), ((479, 626), 'numpy.array', 'np.array', (['[[0.42525407, -0.2120611], [0.15174804, -0.36218431], [0.20957104, -\n 0.57861084], [0.03460477, -0.35992763], [-0.07256568, 0.1385197]]'], {}), '([[0.42525407, -0.2120611], [0.15174804, -0.36218431], [0.20957104,\n -0.57861084], [0.03460477, -0.35992763], [-0.07256568, 0.1385197]])\n', (487, 626), True, 'import numpy as np\n'), ((1104, 1134), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(5, 2)'], {}), '(0, 1, (5, 2))\n', (1120, 1134), True, 'import numpy as np\n'), ((1198, 1418), 'numpy.array', 'np.array', (['[[-0.32766959, 0.13123228, -0.0470483], [0.22780188, -0.04838436, \n 0.04225799], [0.03115675, -0.32648556, -0.06550193], [-0.01895741, -\n 0.21411292, -0.05212837], [-0.26923074, -0.78986304, -0.23870499]]'], {}), '([[-0.32766959, 0.13123228, -0.0470483], [0.22780188, -0.04838436, \n 0.04225799], [0.03115675, -0.32648556, -0.06550193], [-0.01895741, -\n 0.21411292, -0.05212837], [-0.26923074, -0.78986304, -0.23870499]])\n', (1206, 1418), True, 'import numpy as np\n'), ((1435, 1531), 'numpy.array', 'np.array', (['[[-0.27579345, -2.08570514], [4.52754775, -0.40995374], [-1.2049515, \n 1.77662551]]'], {}), '([[-0.27579345, -2.08570514], [4.52754775, -0.40995374], [-\n 1.2049515, 1.77662551]])\n', (1443, 1531), True, 'import 
numpy as np\n'), ((1549, 1587), 'numpy.array', 'np.array', (['[[-4.55094716, -2.51399667]]'], {}), '([[-4.55094716, -2.51399667]])\n', (1557, 1587), True, 'import numpy as np\n'), ((2508, 2642), 'numpy.array', 'np.array', (['[[0.0, 0.99734545, 0.2829785], [0.0, 0.0, 1.65143654], [0.0, 0.0, \n 1.26593626], [0.0, 0.0, 0.0], [1.49138963, 0.0, 0.0]]'], {}), '([[0.0, 0.99734545, 0.2829785], [0.0, 0.0, 1.65143654], [0.0, 0.0, \n 1.26593626], [0.0, 0.0, 0.0], [1.49138963, 0.0, 0.0]])\n', (2516, 2642), True, 'import numpy as np\n'), ((3189, 3219), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(5, 3)'], {}), '(0, 1, (5, 3))\n', (3205, 3219), True, 'import numpy as np\n'), ((3280, 3422), 'numpy.array', 'np.array', (['[[-0.0, 0.92746243, -0.17363568], [0.0, 0.0, -0.87953634], [0.0, -0.0, -\n 1.72766949], [-0.0, 0.0, 0.0], [-0.01183049, 0.0, 0.0]]'], {}), '([[-0.0, 0.92746243, -0.17363568], [0.0, 0.0, -0.87953634], [0.0, -\n 0.0, -1.72766949], [-0.0, 0.0, 0.0], [-0.01183049, 0.0, 0.0]])\n', (3288, 3422), True, 'import numpy as np\n'), ((4057, 4087), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(5, 3)'], {}), '(0, 1, (5, 3))\n', (4073, 4087), True, 'import numpy as np\n'), ((4151, 4338), 'numpy.array', 'np.array', (['[[0.0, -0.39530184, -1.45606984], [-1.22062684, -0.0, 0.0], [0.0, 1.7354356,\n 2.53503582], [4.21567995, -0.4721789, -0.46416366], [-2.15627882, \n 2.32636907, 1.04498015]]'], {}), '([[0.0, -0.39530184, -1.45606984], [-1.22062684, -0.0, 0.0], [0.0, \n 1.7354356, 2.53503582], [4.21567995, -0.4721789, -0.46416366], [-\n 2.15627882, 2.32636907, 1.04498015]])\n', (4159, 4338), True, 'import numpy as np\n'), ((778, 806), 'numpy.abs', 'np.abs', (['(ground_hat_X - hat_X)'], {}), '(ground_hat_X - hat_X)\n', (784, 806), True, 'import numpy as np\n'), ((1737, 1767), 'numpy.abs', 'np.abs', (['(ground_grad_X - grad_X)'], {}), '(ground_grad_X - grad_X)\n', (1743, 1767), True, 'import numpy as np\n'), ((1886, 1936), 'numpy.abs', 'np.abs', 
(["(ground_grad_W - check_linear.gradient['W'])"], {}), "(ground_grad_W - check_linear.gradient['W'])\n", (1892, 1936), True, 'import numpy as np\n'), ((2055, 2105), 'numpy.abs', 'np.abs', (["(ground_grad_b - check_linear.gradient['b'])"], {}), "(ground_grad_b - check_linear.gradient['b'])\n", (2061, 2105), True, 'import numpy as np\n'), ((2870, 2898), 'numpy.abs', 'np.abs', (['(ground_hat_X - hat_X)'], {}), '(ground_hat_X - hat_X)\n', (2876, 2898), True, 'import numpy as np\n'), ((3647, 3677), 'numpy.abs', 'np.abs', (['(ground_grad_X - grad_X)'], {}), '(ground_grad_X - grad_X)\n', (3653, 3677), True, 'import numpy as np\n'), ((4521, 4551), 'numpy.abs', 'np.abs', (['(ground_grad_X - grad_X)'], {}), '(ground_grad_X - grad_X)\n', (4527, 4551), True, 'import numpy as np\n')] |
"""
Some useful utils for the project
"""
import numpy
from sklearn.exceptions import NotFittedError
from gensim.sklearn_api import W2VTransformer
from tensorflow.keras.layers import Dense # pylint: disable=no-name-in-module
from tensorflow.keras.models import Sequential # pylint: disable=no-name-in-module
from tensorflow.python.keras.optimizer_v2.gradient_descent import SGD # pylint: disable=no-name-in-module
from tensorflow.python.keras.wrappers.scikit_learn import KerasClassifier # pylint: disable=no-name-in-module
class MyW2VTransformer(W2VTransformer):
    """Some custom w2v transformer.

    Adapts gensim's W2VTransformer to single-column pandas DataFrames:
    fit/transform take the first column as the list of words.
    """

    def partial_fit(self, X):
        # pylint: disable=useless-super-delegation
        super().partial_fit([X])

    def fit(self, X, y=None):
        """Fit the word2vec model on the first column of DataFrame X."""
        X = X.iloc[:, 0].tolist()
        return super().fit([X], y)

    def transform(self, words):
        """Return a (len(words), size) array of word vectors.

        Words unknown to the fitted model map to zero vectors.
        """
        words = words.iloc[:, 0].tolist()
        if self.gensim_model is None:
            raise NotFittedError(
                "This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
            )
        # The input as array of array
        vectors = []
        for word in words:
            if word in self.gensim_model.wv:
                vectors.append(self.gensim_model.wv[word])
            else:
                vectors.append(numpy.zeros(self.size))
        return numpy.reshape(numpy.array(vectors), (len(words), self.size))
class MyKerasClassifier(KerasClassifier):
    """A Keras Wrapper that sets input_dim on fit"""

    def fit(self, x, y, **kwargs):
        """Create and fit a simple neural network"""
        # Infer the input dimensionality from the training data so the
        # model builder receives the correct input_dim.
        self.sk_params['input_dim'] = x.shape[1]
        super().fit(x, y, **kwargs)
def create_model(input_dim=9):
    """Create a simple neural network: two ReLU hidden layers (9 units each)
    and a 2-class softmax output, compiled with SGD + categorical
    cross-entropy."""
    clf = Sequential()
    clf.add(Dense(9, activation='relu', input_dim=input_dim))
    clf.add(Dense(9, activation='relu'))
    clf.add(Dense(2, activation='softmax'))
    clf.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=["accuracy"])
    return clf
| [
"tensorflow.keras.layers.Dense",
"numpy.zeros",
"sklearn.exceptions.NotFittedError",
"numpy.array",
"tensorflow.keras.models.Sequential",
"tensorflow.python.keras.optimizer_v2.gradient_descent.SGD"
] | [((1798, 1810), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1808, 1810), False, 'from tensorflow.keras.models import Sequential\n'), ((1823, 1871), 'tensorflow.keras.layers.Dense', 'Dense', (['(9)'], {'activation': '"""relu"""', 'input_dim': 'input_dim'}), "(9, activation='relu', input_dim=input_dim)\n", (1828, 1871), False, 'from tensorflow.keras.layers import Dense\n'), ((1885, 1912), 'tensorflow.keras.layers.Dense', 'Dense', (['(9)'], {'activation': '"""relu"""'}), "(9, activation='relu')\n", (1890, 1912), False, 'from tensorflow.keras.layers import Dense\n'), ((1926, 1956), 'tensorflow.keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (1931, 1956), False, 'from tensorflow.keras.layers import Dense\n'), ((955, 1082), 'sklearn.exceptions.NotFittedError', 'NotFittedError', (['"""This model has not been fitted yet. Call \'fit\' with appropriate arguments before using this method."""'], {}), '(\n "This model has not been fitted yet. Call \'fit\' with appropriate arguments before using this method."\n )\n', (969, 1082), False, 'from sklearn.exceptions import NotFittedError\n'), ((1396, 1416), 'numpy.array', 'numpy.array', (['vectors'], {}), '(vectors)\n', (1407, 1416), False, 'import numpy\n'), ((2017, 2022), 'tensorflow.python.keras.optimizer_v2.gradient_descent.SGD', 'SGD', ([], {}), '()\n', (2020, 2022), False, 'from tensorflow.python.keras.optimizer_v2.gradient_descent import SGD\n'), ((1343, 1365), 'numpy.zeros', 'numpy.zeros', (['self.size'], {}), '(self.size)\n', (1354, 1365), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
# occiput
# Harvard University, Martinos Center for Biomedical Imaging
# Aalto University, Department of Computer Science
from . import Scintillators
from . import Collimators
from ...DataSources.FileSources.nifti import load_nifti
# Import NiftyPy ray-tracers
from tomolab.Core.NiftyRec import SPECT_project_parallelholes, SPECT_backproject_parallelholes
from ...Core import Image3D
from ...Core.Errors import UnexpectedParameter
from ...Visualization.Visualization import ProgressBar
from ...global_settings import svgwrite, has_svgwrite
# Import various other libraries
from numpy import *
from numpy.random import randint
from numpy import asfortranarray
from scipy import optimize
import scipy
import scipy.signal
import h5py
from PIL import Image
# Default parameters
DEFAULT_ITERATIONS = 20  # default number of EM iterations in estimate_activity()
DEFAULT_SUBSET_SIZE = 32  # default number of camera positions per EM subset
EPS = 1e-9  # numerical floor used in the EM ratio updates
# NOTE(review): GE_Infinia is expected to be defined later in this module.
__all__ = ['SPECT_Projection','SPECT_Static_Scan','GE_Infinia']
class SPECT_Projection:
    """SPECT projection object. Thin wrapper around the raw projection
    array (indexed [x, y, projection]) with save/visualisation helpers."""
    def __init__(self, data):
        # Raw projection data; to_image() slices it as data[:, :, index].
        self.data = data
    def get_data(self):
        """Returns the raw projection data (note that is can be accessed also as self.data ). """
        return self.data
    def save_to_file(self, filename):
        """Save the projection data to an HDF5 file under dataset 'data'."""
        h5f = h5py.File(filename, "w")
        h5f.create_dataset("data", data=self.data)
        # h5f.create_dataset('size_x', data=size_x)
        # h5f.create_dataset('size_y', data=size_y)
        h5f.close()
    def get_integral(self):
        """Total counts: sum over the whole projection array."""
        return self.data.sum()
    def to_image(self, data, index=0, scale=None, absolute_scale=False):
        """Convert projection 'index' of 'data' into an 8-bit RGB PIL image.

        With scale=None the slice is normalised to its own maximum; with a
        scale, it is applied as an absolute multiplier (absolute_scale=True)
        or on top of the per-slice normalisation.
        """
        a = float32(data[:, :, index].reshape((data.shape[0], data.shape[1])))
        if scale is None:
            a = 255.0 * (a) / (a.max() + 1e-12)  # 1e-12 guards all-zero slices
        else:
            if absolute_scale:
                a = scale * (a)
            else:
                a = scale * 255.0 * (a) / (a.max() + 1e-12)
        return Image.fromarray(a).convert("RGB")
    def display_in_browser(self, axial=True, azimuthal=False, index=0, scale=None):
        """Same as display(), forcing open_browser=True."""
        self.display(
            axial=axial, azimuthal=azimuthal, index=index, scale=scale, open_browser=True
        )
    def display(self, scale=None, open_browser=False, **kwargs):
        # Visualisation is currently a stub. Fix: '**kwargs' absorbs the
        # axial/azimuthal/index keywords that display_in_browser() forwards,
        # which previously raised a TypeError against this signature.
        pass
    '''
    def display(self, scale=None, open_browser=False):
        data = self.data
        d = DisplayNode()
        images = []
        progress_bar = ProgressBar(
            height="6px",
            width="100%%",
            background_color=C.LIGHT_GRAY,
            foreground_color=C.GRAY,
        )
        if scale is not None:
            scale = scale * 255.0 / (data.max() + 1e-12)
        else:
            scale = 255.0 / (data.max() + 1e-12)
        N_projections = self.data.shape[2]
        N_x = self.data.shape[0]
        N_y = self.data.shape[1]
        print(
            (
                "SPECT Projection [N_projections: %d N_x: %d N_y: %d]"
                % (N_projections, N_x, N_y)
            )
        )
        for i in range(N_projections):
            images.append(self.to_image(data, i, scale=scale, absolute_scale=True))
            progress_bar.set_percentage(i * 100.0 / N_projections)
        progress_bar.set_percentage(100.0)
        return d.display("tipix", images, open_browser)
    def _repr_html_(self):
        return self.display()._repr_html_()
    '''
class SubsetGenerator:
    """Generates 0/1 masks selecting subsets of the gantry angular positions
    (for random-subset iterative reconstruction).

    A mask has length N_positions with 1 marking an active camera position;
    callers typically consume it through numpy.where().
    """
    def __init__(self, N_positions):
        # Total number of gantry angular positions.
        self._N_positions = N_positions
    def new_subset(self, mode, subset_size):
        """Return a new mask with 'subset_size' active positions.

        Only 'random' selection is implemented; 'ordered' (and any other
        mode) raises UnexpectedParameter.
        """
        if mode == "random":
            return self._random_no_replacement(subset_size)
        elif mode == "ordered":
            raise UnexpectedParameter(
                "'%s' subset selection mode not yet supported." % str(mode)
            )
        else:
            raise UnexpectedParameter("'mode' parameter %s not recognised." % str(mode))
    def all_active(self):
        """Mask with every camera position active."""
        return ones((self._N_positions), dtype=uint32)
    def _random_no_replacement(self, subset_size):
        """Random mask of 'subset_size' distinct positions (rejection
        sampling); falls back to all_active() when subset_size covers all."""
        if subset_size >= self._N_positions:
            return self.all_active()
        # uint32 for consistency with all_active() (was int32).
        M = zeros((self._N_positions), dtype=uint32)
        n = 0
        while n < subset_size:
            active = randint(self._N_positions)
            if M[active] == 0:
                M[active] = 1
                n += 1
        return M
def deg_to_rad(deg):
    """Convert an angle (scalar or array) from degrees to radians."""
    return (deg * pi) / 180.0
def rad_to_deg(rad):
    """Convert an angle (scalar or array) from radians to degrees."""
    return (rad * 180.0) / pi
class SPECT_Static_Scan(object):
    def __init__(self):
        """Initialise a generic static SPECT scanner with default geometry,
        ideal scintillator and LEHR collimator; no measurement loaded."""
        self._name = "Generic SPECT Scanner"
        self._scanner_type = "SPECT"
        self._manufacturer = "No manufacturer"
        self._version = "0.0"
        # scanner parameters are named with 'self._p_xxx'
        self._p_gantry_angular_positions = 180 # [adim,integer]
        self._p_gantry_angular_position_first = 0.0 # [degrees]
        self._p_gantry_angular_position_last = 358.0 # [degrees]
        self._subset_generator = SubsetGenerator(self._p_gantry_angular_positions)
        self._p_scan_time_sec = 600.0 # [seconds]
        self._p_radius_mm = 300.0 # [mm]
        self._p_n_pix_x = 128 # [adim]
        self._p_n_pix_y = 128 # [adim]
        self._p_pix_size_x_mm = 2.5 # [mm]
        self._p_pix_size_y_mm = 2.5 # [mm]
        self.set_background_activity(0.0)
        self.set_background_attenuation(0.0)
        self.set_use_gpu(True)
        self.set_truncate_negative(False)
        self.set_scintillator(Scintillators.Ideal())
        self.set_collimator(Collimators.LEHR())
        # No projection data / attenuation map yet; normalisation is stale.
        self._measurement = None
        self._need_update_norm = True
        self._attenuation = None
    def get_name(self):
        """Return the scanner name."""
        return self._name
    def get_type(self):
        """Return the scanner type string (e.g. 'SPECT')."""
        return self._scanner_type
    def get_manufacturer(self):
        """Return the scanner manufacturer string."""
        return self._manufacturer
    def get_version(self):
        """Return the scanner model version string."""
        return self._version
def _get_parameters(self):
parameters = {}
dic = self.__dict__
for k in list(dic.keys()):
if k.startswith("_p_"):
parameters[k[3:]] = dic[k]
return parameters
    def get_gantry_angular_positions(self):
        """Return (first_angle_deg, last_angle_deg, N_positions)."""
        return (
            self._p_gantry_angular_position_first,
            self._p_gantry_angular_position_last,
            self._p_gantry_angular_positions,
        )
def set_gantry_angular_positions(
self, first_position_deg, last_position_deg, N_positions
):
if not (
isscalar(first_position_deg)
and isscalar(last_position_deg)
and isscalar(N_positions)
):
raise UnexpectedParameter("Expected scalar values.")
if not isinstance(N_positions, type(1)):
raise UnexpectedParameter("Expected an integer value.")
self._p_gantry_angular_position_first = first_position_deg
self._p_gantry_angular_position_last = last_position_deg
self._p_gantry_angular_positions = N_positions
self._subset_generator = SubsetGenerator(self._p_gantry_angular_positions)
    def get_scan_time(self):
        """Return the scan duration [seconds]."""
        return self._p_scan_time_sec
    def set_scan_time(self, scan_time_sec):
        """Set the scan duration [seconds]; raises UnexpectedParameter if not scalar."""
        if not isscalar(scan_time_sec):
            raise UnexpectedParameter("Expected a scalar value.")
        self._p_scan_time_sec = scan_time_sec
    def get_radius(self):
        """Return the gantry rotation radius [mm]."""
        return self._p_radius_mm
    def set_radius(self, radius_mm):
        """Set the gantry rotation radius [mm]; raises UnexpectedParameter if not scalar."""
        if not isscalar(radius_mm):
            raise UnexpectedParameter("Expected a scalar value.")
        self._p_radius_mm = radius_mm
    def get_n_pixels(self):
        """Return the detector matrix size as (n_pixels_x, n_pixels_y)."""
        return (self._p_n_pix_x, self._p_n_pix_y)
    def set_n_pixels(self, n_pixels_x, n_pixels_y):
        """Set the detector matrix size; invalidates the cached normalisation."""
        if (not isscalar(n_pixels_x)) or (not isscalar(n_pixels_y)):
            raise UnexpectedParameter(
                "Expected integer scalar values."
            )  # FIXME: make sure it is integer
        self._p_n_pix_x = n_pixels_x
        self._p_n_pix_y = n_pixels_y
        self._need_update_norm = True
    def get_pixel_size(self):
        """Return the detector pixel size as (size_x_mm, size_y_mm)."""
        return (self._p_pix_size_x_mm, self._p_pix_size_y_mm)
    def set_pixel_size(self, pixel_size_x, pixel_size_y):
        """Set the detector pixel size [mm]; raises UnexpectedParameter if not scalars."""
        if (not isscalar(pixel_size_x)) or (not isscalar(pixel_size_y)):
            raise UnexpectedParameter("Expected scalar values.")
        self._p_pix_size_x_mm = pixel_size_x
        self._p_pix_size_y_mm = pixel_size_y
    def get_scintillator(self):
        """Return the current scintillator model."""
        return self._scintillator
    def set_scintillator(self, scintillator):
        """Set the scintillator model; rebuilds the PSF and invalidates the
        cached normalisation."""
        if not isinstance(scintillator, Scintillators.BaseScintillatorSPECT):
            raise UnexpectedParameter("Expected an instance of BaseScintillatorSPECT")
        self._scintillator = scintillator
        self.__make_psf()
        self._need_update_norm = True
    def get_collimator(self):
        """Return the current collimator model."""
        return self._collimator
    def set_collimator(self, collimator):
        """Set the collimator model; rebuilds the PSF and invalidates the
        cached normalisation."""
        if not isinstance(collimator, Collimators.BaseCollimatorSPECT):
            raise UnexpectedParameter("Expected an instance of BaseCollimatorSPECT")
        self._collimator = collimator
        self.__make_psf()
        self._need_update_norm = True
    def set_background_activity(self, value):
        """Set the background activity level passed to the ray-tracers."""
        self._background_activity = value
def get_background_activity(self, value):
return self._background_activity
    def set_background_attenuation(self, value):
        """Set the background attenuation level passed to the ray-tracers."""
        self._background_attenuation = value
def get_background_attenuation(self, value):
return self._background_attenuation
    def set_use_gpu(self, value):
        """Enable/disable GPU ray-tracing (forwarded to the NiftyRec calls)."""
        self._use_gpu = value
    def set_truncate_negative(self, value):
        """Enable/disable truncation of negative values in the ray-tracers."""
        self._truncate_negative = value
    def get_camera_positions(self):
        """Camera angles [radians] as a float32 (N_positions, 1) array,
        evenly spaced between the first and last gantry angle."""
        return float32(
            linspace(
                deg_to_rad(self._p_gantry_angular_position_first),
                deg_to_rad(self._p_gantry_angular_position_last),
                self._p_gantry_angular_positions,
            ).reshape((self._p_gantry_angular_positions, 1))
        )
    def project(
        self, activity, attenuation=None, cameras=None, psf=None, subsets_array=None
    ):
        """Forward-project an activity volume into SPECT projections.

        'activity'/'attenuation' may be ndarrays or objects exposing a
        '.data' ndarray (e.g. Image3D). 'cameras' defaults to the scanner's
        angular positions; 'subsets_array' (0/1 mask) restricts projection
        to a subset of cameras. Returns a SPECT_Projection.
        """
        if isinstance(activity, ndarray):
            activity = float32(activity)
        else:
            activity = float32(activity.data)
        if attenuation is None:
            attenuation = self._attenuation
        if attenuation is not None:
            if isinstance(attenuation, ndarray):
                attenuation = float32(attenuation)
            else:
                attenuation = float32(attenuation.data)
        if cameras is None:
            cameras = self.get_camera_positions()
        # subsets: keep only the camera angles flagged by the mask
        if subsets_array is not None:
            cameras = cameras[where(subsets_array)]
        if psf is None:
            psf = self._psf
        proj = SPECT_project_parallelholes(
            activity,
            cameras,
            attenuation,
            psf,
            self._background_activity,
            self._background_attenuation,
            self._use_gpu,
            self._truncate_negative,
        )
        return SPECT_Projection(proj)
    def backproject(
        self, projection, attenuation=None, cameras=None, psf=None, subsets_array=None
    ):
        """Backproject SPECT projection data into an image volume.

        'projection'/'attenuation' may be ndarrays or objects exposing a
        '.data' ndarray. 'cameras' defaults to the scanner's angular
        positions; 'subsets_array' (0/1 mask) restricts backprojection to a
        subset of cameras. Returns an Image3D.
        """
        if isinstance(projection, ndarray):
            projection = float32(projection)
        else:
            projection = float32(projection.data)
        if attenuation is None:
            attenuation = self._attenuation
        if attenuation is not None:
            if isinstance(attenuation, ndarray):
                attenuation = float32(attenuation)
            else:
                attenuation = float32(attenuation.data)
        if cameras is None:
            cameras = self.get_camera_positions()
        # subsets: keep only the camera angles flagged by the mask
        if subsets_array is not None:
            cameras = cameras[where(subsets_array)]
        if psf is None:
            psf = self._psf
        backproj = SPECT_backproject_parallelholes(
            projection,
            cameras,
            attenuation,
            psf,
            self._background_activity,
            self._background_attenuation,
            self._use_gpu,
            self._truncate_negative,
        )
        return Image3D(backproj)
    def scan(self, activity_Bq, scan_time_sec=None):
        """Acquisition stub: simulation is not implemented, always returns 0."""
        if scan_time_sec is None:
            scan_time_sec = self.get_scan_time()
        sinogram = 0
        return sinogram
'''
def __make_probabilistic_graphical_model(self):
pass
from occiput.Visualization import Graph
self.graph = Graph(
{
"nodes": [
{"name": "activity", "type": 0},
{"name": "counts", "type": 0},
],
"links": [{"source": "activity", "target": "counts", "type": "t1"}],
}
)
'''
    def __make_psf(self):
        # Placeholder: no PSF is derived from the collimator/scintillator
        # models yet; call set_psf() to define one explicitly.  # FIXME
        self._psf = None  # FIXME
    def set_psf(self, fwhm0_mm=0.5, depth_dependence=0.0, n_pixels=5):
        """Build a depth-dependent Gaussian PSF stack into self._psf.

        fwhm0_mm: FWHM at zero distance; depth_dependence: FWHM growth per
        mm of distance from the collimator; n_pixels: kernel side length.
        Result: float32 array of shape (n_pixels, n_pixels, N), one kernel
        per image plane.
        """
        radius = self.get_radius()
        N = self._p_n_pix_x
        [pixx, pixy] = self.get_pixel_size()
        psf = zeros([n_pixels, n_pixels, N])
        def gaussian(fwhm, size):
            # 2-D Gaussian on a size x size grid parameterised by its FWHM:
            # exp(-4 ln2 r^2 / fwhm^2) equals 1/2 at r = fwhm/2.
            x = arange(0, size, 1, float)
            y = x[:, newaxis]
            x0 = y0 = (size - 1) / 2.0
            return exp(-4 * log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / fwhm ** 2)
        for i in range(N):
            # Distance of image plane i from the camera, clamped at zero.
            distance = radius - N / 2 * pixx + i * pixx
            if distance < 0:
                distance = 0
            fwhm_mm = fwhm0_mm + depth_dependence * distance
            fwhm = fwhm_mm / pixy  # convert FWHM from mm to pixels
            psf[:, :, i] = gaussian(fwhm, n_pixels)
        self._psf = float32(psf)
    def get_normalization(self):
        """Return the (cached) normalisation volume, recomputing it first if
        the scanner configuration changed since the last computation."""
        if self._need_update_norm:
            self._compute_normalisation()
        return self._norm
def _compute_normalisation(self):
subsets_array = self._subset_generator.all_active()
self._norm = self.backproject(
ones(
(self._p_n_pix_x, self._p_n_pix_y, self._p_gantry_angular_positions),
dtype=float32,
order="F",
)
).data
self._need_update_norm = False
    def set_attenuation(self, attenuation):
        """Set the attenuation map used by project()/backproject() by default."""
        self._attenuation = attenuation
    def load_attenuation_from_file(self, attenuation_file):
        """Load an attenuation map from a NIfTI file and set it as default."""
        attenuation = load_nifti(attenuation_file).data
        self.set_attenuation(attenuation)
def estimate_activity(
self,
attenuation=None,
psf=None,
iterations=DEFAULT_ITERATIONS,
subset_size=DEFAULT_SUBSET_SIZE,
subset_mode="random",
method="EM",
m=32,
factr=0.01,
pgtol=1e-16,
maxfun=10000,
smoothing=0.0,
activity=None,
):
progress_bar = ProgressBar()
progress_bar.set_percentage(0.1)
if activity == None:
activity = ones(
(self._p_n_pix_x, self._p_n_pix_y, self._p_n_pix_x),
dtype=float32,
order="F",
)
if attenuation is None:
attenuation = self._attenuation
if attenuation is not None:
if isinstance(attenuation, ndarray):
attenuation = float32(attenuation)
else:
attenuation = float32(attenuation.data)
if psf is None:
psf = self._psf
if method == "EM":
print("Reconstruction method: EM")
for i in range(iterations):
# Subsets:
if subset_size is None:
subsets_array = None
subset_size = self._p_gantry_angular_positions
elif subset_size >= self._p_gantry_angular_positions:
subsets_array = None
subset_size = self._p_gantry_angular_positions
else:
subsets_array = self._subset_generator.new_subset(
subset_mode, subset_size
)
if subsets_array is not None:
proj = self.project(
activity,
attenuation=attenuation,
psf=psf,
subsets_array=subsets_array,
).data
measurement = self._measurement[:, :, where(subsets_array)].reshape(
(self._p_n_pix_x, self._p_n_pix_y, subset_size)
)
measurement = asfortranarray(ascontiguousarray(measurement))
P = (measurement + EPS) / (proj + EPS)
norm = self.backproject(
ones(
(self._p_n_pix_x, self._p_n_pix_y, subset_size),
dtype=float32,
order="F",
),
attenuation=attenuation,
psf=psf,
subsets_array=subsets_array,
).data
update = (
self.backproject(
P,
attenuation=attenuation,
psf=psf,
subsets_array=subsets_array,
).data
+ EPS
) / (norm + EPS)
else:
proj = self.project(activity, attenuation=attenuation, psf=psf).data
P = (self._measurement + EPS) / (proj + EPS)
norm = self.get_normalization()
update = (
self.backproject(P, attenuation=attenuation, psf=psf).data + EPS
) / (norm + EPS)
activity = activity * update # * self.get_mask().data
progress_bar.set_percentage((i + 1) * 100.0 / iterations)
# print "Iteration: %d max act: %f min act: %f max proj: %f min proj: %f max norm: %f min norm: %f"%(i, activity.max(), activity.min(), proj.max(), proj.min(), norm.data.max(), norm.data.min() )
progress_bar.set_percentage(100.0)
elif method == "LBFGS":
print("Reconstruction method: LBFGS-B")
bounds = [(None, None)] * activity.size
for i in range(0, activity.size):
bounds[i] = (0, None)
args = [activity.shape, smoothing]
activity0 = float64(activity.reshape(activity.size))
# print "SIZE ACTIVITY0: ",activity0.shape
activity_rec, f, d = optimize.fmin_l_bfgs_b(
self.get_likelihood,
activity0,
fprime=self.get_gradient_activity,
m=m,
factr=factr,
pgtol=pgtol,
args=args,
maxfun=maxfun,
iprint=0,
bounds=bounds,
)
activity = float32(activity_rec.reshape(activity.shape))
progress_bar.set_percentage(100.0)
else:
raise UnexpectedParameter("Reconstruction method %s unknown" % method)
return Image3D(activity)
    def get_likelihood(self, activity, activity_size, smoothing=0.0):
        """Returns the likelihood value - given the activity. This at the moment implements the Poisson likelihood only.

        Note: returns the NEGATIVE Poisson log-likelihood (float64), suited
        to minimisers such as scipy.optimize.fmin_l_bfgs_b; 'smoothing' is
        accepted for signature compatibility but not used here.
        """
        eps = 1e-16
        sinogram = self._measurement
        # NOTE(review): 'psf' and 'attenuation' are assigned but unused —
        # project() falls back to self._psf / self._attenuation internally.
        psf = self._psf
        attenuation = None
        sinosize = sinogram.size
        activity = activity.reshape(activity_size)
        # Outside the feasible (non-negative) region: return 0.
        if any(activity < 0):
            return 0
        # if any(activity<0):
        # activity[activity<0]=0
        print(("MIN MAX activity: ", activity.min(), activity.max()))
        proj = self.project(activity).data  # FIXME: optionally use subsets
        print(("MIN MAX proj: ", proj.min(), proj.max()))
        log_proj = log(proj + eps)
        print(("MIN MAX log_proj: ", log_proj.min(), log_proj.max()))
        # Zero out -inf entries produced by log of (near-)zero projections.
        log_proj[isinf(log_proj)] = 0
        # Poisson log-likelihood: sum(-proj + measurement * log(proj)).
        d = (
            -proj.reshape(sinosize)
            + sinogram.reshape(sinosize) * log_proj.reshape(sinosize)
        ).sum()
        print(("func:", d))
        return -float64(d)
    def get_gradient_activity(self, activity, activity_size, smoothing=0.0):
        """Return minus the gradient of the Poisson log-likelihood w.r.t. activity.

        Parameters:
            activity: flattened activity estimate; reshaped to `activity_size`.
            activity_size: target shape (three dimensions are indexed below).
            smoothing: weight of the Laplacian smoothing prior term.

        Returns:
            float64 array of length prod(activity_size); sign flipped because
            the calling optimizer minimizes.
        """
        eps = 1e-16  # avoids division by zero in sinogram / proj
        sinogram = self._measurement
        psf = self._psf
        attenuation = None
        sinoshape = sinogram.shape
        activity = activity.reshape(activity_size)
        if any(activity < 0):
            # Clamp infeasible negative activities to zero before projecting.
            activity[activity < 0] = 0
        proj = self.project(activity).data + eps  # FIXME: optionally use subsets
        norm = self.get_normalization()
        # Poisson gradient: backproject(measured / projected) - normalization.
        back = (
            self.backproject(sinogram.reshape(sinoshape) / proj.reshape(sinoshape)).data
            + eps
        )
        grad = back - norm
        # print "SIZE0: ",activity_size, proj.shape, norm.shape, grad.shape
        # Discrete 2-D Laplacian kernel for the smoothing prior.
        kernel = asarray([[0, 1, 0], [1, -4, 1], [0, 1, 0]])
        a = activity.reshape([activity_size[0], activity_size[1], activity_size[2]])
        # NOTE(review): scipy.signal.convolve2d expects 2-D input but `a` is
        # reshaped to 3-D here - verify the expected shapes of activity_size.
        prior = asfortranarray(smoothing * scipy.signal.convolve2d(a, kernel, "same"))
        G = grad.reshape(activity.size) + prior.reshape(activity.size)
        G = G.reshape(activity_size[0] * activity_size[1] * activity_size[2])
        return -float64(G)
def get_likelihood_log(self, activity_log, activity_size, smoothing=0.0):
"""Returns the likelihood value - given the log of the activity.
This at the moment implements the Poisson likelihood only. This is useful to optimize the likelihood
or the posterior with respect to the log of the activity - e.g. to include the non negativity constraint. """
return self.get_likelihood(exp(activity_log), activity_size, smoothing)
def get_gradient_log_activity(self, activity_log, activity_size, smoothing=0.0):
"""Returns the derivative of the log-likelihood with respect to the log of the activity. At the moment, this
implements only the Poisson likelihood. This is useful to optimize the likelihood or the posterior with respect to the
log of the activity - e.g. to include the non negativity constraint. """
activity = exp(activity_log)
g = self.get_gradient_activity(activity, activity_size, smoothing)
return float64(g * activity)
def volume_render(self, volume, scale=1.0):
# FIXME: use the VolumeRenderer object in occiput.Visualization (improve it), the following is a quick fix:
if isinstance(volume, ndarray):
volume = float32(volume)
else:
volume = float32(volume.data)
proj = self.project(volume).data
proj[where(proj > proj.max() / scale)] = proj.max() / scale
return SPECT_Projection(proj)
    def load_measurement_file(self, filename):
        """Load a measurement from file. Placeholder - not implemented yet;
        see load_measurement_from_file for the NIfTI loading path."""
        pass
def set_measurement(self, measurement):
if not (
self._p_n_pix_x == measurement.shape[0]
and self._p_n_pix_y == measurement.shape[1]
and self._p_gantry_angular_positions == measurement.shape[2]
):
raise UnexpectedParameter(
"Measurement size is not compatible with n_pix_x, n_pix_y, gantry_angular_positions. "
)
self._measurement = measurement
def load_measurement_from_file(self, measurement_file):
measurement = load_nifti(measurement_file).data
self.set_measurement(measurement)
    def get_measurement(self):
        """Return the current measurement wrapped in a Volume object."""
        return Volume(self._measurement)
    def display_measurement(self):
        """Return the current measurement wrapped in a SPECT_Projection for
        interactive display (e.g. in a notebook)."""
        return SPECT_Projection(self._measurement)
    def _make_svg(self):
        """Build (and cache in self._svg_string) an SVG schematic of the
        scanner: collimator, scintillator, photomultipliers, imaging volume
        and gantry-rotation guides. Returns None when svgwrite is missing.
        Coordinates below live in a 100 x 100 viewbox."""
        if not has_svgwrite:
            self._svg_string = None
            return self._svg_string
        w = "100%"
        h = "100%"
        dwg = svgwrite.Drawing("SPECT.svg", size=(w, h), profile="full", debug=True)
        dwg.viewbox(width=100, height=100)
        # DETECTOR
        # collimator
        rect = dwg.add(dwg.rect(insert=(12, 30), size=(8, 40), rx=0.5, ry=0.5))
        rect.fill("grey", opacity=0.5).stroke("black", width=0.3, opacity=0.001)
        # scintillator
        rect = dwg.add(dwg.rect(insert=(9, 30), size=(3, 40), rx=0.5, ry=0.5))
        rect.fill("green", opacity=0.1).stroke("none", width=0.3, opacity=0.001)
        # photomultipliers
        for i in range(8):
            rect = dwg.add(
                dwg.rect(insert=(1, 31.2 + i * 4.8), size=(8, 4), rx=0.3, ry=0.3)
            )
            rect.fill("grey", opacity=0.25).stroke("none", width=0.3, opacity=0.001)
        # IMAGING VOLUME
        rect = dwg.add(dwg.rect(insert=(30, 30), size=(40, 40), rx=0.5, ry=0.5))
        rect.fill("grey", opacity=0.02).stroke("grey", width=0.3, opacity=0.02)
        # GEOMETRIC NOTATIONS
        # circle, gantry rotation
        circle = dwg.add(dwg.circle(center=(50, 50), r=30))
        circle.fill("none").stroke("grey", width=0.1).dasharray([0.5, 0.5])
        # center
        circle = dwg.add(dwg.circle(center=(50, 50), r=0.5))
        circle.fill("grey", opacity=0.1).stroke("grey", width=0.1)
        line = dwg.add(dwg.line(start=(50 - 1, 50), end=(50 + 1, 50)))
        line.stroke("grey", width=0.1)
        line = dwg.add(dwg.line(start=(50, 50 - 1), end=(50, 50 + 1)))
        line.stroke("grey", width=0.1)
        # line = dwg.add(dwg.polyline([(10, 10), (10, 100), (100, 100), (100, 10), (10, 10)],stroke='black', fill='none'))
        self._svg_string = dwg.tostring()
        return self._svg_string
def _repr_svg_(self):
self._make_svg()
return self._svg_string
class Gantry:
    """Stand-alone renderer of the scanner schematic as an SVG string.

    The drawing code mirrors the _make_svg method of the scanner class above;
    the markup is produced once at construction time and cached.
    """
    def __init__(self):
        # Render once; _repr_svg_ serves the cached string afterwards.
        self.svg_string = self.make_svg()
    def make_svg(self):
        """Return the SVG markup, or None when svgwrite is unavailable.
        Coordinates below live in a 100 x 100 viewbox."""
        if not has_svgwrite:
            self._svg_string = None
            return self._svg_string
        w = "100%"
        h = "100%"
        dwg = svgwrite.Drawing("test.svg", size=(w, h), profile="full", debug=True)
        dwg.viewbox(width=100, height=100)
        # DETECTOR
        # collimator
        rect = dwg.add(dwg.rect(insert=(12, 30), size=(8, 40), rx=0.5, ry=0.5))
        rect.fill("grey", opacity=0.5).stroke("black", width=0.3, opacity=0.001)
        # scintillator
        rect = dwg.add(dwg.rect(insert=(9, 30), size=(3, 40), rx=0.5, ry=0.5))
        rect.fill("green", opacity=0.1).stroke("none", width=0.3, opacity=0.001)
        # photomultipliers
        for i in range(8):
            rect = dwg.add(
                dwg.rect(insert=(1, 31.2 + i * 4.8), size=(8, 4), rx=0.3, ry=0.3)
            )
            rect.fill("grey", opacity=0.25).stroke("none", width=0.3, opacity=0.001)
        # IMAGING VOLUME
        rect = dwg.add(dwg.rect(insert=(30, 30), size=(40, 40), rx=0.5, ry=0.5))
        rect.fill("grey", opacity=0.02).stroke("grey", width=0.3, opacity=0.02)
        # GEOMETRIC NOTATIONS
        # circle, gantry rotation
        circle = dwg.add(dwg.circle(center=(50, 50), r=30))
        circle.fill("none").stroke("grey", width=0.1).dasharray([0.5, 0.5])
        # center
        circle = dwg.add(dwg.circle(center=(50, 50), r=0.5))
        circle.fill("grey", opacity=0.1).stroke("grey", width=0.1)
        line = dwg.add(dwg.line(start=(50 - 1, 50), end=(50 + 1, 50)))
        line.stroke("grey", width=0.1)
        line = dwg.add(dwg.line(start=(50, 50 - 1), end=(50, 50 + 1)))
        line.stroke("grey", width=0.1)
        # line = dwg.add(dwg.polyline([(10, 10), (10, 100), (100, 100), (100, 10), (10, 10)],stroke='black', fill='none'))
        return dwg.tostring()
    def _repr_svg_(self):
        """IPython rich-display hook: serve the cached SVG string."""
        return self.svg_string
class GE_Infinia(SPECT_Static_Scan):
    """Preconfigured static SPECT scan model for the GE Infinia scanner
    fitted with a LEHR (low-energy high-resolution) collimator."""
    def __init__(self):
        super(GE_Infinia, self).__init__()
        self._name = "GE Infinia SPECT Scanner with LEHR collimator"
| [
"h5py.File",
"tomolab.Core.NiftyRec.SPECT_project_parallelholes",
"scipy.signal.convolve2d",
"numpy.random.randint",
"tomolab.Core.NiftyRec.SPECT_backproject_parallelholes",
"PIL.Image.fromarray",
"scipy.optimize.fmin_l_bfgs_b"
] | [((1247, 1271), 'h5py.File', 'h5py.File', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (1256, 1271), False, 'import h5py\n'), ((10714, 10885), 'tomolab.Core.NiftyRec.SPECT_project_parallelholes', 'SPECT_project_parallelholes', (['activity', 'cameras', 'attenuation', 'psf', 'self._background_activity', 'self._background_attenuation', 'self._use_gpu', 'self._truncate_negative'], {}), '(activity, cameras, attenuation, psf, self.\n _background_activity, self._background_attenuation, self._use_gpu, self\n ._truncate_negative)\n', (10741, 10885), False, 'from tomolab.Core.NiftyRec import SPECT_project_parallelholes, SPECT_backproject_parallelholes\n'), ((11854, 12030), 'tomolab.Core.NiftyRec.SPECT_backproject_parallelholes', 'SPECT_backproject_parallelholes', (['projection', 'cameras', 'attenuation', 'psf', 'self._background_activity', 'self._background_attenuation', 'self._use_gpu', 'self._truncate_negative'], {}), '(projection, cameras, attenuation, psf, self\n ._background_activity, self._background_attenuation, self._use_gpu,\n self._truncate_negative)\n', (11885, 12030), False, 'from tomolab.Core.NiftyRec import SPECT_project_parallelholes, SPECT_backproject_parallelholes\n'), ((4164, 4190), 'numpy.random.randint', 'randint', (['self._N_positions'], {}), '(self._N_positions)\n', (4171, 4190), False, 'from numpy.random import randint\n'), ((1904, 1922), 'PIL.Image.fromarray', 'Image.fromarray', (['a'], {}), '(a)\n', (1919, 1922), False, 'from PIL import Image\n'), ((18579, 18760), 'scipy.optimize.fmin_l_bfgs_b', 'optimize.fmin_l_bfgs_b', (['self.get_likelihood', 'activity0'], {'fprime': 'self.get_gradient_activity', 'm': 'm', 'factr': 'factr', 'pgtol': 'pgtol', 'args': 'args', 'maxfun': 'maxfun', 'iprint': '(0)', 'bounds': 'bounds'}), '(self.get_likelihood, activity0, fprime=self.\n get_gradient_activity, m=m, factr=factr, pgtol=pgtol, args=args, maxfun\n =maxfun, iprint=0, bounds=bounds)\n', (18601, 18760), False, 'from scipy import optimize\n'), ((21271, 
21313), 'scipy.signal.convolve2d', 'scipy.signal.convolve2d', (['a', 'kernel', '"""same"""'], {}), "(a, kernel, 'same')\n", (21294, 21313), False, 'import scipy\n')] |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""export ckpt to model"""
import argparse
import numpy as np
from mindspore import context, Tensor
from mindspore.train.serialization import export, load_checkpoint
from src.bgcf import BGCF
from src.callback import ForwardBGCF
# Command-line interface for exporting a trained BGCF checkpoint.
parser = argparse.ArgumentParser(description="bgcf export")
parser.add_argument("--device_id", type=int, default=0, help="Device id")
parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint file path.")
parser.add_argument("--file_name", type=str, default="bgcf", help="output file name.")
parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="AIR", help="file format")
parser.add_argument("--device_target", type=str, choices=["Ascend", "GPU", "CPU"], default="Ascend",
                    help="device target")
parser.add_argument("--input_dim", type=int, choices=[64, 128], default=64, help="embedding dimension")
parser.add_argument("--embedded_dimension", type=int, default=64, help="output embedding dimension")
parser.add_argument("--row_neighs", type=int, default=40, help="num of sampling neighbors in raw graph")
parser.add_argument("--gnew_neighs", type=int, default=20, help="num of sampling neighbors in sample graph")
parser.add_argument("--activation", type=str, default="tanh", choices=["relu", "tanh"], help="activation function")
args = parser.parse_args()
# Static graph mode is required for model export.
context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target, device_id=args.device_id)
if __name__ == "__main__":
    # Dataset-specific sizes for the Amazon-Beauty graph used in training.
    num_user, num_item = 7068, 3570

    def _int32_zeros(*shape):
        """Dummy int32 Tensor of zeros fixing an exported input shape."""
        return Tensor(np.zeros(list(shape)).astype(np.int32))

    network = BGCF(
        [args.input_dim, num_user, num_item],
        args.embedded_dimension,
        args.activation,
        [0.0, 0.0, 0.0],
        num_user,
        num_item,
        args.input_dim,
    )
    load_checkpoint(args.ckpt_file, net=network)
    forward_net = ForwardBGCF(network)
    # Placeholder inputs: users/items plus sampled neighborhoods in the raw
    # and generated graphs, matching the forward network's signature.
    users = _int32_zeros(num_user)
    items = _int32_zeros(num_item)
    neg_items = _int32_zeros(num_item, 1)
    u_test_neighs = _int32_zeros(num_user, args.row_neighs)
    u_test_gnew_neighs = _int32_zeros(num_user, args.gnew_neighs)
    i_test_neighs = _int32_zeros(num_item, args.row_neighs)
    i_test_gnew_neighs = _int32_zeros(num_item, args.gnew_neighs)
    input_data = [users, items, neg_items,
                  u_test_neighs, u_test_gnew_neighs,
                  i_test_neighs, i_test_gnew_neighs]
    export(forward_net, *input_data,
           file_name=args.file_name, file_format=args.file_format)
| [
"mindspore.context.set_context",
"argparse.ArgumentParser",
"src.callback.ForwardBGCF",
"numpy.zeros",
"mindspore.train.serialization.load_checkpoint",
"mindspore.train.serialization.export",
"src.bgcf.BGCF"
] | [((908, 958), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""bgcf export"""'}), "(description='bgcf export')\n", (931, 958), False, 'import argparse\n'), ((2033, 2142), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': 'args.device_target', 'device_id': 'args.device_id'}), '(mode=context.GRAPH_MODE, device_target=args.\n device_target, device_id=args.device_id)\n', (2052, 2142), False, 'from mindspore import context, Tensor\n'), ((2217, 2359), 'src.bgcf.BGCF', 'BGCF', (['[args.input_dim, num_user, num_item]', 'args.embedded_dimension', 'args.activation', '[0.0, 0.0, 0.0]', 'num_user', 'num_item', 'args.input_dim'], {}), '([args.input_dim, num_user, num_item], args.embedded_dimension, args.\n activation, [0.0, 0.0, 0.0], num_user, num_item, args.input_dim)\n', (2221, 2359), False, 'from src.bgcf import BGCF\n'), ((2474, 2518), 'mindspore.train.serialization.load_checkpoint', 'load_checkpoint', (['args.ckpt_file'], {'net': 'network'}), '(args.ckpt_file, net=network)\n', (2489, 2518), False, 'from mindspore.train.serialization import export, load_checkpoint\n'), ((2538, 2558), 'src.callback.ForwardBGCF', 'ForwardBGCF', (['network'], {}), '(network)\n', (2549, 2558), False, 'from src.callback import ForwardBGCF\n'), ((3205, 3298), 'mindspore.train.serialization.export', 'export', (['forward_net', '*input_data'], {'file_name': 'args.file_name', 'file_format': 'args.file_format'}), '(forward_net, *input_data, file_name=args.file_name, file_format=args\n .file_format)\n', (3211, 3298), False, 'from mindspore.train.serialization import export, load_checkpoint\n'), ((2579, 2599), 'numpy.zeros', 'np.zeros', (['[num_user]'], {}), '([num_user])\n', (2587, 2599), True, 'import numpy as np\n'), ((2638, 2658), 'numpy.zeros', 'np.zeros', (['[num_item]'], {}), '([num_item])\n', (2646, 2658), True, 'import numpy as np\n'), ((2701, 2724), 'numpy.zeros', 'np.zeros', (['[num_item, 1]'], {}), 
'([num_item, 1])\n', (2709, 2724), True, 'import numpy as np\n'), ((2770, 2807), 'numpy.zeros', 'np.zeros', (['[num_user, args.row_neighs]'], {}), '([num_user, args.row_neighs])\n', (2778, 2807), True, 'import numpy as np\n'), ((2858, 2896), 'numpy.zeros', 'np.zeros', (['[num_user, args.gnew_neighs]'], {}), '([num_user, args.gnew_neighs])\n', (2866, 2896), True, 'import numpy as np\n'), ((2942, 2979), 'numpy.zeros', 'np.zeros', (['[num_item, args.row_neighs]'], {}), '([num_item, args.row_neighs])\n', (2950, 2979), True, 'import numpy as np\n'), ((3030, 3068), 'numpy.zeros', 'np.zeros', (['[num_item, args.gnew_neighs]'], {}), '([num_item, args.gnew_neighs])\n', (3038, 3068), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""\
Copyright (c) 2015-2018, MGH Computational Pathology
"""
import json
import six
from calicoml.core.algo.learners import SelectAndClassify
from calicoml.core.problem import Problem, ProblemVectorizer
from calicoml.core.serialization.model import ClassificationModel
from calicoml.core.tools import predict_web_service
import nose
from calicoml.core.tools.predict_web_service import format_datatypes
import numpy as np
import pandas as pd
from six import wraps, BytesIO
from sklearn.linear_model import LogisticRegression
def mock_problem():
    """Build a small balanced two-class Problem with two numeric features
    and one categorical text feature."""
    numeric = np.random.normal(size=(100, 2))
    outcomes = np.asarray([1] * 50 + [0] * 50)
    frame = pd.DataFrame({
        'featA': numeric[:, 0],
        'featB': numeric[:, 1],
        'featC': ['foo', 'bar'] * 50,
        'y': outcomes,
    })
    return Problem(frame, ['featA', 'featB', 'featC'], 'y', 1)
def mock_model():
    """Fit a logistic-regression pipeline on the mock problem and wrap it
    in a ClassificationModel for testing."""
    problem = mock_problem()
    learner = SelectAndClassify(
        selector=None,
        classifier=LogisticRegression(),
        preprocess=ProblemVectorizer(),
        name="test model",
    )
    fitted = learner.fit(problem)
    return ClassificationModel(fitted, problem)
def with_mock_service(func):
    """Decorator: install a mock model into the prediction web service and
    invoke the wrapped test with (test_client, model, ...)."""
    @wraps(func)
    def inner(*args, **kwargs):
        """Set up the service, then delegate to the decorated test."""
        model = mock_model()
        predict_web_service.model = model
        client = predict_web_service.app.test_client()
        return func(client, model, *args, **kwargs)
    return inner
def test_datatype_mapping():
    """Validates that we correctly map internal python/numpy types to either 'numeric' or 'text'"""
    def checkme(in_type, out_type):
        """Utility: checks a single type"""
        nose.tools.eq_(format_datatypes({'test': in_type}), {'test': out_type})
    # The deprecated aliases np.int / np.float were removed in NumPy 1.24 and
    # np.float_ in NumPy 2.0; the builtin and explicitly-sized types below
    # cover the same cases without crashing on modern NumPy.
    numeric_types = [int, float, np.int_, np.int32, np.int64, np.float64]
    if hasattr(np, 'float128'):  # not available on every platform (e.g. Windows)
        numeric_types.append(np.float128)
    for numeric_type in numeric_types:
        yield checkme, numeric_type, 'numeric'
    yield checkme, type(b'foo'), 'binary' if six.PY3 else 'text'
    yield checkme, type('foo'), 'text'
@with_mock_service
def test_model_info(app, _):
    """Tests that we return correct model metadata"""
    response = json.loads(app.get('/info').get_data(as_text=True))
    expected_top_level = {
        'name': 'test model',
        'outcome': 'y',
        'datatypes': {'featA': 'numeric', 'featB': 'numeric', 'featC': 'text'},
        'positive_outcome': 1,
    }
    for key, value in expected_top_level.items():
        nose.tools.eq_(response[key], value)
    training_set = response['training_set']
    nose.tools.eq_(training_set['prevalence'], 0.5)
    nose.tools.eq_(training_set['n_features'], 3)
    nose.tools.eq_(training_set['n_samples'], 100)
    nose.tools.assert_list_equal(response['features'], ['featA', 'featB', 'featC'])
def ws_predict(app, df, features):
    """Utility: calls /predict with samples from the given DataFrame, and returns the results as a DataFrame"""
    payload = []
    for _, row in df.iterrows():
        payload.append({feat: row[feat] for feat in features})
    response = app.post('/predict', data=json.dumps(payload),
                        headers={'content-type': 'application/json'})
    parsed = json.loads(response.get_data(as_text=True))
    return pd.DataFrame(parsed.get("scores"))
@with_mock_service
def test_train_set_predict(app, model):
    """Validates prediction on training set samples"""
    training_df = model.training_problem.dataframe
    predictions = ws_predict(app, training_df, ['featA', 'featB', 'featC'])
    np.testing.assert_allclose(predictions['score'].values, model.expected_scores)
@with_mock_service
def test_novel_predict(app, model):
    """Validates prediction on novel samples"""
    # np.random.normal(100) draws a *single* sample with mean 100; we want
    # 100 independent samples to match the 100 entries in 'featC'.
    df = pd.DataFrame({'featA': np.random.normal(size=100),
                       'featB': np.random.normal(size=100),
                       'featC': ['foo', 'bar', 'bar', 'foo'] * 25})
    predictions = ws_predict(app, df, ['featA', 'featB', 'featC'])
    np.testing.assert_allclose(predictions['score'].values, model.predict(df)['score'].values)
@with_mock_service
def test_upload(app, model):
    """Validates that we can upload files"""
    df = model.training_problem.dataframe
    # Serialize the training frame to a tab-separated in-memory file.
    buffer = six.StringIO()
    df.to_csv(buffer, sep='\t')
    buffer.seek(0)
    upload = {'file': (BytesIO(buffer.getvalue().encode('utf-8')), 'samples.txt')}
    response = app.post('/upload', buffered=True,
                        content_type='multipart/form-data',
                        data=upload)
    response_df = pd.DataFrame(json.loads(response.get_data(as_text=True)))
    np.testing.assert_allclose(response_df['featA'].values, df['featA'].values)
    np.testing.assert_allclose(response_df['featB'].values, df['featB'].values)
    nose.tools.assert_list_equal(list(response_df['featC']), list(df['featC'].values))
@with_mock_service
def test_get_training_set(app, model):
    """Tests that the service returns valid training data"""
    payload = json.loads(app.get('/training_data').get_data(as_text=True))
    training_df = pd.DataFrame(payload)
    reference = model.training_problem.dataframe
    np.testing.assert_allclose(training_df['featA'].values, reference['featA'].values)
    np.testing.assert_allclose(training_df['featB'].values, reference['featB'].values)
    nose.tools.assert_list_equal(list(training_df['featC']), list(reference['featC'].values))
| [
"pandas.DataFrame",
"calicoml.core.tools.predict_web_service.app.test_client",
"calicoml.core.problem.ProblemVectorizer",
"numpy.asarray",
"json.dumps",
"six.StringIO",
"nose.tools.assert_list_equal",
"calicoml.core.tools.predict_web_service.format_datatypes",
"calicoml.core.problem.Problem",
"nos... | [((618, 649), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100, 2)'}), '(size=(100, 2))\n', (634, 649), True, 'import numpy as np\n'), ((658, 689), 'numpy.asarray', 'np.asarray', (['([1] * 50 + [0] * 50)'], {}), '([1] * 50 + [0] * 50)\n', (668, 689), True, 'import numpy as np\n'), ((699, 791), 'pandas.DataFrame', 'pd.DataFrame', (["{'featA': X[:, 0], 'featB': X[:, 1], 'featC': ['foo', 'bar'] * 50, 'y': y}"], {}), "({'featA': X[:, 0], 'featB': X[:, 1], 'featC': ['foo', 'bar'] *\n 50, 'y': y})\n", (711, 791), True, 'import pandas as pd\n'), ((868, 916), 'calicoml.core.problem.Problem', 'Problem', (['df', "['featA', 'featB', 'featC']", '"""y"""', '(1)'], {}), "(df, ['featA', 'featB', 'featC'], 'y', 1)\n", (875, 916), False, 'from calicoml.core.problem import Problem, ProblemVectorizer\n'), ((1210, 1242), 'calicoml.core.serialization.model.ClassificationModel', 'ClassificationModel', (['logit', 'prob'], {}), '(logit, prob)\n', (1229, 1242), False, 'from calicoml.core.serialization.model import ClassificationModel\n'), ((1345, 1356), 'six.wraps', 'wraps', (['func'], {}), '(func)\n', (1350, 1356), False, 'from six import wraps, BytesIO\n'), ((2588, 2634), 'nose.tools.eq_', 'nose.tools.eq_', (["response['name']", '"""test model"""'], {}), "(response['name'], 'test model')\n", (2602, 2634), False, 'import nose\n'), ((2639, 2679), 'nose.tools.eq_', 'nose.tools.eq_', (["response['outcome']", '"""y"""'], {}), "(response['outcome'], 'y')\n", (2653, 2679), False, 'import nose\n'), ((2684, 2784), 'nose.tools.eq_', 'nose.tools.eq_', (["response['datatypes']", "{'featA': 'numeric', 'featB': 'numeric', 'featC': 'text'}"], {}), "(response['datatypes'], {'featA': 'numeric', 'featB':\n 'numeric', 'featC': 'text'})\n", (2698, 2784), False, 'import nose\n'), ((2785, 2832), 'nose.tools.eq_', 'nose.tools.eq_', (["response['positive_outcome']", '(1)'], {}), "(response['positive_outcome'], 1)\n", (2799, 2832), False, 'import nose\n'), ((2837, 2896), 'nose.tools.eq_', 
'nose.tools.eq_', (["response['training_set']['prevalence']", '(0.5)'], {}), "(response['training_set']['prevalence'], 0.5)\n", (2851, 2896), False, 'import nose\n'), ((2901, 2958), 'nose.tools.eq_', 'nose.tools.eq_', (["response['training_set']['n_features']", '(3)'], {}), "(response['training_set']['n_features'], 3)\n", (2915, 2958), False, 'import nose\n'), ((2963, 3021), 'nose.tools.eq_', 'nose.tools.eq_', (["response['training_set']['n_samples']", '(100)'], {}), "(response['training_set']['n_samples'], 100)\n", (2977, 3021), False, 'import nose\n'), ((3026, 3105), 'nose.tools.assert_list_equal', 'nose.tools.assert_list_equal', (["response['features']", "['featA', 'featB', 'featC']"], {}), "(response['features'], ['featA', 'featB', 'featC'])\n", (3054, 3105), False, 'import nose\n'), ((3761, 3839), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["predictions['score'].values", 'model.expected_scores'], {}), "(predictions['score'].values, model.expected_scores)\n", (3787, 3839), True, 'import numpy as np\n'), ((4396, 4410), 'six.StringIO', 'six.StringIO', ([], {}), '()\n', (4408, 4410), False, 'import six\n'), ((4804, 4879), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["response_df['featA'].values", "df['featA'].values"], {}), "(response_df['featA'].values, df['featA'].values)\n", (4830, 4879), True, 'import numpy as np\n'), ((4884, 4959), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["response_df['featB'].values", "df['featB'].values"], {}), "(response_df['featB'].values, df['featB'].values)\n", (4910, 4959), True, 'import numpy as np\n'), ((5262, 5284), 'pandas.DataFrame', 'pd.DataFrame', (['response'], {}), '(response)\n', (5274, 5284), True, 'import pandas as pd\n'), ((5290, 5400), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["training_df['featA'].values", "model.training_problem.dataframe['featA'].values"], {}), "(training_df['featA'].values, model.\n 
training_problem.dataframe['featA'].values)\n", (5316, 5400), True, 'import numpy as np\n'), ((5400, 5510), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["training_df['featB'].values", "model.training_problem.dataframe['featB'].values"], {}), "(training_df['featB'].values, model.\n training_problem.dataframe['featB'].values)\n", (5426, 5510), True, 'import numpy as np\n'), ((1526, 1563), 'calicoml.core.tools.predict_web_service.app.test_client', 'predict_web_service.app.test_client', ([], {}), '()\n', (1561, 1563), False, 'from calicoml.core.tools import predict_web_service\n'), ((1865, 1900), 'calicoml.core.tools.predict_web_service.format_datatypes', 'format_datatypes', (["{'test': in_type}"], {}), "({'test': in_type})\n", (1881, 1900), False, 'from calicoml.core.tools.predict_web_service import format_datatypes\n'), ((3394, 3413), 'json.dumps', 'json.dumps', (['samples'], {}), '(samples)\n', (3404, 3413), False, 'import json\n'), ((3977, 3998), 'numpy.random.normal', 'np.random.normal', (['(100)'], {}), '(100)\n', (3993, 3998), True, 'import numpy as np\n'), ((4032, 4053), 'numpy.random.normal', 'np.random.normal', (['(100)'], {}), '(100)\n', (4048, 4053), True, 'import numpy as np\n'), ((1085, 1105), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (1103, 1105), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1148, 1167), 'calicoml.core.problem.ProblemVectorizer', 'ProblemVectorizer', ([], {}), '()\n', (1165, 1167), False, 'from calicoml.core.problem import Problem, ProblemVectorizer\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
import tensorflow as tf
from core import plotlib
from core import imgproc
_TMPDIR = "tmp"  # Directory where rendered test images are written.
tf.logging.set_verbosity(tf.logging.INFO)  # Surface the output-path log lines.
class PlotLibTest(tf.test.TestCase):
  """Tests for the drawing helpers in core.plotlib.

  Each test renders an image to _TMPDIR for visual inspection; the
  py_-prefixed tests exercise the pure-python helpers, the others the
  TF-graph wrappers.
  """
  def test_py_convert_to_heatmap(self):
    """Renders a Gaussian kernel as a heatmap via the pure-python helper."""
    kernel = imgproc._py_gaussian_kernel(ksize=100)
    kernel_heatmap = plotlib._py_convert_to_heatmap(kernel, normalize=True)
    kernel_heatmap = (kernel_heatmap * 255).astype(np.uint8)
    filename = _TMPDIR + "/py_convert_to_heatmap.png"
    cv2.imwrite(filename, kernel_heatmap[:, :, ::-1])  # RGB to BGR.
    tf.logging.info("The kernel image is written to %s.", filename)
  def test_convert_to_heatmap(self):
    """Renders a Gaussian kernel as a heatmap via the TF graph op."""
    image = tf.placeholder(tf.float32, shape=[None, None])
    heatmap = plotlib.convert_to_heatmap(
        tf.expand_dims(image, 0), normalize=True)
    with self.test_session() as sess:
      kernel = imgproc._py_gaussian_kernel(ksize=100)
      kernel_heatmap = sess.run(heatmap[0], feed_dict={image: kernel})
      kernel_heatmap = (kernel_heatmap * 255).astype(np.uint8)
      filename = _TMPDIR + "/convert_to_heatmap.png"
      cv2.imwrite(filename, kernel_heatmap[:, :, ::-1])  # RGB to BGR.
      tf.logging.info("The kernel image is written to %s.", filename)
  def test_py_draw_rectangles(self):
    """Draws labeled, scored boxes on a blank image (pure-python helper)."""
    image = np.zeros((480, 640, 3), dtype=np.uint8)
    canvas = plotlib._py_draw_rectangles(
        image,
        boxes=[[0.1, 0.25, 0.9, 0.75], [0.2, 0.35, 0.8, 0.65]],
        scores=[0.1, 0.2],
        labels=["box1", "box2"],
        color=(255, 0, 0),
        thickness=1,
        fontscale=1.0)
    filename = _TMPDIR + "/py_draw_rectangles.png"
    cv2.imwrite(filename, canvas[:, :, ::-1])  # RGB to BGR.
    tf.logging.info("The image with rectangles is written to %s.", filename)
  def test_py_draw_caption(self):
    """Draws a text caption on a blank image (pure-python helper)."""
    image = np.zeros((480, 640, 3), dtype=np.uint8)
    canvas = plotlib._py_draw_caption(
        image,
        caption="hello, world",
        org=(10, 10),
        color=(255, 0, 0),
        thickness=1,
        fontscale=1.0)
    filename = _TMPDIR + "/py_draw_caption.png"
    cv2.imwrite(filename, canvas[:, :, ::-1])  # RGB to BGR.
    tf.logging.info("The image with caption is written to %s.", filename)
  def test_draw_rectangles(self):
    """Draws labeled, scored boxes via the TF graph op."""
    image = tf.placeholder(tf.uint8, shape=[None, None, 3])
    boxes = tf.placeholder(tf.float32, shape=[None, 4])
    scores = tf.placeholder(tf.float32, shape=[None])
    labels = tf.placeholder(tf.string, shape=[None])
    canvas = plotlib.draw_rectangles(
        image=tf.expand_dims(image, axis=0),
        boxes=tf.expand_dims(boxes, axis=0),
        scores=tf.expand_dims(scores, axis=0),
        labels=tf.expand_dims(labels, axis=0),
        color=(0, 0, 255),
        thickness=1,
        fontscale=1.0)
    with self.test_session() as sess:
      canvas = sess.run(
          canvas[0],
          feed_dict={
              image: np.zeros((480, 640, 3), dtype=np.uint8),
              boxes: [[0.1, 0.25, 0.9, 0.75], [0.2, 0.35, 0.8, 0.65]],
              scores: [0.123, 0.456],
              labels: ['bbox1', 'bbox2']
          })
      filename = _TMPDIR + "/draw_rectangles.png"
      cv2.imwrite(filename, canvas[:, :, ::-1])  # RGB to BGR.
      tf.logging.info("The image with rectangle is written to %s.", filename)
  def test_draw_caption(self):
    """Draws a text caption via the TF graph op."""
    image = tf.placeholder(tf.uint8, shape=[None, None, 3])
    caption = tf.placeholder(tf.string, shape=[])
    canvas = plotlib.draw_caption(
        tf.expand_dims(image, axis=0),
        tf.expand_dims(caption, axis=0),
        org=(20, 20),
        color=(0, 0, 255),
        thickness=1,
        fontscale=1.0)
    with self.test_session() as sess:
      canvas = sess.run(
          canvas[0],
          feed_dict={
              image: np.zeros((480, 640, 3), dtype=np.uint8),
              caption: 'bye bye, world!'
          })
      filename = _TMPDIR + "/draw_caption.png"
      cv2.imwrite(filename, canvas[:, :, ::-1])  # RGB to BGR.
      tf.logging.info("The image with caption is written to %s.", filename)
if __name__ == '__main__':
  # Run all PlotLibTest cases when executed as a script.
  tf.test.main()
| [
"tensorflow.test.main",
"core.plotlib._py_draw_caption",
"tensorflow.logging.info",
"core.plotlib._py_draw_rectangles",
"cv2.imwrite",
"numpy.zeros",
"tensorflow.logging.set_verbosity",
"core.imgproc._py_gaussian_kernel",
"tensorflow.placeholder",
"core.plotlib._py_convert_to_heatmap",
"tensorfl... | [((233, 274), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (257, 274), True, 'import tensorflow as tf\n'), ((4551, 4565), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (4563, 4565), True, 'import tensorflow as tf\n'), ((374, 412), 'core.imgproc._py_gaussian_kernel', 'imgproc._py_gaussian_kernel', ([], {'ksize': '(100)'}), '(ksize=100)\n', (401, 412), False, 'from core import imgproc\n'), ((438, 492), 'core.plotlib._py_convert_to_heatmap', 'plotlib._py_convert_to_heatmap', (['kernel'], {'normalize': '(True)'}), '(kernel, normalize=True)\n', (468, 492), False, 'from core import plotlib\n'), ((625, 674), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'kernel_heatmap[:, :, ::-1]'], {}), '(filename, kernel_heatmap[:, :, ::-1])\n', (636, 674), False, 'import cv2\n'), ((698, 761), 'tensorflow.logging.info', 'tf.logging.info', (['"""The kernel image is written to %s."""', 'filename'], {}), "('The kernel image is written to %s.', filename)\n", (713, 761), True, 'import tensorflow as tf\n'), ((818, 864), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, None]'}), '(tf.float32, shape=[None, None])\n', (832, 864), True, 'import tensorflow as tf\n'), ((1274, 1323), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'kernel_heatmap[:, :, ::-1]'], {}), '(filename, kernel_heatmap[:, :, ::-1])\n', (1285, 1323), False, 'import cv2\n'), ((1347, 1410), 'tensorflow.logging.info', 'tf.logging.info', (['"""The kernel image is written to %s."""', 'filename'], {}), "('The kernel image is written to %s.', filename)\n", (1362, 1410), True, 'import tensorflow as tf\n'), ((1467, 1506), 'numpy.zeros', 'np.zeros', (['(480, 640, 3)'], {'dtype': 'np.uint8'}), '((480, 640, 3), dtype=np.uint8)\n', (1475, 1506), True, 'import numpy as np\n'), ((1524, 1715), 'core.plotlib._py_draw_rectangles', 'plotlib._py_draw_rectangles', (['image'], {'boxes': '[[0.1, 0.25, 0.9, 0.75], [0.2, 0.35, 0.8, 
0.65]]', 'scores': '[0.1, 0.2]', 'labels': "['box1', 'box2']", 'color': '(255, 0, 0)', 'thickness': '(1)', 'fontscale': '(1.0)'}), "(image, boxes=[[0.1, 0.25, 0.9, 0.75], [0.2, \n 0.35, 0.8, 0.65]], scores=[0.1, 0.2], labels=['box1', 'box2'], color=(\n 255, 0, 0), thickness=1, fontscale=1.0)\n", (1551, 1715), False, 'from core import plotlib\n'), ((1855, 1896), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'canvas[:, :, ::-1]'], {}), '(filename, canvas[:, :, ::-1])\n', (1866, 1896), False, 'import cv2\n'), ((1920, 1992), 'tensorflow.logging.info', 'tf.logging.info', (['"""The image with rectangles is written to %s."""', 'filename'], {}), "('The image with rectangles is written to %s.', filename)\n", (1935, 1992), True, 'import tensorflow as tf\n'), ((2046, 2085), 'numpy.zeros', 'np.zeros', (['(480, 640, 3)'], {'dtype': 'np.uint8'}), '((480, 640, 3), dtype=np.uint8)\n', (2054, 2085), True, 'import numpy as np\n'), ((2103, 2224), 'core.plotlib._py_draw_caption', 'plotlib._py_draw_caption', (['image'], {'caption': '"""hello, world"""', 'org': '(10, 10)', 'color': '(255, 0, 0)', 'thickness': '(1)', 'fontscale': '(1.0)'}), "(image, caption='hello, world', org=(10, 10), color\n =(255, 0, 0), thickness=1, fontscale=1.0)\n", (2127, 2224), False, 'from core import plotlib\n'), ((2354, 2395), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'canvas[:, :, ::-1]'], {}), '(filename, canvas[:, :, ::-1])\n', (2365, 2395), False, 'import cv2\n'), ((2419, 2488), 'tensorflow.logging.info', 'tf.logging.info', (['"""The image with caption is written to %s."""', 'filename'], {}), "('The image with caption is written to %s.', filename)\n", (2434, 2488), True, 'import tensorflow as tf\n'), ((2542, 2589), 'tensorflow.placeholder', 'tf.placeholder', (['tf.uint8'], {'shape': '[None, None, 3]'}), '(tf.uint8, shape=[None, None, 3])\n', (2556, 2589), True, 'import tensorflow as tf\n'), ((2606, 2649), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 4]'}), '(tf.float32, 
shape=[None, 4])\n', (2620, 2649), True, 'import tensorflow as tf\n'), ((2667, 2707), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None]'}), '(tf.float32, shape=[None])\n', (2681, 2707), True, 'import tensorflow as tf\n'), ((2725, 2764), 'tensorflow.placeholder', 'tf.placeholder', (['tf.string'], {'shape': '[None]'}), '(tf.string, shape=[None])\n', (2739, 2764), True, 'import tensorflow as tf\n'), ((3536, 3577), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'canvas[:, :, ::-1]'], {}), '(filename, canvas[:, :, ::-1])\n', (3547, 3577), False, 'import cv2\n'), ((3601, 3672), 'tensorflow.logging.info', 'tf.logging.info', (['"""The image with rectangle is written to %s."""', 'filename'], {}), "('The image with rectangle is written to %s.', filename)\n", (3616, 3672), True, 'import tensorflow as tf\n'), ((3723, 3770), 'tensorflow.placeholder', 'tf.placeholder', (['tf.uint8'], {'shape': '[None, None, 3]'}), '(tf.uint8, shape=[None, None, 3])\n', (3737, 3770), True, 'import tensorflow as tf\n'), ((3789, 3824), 'tensorflow.placeholder', 'tf.placeholder', (['tf.string'], {'shape': '[]'}), '(tf.string, shape=[])\n', (3803, 3824), True, 'import tensorflow as tf\n'), ((4383, 4424), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'canvas[:, :, ::-1]'], {}), '(filename, canvas[:, :, ::-1])\n', (4394, 4424), False, 'import cv2\n'), ((4448, 4517), 'tensorflow.logging.info', 'tf.logging.info', (['"""The image with caption is written to %s."""', 'filename'], {}), "('The image with caption is written to %s.', filename)\n", (4463, 4517), True, 'import tensorflow as tf\n'), ((923, 947), 'tensorflow.expand_dims', 'tf.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (937, 947), True, 'import tensorflow as tf\n'), ((1029, 1067), 'core.imgproc._py_gaussian_kernel', 'imgproc._py_gaussian_kernel', ([], {'ksize': '(100)'}), '(ksize=100)\n', (1056, 1067), False, 'from core import imgproc\n'), ((3877, 3906), 'tensorflow.expand_dims', 'tf.expand_dims', (['image'], 
{'axis': '(0)'}), '(image, axis=0)\n', (3891, 3906), True, 'import tensorflow as tf\n'), ((3920, 3951), 'tensorflow.expand_dims', 'tf.expand_dims', (['caption'], {'axis': '(0)'}), '(caption, axis=0)\n', (3934, 3951), True, 'import tensorflow as tf\n'), ((2826, 2855), 'tensorflow.expand_dims', 'tf.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (2840, 2855), True, 'import tensorflow as tf\n'), ((2875, 2904), 'tensorflow.expand_dims', 'tf.expand_dims', (['boxes'], {'axis': '(0)'}), '(boxes, axis=0)\n', (2889, 2904), True, 'import tensorflow as tf\n'), ((2925, 2955), 'tensorflow.expand_dims', 'tf.expand_dims', (['scores'], {'axis': '(0)'}), '(scores, axis=0)\n', (2939, 2955), True, 'import tensorflow as tf\n'), ((2976, 3006), 'tensorflow.expand_dims', 'tf.expand_dims', (['labels'], {'axis': '(0)'}), '(labels, axis=0)\n', (2990, 3006), True, 'import tensorflow as tf\n'), ((3247, 3286), 'numpy.zeros', 'np.zeros', (['(480, 640, 3)'], {'dtype': 'np.uint8'}), '((480, 640, 3), dtype=np.uint8)\n', (3255, 3286), True, 'import numpy as np\n'), ((4218, 4257), 'numpy.zeros', 'np.zeros', (['(480, 640, 3)'], {'dtype': 'np.uint8'}), '((480, 640, 3), dtype=np.uint8)\n', (4226, 4257), True, 'import numpy as np\n')] |
"""Evaluation Metrics for Genomics Datasets."""
import numpy as np
from deepchem.data import NumpyDataset
from scipy.signal import correlate2d
def get_motif_scores(encoded_sequences,
                     motif_names,
                     max_scores=None,
                     return_positions=False,
                     GC_fraction=0.4):
  """Score sequences against ENCODE motif PWMs as log-odds vs a GC background.

  Parameters
  ----------
  encoded_sequences : 4darray
    (N_sequences, N_letters, sequence_length, 1) array.
  motif_names : list of strings
    Names resolvable by simdna's ENCODE motif collection.
  max_scores : int, optional
    If given, keep only the ``max_scores`` largest scores per motif.
  return_positions : boolean, optional
    If True (and ``max_scores`` is set), also return the positions of the
    top scores, concatenated after the scores.
  GC_fraction : float, optional
    Background GC content used to build the null PWM.

  Returns
  -------
  (N_sequences, num_motifs, seq_length) complete score array by default.
  If max_scores, (N_sequences, num_motifs*max_scores) max score array.
  If max_scores and return_positions, (N_sequences, 2*num_motifs*max_scores)
  array with max scores and their positions.
  """
  import simdna
  from simdna import synthetic
  loaded_motifs = synthetic.LoadedEncodeMotifs(
      simdna.ENCODE_MOTIFS_PATH, pseudocountProb=0.001)
  num_samples, _, seq_length, _ = encoded_sequences.shape
  scores = np.ones((num_samples, len(motif_names), seq_length))
  for motif_idx, motif_name in enumerate(motif_names):
    pwm = loaded_motifs.getPwm(motif_name).getRows().T
    # Null model: a position-independent background column with the requested
    # GC content, repeated to the motif's length (0.5 normalizes the pair).
    background_column = [1 - GC_fraction, GC_fraction, GC_fraction,
                         1 - GC_fraction]
    gc_pwm = 0.5 * np.array([background_column] * len(pwm[0])).T
    # Per-position log-odds of the motif PWM against the GC background.
    motif_scores = get_pssm_scores(encoded_sequences, np.log(pwm))
    background_scores = get_pssm_scores(encoded_sequences, np.log(gc_pwm))
    scores[:, motif_idx, :] = motif_scores - background_scores
  if max_scores is None:
    return scores
  # Largest max_scores values per (sequence, motif), in descending order.
  sorted_scores = np.sort(scores)[:, :, ::-1][:, :, :max_scores]
  flat_scores = sorted_scores.reshape(
      (num_samples, len(motif_names) * max_scores))
  if not return_positions:
    return flat_scores
  sorted_positions = scores.argsort()[:, :, ::-1][:, :, :max_scores]
  flat_positions = sorted_positions.reshape(
      (num_samples, len(motif_names) * max_scores))
  return np.concatenate((flat_scores, flat_positions), axis=1)
def get_pssm_scores(encoded_sequences, pssm):
  """
  Convolves pssm and its reverse complement with encoded sequences
  and returns the maximum score at each position of each sequence.
  Parameters
  ----------
  encoded_sequences: 3darray
   (N_sequences, N_letters, sequence_length, 1) array
  pssm: 2darray
    (4, pssm_length) array
  Returns
  -------
  scores: 2darray
    (N_sequences, sequence_length)
  """
  sequences = encoded_sequences.squeeze(axis=3)
  n_bases = sequences.shape[1]
  fwd_per_base = []
  rc_per_base = []
  for base in range(n_bases):
    row = pssm[base][None]
    # Forward strand: correlate this PSSM row with its own base channel.
    fwd_per_base.append(correlate2d(sequences[:, base, :], row, mode='same'))
    # Reverse complement: the reversed row is applied to channel -(base + 1),
    # i.e. base channels are assumed to pair complementarily end-to-end.
    rc_per_base.append(
        correlate2d(sequences[:, -(base + 1), :], row[:, ::-1], mode='same'))
  # Sum the per-base contributions, then keep the better strand per position.
  fwd_scores = np.sum(fwd_per_base, axis=0)
  rc_scores = np.sum(rc_per_base, axis=0)
  return np.maximum(fwd_scores, rc_scores)
def in_silico_mutagenesis(model, X):
  """Computes in-silico-mutagenesis scores.

  Every position of every sequence is mutated to every letter; the score is
  the drop of the model's prediction relative to the wild-type sequence.

  Parameters
  ----------
  model: TensorGraph
    Currently only SequenceDNN will work, but other models may be added.
  X: ndarray
    Shape (N_sequences, N_letters, sequence_length, 1)
  Returns
  -------
  (num_task, N_sequences, N_letters, sequence_length, 1) ISM score array.
  """
  #Shape (N_sequences, N_letters, sequence_length, 1, num_tasks)
  mutagenesis_scores = np.empty(X.shape + (model.num_tasks,), dtype=np.float32)
  # Shape (N_sequences, num_tasks)
  wild_type_predictions = model.predict(NumpyDataset(X))
  # Shape (N_sequences, num_tasks, 1, 1, 1)
  wild_type_predictions = wild_type_predictions[:, np.newaxis, np.newaxis,
                                                np.newaxis]
  for sequence_index, (sequence, wild_type_prediction) in enumerate(
      zip(X, wild_type_predictions)):
    # Mutates every position of the sequence to every letter
    # Shape (N_letters * sequence_length, N_letters, sequence_length, 1)
    # Breakdown:
    # Shape of sequence[np.newaxis] (1, N_letters, sequence_length, 1)
    mutated_sequences = np.repeat(
        sequence[np.newaxis], np.prod(sequence.shape), axis=0)
    # remove wild-type
    # len(arange) = N_letters * sequence_length
    arange = np.arange(len(mutated_sequences))
    # len(horizontal cycle) = N_letters * sequence_length
    # horizontal_cycle = [0..L-1, 0..L-1, ...]: copy k targets position k % L.
    horizontal_cycle = np.tile(np.arange(sequence.shape[1]), sequence.shape[0])
    # Zero out the targeted column (all letters) of each copy.
    mutated_sequences[arange, :, horizontal_cycle, :] = 0
    # add mutant
    # vertical_repeat = [0]*L + [1]*L + ...: copy k sets letter k // L, so the
    # row-major reshape below maps copies back to (letter, position) order.
    vertical_repeat = np.repeat(np.arange(sequence.shape[0]), sequence.shape[1])
    mutated_sequences[arange, vertical_repeat, horizontal_cycle, :] = 1
    # make mutant predictions
    mutated_predictions = model.predict(NumpyDataset(mutated_sequences))
    mutated_predictions = mutated_predictions.reshape(sequence.shape +
                                                      (model.num_tasks,))
    # Score = wild-type prediction minus mutant prediction (broadcast over
    # all mutations of this sequence).
    mutagenesis_scores[
        sequence_index] = wild_type_prediction - mutated_predictions
  # Move the task axis to the front: (num_tasks, N_sequences, ...).
  rolled_scores = np.rollaxis(mutagenesis_scores, -1)
  return rolled_scores
| [
"numpy.full_like",
"numpy.maximum",
"numpy.log",
"numpy.empty",
"scipy.signal.correlate2d",
"numpy.sort",
"numpy.arange",
"simdna.synthetic.LoadedEncodeMotifs",
"deepchem.data.NumpyDataset",
"numpy.rollaxis",
"numpy.prod"
] | [((975, 1053), 'simdna.synthetic.LoadedEncodeMotifs', 'synthetic.LoadedEncodeMotifs', (['simdna.ENCODE_MOTIFS_PATH'], {'pseudocountProb': '(0.001)'}), '(simdna.ENCODE_MOTIFS_PATH, pseudocountProb=0.001)\n', (1003, 1053), False, 'from simdna import synthetic\n'), ((2739, 2786), 'numpy.full_like', 'np.full_like', (['encoded_sequences', '(-np.inf)', 'float'], {}), '(encoded_sequences, -np.inf, float)\n', (2751, 2786), True, 'import numpy as np\n'), ((2801, 2848), 'numpy.full_like', 'np.full_like', (['encoded_sequences', '(-np.inf)', 'float'], {}), '(encoded_sequences, -np.inf, float)\n', (2813, 2848), True, 'import numpy as np\n'), ((3477, 3510), 'numpy.maximum', 'np.maximum', (['fwd_scores', 'rc_scores'], {}), '(fwd_scores, rc_scores)\n', (3487, 3510), True, 'import numpy as np\n'), ((3991, 4047), 'numpy.empty', 'np.empty', (['(X.shape + (model.num_tasks,))'], {'dtype': 'np.float32'}), '(X.shape + (model.num_tasks,), dtype=np.float32)\n', (3999, 4047), True, 'import numpy as np\n'), ((5593, 5628), 'numpy.rollaxis', 'np.rollaxis', (['mutagenesis_scores', '(-1)'], {}), '(mutagenesis_scores, -1)\n', (5604, 5628), True, 'import numpy as np\n'), ((1299, 1310), 'numpy.log', 'np.log', (['pwm'], {}), '(pwm)\n', (1305, 1310), True, 'import numpy as np\n'), ((1458, 1472), 'numpy.log', 'np.log', (['gc_pwm'], {}), '(gc_pwm)\n', (1464, 1472), True, 'import numpy as np\n'), ((3108, 3179), 'scipy.signal.correlate2d', 'correlate2d', (['encoded_sequences[:, base_indx, :]', 'base_pssm'], {'mode': '"""same"""'}), "(encoded_sequences[:, base_indx, :], base_pssm, mode='same')\n", (3119, 3179), False, 'from scipy.signal import correlate2d\n'), ((3222, 3308), 'scipy.signal.correlate2d', 'correlate2d', (['encoded_sequences[:, -(base_indx + 1), :]', 'base_pssm_rc'], {'mode': '"""same"""'}), "(encoded_sequences[:, -(base_indx + 1), :], base_pssm_rc, mode=\n 'same')\n", (3233, 3308), False, 'from scipy.signal import correlate2d\n'), ((4123, 4138), 'deepchem.data.NumpyDataset', 
'NumpyDataset', (['X'], {}), '(X)\n', (4135, 4138), False, 'from deepchem.data import NumpyDataset\n'), ((4715, 4738), 'numpy.prod', 'np.prod', (['sequence.shape'], {}), '(sequence.shape)\n', (4722, 4738), True, 'import numpy as np\n'), ((4956, 4984), 'numpy.arange', 'np.arange', (['sequence.shape[1]'], {}), '(sequence.shape[1])\n', (4965, 4984), True, 'import numpy as np\n'), ((5113, 5141), 'numpy.arange', 'np.arange', (['sequence.shape[0]'], {}), '(sequence.shape[0])\n', (5122, 5141), True, 'import numpy as np\n'), ((5304, 5335), 'deepchem.data.NumpyDataset', 'NumpyDataset', (['mutated_sequences'], {}), '(mutated_sequences)\n', (5316, 5335), False, 'from deepchem.data import NumpyDataset\n'), ((1699, 1714), 'numpy.sort', 'np.sort', (['scores'], {}), '(scores)\n', (1706, 1714), True, 'import numpy as np\n')] |
import scipy as sp
import scanpy.api as sc
import pandas as pd
import matplotlib.pyplot as plt
import glob
import numpy as np
### network derivation
# Manual
# what you need to run this:
# scanpy anndata object which has raw data stored in anndata.raw and 3d umap stored in anndata.obsm["X_umap"]
# let's call this object 'bdata'
# you have annotation stored in anndata.obsm[anno_key] ; anno_key = name of annotation column in anndata.obs
# then all you need to do is following the below:
# step1>> select = get_grid(bdata,n_neighbor=n_neighbor,select_per_grid = 20,scale=1)
# this will put 3d_umap onto the 3d_grid.
# the 'scale' parameter adjusts the resolution at which the 3d umap structure is modeled.
# 'select_per_grid' parameter decides the number of randomly selected representative cell from each voxel in the grid.
# 'n_neighbor' is no more used in this function so you can ignore this
# output: 'select' -> idx of selected representative cells
# step2>> idata = impute_neighbor(bdata,n_neighbor=n_neighbor)
# imputation based on knn smoothing. 'n_neighbor' parameter determines width of smoothing.
# output: 'idata' -> imputed anndata
# step3>> tfdata = new_exp_matrix(bdata,idata,select,tflist=list_of_genes,max_cutoff=0.1,ratio_expressed=0.01,min_disp=0.5)
# creates a gene-by-pseudo-cell anndata and outputs it as 'tfdata' (restricted to the subset given by tflist)
# gene should have more than 'ratio_expressed' ratio of pseudo-cells expressing higher than 'max_cutoff' value.
# gene should have more than 'min_disp' as normalised dispersion (normalised to mean expression)
# tflist = None: all genes will be considered.
# tflist = list_of_genes: the gene selection will be confined to that subset. (e.g. list of TFs)
# step4>> generate_gene_network(tfdata,n_neighbors=n_neighbor)
# step5>> anno_uniq, anno_ratio = impute_anno(bdata,select,anno_key,n_neighbor=n_neighbor)
# step6>> draw_graph(tfdata, anno_key, anno_uniq, anno_ratio,adjust=True)
def get_grid(bdata, scale=1, border=2, select_per_grid=5, min_count=2, n_neighbor=10):
    """Rasterize the 3d UMAP onto a voxel grid and sample representative cells.

    The embedding is shifted to non-negative coordinates, binned into voxels,
    and every voxel holding at least `min_count` cells contributes up to
    `select_per_grid` randomly drawn cells (seeded, so deterministic). Two
    diagnostic plots are shown as a side effect: a 2d projection of the voxel
    occupancy and a histogram of voxel counts. `n_neighbor` is unused and kept
    only for interface compatibility.

    Returns
    -------
    list of int
        Indices of the selected representative cells.
    """
    import math
    from collections import defaultdict

    def shift(coords, axis):
        # Translate one axis in place so its coordinates become non-negative.
        if min(coords[:, axis]) < 0:
            coords[:, axis] += abs(min(coords[:, axis]))
        if max(coords[:, axis]) < 0:
            coords[:, axis] += abs(max(coords[:, axis]))

    # NOTE: shifts mutate bdata.obsm['X_umap'] in place, as the original did.
    coords = bdata.obsm['X_umap']
    for axis in range(3):
        shift(coords, axis)

    # Voxel occupancy volume, with `border` voxels of padding per side.
    dims = tuple(scale * math.ceil(max(coords[:, axis]) - min(coords[:, axis]))
                 + border * scale for axis in range(3))
    picture = np.zeros(dims)
    positions = coords * scale + border * scale / 2
    for pos in positions:
        picture[math.floor(pos[0]), math.floor(pos[1]), math.floor(pos[2])] += 1

    # Diagnostics: projected occupancy image and voxel-count histogram.
    plt.imshow(np.sum(picture, axis=1), vmin=10)
    plt.grid(False)
    plt.show()
    plt.hist(picture[picture > 5], bins=100)
    plt.show()

    # Bucket cell indices by voxel id.
    grid = defaultdict(list)
    for idx, pos in enumerate(positions):
        posid = '%i:%i:%i' % (math.floor(pos[0]), math.floor(pos[1]), math.floor(pos[2]))
        grid[posid].append(idx)

    # Deterministically sample representatives from sufficiently full voxels.
    np.random.seed(0)
    select = []
    for posid in grid:
        grid_size = len(grid[posid])
        if grid_size < min_count:
            continue
        select.extend(np.random.choice(grid[posid],
                                       size=min(grid_size, select_per_grid),
                                       replace=False))
    return select
def impute_neighbor(bdata, n_neighbor=10, chunk_size=10000):
    """Smooth raw expression by averaging over each cell's nearest neighbors.

    Neighbors are found in the 3d UMAP embedding (``bdata.obsm["X_umap"]``)
    with a cKDTree; each cell's imputed profile is the mean of the raw
    profiles of its ``n_neighbor`` nearest cells (itself included, since a
    point is its own nearest neighbor).

    Parameters
    ----------
    bdata : AnnData
        Object with raw counts in ``bdata.raw`` and a 3d UMAP in
        ``bdata.obsm["X_umap"]``.
    n_neighbor : int
        Number of neighbors to average over (smoothing width).
    chunk_size : int
        Number of cells processed per block, to bound peak memory
        (previously a hardcoded 10000).

    Returns
    -------
    AnnData
        New object holding the smoothed sparse matrix, with obs/var/obsm/uns
        copied from ``bdata``.
    """
    import multiprocessing as mp
    import scipy.sparse
    from scipy.spatial import cKDTree

    n_jobs = mp.cpu_count()
    # k nearest neighbors in the embedding; indices has shape (n_cells, k).
    ckd = cKDTree(bdata.obsm["X_umap"])
    try:
        indices = ckd.query(x=bdata.obsm["X_umap"], k=n_neighbor, n_jobs=n_jobs)[1]
    except TypeError:
        # scipy >= 1.6 renamed cKDTree.query's n_jobs parameter to workers
        # (n_jobs was later removed entirely).
        indices = ckd.query(x=bdata.obsm["X_umap"], k=n_neighbor, workers=n_jobs)[1]
    n_cells = bdata.raw.X.shape[0]
    chunks = []
    for start in range(0, n_cells, chunk_size):
        end = min(start + chunk_size, n_cells)
        # Mean of the k neighbor rows for every cell in this chunk.
        neighbor_sum = sum(
            bdata.raw.X[indices[start:end, k]] for k in range(n_neighbor))
        chunks.append(scipy.sparse.csr_matrix(neighbor_sum / n_neighbor))
    imputed = scipy.sparse.vstack(chunks)
    idata = sc.AnnData(imputed)
    idata.obs = bdata.obs.copy()
    idata.var = bdata.raw.var.copy()
    idata.obsm = bdata.obsm.copy()
    idata.uns = bdata.uns.copy()
    return idata
def new_exp_matrix(bdata,idata,select,n_min_exp_cell = 10, min_mean=0,min_disp=.1, ratio_expressed = 0.1,example_gene='CDK1',show_filter = None, max_cutoff=0.2, tflist = None):
    """Build a gene-by-pseudo-cell AnnData from the imputed matrix.

    Restricts `idata` to the representative cells in `select` and to genes
    detected (count > 0) in more than `n_min_exp_cell` cells of the raw data,
    applies a variability/expression filter, transposes so genes become
    observations, and optionally restricts to `tflist` (e.g. TFs).

    Parameters
    ----------
    bdata : AnnData with raw counts in .raw
    idata : AnnData with the kNN-imputed matrix (same cells as bdata)
    select : indices of representative cells (e.g. from get_grid)
    n_min_exp_cell : minimum number of raw cells a gene must be detected in
    min_mean, min_disp : thresholds passed to sc.pp.filter_genes_dispersion
    ratio_expressed : minimum fraction of pseudo-cells above max_cutoff
    example_gene : gene whose histogram and summary stats are displayed
    show_filter : if truthy, plot the dispersion-filter diagnostics
    max_cutoff : expression threshold used by the ratio filter
    tflist : optional list of gene names to restrict the result to

    Returns
    -------
    AnnData with genes as observations and pseudo-cells as variables.
    """
    # get genes expressed more than min_exp_cell
    detected = np.sum(bdata.raw.X>0,axis=0).A1
    select_gene = np.where(detected > n_min_exp_cell)[0]
    # Subset the imputed matrix to representative cells and detected genes.
    Xnew = idata.X[select].todense()
    Xnew = Xnew[:,select_gene]
    import scipy
    gdata = sc.AnnData(scipy.sparse.csr_matrix(Xnew))
    gdata.var_names = bdata.raw.var_names[select_gene]
    gdata.raw = sc.AnnData(Xnew)
    # select highly variable genes
    print('selecting hvgs...')
    result = sc.pp.filter_genes_dispersion(gdata.X,log=False,min_mean=min_mean,min_disp=min_disp)
    if example_gene:
        # Diagnostic output for a single named gene.
        pos = np.where(gdata.var_names==example_gene)[0][0]
        plt.hist(gdata.X[:,pos].todense().A1)
        plt.show()
        print('max:',np.max(gdata.X[:,pos]))
        print('mean:',np.mean(gdata.X[:,pos]))
        print('min:',np.min(gdata.X[:,pos]))
        print('dispersions_norm:',result['dispersions_norm'][pos])
    if show_filter:
        # Mean/dispersion scatter with the selected genes highlighted.
        x = result.means
        y = result.dispersions_norm
        c = result.gene_subset
        print(np.sum(c), 'highly variable genes are selected')
        plt.scatter(x,y)
        plt.scatter(x[c],y[c])
        plt.show()
    # do filter
    c1 = (result.gene_subset) # highly variable above min_disp
    # NOTE(review): c2 is computed but NOT used in `deg` below — confirm
    # whether the max-expression filter was meant to be applied.
    c2 = (np.max(gdata.X,axis=0).todense().A1 > max_cutoff) # max expression should be above max_cutoff
    c3 = np.sum(gdata.X>max_cutoff,axis=0).A1 > ratio_expressed*len(gdata.obs_names)
    deg = (c1 & c3)
    gdata = gdata[:,deg].copy()
    # invert gene to cell
    import scipy
    cdata = sc.AnnData(scipy.sparse.csr_matrix(gdata.X.T))
    cdata.obs_names = gdata.var_names
    if tflist:
        tf_idx = cdata.obs_names.isin(tflist)
        tfdata = cdata[tf_idx].copy()
        return tfdata
    else:
        return cdata
def generate_gene_network(tfdata,n_neighbors=10):
    """Embed the gene-by-pseudo-cell matrix and precompute graph layouts.

    Runs PCA, builds a cosine kNN graph with `n_neighbors` neighbors,
    computes a UMAP, and stores three force-directed layouts ('fa', 'fr',
    'kk') on `tfdata`. Mutates `tfdata` in place; returns nothing.
    """
    sc.pp.pca(tfdata)
    sc.pp.neighbors(tfdata, metric='cosine', n_neighbors=n_neighbors)
    sc.tl.umap(tfdata, min_dist=0.7)
    for layout in ('fa', 'fr', 'kk'):
        sc.tl.draw_graph(tfdata, layout=layout)
def impute_anno(bdata, select, anno_key, n_neighbor=10):
    """Neighborhood-smoothed annotation ratios for the selected cells.

    For every cell, counts how often each annotation label occurs among its
    ``n_neighbor`` nearest neighbors in the 3d UMAP, normalizes the counts
    to per-cell ratios, and restricts the result to labels that actually
    occur among the ``select``-ed cells.

    Fixes over the previous version: removed the unused sklearn ``KDTree``
    import, hoisted the per-label ``set(...)`` rebuild out of the final
    comprehension, and added a fallback for scipy's ``n_jobs`` ->
    ``workers`` rename in ``cKDTree.query``.

    Parameters
    ----------
    bdata : AnnData-like
        Object with the annotation column ``bdata.obs[anno_key]`` and a 3d
        UMAP in ``bdata.obsm["X_umap"]``.
    select : sequence of int
        Indices of the representative cells (e.g. from ``get_grid``).
    anno_key : str
        Column name in ``bdata.obs``.
    n_neighbor : int
        Smoothing width (number of neighbors counted per cell).

    Returns
    -------
    (ndarray, ndarray)
        Labels present among the selected cells, and the matching
        (n_labels, n_selected) ratio matrix.
    """
    import multiprocessing as mp
    from scipy.spatial import cKDTree

    n_jobs = mp.cpu_count()
    ckd = cKDTree(bdata.obsm["X_umap"])
    try:
        indices = ckd.query(x=bdata.obsm["X_umap"], k=n_neighbor, n_jobs=n_jobs)[1]
    except TypeError:
        # scipy >= 1.6 renamed cKDTree.query's n_jobs parameter to workers.
        indices = ckd.query(x=bdata.obsm["X_umap"], k=n_neighbor, workers=n_jobs)[1]
    anno_uniq = sorted(set(bdata.obs[anno_key]))
    # One indicator row per label: anno_arr[l, c] == 1 iff cell c has label l.
    anno_arr = np.vstack(
        [np.array(bdata.obs[anno_key] == x).astype(int) for x in anno_uniq])
    anno_sum = np.zeros(shape=anno_arr.shape)
    for i in range(n_neighbor):
        anno_sum += anno_arr[:, indices[:, i]]
    anno_ratio = anno_sum / np.sum(anno_sum, axis=0)
    # Keep only labels that occur among the selected cells (set built once).
    selected_labels = set(bdata.obs[anno_key][select])
    select_anno = [x in selected_labels for x in anno_uniq]
    return np.array(anno_uniq)[select_anno], anno_ratio[select_anno][:, select]
def draw_graph(tfdata, anno_uniq, anno_ratio,adjust=False,z_score_cut = 2,
              factor0 = 2, text_fontsize=10):
    """Plot the gene network, coloring genes by their best-correlated annotation.

    Each gene node is colored by the annotation whose ratio profile correlates
    best with the gene's expression profile; node size scales with that
    (z-scored) correlation, and graph edges above a fixed connectivity
    threshold are drawn as grey lines. Shows the figure as a side effect;
    returns nothing.

    Parameters
    ----------
    tfdata : AnnData of genes x pseudo-cells with a neighbors graph and an
        'X_draw_graph_fr' layout in .obsm
    anno_uniq, anno_ratio : labels and (n_labels, n_pseudo_cells) ratios,
        e.g. from impute_anno
    adjust : if True, de-overlap gene labels with adjustText
    z_score_cut : genes with a scaled correlation below this are drawn grey
    factor0 : overall dot-size multiplier
    text_fontsize : font size for gene labels
    """
    # Color palettes sized for different numbers of annotation labels.
    palette1 = ["#023fa5", "#7d87b9", "#bec1d4", "#d6bcc0", "#bb7784", "#8e063b", "#4a6fe3",
                "#8595e1", "#b5bbe3", "#e6afb9", "#e07b91", "#d33f6a", "#11c638", "#8dd593",
                "#c6dec7", "#ead3c6", "#f0b98d", "#ef9708", "#0fcfc0", "#9cded6", "#d5eae7",
                "#f3e1eb", "#f6c4e1", "#f79cd4",
                '#7f7f7f', "#c7c7c7", "#1CE6FF", "#336600"]
    palette2 = [
        '#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
        '#9467bd', '#8c564b', '#e377c2', # '#7f7f7f' removed grey
        '#bcbd22', '#17becf',
        '#aec7e8', '#ffbb78', '#98df8a', '#ff9896',
        '#c5b0d5', '#c49c94', '#f7b6d2', # '#c7c7c7' removed grey
        '#dbdb8d', '#9edae5',
        '#ad494a', '#8c6d31'] # manual additions
    # NOTE(review): godsnot_64 is not defined in this file — more than 28
    # labels would raise NameError unless it is defined elsewhere; confirm.
    if len(anno_uniq)> 28:
        palette = godsnot_64
    elif len(anno_uniq)> 20:
        palette = palette1
    else:
        palette = palette2
    # Correlate every gene profile with every annotation-ratio profile;
    # Cr_an is the (n_labels, n_genes) cross-correlation block.
    Cr = np.corrcoef(np.vstack([tfdata.X.todense(),anno_ratio]))
    Cr_an = Cr[len(tfdata):,:len(tfdata)]
    # Index of each gene's best-correlated annotation (its color).
    color_anno = np.argmax(Cr_an,axis=0)
    C = tfdata.uns['neighbors']['connectivities'].todense() # zx3 bdata.uns['neighbors']['connectivities']
    #C = Cr[:len(tfdata),:len(tfdata)]
    # Edges to draw: gene pairs above a fixed connectivity threshold.
    pairs = np.where(C>0.3)
    from sklearn import preprocessing
    # z-score the correlations per label so dot sizes are comparable.
    Cr_scaled = preprocessing.scale(Cr_an)
    #dot_size0 = np.choose(color_anno,Cr_scaled)
    dot_size0 = np.array([Cr_scaled[j,i] for i,j in enumerate(color_anno)])
    # Grey out genes whose best correlation is below the z-score cutoff.
    colors = np.array([palette[i] if s >z_score_cut else 'gray' for i,s in zip(color_anno,dot_size0)])
    from adjustText import adjust_text
    show = True
    gamma = 2
    dot_size = dot_size0**(gamma)
    axis = 'X_draw_graph_fr'
    x = tfdata.obsm[axis][:,0]
    y = tfdata.obsm[axis][:,1]
    n = list(tfdata.obs_names)
    plt.figure(figsize=(8,8))
    plt.scatter(x,y,c=colors,s=factor0*dot_size) #50*size
    # draw pair to pair lines
    for p1, p2 in zip(pairs[0],pairs[1]):
        plt.plot([x[p1],x[p2]],[y[p1],y[p2]],c='lightgrey',alpha=0.3,zorder=-1,
                linewidth=2*C[p1,p2]**gamma)
    # draw_label
    # Invisible dots (s=0) exist only to populate the legend with label colors.
    for i, label in enumerate(anno_uniq):
        plt.scatter(0,0,s=0,label=label,
                   c=palette[i],zorder=1,alpha=1.0,
                   linewidth=0)
    # add_names
    if show != False:
        texts = []
        for i, txt in enumerate(n):
            texts.append(plt.text(x[i],y[i],txt,fontsize=text_fontsize))
    lgnd = plt.legend(loc=(1.1,0), scatterpoints=1, fontsize=10)
    for handle in lgnd.legendHandles:
        handle.set_sizes([30.0])
        handle.set_alpha(1)
    plt.xticks([], [])
    plt.yticks([], [])
    plt.grid(False)
    if adjust == True:
        adjust_text(texts,only_move={'text':'y'})
    plt.show()
| [
"numpy.random.seed",
"numpy.sum",
"numpy.argmax",
"sklearn.preprocessing.scale",
"scanpy.api.tl.draw_graph",
"adjustText.adjust_text",
"collections.defaultdict",
"matplotlib.pyplot.figure",
"numpy.mean",
"scipy.spatial.cKDTree",
"multiprocessing.cpu_count",
"scanpy.api.tl.umap",
"scanpy.api.... | [((2840, 2855), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (2848, 2855), True, 'import matplotlib.pyplot as plt\n'), ((2860, 2870), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2868, 2870), True, 'import matplotlib.pyplot as plt\n'), ((2880, 2920), 'matplotlib.pyplot.hist', 'plt.hist', (['picture[picture > 5]'], {'bins': '(100)'}), '(picture[picture > 5], bins=100)\n', (2888, 2920), True, 'import matplotlib.pyplot as plt\n'), ((2922, 2932), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2930, 2932), True, 'import matplotlib.pyplot as plt\n'), ((2968, 2985), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2979, 2985), False, 'from collections import defaultdict\n'), ((3305, 3322), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3319, 3322), True, 'import numpy as np\n'), ((3770, 3784), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (3782, 3784), True, 'import multiprocessing as mp\n'), ((3843, 3872), 'scipy.spatial.cKDTree', 'cKDTree', (["bdata.obsm['X_umap']"], {}), "(bdata.obsm['X_umap'])\n", (3850, 3872), False, 'from scipy.spatial import cKDTree\n'), ((4347, 4376), 'scipy.sparse.vstack', 'scipy.sparse.vstack', (['sum_list'], {}), '(sum_list)\n', (4366, 4376), False, 'import scipy\n'), ((4389, 4408), 'scanpy.api.AnnData', 'sc.AnnData', (['imputed'], {}), '(imputed)\n', (4399, 4408), True, 'import scanpy.api as sc\n'), ((5125, 5141), 'scanpy.api.AnnData', 'sc.AnnData', (['Xnew'], {}), '(Xnew)\n', (5135, 5141), True, 'import scanpy.api as sc\n'), ((5226, 5317), 'scanpy.api.pp.filter_genes_dispersion', 'sc.pp.filter_genes_dispersion', (['gdata.X'], {'log': '(False)', 'min_mean': 'min_mean', 'min_disp': 'min_disp'}), '(gdata.X, log=False, min_mean=min_mean,\n min_disp=min_disp)\n', (5255, 5317), True, 'import scanpy.api as sc\n'), ((6603, 6620), 'scanpy.api.pp.pca', 'sc.pp.pca', (['tfdata'], {}), '(tfdata)\n', (6612, 6620), True, 'import 
scanpy.api as sc\n'), ((6625, 6690), 'scanpy.api.pp.neighbors', 'sc.pp.neighbors', (['tfdata'], {'metric': '"""cosine"""', 'n_neighbors': 'n_neighbors'}), "(tfdata, metric='cosine', n_neighbors=n_neighbors)\n", (6640, 6690), True, 'import scanpy.api as sc\n'), ((6693, 6725), 'scanpy.api.tl.umap', 'sc.tl.umap', (['tfdata'], {'min_dist': '(0.7)'}), '(tfdata, min_dist=0.7)\n', (6703, 6725), True, 'import scanpy.api as sc\n'), ((6729, 6766), 'scanpy.api.tl.draw_graph', 'sc.tl.draw_graph', (['tfdata'], {'layout': '"""fa"""'}), "(tfdata, layout='fa')\n", (6745, 6766), True, 'import scanpy.api as sc\n'), ((6770, 6807), 'scanpy.api.tl.draw_graph', 'sc.tl.draw_graph', (['tfdata'], {'layout': '"""fr"""'}), "(tfdata, layout='fr')\n", (6786, 6807), True, 'import scanpy.api as sc\n'), ((6811, 6848), 'scanpy.api.tl.draw_graph', 'sc.tl.draw_graph', (['tfdata'], {'layout': '"""kk"""'}), "(tfdata, layout='kk')\n", (6827, 6848), True, 'import scanpy.api as sc\n'), ((7034, 7048), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (7046, 7048), True, 'import multiprocessing as mp\n'), ((7103, 7132), 'scipy.spatial.cKDTree', 'cKDTree', (["bdata.obsm['X_umap']"], {}), "(bdata.obsm['X_umap'])\n", (7110, 7132), False, 'from scipy.spatial import cKDTree\n'), ((7389, 7419), 'numpy.zeros', 'np.zeros', ([], {'shape': 'anno_arr.shape'}), '(shape=anno_arr.shape)\n', (7397, 7419), True, 'import numpy as np\n'), ((8847, 8871), 'numpy.argmax', 'np.argmax', (['Cr_an'], {'axis': '(0)'}), '(Cr_an, axis=0)\n', (8856, 8871), True, 'import numpy as np\n'), ((9034, 9051), 'numpy.where', 'np.where', (['(C > 0.3)'], {}), '(C > 0.3)\n', (9042, 9051), True, 'import numpy as np\n'), ((9106, 9132), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['Cr_an'], {}), '(Cr_an)\n', (9125, 9132), False, 'from sklearn import preprocessing\n'), ((9604, 9630), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (9614, 9630), True, 'import matplotlib.pyplot as 
plt\n'), ((9634, 9683), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'c': 'colors', 's': '(factor0 * dot_size)'}), '(x, y, c=colors, s=factor0 * dot_size)\n', (9645, 9683), True, 'import matplotlib.pyplot as plt\n'), ((10255, 10309), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(1.1, 0)', 'scatterpoints': '(1)', 'fontsize': '(10)'}), '(loc=(1.1, 0), scatterpoints=1, fontsize=10)\n', (10265, 10309), True, 'import matplotlib.pyplot as plt\n'), ((10414, 10432), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]', '[]'], {}), '([], [])\n', (10424, 10432), True, 'import matplotlib.pyplot as plt\n'), ((10437, 10455), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]', '[]'], {}), '([], [])\n', (10447, 10455), True, 'import matplotlib.pyplot as plt\n'), ((10460, 10475), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (10468, 10475), True, 'import matplotlib.pyplot as plt\n'), ((10553, 10563), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10561, 10563), True, 'import matplotlib.pyplot as plt\n'), ((2804, 2827), 'numpy.sum', 'np.sum', (['picture'], {'axis': '(1)'}), '(picture, axis=1)\n', (2810, 2827), True, 'import numpy as np\n'), ((4816, 4847), 'numpy.sum', 'np.sum', (['(bdata.raw.X > 0)'], {'axis': '(0)'}), '(bdata.raw.X > 0, axis=0)\n', (4822, 4847), True, 'import numpy as np\n'), ((4866, 4901), 'numpy.where', 'np.where', (['(detected > n_min_exp_cell)'], {}), '(detected > n_min_exp_cell)\n', (4874, 4901), True, 'import numpy as np\n'), ((5023, 5052), 'scipy.sparse.csr_matrix', 'scipy.sparse.csr_matrix', (['Xnew'], {}), '(Xnew)\n', (5046, 5052), False, 'import scipy\n'), ((5451, 5461), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5459, 5461), True, 'import matplotlib.pyplot as plt\n'), ((5854, 5871), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {}), '(x, y)\n', (5865, 5871), True, 'import matplotlib.pyplot as plt\n'), ((5879, 5902), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x[c]', 'y[c]'], 
{}), '(x[c], y[c])\n', (5890, 5902), True, 'import matplotlib.pyplot as plt\n'), ((5910, 5920), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5918, 5920), True, 'import matplotlib.pyplot as plt\n'), ((6317, 6351), 'scipy.sparse.csr_matrix', 'scipy.sparse.csr_matrix', (['gdata.X.T'], {}), '(gdata.X.T)\n', (6340, 6351), False, 'import scipy\n'), ((7525, 7549), 'numpy.sum', 'np.sum', (['anno_sum'], {'axis': '(0)'}), '(anno_sum, axis=0)\n', (7531, 7549), True, 'import numpy as np\n'), ((9769, 9885), 'matplotlib.pyplot.plot', 'plt.plot', (['[x[p1], x[p2]]', '[y[p1], y[p2]]'], {'c': '"""lightgrey"""', 'alpha': '(0.3)', 'zorder': '(-1)', 'linewidth': '(2 * C[p1, p2] ** gamma)'}), "([x[p1], x[p2]], [y[p1], y[p2]], c='lightgrey', alpha=0.3, zorder=-\n 1, linewidth=2 * C[p1, p2] ** gamma)\n", (9777, 9885), True, 'import matplotlib.pyplot as plt\n'), ((9954, 10041), 'matplotlib.pyplot.scatter', 'plt.scatter', (['(0)', '(0)'], {'s': '(0)', 'label': 'label', 'c': 'palette[i]', 'zorder': '(1)', 'alpha': '(1.0)', 'linewidth': '(0)'}), '(0, 0, s=0, label=label, c=palette[i], zorder=1, alpha=1.0,\n linewidth=0)\n', (9965, 10041), True, 'import matplotlib.pyplot as plt\n'), ((10507, 10550), 'adjustText.adjust_text', 'adjust_text', (['texts'], {'only_move': "{'text': 'y'}"}), "(texts, only_move={'text': 'y'})\n", (10518, 10550), False, 'from adjustText import adjust_text\n'), ((5483, 5506), 'numpy.max', 'np.max', (['gdata.X[:, pos]'], {}), '(gdata.X[:, pos])\n', (5489, 5506), True, 'import numpy as np\n'), ((5529, 5553), 'numpy.mean', 'np.mean', (['gdata.X[:, pos]'], {}), '(gdata.X[:, pos])\n', (5536, 5553), True, 'import numpy as np\n'), ((5575, 5598), 'numpy.min', 'np.min', (['gdata.X[:, pos]'], {}), '(gdata.X[:, pos])\n', (5581, 5598), True, 'import numpy as np\n'), ((5797, 5806), 'numpy.sum', 'np.sum', (['c'], {}), '(c)\n', (5803, 5806), True, 'import numpy as np\n'), ((6118, 6154), 'numpy.sum', 'np.sum', (['(gdata.X > max_cutoff)'], {'axis': '(0)'}), '(gdata.X > 
max_cutoff, axis=0)\n', (6124, 6154), True, 'import numpy as np\n'), ((7642, 7661), 'numpy.array', 'np.array', (['anno_uniq'], {}), '(anno_uniq)\n', (7650, 7661), True, 'import numpy as np\n'), ((2723, 2741), 'math.floor', 'math.floor', (['pos[0]'], {}), '(pos[0])\n', (2733, 2741), False, 'import math\n'), ((2742, 2760), 'math.floor', 'math.floor', (['pos[1]'], {}), '(pos[1])\n', (2752, 2760), False, 'import math\n'), ((2761, 2779), 'math.floor', 'math.floor', (['pos[2]'], {}), '(pos[2])\n', (2771, 2779), False, 'import math\n'), ((3071, 3089), 'math.floor', 'math.floor', (['pos[0]'], {}), '(pos[0])\n', (3081, 3089), False, 'import math\n'), ((3090, 3108), 'math.floor', 'math.floor', (['pos[1]'], {}), '(pos[1])\n', (3100, 3108), False, 'import math\n'), ((3109, 3127), 'math.floor', 'math.floor', (['pos[2]'], {}), '(pos[2])\n', (3119, 3127), False, 'import math\n'), ((4249, 4263), 'numpy.sum', 'np.sum', (['X_list'], {}), '(X_list)\n', (4255, 4263), True, 'import numpy as np\n'), ((5351, 5392), 'numpy.where', 'np.where', (['(gdata.var_names == example_gene)'], {}), '(gdata.var_names == example_gene)\n', (5359, 5392), True, 'import numpy as np\n'), ((10195, 10244), 'matplotlib.pyplot.text', 'plt.text', (['x[i]', 'y[i]', 'txt'], {'fontsize': 'text_fontsize'}), '(x[i], y[i], txt, fontsize=text_fontsize)\n', (10203, 10244), True, 'import matplotlib.pyplot as plt\n'), ((6015, 6038), 'numpy.max', 'np.max', (['gdata.X'], {'axis': '(0)'}), '(gdata.X, axis=0)\n', (6021, 6038), True, 'import numpy as np\n'), ((7308, 7342), 'numpy.array', 'np.array', (['(bdata.obs[anno_key] == x)'], {}), '(bdata.obs[anno_key] == x)\n', (7316, 7342), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd

# Load the table of candidate index lists; presumably -1 marks padding
# entries — confirm against the file's producer.
chosen_list = np.loadtxt('./simulation/valid_list_random.txt')
a = chosen_list[0]
# Boolean-mask indexing is the idiomatic form of a[np.where(a != -1)].
idx = a[a != -1]
print(idx)
print(idx) | [
"numpy.where",
"numpy.loadtxt"
] | [((54, 102), 'numpy.loadtxt', 'np.loadtxt', (['"""./simulation/valid_list_random.txt"""'], {}), "('./simulation/valid_list_random.txt')\n", (64, 102), True, 'import numpy as np\n'), ((130, 147), 'numpy.where', 'np.where', (['(a != -1)'], {}), '(a != -1)\n', (138, 147), True, 'import numpy as np\n')] |
from flask import Flask,jsonify
from flask_cors import CORS
from flask import render_template
from flask import Flask, jsonify, request
from flask_cors import CORS
from flask_pymongo import PyMongo
import pymongo
import uuid
import json
from bson import ObjectId
import numpy as np
import pickle
# Flask configuration flag; picked up by config.from_object(__name__) below.
DEBUG = True
application = Flask(__name__)
# Load this module's upper-case attributes (e.g. DEBUG) as Flask config.
application.config.from_object(__name__)
# Scratch buffer for posted values; not used in the visible code.
incoming_data = []
# Allow cross-origin requests from any origin on all routes.
CORS(application, resources={r'/*': {'origins': '*'}})
@application.route('/index',methods=['GET', 'POST'])
@application.route('/',methods=['GET', 'POST'])
def hello():
    """Predict patient readmission from a posted JSON record.

    POST: reads the raw feature fields from the JSON body, one-hot encodes
    the categorical ones into fixed-order template dicts, pads the feature
    vector to the width the saved KNN model expects, and returns the
    model's prediction as JSON.
    GET: returns a prompt asking the client to submit data.
    """
    if request.method == 'POST':
        post_data = request.get_json()
        # Raw feature values from the request body.
        hours = post_data.get('hrs')
        prolab = post_data.get('lab')
        prod = post_data.get('pro')
        medic = post_data.get('med')
        outpat = post_data.get('out')
        emerg = post_data.get('eme')
        inpat = post_data.get('inp')
        diag = post_data.get('dig')
        adt = post_data.get('adt')
        ads = post_data.get('ads')
        did = post_data.get('did')
        mes = post_data.get('mes')
        pay = post_data.get('pay')
        rac = post_data.get('rac')
        gen = post_data.get('gen')
        agr = post_data.get('agr')
        wem = post_data.get('wem')
        gls = post_data.get('gls')
        a1c = post_data.get('a1c')
        com = post_data.get('com')
        met = post_data.get('met')
        rep = post_data.get('rep')
        nat = post_data.get('nat')
        chl = post_data.get('chl')
        glim = post_data.get('glim')
        ace = post_data.get('ace')
        glip = post_data.get('glip')
        glyb = post_data.get('glyb')
        tol = post_data.get('tol')
        pio = post_data.get('pio')
        ros = post_data.get('ros')
        aca = post_data.get('aca')
        mmg = post_data.get('mmg')
        tro = post_data.get('tro')
        ins = post_data.get('ins')
        gly = post_data.get('gly')
        gli = post_data.get('gli')
        glp = post_data.get('glp')
        mer = post_data.get('mer')
        # One-hot template dicts for the categorical features; the key
        # order here fixes the position of each feature in the model input.
        race ={'Asian':0,'Caucasian':0,'Hispanic':0,'Other':0,'UNK':0}
        gender ={'Male':0,'Unknown/Invalid':0}
        if gen == 'Male':
            gender['Male']=1
        else:
            gender['Unknown/Invalid']=1
        glue ={'> 300':0,'None':0,'Norm':0}
        glue[gls]=1
        aa1c ={'> 8':0,'None':0,'Norm':0}
        aa1c[a1c]=1
        mett ={'No':0,'Steady':0,'Up':0}
        repp ={'No':0,'Steady':0,'Up':0}
        natt ={'No':0,'Steady':0,'Up':0}
        chll ={'No':0,'Steady':0,'Up':0}
        glimm ={'No':0,'Steady':0,'Up':0}
        acee=[0]
        glipp={'No':0,'Steady':0,'Up':0}
        glypp ={'No':0,'Steady':0,'Up':0}
        toll=[0]
        pioo={'No':0,'Steady':0,'Up':0}
        ross ={'No':0,'Steady':0,'Up':0}
        acaa ={'No':0,'Steady':0,'Up':0}
        mmgg={'No':0,'Steady':0,'Up':0}
        # NOTE(review): this clobbers the 'tol' value read from the request
        # above — confirm whether that request field was meant to be used.
        tol = [0]
        troo ={'Steady':0,'Up':0}
        inss ={'No':0,'Steady':0,'Up':0}
        glyy ={'No':0,'Steady':0,'Up':0}
        glipz = [0]
        glii =[0]
        megg = [0]
        merr=[0]
        comm = []
        if com == 'Yes':
            comm=[1]
        else:
            comm=[0]
        payer = {'CH':0,'CM':0,'CP':0,'DM':0,'FR':0,'HM':0,'MC':0,'MD':0,'MP':0,'OG':0,'OT':0,'PO':0,'SI':0,'SP':0,'UN':0,'UNK':0,'WC':0}
        # NOTE(review): adtt is never indexed with the posted 'adt' value,
        # so the admission-type one-hot always stays all-zero — confirm.
        adtt = {'Urgent':0,'Elective':0,'Newborn':0,'Not Available':0,'Null':0,'Trauma Center':0,'Not Mapped':0}
        # NOTE(review): the two 'long term care hospital' / 'nursing facility'
        # strings below are implicitly concatenated into ONE key (missing
        # comma). Fixing it would change the feature-vector length the saved
        # model was trained on, so it is left as-is.
        disc = {'Neonate discharged to another hospital for neonatal aftercare':0,
                'Still patient or expected to return for outpatient services':0,
                'Discharged/transferred within this institution to Medicare approved swing bed':0,
                'Discharged/transferred/referred another institution for outpatient services':0,
                'Discharged/transferred/referred to this institution for outpatient services':0,
                'NULL':0,
                'Discharged/transferred to another short term hospital':0,
                'Discharged/transferred to another rehab fac including rehab units of a hospital':0,
                'Discharged/transferred to a long term care hospital'
                'Discharged/transferred to a nursing facility certified under Medicaid but not certified under Medicare':0,
                'Not Mapped':0,
                'Discharged/transferred to a federal health care facility':0,
                'Discharged/transferred/referred to a psychiatric hospital of psychiatric distinct part unit of a hospital':0,
                'Discharged/transferred to SNF':0,
                'Discharged/transferred to ICF':0,
                'Discharged/transferred to another type of inpatient care institution':0,
                'Discharged/transferred to home with home health service':0,
                'Left AMA':0,
                'Discharged/transferred to home under care of Home IV provider':0,
                'Admitted as an inpatient to this hospital':0,
                }
        adss = { 'Transfer from Critical Access Hospital' : 0,'Normal Delivery ':0,
                 'Sick Baby':0,'Extramural Birth':0,'NULL':0,'Clinic Referral':0,'Not Mapped':0,
                 'Transfer from hospital inpt/same fac reslt in a sep claim':0,
                 'Transfer from Ambulatory Surgery Center':0,'HMO Referral':0,
                 'Transfer from a hospital':0,
                 'Transfer from a Skilled Nursing Facility (SNF)':0,
                 'Transfer from another health care facility':0,'Emergency Room':0,
                 'Court/Law Enforcement':0,
                 'Not Available':0}
        # Map the age bracket string to its midpoint value.
        aggrr = 0
        if agr == '[0-10)':
            aggrr=5
        elif agr == '[10-20)':
            aggrr = 15
        elif agr == '[20-30)':
            aggrr = 25
        elif agr == '[30-40)':
            aggrr = 35
        elif agr == '[40-50)':
            aggrr = 45
        elif agr == '[50-60)':
            aggrr = 55
        elif agr == '[60-70)':
            aggrr = 65
        elif agr == '[70-80)':
            aggrr = 75
        elif agr == '[80-90)':
            aggrr = 85
        elif agr == '[90-100)':
            aggrr = 90
        wemm = 0
        if wem == 'Yes':
            wemm = 1
        else:
            wemm = 0
        mess = {'Emergency/Trauma':0,'Family/GeneralPractice':0,'InternalMedicine':0,'Nephrology':0,'Orthopedics':0,'Orthopedics-Reconstructive':0,'Other':0,'Radiologist':0,'Surgery-General':0,'UNK':0}
        # Flip the one-hot bit selected by each posted categorical value.
        mess[mes]=adss[ads]=payer[pay]=mett[met]=repp[rep]=natt[nat]=chll[chl]=glimm[glim]=glipp[glip]=glypp[glyb]=pioo[pio]=ross[ros]=acaa[aca]=mmgg[mmg]=troo[tro]=inss[ins]=glyy[gly]=1
        scalerfile = 'knn.sav'
        # Load the pickled model via a context manager so the file handle is
        # closed deterministically (previously pickle.load(open(...)) leaked it).
        with open(scalerfile, 'rb') as model_file:
            knn_model = pickle.load(model_file)
        race[rac] = 1
        # Assemble the numeric features followed by every one-hot block, in
        # the fixed order the model expects.
        abc = [int(hours),int(prolab),int(prod),int(medic),int(outpat),int(emerg),int(inpat),int(diag)]
        abc1=list(race.values())+list(gender.values())+list(glue.values())+list(aa1c.values())+list(mett.values())+list(repp.values())+list(natt.values())+list(chll.values())+list(glimm.values())+acee+list(glipp.values())+list(glypp.values())+toll+list(pioo.values())+list(ross.values())+list(acaa.values())+list(mmgg.values())+tol+list(troo.values())+list(inss.values())+list(glyy.values())+glipz+glii+megg+merr+comm+[0]+list(payer.values())+list(adtt.values())+list(disc.values())+list(adss.values())+list(mess.values())
        abc=abc+abc1
        # Zero-pad up to the width expected by the model (age and weight
        # flags are appended last).
        abc3=[]
        for i in range(0,144-(len(abc))-3):
            abc3.append(0)
        abc2 = [int(aggrr)]+[int(wemm)]
        if len(abc) > 142:
            abc=abc+abc2
            print(len(abc))
        else:
            abc=abc+abc3+abc2
            print(len(abc))
        print(abc)
        print("Done Modeling")
        arr = np.array(abc)
        arr = arr.reshape(1, -1)
        result = knn_model.predict(arr)
        print("This is the result = ",result)
        return jsonify(str(result[0]))
    else:
        return jsonify('Please Submit to show info')
# response_object = {'status': 'success'}
# if request.method == 'POST':
# print("GOT IT")
# # return jsonify('Got the result')
# else:
# return jsonify('No POST')
@application.route('/Result')#,methods=['POST'])
def get_data():
    """GET endpoint that simply acknowledges that results are available."""
    message = 'Got Results'
    return jsonify(message)
if __name__ == '__main__':
    # Flask's built-in development server; not intended for production use.
    application.run()
"flask_cors.CORS",
"flask.Flask",
"flask.jsonify",
"numpy.array",
"flask.request.get_json"
] | [((326, 341), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (331, 341), False, 'from flask import Flask, jsonify, request\n'), ((404, 457), 'flask_cors.CORS', 'CORS', (['application'], {'resources': "{'/*': {'origins': '*'}}"}), "(application, resources={'/*': {'origins': '*'}})\n", (408, 457), False, 'from flask_cors import CORS\n'), ((8985, 9007), 'flask.jsonify', 'jsonify', (['"""Got Results"""'], {}), "('Got Results')\n", (8992, 9007), False, 'from flask import Flask, jsonify, request\n'), ((624, 642), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (640, 642), False, 'from flask import Flask, jsonify, request\n'), ((8500, 8513), 'numpy.array', 'np.array', (['abc'], {}), '(abc)\n', (8508, 8513), True, 'import numpy as np\n'), ((8687, 8724), 'flask.jsonify', 'jsonify', (['"""Please Submit to show info"""'], {}), "('Please Submit to show info')\n", (8694, 8724), False, 'from flask import Flask, jsonify, request\n')] |
import os
import time
import numpy as np
import pandas
import pandas as pd
from scripts.MADDPG.maddpg import MADDPG
from scripts.MADDPG.buffer import MultiAgentReplayBuffer
# from scripts.MADDPG_original.maddpg import MADDPG
# from scripts.MADDPG_original.buffer import MultiAgentReplayBuffer
from make_env import make_env
from scripts.MADDPG.edge_env import EdgeEnv
# Show every column when printing DataFrames (pandas truncates wide frames
# by default).
pandas.set_option('display.max_columns', None)  # display all columns
def obs_list_to_state_vector(observation):
    """Concatenate several 1-D per-agent observation ndarrays into a single
    1-D float64 state ndarray.

    Returns an empty float64 array when `observation` is empty.
    """
    # A single np.concatenate call is linear in the total length; the
    # previous loop re-allocated the accumulator on every iteration, which
    # is quadratic overall.
    parts = [np.array([])]  # float64 seed keeps the output dtype stable
    parts.extend(np.asarray(obs) for obs in observation)
    return np.concatenate(parts)
if __name__ == '__main__':
    # --- Run configuration -------------------------------------------------
    debug = False
    # debug = True
    evaluate = False
    # evaluate = True
    # scenario = 'simple'
    scenario = 'edge_cloud'
    # print something every 500 games
    PRINT_INTERVAL = 500
    N_GAMES = 80000
    print(f"Run {N_GAMES} episodes in total")
    # the game do not have a terminal state so we set a max steps for each episode
    MAX_STEPS = 20
    total_steps_cntr = 0
    score_history = []
    sw_history_om = []
    best_score = 10000  # save the model if the score > -10
    # parameters of fog nodes
    h_r = 5
    l_r = 3
    l_c = 2
    n_c = 2.5
    h_c = 3
    # Single fog node with homogeneous resource capacities and unit costs.
    avg_resource_capacity = {0: [h_r, h_r, h_r]}
    avg_unit_cost = {0: [l_c, l_c, l_c]}
    env = EdgeEnv(avg_resource_capacity, avg_unit_cost, n_nodes=1,
                  n_timesteps=10, n_tasks=500, max_steps=MAX_STEPS,
                  n_actions=2, p_high_value_tasks=0.2)
    n_agents = env.n_nodes
    actor_dims = []
    for i in range(n_agents):
        actor_dims.append(env.observation_space[i].shape[0])
        # print(env.observation_space[i])
        # exit()
    # The centralized critic sees the concatenation of all actor observations.
    critic_dims = sum(actor_dims)
    # action space is a list of arrays, assume each agent has same action space
    n_actions = env.n_actions
    # print(env.action_space[0])
    # exit()
    # print(env.action_space[0].shape[0])
    # exit()
    # print(f"actor_dims = {actor_dims}")
    # print(f"critic_dims = {critic_dims}")
    print(f"number of agents = {n_agents}")
    print(f"number of actions = {n_actions}")
    maddpg_agents = MADDPG(actor_dims, critic_dims, n_agents, n_actions,
                           fc1=64, fc2=64, alpha=0.01, beta=0.01, scenario=scenario,
                           chkpt_dir='tmp/maddpg/')
    memory = MultiAgentReplayBuffer(int(1e6), critic_dims, actor_dims,
                                    n_actions, n_agents, batch_size=1024)
    if evaluate:
        maddpg_agents.load_checkpoint()
    avg_sw_df = pd.DataFrame(columns=['episode_ID', 'avg_sw'])
    if debug:
        env.verbose = True  # print the details of process
        N_GAMES = 3
    else:
        env.verbose = False
    # --- Training loop -----------------------------------------------------
    for i in range(N_GAMES):
        # for i in range(1):
        obs, om_sw = env.reset()
        if env.verbose:
            print("df_tasks:")
            print(env.df_tasks.head(20))
            print(env.df_nodes)
            # exit()
        score = 0
        done = False
        episode_step_cntr = 0
        node_0_actions = []
        while not done:
            if evaluate:
                env.render()
                # time.sleep(0.1)  # to slow down the action for the video
            actions_probs = maddpg_agents.choose_action(obs)
            # choose the action according to the probabilities
            actions = []
            for actions_prob in actions_probs:
                # Normalize so the (noisy) outputs form a valid distribution.
                s = sum(actions_prob)
                p = [i / s for i in actions_prob]
                # a = np.random.choice(n_actions, 1, p=action)
                action = np.random.choice(n_actions, 1, p=p)
                actions.append(action[0])  # action in {1,2,...,10}
            node_0_actions.append(actions[0])
            # the actions are greater than one because of noises
            # actions = np.concatenate(actions)
            # print(f"actions_probs = {actions_probs}")
            # print(f"actions = {actions}")
            # exit()
            obs_, reward, done, sw_increase = env.step(actions)
            reward = reward * n_agents
            # print(total_steps_cntr)
            # print(f"sw_increase = {reward}")
            if episode_step_cntr >= MAX_STEPS - 1:
                done = True
            else:
                # Store the joint transition for centralized critic training.
                state = obs_list_to_state_vector(obs)
                state_ = obs_list_to_state_vector(obs_)
                memory.store_transition(obs, state, actions_probs, reward, obs_,
                                        state_, done)
                # print(f"store transition:")
                # print(f"current observation = {obs}")
                # print(f"next observation = {obs_}")
                # print(f"current state = {state}")
                # print(f"next state = {state_}")
                # print(f"actions = {actions_probs}")
                # print(f"reward = {reward}")
                # exit()
            # do not learn when evaluate, learn every 100 steps
            if total_steps_cntr % 100 == 0 and not evaluate:
                maddpg_agents.learn(memory)
            # set the current state to new state
            obs = obs_
            score += sw_increase
            total_steps_cntr += 1
            episode_step_cntr += 1
        node_0_actions_df = pandas.DataFrame(node_0_actions,
                                            columns=['action_of_node_0'])
        # print(f"social welfare of episode {i} = {score}")
        # print(f"social welfare (achieved by OM) = {om_sw}")
        sw_history_om.append(om_sw)
        score_history.append(score)
        # average score of previous 100 games
        avg_score = np.mean(score_history[-100:])
        avg_sw_om = np.mean(sw_history_om[-100:])
        # print("score_history")
        # print(score_history)
        # print("avg_score")
        # print(avg_score)
        if env.verbose:
            print('episode', i,
                  'social welfare by RL {:.1f}'.format(score))
            print('episode', i,
                  'social welfare by OM {:.1f}'.format(om_sw))
        if not evaluate:
            # Checkpoint whenever the rolling average improves.
            if avg_score > best_score:
                maddpg_agents.save_checkpoint()
                best_score = avg_score
        if i % PRINT_INTERVAL == 0 and i > 0:
            print('episode', i,
                  'average social welfare by RL {:.1f}'.format(avg_score))
            print('episode', i,
                  'average social welfare by OM {:.1f}'.format(avg_sw_om))
            # print actions every * episodes
            # print("actions:")
            # print(actions)
            part_tasks = env.df_tasks['valuation_coefficient']
            part_tasks = part_tasks[0:MAX_STEPS + 1]
            # print("part_tasks:")
            # print(part_tasks)
            # print("actions of node 0:")
            # print(node_0_actions_df)
            # # print actions of node 0 (the high-capacity node)
            # watch_actions_df = pd.DataFrame(
            #     columns=['valuation_coefficient', 'node_0_action'])
            # watch_actions_df['valuation_coefficient'] = part_tasks
            # watch_actions_df['node_0_action'] = node_0_actions_df
            # print(watch_actions_df)
            # # exit()
        # Append this episode's rolling average for later export.
        df = pd.DataFrame({'episode_ID': [i],
                           'avg_sw': [avg_score]})
        avg_sw_df = avg_sw_df.append(df, ignore_index=True)
        if i >= 10000:
            outdir = '/Users/fan/OneDrive - University of Southampton/Chandler\'s Projects/Edge-Cloud-Resource-Allocation-Using-MARL-and-Auction/scripts/MADDPG/tmp'
            outname = 'average_social_welfare.csv'
            fullname = os.path.join(outdir, outname)
            print("... saving to .csv file ...")
            avg_sw_df.to_csv(fullname, index=False)
| [
"pandas.DataFrame",
"scripts.MADDPG.maddpg.MADDPG",
"os.path.join",
"numpy.mean",
"numpy.array",
"numpy.random.choice",
"scripts.MADDPG.edge_env.EdgeEnv",
"pandas.set_option",
"numpy.concatenate"
] | [((371, 417), 'pandas.set_option', 'pandas.set_option', (['"""display.max_columns"""', 'None'], {}), "('display.max_columns', None)\n", (388, 417), False, 'import pandas\n'), ((548, 560), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (556, 560), True, 'import numpy as np\n'), ((1369, 1516), 'scripts.MADDPG.edge_env.EdgeEnv', 'EdgeEnv', (['avg_resource_capacity', 'avg_unit_cost'], {'n_nodes': '(1)', 'n_timesteps': '(10)', 'n_tasks': '(500)', 'max_steps': 'MAX_STEPS', 'n_actions': '(2)', 'p_high_value_tasks': '(0.2)'}), '(avg_resource_capacity, avg_unit_cost, n_nodes=1, n_timesteps=10,\n n_tasks=500, max_steps=MAX_STEPS, n_actions=2, p_high_value_tasks=0.2)\n', (1376, 1516), False, 'from scripts.MADDPG.edge_env import EdgeEnv\n'), ((2169, 2309), 'scripts.MADDPG.maddpg.MADDPG', 'MADDPG', (['actor_dims', 'critic_dims', 'n_agents', 'n_actions'], {'fc1': '(64)', 'fc2': '(64)', 'alpha': '(0.01)', 'beta': '(0.01)', 'scenario': 'scenario', 'chkpt_dir': '"""tmp/maddpg/"""'}), "(actor_dims, critic_dims, n_agents, n_actions, fc1=64, fc2=64, alpha=\n 0.01, beta=0.01, scenario=scenario, chkpt_dir='tmp/maddpg/')\n", (2175, 2309), False, 'from scripts.MADDPG.maddpg import MADDPG\n'), ((2514, 2560), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['episode_ID', 'avg_sw']"}), "(columns=['episode_ID', 'avg_sw'])\n", (2526, 2560), True, 'import pandas as pd\n'), ((605, 633), 'numpy.concatenate', 'np.concatenate', (['[state, obs]'], {}), '([state, obs])\n', (619, 633), True, 'import numpy as np\n'), ((5235, 5297), 'pandas.DataFrame', 'pandas.DataFrame', (['node_0_actions'], {'columns': "['action_of_node_0']"}), "(node_0_actions, columns=['action_of_node_0'])\n", (5251, 5297), False, 'import pandas\n'), ((5570, 5599), 'numpy.mean', 'np.mean', (['score_history[-100:]'], {}), '(score_history[-100:])\n', (5577, 5599), True, 'import numpy as np\n'), ((5620, 5649), 'numpy.mean', 'np.mean', (['sw_history_om[-100:]'], {}), '(sw_history_om[-100:])\n', (5627, 5649), True, 'import 
numpy as np\n'), ((7542, 7571), 'os.path.join', 'os.path.join', (['outdir', 'outname'], {}), '(outdir, outname)\n', (7554, 7571), False, 'import os\n'), ((7158, 7214), 'pandas.DataFrame', 'pd.DataFrame', (["{'episode_ID': [i], 'avg_sw': [avg_score]}"], {}), "({'episode_ID': [i], 'avg_sw': [avg_score]})\n", (7170, 7214), True, 'import pandas as pd\n'), ((3558, 3593), 'numpy.random.choice', 'np.random.choice', (['n_actions', '(1)'], {'p': 'p'}), '(n_actions, 1, p=p)\n', (3574, 3593), True, 'import numpy as np\n')] |
"""
Parameter uncertainties and likelihood ratio tests using Godambe information.
"""
import numpy
from dadi import Inference
from dadi.Spectrum_mod import Spectrum
def hessian_elem(func, f0, p0, ii, jj, eps, args=()):
    """
    Calculate element [ii][jj] of the Hessian matrix, a matrix
    of partial second derivatives w.r.t. to parameters ii and jj

    func: Model function
    f0: Evaluation of func at p0
    p0: Parameters for func
    eps: List of absolute step sizes to use for each parameter when taking
         finite differences.
    args: Additional arguments to func
    """
    # Note that we need to specify dtype=float, to avoid this being an integer
    # array which will silently fail when adding fractional eps.
    pwork = numpy.array(p0, copy=True, dtype=float)
    if ii == jj:
        # Diagonal element: d^2 f / dp_ii^2.
        if pwork[ii] != 0:
            # Central second difference: (f(p+h) - 2 f(p) + f(p-h)) / h^2.
            pwork[ii] = p0[ii] + eps[ii]
            fp = func(pwork, *args)
            pwork[ii] = p0[ii] - eps[ii]
            fm = func(pwork, *args)
            element = (fp - 2*f0 + fm)/eps[ii]**2
        if pwork[ii] == 0:
            # Parameter equal to zero: one-sided (forward) second difference,
            # avoiding a step to negative parameter values.
            # NOTE(review): this re-tests pwork[ii] after the branch above
            # may have reassigned it; correct as long as p0[ii] - eps[ii] is
            # never exactly zero — confirm for pathological step sizes.
            pwork[ii] = p0[ii] + 2*eps[ii]
            fpp = func(pwork, *args)
            pwork[ii] = p0[ii] + eps[ii]
            fp = func(pwork, *args)
            element = (fpp - 2*fp + f0)/eps[ii]**2
    else:
        # Off-diagonal element: mixed partial d^2 f / (dp_ii dp_jj).
        if pwork[ii] != 0 and pwork[jj] != 0:
            # Four-point central difference stencil.
            # f(xi + hi, xj + h)
            pwork[ii] = p0[ii] + eps[ii]
            pwork[jj] = p0[jj] + eps[jj]
            fpp = func(pwork, *args)
            # f(xi + hi, xj - hj)
            pwork[ii] = p0[ii] + eps[ii]
            pwork[jj] = p0[jj] - eps[jj]
            fpm = func(pwork, *args)
            # f(xi - hi, xj + hj)
            pwork[ii] = p0[ii] - eps[ii]
            pwork[jj] = p0[jj] + eps[jj]
            fmp = func(pwork, *args)
            # f(xi - hi, xj - hj)
            pwork[ii] = p0[ii] - eps[ii]
            pwork[jj] = p0[jj] - eps[jj]
            fmm = func(pwork, *args)
            element = (fpp - fpm - fmp + fmm)/(4 * eps[ii]*eps[jj])
        else:
            # At least one parameter is zero: one-sided stencil reusing f0.
            # f(xi + hi, xj + h)
            pwork[ii] = p0[ii] + eps[ii]
            pwork[jj] = p0[jj] + eps[jj]
            fpp = func(pwork, *args)
            # f(xi + hi, xj)
            pwork[ii] = p0[ii] + eps[ii]
            pwork[jj] = p0[jj]
            fpm = func(pwork, *args)
            # f(xi, xj + hj)
            pwork[ii] = p0[ii]
            pwork[jj] = p0[jj] + eps[jj]
            fmp = func(pwork, *args)
            element = (fpp - fpm - fmp + f0)/(eps[ii]*eps[jj])
    return element
def get_hess(func, p0, eps, args=()):
    """
    Calculate Hessian matrix of partial second derivatives.
    Hij = dfunc/(dp_i dp_j)

    func: Model function
    p0: Parameter values to take derivative around
    eps: Fractional stepsize to use when taking finite-difference derivatives
    args: Additional arguments to func
    """
    nparams = len(p0)
    # Absolute step per parameter: fractional for nonzero values, and the
    # raw fraction itself for parameters that are exactly zero.
    steps = numpy.asarray([eps*pval if pval != 0 else eps for pval in p0],
                          dtype=float)
    f0 = func(p0, *args)
    hess = numpy.empty((nparams, nparams))
    # Fill the upper triangle and mirror it; the Hessian is symmetric.
    for ii in range(nparams):
        for jj in range(ii, nparams):
            elem = hessian_elem(func, f0, p0, ii, jj, steps, args=args)
            hess[ii][jj] = elem
            hess[jj][ii] = elem
    return hess
def get_grad(func, p0, eps, args=()):
    """
    Calculate gradient vector of func at p0 by finite differences.

    func: Model function
    p0: Parameters for func
    eps: Fractional stepsize to use when taking finite-difference derivatives
    args: Additional arguments to func
    """
    # Absolute step per parameter: fractional for nonzero values, and the
    # raw fraction itself for parameters that are exactly zero.
    steps = numpy.asarray([eps*pval if pval != 0 else eps for pval in p0],
                          dtype=float)
    grad = numpy.empty([len(p0), 1])
    for ii, pval in enumerate(p0):
        # Work on a float copy so fractional steps are not truncated.
        pwork = numpy.array(p0, copy=True, dtype=float)
        if pval != 0:
            # Central difference.
            pwork[ii] = pval + steps[ii]
            fp = func(pwork, *args)
            pwork[ii] = pval - steps[ii]
            fm = func(pwork, *args)
            grad[ii] = (fp - fm)/(2*steps[ii])
        else:
            # One-sided forward difference for parameters at zero.
            pwork[ii] = pval + steps[ii]
            fp = func(pwork, *args)
            pwork[ii] = pval
            fm = func(pwork, *args)
            grad[ii] = (fp - fm)/steps[ii]
    return grad
def get_godambe(func_ex, grid_pts, all_boot, p0, data, eps, log=False,
                just_hess=False):
    """
    Godambe information and Hessian matrices

    NOTE: Assumes that last parameter in p0 is theta.

    func_ex: Model function
    grid_pts: Number of grid points to evaluate the model function
    all_boot: List of bootstrap frequency spectra
    p0: Best-fit parameters for func_ex.
    data: Original data frequency spectrum
    eps: Fractional stepsize to use when taking finite-difference derivatives
    log: If True, calculate derivatives in terms of log-parameters
    just_hess: If True, only evaluate and return the Hessian matrix

    Returns (godambe, hess, J), or only hess when just_hess is True.
    """
    ns = data.sample_sizes
    # Cache evaluations of the frequency spectrum inside our hessian/J
    # evaluation function
    cache = {}
    def func(params, data):
        # Log-likelihood of `data` as a function of the parameters; model
        # spectra are memoized since finite-difference stencils evaluate
        # the same parameter point repeatedly.
        key = (tuple(params), tuple(ns), tuple(grid_pts))
        if key not in cache:
            cache[key] = func_ex(params, ns, grid_pts)
        fs = cache[key]
        return Inference.ll(fs, data)
    def log_func(logparams, data):
        # Same likelihood, parameterized by log(params).
        return func(numpy.exp(logparams), data)

    # First calculate the observed hessian
    if not log:
        hess = -get_hess(func, p0, eps, args=[data])
    else:
        hess = -get_hess(log_func, numpy.log(p0), eps, args=[data])

    if just_hess:
        return hess

    # Now the expectation of J over the bootstrap data
    J = numpy.zeros((len(p0), len(p0)))
    for ii, boot in enumerate(all_boot):
        boot = Spectrum(boot)
        if not log:
            grad_temp = get_grad(func, p0, eps, args=[boot])
        else:
            grad_temp = get_grad(log_func, numpy.log(p0), eps, args=[boot])

        # Outer product of the score vector, averaged over bootstraps.
        J_temp = numpy.outer(grad_temp, grad_temp)
        J = J + J_temp
    J = J/len(all_boot)

    # G = H*J^-1*H
    J_inv = numpy.linalg.inv(J)
    godambe = numpy.dot(numpy.dot(hess, J_inv), hess)
    return godambe, hess, J
def GIM_uncert(func_ex, grid_pts, all_boot, p0, data, log=False,
               multinom=True, eps=0.01):
    """
    Standard deviations of parameter estimates from the Godambe Information
    Matrix.

    func_ex: Model function
    all_boot: List of bootstrap frequency spectra
    p0: Best-fit parameters for func_ex
    data: Original data frequency spectrum
    eps: Fractional stepsize to use when taking finite-difference derivatives
    log: If True, assume log-normal distribution of parameters. Returned
         values are then the standard deviations of the *logs* of the
         parameter values, which can be interpreted as relative parameter
         uncertainties.
    multinom: If True, assume the model is defined without an explicit theta
              parameter. theta is then appended internally, so uncertainty in
              it propagates into the other parameters, and the final entry of
              the returned uncertainties corresponds to theta.
    """
    if multinom:
        # Wrap the model so theta becomes an explicit trailing parameter.
        base_func = func_ex
        model = base_func(p0, data.sample_sizes, grid_pts)
        theta_opt = Inference.optimal_sfs_scaling(model, data)
        p0 = list(p0) + [theta_opt]
        func_ex = lambda p, ns, pts: p[-1]*base_func(p[:-1], ns, pts)
    godambe, hess, J = get_godambe(func_ex, grid_pts, all_boot, p0, data,
                                   eps, log)
    # The asymptotic covariance is the inverse Godambe matrix; report the
    # standard deviations from its diagonal.
    covariance = numpy.linalg.inv(godambe)
    return numpy.sqrt(numpy.diag(covariance))
def FIM_uncert(func_ex, grid_pts, p0, data, log=False, multinom=True, eps=0.01):
    """
    Standard deviations of parameter estimates from the Fisher Information
    Matrix (the observed Hessian of the log-likelihood).

    func_ex: Model function
    p0: Best-fit parameters for func_ex
    data: Original data frequency spectrum
    eps: Fractional stepsize to use when taking finite-difference derivatives
    log: If True, assume log-normal distribution of parameters. Returned
         values are then the standard deviations of the *logs* of the
         parameter values, which can be interpreted as relative parameter
         uncertainties.
    multinom: If True, assume the model is defined without an explicit theta
              parameter. theta is then appended internally, so uncertainty in
              it propagates into the other parameters, and the final entry of
              the returned uncertainties corresponds to theta.
    """
    if multinom:
        # Wrap the model so theta becomes an explicit trailing parameter.
        base_func = func_ex
        model = base_func(p0, data.sample_sizes, grid_pts)
        theta_opt = Inference.optimal_sfs_scaling(model, data)
        p0 = list(p0) + [theta_opt]
        func_ex = lambda p, ns, pts: p[-1]*base_func(p[:-1], ns, pts)
    # No bootstraps needed: only the Hessian is evaluated.
    hess = get_godambe(func_ex, grid_pts, [], p0, data, eps, log,
                       just_hess=True)
    covariance = numpy.linalg.inv(hess)
    return numpy.sqrt(numpy.diag(covariance))
def LRT_adjust(func_ex, grid_pts, all_boot, p0, data, nested_indices,
               multinom=True, eps=0.01):
    """
    First-order moment matching adjustment factor for likelihood ratio test

    func_ex: Model function for complex model
    all_boot: List of bootstrap frequency spectra
    p0: Best-fit parameters for the simple model, with nested parameter
        explicity defined. Although equal to values for simple model, should
        be in a list form that can be taken in by the complex model you'd like
        to evaluate.
    data: Original data frequency spectrum
    eps: Fractional stepsize to use when taking finite-difference derivatives
    nested_indices: List of positions of nested parameters in complex model
                    parameter list
    multinom: If True, assume model is defined without an explicit parameter for
              theta. Because uncertainty in theta must be accounted for to get
              correct uncertainties for other parameters, this function will
              automatically consider theta if multinom=True. In that case, the
              final entry of the returned uncertainties will correspond to
              theta.
    """
    if multinom:
        # Fold the optimal theta into the parameter vector so its
        # uncertainty is accounted for.
        func_multi = func_ex
        model = func_multi(p0, data.sample_sizes, grid_pts)
        theta_opt = Inference.optimal_sfs_scaling(model, data)
        p0 = list(p0) + [theta_opt]
        func_ex = lambda p, ns, pts: p[-1]*func_multi(p[:-1], ns, pts)

    # We only need to take derivatives with respect to the parameters in the
    # complex model that have been set to specified values in the simple model
    def diff_func(diff_params, ns, grid_pts):
        # diff_params argument is only the nested parameters. All the rest
        # should come from p0
        full_params = numpy.array(p0, copy=True, dtype=float)
        # Use numpy indexing to set relevant parameters
        full_params[nested_indices] = diff_params
        return func_ex(full_params, ns, grid_pts)

    p_nested = numpy.asarray(p0)[nested_indices]
    GIM, H, J = get_godambe(diff_func, grid_pts, all_boot, p_nested, data, eps,
                            log=False)
    # Adjustment factor: (number of nested params) / tr(J H^-1).
    adjust = len(nested_indices)/numpy.trace(numpy.dot(J, numpy.linalg.inv(H)))
    return adjust
def sum_chi2_ppf(x, weights=(0,1)):
    """
    Percent point function (inverse of cdf) of weighted sum of chi^2
    distributions.

    x: Value(s) at which to evaluate ppf
    weights: Weights of chi^2 distributions, beginning with zero d.o.f.
             For example, weights=(0,1) is the normal chi^2 distribution with 1
             d.o.f. For single parameters on the boundary, the correct
             distribution for the LRT is 0.5*chi^2_0 + 0.5*chi^2_1, which would
             be weights=(0.5,0.5).
    """
    import scipy.stats.distributions as ssd
    # Ensure that weights are valid
    if abs(numpy.sum(weights) - 1) > 1e-6:
        raise ValueError('Weights must sum to 1.')
    # A little clunky, but we want to handle x = 0.5, and x = [2, 3, 4]
    # correctly. So if x is a scalar, we record that fact so we can return a
    # scalar on output.
    # Bug fix: scalar_input must always be assigned — previously it was set
    # only in the scalar branch, so array/list input raised NameError below.
    scalar_input = numpy.isscalar(x)
    # Convert x into an array, so we can index it easily.
    x = numpy.atleast_1d(x)

    # Calculate total cdf of all chi^2 dists with dof > 1.
    # (ssd.chi2.cdf(x,0) is always nan, so we avoid that.)
    cdf = numpy.sum([w*ssd.chi2.cdf(x, d+1) for (d, w)
                     in enumerate(weights[1:])], axis=0)
    # Add in contribution from 0 d.o.f.
    cdf[x > 0] += weights[0]
    # Convert to ppf
    ppf = 1-cdf

    if scalar_input:
        return ppf[0]
    else:
        return ppf
| [
"numpy.outer",
"numpy.sum",
"numpy.log",
"dadi.Inference.optimal_sfs_scaling",
"numpy.isscalar",
"numpy.asarray",
"scipy.stats.distributions.chi2.cdf",
"dadi.Inference.ll",
"numpy.linalg.inv",
"numpy.array",
"numpy.exp",
"numpy.dot",
"numpy.atleast_1d",
"dadi.Spectrum_mod.Spectrum"
] | [((759, 798), 'numpy.array', 'numpy.array', (['p0'], {'copy': '(True)', 'dtype': 'float'}), '(p0, copy=True, dtype=float)\n', (770, 798), False, 'import numpy\n'), ((6533, 6552), 'numpy.linalg.inv', 'numpy.linalg.inv', (['J'], {}), '(J)\n', (6549, 6552), False, 'import numpy\n'), ((12815, 12832), 'numpy.isscalar', 'numpy.isscalar', (['x'], {}), '(x)\n', (12829, 12832), False, 'import numpy\n'), ((12928, 12947), 'numpy.atleast_1d', 'numpy.atleast_1d', (['x'], {}), '(x)\n', (12944, 12947), False, 'import numpy\n'), ((4183, 4222), 'numpy.array', 'numpy.array', (['p0'], {'copy': '(True)', 'dtype': 'float'}), '(p0, copy=True, dtype=float)\n', (4194, 4222), False, 'import numpy\n'), ((5728, 5750), 'dadi.Inference.ll', 'Inference.ll', (['fs', 'data'], {}), '(fs, data)\n', (5740, 5750), False, 'from dadi import Inference\n'), ((6216, 6230), 'dadi.Spectrum_mod.Spectrum', 'Spectrum', (['boot'], {}), '(boot)\n', (6224, 6230), False, 'from dadi.Spectrum_mod import Spectrum\n'), ((6420, 6453), 'numpy.outer', 'numpy.outer', (['grad_temp', 'grad_temp'], {}), '(grad_temp, grad_temp)\n', (6431, 6453), False, 'import numpy\n'), ((6577, 6599), 'numpy.dot', 'numpy.dot', (['hess', 'J_inv'], {}), '(hess, J_inv)\n', (6586, 6599), False, 'import numpy\n'), ((7884, 7926), 'dadi.Inference.optimal_sfs_scaling', 'Inference.optimal_sfs_scaling', (['model', 'data'], {}), '(model, data)\n', (7913, 7926), False, 'from dadi import Inference\n'), ((9390, 9432), 'dadi.Inference.optimal_sfs_scaling', 'Inference.optimal_sfs_scaling', (['model', 'data'], {}), '(model, data)\n', (9419, 9432), False, 'from dadi import Inference\n'), ((10995, 11037), 'dadi.Inference.optimal_sfs_scaling', 'Inference.optimal_sfs_scaling', (['model', 'data'], {}), '(model, data)\n', (11024, 11037), False, 'from dadi import Inference\n'), ((11475, 11514), 'numpy.array', 'numpy.array', (['p0'], {'copy': '(True)', 'dtype': 'float'}), '(p0, copy=True, dtype=float)\n', (11486, 11514), False, 'import numpy\n'), ((11687, 
11704), 'numpy.asarray', 'numpy.asarray', (['p0'], {}), '(p0)\n', (11700, 11704), False, 'import numpy\n'), ((5806, 5826), 'numpy.exp', 'numpy.exp', (['logparams'], {}), '(logparams)\n', (5815, 5826), False, 'import numpy\n'), ((8144, 8165), 'numpy.linalg.inv', 'numpy.linalg.inv', (['GIM'], {}), '(GIM)\n', (8160, 8165), False, 'import numpy\n'), ((9652, 9671), 'numpy.linalg.inv', 'numpy.linalg.inv', (['H'], {}), '(H)\n', (9668, 9671), False, 'import numpy\n'), ((5992, 6005), 'numpy.log', 'numpy.log', (['p0'], {}), '(p0)\n', (6001, 6005), False, 'import numpy\n'), ((6369, 6382), 'numpy.log', 'numpy.log', (['p0'], {}), '(p0)\n', (6378, 6382), False, 'import numpy\n'), ((11900, 11919), 'numpy.linalg.inv', 'numpy.linalg.inv', (['H'], {}), '(H)\n', (11916, 11919), False, 'import numpy\n'), ((12552, 12570), 'numpy.sum', 'numpy.sum', (['weights'], {}), '(weights)\n', (12561, 12570), False, 'import numpy\n'), ((13089, 13111), 'scipy.stats.distributions.chi2.cdf', 'ssd.chi2.cdf', (['x', '(d + 1)'], {}), '(x, d + 1)\n', (13101, 13111), True, 'import scipy.stats.distributions as ssd\n')] |
import unittest
import pytest
import numpy as np
from yaonet.tensor import BasicTensor, Tensor
class TestTensorAdd(unittest.TestCase):
    def test_simple_add(self):
        """Element-wise add of two same-shape tensors, plus backward pass."""
        lhs = Tensor([1, 2, 3], requires_grad=True)
        rhs = Tensor([4, 5, 6], requires_grad=True)
        total = lhs + rhs
        assert total.tolist() == [5, 7, 9]

        total.backward(np.array([-1., -2., -3.]))
        assert lhs.grad.tolist() == [-1, -2, -3]
        assert rhs.grad.tolist() == [-1, -2, -3]

        # In-place scalar add rebinds the value and drops the gradient.
        lhs += 0.1
        assert lhs.grad is None
        assert lhs.tolist() == [1.1, 2.1, 3.1]

    def test_broadcast_add(self):
        """(2, 3) + (3,): the 1-D operand is viewed as (1, 3) and expanded.

        Broadcasting allows 1s to be prepended to either shape, and any
        dimension of size 1 to be stretched to match the other operand.
        """
        mat = Tensor([[1, 2, 3], [4, 5, 6]], requires_grad=True)  # (2, 3)
        vec = Tensor([7, 8, 9], requires_grad=True)              # (3,)

        out = mat + vec  # shape (2, 3)
        assert out.tolist() == [[8, 10, 12], [11, 13, 15]]

        out.backward(np.array([[1, 1, 1], [1, 1, 1]]))
        assert mat.grad.tolist() == [[1, 1, 1], [1, 1, 1]]
        # Gradient for the broadcast operand sums over the expanded axis.
        assert vec.grad.tolist() == [2, 2, 2]

    def test_broadcast_add2(self):
        """(2, 3) + (1, 3): a singleton leading axis is stretched to match."""
        mat = Tensor([[1, 2, 3], [4, 5, 6]], requires_grad=True)  # (2, 3)
        row = Tensor([[7, 8, 9]], requires_grad=True)            # (1, 3)

        out = mat + row
        assert out.tolist() == [[8, 10, 12], [11, 13, 15]]

        out.backward(np.array([[1, 1, 1], [1, 1, 1]]))
        assert mat.grad.tolist() == [[1, 1, 1], [1, 1, 1]]
        assert row.grad.tolist() == [[2, 2, 2]]
| [
"yaonet.tensor.Tensor",
"numpy.array"
] | [((182, 219), 'yaonet.tensor.Tensor', 'Tensor', (['[1, 2, 3]'], {'requires_grad': '(True)'}), '([1, 2, 3], requires_grad=True)\n', (188, 219), False, 'from yaonet.tensor import BasicTensor, Tensor\n'), ((233, 270), 'yaonet.tensor.Tensor', 'Tensor', (['[4, 5, 6]'], {'requires_grad': '(True)'}), '([4, 5, 6], requires_grad=True)\n', (239, 270), False, 'from yaonet.tensor import BasicTensor, Tensor\n'), ((1161, 1211), 'yaonet.tensor.Tensor', 'Tensor', (['[[1, 2, 3], [4, 5, 6]]'], {'requires_grad': '(True)'}), '([[1, 2, 3], [4, 5, 6]], requires_grad=True)\n', (1167, 1211), False, 'from yaonet.tensor import BasicTensor, Tensor\n'), ((1237, 1274), 'yaonet.tensor.Tensor', 'Tensor', (['[7, 8, 9]'], {'requires_grad': '(True)'}), '([7, 8, 9], requires_grad=True)\n', (1243, 1274), False, 'from yaonet.tensor import BasicTensor, Tensor\n'), ((1653, 1703), 'yaonet.tensor.Tensor', 'Tensor', (['[[1, 2, 3], [4, 5, 6]]'], {'requires_grad': '(True)'}), '([[1, 2, 3], [4, 5, 6]], requires_grad=True)\n', (1659, 1703), False, 'from yaonet.tensor import BasicTensor, Tensor\n'), ((1731, 1770), 'yaonet.tensor.Tensor', 'Tensor', (['[[7, 8, 9]]'], {'requires_grad': '(True)'}), '([[7, 8, 9]], requires_grad=True)\n', (1737, 1770), False, 'from yaonet.tensor import BasicTensor, Tensor\n'), ((355, 383), 'numpy.array', 'np.array', (['[-1.0, -2.0, -3.0]'], {}), '([-1.0, -2.0, -3.0])\n', (363, 383), True, 'import numpy as np\n'), ((1417, 1449), 'numpy.array', 'np.array', (['[[1, 1, 1], [1, 1, 1]]'], {}), '([[1, 1, 1], [1, 1, 1]])\n', (1425, 1449), True, 'import numpy as np\n'), ((1898, 1930), 'numpy.array', 'np.array', (['[[1, 1, 1], [1, 1, 1]]'], {}), '([[1, 1, 1], [1, 1, 1]])\n', (1906, 1930), True, 'import numpy as np\n')] |
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=undefined-all-variable
"""NLP Toolkit Data Stream API. It allows easy and customizable streaming of
corpora and dataset files. Files can be streamed into formats that are
ready for training and evaluation."""
from __future__ import print_function
import glob
import multiprocessing
import os
import random
import sys
import threading
import traceback
import numpy as np
import mxnet as mx
from mxnet.gluon.data import RandomSampler, SequentialSampler, Sampler
# Python 2 named the stdlib queue module "Queue"; fall back to the
# Python 3 spelling when that import fails.
try:
    import Queue as queue
except ImportError:
    import queue
# Public API of this module.
__all__ = [
    'DataStream', 'SimpleDataStream', 'DatasetStream', 'SimpleDatasetStream',
    'PrefetchingStream']
class DataStream(object):
    """Abstract Data Stream Interface.

    DataStreams help avoid loading big datasets into memory.  A DataStream
    is an iterable: every time an iteration is requested (a for loop, or
    iter(datastream)), a fresh iterator over all samples is produced.
    Streams can be transformed lazily via `transform()`, which returns a new
    DataStream over the transformed samples.
    """

    def __iter__(self):
        """Return a fresh iterator over every element of the stream.

        Returns
        -------
        iterator
            An object implementing the Python *iterator protocol*.
        """
        raise NotImplementedError

    def transform(self, fn):
        """Lazily apply `fn` to every sample while streaming.

        Returns
        -------
        DataStream
            A stream yielding `fn` applied to each sample on the fly.
        """
        return _LazyTransformDataStream(self, fn)
class SimpleDataStream(DataStream):
    """Expose an arbitrary iterable through the DataStream API.

    The wrapper adds nothing to iteration itself; it only makes the
    DataStream helpers (such as lazy `transform`) available on top of a
    plain iterable.
    """

    def __init__(self, iterable):
        self._iterable = iterable

    def __iter__(self):
        return iter(self._iterable)
class _LazyTransformDataStream(DataStream):
    """Data stream that lazily applies ``fn`` to each sample of ``stream``.

    Tuple samples are splatted into ``fn(*item)``; all other samples are
    passed as a single argument.  The tuple check is done once, on the first
    item only — the stream is assumed to be homogeneous.
    """
    def __init__(self, stream, fn):
        self._stream = stream
        self._fn = fn
    def __iter__(self):
        stream_iter = iter(self._stream)
        # Yield must be hidden in closure so that __iter__ is called before
        # __next__ is called. This is important, as calling iter(self._stream)
        # may trigger multi-threaded or multi-processing prefetching of the
        # stream.
        def _closure():
            # Pull the first item separately so we can sniff its type.
            try:
                item = next(stream_iter)
            except StopIteration:
                return
            istuple = isinstance(item, tuple)
            if istuple:
                yield self._fn(*item)
                # Drain the rest, unpacking each tuple into fn's arguments.
                while True:
                    try:
                        yield self._fn(*next(stream_iter))
                    except StopIteration:
                        return
            else:
                yield self._fn(item)
                # Drain the rest, passing each sample as a single argument.
                while True:
                    try:
                        yield self._fn(next(stream_iter))
                    except StopIteration:
                        return
        return _closure()
class DatasetStream(DataStream):
    """Abstract Dataset Stream Interface.

    A DatasetStream is a DataStream whose samples are themselves
    `mxnet.gluon.data.Dataset` objects, each representing a chunk or shard
    of some large dataset.

    Working on sizeable chunks at a time helps amortize per-sample
    preprocessing overhead, similar in spirit to batching during training.
    """

    def __iter__(self):
        raise NotImplementedError
class SimpleDatasetStream(DatasetStream):
    """A simple stream of Datasets.

    The SimpleDatasetStream is created from multiple files based on provided
    `file_pattern`. One file is read at a time and a corresponding Dataset is
    returned. The Dataset is created based on the file and the kwargs passed
    to SimpleDatasetStream.

    Parameters
    ----------
    dataset : class
        The class for which to create an object for every file. kwargs are
        passed to this class.
    file_pattern: str
        Path to the input text files.
    file_sampler : str or gluon.data.Sampler, defaults to 'random'
        The sampler used to sample a file. The following string values are
        supported:

        - 'sequential': SequentialSampler
        - 'random': RandomSampler
    kwargs
        All other keyword arguments are passed to the dataset constructor.
    """
    def __init__(self, dataset, file_pattern, file_sampler='random', **kwargs):
        if not isinstance(file_pattern, str):
            raise TypeError('file_pattern must be str, but got %s'%type(file_pattern))
        self._dataset = dataset
        file_pattern = os.path.expanduser(file_pattern)
        self._files = sorted(glob.glob(file_pattern))
        if not self._files:
            # Fail fast: an empty file list is almost always a path mistake.
            raise ValueError('Cannot find any file with path "%s"'%file_pattern)
        self._file_sampler = self._get_sampler(file_sampler)
        self._kwargs = kwargs

    def _get_sampler(self, sampler):
        # Accept a ready-made Sampler as-is, or build one from its name.
        if isinstance(sampler, Sampler):
            return sampler
        if isinstance(sampler, str):
            length = len(self._files)
            if sampler == 'random':
                return RandomSampler(length)
            if sampler == 'sequential':
                return SequentialSampler(length)
        # BUG FIX: the two adjacent literals used to join as "...) ora
        # `gluon.data.Sampler`" — a separating space was missing.
        raise ValueError('file_sampler must be a supported str ("random", "sequential") or '
                         'a `gluon.data.Sampler`, but got %s'%(sampler))

    def __iter__(self):
        # Lazily construct one Dataset per sampled file.
        for file_idx in iter(self._file_sampler):
            filename = self._files[file_idx]
            yield self._dataset(filename, **self._kwargs)
class _Prefetcher(object):
    """Internal shared prefetcher logic.

    Subclasses mix this with threading.Thread or multiprocessing.Process and
    supply the three queues below.  Protocol: the worker pushes exactly one
    entry to BOTH _dataq and _errorq per item, so the master's paired get()
    calls in __next__ stay in lockstep with the worker.
    """
    _dataq = None  # Data queue transmits prefetched elements
    _controlq = None  # Control queue to instruct thread / process shutdown
    _errorq = None  # Error queue to transmit exceptions from worker to master
    _checked_start = False  # True once startup has been checked by _check_start
    def __init__(self, stream, num_prefetch, seed, np_seed, mx_seed):
        super(_Prefetcher, self).__init__()
        self.stream = stream
        assert num_prefetch > 0, 'Unbounded Prefetcher is unsupported.'
        self.num_prefetch = num_prefetch
        # Seeds for the python / numpy / mxnet RNGs inside the worker.
        self.seed = seed
        self.np_seed = np_seed
        self.mx_seed = mx_seed
    def run(self):
        """Worker loop (the Thread's / Process's activity): seed the RNGs,
        then pull items from the stream until instructed to shut down."""
        random.seed(self.seed)
        np.random.seed(self.np_seed)
        if not isinstance(self, multiprocessing.Process):
            # Calling mxnet methods in a subprocess will raise an exception if
            # mxnet is built with GPU support
            # https://github.com/apache/incubator-mxnet/issues/4659
            mx.random.seed(self.mx_seed)
        # Startup - Master waits for this first _errorq entry in _check_start.
        try:
            stream_iter = iter(self.stream)
            self._errorq.put(None)
        except Exception as e:  # pylint: disable=broad-except
            tb = traceback.format_exc()
            self._errorq.put((e, tb))
        # Async work
        while True:
            try:  # Check control queue
                c = self._controlq.get(False)
                if c is None:
                    break
                else:
                    raise RuntimeError('Got unexpected control code {}'.format(repr(c)))
            except queue.Empty:
                pass
            except RuntimeError as e:
                # Report the bad control code, then pad _dataq so the
                # master's paired gets stay aligned.
                tb = traceback.format_exc()
                self._errorq.put((e, tb))
                self._dataq.put(None)
            try:
                data = next(stream_iter)
                error = None
            except Exception as e:  # pylint: disable=broad-except
                tb = traceback.format_exc()
                error = (e, tb)
                data = None
            finally:
                # Always push to both queues (even on failure) — see protocol.
                self._errorq.put(error)
                self._dataq.put(data)
    def __next__(self):
        next_item = self._dataq.get()
        next_error = self._errorq.get()
        if next_error is None:
            return next_item
        else:
            # Ask the worker to stop before surfacing the failure.
            self._controlq.put(None)
            if isinstance(next_error[0], StopIteration):
                raise StopIteration
            else:
                return self._reraise(*next_error)
    def _reraise(self, e, tb):
        # Print the worker-side traceback (lost across process boundaries),
        # then re-raise the exception in the master.
        print('Reraising exception from Prefetcher', file=sys.stderr)
        print(tb, file=sys.stderr)
        raise e
    def _check_start(self):
        # Consume the startup token pushed by run(); re-raise setup failures.
        assert not self._checked_start
        self._checked_start = True
        next_error = self._errorq.get(block=True)
        if next_error is not None:
            self._reraise(*next_error)
    def next(self):
        # Python 2 iterator-protocol compatibility.
        return self.__next__()
class _ProcessPrefetcher(_Prefetcher, multiprocessing.Process):
    """Prefetcher that runs in a dedicated daemon OS process."""

    def __init__(self, *args, **kwargs):
        super(_ProcessPrefetcher, self).__init__(*args, **kwargs)
        # Bounded queues keep at most `num_prefetch` items in flight.
        self._dataq = multiprocessing.Queue(self.num_prefetch)
        self._errorq = multiprocessing.Queue(self.num_prefetch)
        self._controlq = multiprocessing.Queue()
        self.daemon = True
        self.start()
        self._check_start()
class _ThreadPrefetcher(_Prefetcher, threading.Thread):
    """Prefetcher that runs in a background daemon thread."""

    def __init__(self, *args, **kwargs):
        super(_ThreadPrefetcher, self).__init__(*args, **kwargs)
        # Bounded queues keep at most `num_prefetch` items in flight.
        self._dataq = queue.Queue(self.num_prefetch)
        self._errorq = queue.Queue(self.num_prefetch)
        self._controlq = queue.Queue()
        self.daemon = True
        self.start()
        self._check_start()
class PrefetchingStream(DataStream):
    """Prefetch a DataStream in a separate Thread or Process.

    This iterator will create another thread or process to perform
    ``iter_next`` and then store the data in memory. It potentially
    accelerates the data read, at the cost of more memory usage.

    The python, numpy and mxnet random states in the launched Thread or
    Process will be initialized randomly based on the next 32 bit integer in
    the python, numpy and mxnet random generator of the caller respectively
    (random.getrandbits(32), numpy.random.randint(0, 2**32),
    int(mx.nd.random.uniform(0, 2**32).asscalar())).

    Parameters
    ----------
    stream : DataStream
        Source stream.
    num_prefetch : int, default 1
        Number of elements to prefetch from the stream. Must be greater 0.
    worker_type : 'thread' or 'process', default 'thread'
        Use a separate Python Thread or Process to prefetch.

    Raises
    ------
    ValueError
        If num_prefetch < 1 or worker_type is not 'thread'/'process'.
    """
    def __init__(self, stream, num_prefetch=1, worker_type='thread'):
        if num_prefetch < 1:
            raise ValueError('num_prefetch must be greater 0.')
        # BUG FIX: worker_type used to be checked with `assert`, which is
        # stripped under `python -O`; validate explicitly instead, matching
        # the ValueError raised for num_prefetch above.
        worker = worker_type.lower()
        if worker not in ('thread', 'process'):
            raise ValueError("worker_type must be 'thread' or 'process', "
                             'but got %s' % worker_type)
        self._stream = stream
        self._num_prefetch = num_prefetch
        self._multiprocessing = worker == 'process'

    def __iter__(self):
        # Derive fresh RNG seeds from the caller's generators so each
        # prefetcher instance gets reproducible-but-distinct randomness.
        seed = random.getrandbits(32)
        np_seed = np.random.randint(0, 2**32)
        mx_seed = int(mx.nd.random.uniform(0, 2**32).asscalar())
        worker_cls = _ProcessPrefetcher if self._multiprocessing else _ThreadPrefetcher
        return worker_cls(self._stream, self._num_prefetch, seed=seed,
                          np_seed=np_seed, mx_seed=mx_seed)
| [
"numpy.random.seed",
"mxnet.random.seed",
"mxnet.nd.random.uniform",
"numpy.random.randint",
"random.seed",
"traceback.format_exc",
"multiprocessing.Queue",
"random.getrandbits",
"glob.glob",
"mxnet.gluon.data.RandomSampler",
"mxnet.gluon.data.SequentialSampler",
"os.path.expanduser",
"queue... | [((5986, 6018), 'os.path.expanduser', 'os.path.expanduser', (['file_pattern'], {}), '(file_pattern)\n', (6004, 6018), False, 'import os\n'), ((7806, 7828), 'random.seed', 'random.seed', (['self.seed'], {}), '(self.seed)\n', (7817, 7828), False, 'import random\n'), ((7837, 7865), 'numpy.random.seed', 'np.random.seed', (['self.np_seed'], {}), '(self.np_seed)\n', (7851, 7865), True, 'import numpy as np\n'), ((10354, 10394), 'multiprocessing.Queue', 'multiprocessing.Queue', (['self.num_prefetch'], {}), '(self.num_prefetch)\n', (10375, 10394), False, 'import multiprocessing\n'), ((10420, 10443), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (10441, 10443), False, 'import multiprocessing\n'), ((10467, 10507), 'multiprocessing.Queue', 'multiprocessing.Queue', (['self.num_prefetch'], {}), '(self.num_prefetch)\n', (10488, 10507), False, 'import multiprocessing\n'), ((10811, 10841), 'queue.Queue', 'queue.Queue', (['self.num_prefetch'], {}), '(self.num_prefetch)\n', (10822, 10841), False, 'import queue\n'), ((10867, 10880), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (10878, 10880), False, 'import queue\n'), ((10904, 10934), 'queue.Queue', 'queue.Queue', (['self.num_prefetch'], {}), '(self.num_prefetch)\n', (10915, 10934), False, 'import queue\n'), ((12369, 12391), 'random.getrandbits', 'random.getrandbits', (['(32)'], {}), '(32)\n', (12387, 12391), False, 'import random\n'), ((12410, 12439), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2 ** 32)'], {}), '(0, 2 ** 32)\n', (12427, 12439), True, 'import numpy as np\n'), ((6048, 6071), 'glob.glob', 'glob.glob', (['file_pattern'], {}), '(file_pattern)\n', (6057, 6071), False, 'import glob\n'), ((8129, 8157), 'mxnet.random.seed', 'mx.random.seed', (['self.mx_seed'], {}), '(self.mx_seed)\n', (8143, 8157), True, 'import mxnet as mx\n'), ((6519, 6540), 'mxnet.gluon.data.RandomSampler', 'RandomSampler', (['length'], {}), '(length)\n', (6532, 6540), False, 'from mxnet.gluon.data import 
RandomSampler, SequentialSampler, Sampler\n'), ((6604, 6629), 'mxnet.gluon.data.SequentialSampler', 'SequentialSampler', (['length'], {}), '(length)\n', (6621, 6629), False, 'from mxnet.gluon.data import RandomSampler, SequentialSampler, Sampler\n'), ((8373, 8395), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (8393, 8395), False, 'import traceback\n'), ((8841, 8863), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (8861, 8863), False, 'import traceback\n'), ((9120, 9142), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (9140, 9142), False, 'import traceback\n'), ((12460, 12492), 'mxnet.nd.random.uniform', 'mx.nd.random.uniform', (['(0)', '(2 ** 32)'], {}), '(0, 2 ** 32)\n', (12480, 12492), True, 'import mxnet as mx\n')] |
import cv2
import numpy as np
from tqdm import tqdm
from utility_matching_functions import *
def get_accuracy_reject_characteristics(pdistArr, lp_text, thVal_list=None):
    """Compute accuracy / reject-rate pairs over a sweep of score thresholds.

    For each threshold, samples whose nearest-neighbour matching score
    exceeds the threshold are rejected; accuracy is the fraction of accepted
    samples whose nearest neighbour carries the same license-plate text.

    Parameters
    ----------
    pdistArr : (N, N) ndarray
        Pairwise matching scores between samples (lower = more similar).
    lp_text : (N,) ndarray
        Ground-truth license-plate text per sample.
    thVal_list : iterable of float, optional
        Thresholds to evaluate. Defaults to
        ``np.arange(0.01, max(nearest scores), 0.02)``.

    Returns
    -------
    (acc, rejRate) : two lists of floats
        Accuracy over accepted samples and fraction of rejected samples.
        Thresholds where either side is empty are skipped.
    """
    # Nearest neighbour per row, its predicted text, and its score.
    tIndx = np.argmin(pdistArr, axis=1)
    tPredLPText = lp_text[tIndx]
    tScores = pdistArr[range(pdistArr.shape[0]), tIndx]
    if thVal_list is None:
        thVal_list = np.arange(0.01, max(tScores), 0.02)  # 0.15
    # Per-sample hit of the nearest-neighbour match (threshold independent).
    correct = tPredLPText == lp_text
    n_total = len(tScores)
    acc = list()
    rejRate = list()
    for thVal in thVal_list:
        accepted = tScores <= thVal
        n_acc = int(np.count_nonzero(accepted))
        n_rej = n_total - n_acc
        # Skip degenerate thresholds where a rate would divide by zero.
        if n_rej == 0 or n_acc == 0:
            continue
        rejRate.append(n_rej / float(n_total))
        acc.append(np.count_nonzero(correct & accepted) / float(n_acc))
    return acc, rejRate
def get_list_tp_fp_fn(pdistArr, lp_text, thVal, verbose=True):
    """Return per-pair lists of true positives, false positives and false
    negatives at threshold ``thVal``.

    Each entry is an ``np.array([query_index, matched_index])``.
    """
    all_tp_list = []
    all_fp_list = []
    all_fn_list = []
    if verbose:
        print('Compute the counts for a given threshold value ...')
    for ii in tqdm(range(pdistArr.shape[0])):
        # Candidates predicted similar to sample ii (excluding itself).
        sim = get_similar_vehicles_fast(pdistArr, ii, thVal)
        sim = np.delete(sim, np.where(sim == ii)[0])
        # Ground-truth matches for ii (same plate text, excluding itself).
        gt = np.where(lp_text[ii] == lp_text)[0]
        gt = gt[gt != ii]
        gt_set = set(gt)
        sim_set = set(sim)
        # Predicted matches split into correct (TP) and spurious (FP).
        for match in sim:
            if match in gt_set:
                all_tp_list.append(np.array([ii, match]))
        for match in sim:
            if match not in gt_set:
                all_fp_list.append(np.array([ii, match]))
        # Ground-truth matches the prediction missed (FN).
        for match in gt:
            if match not in sim_set:
                all_fn_list.append(np.array([ii, match]))
    return all_tp_list, all_fp_list, all_fn_list
def get_counts_prf_measures_threshold(pdistArr, lp_text, thVal, verbose=False):
    """Count TP / FP / FN pairs at a single threshold.

    Returns
    -------
    (trPos, flPos, flNeg, gtCnt, tp_pairs, fp_pairs)
        Counts plus the explicit (query, match) index pairs classified as
        true / false positives.
    """
    # Ground-truth pair count: every ordered (i, j), i != j, with equal text.
    gtCnt = 0
    for ii in tqdm(range(pdistArr.shape[0])):
        same = np.where(lp_text[ii] == lp_text)[0]
        same = np.delete(same, np.where(same == ii)[0])
        gtCnt += len(same)
    # Classify every predicted-similar pair at the given threshold.
    tp_pairs = []
    fp_pairs = []
    for ii in tqdm(range(pdistArr.shape[0])):
        sim = get_similar_vehicles_fast(pdistArr, ii, thVal)
        sim = np.delete(sim, np.where(sim == ii)[0])
        for match in sim:
            if lp_text[ii] == lp_text[match]:
                tp_pairs.append((ii, match))
            else:
                fp_pairs.append((ii, match))
    trPos = len(tp_pairs)
    flPos = len(fp_pairs)
    flNeg = gtCnt - trPos
    return trPos, flPos, flNeg, gtCnt, tp_pairs, fp_pairs
def get_counts_prf_measures(pdistArr, lp_text, thVal_list=None, verbose=False):
    """Compute TP / FP / FN / GT counts for a list of thresholds.

    Parameters
    ----------
    pdistArr : (N, N) ndarray
        Pairwise matching scores (lower = more similar).
    lp_text : (N,) ndarray
        Ground-truth license-plate text per sample.
    thVal_list : iterable of float, optional
        Thresholds to evaluate; defaults to
        ``np.arange(0.01, max(nearest scores), 0.01)``.
    verbose : bool
        Print progress information.

    Returns
    -------
    (tPcList, fPcList, fNcList, gtcList, thVal_list)
        Per-threshold true-positive, false-positive, false-negative and
        ground-truth counts, plus the thresholds actually used.
    """
    if thVal_list is None:
        # Sweep up to the largest nearest-neighbour score.
        tIndx = np.argmin(pdistArr, axis=1)
        tScores = pdistArr[range(pdistArr.shape[0]), tIndx]
        thVal_list = np.arange(0.01, max(tScores), 0.01)  # 0.15
    tPcList = list()
    fPcList = list()
    fNcList = list()
    gtcList = list()
    if verbose:
        print('Computing the ground truth counts')
    # Ground-truth pair count is threshold independent: for each sample,
    # count the other samples carrying the same plate text.
    gtCnt = 0
    for ii in range(pdistArr.shape[0]):
        tGTIndx = np.where(lp_text[ii] == lp_text)[0]
        gtCnt += (len(tGTIndx) - 1)
    ## Compute the counts for different threshold values
    if verbose:
        print('Compute the counts for different threshold values')
    for kk in range(len(thVal_list)):
        thVal = thVal_list[kk]
        if verbose:
            print('[ ' + str(kk) + ' / ' + str(len(thVal_list)) + ' ] ' + str(thVal))
        simCnt = 0
        actCnt = 0
        for ii in range(pdistArr.shape[0]):
            tSimIndx = get_similar_vehicles_fast(pdistArr, ii, thVal)
            tSimIndx = np.delete(tSimIndx, np.where(tSimIndx == ii)[0])
            simCnt += len(tSimIndx)
            # Count predicted matches that are actually correct (TP).
            for jj in tSimIndx:
                if lp_text[ii] == lp_text[jj]:
                    actCnt += 1
        trPos = actCnt
        flNeg = gtCnt - trPos
        flPos = simCnt - trPos
        tPcList.append(trPos)
        fPcList.append(flPos)
        gtcList.append(gtCnt)
        fNcList.append(flNeg)
    return tPcList, fPcList, fNcList, gtcList, thVal_list
def analyze_precision_recall_list(tPcList, fPcList, fNcList, gtcList, thVal_list, verbose=True):
    """Derive precision / recall / F-measure per threshold and return the best.

    Parameters
    ----------
    tPcList, fPcList, fNcList, gtcList : sequences of int
        Per-threshold true-positive, false-positive, false-negative and
        ground-truth counts (gtcList is unused but kept for interface
        compatibility).
    thVal_list : sequence of float
        The thresholds the counts correspond to.
    verbose : bool
        Print per-threshold metrics.

    Returns
    -------
    (threshold, precision, recall, f_measure) at the index with the highest
    F-measure; NaN F-measures (0/0 divisions) are excluded from the search.
    """
    tPcList = np.array(tPcList)
    fPcList = np.array(fPcList)
    fNcList = np.array(fNcList)
    prec = np.zeros((len(tPcList)), dtype=np.float32)
    recl = np.zeros((len(tPcList)), dtype=np.float32)
    f_measure = np.zeros((len(tPcList)), dtype=np.float32)
    for kk in range(len(tPcList)):
        prec[kk] = tPcList[kk]/float(tPcList[kk]+fPcList[kk])
        recl[kk] = tPcList[kk]/float(tPcList[kk]+fNcList[kk])
        f_measure[kk] = 2 * ((prec[kk] * recl[kk]) / (prec[kk] + recl[kk]))
        if verbose:
            print('Threshold: ' + str(thVal_list[kk]))
            print('N.FP: ' + str(fPcList[kk]) + ' N.FN: ' + str(fNcList[kk]) + ' N.TP: ' + str(tPcList[kk]))
            print('Precision: ' + str(prec[kk]) + ' Recall: ' + str(recl[kk]) + ' F-measure: ' + str(f_measure[kk]) )
            print('===')
    # Push NaN entries (from 0/0) below any real score so argmax skips them.
    f_measure[np.isnan(f_measure)] = -50
    best_indx = np.argmax(f_measure)
    return thVal_list[best_indx], prec[best_indx], recl[best_indx], f_measure[best_indx]
"numpy.argmax",
"numpy.isnan",
"numpy.argmin",
"numpy.where",
"numpy.array"
] | [((355, 382), 'numpy.argmin', 'np.argmin', (['pdistArr'], {'axis': '(1)'}), '(pdistArr, axis=1)\n', (364, 382), True, 'import numpy as np\n'), ((1836, 1863), 'numpy.argmin', 'np.argmin', (['pdistArr'], {'axis': '(1)'}), '(pdistArr, axis=1)\n', (1845, 1863), True, 'import numpy as np\n'), ((4823, 4850), 'numpy.argmin', 'np.argmin', (['pdistArr'], {'axis': '(1)'}), '(pdistArr, axis=1)\n', (4832, 4850), True, 'import numpy as np\n'), ((7135, 7152), 'numpy.array', 'np.array', (['tPcList'], {}), '(tPcList)\n', (7143, 7152), True, 'import numpy as np\n'), ((7167, 7184), 'numpy.array', 'np.array', (['fPcList'], {}), '(fPcList)\n', (7175, 7184), True, 'import numpy as np\n'), ((7199, 7216), 'numpy.array', 'np.array', (['fNcList'], {}), '(fNcList)\n', (7207, 7216), True, 'import numpy as np\n'), ((7231, 7248), 'numpy.array', 'np.array', (['gtcList'], {}), '(gtcList)\n', (7239, 7248), True, 'import numpy as np\n'), ((8310, 8330), 'numpy.argmax', 'np.argmax', (['f_measure'], {}), '(f_measure)\n', (8319, 8330), True, 'import numpy as np\n'), ((8240, 8259), 'numpy.isnan', 'np.isnan', (['f_measure'], {}), '(f_measure)\n', (8248, 8259), True, 'import numpy as np\n'), ((869, 894), 'numpy.where', 'np.where', (['(tScores > thVal)'], {}), '(tScores > thVal)\n', (877, 894), True, 'import numpy as np\n'), ((916, 942), 'numpy.where', 'np.where', (['(tScores <= thVal)'], {}), '(tScores <= thVal)\n', (924, 942), True, 'import numpy as np\n'), ((2244, 2276), 'numpy.where', 'np.where', (['(lp_text[ii] == lp_text)'], {}), '(lp_text[ii] == lp_text)\n', (2252, 2276), True, 'import numpy as np\n'), ((3477, 3509), 'numpy.where', 'np.where', (['(lp_text[ii] == lp_text)'], {}), '(lp_text[ii] == lp_text)\n', (3485, 3509), True, 'import numpy as np\n'), ((5346, 5378), 'numpy.where', 'np.where', (['(lp_text[ii] == lp_text)'], {}), '(lp_text[ii] == lp_text)\n', (5354, 5378), True, 'import numpy as np\n'), ((2198, 2222), 'numpy.where', 'np.where', (['(tSimIndx == ii)'], {}), '(tSimIndx == ii)\n', 
(2206, 2222), True, 'import numpy as np\n'), ((3550, 3573), 'numpy.where', 'np.where', (['(tGTIndx == ii)'], {}), '(tGTIndx == ii)\n', (3558, 3573), True, 'import numpy as np\n'), ((3953, 3977), 'numpy.where', 'np.where', (['(tSimIndx == ii)'], {}), '(tSimIndx == ii)\n', (3961, 3977), True, 'import numpy as np\n'), ((1148, 1198), 'numpy.where', 'np.where', (['(tPredLPText[rejIndx] == lp_text[rejIndx])'], {}), '(tPredLPText[rejIndx] == lp_text[rejIndx])\n', (1156, 1198), True, 'import numpy as np\n'), ((1380, 1430), 'numpy.where', 'np.where', (['(tPredLPText[accIndx] == lp_text[accIndx])'], {}), '(tPredLPText[accIndx] == lp_text[accIndx])\n', (1388, 1430), True, 'import numpy as np\n'), ((2545, 2574), 'numpy.array', 'np.array', (['[ii, t_tp_list[tt]]'], {}), '([ii, t_tp_list[tt]])\n', (2553, 2574), True, 'import numpy as np\n'), ((2820, 2849), 'numpy.array', 'np.array', (['[ii, t_fp_list[tt]]'], {}), '([ii, t_fp_list[tt]])\n', (2828, 2849), True, 'import numpy as np\n'), ((3077, 3106), 'numpy.array', 'np.array', (['[ii, t_fn_list[tt]]'], {}), '([ii, t_fn_list[tt]])\n', (3085, 3106), True, 'import numpy as np\n'), ((6025, 6049), 'numpy.where', 'np.where', (['(tSimIndx == ii)'], {}), '(tSimIndx == ii)\n', (6033, 6049), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 29 15:46:47 2022
@author: Connor
"""
#
# Imports
#
import requests as reqs
import numpy as np
import matplotlib.pyplot as plt
import datetime
import os
from pathlib import Path
from scipy.special import erf
# Root URL of the public College Football Risk REST API.
_BASE ="https://collegefootballrisk.com/api"
# Season queried by default throughout this script.
_SEASON = 1
# Apply matplotlib's "bmh" style sheet to every figure in this module.
plt.style.use("bmh")
def yline(loc, *args, ax=None, **kwargs):
    """Draw a vertical line at x=``loc`` spanning the current y-limits.

    The y-limits are restored after plotting so autoscaling cannot grow
    them. Extra args/kwargs are forwarded to ``plot``. Draws on the current
    pyplot axes when ``ax`` is None, otherwise on the given axis.
    """
    if ax is None:
        limits = plt.ylim()
        plt.plot([loc, loc], limits, *args, **kwargs)
        plt.ylim(limits)
        return
    limits = ax.get_ylim()
    ax.plot([loc, loc], limits, *args, **kwargs)
    ax.set_ylim(limits)
def create_expected_value_hist(
        team_name,
        rank,
        day,
        prev_num_terry,
        num_runs=100000,
        season=_SEASON,
        axis=None,
        save_dir=None
        ):
    """Plot the exact distribution of territories won by ``team_name``.

    The distribution is a Poisson binomial built by convolving the
    per-territory win probabilities reported by the CFB Risk API.

    Parameters
    ----------
    team_name : str
        Team name exactly as it appears in the API.
    rank : int
        Leaderboard rank; only used to prefix the saved file name.
    day, season : int
        Which game day / season to query.
    prev_num_terry : int
        Previous day's territory count, drawn as a reference line.
    num_runs : int
        Unused; kept for backward compatibility with existing callers.
    axis : matplotlib axes, optional
        Axes to draw on; a new figure/axes pair is created when None.
    save_dir : pathlib.Path, optional
        Directory to save the figure into; nothing is saved when None.

    Returns
    -------
    tuple or None
        ``(mu, sigma, dsigma, act, cdf)`` on success; None when anything
        fails (callers detect this via the TypeError raised on unpacking).
    """
    try:
        # --- Fetch per-territory odds and the team's plotting colors -----
        team_odds_req = reqs.get(_BASE+"/team/odds",
                        params={"season": season,
                                "day": day,
                                "team": team_name})
        team_odds_info = team_odds_req.json()
        teams_req = reqs.get(_BASE+"/teams")
        team_info = teams_req.json()
        p_color = None
        for team in team_info:
            if team["name"] == team_name:
                p_color = team["colors"]["primary"]
                s_color = team["colors"]["secondary"]
                break
        if p_color is None:
            raise ValueError(f"Invalid team_name = {team_name}")
        # API colors look like "rgba(r, g, b, a)"; convert to 0-1 floats.
        p_color = tuple(float(val)/255 if ii < 3 else float(val) for ii, val in enumerate(p_color[5:-1].split(",")))
        s_color = tuple(float(val)/255 if ii < 3 else float(val) for ii, val in enumerate(s_color[5:-1].split(",")))
        # Pure white would be invisible on the plot; fall back to black.
        if p_color[0:3] == (1, 1, 1):
            p_color = (0, 0, 0, p_color[3])
        if s_color[0:3] == (1, 1, 1):
            s_color = (0, 0, 0, s_color[3])
        num_territories = len(team_odds_info)
        # Per-territory win probability; uncontested territories count as 1.
        odds = [tory["teamPower"]/tory["territoryPower"]
                if tory["territoryPower"]>0 else 1
                for tory in team_odds_info]
        # PMF of the total: convolve the per-territory Bernoulli PMFs.
        vals = 1
        for k in odds:
            vals = np.convolve(vals, [1-k, k])
        # axis handling
        if axis is None:
            fig = plt.figure()
            _ax = plt.gca()
        else:
            _ax = axis
            # BUG FIX: `fig` was undefined in this branch, so passing an
            # axis together with save_dir hit a NameError at savefig time
            # (previously hidden by the bare except below).
            fig = _ax.figure
        # Actual and expected number of territories won.
        act = sum([1 if tory["winner"] == team_name else 0 for tory in team_odds_info])
        exp = sum(odds)
        # Mean and standard deviation computed numerically from the PMF.
        mu = np.sum(vals*np.arange(len(vals)))
        sigma = np.sqrt(sum(vals*(np.arange(len(vals)) - mu)**2))
        dsigma = (act-mu) / sigma
        # Green when the team over-performed its expectation, red otherwise.
        if dsigma < 0:
            act_color = "#781b0e"
        else:
            act_color = "#3b8750"
        # Continuous normal approximation overlaid on the exact PMF bars.
        x = np.linspace(0, num_territories, 5000)
        y = (1 / (np.sqrt(2 * np.pi * np.power(sigma, 2)))) * \
            (np.power(np.e, -(np.power((x - mu), 2) / (2 * np.power(sigma, 2)))))
        cdf = 0.5 * (1 + erf((act-exp)/(np.sqrt(2)*sigma)))
        _ax.plot(x,y*100, linestyle="-", linewidth=0.5, color="#54585A", label="$X$ ~ $N(\mu, \sigma)$")
        _ax.bar(np.arange(num_territories+1), vals*100, 0.9, align="center", color=p_color, edgecolor=s_color)
        yline(exp, ax=_ax, linestyle=(0,(2,2)), linewidth=2, color="#081840", label="Expected Value")
        yline(act, ax=_ax, linestyle=(0,(2,2)), linewidth=2, color=act_color, label="Actual Territories")
        yline(prev_num_terry, ax=_ax, linestyle=(0,(1,1)), linewidth=2, color="#ffb521", label="Prev Num. Territories")
        dT = act - prev_num_terry
        _ax.set_title(f"Number of Territories Histogram: {team_name}\n$Expected: {exp:2.2f}$, $Actual: {act}$, $\Delta Territories = {dT}$")
        _ax.set_xlabel("Number of Territories Won")
        _ax.set_ylabel("Percent Chance to Win N Territories (%)")
        my_anno_text = f"""$\mu = {mu:2.3f}$
$3\sigma = {3*sigma:2.3f}$
$\Delta\sigma = {dsigma:2.3f}$
$P(Draw) = {100*vals[act]:2.3f}\%$"""
        x_min, x_max = _ax.get_xlim()
        y_min, y_max = _ax.get_ylim()
        # Pick legend / annotation corners that avoid covering the bars.
        if (mu) < (x_max-x_min)//2:
            # put both on right:
            _ax.legend(loc="upper right")
            _ax.text(0.72,
                     0.08,
                     my_anno_text,
                     bbox={'facecolor': 'white', 'alpha': 0.7},
                     transform=_ax.transAxes)
        elif vals[0] > 5:
            # top
            _ax.legend(loc="upper left")
            _ax.text(0.72,
                     0.80,
                     my_anno_text,
                     bbox={'facecolor': 'white', 'alpha': 0.7},
                     transform=_ax.transAxes)
        else:
            # left
            _ax.legend(loc="upper left")
            _ax.text(0.03,
                     0.10,
                     my_anno_text,
                     bbox={'facecolor': 'white', 'alpha': 0.7},
                     transform=_ax.transAxes)
        if save_dir is not None:
            fig.savefig(save_dir / f"{rank}_{team_name.lower().replace(' ', '_')}_territory_hist.png", dpi=150)
        return mu, sigma, dsigma, act, cdf
    except Exception as exc:
        # BUG FIX: this used to be a bare `except:` that printed an empty
        # string, silently hiding every failure (including KeyboardInterrupt).
        # Keep the implicit None return that create_all_hists relies on, but
        # report what actually went wrong.
        print(f"create_expected_value_hist failed for {team_name!r}: {exc}")
def create_all_hists(
        day,
        season=_SEASON,
        save_dir=None
        ):
    """Build a territory histogram for every team on the leaderboard.

    Parameters
    ----------
    day : int
        Game day to plot.
    season : int
        Season to query (defaults to the module-wide _SEASON).
    save_dir : pathlib.Path or None
        Directory forwarded to create_expected_value_hist for saving.

    Returns
    -------
    ((min dsigma, team name), (max dsigma, team name)) — the day's biggest
    under- and over-performers relative to expectation.
    """
    leader_req = reqs.get(_BASE+"/stats/leaderboard",
                    params={"season": season,
                            "day": day})
    leaders = leader_req.json()
    # Yesterday's leaderboard exists (and is needed) only after day 1.
    if day > 1:
        leader_req_yest = reqs.get(_BASE+"/stats/leaderboard",
                        params={"season": season,
                                "day": day-1})
        leader_yest = leader_req_yest.json()
    # NOTE(review): entries keep the initial value 1.0 when a team's
    # histogram fails below, which can skew the min/max picked at the end.
    mu = np.ones((len(leaders),))
    sig = np.ones((len(leaders),))
    dsig = np.ones((len(leaders),))
    act = np.ones((len(leaders),))
    for ind, leader in enumerate(leaders):
        print("Making hist for: ", leader["name"])
        if day > 1:
            prev_num_terry = [ll for ll in leader_yest if ll["name"] == leader["name"]][0]["territoryCount"]
        else:
            prev_num_terry = leader["territoryCount"]
        try:
            mu[ind], sig[ind], dsig[ind], act[ind], cdf = create_expected_value_hist(
                leader["name"],
                leader["rank"],
                day,
                int(prev_num_terry),
                season=season,
                save_dir=save_dir)
        except TypeError as inst:
            # create_expected_value_hist returns None on failure; unpacking
            # None raises the TypeError handled here.
            print("Unable to make hist for ", leader["name"], ". May not have any players today.")
            print(inst)
    return (min(dsig), leaders[np.argmin(dsig)]["name"]), (max(dsig), leaders[np.argmax(dsig)]["name"])
def main(day=None):
    """Generate expected-territory histograms for every team.

    Args:
        day: game day to plot; when None (or 0), derived from the wall clock
            relative to the season start date (2022-01-15).
    """
    date = datetime.date
    # Set this true if you want to save the graphs
    SAVE_FLAG = True
    # Set this true if you want to replace the current existing graphs
    REPLACE_FLAG = True
    # BUG FIX: save_dir was previously left undefined when SAVE_FLAG was
    # False, which made the create_all_hists() call below raise a NameError.
    save_dir = None
    if SAVE_FLAG:
        output_directory = r"D:\Connor\Documents\GA 2022\Risk\cfb_artifacts"
        figs_base_dir = Path(output_directory)
        check_dir = figs_base_dir / f"{date.today().isoformat()}"
        # check_dir = figs_base_dir / "2020-04-22"
        asserted_dir = figs_base_dir / "temp_dir"
        # asserted_dir = check_dir
        if not check_dir.exists():
            os.mkdir(check_dir)
            save_dir = check_dir
        else:
            if REPLACE_FLAG:
                save_dir = check_dir
            else:
                save_dir = asserted_dir
    # Get delta Time since start of game
    if not day:
        dt_now = datetime.datetime.now()
        deltaT = dt_now-datetime.datetime(2022, 1, 15)
        day = deltaT.days  # get just the delta number of days
    print(f"Generating plots for day = {day}...")
    mins_team, max_team = create_all_hists(day, save_dir=save_dir)
if __name__ == "__main__":
    # Hard-coded day for manual runs; pass None to derive from the clock.
    day = 15
    main(day)
"os.mkdir",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.gca",
"numpy.argmax",
"numpy.power",
"numpy.ones",
"numpy.argmin",
"datetime.datetime",
"matplotlib.pyplot.style.use",
"pathlib.Path",
"matplotlib.pyplot.figure",
"numpy.arange",
"requests.get",
"numpy.lin... | [((315, 335), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""bmh"""'], {}), "('bmh')\n", (328, 335), True, 'import matplotlib.pyplot as plt\n'), ((6067, 6144), 'requests.get', 'reqs.get', (["(_BASE + '/stats/leaderboard')"], {'params': "{'season': season, 'day': day}"}), "(_BASE + '/stats/leaderboard', params={'season': season, 'day': day})\n", (6075, 6144), True, 'import requests as reqs\n'), ((414, 424), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (422, 424), True, 'import matplotlib.pyplot as plt\n'), ((433, 477), 'matplotlib.pyplot.plot', 'plt.plot', (['[loc, loc]', 'ylims', '*args'], {}), '([loc, loc], ylims, *args, **kwargs)\n', (441, 477), True, 'import matplotlib.pyplot as plt\n'), ((486, 501), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylims'], {}), '(ylims)\n', (494, 501), True, 'import matplotlib.pyplot as plt\n'), ((1077, 1169), 'requests.get', 'reqs.get', (["(_BASE + '/team/odds')"], {'params': "{'season': season, 'day': day, 'team': team_name}"}), "(_BASE + '/team/odds', params={'season': season, 'day': day, 'team':\n team_name})\n", (1085, 1169), True, 'import requests as reqs\n'), ((1342, 1368), 'requests.get', 'reqs.get', (["(_BASE + '/teams')"], {}), "(_BASE + '/teams')\n", (1350, 1368), True, 'import requests as reqs\n'), ((2303, 2330), 'numpy.ones', 'np.ones', (['(num_territories,)'], {}), '((num_territories,))\n', (2310, 2330), True, 'import numpy as np\n'), ((3530, 3567), 'numpy.linspace', 'np.linspace', (['(0)', 'num_territories', '(5000)'], {}), '(0, num_territories, 5000)\n', (3541, 3567), True, 'import numpy as np\n'), ((6275, 6360), 'requests.get', 'reqs.get', (["(_BASE + '/stats/leaderboard')"], {'params': "{'season': season, 'day': day - 1}"}), "(_BASE + '/stats/leaderboard', params={'season': season, 'day': day -\n 1})\n", (6283, 6360), True, 'import requests as reqs\n'), ((7791, 7813), 'pathlib.Path', 'Path', (['output_directory'], {}), '(output_directory)\n', (7795, 7813), False, 'from pathlib import 
Path\n'), ((8333, 8356), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8354, 8356), False, 'import datetime\n'), ((2731, 2760), 'numpy.convolve', 'np.convolve', (['vals', '[1 - k, k]'], {}), '(vals, [1 - k, k])\n', (2742, 2760), True, 'import numpy as np\n'), ((2835, 2847), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2845, 2847), True, 'import matplotlib.pyplot as plt\n'), ((2866, 2875), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2873, 2875), True, 'import matplotlib.pyplot as plt\n'), ((3896, 3926), 'numpy.arange', 'np.arange', (['(num_territories + 1)'], {}), '(num_territories + 1)\n', (3905, 3926), True, 'import numpy as np\n'), ((8063, 8082), 'os.mkdir', 'os.mkdir', (['check_dir'], {}), '(check_dir)\n', (8071, 8082), False, 'import os\n'), ((8381, 8411), 'datetime.datetime', 'datetime.datetime', (['(2022)', '(1)', '(15)'], {}), '(2022, 1, 15)\n', (8398, 8411), False, 'import datetime\n'), ((7381, 7396), 'numpy.argmin', 'np.argmin', (['dsig'], {}), '(dsig)\n', (7390, 7396), True, 'import numpy as np\n'), ((7428, 7443), 'numpy.argmax', 'np.argmax', (['dsig'], {}), '(dsig)\n', (7437, 7443), True, 'import numpy as np\n'), ((3606, 3624), 'numpy.power', 'np.power', (['sigma', '(2)'], {}), '(sigma, 2)\n', (3614, 3624), True, 'import numpy as np\n'), ((3662, 3681), 'numpy.power', 'np.power', (['(x - mu)', '(2)'], {}), '(x - mu, 2)\n', (3670, 3681), True, 'import numpy as np\n'), ((3691, 3709), 'numpy.power', 'np.power', (['sigma', '(2)'], {}), '(sigma, 2)\n', (3699, 3709), True, 'import numpy as np\n'), ((3755, 3765), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3762, 3765), True, 'import numpy as np\n')] |
# Adapted from https://github.com/SimingYan/HPNet/blob/main/src/approximation.py
import geomdl
import numpy as np
from scipy.special import comb
def fit_curve(points, degree, num_ctrls):
    """Least-squares fit a B-spline curve to an ordered point sequence.

    Args:
        points: (N, dim) array of ordered samples along the curve.
        degree: spline degree.
        num_ctrls: number of control points to fit.

    Returns:
        (num_ctrls, dim) array of fitted control points.
    """
    num_points, dim = points.shape
    num_cpts = num_ctrls
    # Chord-length parameter for each sample point.
    uk = compute_params_curve(points, use_centripetal=False)
    # Compute a knot vector by knot averaging so every interior span holds at
    # least one parameter (The NURBS Book, Eq. 9.68).
    # BUG FIX: this previously called a module-level compute_knot_vector2()
    # that only exists as a BSpline method, raising NameError; inlined here.
    d = num_points / (num_cpts - degree)
    steps = np.arange(1, num_cpts - degree)
    idx = np.floor(steps * d).astype(np.int32)
    alpha = steps * d - np.floor(steps * d)
    interior = alpha * uk[idx] + (1.0 - alpha) * uk[idx - 1]
    kv = np.concatenate([np.zeros(degree + 1), interior, np.ones(degree + 1)])
    # Basis matrix: matrix_n[i, j] = N_j(uk[i]).
    matrix_n = []
    for i in range(0, num_points):
        m_temp = []
        for j in range(0, num_cpts):
            m_temp.append(basis_function_one(degree, kv, j, uk[i]))
        matrix_n.append(m_temp)
    matrix_n = np.array(matrix_n)
    # Solve the normal equations via the pseudo-inverse.
    ps_inv = np.linalg.inv(np.matmul(np.transpose(matrix_n), matrix_n))
    ps_inv = np.matmul(ps_inv, np.transpose(matrix_n))
    result = np.matmul(ps_inv, points)
    return result
class BSpline:
    """Least-squares B-spline surface fitting and tensor-product evaluation.

    Control-point grids are numpy arrays of shape (cu, cv, 3); knot vectors
    are 1-D numpy arrays.  The class keeps no state: every method takes its
    data explicitly.
    """
    def __init__(self):
        # Stateless helper class; nothing to initialize.
        pass
    def evaluate_param(self, param, control_points, knot_vectors_u, knot_vectors_v, degree_u, degree_v):
        """Evaluate the surface at a single (u, v) parameter pair.

        Args:
            param: two-element sequence (u, v).
            control_points: (cu, cv, 3) control-point grid.
            knot_vectors_u: knot vector in the u direction.
            knot_vectors_v: knot vector in the v direction.
            degree_u: spline degree in u.
            degree_v: spline degree in v.

        Returns:
            (1, 3) array holding the evaluated surface point.
        """
        control_points_u, control_points_v = control_points.shape[0], control_points.shape[1]
        # Column vector of u-direction basis values N_j(u).
        nu = []
        for j in range(0, control_points_u):
            nu.append(self.basis_function_one(degree_u, knot_vectors_u, j, param[0]))
        nu = np.array(nu).reshape(control_points_u, 1)
        # Column vector of v-direction basis values N_j(v).
        nv = []
        for j in range(0, control_points_v):
            nv.append(self.basis_function_one(degree_v, knot_vectors_v, j, param[1]))
        nv = np.array(nv).reshape(control_points_v, 1)
        # Tensor-product evaluation, one coordinate channel at a time:
        # point_i = nu^T @ C[:, :, i] @ nv.
        points = []
        for i in range(3):
            points.append(np.matmul(np.matmul(nu.T, control_points[:, :, i]), nv))
        points = np.array(points).reshape(1, 3)
        return points
    def basis_functions(self, param, control_points_u, control_points_v, knot_vectors_u, knot_vectors_v, degree_u,
                        degree_v):
        """
        Returns the basis function in u and v direction to be used to compute the
        renormalization factor for the shifting control point grids.

        Returns:
            nu: (control_points_u, 1) u-direction basis values at param[0].
            nv: (control_points_v, 1) v-direction basis values at param[1].
        """
        nu = []
        for j in range(0, control_points_u):
            nu.append(self.basis_function_one(degree_u, knot_vectors_u, j, param[0]))
        nu = np.array(nu).reshape(control_points_u, 1)
        nv = []
        for j in range(0, control_points_v):
            nv.append(self.basis_function_one(degree_v, knot_vectors_v, j, param[1]))
        nv = np.array(nv).reshape(control_points_v, 1)
        return nu, nv
    def create_geomdl_surface(self, control_points, ku, kv, degree_u, degree_v):
        """Package fitted control points and knots as a geomdl BSpline surface.

        Args:
            control_points: (cu, cv, 3) control-point grid.
            ku: knot vector in the u direction (numpy array).
            kv: knot vector in the v direction (numpy array).
            degree_u: spline degree in u.
            degree_v: spline degree in v.

        Returns:
            A geomdl.BSpline.Surface configured with the given data.
        """
        bspline = geomdl.BSpline.Surface()
        cu = control_points.shape[0]
        cv = control_points.shape[1]
        bspline.degree_u = degree_u
        bspline.ctrlpts_size_u = cu
        bspline.ctrlpts_size_v = cv
        bspline.degree_v = degree_v
        bspline.knotvector_u = ku.tolist()
        bspline.knotvector_v = kv.tolist()
        bspline.ctrlpts2d = control_points.tolist()
        return bspline
    def evaluate_params(self, params, control_points, knot_vectors_u, knot_vectors_v, degree_u, degree_v):
        """Evaluate the surface at a batch of (u, v) parameter pairs.

        Args:
            params: (N, 2) array of (u, v) parameter pairs.
            control_points: (cu, cv, 3) control-point grid.
            knot_vectors_u: knot vector in u.
            knot_vectors_v: knot vector in v.
            degree_u: spline degree in u.
            degree_v: spline degree in v.

        Returns:
            (N, 3, 1) array of surface points (one xyz per pair, with a
            trailing singleton axis from np.stack on (N, 1) columns).
        """
        control_points_u, control_points_v = control_points.shape[0], control_points.shape[1]
        num_points = params.shape[0]
        # NOTE(review): the next line is dead — nu is immediately rebound.
        nu = []
        nu = np.zeros((num_points, control_points_u))
        for i in range(num_points):
            # NOTE(review): 'basis' here is dead; it is never appended to.
            basis = []
            for j in range(0, control_points_u):
                nu[i, j] = self.basis_function_one(degree_u, knot_vectors_u, j, params[i, 0])
        nu = np.expand_dims(nu, 2)
        nv = np.zeros((num_points, control_points_v))
        for i in range(num_points):
            for j in range(0, control_points_v):
                nv[i, j] = self.basis_function_one(degree_v, knot_vectors_v, j, params[i, 1])
        nv = np.expand_dims(nv, 1)
        points = []
        # Per-point outer product gives the flattened tensor-product basis.
        basis = np.matmul(nu, nv)
        basis = np.reshape(basis, (num_points, control_points_v * control_points_u))
        for i in range(3):
            cntrl_pts = np.reshape(control_points[:, :, i], (control_points_u * control_points_v, 1))
            points.append(np.matmul(basis, cntrl_pts))
        points = np.stack(points, 1)
        return points
    def fit_surface(self, points, size_u, size_v, degree_u=2, degree_v=2, control_points_u=None, control_points_v=None):
        """
        Given points in grid format, this function performs a least square fitting
        to fit bspline of given degree. This involves first computing u,v for each
        input points along with knot vectors.
        :param points: points of size Nx3, note that they are gridded originally of size
        N^(1/2) x N^(1/2) x 3
        :param size_u: u size of the grid
        :param size_v: v size of the grid, note that size_u x size_v = N
        :param control_points_u: control points in u direction
        :param control_points_v: control points in v direction
        :return: (control-point grid, knot vector in u, knot vector in v)
        """
        points = np.array(points)
        points_ = points.reshape((size_u, size_v, 3))
        # Default control-point counts: one fewer than the sample grid size.
        if (not control_points_u):
            control_points_u = size_u - 1
        if (not control_points_v):
            control_points_v = size_v - 1
        # NOTE(review): compute_params_surface re-derives size_u/size_v from
        # points_ and ignores these keyword values, so this call is harmless.
        uk, vl = self.compute_params_surface(points_, size_u=control_points_u, size_v=control_points_v)
        # Set up knot vectors depending on the number of control points
        kv_u = self.compute_knot_vector2(degree_u, size_u, control_points_u, uk)
        kv_v = self.compute_knot_vector2(degree_v, size_v, control_points_v, vl)
        # Basis matrices: nu[i, j] = N_j(uk[i]) and nv[i, j] = N_j(vl[i]).
        nu = []
        for i in range(0, size_u):
            m_temp = []
            for j in range(0, control_points_u):
                m_temp.append(self.basis_function_one(degree_u, kv_u, j, uk[i]))
            nu.append(m_temp)
        nu = np.array(nu)
        nv = []
        for i in range(0, size_v):
            m_temp = []
            for j in range(0, control_points_v):
                m_temp.append(self.basis_function_one(degree_v, kv_v, j, vl[i]))
            nv.append(m_temp)
        nv = np.array(nv)
        # Pseudo-inverse factors for the two directions.
        ut_u_inv = np.linalg.inv(np.matmul(np.transpose(nu), nu))
        ut_u_inv_u = np.matmul(ut_u_inv, np.transpose(nu))
        vt_v_inv = np.linalg.inv(np.matmul(np.transpose(nv), nv))
        vt_v_inv_v = np.matmul(nv, vt_v_inv)
        cntrlpts = []
        # use the pseudo inverse formulation
        for i in range(3):
            points_cntrl = list(np.matmul(np.matmul(ut_u_inv_u, points_[:, :, i]), vt_v_inv_v))
            cntrlpts.append(points_cntrl)
        ctrl = np.array(cntrlpts)
        ctrl = np.transpose(ctrl, (1, 2, 0))
        return ctrl, kv_u, kv_v
    def compute_knot_vector2(self, degree, num_points, num_cpts, params):
        """
        Computes a knot vector ensuring that every knot span has at least one
        parameter inside it (knot averaging, The NURBS Book Eq. 9.68).
        :param degree: spline degree
        :param num_points: number of data points
        :param num_cpts: number of control points
        :param params: parameter values of the data points
        :return: clamped knot vector of length num_cpts + degree + 1
        """
        d = num_points / (num_cpts - degree)
        j = np.arange(1, num_cpts - degree)
        I = np.floor(j * d)
        alpha = j * d - I
        # Interpolate between neighbouring parameters for the interior knots.
        params_dash_small = params[I.astype(np.int32) - 1]
        params_dash_large = params[I.astype(np.int32)]
        kv = alpha * params_dash_large + (1.0 - alpha) * params_dash_small
        # Clamp: repeat 0 and 1 (degree + 1) times at each end.
        extra_1 = np.array([1.0] * (degree + 1))
        extra_0 = np.array([0.0] * (degree + 1))
        kv = np.concatenate([extra_0, kv, extra_1])
        return kv
    def basis_function_one(self, degree, knot_vector, span, knot):
        """ Computes the value of a basis function for a single parameter.
        Implementation of Algorithm 2.4 from The NURBS Book (Piegl & Tiller).
        :param degree: degree, :math:`p`
        :type degree: int
        :param knot_vector: knot vector
        :type knot_vector: list, tuple
        :param span: knot span, :math:`i`
        :type span: int
        :param knot: knot or parameter, :math:`u`
        :type knot: float
        :return: basis function, :math:`N_{i,p}`
        :rtype: float
        """
        # Special case at boundaries
        if (
            (span == 0 and knot == knot_vector[0])
            or (span == len(knot_vector) - degree - 2)
            and knot == knot_vector[len(knot_vector) - 1]
        ):
            return 1.0
        # Knot is outside of span range
        if knot < knot_vector[span] or knot >= knot_vector[span + degree + 1]:
            return 0.0
        N = [0.0 for _ in range(degree + span + 1)]
        # Initialize the zeroth degree basis functions
        for j in range(0, degree + 1):
            if knot_vector[span + j] <= knot < knot_vector[span + j + 1]:
                N[j] = 1.0
        # Computing triangular table of basis functions
        for k in range(1, degree + 1):
            # Detecting zeros saves computations
            saved = 0.0
            if N[0] != 0.0:
                saved = ((knot - knot_vector[span]) * N[0]) / (
                    knot_vector[span + k] - knot_vector[span]
                )
            for j in range(0, degree - k + 1):
                Uleft = knot_vector[span + j + 1]
                Uright = knot_vector[span + j + k + 1]
                # Zero detection
                if N[j + 1] == 0.0:
                    N[j] = saved
                    saved = 0.0
                else:
                    temp = N[j + 1] / (Uright - Uleft)
                    N[j] = saved + (Uright - knot) * temp
                    saved = (knot - Uleft) * temp
        return N[0]
    def compute_params_surface(self, points, size_u, size_v):
        """Average chord-length parameters across a gridded point set.

        NOTE(review): size_u and size_v are immediately overwritten from
        points.shape, so the arguments are effectively ignored.

        Args:
            points: (size_u, size_v, dim) gridded points.

        Returns:
            params_u: (size_u,) averaged parameters along u.
            params_v: (size_v,) averaged parameters along v.
        """
        # finding params in v direction
        size_u, size_v = points.shape[0:2]
        params_v = []
        for u in range(size_u):
            temp = self.compute_params_curve(points[u]).reshape((1, size_v))
            params_v.append(temp)
        params_v = np.concatenate(params_v, 0)
        params_v = np.mean(params_v, 0)
        # params in u direction, averaged over all grid columns
        params_u = []
        for v in range(size_v):
            temp = self.compute_params_curve(points[:, v]).reshape((size_u, 1))
            params_u.append(temp)
        params_u = np.concatenate(params_u, 1)
        params_u = np.mean(params_u, 1)
        return params_u, params_v
    def compute_params_curve(self, points, use_centripetal=False):
        """
        Chord-length parameterization of an ordered point sequence.

        NOTE(review): use_centripetal is accepted but currently ignored here.

        :param points: (N, dim) ordered samples.
        :return: (N,) monotone parameters in [0, 1].
        """
        num_points, dim = points.shape
        num_points = points.shape[0]
        points_dash = np.square(points[0:-1] - points[1:])
        points_dash = np.sqrt(np.sum(points_dash, 1))
        # Find the total chord length
        d = np.sum(points_dash)
        points_dash = points_dash / d
        # Divide individual chord lengths by the total chord length
        uk = np.zeros((num_points))
        for i in range(num_points - 1):
            uk[i + 1] = np.sum(points_dash[0: i + 1])
        return uk
def bernstein_polynomial(n):
    """Return the binomial coefficients C(n, k), k = 0..n, as a (1, n+1) row.

    Args:
        n: degree of the Bernstein polynomial.
    """
    ks = np.arange(n + 1)
    ns = np.full(n + 1, n, dtype=np.int32)
    coeffs = comb(ns, ks)
    return coeffs.reshape((1, n + 1))
def bernstein_tensor(t, basis):
    """Evaluate the Bernstein basis functions at a column of parameters.

    Args:
        t: (L, 1) column vector of parameters in [0, 1].
        basis: (1, n+1) binomial coefficients from bernstein_polynomial().

    Returns:
        (L, n+1) matrix whose column i holds C(n, i) * t**i * (1-t)**(n-i).
    """
    n = basis.shape[1] - 1
    powers = np.concatenate(
        [(t ** i) * ((1.0 - t) ** (n - i)) for i in range(n + 1)], 1)
    return powers * basis
def fit_bezier_surface(points, basis_u, basis_v):
    """
    Given basis function basis_u, basis_v, find the control points.
    This is applicable for the gridded points of size N x N x 3.
    basis functions are of size N x (n + 1).

    Uses the pseudo-inverse formulation: C = (U^T U)^-1 U^T P V (V^T V)^-1
    applied per coordinate channel.
    """
    nu = basis_u
    nv = basis_v
    # Left factor (u direction) and right factor (v direction).
    left = np.matmul(np.linalg.inv(np.matmul(np.transpose(nu), nu)), np.transpose(nu))
    right = np.matmul(nv, np.linalg.inv(np.matmul(np.transpose(nv), nv)))
    channels = [np.matmul(np.matmul(left, points[:, :, c]), right) for c in range(3)]
    return np.transpose(np.array(channels), (1, 2, 0))
def fit_bezier_surface_fit_kronecker(points, basis_u, basis_v):
    """
    Given basis function basis_u, basis_v, find the control points.
    This is applicable for non gridded points of size N x 3 and
    the basis functions are of size N x (n + 1) corresponding to N number
    of points. Also, n + 1 is the number of control points in u direction.
    Note that to get better fitting, points at the boundary should be sampled.
    :param basis_u: bernstein polynomial of size N x (n + 1)
    :param basis_v: bernstein polynomial of size N x (m + 1)
    :return cntrl: control points of size (n + 1) x (m + 1) x 3
    """
    # converts the problem of U x C x V^t = P to U^T x V x C = P
    # that is A^t x X = b form
    A = []
    N = basis_u.shape[0]
    # Control-point counts come from each basis width independently.
    rows_u = basis_u.shape[1]
    rows_v = basis_v.shape[1]
    for i in range(N):
        A.append(np.matmul(np.transpose(basis_u[i:i + 1, :]), basis_v[i:i + 1, :]))
    A = np.stack(A, 0)
    A = np.reshape(A, (N, -1))
    cntrl = []
    for i in range(3):
        t = np.linalg.lstsq(A, points[:, i], rcond=None)
        # BUG FIX: previously reshaped to (n+1, n+1) using only basis_v's
        # width, which crashed whenever basis_u and basis_v had different
        # numbers of columns.  Square inputs behave exactly as before.
        cntrl.append(t[0].reshape((rows_u, rows_v)))
    cntrl = np.stack(cntrl, 2)
    return cntrl
def generate_bezier_surface_on_grid(cp, basis_u, basis_v):
    """
    evaluates the bezier surface with given control points on the grid defined
    by basis_u x basis_v. Only suitable if the points are required to be on
    the grid.  Returns an array of shape (Nu, Nv, 3).
    """
    bv_t = np.transpose(basis_v)
    channels = [np.matmul(np.matmul(basis_u, cp[:, :, c]), bv_t) for c in range(3)]
    return np.stack(channels, 2)
def generate_bezier_surface_using_cp_on_grid(cp, n, num_points):
    """
    evaluates the bezier surface with given control points on a grid of
    num_points random parameters per direction.  Returns an array of shape
    (num_points ** 2, 3).
    """
    coeffs = bernstein_polynomial(n)
    # Random parameter samples (single draw, shared by both directions).
    params = np.random.random((num_points, 1))
    basis_v = bernstein_tensor(params, coeffs)
    basis_u = bernstein_tensor(params, coeffs)
    bv_t = np.transpose(basis_v)
    channels = []
    for c in range(3):
        channels.append(np.matmul(np.matmul(basis_u, cp[:, :, c]), bv_t))
    surface = np.stack(channels, 2)
    return np.reshape(surface, (num_points ** 2, 3))
def compute_params_curve(points, use_centripetal=False):
    """Chord-length (or centripetal) parameterization of an ordered point set.

    Args:
        points: (N, dim) array of ordered curve samples.
        use_centripetal: if True, use the centripetal scheme (square root of
            the chord lengths), which handles sharp turns more gracefully.
            BUG FIX: this flag was previously accepted but ignored; the
            default (False) behavior is unchanged.

    Returns:
        (N,) array of monotonically increasing parameters from 0 to 1.
    """
    num_points = points.shape[0]
    # Per-segment chord lengths.
    chord = np.sqrt(np.sum(np.square(points[0:-1] - points[1:]), 1))
    if use_centripetal:
        # Centripetal parameterization uses sqrt of the chord lengths.
        chord = np.sqrt(chord)
    # Normalize so the cumulative parameters end at 1.
    chord = chord / np.sum(chord)
    uk = np.zeros(num_points)
    for i in range(num_points - 1):
        uk[i + 1] = np.sum(chord[0: i + 1])
    return uk
def uniform_knot_bspline(control_points_u, control_points_v, degree_u, degree_v, grid_size=30):
    """Evaluate B-spline basis matrices on a uniform grid with uniform knots.

    Args:
        control_points_u: number of control points in u.
        control_points_v: number of control points in v.
        degree_u: spline degree in u.
        degree_v: spline degree in v.
        grid_size: number of uniform parameter samples per direction.

    Returns:
        nu: (grid_size, control_points_u) basis values in u.
        nv: (grid_size, control_points_v) basis values in v.
    """
    # Uniform parameter samples in [0, 1).
    u = v = np.arange(0., 1, 1 / grid_size)
    # Clamped uniform knot vectors: degree-many repeated end knots.
    knots_u = [0.0] * degree_u + np.arange(0, 1.01, 1 / (control_points_u - degree_u)).tolist() + [1.0] * degree_u
    knots_v = [0.0] * degree_v + np.arange(0, 1.01, 1 / (control_points_v - degree_v)).tolist() + [1.0] * degree_v
    # (Removed dead 'nu = []' and 'basis = []' locals that were never used.)
    nu = np.zeros((u.shape[0], control_points_u))
    for i in range(u.shape[0]):
        for j in range(0, control_points_u):
            nu[i, j] = basis_function_one(degree_u, knots_u, j, u[i])
    nv = np.zeros((v.shape[0], control_points_v))
    for i in range(v.shape[0]):
        for j in range(0, control_points_v):
            nv[i, j] = basis_function_one(degree_v, knots_v, j, v[i])
    return nu, nv
def basis_function_one(degree, knot_vector, span, knot):
    """Evaluate a single B-spline basis function N_{span,degree}(knot).

    Implementation of Algorithm 2.4 from "The NURBS Book" (Piegl & Tiller).

    :param degree: degree, :math:`p`
    :type degree: int
    :param knot_vector: knot vector
    :type knot_vector: list, tuple
    :param span: knot span, :math:`i`
    :type span: int
    :param knot: knot or parameter, :math:`u`
    :type knot: float
    :return: basis function, :math:`N_{i,p}`
    :rtype: float
    """
    last = len(knot_vector) - 1
    # Boundary special cases at the two ends of the clamped knot vector.
    if (span == 0 and knot == knot_vector[0]) or (
            span == last - degree - 1 and knot == knot_vector[last]):
        return 1.0
    # Local support: the function vanishes outside [u_span, u_{span+p+1}).
    if not (knot_vector[span] <= knot < knot_vector[span + degree + 1]):
        return 0.0
    table = [0.0 for _ in range(degree + span + 1)]
    # Degree-zero basis functions: indicator of the containing knot span.
    for offset in range(0, degree + 1):
        if knot_vector[span + offset] <= knot < knot_vector[span + offset + 1]:
            table[offset] = 1.0
    # Build the triangular table up to the requested degree.
    for level in range(1, degree + 1):
        saved = 0.0
        if table[0] != 0.0:  # skip the division when the entry is zero
            saved = ((knot - knot_vector[span]) * table[0]) / (
                knot_vector[span + level] - knot_vector[span])
        for offset in range(0, degree - level + 1):
            u_left = knot_vector[span + offset + 1]
            u_right = knot_vector[span + offset + level + 1]
            if table[offset + 1] == 0.0:
                table[offset] = saved
                saved = 0.0
            else:
                ratio = table[offset + 1] / (u_right - u_left)
                table[offset] = saved + (u_right - knot) * ratio
                saved = (knot - u_left) * ratio
    return table[0]
def uniform_knot_bspline_(control_points_u, control_points_v, degree_u, degree_v, grid_size=30):
    """
    Returns uniform knots, given the number of control points in u and v directions and
    their degrees.

    Returns:
        nu: (grid_size, control_points_u) basis values in u.
        nv: (grid_size, control_points_v) basis values in v.
        knots_u: clamped uniform knot vector in u (list).
        knots_v: clamped uniform knot vector in v (list).
    """
    # Uniform parameter samples in [0, 1).
    u = v = np.arange(0., 1, 1 / grid_size)
    # Clamped uniform knot vectors: degree-many repeated end knots.
    knots_u = [0.0] * degree_u + np.arange(0, 1.01, 1 / (control_points_u - degree_u)).tolist() + [1.0] * degree_u
    knots_v = [0.0] * degree_v + np.arange(0, 1.01, 1 / (control_points_v - degree_v)).tolist() + [1.0] * degree_v
    # (Removed dead 'nu = []' local that was immediately overwritten.)
    nu = np.zeros((u.shape[0], control_points_u))
    for i in range(u.shape[0]):
        for j in range(0, control_points_u):
            nu[i, j] = basis_function_one(degree_u, knots_u, j, u[i])
    nv = np.zeros((v.shape[0], control_points_v))
    for i in range(v.shape[0]):
        for j in range(0, control_points_v):
            nv[i, j] = basis_function_one(degree_v, knots_v, j, v[i])
    return nu, nv, knots_u, knots_v
| [
"numpy.stack",
"numpy.sum",
"numpy.linalg.lstsq",
"scipy.special.comb",
"numpy.square",
"numpy.transpose",
"numpy.zeros",
"numpy.expand_dims",
"numpy.floor",
"numpy.ones",
"numpy.random.random",
"numpy.array",
"numpy.arange",
"numpy.matmul",
"numpy.reshape",
"geomdl.BSpline.Surface",
... | [((685, 703), 'numpy.array', 'np.array', (['matrix_n'], {}), '(matrix_n)\n', (693, 703), True, 'import numpy as np\n'), ((892, 917), 'numpy.matmul', 'np.matmul', (['ps_inv', 'points'], {}), '(ps_inv, points)\n', (901, 917), True, 'import numpy as np\n'), ((11009, 11025), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (11018, 11025), True, 'import numpy as np\n'), ((11038, 11048), 'scipy.special.comb', 'comb', (['N', 'K'], {}), '(N, K)\n', (11042, 11048), False, 'from scipy.special import comb\n'), ((11295, 11315), 'numpy.concatenate', 'np.concatenate', (['T', '(1)'], {}), '(T, 1)\n', (11309, 11315), True, 'import numpy as np\n'), ((11864, 11887), 'numpy.matmul', 'np.matmul', (['nv', 'vt_v_inv'], {}), '(nv, vt_v_inv)\n', (11873, 11887), True, 'import numpy as np\n'), ((12111, 12129), 'numpy.array', 'np.array', (['cntrlpts'], {}), '(cntrlpts)\n', (12119, 12129), True, 'import numpy as np\n'), ((12141, 12170), 'numpy.transpose', 'np.transpose', (['ctrl', '(1, 2, 0)'], {}), '(ctrl, (1, 2, 0))\n', (12153, 12170), True, 'import numpy as np\n'), ((13086, 13100), 'numpy.stack', 'np.stack', (['A', '(0)'], {}), '(A, 0)\n', (13094, 13100), True, 'import numpy as np\n'), ((13109, 13131), 'numpy.reshape', 'np.reshape', (['A', '(N, -1)'], {}), '(A, (N, -1))\n', (13119, 13131), True, 'import numpy as np\n'), ((13291, 13309), 'numpy.stack', 'np.stack', (['cntrl', '(2)'], {}), '(cntrl, 2)\n', (13299, 13309), True, 'import numpy as np\n'), ((13702, 13721), 'numpy.stack', 'np.stack', (['points', '(2)'], {}), '(points, 2)\n', (13710, 13721), True, 'import numpy as np\n'), ((14024, 14057), 'numpy.random.random', 'np.random.random', (['(num_points, 1)'], {}), '((num_points, 1))\n', (14040, 14057), True, 'import numpy as np\n'), ((14281, 14300), 'numpy.stack', 'np.stack', (['points', '(2)'], {}), '(points, 2)\n', (14289, 14300), True, 'import numpy as np\n'), ((14314, 14354), 'numpy.reshape', 'np.reshape', (['points', '(num_points ** 2, 3)'], {}), '(points, 
(num_points ** 2, 3))\n', (14324, 14354), True, 'import numpy as np\n'), ((14584, 14620), 'numpy.square', 'np.square', (['(points[0:-1] - points[1:])'], {}), '(points[0:-1] - points[1:])\n', (14593, 14620), True, 'import numpy as np\n'), ((14714, 14733), 'numpy.sum', 'np.sum', (['points_dash'], {}), '(points_dash)\n', (14720, 14733), True, 'import numpy as np\n'), ((14842, 14862), 'numpy.zeros', 'np.zeros', (['num_points'], {}), '(num_points)\n', (14850, 14862), True, 'import numpy as np\n'), ((15075, 15107), 'numpy.arange', 'np.arange', (['(0.0)', '(1)', '(1 / grid_size)'], {}), '(0.0, 1, 1 / grid_size)\n', (15084, 15107), True, 'import numpy as np\n'), ((15360, 15400), 'numpy.zeros', 'np.zeros', (['(u.shape[0], control_points_u)'], {}), '((u.shape[0], control_points_u))\n', (15368, 15400), True, 'import numpy as np\n'), ((15577, 15617), 'numpy.zeros', 'np.zeros', (['(v.shape[0], control_points_v)'], {}), '((v.shape[0], control_points_v))\n', (15585, 15617), True, 'import numpy as np\n'), ((17892, 17924), 'numpy.arange', 'np.arange', (['(0.0)', '(1)', '(1 / grid_size)'], {}), '(0.0, 1, 1 / grid_size)\n', (17901, 17924), True, 'import numpy as np\n'), ((18177, 18217), 'numpy.zeros', 'np.zeros', (['(u.shape[0], control_points_u)'], {}), '((u.shape[0], control_points_u))\n', (18185, 18217), True, 'import numpy as np\n'), ((18375, 18415), 'numpy.zeros', 'np.zeros', (['(v.shape[0], control_points_v)'], {}), '((v.shape[0], control_points_v))\n', (18383, 18415), True, 'import numpy as np\n'), ((855, 877), 'numpy.transpose', 'np.transpose', (['matrix_n'], {}), '(matrix_n)\n', (867, 877), True, 'import numpy as np\n'), ((2649, 2673), 'geomdl.BSpline.Surface', 'geomdl.BSpline.Surface', ([], {}), '()\n', (2671, 2673), False, 'import geomdl\n'), ((3322, 3362), 'numpy.zeros', 'np.zeros', (['(num_points, control_points_u)'], {}), '((num_points, control_points_u))\n', (3330, 3362), True, 'import numpy as np\n'), ((3578, 3599), 'numpy.expand_dims', 'np.expand_dims', (['nu', 
'(2)'], {}), '(nu, 2)\n', (3592, 3599), True, 'import numpy as np\n'), ((3614, 3654), 'numpy.zeros', 'np.zeros', (['(num_points, control_points_v)'], {}), '((num_points, control_points_v))\n', (3622, 3654), True, 'import numpy as np\n'), ((3847, 3868), 'numpy.expand_dims', 'np.expand_dims', (['nv', '(1)'], {}), '(nv, 1)\n', (3861, 3868), True, 'import numpy as np\n'), ((3906, 3923), 'numpy.matmul', 'np.matmul', (['nu', 'nv'], {}), '(nu, nv)\n', (3915, 3923), True, 'import numpy as np\n'), ((3940, 4008), 'numpy.reshape', 'np.reshape', (['basis', '(num_points, control_points_v * control_points_u)'], {}), '(basis, (num_points, control_points_v * control_points_u))\n', (3950, 4008), True, 'import numpy as np\n'), ((4210, 4229), 'numpy.stack', 'np.stack', (['points', '(1)'], {}), '(points, 1)\n', (4218, 4229), True, 'import numpy as np\n'), ((4987, 5003), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (4995, 5003), True, 'import numpy as np\n'), ((5802, 5814), 'numpy.array', 'np.array', (['nu'], {}), '(nu)\n', (5810, 5814), True, 'import numpy as np\n'), ((6064, 6076), 'numpy.array', 'np.array', (['nv'], {}), '(nv)\n', (6072, 6076), True, 'import numpy as np\n'), ((6291, 6314), 'numpy.matmul', 'np.matmul', (['nv', 'vt_v_inv'], {}), '(nv, vt_v_inv)\n', (6300, 6314), True, 'import numpy as np\n'), ((6564, 6582), 'numpy.array', 'np.array', (['cntrlpts'], {}), '(cntrlpts)\n', (6572, 6582), True, 'import numpy as np\n'), ((6598, 6627), 'numpy.transpose', 'np.transpose', (['ctrl', '(1, 2, 0)'], {}), '(ctrl, (1, 2, 0))\n', (6610, 6627), True, 'import numpy as np\n'), ((7009, 7040), 'numpy.arange', 'np.arange', (['(1)', '(num_cpts - degree)'], {}), '(1, num_cpts - degree)\n', (7018, 7040), True, 'import numpy as np\n'), ((7053, 7068), 'numpy.floor', 'np.floor', (['(j * d)'], {}), '(j * d)\n', (7061, 7068), True, 'import numpy as np\n'), ((7303, 7333), 'numpy.array', 'np.array', (['([1.0] * (degree + 1))'], {}), '([1.0] * (degree + 1))\n', (7311, 7333), True, 'import 
numpy as np\n'), ((7352, 7382), 'numpy.array', 'np.array', (['([0.0] * (degree + 1))'], {}), '([0.0] * (degree + 1))\n', (7360, 7382), True, 'import numpy as np\n'), ((7396, 7434), 'numpy.concatenate', 'np.concatenate', (['[extra_0, kv, extra_1]'], {}), '([extra_0, kv, extra_1])\n', (7410, 7434), True, 'import numpy as np\n'), ((9859, 9886), 'numpy.concatenate', 'np.concatenate', (['params_v', '(0)'], {}), '(params_v, 0)\n', (9873, 9886), True, 'import numpy as np\n'), ((9907, 9927), 'numpy.mean', 'np.mean', (['params_v', '(0)'], {}), '(params_v, 0)\n', (9914, 9927), True, 'import numpy as np\n'), ((10115, 10142), 'numpy.concatenate', 'np.concatenate', (['params_u', '(1)'], {}), '(params_u, 1)\n', (10129, 10142), True, 'import numpy as np\n'), ((10163, 10183), 'numpy.mean', 'np.mean', (['params_u', '(1)'], {}), '(params_u, 1)\n', (10170, 10183), True, 'import numpy as np\n'), ((10462, 10498), 'numpy.square', 'np.square', (['(points[0:-1] - points[1:])'], {}), '(points[0:-1] - points[1:])\n', (10471, 10498), True, 'import numpy as np\n'), ((10604, 10623), 'numpy.sum', 'np.sum', (['points_dash'], {}), '(points_dash)\n', (10610, 10623), True, 'import numpy as np\n'), ((10744, 10764), 'numpy.zeros', 'np.zeros', (['num_points'], {}), '(num_points)\n', (10752, 10764), True, 'import numpy as np\n'), ((10966, 10996), 'numpy.ones', 'np.ones', (['(n + 1)'], {'dtype': 'np.int32'}), '(n + 1, dtype=np.int32)\n', (10973, 10996), True, 'import numpy as np\n'), ((11766, 11782), 'numpy.transpose', 'np.transpose', (['nu'], {}), '(nu)\n', (11778, 11782), True, 'import numpy as np\n'), ((13183, 13227), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'points[:, i]'], {'rcond': 'None'}), '(A, points[:, i], rcond=None)\n', (13198, 13227), True, 'import numpy as np\n'), ((14647, 14669), 'numpy.sum', 'np.sum', (['points_dash', '(1)'], {}), '(points_dash, 1)\n', (14653, 14669), True, 'import numpy as np\n'), ((14921, 14949), 'numpy.sum', 'np.sum', (['points_dash[0:i + 1]'], {}), 
'(points_dash[0:i + 1])\n', (14927, 14949), True, 'import numpy as np\n'), ((789, 811), 'numpy.transpose', 'np.transpose', (['matrix_n'], {}), '(matrix_n)\n', (801, 811), True, 'import numpy as np\n'), ((4060, 4137), 'numpy.reshape', 'np.reshape', (['control_points[:, :, i]', '(control_points_u * control_points_v, 1)'], {}), '(control_points[:, :, i], (control_points_u * control_points_v, 1))\n', (4070, 4137), True, 'import numpy as np\n'), ((6185, 6201), 'numpy.transpose', 'np.transpose', (['nu'], {}), '(nu)\n', (6197, 6201), True, 'import numpy as np\n'), ((10529, 10551), 'numpy.sum', 'np.sum', (['points_dash', '(1)'], {}), '(points_dash, 1)\n', (10535, 10551), True, 'import numpy as np\n'), ((10831, 10859), 'numpy.sum', 'np.sum', (['points_dash[0:i + 1]'], {}), '(points_dash[0:i + 1])\n', (10837, 10859), True, 'import numpy as np\n'), ((11706, 11722), 'numpy.transpose', 'np.transpose', (['nu'], {}), '(nu)\n', (11718, 11722), True, 'import numpy as np\n'), ((11824, 11840), 'numpy.transpose', 'np.transpose', (['nv'], {}), '(nv)\n', (11836, 11840), True, 'import numpy as np\n'), ((1350, 1362), 'numpy.array', 'np.array', (['nu'], {}), '(nu)\n', (1358, 1362), True, 'import numpy as np\n'), ((1553, 1565), 'numpy.array', 'np.array', (['nv'], {}), '(nv)\n', (1561, 1565), True, 'import numpy as np\n'), ((1743, 1759), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (1751, 1759), True, 'import numpy as np\n'), ((2282, 2294), 'numpy.array', 'np.array', (['nu'], {}), '(nu)\n', (2290, 2294), True, 'import numpy as np\n'), ((2485, 2497), 'numpy.array', 'np.array', (['nv'], {}), '(nv)\n', (2493, 2497), True, 'import numpy as np\n'), ((4164, 4191), 'numpy.matmul', 'np.matmul', (['basis', 'cntrl_pts'], {}), '(basis, cntrl_pts)\n', (4173, 4191), True, 'import numpy as np\n'), ((6121, 6137), 'numpy.transpose', 'np.transpose', (['nu'], {}), '(nu)\n', (6133, 6137), True, 'import numpy as np\n'), ((6247, 6263), 'numpy.transpose', 'np.transpose', (['nv'], {}), '(nv)\n', 
(6259, 6263), True, 'import numpy as np\n'), ((12009, 12047), 'numpy.matmul', 'np.matmul', (['ut_u_inv_u', 'points[:, :, i]'], {}), '(ut_u_inv_u, points[:, :, i])\n', (12018, 12047), True, 'import numpy as np\n'), ((13021, 13054), 'numpy.transpose', 'np.transpose', (['basis_u[i:i + 1, :]'], {}), '(basis_u[i:i + 1, :])\n', (13033, 13054), True, 'import numpy as np\n'), ((13632, 13663), 'numpy.matmul', 'np.matmul', (['basis_u', 'cp[:, :, i]'], {}), '(basis_u, cp[:, :, i])\n', (13641, 13663), True, 'import numpy as np\n'), ((13665, 13686), 'numpy.transpose', 'np.transpose', (['basis_v'], {}), '(basis_v)\n', (13677, 13686), True, 'import numpy as np\n'), ((14211, 14242), 'numpy.matmul', 'np.matmul', (['basis_u', 'cp[:, :, i]'], {}), '(basis_u, cp[:, :, i])\n', (14220, 14242), True, 'import numpy as np\n'), ((14244, 14265), 'numpy.transpose', 'np.transpose', (['basis_v'], {}), '(basis_v)\n', (14256, 14265), True, 'import numpy as np\n'), ((1679, 1719), 'numpy.matmul', 'np.matmul', (['nu.T', 'control_points[:, :, i]'], {}), '(nu.T, control_points[:, :, i])\n', (1688, 1719), True, 'import numpy as np\n'), ((6452, 6491), 'numpy.matmul', 'np.matmul', (['ut_u_inv_u', 'points_[:, :, i]'], {}), '(ut_u_inv_u, points_[:, :, i])\n', (6461, 6491), True, 'import numpy as np\n'), ((15141, 15194), 'numpy.arange', 'np.arange', (['(0)', '(1.01)', '(1 / (control_points_u - degree_u))'], {}), '(0, 1.01, 1 / (control_points_u - degree_u))\n', (15150, 15194), True, 'import numpy as np\n'), ((15256, 15309), 'numpy.arange', 'np.arange', (['(0)', '(1.01)', '(1 / (control_points_v - degree_v))'], {}), '(0, 1.01, 1 / (control_points_v - degree_v))\n', (15265, 15309), True, 'import numpy as np\n'), ((17958, 18011), 'numpy.arange', 'np.arange', (['(0)', '(1.01)', '(1 / (control_points_u - degree_u))'], {}), '(0, 1.01, 1 / (control_points_u - degree_u))\n', (17967, 18011), True, 'import numpy as np\n'), ((18073, 18126), 'numpy.arange', 'np.arange', (['(0)', '(1.01)', '(1 / (control_points_v - 
degree_v))'], {}), '(0, 1.01, 1 / (control_points_v - degree_v))\n', (18082, 18126), True, 'import numpy as np\n')] |
"""
A set of functions and scripts to demonstrate camera calibration and
registration.
Notes:
* Parts of this module uses opencv calibration described here:
https://docs.opencv.org/4.0.0/d9/d0c/group__calib3d.html
* and the Chruco board described here:
https://docs.opencv.org/4.0.0/d0/d3c/classcv_1_1aruco_1_1CharucoBoard.html
Copyright (C) Microsoft Corporation. All rights reserved.
"""
# Standard imports.
import os
import fnmatch
import json
import math
import time
import warnings
from dataclasses import dataclass
from typing import Dict
from typing import List
from typing import Tuple
# 3rd party imports.
import cv2
import cv2.aruco as aruco
from cv2 import aruco_CharucoBoard
from cv2 import aruco_DetectorParameters as detect_params
import numpy as np
#-------------------------------------------------------------------------------
class CalibrationError(Exception):
    """Raised when camera calibration cannot be completed successfully."""
class RegistrationError(Exception):
    """Raised when two cameras cannot be registered to each other."""
#-------------------------------------------------------------------------------
@dataclass
class RTMatrix:
    """Container pairing a rotation with a translation between two
    coordinate frames (e.g. board-to-camera or camera-to-camera)."""
    rotation:List
    translation:List
#-------------------------------------------------------------------------------
def write_json(json_file:str, data:dict)-> None:
    """Helper function for writing out json files.

    Args:
        json_file (str): full path
        data (dict): Blob of data to write.
    """
    # Explicit encoding avoids platform-dependent default codecs mangling
    # non-ASCII content in the written file.
    with open(json_file, "w", encoding="utf-8") as j:
        json.dump(data, j, indent=4)
#-------------------------------------------------------------------------------
def r_as_matrix(rotation:np.array):
    """Convert a Rodrigues rotation 3-vector into a 3x3 rotation matrix.

    Args:
        rotation (np.array): 3-vector (axis-angle / Rodrigues) rotation.

    Returns:
        np.array: The equivalent 3x3 rotation matrix.
    """
    rotation_matrix = np.zeros((3, 3))
    # cv2.Rodrigues fills the destination array in place.
    cv2.Rodrigues(rotation, rotation_matrix)
    return rotation_matrix
#-------------------------------------------------------------------------------
def read_opencv_calfile(calfile:str) -> Tuple[np.ndarray,
                                              np.ndarray,
                                              np.ndarray]:
    """Load a single-camera calibration from an OpenCV yaml file.

    Args:
        calfile (str): full path to an opencv calibration yaml file.

    Raises:
        ValueError: When the cv2 calibration file fails to open.

    Returns:
        Tuple[np.ndarray, np.ndarray, np.ndarray]:
            k_matrix: The camera intrinsics matrix.
            dist: The distortion matrix.
            img_size: Image size as an ndarray, ordered width, height.
    """
    storage = cv2.FileStorage(calfile, cv2.FILE_STORAGE_READ)
    if not storage.isOpened():
        raise ValueError(f"failed to open {storage}")
    # Pull the three nodes written by write_opencv_calfile.
    k_matrix, dist, img_size = (storage.getNode(key).mat()
                                for key in ("K", "dist", "img_size"))
    storage.release()
    return k_matrix, dist, img_size
#-------------------------------------------------------------------------------
def write_opencv_calfile(calfile:str,
                         k_matrix:np.array,
                         dist8:List,
                         img_size:np.array) -> None:
    """Persist a single camera's calibration as an OpenCV yaml file.

    Args:
        calfile (str): Full path of calibration file.
        k_matrix (np.array): The camera intrinsics matrix.
        dist8 (List): The distortion coefficients.
        img_size (np.array): 2d array of the image size, ordered width, height.
    """
    storage = cv2.FileStorage(calfile, cv2.FILE_STORAGE_WRITE)
    # Write each node under the key that read_opencv_calfile expects.
    for key, value in (("K", k_matrix), ("dist", dist8), ("img_size", img_size)):
        storage.write(key, value)
    storage.release()
#-------------------------------------------------------------------------------
def write_calibration_blob(calibrations:List[str],
                           rmat_b_to_a:np.array,
                           tvec_b_to_a:np.array,
                           out_dir:str):
    """Write all calibration and registration values to a json blob.

    Note: camera index 0 is treated as the origin (identity extrinsics), and
    every other entry in ``calibrations`` is assigned the same single
    rotation/translation, so this effectively supports two-camera rigs.

    Args:
        calibrations (List[str]): All calibration files.
        rmat_b_to_a (np.array): Rotation matrix array.
        tvec_b_to_a (np.array): Translation vector array.
        out_dir (str): Output directory to place the calibration_blob.json file.
    """
    blob = {"CalibrationInformation":{"Cameras":[]}}
    for idx, calibration_file in enumerate(calibrations):
        if idx == 0:
            # RT for the camera used as the origin for all others:
            # identity rotation (row-major 3x3) and zero translation.
            reshape_r = [1,0,0,0,1,0,0,0,1]
            reshape_t = [0,0,0]
        else:
            # Flatten the 3x3 rotation (row-major) into 9 values and the
            # translation into 3, as the blob schema expects flat lists.
            reshape_r = rmat_b_to_a.reshape(9, 1).squeeze(1).tolist()
            reshape_t = tvec_b_to_a.squeeze(1).tolist()
        camera_matrix, dist, img_size = read_opencv_calfile(calibration_file)
        # Principal point and focal lengths are normalized by image width /
        # height; distortion terms follow in the Brown-Conrady order the
        # blob consumer expects (K1..K6, Cx, Cy, P2, P1).
        intrinsics = [camera_matrix[0][2]/img_size[0][0], #Px
                      camera_matrix[1][2]/img_size[1][0], #Py
                      camera_matrix[0][0]/img_size[0][0], #Fx
                      camera_matrix[1][1]/img_size[1][0], #Fy
                      dist[0][0], #K1
                      dist[1][0], #K2
                      dist[4][0], #K3
                      dist[5][0], #K4
                      dist[6][0], #K5
                      dist[7][0], #K6
                      0, #Cx always Zero. (BrownConrady)
                      0, #Cy always Zero. (BrownConrady)
                      dist[3][0], #P2/Tx
                      dist[2][0]] #P1/Ty
        model_type = "CALIBRATION_LensDistortionModelBrownConrady"
        intrinsics_data = {"ModelParameterCount":len(intrinsics),
                           "ModelParameters":intrinsics,
                           "ModelType":model_type}
        extrinsics = {"Rotation":reshape_r, "Translation":reshape_t}
        calibration = {"Intrinsics":intrinsics_data,
                       "Rt":extrinsics,
                       "SensorHeight":img_size[1].tolist(),
                       "SensorWidth":img_size[0].tolist()}
        blob["CalibrationInformation"]["Cameras"].append(calibration)
    os.makedirs(out_dir, exist_ok=True)
    json_file = os.path.join(out_dir, "calibration_blob.json")
    write_json(json_file, blob)
#-------------------------------------------------------------------------------
def read_board_parameters(json_file: str) -> Tuple[Dict, aruco_CharucoBoard]:
    """Read charuco board from a json file.

    Args:
        json_file (str): fullpath of the board json_file.

    Returns:
        Tuple[dict, aruco_CharucoBoard]:
            target: First shape entry ("shapes"[0]) from the json file.
            board: A single charuco board object.
    """
    # Explicit encoding avoids platform-dependent default codecs.
    with open(json_file, encoding="utf-8") as j_file:
        targets = json.load(j_file)
    target = targets["shapes"][0]
    aruco_dict = aruco.Dictionary_get(target["aruco_dict_name"])
    # Board lengths are stored in millimetres; OpenCV expects metres.
    board = aruco.CharucoBoard_create(target["squares_x"],
                                      target["squares_y"],
                                      target["square_length"]/1000,
                                      target["marker_length"]/1000,
                                      aruco_dict)
    return target, board
#-------------------------------------------------------------------------------
def get_image_points(board:aruco_CharucoBoard,
                     marker_ids:np.ndarray) -> np.ndarray:
    """
    Look up the 3d board positions of the detected charuco corner ids,
    like getBoardObjectAndImagePoints but for Charuco parameters.

    Args:
        board (aruco_CharucoBoard): A board object from opencv.
        marker_ids (np.ndarray): List of detected charuco marker Ids.

    Returns:
        np.ndarray: numpy array (n*1*3) of the markers' 3d positions.
    """
    # chessboardCorners holds every corner; select the detected ids only.
    return board.chessboardCorners[marker_ids, :]
#-------------------------------------------------------------------------------
def detect_markers(img: np.ndarray,
                   template: str,
                   params:detect_params = None) -> Tuple[List[np.ndarray],
                                                         List[np.ndarray],
                                                         aruco_CharucoBoard]:
    """Detect charuco board markers in a single image.

    Args:
        img (np.ndarray): Board image.
        template (str): fullpath of the board json_file.
        params (aruco_DetectorParameters, optional): a cv2 object
            aruco_DetectorParameters. Defaults to None.

    Returns:
        Tuple[List[np.ndarray], List[np.ndarray], aruco_CharucoBoard]:
            charuco_corners: Detected charuco marker corners ([] if none).
            charuco_ids: Detected charuco marker Ids ([] if none).
            board: charucoboard object.
    """
    _, board = read_board_parameters(template)
    if params is None:
        # Default detector configuration: no corner refinement.
        params = aruco.DetectorParameters_create()
        params.cornerRefinementMethod = aruco.CORNER_REFINE_NONE
    aruco_corners, aruco_ids, _ = aruco.detectMarkers(img,
                                                    board.dictionary,
                                                    None,
                                                    None,
                                                    params)
    charuco_corners = []
    charuco_ids = []
    interpolated = False
    if len(aruco_corners) > 0:
        _, charuco_corners, charuco_ids = aruco.interpolateCornersCharuco(
            aruco_corners,
            aruco_ids,
            img,
            board)
        interpolated = charuco_corners is not None
    if not interpolated:
        # No aruco markers found, or interpolation produced nothing
        # usable: fall back to empty results with a warning.
        charuco_corners = []
        charuco_ids = []
        warnings.warn("No charuco corners detected in image.")
    return charuco_corners, charuco_ids, board
#-------------------------------------------------------------------------------
def detect_markers_many_images(imgnames:List[str], template: str):
    """
    Run detect_markers on a large set of png or jpeg images in a single directory,
    with the assumption that all images are viewing the same board.

    Args:
        imgnames (List[str]): Full path to images.
        template (str): Template file json of the board.

    Raises:
        CalibrationError: Not all image sizes are equal.
        CalibrationError: Insufficient number of markers detected. Inspect images
            for poor quality.

    Returns:
        [Tuple]: [List[List[np.ndarray]],
                  List[List[np.ndarray]],
                  List[List[np.ndarray]],
                  Tuple[np.array, np.array],
                  aruco_CharucoBoard]
            ccorners_all: All charuco corners detected in every image.
            cids_all: All charuco marker ids detected in every image.
            p3d: Image points.
            img_size: [width; height].
            board: Charucoboard object.
    """
    ccorners_all = []
    cids_all = []
    p3d = []
    img_sizes_all = []
    for imgfile in imgnames:
        img = cv2.imread(imgfile, cv2.IMREAD_GRAYSCALE)
        if img is None:
            continue
        ccorners, cids, board = detect_markers(img, template)
        # Require more than 3 corners for a usable detection.
        if len(ccorners) > 3:
            ccorners_all.append(ccorners)
            cids_all.append(cids)
            p3d.append(get_image_points(board, cids))
            img_sizes_all.append(np.array([img.shape[1], img.shape[0]]))
    # Bug fix: with zero usable detections the size checks below would raise
    # an opaque IndexError (or NameError on 'board' when no image loaded);
    # raise the documented CalibrationError instead.
    if not img_sizes_all:
        raise CalibrationError("Insufficient number of markers detected. "
                               "Inspect images for poor quality.")
    # check all images sizes are identical.
    rows_equal = [elem[0]==img_sizes_all[0][0] for elem in img_sizes_all]
    cols_equal = [elem[1]==img_sizes_all[0][1] for elem in img_sizes_all]
    if not all(rows_equal) or not all(cols_equal):
        raise CalibrationError("Not all image sizes in data set are the same.")
    img_size = (img_sizes_all[0][0], img_sizes_all[0][1])
    return ccorners_all, cids_all, p3d, img_size, board
#-------------------------------------------------------------------------------
def estimate_pose(img: np.array,
                  template: str,
                  opencv_calfile: str) -> Tuple[bool,
                                                np.ndarray,
                                                np.ndarray]:
    """Estimate camera pose from a charuco board image.

    Args:
        img (np.array): Board image.
        template (str): fullpath of the board json_file.
        opencv_calfile (str): fullpath of the opencv cal file.

    Raises:
        ValueError: Propagated if the calibration file fails to load.

    Returns:
        Tuple[bool, np.ndarray, np.ndarray]:
            retval: True if the pose optimizer converged.
            rvec: rotation array ([] when no markers were found).
            tvec: translation array 1*3 ([] when no markers were found).
    """
    k_matrix, dist, _ = read_opencv_calfile(opencv_calfile)
    charuco_corners, charuco_ids, board = detect_markers(img, template)
    if len(charuco_corners) == 0:
        # Nothing to solve against: report failure with empty extrinsics.
        return False, [], []
    # Seed the solver with a small non-zero initial guess.
    rvec = np.full((1, 3), 0.01)
    tvec = np.full((1, 3), 0.01)
    retval, rvec, tvec = aruco.estimatePoseCharucoBoard(charuco_corners,
                                                       charuco_ids,
                                                       board,
                                                       k_matrix,
                                                       dist,
                                                       rvec,
                                                       tvec)
    return retval, rvec, tvec
#-------------------------------------------------------------------------------
def pose_as_dataclass(array_a:np.array,
                      template:str,
                      calib_a:str,
                      img_a:str)-> RTMatrix:
    """Get RT of camera to board.

    Args:
        array_a (np.array): Numpy array of the image.
        template (str): Template to estimate pose.
        calib_a (str): calibration file for this camera.
        img_a (str): Path to image (used only for the error message).

    Raises:
        RegistrationError: Failed to estimate pose of board.

    Returns:
        RTMatrix: Rotation and translation as a dataclass.
    """
    retval_a, rvec_a, tvec_a = estimate_pose(array_a, template, calib_a)
    # Bug fix: test truthiness rather than identity. ``retval_a is False``
    # misses falsy non-singleton values (e.g. a numpy bool from OpenCV).
    if not retval_a:
        raise RegistrationError(f"Could not estimate pose for image @ {img_a}")
    return RTMatrix(rvec_a, tvec_a)
#-------------------------------------------------------------------------------
def unproject(points:np.array, k_mat:np.array, dist:np.array) -> np.array:
    """
    Take the 2D distorted markers in an image and unproject into normalized 3D
    coordinates.

    Args:
        points (np.array): Location of markers in image.
        k_mat (np.array): Camera Matrix.
        dist (np.array): Distortion coefficients.

    Returns:
        np.array: 3D Normalized coordinates of markers after unprojection.
    """
    # Stop criteria for the iterative undistortion solver.
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                400,
                0.000001)
    undistorted = cv2.undistortPointsIter(points,
                                           k_mat,
                                           dist,
                                           np.eye(3),
                                           k_mat,
                                           criteria)
    center = [k_mat[0][2], k_mat[1][2]]
    focal = [k_mat[0][0], k_mat[1][1]]
    # Shift by the principal point and scale by focal length to normalize.
    return (undistorted - center) / focal
#-------------------------------------------------------------------------------
def registration_error(array_a:np.array,
                       template:str,
                       calib_a:str,
                       pose_b:RTMatrix,
                       rmat_b_to_a:np.array,
                       tvec_b_to_a:np.array) -> Tuple[float, float]:
    """
    Calculate the registration error between two cameras. The registration error
    is computed by taking the rotation and translation of the board to camera B
    coordinates, applying the registration, then projecting into 2D camera A
    coordinates, then calculating this against the reprojection error of the
    detected markers from camera A.
                  camera B           camera A (prj)
                ------------        ------------
                | 3D points| -----> | 3D points|
                ------------        ------------
                    ^                    |
                    |                    |
                    |                    v             camera A (detected)
                ------------        ------------   diff with  ------------
                | 3D board |        | 2D points| <---------> | 2D points|
                ------------        ------------             ------------

    Args:
        array_a (np.array): Image from camera A.
        template (str): Template of the board.
        calib_a (str): Path to camera calibration file for camera A.
        pose_b (RTMatrix): Rotation and translation of board to camera B.
        rmat_b_to_a (np.array): Rotation matrix of camera B to camera A.
        tvec_b_to_a (np.array): Translation vector of camera B to camera A.

    Returns:
        Tuple[float, float]: Root mean square of all reprojected points in pixels
            and radians.
    """
    # Get 2d image coordinates of markers detected in camera A.
    corners_a, ids_a, board = detect_markers(array_a, template)
    # 3D board points in coordinates of camera B.
    points_board = board.chessboardCorners[ids_a, :]
    squoze = points_board.squeeze(1)
    # markers_cam_b = R_b * X_board + t_b (column-vector convention).
    markers_cam_b = np.matmul(r_as_matrix(pose_b.rotation),
                              squoze.transpose()) + pose_b.translation
    # Multiply by registration to get markers 3D coordinates in camera A.
    pts_in_cam_a = np.matmul(rmat_b_to_a, markers_cam_b) + tvec_b_to_a
    # Registration computed 3D points to 2D image plane A. Identity R and
    # zero t: the points are already expressed in camera A coordinates.
    k_mat, dist, _ = read_opencv_calfile(calib_a)
    pts, _ = cv2.projectPoints(pts_in_cam_a,
                               np.eye(3),
                               np.zeros((3,1)),
                               k_mat,
                               dist)
    # Express difference between measured and projected as radians: the
    # angle between the two normalized viewing rays of each marker.
    angles = []
    measured = unproject(corners_a, k_mat, dist)
    prediction = unproject(pts, k_mat, dist)
    for idx, elem in enumerate(measured):
        # Homogeneous ray directions (x, y, 1) for measured and predicted.
        meas = [elem[0][0], elem[0][1], 1]
        pred = [prediction[idx][0][0], prediction[idx][0][1], 1]
        dot_product = np.dot(meas, pred)
        norm_a = np.linalg.norm(meas)
        norm_b = np.linalg.norm(pred)
        # Angle between rays via cos(theta) = a.b / (|a||b|).
        theta = math.acos(dot_product / (norm_a * norm_b))
        angles.append(theta)
    # Get Root Mean Square of measured points in camera A to registration
    # calculated points.
    num_points = pts.shape[0]
    squares = [elem**2 for elem in angles]
    rms_radians = math.sqrt(sum(squares)/num_points)
    print(f"RMS (Radians): {rms_radians}")
    rms_pixels = np.sqrt(np.sum((corners_a-pts)**2)/num_points)
    return rms_pixels, rms_radians
#-------------------------------------------------------------------------------
def calibrate_camera(
    imdir:str,
    template:str,
    init_calfile:str = None,
    rms_thr:float = 1.0,
    postfix:str = "",
    min_detections:int = 30,
    min_images:int = 30,
    min_quality_images:int = 30,
    per_view_threshold:int = 1
    ) -> Tuple[float,
               np.array,
               np.array,
               np.array,
               List,
               List,
               int]:
    """ Calibrate a camera using charuco detector and opencv bundler.

    Args:
        imdir (str): Image directory.
        template (str): Fullpath of the board json_file.
        init_calfile (str, optional): Calibration file. Defaults to None.
        rms_thr (float, optional): Reprojection threshold. Defaults to 1.0.
        postfix (str, optional): Calibration filename suffix. Defaults to "".
        min_detections (int, optional): Required minimum number of detections.
            Defaults to 30.
        min_images (int, optional): Minimum number of images. Defaults to 30.
        min_quality_images (int, optional): Minimum number of images with sufficient
            reprojection quality. Defaults to 30.
        per_view_threshold (int, optional): RMS reprojection error threshold to
            distinguish quality images. Defaults to 1.

    Raises:
        FileExistsError: Initial calibration file does not exist.
        CalibrationError: Not enough images for calibration.
        CalibrationError: Not enough detections for calibration.
        CalibrationError: Not enough images with low rms for calibration.

    Returns:
        Tuple[float, np.array, np.array, np.array, List, List, int]:
            rms: the overall RMS re-projection error in pixels.
            k_matrix: camera matrix.
            dist: Lens distortion coeffs. OpenCV model with 8 distortion is equivalent
                to Brown-Conrady model. (used in K4A).
            img_size: [width; height].
            rvecs: list of rotation vectors.
            tvecs: list of translation vectors.
            num_good_images: count of views at or under per_view_threshold.
    """
    # Check validity of initial calibration file.
    # Bug fix: the exception was previously constructed but never raised.
    if init_calfile is not None and not os.path.exists(init_calfile):
        raise FileExistsError(
            f"Initial calibration does not exist: {init_calfile}")
    # output cal file
    calfile = os.path.join(imdir, f"calib{postfix}.yml")
    imgnames = []
    for fname in os.listdir(imdir):
        if fnmatch.fnmatch(fname, "*.png") or fnmatch.fnmatch(fname, "*.jpg"):
            imgnames.append(os.path.join(imdir, fname))
    num_images = len(imgnames)
    if num_images < min_images:
        msg = f"Not Enough images. {num_images} found, {min_images} required."
        raise CalibrationError(msg)
    ccorners_all, cids_all, p3d, img_size, board = \
        detect_markers_many_images(imgnames, template)
    # check number of times corners were successfully detected.
    num_det = len(ccorners_all)
    if num_det < min_detections:
        msg = f"Insufficent detections. {num_det} found, {min_detections} required."
        raise CalibrationError(msg)
    # initial calibration
    if init_calfile is None:
        # get image size of any image
        k_matrix = cv2.initCameraMatrix2D(p3d, ccorners_all, img_size) # (w,h)
        dist = np.zeros((8, 1), dtype=np.float32)
    else:
        k_matrix, dist, img_data = read_opencv_calfile(init_calfile)
        img_arr = img_data.astype(int)
        img_size = (img_arr[0][0], img_arr[1][0])
    flags = cv2.CALIB_RATIONAL_MODEL + cv2.CALIB_USE_INTRINSIC_GUESS
    criteria = cv2.TERM_CRITERIA_COUNT + cv2.TERM_CRITERIA_EPS, 100, 1e-6
    start = time.perf_counter()
    # Intrinsic/extrinsic standard deviations are unused; discard them.
    rms, k_matrix, dist, rvecs, tvecs, _, _, per_view_errors = \
        aruco.calibrateCameraCharucoExtended(ccorners_all,
                                             cids_all,
                                             board,
                                             img_size,
                                             k_matrix,
                                             dist,
                                             flags=flags,
                                             criteria=criteria)
    # Check quality of each image against the per-view RMS threshold.
    num_good_images = sum(1 for err in per_view_errors
                          if err[0] <= per_view_threshold)
    # Report which indexes are failing in per_view_errors.
    failing_idxs = [str(index)
                    for index, err in enumerate(per_view_errors)
                    if err[0] > per_view_threshold]
    if len(failing_idxs) != 0:
        warnings.warn("Failing image indices: " + ", ".join(failing_idxs))
    if num_good_images < min_quality_images:
        msg = f"Insufficent number of quality images. " \
              f"{num_good_images} found, {min_quality_images} required."
        raise CalibrationError(msg)
    # Keep only the first 8 distortion coefficients (Brown-Conrady).
    dist8 = dist[:8, :]
    img_size_as_array = np.array([np.array([img_size[0]]),
                                  np.array([img_size[1]])])
    if rms < rms_thr:
        print("calibrate_camera took {} sec".format(time.perf_counter()-start))
        write_opencv_calfile(calfile, k_matrix, dist8, img_size_as_array)
        write_json(os.path.join(imdir, "report.json"), {"RMS_pixels":rms})
    else:
        print("calibrate_camera failed \n")
    return rms, k_matrix, dist, img_size, rvecs, tvecs, num_good_images
#-------------------------------------------------------------------------------
def register(img_a: str,
             img_b: str,
             template: str,
             calib_a: str,
             calib_b: str,
             out_dir: str,
             rms_threshold:float=0.001) -> Tuple[np.array, np.array,
                                                 float, float, float, float]:
    """Get rotation and translation of camera b in terms of camera a.

    Args:
        img_a (str): Full path to image taken by camera A.
        img_b (str): Full path to image taken by camera B.
        template (str): Full path to template image of board.
        calib_a (str): Full path to opencv calibration file of camera A.
        calib_b (str): Full path to opencv calibration file of camera B.
        out_dir (str): Output directory for full calibration blob.
        rms_threshold (float): Threshold to fail RMS at in radians.

    Raises:
        FileExistsError: Raise if image file A is not found.
        FileExistsError: Raise if image file B is not found.
        FileExistsError: Raise if template file is not found.
        FileExistsError: Raise if calibration parameters for camera A is not found.
        FileExistsError: Raise if calibration parameters for camera B is not found.
        RegistrationError: Raise if OpenCV fails to load image file A.
        RegistrationError: Raise if OpenCV fails to load image file B.
        RegistrationError: Raise if reprojection error is too large.

    Returns:
        Tuple[np.array, np.array, float, float, float, float]:
            rmat_b_to_a: Numpy array of the rotation matrix from B to A.
            tvec_b_to_a: Numpy array of the translation vector from B to A.
            rms1_pixels: Forward (B to A) reprojection RMS in pixels.
            rms1_rad: Forward (B to A) reprojection RMS in radians.
            rms2_pixels: Reverse (A to B) reprojection RMS in pixels.
            rms2_rad: Reverse (A to B) reprojection RMS in radians.
    """
    # File exists checks.
    if not os.path.exists(img_a):
        raise FileExistsError(f"Image file not found for camera A @ {img_a}")
    if not os.path.exists(img_b):
        raise FileExistsError(f"Image file not found for camera B @ {img_b}")
    if not os.path.exists(template):
        raise FileExistsError(f"Board template parameters not found @ {template}")
    if not os.path.exists(calib_a):
        raise FileExistsError(f"Calib params for camera A not found @ {calib_a}")
    if not os.path.exists(calib_b):
        raise FileExistsError(f"Calib params for camera B not found @ {calib_b}")
    array_a = cv2.imread(img_a)
    array_b = cv2.imread(img_b)
    # Check image was read by opencv.
    if array_a is None:
        raise RegistrationError(f"OpenCV could not interpret Camera A @ {img_a}")
    if array_b is None:
        raise RegistrationError(f"OpenCV could not interpret Camera B @ {img_b}")
    # Get Rt of camera A to board.
    pose_a = pose_as_dataclass(array_a, template, calib_a, img_a)
    rmat_a = r_as_matrix(pose_a.rotation)
    # Get Rt of camera B to board.
    pose_b = pose_as_dataclass(array_b, template, calib_b, img_b)
    rmat_b = r_as_matrix(pose_b.rotation)
    # Get perspective of camera B to board: R_ba = R_a * R_b^T,
    # t_ba = t_a - R_ba * t_b.
    rmat_b_to_a = np.matmul(rmat_a, rmat_b.transpose())
    tvec_b_to_a = -np.matmul(rmat_b_to_a, pose_b.translation) + pose_a.translation
    print(f"Translation camera B to A:\n{tvec_b_to_a}")
    print(f"Rotation camera B to A:\n{rmat_b_to_a}")
    # Find registration error.
    print("Find forward reprojection error (camera B to camera A).")
    (rms1_pixels, rms1_rad) = registration_error(array_a,
                                                template,
                                                calib_a,
                                                pose_b,
                                                rmat_b_to_a,
                                                tvec_b_to_a)
    if rms1_rad > rms_threshold:
        raise RegistrationError("Registration error from A to B too large.")
    # Find reverse registration error, using the inverted registration.
    print("Find reverse reprojection error (camera A to camera B).")
    (rms2_pixels, rms2_rad) = registration_error(array_b,
                                                template,
                                                calib_b,
                                                pose_a,
                                                rmat_b_to_a.transpose(),
                                                rmat_b_to_a.transpose() @ tvec_b_to_a*-1)
    if rms2_rad > rms_threshold:
        raise RegistrationError("Registration error from B to A too large.")
    # Write to calibration blob.
    write_calibration_blob([calib_a, calib_b], rmat_b_to_a, tvec_b_to_a, out_dir)
    rms_report = {"RMS_B_to_A_pixels": rms1_pixels,
                  "RMS_B_to_A_radians": rms1_rad,
                  "RMS_A_to_B_pixels": rms2_pixels,
                  "RMS_A_to_B_radians": rms2_rad}
    write_json(os.path.join(out_dir, "report.json"), rms_report)
    return rmat_b_to_a, tvec_b_to_a, rms1_pixels, rms1_rad, rms2_pixels, rms2_rad
| [
"numpy.sum",
"cv2.aruco.detectMarkers",
"cv2.aruco.calibrateCameraCharucoExtended",
"numpy.linalg.norm",
"os.path.join",
"numpy.full",
"cv2.initCameraMatrix2D",
"cv2.aruco.CharucoBoard_create",
"numpy.eye",
"os.path.exists",
"cv2.FileStorage",
"cv2.aruco.interpolateCornersCharuco",
"json.dum... | [((1875, 1897), 'numpy.zeros', 'np.zeros', ([], {'shape': '(3, 3)'}), '(shape=(3, 3))\n', (1883, 1897), True, 'import numpy as np\n'), ((1899, 1928), 'cv2.Rodrigues', 'cv2.Rodrigues', (['rotation', 'rmat'], {}), '(rotation, rmat)\n', (1912, 1928), False, 'import cv2\n'), ((2666, 2713), 'cv2.FileStorage', 'cv2.FileStorage', (['calfile', 'cv2.FILE_STORAGE_READ'], {}), '(calfile, cv2.FILE_STORAGE_READ)\n', (2681, 2713), False, 'import cv2\n'), ((3558, 3606), 'cv2.FileStorage', 'cv2.FileStorage', (['calfile', 'cv2.FILE_STORAGE_WRITE'], {}), '(calfile, cv2.FILE_STORAGE_WRITE)\n', (3573, 3606), False, 'import cv2\n'), ((5792, 5827), 'os.makedirs', 'os.makedirs', (['out_dir'], {'exist_ok': '(True)'}), '(out_dir, exist_ok=True)\n', (5803, 5827), False, 'import os\n'), ((5842, 5888), 'os.path.join', 'os.path.join', (['out_dir', '"""calibration_blob.json"""'], {}), "(out_dir, 'calibration_blob.json')\n", (5854, 5888), False, 'import os\n'), ((6440, 6487), 'cv2.aruco.Dictionary_get', 'aruco.Dictionary_get', (["target['aruco_dict_name']"], {}), "(target['aruco_dict_name'])\n", (6460, 6487), True, 'import cv2.aruco as aruco\n'), ((6498, 6646), 'cv2.aruco.CharucoBoard_create', 'aruco.CharucoBoard_create', (["target['squares_x']", "target['squares_y']", "(target['square_length'] / 1000)", "(target['marker_length'] / 1000)", 'aruco_dict'], {}), "(target['squares_x'], target['squares_y'], target[\n 'square_length'] / 1000, target['marker_length'] / 1000, aruco_dict)\n", (6523, 6646), True, 'import cv2.aruco as aruco\n'), ((8415, 8477), 'cv2.aruco.detectMarkers', 'aruco.detectMarkers', (['img', 'board.dictionary', 'None', 'None', 'params'], {}), '(img, board.dictionary, None, None, params)\n', (8434, 8477), True, 'import cv2.aruco as aruco\n'), ((11910, 11931), 'numpy.full', 'np.full', (['(1, 3)', '(0.01)'], {}), '((1, 3), 0.01)\n', (11917, 11931), True, 'import numpy as np\n'), ((11941, 11962), 'numpy.full', 'np.full', (['(1, 3)', '(0.01)'], {}), '((1, 3), 0.01)\n', 
(11948, 11962), True, 'import numpy as np\n'), ((19644, 19686), 'os.path.join', 'os.path.join', (['imdir', 'f"""calib{postfix}.yml"""'], {}), "(imdir, f'calib{postfix}.yml')\n", (19656, 19686), False, 'import os\n'), ((19718, 19735), 'os.listdir', 'os.listdir', (['imdir'], {}), '(imdir)\n', (19728, 19735), False, 'import os\n'), ((20880, 20899), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (20897, 20899), False, 'import time\n'), ((21013, 21142), 'cv2.aruco.calibrateCameraCharucoExtended', 'aruco.calibrateCameraCharucoExtended', (['ccorners_all', 'cids_all', 'board', 'img_size', 'k_matrix', 'dist'], {'flags': 'flags', 'criteria': 'criteria'}), '(ccorners_all, cids_all, board,\n img_size, k_matrix, dist, flags=flags, criteria=criteria)\n', (21049, 21142), True, 'import cv2.aruco as aruco\n'), ((24784, 24801), 'cv2.imread', 'cv2.imread', (['img_a'], {}), '(img_a)\n', (24794, 24801), False, 'import cv2\n'), ((24814, 24831), 'cv2.imread', 'cv2.imread', (['img_b'], {}), '(img_b)\n', (24824, 24831), False, 'import cv2\n'), ((1539, 1567), 'json.dump', 'json.dump', (['data', 'j'], {'indent': '(4)'}), '(data, j, indent=4)\n', (1548, 1567), False, 'import json\n'), ((6374, 6391), 'json.load', 'json.load', (['j_file'], {}), '(j_file)\n', (6383, 6391), False, 'import json\n'), ((8287, 8320), 'cv2.aruco.DetectorParameters_create', 'aruco.DetectorParameters_create', ([], {}), '()\n', (8318, 8320), True, 'import cv2.aruco as aruco\n'), ((8657, 8726), 'cv2.aruco.interpolateCornersCharuco', 'aruco.interpolateCornersCharuco', (['aruco_corners', 'aruco_ids', 'img', 'board'], {}), '(aruco_corners, aruco_ids, img, board)\n', (8688, 8726), True, 'import cv2.aruco as aruco\n'), ((9081, 9135), 'warnings.warn', 'warnings.warn', (['"""No charuco corners detected in image."""'], {}), "('No charuco corners detected in image.')\n", (9094, 9135), False, 'import warnings\n'), ((10275, 10316), 'cv2.imread', 'cv2.imread', (['imgfile', 'cv2.IMREAD_GRAYSCALE'], {}), '(imgfile, 
cv2.IMREAD_GRAYSCALE)\n', (10285, 10316), False, 'import cv2\n'), ((12090, 12189), 'cv2.aruco.estimatePoseCharucoBoard', 'aruco.estimatePoseCharucoBoard', (['charuco_corners', 'charuco_ids', 'board', 'k_matrix', 'dist', 'rvec', 'tvec'], {}), '(charuco_corners, charuco_ids, board,\n k_matrix, dist, rvec, tvec)\n', (12120, 12189), True, 'import cv2.aruco as aruco\n'), ((14130, 14139), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (14136, 14139), True, 'import numpy as np\n'), ((16354, 16391), 'numpy.matmul', 'np.matmul', (['rmat_b_to_a', 'markers_cam_b'], {}), '(rmat_b_to_a, markers_cam_b)\n', (16363, 16391), True, 'import numpy as np\n'), ((16584, 16593), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (16590, 16593), True, 'import numpy as np\n'), ((16624, 16640), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (16632, 16640), True, 'import numpy as np\n'), ((17043, 17061), 'numpy.dot', 'np.dot', (['meas', 'pred'], {}), '(meas, pred)\n', (17049, 17061), True, 'import numpy as np\n'), ((17075, 17095), 'numpy.linalg.norm', 'np.linalg.norm', (['meas'], {}), '(meas)\n', (17089, 17095), True, 'import numpy as np\n'), ((17109, 17129), 'numpy.linalg.norm', 'np.linalg.norm', (['pred'], {}), '(pred)\n', (17123, 17129), True, 'import numpy as np\n'), ((17142, 17184), 'math.acos', 'math.acos', (['(dot_product / (norm_a * norm_b))'], {}), '(dot_product / (norm_a * norm_b))\n', (17151, 17184), False, 'import math\n'), ((20467, 20518), 'cv2.initCameraMatrix2D', 'cv2.initCameraMatrix2D', (['p3d', 'ccorners_all', 'img_size'], {}), '(p3d, ccorners_all, img_size)\n', (20489, 20518), False, 'import cv2\n'), ((20539, 20573), 'numpy.zeros', 'np.zeros', (['(8, 1)'], {'dtype': 'np.float32'}), '((8, 1), dtype=np.float32)\n', (20547, 20573), True, 'import numpy as np\n'), ((21779, 21817), 'warnings.warn', 'warnings.warn', (['warning_failing_indexes'], {}), '(warning_failing_indexes)\n', (21792, 21817), False, 'import warnings\n'), ((24230, 24251), 'os.path.exists', 
'os.path.exists', (['img_a'], {}), '(img_a)\n', (24244, 24251), False, 'import os\n'), ((24336, 24357), 'os.path.exists', 'os.path.exists', (['img_b'], {}), '(img_b)\n', (24350, 24357), False, 'import os\n'), ((24442, 24466), 'os.path.exists', 'os.path.exists', (['template'], {}), '(template)\n', (24456, 24466), False, 'import os\n'), ((24556, 24579), 'os.path.exists', 'os.path.exists', (['calib_a'], {}), '(calib_a)\n', (24570, 24579), False, 'import os\n'), ((24668, 24691), 'os.path.exists', 'os.path.exists', (['calib_b'], {}), '(calib_b)\n', (24682, 24691), False, 'import os\n'), ((26851, 26887), 'os.path.join', 'os.path.join', (['out_dir', '"""report.json"""'], {}), "(out_dir, 'report.json')\n", (26863, 26887), False, 'import os\n'), ((8968, 9022), 'warnings.warn', 'warnings.warn', (['"""No charuco corners detected in image."""'], {}), "('No charuco corners detected in image.')\n", (8981, 9022), False, 'import warnings\n'), ((17491, 17521), 'numpy.sum', 'np.sum', (['((corners_a - pts) ** 2)'], {}), '((corners_a - pts) ** 2)\n', (17497, 17521), True, 'import numpy as np\n'), ((19497, 19525), 'os.path.exists', 'os.path.exists', (['init_calfile'], {}), '(init_calfile)\n', (19511, 19525), False, 'import os\n'), ((19744, 19774), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['file', '"""*.png"""'], {}), "(file, '*.png')\n", (19759, 19774), False, 'import fnmatch\n'), ((19778, 19808), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['file', '"""*.jpg"""'], {}), "(file, '*.jpg')\n", (19793, 19808), False, 'import fnmatch\n'), ((22068, 22091), 'numpy.array', 'np.array', (['[img_size[0]]'], {}), '([img_size[0]])\n', (22076, 22091), True, 'import numpy as np\n'), ((22125, 22148), 'numpy.array', 'np.array', (['[img_size[1]]'], {}), '([img_size[1]])\n', (22133, 22148), True, 'import numpy as np\n'), ((22332, 22366), 'os.path.join', 'os.path.join', (['imdir', '"""report.json"""'], {}), "(imdir, 'report.json')\n", (22344, 22366), False, 'import os\n'), ((25459, 25501), 'numpy.matmul', 
'np.matmul', (['rmat_b_to_a', 'pose_b.translation'], {}), '(rmat_b_to_a, pose_b.translation)\n', (25468, 25501), True, 'import numpy as np\n'), ((10582, 10620), 'numpy.array', 'np.array', (['[img.shape[1], img.shape[0]]'], {}), '([img.shape[1], img.shape[0]])\n', (10590, 10620), True, 'import numpy as np\n'), ((19832, 19857), 'os.path.join', 'os.path.join', (['imdir', 'file'], {}), '(imdir, file)\n', (19844, 19857), False, 'import os\n'), ((22219, 22238), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (22236, 22238), False, 'import time\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.