code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
import scipy.linalg as la
from progress.bar import IncrementalBar
def eig_trajectories(A, T, verbose=False):
    """Track the eigenvalues of the matrix function A(t) along T.

    Eigenvalues at consecutive parameter values are matched greedily to
    the nearest eigenvalue (in the complex plane) of the previous step,
    so each row of the result forms a continuous trajectory.

    Parameters
    ----------
    A : callable
        Matrix-valued function of one parameter t.
    T : 1d array
        Values of the parameter t.
    verbose : bool, optional
        When True, display an IncrementalBar progress bar.

    Returns
    -------
    E : ndarray
        Complex array of shape (n, len(T)); E[i] is the trajectory of
        the i-th eigenvalue.
    """
    nrows, ncols = A(T[0]).shape
    if nrows != ncols:
        raise ValueError("Matrix must be square")
    n = nrows
    nsteps = len(T)
    E = np.empty((n, nsteps), dtype="complex")
    E[:, 0] = la.eig(A(T[0]), right=False)
    if verbose:
        bar = IncrementalBar("Calculating\t", max=nsteps, suffix='%(percent)d%%')
    for step, t in enumerate(T[1:]):
        eigvals = la.eig(A(t), right=False)
        free_slots = list(range(n))
        for lam in eigvals:
            # greedy nearest-neighbour match against the previous column
            best = np.argmin(np.abs(lam - E[:, step][free_slots]))
            E[free_slots[best], step + 1] = lam
            del free_slots[best]
        if verbose:
            bar.next()
    if verbose:
        bar.next()
        bar.finish()
    return E
def eig_loops(A, U, V, verbose=False):
    """Compute the loops of eigenvalues for the matrix function A(u, v).

    The first v-slice is obtained with :func:`eig_trajectories`; each
    subsequent slice is matched to the previous one by nearest eigenvalue
    at u = U[0] so that L[i] varies continuously in both parameters.

    Parameters
    ----------
    A : callable
        Matrix-valued function of two parameters u, v.
    U : 1d array
        Values of the parameter u.
    V : 1d array
        Values of the parameter v.
    verbose : bool, optional
        When True, display an IncrementalBar progress bar.

    Returns
    -------
    L : ndarray
        Complex array of shape (n, len(U), len(V));
        L[i, j, k] is the i-th eigenvalue of A(U[j], V[k]).
    """
    n, ncols = A(U[0], V[0]).shape
    if n != ncols:
        raise ValueError("Matrix must be square")
    m = len(U)
    l = len(V)
    L = np.empty((n, m, l), dtype="complex")
    B = lambda u: A(u, V[0])
    L[:, :, 0] = eig_trajectories(B, U)
    if verbose:
        # BUG FIX: the bar advances once per v-step (len(V) ticks total);
        # it was previously sized with max=m == len(U).
        bar = IncrementalBar("Calculating\t", max=l, suffix='%(percent)d%%')
    for i, v in enumerate(V[1:]):
        E = eig_trajectories(lambda u: A(u, v), U)
        mask = list(range(n))
        for traj in E:
            # greedy nearest match at u = U[0] against the previous v-slice
            idx = np.argmin(np.abs(traj[0] - L[:, 0, i][mask]))
            L[mask[idx], :, i + 1] = traj
            del mask[idx]
        if verbose:
            bar.next()
    if verbose:
        bar.next()
        bar.finish()
    return L
def eigenvector_trajectories(A, T, verbose=False):
    """Track eigenvalues and eigenvectors of the matrix function A(t).

    Eigenvalues at consecutive parameter values are matched greedily by
    nearest distance; the matched eigenvector's sign is flipped when
    needed so the vector varies continuously along the trajectory.

    Parameters
    ----------
    A : callable
        Matrix-valued function of one parameter t.
    T : 1d array
        Values of the parameter t.
    verbose : bool, optional
        When True, display an IncrementalBar progress bar.

    Returns
    -------
    E : ndarray
        Complex (n, len(T)) array; E[i] is the trajectory of the i-th
        eigenvalue.
    V : ndarray
        Complex (n, n, len(T)) array; V[:, i, k] is the eigenvector of
        A(T[k]) associated with E[i, k].
    """
    n, ncols = A(T[0]).shape
    if n != ncols:
        raise ValueError("Matrix must be square")
    m = len(T)
    E = np.empty((n, m), dtype="complex")
    V = np.empty((n, n, m), dtype="complex")
    E[:, 0], V[:, :, 0] = la.eig(A(T[0]))
    if verbose:
        bar = IncrementalBar("Calculating\t", max=m, suffix='%(percent)d%%')
    for i, t in enumerate(T[1:]):
        w, v = la.eig(A(t))
        mask = list(range(n))
        for j, eig in enumerate(w):
            idx = np.argmin(np.abs(eig - E[:, i][mask]))
            slot = mask[idx]
            E[slot, i + 1] = eig
            # BUG FIX: the eigenvector paired with w[j] is column v[:, j].
            # The original indexed v by the destination slot (mask[idx]),
            # attaching the wrong eigenvector whenever the eigenvalue
            # ordering changed between steps.  The sign factor keeps the
            # vector continuous with the previous step.
            V[:, slot, i + 1] = v[:, j] * np.sign(v[:, j] @ V[:, slot, i])
            del mask[idx]
        if verbose:
            bar.next()
    if verbose:
        bar.next()
        bar.finish()
    return E, V
| [
"numpy.sign",
"numpy.abs",
"numpy.empty",
"progress.bar.IncrementalBar"
] | [((634, 667), 'numpy.empty', 'np.empty', (['(n, m)'], {'dtype': '"""complex"""'}), "((n, m), dtype='complex')\n", (642, 667), True, 'import numpy as np\n'), ((1743, 1779), 'numpy.empty', 'np.empty', (['(n, m, l)'], {'dtype': '"""complex"""'}), "((n, m, l), dtype='complex')\n", (1751, 1779), True, 'import numpy as np\n'), ((3025, 3058), 'numpy.empty', 'np.empty', (['(n, m)'], {'dtype': '"""complex"""'}), "((n, m), dtype='complex')\n", (3033, 3058), True, 'import numpy as np\n'), ((3065, 3101), 'numpy.empty', 'np.empty', (['(n, n, m)'], {'dtype': '"""complex"""'}), "((n, n, m), dtype='complex')\n", (3073, 3101), True, 'import numpy as np\n'), ((729, 791), 'progress.bar.IncrementalBar', 'IncrementalBar', (['"""Calculating\t"""'], {'max': 'm', 'suffix': '"""%(percent)d%%"""'}), "('Calculating\\t', max=m, suffix='%(percent)d%%')\n", (743, 791), False, 'from progress.bar import IncrementalBar\n'), ((1866, 1928), 'progress.bar.IncrementalBar', 'IncrementalBar', (['"""Calculating\t"""'], {'max': 'm', 'suffix': '"""%(percent)d%%"""'}), "('Calculating\\t', max=m, suffix='%(percent)d%%')\n", (1880, 1928), False, 'from progress.bar import IncrementalBar\n'), ((3160, 3222), 'progress.bar.IncrementalBar', 'IncrementalBar', (['"""Calculating\t"""'], {'max': 'm', 'suffix': '"""%(percent)d%%"""'}), "('Calculating\\t', max=m, suffix='%(percent)d%%')\n", (3174, 3222), False, 'from progress.bar import IncrementalBar\n'), ((941, 968), 'numpy.abs', 'np.abs', (['(eig - E[:, i][mask])'], {}), '(eig - E[:, i][mask])\n', (947, 968), True, 'import numpy as np\n'), ((2105, 2139), 'numpy.abs', 'np.abs', (['(traj[0] - L[:, 0, i][mask])'], {}), '(traj[0] - L[:, 0, i][mask])\n', (2111, 2139), True, 'import numpy as np\n'), ((3362, 3389), 'numpy.abs', 'np.abs', (['(eig - E[:, i][mask])'], {}), '(eig - E[:, i][mask])\n', (3368, 3389), True, 'import numpy as np\n'), ((3471, 3516), 'numpy.sign', 'np.sign', (['(v[:, mask[idx]] @ V[:, mask[idx], i])'], {}), '(v[:, mask[idx]] @ V[:, mask[idx], 
i])\n', (3478, 3516), True, 'import numpy as np\n')] |
#
# Copyright (C) <NAME>, <NAME>, and <NAME>, 2016
#
# Distributed under the same BSD license as Scipy.
#
# adapted from scipy's cython version
import numpy as np
import numpy.random as random
#pythran export directed_hausdorff(float64[:,:], float64[:,:], int)
#pythran export directed_hausdorff_noshuffle(float64[:,:], float64[:,:])
#runas import numpy as np; x = np.arange((100 * 100.)).reshape(100,-1); y = np.ones((100,100)) * 3; directed_hausdorff_noshuffle(x, y)
def directed_hausdorff(ar1, ar2, seed=0):
    """Directed Hausdorff distance from ar1 to ar2 (early-break variant).

    The rows of both arrays are visited in a seeded random order, which
    tends to trigger the early break in the inner loop sooner and never
    hurts performance; the returned distance is unaffected.

    Returns (distance, index_in_ar1, index_in_ar2) for the realizing pair.
    """
    n1 = ar1.shape[0]
    n2 = ar2.shape[0]
    best_i = best_j = cand_i = cand_j = 0
    random.seed(seed)
    order1 = np.arange(n1)
    order2 = np.arange(n2)
    random.shuffle(order1)
    random.shuffle(order2)
    pts1 = np.asarray(ar1)[order1]
    pts2 = np.asarray(ar2)[order2]
    cmax = 0
    for i in range(n1):
        cmin = np.inf
        for j in range(n2):
            # squared distance; the sqrt is deferred to the very end
            d = np.sum((pts1[i] - pts2[j]) ** 2)
            if d < cmax:
                # some point of ar2 is already close enough: this row
                # cannot raise the maximum, so skip the rest of it
                break
            if d < cmin:  # always true on the first j-iteration
                cmin = d
                cand_i = i
                cand_j = j
        else:
            # inner loop ran to completion: cmin is a genuine minimum
            if cmin != np.inf and cmin > cmax:
                cmax = cmin
                best_i = cand_i
                best_j = cand_j
    return np.sqrt(cmax), order1[best_i], order2[best_j]
def directed_hausdorff_noshuffle(ar1, ar2, seed=0):
    """Directed Hausdorff distance from ar1 to ar2 without shuffling.

    Parameters
    ----------
    ar1, ar2 : (N, d) float arrays
    seed : int, optional
        Ignored; kept only for signature compatibility with
        :func:`directed_hausdorff`.

    Returns
    -------
    (distance, i, j)
        The distance and the indices of the realizing pair.

    Notes
    -----
    The original version built identity ``arange`` permutations and
    fancy-indexed both inputs through them, copying each array for no
    effect; that dead scaffolding has been removed.
    """
    N1 = ar1.shape[0]
    N2 = ar2.shape[0]
    i_store = j_store = i_ret = j_ret = 0
    a1 = np.asarray(ar1)
    a2 = np.asarray(ar2)
    cmax = 0
    for i in range(N1):
        cmin = np.inf
        for j in range(N2):
            # squared distance; take the sqrt only once at the end
            d = np.sum((a1[i] - a2[j]) ** 2)
            if d < cmax:  # this row cannot increase the maximum
                break
            if d < cmin:  # always true on the first j-iteration
                cmin = d
                i_store = i
                j_store = j
        else:
            # inner loop completed: cmin is the true min distance of row i
            if cmin != np.inf and cmin > cmax:
                cmax = cmin
                i_ret = i_store
                j_ret = j_store
    return np.sqrt(cmax), i_ret, j_ret
| [
"numpy.sqrt",
"numpy.asarray",
"numpy.sum",
"numpy.random.seed",
"numpy.arange",
"numpy.random.shuffle"
] | [((806, 823), 'numpy.random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (817, 823), True, 'import numpy.random as random\n'), ((838, 851), 'numpy.arange', 'np.arange', (['N1'], {}), '(N1)\n', (847, 851), True, 'import numpy as np\n'), ((866, 879), 'numpy.arange', 'np.arange', (['N2'], {}), '(N2)\n', (875, 879), True, 'import numpy as np\n'), ((884, 907), 'numpy.random.shuffle', 'random.shuffle', (['resort1'], {}), '(resort1)\n', (898, 907), True, 'import numpy.random as random\n'), ((912, 935), 'numpy.random.shuffle', 'random.shuffle', (['resort2'], {}), '(resort2)\n', (926, 935), True, 'import numpy.random as random\n'), ((1945, 1958), 'numpy.arange', 'np.arange', (['N1'], {}), '(N1)\n', (1954, 1958), True, 'import numpy as np\n'), ((1973, 1986), 'numpy.arange', 'np.arange', (['N2'], {}), '(N2)\n', (1982, 1986), True, 'import numpy as np\n'), ((946, 961), 'numpy.asarray', 'np.asarray', (['ar1'], {}), '(ar1)\n', (956, 961), True, 'import numpy as np\n'), ((981, 996), 'numpy.asarray', 'np.asarray', (['ar2'], {}), '(ar2)\n', (991, 996), True, 'import numpy as np\n'), ((1736, 1749), 'numpy.sqrt', 'np.sqrt', (['cmax'], {}), '(cmax)\n', (1743, 1749), True, 'import numpy as np\n'), ((1997, 2012), 'numpy.asarray', 'np.asarray', (['ar1'], {}), '(ar1)\n', (2007, 2012), True, 'import numpy as np\n'), ((2032, 2047), 'numpy.asarray', 'np.asarray', (['ar2'], {}), '(ar2)\n', (2042, 2047), True, 'import numpy as np\n'), ((2787, 2800), 'numpy.sqrt', 'np.sqrt', (['cmax'], {}), '(cmax)\n', (2794, 2800), True, 'import numpy as np\n'), ((1110, 1140), 'numpy.sum', 'np.sum', (['((ar1[i] - ar2[j]) ** 2)'], {}), '((ar1[i] - ar2[j]) ** 2)\n', (1116, 1140), True, 'import numpy as np\n'), ((2161, 2191), 'numpy.sum', 'np.sum', (['((ar1[i] - ar2[j]) ** 2)'], {}), '((ar1[i] - ar2[j]) ** 2)\n', (2167, 2191), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
from itertools import product
import math
try:
import numpy as np
except ImportError:
np = None
from .printing import number_to_scientific_html
from ._util import get_backend, mat_dot_vec, prodpow
class EqCalcResult(object):
    """Holds the results of solving an equilibrium system over a grid of
    varied initial concentrations.

    One diagnostics array per key in ``attrs`` is allocated over the
    varied grid, alongside ``self.conc`` which stores the solved
    concentrations for every grid point.
    """
    # per-grid-point solver diagnostics and their dtypes
    attrs = {
        'sane': bool, 'success': bool,
        'nfev': int, 'njev': int,
        'time_cpu': float, 'time_wall': float
    }
    def __init__(self, eqsys, init_concs, varied):
        self.eqsys = eqsys
        self.all_inits, self.varied_keys = self.eqsys.per_substance_varied(init_concs, varied)
        self.conc = np.empty_like(self.all_inits)
        for k, v in self.attrs.items():
            # one diagnostics array per attribute, shaped like the varied grid
            # (all dimensions of all_inits except the trailing substance axis)
            setattr(self, k, np.zeros(self.all_inits.shape[:-1], dtype=v))
    def solve(self, **kwargs):
        """Solve the equilibrium at every grid point, filling ``self.conc``
        and the diagnostic arrays in place; kwargs go to ``eqsys._solve``."""
        for index in product(*map(range, self.all_inits.shape[:-1])):
            slc = tuple(index) + (slice(None),)
            self.conc[slc], nfo, sane = self.eqsys._solve(self.all_inits[slc], **kwargs)
            self.sane[index] = sane
            def _get(k):
                # `nfo` may be a single info dict or a list of them
                # (in the latter case the last entry is used)
                try:
                    return nfo[k]
                except TypeError:
                    return nfo[-1][k]
            for k in self.attrs:
                if k == 'sane':
                    continue
                try:
                    getattr(self, k)[index] = _get(k)
                except KeyError:
                    pass  # solver did not report this diagnostic
    def _repr_html_(self):
        """Rich HTML table for notebooks; only the single-varied-parameter
        case is implemented."""
        def fmt(num):
            return number_to_scientific_html(num, '%.5e')
        if len(self.varied_keys) == 0:
            raise NotImplementedError()
        elif len(self.varied_keys) == 1:
            var_html = self.eqsys.substances[self.varied_keys[0]].html_name
            # first column: initial conc. of the varied substance;
            # remaining columns: solved concentration of every substance
            header = ["[%s]<sub>0</sub>" % var_html] + ["[%s]" % s.html_name for s in self.eqsys.substances.values()]
            def row(i):
                j = self.eqsys.as_substance_index(self.varied_keys[0])
                return map(fmt, [self.all_inits[i, j]] + self.conc[i, :].tolist())
            pre = " <td style='font-weight: bold;'>\n "
            linker = "\n </td>\n <td>\n "
            post = "\n </td>"
            rows = [pre + linker.join(row(i)) + post for i in range(self.all_inits.shape[0])]
            template = """<table>\n <tr>\n <th>\n %s\n </th>\n </tr>\n <tr>\n %s\n </tr>\n</table>"""
            head_linker = "\n </th>\n <th>\n "
            row_linker = "\n </tr>\n <tr>\n "
            return template % (head_linker.join(header), row_linker.join(rows))
        else:
            raise NotImplementedError()
    def plot(self, ls=('-', '--', ':', '-.'), c=('k', 'r', 'g', 'b', 'c', 'm', 'y'), latex=None):
        """Plot concentrations against the single varied parameter on
        log-log axes; line styles and colors cycle through `ls` and `c`."""
        import matplotlib.pyplot as plt
        if latex is None:
            # default to LaTeX labels when the substances carry latex names
            latex = next(iter(self.eqsys.substances.values())).latex_name is not None
        if len(self.varied_keys) == 0:
            raise NotImplementedError()
        elif len(self.varied_keys) == 1:
            x = self.all_inits[:, self.eqsys.as_substance_index(self.varied_keys[0])]
            for idx, (k, v) in enumerate(self.eqsys.substances.items()):
                lbl = (r'$\mathrm{' + v.latex_name + '}$') if latex else v.name
                plt.plot(x, self.conc[:, idx], label=lbl, ls=ls[idx % len(ls)], c=c[idx % len(c)])
            ax = plt.gca()
            # Log-log
            ax.set_xscale('log')
            ax.set_yscale('log')
            # Axis labels
            var_latex = self.eqsys.substances[self.varied_keys[0]].latex_name
            ax.set_xlabel((r"$[\mathrm{%s}]_0$" if latex else "[%s]0") % var_latex)
            ax.set_ylabel("Concentration")
            # Outside legend: shrink the axes to make room on the right
            box = ax.get_position()
            ax.set_position([box.x0, box.y0, box.width * 0.75, box.height])
            # Put a legend to the right of the current axis
            ax.legend(loc='upper left', bbox_to_anchor=(1, 1))
        else:
            raise NotImplementedError()
class _NumSys(object):
    """Common state and helpers shared by the numerical equilibrium
    system formulations."""

    small = 0  # precipitation limit passed to eq_constants
    pre_processor = None
    post_processor = None
    internal_x0_cb = None

    def __init__(self, eqsys, rref_equil=False, rref_preserv=False,
                 backend=None, precipitates=()):
        self.eqsys = eqsys
        self.precipitates = precipitates
        self.backend = get_backend(backend)
        self.rref_equil = rref_equil
        self.rref_preserv = rref_preserv

    def _get_A_ks(self, eq_params):
        # Stoichiometry matrix and equilibrium constants restricted to
        # the reactions that are not precipitating.
        active_rids = self.eqsys.non_precip_rids(self.precipitates)
        eq_consts = self.eqsys.eq_constants(active_rids, eq_params, self.small)
        return self.eqsys.stoichs_constants(
            eq_consts, self.rref_equil, backend=self.backend,
            non_precip_rids=active_rids)

    def _inits_and_eq_params(self, params):
        # Split the flat parameter vector into
        # (initial concentrations, equilibrium parameters).
        ns = self.eqsys.ns
        return params[:ns], params[ns:]
class NumSysLin(_NumSys):
    """Equilibrium root-finding system formulated directly in the
    concentrations (yvec == C)."""

    def internal_x0_cb(self, init_concs, params):
        # Blend 1% of the fully-dissolved composition into the initial
        # concentrations to reduce the risk of a stationary starting point.
        return (99*init_concs + self.eqsys.dissolved(init_concs))/100

    def f(self, yvec, params):
        """Residuals: equilibrium conditions followed by linear
        composition-conservation relations."""
        from pyneqsys.symbolic import linear_exprs
        init_concs, eq_params = self._inits_and_eq_params(params)
        A, ks = self._get_A_ks(eq_params)
        f_equil = []
        for q, k in zip(prodpow(yvec, A), ks):
            # k == 0 presumably marks a suppressed reaction — keep the raw
            # activity product in that case instead of dividing by zero.
            f_equil.append(q/k - 1 if k != 0 else q)
        B, comp_nrs = self.eqsys.composition_balance_vectors()
        f_preserv = linear_exprs(B, yvec, mat_dot_vec(B, init_concs),
                                  rref=self.rref_preserv)
        return f_equil + f_preserv
class _NumSysLinNegPenalty(NumSysLin):
    """NumSysLin with an additional quadratic penalty on negative
    concentrations."""

    def f(self, yvec, params):
        import sympy as sp
        residuals = super(_NumSysLinNegPenalty, self).f(yvec, params)
        penalties = []
        for yi in yvec:
            # yi**2 when yi < 0, zero otherwise
            penalties.append(sp.Piecewise((yi**2, yi < 0), (0, True)))
        return residuals + penalties
class NumSysLinRel(NumSysLin):
    """NumSysLin variant solved in concentrations scaled by their upper
    bounds, so each internal unknown is roughly of order unity."""
    def max_concs(self, params, min_=min, dtype=np.float64):
        # Upper bounds on each concentration, derived from the initial
        # composition; `min_` and `dtype` are overridable for symbolic use.
        init_concs = params[:self.eqsys.ns]
        return self.eqsys.upper_conc_bounds(init_concs, min_=min_, dtype=dtype)
    def pre_processor(self, x, params):
        # physical -> internal: divide by the per-substance bounds
        return x/self.max_concs(params), params
    def post_processor(self, x, params):
        # internal -> physical: multiply the bounds back in
        return x*self.max_concs(params), params
    def f(self, yvec, params):
        import sympy as sp
        # Symbolic path: sympy.Min and dtype=object keep the bounds exact
        # so they can be combined with the symbolic unknowns in yvec.
        return NumSysLin.f(self, [m*yi for m, yi in zip(
            self.max_concs(params, min_=lambda x: sp.Min(*x), dtype=object),
            yvec)], params)
class NumSysSquare(NumSysLin):
    """NumSysLin solved in y = sqrt(C), which enforces C >= 0."""

    small = 1e-35  # concentrations below this are treated as negligible

    def pre_processor(self, x, params):
        # physical -> internal: y = sqrt(|C|)
        return np.sqrt(np.abs(x)), params

    def post_processor(self, x, params):
        # internal -> physical: C = y**2
        return x**2, params

    def internal_x0_cb(self, init_concs, params):
        return np.sqrt(np.abs(init_concs))

    def f(self, yvec, params):
        squared = [yi*yi for yi in yvec]
        return NumSysLin.f(self, squared, params)
class NumSysLinTanh(NumSysLin):
    """NumSysLin solved in a tanh-transformed variable.

    Concentrations are mapped through C = ymax*(4 + 5*tanh(y))/8, so the
    internal unknowns y are unbounded while C stays inside
    (-ymax/8, 9*ymax/8).
    """
    def pre_processor(self, x, params):
        # forward transform: y = arctanh((8*C/ymax - 4)/5)
        ymax = self.eqsys.upper_conc_bounds(params[:self.eqsys.ns])
        return np.arctanh((8*x/ymax - 4) / 5), params
    def post_processor(self, x, params):
        # inverse transform: C = ymax*(4 + 5*tanh(y))/8
        ymax = self.eqsys.upper_conc_bounds(params[:self.eqsys.ns])
        return ymax*(4 + 5*np.tanh(x))/8, params
    def internal_x0_cb(self, init_concs, params):
        # start from the transform of the initial concentrations
        return self.pre_processor(init_concs, init_concs)[0]
    def f(self, yvec, params):
        import sympy
        # symbolic counterpart of post_processor; Piecewise emulates min
        ymax = self.eqsys.upper_conc_bounds(
            params[:self.eqsys.ns],
            min_=lambda a, b: sympy.Piecewise((a, a < b), (b, True)))
        ytanh = [yimax*(4 + 5*sympy.tanh(yi))/8
                 for yimax, yi in zip(ymax, yvec)]
        return NumSysLin.f(self, ytanh, params)
class NumSysLog(_NumSys):
    """Equilibrium system formulated in y = ln(C).

    The log transform makes the equilibrium conditions linear in y
    (A y = ln k) and guarantees positive concentrations.
    """
    small = math.exp(-80)  # anything less than `small` is insignificant
    def pre_processor(self, x, params):
        # physical -> internal: y = ln(C + small); a zero concentration
        # therefore maps to ln(small) instead of -inf
        return (np.log(np.asarray(x) + NumSysLog.small),  # 10: damping
                params)  # zero conc. ~= small
    def post_processor(self, x, params):
        # internal -> physical: C = exp(y)
        return np.exp(x), params
    def internal_x0_cb(self, init_concs, params):
        # uniform starting guess in log-space, independent of init_concs
        # return [1]*len(init_concs)
        return [0.1]*len(init_concs)
    def f(self, yvec, params):
        """Residuals: log-linear equilibrium conditions followed by
        composition conservation evaluated on exp(yvec)."""
        from pyneqsys.symbolic import linear_exprs
        init_concs, eq_params = self._inits_and_eq_params(params)
        A, ks = self._get_A_ks(eq_params)
        # yvec == ln(C)
        f_equil = mat_dot_vec(A, yvec, [-self.backend.log(k) for k in ks])
        B, comp_nrs = self.eqsys.composition_balance_vectors()
        f_preserv = linear_exprs(B, list(map(self.backend.exp, yvec)),
                                 mat_dot_vec(B, init_concs),
                                 rref=self.rref_preserv)
        return f_equil + f_preserv
| [
"numpy.abs",
"matplotlib.pyplot.gca",
"numpy.asarray",
"numpy.tanh",
"numpy.exp",
"sympy.Piecewise",
"numpy.zeros",
"numpy.empty_like",
"sympy.Min",
"sympy.tanh",
"numpy.arctanh",
"math.exp"
] | [((7799, 7812), 'math.exp', 'math.exp', (['(-80)'], {}), '(-80)\n', (7807, 7812), False, 'import math\n'), ((663, 692), 'numpy.empty_like', 'np.empty_like', (['self.all_inits'], {}), '(self.all_inits)\n', (676, 692), True, 'import numpy as np\n'), ((5770, 5812), 'sympy.Piecewise', 'sp.Piecewise', (['(yi ** 2, yi < 0)', '(0, True)'], {}), '((yi ** 2, yi < 0), (0, True))\n', (5782, 5812), True, 'import sympy as sp\n'), ((6804, 6822), 'numpy.abs', 'np.abs', (['init_concs'], {}), '(init_concs)\n', (6810, 6822), True, 'import numpy as np\n'), ((7097, 7131), 'numpy.arctanh', 'np.arctanh', (['((8 * x / ymax - 4) / 5)'], {}), '((8 * x / ymax - 4) / 5)\n', (7107, 7131), True, 'import numpy as np\n'), ((8077, 8086), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (8083, 8086), True, 'import numpy as np\n'), ((762, 806), 'numpy.zeros', 'np.zeros', (['self.all_inits.shape[:-1]'], {'dtype': 'v'}), '(self.all_inits.shape[:-1], dtype=v)\n', (770, 806), True, 'import numpy as np\n'), ((3391, 3400), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3398, 3400), True, 'import matplotlib.pyplot as plt\n'), ((6640, 6649), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (6646, 6649), True, 'import numpy as np\n'), ((7571, 7609), 'sympy.Piecewise', 'sympy.Piecewise', (['(a, a < b)', '(b, True)'], {}), '((a, a < b), (b, True))\n', (7586, 7609), False, 'import sympy\n'), ((7924, 7937), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (7934, 7937), True, 'import numpy as np\n'), ((7273, 7283), 'numpy.tanh', 'np.tanh', (['x'], {}), '(x)\n', (7280, 7283), True, 'import numpy as np\n'), ((7641, 7655), 'sympy.tanh', 'sympy.tanh', (['yi'], {}), '(yi)\n', (7651, 7655), False, 'import sympy\n'), ((6468, 6478), 'sympy.Min', 'sp.Min', (['*x'], {}), '(*x)\n', (6474, 6478), True, 'import sympy as sp\n')] |
import numpy as np
import astropy.units as u
import astropy.constants as const
import astropy.io.fits
import astropy.time as atime
import astropy.coordinates as coord
import numpy.random as random
import os
from simulacra.theory import TheoryModel
def read_in_fits(filename):
    """Open a FITS file and return the data of its PRIMARY HDU."""
    print('reading in {}'.format(filename))
    hdulist = astropy.io.fits.open(filename)
    return hdulist['PRIMARY'].data
def download_phoenix_wave(outdir):
    """Return the local path of the PHOENIX wavelength grid FITS file,
    downloading it over anonymous FTP on first use."""
    filename = 'WAVE_PHOENIX-ACES-AGSS-COND-2011.fits'
    outname = os.path.join(outdir, filename)
    if not os.path.isfile(outname):
        from ftplib import FTP
        ftp = FTP('phoenix.astro.physik.uni-goettingen.de')
        ftp.login()  # anonymous login
        ftp.cwd('HiResFITS')
        ftp.retrlines('LIST')
        with open(outname, 'wb') as fp:
            ftp.retrbinary('RETR ' + filename, fp.write)
        ftp.close()
        return outname
    print('using saved wave file')
    return outname
def download_phoenix_model(star,outdir=None):
    """Return the local path of the PHOENIX flux grid matching the star's
    (z, alpha, temperature, logg), downloading it via anonymous FTP from
    the Goettingen PHOENIX server on first use.

    Parameters
    ----------
    star : object
        Must expose ``z``, ``alpha``, ``temperature`` and ``logg``.
    outdir : str
        Directory where the FITS file is cached.

    Returns
    -------
    str
        Path to the cached (or freshly downloaded) FITS file.
    """
    # Remote layout: HiResFITS/PHOENIX-ACES-AGSS-COND-2011/Z<z>[.Alpha=<a>]
    directories = ['HiResFITS','PHOENIX-ACES-AGSS-COND-2011','Z{:+.1f}'.format(star.z)]
    # print(directories)
    if star.alpha != 0.0:
        directories[-1] += '.Alpha={:+.2f}'.format(star.alpha)
    # Remote file name embeds Teff, logg and the metallicity suffix
    # (directories[-1][1:] drops the leading 'Z').
    filename = 'lte{:05d}-{:1.2f}'.format(star.temperature,star.logg) + directories[-1][1:] + '.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits'
    outname = os.path.join(outdir,filename)
    print(outname)
    if os.path.isfile(outname):
        print('using saved flux file')
        return outname
    else:
        from ftplib import FTP
        ftp = FTP('phoenix.astro.physik.uni-goettingen.de') #logs in
        ftp.login()
        # for directory in directories:
        print("ftp: {}\nfilename: {}\ndirs: {}".format(ftp, filename, directories))
        ftp.cwd(os.path.join(*directories))
        ftp.retrlines('LIST')
        with open(outname, 'wb') as fp:
            ftp.retrbinary('RETR ' + filename, fp.write) # start downloading
        ftp.close() # close the connection
        return outname
def velocities(shifts):
    """Convert log-wavelength shifts to velocities via the relativistic
    Doppler relation v = c*(e^{2s} - 1)/(e^{2s} + 1), i.e. v = c*tanh(s)."""
    e2s = np.exp(2*shifts)
    return const.c * (e2s - 1)/(1 + e2s)
def delta_x(R):
    """Logarithmic wavelength step, ln(1 + 1/R), for resolving power R."""
    inv_R = 1/R
    return np.log(1 + inv_R)
def shifts(vel):
    """Log-wavelength shift for velocity vel (relativistic Doppler):
    s = ln sqrt((1 + v/c)/(1 - v/c))."""
    beta = vel/(const.c)
    return np.log(np.sqrt((1 + beta)/(1 - beta)))
def get_random_times(n, tframe=365*u.day):
    """Draw n observation times uniformly distributed within tframe of now."""
    start = atime.Time.now()
    offsets = np.random.uniform(0, tframe.value, n) * tframe.unit
    return start + offsets
def get_berv(times, obs, ra, dec, v_d):
    """Barycentric radial-velocity correction (km/s) for a target at
    (ra, dec) with systemic velocity v_d, observed from site `obs`."""
    target = coord.SkyCoord(ra, dec, radial_velocity=v_d)
    site = coord.EarthLocation.of_site(obs)
    correction = target.radial_velocity_correction(obstime=times, location=site)
    return correction.to(u.km/u.s)
def binary_system_velocity(times, amplitude, period, phase_time='2000-01-02'):
    """Sinusoidal radial velocity of a binary orbit with the given
    semi-amplitude and period; phase zero at `phase_time`."""
    phase = (times - atime.Time(phase_time))/period
    return amplitude * np.sin(2*np.pi*phase*u.radian)
def get_velocity_measurements(times, amplitude, period, loc, target):
    """Total apparent radial velocity at each time: barycentric
    correction at location `loc` plus the binary orbital term."""
    bary = target.radial_velocity_correction(obstime=times, location=loc)
    return bary + binary_system_velocity(times, amplitude, period)
def get_night_grid(loc,tstart,tend,steps_per_night=10):
    """Build a grid of observation times covering each night between
    tstart and tend, sampling `steps_per_night` points from sunset to
    the following sunrise at location `loc`.

    NOTE(review): the result is assembled with np.concatenate over Time
    arithmetic, so the return value is a plain ndarray of per-night time
    values rather than a single astropy Time object — confirm downstream
    usage expects that.
    """
    from astroplan import Observer
    observer = Observer(location=loc)
    days = int((tend - tstart) / u.day)
    # print(days)
    # one reference time per day in the interval
    all_times = tstart + np.linspace(0,days,days + 1)
    sunset = observer.sun_set_time(all_times,which='next')
    sunrise = observer.sun_rise_time(all_times,which='next')
    # print(all_times)
    outarr = np.array([])
    for i in range(len(sunrise)-1):
        # night i lasts from this day's sunset to the next day's sunrise
        nighttime = (sunrise[i+1]-sunset[i])
        outarr = np.concatenate((outarr,
                        sunset[i] + \
                        np.linspace(0,nighttime.value,steps_per_night)))
    return outarr
def get_realistic_times(target, loc, all_times):
    """Filter `all_times` down to epochs where the target is above the
    horizon with airmass below 3; returns (times, airmasses)."""
    frame = coord.AltAz(obstime=all_times, location=loc)
    airmass = np.array(target.transform_to(frame).secz)
    # secz > 0 means the target is above the horizon
    above_horizon = airmass > 0
    candidate_times = all_times[above_horizon]
    candidate_airmass = airmass[above_horizon]
    observable = candidate_airmass < 3.0
    return candidate_times[observable], candidate_airmass[observable]
class StarModel(TheoryModel):
    """Minimal TheoryModel that only stores per-epoch shifts (deltas)."""

    def __init__(self, deltas):
        # per-epoch Doppler shifts of the stellar spectrum
        self._deltas = deltas

    @property
    def deltas(self):
        return self._deltas

    @deltas.setter
    def deltas(self, value):
        self._deltas = value
def stellar_to_detector_flux(star,detector,exp_times):
    """Photon counts reaching the detector for each exposure.

    Scales the stellar surface photon flux by the stellar surface area,
    the solid-angle dilution at the star's distance, the detector
    throughput/area and the exposure times.  Returns an
    (n_exposures, n_wavelengths) Quantity in photons.
    """
    stellar_area = 4. * np.pi * star.stellar_radius**2
    # fraction of the emitted photons intercepted by the telescope aperture
    ratio_of_areas = detector.area / (4.* np.pi * star.distance**2)
    # diagnostics: photon flux per unit area, geometric ratio, exposure, area
    print("photon flux: {:.2e}".format(np.mean(star.wave_difference * star.surface_flux.to(u.photon/u.s / u.m**3, u.spectral_density(star.wave))).to(u.ph/u.m**2/u.s).value))
    print("ratios: {:.2e}".format(ratio_of_areas.to(1).value))
    print("exposures: {:.2e}".format(exp_times[0].to(u.s).value))
    print("star area: {:.2e}".format(stellar_area.to(u.m**2).value))
    # outer product: one row of (flux * bin width) per exposure time
    det_flux = detector.through_put * np.outer(exp_times, np.multiply(star.surface_flux.to(u.photon/u.s / u.m**3, \
                        u.spectral_density(star.wave)) \
                        , star.wave_difference)) * stellar_area * ratio_of_areas
    print("det flux: {:.2e}".format(np.mean(det_flux).to(u.ph).value))
    return det_flux.to(u.ph)
class PhoenixModel(TheoryModel):
    """Theory model backed by a PHOENIX synthetic stellar spectrum.

    Downloads (or loads from cache) the PHOENIX wavelength grid and the
    flux grid matching (z, alpha, temperature, logg), and pairs it with
    a binary-orbit radial-velocity model.

    Parameters
    ----------
    distance : Quantity
        Distance to the star.
    alpha, z : float
        Alpha enhancement and metallicity of the PHOENIX grid point.
    temperature : int
        Effective temperature [K] of the grid point.
    logg : float
        Surface gravity of the grid point.
    target : SkyCoord
        Sky position (and systemic velocity) of the star.
    amplitude : Quantity
        Radial-velocity semi-amplitude of the binary orbit.
    period : Quantity
        Orbital period.
    outdir : str, optional
        Cache directory for the FITS downloads
        (default: ``../data/stellar/PHOENIX``).
    """
    def __init__(self, distance, alpha, z, temperature, logg, target,
                 amplitude, period, outdir=None):
        super(PhoenixModel, self).__init__()
        # BUG FIX: previously self.outdir was only assigned inside the
        # `outdir is None` branch, so passing an explicit outdir raised
        # AttributeError in download_phoenix_wave below.
        if outdir is None:
            outdir = os.path.join('..', 'data', 'stellar', 'PHOENIX')
        self.outdir = outdir
        os.makedirs(self.outdir, exist_ok=True)
        self.temperature = temperature
        self.z = z
        self.logg = logg
        self.alpha = alpha
        self.wavename = download_phoenix_wave(self.outdir)
        self.fluxname = download_phoenix_model(self, self.outdir)
        grid = astropy.io.fits.open(self.fluxname)
        self.stellar_radius = grid['PRIMARY'].header['PHXREFF'] * u.cm
        self.surface_flux = grid['PRIMARY'].data * u.erg / u.cm**3 / u.s
        self.wave = read_in_fits(self.wavename) * u.Angstrom
        self.distance = coord.Distance(distance)
        self.target = target
        self.amplitude = amplitude
        self.period = period

    @property
    def wave_difference(self):
        """Wavelength bin widths; the PHOENIX high-resolution grid is
        taken to be uniform at 0.1 Angstrom (NOTE(review): confirm
        against the actual grid spacing)."""
        return 0.1 * u.Angstrom * np.ones(self.wave.shape)

    def get_velocity(self, detector, obs_times):
        """Apparent radial velocity at each epoch: barycentric correction
        at the detector site plus the binary orbital term."""
        rvs = get_velocity_measurements(obs_times, self.amplitude,
                                        self.period, detector.loc, self.target)
        return rvs

    def get_spectra(self, detector, obs_times):
        """Observed flux (one identical copy per epoch) and the
        wavelength grid; the surface flux is diluted by (R_star/d)**2."""
        print('surface flux: mean {:3.2e}\t median {:3.2e}'.format(np.mean(self.surface_flux),np.median(self.surface_flux)))
        obs_flux = self.surface_flux * (self.stellar_radius**2/self.distance**2).to(1)
        print('obs flux: mean {:3.2e}\t median {:3.2e}'.format(np.mean(obs_flux),np.median(obs_flux)))
        obs_flux = np.outer(np.ones(obs_times.shape), obs_flux)
        return obs_flux, self.wave

    def plot(self, ax, epoch_idx, normalize=None, nargs=()):
        """Plot the true (Doppler-shifted) spectrum for one epoch.

        ``nargs`` was a mutable default (``[]``); changed to a tuple.
        NOTE(review): self.flux / self.x / self.deltas / self.color are
        assumed to be provided by the base class or set externally.
        """
        y = self.flux
        if normalize is not None:
            y = normalize(y, *nargs)
        ax.plot(self.x - self.deltas[epoch_idx], y, 'o', color=self.color,
                alpha=0.4, label='Truth ' + self.__class__.__name__, markersize=4)
        return ax

    def plot_interpolated(self, ax, epoch_idx, normalize=None, nargs=()):
        """Plot the spectrum interpolated onto the shared grid (self.xs,
        self.fs) for one epoch."""
        y = self.fs[epoch_idx, :]
        if normalize is not None:
            y = normalize(y, *nargs)
        ax.plot(self.xs, y, '.', color=self.color, alpha=0.3,
                label='Interpolated ' + self.__class__.__name__, markersize=3)
        return ax
| [
"numpy.sqrt",
"numpy.log",
"numpy.array",
"numpy.sin",
"numpy.mean",
"ftplib.FTP",
"astropy.units.spectral_density",
"numpy.exp",
"numpy.linspace",
"astropy.coordinates.Distance",
"numpy.ones",
"astroplan.Observer",
"os.path.isfile",
"astropy.coordinates.EarthLocation.of_site",
"numpy.me... | [((570, 600), 'os.path.join', 'os.path.join', (['outdir', 'filename'], {}), '(outdir, filename)\n', (582, 600), False, 'import os\n'), ((607, 630), 'os.path.isfile', 'os.path.isfile', (['outname'], {}), '(outname)\n', (621, 630), False, 'import os\n'), ((1470, 1500), 'os.path.join', 'os.path.join', (['outdir', 'filename'], {}), '(outdir, filename)\n', (1482, 1500), False, 'import os\n'), ((1526, 1549), 'os.path.isfile', 'os.path.isfile', (['outname'], {}), '(outname)\n', (1540, 1549), False, 'import os\n'), ((2167, 2185), 'numpy.exp', 'np.exp', (['(2 * shifts)'], {}), '(2 * shifts)\n', (2173, 2185), True, 'import numpy as np\n'), ((2269, 2286), 'numpy.log', 'np.log', (['(1 + 1 / R)'], {}), '(1 + 1 / R)\n', (2275, 2286), True, 'import numpy as np\n'), ((2422, 2438), 'astropy.time.Time.now', 'atime.Time.now', ([], {}), '()\n', (2436, 2438), True, 'import astropy.time as atime\n'), ((2585, 2629), 'astropy.coordinates.SkyCoord', 'coord.SkyCoord', (['ra', 'dec'], {'radial_velocity': 'v_d'}), '(ra, dec, radial_velocity=v_d)\n', (2599, 2629), True, 'import astropy.coordinates as coord\n'), ((2638, 2670), 'astropy.coordinates.EarthLocation.of_site', 'coord.EarthLocation.of_site', (['obs'], {}), '(obs)\n', (2665, 2670), True, 'import astropy.coordinates as coord\n'), ((2860, 2882), 'astropy.time.Time', 'atime.Time', (['phase_time'], {}), '(phase_time)\n', (2870, 2882), True, 'import astropy.time as atime\n'), ((3359, 3381), 'astroplan.Observer', 'Observer', ([], {'location': 'loc'}), '(location=loc)\n', (3367, 3381), False, 'from astroplan import Observer\n'), ((3653, 3665), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3661, 3665), True, 'import numpy as np\n'), ((4004, 4048), 'astropy.coordinates.AltAz', 'coord.AltAz', ([], {'obstime': 'all_times', 'location': 'loc'}), '(obstime=all_times, location=loc)\n', (4015, 4048), True, 'import astropy.coordinates as coord\n'), ((750, 795), 'ftplib.FTP', 'FTP', (['"""phoenix.astro.physik.uni-goettingen.de"""'], 
{}), "('phoenix.astro.physik.uni-goettingen.de')\n", (753, 795), False, 'from ftplib import FTP\n'), ((1670, 1715), 'ftplib.FTP', 'FTP', (['"""phoenix.astro.physik.uni-goettingen.de"""'], {}), "('phoenix.astro.physik.uni-goettingen.de')\n", (1673, 1715), False, 'from ftplib import FTP\n'), ((2319, 2369), 'numpy.sqrt', 'np.sqrt', (['((1 + vel / const.c) / (1 - vel / const.c))'], {}), '((1 + vel / const.c) / (1 - vel / const.c))\n', (2326, 2369), True, 'import numpy as np\n'), ((2449, 2486), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'tframe.value', 'n'], {}), '(0, tframe.value, n)\n', (2466, 2486), True, 'import numpy as np\n'), ((2945, 2981), 'numpy.sin', 'np.sin', (['(2 * np.pi * ptime * u.radian)'], {}), '(2 * np.pi * ptime * u.radian)\n', (2951, 2981), True, 'import numpy as np\n'), ((3466, 3496), 'numpy.linspace', 'np.linspace', (['(0)', 'days', '(days + 1)'], {}), '(0, days, days + 1)\n', (3477, 3496), True, 'import numpy as np\n'), ((6405, 6429), 'astropy.coordinates.Distance', 'coord.Distance', (['distance'], {}), '(distance)\n', (6419, 6429), True, 'import astropy.coordinates as coord\n'), ((1885, 1911), 'os.path.join', 'os.path.join', (['*directories'], {}), '(*directories)\n', (1897, 1911), False, 'import os\n'), ((5734, 5782), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', '"""stellar"""', '"""PHOENIX"""'], {}), "('..', 'data', 'stellar', 'PHOENIX')\n", (5746, 5782), False, 'import os\n'), ((5792, 5831), 'os.makedirs', 'os.makedirs', (['self.outdir'], {'exist_ok': '(True)'}), '(self.outdir, exist_ok=True)\n', (5803, 5831), False, 'import os\n'), ((7602, 7626), 'numpy.ones', 'np.ones', (['obs_times.shape'], {}), '(obs_times.shape)\n', (7609, 7626), True, 'import numpy as np\n'), ((6662, 6686), 'numpy.ones', 'np.ones', (['self.wave.shape'], {}), '(self.wave.shape)\n', (6669, 6686), True, 'import numpy as np\n'), ((7209, 7235), 'numpy.mean', 'np.mean', (['self.surface_flux'], {}), '(self.surface_flux)\n', (7216, 7235), True, 'import 
numpy as np\n'), ((7236, 7264), 'numpy.median', 'np.median', (['self.surface_flux'], {}), '(self.surface_flux)\n', (7245, 7264), True, 'import numpy as np\n'), ((7421, 7438), 'numpy.mean', 'np.mean', (['obs_flux'], {}), '(obs_flux)\n', (7428, 7438), True, 'import numpy as np\n'), ((7439, 7458), 'numpy.median', 'np.median', (['obs_flux'], {}), '(obs_flux)\n', (7448, 7458), True, 'import numpy as np\n'), ((3866, 3914), 'numpy.linspace', 'np.linspace', (['(0)', 'nighttime.value', 'steps_per_night'], {}), '(0, nighttime.value, steps_per_night)\n', (3877, 3914), True, 'import numpy as np\n'), ((5445, 5462), 'numpy.mean', 'np.mean', (['det_flux'], {}), '(det_flux)\n', (5452, 5462), True, 'import numpy as np\n'), ((5291, 5320), 'astropy.units.spectral_density', 'u.spectral_density', (['star.wave'], {}), '(star.wave)\n', (5309, 5320), True, 'import astropy.units as u\n'), ((4889, 4918), 'astropy.units.spectral_density', 'u.spectral_density', (['star.wave'], {}), '(star.wave)\n', (4907, 4918), True, 'import astropy.units as u\n')] |
import numpy as np
import math
import random
def f(x):
    """Shifted paraboloid with minimum 0 at (3, -1)."""
    dx = x[0] - 3
    dy = x[1] + 1
    return dx*dx + dy*dy
class ES:
    """(1+1) evolution strategy with a 1/5 success rule, minimising `f`
    on the box [-10, 10]^2 starting from (2, 2)."""
    def __init__(self, MaxIter, a, sigma0, f):
        self.MaxIter = MaxIter  # number of mutation steps per run
        self.f = f              # objective function
        self.a = a              # step-size adaptation factor (> 1)
        self.sigma = 0.4        # base std-dev of the Gaussian mutation
        self.sigma0 = sigma0    # 2x2 per-coordinate step-size matrix (mutated in place)
        self.P_S = 0            # running success probability
        self.x = [2, 2]         # current best point
        self.eps = 0.0001       # NOTE(review): never used anywhere
        self.brojUspjesnih = 0  # number of successful mutations so far
        self.brojIter = 0       # number of completed iterations
    def mutate(self):
        # Gaussian perturbation scaled by the diagonal of sigma0,
        # clamped into [-10, 10] per coordinate.
        x = [min(max(self.x[0] + self.sigma0[0][0] * random.gauss(0, self.sigma), -10), 10), min(max(self.x[1] + self.sigma0[1][1] * random.gauss(0, self.sigma), -10), 10)]
        print(x, end = ' ')
        # If a coordinate was clamped exactly onto the boundary, restart
        # the candidate uniformly inside the box instead.
        if abs(x[0]) >= 10 or abs(x[1]) >= 10:
            x = [random.uniform(-10,10), random.uniform(-10, 10)]
        return x
    def step(self):
        xn = self.mutate()
        if self.f(xn) <= self.f(self.x):
            # accept: a non-worsening move counts as a success
            self.x = xn
            self.brojUspjesnih += 1
        # 1/5 success rule: grow the step size when successes are frequent,
        # shrink it when they are rare.
        # NOTE(review): growth touches sigma0[0][0] while shrinkage touches
        # sigma0[1][1] — the asymmetry looks unintentional; confirm.
        if self.P_S > 1/5:
            self.sigma0[0][0] *= self.a
        elif self.P_S < 1/5:
            self.sigma0[1][1] /= self.a
        self.brojIter += 1
        self.P_S = self.brojUspjesnih / self.brojIter
    def run(self):
        """Run MaxIter steps and return the final point."""
        for i in range(0, self.MaxIter):
            self.step()
        print('')
        return self.x
# print((ES(100, 1.47, [[1, 0], [0, 1]], f).run()))
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
IT = 15  # number of ES iterations per run
a = 1.1  # step-size adaptation factor for the 1/5 success rule
S = [[0.5, 0], [0, 0.5]]  # initial diagonal step-size matrix (mutated in place by ES.run)
def f(X):
    """Quadratic bowl centred at (3, -1); works on scalars and numpy meshgrids."""
    u, v = X[0], X[1]
    return (u - 3) ** 2 + (v + 1) ** 2
# --- Run 1: quadratic bowl on [-5, 5]^2; plot the surface and the ES result ---
x1 = np.linspace(-5, 5, 100)
x2 = np.linspace(-5, 5, 100)
X1, X2 = np.meshgrid(x1, x2)
Y = f([X1, X2])
fig = plt.figure()
ax = fig.add_subplot(2,2,1,projection='3d')
ax.contour(X1, X2, Y, 50, cmap='binary')
xTS = ES(IT, a, S, f).run()  # best point found by the evolution strategy
ax.scatter(xTS[0], xTS[1], f(xTS), color='blue', marker='o')
# ax.scatter(xILS[0], xILS[1], f(xILS), color='green', marker='x')
ax.set_xlabel('$x_1$')
ax.set_ylabel('$x_2$')
ax.set_zlabel('$f(x_1,x_2)$');
ax.set_title('$f(x_1,x_2) = (x_1-3)^2 + (x_2+1)^2$')
# NOTE(review): this second run reuses S, which the first run mutated in place.
print((ES(IT, a, S, f).run()))
def f(X):
    """Drop-wave test function; global minimum of -1 at the origin."""
    r2 = X[0] ** 2 + X[1] ** 2
    return -(1 + np.cos(12 * np.sqrt(r2))) / (0.5 * r2 + 2)
# --- Run 2: drop-wave function on [-5, 5]^2 ---
x1 = np.linspace(-5, 5, 100)
x2 = np.linspace(-5, 5, 100)
X1, X2 = np.meshgrid(x1, x2)
Y = f([X1, X2])
ax = fig.add_subplot(2,2,2,projection='3d')
ax.contour(X1, X2, Y, 50, cmap='binary')
xTS = ES(IT, a, S, f).run()  # best point found by the evolution strategy
ax.scatter(xTS[0], xTS[1], f(xTS), color='blue', marker='o')
# ax.scatter(xILS[0], xILS[1], f(xILS), color='green', marker='x')
ax.set_xlabel('$x_1$')
ax.set_ylabel('$x_2$')
ax.set_zlabel('$f(x_1,x_2)$');
# Fixed: the title previously showed the Rosenbrock formula, but f here is
# the drop-wave function being plotted and optimised.
ax.set_title('$f(x_1,x_2) = -(1+\\cos(12\\sqrt{x_1^2+x_2^2}))/(0.5(x_1^2+x_2^2)+2)$')
print((ES(IT, a, S, f).run()))
def f(x):
    """2-D Rastrigin function; global minimum of 0 at the origin."""
    total = 20
    for xi in (x[0], x[1]):
        total = total + xi ** 2 - 10 * np.cos(2 * math.pi * xi)
    return total
# --- Run 3: Rastrigin function on [-5, 5]^2 ---
x1 = np.linspace(-5, 5, 100)
x2 = np.linspace(-5, 5, 100)
X1, X2 = np.meshgrid(x1, x2)
Y = f([X1, X2])
ax = fig.add_subplot(2,2,3,projection='3d')
ax.contour(X1, X2, Y, 50, cmap='binary')
xTS = ES(IT, a, S, f).run()  # best point found by the evolution strategy
ax.scatter(xTS[0], xTS[1], f(xTS), color='blue', marker='o')
# ax.scatter(xILS[0], xILS[1], f(xILS), color='green', marker='x')
ax.set_xlabel('$x_1$')
ax.set_ylabel('$x_2$')
ax.set_zlabel('$f(x_1,x_2)$');
ax.set_title('$f(x_1,x_2) = 20 + (x_1^2 - 10cos(2\pi x_1) + (x_2^2 - 10cos(2\pi x_2)$')
# NOTE(review): a fresh step-size literal is passed here instead of the shared
# S, which the earlier ES runs have mutated in place.
print((ES(IT, a, [[0.5, 0], [0, 0.5]], f).run()))
def f(x):
    """Holder-table-style test function (negated absolute product)."""
    radial = np.sqrt(x[0] ** 2 + x[1] ** 2)
    envelope = np.exp(abs(1 - radial / math.pi))
    return -abs(np.sin(x[0]) * np.cos(x[1]) * envelope)
# --- Run 4: Holder-table-style function on [-11, 11]^2 ---
x1 = np.linspace(-11, 11, 100)
x2 = np.linspace(-11, 11, 100)
X1, X2 = np.meshgrid(x1, x2)
Y = f([X1, X2])
ax = fig.add_subplot(2,2,4,projection='3d')
ax.contour(X1, X2, Y, 50, cmap='binary')
xTS = ES(IT, a, S, f).run()  # best point found by the evolution strategy
ax.scatter(xTS[0], xTS[1], f(xTS), color='blue', marker='o')
# ax.scatter(xILS[0], xILS[1], f(xILS), color='green', marker='x')
ax.set_xlabel('$x_1$')
ax.set_ylabel('$x_2$')
ax.set_zlabel('$f(x_1,x_2)$');
# NOTE(review): title puts |.| only around sin; the code takes the absolute
# value of the whole product.
ax.set_title('$f(x_1,x_2) = -|\sin(x_1)|\cos(x_2)e^{|1 - \sqrt{x_1^2+x_2^2}/\pi|}$')
print((ES(IT, a, S, f).run()))
plt.show()
| [
"random.uniform",
"numpy.sqrt",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.cos",
"numpy.sin",
"numpy.meshgrid",
"random.gauss",
"matplotlib.pyplot.show"
] | [((1547, 1570), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(100)'], {}), '(-5, 5, 100)\n', (1558, 1570), True, 'import numpy as np\n'), ((1577, 1600), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(100)'], {}), '(-5, 5, 100)\n', (1588, 1600), True, 'import numpy as np\n'), ((1611, 1630), 'numpy.meshgrid', 'np.meshgrid', (['x1', 'x2'], {}), '(x1, x2)\n', (1622, 1630), True, 'import numpy as np\n'), ((1655, 1667), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1665, 1667), True, 'import matplotlib.pyplot as plt\n'), ((2185, 2208), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(100)'], {}), '(-5, 5, 100)\n', (2196, 2208), True, 'import numpy as np\n'), ((2215, 2238), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(100)'], {}), '(-5, 5, 100)\n', (2226, 2238), True, 'import numpy as np\n'), ((2249, 2268), 'numpy.meshgrid', 'np.meshgrid', (['x1', 'x2'], {}), '(x1, x2)\n', (2260, 2268), True, 'import numpy as np\n'), ((2817, 2840), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(100)'], {}), '(-5, 5, 100)\n', (2828, 2840), True, 'import numpy as np\n'), ((2847, 2870), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(100)'], {}), '(-5, 5, 100)\n', (2858, 2870), True, 'import numpy as np\n'), ((2881, 2900), 'numpy.meshgrid', 'np.meshgrid', (['x1', 'x2'], {}), '(x1, x2)\n', (2892, 2900), True, 'import numpy as np\n'), ((3503, 3528), 'numpy.linspace', 'np.linspace', (['(-11)', '(11)', '(100)'], {}), '(-11, 11, 100)\n', (3514, 3528), True, 'import numpy as np\n'), ((3535, 3560), 'numpy.linspace', 'np.linspace', (['(-11)', '(11)', '(100)'], {}), '(-11, 11, 100)\n', (3546, 3560), True, 'import numpy as np\n'), ((3571, 3590), 'numpy.meshgrid', 'np.meshgrid', (['x1', 'x2'], {}), '(x1, x2)\n', (3582, 3590), True, 'import numpy as np\n'), ((4055, 4065), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4063, 4065), True, 'import matplotlib.pyplot as plt\n'), ((734, 757), 'random.uniform', 'random.uniform', (['(-10)', '(10)'], 
{}), '(-10, 10)\n', (748, 757), False, 'import random\n'), ((758, 781), 'random.uniform', 'random.uniform', (['(-10)', '(10)'], {}), '(-10, 10)\n', (772, 781), False, 'import random\n'), ((2787, 2813), 'numpy.cos', 'np.cos', (['(2 * math.pi * x[1])'], {}), '(2 * math.pi * x[1])\n', (2793, 2813), True, 'import numpy as np\n'), ((2747, 2773), 'numpy.cos', 'np.cos', (['(2 * math.pi * x[0])'], {}), '(2 * math.pi * x[0])\n', (2753, 2773), True, 'import numpy as np\n'), ((3414, 3426), 'numpy.sin', 'np.sin', (['x[0]'], {}), '(x[0])\n', (3420, 3426), True, 'import numpy as np\n'), ((3429, 3441), 'numpy.cos', 'np.cos', (['x[1]'], {}), '(x[1])\n', (3435, 3441), True, 'import numpy as np\n'), ((2119, 2149), 'numpy.sqrt', 'np.sqrt', (['(X[0] ** 2 + X[1] ** 2)'], {}), '(X[0] ** 2 + X[1] ** 2)\n', (2126, 2149), True, 'import numpy as np\n'), ((503, 530), 'random.gauss', 'random.gauss', (['(0)', 'self.sigma'], {}), '(0, self.sigma)\n', (515, 530), False, 'import random\n'), ((583, 610), 'random.gauss', 'random.gauss', (['(0)', 'self.sigma'], {}), '(0, self.sigma)\n', (595, 610), False, 'import random\n'), ((3459, 3489), 'numpy.sqrt', 'np.sqrt', (['(x[0] ** 2 + x[1] ** 2)'], {}), '(x[0] ** 2 + x[1] ** 2)\n', (3466, 3489), True, 'import numpy as np\n')] |
import sys
sys.path.append("python")  # make the local Python SurfStat port importable
from SurfStatF import *
import surfstat_wrap as sw
import numpy as np
import sys  # NOTE(review): duplicate import of sys (harmless but redundant)
import pytest
# Start the MATLAB engine behind the SurfStat wrapper once, at import time.
sw.matlab_init_surfstat()
def dummy_test(A, B):
    """Compare the wrapped MATLAB SurfStatF against the Python port.

    Runs both implementations on the same pair of slm dicts and asserts that
    every output field matches within a relative tolerance. Skips (rather
    than fails) when the reference MATLAB code cannot handle the inputs.
    """
    try:
        # wrap matlab functions
        Wrapped_slm = sw.matlab_SurfStatF(A, B)
    except Exception:
        # Fixed: a bare `except:` also swallowed SystemExit/KeyboardInterrupt.
        pytest.skip("Original MATLAB code does not work with these inputs.")
    # run python functions
    Python_slm = py_SurfStatF(A, B)
    # compare matlab-python outputs field by field
    comparisons = [
        np.allclose(Python_slm[key], Wrapped_slm[key], rtol=1e-05, equal_nan=True)
        for key in Wrapped_slm
    ]
    assert all(comparisons)
#### Test 1
def test_slm1_slm2_easy_int():
    """slm1['coef'] is a 2D integer array; slm1['X'] and slm2['X'] are the
    same 2D integer array (fixed small dimensions)."""
    n, p, k, v = 5, 6, 2, 1
    rng = np.random.default_rng()
    sse_rows = k * (k + 1) // 2
    slm1 = {
        'X': rng.integers(100, size=(n, p)),
        'df': n - 1,
        'SSE': rng.integers(1, 100, size=(sse_rows, v)),
        'coef': rng.integers(100, size=(p, v)),
    }
    slm2 = {
        'X': slm1['X'],
        'df': n,
        'SSE': rng.integers(1, 100, size=(sse_rows, v)),
        'coef': rng.integers(100, size=(p, v)),
    }
    dummy_test(slm1, slm2)
#### Test 2
def test_slm1_slm2_middle_int():
    """slm1['coef'] is a 2D integer array; slm1['X'] and slm2['X'] are the
    same 2D integer array; dimensions drawn at random."""
    n, p, k, v = [np.random.randint(3, 100) for _ in range(4)]
    rng = np.random.default_rng()
    sse_rows = k * (k + 1) // 2
    slm1 = {
        'X': rng.integers(100, size=(n, p)),
        'df': n - 1,
        'SSE': rng.integers(1, 100, size=(sse_rows, v)),
        'coef': rng.integers(100, size=(p, v)),
    }
    slm2 = {
        'X': slm1['X'],
        'df': n,
        'SSE': rng.integers(1, 100, size=(sse_rows, v)),
        'coef': rng.integers(100, size=(p, v)),
    }
    dummy_test(slm1, slm2)
#### Test 3
def test_slm1_slm2_easy_random():
    """slm1['coef'] is a 2D random float array; slm1['X'] and slm2['X'] are
    the same 2D random float array; dimensions drawn at random."""
    n = np.random.randint(3, 100)
    p = np.random.randint(3, 100)
    k = np.random.randint(3, 100)
    v = np.random.randint(3, 100)
    # Fixed: removed an unused `rng = np.random.default_rng()` local; this
    # test draws everything from the global np.random state.
    sse_rows = k * (k + 1) // 2
    slm1 = {
        'X': np.random.rand(n, p),
        'df': n,
        'SSE': np.random.rand(sse_rows, v),
        'coef': np.random.rand(p, v),
    }
    slm2 = {
        'X': slm1['X'],
        'df': p,
        'SSE': np.random.rand(sse_rows, v),
        'coef': np.random.rand(p, v),
    }
    dummy_test(slm1, slm2)
#### Test 4
def test_slm1_slm2_coef3D_int_k3():
    """k = 3; slm1['coef'] is a constant 3D array; slm1['X'] and slm2['X']
    are the same 2D integer array."""
    rng = np.random.default_rng()
    n = np.random.randint(3, 100)
    p = np.random.randint(3, 100)
    k = 3
    v = np.random.randint(3, 100)
    sse_rows = k * (k + 1) // 2
    slm1 = {
        'X': rng.integers(100, size=(n, p)),
        'df': p,
        'SSE': rng.integers(1, 100, size=(sse_rows, v)),
        'coef': np.full((p, v, k), 3.0),
    }
    slm2 = {
        'X': slm1['X'],
        'df': p + 1,
        'SSE': rng.integers(1, 100, size=(sse_rows, v)),
        'coef': np.full((p, v, k), 1.0),
    }
    dummy_test(slm1, slm2)
#### Test 5
def test_slm1_slm2_coef3D_int_k2():
    """k = 2; slm1['coef'] is a constant 3D array; slm1['X'] and slm2['X']
    are the same 2D integer array."""
    rng = np.random.default_rng()
    n = np.random.randint(3, 100)
    p = np.random.randint(3, 100)
    k = 2
    v = np.random.randint(3, 100)
    sse_rows = k * (k + 1) // 2
    slm1 = {
        'X': rng.integers(100, size=(n, p)),
        'df': p + 1,
        'SSE': rng.integers(1, 100, size=(sse_rows, v)),
        'coef': np.full((p, v, k), 3.0),
    }
    slm2 = {
        'X': slm1['X'],
        'df': p,
        'SSE': rng.integers(1, 100, size=(sse_rows, v)),
        'coef': np.full((p, v, k), 1.0),
    }
    dummy_test(slm1, slm2)
#### Test 6
def test_slm1_slm2_coef3D_int_k1():
    """k = 1; slm1['coef'] is a 3D integer array; slm1['X'] and slm2['X']
    are the same 2D integer array."""
    rng = np.random.default_rng()
    n = np.random.randint(3,100)
    p = np.random.randint(3,100)
    # Fixed: the test is named and documented for k = 1 but hard-coded k = 2,
    # duplicating test 5 and leaving the k = 1 case uncovered.
    k = 1
    v = np.random.randint(3,100)
    slm1 = {}
    slm1['X'] = rng.integers(100, size=(n,p))
    slm1['df'] = n
    slm1['SSE'] = rng.integers(1,100, size=(int(k*(k+1)/2),v))
    slm1['coef'] = rng.integers(1,100, size=(p,v,k))
    slm2 = {}
    slm2['X'] = slm1['X']
    slm2['df'] = p
    slm2['SSE'] = rng.integers(1,100, size=(int(k*(k+1)/2),v))
    slm2['coef'] = rng.integers(1,100, size=(p,v,k))
    dummy_test(slm1, slm2)
#### Test 7
def test_slm1_slm2_coef3D_random_k3():
    """k = 3; slm1['coef'] is a 3D random float array; slm1['X'] and
    slm2['X'] are the same 2D random float array."""
    n = np.random.randint(3, 100)
    p = np.random.randint(3, 100)
    k = 3
    v = np.random.randint(3, 100)
    sse_rows = k * (k + 1) // 2
    slm1 = {
        'X': np.random.rand(n, p),
        'df': p,
        'SSE': np.random.rand(sse_rows, v),
        'coef': np.random.rand(p, v, k),
    }
    slm2 = {
        'X': slm1['X'],
        'df': p + 1,
        'SSE': np.random.rand(sse_rows, v),
        'coef': np.random.rand(p, v, k),
    }
    dummy_test(slm1, slm2)
#### Test 8
def test_slm1_slm2_coef3D_random_k2():
    """k = 2; slm1['coef'] is a 3D random float array; slm1['X'] and
    slm2['X'] are the same 2D random float array."""
    n = np.random.randint(3, 100)
    p = np.random.randint(3, 100)
    k = 2
    v = np.random.randint(3, 100)
    sse_rows = k * (k + 1) // 2
    slm1 = {
        'X': np.random.rand(n, p),
        'df': p + 1,
        'SSE': np.random.rand(sse_rows, v),
        'coef': np.random.rand(p, v, k),
    }
    slm2 = {
        'X': slm1['X'],
        'df': p,
        'SSE': np.random.rand(sse_rows, v),
        'coef': np.random.rand(p, v, k),
    }
    dummy_test(slm1, slm2)
#### Test 9
def test_slm1_slm2_coef3D_random_k1():
    """k = 1; slm1['coef'] is a 3D random float array; slm1['X'] and
    slm2['X'] are the same 2D random float array."""
    n = np.random.randint(3, 100)
    p = np.random.randint(3, 100)
    k = 1
    v = np.random.randint(3, 100)
    sse_rows = k * (k + 1) // 2
    slm1 = {
        'X': np.random.rand(n, p),
        'df': p + 1,
        'SSE': np.random.rand(sse_rows, v),
        'coef': np.random.rand(p, v, k),
    }
    slm2 = {
        'X': slm1['X'],
        'df': p,
        'SSE': np.random.rand(sse_rows, v),
        'coef': np.random.rand(p, v, k),
    }
    dummy_test(slm1, slm2)
| [
"numpy.allclose",
"numpy.random.rand",
"numpy.random.default_rng",
"numpy.ones",
"surfstat_wrap.matlab_SurfStatF",
"surfstat_wrap.matlab_init_surfstat",
"numpy.random.randint",
"pytest.skip",
"sys.path.append"
] | [((11, 36), 'sys.path.append', 'sys.path.append', (['"""python"""'], {}), "('python')\n", (26, 36), False, 'import sys\n'), ((133, 158), 'surfstat_wrap.matlab_init_surfstat', 'sw.matlab_init_surfstat', ([], {}), '()\n', (156, 158), True, 'import surfstat_wrap as sw\n'), ((870, 893), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (891, 893), True, 'import numpy as np\n'), ((1464, 1489), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (1481, 1489), True, 'import numpy as np\n'), ((1497, 1522), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (1514, 1522), True, 'import numpy as np\n'), ((1532, 1557), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (1549, 1557), True, 'import numpy as np\n'), ((1565, 1590), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (1582, 1590), True, 'import numpy as np\n'), ((1601, 1624), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (1622, 1624), True, 'import numpy as np\n'), ((2186, 2211), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (2203, 2211), True, 'import numpy as np\n'), ((2219, 2244), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (2236, 2244), True, 'import numpy as np\n'), ((2254, 2279), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (2271, 2279), True, 'import numpy as np\n'), ((2287, 2312), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (2304, 2312), True, 'import numpy as np\n'), ((2323, 2346), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (2344, 2346), True, 'import numpy as np\n'), ((2378, 2398), 'numpy.random.rand', 'np.random.rand', (['n', 'p'], {}), '(n, p)\n', (2392, 2398), True, 'import numpy as np\n'), ((2490, 2510), 'numpy.random.rand', 'np.random.rand', (['p', 
'v'], {}), '(p, v)\n', (2504, 2510), True, 'import numpy as np\n'), ((2645, 2665), 'numpy.random.rand', 'np.random.rand', (['p', 'v'], {}), '(p, v)\n', (2659, 2665), True, 'import numpy as np\n'), ((2878, 2901), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (2899, 2901), True, 'import numpy as np\n'), ((2911, 2936), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (2928, 2936), True, 'import numpy as np\n'), ((2944, 2969), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (2961, 2969), True, 'import numpy as np\n'), ((2987, 3012), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (3004, 3012), True, 'import numpy as np\n'), ((3341, 3359), 'numpy.ones', 'np.ones', (['(p, v, k)'], {}), '((p, v, k))\n', (3348, 3359), True, 'import numpy as np\n'), ((3580, 3603), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (3601, 3603), True, 'import numpy as np\n'), ((3617, 3642), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (3634, 3642), True, 'import numpy as np\n'), ((3650, 3675), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (3667, 3675), True, 'import numpy as np\n'), ((3693, 3718), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (3710, 3718), True, 'import numpy as np\n'), ((4047, 4065), 'numpy.ones', 'np.ones', (['(p, v, k)'], {}), '((p, v, k))\n', (4054, 4065), True, 'import numpy as np\n'), ((4282, 4305), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (4303, 4305), True, 'import numpy as np\n'), ((4319, 4344), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (4336, 4344), True, 'import numpy as np\n'), ((4352, 4377), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (4369, 4377), True, 'import numpy as np\n'), ((4395, 4420), 
'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (4412, 4420), True, 'import numpy as np\n'), ((4997, 5022), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (5014, 5022), True, 'import numpy as np\n'), ((5030, 5055), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (5047, 5055), True, 'import numpy as np\n'), ((5073, 5098), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (5090, 5098), True, 'import numpy as np\n'), ((5129, 5149), 'numpy.random.rand', 'np.random.rand', (['n', 'p'], {}), '(n, p)\n', (5143, 5149), True, 'import numpy as np\n'), ((5239, 5262), 'numpy.random.rand', 'np.random.rand', (['p', 'v', 'k'], {}), '(p, v, k)\n', (5253, 5262), True, 'import numpy as np\n'), ((5395, 5418), 'numpy.random.rand', 'np.random.rand', (['p', 'v', 'k'], {}), '(p, v, k)\n', (5409, 5418), True, 'import numpy as np\n'), ((5637, 5662), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (5654, 5662), True, 'import numpy as np\n'), ((5670, 5695), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (5687, 5695), True, 'import numpy as np\n'), ((5713, 5738), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (5730, 5738), True, 'import numpy as np\n'), ((5769, 5789), 'numpy.random.rand', 'np.random.rand', (['n', 'p'], {}), '(n, p)\n', (5783, 5789), True, 'import numpy as np\n'), ((5881, 5904), 'numpy.random.rand', 'np.random.rand', (['p', 'v', 'k'], {}), '(p, v, k)\n', (5895, 5904), True, 'import numpy as np\n'), ((6039, 6062), 'numpy.random.rand', 'np.random.rand', (['p', 'v', 'k'], {}), '(p, v, k)\n', (6053, 6062), True, 'import numpy as np\n'), ((6273, 6298), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (6290, 6298), True, 'import numpy as np\n'), ((6306, 6331), 'numpy.random.randint', 'np.random.randint', 
(['(3)', '(100)'], {}), '(3, 100)\n', (6323, 6331), True, 'import numpy as np\n'), ((6349, 6374), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (6366, 6374), True, 'import numpy as np\n'), ((6405, 6425), 'numpy.random.rand', 'np.random.rand', (['n', 'p'], {}), '(n, p)\n', (6419, 6425), True, 'import numpy as np\n'), ((6517, 6540), 'numpy.random.rand', 'np.random.rand', (['p', 'v', 'k'], {}), '(p, v, k)\n', (6531, 6540), True, 'import numpy as np\n'), ((6675, 6698), 'numpy.random.rand', 'np.random.rand', (['p', 'v', 'k'], {}), '(p, v, k)\n', (6689, 6698), True, 'import numpy as np\n'), ((231, 256), 'surfstat_wrap.matlab_SurfStatF', 'sw.matlab_SurfStatF', (['A', 'B'], {}), '(A, B)\n', (250, 256), True, 'import surfstat_wrap as sw\n'), ((3175, 3193), 'numpy.ones', 'np.ones', (['(p, v, k)'], {}), '((p, v, k))\n', (3182, 3193), True, 'import numpy as np\n'), ((3883, 3901), 'numpy.ones', 'np.ones', (['(p, v, k)'], {}), '((p, v, k))\n', (3890, 3901), True, 'import numpy as np\n'), ((271, 339), 'pytest.skip', 'pytest.skip', (['"""Original MATLAB code does not work with these inputs."""'], {}), "('Original MATLAB code does not work with these inputs.')\n", (282, 339), False, 'import pytest\n'), ((511, 585), 'numpy.allclose', 'np.allclose', (['Python_slm[key]', 'Wrapped_slm[key]'], {'rtol': '(1e-05)', 'equal_nan': '(True)'}), '(Python_slm[key], Wrapped_slm[key], rtol=1e-05, equal_nan=True)\n', (522, 585), True, 'import numpy as np\n')] |
import math
import torch
import itertools
import numpy as np
from torchvision.ops.boxes import nms
class SSDBoxCoder:
    """Encodes/decodes bounding boxes to and from SSD anchor ("prior") offsets.

    Priors are either produced by a caller-supplied `box_generator` or by the
    default SSD scheme in `_get_default_boxes`. Prior boxes are stored in
    (cx, cy, w, h) format.
    """
    def __init__(self, steps, box_sizes, aspect_ratios, fm_sizes, box_generator=None):
        # prior_boxes: tensor of prior boxes in (cx, cy, w, h) format.
        # priors: unique (feature-map index, w, h) triples (see _get_default_boxes).
        self.prior_boxes, self.priors = box_generator(steps, box_sizes, aspect_ratios, fm_sizes) if box_generator is not None \
            else self._get_default_boxes(steps, box_sizes, aspect_ratios, fm_sizes)
        # When True, encode() falls back to matching each ground-truth box to
        # its best prior even if no prior reaches the IoU threshold.
        self.enforce_matching = False
    @staticmethod
    def _get_default_boxes(steps, box_sizes, aspect_ratios, fm_sizes):
        """Build the default SSD prior boxes in (cx, cy, w, h) format.

        For every cell of every feature map: a square box of scale
        box_sizes[i], a square box of the intermediate scale
        sqrt(box_sizes[i] * box_sizes[i+1]), and a pair of boxes (width and
        height swapped) per aspect ratio.
        """
        boxes = []
        priors = []
        for i, fm_size in enumerate(fm_sizes):
            for h, w in itertools.product(range(fm_size), repeat=2):
                # Box centre for this feature-map cell, in input-image coordinates.
                cx = (w + 0.5) * steps[i]
                cy = (h + 0.5) * steps[i]
                s = box_sizes[i]
                boxes.append((cx, cy, s, s))
                priors.append((i, s, s))
                # Intermediate scale between this level and the next.
                s = math.sqrt(box_sizes[i] * box_sizes[i + 1])
                boxes.append((cx, cy, s, s))
                priors.append((i, s, s))
                s = box_sizes[i]
                for ar in aspect_ratios[i]:
                    # One box per aspect ratio plus its transpose (w/h swapped).
                    boxes.append((cx, cy, s * math.sqrt(ar), s / math.sqrt(ar)))
                    boxes.append((cx, cy, s / math.sqrt(ar), s * math.sqrt(ar)))
                    priors.append((i, s * math.sqrt(ar), s / math.sqrt(ar)))
                    priors.append((i, s / math.sqrt(ar), s * math.sqrt(ar)))
        return torch.Tensor(boxes), torch.Tensor(np.unique(np.array(priors), axis=0))
    def encode(self, boxes, labels):
        '''Encode target bounding boxes and class labels.
        SSD coding rules:
          tx = (x - anchor_x) / (variance[0]*anchor_w)
          ty = (y - anchor_y) / (variance[0]*anchor_h)
          tw = log(w / anchor_w) / variance[1]
          th = log(h / anchor_h) / variance[1]
        Args:
          boxes: (tensor) bounding boxes of (xmin,ymin,xmax,ymax), sized [#obj, 4].
          labels: (tensor) object class labels, sized [#obj,].
        Returns:
          loc_targets: (tensor) encoded bounding boxes, sized [#anchors,4].
          cls_targets: (tensor) encoded class labels, sized [#anchors,].
        Reference:
          https://github.com/chainer/chainercv/blob/master/chainercv/links/model/ssd/multibox_coder.py
        '''
        def argmax(x):
            # Index (row, col) of the single largest element of a 2-D tensor.
            v, i = x.max(0)
            j = v.max(0)[1]
            return (i[j], j)
        device = labels.get_device() if labels.get_device() >= 0 else torch.device('cpu')
        prior_boxes = self.prior_boxes.to(device)  # xywh
        prior_boxes = change_box_order(prior_boxes, 'xywh2xyxy')
        # support for frame with no ground truth
        if not len(boxes) > 0:
            num_priors = len(prior_boxes)
            loc_targets = torch.zeros((num_priors, 4), dtype=boxes.dtype, device=device)
            cls_targets = torch.zeros((num_priors), dtype=labels.dtype, device=device)
            return loc_targets, cls_targets
        ious = box_iou(prior_boxes, boxes)  # [#anchors, #obj]
        # index[a] = ground-truth box matched to anchor a, or -1 if unmatched.
        # index = torch.LongTensor(len(prior_boxes)).fill_(-1).to(device)
        index = torch.full(size=torch.Size([prior_boxes.size()[0]]), fill_value=-1, dtype=torch.long, device=device)
        masked_ious = ious.clone()
        # Greedy bipartite matching: repeatedly pick the globally best
        # (anchor, object) pair, then zero out that anchor's row and that
        # object's column so each is matched at most once.
        while True:
            i, j = argmax(masked_ious)
            if masked_ious[i, j] < 1e-6:
                break
            index[i] = j
            masked_ious[i, :] = 0
            masked_ious[:, j] = 0
        # Additionally match every still-unmatched anchor whose best IoU with
        # any object is at least 0.4.
        mask = (index < 0) & (ious.max(1)[0] >= 0.4)
        if mask.any():
            index[mask] = ious[mask.nonzero().squeeze(dim=1)].max(1)[1]
        elif self.enforce_matching:
            # No anchor cleared the threshold: force-match the best anchor
            # per ground-truth box so no object is left without a target.
            top_priors = ious.max(0)
            for val, idx in zip(top_priors.values, top_priors.indices):
                mask[idx] = True
            index[mask] = ious[mask.nonzero().squeeze(dim=1)].max(1)[1]
        boxes = boxes[index.clamp(min=0)]  # negative index not supported
        boxes = change_box_order(boxes, 'xyxy2xywh')
        prior_boxes = change_box_order(prior_boxes, 'xyxy2xywh')
        # Encode box offsets with the standard SSD variances.
        variances = (0.1, 0.2)
        loc_xy = (boxes[:,:2]-prior_boxes[:,:2]) / prior_boxes[:,2:] / variances[0]
        loc_wh = torch.log(boxes[:,2:]/prior_boxes[:,2:]) / variances[1]
        loc_targets = torch.cat([loc_xy,loc_wh], 1)
        # cls_targets = 1 + labels[index.clamp(min=0)] # TODO: why +1 ???
        cls_targets = labels[index.clamp(min=0)]
        # Unmatched anchors get the background label 0.
        cls_targets[index<0] = 0
        return loc_targets, cls_targets
    def decode(self, loc_preds, cls_preds, score_thresh=0.05, nms_thresh=0.55):
        """Decode predicted loc/cls back to real box locations and class labels.
        Args:
          loc_preds: (tensor) predicted loc, sized [8732,4].
          cls_preds: (tensor) predicted conf, sized [8732,21].
          score_thresh: (float) threshold for object confidence score.
          nms_thresh: (float) threshold for box nms.
        Returns:
          boxes: (tensor) bbox locations, sized [#obj,4].
          labels: (tensor) class labels, sized [#obj,].
        """
        device = cls_preds.get_device() if cls_preds.get_device() >= 0 else torch.device('cpu')
        prior_boxes = self.prior_boxes.to(device)
        # Invert the variance-scaled offset encoding used by encode().
        variances = (0.1, 0.2)
        xy = loc_preds[:, :2] * variances[0] * prior_boxes[:, 2:] + prior_boxes[:, :2]
        wh = torch.exp(loc_preds[:, 2:] * variances[1]) * prior_boxes[:, 2:]
        box_preds = torch.cat([xy - wh / 2, xy + wh / 2], 1)  # (xmin, ymin, xmax, ymax)
        boxes = []
        labels = []
        scores = []
        # Per-class filtering + NMS; column 0 (background) is skipped by the
        # [1:] slice, so slice index i corresponds to class label i + 1.
        # num_classes = cls_preds.size(1)
        # for i in range(1, num_classes):
        #     score = cls_preds[:, i]
        for i, cls_pred in enumerate(cls_preds.split(1, dim=1)[1:]):
            score = cls_pred.squeeze(dim=1)
            mask = (score > score_thresh).nonzero().squeeze(dim=1)
            if mask.sum() == torch.tensor(data=0, device=device):
                continue
            box = box_preds[mask]
            score = score[mask]
            # keep = box_nms(box, score, nms_thresh)
            keep = nms(box, score, nms_thresh)
            boxes.append(box[keep])
            # labels.append(torch.LongTensor(len(box[keep])).fill_(i+1))
            labels.append(torch.full_like(score[keep], fill_value=i+1, dtype=torch.long, device=device))
            # labels.append(torch.full(size=torch.Size([score[keep].size()[0]]), fill_value=i+1, dtype=torch.long,
            #                          device=device))
            scores.append(score[keep])
        if not boxes:
            # No detection above the score threshold in any class.
            return torch.tensor([]), torch.tensor([]), torch.tensor([])
        boxes = torch.cat(boxes, 0)
        labels = torch.cat(labels, 0)
        scores = torch.cat(scores, 0)
        return boxes, labels, scores
def change_box_order(boxes, order):
    """Change box order between (xmin,ymin,xmax,ymax) and (xcenter,ycenter,width,height).

    Args:
      boxes: (tensor) bounding boxes, sized [N,4].
      order: (str) either 'xyxy2xywh' or 'xywh2xyxy'.

    Returns:
      (tensor) converted bounding boxes, sized [N,4].
    """
    assert order in ['xyxy2xywh','xywh2xyxy']
    first = boxes[:, :2]
    second = boxes[:, 2:]
    if order == 'xyxy2xywh':
        center = (first + second) / 2
        size = second - first
        return torch.cat([center, size], 1)
    # xywh2xyxy: centre +/- half the size gives the corners.
    return torch.cat([first - second / 2, first + second / 2], 1)
def box_clamp(boxes, xmin, ymin, xmax, ymax):
    """Clamp boxes in place to the window [xmin, xmax] x [ymin, ymax].

    Args:
      boxes: (tensor) bounding boxes of (xmin,ymin,xmax,ymax), sized [N,4].
      xmin: (number) min value of x.
      ymin: (number) min value of y.
      xmax: (number) max value of x.
      ymax: (number) max value of y.

    Returns:
      (tensor) the same tensor, clamped in place.
    """
    # Columns 0/2 are x coordinates, columns 1/3 are y coordinates.
    bounds = ((xmin, xmax), (ymin, ymax), (xmin, xmax), (ymin, ymax))
    for col, (lo, hi) in enumerate(bounds):
        boxes[:, col].clamp_(min=lo, max=hi)
    return boxes
def box_iou(box1, box2):
    """Compute the intersection over union of two sets of boxes.

    The box order must be (xmin, ymin, xmax, ymax).

    Args:
      box1: (tensor) bounding boxes, sized [N,4].
      box2: (tensor) bounding boxes, sized [M,4].

    Return:
      (tensor) iou, sized [N,M].

    Reference:
      https://github.com/chainer/chainercv/blob/master/chainercv/utils/bbox/bbox_iou.py
    """
    # Broadcast to all N x M pairs of boxes.
    top_left = torch.max(box1[:, None, :2], box2[:, :2])        # [N,M,2]
    bottom_right = torch.min(box1[:, None, 2:], box2[:, 2:])    # [N,M,2]
    dims = (bottom_right - top_left).clamp(min=0)                 # [N,M,2]
    inter = dims[:, :, 0] * dims[:, :, 1]                         # [N,M]
    area1 = (box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1])  # [N,]
    area2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1])  # [M,]
    return inter / (area1[:, None] + area2 - inter)
| [
"torch.log",
"torch.max",
"torch.Tensor",
"math.sqrt",
"torch.exp",
"torch.min",
"torch.full_like",
"torch.tensor",
"numpy.array",
"torchvision.ops.boxes.nms",
"torch.zeros",
"torch.cat",
"torch.device"
] | [((7271, 7307), 'torch.cat', 'torch.cat', (['[a - b / 2, a + b / 2]', '(1)'], {}), '([a - b / 2, a + b / 2], 1)\n', (7280, 7307), False, 'import torch\n'), ((8308, 8349), 'torch.max', 'torch.max', (['box1[:, None, :2]', 'box2[:, :2]'], {}), '(box1[:, None, :2], box2[:, :2])\n', (8317, 8349), False, 'import torch\n'), ((8367, 8408), 'torch.min', 'torch.min', (['box1[:, None, 2:]', 'box2[:, 2:]'], {}), '(box1[:, None, 2:], box2[:, 2:])\n', (8376, 8408), False, 'import torch\n'), ((4297, 4327), 'torch.cat', 'torch.cat', (['[loc_xy, loc_wh]', '(1)'], {}), '([loc_xy, loc_wh], 1)\n', (4306, 4327), False, 'import torch\n'), ((5452, 5492), 'torch.cat', 'torch.cat', (['[xy - wh / 2, xy + wh / 2]', '(1)'], {}), '([xy - wh / 2, xy + wh / 2], 1)\n', (5461, 5492), False, 'import torch\n'), ((6649, 6668), 'torch.cat', 'torch.cat', (['boxes', '(0)'], {}), '(boxes, 0)\n', (6658, 6668), False, 'import torch\n'), ((6686, 6706), 'torch.cat', 'torch.cat', (['labels', '(0)'], {}), '(labels, 0)\n', (6695, 6706), False, 'import torch\n'), ((6724, 6744), 'torch.cat', 'torch.cat', (['scores', '(0)'], {}), '(scores, 0)\n', (6733, 6744), False, 'import torch\n'), ((7232, 7266), 'torch.cat', 'torch.cat', (['[(a + b) / 2, b - a]', '(1)'], {}), '([(a + b) / 2, b - a], 1)\n', (7241, 7266), False, 'import torch\n'), ((1466, 1485), 'torch.Tensor', 'torch.Tensor', (['boxes'], {}), '(boxes)\n', (1478, 1485), False, 'import torch\n'), ((2503, 2522), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2515, 2522), False, 'import torch\n'), ((2795, 2857), 'torch.zeros', 'torch.zeros', (['(num_priors, 4)'], {'dtype': 'boxes.dtype', 'device': 'device'}), '((num_priors, 4), dtype=boxes.dtype, device=device)\n', (2806, 2857), False, 'import torch\n'), ((2884, 2942), 'torch.zeros', 'torch.zeros', (['num_priors'], {'dtype': 'labels.dtype', 'device': 'device'}), '(num_priors, dtype=labels.dtype, device=device)\n', (2895, 2942), False, 'import torch\n'), ((4219, 4263), 'torch.log', 
'torch.log', (['(boxes[:, 2:] / prior_boxes[:, 2:])'], {}), '(boxes[:, 2:] / prior_boxes[:, 2:])\n', (4228, 4263), False, 'import torch\n'), ((5167, 5186), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5179, 5186), False, 'import torch\n'), ((5368, 5410), 'torch.exp', 'torch.exp', (['(loc_preds[:, 2:] * variances[1])'], {}), '(loc_preds[:, 2:] * variances[1])\n', (5377, 5410), False, 'import torch\n'), ((6085, 6112), 'torchvision.ops.boxes.nms', 'nms', (['box', 'score', 'nms_thresh'], {}), '(box, score, nms_thresh)\n', (6088, 6112), False, 'from torchvision.ops.boxes import nms\n'), ((928, 970), 'math.sqrt', 'math.sqrt', (['(box_sizes[i] * box_sizes[i + 1])'], {}), '(box_sizes[i] * box_sizes[i + 1])\n', (937, 970), False, 'import math\n'), ((5884, 5919), 'torch.tensor', 'torch.tensor', ([], {'data': '(0)', 'device': 'device'}), '(data=0, device=device)\n', (5896, 5919), False, 'import torch\n'), ((6248, 6327), 'torch.full_like', 'torch.full_like', (['score[keep]'], {'fill_value': '(i + 1)', 'dtype': 'torch.long', 'device': 'device'}), '(score[keep], fill_value=i + 1, dtype=torch.long, device=device)\n', (6263, 6327), False, 'import torch\n'), ((6579, 6595), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (6591, 6595), False, 'import torch\n'), ((6597, 6613), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (6609, 6613), False, 'import torch\n'), ((6615, 6631), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (6627, 6631), False, 'import torch\n'), ((1510, 1526), 'numpy.array', 'np.array', (['priors'], {}), '(priors)\n', (1518, 1526), True, 'import numpy as np\n'), ((1181, 1194), 'math.sqrt', 'math.sqrt', (['ar'], {}), '(ar)\n', (1190, 1194), False, 'import math\n'), ((1200, 1213), 'math.sqrt', 'math.sqrt', (['ar'], {}), '(ar)\n', (1209, 1213), False, 'import math\n'), ((1262, 1275), 'math.sqrt', 'math.sqrt', (['ar'], {}), '(ar)\n', (1271, 1275), False, 'import math\n'), ((1281, 1294), 'math.sqrt', 'math.sqrt', (['ar'], 
{}), '(ar)\n', (1290, 1294), False, 'import math\n'), ((1339, 1352), 'math.sqrt', 'math.sqrt', (['ar'], {}), '(ar)\n', (1348, 1352), False, 'import math\n'), ((1358, 1371), 'math.sqrt', 'math.sqrt', (['ar'], {}), '(ar)\n', (1367, 1371), False, 'import math\n'), ((1416, 1429), 'math.sqrt', 'math.sqrt', (['ar'], {}), '(ar)\n', (1425, 1429), False, 'import math\n'), ((1435, 1448), 'math.sqrt', 'math.sqrt', (['ar'], {}), '(ar)\n', (1444, 1448), False, 'import math\n')] |
from typing import Dict, List
import torch
import numpy as np
import copy
from ..modules import Module
from ..datasets import shuffle, collate_dict_wrapper
class ObverterDatasamplingModule(Module):
    def __init__(self,
                 id:str,
                 config:Dict[str,object]):
        """
        Data-sampling module wired into the pipeline's stream system.

        :param id: str defining the ID of the module.
        :param config: Dict of hyperparameters; must contain "batch_size".
        """
        # Map this module's logical input names onto the pipeline's global
        # stream keys (dataset reference, epoch/mode/cuda signals, counters).
        input_stream_ids = {
            "dataset":"current_dataset:ref",
            "epoch":"signals:epoch",
            "mode":"signals:mode",
            "use_cuda":"signals:use_cuda",
            "it_sample":"signals:it_sample",
            # step in the sequence of repetitions of the current batch
            "it_step":"signals:it_step",
            # step in the communication round.
        }
        super(ObverterDatasamplingModule, self).__init__(
            id=id,
            type="ObverterDatasamplingModule",
            config=config,
            input_stream_ids=input_stream_ids)
        # self.config is presumably populated by the Module base class from
        # the `config` argument — TODO confirm against Module.__init__.
        self.batch_size = self.config["batch_size"]
        self.collate_fn = collate_dict_wrapper
def compute(self, input_streams_dict:Dict[str,object]) -> Dict[str,object] :
"""
:param input_streams_dict: Dict that should contain, at least, the following keys and values:
- `'sentences_logits'`: Tensor of shape `(batch_size, max_sentence_length, vocab_size)` containing the padded sequence of logits over symbols.
- `'sentences_widx'`: Tensor of shape `(batch_size, max_sentence_length, 1)` containing the padded sequence of symbols' indices.
- `'sentences_one_hot'`: Tensor of shape `(batch_size, max_sentence_length, vocab_size)` containing the padded sequence of one-hot-encoded symbols.
- `'experiences'`: Tensor of shape `(batch_size, *self.obs_shape)`.
- `'exp_latents'`: Tensor of shape `(batch_size, nbr_latent_dimensions)`.
- `'multi_round'`: Boolean defining whether to utter a sentence back or not.
- `'graphtype'`: String defining the type of symbols used in the output sentence:
- `'categorical'`: one-hot-encoded symbols.
- `'gumbel_softmax'`: continuous relaxation of a categorical distribution.
- `'straight_through_gumbel_softmax'`: improved continuous relaxation...
- `'obverter'`: obverter training scheme...
- `'tau0'`: Float, temperature with which to apply gumbel-softmax estimator.
- `'sample'`: Dict that contains the speaker and listener experiences as well as the target index.
- `'config'`: Dict of hyperparameters to the referential game.
- `'mode'`: String that defines what mode we are in, e.g. 'train' or 'test'. Those keywords are expected.
- `'it'`: Integer specifying the iteration number of the current function call.
"""
outputs_dict = {}
epoch = input_streams_dict["epoch"]
mode = input_streams_dict["mode"]
it_step = input_streams_dict["it_step"]
it_sample = input_streams_dict["it_sample"]
if "train" in mode and it_step == 0:
dataset = input_streams_dict["dataset"]
# assumes DualLabeledDataset...
train_dataset = dataset.datasets["train"]
latents_to_possible_indices = train_dataset.latents_to_possible_indices
# Make the descriptive ratio no longer effective:
dataset.kwargs["descriptive"] = False
idxconverter = train_dataset.indices
batch = []
n_same = int(0.25*self.batch_size)
n_same_shape = int(0.3*self.batch_size)
n_same_color = int(0.2*self.batch_size)
n_random = self.batch_size - n_same_shape - n_same_color - n_same
for i in range(n_same):
speaker_idx = np.random.randint(len(dataset))
latents_class = train_dataset.getlatentclass(speaker_idx)
color_id = latents_class[0]
shape_id = latents_class[1]
listener_idx = np.random.choice(
[
idx
for idx in latents_to_possible_indices[color_id][shape_id]
if idx != speaker_idx
]
)
batch.append(self.sample(dataset=dataset, speaker_idx=speaker_idx, listener_idx=listener_idx, same=True))
for i in range(n_same_shape):
speaker_idx = np.random.randint(len(dataset))
latents_class = train_dataset.getlatentclass(speaker_idx)
speaker_color_id = latents_class[0]
shape_id = latents_class[1]
choice_set = copy.deepcopy(train_dataset.same_shape_indices[shape_id])
choice_set.remove(speaker_idx)
listener_idx = np.random.choice(choice_set)
batch.append(self.sample(dataset=dataset, speaker_idx=speaker_idx, listener_idx=listener_idx, same=False))
for i in range(n_same_color):
speaker_idx = np.random.randint(len(dataset))
latents_class = train_dataset.getlatentclass(speaker_idx)
color_id = latents_class[0]
speaker_shape_id = latents_class[1]
choice_set = copy.deepcopy(train_dataset.same_color_indices[color_id])
choice_set.remove(speaker_idx)
listener_idx = np.random.choice(choice_set)
batch.append(self.sample(dataset=dataset, speaker_idx=speaker_idx, listener_idx=listener_idx, same=False))
for i in range(n_random):
speaker_idx = np.random.randint(len(dataset))
speaker_latents_class = train_dataset.getlatentclass(speaker_idx)
speaker_color_id = speaker_latents_class[0]
speaker_shape_id = speaker_latents_class[1]
listener_idx = np.random.randint(len(dataset))
listener_latents_class = train_dataset.getlatentclass(listener_idx)
listener_color_id = listener_latents_class[0]
listener_shape_id = listener_latents_class[1]
same = (speaker_shape_id == listener_shape_id) and (speaker_color_id == listener_color_id)
batch.append(self.sample(dataset=dataset, speaker_idx=speaker_idx, listener_idx=listener_idx, same=same))
new_sample = self.collate_fn(batch)
if input_streams_dict["use_cuda"]:
new_sample = new_sample.cuda()
outputs_dict["current_dataloader:sample"] = new_sample
return outputs_dict
def sample(self, dataset, speaker_idx, listener_idx, same:bool=True):
# Creating speaker's dictionnary:
speaker_sample_d = dataset.sample(idx=speaker_idx)
# Adding batch dimension:
for k,v in speaker_sample_d.items():
if not(isinstance(v, torch.Tensor)):
v = torch.Tensor(v)
speaker_sample_d[k] = v.unsqueeze(0)
if dataset.kwargs['observability'] == "partial":
for k,v in speaker_sample_d.items():
speaker_sample_d[k] = v[:,0].unsqueeze(1)
##--------------------------------------------------------------
##--------------------------------------------------------------
# Creating listener's dictionnary:
listener_sample_d = dataset.sample(idx=listener_idx)
# Adding batch dimension:
for k,v in listener_sample_d.items():
if not(isinstance(v, torch.Tensor)):
v = torch.Tensor(v)
listener_sample_d[k] = v.unsqueeze(0)
listener_sample_d["experiences"], target_decision_idx, orders = shuffle(listener_sample_d["experiences"])
if not same:
# The target_decision_idx is set to `nbr_experiences`:
target_decision_idx = (dataset.nbr_distractors[dataset.mode]+1)*torch.ones(1).long()
# shuffling the other keys similarly:
for k,v in listener_sample_d.items():
if k == "experiences": continue
listener_sample_d[k], _, _ = shuffle(v, orders=orders)
##--------------------------------------------------------------
##--------------------------------------------------------------
output_dict = {"target_decision_idx":target_decision_idx}
for k,v in listener_sample_d.items():
output_dict[f"listener_{k}"] = v
for k,v in speaker_sample_d.items():
output_dict[f"speaker_{k}"] = v
return output_dict | [
"numpy.random.choice",
"torch.ones",
"torch.Tensor",
"copy.deepcopy"
] | [((4135, 4244), 'numpy.random.choice', 'np.random.choice', (['[idx for idx in latents_to_possible_indices[color_id][shape_id] if idx !=\n speaker_idx]'], {}), '([idx for idx in latents_to_possible_indices[color_id][\n shape_id] if idx != speaker_idx])\n', (4151, 4244), True, 'import numpy as np\n'), ((4800, 4857), 'copy.deepcopy', 'copy.deepcopy', (['train_dataset.same_shape_indices[shape_id]'], {}), '(train_dataset.same_shape_indices[shape_id])\n', (4813, 4857), False, 'import copy\n'), ((4936, 4964), 'numpy.random.choice', 'np.random.choice', (['choice_set'], {}), '(choice_set)\n', (4952, 4964), True, 'import numpy as np\n'), ((5393, 5450), 'copy.deepcopy', 'copy.deepcopy', (['train_dataset.same_color_indices[color_id]'], {}), '(train_dataset.same_color_indices[color_id])\n', (5406, 5450), False, 'import copy\n'), ((5529, 5557), 'numpy.random.choice', 'np.random.choice', (['choice_set'], {}), '(choice_set)\n', (5545, 5557), True, 'import numpy as np\n'), ((7127, 7142), 'torch.Tensor', 'torch.Tensor', (['v'], {}), '(v)\n', (7139, 7142), False, 'import torch\n'), ((7779, 7794), 'torch.Tensor', 'torch.Tensor', (['v'], {}), '(v)\n', (7791, 7794), False, 'import torch\n'), ((8133, 8146), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (8143, 8146), False, 'import torch\n')] |
import multiprocessing
import itertools
import numpy as np
import pandas as pd
import scipy.optimize
import pickle
import sys
if '../' not in sys.path:
sys.path.append('../')
import tick
from tick.hawkes.simulation import SimuHawkesExpKernels
from tick.hawkes.inference import HawkesConditionalLaw, HawkesADM4, HawkesCumulantMatching, HawkesSumGaussians
from desync_mhp.lib.inference import HawkesExpKernConditionalMLE
import lib
# Ground-truth adjacency matrix of the 10-dimensional simulated Hawkes
# process used throughout the synthetic experiments below.
G_true = np.array([[0.23, 0.23, 0.23, 0.23, 0.23, 0.  , 0.  , 0.  , 0.  , 0.23],
       [0.  , 0.23, 0.23, 0.23, 0.23, 0.  , 0.  , 0.  , 0.23, 0.  ],
       [0.  , 0.  , 0.23, 0.23, 0.23, 0.  , 0.  , 0.  , 0.  , 0.  ],
       [0.  , 0.  , 0.  , 0.23, 0.23, 0.  , 0.  , 0.  , 0.  , 0.  ],
       [0.  , 0.  , 0.  , 0.  , 0.23, 0.  , 0.  , 0.  , 0.  , 0.  ],
       [0.  , 0.  , 0.  , 0.  , 0.  , 0.23, 0.  , 0.  , 0.  , 0.  ],
       [0.  , 0.23, 0.  , 0.  , 0.  , 0.23, 0.23, 0.  , 0.  , 0.  ],
       [0.23, 0.  , 0.  , 0.  , 0.  , 0.23, 0.23, 0.23, 0.  , 0.  ],
       [0.  , 0.  , 0.  , 0.  , 0.  , 0.23, 0.23, 0.23, 0.23, 0.  ],
       [0.  , 0.  , 0.  , 0.  , 0.  , 0.23, 0.23, 0.23, 0.23, 0.23]])
# Number of nodes / dimensions of the process.
dim = len(G_true)
# Decay rate of the exponential kernels.
decay = 1.0
# Constant baseline intensity for every node.
mu_true = 0.01 * np.ones(dim)
# Compute the ground-truth cumulants: C_true (covariance) and Kc_true
# (skewness) serve as references for tuning the NPHC integration support;
# L_true is presumably the mean intensities -- confirm against
# lib.utils.cumulants.compute_cumulants.
L_true, C_true, Kc_true = lib.utils.cumulants.compute_cumulants(G=G_true, mus=mu_true,)
def simulate(noise_scale, seed):
    """Simulate noisy realizations of the ground-truth Hawkes process.

    Parameters
    ----------
    noise_scale : float
        Scale of the exponential desynchronization noise, applied
        identically to every dimension.
    seed : int
        Seed forwarded to the noisy-simulation wrapper.

    Returns
    -------
    list
        Noisy event realizations produced by the wrapper's ``simulate()``.
    """
    # Bug fix: removed the dead `noise_rand_state = np.random.RandomState(...)`
    # line -- it was never used, and its presence falsely suggested that the
    # noise sampling was seeded here (seeding actually happens via `seed`
    # inside the simulator wrapper below).
    noise_dist_arr = ['exponential'] * dim
    noise_scale_arr = [noise_scale] * dim
    # Base (noise-free) Hawkes simulator; max_jumps=0 because the noisy
    # wrapper below drives the actual simulation length.
    simu_hawkes = SimuHawkesExpKernels(adjacency=G_true, decays=decay,
                                        baseline=mu_true, max_jumps=0,
                                        verbose=False)
    # Wrapper that adds per-dimension desynchronization noise.
    simu_noisy_hawkes = lib.simulation.noisy_hawkes.SimulatorNoisyHawkesCustomKernels(
        simu_obj=simu_hawkes,
        noise_dist=noise_dist_arr,
        noise_scale=noise_scale_arr,
        burn_in_quantile=0.99,
        num_real=5,
        num_jumps=100000,
        seed=seed,
        no_multi=True)
    return simu_noisy_hawkes.simulate()
def find_best_integration_support(events, max_iter=20, initial_simplex=[[10.0], [50.0]], verbose=False):
    """Search for the NPHC integration support minimizing the skewness error.

    Runs a 1-D Nelder-Mead search over the integration support of
    ``HawkesCumulantMatching`` (fit with ``max_iter=0``, i.e. only the
    cumulant estimates are computed) and scores each candidate by the L2
    distance between estimated and ground-truth skewness ``Kc_true``.

    Parameters
    ----------
    events : list
        (Noisy) event realizations, as accepted by tick estimators.
    max_iter : int, optional
        Budget for Nelder-Mead iterations and function evaluations.
    initial_simplex : list, optional
        Initial simplex for the scalar search (not mutated).
    verbose : bool, optional
        If True, print each candidate support with its losses.

    Returns
    -------
    float
        The best integration support found.
    """
    def int_support_loss(H, events):
        # max_iter=0: estimate cumulants only, skip the matching step.
        nphc = HawkesCumulantMatching(integration_support=float(H), max_iter=0, verbose=False)
        nphc.fit(events)
        skew_loss = np.linalg.norm(nphc.skewness - Kc_true, ord=2)
        cov_loss = np.linalg.norm(nphc.covariance - C_true, ord=2)
        if verbose:
            # Bug fix: the previous message referenced an undefined `loss`
            # variable, raising NameError whenever verbose=True.
            print(f"{float(H):>6.2f}, skew_loss={skew_loss:.2e}, cov_loss={cov_loss:.2e}")
        return skew_loss
    # Bug fix: the Nelder-Mead option is spelled 'maxiter'; the previous
    # 'max_iter' key was an unknown option (silently ignored with an
    # OptimizeWarning), so the iteration budget was never honored.
    res = scipy.optimize.minimize(
        int_support_loss,
        x0=20.0,
        args=(events,),
        options={'maxiter': max_iter,
                 'maxfev': max_iter,
                 'initial_simplex': initial_simplex},
        method='Nelder-Mead')
    return float(res.x)
if __name__ == "__main__":
    # Experiment grid: 25 log-spaced noise scales x 20 fixed simulation seeds.
    noise_scale_range = np.logspace(-1, 1.4, 25)
    sim_seed_list = np.random.RandomState(703994370).randint(0, 2**32 - 1, size=20)
    args_iter = list(itertools.product(noise_scale_range, sim_seed_list))
    data = list()
    for it, (noise_scale, sim_seed) in enumerate(args_iter):
        print()
        print(f"Iter {it:>2d}/{len(args_iter):>2d} | noise_scale: {noise_scale:.2e}...")
        # Simulate noisy data for this (noise_scale, seed) cell.
        noisy_events = simulate(noise_scale=noise_scale, seed=sim_seed)
        # Baseline 1 -- ADM4: parametric estimator with exponential kernels.
        adm4 = HawkesADM4(decay=1.0, verbose=False)
        adm4.fit(noisy_events)
        print(f"ADM4: done.")
        # Baseline 2 -- NPHC: cumulant matching; the integration support is
        # tuned first against the ground-truth cumulants.
        H = find_best_integration_support(noisy_events)
        nphc = HawkesCumulantMatching(integration_support=H, max_iter=20000, verbose=False)
        nphc.fit(noisy_events)
        print(f"NPHC: done.")
        # Baseline 3 -- WH: conditional-law (Wiener-Hopf) estimator.
        wh = HawkesConditionalLaw(delta_lag=0.1, min_lag=0.0001, max_lag=100.0,
                                  n_quad=20, max_support=10.0)
        wh.fit(noisy_events)
        wh_adj = wh.get_kernel_norms()
        print(f"WH: done.")
        # End of the observation window: latest timestamp over all
        # realizations and dimensions.
        end_time = max([max(map(max, real)) for real in noisy_events])
        # Proposed method -- Desync-MLE: conditional MLE that jointly
        # estimates the desynchronization noise and the Hawkes parameters.
        desyncmle = HawkesExpKernConditionalMLE(
            decay=1.0,
            noise_penalty='l2', noise_C=1e3,
            hawkes_penalty='l1', hawkes_base_C=1e2, hawkes_adj_C=1e5,
            solver='sgd', tol=1e-4, max_iter=1000,
            verbose=False
        )
        desyncmle.fit(noisy_events, end_time=end_time,
                      z_start=np.zeros(dim),
                      theta_start=np.hstack((
                          0.01*np.ones(dim),
                          np.random.uniform(0.0, 0.1, size=dim**2)
                      )),
                      callback=None)
        # coeffs layout appears to be [noise (dim), baseline (dim),
        # adjacency (dim*dim)] -- TODO confirm against
        # HawkesExpKernConditionalMLE before relying on this slice.
        desyncmle_adj = np.reshape(desyncmle.coeffs[2*dim:], (dim, dim))
        print(f"Desync-MLE: done")
        # Store results
        data.append({
            'noise_scale': noise_scale,
            'sim_seed': sim_seed,
            'adm4_adj': adm4.adjacency.copy(),
            'nphc_adj': nphc.adjacency.copy(),
            'wh_adj': wh_adj,
            'desyncmle_adj': desyncmle_adj,
            'mean': nphc.mean_intensity.copy(),
            'cov': nphc.covariance.copy(),
            'skew': nphc.skewness.copy(),
            'H': H,
        })
    # Save the results
    pd.DataFrame(data).to_pickle('res-synthetic.pkl')
| [
"tick.hawkes.inference.HawkesConditionalLaw",
"numpy.reshape",
"numpy.ones",
"lib.utils.cumulants.compute_cumulants",
"tick.hawkes.inference.HawkesADM4",
"tick.hawkes.simulation.SimuHawkesExpKernels",
"itertools.product",
"tick.hawkes.inference.HawkesCumulantMatching",
"numpy.array",
"lib.simulati... | [((448, 1045), 'numpy.array', 'np.array', (['[[0.23, 0.23, 0.23, 0.23, 0.23, 0.0, 0.0, 0.0, 0.0, 0.23], [0.0, 0.23, 0.23,\n 0.23, 0.23, 0.0, 0.0, 0.0, 0.23, 0.0], [0.0, 0.0, 0.23, 0.23, 0.23, 0.0,\n 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.23, 0.23, 0.0, 0.0, 0.0, 0.0, \n 0.0], [0.0, 0.0, 0.0, 0.0, 0.23, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, \n 0.0, 0.0, 0.0, 0.23, 0.0, 0.0, 0.0, 0.0], [0.0, 0.23, 0.0, 0.0, 0.0, \n 0.23, 0.23, 0.0, 0.0, 0.0], [0.23, 0.0, 0.0, 0.0, 0.0, 0.23, 0.23, 0.23,\n 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.23, 0.23, 0.23, 0.23, 0.0], [0.0,\n 0.0, 0.0, 0.0, 0.0, 0.23, 0.23, 0.23, 0.23, 0.23]]'], {}), '([[0.23, 0.23, 0.23, 0.23, 0.23, 0.0, 0.0, 0.0, 0.0, 0.23], [0.0, \n 0.23, 0.23, 0.23, 0.23, 0.0, 0.0, 0.0, 0.23, 0.0], [0.0, 0.0, 0.23, \n 0.23, 0.23, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.23, 0.23, 0.0, \n 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.23, 0.0, 0.0, 0.0, 0.0, 0.0\n ], [0.0, 0.0, 0.0, 0.0, 0.0, 0.23, 0.0, 0.0, 0.0, 0.0], [0.0, 0.23, 0.0,\n 0.0, 0.0, 0.23, 0.23, 0.0, 0.0, 0.0], [0.23, 0.0, 0.0, 0.0, 0.0, 0.23, \n 0.23, 0.23, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.23, 0.23, 0.23, 0.23,\n 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.23, 0.23, 0.23, 0.23, 0.23]])\n', (456, 1045), True, 'import numpy as np\n'), ((1373, 1433), 'lib.utils.cumulants.compute_cumulants', 'lib.utils.cumulants.compute_cumulants', ([], {'G': 'G_true', 'mus': 'mu_true'}), '(G=G_true, mus=mu_true)\n', (1410, 1433), False, 'import lib\n'), ((157, 179), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (172, 179), False, 'import sys\n'), ((1297, 1309), 'numpy.ones', 'np.ones', (['dim'], {}), '(dim)\n', (1304, 1309), True, 'import numpy as np\n'), ((1512, 1544), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'None'}), '(seed=None)\n', (1533, 1544), True, 'import numpy as np\n'), ((1710, 1812), 'tick.hawkes.simulation.SimuHawkesExpKernels', 'SimuHawkesExpKernels', ([], {'adjacency': 'G_true', 'decays': 'decay', 
'baseline': 'mu_true', 'max_jumps': '(0)', 'verbose': '(False)'}), '(adjacency=G_true, decays=decay, baseline=mu_true,\n max_jumps=0, verbose=False)\n', (1730, 1812), False, 'from tick.hawkes.simulation import SimuHawkesExpKernels\n'), ((1947, 2178), 'lib.simulation.noisy_hawkes.SimulatorNoisyHawkesCustomKernels', 'lib.simulation.noisy_hawkes.SimulatorNoisyHawkesCustomKernels', ([], {'simu_obj': 'simu_hawkes', 'noise_dist': 'noise_dist_arr', 'noise_scale': 'noise_scale_arr', 'burn_in_quantile': '(0.99)', 'num_real': '(5)', 'num_jumps': '(100000)', 'seed': 'seed', 'no_multi': '(True)'}), '(simu_obj=\n simu_hawkes, noise_dist=noise_dist_arr, noise_scale=noise_scale_arr,\n burn_in_quantile=0.99, num_real=5, num_jumps=100000, seed=seed,\n no_multi=True)\n', (2008, 2178), False, 'import lib\n'), ((3245, 3269), 'numpy.logspace', 'np.logspace', (['(-1)', '(1.4)', '(25)'], {}), '(-1, 1.4, 25)\n', (3256, 3269), True, 'import numpy as np\n'), ((2613, 2659), 'numpy.linalg.norm', 'np.linalg.norm', (['(nphc.skewness - Kc_true)'], {'ord': '(2)'}), '(nphc.skewness - Kc_true, ord=2)\n', (2627, 2659), True, 'import numpy as np\n'), ((2679, 2726), 'numpy.linalg.norm', 'np.linalg.norm', (['(nphc.covariance - C_true)'], {'ord': '(2)'}), '(nphc.covariance - C_true, ord=2)\n', (2693, 2726), True, 'import numpy as np\n'), ((3375, 3426), 'itertools.product', 'itertools.product', (['noise_scale_range', 'sim_seed_list'], {}), '(noise_scale_range, sim_seed_list)\n', (3392, 3426), False, 'import itertools\n'), ((3741, 3777), 'tick.hawkes.inference.HawkesADM4', 'HawkesADM4', ([], {'decay': '(1.0)', 'verbose': '(False)'}), '(decay=1.0, verbose=False)\n', (3751, 3777), False, 'from tick.hawkes.inference import HawkesConditionalLaw, HawkesADM4, HawkesCumulantMatching, HawkesSumGaussians\n'), ((3926, 4002), 'tick.hawkes.inference.HawkesCumulantMatching', 'HawkesCumulantMatching', ([], {'integration_support': 'H', 'max_iter': '(20000)', 'verbose': '(False)'}), '(integration_support=H, 
max_iter=20000, verbose=False)\n', (3948, 4002), False, 'from tick.hawkes.inference import HawkesConditionalLaw, HawkesADM4, HawkesCumulantMatching, HawkesSumGaussians\n'), ((4091, 4191), 'tick.hawkes.inference.HawkesConditionalLaw', 'HawkesConditionalLaw', ([], {'delta_lag': '(0.1)', 'min_lag': '(0.0001)', 'max_lag': '(100.0)', 'n_quad': '(20)', 'max_support': '(10.0)'}), '(delta_lag=0.1, min_lag=0.0001, max_lag=100.0, n_quad=\n 20, max_support=10.0)\n', (4111, 4191), False, 'from tick.hawkes.inference import HawkesConditionalLaw, HawkesADM4, HawkesCumulantMatching, HawkesSumGaussians\n'), ((4451, 4655), 'desync_mhp.lib.inference.HawkesExpKernConditionalMLE', 'HawkesExpKernConditionalMLE', ([], {'decay': '(1.0)', 'noise_penalty': '"""l2"""', 'noise_C': '(1000.0)', 'hawkes_penalty': '"""l1"""', 'hawkes_base_C': '(100.0)', 'hawkes_adj_C': '(100000.0)', 'solver': '"""sgd"""', 'tol': '(0.0001)', 'max_iter': '(1000)', 'verbose': '(False)'}), "(decay=1.0, noise_penalty='l2', noise_C=1000.0,\n hawkes_penalty='l1', hawkes_base_C=100.0, hawkes_adj_C=100000.0, solver\n ='sgd', tol=0.0001, max_iter=1000, verbose=False)\n", (4478, 4655), False, 'from desync_mhp.lib.inference import HawkesExpKernConditionalMLE\n'), ((5038, 5088), 'numpy.reshape', 'np.reshape', (['desyncmle.coeffs[2 * dim:]', '(dim, dim)'], {}), '(desyncmle.coeffs[2 * dim:], (dim, dim))\n', (5048, 5088), True, 'import numpy as np\n'), ((3290, 3322), 'numpy.random.RandomState', 'np.random.RandomState', (['(703994370)'], {}), '(703994370)\n', (3311, 3322), True, 'import numpy as np\n'), ((5603, 5621), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (5615, 5621), True, 'import pandas as pd\n'), ((4788, 4801), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (4796, 4801), True, 'import numpy as np\n'), ((4914, 4956), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(0.1)'], {'size': '(dim ** 2)'}), '(0.0, 0.1, size=dim ** 2)\n', (4931, 4956), True, 'import numpy as np\n'), ((4876, 4888), 
'numpy.ones', 'np.ones', (['dim'], {}), '(dim)\n', (4883, 4888), True, 'import numpy as np\n')] |
import numpy as np
import torch
import torchvision.transforms as transforms
from torch.utils.data import Dataset
class PhysNetDataset(Dataset):
    """Dataset pairing video clips with scalar labels for PhysNet training.

    ``__getitem__`` moves axis 3 of each clip (presumably the channel axis)
    to the front -- (d0, d1, d2, C) -> (C, d0, d1, d2) -- and returns both
    clip and label as float32 tensors.
    """
    def __init__(self, video_data, label_data):
        self.transform = transforms.Compose([transforms.ToTensor()])
        self.video_data = video_data
        self.label = label_data

    def __getitem__(self, index):
        # Samplers may hand us a tensor index; unwrap it first.
        if torch.is_tensor(index):
            index = index.tolist()
        clip = np.transpose(self.video_data[index], (3, 0, 1, 2))
        return (torch.tensor(clip, dtype=torch.float32),
                torch.tensor(self.label[index], dtype=torch.float32))

    def __len__(self):
        return len(self.label)
| [
"torch.is_tensor",
"torchvision.transforms.ToTensor",
"numpy.transpose",
"torch.tensor"
] | [((391, 413), 'torch.is_tensor', 'torch.is_tensor', (['index'], {}), '(index)\n', (406, 413), False, 'import torch\n'), ((583, 635), 'torch.tensor', 'torch.tensor', (['self.label[index]'], {'dtype': 'torch.float32'}), '(self.label[index], dtype=torch.float32)\n', (595, 635), False, 'import torch\n'), ((488, 538), 'numpy.transpose', 'np.transpose', (['self.video_data[index]', '(3, 0, 1, 2)'], {}), '(self.video_data[index], (3, 0, 1, 2))\n', (500, 538), True, 'import numpy as np\n'), ((247, 268), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (266, 268), True, 'import torchvision.transforms as transforms\n')] |
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
pyarrow = pytest.importorskip("pyarrow")
def test_categorical_is_valid():
    # A categorical array built from plain integers must validate cleanly.
    years = ak._v2.Array([2019, 2020, 2021, 2020, 2019])
    as_categorical = ak._v2.behaviors.categorical.to_categorical(years)
    assert ak._v2.operations.is_valid(as_categorical)
def test_optional_categorical_from_arrow():
    # DictionaryArrays converted from Arrow must carry the "categorical"
    # parameter, both with and without missing (None) indices.
    dictionary = pyarrow.array([2019, 2020, 2021])
    for index_values in ([0, 1, 0, 1, 2, 0, 2],
                         [0, 1, 0, 1, 2, None, 0, 2]):
        dict_array = pyarrow.DictionaryArray.from_arrays(
            pyarrow.array(index_values), dictionary
        )
        converted = ak._v2.operations.from_arrow(dict_array)
        assert converted.layout.parameter("__array__") == "categorical"
def test_categorical_from_arrow_ChunkedArray():
    # Dictionary indices must round-trip through from_arrow for tables whose
    # record batches share one dictionary schema, and for tables mixing two.
    indices = [0, 1, 0, 1, 2, 0, 2]
    indices_new_schema = [0, 1, 0, 1, 0]
    dict_array = pyarrow.DictionaryArray.from_arrays(
        pyarrow.array(indices), pyarrow.array([2019, 2020, 2021])
    )
    dict_array_new_schema = pyarrow.DictionaryArray.from_arrays(
        pyarrow.array(indices_new_schema), pyarrow.array([2019, 2020])
    )
    batch = pyarrow.RecordBatch.from_arrays([dict_array], ["year"])
    batch_new_schema = pyarrow.RecordBatch.from_arrays(
        [dict_array_new_schema], ["year"]
    )
    table = pyarrow.Table.from_batches([batch] * 3)
    table_mixed_schema = pyarrow.Table.from_batches([batch, batch_new_schema])
    array = ak._v2.operations.from_arrow(table)
    array_mixed_schema = ak._v2.operations.from_arrow(table_mixed_schema)
    assert np.asarray(array.layout.contents[0].index).tolist() == indices * 3
    assert (
        np.asarray(array_mixed_schema.layout.contents[0].index).tolist()
        == indices + indices_new_schema
    )
| [
"awkward._v2.Array",
"awkward._v2.behaviors.categorical.to_categorical",
"numpy.asarray",
"awkward._v2.operations.is_valid",
"pytest.importorskip",
"awkward._v2.operations.from_arrow"
] | [((197, 227), 'pytest.importorskip', 'pytest.importorskip', (['"""pyarrow"""'], {}), "('pyarrow')\n", (216, 227), False, 'import pytest\n'), ((323, 367), 'awkward._v2.Array', 'ak._v2.Array', (['[2019, 2020, 2021, 2020, 2019]'], {}), '([2019, 2020, 2021, 2020, 2019])\n', (335, 367), True, 'import awkward as ak\n'), ((386, 434), 'awkward._v2.behaviors.categorical.to_categorical', 'ak._v2.behaviors.categorical.to_categorical', (['arr'], {}), '(arr)\n', (429, 434), True, 'import awkward as ak\n'), ((446, 485), 'awkward._v2.operations.is_valid', 'ak._v2.operations.is_valid', (['categorical'], {}), '(categorical)\n', (472, 485), True, 'import awkward as ak\n'), ((862, 902), 'awkward._v2.operations.from_arrow', 'ak._v2.operations.from_arrow', (['dict_array'], {}), '(dict_array)\n', (890, 902), True, 'import awkward as ak\n'), ((1096, 1143), 'awkward._v2.operations.from_arrow', 'ak._v2.operations.from_arrow', (['option_dict_array'], {}), '(option_dict_array)\n', (1124, 1143), True, 'import awkward as ak\n'), ((2080, 2115), 'awkward._v2.operations.from_arrow', 'ak._v2.operations.from_arrow', (['table'], {}), '(table)\n', (2108, 2115), True, 'import awkward as ak\n'), ((2141, 2189), 'awkward._v2.operations.from_arrow', 'ak._v2.operations.from_arrow', (['table_mixed_schema'], {}), '(table_mixed_schema)\n', (2169, 2189), True, 'import awkward as ak\n'), ((2202, 2244), 'numpy.asarray', 'np.asarray', (['array.layout.contents[0].index'], {}), '(array.layout.contents[0].index)\n', (2212, 2244), True, 'import numpy as np\n'), ((2290, 2345), 'numpy.asarray', 'np.asarray', (['array_mixed_schema.layout.contents[0].index'], {}), '(array_mixed_schema.layout.contents[0].index)\n', (2300, 2345), True, 'import numpy as np\n')] |
import numpy as np
import warnings
from scipy.ndimage.interpolation import zoom
import torch
import math
import copy
import cv2
from skimage import measure
import pandas as pd
def resample(imgs, spacing, new_spacing, order=2):
    """Resample a 3-D volume (or 4-D multi-channel stack) to a new spacing.

    Parameters
    ----------
    imgs : np.ndarray
        3-D array, or 4-D array whose last axis indexes channels; each
        channel is resampled independently.
    spacing : np.ndarray
        Current voxel spacing per axis (length 3).
    new_spacing : np.ndarray
        Target voxel spacing per axis (length 3).
    order : int, optional
        Spline interpolation order passed to ``scipy.ndimage.zoom``.

    Returns
    -------
    tuple
        3-D input: ``(resampled, true_spacing, resize_factor)``.
        4-D input: ``(resampled, true_spacing)`` (kept as-is for backward
        compatibility with existing callers).

    Raises
    ------
    ValueError
        If ``imgs`` is neither 3-D nor 4-D.
    """
    if len(imgs.shape) == 3:
        new_shape = np.round(imgs.shape * spacing / new_spacing)
        # The spacing actually achieved after rounding the target shape.
        true_spacing = spacing * imgs.shape / new_shape
        resize_factor = new_shape / imgs.shape
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            imgs = zoom(imgs, resize_factor, mode='nearest', order=order)
        return imgs, true_spacing, resize_factor
    elif len(imgs.shape) == 4:
        n = imgs.shape[-1]
        newimg = []
        for i in range(n):
            channel = imgs[:, :, :, i]  # renamed: `slice` shadowed the builtin
            # Bug fix: the recursive call returns three values; the previous
            # two-value unpacking raised ValueError for every 4-D input.
            # Also forward `order`, which used to be silently dropped.
            newslice, true_spacing, _ = resample(channel, spacing, new_spacing, order=order)
            newimg.append(newslice)
        newimg = np.transpose(np.array(newimg), [1, 2, 3, 0])
        return newimg, true_spacing
    else:
        raise ValueError('wrong shape')
def get_start_ind(center_points):
    """Return the index of the first point whose distance from the first
    point falls in [3, 3.1] (on or just outside a sphere of radius 3);
    -1 when no such point exists.
    """
    origin = np.array([center_points[0][0],
                       center_points[0][1],
                       center_points[0][2]])
    radius = 3
    tolerance = 0.1
    for idx in range(1, len(center_points)):
        candidate = np.array([center_points[idx][0],
                              center_points[idx][1],
                              center_points[idx][2]])
        separation = np.linalg.norm(origin - candidate)
        if radius <= separation <= radius + tolerance:
            return idx
    return -1
def get_spacing_res2(x, spacing_x, spacing_new):
    """Convert a coordinate from spacing ``spacing_x`` to ``spacing_new``
    units, rounded to the nearest integer."""
    converted = (x / spacing_x) * spacing_new
    return int(round(converted))
def get_world_cood(x, spacing_x, spacing_new):
    """Map a coordinate in ``spacing_new`` units back to ``spacing_x``
    (world) units."""
    grid_units = x / spacing_new
    return grid_units * spacing_x
def data_preprocess(img):
    """Clip intensity outliers and z-score normalize an image.

    The mean/std are computed on the *unclipped* array, then applied after
    clipping to the [0.5, 99.5] percentile range. Returns a float tensor of
    shape ``(1, 1, *img.shape)``.
    """
    mu = np.mean(img)
    sigma = np.std(img)
    hi = np.percentile(img, 99.5)
    lo = np.percentile(img, 0.5)
    clipped = np.clip(img, lo, hi)
    # Small epsilon guards against division by zero on constant images.
    normalized = (clipped - mu) / (sigma + 1e-9)
    tensor = torch.from_numpy(np.array([normalized]))
    return tensor.unsqueeze(0)
def get_shell(fl_Num_Points, fl_Radius):
    """Spread ``fl_Num_Points`` points over a sphere of radius ``fl_Radius``
    using the Fibonacci (golden-angle) spiral.

    Returns three parallel lists: x, y and z coordinates.
    """
    xs, ys, zs = [], [], []
    step = 2.0 / fl_Num_Points
    golden_angle = math.pi * (3.0 - math.sqrt(5.0))
    for k in range(fl_Num_Points):
        # z walks uniformly from -1 to 1 (cell midpoints).
        z = ((k * step) - 1.0) + (step / 2.0)
        ring_radius = math.sqrt(1.0 - pow(z, 2.0))
        theta = ((k + 1) % fl_Num_Points) * golden_angle
        x = math.cos(theta) * ring_radius
        y = math.sin(theta) * ring_radius
        xs.append(fl_Radius * x)
        ys.append(fl_Radius * y)
        zs.append(fl_Radius * z)
    return xs, ys, zs
def prob_terminates(pre_y, max_points):
    """Base-2 Shannon entropy of ``pre_y``, normalized by log2(max_points)
    so a uniform distribution over ``max_points`` outcomes scores 1."""
    entropy = torch.sum(-pre_y * torch.log2(pre_y))
    normalizer = torch.log2(torch.from_numpy(np.array([max_points])).float())
    return entropy / normalizer
def get_closer_distance(vessel, target_point):
    """Find the point in ``vessel`` closest to ``target_point``.

    Parameters
    ----------
    vessel : sequence of np.ndarray
        Candidate points.
    target_point : np.ndarray
        Query point.

    Returns
    -------
    tuple
        ``(min_distance, index)`` of the closest point. For an empty
        ``vessel`` returns ``(inf, -1)``; the previous version raised
        UnboundLocalError in that case because ``index`` was never assigned.
    """
    min_dis = float("inf")
    index = -1  # bug fix: initialize so an empty vessel cannot leave it unbound
    for i in range(len(vessel)):
        curr_point = vessel[i]
        dist = np.linalg.norm(target_point - curr_point)
        if dist < min_dis:
            min_dis = dist
            index = i
    return min_dis, index
def get_distance(v1, v2):
    """Euclidean distance between two points."""
    difference = v1 - v2
    return np.linalg.norm(difference)
def get_angle(v1, v2):
    """Angle between two vectors, in degrees (range [0, 180])."""
    denom = np.linalg.norm(v1) * np.linalg.norm(v2)
    # Clip guards against tiny floating-point overshoot outside [-1, 1].
    cos_theta = np.clip(v1.dot(v2) / denom, -1, 1)
    return math.degrees(np.arccos(cos_theta))
def save_info(res: list, path: str):
    """Write the first point of every entry in ``res`` to a CSV file at
    ``path`` with columns x, y, z, formatted to 5 decimal places."""
    points = [entry[0] for entry in res]
    dataframe = pd.DataFrame({
        'x': [p[0] for p in points],
        'y': [p[1] for p in points],
        'z': [p[2] for p in points],
    })
    dataframe.to_csv(path, index=False,
                     columns=['x', 'y', 'z'], sep=',', float_format='%.5f')
def crop_heart(input_arr):
    """Keep only the largest bright connected component on each axial slice,
    intended to isolate the heart region and suppress lung areas.

    :param input_arr: 3-D image array indexed (slice, row, col); values are
        presumably CT intensities in HU -- TODO confirm. Background of the
        output is filled with -1000 (air).
    :return: tuple ``(new_arr, mean_minc, mean_minr, mean_maxc, mean_maxr)``
        where ``new_arr`` retains only each slice's largest-component
        bounding box, and the four integers are the bounding-box corners
        averaged (integer division) over all slices.
    """
    src_array = copy.deepcopy(input_arr)
    z, w, h = src_array.shape
    new_arr = np.zeros((z, w, h))
    # Fill with an air-like value so cropped-out regions read as background.
    new_arr += -1000
    sum_minr = 0
    sum_minc = 0
    sum_maxr = 0
    sum_maxc = 0
    for k in range(z):
        image = src_array[k][:, :]
        # Binarize at 20 (NOTE(review): cv2.threshold requires an 8-bit or
        # float32 input -- confirm the array dtype upstream).
        ret, thresh = cv2.threshold(image, 20, 400, cv2.THRESH_BINARY)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
        # Morphological opening removes small bright structures (e.g. vessels).
        opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, anchor=(-1, -1), iterations=4)
        label_opening = measure.label(opening)
        regionprops = measure.regionprops(label_opening)
        max_area = 0
        index = 0
        # Pick the largest connected component on this slice.
        for i in range(len(regionprops)):
            if regionprops[i].area > max_area:
                max_area = regionprops[i].area
                index = i
        # NOTE(review): an entirely dark slice yields no regions and the
        # indexing below would raise IndexError -- confirm inputs always
        # contain at least one bright component.
        minr, minc, maxr, maxc = regionprops[index].bbox
        new_arr[k][minr:maxr, minc:maxc] = src_array[k][minr:maxr, minc:maxc]
        sum_minr += minr
        sum_minc += minc
        sum_maxr += maxr
        sum_maxc += maxc
    # Average bounding box across slices (integer division).
    mean_minr = sum_minr // z
    meam_minc = sum_minc // z
    mean_maxr = sum_maxr // z
    mean_maxc = sum_maxc // z
    return new_arr, meam_minc, mean_minr, mean_maxc, mean_maxr
| [
"numpy.clip",
"numpy.arccos",
"math.sqrt",
"torch.from_numpy",
"math.cos",
"numpy.array",
"torch.log2",
"copy.deepcopy",
"numpy.linalg.norm",
"scipy.ndimage.interpolation.zoom",
"numpy.mean",
"cv2.threshold",
"warnings.simplefilter",
"pandas.DataFrame",
"numpy.round",
"skimage.measure.... | [((1807, 1819), 'numpy.mean', 'np.mean', (['img'], {}), '(img)\n', (1814, 1819), True, 'import numpy as np\n'), ((1840, 1851), 'numpy.std', 'np.std', (['img'], {}), '(img)\n', (1846, 1851), True, 'import numpy as np\n'), ((1870, 1894), 'numpy.percentile', 'np.percentile', (['img', '(99.5)'], {}), '(img, 99.5)\n', (1883, 1894), True, 'import numpy as np\n'), ((1913, 1936), 'numpy.percentile', 'np.percentile', (['img', '(0.5)'], {}), '(img, 0.5)\n', (1926, 1936), True, 'import numpy as np\n'), ((1948, 1986), 'numpy.clip', 'np.clip', (['img', 'lower_bound', 'upper_bound'], {}), '(img, lower_bound, upper_bound)\n', (1955, 1986), True, 'import numpy as np\n'), ((2066, 2081), 'numpy.array', 'np.array', (['[img]'], {}), '([img])\n', (2074, 2081), True, 'import numpy as np\n'), ((2092, 2113), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (2108, 2113), False, 'import torch\n'), ((3208, 3231), 'numpy.linalg.norm', 'np.linalg.norm', (['(v1 - v2)'], {}), '(v1 - v2)\n', (3222, 3231), True, 'import numpy as np\n'), ((3342, 3366), 'numpy.clip', 'np.clip', (['cosangle', '(-1)', '(1)'], {}), '(cosangle, -1, 1)\n', (3349, 3366), True, 'import numpy as np\n'), ((3652, 3705), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x_list, 'y': y_list, 'z': z_list}"], {}), "({'x': x_list, 'y': y_list, 'z': z_list})\n", (3664, 3705), True, 'import pandas as pd\n'), ((4082, 4106), 'copy.deepcopy', 'copy.deepcopy', (['input_arr'], {}), '(input_arr)\n', (4095, 4106), False, 'import copy\n'), ((4151, 4170), 'numpy.zeros', 'np.zeros', (['(z, w, h)'], {}), '((z, w, h))\n', (4159, 4170), True, 'import numpy as np\n'), ((277, 321), 'numpy.round', 'np.round', (['(imgs.shape * spacing / new_spacing)'], {}), '(imgs.shape * spacing / new_spacing)\n', (285, 321), True, 'import numpy as np\n'), ((1279, 1313), 'numpy.array', 'np.array', (['[curr_x, curr_y, curr_z]'], {}), '([curr_x, curr_y, curr_z])\n', (1287, 1313), True, 'import numpy as np\n'), ((1327, 1400), 
'numpy.array', 'np.array', (['[center_points[i][0], center_points[i][1], center_points[i][2]]'], {}), '([center_points[i][0], center_points[i][1], center_points[i][2]])\n', (1335, 1400), True, 'import numpy as np\n'), ((1416, 1439), 'numpy.linalg.norm', 'np.linalg.norm', (['(v1 - v2)'], {}), '(v1 - v2)\n', (1430, 1439), True, 'import numpy as np\n'), ((3025, 3066), 'numpy.linalg.norm', 'np.linalg.norm', (['(target_point - curr_point)'], {}), '(target_point - curr_point)\n', (3039, 3066), True, 'import numpy as np\n'), ((3391, 3410), 'numpy.arccos', 'np.arccos', (['cosangle'], {}), '(cosangle)\n', (3400, 3410), True, 'import numpy as np\n'), ((4340, 4388), 'cv2.threshold', 'cv2.threshold', (['image', '(20)', '(400)', 'cv2.THRESH_BINARY'], {}), '(image, 20, 400, cv2.THRESH_BINARY)\n', (4353, 4388), False, 'import cv2\n'), ((4406, 4458), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(5, 5)'], {}), '(cv2.MORPH_ELLIPSE, (5, 5))\n', (4431, 4458), False, 'import cv2\n'), ((4477, 4556), 'cv2.morphologyEx', 'cv2.morphologyEx', (['thresh', 'cv2.MORPH_OPEN', 'kernel'], {'anchor': '(-1, -1)', 'iterations': '(4)'}), '(thresh, cv2.MORPH_OPEN, kernel, anchor=(-1, -1), iterations=4)\n', (4493, 4556), False, 'import cv2\n'), ((4582, 4604), 'skimage.measure.label', 'measure.label', (['opening'], {}), '(opening)\n', (4595, 4604), False, 'from skimage import measure\n'), ((4627, 4661), 'skimage.measure.regionprops', 'measure.regionprops', (['label_opening'], {}), '(label_opening)\n', (4646, 4661), False, 'from skimage import measure\n'), ((439, 464), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (462, 464), False, 'import warnings\n'), ((478, 509), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (499, 509), False, 'import warnings\n'), ((529, 583), 'scipy.ndimage.interpolation.zoom', 'zoom', (['imgs', 'resize_factor'], {'mode': '"""nearest"""', 'order': 'order'}), "(imgs, 
resize_factor, mode='nearest', order=order)\n", (533, 583), False, 'from scipy.ndimage.interpolation import zoom\n'), ((2299, 2313), 'math.sqrt', 'math.sqrt', (['(5.0)'], {}), '(5.0)\n', (2308, 2313), False, 'import math\n'), ((2508, 2521), 'math.cos', 'math.cos', (['phi'], {}), '(phi)\n', (2516, 2521), False, 'import math\n'), ((2538, 2551), 'math.sin', 'math.sin', (['phi'], {}), '(phi)\n', (2546, 2551), False, 'import math\n'), ((2773, 2790), 'torch.log2', 'torch.log2', (['pre_y'], {}), '(pre_y)\n', (2783, 2790), False, 'import torch\n'), ((3286, 3304), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (3300, 3304), True, 'import numpy as np\n'), ((3307, 3325), 'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (3321, 3325), True, 'import numpy as np\n'), ((916, 932), 'numpy.array', 'np.array', (['newimg'], {}), '(newimg)\n', (924, 932), True, 'import numpy as np\n'), ((2837, 2859), 'numpy.array', 'np.array', (['[max_points]'], {}), '([max_points])\n', (2845, 2859), True, 'import numpy as np\n')] |
# Copyright 2018 Deep Learning Service of Huawei Cloud. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import, division, print_function
import os
import numpy as np
from moxing.framework import file
from data.yolo_load.detection_dataset import Detection_dataset
from utils.read_image_to_list import get_image_list
from mxnet import gluon, io, nd
def _pad_arrs_to_max_length(arrs, max_gt_box_number, pad_axis=0, pad_val=-1):
    """Stack *arrs* into one NDArray, padding each along *pad_axis*.

    Arrays shorter than ``max_gt_box_number`` along ``pad_axis`` are padded
    with ``pad_val``; the result has shape ``(len(arrs),) + padded_shape``.
    """
    if not isinstance(arrs[0], (nd.NDArray, np.ndarray)):
        arrs = [np.asarray(item) for item in arrs]
    padded_shape = list(arrs[0].shape)
    padded_shape[pad_axis] = max_gt_box_number
    out_shape = (len(arrs),) + tuple(padded_shape)
    out = nd.full(shape=out_shape, val=pad_val, dtype=arrs[0].dtype)
    for idx, arr in enumerate(arrs):
        if arr.shape[pad_axis] == max_gt_box_number:
            out[idx] = arr
        else:
            # Copy only the populated slice; the remainder keeps pad_val.
            sel = [slice(None)] * arr.ndim
            sel[pad_axis] = slice(0, arr.shape[pad_axis])
            out[tuple([slice(idx, idx + 1)] + sel)] = arr
    return out
class _train_batchify_fn(object):
    """Batchify callable that collates training samples into a DataBatch."""

    def __init__(self, max_gt_box_number):
        self._max_gt_box_number = max_gt_box_number

    def __call__(self, data):
        """Collate train data into a batch.

        Each sample is a 7-tuple: (image, center targets, scale targets,
        weights, objectness, class targets, ground-truth boxes).
        """
        stacked = [nd.stack(*[sample[col] for sample in data]) for col in range(6)]
        (img_data, center_targets, scale_targets,
         weights, objectness, class_targets) = stacked
        gt_bboxes = _pad_arrs_to_max_length([sample[6] for sample in data],
                                            self._max_gt_box_number,
                                            pad_axis=0, pad_val=-1)
        return io.DataBatch(data=[img_data],
                            label=[gt_bboxes, objectness, center_targets,
                                   scale_targets, weights, class_targets])
class _val_batchify_fn(object):
    """Batchify callable that collates validation samples into a DataBatch."""

    def __init__(self, max_gt_box_number):
        self._max_gt_box_number = max_gt_box_number

    def __call__(self, data):
        """Collate validation data into a batch of (image, gt boxes)."""
        images = nd.stack(*[sample[0] for sample in data])
        boxes = _pad_arrs_to_max_length([sample[1] for sample in data],
                                       self._max_gt_box_number,
                                       pad_axis=0, pad_val=-1)
        return io.DataBatch(data=[images], label=[boxes])
def _get_provide_data(next_batch):
    """Build the DataIter-style ``provide_data`` list from one batch."""
    shape = next_batch.data[0].shape
    return [io.DataDesc(name='data', shape=shape)]
def _get_provide_label(next_batch, gt_boxes_shape=(32, 56, 4), is_train=True):
next_label = next_batch.label
if is_train:
provide_label = [io.DataDesc(name='gt_boxes',
shape=next_label[0].shape),
io.DataDesc(name='obj_t', shape=next_label[1].shape),
io.DataDesc(name='centers_t',
shape=next_label[2].shape),
io.DataDesc(name='scales_t',
shape=next_label[3].shape),
io.DataDesc(name='weights_t',
shape=next_label[4].shape),
io.DataDesc(name='clas_t', shape=next_label[5].shape)]
else:
provide_label = None
return provide_label
def _reset():
    # Intentional no-op.  It is attached to the gluon DataLoaders via
    # setattr in get_data_iter so that callers expecting the MXNet
    # DataIter interface can call reset() without error.
    pass
def _load_pair_list(data_path, list_file):
    """Parse an image-list file into [image_path, label_path] pairs.

    Each non-empty line of *list_file* holds two space-separated paths
    relative to *data_path*; trailing '\r' (Windows line endings) is
    stripped.  The file is read through moxing so it may live remotely.
    """
    lines = file.read(list_file).split("\n")[0:-1]
    pairs = [line.replace('\r', '').split(' ') for line in lines]
    return [[os.path.join(data_path, pair[0]),
             os.path.join(data_path, pair[1])] for pair in pairs]


def get_data_iter(data_path, train_file=None, val_file=None, split_spec=1,
                  hyper_train=None, hyper_val=None, **kwargs):
    """Create train/validation gluon DataLoaders for YOLO detection.

    Parameters
    ----------
    data_path : str
        Root directory holding images and label files.
    train_file, val_file : str or None
        Optional list files with 'image label' lines relative to
        *data_path*.  When both are None the file list is built by
        scanning *data_path* and split according to *split_spec*.
    split_spec : float
        Train/val split fraction used when scanning *data_path*.
    hyper_train, hyper_val : dict or None
        Loader hyper-parameters ('width', 'height', 'batch_size',
        'shuffle', 'preprocess_threads', 'index_file').
    **kwargs
        Extra arguments forwarded to Detection_dataset; default YOLOv3
        'anchors' and 'offsets' are filled in when absent.

    Returns
    -------
    (train_set, val_set) : tuple
        gluon DataLoaders (or None) patched with 'reset', 'provide_data'
        and 'provide_label' attributes so they can stand in for an MXNet
        DataIter.
    """
    # None defaults instead of mutable {} defaults (which are shared
    # across calls of the function).
    if hyper_train is None:
        hyper_train = {}
    if hyper_val is None:
        hyper_val = {}
    train_set = None
    val_set = None
    train_list = None
    val_list = None
    if train_file is not None:
        assert file.exists(train_file), 'not found train file'
        train_list = _load_pair_list(data_path, train_file)
    if val_file is not None:
        assert file.exists(val_file), 'not found val file'
        val_list = _load_pair_list(data_path, val_file)
    if train_file is None and val_file is None:
        train_list, val_list, _ = get_image_list(data_path, split_spec)
    # Default YOLOv3 anchors (per feature-map scale) and grid offsets.
    kwargs.setdefault('anchors', [[116, 90, 156, 198, 373, 326],
                                  [30, 61, 62, 45, 59, 119],
                                  [10, 13, 16, 30, 33, 23]])
    kwargs.setdefault('offsets', [(13, 13), (26, 26), (52, 52)])
    if train_list is not None and len(train_list) > 0:
        dataset = Detection_dataset(img_list=train_list,
                                    index_file=hyper_train.get(
                                        'index_file', None),
                                    width=hyper_train.get('width', 416),
                                    height=hyper_train.get('height', 416),
                                    is_train=True,
                                    **kwargs)
        # Every batch is padded to the largest ground-truth box count seen.
        max_gt_box_number = max([len(item) for item in dataset.label_cache])
        batch_size = hyper_train.get('batch_size', 32)
        train_set = gluon.data.DataLoader(
            dataset=dataset,
            batch_size=batch_size,
            shuffle=hyper_train.get('shuffle', True),
            batchify_fn=_train_batchify_fn(max_gt_box_number),
            last_batch='rollover',
            num_workers=hyper_train.get('preprocess_threads', 4))
        # Mimic the MXNet DataIter interface expected downstream.
        next_data_batch = next(iter(train_set))
        setattr(train_set, 'reset', _reset)
        setattr(train_set, 'provide_data', _get_provide_data(next_data_batch))
        setattr(train_set, 'provide_label', _get_provide_label(
            next_data_batch, (batch_size, max_gt_box_number, 4), is_train=True))
    if val_list is not None and len(val_list) > 0:
        assert 'index_file' in hyper_val and file.exists(
            hyper_val['index_file']), 'not found label name file'
        dataset = Detection_dataset(img_list=val_list,
                                    index_file=hyper_val.get(
                                        'index_file'),
                                    width=hyper_val.get('width', 416),
                                    height=hyper_val.get('height', 416),
                                    is_train=False,
                                    **kwargs)
        max_gt_box_number = max([len(item) for item in dataset.label_cache])
        batch_size = hyper_val.get('batch_size', 32)
        val_set = gluon.data.DataLoader(
            dataset=dataset,
            batch_size=batch_size,
            shuffle=hyper_val.get('shuffle', True),
            batchify_fn=_val_batchify_fn(max_gt_box_number),
            last_batch='keep',
            num_workers=hyper_val.get('preprocess_threads', 4))
        next_data_batch = next(iter(val_set))
        setattr(val_set, 'reset', _reset)
        setattr(val_set, 'provide_data', _get_provide_data(next_data_batch))
        setattr(val_set, 'provide_label', _get_provide_label(
            next_data_batch, is_train=False))
    return train_set, val_set
| [
"mxnet.nd.stack",
"mxnet.io.DataDesc",
"numpy.asarray",
"os.path.join",
"mxnet.nd.full",
"mxnet.io.DataBatch",
"moxing.framework.file.exists",
"utils.read_image_to_list.get_image_list",
"moxing.framework.file.read"
] | [((1378, 1436), 'mxnet.nd.full', 'nd.full', ([], {'shape': 'ret_shape', 'val': 'pad_val', 'dtype': 'arrs[0].dtype'}), '(shape=ret_shape, val=pad_val, dtype=arrs[0].dtype)\n', (1385, 1436), False, 'from mxnet import gluon, io, nd\n'), ((2002, 2039), 'mxnet.nd.stack', 'nd.stack', (['*[item[0] for item in data]'], {}), '(*[item[0] for item in data])\n', (2010, 2039), False, 'from mxnet import gluon, io, nd\n'), ((2065, 2102), 'mxnet.nd.stack', 'nd.stack', (['*[item[1] for item in data]'], {}), '(*[item[1] for item in data])\n', (2073, 2102), False, 'from mxnet import gluon, io, nd\n'), ((2127, 2164), 'mxnet.nd.stack', 'nd.stack', (['*[item[2] for item in data]'], {}), '(*[item[2] for item in data])\n', (2135, 2164), False, 'from mxnet import gluon, io, nd\n'), ((2183, 2220), 'mxnet.nd.stack', 'nd.stack', (['*[item[3] for item in data]'], {}), '(*[item[3] for item in data])\n', (2191, 2220), False, 'from mxnet import gluon, io, nd\n'), ((2242, 2279), 'mxnet.nd.stack', 'nd.stack', (['*[item[4] for item in data]'], {}), '(*[item[4] for item in data])\n', (2250, 2279), False, 'from mxnet import gluon, io, nd\n'), ((2304, 2341), 'mxnet.nd.stack', 'nd.stack', (['*[item[5] for item in data]'], {}), '(*[item[5] for item in data])\n', (2312, 2341), False, 'from mxnet import gluon, io, nd\n'), ((2572, 2691), 'mxnet.io.DataBatch', 'io.DataBatch', ([], {'data': '[img_data]', 'label': '[gt_bboxes, objectness, center_targets, scale_targets, weights, class_targets]'}), '(data=[img_data], label=[gt_bboxes, objectness, center_targets,\n scale_targets, weights, class_targets])\n', (2584, 2691), False, 'from mxnet import gluon, io, nd\n'), ((3013, 3050), 'mxnet.nd.stack', 'nd.stack', (['*[item[0] for item in data]'], {}), '(*[item[0] for item in data])\n', (3021, 3050), False, 'from mxnet import gluon, io, nd\n'), ((3281, 3329), 'mxnet.io.DataBatch', 'io.DataBatch', ([], {'data': '[img_data]', 'label': '[gt_bboxes]'}), '(data=[img_data], label=[gt_bboxes])\n', (3293, 3329), False, 
'from mxnet import gluon, io, nd\n'), ((3471, 3521), 'mxnet.io.DataDesc', 'io.DataDesc', ([], {'name': '"""data"""', 'shape': 'next_data[0].shape'}), "(name='data', shape=next_data[0].shape)\n", (3482, 3521), False, 'from mxnet import gluon, io, nd\n'), ((4645, 4668), 'moxing.framework.file.exists', 'file.exists', (['train_file'], {}), '(train_file)\n', (4656, 4668), False, 'from moxing.framework import file\n'), ((5016, 5037), 'moxing.framework.file.exists', 'file.exists', (['val_file'], {}), '(val_file)\n', (5027, 5037), False, 'from moxing.framework import file\n'), ((5407, 5444), 'utils.read_image_to_list.get_image_list', 'get_image_list', (['data_path', 'split_spec'], {}), '(data_path, split_spec)\n', (5421, 5444), False, 'from utils.read_image_to_list import get_image_list\n'), ((1182, 1197), 'numpy.asarray', 'np.asarray', (['ele'], {}), '(ele)\n', (1192, 1197), True, 'import numpy as np\n'), ((3680, 3735), 'mxnet.io.DataDesc', 'io.DataDesc', ([], {'name': '"""gt_boxes"""', 'shape': 'next_label[0].shape'}), "(name='gt_boxes', shape=next_label[0].shape)\n", (3691, 3735), False, 'from mxnet import gluon, io, nd\n'), ((3799, 3851), 'mxnet.io.DataDesc', 'io.DataDesc', ([], {'name': '"""obj_t"""', 'shape': 'next_label[1].shape'}), "(name='obj_t', shape=next_label[1].shape)\n", (3810, 3851), False, 'from mxnet import gluon, io, nd\n'), ((3878, 3934), 'mxnet.io.DataDesc', 'io.DataDesc', ([], {'name': '"""centers_t"""', 'shape': 'next_label[2].shape'}), "(name='centers_t', shape=next_label[2].shape)\n", (3889, 3934), False, 'from mxnet import gluon, io, nd\n'), ((3998, 4053), 'mxnet.io.DataDesc', 'io.DataDesc', ([], {'name': '"""scales_t"""', 'shape': 'next_label[3].shape'}), "(name='scales_t', shape=next_label[3].shape)\n", (4009, 4053), False, 'from mxnet import gluon, io, nd\n'), ((4117, 4173), 'mxnet.io.DataDesc', 'io.DataDesc', ([], {'name': '"""weights_t"""', 'shape': 'next_label[4].shape'}), "(name='weights_t', shape=next_label[4].shape)\n", (4128, 4173), 
False, 'from mxnet import gluon, io, nd\n'), ((4237, 4290), 'mxnet.io.DataDesc', 'io.DataDesc', ([], {'name': '"""clas_t"""', 'shape': 'next_label[5].shape'}), "(name='clas_t', shape=next_label[5].shape)\n", (4248, 4290), False, 'from mxnet import gluon, io, nd\n'), ((7091, 7127), 'moxing.framework.file.exists', 'file.exists', (["hyper_val['index_file']"], {}), "(hyper_val['index_file'])\n", (7102, 7127), False, 'from moxing.framework import file\n'), ((4857, 4889), 'os.path.join', 'os.path.join', (['data_path', 'path[0]'], {}), '(data_path, path[0])\n', (4869, 4889), False, 'import os\n'), ((4914, 4946), 'os.path.join', 'os.path.join', (['data_path', 'path[1]'], {}), '(data_path, path[1])\n', (4926, 4946), False, 'import os\n'), ((5214, 5246), 'os.path.join', 'os.path.join', (['data_path', 'path[0]'], {}), '(data_path, path[0])\n', (5226, 5246), False, 'import os\n'), ((5269, 5301), 'os.path.join', 'os.path.join', (['data_path', 'path[1]'], {}), '(data_path, path[1])\n', (5281, 5301), False, 'import os\n'), ((4714, 4735), 'moxing.framework.file.read', 'file.read', (['train_file'], {}), '(train_file)\n', (4723, 4735), False, 'from moxing.framework import file\n'), ((5079, 5098), 'moxing.framework.file.read', 'file.read', (['val_file'], {}), '(val_file)\n', (5088, 5098), False, 'from moxing.framework import file\n')] |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# <NAME>, <NAME>, <NAME>, and <NAME>.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
GRO topology parser
===================
Read a list of atoms from a GROMOS/Gromacs GRO coordinate file to
build a basic topology.
Atom types and masses are guessed.
See Also
--------
:mod:`MDAnalysis.coordinates.GRO`
Classes
-------
.. autoclass:: GROParser
:members:
:inherited-members:
"""
from __future__ import absolute_import
import numpy as np
from six.moves import range
from ..lib.util import openany
from ..core.topologyattrs import (
Atomnames,
Atomtypes,
Atomids,
Masses,
Resids,
Resnames,
Resnums,
Segids,
)
from ..core.topology import Topology
from .base import TopologyReaderBase, change_squash
from . import guessers
class GROParser(TopologyReaderBase):
    """Reads a Gromacs GRO file

    Reads the following attributes:
      - resids
      - resnames
      - atomids
      - atomnames
    Guesses the following attributes
      - atomtypes
      - masses
    """
    format = 'GRO'
    def parse(self, **kwargs):
        """Return the *Topology* object for this file

        Raises
        ------
        IOError
            If a line cannot be parsed or an atom name is missing.
        """
        # GRO is fixed-width: resid (cols 0-4), resname (5-9),
        # atom name (10-14), atom index (15-19), then (x,y,z).
        with openany(self.filename) as inf:
            next(inf)  # skip title line
            n_atoms = int(next(inf))
            # Allocate per-atom arrays
            resids = np.zeros(n_atoms, dtype=np.int32)
            resnames = np.zeros(n_atoms, dtype=object)
            names = np.zeros(n_atoms, dtype=object)
            indices = np.zeros(n_atoms, dtype=np.int32)
            for i, line in enumerate(inf):
                if i == n_atoms:
                    break
                try:
                    resids[i] = int(line[:5])
                    resnames[i] = line[5:10].strip()
                    names[i] = line[10:15].strip()
                    indices[i] = int(line[15:20])
                except (ValueError, TypeError):
                    raise IOError(
                        "Couldn't read the following line of the .gro file:\n"
                        "{0}".format(line))
        # Check all lines had names
        if not np.all(names):
            missing = np.where(names == '')
            raise IOError("Missing atom name on line: {0}"
                          "".format(missing[0][0] + 3))  # 2 header, 1 based
        # Fix wrapping of resids (if we ever saw a wrap)
        # (resids are only 5 digits wide, so they wrap to 0 after 99999)
        if np.any(resids == 0):
            # find places where resid hit zero again
            wraps = np.where(resids == 0)[0]
            # group these places together:
            # find indices of first 0 in each block of zeroes
            # 1) find large changes in index, (ie non sequential blocks)
            diff = np.diff(wraps) != 1
            # 2) make array of where 0-blocks start
            starts = np.hstack([wraps[0], wraps[1:][diff]])
            # remove 0 in starts, ie the first residue **can** be 0
            if starts[0] == 0:
                starts = starts[1:]
            # for each resid after a wrap, add 100k (5 digit wrap)
            for s in starts:
                resids[s:] += 100000
        # Guess types and masses
        atomtypes = guessers.guess_types(names)
        masses = guessers.guess_masses(atomtypes)
        # Squash consecutive identical (resid, resname) runs into residues.
        residx, (new_resids, new_resnames) = change_squash(
            (resids, resnames), (resids, resnames))
        # new_resids is len(residues)
        # so resindex 0 has resid new_resids[0]
        attrs = [
            Atomnames(names),
            Atomids(indices),
            Atomtypes(atomtypes, guessed=True),
            Resids(new_resids),
            Resnums(new_resids.copy()),
            Resnames(new_resnames),
            Masses(masses, guessed=True),
            Segids(np.array(['SYSTEM'], dtype=object))
        ]
        top = Topology(n_atoms=n_atoms, n_res=len(new_resids), n_seg=1,
                       attrs=attrs,
                       atom_resindex=residx,
                       residue_segindex=None)
        return top
| [
"numpy.hstack",
"numpy.where",
"numpy.diff",
"numpy.any",
"numpy.array",
"numpy.zeros",
"numpy.all"
] | [((3302, 3321), 'numpy.any', 'np.any', (['(resids == 0)'], {}), '(resids == 0)\n', (3308, 3321), True, 'import numpy as np\n'), ((2260, 2293), 'numpy.zeros', 'np.zeros', (['n_atoms'], {'dtype': 'np.int32'}), '(n_atoms, dtype=np.int32)\n', (2268, 2293), True, 'import numpy as np\n'), ((2317, 2348), 'numpy.zeros', 'np.zeros', (['n_atoms'], {'dtype': 'object'}), '(n_atoms, dtype=object)\n', (2325, 2348), True, 'import numpy as np\n'), ((2369, 2400), 'numpy.zeros', 'np.zeros', (['n_atoms'], {'dtype': 'object'}), '(n_atoms, dtype=object)\n', (2377, 2400), True, 'import numpy as np\n'), ((2423, 2456), 'numpy.zeros', 'np.zeros', (['n_atoms'], {'dtype': 'np.int32'}), '(n_atoms, dtype=np.int32)\n', (2431, 2456), True, 'import numpy as np\n'), ((3038, 3051), 'numpy.all', 'np.all', (['names'], {}), '(names)\n', (3044, 3051), True, 'import numpy as np\n'), ((3075, 3096), 'numpy.where', 'np.where', (["(names == '')"], {}), "(names == '')\n", (3083, 3096), True, 'import numpy as np\n'), ((3711, 3749), 'numpy.hstack', 'np.hstack', (['[wraps[0], wraps[1:][diff]]'], {}), '([wraps[0], wraps[1:][diff]])\n', (3720, 3749), True, 'import numpy as np\n'), ((3396, 3417), 'numpy.where', 'np.where', (['(resids == 0)'], {}), '(resids == 0)\n', (3404, 3417), True, 'import numpy as np\n'), ((3618, 3632), 'numpy.diff', 'np.diff', (['wraps'], {}), '(wraps)\n', (3625, 3632), True, 'import numpy as np\n'), ((4667, 4701), 'numpy.array', 'np.array', (["['SYSTEM']"], {'dtype': 'object'}), "(['SYSTEM'], dtype=object)\n", (4675, 4701), True, 'import numpy as np\n')] |
# Copyright (c) 2015-2019 <NAME> and contributors.
# MC3 is open-source software under the MIT license (see LICENSE).
import os
import re
import sys
import setuptools
from setuptools import setup, Extension
from numpy import get_include
sys.path.append('mc3')
from VERSION import __version__
srcdir = 'src_c/'          # C-code source folder
incdir = 'src_c/include/'  # Include folder with header files

# Collect the C source files, skipping editor backup/lock files
# (anything containing '.' or '#' before the '.c' suffix, e.g. '.#foo.c').
cfiles = os.listdir(srcdir)
cfiles = [cfile for cfile in cfiles if re.search(r'.+[.]c$', cfile)]
cfiles = [cfile for cfile in cfiles if not re.search(r'[.#].+[.]c$', cfile)]

inc = [get_include(), incdir]
eca = ['-ffast-math']  # extra compile args
ela = []               # extra link args

extensions = []
for cfile in cfiles:
    # BUGFIX: use splitext to drop the '.c' suffix.  str.rstrip('.c')
    # strips *any* trailing '.' and 'c' characters, so names such as
    # 'calc.c' would be mangled to 'cal'.
    modname = 'mc3.lib.' + os.path.splitext(cfile)[0]
    e = Extension(modname,
                  sources=['{:s}{:s}'.format(srcdir, cfile)],
                  include_dirs=inc,
                  extra_compile_args=eca,
                  extra_link_args=ela)
    extensions.append(e)

# Long description for PyPI comes straight from the README.
with open('README.md', 'r') as f:
    readme = f.read()

setup(name = 'mc3',
      version = __version__,
      author = '<NAME>',
      author_email = '<EMAIL>',
      url = 'https://github.com/pcubillos/mc3',
      packages = setuptools.find_packages(),
      install_requires = ['numpy>=1.13.3',
                          'scipy>=0.17.1',
                          'matplotlib>=2.0',],
      tests_require = ['pytest>=3.9',
                       'dynesty>=0.9.5'],
      include_package_data=True,
      license = 'MIT',
      description = 'Multi-core Markov-chain Monte Carlo package.',
      long_description=readme,
      long_description_content_type='text/markdown',
      include_dirs = inc,
      entry_points={'console_scripts': ['mc3 = mc3.__main__:main']},
      ext_modules = extensions)
| [
"os.listdir",
"setuptools.find_packages",
"numpy.get_include",
"sys.path.append",
"re.search"
] | [((240, 262), 'sys.path.append', 'sys.path.append', (['"""mc3"""'], {}), "('mc3')\n", (255, 262), False, 'import sys\n'), ((418, 436), 'os.listdir', 'os.listdir', (['srcdir'], {}), '(srcdir)\n', (428, 436), False, 'import os\n'), ((591, 604), 'numpy.get_include', 'get_include', ([], {}), '()\n', (602, 604), False, 'from numpy import get_include\n'), ((1159, 1185), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (1183, 1185), False, 'import setuptools\n'), ((472, 495), 're.search', 're.search', (['""".+[.]c$"""', 'x'], {}), "('.+[.]c$', x)\n", (481, 495), False, 'import re\n'), ((545, 572), 're.search', 're.search', (['"""[.#].+[.]c$"""', 'x'], {}), "('[.#].+[.]c$', x)\n", (554, 572), False, 'import re\n')] |
####Please do not remove lines below####
from lmfit import Parameters
import numpy as np
import sys
import os
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('./Functions'))
sys.path.append(os.path.abspath('./Fortran_routines'))
####Please do not remove lines above####
####Import your modules below if needed####
from FormFactors.Sphere import Sphere
from ff_sphere import ff_sphere_ml
from Chemical_Formula import Chemical_Formula
from PeakFunctions import LogNormal, Gaussian
from Structure_Factors import hard_sphere_sf, sticky_sphere_sf
from utils import find_minmax, calc_rho, create_steps
from functools import lru_cache
import time
class Biphasic_Sphere_Uniform: #Please put the class name same as the function name
    def __init__(self, x=0, Np=20, flux=1e13, term='Total',dist='Gaussian', Energy=None, relement='Au', NrDep='False',
                 norm=1.0, sbkg=0.0, cbkg=0.0, abkg=0.0, D=1.0, phi=0.1, U=-1.0, SF='None',Rsig=0.0,
                 mpar={'Phase_1':{'Material':['Au','H2O'],
                                  'Density':[19.32,1.0],
                                  'VolFrac':[1.0,1.0],
                                  'Rmoles':[1.0,0.0],
                                  'R':[1.0,0.0]},
                       'Phase_2':{'Material':['Au','H2O'],
                                  'Density':[19.32,1.0],
                                  'VolFrac':[1.0,1.0],
                                  'Rmoles':[1.0,0.0],
                                  'R':[1.0,0.0]},
                       'Solvent':{'Material':['H2O','H2O'],
                                  'Density':[1.0,1.0],
                                  'VolFrac':[1.0,1.0],
                                  'Rmoles':[1.0,0.0],
                                  'R':[1.0,0.0]}}):
        """
        Calculates the Energy dependent form factor of multilayered spherical nanoparticles with two different set of materials

        x        : Reciprocal wave-vector 'Q' inv-Angs in the form of a scalar or an array
        relement : Resonant element of the nanoparticle. Default: 'Au'
        Energy   : Energy of X-rays in keV at which the form-factor is calculated. Default: None
        Np       : No. of points with which the size distribution will be computed. Default: 10
        NrDep    : Energy dependence of the non-resonant element. Default= 'False' (Energy independent), 'True' (Energy dependent)
        dist     : The probability distribution function for the radii of different interfaces in the nanoparticles. Default: Gaussian
        norm     : The density of the nanoparticles in Molar (Moles/Liter)
        sbkg     : Constant incoherent background for SAXS-term
        cbkg     : Constant incoherent background for cross-term
        abkg     : Constant incoherent background for Resonant-term
        flux     : Total X-ray flux to calculate the errorbar to simulate the errorbar for the fitted data
        term     : 'SAXS-term' or 'Cross-term' or 'Resonant-term' or 'Total'
        D        : Hard Sphere Diameter
        phi      : Volume fraction of particles
        U        : The sticky-sphere interaction energy
        SF       : Type of structure factor. Default: 'None'
        Rsig     : Width of the total radius of the nanoparticles. Default: 0.0
        mpar     : Multi-parameter which defines the following including the solvent/bulk medium which is the last one. Default: 'H2O'
                   Material ('Materials' using chemical formula),
                   Density ('Density' in gm/cubic-cms),
                   Density of solvent ('SolDensity' in gm/cubic-cms) of the particular layer
                   Mole-fraction ('Rmoles') of resonant element in the material,
                   Radii ('R' in Angs)

        NOTE(review): the default for `mpar` is a mutable dict shared across
        instances that use the default — confirm no caller mutates it.
        """
        if type(x)==list:
            self.x=np.array(x)
        else:
            self.x=x
        self.norm=norm
        self.sbkg=sbkg
        self.cbkg=cbkg
        self.abkg=abkg
        self.dist=dist
        self.Np=Np
        self.Energy=Energy
        self.relement=relement
        self.NrDep=NrDep
        #self.rhosol=rhosol
        self.flux=flux
        self.D=D
        self.phi=phi
        self.U=U
        self.__mpar__=mpar #If there is any multivalued parameter
        self.SF=SF
        self.term=term
        self.Rsig=Rsig
        # Per-phase caches filled by update_params() from self.params.
        self.__Density__={}
        self.__VolFrac__={}
        self.__R__={}
        self.__Rmoles__={}
        self.__material__={}
        self.choices={'dist':['Gaussian','LogNormal'],'NrDep':['True','False'],'SF':['None','Hard-Sphere', 'Sticky-Sphere'],
                      'term':['SAXS-term','Cross-term','Resonant-term','Total']} #If there are choices available for any fixed parameters
        self.__fit__=False
        self.__mkeys__=list(self.__mpar__.keys())
        self.init_params()

    def init_params(self):
        """
        Define all the fitting parameters like
        self.params.add('sig',value = 0, vary = 0, min = -np.inf, max = np.inf, expr = None, brute_step = None)

        Multi-parameters are registered as '__<phase>_<key>_<index>'; the
        'R' parameters of phases after the first are tied by expression to
        the first phase so all phases share one radial profile.
        """
        self.params=Parameters()
        self.params.add('norm',value=self.norm,vary=0, min = -np.inf, max = np.inf, expr = None, brute_step = 0.1)
        self.params.add('D', value=self.D, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
        self.params.add('phi', value=self.phi, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
        self.params.add('sbkg',value=self.sbkg,vary=0, min = -np.inf, max = np.inf, expr = None, brute_step = 0.1)
        self.params.add('cbkg', value=self.cbkg, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
        self.params.add('abkg', value=self.abkg, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
        self.params.add('U', value=self.U, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
        self.params.add('Rsig',value=self.Rsig,vary=0,min=0,max=np.inf,expr=None,brute_step=0.1)
        mkey1=self.__mkeys__[0]
        for key in self.__mpar__[mkey1].keys():
            if key != 'Material':
                for i in range(len(self.__mpar__[mkey1][key])):
                    self.params.add('__%s_%s_%03d' % (mkey1, key, i), value=self.__mpar__[mkey1][key][i], vary=0,
                                    min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
        for mkey in self.__mkeys__[1:]:
            for key in self.__mpar__[mkey].keys():
                if key!='Material' and key!='R':
                    for i in range(len(self.__mpar__[mkey][key])):
                        self.params.add('__%s_%s_%03d'%(mkey, key,i),value=self.__mpar__[mkey][key][i],vary=0,min=-np.inf,max=np.inf,expr=None,brute_step=0.1)
                elif key=='R':
                    # R of later phases mirrors phase 1 via an lmfit expr.
                    for i in range(len(self.__mpar__[mkey][key])):
                        self.params.add('__%s_%s_%03d'%(mkey, key,i),value=self.__mpar__[mkey][key][i],vary=0,min=-np.inf,max=np.inf
                                        ,expr='__%s_%s_%03d'%(mkey1, key,i),brute_step=0.1)

    @lru_cache(maxsize=10)
    def calc_Rdist(self, R, Rsig, dist, N):
        """Return (radii, normalized weights, total radius) for the size
        distribution of the particle.

        R is a tuple of shell radii; the distribution is centered on the
        sum of all but the last entry.  For Rsig <= 0.001 a delta
        distribution is returned.

        NOTE(review): `eval` builds the distribution object from the
        `dist` string; `dist` is restricted to self.choices ('Gaussian',
        'LogNormal') by the GUI — confirm it is never user-supplied text.
        NOTE(review): lru_cache on an instance method keeps `self` alive
        in the cache (flake8-bugbear B019).
        """
        R = np.array(R)
        totalR = np.sum(R[:-1])
        if Rsig > 0.001:
            fdist = eval(dist + '.' + dist + '(x=0.001, pos=totalR, wid=Rsig)')
            if dist == 'Gaussian':
                rmin, rmax = max(0.001, totalR - 5 * Rsig), totalR + 5 * Rsig
                dr = np.linspace(rmin, rmax, N)
            else:
                # LogNormal: sample log-spaced radii (natural log base).
                rmin, rmax = max(-3, np.log(totalR) - 5 * Rsig), np.log(totalR) + 5 * Rsig
                dr = np.logspace(rmin, rmax, N, base=np.exp(1.0))
            fdist.x = dr
            rdist = fdist.y()
            sumdist = np.sum(rdist)
            rdist = rdist / sumdist
            return dr, rdist, totalR
        else:
            return [totalR], [1.0], totalR

    @lru_cache(maxsize=10)
    def new_sphere(self, q, R, Rsig, rho, eirho, adensity, dist='Gaussian',Np=10):
        """Compute size-distribution-averaged scattering intensities.

        Returns (total, SAXS, resonant, cross) terms in cm^2, averaging
        the multilayer sphere form factor over the radius distribution.
        All radii are scaled proportionally for each sampled total radius.
        """
        q = np.array(q)
        dr, rdist, totalR = self.calc_Rdist(R, Rsig, dist, Np)
        form = np.zeros_like(q)
        eiform = np.zeros_like(q)
        aform = np.zeros_like(q)
        cform = np.zeros_like(q)
        # (4*pi*r_e)^2 with r_e in cm; converts densities to cm^2 intensity.
        pfac = (4 * np.pi * 2.818e-5 * 1.0e-8) ** 2
        for i in range(len(dr)):
            r = np.array(R) * (1 + (dr[i] - totalR) / totalR)
            ff, mff = ff_sphere_ml(q, r, rho)
            form = form + rdist[i] * ff
            eiff, meiff = ff_sphere_ml(q, r, eirho)
            eiform = eiform + rdist[i] * eiff
            aff, maff = ff_sphere_ml(q, r, adensity)
            aform = aform + rdist[i] * aff
            cform = cform + rdist[i] * (meiff * maff.conjugate()+meiff.conjugate()*maff)
        return pfac * form, pfac * eiform, pfac * aform, np.abs(pfac * cform)/2 # in cm^2

    @lru_cache(maxsize=2)
    def new_sphere_dict(self, q, R, Rsig, rho, eirho, adensity, dist='Gaussian',Np=10,key='SAXS-term'):
        """Return one named component of new_sphere()'s four outputs.

        `key` selects 'SAXS-term', 'Resonant-term', 'Cross-term' or
        'Total'; any other value returns None implicitly.
        """
        form, eiform, aform, cform = self.new_sphere(q, R, Rsig, rho, eirho, adensity,dist=dist,Np=Np)
        if key == 'SAXS-term':
            return eiform
        elif key == 'Resonant-term':
            return aform
        elif key == 'Cross-term':
            return cform
        elif key == 'Total':
            return form

    def update_params(self):
        """Refresh the per-phase caches from self.params and re-tie the
        dependent lmfit expressions (shared R, solvent VolFrac closure).
        """
        for mkey in self.__mkeys__:
            key = 'Density'
            Nmpar=len(self.__mpar__[mkey][key])
            self.__Density__[mkey] = [self.params['__%s_%s_%03d' % (mkey, key, i)].value for i in range(Nmpar)]
            key = 'VolFrac'
            self.__VolFrac__[mkey] = [self.params['__%s_%s_%03d' % (mkey, key, i)].value for i in range(Nmpar)]
            key = 'Rmoles'
            self.__Rmoles__[mkey] = [self.params['__%s_%s_%03d' % (mkey, key, i)].value for i in range(Nmpar)]
            key = 'R'
            self.__R__[mkey] = [self.params['__%s_%s_%03d' % (mkey, key, i)].value for i in range(Nmpar)]
            key = 'Material'
            self.__material__[mkey] = [self.__mpar__[mkey][key][i] for i in range(Nmpar)]
        for mkey in self.__mkeys__[1:]:
            key='R'
            for i in range(Nmpar):
                self.params['__%s_%s_%03d'%(mkey,key,i)].set(expr='__%s_%s_%03d'%(self.__mkeys__[0],key,i))
        # Solvent volume fraction is constrained so fractions sum to 1.
        mkey = 'Solvent'
        key = 'VolFrac'
        for i in range(Nmpar):
            self.params['__%s_%s_%03d' % (mkey, key, i)].set(
                expr='1.0-__Phase_1_VolFrac_%03d-__Phase_2_VolFrac_%03d' % (i, i))

    def y(self):
        """Evaluate the model at self.x.

        Returns either a dict of term arrays (when self.x is a dict of
        q-grids keyed by term name) or a single intensity array.  Side
        effect: fills self.output_params with diagnostic curves unless
        self.__fit__ is set.
        """
        svol = 1.5 * 0.0172 ** 2 / 370 ** 2  # scattering volume in cm^3
        self.output_params = {'scaler_parameters': {}}
        self.update_params()
        # Start from the solvent contribution, then add each phase
        # weighted by its volume fraction.
        mkey = 'Solvent'
        sol_density = tuple(np.ones_like(self.__Density__[mkey]))
        R = self.__R__[mkey]
        rho, eirho, adensity, rhor, eirhor, adensityr = calc_rho(R=tuple(R),
                                                                 material=tuple(self.__material__[mkey]),
                                                                 relement=self.relement,
                                                                 density=tuple(self.__Density__[mkey]),
                                                                 sol_density=sol_density,
                                                                 Energy=self.Energy,
                                                                 Rmoles=tuple(self.__Rmoles__[mkey]),
                                                                 NrDep=self.NrDep)
        for mkey in self.__mkeys__:
            if mkey != 'Solvent':
                trho, teirho, tadensity, trhor, teirhor, tadensityr = calc_rho(R=tuple(self.__R__[mkey]),
                                                                               material=tuple(self.__material__[mkey]),
                                                                               relement=self.relement,
                                                                               density=tuple(self.__Density__[mkey]),
                                                                               sol_density=sol_density,
                                                                               Energy=self.Energy,
                                                                               Rmoles=tuple(self.__Rmoles__[mkey]),
                                                                               NrDep=self.NrDep)
                vf = np.array(self.__VolFrac__[mkey])
                rho = rho + vf * trho
                eirho = eirho + vf * teirho
                adensity = adensity + vf * tadensity
        if type(self.x) == dict:
            # Dict input: compute each requested term on its own q-grid.
            sqf = {}
            for key in self.x.keys():
                sqf[key] = self.norm * 6.022e20 * self.new_sphere_dict(tuple(self.x[key]), tuple(self.__R__[self.__mkeys__[0]]),
                                                                      self.Rsig, tuple(rho), tuple(eirho),
                                                                      tuple(adensity), key=key, dist=self.dist,Np=self.Np) # in cm^-1
                if self.SF is None:
                    struct = np.ones_like(self.x[key])  # hard_sphere_sf(self.x[key], D = self.D, phi = 0.0)
                elif self.SF == 'Hard-Sphere':
                    struct = hard_sphere_sf(self.x[key], D=self.D, phi=self.phi)
                else:
                    struct = sticky_sphere_sf(self.x[key], D=self.D, phi=self.phi, U=self.U, delta=0.01)
                if key == 'SAXS-term':
                    sqf[key] = sqf[key] * struct + self.sbkg
                if key == 'Cross-term':
                    sqf[key] = sqf[key] * struct + self.cbkg
                if key == 'Resonant-term':
                    sqf[key] = sqf[key] * struct + self.abkg
            key1 = 'Total'
            # NOTE(review): uses `key` and `struct` left over from the last
            # loop iteration — depends on dict ordering; confirm intended.
            total = self.norm * 6.022e20 * struct * self.new_sphere_dict(tuple(self.x[key]), tuple(self.__R__[self.__mkeys__[0]]),
                                                                         self.Rsig, tuple(rho), tuple(eirho),
                                                                         tuple(adensity),
                                                                         key=key1,dist=self.dist,Np=self.Np) + self.sbkg # in cm^-1
            if not self.__fit__:
                dr, rdist, totalR = self.calc_Rdist(tuple(self.__R__[self.__mkeys__[0]]), self.Rsig, self.dist, self.Np)
                self.output_params['Distribution'] = {'x': dr, 'y': rdist}
                self.output_params['Simulated_total_wo_err'] = {'x': self.x[key], 'y': total}
                self.output_params['Total'] = {'x': self.x[key], 'y': total}
                for key in self.x.keys():
                    self.output_params[key] = {'x': self.x[key], 'y': sqf[key]}
                self.output_params['rho_r'] = {'x': rhor[:, 0], 'y': rhor[:, 1]}
                self.output_params['eirho_r'] = {'x': eirhor[:, 0], 'y': eirhor[:, 1]}
                self.output_params['adensity_r'] = {'x': adensityr[:, 0], 'y': adensityr[:, 1]}
                self.output_params['Structure_Factor'] = {'x': self.x[key], 'y': struct}
        else:
            # Array input: compute all four terms plus a simulated noisy
            # total for error-bar estimation.
            if self.SF is None:
                struct = np.ones_like(self.x)
            elif self.SF == 'Hard-Sphere':
                struct = hard_sphere_sf(self.x, D=self.D, phi=self.phi)
            else:
                struct = sticky_sphere_sf(self.x, D=self.D, phi=self.phi, U=self.U, delta=0.01)
            tsqf, eisqf, asqf, csqf = self.new_sphere(tuple(self.x), tuple(self.__R__[self.__mkeys__[0]]), self.Rsig, tuple(rho),
                                                      tuple(eirho), tuple(adensity),dist=self.dist,Np=self.Np)
            sqf = self.norm * np.array(tsqf) * 6.022e20 * struct + self.sbkg # in cm^-1
            # if not self.__fit__: #Generate all the quantities below while not fitting
            asqf = self.norm * np.array(asqf) * 6.022e20 * struct + self.abkg # in cm^-1
            eisqf = self.norm * np.array(eisqf) * 6.022e20 * struct + self.sbkg # in cm^-1
            csqf = self.norm * np.array(csqf) * 6.022e20 * struct + self.cbkg # in cm^-1
            # NOTE(review): 6.020e20 here vs 6.022e20 everywhere else —
            # likely a typo for Avogadro-scaled 6.022e20; confirm.
            sqerr = np.sqrt(6.020e20*self.flux *self.norm*tsqf*struct*svol+self.sbkg)
            sqwerr = (6.022e20*tsqf * svol * self.flux*self.norm*struct + self.sbkg + 2 * (0.5 - np.random.rand(len(tsqf))) * sqerr)
            dr, rdist, totalR = self.calc_Rdist(tuple(self.__R__[self.__mkeys__[0]]), self.Rsig, self.dist, self.Np)
            self.output_params['Distribution'] = {'x': dr, 'y': rdist}
            self.output_params['simulated_total_w_err'] = {'x': self.x, 'y': sqwerr, 'yerr': sqerr}
            self.output_params['Total'] = {'x': self.x, 'y': sqf}
            self.output_params['Resonant-term'] = {'x': self.x, 'y': asqf}
            self.output_params['SAXS-term'] = {'x': self.x, 'y': eisqf}
            self.output_params['Cross-term'] = {'x': self.x, 'y': csqf}
            self.output_params['rho_r'] = {'x': rhor[:, 0], 'y': rhor[:, 1]}
            self.output_params['eirho_r'] = {'x': eirhor[:, 0], 'y': eirhor[:, 1]}
            self.output_params['adensity_r'] = {'x': adensityr[:, 0], 'y': adensityr[:, 1]}
            self.output_params['Structure_Factor'] = {'x': self.x, 'y': struct}
        return sqf
if __name__=='__main__':
    # Quick smoke test: evaluate the default model on a q-grid and print it.
    x=np.arange(0.001,1.0,0.001)
    fun=Biphasic_Sphere_Uniform(x=x)
    print(fun.y())
| [
"Structure_Factors.hard_sphere_sf",
"numpy.ones_like",
"numpy.abs",
"numpy.sqrt",
"numpy.log",
"numpy.zeros_like",
"numpy.exp",
"numpy.array",
"numpy.sum",
"ff_sphere.ff_sphere_ml",
"numpy.linspace",
"os.path.abspath",
"functools.lru_cache",
"Structure_Factors.sticky_sphere_sf",
"lmfit.P... | [((126, 146), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (141, 146), False, 'import os\n'), ((164, 194), 'os.path.abspath', 'os.path.abspath', (['"""./Functions"""'], {}), "('./Functions')\n", (179, 194), False, 'import os\n'), ((212, 249), 'os.path.abspath', 'os.path.abspath', (['"""./Fortran_routines"""'], {}), "('./Fortran_routines')\n", (227, 249), False, 'import os\n'), ((7113, 7134), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (7122, 7134), False, 'from functools import lru_cache\n'), ((7903, 7924), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (7912, 7924), False, 'from functools import lru_cache\n'), ((8840, 8860), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(2)'}), '(maxsize=2)\n', (8849, 8860), False, 'from functools import lru_cache\n'), ((17474, 17502), 'numpy.arange', 'np.arange', (['(0.001)', '(1.0)', '(0.001)'], {}), '(0.001, 1.0, 0.001)\n', (17483, 17502), True, 'import numpy as np\n'), ((5165, 5177), 'lmfit.Parameters', 'Parameters', ([], {}), '()\n', (5175, 5177), False, 'from lmfit import Parameters\n'), ((7191, 7202), 'numpy.array', 'np.array', (['R'], {}), '(R)\n', (7199, 7202), True, 'import numpy as np\n'), ((7220, 7234), 'numpy.sum', 'np.sum', (['R[:-1]'], {}), '(R[:-1])\n', (7226, 7234), True, 'import numpy as np\n'), ((8020, 8031), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (8028, 8031), True, 'import numpy as np\n'), ((8110, 8126), 'numpy.zeros_like', 'np.zeros_like', (['q'], {}), '(q)\n', (8123, 8126), True, 'import numpy as np\n'), ((8144, 8160), 'numpy.zeros_like', 'np.zeros_like', (['q'], {}), '(q)\n', (8157, 8160), True, 'import numpy as np\n'), ((8177, 8193), 'numpy.zeros_like', 'np.zeros_like', (['q'], {}), '(q)\n', (8190, 8193), True, 'import numpy as np\n'), ((8210, 8226), 'numpy.zeros_like', 'np.zeros_like', (['q'], {}), '(q)\n', (8223, 8226), True, 'import numpy as np\n'), ((3931, 3942), 'numpy.array', 
'np.array', (['x'], {}), '(x)\n', (3939, 3942), True, 'import numpy as np\n'), ((7753, 7766), 'numpy.sum', 'np.sum', (['rdist'], {}), '(rdist)\n', (7759, 7766), True, 'import numpy as np\n'), ((8396, 8419), 'ff_sphere.ff_sphere_ml', 'ff_sphere_ml', (['q', 'r', 'rho'], {}), '(q, r, rho)\n', (8408, 8419), False, 'from ff_sphere import ff_sphere_ml\n'), ((8486, 8511), 'ff_sphere.ff_sphere_ml', 'ff_sphere_ml', (['q', 'r', 'eirho'], {}), '(q, r, eirho)\n', (8498, 8511), False, 'from ff_sphere import ff_sphere_ml\n'), ((8582, 8610), 'ff_sphere.ff_sphere_ml', 'ff_sphere_ml', (['q', 'r', 'adensity'], {}), '(q, r, adensity)\n', (8594, 8610), False, 'from ff_sphere import ff_sphere_ml\n'), ((10822, 10858), 'numpy.ones_like', 'np.ones_like', (['self.__Density__[mkey]'], {}), '(self.__Density__[mkey])\n', (10834, 10858), True, 'import numpy as np\n'), ((16318, 16394), 'numpy.sqrt', 'np.sqrt', (['(6.02e+20 * self.flux * self.norm * tsqf * struct * svol + self.sbkg)'], {}), '(6.02e+20 * self.flux * self.norm * tsqf * struct * svol + self.sbkg)\n', (16325, 16394), True, 'import numpy as np\n'), ((7474, 7500), 'numpy.linspace', 'np.linspace', (['rmin', 'rmax', 'N'], {}), '(rmin, rmax, N)\n', (7485, 7500), True, 'import numpy as np\n'), ((8328, 8339), 'numpy.array', 'np.array', (['R'], {}), '(R)\n', (8336, 8339), True, 'import numpy as np\n'), ((8800, 8820), 'numpy.abs', 'np.abs', (['(pfac * cform)'], {}), '(pfac * cform)\n', (8806, 8820), True, 'import numpy as np\n'), ((12579, 12611), 'numpy.array', 'np.array', (['self.__VolFrac__[mkey]'], {}), '(self.__VolFrac__[mkey])\n', (12587, 12611), True, 'import numpy as np\n'), ((15357, 15377), 'numpy.ones_like', 'np.ones_like', (['self.x'], {}), '(self.x)\n', (15369, 15377), True, 'import numpy as np\n'), ((13279, 13304), 'numpy.ones_like', 'np.ones_like', (['self.x[key]'], {}), '(self.x[key])\n', (13291, 13304), True, 'import numpy as np\n'), ((15446, 15492), 'Structure_Factors.hard_sphere_sf', 'hard_sphere_sf', (['self.x'], {'D': 
'self.D', 'phi': 'self.phi'}), '(self.x, D=self.D, phi=self.phi)\n', (15460, 15492), False, 'from Structure_Factors import hard_sphere_sf, sticky_sphere_sf\n'), ((15536, 15606), 'Structure_Factors.sticky_sphere_sf', 'sticky_sphere_sf', (['self.x'], {'D': 'self.D', 'phi': 'self.phi', 'U': 'self.U', 'delta': '(0.01)'}), '(self.x, D=self.D, phi=self.phi, U=self.U, delta=0.01)\n', (15552, 15606), False, 'from Structure_Factors import hard_sphere_sf, sticky_sphere_sf\n'), ((7584, 7598), 'numpy.log', 'np.log', (['totalR'], {}), '(totalR)\n', (7590, 7598), True, 'import numpy as np\n'), ((7663, 7674), 'numpy.exp', 'np.exp', (['(1.0)'], {}), '(1.0)\n', (7669, 7674), True, 'import numpy as np\n'), ((13435, 13486), 'Structure_Factors.hard_sphere_sf', 'hard_sphere_sf', (['self.x[key]'], {'D': 'self.D', 'phi': 'self.phi'}), '(self.x[key], D=self.D, phi=self.phi)\n', (13449, 13486), False, 'from Structure_Factors import hard_sphere_sf, sticky_sphere_sf\n'), ((13538, 13613), 'Structure_Factors.sticky_sphere_sf', 'sticky_sphere_sf', (['self.x[key]'], {'D': 'self.D', 'phi': 'self.phi', 'U': 'self.U', 'delta': '(0.01)'}), '(self.x[key], D=self.D, phi=self.phi, U=self.U, delta=0.01)\n', (13554, 13613), False, 'from Structure_Factors import hard_sphere_sf, sticky_sphere_sf\n'), ((7556, 7570), 'numpy.log', 'np.log', (['totalR'], {}), '(totalR)\n', (7562, 7570), True, 'import numpy as np\n'), ((15879, 15893), 'numpy.array', 'np.array', (['tsqf'], {}), '(tsqf)\n', (15887, 15893), True, 'import numpy as np\n'), ((16057, 16071), 'numpy.array', 'np.array', (['asqf'], {}), '(asqf)\n', (16065, 16071), True, 'import numpy as np\n'), ((16148, 16163), 'numpy.array', 'np.array', (['eisqf'], {}), '(eisqf)\n', (16156, 16163), True, 'import numpy as np\n'), ((16239, 16253), 'numpy.array', 'np.array', (['csqf'], {}), '(csqf)\n', (16247, 16253), True, 'import numpy as np\n')] |
import numpy as np
import numpy.fft as fft
import matplotlib.pyplot as plt
def fft_demo():
    """Show a two-tone signal, its FFT magnitude spectrum, and a 2-D sine pattern."""
    samples = np.arange(-100, 100, 0.5)
    signal = np.sin(samples) + np.sin(3 * samples)

    # Time-domain view of the summed sinusoids.
    plt.figure()
    plt.plot(samples, signal)
    plt.show()

    # Magnitude spectrum plotted against the (unshifted) FFT sample frequencies.
    plt.figure()
    plt.plot(fft.fftfreq(samples.shape[-1]), abs(fft.fft(signal)))
    plt.show()

    # 2-D interference pattern from the outer product of the sample grid.
    plt.imshow(np.sin(np.outer(samples, samples)))
    plt.show()
if __name__ == '__main__':
    # Run the demo only when executed as a script; the redundant
    # `else: pass` branch was removed (it did nothing).
    fft_demo()
| [
"numpy.fft.fftfreq",
"matplotlib.pyplot.plot",
"numpy.fft.fft",
"matplotlib.pyplot.figure",
"numpy.outer",
"numpy.sin",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((101, 126), 'numpy.arange', 'np.arange', (['(-100)', '(100)', '(0.5)'], {}), '(-100, 100, 0.5)\n', (110, 126), True, 'import numpy as np\n'), ((165, 177), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (175, 177), True, 'import matplotlib.pyplot as plt\n'), ((182, 196), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (190, 196), True, 'import matplotlib.pyplot as plt\n'), ((201, 211), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (209, 211), True, 'import matplotlib.pyplot as plt\n'), ((216, 228), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (226, 228), True, 'import matplotlib.pyplot as plt\n'), ((289, 299), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (297, 299), True, 'import matplotlib.pyplot as plt\n'), ((343, 353), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (351, 353), True, 'import matplotlib.pyplot as plt\n'), ((135, 144), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (141, 144), True, 'import numpy as np\n'), ((147, 160), 'numpy.sin', 'np.sin', (['(3 * x)'], {}), '(3 * x)\n', (153, 160), True, 'import numpy as np\n'), ((242, 266), 'numpy.fft.fftfreq', 'fft.fftfreq', (['x.shape[-1]'], {}), '(x.shape[-1])\n', (253, 266), True, 'import numpy.fft as fft\n'), ((272, 282), 'numpy.fft.fft', 'fft.fft', (['y'], {}), '(y)\n', (279, 282), True, 'import numpy.fft as fft\n'), ((322, 336), 'numpy.outer', 'np.outer', (['x', 'x'], {}), '(x, x)\n', (330, 336), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# Modifications copyright (C) 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import sys
import random
import torch
import numpy as np
import pandas as pd
from tqdm import tqdm, trange
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.nn import CrossEntropyLoss
from tensorboardX import SummaryWriter
from pytorch_pretrained_bert.file_utils import WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.modeling import BertForSequenceClassification
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam
from run_classifier_dataset_utils import processors, convert_examples_to_features, compute_metrics
# Python 2 compatibility: cPickle is the C-accelerated pickle implementation.
if sys.version_info[0] == 2:
    import cPickle as pickle
else:
    import pickle
# Module-level logger named after this module (configured in main()).
logger = logging.getLogger(__name__)
def main():
    """Fine-tune BERT for a given task with given parameters.

    Parses CLI arguments, prepares the task processor, then runs any
    combination of training, evaluation, train+eval restarts,
    cross-validation and visualization depending on the flags.
    """
    # Define all parameters, using argparse/Command Line Interface.
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    def add_args():
        """Add all possible options and defaults to the parser."""
        # Hyperparameters of BERT
        # Parameters often changed
        parser.add_argument("--bert_model",
                            default="bert-base-uncased",
                            type=str,
                            help="Bert pre-trained model selected in the list: bert-base-uncased, "
                                 "bert-large-uncased, bert-base-cased, bert-large-cased, "
                                 "bert-base-multilingual-uncased, bert-base-multilingual-cased, bert-base-chinese.")
        parser.add_argument("--max_seq_length",
                            default=128,
                            type=int,
                            help="The maximum total input sequence length after WordPiece tokenization. \n"
                                 "Sequences longer than this will be truncated, and sequences shorter \n"
                                 "than this will be padded.")
        parser.add_argument("--train_batch_size",
                            default=16,
                            type=int,
                            help="Total batch size for training.")
        parser.add_argument("--learning_rate",
                            default=2e-5,
                            type=float,
                            help="The initial learning rate for Adam.")
        parser.add_argument("--num_train_epochs",
                            default=3.0,
                            type=float,
                            help="Total number of training epochs to perform.")
        parser.add_argument("--do_lower_case",
                            action='store_true',
                            help="Set this flag if you are using an uncased model.")
        # Parameters usually unchanged
        parser.add_argument("--warmup_proportion",
                            default=0.1,
                            type=float,
                            help="Proportion of training to perform linear learning rate warmup for. "
                                 "E.g., 0.1 = 10%% of training.")
        parser.add_argument("--eval_batch_size",
                            default=8,
                            type=int,
                            help="Total batch size for eval.")
        # Parameters of the task
        parser.add_argument("--task_name",
                            default="node",
                            type=str,
                            help="The name of the task to train. One of node, political-as, "
                                 "political-ru, political-asu, agreement, node-ext, political-as-topics,"
                                 "political-ru-topics, political-asu-topics, agreement-topics")
        parser.add_argument("--input_to_use",
                            type=str,
                            default="both",
                            help="Which input to use. One of both, org, response, response-org.")
        # Parameters for reproduction
        parser.add_argument('--seed',
                            type=int,
                            default=42,
                            help="random seed for initialization")
        parser.add_argument('--gradient_accumulation_steps',
                            type=int,
                            default=1,
                            help="Number of updates steps to accumulate before performing a backward/update pass.")
        # Parameters for where to save/load data
        parser.add_argument("--data_dir",
                            default="../data",
                            type=str,
                            help="The input data dir. Should contain the .tsv file (or other data files) for the task.")
        parser.add_argument("--output_dir",
                            default="run",
                            type=str,
                            help="The output directory where the model predictions and checkpoints will be written.")
        parser.add_argument("--cache_dir",
                            default="",
                            type=str,
                            help="Where do you want to store the pre-trained models downloaded from s3")
        parser.add_argument('--overwrite_output_dir',
                            action='store_true',
                            help="Overwrite the content of the output directory")
        # Parameters to decide what to do (train, test, crossval, save the model)
        parser.add_argument("--do_train",
                            action='store_true',
                            help="Whether to run training.")
        parser.add_argument("--do_eval",
                            action='store_true',
                            help="Whether to run eval on the dev set.")
        parser.add_argument("--do_train_eval",
                            action='store_true',
                            help="Whether to run training and eval.")
        parser.add_argument('--n_times',
                            type=int,
                            default=10,
                            help="Number of restarts for every parameter setting in train&eval mode")
        parser.add_argument("--do_cross_val",
                            action='store_true',
                            help="Whether to run cross-validation.")
        parser.add_argument("--do_save",
                            action='store_true',
                            help="Whether to save the resulting model.")
        parser.add_argument("--do_visualization",
                            action='store_true',
                            help="Whether to run visualization.")
        # Additional parameters
        parser.add_argument("--no_cuda",
                            action='store_true',
                            help="Whether not to use CUDA when available")
        parser.add_argument('--log_level',
                            type=str,
                            default="info",
                            help="Verbosity of logging output. One of info or warn.")
    # Add all parameters to the parser and parse them.
    add_args()
    args = parser.parse_args()
    # Set up all parameters given the CLI arguments.
    # Prefer GPU unless unavailable or explicitly disabled via --no_cuda.
    device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    n_gpu = torch.cuda.device_count()
    args.device = device
    task_name = args.task_name.lower()
    # The task processor supplies the examples and the label set.
    processor = processors[task_name](args.input_to_use)
    label_list = processor.get_labels()
    num_labels = len(label_list)
    # Counters shared with the nested do_training/do_eval closures.
    global_step = 0
    tr_loss = 0
    # TensorBoard writer used by do_training for lr/loss curves.
    tb_writer = SummaryWriter()
    # Prepare the logging.
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO if args.log_level == "info" else logging.WARN)
    logger.info("device: {} n_gpu: {}".format(
        device, n_gpu))
    # Check the arguments and fail if the arguments are invalid.
    if not args.do_train and not args.do_eval and not args.do_cross_val and not args.do_visualization \
            and not args.do_train_eval:
        raise ValueError("At least one of `do_train`, `do_eval` `do_cross_val` "
                         "or `do_visualization` or 'do_train_eval` must be True.")
    if os.path.exists(args.output_dir) and os.listdir(
            args.output_dir) and not args.overwrite_output_dir:
        raise ValueError("Output directory ({}) already exists and is not empty. "
                         "Use the --overwrite_output_dir option.".format(args.output_dir))
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))
    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))
    # Calculate the train_batch_size if gradient accumulation is used
    # (the effective batch size stays at the user-requested value).
    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
    # Set all seeds for reproducibility
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
    def get_features_examples(mode):
        """Returns the features and examples of train or test mode.

        Args:
            mode: One of "train", "dev" or "cross_val".

        Returns:
            For "train"/"dev": (features, examples, dataframe).
            For "cross_val": six parallel lists — train features, train
            examples, train dataframes, test features, test examples,
            test dataframes, one entry per split.

        Raises:
            ValueError: If mode is none of the above.
        """
        def convert(split, modus, exs):
            """Converts the examples to features, or loads them from cache.

            The cache file name encodes split, mode, model name, sequence
            length, task and input mode, so features are never reused
            across incompatible configurations.
            """
            cached_features_file = os.path.join(args.data_dir, 'cache', '{0}_{1}_{2}_{3}_{4}_{5}'.format(modus,
                                                                list(filter(None, args.bert_model.split('/'))).pop(),
                                                                str(args.max_seq_length),
                                                                str(task_name), str(args.input_to_use), split))
            # Try to load the cached features.
            try:
                with open(cached_features_file, "rb") as reader:
                    fs = pickle.load(reader)
            # Create and cache the features on a cache miss.
            except FileNotFoundError:
                if not os.path.exists(os.path.join(args.data_dir, 'cache')):
                    os.makedirs(os.path.join(args.data_dir, 'cache'))
                fs = convert_examples_to_features(
                    exs, label_list, args.max_seq_length, tokenizer)
                logger.info('Saving {0} features into cached file {1}'.format(mode, cached_features_file))
                with open(cached_features_file, "wb") as writer:
                    pickle.dump(fs, writer)
            return fs
        # Return the features, examples and dataframes depending on the mode.
        if mode == "train":
            train_ex, df = processor.get_train_examples(args.data_dir)
            return convert("X", mode, train_ex), train_ex, df
        elif mode == "dev":
            dev_ex, df = processor.get_dev_examples(args.data_dir)
            return convert("X", mode, dev_ex), dev_ex, df
        elif mode == "cross_val":
            data = processor.get_splits(args.data_dir)
            train_f_list, train_e_list, train_df_list, test_f_list, test_e_list, test_df_list = ([] for _ in range(6))
            for i, (train_ex, train_df, test_ex, test_df) in enumerate(data):
                train_e_list.append(train_ex)
                train_df_list.append(train_df)
                test_e_list.append(test_ex)
                test_df_list.append(test_df)
                # Create features from the examples (cached per split index i).
                train_f_list.append(convert(i, "train", train_ex))
                test_f_list.append(convert(i, "dev", test_ex))
            return train_f_list, train_e_list, train_df_list, test_f_list, test_e_list, test_df_list
        else:
            raise ValueError("Invalid feature mode.")
def create_tensor_dataset(exfeatures):
"""Creates a TensoDataset out of the features."""
all_input_ids = torch.tensor([f.input_ids for f in exfeatures], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in exfeatures], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in exfeatures], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in exfeatures], dtype=torch.long)
return TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
def do_training(train_fs, train_exs):
"""Runs BERT fine-tuning."""
# Allows to write to enclosed variables global_step
nonlocal global_step
# Create the batched training data out of the features.
train_data = create_tensor_dataset(train_fs)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
# Calculate the number of optimization steps.
num_train_optimization_steps = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer.
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
# Log some information about the training.
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_exs))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
# Set the model to training mode and train for X epochs.
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
# Iterate over all batches.
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
# Get the Logits and calculate the loss.
logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
loss = CrossEntropyLoss()(logits.view(-1, num_labels), label_ids.view(-1))
# Scale the loss in gradient accumulation mode.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
# Calculate the gradients.
loss.backward()
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
# Update the weights every gradient_accumulation_steps steps.
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
global_step += 1
tb_writer.add_scalar('lr', optimizer.get_lr()[0], global_step)
tb_writer.add_scalar('loss', loss.item(), global_step)
def do_save():
"""Saves the current model, tokenizer and arguments."""
nonlocal model
nonlocal tokenizer
model_to_save = model.module if hasattr(model, 'module') else model
# Using the predefined names, we can load using `from_pretrained`.
output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
# Save the trained model, configuration and tokenizer
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(args.output_dir)
# Save the training arguments together with the trained model.
output_args_file = os.path.join(args.output_dir, 'training_args.bin')
torch.save(args, output_args_file)
    def do_eval(eval_features, eval_examples):
        """Do evaluation on the current model.

        Returns:
            (result, preds): a dict of metrics plus the run settings
            (setting keys are prefixed with "_"), and the argmax class
            predictions as a 1-d numpy array.
        """
        # Log some information.
        logger.info("***** Running evaluation *****")
        logger.info(" Num examples = %d", len(eval_examples))
        logger.info(" Batch size = %d", args.eval_batch_size)
        # Get the eval data and create a sequential dataloader.
        eval_data = create_tensor_dataset(eval_features)
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
        # Set the model to eval mode (disable dropout)
        model.eval()
        eval_loss = 0
        nb_eval_steps = 0
        preds = []
        out_label_ids = None
        # Iterate over the evaluation data.
        for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)
            # Forward pass with deactivated autograd engine.
            with torch.no_grad():
                logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
            # Calculate eval loss.
            tmp_eval_loss = CrossEntropyLoss()(logits.view(-1, num_labels), label_ids.view(-1))
            eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            # Accumulate logits and gold labels on the CPU.
            if len(preds) == 0:
                preds.append(logits.detach().cpu().numpy())
                out_label_ids = label_ids.detach().cpu().numpy()
            else:
                preds[0] = np.append(
                    preds[0], logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(
                    out_label_ids, label_ids.detach().cpu().numpy(), axis=0)
        # Calculate the mean loss and get all predictions.
        eval_loss = eval_loss / nb_eval_steps
        # NOTE(review): this reads main's tr_loss counter; do_training only
        # declares global_step nonlocal, so tr_loss may still be 0 here —
        # verify the reported training loss.
        loss = tr_loss/global_step if args.do_train else None
        preds = preds[0]
        preds = np.argmax(preds, axis=1)
        # Compute the metrics for the given task
        result = compute_metrics(task_name, preds, out_label_ids)
        # Save additional information in the result dict.
        result['eval_loss'] = eval_loss
        result['global_step'] = global_step
        result['loss'] = loss
        # Save all settings for external evaluation
        result['_task'] = task_name
        result['_input_mode'] = args.input_to_use
        result['_learning_rate'] = args.learning_rate
        result['_bert-model'] = args.bert_model
        result['_batch_size'] = args.train_batch_size
        result['_warmup'] = args.warmup_proportion
        result['_num_epochs'] = args.num_train_epochs
        result['_seq_len'] = args.max_seq_length
        result['_seed'] = args.seed
        result['_gradient_acc'] = args.gradient_accumulation_steps
        return result, preds
def save_results(result_list, pred_list):
"""Saves the results and the predictions."""
# Save the results in a text file.
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "a") as writer:
logger.info("***** Eval results *****")
for i, result_dict in enumerate(result_list):
logger.info("Run %i", i)
writer.write("Run %i\n" % i)
for key in sorted(result_dict.keys()):
if not key.startswith("_"):
logger.info(" %s = %s", key, str(result_dict[key]))
writer.write("%s = %s\n" % (key, str(result_dict[key])))
# Save the results and predictions in csv and tsv files.
output_csv_file = os.path.join(args.output_dir, "../eval_results.tsv")
output_preds_file = os.path.join(args.output_dir, "../eval_preds.csv")
df_res = pd.DataFrame(result_list)
df_preds = pd.DataFrame(pred_list)
df_preds['run'] = '{0}_{1}_{2}_{3}'.format(
args.bert_model, args.num_train_epochs, args.train_batch_size, args.learning_rate)
# If the files do not exist, create them with headers.
if not os.path.exists(output_csv_file):
df_res.to_csv(output_csv_file, encoding='utf-8', sep='\t', index=False)
df_preds.to_csv(output_preds_file, encoding='utf-8', index=False)
# If the files already exist, just append to them without headers.
else:
df_res.to_csv(output_csv_file, mode='a', encoding='utf-8', sep='\t', index=False, header=False)
df_preds.to_csv(output_preds_file, mode='a', encoding='utf-8', index=False, header=False)
    # Load the tokenizer and the model.
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
    model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels)
    model.to(device)
    # Train and test .
    if args.do_train_eval:
        # Get the train and test features only once.
        train_features, train_examples, _ = get_features_examples("train")
        test_features, test_examples, _ = get_features_examples("dev")
        # Repeat N times.
        for i in range(args.n_times):
            # Train.
            do_training(train_features, train_examples)
            # Eval.
            result, preds = do_eval(test_features, test_examples)
            # Save the results.
            save_results([result], [preds])
            # Reset and new seeds (so each restart differs but is reproducible).
            if i+1 < args.n_times:
                args.seed += 1
                random.seed(args.seed)
                np.random.seed(args.seed)
                torch.manual_seed(args.seed)
                if n_gpu > 0:
                    torch.cuda.manual_seed_all(args.seed)
                # Reset model: reload the pre-trained weights for a fresh restart.
                model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels)
                model.to(device)
    # Training
    if args.do_train:
        # Get the train features.
        features, examples, df = get_features_examples("train")
        # Train.
        do_training(features, examples)
        # Save the model if wanted.
        if args.do_save:
            do_save()
    # Evaluation.
    if args.do_eval:
        # Get the dev features.
        features, examples, df = get_features_examples("dev")
        # Evaluate.
        result, preds = do_eval(features, examples)
        # Save the results.
        save_results([result], [preds])
    # CrossVal.
    if args.do_cross_val:
        # Get the data for all splits
        train_f_l, train_e_l, train_df_l, test_f_l, test_e_l, test_df_l = get_features_examples("cross_val")
        # Iterate over all splits
        for train_features, train_examples, test_features, test_examples in zip(
                train_f_l, train_e_l, test_f_l, test_e_l):
            # Reset model: every split starts from the pre-trained weights.
            model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels)
            model.to(device)
            # Train.
            do_training(train_features, train_examples)
            # Eval.
            result, preds = do_eval(test_features, test_examples)
            # Save results.
            save_results([result], [preds])
    # Visualization.
    if args.do_visualization:
        # Additional imports needed for the visualizations
        # (imported lazily so the rest of the script does not require them).
        import spacy
        from skorch import NeuralNetClassifier
        from sklearn.pipeline import make_pipeline
        from run_classifier_dataset_utils import InputExample
        from anchor import anchor_text
        from lime.lime_text import LimeTextExplainer
        # Example sentences.
        raw_text_1 = "But Mr. Nixon did n't say a word that was ever publicly recorded . Even more incredible , " \
                     "he did n't say a word when the Communists took power in Cuba - not 4 miles off their shores , " \
                     "but only 90 miles off our shores . Mr. Nixon saw what was happening in Cuba ."
        raw_text_2 = "Cordoba House is no act of tolerance, but of excess/arrogance. Building this structure on the " \
                     "edge of the battlefield created by radical Islamists is not a celebration of " \
                     "religious pluralism and mutual tolerance; it is a political statement of shocking arrogance " \
                     "and hypocrisy."
        raw_text_3 = "Are not right no does he alcohol child china play"
        raw_text_list = [raw_text_1, raw_text_2, raw_text_3]
        class BertConverter:
            """Pipeline-Class to convert text to the input format of BERT."""
            def transform(self, X, y=None, **fit_params):
                """Transforms a list of strings to a list of BERT inputs."""
                exs = []
                for text in X:
                    # The label is a placeholder ("attack") — only the text is used.
                    exs.append(InputExample(guid=None, text_a=text, text_b=None, label="attack"))
                visu_features = convert_examples_to_features(exs, label_list, args.max_seq_length, tokenizer)
                all_input_ids = torch.tensor([f.input_ids for f in visu_features], dtype=torch.long)
                all_input_mask = torch.tensor([f.input_mask for f in visu_features], dtype=torch.long)
                all_segment_ids = torch.tensor([f.segment_ids for f in visu_features], dtype=torch.long)
                return [all_input_ids, all_segment_ids, all_input_mask]
            def fit(self, X, y=None, **fit_params):
                # No fitting needed; present only to satisfy the sklearn pipeline API.
                return self
        class MyBERT(torch.nn.Module):
            """Class to wrap the current BERT model."""
            def __init__(self):
                super(MyBERT, self).__init__()
                self.model = model
            def forward(self, X):
                """Apply a softmax function to the output of the BERT model."""
                return torch.nn.functional.softmax(self.model(*X), dim=1)
        # Creates a NeuralNetClassifier (epochs=0/lr=0.0: wrap only, no training).
        if device == torch.device('cuda'):
            net = NeuralNetClassifier(MyBERT, device='cuda', max_epochs=0, lr=0.0, train_split=None)
        else:
            net = NeuralNetClassifier(MyBERT, max_epochs=0, lr=0.0, train_split=None)
        # Set up the pipeline.
        c = make_pipeline(BertConverter(), net)
        # To initialize the pipeline (does not train, because epochs=0).
        c.fit(raw_text_list, y=torch.zeros(len(raw_text_list), dtype=torch.long))
        # Print the predictions and probabilities for the example texts.
        print(c.predict_proba(raw_text_list))
        # Creates the LimeTextExplainer.
        # bow=True to replace all occurrences of a string at once.
        explainer = LimeTextExplainer(class_names=processor.get_labels(), bow=False, mask_string="[UNK]")
        # Explain the first example in the list and save the result using LIME.
        idx = 0
        exp = explainer.explain_instance(raw_text_list[idx], c.predict_proba)
        print('Document id: %d' % idx)
        print('Probability(support) =', c.predict_proba([raw_text_list[idx]])[0, 1])
        print('True class: %s' % "None")
        print(exp.as_list())
        exp.save_to_file(os.path.join(args.output_dir, "lime.html"))
        # Explain the first example using the ANCHOR explainer and save the result.
        nlp = spacy.load("en_core_web_sm")
        explainer2 = anchor_text.AnchorText(nlp, processor.get_labels(), use_unk_distribution=True)
        exp2 = explainer2.explain_instance(raw_text_list[idx], c.predict, threshold=0.95, use_proba=True)
        pred = explainer2.class_names[c.predict([raw_text_list[idx]])[0]]
        alternative = explainer2.class_names[1 - c.predict([raw_text_list[idx]])[0]]
        print('Anchor: %s' % (' AND '.join(exp2.names())))
        print('Precision: %.2f\n' % exp2.precision())
        print('Examples where anchor applies and model predicts %s:\n' % pred)
        print('\n'.join([x[0] for x in exp2.examples(only_same_prediction=True)]))
        print('Examples where anchor applies and model predicts %s:\n' % alternative)
        print('\n'.join([x[0] for x in exp2.examples(only_different_prediction=True)]))
        exp2.save_to_file(os.path.join(args.output_dir, "anchor.html"))
if __name__ == "__main__":
    # Command line program to fine-tune BERT.
    main()
| [
"logging.getLogger",
"torch.nn.CrossEntropyLoss",
"torch.cuda.device_count",
"torch.cuda.is_available",
"pytorch_pretrained_bert.modeling.BertForSequenceClassification.from_pretrained",
"run_classifier_dataset_utils.compute_metrics",
"os.path.exists",
"pytorch_pretrained_bert.tokenization.BertTokenize... | [((1627, 1654), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1644, 1654), False, 'import logging\n'), ((1816, 1895), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (1839, 1895), False, 'import argparse\n'), ((8272, 8297), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (8295, 8297), False, 'import torch\n'), ((8544, 8559), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (8557, 8559), False, 'from tensorboardX import SummaryWriter\n'), ((8592, 8785), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': "(logging.INFO if args.log_level == 'info' else logging.WARN)"}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO if args.log_level == 'info' else\n logging.WARN)\n", (8611, 8785), False, 'import logging\n'), ((10147, 10169), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (10158, 10169), False, 'import random\n'), ((10174, 10199), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (10188, 10199), True, 'import numpy as np\n'), ((10204, 10232), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (10221, 10232), False, 'import torch\n'), ((22178, 22263), 'pytorch_pretrained_bert.tokenization.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['args.bert_model'], {'do_lower_case': 'args.do_lower_case'}), '(args.bert_model, do_lower_case=args.do_lower_case\n )\n', (22207, 22263), False, 'from pytorch_pretrained_bert.tokenization import BertTokenizer\n'), ((22271, 22361), 
'pytorch_pretrained_bert.modeling.BertForSequenceClassification.from_pretrained', 'BertForSequenceClassification.from_pretrained', (['args.bert_model'], {'num_labels': 'num_labels'}), '(args.bert_model, num_labels=\n num_labels)\n', (22316, 22361), False, 'from pytorch_pretrained_bert.modeling import BertForSequenceClassification\n'), ((9272, 9303), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (9286, 9303), False, 'import os\n'), ((9308, 9335), 'os.listdir', 'os.listdir', (['args.output_dir'], {}), '(args.output_dir)\n', (9318, 9335), False, 'import os\n'), ((9569, 9600), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (9583, 9600), False, 'import os\n'), ((9610, 9638), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {}), '(args.output_dir)\n', (9621, 9638), False, 'import os\n'), ((10259, 10296), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (10285, 10296), False, 'import torch\n'), ((12926, 12991), 'torch.tensor', 'torch.tensor', (['[f.input_ids for f in exfeatures]'], {'dtype': 'torch.long'}), '([f.input_ids for f in exfeatures], dtype=torch.long)\n', (12938, 12991), False, 'import torch\n'), ((13017, 13083), 'torch.tensor', 'torch.tensor', (['[f.input_mask for f in exfeatures]'], {'dtype': 'torch.long'}), '([f.input_mask for f in exfeatures], dtype=torch.long)\n', (13029, 13083), False, 'import torch\n'), ((13110, 13177), 'torch.tensor', 'torch.tensor', (['[f.segment_ids for f in exfeatures]'], {'dtype': 'torch.long'}), '([f.segment_ids for f in exfeatures], dtype=torch.long)\n', (13122, 13177), False, 'import torch\n'), ((13202, 13266), 'torch.tensor', 'torch.tensor', (['[f.label_id for f in exfeatures]'], {'dtype': 'torch.long'}), '([f.label_id for f in exfeatures], dtype=torch.long)\n', (13214, 13266), False, 'import torch\n'), ((13282, 13358), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids', 
'all_input_mask', 'all_segment_ids', 'all_label_ids'], {}), '(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n', (13295, 13358), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\n'), ((13670, 13695), 'torch.utils.data.RandomSampler', 'RandomSampler', (['train_data'], {}), '(train_data)\n', (13683, 13695), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\n'), ((13723, 13802), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'sampler': 'train_sampler', 'batch_size': 'args.train_batch_size'}), '(train_data, sampler=train_sampler, batch_size=args.train_batch_size)\n', (13733, 13802), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\n'), ((14437, 14572), 'pytorch_pretrained_bert.optimization.BertAdam', 'BertAdam', (['optimizer_grouped_parameters'], {'lr': 'args.learning_rate', 'warmup': 'args.warmup_proportion', 't_total': 'num_train_optimization_steps'}), '(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.\n warmup_proportion, t_total=num_train_optimization_steps)\n', (14445, 14572), False, 'from pytorch_pretrained_bert.optimization import BertAdam\n'), ((16803, 16846), 'os.path.join', 'os.path.join', (['args.output_dir', 'WEIGHTS_NAME'], {}), '(args.output_dir, WEIGHTS_NAME)\n', (16815, 16846), False, 'import os\n'), ((16876, 16918), 'os.path.join', 'os.path.join', (['args.output_dir', 'CONFIG_NAME'], {}), '(args.output_dir, CONFIG_NAME)\n', (16888, 16918), False, 'import os\n'), ((17260, 17310), 'os.path.join', 'os.path.join', (['args.output_dir', '"""training_args.bin"""'], {}), "(args.output_dir, 'training_args.bin')\n", (17272, 17310), False, 'import os\n'), ((17319, 17353), 'torch.save', 'torch.save', (['args', 'output_args_file'], {}), '(args, output_args_file)\n', (17329, 17353), False, 'import torch\n'), ((17811, 17839), 'torch.utils.data.SequentialSampler', 
'SequentialSampler', (['eval_data'], {}), '(eval_data)\n', (17828, 17839), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\n'), ((17866, 17942), 'torch.utils.data.DataLoader', 'DataLoader', (['eval_data'], {'sampler': 'eval_sampler', 'batch_size': 'args.eval_batch_size'}), '(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)\n', (17876, 17942), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\n'), ((18222, 18262), 'tqdm.tqdm', 'tqdm', (['eval_dataloader'], {'desc': '"""Evaluating"""'}), "(eval_dataloader, desc='Evaluating')\n", (18226, 18262), False, 'from tqdm import tqdm, trange\n'), ((19470, 19494), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (19479, 19494), True, 'import numpy as np\n'), ((19561, 19609), 'run_classifier_dataset_utils.compute_metrics', 'compute_metrics', (['task_name', 'preds', 'out_label_ids'], {}), '(task_name, preds, out_label_ids)\n', (19576, 19609), False, 'from run_classifier_dataset_utils import processors, convert_examples_to_features, compute_metrics\n'), ((20534, 20583), 'os.path.join', 'os.path.join', (['args.output_dir', '"""eval_results.txt"""'], {}), "(args.output_dir, 'eval_results.txt')\n", (20546, 20583), False, 'import os\n'), ((21184, 21236), 'os.path.join', 'os.path.join', (['args.output_dir', '"""../eval_results.tsv"""'], {}), "(args.output_dir, '../eval_results.tsv')\n", (21196, 21236), False, 'import os\n'), ((21265, 21315), 'os.path.join', 'os.path.join', (['args.output_dir', '"""../eval_preds.csv"""'], {}), "(args.output_dir, '../eval_preds.csv')\n", (21277, 21315), False, 'import os\n'), ((21333, 21358), 'pandas.DataFrame', 'pd.DataFrame', (['result_list'], {}), '(result_list)\n', (21345, 21358), True, 'import pandas as pd\n'), ((21378, 21401), 'pandas.DataFrame', 'pd.DataFrame', (['pred_list'], {}), '(pred_list)\n', (21390, 21401), True, 'import pandas as pd\n'), ((28758, 
28786), 'spacy.load', 'spacy.load', (['"""en_core_web_sm"""'], {}), "('en_core_web_sm')\n", (28768, 28786), False, 'import spacy\n'), ((21627, 21658), 'os.path.exists', 'os.path.exists', (['output_csv_file'], {}), '(output_csv_file)\n', (21641, 21658), False, 'import os\n'), ((24382, 24472), 'pytorch_pretrained_bert.modeling.BertForSequenceClassification.from_pretrained', 'BertForSequenceClassification.from_pretrained', (['args.bert_model'], {'num_labels': 'num_labels'}), '(args.bert_model, num_labels=\n num_labels)\n', (24427, 24472), False, 'from pytorch_pretrained_bert.modeling import BertForSequenceClassification\n'), ((27428, 27448), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (27440, 27448), False, 'import torch\n'), ((27468, 27554), 'skorch.NeuralNetClassifier', 'NeuralNetClassifier', (['MyBERT'], {'device': '"""cuda"""', 'max_epochs': '(0)', 'lr': '(0.0)', 'train_split': 'None'}), "(MyBERT, device='cuda', max_epochs=0, lr=0.0,\n train_split=None)\n", (27487, 27554), False, 'from skorch import NeuralNetClassifier\n'), ((27583, 27650), 'skorch.NeuralNetClassifier', 'NeuralNetClassifier', (['MyBERT'], {'max_epochs': '(0)', 'lr': '(0.0)', 'train_split': 'None'}), '(MyBERT, max_epochs=0, lr=0.0, train_split=None)\n', (27602, 27650), False, 'from skorch import NeuralNetClassifier\n'), ((28615, 28657), 'os.path.join', 'os.path.join', (['args.output_dir', '"""lime.html"""'], {}), "(args.output_dir, 'lime.html')\n", (28627, 28657), False, 'import os\n'), ((29627, 29671), 'os.path.join', 'os.path.join', (['args.output_dir', '"""anchor.html"""'], {}), "(args.output_dir, 'anchor.html')\n", (29639, 29671), False, 'import os\n'), ((8201, 8226), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8224, 8226), False, 'import torch\n'), ((15259, 15299), 'tqdm.tqdm', 'tqdm', (['train_dataloader'], {'desc': '"""Iteration"""'}), "(train_dataloader, desc='Iteration')\n", (15263, 15299), False, 'from tqdm import tqdm, trange\n'), 
((18529, 18544), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (18542, 18544), False, 'import torch\n'), ((18707, 18725), 'torch.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (18723, 18725), False, 'from torch.nn import CrossEntropyLoss\n'), ((23049, 23071), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (23060, 23071), False, 'import random\n'), ((23088, 23113), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (23102, 23113), True, 'import numpy as np\n'), ((23130, 23158), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (23147, 23158), False, 'import torch\n'), ((23302, 23392), 'pytorch_pretrained_bert.modeling.BertForSequenceClassification.from_pretrained', 'BertForSequenceClassification.from_pretrained', (['args.bert_model'], {'num_labels': 'num_labels'}), '(args.bert_model, num_labels=\n num_labels)\n', (23347, 23392), False, 'from pytorch_pretrained_bert.modeling import BertForSequenceClassification\n'), ((26426, 26503), 'run_classifier_dataset_utils.convert_examples_to_features', 'convert_examples_to_features', (['exs', 'label_list', 'args.max_seq_length', 'tokenizer'], {}), '(exs, label_list, args.max_seq_length, tokenizer)\n', (26454, 26503), False, 'from run_classifier_dataset_utils import processors, convert_examples_to_features, compute_metrics\n'), ((26536, 26604), 'torch.tensor', 'torch.tensor', (['[f.input_ids for f in visu_features]'], {'dtype': 'torch.long'}), '([f.input_ids for f in visu_features], dtype=torch.long)\n', (26548, 26604), False, 'import torch\n'), ((26638, 26707), 'torch.tensor', 'torch.tensor', (['[f.input_mask for f in visu_features]'], {'dtype': 'torch.long'}), '([f.input_mask for f in visu_features], dtype=torch.long)\n', (26650, 26707), False, 'import torch\n'), ((26742, 26812), 'torch.tensor', 'torch.tensor', (['[f.segment_ids for f in visu_features]'], {'dtype': 'torch.long'}), '([f.segment_ids for f in visu_features], 
dtype=torch.long)\n', (26754, 26812), False, 'import torch\n'), ((10978, 10997), 'pickle.load', 'pickle.load', (['reader'], {}), '(reader)\n', (10989, 10997), False, 'import pickle\n'), ((11250, 11327), 'run_classifier_dataset_utils.convert_examples_to_features', 'convert_examples_to_features', (['exs', 'label_list', 'args.max_seq_length', 'tokenizer'], {}), '(exs, label_list, args.max_seq_length, tokenizer)\n', (11278, 11327), False, 'from run_classifier_dataset_utils import processors, convert_examples_to_features, compute_metrics\n'), ((15609, 15627), 'torch.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (15625, 15627), False, 'from torch.nn import CrossEntropyLoss\n'), ((23209, 23246), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (23235, 23246), False, 'import torch\n'), ((11541, 11564), 'pickle.dump', 'pickle.dump', (['fs', 'writer'], {}), '(fs, writer)\n', (11552, 11564), False, 'import pickle\n'), ((26327, 26392), 'run_classifier_dataset_utils.InputExample', 'InputExample', ([], {'guid': 'None', 'text_a': 'text', 'text_b': 'None', 'label': '"""attack"""'}), "(guid=None, text_a=text, text_b=None, label='attack')\n", (26339, 26392), False, 'from run_classifier_dataset_utils import InputExample\n'), ((11120, 11156), 'os.path.join', 'os.path.join', (['args.data_dir', '"""cache"""'], {}), "(args.data_dir, 'cache')\n", (11132, 11156), False, 'import os\n'), ((11191, 11227), 'os.path.join', 'os.path.join', (['args.data_dir', '"""cache"""'], {}), "(args.data_dir, 'cache')\n", (11203, 11227), False, 'import os\n')] |
from skimage import io, transform
import glob
import tensorflow as tf
import numpy as np
# Directory holding the images to run the classifier on.
path = 'E:/RealTimeIR/predict/'
# Target size: every image is resized to w x h pixels with c channels.
# NOTE(review): the original comment claimed 128x128, but 100x100 is used below.
w = 100
h = 100
c = 3
# Read the images to classify.
def read_img(path, pattern='*.jpg'):
    """Read and resize every image matching ``pattern`` under ``path``.

    Parameters
    ----------
    path : str
        Directory prefix (including the trailing separator) to search.
    pattern : str; default='*.jpg'
        Glob pattern appended to ``path`` (new optional parameter;
        the default preserves the original behavior).

    Returns
    -------
    numpy.ndarray
        float32 array of shape (n_images, w, h, c); empty if nothing matches.
    """
    # comprehension replaces the manual append loop; each image is resized
    # to the module-level (w, h, c) target with reflective edge padding
    imgs = [
        transform.resize(io.imread(filename), (w, h, c), mode="reflect")
        for filename in glob.glob(path + pattern)
    ]
    return np.asarray(imgs, np.float32)
# Convert the images to predict into a dataset array.
x_train = read_img(path)
# ----------- Rebuild the exact network architecture used for training -----------
x = tf.placeholder(tf.float32, shape=[None, w, h, c], name='x')
# First convolutional layer (100 -> 50)
# TensorFlow padding modes: SAME zero-pads so the spatial size is kept,
# VALID does not pad.
conv1 = tf.layers.conv2d(
    inputs=x,
    filters=32,
    kernel_size=[5, 5],
    padding="same",
    activation=tf.nn.relu,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
# Second convolutional layer (50 -> 25)
conv2 = tf.layers.conv2d(
    inputs=pool1,
    filters=64,
    kernel_size=[5, 5],
    padding="same",
    activation=tf.nn.relu,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
# Third convolutional layer (25 -> 12)
conv3 = tf.layers.conv2d(
    inputs=pool2,
    filters=128,
    kernel_size=[3, 3],
    padding="same",
    activation=tf.nn.relu,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2)
# Fourth convolutional layer (12 -> 6)
conv4 = tf.layers.conv2d(
    inputs=pool3,
    filters=128,
    kernel_size=[3, 3],
    padding="same",
    activation=tf.nn.relu,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
pool4 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[2, 2], strides=2)
# Flatten: 6 x 6 spatial grid with 128 channels.
re1 = tf.reshape(pool4, [-1, 6 * 6 * 128])
# Fully connected layers
dense1 = tf.layers.dense(inputs=re1,
                        units=1024,
                        activation=tf.nn.relu,
                        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
dense2 = tf.layers.dense(inputs=dense1,
                        units=512,
                        activation=tf.nn.relu,
                        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
# Output layer: 10 neurons, one logit per class.
logits = tf.layers.dense(inputs=dense2,
                        units=10,
                        activation=None,
                        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
# --------------------------- end of network definition ---------------------------
sess = tf.InteractiveSession()
# Restore the trained weights into the current session.
saver = tf.train.Saver()
saver.restore(sess, 'E:/RealTimeIR/model/10-image-set')
# Run the model: argmax over the logits gives the predicted class index.
predictions = sess.run(tf.argmax(logits, 1), feed_dict={x: x_train})
# print("输出predictions:", predictions)
for predict in predictions:
if predict == 0:
result = "单车"
print("识别结果:单车")
elif predict == 1:
result = "书"
print("识别结果:书")
elif predict == 2:
result = "水瓶"
print("识别结果:水瓶")
elif predict == 3:
result = "汽车"
print("识别结果:汽车")
elif predict == 4:
result = "椅子"
print("识别结果:椅子")
elif predict == 5:
result = "电脑"
print("识别结果:电脑")
elif predict == 6:
result = "人脸"
print("识别结果:人脸")
elif predict == 7:
result = "鞋子"
print("识别结果:鞋子")
elif predict == 8:
result = "桌子"
print("识别结果:桌子")
elif predict == 9:
result = "树"
print("识别结果:树")
else:
result = "识别错误"
print("识别错误")
file_object = open('E:/RealTimeIR/result.txt', 'w+')
# 清空文件内容
file_object.truncate()
file_object.write(result)
file_object.close()
sess.close()
| [
"tensorflow.InteractiveSession",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.layers.max_pooling2d",
"tensorflow.placeholder",
"tensorflow.train.Saver",
"numpy.asarray",
"tensorflow.truncated_normal_initializer",
"tensorflow.argmax",
"skimage.io.imread",
"tensorflow.reshape",
"skimage... | [((505, 564), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, w, h, c]', 'name': '"""x"""'}), "(tf.float32, shape=[None, w, h, c], name='x')\n", (519, 564), True, 'import tensorflow as tf\n'), ((849, 915), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', ([], {'inputs': 'conv1', 'pool_size': '[2, 2]', 'strides': '(2)'}), '(inputs=conv1, pool_size=[2, 2], strides=2)\n', (872, 915), True, 'import tensorflow as tf\n'), ((1142, 1208), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', ([], {'inputs': 'conv2', 'pool_size': '[2, 2]', 'strides': '(2)'}), '(inputs=conv2, pool_size=[2, 2], strides=2)\n', (1165, 1208), True, 'import tensorflow as tf\n'), ((1436, 1502), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', ([], {'inputs': 'conv3', 'pool_size': '[2, 2]', 'strides': '(2)'}), '(inputs=conv3, pool_size=[2, 2], strides=2)\n', (1459, 1502), True, 'import tensorflow as tf\n'), ((1729, 1795), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', ([], {'inputs': 'conv4', 'pool_size': '[2, 2]', 'strides': '(2)'}), '(inputs=conv4, pool_size=[2, 2], strides=2)\n', (1752, 1795), True, 'import tensorflow as tf\n'), ((1803, 1839), 'tensorflow.reshape', 'tf.reshape', (['pool4', '[-1, 6 * 6 * 128]'], {}), '(pool4, [-1, 6 * 6 * 128])\n', (1813, 1839), True, 'import tensorflow as tf\n'), ((2833, 2856), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (2854, 2856), True, 'import tensorflow as tf\n'), ((2877, 2893), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2891, 2893), True, 'import tensorflow as tf\n'), ((225, 250), 'glob.glob', 'glob.glob', (["(path + '*.jpg')"], {}), "(path + '*.jpg')\n", (234, 250), False, 'import glob\n'), ((379, 407), 'numpy.asarray', 'np.asarray', (['imgs', 'np.float32'], {}), '(imgs, np.float32)\n', (389, 407), True, 'import numpy as np\n'), ((2984, 3004), 'tensorflow.argmax', 'tf.argmax', (['logits', '(1)'], {}), '(logits, 
1)\n', (2993, 3004), True, 'import tensorflow as tf\n'), ((266, 279), 'skimage.io.imread', 'io.imread', (['im'], {}), '(im)\n', (275, 279), False, 'from skimage import io, transform\n'), ((294, 342), 'skimage.transform.resize', 'transform.resize', (['img', '(w, h, c)'], {'mode': '"""reflect"""'}), "(img, (w, h, c), mode='reflect')\n", (310, 342), False, 'from skimage import io, transform\n'), ((795, 839), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (826, 839), True, 'import tensorflow as tf\n'), ((1088, 1132), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (1119, 1132), True, 'import tensorflow as tf\n'), ((1382, 1426), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (1413, 1426), True, 'import tensorflow as tf\n'), ((1675, 1719), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (1706, 1719), True, 'import tensorflow as tf\n'), ((2014, 2058), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (2045, 2058), True, 'import tensorflow as tf\n'), ((2104, 2143), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.003)'], {}), '(0.003)\n', (2136, 2143), True, 'import tensorflow as tf\n'), ((2313, 2357), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (2344, 2357), True, 'import tensorflow as tf\n'), ((2403, 2442), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.003)'], {}), '(0.003)\n', (2435, 2442), True, 'import tensorflow as tf\n'), ((2633, 2677), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': 
'(0.01)'}), '(stddev=0.01)\n', (2664, 2677), True, 'import tensorflow as tf\n'), ((2723, 2762), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.003)'], {}), '(0.003)\n', (2755, 2762), True, 'import tensorflow as tf\n')] |
"""
defines:
- model = Boundary(log=None, debug=False)
- model = BoundaryFile(log=None, debug=False)
"""
from __future__ import print_function
import os
from codecs import open
from collections import OrderedDict
import numpy as np
from pyNastran.converters.openfoam.points_file import PointFile
from pyNastran.converters.openfoam.face_file import FaceFile
from pyNastran.utils.log import get_logger2
from pyNastran.utils import check_path
class BoundaryFile(object):
    """Reader for an OpenFOAM ``boundary`` file.

    The boundary file lists the named patches of the mesh; each patch has a
    type, a number of faces (``nFaces``) and the index of its first face in
    the global face list (``startFace``).
    """
    def __init__(self, log=None, debug=False):
        # log : an existing logger or None (one is created); debug controls verbosity
        self.log = get_logger2(log, debug=debug)
    def read_boundary_file(self, boundary_filename):
        """Reads the boundary file.

        The header is skipped by scanning for the first line that parses as
        an integer (the number of boundary patches).

        Returns
        -------
        boundaries : OrderedDict
            {patch_name : [patch_type, nfaces, startface]}
        """
        #p = FoamFile(face_filename)
        #lines = p.read_foam_file()
        #self.log.info('converting')
        i = 0
        nboundaries = 0
        with open(boundary_filename, 'r') as boundary_file:
            while nboundaries == 0:
                line = boundary_file.readline()
                i += 1
                try:
                    nboundaries = int(line)
                except ValueError:
                    # still in the header; keep scanning for the patch count
                    pass
            # skip the opening parenthesis line of the patch list
            line = boundary_file.readline()
            i += 1
            self.log.info('nboundaries = %s' % nboundaries)
            #self.log.info('lineA = %r' % line)
            self.log.info('building boundaries')
            boundaries = OrderedDict()
            boundaries = self._read_boundaries(boundary_file, i, nboundaries, boundaries)
        return boundaries
    def _read_boundaries(self, boundary_file, i, nboundaries, boundaries, basename=''):
        """Parses ``nboundaries`` patch entries from the open file handle.

        Parameters
        ----------
        boundary_file : file
            the open boundary file, positioned at the first patch entry
        i : int
            running line counter (book-keeping only)
        nboundaries : int
            the number of patch entries to read
        boundaries : OrderedDict
            the dict to fill in place; {name : [type, nfaces, startface]}
        basename : str; default=''
            name prefix used for nested group entries

        Returns
        -------
        boundaries : OrderedDict
            the filled {name : [type, nfaces, startface]} mapping
        """
        assert nboundaries > 0, nboundaries
        #read_next = 0
        for unused_j in range(nboundaries):
            # 3(a b c) to [a, b, c]
            # 4(a b c d) to [a, b, c, d]
            nameline = boundary_file.readline()
            i += 1
            name = nameline.strip()
            self.log.info('name = %r' % (basename + name))
            # skip the '{' line that opens the patch dictionary
            unused_openline = boundary_file.readline()
            i += 1
            typeline = boundary_file.readline()
            i += 1
            word, boundary_type = typeline.strip('\n\r\t ;').split()
            nfacesline = boundary_file.readline()
            i += 1
            sline = nfacesline.strip('\n\r\t ;').split()
            word = sline[0]
            if word == 'inGroups' and len(sline) == 1:
                # multi-line inGroups entry: skip the 7-line group block;
                # the last line read holds the (nFaces, value) pair
                #nboundary_line = boundary_file.readline(); i+=1
                #nboundaries2 = int(nboundary_line)
                #openline = boundary_file.readline(); i+=1
                for unused_ii in range(7):
                    groupline = boundary_file.readline()
                    i += 1
                    #self.log.info(ii, groupline)
                sline = groupline.strip('\n\r\t ;').split()
                word, nfaces = sline
                nfaces = int(nfaces)
                self.log.info('nfaces = %r' % nfaces)
                startfacesline = boundary_file.readline()
                i += 1
                self.log.info('startfacesline = %r' % startfacesline)
                word, startfaces = startfacesline.strip('\n\r\t ;').split()
                startfaces = int(startfaces)
                # skip the '}' line that closes the patch dictionary
                unused_closeline = boundary_file.readline()
                i += 1
                boundary_name = basename + name
                if boundary_name in boundaries:
                    msg = ('boundary_name=%r is already defined...'
                           'boundaries must have unique names' % boundary_name)
                    raise KeyError(msg)
                boundaries[boundary_name] = [boundary_type, nfaces, startfaces]
                #self._read_boundaries(boundary_file, i, nboundaries2, boundaries, basename + name)
            else:
                if word == 'inGroups' and len(sline) == 2:
                    # single-line inGroups entry; the real nFaces is on the next line
                    # NOTE(review): ``nfaces`` may be undefined here on the first
                    # patch (it is only set in a previous iteration) -- looks
                    # like it should be ``sline[1]``; confirm before relying on it
                    unused_ingroups = nfaces
                    nfacesline = boundary_file.readline()
                    i += 1
                    word, nfaces = nfacesline.strip('\n\r\t ;').split()
                else:
                    word, nfaces = sline
                nfaces = int(nfaces)
                self.log.info('nfaces = %r' % (nfaces))
                startfacesline = boundary_file.readline()
                i += 1
                self.log.info('startfacesline = %r' % startfacesline)
                word, startfaces = startfacesline.strip('\n\r\t ;').split()
                startfaces = int(startfaces)
                # skip the '}' line that closes the patch dictionary
                unused_closeline = boundary_file.readline()
                i += 1
                boundary_name = basename + name
                if boundary_name in boundaries:
                    raise KeyError('boundary_name=%r is already defined...'
                                   'boundaries must have unique names' % boundary_name)
                boundaries[boundary_name] = [boundary_type, nfaces, startfaces]
        for name, boundary in boundaries.items():
            self.log.info('name=%s boundary=%s' % (name, boundary))
        return boundaries
class Boundary(object):
    """Extracts the boundary (surface) mesh of an OpenFOAM model.

    :meth:`read_openfoam` reads the ``points``, ``faces`` and ``boundary``
    files and returns the nodes, the boundary faces, and an integer patch
    id per face.
    """
    def __init__(self, log=None, debug=False):
        """Creates a Boundary object.

        Parameters
        ----------
        log : logger / None
            an existing logger or None (one will be created)
        debug : bool; default=False
            developer debug flag, forwarded to the point/face readers
        """
        # bug fix: ``debug`` used to be ignored (self.debug was hard-coded
        # to False), so the point/face readers never saw the flag
        self.debug = debug
        self.log = get_logger2(log, debug=debug)

    def read_openfoam(self, point_filename, face_filename, boundary_filename):
        """Reads an OpenFOAM mesh and extracts the boundary faces.

        Parameters
        ----------
        point_filename : str
            the path to the OpenFOAM ``points`` file
        face_filename : str
            the path to the OpenFOAM ``faces`` file
        boundary_filename : str
            the path to the OpenFOAM ``boundary`` file

        Returns
        -------
        nodes : (nnodes, 3) float ndarray
            the xyz coordinates of all the points
        quads : (nboundary_faces, 4) int ndarray
            the faces that belong to a boundary patch
        names : (nboundary_faces, ) int ndarray
            the 1-based patch id of each face
        """
        check_path(face_filename, 'face_filename')
        check_path(point_filename, 'point_filename')
        check_path(boundary_filename, 'boundary_filename')
        assert 'faces' in face_filename, face_filename
        assert 'points' in point_filename, point_filename
        assert 'boundary' in boundary_filename, boundary_filename

        point_file = PointFile(log=self.log, debug=self.debug)
        face_file = FaceFile(log=self.log, debug=self.debug)

        boundary_file = BoundaryFile(log=self.log, debug=False)
        boundaries = boundary_file.read_boundary_file(boundary_filename)
        self.log.info('getting npoints')

        #-------------------------------------------
        # count the boundary faces and collect their global indices so
        # FaceFile only has to read the faces we actually need
        nfaces2 = 0
        ifaces_to_read = []
        for name, boundary in boundaries.items():
            # boundary = [type, nFaces, startFace]
            self.log.info('boundary[%s] = %s' % (name, boundary))
            nfacesi = boundary[1]
            startface = int(boundary[2])
            nfaces2 += nfacesi
            ifaces_to_read += list(np.arange(nfacesi, dtype='int32') + startface)

        self.log.info('nfaces2 = %s' % nfaces2)
        ifaces_to_read = np.ravel(ifaces_to_read)
        if len(ifaces_to_read) != nfaces2:
            raise RuntimeError('len(ifaces_to_read)=%s nfaces2=%s' % (
                ifaces_to_read.shape, nfaces2))
        self.log.info(ifaces_to_read)

        faces = face_file.read_face_file(face_filename, ifaces_to_read=ifaces_to_read)
        del ifaces_to_read

        # read all the points; subsetting the nodes to only those referenced
        # by the boundary faces was permanently disabled (an ``if 0`` block,
        # now removed) because the renumbering did not work reliably
        nodes = point_file.read_point_file(point_filename, ipoints_to_read=None)

        #-------------------------------------------
        # assign a 1-based patch id to every kept face
        iface = 0
        names = np.zeros(nfaces2, dtype='int32')
        iname = 1
        snames = [None] * (len(boundaries) + 1)
        self.log.info('')
        for name, boundary in boundaries.items():
            self.log.info('iname=%s name=%s boundary=%s' % (iname, name, boundary))
            try:
                unused_type = boundary[0]
                nfacesi = int(boundary[1])
                startface = int(boundary[2])
            except (IndexError, TypeError, ValueError):
                # bug fix: the old bare ``except:`` handler called
                # boundary.keys(), which raises AttributeError on a list and
                # masked the real error; log the bad entry and re-raise
                self.log.error('bad boundary entry: name=%r boundary=%s' % (
                    name, boundary))
                raise
            names[iface:iface + nfacesi] = iname
            snames[iname] = name
            iface += nfacesi
            iname += 1

        quads = faces
        self.log.debug('names=%s; max=%s min=%s' % (names, names.max(), names.min()))
        print('done with Boundary')
        return nodes, quads, names
| [
"collections.OrderedDict",
"pyNastran.converters.openfoam.points_file.PointFile",
"pyNastran.converters.openfoam.face_file.FaceFile",
"numpy.zeros",
"pyNastran.utils.check_path",
"numpy.ravel",
"codecs.open",
"pyNastran.utils.log.get_logger2",
"numpy.arange"
] | [((540, 569), 'pyNastran.utils.log.get_logger2', 'get_logger2', (['log'], {'debug': 'debug'}), '(log, debug=debug)\n', (551, 569), False, 'from pyNastran.utils.log import get_logger2\n'), ((5151, 5180), 'pyNastran.utils.log.get_logger2', 'get_logger2', (['log'], {'debug': 'debug'}), '(log, debug=debug)\n', (5162, 5180), False, 'from pyNastran.utils.log import get_logger2\n'), ((5305, 5347), 'pyNastran.utils.check_path', 'check_path', (['face_filename', '"""face_filename"""'], {}), "(face_filename, 'face_filename')\n", (5315, 5347), False, 'from pyNastran.utils import check_path\n'), ((5356, 5400), 'pyNastran.utils.check_path', 'check_path', (['point_filename', '"""point_filename"""'], {}), "(point_filename, 'point_filename')\n", (5366, 5400), False, 'from pyNastran.utils import check_path\n'), ((5409, 5459), 'pyNastran.utils.check_path', 'check_path', (['boundary_filename', '"""boundary_filename"""'], {}), "(boundary_filename, 'boundary_filename')\n", (5419, 5459), False, 'from pyNastran.utils import check_path\n'), ((5892, 5933), 'pyNastran.converters.openfoam.points_file.PointFile', 'PointFile', ([], {'log': 'self.log', 'debug': 'self.debug'}), '(log=self.log, debug=self.debug)\n', (5901, 5933), False, 'from pyNastran.converters.openfoam.points_file import PointFile\n'), ((6068, 6108), 'pyNastran.converters.openfoam.face_file.FaceFile', 'FaceFile', ([], {'log': 'self.log', 'debug': 'self.debug'}), '(log=self.log, debug=self.debug)\n', (6076, 6108), False, 'from pyNastran.converters.openfoam.face_file import FaceFile\n'), ((7525, 7549), 'numpy.ravel', 'np.ravel', (['ifaces_to_read'], {}), '(ifaces_to_read)\n', (7533, 7549), True, 'import numpy as np\n'), ((9833, 9865), 'numpy.zeros', 'np.zeros', (['nfaces2'], {'dtype': '"""int32"""'}), "(nfaces2, dtype='int32')\n", (9841, 9865), True, 'import numpy as np\n'), ((786, 814), 'codecs.open', 'open', (['boundary_filename', '"""r"""'], {}), "(boundary_filename, 'r')\n", (790, 814), False, 'from codecs import 
open\n'), ((1312, 1325), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1323, 1325), False, 'from collections import OrderedDict\n'), ((7241, 7274), 'numpy.arange', 'np.arange', (['nfacesi'], {'dtype': '"""int32"""'}), "(nfacesi, dtype='int32')\n", (7250, 7274), True, 'import numpy as np\n')] |
import tnetwork as tn
import sklearn
import sklearn.metrics
import scipy
import statistics
import networkx as nx
from tnetwork.DCD.analytics.onmi import onmi
import numpy as np
__all__ = ["longitudinal_similarity", "consecutive_sn_similarity", "similarity_at_each_step", "entropy_by_node","nb_node_change","quality_at_each_step","SM_N","SM_P","SM_L"]
def longitudinal_similarity(dynamicCommunityReference:tn.DynCommunitiesSN, dynamicCommunityObserved:tn.DynCommunitiesSN, score=None,convert_coms_sklearn_format=True):
    """
    Longitudinal similarity
    The longitudinal similarity between two dynamic clusters is computed by considering each couple (node,time) as an element belong to a cluster, a cluster containing therefore nodes in differnt times
    It takes into account the fact that the reference might by incomplete by removing from the partition to evaluate all (node,time) not present in the reference.
    :param dynamicCommunityReference: the dynamic partition used as reference (ground truth)
    :param dynamicCommunityObserved: the dynamic partition to evaluate (result of an algorithm)
    :param score: community comparison score, by default the adjsted NMI. (sklearn)
    :param convert_coms_sklearn_format: if the score expect in input clusters represented as in sklearn, True. if False, score will receive in input lists of sets of nodes
    :return: score
    """
    # default score: adjusted NMI over the flattened (node,time) labels
    if score==None:
        score=lambda x,y : sklearn.metrics.adjusted_mutual_info_score(x,y,average_method="arithmetic")
    affilReference=[]
    affilToEvaluate=[]
    if convert_coms_sklearn_format:
        # sklearn format: one flat list of community labels per partition,
        # one entry for every (node,time) couple of the reference
        comsToEvaluate = dynamicCommunityObserved.snapshot_affiliations()
        #for each step
        for t,affils in dynamicCommunityReference.snapshot_affiliations().items():
            #for each node
            for n,comId in affils.items():
                affilReference.append(str(list(comId)[0]))
                if n in comsToEvaluate[t]:
                    affilToEvaluate.append(str(list(comsToEvaluate[t][n])[0]))
                else:
                    # (node,time) missing from the observed partition: assign dummy label "-1"
                    print("node not in partition to evaluate: ",str(n)," ",str(t))
                    affilToEvaluate.append("-1")
    else:
        # set format: each community is a set of (node,time) couples
        affilReference={}
        affilToEvaluate={}
        for t,coms in dynamicCommunityReference.snapshot_communities().items():
            all_nodes = set()
            for id,nodes in coms.items():
                node_sn = {(n,t) for n in nodes}
                all_nodes.update(node_sn)
                affilReference.setdefault(id,set()).update(node_sn)
            # keep only the (node,time) couples also present in the reference
            for id,nodes in dynamicCommunityObserved.snapshot_communities(t).items():
                node_sn = {(n,t) for n in nodes}
                affilToEvaluate.setdefault(id,set()).update(node_sn & all_nodes)
        affilReference = list(affilReference.values())
        affilToEvaluate = list(affilToEvaluate.values())
    return score(affilReference,affilToEvaluate)
def consecutive_sn_similarity(dynamicCommunity:tn.DynCommunitiesSN,score=None):
    """
    Similarity between partitions in consecutive snapshots.

    Computes a similarity score between every pair of successive snapshot
    partitions, together with the average number of affiliations in the two
    partitions compared.

    :param dynamicCommunity: the dynamic partition to evaluate
    :param score: similarity score between two partitions. default: Overlapping NMI
    :return: pair (list of scores, list of partition sizes (avg both partitions))
    """
    # onmi is used by default because the two partitions may have a
    # different number of community labels
    if score==None:
        score=onmi

    snapshots = list(dynamicCommunity.snapshot_communities().values())
    scores = []
    sizes = []
    # compare each snapshot partition with the next one
    for before, after in zip(snapshots, snapshots[1:]):
        partition_before = list(before.values())
        partition_after = list(after.values())
        scores.append(score(partition_before, partition_after))
        n_before = sum(len(com) for com in partition_before)
        n_after = sum(len(com) for com in partition_after)
        sizes.append((n_before + n_after) / 2)
    return scores, sizes
def similarity_at_each_step(dynamicCommunityReference:tn.DynCommunitiesSN, dynamicCommunityObserved:tn.DynCommunitiesSN, score=None):
    """
    Compute similarity at each step

    It takes into account the fact that the reference might by incomplete.
    (remove from the observations all nodes/time not present in the reference)

    :param dynamicCommunityReference: the dynamic partition to use as reference
    :param dynamicCommunityObserved: the dynamic partition to evaluate
    :param score: score to use, default adjusted NMI
    :return: pair (list of scores, list of sizes)
    """
    if score==None:
        score=sklearn.metrics.adjusted_mutual_info_score

    scores = []
    sizes = []
    observed = dynamicCommunityObserved.snapshot_affiliations()
    # score each snapshot of the reference independently
    for t, affils in dynamicCommunityReference.snapshot_affiliations().items():
        reference_labels = []
        observed_labels = []
        for node, com_ids in affils.items():
            reference_labels.append(list(com_ids)[0])
            if node in observed[t]:
                observed_labels.append(list(observed[t][node])[0])
            else:
                # node absent from the observed partition: dummy community
                observed_labels.append("-1")
        scores.append(score(reference_labels, observed_labels))
        sizes.append(len(reference_labels))
    return scores, sizes
def quality_at_each_step(dynamicCommunities:tn.DynCommunitiesSN,dynamicGraph:tn.DynGraphSN, score=None):
    """
    Compute a community quality at each step

    :param dynamicCommunities: dynamic communities as SN
    :param dynamicGraph: dynamic graph as SN, whose snapshots are scored
    :param score: score to use, default: Modularity
    :return: pair(scores, sizes)
    """
    if score is None:
        score = nx.algorithms.community.modularity

    scores = []
    sizes = []
    # for each step
    for t, affils in dynamicCommunities.snapshot_communities().items():
        g = dynamicGraph.snapshots(t)
        partition = list(affils.values())
        try:
            scores.append(score(g, partition))
        except Exception:
            # The score can fail, e.g. when the partition does not cover all
            # nodes of the graph; record None for that step instead of aborting.
            # NOTE: the original used a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt — narrowed to Exception.
            scores.append(None)
        sizes.append(len(g.nodes))
    return scores, sizes
def nb_node_change(dyn_com:tn.DynCommunitiesSN):
    """
    Compute the total number of node changes

    Measure of smoothness at the level of nodes, adapted to evaluate glitches.
    Each time a node's current community differs from the last one it was
    seen in, one change is counted.

    :param dyn_com: The dynamic community
    :return: total number of node changes
    """
    labels_per_node = {}
    for t, coms in dyn_com.snapshot_communities().items():
        for com, nodes in coms.items():
            for node in nodes:
                # history of distinct consecutive labels for this node
                history = labels_per_node.setdefault(node, [com])
                if history[-1] != com:
                    history.append(com)
    # a node with k distinct consecutive labels has changed k-1 times
    return sum(len(history) - 1 for history in labels_per_node.values())
# def entropy(dyn_com,sn_duration=1):
# """
# Compute the entropy.
#
# Consider each community label as a data value. The probability of observing this data value is the frequency of a random node to belong to the corresponding community.
#
# Interpretation: The less communities, the lower the score. The less homogeneous the community sizes, the lower the score.
#
# This score does not take into account the order of the community changes.
#
# Be careful, convert SN graph into IG.
#
#
# :param dyn_com: dynamic community to evaluate, can be SN or IG
# :param sn_duration: if graph is SN, used to
# :return:
# """
# dc2 = dyn_com
# fractions = []
# if isinstance(dc2,tn.DynCommunitiesSN):
# dc2 = dc2.to_DynCommunitiesIG(sn_duration=sn_duration)
# for com,nodes in dc2.communities().items():
# this_com_cumulated = 0
# for n,times in nodes.items():
# this_com_cumulated += times.duration()
# fractions.append(this_com_cumulated)
# sum_durations = sum(fractions)
# fractions = [x/sum_durations for x in fractions]
#
# return scipy.stats.entropy(fractions)
def entropy_by_node(dyn_com,sn_duration=1,fast_on_sn=False):
    """
    Compute the entropy by node.
    For each node, compute the shannon entropy of its labels. (always same label=min entropy, every step a new label=max entropy)
    return the average value for all nodes
    :param dyn_com: dynamic community to evaluate, can be SN or IG
    :param sn_duration: if graph is SN, used to discretize
    :param fast_on_sn: if True, skip the SN->IG conversion and work on the input directly
    :return: average shannon entropy over all nodes
    """
    dc2 = dyn_com
    if not fast_on_sn:
        # convert snapshot representation to interval graph so that label
        # durations are available through times.duration()
        if isinstance(dc2,tn.DynCommunitiesSN):
            if sn_duration==1:
                dc2 = dc2._to_DynCommunitiesIG_fast()
            else:
                dc2 = dc2.to_DynCommunitiesIG(sn_duration=sn_duration)
    all_entropies = []
    for n,coms in dc2.affiliations().items():
        # fraction of this node's lifetime spent under each community label
        fractions = []
        for com,times in coms.items():
            fractions.append(times.duration())
        sum_durations = sum(fractions)
        fractions = [x/sum_durations for x in fractions]
        ent_this_node = scipy.stats.entropy(fractions)
        all_entropies.append(ent_this_node)
    return statistics.mean(all_entropies)
def SM_N(dyn_com):
    """
    Smoothness for nodes

    Defined as the inverse of the total number of node changes.

    :param dyn_com: dynamic partition
    :return: SM-N score
    """
    total_changes = nb_node_change(dyn_com)
    return 1 / total_changes
def SM_P(dyn_com):
    """
    Smoothness for partitions

    Weighted average of the NMI between successive snapshots, weighted by
    the size of the partitions compared.

    :param dyn_com: dynamic partition
    :return: SM-P score
    """
    similarities, weights = consecutive_sn_similarity(dyn_com)
    return np.average(similarities, weights=weights)
def SM_L(dyn_com,sn_duration=1):
    """
    Smoothness for labels

    Defined as the inverse of the average entropy by node.

    :param dyn_com: dynamic partition
    :param sn_duration: duration of snapshots if the provided graph is a snapshot graph
    :return: SM-L score
    """
    avg_entropy = entropy_by_node(dyn_com, sn_duration=sn_duration)
    return 1 / avg_entropy
| [
"statistics.mean",
"scipy.stats.entropy",
"sklearn.metrics.adjusted_mutual_info_score",
"numpy.average"
] | [((9391, 9421), 'statistics.mean', 'statistics.mean', (['all_entropies'], {}), '(all_entropies)\n', (9406, 9421), False, 'import statistics\n'), ((9874, 9934), 'numpy.average', 'np.average', (['consecutive_NMIs[0]'], {'weights': 'consecutive_NMIs[1]'}), '(consecutive_NMIs[0], weights=consecutive_NMIs[1])\n', (9884, 9934), True, 'import numpy as np\n'), ((9304, 9334), 'scipy.stats.entropy', 'scipy.stats.entropy', (['fractions'], {}), '(fractions)\n', (9323, 9334), False, 'import scipy\n'), ((1443, 1520), 'sklearn.metrics.adjusted_mutual_info_score', 'sklearn.metrics.adjusted_mutual_info_score', (['x', 'y'], {'average_method': '"""arithmetic"""'}), "(x, y, average_method='arithmetic')\n", (1485, 1520), False, 'import sklearn\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 13 12:00:40 2020
@author: medrclaa
This file contains the functions for constructing an exit gfates probablity
distribution for an agent given its current heading. Currently, this heading is
determined by the slope of the line through an agents start and current
locations. This could easily be expanded for more complicated agent paths
through an enormous number of methods. For example, taking the last say 5
observations of an agent and fitting a quadratic regression could
be used to predict exits of agents with parabolic paths.
"""
########
#imports
########
import numpy as np
import sys
from shapely.geometry import LineString, Point, Polygon, MultiLineString
import matplotlib.pyplot as plt
from descartes import PolygonPatch
default_colour_cycle = plt.rcParams['axes.prop_cycle'].by_key()["color"]
sys.path.append("..")
sys.path.append("../..")
import modules.default_ukf_configs as configs
from modules.sensors import generate_Camera_Rect
sys.path.append("../../..")
sys.path.append("../../../..")
from stationsim.stationsim_model import Model
def start_gate_heading(start_position, position):
    """estimate which way the agent is heading using its entry and current positions

    Parameters
    ----------
    start_position, position : array_like
        `start position` starting position and current `position` of agents,
        as interleaved xy coordinates (every 2 elements is one agent).

    Returns
    -------
    angles : heading `angles` in radians anticlockwise about east.
    """
    # displacement vector from each agent's entry point to its current location
    displacement = position - start_position
    # even entries are x displacements, odd entries are y displacements
    dx = displacement[0::2]
    dy = displacement[1::2]
    # arctan2 converts cartesian displacements to polar headings
    # (NOT the same as plain arctan: it resolves the correct quadrant)
    return np.arctan2(dy, dx)
def vision_polygon(position, angles, theta, boundary, dist = 1000):
    """Make a cone of vision for an agent theta degrees either side of its heading.
    - draw lines theta degrees either side of the heading about the current position
    - draw an arc segment using these two lines centred at the current position
    - cut any of the arc segment not in the stationsim boundary
    - return the cut arc segment
    - this can have 3-6 sides depending how many corners the agent sees
    Parameters
    ----------
    position : array_like
        numpy array (2nx1) of agent `position`s. Every 2 elements is an xy
        coordinate.
    angles : array_like
        (nx1) array of `angles`. each element is the heading of an agent in radians
        anti-clockwise about east.
    theta : float
        angle `theta` for how wide the agents vision is. 0<theta<pi making the whole
        agentsvision angle 0 < 2theta < 2pi
    boundary : polygon
        `boundary` polygon of stationsim.
    dist : float, optional
        How large are the lines either side of the agents vision projected.
        These need to be much larger than the dimensions of the model otherwise
        the agent will not be able to see exit gates. The default is 1000.
    Returns
    -------
    polys : list
        `polys` list of polygons indicating what an agent can see. It is a cone
        of vision about its heading truncated by the stationsim boundary.
    """
    """If the agents field of vision is convex >180 degrees a different construction
    is required. Drawing a polygon from the given list of coordinates
    will result in the complementary polygon of angle <180 degrees being produced.
    For example, we draw a polygon between three points we expect a triangle.
    However in this case we would want the entire rectangular boundary with
    this triangle removed instead. Since we cannot draw this polygon without
    all of the boundary points included we instaed draw the same triangle polygon
    but take the difference rather than the intersection with the boundary.
    !! a little plot would explain this better."""
    # half-angle above pi/2 means the full field of view exceeds 180 degrees
    convex = False
    if theta >np.pi/2:
        convex = True
    #angles theta either side of the agent headings
    theta1 = (angles - theta)
    theta2 = (angles + theta)
    polys = []
    # one vision polygon per agent (positions are interleaved xy pairs)
    for i in range(int(position.shape[0]/2)):
        p = position[(2*i):((2*i)+2)]
        #project lines theta either side of the headings
        line1 = LineString([Point(p), Point(p[0] + (dist * np.cos(theta1[i])),
                                       p[1] + dist * np.sin(theta1[i]))])
        line2 = LineString([Point(p), Point(p[0] + (dist * np.cos(theta2[i])),
                                       p[1] + (dist * np.sin(theta2[i])))])
        # Stack coords for one polygon. Note line2 is reversed so points are ordered properly.
        # This is an (4x2 array) of xy coords
        coords = np.vstack([np.array(line1.coords),
                            np.array(line2.coords[::-1]),
                            ])
        poly = Polygon(LineString(coords))
        #if the angle of vision is not convex we keep the normal polygon
        if not convex:
            cut_poly = boundary.intersection(poly)
        else:
            """ if the angle is convex >180 the polygon intersection will be
            for the complement shape with angle <180. Hence we want to take
            the complement of the complement shape to get back to where we started
            I.E. the convex angled polygon.
            """
            cut_poly = boundary.difference(poly)
        polys.append(cut_poly)
    return polys
def cut_boundaries(polys, boundary):
    """take a subsection of the boundary that each agent can see

    Parameters
    --------
    polys : list
        list of polygons `polys` of intersection between agents vision cone
        and boundary.
    boundary : polygon
        polygon for stationsim `boundary`

    Returns
    -------
    cut_bounds : list
        `cut_bounds` list of linestrings indicating which parts of the boundary
        each agent sees.
    """
    outer_ring = boundary.exterior
    # intersect each vision polygon's outline with the model boundary outline
    return [poly.exterior.intersection(outer_ring) for poly in polys]
def exit_gate_probabilities(gates, cut_boundaries, scale = True):
    """ Build a probability distribution on the length of each gate in an agents vision

    For each agent's visible boundary section, measure how much of each exit
    gate it contains. If `scale` is True, divide the visible gate lengths by
    their sum so they form an empirical probability distribution. For example,
    if an agent sees two exit gates but 9x more length of gate 1, gate 1 gets
    probability 0.9 and gate 2 gets 0.1.

    Parameters
    ----------
    gates, cut_boundaries : list
        lists of exit `gates` polygons and `cut_boundaries` LineStrings
    scale : bool
        if `scale`, convert the visible gate lengths into proportions that
        sum to 1 (left unscaled when the agent sees no gate at all).

    Returns
    -------
    gate_probabilities : ndarray
        one row per agent, one column per gate.
    """
    all_lengths = []
    for visible_boundary in cut_boundaries:
        # length of each exit gate that falls inside this agent's vision
        lengths = [visible_boundary.intersection(gate).length for gate in gates]
        total = sum(lengths)
        if scale and total != 0:
            # normalise so the visible gate lengths form a distribution
            lengths = [length / total for length in lengths]
        all_lengths.append(lengths)
    return np.array(all_lengths)
def exit_points_to_polys(exit_gates, boundary, buffer):
    """convert numpy array of gate centroids to list of circle polygons

    Parameters
    --------
    exit_gates : array_like
        `exit_gates` numpy array containing the central point of each
        (semi-)circular exit gate, one row per gate.
    boundary : polygon
        `boundary` of the stationsim corridor.
    buffer : float
        `buffer` radius of the exit polygon circles. usually 1.

    Returns
    -------
    exit_polys : list
        `exit_polys` list of polygons representing the exit gates
    """
    exit_polys = []
    for i in range(exit_gates.shape[0]):
        # circle of radius `buffer` around the gate centre, clipped so the
        # gate polygon stays inside the corridor
        circle = Point(exit_gates[i, :]).buffer(buffer)
        exit_polys.append(circle.intersection(boundary))
    return exit_polys
def plot_vision(cut_bounds, exit_polys, polys, boundary):
    """plot polygons of agents vision and exit gates.

    Parameters
    --------
    cut_bounds : list
        list of `cut_bounds` indicating which sections of exit gates are seen
        by an agent (if any).
    exit_polys, polys : list
        `exit_polys` lists of the circular exit gate polygons and the
        agent vision polygons `polys`.
    boundary : polygon
        corridor `boundary`, used to size the plot axes.

    Returns
    -------
    None.
    """
    width = boundary.exterior.bounds[2]
    height = boundary.exterior.bounds[3]

    f = plt.figure()
    ax = f.add_subplot(111)

    # draw the outline of every exit gate in black
    for item in exit_polys:
        item = np.array(item.exterior.coords.xy).T
        plt.plot(item[:, 0], item[:, 1], color = "k")

    # draw each agent's vision cone, cycling through the default colours
    for i, item in enumerate(polys):
        patch = PolygonPatch(item, alpha = 0.1, color = default_colour_cycle[int(i%10)])
        ax.add_patch(patch)
        item = np.array(item.exterior.coords.xy).T
        plt.plot(item[:,0], item[:,1])

    # highlight the boundary sections each agent can see in red
    for item in cut_bounds:
        if type(item) == MultiLineString:
            for sub_item in item:
                sub_item = np.array(sub_item.coords.xy).T
                plt.plot(sub_item[:,0], sub_item[:, 1], color = "red")
        else:
            item = np.array(item.coords.xy).T
            plt.plot(item[:,0], item[:, 1], color = "red")

    ax.set_xlim([0, width])
    ax.set_ylim([0, height])
    # BUG FIX: the original assigned `plt.title = "..."`, which silently
    # replaced the pyplot title *function* with a string instead of setting
    # the figure title (and broke any later plt.title(...) call).
    plt.title("Each Agents vision of exit gates.")
    plt.show()
def heading_importance_function(position, start_position, theta, boundary, exit_polys):
    """calculate empirical exit gate probabilities based on start and current agent positions

    Returns
    -------
    (gate_probabilities, polys, cut_bounds) : tuple
        gate probability array, agent vision polygons, and visible boundary
        sections, in that order.
    """
    # heading of each agent inferred from its entry and current positions
    headings = start_gate_heading(start_position, position)
    # cone of vision about each heading, truncated by the model boundary
    vision_polys = vision_polygon(position, headings, theta, boundary)
    # sections of the boundary each agent can actually see
    visible_bounds = cut_boundaries(vision_polys, boundary)
    gate_probabilities = exit_gate_probabilities(exit_polys, visible_bounds)
    return gate_probabilities, vision_polys, visible_bounds
def main(n, importance_function):
    """test function for agent densities to assume it works
    :param n: population size for the stationsim model
    :param importance_function: callable mapping (position, start_position,
        theta, boundary, exit_polys) to (gate_probabilities, polys, cut_bounds)
    Returns
    -------
    None.
    """
    # build a stationsim model with n agents using the default configuration
    model_params = configs.model_params
    model_params["pop_total"] = n
    ukf_params = configs.ukf_params
    base_model = Model(**model_params)
    start_position = np.hstack([agent.loc_start for agent in base_model.agents])
    end_position = np.hstack([agent.loc_desire for agent in base_model.agents])
    width = model_params["width"]
    height = model_params["height"]
    # rectangular corridor boundary of the model
    boundary = generate_Camera_Rect(np.array([0, 0]),
                                    np.array([0, height]),
                                    np.array([width, height]),
                                    np.array([width, 0]))
    buffer = base_model.gates_space
    exit_gates = base_model.gates_locations[-base_model.gates_out:]
    exit_polys = exit_points_to_polys(exit_gates, boundary, buffer)
    # step the model forward so agents have moved away from their start gates
    for _ in range(10):
        base_model.step()
    position = base_model.get_state(sensor = "location")
    # agents see theta radians either side of their heading
    theta = np.pi/12
    #start_position = np.array([50, 50])
    #position = np.array([25, 25])
    #exit_gates = np.array([[0,5], [5,0]])
    #exit_polys = exit_points_to_polys(exit_gates, boundary, buffer)
    gate_probabilities, polys, cut_bounds = importance_function(position,
                                                               start_position,
                                                               theta,
                                                               boundary,
                                                               exit_polys)
    print(gate_probabilities)
    plot_vision(cut_bounds, exit_polys, polys, boundary)
if __name__ == "__main__":
n = 1
main(n, heading_importance_function)
| [
"numpy.hstack",
"matplotlib.pyplot.plot",
"shapely.geometry.Point",
"numpy.array",
"matplotlib.pyplot.figure",
"shapely.geometry.LineString",
"numpy.arctan2",
"stationsim.stationsim_model.Model",
"numpy.cos",
"numpy.sin",
"sys.path.append",
"matplotlib.pyplot.show"
] | [((885, 906), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (900, 906), False, 'import sys\n'), ((907, 931), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (922, 931), False, 'import sys\n'), ((1028, 1055), 'sys.path.append', 'sys.path.append', (['"""../../.."""'], {}), "('../../..')\n", (1043, 1055), False, 'import sys\n'), ((1056, 1086), 'sys.path.append', 'sys.path.append', (['"""../../../.."""'], {}), "('../../../..')\n", (1071, 1086), False, 'import sys\n'), ((1854, 1870), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (1864, 1870), True, 'import numpy as np\n'), ((8383, 8415), 'numpy.array', 'np.array', (['main_intersect_lengths'], {}), '(main_intersect_lengths)\n', (8391, 8415), True, 'import numpy as np\n'), ((9856, 9868), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9866, 9868), True, 'import matplotlib.pyplot as plt\n'), ((10773, 10783), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10781, 10783), True, 'import matplotlib.pyplot as plt\n'), ((11585, 11606), 'stationsim.stationsim_model.Model', 'Model', ([], {}), '(**model_params)\n', (11590, 11606), False, 'from stationsim.stationsim_model import Model\n'), ((11628, 11687), 'numpy.hstack', 'np.hstack', (['[agent.loc_start for agent in base_model.agents]'], {}), '([agent.loc_start for agent in base_model.agents])\n', (11637, 11687), True, 'import numpy as np\n'), ((11707, 11767), 'numpy.hstack', 'np.hstack', (['[agent.loc_desire for agent in base_model.agents]'], {}), '([agent.loc_desire for agent in base_model.agents])\n', (11716, 11767), True, 'import numpy as np\n'), ((9985, 10028), 'matplotlib.pyplot.plot', 'plt.plot', (['item[:, 0]', 'item[:, 1]'], {'color': '"""k"""'}), "(item[:, 0], item[:, 1], color='k')\n", (9993, 10028), True, 'import matplotlib.pyplot as plt\n'), ((10254, 10286), 'matplotlib.pyplot.plot', 'plt.plot', (['item[:, 0]', 'item[:, 1]'], {}), '(item[:, 0], item[:, 1])\n', (10262, 10286), 
True, 'import matplotlib.pyplot as plt\n'), ((11879, 11895), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (11887, 11895), True, 'import numpy as np\n'), ((11934, 11955), 'numpy.array', 'np.array', (['[0, height]'], {}), '([0, height])\n', (11942, 11955), True, 'import numpy as np\n'), ((11993, 12018), 'numpy.array', 'np.array', (['[width, height]'], {}), '([width, height])\n', (12001, 12018), True, 'import numpy as np\n'), ((12057, 12077), 'numpy.array', 'np.array', (['[width, 0]'], {}), '([width, 0])\n', (12065, 12077), True, 'import numpy as np\n'), ((4995, 5013), 'shapely.geometry.LineString', 'LineString', (['coords'], {}), '(coords)\n', (5005, 5013), False, 'from shapely.geometry import LineString, Point, Polygon, MultiLineString\n'), ((9941, 9974), 'numpy.array', 'np.array', (['item.exterior.coords.xy'], {}), '(item.exterior.coords.xy)\n', (9949, 9974), True, 'import numpy as np\n'), ((10210, 10243), 'numpy.array', 'np.array', (['item.exterior.coords.xy'], {}), '(item.exterior.coords.xy)\n', (10218, 10243), True, 'import numpy as np\n'), ((10600, 10645), 'matplotlib.pyplot.plot', 'plt.plot', (['item[:, 0]', 'item[:, 1]'], {'color': '"""red"""'}), "(item[:, 0], item[:, 1], color='red')\n", (10608, 10645), True, 'import matplotlib.pyplot as plt\n'), ((4412, 4420), 'shapely.geometry.Point', 'Point', (['p'], {}), '(p)\n', (4417, 4420), False, 'from shapely.geometry import LineString, Point, Polygon, MultiLineString\n'), ((4565, 4573), 'shapely.geometry.Point', 'Point', (['p'], {}), '(p)\n', (4570, 4573), False, 'from shapely.geometry import LineString, Point, Polygon, MultiLineString\n'), ((4861, 4883), 'numpy.array', 'np.array', (['line1.coords'], {}), '(line1.coords)\n', (4869, 4883), True, 'import numpy as np\n'), ((4912, 4940), 'numpy.array', 'np.array', (['line2.coords[::-1]'], {}), '(line2.coords[::-1])\n', (4920, 4940), True, 'import numpy as np\n'), ((9138, 9161), 'shapely.geometry.Point', 'Point', (['exit_gates[i, :]'], {}), '(exit_gates[i, 
:])\n', (9143, 9161), False, 'from shapely.geometry import LineString, Point, Polygon, MultiLineString\n'), ((10473, 10526), 'matplotlib.pyplot.plot', 'plt.plot', (['sub_item[:, 0]', 'sub_item[:, 1]'], {'color': '"""red"""'}), "(sub_item[:, 0], sub_item[:, 1], color='red')\n", (10481, 10526), True, 'import matplotlib.pyplot as plt\n'), ((10561, 10585), 'numpy.array', 'np.array', (['item.coords.xy'], {}), '(item.coords.xy)\n', (10569, 10585), True, 'import numpy as np\n'), ((10426, 10454), 'numpy.array', 'np.array', (['sub_item.coords.xy'], {}), '(sub_item.coords.xy)\n', (10434, 10454), True, 'import numpy as np\n'), ((4443, 4460), 'numpy.cos', 'np.cos', (['theta1[i]'], {}), '(theta1[i])\n', (4449, 4460), True, 'import numpy as np\n'), ((4516, 4533), 'numpy.sin', 'np.sin', (['theta1[i]'], {}), '(theta1[i])\n', (4522, 4533), True, 'import numpy as np\n'), ((4596, 4613), 'numpy.cos', 'np.cos', (['theta2[i]'], {}), '(theta2[i])\n', (4602, 4613), True, 'import numpy as np\n'), ((4670, 4687), 'numpy.sin', 'np.sin', (['theta2[i]'], {}), '(theta2[i])\n', (4676, 4687), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# import tensorflow as tf
# zero_out_module = tf.load_op_library('../lib/zero_out.so')
# with tf.Session(''):
# print(zero_out_module.zero_out([[1, 2], [3, 4]]).eval())
import numpy as np
import tensorflow as tf
class ExampleOpsTest(tf.test.TestCase):
    """Tests for the custom TensorFlow ops compiled into lib/example_ops.so."""

    def setUp(self):
        # load the compiled custom-op shared library before each test
        self.op_module = tf.load_op_library('lib/example_ops.so')

    def testZeroOut(self):
        # zero_out keeps the first element and zeroes out the rest
        with self.test_session():
            v = tf.constant([5, 4, 3, 2, 1])
            result = self.op_module.zero_out(v)
            self.assertAllEqual(result.eval(), [5, 0, 0, 0, 0])

    def testDummyFloat(self):
        with self.test_session():
            v = tf.constant([[5., 4], [3, 2]])
            result = self.op_module.dummy_float(v, v + 2)
            # expected output: 0..size-1 reshaped to the input's shape
            answer = np.arange(v.eval().size).reshape(v.shape)
            self.assertAllEqual(result.eval(), answer)
class CatConvTest(tf.test.TestCase):
    """Tests for the cat_conv custom op compiled into lib/catconv_ops.so."""

    def setUp(self):
        # load the compiled custom-op shared library before each test
        self.op_module = tf.load_op_library('lib/catconv_ops.so')

    def testCatConv(self):
        with self.test_session():
            # n: batch, c: channels, h/w: spatial dims; g groups, k x ll kernel
            n, c, h, w = 2, 6, 5, 4
            g, k, ll = 2, 3, 3
            X = np.random.randint(0, 16, size=(n, c, h, w), dtype=np.int32)
            filt = np.random.randn(g, c, k, ll).astype(np.float32)
            X, filt = tf.constant(X), tf.constant(filt)
            result = self.op_module.cat_conv(X, filt).eval()
            out_shape = result.shape
            # valid-style convolution with a 3x3 kernel shrinks h and w by 1 each
            # in this op's convention — TODO confirm against the op's kernel code
            self.assertAllEqual(out_shape, (n, g, h - 1, w - 1))
if __name__ == "__main__":
tf.test.main()
| [
"tensorflow.load_op_library",
"tensorflow.test.main",
"numpy.random.randint",
"tensorflow.constant",
"numpy.random.randn"
] | [((1534, 1548), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (1546, 1548), True, 'import tensorflow as tf\n'), ((335, 375), 'tensorflow.load_op_library', 'tf.load_op_library', (['"""lib/example_ops.so"""'], {}), "('lib/example_ops.so')\n", (353, 375), True, 'import tensorflow as tf\n'), ((969, 1009), 'tensorflow.load_op_library', 'tf.load_op_library', (['"""lib/catconv_ops.so"""'], {}), "('lib/catconv_ops.so')\n", (987, 1009), True, 'import tensorflow as tf\n'), ((454, 482), 'tensorflow.constant', 'tf.constant', (['[5, 4, 3, 2, 1]'], {}), '([5, 4, 3, 2, 1])\n', (465, 482), True, 'import tensorflow as tf\n'), ((676, 707), 'tensorflow.constant', 'tf.constant', (['[[5.0, 4], [3, 2]]'], {}), '([[5.0, 4], [3, 2]])\n', (687, 707), True, 'import tensorflow as tf\n'), ((1155, 1214), 'numpy.random.randint', 'np.random.randint', (['(0)', '(16)'], {'size': '(n, c, h, w)', 'dtype': 'np.int32'}), '(0, 16, size=(n, c, h, w), dtype=np.int32)\n', (1172, 1214), True, 'import numpy as np\n'), ((1304, 1318), 'tensorflow.constant', 'tf.constant', (['X'], {}), '(X)\n', (1315, 1318), True, 'import tensorflow as tf\n'), ((1320, 1337), 'tensorflow.constant', 'tf.constant', (['filt'], {}), '(filt)\n', (1331, 1337), True, 'import tensorflow as tf\n'), ((1234, 1262), 'numpy.random.randn', 'np.random.randn', (['g', 'c', 'k', 'll'], {}), '(g, c, k, ll)\n', (1249, 1262), True, 'import numpy as np\n')] |
from math import pi, sin, cos
from numpy import sign
import tf.transformations
from geometry_msgs.msg import Quaternion
from nav_msgs.msg import Odometry
def yaw_from_odom_message(odom):
    """Converts an Odometry message into an Euler yaw value
    Parameters:
    :param Odometry odom:
    :rtype: float
    """
    # euler_from_quaternion returns (roll, pitch, yaw); index [2] is the yaw
    return tf.transformations.euler_from_quaternion(
        [
            odom.pose.pose.orientation.x,
            odom.pose.pose.orientation.y,
            odom.pose.pose.orientation.z,
            odom.pose.pose.orientation.w,
        ])[2]
def heading_from_odometry(odom):
    """Return the heading (Euler yaw, radians) extracted from an Odometry message."""
    return yaw_from_odom_message(odom)
def normalize_theta(theta):
    """Normalize an angle to the range 0.0 <= theta < 2*pi, always positive.

    :param float theta: angle in radians, of any sign or magnitude
    :return: equivalent angle in [0, 2*pi)
    :rtype: float
    """
    # Python's `%` with a positive divisor always returns a non-negative
    # result, so a single modulo suffices even for negative inputs. (The
    # original re-checked for a negative remainder, but that branch could
    # never execute.)
    return theta % (2 * pi)
def calc_steering_angle(current_heading, target_heading):
    """Return the signed shortest rotation (radians) from current to target heading.

    The magnitude of the result never exceeds pi; the sign gives the
    direction of rotation.
    """
    delta = normalize_theta(target_heading) - normalize_theta(current_heading)
    if abs(delta) > pi:
        # Going the other way around the circle is shorter: take the
        # complementary angle and flip the direction of rotation.
        delta = ((2 * pi) - abs(delta)) * (sign(delta) * -1)
    return delta
def calc_world_frame_pose(world_x_velocity, world_y_velocity, world_angular_velocity,
                          begin_world_x, begin_world_y, begin_world_theta, time_delta_secs):
    """Given world velocity vectors, movement duration, and beginning world coordinates
    calculate the new world coordinates.

    Velocities are assumed constant over `time_delta_secs`; the resulting
    heading is normalized to [0, 2*pi).
    """
    dt = time_delta_secs
    new_x = begin_world_x + world_x_velocity * dt
    new_y = begin_world_y + world_y_velocity * dt
    new_theta = normalize_theta(begin_world_theta + world_angular_velocity * dt)
    return (new_x, new_y, new_theta)
def calc_world_frame_velocity(x_linear_v, y_linear_v, z_angular_v, world_theta):
    """Rotate robot-frame velocities into the world frame.

    Applies the standard 2D rotation matrix
    (https://en.wikipedia.org/wiki/Rotation_matrix) to the linear velocity
    vector; the angular velocity is unaffected by the rotation.

    :return: tuple (world_x_velocity, world_y_velocity, world_angular_velocity)
    """
    cos_t = cos(world_theta)
    sin_t = sin(world_theta)
    world_x_velocity = x_linear_v * cos_t - y_linear_v * sin_t
    world_y_velocity = x_linear_v * sin_t + y_linear_v * cos_t
    return (world_x_velocity, world_y_velocity, z_angular_v)
def create_odometry_message(world_x, world_y, world_theta,
                            world_x_linear_v, world_y_linear_v, world_z_angular_v,
                            odom_time, base_frame_id, world_frame_id):
    """Build a nav_msgs/Odometry message from a world-frame pose and velocity.

    :param world_x, world_y, world_theta: pose in the world frame (theta in radians)
    :param world_x_linear_v, world_y_linear_v, world_z_angular_v: world-frame velocities
    :param odom_time: timestamp for the message header
    :param base_frame_id: frame id of the robot base (child frame)
    :param world_frame_id: frame id of the world/odom frame
    :return: populated Odometry message
    """
    # Convert world orientation (theta) to a Quaternion for use with tf and Odometry
    quat_vals = tf.transformations.quaternion_from_euler(0, 0, world_theta)
    quat = Quaternion()
    quat.x = quat_vals[0]
    quat.y = quat_vals[1]
    quat.z = quat_vals[2]
    quat.w = quat_vals[3]
    odom = Odometry()
    odom.header.stamp = odom_time
    odom.header.frame_id = world_frame_id
    odom.pose.pose.position.x = world_x
    odom.pose.pose.position.y = world_y
    odom.pose.pose.position.z = 0.0  # Because this robot can't fly to a vertical position
    odom.pose.pose.orientation = quat
    odom.child_frame_id = base_frame_id
    odom.twist.twist.linear.x = world_x_linear_v
    odom.twist.twist.linear.y = world_y_linear_v
    odom.twist.twist.angular.z = world_z_angular_v
    return odom
| [
"nav_msgs.msg.Odometry",
"math.cos",
"geometry_msgs.msg.Quaternion",
"numpy.sign",
"math.sin"
] | [((1128, 1144), 'numpy.sign', 'sign', (['diff_angle'], {}), '(diff_angle)\n', (1132, 1144), False, 'from numpy import sign\n'), ((2928, 2940), 'geometry_msgs.msg.Quaternion', 'Quaternion', ([], {}), '()\n', (2938, 2940), False, 'from geometry_msgs.msg import Quaternion\n'), ((3057, 3067), 'nav_msgs.msg.Odometry', 'Odometry', ([], {}), '()\n', (3065, 3067), False, 'from nav_msgs.msg import Odometry\n'), ((2294, 2310), 'math.cos', 'cos', (['world_theta'], {}), '(world_theta)\n', (2297, 2310), False, 'from math import pi, sin, cos\n'), ((2326, 2342), 'math.sin', 'sin', (['world_theta'], {}), '(world_theta)\n', (2329, 2342), False, 'from math import pi, sin, cos\n'), ((2379, 2395), 'math.sin', 'sin', (['world_theta'], {}), '(world_theta)\n', (2382, 2395), False, 'from math import pi, sin, cos\n'), ((2411, 2427), 'math.cos', 'cos', (['world_theta'], {}), '(world_theta)\n', (2414, 2427), False, 'from math import pi, sin, cos\n')] |
"""
library containing differential equation solver for d=1 kernel elements
"""
__author__ = " <NAME>"
__email__ = "<EMAIL>"
import os
import numpy as np
from scipy.special import factorial
import pdb
np.seterr(divide='ignore', invalid='ignore')
from scipy import special
from scipy.integrate import odeint, solve_ivp
from cosmoboost.lib import FileHandler as fh
from cosmoboost.lib import MatrixHandler as mh
from cosmoboost.lib.mytimer import timeit
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARN)
# number of time points at which the ODE solution is returned:
# only the initial and final steps are needed (see solve_K_T_ODE)
N = 2  # first and last
def dK_deta(eta, Kstore, Bmatrix):
    """Right-hand side of the kernel ODE: dK/d(eta) for every stored (m, L) element."""
    weighted = Bmatrix * Kstore
    return mh.shift_left(weighted) - Bmatrix * mh.shift_right(Kstore)
def est_K_T_ODE(pars, save_kernel=True):
    """Analytic estimate of the Doppler/aberration kernel K_T.

    Implements the closed-form Bessel-function expression (unmarked equation on
    page 10 of Dai & Chluba 2014, arXiv:1403.6117v2) instead of integrating the
    kernel ODE.

    Parameters
    ----------
    pars : dict
        Kernel parameters; must contain 'beta', 's', 'delta_ell' and 'lmax'.
    save_kernel : bool, optional
        If True, the kernel elements will be saved to a file for later use.

    Returns
    -------
    K_T : 2D numpy array
        Each row corresponds to an (m, ell') index (getindx scheme of the file
        handler); the columns span ell in a delta_ell neighborhood of ell'.
    """
    with timeit("Analytically determining the Doppler and aberration Kernel elements"):
        beta, spin = pars['beta'], pars['s']
        delta_ell, lmax = pars['delta_ell'], pars['lmax']
        # reuse the cached M/L index matrices when present, otherwise rebuild them
        mat_fname = fh.get_matrices_filename(pars)
        if fh.file_exists(mat_fname):
            m_mat = fh.load_matrix(mat_fname, key='M')
            l_mat = fh.load_matrix(mat_fname, key='L')
        else:
            m_mat, l_mat = mh.get_ML_matrix(delta_ell=delta_ell, lmax=lmax)
        # B coefficients arranged to match the (m, ell') storage layout
        blm, _ = mh.get_Blm_Clm(delta_ell, lmax, s=spin)
        b_mat = blm[l_mat, m_mat]
        b_mat[np.isnan(b_mat)] = 0
        # per-row ell offsets: delta_ell, delta_ell-1, ..., -delta_ell
        dl = np.array([np.arange(delta_ell, -delta_ell - 1, -1)] * b_mat.shape[0])
        # analytic kernel: Bessel J_dl evaluated at 2*B*eta with eta = arctanh(beta)
        rapidity = np.arctanh(beta)
        K_T = special.jv(dl, 2. * b_mat * rapidity)
        if save_kernel:
            save_KT2file(pars, K_T)
    return K_T
def solve_K_T_ODE(pars, save_kernel=True, rtol=1.e-3, atol=1.e-6, mxstep=0):
    '''solves the ODE to find the temperature aberration kernel elements
    uses Eq. 44 in Dai, Chluba 2014 arXiv:1403.6117v2
    Parameters
    ----------
    pars : dict
        dictionary of the kernel parameters ('beta', 's', 'delta_ell', 'lmax')
    save_kernel : bool, optional
        If True, the kernel elements will be saved to a file for later use
    rtol, atol : scalars
        passed to scipy.integrate.solve_ivp to set precision
    mxstep : scalar
        NOTE(review): unused since the switch from odeint to solve_ivp;
        kept for backward compatibility of the signature
    Returns
    -------
    K_T : 2D numpy array
        Each row corresponds to the (m,ell') index calculated with the getindx
        scheme in the file_handler . The rows correspond to different values of
        ell for a neighborhood of delta_ell around ell'.
    '''
    logger.info("rtol = {}\natol = {}".format(rtol, atol))
    with timeit("calculating the Doppler and aberration Kernel elements"):
        # ------------------------------
        # set up parameters and matrices
        # ------------------------------
        beta = pars['beta']
        s = pars['s']
        delta_ell = pars['delta_ell']
        lmax = pars['lmax']
        # lmin= pars['lmin']
        # set the height and width of the aberration kernel storage matrix.
        # the storage matrix is set around each value of ell' for a neighborhood of delta_ell
        # on each side. The middle value of each row corresponds to ell'=ell or delta_ell=0
        # the number of columns corresponds to different values of ell' for each m mode.
        height, width = ((lmax + 1) * (lmax + 2) // 2, 2 * delta_ell + 1)
        # initialize the K0 = dirac_delta(ell,ell') (initial condition for the ODE)
        K0 = np.zeros(width)
        K0[delta_ell] = 1
        # replicate the delta-function row for every (m, ell') index
        K0 = np.tensordot(np.ones(height), K0, axes=0)
        # initialize matrices file name from input parameters
        matrices_file_name = fh.get_matrices_filename(pars)
        # if they already exist, load them from disc
        if fh.file_exists(matrices_file_name):
            Mmatrix = fh.load_matrix(matrices_file_name, key='M')
            Lmatrix = fh.load_matrix(matrices_file_name, key='L')
        else:
            Mmatrix, Lmatrix = mh.get_ML_matrix(delta_ell=delta_ell, lmax=lmax)
        # construct the Bmatrix corresponding to the elements of K0
        Blms, _ = mh.get_Blm_Clm(delta_ell, lmax, s=s)
        Bmatrix = Blms[Lmatrix, Mmatrix]
        Bmatrix[np.isnan(Bmatrix)] = 0
        # (safety) pad K and B matrices to avoid leakage from the edges
        # necessary when using odeint solver
        # add two zeros to the end of each row
        # FIXME: is two enough for all ell?
        K0 = np.insert(K0, [2 * delta_ell + 1, 2 * delta_ell + 1], 0, axis=1)
        Bmatrix = np.insert(Bmatrix, [2 * delta_ell + 1, 2 * delta_ell + 1], 0, axis=1)
        # reshape all the 2D matrices to 1D arrays so that the ODE can be solved in vectorized mode
        K0 = K0.reshape((width + 2) * height)
        Bmatrix = Bmatrix.reshape((width + 2) * height)
        # ------------------------------
        # solve ODE
        # ------------------------------
        # initialize the eta = np.arctanh(beta) array for ODE iterations
        # the index (N-1) will give the final result
        eta = np.linspace(0, np.arctanh(beta), N)
        print("beta (v/c)          : ", beta)
        print("eta (arctanh(beta)) : ", eta[-1])
        # solve the ODE for a range of ell' between lmin=0 and lmax
        # dK_deta is the derivative of the aberration kernel with respect to eta is defined
        sol = solve_ivp(dK_deta, [eta[0],eta[-1]], K0, t_eval=[eta[0],eta[-1]], args=(Bmatrix,), rtol=rtol, atol=atol)
        sol = sol.y.T
        # store the results in the K_T matrix (last requested time point = final eta)
        K_T = sol[N - 1].reshape(height, width + 2)
        # remove the zero padding from the final solution
        K_T = np.delete(K_T, [2 * delta_ell + 1, 2 * delta_ell + 2], axis=1)
        # NOTE(review): empirical cleanup of numerical leakage at the row edges;
        # the 0.00123 factor and the 1e-5 thresholds look hand-tuned -- confirm
        # against reference kernels before changing them.
        default_dell = int((lmax*0.00123*2)+4)
        if delta_ell>default_dell:
            default_dell = delta_ell-default_dell
            # zero tiny (|K|<1e-5) edge elements on both sides of each row
            mask_pos = ((K_T[:,0] > 0.) & (K_T[:,0] < 1.e-5))
            K_T[mask_pos,:default_dell] = 0.
            K_T[mask_pos,-default_dell:] = 0.
            mask_neg = ((K_T[:,0] < 0.) & (K_T[:,0] > -1.e-5))
            K_T[mask_neg,:default_dell] = 0.
            K_T[mask_neg,-default_dell:] = 0.
        # ------------------------------
        # save to file
        # ------------------------------
        if save_kernel:
            save_KT2file(pars, K_T)
    return K_T
def save_KT2file(pars, K_T):
    """Save the temperature kernel *K_T* to the kernel file derived from *pars*.

    The kernel is tagged 'D1' (Doppler weight = 1). Creates the target
    directory if it does not exist yet.
    """
    lmax = pars["lmax"]
    beta = pars["beta"]
    kernel_file_name = fh.get_kernel_filename(pars)
    dir_name = fh.dirname(lmax=lmax, beta=beta)
    print(f"dirname = {dir_name}")
    if not os.path.exists(dir_name):
        # exist_ok=True avoids a crash if another process created the
        # directory between the exists() check and this call (TOCTOU race)
        os.makedirs(dir_name, exist_ok=True)
        print(f"The following directory was created to save the kernel file:\n{dir_name}")
    # save the kernel to file
    # tag as D1 (Doppler weight =1)
    fh.save_kernel(kernel_file_name, K_T, 'D1')
    print(f"Kernel saved in:\n{kernel_file_name}")
| [
"logging.getLogger",
"cosmoboost.lib.FileHandler.get_matrices_filename",
"numpy.arange",
"cosmoboost.lib.MatrixHandler.get_ML_matrix",
"cosmoboost.lib.MatrixHandler.shift_left",
"cosmoboost.lib.FileHandler.get_kernel_filename",
"os.path.exists",
"cosmoboost.lib.FileHandler.file_exists",
"numpy.delet... | [((203, 247), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (212, 247), True, 'import numpy as np\n'), ((481, 508), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (498, 508), False, 'import logging\n'), ((7723, 7751), 'cosmoboost.lib.FileHandler.get_kernel_filename', 'fh.get_kernel_filename', (['pars'], {}), '(pars)\n', (7745, 7751), True, 'from cosmoboost.lib import FileHandler as fh\n'), ((7767, 7799), 'cosmoboost.lib.FileHandler.dirname', 'fh.dirname', ([], {'lmax': 'lmax', 'beta': 'beta'}), '(lmax=lmax, beta=beta)\n', (7777, 7799), True, 'from cosmoboost.lib import FileHandler as fh\n'), ((8064, 8107), 'cosmoboost.lib.FileHandler.save_kernel', 'fh.save_kernel', (['kernel_file_name', 'K_T', '"""D1"""'], {}), "(kernel_file_name, K_T, 'D1')\n", (8078, 8107), True, 'from cosmoboost.lib import FileHandler as fh\n'), ((746, 777), 'cosmoboost.lib.MatrixHandler.shift_left', 'mh.shift_left', (['(Bmatrix * Kstore)'], {}), '(Bmatrix * Kstore)\n', (759, 777), True, 'from cosmoboost.lib import MatrixHandler as mh\n'), ((1547, 1624), 'cosmoboost.lib.mytimer.timeit', 'timeit', (['"""Analytically determining the Doppler and aberration Kernel elements"""'], {}), "('Analytically determining the Doppler and aberration Kernel elements')\n", (1553, 1624), False, 'from cosmoboost.lib.mytimer import timeit\n'), ((1957, 1987), 'cosmoboost.lib.FileHandler.get_matrices_filename', 'fh.get_matrices_filename', (['pars'], {}), '(pars)\n', (1981, 1987), True, 'from cosmoboost.lib import FileHandler as fh\n'), ((2053, 2087), 'cosmoboost.lib.FileHandler.file_exists', 'fh.file_exists', (['matrices_file_name'], {}), '(matrices_file_name)\n', (2067, 2087), True, 'from cosmoboost.lib import FileHandler as fh\n'), ((2366, 2402), 'cosmoboost.lib.MatrixHandler.get_Blm_Clm', 'mh.get_Blm_Clm', (['delta_ell', 'lmax'], {'s': 's'}), '(delta_ell, lmax, s=s)\n', (2380, 2402), 
True, 'from cosmoboost.lib import MatrixHandler as mh\n'), ((2694, 2710), 'numpy.arctanh', 'np.arctanh', (['beta'], {}), '(beta)\n', (2704, 2710), True, 'import numpy as np\n'), ((2823, 2858), 'scipy.special.jv', 'special.jv', (['dl', '(2.0 * Bmatrix * eta)'], {}), '(dl, 2.0 * Bmatrix * eta)\n', (2833, 2858), False, 'from scipy import special\n'), ((3904, 3968), 'cosmoboost.lib.mytimer.timeit', 'timeit', (['"""calculating the Doppler and aberration Kernel elements"""'], {}), "('calculating the Doppler and aberration Kernel elements')\n", (3910, 3968), False, 'from cosmoboost.lib.mytimer import timeit\n'), ((4763, 4778), 'numpy.zeros', 'np.zeros', (['width'], {}), '(width)\n', (4771, 4778), True, 'import numpy as np\n'), ((4952, 4982), 'cosmoboost.lib.FileHandler.get_matrices_filename', 'fh.get_matrices_filename', (['pars'], {}), '(pars)\n', (4976, 4982), True, 'from cosmoboost.lib import FileHandler as fh\n'), ((5047, 5081), 'cosmoboost.lib.FileHandler.file_exists', 'fh.file_exists', (['matrices_file_name'], {}), '(matrices_file_name)\n', (5061, 5081), True, 'from cosmoboost.lib import FileHandler as fh\n'), ((5396, 5432), 'cosmoboost.lib.MatrixHandler.get_Blm_Clm', 'mh.get_Blm_Clm', (['delta_ell', 'lmax'], {'s': 's'}), '(delta_ell, lmax, s=s)\n', (5410, 5432), True, 'from cosmoboost.lib import MatrixHandler as mh\n'), ((5735, 5799), 'numpy.insert', 'np.insert', (['K0', '[2 * delta_ell + 1, 2 * delta_ell + 1]', '(0)'], {'axis': '(1)'}), '(K0, [2 * delta_ell + 1, 2 * delta_ell + 1], 0, axis=1)\n', (5744, 5799), True, 'import numpy as np\n'), ((5818, 5887), 'numpy.insert', 'np.insert', (['Bmatrix', '[2 * delta_ell + 1, 2 * delta_ell + 1]', '(0)'], {'axis': '(1)'}), '(Bmatrix, [2 * delta_ell + 1, 2 * delta_ell + 1], 0, axis=1)\n', (5827, 5887), True, 'import numpy as np\n'), ((6644, 6755), 'scipy.integrate.solve_ivp', 'solve_ivp', (['dK_deta', '[eta[0], eta[-1]]', 'K0'], {'t_eval': '[eta[0], eta[-1]]', 'args': '(Bmatrix,)', 'rtol': 'rtol', 'atol': 'atol'}), '(dK_deta, 
[eta[0], eta[-1]], K0, t_eval=[eta[0], eta[-1]], args=(\n Bmatrix,), rtol=rtol, atol=atol)\n', (6653, 6755), False, 'from scipy.integrate import odeint, solve_ivp\n'), ((6943, 7005), 'numpy.delete', 'np.delete', (['K_T', '[2 * delta_ell + 1, 2 * delta_ell + 2]'], {'axis': '(1)'}), '(K_T, [2 * delta_ell + 1, 2 * delta_ell + 2], axis=1)\n', (6952, 7005), True, 'import numpy as np\n'), ((7846, 7870), 'os.path.exists', 'os.path.exists', (['dir_name'], {}), '(dir_name)\n', (7860, 7870), False, 'import os\n'), ((7880, 7901), 'os.makedirs', 'os.makedirs', (['dir_name'], {}), '(dir_name)\n', (7891, 7901), False, 'import os\n'), ((790, 812), 'cosmoboost.lib.MatrixHandler.shift_right', 'mh.shift_right', (['Kstore'], {}), '(Kstore)\n', (804, 812), True, 'from cosmoboost.lib import MatrixHandler as mh\n'), ((2111, 2154), 'cosmoboost.lib.FileHandler.load_matrix', 'fh.load_matrix', (['matrices_file_name'], {'key': '"""M"""'}), "(matrices_file_name, key='M')\n", (2125, 2154), True, 'from cosmoboost.lib import FileHandler as fh\n'), ((2177, 2220), 'cosmoboost.lib.FileHandler.load_matrix', 'fh.load_matrix', (['matrices_file_name'], {'key': '"""L"""'}), "(matrices_file_name, key='L')\n", (2191, 2220), True, 'from cosmoboost.lib import FileHandler as fh\n'), ((2266, 2314), 'cosmoboost.lib.MatrixHandler.get_ML_matrix', 'mh.get_ML_matrix', ([], {'delta_ell': 'delta_ell', 'lmax': 'lmax'}), '(delta_ell=delta_ell, lmax=lmax)\n', (2282, 2314), True, 'from cosmoboost.lib import MatrixHandler as mh\n'), ((2460, 2477), 'numpy.isnan', 'np.isnan', (['Bmatrix'], {}), '(Bmatrix)\n', (2468, 2477), True, 'import numpy as np\n'), ((4831, 4846), 'numpy.ones', 'np.ones', (['height'], {}), '(height)\n', (4838, 4846), True, 'import numpy as np\n'), ((5105, 5148), 'cosmoboost.lib.FileHandler.load_matrix', 'fh.load_matrix', (['matrices_file_name'], {'key': '"""M"""'}), "(matrices_file_name, key='M')\n", (5119, 5148), True, 'from cosmoboost.lib import FileHandler as fh\n'), ((5171, 5214), 
'cosmoboost.lib.FileHandler.load_matrix', 'fh.load_matrix', (['matrices_file_name'], {'key': '"""L"""'}), "(matrices_file_name, key='L')\n", (5185, 5214), True, 'from cosmoboost.lib import FileHandler as fh\n'), ((5260, 5308), 'cosmoboost.lib.MatrixHandler.get_ML_matrix', 'mh.get_ML_matrix', ([], {'delta_ell': 'delta_ell', 'lmax': 'lmax'}), '(delta_ell=delta_ell, lmax=lmax)\n', (5276, 5308), True, 'from cosmoboost.lib import MatrixHandler as mh\n'), ((5490, 5507), 'numpy.isnan', 'np.isnan', (['Bmatrix'], {}), '(Bmatrix)\n', (5498, 5507), True, 'import numpy as np\n'), ((6360, 6376), 'numpy.arctanh', 'np.arctanh', (['beta'], {}), '(beta)\n', (6370, 6376), True, 'import numpy as np\n'), ((2544, 2584), 'numpy.arange', 'np.arange', (['delta_ell', '(-delta_ell - 1)', '(-1)'], {}), '(delta_ell, -delta_ell - 1, -1)\n', (2553, 2584), True, 'import numpy as np\n')] |
"""
Testing what the fastest way is to create a 1D Array with 2 values
"""
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import random
import numpy as np
x, y = random.uniform(0, 300), random.uniform(0, 300)
def numpy_array(x, y):
    """Build a 2-element float array from (x, y) via np.array."""
    # np.float was removed in NumPy 1.24; it was only an alias for builtin float.
    return np.array((x, y), dtype=float)
def numpy_array_tuple(my_tuple):
    """Build a 2-element float array from a tuple via np.array."""
    # np.float was removed in NumPy 1.24; it was only an alias for builtin float.
    return np.array(my_tuple, dtype=float)
def numpy_asarray(x, y):
    """Build a 2-element float array from (x, y) via np.asarray."""
    # np.float was removed in NumPy 1.24; it was only an alias for builtin float.
    return np.asarray((x, y), dtype=float)
def numpy_asarray_tuple(my_tuple):
    """Build a 2-element float array from a tuple via np.asarray."""
    # np.float was removed in NumPy 1.24; it was only an alias for builtin float.
    return np.asarray(my_tuple, dtype=float)
def numpy_asanyarray(x, y):
    """Build a 2-element float array from (x, y) via np.asanyarray."""
    # np.float was removed in NumPy 1.24; it was only an alias for builtin float.
    return np.asanyarray((x, y), dtype=float)
def numpy_asanyarray_tuple(my_tuple):
    """Build a 2-element float array from a tuple via np.asanyarray."""
    # np.float was removed in NumPy 1.24; it was only an alias for builtin float.
    return np.asanyarray(my_tuple, dtype=float)
def numpy_fromiter(x, y):
    """Build a 2-element float array by consuming an iterable of known length."""
    return np.fromiter((x, y), float, 2)
def numpy_fromiter_tuple(my_tuple):
    """Build a 2-element float array from a tuple via np.fromiter."""
    return np.fromiter(iter(my_tuple), dtype=float, count=2)
def numpy_fromiter_np_float(x, y):
    """Build a 2-element float array via np.fromiter (originally with np.float dtype)."""
    # np.float was removed in NumPy 1.24; it was only an alias for builtin float,
    # so this variant is now identical in behavior to numpy_fromiter.
    return np.fromiter((x, y), dtype=float, count=2)
def numpy_fromiter_np_float_tuple(my_tuple):
    """Build a 2-element float array from a tuple via np.fromiter (originally np.float dtype)."""
    # np.float was removed in NumPy 1.24; it was only an alias for builtin float,
    # so this variant is now identical in behavior to numpy_fromiter_tuple.
    return np.fromiter(my_tuple, dtype=float, count=2)
def numpy_zeros(x, y):
    """Preallocate a 2-element float array with np.zeros, then fill by index."""
    # np.float was removed in NumPy 1.24; it was only an alias for builtin float.
    a = np.zeros(2, dtype=float)
    a[0] = x
    a[1] = y
    return a
def numpy_ones(x, y):
    """Preallocate a 2-element float array with np.ones, then overwrite by index."""
    # np.float was removed in NumPy 1.24; it was only an alias for builtin float.
    a = np.ones(2, dtype=float)
    a[0] = x
    a[1] = y
    return a
numpy_array(x, y)  # warm-up call (presumably to pay one-time costs before timing -- TODO confirm)
correct_array = np.array([x, y])  # reference value each benchmark result is checked against
def test_numpy_array(benchmark):
    # pytest-benchmark fixture: times numpy_array and returns its result
    result = benchmark(numpy_array, x, y)
    assert np.array_equal(result, correct_array)
def test_numpy_array_tuple(benchmark):
    # pytest-benchmark fixture: times numpy_array_tuple and returns its result
    result = benchmark(numpy_array_tuple, (x, y))
    assert np.array_equal(result, correct_array)
def test_numpy_asarray(benchmark):
    # pytest-benchmark fixture: times numpy_asarray and returns its result
    result = benchmark(numpy_asarray, x, y)
    assert np.array_equal(result, correct_array)
def test_numpy_asarray_tuple(benchmark):
    # pytest-benchmark fixture: times numpy_asarray_tuple and returns its result
    result = benchmark(numpy_asarray_tuple, (x, y))
    assert np.array_equal(result, correct_array)
def test_numpy_asanyarray(benchmark):
    # pytest-benchmark fixture: times numpy_asanyarray and returns its result
    result = benchmark(numpy_asanyarray, x, y)
    assert np.array_equal(result, correct_array)
def test_numpy_asanyarray_tuple(benchmark):
    # pytest-benchmark fixture: times numpy_asanyarray_tuple and returns its result
    result = benchmark(numpy_asanyarray_tuple, (x, y))
    assert np.array_equal(result, correct_array)
def test_numpy_fromiter(benchmark):
    # pytest-benchmark fixture: times numpy_fromiter and returns its result
    result = benchmark(numpy_fromiter, x, y)
    assert np.array_equal(result, correct_array)
def test_numpy_fromiter_tuple(benchmark):
    # pytest-benchmark fixture: times numpy_fromiter_tuple and returns its result
    result = benchmark(numpy_fromiter_tuple, (x, y))
    assert np.array_equal(result, correct_array)
def test_numpy_fromiter_np_float(benchmark):
    # pytest-benchmark fixture: times numpy_fromiter_np_float and returns its result
    result = benchmark(numpy_fromiter_np_float, x, y)
    assert np.array_equal(result, correct_array)
def test_numpy_fromiter_np_float_tuple(benchmark):
    # pytest-benchmark fixture: times numpy_fromiter_np_float_tuple and returns its result
    result = benchmark(numpy_fromiter_np_float_tuple, (x, y))
    assert np.array_equal(result, correct_array)
def test_numpy_zeros(benchmark):
    # pytest-benchmark fixture: times numpy_zeros and returns its result
    result = benchmark(numpy_zeros, x, y)
    assert np.array_equal(result, correct_array)
def test_numpy_ones(benchmark):
    # pytest-benchmark fixture: times numpy_ones and returns its result
    result = benchmark(numpy_ones, x, y)
    assert np.array_equal(result, correct_array)
# Run this file using
# poetry run pytest test/test_benchmark_array_creation.py --benchmark-compare
| [
"random.uniform",
"numpy.fromiter",
"numpy.ones",
"numpy.asarray",
"numpy.asanyarray",
"numpy.array",
"numpy.zeros",
"os.path.dirname",
"numpy.array_equal"
] | [((1970, 1986), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (1978, 1986), True, 'import numpy as np\n'), ((203, 225), 'random.uniform', 'random.uniform', (['(0)', '(300)'], {}), '(0, 300)\n', (217, 225), False, 'import random\n'), ((227, 249), 'random.uniform', 'random.uniform', (['(0)', '(300)'], {}), '(0, 300)\n', (241, 249), False, 'import random\n'), ((339, 371), 'numpy.array', 'np.array', (['(x, y)'], {'dtype': 'np.float'}), '((x, y), dtype=np.float)\n', (347, 371), True, 'import numpy as np\n'), ((471, 505), 'numpy.array', 'np.array', (['my_tuple'], {'dtype': 'np.float'}), '(my_tuple, dtype=np.float)\n', (479, 505), True, 'import numpy as np\n'), ((597, 631), 'numpy.asarray', 'np.asarray', (['(x, y)'], {'dtype': 'np.float'}), '((x, y), dtype=np.float)\n', (607, 631), True, 'import numpy as np\n'), ((733, 769), 'numpy.asarray', 'np.asarray', (['my_tuple'], {'dtype': 'np.float'}), '(my_tuple, dtype=np.float)\n', (743, 769), True, 'import numpy as np\n'), ((864, 901), 'numpy.asanyarray', 'np.asanyarray', (['(x, y)'], {'dtype': 'np.float'}), '((x, y), dtype=np.float)\n', (877, 901), True, 'import numpy as np\n'), ((1006, 1045), 'numpy.asanyarray', 'np.asanyarray', (['my_tuple'], {'dtype': 'np.float'}), '(my_tuple, dtype=np.float)\n', (1019, 1045), True, 'import numpy as np\n'), ((1138, 1179), 'numpy.fromiter', 'np.fromiter', (['(x, y)'], {'dtype': 'float', 'count': '(2)'}), '((x, y), dtype=float, count=2)\n', (1149, 1179), True, 'import numpy as np\n'), ((1282, 1325), 'numpy.fromiter', 'np.fromiter', (['my_tuple'], {'dtype': 'float', 'count': '(2)'}), '(my_tuple, dtype=float, count=2)\n', (1293, 1325), True, 'import numpy as np\n'), ((1427, 1471), 'numpy.fromiter', 'np.fromiter', (['(x, y)'], {'dtype': 'np.float', 'count': '(2)'}), '((x, y), dtype=np.float, count=2)\n', (1438, 1471), True, 'import numpy as np\n'), ((1583, 1629), 'numpy.fromiter', 'np.fromiter', (['my_tuple'], {'dtype': 'np.float', 'count': '(2)'}), '(my_tuple, dtype=np.float, 
count=2)\n', (1594, 1629), True, 'import numpy as np\n'), ((1716, 1743), 'numpy.zeros', 'np.zeros', (['(2)'], {'dtype': 'np.float'}), '(2, dtype=np.float)\n', (1724, 1743), True, 'import numpy as np\n'), ((1868, 1894), 'numpy.ones', 'np.ones', (['(2)'], {'dtype': 'np.float'}), '(2, dtype=np.float)\n', (1875, 1894), True, 'import numpy as np\n'), ((2075, 2112), 'numpy.array_equal', 'np.array_equal', (['result', 'correct_array'], {}), '(result, correct_array)\n', (2089, 2112), True, 'import numpy as np\n'), ((2215, 2252), 'numpy.array_equal', 'np.array_equal', (['result', 'correct_array'], {}), '(result, correct_array)\n', (2229, 2252), True, 'import numpy as np\n'), ((2345, 2382), 'numpy.array_equal', 'np.array_equal', (['result', 'correct_array'], {}), '(result, correct_array)\n', (2359, 2382), True, 'import numpy as np\n'), ((2489, 2526), 'numpy.array_equal', 'np.array_equal', (['result', 'correct_array'], {}), '(result, correct_array)\n', (2503, 2526), True, 'import numpy as np\n'), ((2625, 2662), 'numpy.array_equal', 'np.array_equal', (['result', 'correct_array'], {}), '(result, correct_array)\n', (2639, 2662), True, 'import numpy as np\n'), ((2775, 2812), 'numpy.array_equal', 'np.array_equal', (['result', 'correct_array'], {}), '(result, correct_array)\n', (2789, 2812), True, 'import numpy as np\n'), ((2907, 2944), 'numpy.array_equal', 'np.array_equal', (['result', 'correct_array'], {}), '(result, correct_array)\n', (2921, 2944), True, 'import numpy as np\n'), ((3053, 3090), 'numpy.array_equal', 'np.array_equal', (['result', 'correct_array'], {}), '(result, correct_array)\n', (3067, 3090), True, 'import numpy as np\n'), ((3203, 3240), 'numpy.array_equal', 'np.array_equal', (['result', 'correct_array'], {}), '(result, correct_array)\n', (3217, 3240), True, 'import numpy as np\n'), ((3367, 3404), 'numpy.array_equal', 'np.array_equal', (['result', 'correct_array'], {}), '(result, correct_array)\n', (3381, 3404), True, 'import numpy as np\n'), ((3493, 3530), 
'numpy.array_equal', 'np.array_equal', (['result', 'correct_array'], {}), '(result, correct_array)\n', (3507, 3530), True, 'import numpy as np\n'), ((3617, 3654), 'numpy.array_equal', 'np.array_equal', (['result', 'correct_array'], {}), '(result, correct_array)\n', (3631, 3654), True, 'import numpy as np\n'), ((126, 151), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (141, 151), False, 'import os\n')] |
import numpy as np
class GPInterface(object):
    """Abstract interface for a Gaussian-process backend.

    Every method below is a no-op here; concrete backends are expected to
    override them.
    """
    def __init__(self):
        # backend kernel object; None until set (presumably by a subclass's
        # create_kernel -- TODO confirm)
        self.kernel = None
        # input dimensionality; None until set
        self.ndim = None
        # fitted model object; None until set
        self.model = None
        # output dimensionality, fixed to 1 in this base class
        self.outdim = 1
    def create_kernel(self, ndim, kernel_name, var_f=1.0, lengthscale=1.0):
        """Construct the covariance kernel. No-op in this base class."""
        pass
    def create_model(self, x, y, noise_var, noise_prior):
        """Build the GP model from training data (x, y). No-op in this base class."""
        pass
    def predict_f(self, x, full_cov=False):
        """Predict the latent function at inputs x. No-op in this base class."""
        pass
    def optimize(self, num_restarts=30, opt_messages=False, print_result=False):
        """Optimize model hyperparameters. No-op in this base class."""
        pass
def convert_lengthscale(ndim, lengthscale):
    """Expand a scalar lengthscale to a per-dimension array; pass arrays through unchanged."""
    if np.isscalar(lengthscale):
        return lengthscale * np.ones(ndim)
    return lengthscale
def convert_2D_format(arr):
    """Return *arr* as a 2-D numpy array.

    1-D input gains a trailing axis (column vector); 2-D input is returned
    as-is; anything else raises ValueError.
    """
    if not isinstance(arr, np.ndarray):
        raise ValueError('The array is not a numpy array.')
    if arr.ndim == 1:
        # assumes single-dimensional data: promote to a column vector
        return arr[:, np.newaxis]
    if arr.ndim == 2:
        return arr
    raise ValueError('The array cannot be more than 2 dimensional')
| [
"numpy.ones",
"numpy.isscalar"
] | [((540, 564), 'numpy.isscalar', 'np.isscalar', (['lengthscale'], {}), '(lengthscale)\n', (551, 564), True, 'import numpy as np\n'), ((592, 605), 'numpy.ones', 'np.ones', (['ndim'], {}), '(ndim)\n', (599, 605), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# <NAME>
# <EMAIL>
import logging
import numpy as np
import pandas as pd
from bioidtracker.database_manager import DatabaseManager
from bioidtracker.db import DB
class Dataset:
    """Score identifier lists against Ensembl releases and convert identifiers.

    Wraps a :class:`DatabaseManager` to (a) find which release/database an ID
    list most likely comes from and (b) convert IDs between external
    databases, Ensembl graph IDs, and Ensembl forms.
    """

    def __init__(self, db_manager: DatabaseManager, narrow_search=True):
        """Bind the helper to *db_manager*.

        Args:
            db_manager: Release-aware database accessor for one Ensembl form.
            narrow_search: If True, use the smaller 'external_relevant' tables
                rather than the full 'external' tables.
        """
        self.log = logging.getLogger("dataset")
        self.db_manager = db_manager
        self.narrow_search = narrow_search
        # Pseudo database names reported for Ensembl-ID matches in
        # dataset_score_external.
        self.ensembl_db = f"ensembl_{self.db_manager.form}_with_version"
        self.ensembl_db_no_version = f"ensembl_{self.db_manager.form}_without_version"

    @staticmethod
    def ensembl_list_check_version(id_lst: list) -> str:
        """Detects naively whether there is ID version or not.

        Args:
            id_lst: List of IDs from Ensembl only.

        Returns:
            "without_version" or "with_version".

        Raises:
            ValueError: When IDs are not string. When the version info is not consistent.
        """
        if not np.all([isinstance(i, str) for i in id_lst]):
            raise ValueError("The IDs in the input list has to be string objects.")
        if not np.all([i.count(DB.id_ver_delimiter) <= 1 for i in id_lst]):
            raise ValueError("The IDs in the input list should not contain more than 1 delimiter.")
        id_vers = [i.find(DB.id_ver_delimiter) == -1 for i in id_lst]
        if np.all(id_vers):
            # If there is no version information associated with stable_ids. For some organisms like S. cerevisiae
            return "without_version"
        elif np.any(id_vers):
            raise ValueError("Inconsistent versions in the IDs in the input list.")
        else:
            return "with_version"

    def ensembl_list_warning_version_consistency(self, id_lst: list):
        """Warn when the ID list's version style disagrees with the database's.

        Args:
            id_lst: List of Ensembl IDs to check.
        """
        dbvi = self.db_manager.check_version_info()
        idvi = Dataset.ensembl_list_check_version(id_lst)
        if dbvi != idvi:
            self.log.warning(f"Version info inconsistency: Database is '{dbvi}', but ID list is '{idvi}'.")

    def initialize_external_conversion(self, to_return: bool = True):
        """Build (and let the manager cache) external tables for every release.

        Args:
            to_return: If True, concatenate and return all per-release tables.

        Returns:
            A pandas DataFrame covering all releases when *to_return* is True;
            otherwise None (tables are still built for their caching side
            effect).
        """
        rel_to_df = sorted(self.db_manager.available_releases, reverse=True)
        self.log.info(f"Comparison data frame is being constructed for releases: {rel_to_df}.")
        ex_all = pd.DataFrame()
        for rel in sorted(rel_to_df, reverse=True):
            db_man_rel = self.db_manager.change_release(rel)
            ex_rel = db_man_rel.get_db("external_relevant" if self.narrow_search else "external")
            if to_return:
                ex_all = pd.concat([ex_all, ex_rel], axis=0)
        if to_return:
            ex_all.reset_index(inplace=True, drop=True)
            return ex_all

    def initialize_external_conversion_uniques(self):
        """Return the external table restricted to unambiguous rows.

        Keeps rows whose external name ('name_id') is unique plus rows whose
        Ensembl ID ('graph_id') is unique, then de-duplicates the union.

        Returns:
            A pandas DataFrame with the filtered rows.
        """
        ex_all = self.initialize_external_conversion()
        non_uniques_external = ex_all["name_id"].duplicated(keep=False)
        non_uniques_ensembl = ex_all["graph_id"].duplicated(keep=False)
        ex_all_uniques_external = ex_all[~non_uniques_external].copy()
        ex_all_uniques_ensembl = ex_all[~non_uniques_ensembl].copy()
        ex_all = pd.concat([ex_all_uniques_external, ex_all_uniques_ensembl], axis=0)
        ex_all.drop_duplicates(inplace=True, ignore_index=True)
        ex_all.reset_index(inplace=True, drop=True)
        return ex_all

    def initialize_form_conversion(self):
        """Warm the 'relationcurrent' table cache for every available release."""
        rel_to_df = sorted(self.db_manager.available_releases, reverse=True)
        self.log.info(f"Form conversion data frames are being constructed for releases: {rel_to_df}.")
        for rel in rel_to_df:
            db_man_rel = self.db_manager.change_release(rel)
            _ = db_man_rel.get_db("relationcurrent")

    def dataset_score_external(self, ex_df, id_lst, external: bool, ensembl: bool):
        """Score how well *id_lst* matches each (release, database) pair.

        Args:
            ex_df: External conversion table (see initialize_external_conversion).
            id_lst: Identifiers to look up; de-duplicated internally.
            external: If True, score against external database IDs ('id_db').
            ensembl: If True, score against Ensembl IDs ('graph_id'), with and
                (when applicable) without version suffixes.

        Returns:
            List of dicts {"Score", "Release", "Database", "Form"}, sorted by
            descending score; only the best-scoring pairs are included.
        """
        result = []
        # m = exx[(exx["name_db"] == "HGNC Symbol") & (exx["release"] == 96)]["id_db"].unique()
        # ids = list(np.random.choice(m, 5000, replace=False))
        id_lst = list(np.unique(id_lst))
        if external:
            exx = ex_df[["release", "id_db", "name_db"]].drop_duplicates(ignore_index=True)
            ids = pd.Series(id_lst, name="id_db")
            mex = exx.merge(ids, how="right", on="id_db")
            mexg = mex.groupby(["release", "name_db"])
            sme = mexg.size()
            max_sme = sme.max()
            result.extend(
                [(num_ids / len(ids), rel, database) for (rel, database), num_ids in sme.items() if num_ids == max_sme]
            )
        if ensembl:
            exx = ex_df[["release", "graph_id"]].drop_duplicates(ignore_index=True)
            vv = self.db_manager.check_version_info()
            for drop_version in [False, True]:
                vv_switch = vv == "without_version"
                if drop_version and vv_switch:
                    # second pass: strip version suffixes and score again
                    exx["graph_id"] = [i.split(DB.id_ver_delimiter)[0] for i in exx["graph_id"]]
                    vv_switch = not vv_switch
                elif drop_version:
                    continue
                ids = pd.Series(id_lst, name="graph_id")
                mex = exx.merge(ids, how="right", on="graph_id")
                mexg = mex.groupby(["release"])
                sme = mexg.size()
                max_sme = sme.max()
                result.extend(
                    [
                        (num_ids / len(ids), rel, self.ensembl_db if not vv_switch else self.ensembl_db_no_version)
                        for rel, num_ids in sme.items()
                        if num_ids == max_sme
                    ]
                )
        return [
            {"Score": i, "Release": j, "Database": k, "Form": self.db_manager.form}
            for i, j, k in sorted(result, reverse=True)
        ]

    def _convert_external_helper(self, rel, db):
        """Fetch the external table for release *rel*, filtered to database *db*.

        Args:
            rel: Ensembl release number.
            db: External database name (a 'name_db' value).

        Returns:
            The filtered pandas DataFrame.

        Raises:
            ValueError: If *db* is one of the Ensembl pseudo databases
                (Ensembl-to-Ensembl conversion is not handled here).
        """
        if db == self.ensembl_db or db == self.ensembl_db_no_version:
            raise ValueError("Ensembl ID to Ensembl ID conversion!")
        else:
            db_man_rel = self.db_manager.change_release(rel)
            ex_rel = db_man_rel.get_db("external_relevant" if self.narrow_search else "external")
            ex_rel = ex_rel[ex_rel["name_db"] == db]  # no need for drop.duplicate as above line already does that
            return ex_rel

    def convert_external_to_ensembl(self, rel, db, id_lst):
        """Map external IDs to Ensembl IDs for one release and database.

        Args:
            rel: Ensembl release number.
            db: External database name.
            id_lst: External identifiers to convert.

        Returns:
            DataFrame with columns ['id_db', 'graph_id']; for duplicates the
            highest-identity row is kept.
        """
        ex_rel = self._convert_external_helper(rel, db)
        ids = pd.Series(id_lst, name="id_db")
        ex_rel = ex_rel.merge(ids, how="right", on="id_db")
        # Fix: sort_values is not in-place; keep the returned frame so that
        # drop_duplicates(keep="first") actually selects the best match.
        ex_rel = ex_rel.sort_values(by=["xref_identity", "ensembl_identity"], ascending=False, ignore_index=True)
        return ex_rel[["id_db", "graph_id"]].drop_duplicates(keep="first", ignore_index=True)

    def convert_ensembl_to_external(self, rel, db, id_lst):
        """Map Ensembl IDs to external IDs for one release and database.

        Args:
            rel: Ensembl release number.
            db: External database name.
            id_lst: Ensembl identifiers to convert.

        Returns:
            DataFrame with columns ['graph_id', 'id_db']; for duplicates the
            highest-identity row is kept.
        """
        self.ensembl_list_warning_version_consistency(id_lst)
        ex_rel = self._convert_external_helper(rel, db)
        ids = pd.Series(id_lst, name="graph_id")
        ex_rel = ex_rel.merge(ids, how="right", on="graph_id")
        # Fix: sort_values is not in-place; keep the returned frame so that
        # drop_duplicates(keep="first") actually selects the best match.
        ex_rel = ex_rel.sort_values(by=["ensembl_identity", "xref_identity"], ascending=False, ignore_index=True)
        return ex_rel[["graph_id", "id_db"]].drop_duplicates(keep="first", ignore_index=True)

    def convert_ensembl_form(self, id_lst, to_form):
        """Convert Ensembl IDs (manager's release and form) into another form.

        Args:
            id_lst: Ensembl IDs in the form of self.db_manager.
            to_form: Target Ensembl form name.

        Returns:
            De-duplicated DataFrame with columns [self.db_manager.form, to_form].
        """
        self.ensembl_list_warning_version_consistency(id_lst)
        rc = self.db_manager.get_db("relationcurrent", save_after_calculation=self.db_manager.store_raw_always)
        rc.replace(to_replace="", value=np.nan, inplace=True)
        rc = rc[[self.db_manager.form, to_form]]
        rc = rc[~(rc[self.db_manager.form].isna() | rc[to_form].isna())]
        ids = pd.Series(id_lst, name=self.db_manager.form)
        result = rc.merge(ids, how="right", on=self.db_manager.form)
        result.drop_duplicates(inplace=True, ignore_index=True)
        result.reset_index(inplace=True, drop=True)
        return result
| [
"logging.getLogger",
"pandas.Series",
"numpy.unique",
"numpy.any",
"pandas.DataFrame",
"numpy.all",
"pandas.concat"
] | [((422, 450), 'logging.getLogger', 'logging.getLogger', (['"""dataset"""'], {}), "('dataset')\n", (439, 450), False, 'import logging\n'), ((1487, 1502), 'numpy.all', 'np.all', (['id_vers'], {}), '(id_vers)\n', (1493, 1502), True, 'import numpy as np\n'), ((2572, 2586), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2584, 2586), True, 'import pandas as pd\n'), ((3465, 3533), 'pandas.concat', 'pd.concat', (['[ex_all_uniques_external, ex_all_uniques_ensembl]'], {'axis': '(0)'}), '([ex_all_uniques_external, ex_all_uniques_ensembl], axis=0)\n', (3474, 3533), True, 'import pandas as pd\n'), ((7237, 7268), 'pandas.Series', 'pd.Series', (['id_lst'], {'name': '"""id_db"""'}), "(id_lst, name='id_db')\n", (7246, 7268), True, 'import pandas as pd\n'), ((7872, 7906), 'pandas.Series', 'pd.Series', (['id_lst'], {'name': '"""graph_id"""'}), "(id_lst, name='graph_id')\n", (7881, 7906), True, 'import pandas as pd\n'), ((8771, 8815), 'pandas.Series', 'pd.Series', (['id_lst'], {'name': 'self.db_manager.form'}), '(id_lst, name=self.db_manager.form)\n', (8780, 8815), True, 'import pandas as pd\n'), ((1669, 1684), 'numpy.any', 'np.any', (['id_vers'], {}), '(id_vers)\n', (1675, 1684), True, 'import numpy as np\n'), ((4531, 4548), 'numpy.unique', 'np.unique', (['id_lst'], {}), '(id_lst)\n', (4540, 4548), True, 'import numpy as np\n'), ((4681, 4712), 'pandas.Series', 'pd.Series', (['id_lst'], {'name': '"""id_db"""'}), "(id_lst, name='id_db')\n", (4690, 4712), True, 'import pandas as pd\n'), ((2849, 2884), 'pandas.concat', 'pd.concat', (['[ex_all, ex_rel]'], {'axis': '(0)'}), '([ex_all, ex_rel], axis=0)\n', (2858, 2884), True, 'import pandas as pd\n'), ((5583, 5617), 'pandas.Series', 'pd.Series', (['id_lst'], {'name': '"""graph_id"""'}), "(id_lst, name='graph_id')\n", (5592, 5617), True, 'import pandas as pd\n')] |
#!/usr/bin/env python2
'''
description: Convert directory with downloaded zipped KNMI ascii files to
netCDF format. Station information is obtained from a csv file.
Creation of the csv file and downloading of the KNMI data is
performed in another script (knmi_getdata.py).
author: <NAME>, NLeSC (<EMAIL>)
licence: Apache 2.0
'''
from numpy import concatenate as npconcatenate
import csv
import os
def read_knmi_data(reference_station):
    '''
    Load and merge all hourly KNMI data archives for one station.

    Globs ``KNMI/uurgeg_<station>*.zip``, parses each archive with
    load_knmi_data, and merges the per-file dictionaries into a single
    dictionary mapping each variable name to one concatenated array.

    NOTE(review): ``knmi_data.keys() + dicts[idx].keys()`` only works on
    Python 2 (dict views cannot be added with ``+`` on Python 3); this
    matches the python2 shebang of this script.
    '''
    from load_knmi_data import load_knmi_data
    import glob
    from numpy import sort
    from numpy import concatenate
    import collections
    # generate filename of KNMI station
    filenames = sort(glob.glob('KNMI/uurgeg_' + str(reference_station) + '*.zip' ))
    # load all csv files in list of dictionaries
    dicts = [load_knmi_data(filename).csvdata for filename in filenames]
    # merge all dictionaries in a super dictionary
    knmi_data = collections.defaultdict(list)
    for idx in range(0,len(dicts)):
        try:
            # concatenate this file's arrays onto the accumulated ones,
            # keyed by the union of both key sets
            knmi_data = dict((k, npconcatenate((knmi_data.get(k), dicts[idx].get(k)))) for k in set(knmi_data.keys() + dicts[idx].keys()))
        except ValueError:
            # cannot concatenate empty arrays (e.g. on the first
            # iteration) -- restart from this file's data alone
            knmi_data = dict((k, dicts[idx].get(k)) for k in dicts[idx].keys())
    # return dictionary with all variables/time steps
    return knmi_data
def write_combined_data_netcdf(data, stationid, lon, lat, elevation):
    '''
    Write the merged KNMI station data to ``output<stationid>.nc``.

    Creates a NETCDF4 file with an unlimited time axis (minutes since
    2010-01-01 00:00:00, gregorian calendar), scalar longitude /
    latitude / elevation variables, and one time-series variable per
    entry in ``data``.  Bookkeeping columns ('YYYMMDD', 'Time', etc.)
    are skipped.

    Parameters
    ----------
    data : dict
        Variable name -> sequence of values; must contain a 'datetime'
        entry with datetime objects for the time axis.
    stationid : int or str
        KNMI station identifier, used in the output filename.
    lon, lat, elevation : float
        Station coordinates and elevation (meters).
    '''
    from netCDF4 import Dataset as ncdf
    import netcdftime
    from datetime import datetime
    from dateutil import tz
    from numpy import zeros
    from numpy import nan as npnan
    from numpy import dtype
    import time
    ncfile = ncdf('output'+str(stationid)+'.nc', 'w', format='NETCDF4')
    # description of the file
    ncfile.description = 'KNMI ' + str(stationid)
    ncfile.history = 'Created ' + time.ctime(time.time())
    # create time dimension (unlimited)
    timevar = ncfile.createDimension('time', None)
    # create lon/lat dimensions
    lonvar = ncfile.createDimension('longitude', 1)
    latvar = ncfile.createDimension('latitude', 1)
    # elevation
    elvar = ncfile.createDimension('elevation', 1)
    # initialize time axis
    timeaxis = [int(round(netcdftime.date2num(data['datetime'][idx], units='minutes since 2010-01-01 00:00:00',
                                              calendar='gregorian'))) for idx in range(0,len(data['datetime']))]
    # netcdf time variable UTC
    timevar = ncfile.createVariable('time', 'i4', ('time',),
                                    zlib=True)
    timevar[:] = timeaxis
    timevar.units = 'minutes since 2010-01-01 00:00:00'
    timevar.calendar = 'gregorian'
    timevar.standard_name = 'time'
    timevar.long_name = 'time in UTC'
    # lon/lat variables
    lonvar = ncfile.createVariable('longitude',dtype('float32').char,('longitude',))
    lonvar.units = 'degrees_east'
    lonvar.axis = 'X'
    lonvar.standard_name = 'longitude'
    lonvar[:] = lon
    latvar = ncfile.createVariable('latitude',dtype('float32').char,('latitude',))
    latvar.units = 'degrees_north'
    latvar.axis = 'Y'
    latvar.standard_name = 'latitude'
    latvar[:] = lat
    # elevation variable
    elvar = ncfile.createVariable('elevation', dtype('float32').char, ('elevation',))
    elvar.units = 'meter'
    elvar.axis = 'Z'
    elvar.standard_name = 'elevation'
    elvar[:] = elevation
    # create other variables in netcdf file
    for variable in data.keys():
        if variable not in ['YYYMMDD', 'Time', '<br>', 'datetime', '# STN', None]:
            # add variables in netcdf file
            # convert strings to npnan if array contains numbers
            if True in [is_number(c)
                        for c in data[variable]]:
                data[variable] = [npnan if isinstance(
                    fitem(c), str) else fitem(c) for c in data[
                    variable]]
            # check if variable is a string
            if not isinstance(data[variable][1], str):
                # numeric variable: create it with a fill value
                variableName = variable
                values = ncfile.createVariable(
                    variableName, type(data[variable][1]),
                    ('time',), zlib=True, fill_value=-999)
            else:
                # string variables cannot have fill_value
                values = ncfile.createVariable(
                    variable, type(data[variable][1]),
                    ('time',), zlib=True)
            try: # fill variable
                values[:] = data[variable][:]
            except IndexError:
                # for strings the syntax is slightly different
                values = data[variable][:]
    #self.fill_attribute_data()
def fill_attribute_data():
    '''
    Fill the attribute metadata (units, standard_name, long_name) of a
    netcdf variable.

    NOTE(review): this function references the names ``variable`` and
    ``values`` which are not defined in this scope, so calling it as-is
    raises NameError.  The commented-out ``self.fill_attribute_data()``
    call at the end of write_combined_data_netcdf suggests it was
    originally nested there (or a method) -- confirm before using it.
    '''
    if variable == 'DD':
        values.units = 'degrees'
        values.standard_name = 'wind direction'
        values.long_name = 'mean wind direction during the 10-minute period preceding the time of observation (990=variable)'
    elif variable == 'TemperatureF':
        values.units = 'F'
        values.standard_name = 'air_temperature'
        values.long_name = 'air temperature'
    else:
        pass
def fitem(item):
    '''
    Normalize a raw CSV field.

    Strips surrounding whitespace when the value supports it and
    converts the result to float when it parses as a number; otherwise
    the (stripped) value is returned unchanged.
    '''
    if hasattr(item, 'strip'):
        item = item.strip()
    try:
        return float(item)
    except ValueError:
        return item
def is_number(s):
    '''
    Return True when *s* can be parsed as a floating point number,
    False otherwise.
    '''
    try:
        float(s)
    except ValueError:
        return False
    return True
def load_csv_data(csvfile):
    '''
    Read *csvfile* into a dict mapping each (stripped) column name to a
    list of parsed values (via fitem).

    NOTE(review): uses ``reader.next()``, which is Python-2-only
    (Python 3 would need ``next(reader)``), consistent with the python2
    shebang of this script.
    '''
    with open(csvfile, 'r') as csvin:
        reader = csv.DictReader(csvin, delimiter=',')
        try:
            # probe whether csvdata already exists; inside a function an
            # unassigned local raises UnboundLocalError, so this branch
            # always runs once to seed csvdata from the first data row
            csvdata
        except UnboundLocalError:
            reader.next()
            try:
                csvdata = {k.strip(): [fitem(v)] for k, v in
                           reader.next().items()}
            except StopIteration:
                pass
        current_row = 0
        for line in reader:
            current_row += 1
            # NOTE(review): current_row is incremented before this test,
            # so it is never 0 here -- this header-skip branch is dead.
            if current_row == 0:  # header
                # skip the header
                continue
            for k, v in line.items():
                if k is not None:  # skip over empty fields
                    k = k.strip()
                    csvdata[k].append(fitem(v))
    return csvdata
if __name__=="__main__":
    # Read the station list, then convert each station's zipped KNMI
    # archives to one netCDF file, skipping stations already converted.
    knmi_csv_info = load_csv_data('knmi_reference_data.csv')
    station_ids = [int(x) for x in knmi_csv_info['station_id']]
    for station in station_ids:
        # skip stations whose output file already exists
        if os.path.isfile('output' + str(station) + '.nc'):
            continue
        print (station)
        lat = knmi_csv_info['latitude'][station_ids.index(station)]
        lon = knmi_csv_info['longitude'][station_ids.index(station)]
        elevation = knmi_csv_info['elevation'][station_ids.index(station)]
        data = read_knmi_data(station)
        write_combined_data_netcdf(data, station, lon, lat, elevation)
| [
"netcdftime.date2num",
"csv.DictReader",
"load_knmi_data.load_knmi_data",
"collections.defaultdict",
"numpy.dtype",
"time.time"
] | [((1095, 1124), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (1118, 1124), False, 'import collections\n'), ((5595, 5631), 'csv.DictReader', 'csv.DictReader', (['csvin'], {'delimiter': '""","""'}), "(csvin, delimiter=',')\n", (5609, 5631), False, 'import csv\n'), ((968, 992), 'load_knmi_data.load_knmi_data', 'load_knmi_data', (['filename'], {}), '(filename)\n', (982, 992), False, 'from load_knmi_data import load_knmi_data\n'), ((2026, 2037), 'time.time', 'time.time', ([], {}), '()\n', (2035, 2037), False, 'import time\n'), ((2923, 2939), 'numpy.dtype', 'dtype', (['"""float32"""'], {}), "('float32')\n", (2928, 2939), False, 'from numpy import dtype\n'), ((3112, 3128), 'numpy.dtype', 'dtype', (['"""float32"""'], {}), "('float32')\n", (3117, 3128), False, 'from numpy import dtype\n'), ((3325, 3341), 'numpy.dtype', 'dtype', (['"""float32"""'], {}), "('float32')\n", (3330, 3341), False, 'from numpy import dtype\n'), ((2356, 2468), 'netcdftime.date2num', 'netcdftime.date2num', (["data['datetime'][idx]"], {'units': '"""minutes since 2010-01-01 00:00:00"""', 'calendar': '"""gregorian"""'}), "(data['datetime'][idx], units=\n 'minutes since 2010-01-01 00:00:00', calendar='gregorian')\n", (2375, 2468), False, 'import netcdftime\n')] |
#! /usr/local/bin/python
# ! -*- encoding:utf-8 -*-
from pathlib import Path
import pandas as pd
import numpy as np
import random
import os
import argparse
# Project root: three directory levels above this file; PPP data lives
# under <root>/data/PPP.
proj_path = Path(__file__).parent.resolve().parent.resolve().parent.resolve()
data_path = proj_path / 'data' / 'PPP'
def read_csv(data_file, dataset):
    """Load an edge-list CSV (no header) and generate the ground-truth
    and junk label files for *dataset* via generate_gt.

    Parameters
    ----------
    data_file : str
        Name of the edge-list CSV file, relative to ``data_path``.
    dataset : str
        Sub-directory name under ``data_path`` that receives the
        generated ``PPP_gt.csv`` / ``PPP_junk.csv`` files.
    """
    # (removed two unused path locals; generate_gt rebuilds them itself)
    edge_list_path = data_path / data_file
    df = pd.read_csv(edge_list_path, header=None)
    generate_gt(df, dataset)
def generate_gt(df, dataset):
    """Write positive (ground-truth) and negative (junk) edge files.

    Every row of *df* (first two columns = an edge) is written to
    ``<dataset>/PPP_gt.csv``; for each positive edge one random negative
    pair anchored on one of its endpoints (chosen by a coin flip) is
    sampled via find_junk and written to ``<dataset>/PPP_junk.csv``.
    Output lines are ``<id1>,<id2>`` with CRLF endings.
    """
    cci_labels_gt_path = data_path / dataset / 'PPP_gt.csv'
    cci_labels_junk_path = data_path / dataset / 'PPP_junk.csv'
    # collect the edge list (first two columns of each row)
    edge_list = [];
    for indexs in df.index:
        rowData = df.loc[indexs].values[0:2]
        rowData = rowData.tolist()
        edge_list.append(rowData)
    # collect the sorted set of distinct node ids
    nodes = [];
    for edge in edge_list:
        if edge[0] not in nodes:
            nodes.append(edge[0])
        if edge[1] not in nodes:
            nodes.append(edge[1])
    nodes.sort()
    # NOTE(review): gene2id is built but never used in this function.
    gene2id = {gene: idx for idx, gene in enumerate(nodes)}
    cur_cci_gt = []
    cur_cci_junk = []
    for indexs in df.index:
        rowData = df.loc[indexs].values[0:2]
        rowData = rowData.tolist()
        p1 = rowData[0]
        p2 = rowData[1]
        # coin flip: anchor the junk pair on p1 or p2
        choice = random.randint(0, 1)
        cur_cci_gt.append([p1, p2])
        if choice:
            a, c = find_junk(p1, nodes, edge_list, cur_cci_junk)
        else:
            a, c = find_junk(p2, nodes, edge_list, cur_cci_junk)
        cur_cci_junk.append([a, c])
    with open(cci_labels_gt_path, 'w', encoding='utf-8') as f:
        print(f"cur cci {len(cur_cci_gt)}")
        for cci_label in cur_cci_gt:
            f.write(f"{int(cci_label[0])},{int(cci_label[1])}\r\n")
    with open(cci_labels_junk_path, 'w', encoding='utf-8') as f:
        print(f"cur cci junk {len(cur_cci_junk)}")
        for cci_label in cur_cci_junk:
            f.write(f"{int(cci_label[0])},{int(cci_label[1])}\r\n")
    # with open(cci_labels_junk_path.format(dataset, id2, type1), 'w', encoding='utf-8') as f:
    #     print(f"cur cci junk {len(cur_cci_junk_b2d)}")
    #     for cci_label in cur_cci_junk_b2d:
    #         f.write(f"{int(cci_label[0])},{int(cci_label[1])},{int(cci_label[2])}\r\n")
def find_junk(a, nodes, edge_list, cur_cci_junk):
    """Sample a negative (junk) partner *c* for node *a*.

    Draws random candidates from *nodes* until the pair ``[a, c]`` is
    neither a real edge in *edge_list* nor already used as a junk pair
    in *cur_cci_junk*.

    Bug fix: the rejection test previously checked ``[a, c] in nodes``,
    which can never be true (*nodes* holds single ids, not pairs), so
    "junk" pairs could coincide with genuine edges.  The test now checks
    *edge_list*.

    Returns
    -------
    tuple
        ``(a, c)`` -- the anchor node and its sampled junk partner.
    """
    c = random.choice(nodes)
    while [a, c] in edge_list or [a, c] in cur_cci_junk:
        c = random.choice(nodes)
    return a, c
def clean_cross_data(df1, df2):
    """Drop from *df2* every edge that touches a node present in *df1*.

    Keeps the train/test splits node-disjoint: a row of *df2* survives
    only when neither of its first two column values appears anywhere in
    the first two columns of *df1*.

    Improvements over the previous version: node membership is tested
    against a set (O(1) per lookup instead of scanning a list), and the
    result is built with ``DataFrame.loc`` instead of the deprecated
    ``DataFrame.append`` (removed in pandas 2.0).

    Parameters
    ----------
    df1 : pandas.DataFrame
        Edge list whose nodes must not leak into the result.
    df2 : pandas.DataFrame
        Edge list to filter.

    Returns
    -------
    pandas.DataFrame
        The surviving rows of *df2*, original index preserved.
    """
    # collect df1's node ids once
    seen = set()
    for idx in df1.index:
        u, v = df1.loc[idx].values[0:2].tolist()
        seen.add(u)
        seen.add(v)
    keep = [idx for idx in df2.index
            if df2.loc[idx].values[0] not in seen
            and df2.loc[idx].values[1] not in seen]
    return df2.loc[keep]
if __name__ == "__main__":
    # NOTE(review): 'os' is already imported at module level; this
    # re-import is redundant but harmless.
    import os
    # python ./data/PPP/generate.py --dataset dataset_all_cross --data_path PP-Pathways_ppi.csv --cross_data 1 --train_rate 0.01
    parser = argparse.ArgumentParser(description='GraphSAGE')
    parser.add_argument("--random_seed", type=int, default=10086)
    parser.add_argument("--dataset", type=str, default='dataset')
    parser.add_argument("--data_path", type=str, default=None)
    parser.add_argument("--train_rate", type=float, default=0.1)
    parser.add_argument("--cross_data", type=int, default=0)
    params = parser.parse_args()
    # seed both RNGs for reproducible junk-edge sampling
    random.seed(params.random_seed)
    np.random.seed(params.random_seed)
    if params.data_path == None:
        # no input file given: use the pre-split edge-list files
        # test_dataset
        print('begin test generate:')
        read_csv("PP-9001~10000.csv", dataset='test_' + params.dataset)
        # train_dataset
        print('begin train generate:')
        read_csv('PP-1~9000.csv', dataset='train_' + params.dataset)
    else:
        # split one edge-list file into test (first train_rate fraction)
        # and train (remainder)
        edge_list_path = data_path / params.data_path
        df = pd.read_csv(edge_list_path, header=None)
        lens = len(df)
        train_size = int(lens * params.train_rate)
        df1 = df[0:train_size]
        df2 = df[train_size:lens]
        print('begin test generate:')
        generate_gt(df1, dataset='test_' + params.dataset)
        if params.cross_data == 1:
            # optionally make the splits node-disjoint
            print('begin clean_cross_data:')
            df2 = clean_cross_data(df1, df2)
        print('begin train generate:')
        generate_gt(df2, dataset='train_' + params.dataset)
| [
"random.choice",
"argparse.ArgumentParser",
"pandas.read_csv",
"pathlib.Path",
"random.seed",
"numpy.random.seed",
"pandas.DataFrame",
"random.randint"
] | [((487, 527), 'pandas.read_csv', 'pd.read_csv', (['edge_list_path'], {'header': 'None'}), '(edge_list_path, header=None)\n', (498, 527), True, 'import pandas as pd\n'), ((2419, 2439), 'random.choice', 'random.choice', (['nodes'], {}), '(nodes)\n', (2432, 2439), False, 'import random\n'), ((2585, 2599), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2597, 2599), True, 'import pandas as pd\n'), ((3214, 3262), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""GraphSAGE"""'}), "(description='GraphSAGE')\n", (3237, 3262), False, 'import argparse\n'), ((3621, 3652), 'random.seed', 'random.seed', (['params.random_seed'], {}), '(params.random_seed)\n', (3632, 3652), False, 'import random\n'), ((3657, 3691), 'numpy.random.seed', 'np.random.seed', (['params.random_seed'], {}), '(params.random_seed)\n', (3671, 3691), True, 'import numpy as np\n'), ((1344, 1364), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (1358, 1364), False, 'import random\n'), ((2505, 2525), 'random.choice', 'random.choice', (['nodes'], {}), '(nodes)\n', (2518, 2525), False, 'import random\n'), ((4069, 4109), 'pandas.read_csv', 'pd.read_csv', (['edge_list_path'], {'header': 'None'}), '(edge_list_path, header=None)\n', (4080, 4109), True, 'import pandas as pd\n'), ((169, 183), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (173, 183), False, 'from pathlib import Path\n')] |
from hydroDL import kPath, utils
from hydroDL.app import waterQuality
from hydroDL.master import basins
from hydroDL.data import usgs, gageII, gridMET, ntn, transform
from hydroDL.master import slurm
from hydroDL.post import axplot, figplot
import numpy as np
import matplotlib.pyplot as plt
# Exploratory script: plot per-site empirical CDFs of one water-quality
# code, raw (top panel) and after normalization (bottom panel).
codeLst = sorted(usgs.newC)
# dataName = 'nbWT'
dataName = 'nbW'
wqData = waterQuality.DataModelWQ(dataName)
siteNoLst = wqData.info.siteNo.unique()
# NOTE(review): this reassignment makes the sorted() call above dead code.
codeLst = usgs.newC
icLst = [wqData.varC.index(code) for code in codeLst]
data = wqData.c[:, np.array(icLst)]
mtdLst = waterQuality.extractVarMtd(codeLst)
dataNorm, stat = transform.transInAll(data, mtdLst)
info = wqData.info
# the single code to plot
code = '00660'
ic = codeLst.index(code)
fig, axes = plt.subplots(2, 1, figsize=(6, 8))
for siteNo in siteNoLst:
    indS = info[info['siteNo'] == siteNo].index.values
    yr = utils.sortData(data[indS, ic])
    yn = utils.sortData(dataNorm[indS, ic])
    # empirical quantile positions in [0, 1)
    x = np.arange(len(yr))/len(yr)
    _ = axes[0].plot(x, yr, 'k-', alpha=0.2)
    _ = axes[1].plot(x, yn, 'k-', alpha=0.2)
shortName = usgs.codePdf.loc[code]['shortName']
axes[1].set_ylim([-0.2, 1.2])
axes[0].set_title('{} {} CDFs '.format(code, shortName))
axes[1].set_title('{} {} CDFs after normalization '.format(code, shortName))
fig.show()
| [
"numpy.array",
"hydroDL.data.transform.transInAll",
"hydroDL.utils.sortData",
"hydroDL.app.waterQuality.DataModelWQ",
"hydroDL.app.waterQuality.extractVarMtd",
"matplotlib.pyplot.subplots"
] | [((367, 401), 'hydroDL.app.waterQuality.DataModelWQ', 'waterQuality.DataModelWQ', (['dataName'], {}), '(dataName)\n', (391, 401), False, 'from hydroDL.app import waterQuality\n'), ((562, 597), 'hydroDL.app.waterQuality.extractVarMtd', 'waterQuality.extractVarMtd', (['codeLst'], {}), '(codeLst)\n', (588, 597), False, 'from hydroDL.app import waterQuality\n'), ((615, 649), 'hydroDL.data.transform.transInAll', 'transform.transInAll', (['data', 'mtdLst'], {}), '(data, mtdLst)\n', (635, 649), False, 'from hydroDL.data import usgs, gageII, gridMET, ntn, transform\n'), ((722, 756), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(6, 8)'}), '(2, 1, figsize=(6, 8))\n', (734, 756), True, 'import matplotlib.pyplot as plt\n'), ((846, 876), 'hydroDL.utils.sortData', 'utils.sortData', (['data[indS, ic]'], {}), '(data[indS, ic])\n', (860, 876), False, 'from hydroDL import kPath, utils\n'), ((886, 920), 'hydroDL.utils.sortData', 'utils.sortData', (['dataNorm[indS, ic]'], {}), '(dataNorm[indS, ic])\n', (900, 920), False, 'from hydroDL import kPath, utils\n'), ((536, 551), 'numpy.array', 'np.array', (['icLst'], {}), '(icLst)\n', (544, 551), True, 'import numpy as np\n')] |
""" Affine image registration module consisting of the following classes:
AffineMap: encapsulates the necessary information to perform affine
transforms between two domains, defined by a `static` and a `moving`
image. The `domain` of the transform is the set of points in the
`static` image's grid, and the `codomain` is the set of points in
the `moving` image. When we call the `transform` method, `AffineMap`
maps each point `x` of the domain (`static` grid) to the codomain
(`moving` grid) and interpolates the `moving` image at that point
to obtain the intensity value to be placed at `x` in the resulting
grid. The `transform_inverse` method performs the opposite operation
mapping points in the codomain to points in the domain.
ParzenJointHistogram: computes the marginal and joint distributions of
intensities of a pair of images, using Parzen windows [Parzen62]
with a cubic spline kernel, as proposed by Mattes et al. [Mattes03].
It also computes the gradient of the joint histogram w.r.t. the
parameters of a given transform.
MutualInformationMetric: computes the value and gradient of the mutual
information metric the way `Optimizer` needs them. That is, given
a set of transform parameters, it will use `ParzenJointHistogram`
to compute the value and gradient of the joint intensity histogram
evaluated at the given parameters, and evaluate the the value and
gradient of the histogram's mutual information.
AffineRegistration: it runs the multi-resolution registration, putting
all the pieces together. It needs to create the scale space of the
images and run the multi-resolution registration by using the Metric
and the Optimizer at each level of the Gaussian pyramid. At each
level, it will setup the metric to compute value and gradient of the
metric with the input images with different levels of smoothing.
References
----------
[Parzen62] <NAME>. On the estimation of a probability density
function and the mode. Annals of Mathematical Statistics,
33(3), 1065-1076, 1962.
[Mattes03] <NAME>., <NAME>., <NAME>., Lewellen, <NAME>.,
& <NAME>. PET-CT image registration in the chest using
free-form deformations. IEEE Transactions on Medical
Imaging, 22(1), 120-8, 2003.
"""
import numpy as np
import numpy.linalg as npl
import scipy.ndimage as ndimage
from ..core.optimize import Optimizer
from ..core.optimize import SCIPY_LESS_0_12
from . import vector_fields as vf
from . import VerbosityLevels
from .parzenhist import (ParzenJointHistogram,
sample_domain_regular,
compute_parzen_mi)
from .imwarp import (get_direction_and_spacings, ScaleSpace)
from .scalespace import IsotropicScaleSpace
# Interpolation modes accepted by AffineMap.transform / transform_inverse
_interp_options = ['nearest', 'linear']
# Dispatch table: (image dimension, interpolation mode) -> compiled
# affine-warp routine from the vector_fields module
_transform_method = {}
_transform_method[(2, 'nearest')] = vf.transform_2d_affine_nn
_transform_method[(3, 'nearest')] = vf.transform_3d_affine_nn
_transform_method[(2, 'linear')] = vf.transform_2d_affine
_transform_method[(3, 'linear')] = vf.transform_3d_affine
class AffineInversionError(Exception):
    """Raised when an affine matrix contains invalid (NaN) entries or
    cannot be inverted."""
    pass
class AffineMap(object):
    """Affine transform between a `static` (domain) and a `moving`
    (codomain) image grid; see ``__init__`` for the full contract."""

    def __init__(self, affine, domain_grid_shape=None, domain_grid2world=None,
                 codomain_grid_shape=None, codomain_grid2world=None):
        """ AffineMap
        Implements an affine transformation whose domain is given by
        `domain_grid` and `domain_grid2world`, and whose co-domain is
        given by `codomain_grid` and `codomain_grid2world`.
        The actual transform is represented by the `affine` matrix, which
        operate in world coordinates. Therefore, to transform a moving image
        towards a static image, we first map each voxel (i,j,k) of the static
        image to world coordinates (x,y,z) by applying `domain_grid2world`.
        Then we apply the `affine` transform to (x,y,z) obtaining (x', y', z')
        in moving image's world coordinates. Finally, (x', y', z') is mapped
        to voxel coordinates (i', j', k') in the moving image by multiplying
        (x', y', z') by the inverse of `codomain_grid2world`. The
        `codomain_grid_shape` is used analogously to transform the static
        image towards the moving image when calling `transform_inverse`.
        If the domain/co-domain information is not provided (None) then the
        sampling information needs to be specified each time the `transform`
        or `transform_inverse` is called to transform images. Note that such
        sampling information is not necessary to transform points defined in
        physical space, such as stream lines.
        Parameters
        ----------
        affine : array, shape (dim + 1, dim + 1)
            the matrix defining the affine transform, where `dim` is the
            dimension of the space this map operates in (2 for 2D images,
            3 for 3D images). If None, then `self` represents the identity
            transformation.
        domain_grid_shape : sequence, shape (dim,), optional
            the shape of the default domain sampling grid. When `transform`
            is called to transform an image, the resulting image will have
            this shape, unless a different sampling information is provided.
            If None, then the sampling grid shape must be specified each time
            the `transform` method is called.
        domain_grid2world : array, shape (dim + 1, dim + 1), optional
            the grid-to-world transform associated with the domain grid.
            If None (the default), then the grid-to-world transform is assumed
            to be the identity.
        codomain_grid_shape : sequence of integers, shape (dim,)
            the shape of the default co-domain sampling grid. When
            `transform_inverse` is called to transform an image, the resulting
            image will have this shape, unless a different sampling
            information is provided. If None (the default), then the sampling
            grid shape must be specified each time the `transform_inverse`
            method is called.
        codomain_grid2world : array, shape (dim + 1, dim + 1)
            the grid-to-world transform associated with the co-domain grid.
            If None (the default), then the grid-to-world transform is assumed
            to be the identity.
        """
        self.set_affine(affine)
        self.domain_shape = domain_grid_shape
        self.domain_grid2world = domain_grid2world
        self.codomain_shape = codomain_grid_shape
        self.codomain_grid2world = codomain_grid2world

    def set_affine(self, affine):
        """ Sets the affine transform (operating in physical space)
        Parameters
        ----------
        affine : array, shape (dim + 1, dim + 1)
            the matrix representing the affine transform operating in
            physical space. The domain and co-domain information
            remains unchanged. If None, then `self` represents the identity
            transformation.
        """
        self.affine = affine
        if self.affine is None:
            # identity transform: no inverse matrix needed
            self.affine_inv = None
            return
        if np.any(np.isnan(affine)):
            raise AffineInversionError('Affine contains invalid elements')
        try:
            # cache the inverse, used by transform()/_apply_transform
            self.affine_inv = npl.inv(affine)
        except npl.LinAlgError:
            raise AffineInversionError('Affine cannot be inverted')

    def _apply_transform(self, image, interp='linear', image_grid2world=None,
                         sampling_grid_shape=None, sampling_grid2world=None,
                         resample_only=False, apply_inverse=False):
        """ Transforms the input image applying this affine transform
        This is a generic function to transform images using either this
        (direct) transform or its inverse.
        If applying the direct transform (`apply_inverse=False`):
        by default, the transformed image is sampled at a grid defined by
        `self.domain_shape` and `self.domain_grid2world`.
        If applying the inverse transform (`apply_inverse=True`):
        by default, the transformed image is sampled at a grid defined by
        `self.codomain_shape` and `self.codomain_grid2world`.
        If the sampling information was not provided at initialization of this
        transform then `sampling_grid_shape` is mandatory.
        Parameters
        ----------
        image : array, shape (X, Y) or (X, Y, Z)
            the image to be transformed
        interp : string, either 'linear' or 'nearest'
            the type of interpolation to be used, either 'linear'
            (for k-linear interpolation) or 'nearest' for nearest neighbor
        image_grid2world : array, shape (dim + 1, dim + 1), optional
            the grid-to-world transform associated with `image`.
            If None (the default), then the grid-to-world transform is assumed
            to be the identity.
        sampling_grid_shape : sequence, shape (dim,), optional
            the shape of the grid where the transformed image must be sampled.
            If None (the default), then `self.domain_shape` is used instead
            (which must have been set at initialization, otherwise an exception
            will be raised).
        sampling_grid2world : array, shape (dim + 1, dim + 1), optional
            the grid-to-world transform associated with the sampling grid
            (specified by `sampling_grid_shape`, or by default
            `self.domain_shape`). If None (the default), then the
            grid-to-world transform is assumed to be the identity.
        resample_only : Boolean, optional
            If False (the default) the affine transform is applied normally.
            If True, then the affine transform is not applied, and the input
            image is just re-sampled on the domain grid of this transform.
        apply_inverse : Boolean, optional
            If False (the default) the image is transformed from the codomain
            of this transform to its domain using the (direct) affine
            transform. Otherwise, the image is transformed from the domain
            of this transform to its codomain using the (inverse) affine
            transform.
        Returns
        -------
        transformed : array, shape `sampling_grid_shape` or `self.domain_shape`
            the transformed image, sampled at the requested grid
        """
        # Verify valid interpolation requested
        if interp not in _interp_options:
            raise ValueError('Unknown interpolation method: %s' % (interp,))
        # Obtain sampling grid
        if sampling_grid_shape is None:
            if apply_inverse:
                sampling_grid_shape = self.codomain_shape
            else:
                sampling_grid_shape = self.domain_shape
        if sampling_grid_shape is None:
            msg = 'Unknown sampling info. Provide a valid sampling_grid_shape'
            raise ValueError(msg)
        dim = len(sampling_grid_shape)
        shape = np.array(sampling_grid_shape, dtype=np.int32)
        # Verify valid image dimension
        if dim < 2 or dim > 3:
            raise ValueError('Undefined transform for dimension: %d' % (dim,))
        # Obtain grid-to-world transform for sampling grid
        if sampling_grid2world is None:
            if apply_inverse:
                sampling_grid2world = self.codomain_grid2world
            else:
                sampling_grid2world = self.domain_grid2world
        if sampling_grid2world is None:
            sampling_grid2world = np.eye(dim + 1)
        # Obtain world-to-grid transform for input image
        if image_grid2world is None:
            if apply_inverse:
                image_grid2world = self.domain_grid2world
            else:
                image_grid2world = self.codomain_grid2world
            if image_grid2world is None:
                image_grid2world = np.eye(dim + 1)
        image_world2grid = npl.inv(image_grid2world)
        # Compute the transform from sampling grid to input image grid
        if apply_inverse:
            aff = self.affine_inv
        else:
            aff = self.affine
        if (aff is None) or resample_only:
            # identity affine (or resampling only): map sampling grid
            # directly to the image grid
            comp = image_world2grid.dot(sampling_grid2world)
        else:
            comp = image_world2grid.dot(aff.dot(sampling_grid2world))
        # Transform the input image
        if interp == 'linear':
            image = image.astype(np.float64)
        transformed = _transform_method[(dim, interp)](image, shape, comp)
        return transformed

    def transform(self, image, interp='linear', image_grid2world=None,
                  sampling_grid_shape=None, sampling_grid2world=None,
                  resample_only=False):
        """ Transforms the input image from co-domain to domain space
        By default, the transformed image is sampled at a grid defined by
        `self.domain_shape` and `self.domain_grid2world`. If such
        information was not provided then `sampling_grid_shape` is mandatory.
        Parameters
        ----------
        image : array, shape (X, Y) or (X, Y, Z)
            the image to be transformed
        interp : string, either 'linear' or 'nearest'
            the type of interpolation to be used, either 'linear'
            (for k-linear interpolation) or 'nearest' for nearest neighbor
        image_grid2world : array, shape (dim + 1, dim + 1), optional
            the grid-to-world transform associated with `image`.
            If None (the default), then the grid-to-world transform is assumed
            to be the identity.
        sampling_grid_shape : sequence, shape (dim,), optional
            the shape of the grid where the transformed image must be sampled.
            If None (the default), then `self.codomain_shape` is used instead
            (which must have been set at initialization, otherwise an exception
            will be raised).
        sampling_grid2world : array, shape (dim + 1, dim + 1), optional
            the grid-to-world transform associated with the sampling grid
            (specified by `sampling_grid_shape`, or by default
            `self.codomain_shape`). If None (the default), then the
            grid-to-world transform is assumed to be the identity.
        resample_only : Boolean, optional
            If False (the default) the affine transform is applied normally.
            If True, then the affine transform is not applied, and the input
            image is just re-sampled on the domain grid of this transform.
        Returns
        -------
        transformed : array, shape `sampling_grid_shape` or
                      `self.codomain_shape`
            the transformed image, sampled at the requested grid
        """
        transformed = self._apply_transform(image, interp, image_grid2world,
                                            sampling_grid_shape,
                                            sampling_grid2world,
                                            resample_only,
                                            apply_inverse=False)
        return np.array(transformed)

    def transform_inverse(self, image, interp='linear', image_grid2world=None,
                          sampling_grid_shape=None, sampling_grid2world=None,
                          resample_only=False):
        """ Transforms the input image from domain to co-domain space
        By default, the transformed image is sampled at a grid defined by
        `self.codomain_shape` and `self.codomain_grid2world`. If such
        information was not provided then `sampling_grid_shape` is mandatory.
        Parameters
        ----------
        image : array, shape (X, Y) or (X, Y, Z)
            the image to be transformed
        interp : string, either 'linear' or 'nearest'
            the type of interpolation to be used, either 'linear'
            (for k-linear interpolation) or 'nearest' for nearest neighbor
        image_grid2world : array, shape (dim + 1, dim + 1), optional
            the grid-to-world transform associated with `image`.
            If None (the default), then the grid-to-world transform is assumed
            to be the identity.
        sampling_grid_shape : sequence, shape (dim,), optional
            the shape of the grid where the transformed image must be sampled.
            If None (the default), then `self.codomain_shape` is used instead
            (which must have been set at initialization, otherwise an exception
            will be raised).
        sampling_grid2world : array, shape (dim + 1, dim + 1), optional
            the grid-to-world transform associated with the sampling grid
            (specified by `sampling_grid_shape`, or by default
            `self.codomain_shape`). If None (the default), then the
            grid-to-world transform is assumed to be the identity.
        resample_only : Boolean, optional
            If False (the default) the affine transform is applied normally.
            If True, then the affine transform is not applied, and the input
            image is just re-sampled on the domain grid of this transform.
        Returns
        -------
        transformed : array, shape `sampling_grid_shape` or
                      `self.codomain_shape`
            the transformed image, sampled at the requested grid
        """
        transformed = self._apply_transform(image, interp, image_grid2world,
                                            sampling_grid_shape,
                                            sampling_grid2world,
                                            resample_only,
                                            apply_inverse=True)
        return np.array(transformed)
class MutualInformationMetric(object):
    def __init__(self, nbins=32, sampling_proportion=None):
        r""" Initializes an instance of the Mutual Information metric
        This class implements the methods required by Optimizer to drive the
        registration process.
        Parameters
        ----------
        nbins : int, optional
            the number of bins to be used for computing the intensity
            histograms. The default is 32.
        sampling_proportion : None or float in interval (0, 1], optional
            There are two types of sampling: dense and sparse. Dense sampling
            uses all voxels for estimating the (joint and marginal) intensity
            histograms, while sparse sampling uses a subset of them. If
            `sampling_proportion` is None, then dense sampling is
            used. If `sampling_proportion` is a floating point value in (0,1]
            then sparse sampling is used, where `sampling_proportion`
            specifies the proportion of voxels to be used. The default is
            None.
        Notes
        -----
        Since we use linear interpolation, images are not, in general,
        differentiable at exact voxel coordinates, but they are differentiable
        between voxel coordinates. When using sparse sampling, selected voxels
        are slightly moved by adding a small random displacement within one
        voxel to prevent sampling points from being located exactly at voxel
        coordinates. When using dense sampling, this random displacement is
        not applied.
        """
        # Parzen-window estimator for the joint/marginal intensity PDFs
        self.histogram = ParzenJointHistogram(nbins)
        self.sampling_proportion = sampling_proportion
        # Cached metric value and gradient from the most recent evaluation
        self.metric_val = None
        self.metric_grad = None
def setup(self, transform, static, moving, static_grid2world=None,
moving_grid2world=None, starting_affine=None):
r""" Prepares the metric to compute intensity densities and gradients
The histograms will be setup to compute probability densities of
intensities within the minimum and maximum values of `static` and
`moving`
Parameters
----------
transform: instance of Transform
the transformation with respect to whose parameters the gradient
must be computed
static : array, shape (S, R, C) or (R, C)
static image
moving : array, shape (S', R', C') or (R', C')
moving image. The dimensions of the static (S, R, C) and moving
(S', R', C') images do not need to be the same.
static_grid2world : array (dim+1, dim+1), optional
the grid-to-space transform of the static image. The default is
None, implying the transform is the identity.
moving_grid2world : array (dim+1, dim+1)
the grid-to-space transform of the moving image. The default is
None, implying the spacing along all axes is 1.
starting_affine : array, shape (dim+1, dim+1), optional
the pre-aligning matrix (an affine transform) that roughly aligns
the moving image towards the static image. If None, no
pre-alignment is performed. If a pre-alignment matrix is available,
it is recommended to provide this matrix as `starting_affine`
instead of manually transforming the moving image to reduce
interpolation artifacts. The default is None, implying no
pre-alignment is performed.
"""
self.dim = len(static.shape)
if moving_grid2world is None:
moving_grid2world = np.eye(self.dim + 1)
if static_grid2world is None:
static_grid2world = np.eye(self.dim + 1)
self.transform = transform
self.static = np.array(static).astype(np.float64)
self.moving = np.array(moving).astype(np.float64)
self.static_grid2world = static_grid2world
self.static_world2grid = npl.inv(static_grid2world)
self.moving_grid2world = moving_grid2world
self.moving_world2grid = npl.inv(moving_grid2world)
self.static_direction, self.static_spacing = \
get_direction_and_spacings(static_grid2world, self.dim)
self.moving_direction, self.moving_spacing = \
get_direction_and_spacings(moving_grid2world, self.dim)
self.starting_affine = starting_affine
P = np.eye(self.dim + 1)
if self.starting_affine is not None:
P = self.starting_affine
self.affine_map = AffineMap(P, static.shape, static_grid2world,
moving.shape, moving_grid2world)
if self.dim == 2:
self.interp_method = vf.interpolate_scalar_2d
else:
self.interp_method = vf.interpolate_scalar_3d
if self.sampling_proportion is None:
self.samples = None
self.ns = 0
else:
k = int(np.ceil(1.0 / self.sampling_proportion))
shape = np.array(static.shape, dtype=np.int32)
self.samples = sample_domain_regular(k, shape, static_grid2world)
self.samples = np.array(self.samples)
self.ns = self.samples.shape[0]
# Add a column of ones (homogeneous coordinates)
self.samples = np.hstack((self.samples, np.ones(self.ns)[:, None]))
if self.starting_affine is None:
self.samples_prealigned = self.samples
else:
self.samples_prealigned =\
self.starting_affine.dot(self.samples.T).T
# Sample the static image
static_p = self.static_world2grid.dot(self.samples.T).T
static_p = static_p[..., :self.dim]
self.static_vals, inside = self.interp_method(static, static_p)
self.static_vals = np.array(self.static_vals, dtype=np.float64)
self.histogram.setup(self.static, self.moving)
def _update_histogram(self):
r""" Updates the histogram according to the current affine transform
The current affine transform is given by `self.affine_map`, which
must be set before calling this method.
Returns
-------
static_values: array, shape(n,) if sparse sampling is being used,
array, shape(S, R, C) or (R, C) if dense sampling
the intensity values corresponding to the static image used to
update the histogram. If sparse sampling is being used, then
it is simply a sequence of scalars, obtained by sampling the static
image at the `n` sampling points. If dense sampling is being used,
then the intensities are given directly by the static image,
whose shape is (S, R, C) in the 3D case or (R, C) in the 2D case.
moving_values: array, shape(n,) if sparse sampling is being used,
array, shape(S, R, C) or (R, C) if dense sampling
the intensity values corresponding to the moving image used to
update the histogram. If sparse sampling is being used, then
it is simply a sequence of scalars, obtained by sampling the moving
image at the `n` sampling points (mapped to the moving space by the
current affine transform). If dense sampling is being used,
then the intensities are given by the moving imaged linearly
transformed towards the static image by the current affine, which
results in an image of the same shape as the static image.
"""
static_values = None
moving_values = None
if self.sampling_proportion is None: # Dense case
static_values = self.static
moving_values = self.affine_map.transform(self.moving)
self.histogram.update_pdfs_dense(static_values, moving_values)
else: # Sparse case
sp_to_moving = self.moving_world2grid.dot(self.affine_map.affine)
pts = sp_to_moving.dot(self.samples.T).T # Points on moving grid
pts = pts[..., :self.dim]
self.moving_vals, inside = self.interp_method(self.moving, pts)
self.moving_vals = np.array(self.moving_vals)
static_values = self.static_vals
moving_values = self.moving_vals
self.histogram.update_pdfs_sparse(static_values, moving_values)
return static_values, moving_values
def _update_mutual_information(self, params, update_gradient=True):
r""" Updates marginal and joint distributions and the joint gradient
The distributions are updated according to the static and transformed
images. The transformed image is precisely the moving image after
transforming it by the transform defined by the `params` parameters.
The gradient of the joint PDF is computed only if update_gradient
is True.
Parameters
----------
params : array, shape (n,)
the parameter vector of the transform currently used by the metric
(the transform name is provided when self.setup is called), n is
the number of parameters of the transform
update_gradient : Boolean, optional
if True, the gradient of the joint PDF will also be computed,
otherwise, only the marginal and joint PDFs will be computed.
The default is True.
"""
# Get the matrix associated with the `params` parameter vector
current_affine = self.transform.param_to_matrix(params)
# Get the static-to-prealigned matrix (only needed for the MI gradient)
static2prealigned = self.static_grid2world
if self.starting_affine is not None:
current_affine = current_affine.dot(self.starting_affine)
static2prealigned = self.starting_affine.dot(static2prealigned)
self.affine_map.set_affine(current_affine)
# Update the histogram with the current joint intensities
static_values, moving_values = self._update_histogram()
H = self.histogram # Shortcut to `self.histogram`
grad = None # Buffer to write the MI gradient into (if needed)
if update_gradient:
# Re-allocate buffer for the gradient, if needed
n = params.shape[0] # Number of parameters
if (self.metric_grad is None) or (self.metric_grad.shape[0] != n):
self.metric_grad = np.empty(n)
grad = self.metric_grad
# Compute the gradient of the joint PDF w.r.t. parameters
if self.sampling_proportion is None: # Dense case
# Compute the gradient of moving img. at physical points
# associated with the >>static image's grid<< cells
# The image gradient must be eval. at current moved points
grid_to_world = current_affine.dot(self.static_grid2world)
mgrad, inside = vf.gradient(self.moving,
self.moving_world2grid,
self.moving_spacing,
self.static.shape,
grid_to_world)
# The Jacobian must be evaluated at the pre-aligned points
H.update_gradient_dense(params, self.transform, static_values,
moving_values, static2prealigned, mgrad)
else: # Sparse case
# Compute the gradient of moving at the sampling points
# which are already given in physical space coordinates
pts = current_affine.dot(self.samples.T).T # Moved points
mgrad, inside = vf.sparse_gradient(self.moving,
self.moving_world2grid,
self.moving_spacing,
pts)
# The Jacobian must be evaluated at the pre-aligned points
pts = self.samples_prealigned[..., :self.dim]
H.update_gradient_sparse(params, self.transform, static_values,
moving_values, pts, mgrad)
# Call the cythonized MI computation with self.histogram fields
self.metric_val = compute_parzen_mi(H.joint, H.joint_grad,
H.smarginal, H.mmarginal,
grad)
def distance(self, params):
r""" Numeric value of the negative Mutual Information
We need to change the sign so we can use standard minimization
algorithms.
Parameters
----------
params : array, shape (n,)
the parameter vector of the transform currently used by the metric
(the transform name is provided when self.setup is called), n is
the number of parameters of the transform
Returns
-------
neg_mi : float
the negative mutual information of the input images after
transforming the moving image by the currently set transform
with `params` parameters
"""
try:
self._update_mutual_information(params, False)
except AffineInversionError:
return np.inf
return -1 * self.metric_val
def gradient(self, params):
r""" Numeric value of the metric's gradient at the given parameters
Parameters
----------
params : array, shape (n,)
the parameter vector of the transform currently used by the metric
(the transform name is provided when self.setup is called), n is
the number of parameters of the transform
Returns
-------
grad : array, shape (n,)
the gradient of the negative Mutual Information
"""
try:
self._update_mutual_information(params, True)
except AffineInversionError:
return 0 * self.metric_grad
return -1 * self.metric_grad
def distance_and_gradient(self, params):
r""" Numeric value of the metric and its gradient at given parameters
Parameters
----------
params : array, shape (n,)
the parameter vector of the transform currently used by the metric
(the transform name is provided when self.setup is called), n is
the number of parameters of the transform
Returns
-------
neg_mi : float
the negative mutual information of the input images after
transforming the moving image by the currently set transform
with `params` parameters
neg_mi_grad : array, shape (n,)
the gradient of the negative Mutual Information
"""
try:
self._update_mutual_information(params, True)
except AffineInversionError:
return np.inf, 0 * self.metric_grad
return -1 * self.metric_val, -1 * self.metric_grad
class AffineRegistration(object):
def __init__(self,
metric=None,
level_iters=None,
sigmas=None,
factors=None,
method='L-BFGS-B',
ss_sigma_factor=None,
options=None):
r""" Initializes an instance of the AffineRegistration class
Parameters
----------
metric : None or object, optional
an instance of a metric. The default is None, implying
the Mutual Information metric with default settings.
level_iters : sequence, optional
the number of iterations at each scale of the scale space.
`level_iters[0]` corresponds to the coarsest scale,
`level_iters[-1]` the finest, where n is the length of the
sequence. By default, a 3-level scale space with iterations
sequence equal to [10000, 1000, 100] will be used.
sigmas : sequence of floats, optional
custom smoothing parameter to build the scale space (one parameter
for each scale). By default, the sequence of sigmas will be
[3, 1, 0].
factors : sequence of floats, optional
custom scale factors to build the scale space (one factor for each
scale). By default, the sequence of factors will be [4, 2, 1].
method : string, optional
optimization method to be used. If Scipy version < 0.12, then
only L-BFGS-B is available. Otherwise, `method` can be any
gradient-based method available in `dipy.core.Optimize`: CG, BFGS,
Newton-CG, dogleg or trust-ncg.
The default is 'L-BFGS-B'.
ss_sigma_factor : float, optional
If None, this parameter is not used and an isotropic scale
space with the given `factors` and `sigmas` will be built.
If not None, an anisotropic scale space will be used by
automatically selecting the smoothing sigmas along each axis
according to the voxel dimensions of the given image.
The `ss_sigma_factor` is used to scale the automatically computed
sigmas. For example, in the isotropic case, the sigma of the
kernel will be $factor * (2 ^ i)$ where
$i = 1, 2, ..., n_scales - 1$ is the scale (the finest resolution
image $i=0$ is never smoothed). The default is None.
options : dict, optional
extra optimization options. The default is None, implying
no extra options are passed to the optimizer.
"""
self.metric = metric
if self.metric is None:
self.metric = MutualInformationMetric()
if level_iters is None:
level_iters = [10000, 1000, 100]
self.level_iters = level_iters
self.levels = len(level_iters)
if self.levels == 0:
raise ValueError('The iterations sequence cannot be empty')
self.options = options
self.method = method
if ss_sigma_factor is not None:
self.use_isotropic = False
self.ss_sigma_factor = ss_sigma_factor
else:
self.use_isotropic = True
if factors is None:
factors = [4, 2, 1]
if sigmas is None:
sigmas = [3, 1, 0]
self.factors = factors
self.sigmas = sigmas
self.verbosity = VerbosityLevels.STATUS
def _init_optimizer(self, static, moving, transform, params0,
static_grid2world, moving_grid2world,
starting_affine):
r"""Initializes the registration optimizer
Initializes the optimizer by computing the scale space of the input
images
Parameters
----------
static : array, shape (S, R, C) or (R, C)
the image to be used as reference during optimization.
moving : array, shape (S', R', C') or (R', C')
the image to be used as "moving" during optimization. The
dimensions of the static (S, R, C) and moving (S', R', C') images
do not need to be the same.
transform : instance of Transform
the transformation with respect to whose parameters the gradient
must be computed
params0 : array, shape (n,)
parameters from which to start the optimization. If None, the
optimization will start at the identity transform. n is the
number of parameters of the specified transformation.
static_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation associated with the static image
moving_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation associated with the moving image
starting_affine : string, or matrix, or None
If string:
'mass': align centers of gravity
'voxel-origin': align physical coordinates of voxel (0,0,0)
'centers': align physical coordinates of central voxels
If matrix:
array, shape (dim+1, dim+1)
If None:
Start from identity
"""
self.dim = len(static.shape)
self.transform = transform
n = transform.get_number_of_parameters()
self.nparams = n
if params0 is None:
params0 = self.transform.get_identity_parameters()
self.params0 = params0
if starting_affine is None:
self.starting_affine = np.eye(self.dim + 1)
elif starting_affine == 'mass':
affine_map = align_centers_of_mass(static,
static_grid2world,
moving,
moving_grid2world)
self.starting_affine = affine_map.affine
elif starting_affine == 'voxel-origin':
affine_map = align_origins(static, static_grid2world,
moving, moving_grid2world)
self.starting_affine = affine_map.affine
elif starting_affine == 'centers':
affine_map = align_geometric_centers(static,
static_grid2world,
moving,
moving_grid2world)
self.starting_affine = affine_map.affine
elif (isinstance(starting_affine, np.ndarray) and
starting_affine.shape >= (self.dim, self.dim + 1)):
self.starting_affine = starting_affine
else:
raise ValueError('Invalid starting_affine matrix')
# Extract information from affine matrices to create the scale space
static_direction, static_spacing = \
get_direction_and_spacings(static_grid2world, self.dim)
moving_direction, moving_spacing = \
get_direction_and_spacings(moving_grid2world, self.dim)
static = ((static.astype(np.float64) - static.min()) /
(static.max() - static.min()))
moving = ((moving.astype(np.float64) - moving.min()) /
(moving.max() - moving.min()))
# Build the scale space of the input images
if self.use_isotropic:
self.moving_ss = IsotropicScaleSpace(moving, self.factors,
self.sigmas,
moving_grid2world,
moving_spacing, False)
self.static_ss = IsotropicScaleSpace(static, self.factors,
self.sigmas,
static_grid2world,
static_spacing, False)
else:
self.moving_ss = ScaleSpace(moving, self.levels, moving_grid2world,
moving_spacing, self.ss_sigma_factor,
False)
self.static_ss = ScaleSpace(static, self.levels, static_grid2world,
static_spacing, self.ss_sigma_factor,
False)
def optimize(self, static, moving, transform, params0,
static_grid2world=None, moving_grid2world=None,
starting_affine=None):
r''' Starts the optimization process
Parameters
----------
static : array, shape (S, R, C) or (R, C)
the image to be used as reference during optimization.
moving : array, shape (S', R', C') or (R', C')
the image to be used as "moving" during optimization. It is
necessary to pre-align the moving image to ensure its domain
lies inside the domain of the deformation fields. This is assumed
to be accomplished by "pre-aligning" the moving image towards the
static using an affine transformation given by the
'starting_affine' matrix
transform : instance of Transform
the transformation with respect to whose parameters the gradient
must be computed
params0 : array, shape (n,)
parameters from which to start the optimization. If None, the
optimization will start at the identity transform. n is the
number of parameters of the specified transformation.
static_grid2world : array, shape (dim+1, dim+1), optional
the voxel-to-space transformation associated with the static
image. The default is None, implying the transform is the
identity.
moving_grid2world : array, shape (dim+1, dim+1), optional
the voxel-to-space transformation associated with the moving
image. The default is None, implying the transform is the
identity.
starting_affine : string, or matrix, or None, optional
If string:
'mass': align centers of gravity
'voxel-origin': align physical coordinates of voxel (0,0,0)
'centers': align physical coordinates of central voxels
If matrix:
array, shape (dim+1, dim+1).
If None:
Start from identity.
The default is None.
Returns
-------
affine_map : instance of AffineMap
the affine resulting affine transformation
'''
self._init_optimizer(static, moving, transform, params0,
static_grid2world, moving_grid2world,
starting_affine)
del starting_affine # Now we must refer to self.starting_affine
# Multi-resolution iterations
original_static_shape = self.static_ss.get_image(0).shape
original_static_grid2world = self.static_ss.get_affine(0)
original_moving_shape = self.moving_ss.get_image(0).shape
original_moving_grid2world = self.moving_ss.get_affine(0)
affine_map = AffineMap(None,
original_static_shape,
original_static_grid2world,
original_moving_shape,
original_moving_grid2world)
for level in range(self.levels - 1, -1, -1):
self.current_level = level
max_iter = self.level_iters[-1 - level]
if self.verbosity >= VerbosityLevels.STATUS:
print('Optimizing level %d [max iter: %d]' % (level, max_iter))
# Resample the smooth static image to the shape of this level
smooth_static = self.static_ss.get_image(level)
current_static_shape = self.static_ss.get_domain_shape(level)
current_static_grid2world = self.static_ss.get_affine(level)
current_affine_map = AffineMap(None,
current_static_shape,
current_static_grid2world,
original_static_shape,
original_static_grid2world)
current_static = current_affine_map.transform(smooth_static)
# The moving image is full resolution
current_moving_grid2world = original_moving_grid2world
current_moving = self.moving_ss.get_image(level)
# Prepare the metric for iterations at this resolution
self.metric.setup(transform, current_static, current_moving,
current_static_grid2world,
current_moving_grid2world, self.starting_affine)
# Optimize this level
if self.options is None:
self.options = {'gtol': 1e-4,
'disp': False}
if self.method == 'L-BFGS-B':
self.options['maxfun'] = max_iter
else:
self.options['maxiter'] = max_iter
if SCIPY_LESS_0_12:
# Older versions don't expect value and gradient from
# the same function
opt = Optimizer(self.metric.distance, self.params0,
method=self.method, jac=self.metric.gradient,
options=self.options)
else:
opt = Optimizer(self.metric.distance_and_gradient, self.params0,
method=self.method, jac=True,
options=self.options)
params = opt.xopt
# Update starting_affine matrix with optimal parameters
T = self.transform.param_to_matrix(params)
self.starting_affine = T.dot(self.starting_affine)
# Start next iteration at identity
self.params0 = self.transform.get_identity_parameters()
affine_map.set_affine(self.starting_affine)
return affine_map
def align_centers_of_mass(static, static_grid2world,
moving, moving_grid2world):
r""" Transformation to align the center of mass of the input images
Parameters
----------
static : array, shape (S, R, C)
static image
static_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation of the static image
moving : array, shape (S, R, C)
moving image
moving_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation of the moving image
Returns
-------
affine_map : instance of AffineMap
the affine transformation (translation only, in this case) aligning
the center of mass of the moving image towards the one of the static
image
"""
dim = len(static.shape)
if static_grid2world is None:
static_grid2world = np.eye(dim + 1)
if moving_grid2world is None:
moving_grid2world = np.eye(dim + 1)
c_static = ndimage.measurements.center_of_mass(np.array(static))
c_static = static_grid2world.dot(c_static+(1,))
c_moving = ndimage.measurements.center_of_mass(np.array(moving))
c_moving = moving_grid2world.dot(c_moving+(1,))
transform = np.eye(dim + 1)
transform[:dim, dim] = (c_moving - c_static)[:dim]
affine_map = AffineMap(transform,
static.shape, static_grid2world,
moving.shape, moving_grid2world)
return affine_map
def align_geometric_centers(static, static_grid2world,
moving, moving_grid2world):
r""" Transformation to align the geometric center of the input images
With "geometric center" of a volume we mean the physical coordinates of
its central voxel
Parameters
----------
static : array, shape (S, R, C)
static image
static_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation of the static image
moving : array, shape (S, R, C)
moving image
moving_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation of the moving image
Returns
-------
affine_map : instance of AffineMap
the affine transformation (translation only, in this case) aligning
the geometric center of the moving image towards the one of the static
image
"""
dim = len(static.shape)
if static_grid2world is None:
static_grid2world = np.eye(dim + 1)
if moving_grid2world is None:
moving_grid2world = np.eye(dim + 1)
c_static = tuple((np.array(static.shape, dtype=np.float64)) * 0.5)
c_static = static_grid2world.dot(c_static+(1,))
c_moving = tuple((np.array(moving.shape, dtype=np.float64)) * 0.5)
c_moving = moving_grid2world.dot(c_moving+(1,))
transform = np.eye(dim + 1)
transform[:dim, dim] = (c_moving - c_static)[:dim]
affine_map = AffineMap(transform,
static.shape, static_grid2world,
moving.shape, moving_grid2world)
return affine_map
def align_origins(static, static_grid2world,
moving, moving_grid2world):
r""" Transformation to align the origins of the input images
With "origin" of a volume we mean the physical coordinates of
voxel (0,0,0)
Parameters
----------
static : array, shape (S, R, C)
static image
static_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation of the static image
moving : array, shape (S, R, C)
moving image
moving_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation of the moving image
Returns
-------
affine_map : instance of AffineMap
the affine transformation (translation only, in this case) aligning
the origin of the moving image towards the one of the static
image
"""
dim = len(static.shape)
if static_grid2world is None:
static_grid2world = np.eye(dim + 1)
if moving_grid2world is None:
moving_grid2world = np.eye(dim + 1)
c_static = static_grid2world[:dim, dim]
c_moving = moving_grid2world[:dim, dim]
transform = np.eye(dim + 1)
transform[:dim, dim] = (c_moving - c_static)[:dim]
affine_map = AffineMap(transform,
static.shape, static_grid2world,
moving.shape, moving_grid2world)
return affine_map
| [
"numpy.eye",
"numpy.ceil",
"numpy.ones",
"numpy.array",
"numpy.linalg.inv",
"numpy.isnan",
"numpy.empty"
] | [((48493, 48508), 'numpy.eye', 'np.eye', (['(dim + 1)'], {}), '(dim + 1)\n', (48499, 48508), True, 'import numpy as np\n'), ((50090, 50105), 'numpy.eye', 'np.eye', (['(dim + 1)'], {}), '(dim + 1)\n', (50096, 50105), True, 'import numpy as np\n'), ((51476, 51491), 'numpy.eye', 'np.eye', (['(dim + 1)'], {}), '(dim + 1)\n', (51482, 51491), True, 'import numpy as np\n'), ((11238, 11283), 'numpy.array', 'np.array', (['sampling_grid_shape'], {'dtype': 'np.int32'}), '(sampling_grid_shape, dtype=np.int32)\n', (11246, 11283), True, 'import numpy as np\n'), ((12176, 12201), 'numpy.linalg.inv', 'npl.inv', (['image_grid2world'], {}), '(image_grid2world)\n', (12183, 12201), True, 'import numpy.linalg as npl\n'), ((15324, 15345), 'numpy.array', 'np.array', (['transformed'], {}), '(transformed)\n', (15332, 15345), True, 'import numpy as np\n'), ((17915, 17936), 'numpy.array', 'np.array', (['transformed'], {}), '(transformed)\n', (17923, 17936), True, 'import numpy as np\n'), ((21913, 21939), 'numpy.linalg.inv', 'npl.inv', (['static_grid2world'], {}), '(static_grid2world)\n', (21920, 21939), True, 'import numpy.linalg as npl\n'), ((22024, 22050), 'numpy.linalg.inv', 'npl.inv', (['moving_grid2world'], {}), '(moving_grid2world)\n', (22031, 22050), True, 'import numpy.linalg as npl\n'), ((22357, 22377), 'numpy.eye', 'np.eye', (['(self.dim + 1)'], {}), '(self.dim + 1)\n', (22363, 22377), True, 'import numpy as np\n'), ((48141, 48156), 'numpy.eye', 'np.eye', (['(dim + 1)'], {}), '(dim + 1)\n', (48147, 48156), True, 'import numpy as np\n'), ((48219, 48234), 'numpy.eye', 'np.eye', (['(dim + 1)'], {}), '(dim + 1)\n', (48225, 48234), True, 'import numpy as np\n'), ((48286, 48302), 'numpy.array', 'np.array', (['static'], {}), '(static)\n', (48294, 48302), True, 'import numpy as np\n'), ((48407, 48423), 'numpy.array', 'np.array', (['moving'], {}), '(moving)\n', (48415, 48423), True, 'import numpy as np\n'), ((49734, 49749), 'numpy.eye', 'np.eye', (['(dim + 1)'], {}), '(dim + 1)\n', 
(49740, 49749), True, 'import numpy as np\n'), ((49812, 49827), 'numpy.eye', 'np.eye', (['(dim + 1)'], {}), '(dim + 1)\n', (49818, 49827), True, 'import numpy as np\n'), ((51278, 51293), 'numpy.eye', 'np.eye', (['(dim + 1)'], {}), '(dim + 1)\n', (51284, 51293), True, 'import numpy as np\n'), ((51356, 51371), 'numpy.eye', 'np.eye', (['(dim + 1)'], {}), '(dim + 1)\n', (51362, 51371), True, 'import numpy as np\n'), ((7351, 7367), 'numpy.isnan', 'np.isnan', (['affine'], {}), '(affine)\n', (7359, 7367), True, 'import numpy as np\n'), ((7488, 7503), 'numpy.linalg.inv', 'npl.inv', (['affine'], {}), '(affine)\n', (7495, 7503), True, 'import numpy.linalg as npl\n'), ((11780, 11795), 'numpy.eye', 'np.eye', (['(dim + 1)'], {}), '(dim + 1)\n', (11786, 11795), True, 'import numpy as np\n'), ((21566, 21586), 'numpy.eye', 'np.eye', (['(self.dim + 1)'], {}), '(self.dim + 1)\n', (21572, 21586), True, 'import numpy as np\n'), ((21657, 21677), 'numpy.eye', 'np.eye', (['(self.dim + 1)'], {}), '(self.dim + 1)\n', (21663, 21677), True, 'import numpy as np\n'), ((22956, 22994), 'numpy.array', 'np.array', (['static.shape'], {'dtype': 'np.int32'}), '(static.shape, dtype=np.int32)\n', (22964, 22994), True, 'import numpy as np\n'), ((23100, 23122), 'numpy.array', 'np.array', (['self.samples'], {}), '(self.samples)\n', (23108, 23122), True, 'import numpy as np\n'), ((23793, 23837), 'numpy.array', 'np.array', (['self.static_vals'], {'dtype': 'np.float64'}), '(self.static_vals, dtype=np.float64)\n', (23801, 23837), True, 'import numpy as np\n'), ((26155, 26181), 'numpy.array', 'np.array', (['self.moving_vals'], {}), '(self.moving_vals)\n', (26163, 26181), True, 'import numpy as np\n'), ((38696, 38716), 'numpy.eye', 'np.eye', (['(self.dim + 1)'], {}), '(self.dim + 1)\n', (38702, 38716), True, 'import numpy as np\n'), ((49850, 49890), 'numpy.array', 'np.array', (['static.shape'], {'dtype': 'np.float64'}), '(static.shape, dtype=np.float64)\n', (49858, 49890), True, 'import numpy as np\n'), 
((49973, 50013), 'numpy.array', 'np.array', (['moving.shape'], {'dtype': 'np.float64'}), '(moving.shape, dtype=np.float64)\n', (49981, 50013), True, 'import numpy as np\n'), ((12133, 12148), 'numpy.eye', 'np.eye', (['(dim + 1)'], {}), '(dim + 1)\n', (12139, 12148), True, 'import numpy as np\n'), ((21735, 21751), 'numpy.array', 'np.array', (['static'], {}), '(static)\n', (21743, 21751), True, 'import numpy as np\n'), ((21793, 21809), 'numpy.array', 'np.array', (['moving'], {}), '(moving)\n', (21801, 21809), True, 'import numpy as np\n'), ((22895, 22934), 'numpy.ceil', 'np.ceil', (['(1.0 / self.sampling_proportion)'], {}), '(1.0 / self.sampling_proportion)\n', (22902, 22934), True, 'import numpy as np\n'), ((28415, 28426), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (28423, 28426), True, 'import numpy as np\n'), ((23280, 23296), 'numpy.ones', 'np.ones', (['self.ns'], {}), '(self.ns)\n', (23287, 23296), True, 'import numpy as np\n')] |
import matplotlib
from matplotlib.pyplot import subplot, plot, show
from numpy import linspace, sin, pi
import seismo
matplotlib.style.use('ggplot')
x = linspace(0, 0.05, 500)
y = 0.6*sin(2*pi*240*x)\
+ 0.15*sin(2*pi*1303*x + 0.4)\
+ 0.1*sin(2*pi*3000*x)
f, a = seismo.deeming(x, y)
subplot(211)
plot(x, y, '.')
subplot(212)
plot(f, a)
show()
| [
"numpy.sin",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.style.use",
"seismo.deeming",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((119, 149), 'matplotlib.style.use', 'matplotlib.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (139, 149), False, 'import matplotlib\n'), ((155, 177), 'numpy.linspace', 'linspace', (['(0)', '(0.05)', '(500)'], {}), '(0, 0.05, 500)\n', (163, 177), False, 'from numpy import linspace, sin, pi\n'), ((273, 293), 'seismo.deeming', 'seismo.deeming', (['x', 'y'], {}), '(x, y)\n', (287, 293), False, 'import seismo\n'), ((295, 307), 'matplotlib.pyplot.subplot', 'subplot', (['(211)'], {}), '(211)\n', (302, 307), False, 'from matplotlib.pyplot import subplot, plot, show\n'), ((308, 323), 'matplotlib.pyplot.plot', 'plot', (['x', 'y', '"""."""'], {}), "(x, y, '.')\n", (312, 323), False, 'from matplotlib.pyplot import subplot, plot, show\n'), ((324, 336), 'matplotlib.pyplot.subplot', 'subplot', (['(212)'], {}), '(212)\n', (331, 336), False, 'from matplotlib.pyplot import subplot, plot, show\n'), ((337, 347), 'matplotlib.pyplot.plot', 'plot', (['f', 'a'], {}), '(f, a)\n', (341, 347), False, 'from matplotlib.pyplot import subplot, plot, show\n'), ((349, 355), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (353, 355), False, 'from matplotlib.pyplot import subplot, plot, show\n'), ((248, 270), 'numpy.sin', 'sin', (['(2 * pi * 3000 * x)'], {}), '(2 * pi * 3000 * x)\n', (251, 270), False, 'from numpy import linspace, sin, pi\n'), ((186, 207), 'numpy.sin', 'sin', (['(2 * pi * 240 * x)'], {}), '(2 * pi * 240 * x)\n', (189, 207), False, 'from numpy import linspace, sin, pi\n'), ((214, 242), 'numpy.sin', 'sin', (['(2 * pi * 1303 * x + 0.4)'], {}), '(2 * pi * 1303 * x + 0.4)\n', (217, 242), False, 'from numpy import linspace, sin, pi\n')] |
#!/usr/bin/python
"""
roadGrid.py: version 0.1.0
A quick 2D Voxel implemenation.
History:
2017/01/29: coding style phase1:
reformat to python-guide.org code style
http://docs.python-guide.org/en/latest/writing/style/
which uses PEP 8 as a base: http://pep8.org/.
2017/01/23: Initial version converted to a class
"""
import numpy as np
import re
# a class for helping us map the road surface
# using voxel for quick 3D reconstruction
class RoadGrid():
# initialize
def __init__(self, x0, y0, nlanes, mainLaneIdx):
self.nlanes = nlanes
self.mainIdx = mainLaneIdx
self.maxkey = 55
self.mapping = {}
self.x0 = x0
self.y0 = y0
# list of objects and their boxes
# trigged by setting found and occlusion checks
self.object_list = []
# list of vehicles and their boxes
# trigged by setting insert and track checks
self.vehicle_list = {}
# we are enforcing some constraints into the road grid
def map_boxes(self, laneIdx, boxes):
numkeys = [int(key) for key in boxes.keys()]
maxkey = max(numkeys)
if self.maxkey < maxkey:
self.maxkey = maxkey
laneAlpha = chr(laneIdx + 65)
for i in numkeys:
box = '%s+%02d' % (laneAlpha, self.maxkey - i)
self.mapping[box] = {
'window': boxes['%d' % (i)],
'found': False,
'occluded': False,
'tracked': False,
'object': None,
'vehicle': None}
def getMapping(self):
return self.mapping
def setVehicle(self, boxlist, vehIdx):
for box in boxlist:
self.mapping[box]['vehicle'] = vehIdx
if not self.mapping[box]['occluded']:
vehStr = '%d' % (vehIdx)
self.vehicle_list[vehStr] = box
def setFound(self, box):
self.mapping[box]['found'] = True
# print("from setFound...")
self.calculateVoxelOcclusionAndObjectSeparation(
box, forceIntoOne=True)
def setOccluded(self, box):
if box in self.mapping:
self.mapping[box]['occluded'] = True
def getKey(self, lane, y):
box = '%s+%02d' % (chr(lane + 65), y)
return box
def getBox(self, lane, y):
box = '%s+%02d' % (chr(lane + 65), y)
if box in self.mapping:
return self.mapping[box]
return None
def getAllWindows(self):
return [self.mapping[map]['window'] for map in self.mapping.keys()]
def getBoxWindow(self, box):
return self.mapping[box]['window']
def getFoundWindows(self):
return [self.mapping[map]['window'] for map in self.mapping.keys()
if self.mapping[map]['found']]
def getOccludedWindows(self):
return [self.mapping[map]['window'] for map in self.mapping.keys()
if self.mapping[map]['occluded']]
def getFoundAndNotOccludedWindows(self):
return [self.mapping[map]['window'] for map in self.mapping.keys()
if self.mapping[map]['found'] and
not self.mapping[map]['occluded']]
def getFoundAndNotOccludedWindowsInObject(self, objIdx):
return [self.mapping[map]['window']
for map in self.object_list[objIdx]
if self.mapping[map]['found'] and
not self.mapping[map]['occluded']]
def getFoundAndNotOccludedWindowsInVehicle(self, vehIdx):
return [self.mapping[map]['window']
for map in self.mapping.keys()
if self.mapping[map]['found'] and
not self.mapping[map]['occluded'] and
self.mapping[map]['vehicle'] is not None and
self.mapping[map]['vehicle'] == vehIdx]
def getFoundAndNotOccludedBoxesInObject(self, objIdx):
return [map for map in self.object_list[objIdx]
if self.mapping[map]['found'] and
not self.mapping[map]['occluded']]
def getFoundAndNotOccludedBoxesInVehicle(self, vehIdx):
return [map for map in self.mapping.keys()
if self.mapping[map]['found'] and
not self.mapping[map]['occluded'] and
self.mapping[map]['vehicle'] is not None and
self.mapping[map]['vehicle'] == vehIdx]
def gridCoordinates(self, box):
lane, y = box.split('+')
return ord(lane)-65, int(y)
def gridSize(self):
return self.nlanes, self.maxkey
def generatePolyRay(self, x0, y0, x1, y1):
allY = np.array([y0, y1])
allX = np.array([x0, x1])
return np.poly1d(np.polyfit(allY, allX, 1))
def getNumObjects(self):
return len(self.object_list)
def getObjects(self):
return self.object_list
def getObjectList(self, i):
return self.object_list[i]
def getObjectListWindows(self, i):
return [self.mapping[map]['window'] for map in self.object_list[i]]
# we will use constrain propagation to limit our search for vehicle testing
# by using voxel occlusion testing to find occluded boxes in the grid
def calculateVoxelOcclusionAndObjectSeparation(
self, box, vehicle=None, forceIntoOne=False):
if not self.mapping[box]['occluded']:
# find the two rays from our camera that hits the edges
# of our box and generate a set of ray polys.
window = self.mapping[box]['window']
x1, y1 = window[0][0], window[0][1]
x2, y2 = window[1][0], window[1][1]
polyRay1 = self.generatePolyRay(self.x0, self.y0, x1, y1)
polyRay2 = self.generatePolyRay(self.x0, self.y0, x2, y2)
newobject = True
# until our rays hit something found before
# then we are a new object
# else we are part of a larger object.
# (or it could be something that is too close
# to tell apart using this method)
mapping = [n for n in self.mapping.keys()]
mapping.sort()
for map in mapping:
window = self.mapping[map]['window']
boxX1, boxX2 = window[0][0], window[1][0]
boxMidY = (window[0][1] + window[1][1])/2
rayX1 = polyRay1(np.array([boxMidY]))[0]
rayX2 = polyRay2(np.array([boxMidY]))[0]
# print("rayX1", rayX1, "rayX2", rayX2, boxMidY)
# print("boxX1", boxX1, "boxX2", boxX2, boxMidY)
# print("box", box, "map", map)
# three choices for a box to be occluded by our
# box: ray1 hits, ray2 hits, or the box is
# completely within the two rays
if (((boxX1 <= rayX1 and boxX2 >= rayX1) or
(boxX1 <= rayX2 and boxX2 >= rayX2) or
(rayX1 < boxX1 and rayX1 < boxX2 and
rayX2 > boxX1 and rayX2 > boxX2)) and
(y1 > boxMidY)):
# print("Hit!")
# is our box is a vehicle...?
if vehicle is not None:
if self.mapping[map]['vehicle'] is None:
self.mapping[map]['vehicle'] = vehicle
self.mapping[map]['found'] = True
self.mapping[map]['occluded'] = True
# print("10. vehicle is none.!", box, map)
# we are the same!
elif self.mapping[map]['vehicle'] == vehicle:
# update the vehicle to be us.
vehStr = '%d' % (vehicle)
self.vehicle_list[vehStr] = box
# print("11. vehicle is same.!", box, map)
# the other box is a vehicle too, but not us!
# occlude it
else:
# print("1. this should not happen!")
self.mapping[map]['occluded'] = True
# stop! we found something already
# occluded - this box maybe be something
# larger, so adopt its object or vehicle
elif self.mapping[map]['occluded'] and forceIntoOne:
# the other voxel is a vehicle!
if self.mapping[map]['vehicle'] is not None:
# the vehicle is being tracked by
# vehicle tracker, don't try to move it!
# vehicle is not being tracked...
if self.mapping[map]['tracked']:
# print("2. tracked detected!", box, map)
# print("setting ourselves occluded")
vehIdx = self.mapping[map]['vehicle']
vehStr = '%d' % (vehIdx)
self.mapping[box]['occluded'] = True
self.mapping[box]['vehicle'] = \
self.mapping[map]['vehicle']
self.vehicle_list[vehStr] = map
self.mapping[box]['vehicle'] = vehIdx
self.mapping[box]['found'] = True
else:
# print("2. new location detected!", box, map)
# need to inform the vehicle
# it has a new location!
vehIdx = self.mapping[map]['vehicle']
self.mapping[map]['occluded'] = True
vehStr = '%d' % (vehIdx)
self.vehicle_list[vehStr] = box
self.mapping[box]['vehicle'] = vehIdx
self.mapping[box]['found'] = True
elif self.mapping[box]['object'] is not None:
# if self.mapping[map]['object'] is None:
# print("That's not suppose to happen!")
# else:
# print("objectlist", self.object_list)
# print("3. new location detected!", box, map)
idx = self.mapping[map]['object']
if idx is not None:
self.mapping[box]['object'] = idx
# and add ourselves to their list
# print("idx=", idx)
self.object_list[idx].append(box)
# our objects do not match!
elif self.mapping[box]['object'] is None and \
self.mapping[map]['object'] is not None:
# print("4. new location detected!", box, map)
idx = self.mapping[map]['object']
self.mapping[box]['object'] = idx
# else:
# print("newer list than ours!!!")
# otherwise we are the same object already
# - nothing to do.
# other box is not occluded
# elif not self.mapping[map]['occluded'] and \
# not self.mapping[map]['tracked']:
elif not self.mapping[map]['occluded']:
# the other voxel is also a vehicle!
if self.mapping[map]['vehicle'] is not None:
# print("5. new location detected!", box, map)
# need to inform the vehicle
# it has a new location!
vehIdx = self.mapping[map]['vehicle']
self.mapping[map]['occluded'] = True
vehStr = '%d' % (vehIdx)
self.vehicle_list[vehStr] = box
self.mapping[box]['vehicle'] = vehIdx
self.mapping[box]['found'] = True
# but we don't belong in the same object
if self.mapping[box]['object'] is not None and \
self.mapping[map]['object'] is not None \
and self.mapping[box]['object'] != \
self.mapping[map]['object']:
# we seem to be occluding another object!
# just set their occluded flag
# print("6. new location detected!")
self.mapping[map]['occluded'] = True
# we thought we were our own list, we need
# the other object is not in a different
# object list and we don't either!
elif self.mapping[box]['object'] is None:
# we must be a new object then!
# create a new object list,
# and add this to our object.
idx = len(self.object_list)
self.mapping[box]['object'] = idx
self.mapping[map]['object'] = idx
self.object_list.append([box, map])
# and set the occlusion!
self.mapping[map]['occluded'] = True
else:
# are in our own list, just add this one to
# it and set to our object.
idx = self.mapping[box]['object']
self.object_list[idx].append(map)
self.mapping[map]['object'] = idx
# and set the occlusion!
self.mapping[map]['occluded'] = True
# the other box is occluded already, but we cannot
# force our objects into one
else:
# this item is already occluded.
# add ourselves to their list.
if self.mapping[map]['object'] is not None and \
self.mapping[box]['object'] is None:
# we may be something larger after all!
idx = self.mapping[map]['object']
self.mapping[box]['object'] = idx
# and add ourselves to their list
self.object_list[idx].append(box)
# and set the occlusion!
self.mapping[map]['occluded'] = True
# for debugging objs
# print("objs = ", len(self.object_list))
def calculateObjectPosition(self, lane, ycoordinate):
# first check to see if the coordinates falls into an existing box
laneAscii = chr(lane + 65)
boxpattern = re.compile('^%s[0123456789]+$' % (laneAscii))
for box in self.mapping.keys():
if boxpattern.match(box):
window = self.mapping[box]['window']
if (window[0][1] < ycoordinate and
ycoordinate < window[1][1]):
return int(box.replace(laneAscii, ''))
# if not, return an estimate
return int((1.0-ycoordinate/self.y0) * self.maxkey)-3
def insertTrackedObject(self, lane, yidx, window, vehIdx, tracking=False):
box = '%s+%02d' % (chr(lane + 65), yidx)
# its not there - insert it
if box not in self.mapping:
self.mapping[box] = {
'window': window,
'found': True,
'tracked': tracking,
'occluded': False,
'object': None,
'vehicle': vehIdx}
# its already there - replace it
else:
self.mapping[box]['window'] = window
self.mapping[box]['found'] = True
self.mapping[box]['tracked'] = tracking
self.mapping[box]['vehicle'] = vehIdx
vehStr = '%d' % (vehIdx)
self.vehicle_list[vehStr] = box
# print("from insertTrackedObject...")
self.calculateVoxelOcclusionAndObjectSeparation(box, vehicle=vehIdx)
return self.vehicle_list[vehStr]
def isOccluded(self, box):
if box in self.mapping:
return self.mapping[box]['occluded']
return False
| [
"numpy.polyfit",
"numpy.array",
"re.compile"
] | [((4602, 4620), 'numpy.array', 'np.array', (['[y0, y1]'], {}), '([y0, y1])\n', (4610, 4620), True, 'import numpy as np\n'), ((4636, 4654), 'numpy.array', 'np.array', (['[x0, x1]'], {}), '([x0, x1])\n', (4644, 4654), True, 'import numpy as np\n'), ((15339, 15382), 're.compile', 're.compile', (["('^%s[0123456789]+$' % laneAscii)"], {}), "('^%s[0123456789]+$' % laneAscii)\n", (15349, 15382), False, 'import re\n'), ((4680, 4705), 'numpy.polyfit', 'np.polyfit', (['allY', 'allX', '(1)'], {}), '(allY, allX, 1)\n', (4690, 4705), True, 'import numpy as np\n'), ((6337, 6356), 'numpy.array', 'np.array', (['[boxMidY]'], {}), '([boxMidY])\n', (6345, 6356), True, 'import numpy as np\n'), ((6394, 6413), 'numpy.array', 'np.array', (['[boxMidY]'], {}), '([boxMidY])\n', (6402, 6413), True, 'import numpy as np\n')] |
# Copyright 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
## TreePlot.py
# Written <NAME> - February 2020
#
# A set of functions to plot a tree from a certain node.
# Can also be used to show the tree structure generated by a BFS.
import numpy as np
import matplotlib.pyplot as plt
from rdml_graph.core import Node
from collections import deque
## plotTree
# plot's a tree using a BFS search through the environment.
# Each level is given equal space.
# @param root - the top of the tree given as a Node
# @param max_levels - the max number of levels of the tree to plot.
# @param show_labels - shows the labels the nodes using the id of node.
def plotTree(root, max_levels=-1, show_labels=False):
frontier = deque()
frontier.append((root, 0))
explored = set()
nodesLevels = [[]]
while len(frontier) > 0:
n, level = frontier.pop()
if max_levels != -1 and level >= max_levels:
break
if n not in explored:
explored.add(n)
successors = n.successor()
# add the current node to node levels.
if len(nodesLevels) > level:
nodesLevels[level].append(n)
else:
#add a new layer
nodesLevels.append([n])
for succ, cost in successors:
if succ not in explored:
frontier.append((succ, level+1))
# BFS search done, plot tree
num_levels = len(nodesLevels)
total_nodes = sum([len(lev) for lev in nodesLevels])
pts = np.empty((total_nodes, 2))
edges = np.empty((0,2))
nodesToIdx = {}
idx = 0
# Generate node locations
for l in range(num_levels):
level = nodesLevels[l]
for i in range(len(level)):
nodesToIdx[level[i]] = idx
pts[idx][1] = float(-l)
pts[idx][0] = float(i)
idx += 1
# Generate edges
for l in range(num_levels):
level = nodesLevels[l]
for i in range(len(level)):
n = level[i]
idx = nodesToIdx[n]
# Go through each child node.
for e in n.e:
child = e.c
cIdx = nodesToIdx[child]
appendArray = np.array([pts[idx], pts[cIdx], [np.nan, np.nan]])
edges = np.append(edges, appendArray, axis=0)
# perform plotting.
plt.plot(edges[:,0], edges[:,1],zorder=1)
plt.scatter(pts[:,0], pts[:,1], s=2000.0, facecolors='white', edgecolors='red', zorder=2) #marker=plt.markers.MarkerStyle('o', fillstyles='none'))
if show_labels:
for n in nodesToIdx:
idx = nodesToIdx[n]
plt.text(pts[idx,0], pts[idx,1], str(n.getLabel()), \
horizontalalignment='center', verticalalignment='center', fontsize=8, zorder=3)
#
| [
"collections.deque",
"matplotlib.pyplot.plot",
"numpy.append",
"numpy.array",
"numpy.empty",
"matplotlib.pyplot.scatter"
] | [((1736, 1743), 'collections.deque', 'deque', ([], {}), '()\n', (1741, 1743), False, 'from collections import deque\n'), ((2555, 2581), 'numpy.empty', 'np.empty', (['(total_nodes, 2)'], {}), '((total_nodes, 2))\n', (2563, 2581), True, 'import numpy as np\n'), ((2594, 2610), 'numpy.empty', 'np.empty', (['(0, 2)'], {}), '((0, 2))\n', (2602, 2610), True, 'import numpy as np\n'), ((3392, 3436), 'matplotlib.pyplot.plot', 'plt.plot', (['edges[:, 0]', 'edges[:, 1]'], {'zorder': '(1)'}), '(edges[:, 0], edges[:, 1], zorder=1)\n', (3400, 3436), True, 'import matplotlib.pyplot as plt\n'), ((3438, 3534), 'matplotlib.pyplot.scatter', 'plt.scatter', (['pts[:, 0]', 'pts[:, 1]'], {'s': '(2000.0)', 'facecolors': '"""white"""', 'edgecolors': '"""red"""', 'zorder': '(2)'}), "(pts[:, 0], pts[:, 1], s=2000.0, facecolors='white', edgecolors=\n 'red', zorder=2)\n", (3449, 3534), True, 'import matplotlib.pyplot as plt\n'), ((3251, 3300), 'numpy.array', 'np.array', (['[pts[idx], pts[cIdx], [np.nan, np.nan]]'], {}), '([pts[idx], pts[cIdx], [np.nan, np.nan]])\n', (3259, 3300), True, 'import numpy as np\n'), ((3325, 3362), 'numpy.append', 'np.append', (['edges', 'appendArray'], {'axis': '(0)'}), '(edges, appendArray, axis=0)\n', (3334, 3362), True, 'import numpy as np\n')] |
# Copyright (c) OpenMMLab. All rights reserved.
from collections import defaultdict
from contextlib import contextmanager
from functools import partial
import numpy as np
from mmcv import Timer
class RunningAverage():
r"""A helper class to calculate running average in a sliding window.
Args:
window (int): The size of the sliding window.
"""
def __init__(self, window: int = 1):
self.window = window
self._data = []
def update(self, value):
"""Update a new data sample."""
self._data.append(value)
self._data = self._data[-self.window:]
def average(self):
"""Get the average value of current window."""
return np.mean(self._data)
class StopWatch:
r"""A helper class to measure FPS and detailed time consuming of each phase
in a video processing loop or similar scenarios.
Args:
window (int): The sliding window size to calculate the running average
of the time consuming.
Example:
>>> from mmpose.utils import StopWatch
>>> import time
>>> stop_watch = StopWatch(window=10)
>>> with stop_watch.timeit('total'):
>>> time.sleep(0.1)
>>> # 'timeit' support nested use
>>> with stop_watch.timeit('phase1'):
>>> time.sleep(0.1)
>>> with stop_watch.timeit('phase2'):
>>> time.sleep(0.2)
>>> time.sleep(0.2)
>>> report = stop_watch.report()
"""
def __init__(self, window=1):
self.window = window
self._record = defaultdict(partial(RunningAverage, window=self.window))
self._timer_stack = []
@contextmanager
def timeit(self, timer_name='_FPS_'):
"""Timing a code snippet with an assigned name.
Args:
timer_name (str): The unique name of the interested code snippet to
handle multiple timers and generate reports. Note that '_FPS_'
is a special key that the measurement will be in `fps` instead
of `millisecond`. Also see `report` and `report_strings`.
Default: '_FPS_'.
Note:
This function should always be used in a `with` statement, as shown
in the example.
"""
self._timer_stack.append((timer_name, Timer()))
try:
yield
finally:
timer_name, timer = self._timer_stack.pop()
self._record[timer_name].update(timer.since_start())
def report(self, key=None):
"""Report timing information.
Returns:
dict: The key is the timer name and the value is the \
corresponding average time consuming.
"""
result = {
name: r.average() * 1000.
for name, r in self._record.items()
}
if '_FPS_' in result:
result['_FPS_'] = 1000. / result.pop('_FPS_')
if key is None:
return result
return result[key]
def report_strings(self):
"""Report timing information in texture strings.
Returns:
list(str): Each element is the information string of a timed \
event, in format of '{timer_name}: {time_in_ms}'. \
Specially, if timer_name is '_FPS_', the result will \
be converted to fps.
"""
result = self.report()
strings = []
if '_FPS_' in result:
strings.append(f'FPS: {result["_FPS_"]:>5.1f}')
strings += [f'{name}: {val:>3.0f}' for name, val in result.items()]
return strings
def reset(self):
self._record = defaultdict(list)
self._active_timer_stack = []
| [
"functools.partial",
"numpy.mean",
"collections.defaultdict",
"mmcv.Timer"
] | [((706, 725), 'numpy.mean', 'np.mean', (['self._data'], {}), '(self._data)\n', (713, 725), True, 'import numpy as np\n'), ((3683, 3700), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3694, 3700), False, 'from collections import defaultdict\n'), ((1609, 1652), 'functools.partial', 'partial', (['RunningAverage'], {'window': 'self.window'}), '(RunningAverage, window=self.window)\n', (1616, 1652), False, 'from functools import partial\n'), ((2345, 2352), 'mmcv.Timer', 'Timer', ([], {}), '()\n', (2350, 2352), False, 'from mmcv import Timer\n')] |
# -*- encoding: utf-8 -*-
# @Author: <NAME>
# @Time: 2021/08/21 04:37:56
# @File: saligned.py
import math
import torch
import numpy as np
from torch import nn
from mwptoolkit.module.Embedder.basic_embedder import BaiscEmbedder
from mwptoolkit.module.Encoder.rnn_encoder import SalignedEncoder
from mwptoolkit.module.Decoder.rnn_decoder import SalignedDecoder
from mwptoolkit.module.Environment.stack_machine import OPERATIONS, StackMachine
from mwptoolkit.utils.enum_type import SpecialTokens, NumMask, Operators
class Saligned(nn.Module):
"""
Reference:
Chiang et al. "Semantically-Aligned Equation Generation for Solving and Reasoning Math Word Problems".
"""
def __init__(self, config, dataset):
super(Saligned, self).__init__()
self.operations = operations = OPERATIONS(dataset.out_symbol2idx)
# parameter
self._vocab_size = vocab_size = len(dataset.in_idx2word)
self._dim_embed = dim_embed = config['embedding_size']
self._dim_hidden = dim_hidden = config['hidden_size']
self._dropout_rate = dropout_rate = config['dropout_ratio']
self.NOOP = operations.NOOP
self.GEN_VAR = operations.GEN_VAR
self.ADD = operations.ADD
self.SUB = operations.SUB
self.MUL = operations.MUL
self.DIV = operations.DIV
self.POWER = operations.POWER
self.EQL = operations.EQL
self.N_OPS = operations.N_OPS
self.PAD = operations.PAD
self._device = device = config["device"]
self.min_NUM = dataset.out_symbol2idx['NUM_0']
#print(self.dataloader.dataset.out_symbol2idx); exit()
#self.do_addeql = False if '<BRG>' in dataset.out_symbol2idx else True
#max_NUM = list(dataset.out_symbol2idx.keys())[-2]
#self.max_NUM = dataset.out_symbol2idx[max_NUM]
#self.ADD = dataset.out_symbol2idx['+']
self.POWER = dataset.out_symbol2idx['^']
self.min_CON = self.N_OPS_out = self.POWER + 1
#self.min_CON = self.N_OPS_out = dataset.out_symbol2idx['^']+1 if '<BRG>' not in dataset.out_symbol2idx else dataset.out_symbol2idx['<BRG>']+1
#self.UNK = dataset.out_symbol2idx['<UNK>']
#self.max_CON = self.min_NUM - 1
self.fix_constants = list(dataset.out_symbol2idx.keys())[self.min_CON:self.min_NUM]
self.mask_list = NumMask.number
self.out_symbol2idx = dataset.out_symbol2idx
self.out_idx2symbol = dataset.out_idx2symbol
try:
self.out_sos_token = self.out_symbol2idx[SpecialTokens.SOS_TOKEN]
except:
self.out_sos_token = None
try:
self.out_eos_token = self.out_symbol2idx[SpecialTokens.EOS_TOKEN]
except:
self.out_eos_token = None
try:
self.out_pad_token = self.out_symbol2idx[SpecialTokens.PAD_TOKEN]
except:
self.out_pad_token = None
# module
#print('vocab_size', config); #exit()
self.embedder = BaiscEmbedder(vocab_size, dim_embed, dropout_rate)
self.encoder = SalignedEncoder(dim_embed, dim_hidden, dim_hidden, dropout_rate)
self.decoder = SalignedDecoder(operations, dim_hidden, dropout_rate, device)
self.embedding_one = torch.nn.Parameter(torch.normal(torch.zeros(2 * dim_hidden), 0.01))
self.embedding_pi = torch.nn.Parameter(torch.normal(torch.zeros(2 * dim_hidden), 0.01))
self.encoder.initialize_fix_constant(len(self.fix_constants), self._device)
# make loss
class_weights = torch.ones(operations.N_OPS + 1)
#class_weights[OPERATIONS.NOOP] = 0
self._op_loss = torch.nn.CrossEntropyLoss(class_weights, size_average=False, reduce=False, ignore_index=-1)
self._arg_loss = torch.nn.CrossEntropyLoss()
def calculate_loss(self, batch_data):
"""Finish forward-propagating, calculating loss and back-propagation.
Args:
batch_data (dict): one batch data.
Returns:
float: loss value.
"""
text = batch_data["question"]
ops = batch_data["equation"]
text_len = batch_data["ques len"]
constant_indices = batch_data["num pos"]
constants = batch_data["num list"]
op_len = batch_data["equ len"]
#print(batch_data.keys())
num_len = batch_data["num size"]
fix_constants = self.fix_constants
#batch_data["raw_equation"] = batch_data["equation"].clone()
batch_size = len(text)
# zero embedding for the stack bottom
bottom = torch.zeros(self._dim_hidden * 2).to(self._device)
bottom.requires_grad = False
# deal with device
seq_emb = self.embedder(text)
#print('seq_emb', seq_emb.size(), text_len, constant_indices)
context, state, operands = \
self.encoder.forward(seq_emb, text_len, constant_indices)
#print('operands', fix_constants, constants, ops, op_len); #exit()
# print(str(batch_data).encode('utf8'))
number_emb = [operands[b_i] + self.encoder.get_fix_constant() for b_i in range(batch_size)]
# initialize stacks
# stacks = [StackMachine(self.operations, fix_constants + constants[b], operands[b], bottom,
# dry_run=True)
# for b in range(batch_size)]
stacks = [StackMachine(self.operations, constants[b] + fix_constants, number_emb[b], bottom, dry_run=True) for b in range(batch_size)]
loss = torch.zeros(batch_size).to(self._device)
prev_op = (torch.zeros(batch_size).to(self._device) - 1).type(torch.LongTensor)
text_len = text_len.to(self._device)
prev_output = None
if True: #self.use_state:
prev_state = state
else:
prev_state = None
operands_len = torch.LongTensor(self.N_OPS + np.array(num_len)).to(self._device).unsqueeze(1).repeat(1, ops.size(1))
#operands_len = torch.LongTensor(self.N_OPS+ len(fix_constants) + np.array(num_len)).to(self._device).unsqueeze(1).repeat(1, ops.size(1))
ops[(ops >= operands_len)] = self.N_OPS
pred_logits = []
for t in range(max(op_len)):
# step one
#print('t', t)
#if t == 2: exit()
#op_target2 = ops[:, t].clone().detach()
#print('before op_target2', op_target2)
#op_target2[(op_target2 >= operands_len)] = self.N_OPS
#print('after op_target2', op_target2)
op_logits, arg_logits, prev_output, prev_state = \
self.decoder(
context, text_len, operands, stacks,
prev_op, prev_output, prev_state, number_emb, self.N_OPS)
# print('stacks[0]._equations', t, stacks[0]._equations)
# accumulate op loss
max_logits = torch.argmax(op_logits, dim=1)
#max_logits[max_logits == OPERATIONS.N_OPS] = arg_logits
op_target = ops[:, t].clone().detach()
op_target[(np.array(op_len) <= t)] = self.NOOP
op_target[(op_target >= self.N_OPS)] = self.N_OPS
#print('after op_target', op_target, self.N_OPS)
op_target.require_grad = False
#print('op_logits', torch.argmax(op_logits, dim=1), op_target.unsqueeze(0))
#print('op_logits', op_logits[:5, :5])
#print('op_target', op_logits.size(), torch.argmax(op_logits, dim=1), op_target)
loss += self._op_loss(op_logits, op_target)
#print('torch.argmax', torch.argmax(op_logits, dim=1), op_target)
#predicts = [stack.get_solution() for stack in stacks]
#print('predicts', t, predicts)
# accumulate arg loss
#print('arg_logits', arg_logits.size(), torch.argmax(arg_logits, dim=1), ops[:, t].unsqueeze(0) - self.N_OPS)
for b in range(batch_size):
#print('b', b)
if self.NOOP <= ops[b, t] < self.N_OPS:
continue
# if arg_logits[b].size(0) <= (ops[b, t].unsqueeze(0).cpu().numpy() - self.N_OPS):
# continue
#print('arg_logits', b, arg_logits[b].size(), ops[b, t].unsqueeze(0) - self.N_OPS)
#print('stacks[i].stack_log', stacks[b].stack_log)
loss[b] += self._arg_loss(arg_logits[b].unsqueeze(0), ops[b, t].unsqueeze(0) - self.N_OPS)
#print(t, prev_op, stacks[0].stack_log_index, stacks[0].stack_log)
# prev_op = torch.argmax(op_logits, dim=1) #
prev_op = ops[:, t]
pred_logits += [torch.argmax(op_logits, dim=1)]
weights = 1
#loss = (loss * weights).mean()
loss = (loss / max(op_len)).mean()
pred_logits = torch.stack(pred_logits, 1)
#print('train pred_logits', pred_logits[0, :])
predicts = [stack.stack_log_index for stack in stacks]
#print(pred_logits.size(), ops.size())
#print(stacks[0].stack_log_index, pred_logits[0], ops[0]); #exit()
return loss
def model_test(self, batch_data):
"""Model test.
Args:
batch_data (dict): one batch data.
Returns:
tuple(list,list): predicted equation, target equation.
"""
text = batch_data["question"]
ops = batch_data["equation"]
text_len = batch_data["ques len"]
constant_indices = batch_data["num pos"]
constants = batch_data["num list"]
op_len = batch_data["equ len"]
target = batch_data["equation"]
nums_stack = batch_data["num stack"]
fix_constants = self.fix_constants
batch_size = len(text)
# zero embedding for the stack bottom
bottom = torch.zeros(self._dim_hidden * 2).to(self._device)
bottom.requires_grad = False
# deal with device
seq_emb = self.embedder(text)
context, state, operands = \
self.encoder.forward(seq_emb, text_len, constant_indices)
number_emb = [operands[b_i] + self.encoder.get_fix_constant() for b_i in range(batch_size)]
# initialize stacks
stacks = [StackMachine(self.operations, constants[b] + fix_constants, number_emb[b], bottom) for b in range(batch_size)]
loss = torch.zeros(batch_size).to(self._device)
prev_op = (torch.zeros(batch_size).to(self._device) - 1).type(torch.LongTensor)
prev_output = None
prev_state = state
finished = [False] * batch_size
pred_logits = []
for t in range(40):
op_logits, arg_logits, prev_output, prev_state = \
self.decoder(
context, text_len, operands, stacks,
prev_op, prev_output, prev_state, number_emb, self.N_OPS)
n_finished = 0
for b in range(batch_size):
if (len(stacks[b].stack_log_index) and stacks[b].stack_log_index[-1] == self.EQL):
finished[b] = True
if finished[b]:
op_logits[b, self.PAD] = math.inf
n_finished += 1
if stacks[b].get_height() < 2:
op_logits[b, self.ADD] = -math.inf
op_logits[b, self.SUB] = -math.inf
op_logits[b, self.MUL] = -math.inf
op_logits[b, self.DIV] = -math.inf
op_logits[b, self.POWER] = -math.inf
op_loss, prev_op = torch.log(torch.nn.functional.softmax(op_logits, -1)).max(-1)
arg_loss, prev_arg = torch.log(torch.nn.functional.softmax(arg_logits, -1)).max(-1)
for b in range(batch_size):
if prev_op[b] == self.N_OPS:
prev_op[b] += prev_arg[b]
loss[b] += arg_loss[b]
if prev_op[b] < self.N_OPS:
loss[b] += op_loss[b]
if n_finished == batch_size:
break
predicts = [None] * batch_size
predicts_idx = [None] * batch_size
targets = [None] * batch_size
for i in range(batch_size):
predicts_idx[i] = [w for w in stacks[i].stack_log_index if w not in [self.PAD]]
targets[i] = list(batch_data["equation"][i].cpu().numpy())
predicts = self.convert_idx2symbol(torch.LongTensor(predicts_idx).to(self._device), constants)
targets = self.convert_idx2symbol(target, constants)
return predicts, targets
def convert_mask_num(self, batch_output, num_list):
output_list = []
for b_i, output in enumerate(batch_output):
res = []
num_len = len(num_list[b_i])
for symbol in output:
if "NUM" in symbol:
num_idx = self.mask_list.index(symbol)
if num_idx >= num_len:
res.append(symbol)
else:
res.append(num_list[b_i][num_idx])
else:
res.append(symbol)
output_list.append(res)
return output_list
def convert_idx2symbol(self, output, num_list):
batch_size = output.size(0)
seq_len = output.size(1)
output_list = []
for b_i in range(batch_size):
res = []
num_len = len(num_list[b_i])
for s_i in range(seq_len):
idx = output[b_i][s_i]
if idx in [self.out_sos_token, self.out_eos_token, self.out_pad_token]:
break
symbol = self.out_idx2symbol[idx]
if "NUM" in symbol:
num_idx = self.mask_list.index(symbol)
if num_idx >= num_len:
res.append(symbol)
else:
res.append(num_list[b_i][num_idx])
else:
res.append(symbol)
output_list.append(res)
return output_list
| [
"torch.nn.functional.softmax",
"mwptoolkit.module.Decoder.rnn_decoder.SalignedDecoder",
"mwptoolkit.module.Environment.stack_machine.OPERATIONS",
"mwptoolkit.module.Embedder.basic_embedder.BaiscEmbedder",
"torch.nn.CrossEntropyLoss",
"torch.LongTensor",
"torch.stack",
"torch.argmax",
"mwptoolkit.mod... | [((808, 842), 'mwptoolkit.module.Environment.stack_machine.OPERATIONS', 'OPERATIONS', (['dataset.out_symbol2idx'], {}), '(dataset.out_symbol2idx)\n', (818, 842), False, 'from mwptoolkit.module.Environment.stack_machine import OPERATIONS, StackMachine\n'), ((3000, 3050), 'mwptoolkit.module.Embedder.basic_embedder.BaiscEmbedder', 'BaiscEmbedder', (['vocab_size', 'dim_embed', 'dropout_rate'], {}), '(vocab_size, dim_embed, dropout_rate)\n', (3013, 3050), False, 'from mwptoolkit.module.Embedder.basic_embedder import BaiscEmbedder\n'), ((3074, 3138), 'mwptoolkit.module.Encoder.rnn_encoder.SalignedEncoder', 'SalignedEncoder', (['dim_embed', 'dim_hidden', 'dim_hidden', 'dropout_rate'], {}), '(dim_embed, dim_hidden, dim_hidden, dropout_rate)\n', (3089, 3138), False, 'from mwptoolkit.module.Encoder.rnn_encoder import SalignedEncoder\n'), ((3162, 3223), 'mwptoolkit.module.Decoder.rnn_decoder.SalignedDecoder', 'SalignedDecoder', (['operations', 'dim_hidden', 'dropout_rate', 'device'], {}), '(operations, dim_hidden, dropout_rate, device)\n', (3177, 3223), False, 'from mwptoolkit.module.Decoder.rnn_decoder import SalignedDecoder\n'), ((3546, 3578), 'torch.ones', 'torch.ones', (['(operations.N_OPS + 1)'], {}), '(operations.N_OPS + 1)\n', (3556, 3578), False, 'import torch\n'), ((3647, 3742), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', (['class_weights'], {'size_average': '(False)', 'reduce': '(False)', 'ignore_index': '(-1)'}), '(class_weights, size_average=False, reduce=False,\n ignore_index=-1)\n', (3672, 3742), False, 'import torch\n'), ((3764, 3791), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (3789, 3791), False, 'import torch\n'), ((8786, 8813), 'torch.stack', 'torch.stack', (['pred_logits', '(1)'], {}), '(pred_logits, 1)\n', (8797, 8813), False, 'import torch\n'), ((5366, 5466), 'mwptoolkit.module.Environment.stack_machine.StackMachine', 'StackMachine', (['self.operations', '(constants[b] + fix_constants)', 
'number_emb[b]', 'bottom'], {'dry_run': '(True)'}), '(self.operations, constants[b] + fix_constants, number_emb[b],\n bottom, dry_run=True)\n', (5378, 5466), False, 'from mwptoolkit.module.Environment.stack_machine import OPERATIONS, StackMachine\n'), ((6860, 6890), 'torch.argmax', 'torch.argmax', (['op_logits'], {'dim': '(1)'}), '(op_logits, dim=1)\n', (6872, 6890), False, 'import torch\n'), ((10190, 10276), 'mwptoolkit.module.Environment.stack_machine.StackMachine', 'StackMachine', (['self.operations', '(constants[b] + fix_constants)', 'number_emb[b]', 'bottom'], {}), '(self.operations, constants[b] + fix_constants, number_emb[b],\n bottom)\n', (10202, 10276), False, 'from mwptoolkit.module.Environment.stack_machine import OPERATIONS, StackMachine\n'), ((3285, 3312), 'torch.zeros', 'torch.zeros', (['(2 * dim_hidden)'], {}), '(2 * dim_hidden)\n', (3296, 3312), False, 'import torch\n'), ((3381, 3408), 'torch.zeros', 'torch.zeros', (['(2 * dim_hidden)'], {}), '(2 * dim_hidden)\n', (3392, 3408), False, 'import torch\n'), ((4582, 4615), 'torch.zeros', 'torch.zeros', (['(self._dim_hidden * 2)'], {}), '(self._dim_hidden * 2)\n', (4593, 4615), False, 'import torch\n'), ((5507, 5530), 'torch.zeros', 'torch.zeros', (['batch_size'], {}), '(batch_size)\n', (5518, 5530), False, 'import torch\n'), ((8627, 8657), 'torch.argmax', 'torch.argmax', (['op_logits'], {'dim': '(1)'}), '(op_logits, dim=1)\n', (8639, 8657), False, 'import torch\n'), ((9782, 9815), 'torch.zeros', 'torch.zeros', (['(self._dim_hidden * 2)'], {}), '(self._dim_hidden * 2)\n', (9793, 9815), False, 'import torch\n'), ((10317, 10340), 'torch.zeros', 'torch.zeros', (['batch_size'], {}), '(batch_size)\n', (10328, 10340), False, 'import torch\n'), ((7034, 7050), 'numpy.array', 'np.array', (['op_len'], {}), '(op_len)\n', (7042, 7050), True, 'import numpy as np\n'), ((12355, 12385), 'torch.LongTensor', 'torch.LongTensor', (['predicts_idx'], {}), '(predicts_idx)\n', (12371, 12385), False, 'import torch\n'), ((11517, 
11559), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['op_logits', '(-1)'], {}), '(op_logits, -1)\n', (11544, 11559), False, 'import torch\n'), ((11612, 11655), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['arg_logits', '(-1)'], {}), '(arg_logits, -1)\n', (11639, 11655), False, 'import torch\n'), ((5567, 5590), 'torch.zeros', 'torch.zeros', (['batch_size'], {}), '(batch_size)\n', (5578, 5590), False, 'import torch\n'), ((10377, 10400), 'torch.zeros', 'torch.zeros', (['batch_size'], {}), '(batch_size)\n', (10388, 10400), False, 'import torch\n'), ((5872, 5889), 'numpy.array', 'np.array', (['num_len'], {}), '(num_len)\n', (5880, 5889), True, 'import numpy as np\n')] |
"""
A class for loading neural networks in nnet format, as described in the readme.
The nnet format used is a slightly modified version of the ACAS Xu format (https://github.com/sisl/NNet).
Author: <NAME> <<EMAIL>>
"""
import numpy as np
import torch
import torch.nn as nn
from src.neural_networks.verinet_nn import VeriNetNN
class NNET:
    """
    A class for loading .nnet.
    The class is used to convert .nnet to VeriNetNN(torch.nn.Module) objects and to normalize inputs
    """
    def __init__(self, path: str=None):
        """
        Args:
            path : The path of the .nnet file, if given the information is read from this file. If None
                   init_nnet_from_file() or init_nnet_from_verinet_nn() can be used later.
        """
        # Header dict: num_layers / num_inputs / num_outputs / max_layer_size.
        self._main_info = None
        # Input normalization data from the nnet header (clip bounds, mean, range).
        self._min_values = None
        self._max_values = None
        self._mean = None
        self._range = None
        # Per-layer metadata: activation codes, type codes, sizes and extra params.
        self._layer_activations = None
        self._layer_types = None
        self._layer_sizes = None
        self._params = None
        # Raw weight/bias arrays, one entry per layer.
        self._weights = None
        self._biases = None
        # Maps nnet activation codes to torch modules; -1 means "no activation".
        self.activation_map_torch = {-1: None,
                                     0: nn.ReLU(),
                                     1: nn.Sigmoid(),
                                     2: nn.Tanh()}
        if path is not None:
            self.init_nnet_from_file(path)
@property
def num_inputs(self):
return self._main_info["num_inputs"]
@property
def num_layers(self):
return self._main_info["num_layers"]
@property
def num_outputs(self):
return self._main_info["num_outputs"]
@property
def max_layer_sie(self):
return self._main_info["max_layer_size"]
@property
def min_values(self):
return self._min_values
@property
def max_values(self):
return self._max_values
@property
def mean(self):
return self._mean
@property
def range(self):
return self._range
@property
def layer_activations(self):
return self._layer_activations
@property
def layer_types(self):
return self._layer_types
@property
def params(self):
return self._params
@property
def layer_sizes(self):
return self._layer_sizes
@property
def weights(self):
return self._weights
@property
def biases(self):
return self._biases
    def init_nnet_from_file(self, path: str):
        """
        Reads a .nnet file from disk and stores all parsed parameters on this object.

        Populates the header info, normalization data, layer metadata and the
        per-layer weight/bias arrays (self._weights / self._biases).

        Args:
            path: Filesystem path of the .nnet file to read.
        """
        with open(path, "r") as f:
            last_pos = f.tell()
            line = f.readline()
            while line[0:2] == "//": # Skip initial '//' comment lines
                last_pos = f.tell()
                line = f.readline()
            # Rewind to the first non-comment line before parsing the header.
            f.seek(last_pos)
            # Read header lines in the exact order defined by the nnet format.
            self._read_main_info(f)
            self._read_nnet_layer_sizes(f)
            self._read_nnet_min_values(f)
            self._read_nnet_max_values(f)
            self._read_nnet_mean(f)
            self._read_nnet_range(f)
            self._read_nnet_layer_activations(f)
            self._read_nnet_layer_types(f)
            self._read_nnet_params(f)
            self._weights = []
            self._biases = []
            # Dispatch on the layer-type code: 0 = fully connected, 1 = Conv2d, 2 = BatchNorm2d.
            for layer_num, layer_type in enumerate(self.layer_types):
                if layer_type == 0:
                    self._read_nnet_fc(f, self.layer_sizes[layer_num], self.layer_sizes[layer_num + 1])
                elif layer_type == 1:
                    self._read_nnet_conv2d(f, self._params[layer_num])
                elif layer_type == 2:
                    self._read_nnet_batchnorm_2d(f, self.params[layer_num]["num_features"])
                else:
                    raise ValueError(f"Layer type: {layer_type} not recognized")
def _read_main_info(self, file):
"""
Reads the first line of the header and stores it in self.main_info
Args:
file: The open file
"""
line = file.readline().split(",")[:-1]
assert len(line) == 4, f"Expected first line to have 4 ints, number of: layers, inputs, outputs, max layer " +\
f"size, found {len(line)}"
self._main_info = {
"num_layers": int(line[0]),
"num_inputs": int(line[1]),
"num_outputs": int(line[2]),
"max_layer_size": int(line[3])
}
    def _read_nnet_layer_sizes(self, file):
        """
        Reads the second header line containing the layer sizes
        (num_layers + 1 entries: input size followed by each layer's size).
        Args:
            file: The open file
        """
        line = file.readline().split(",")[:-1]
        msg = f"File had {len(line)} layer sizes, expected {self.num_layers + 1}"
        assert len(line) == (self.num_layers + 1), msg
        self._layer_sizes = [int(size) for size in line]
    def _read_nnet_min_values(self, file):
        """
        Reads the third header line containing the minimum input values
        (a single value or one per input).
        Args:
            file: The open file
        """
        line = file.readline().split(",")[:-1]
        msg = f"Got {len(line)} min values, expected 1 or {self.num_inputs}"
        assert len(line) == 1 or len(line) == self.num_inputs, msg
        self._min_values = np.array([float(min_val) for min_val in line])
    def _read_nnet_max_values(self, file):
        """
        Reads the fourth header line containing the maximum input values
        (a single value or one per input).
        Args:
            file: The open file
        """
        line = file.readline().split(",")[:-1]
        msg = f"Got {len(line)} max values, expected 1 or {self.num_inputs}"
        assert len(line) == 1 or len(line) == self.num_inputs, msg
        self._max_values = np.array([float(max_val) for max_val in line])
    def _read_nnet_mean(self, file):
        """
        Reads the fifth header line containing the mean input values
        (a single value or one per input).
        Args:
            file: The open file
        """
        line = file.readline().split(",")[:-1]
        msg = f"Got {len(line)} mean values, expected 1, {self.num_inputs}"
        assert len(line) == 1 or len(line) == self.num_inputs, msg
        self._mean = np.array([float(mean) for mean in line])
    def _read_nnet_range(self, file):
        """
        Reads the sixth header line containing the range of the input values
        (a single value or one per input).
        Args:
            file: The open file
        """
        line = file.readline().split(",")[:-1]
        msg = f"Got {len(line)} range values, expected 1, {self.num_inputs}"
        assert len(line) == 1 or len(line) == self.num_inputs, msg
        self._range = np.array([float(val) for val in line])
def _read_nnet_layer_activations(self, file):
"""
Reads the seventh header line containing the layer activations.
Args:
file: The open file
"""
line = file.readline().split(",")[:-1]
msg = f"Got {len(line)} activations, expected {self.num_layers}"
assert len(line) == self.num_layers, msg
self._layer_activations = [int(layer_type) for layer_type in line]
for activation in self._layer_activations:
msg = "NNET only supports Relu (0), Sigmoid (1) and Tanh (2), got activation: {activation}"
assert activation in [-1, 0, 1, 2], msg
def _read_nnet_layer_types(self, file):
"""
Reads the eight header line containing the layer types.
Args:
file: The open file
"""
line = file.readline().split(",")[:-1]
msg = f"Got {len(line)} layer types, expected {self.num_layers}"
assert len(line) == self.num_layers, msg
self._layer_types = [int(layer_type) for layer_type in line]
for layer_type in self._layer_types:
assert layer_type in [0, 1, 2], (f"NNET only supports FC (0), Conv2d (1) and BatchNorm2d (2), " +
"got type {layer_type}")
    def _read_nnet_params(self, file):
        """
        Reads the ninth header section containing per-layer parameters.

        Only Conv2d (type 1) and BatchNorm2d (type 2) layers contribute lines;
        fully-connected layers get an empty dict so self._params stays aligned
        with self._layer_types.

        Args:
            file: The open file
        """
        self._params = []
        for layer_type in self._layer_types:
            params = {}
            if layer_type == 1:
                # One line: out_channels, in_channels, kernel_size, stride, padding.
                line = file.readline().split(",")[:-1]
                msg = f"Got {len(line)} convolutional parameters, expected 5"
                assert len(line) == 5, msg
                params = {"out_channels": int(line[0]), "in_channels": int(line[1]), "kernel_size": int(line[2]),
                          "stride": int(line[3]), "padding": int(line[4])}
            if layer_type == 2:
                # Three lines: num_features, then running_mean and running_var vectors.
                line = file.readline().split(",")[0]
                num_features = int(line)
                line = file.readline().split(",")[:-1]
                running_mean = [np.float64(mean) for mean in line]
                line = file.readline().split(",")[:-1]
                running_var = [np.float64(var) for var in line]
                params = {"num_features": num_features, "running_mean": running_mean, "running_var": running_var}
            self._params.append(params)
def _read_nnet_fc(self, file, in_size: int, out_size: int):
"""
Reads and returns one fc layer of the nnet file.
Args:
file : The file io stream, f calling f.readline() is assumed to return the first row of weights.
in_size : The input size to the fc layer
out_size: The out size of the fc layer
"""
weights = np.empty((out_size, in_size))
bias = np.empty(out_size)
for node in range(out_size):
line = file.readline().split(",")[:-1]
weights[node, :] = np.array([float(num) for num in line])
for node in range(out_size):
line = file.readline().split(",")[:-1]
bias[node] = np.array([float(num) for num in line])
self.weights.append(weights)
self.biases.append(bias)
def _read_nnet_conv2d(self, file, conv_params: dict):
"""
Reads and returns one conv2d layer of the nnet file
Args:
file : The file io stream, f calling f.readline() is assumed to return the first row of weights.
conv_params : A dict containing the params of the convo layer:
{'in_channels': int, 'out_channels': int, 'kernel_size': int, 'stride': int, 'padding': int}
"""
weights = np.empty((conv_params["out_channels"], conv_params["in_channels"], conv_params["kernel_size"],
conv_params["kernel_size"]))
bias = np.empty(conv_params["out_channels"])
for channel in range(conv_params["out_channels"]):
line = file.readline().split(",")[:-1]
weights[channel] = np.array([np.float64(num) for num in line]).reshape((conv_params["in_channels"],
conv_params["kernel_size"],
conv_params["kernel_size"]))
for channel in range(conv_params["out_channels"]):
line = file.readline().split(",")[:-1]
bias[channel] = np.array([np.float64(num) for num in line])
self.weights.append(weights)
self.biases.append(bias)
def _read_nnet_batchnorm_2d(self, file, feature_num: int):
"""
Reads and returns one batchnorm layer of the nnet file.
Args:
file : The file io stream, f calling f.readline() should return the first row of weights.
"""
weights = np.empty(feature_num)
bias = np.empty(feature_num)
line = file.readline().split(",")[:-1]
weights[:] = np.array([float(num) for num in line])
for node in range(feature_num):
line = file.readline().split(",")[:-1]
bias[node] = np.array([float(num) for num in line])
self.weights.append(weights)
self.biases.append(bias)
    def init_nnet_from_verinet_nn(self, model: VeriNetNN, input_shape: np.array, min_values: np.array,
                                  max_values: np.array, input_mean: np.array, input_range: np.array):
        """
        Initializes this object's nnet parameters from a VeriNetNN model.

        Args:
            model       : The VeriNetNN model
            input_shape : The shape of the input, either a 1d (size) or 3d (channels, height, width) array
            min_values  : The minimum values for the input, either an array of size 1 or of the same size as
                          the input
            max_values  : The maximum values for the input, either an array of size 1 or of the same size as
                          the input
            input_mean  : The mean of the inputs, either an array of size 1 or of the same size as
                          the input
            input_range : The range of the inputs, either an array of size 1 or of the same size as
                          the input
        Raises:
            AssertionError: If any normalization array is neither of size 1 nor of input size.
        """
        input_shape = np.array(input_shape)
        # Extract layer sizes/types/params/weights and activation codes from the model.
        self._get_verinet_nn_layer_info(model, input_shape)
        self._get_verinet_nn_layer_activations(model)
        self._main_info = {
            "num_layers": len(self._layer_types),
            "num_inputs": self._layer_sizes[0],
            "num_outputs": self._layer_sizes[-1],
            "max_layer_size": max(self._layer_sizes)
        }
        num_inputs = self._layer_sizes[0]
        # Accept scalars by promoting everything to at least 1D before validating.
        min_values = np.atleast_1d(min_values)
        max_values = np.atleast_1d(max_values)
        input_mean = np.atleast_1d(input_mean)
        input_range = np.atleast_1d(input_range)
        msg = "min_values, max_values, input_mean and input_range should be of size 1 or the same size as the input"
        assert min_values.shape == (1,) or min_values.shape == (num_inputs, ), msg
        assert max_values.shape == (1,) or max_values.shape == (num_inputs,), msg
        assert input_mean.shape == (1,) or input_mean.shape == (num_inputs,), msg
        assert input_range.shape == (1,) or input_range.shape == (num_inputs,), msg
        self._min_values = min_values
        self._max_values = max_values
        self._mean = input_mean
        self._range = input_range
    def _get_verinet_nn_layer_info(self, model: VeriNetNN, input_shape: np.array):
        """
        Extracts layer sizes, type codes, parameters and weight/bias arrays
        from the VeriNetNN model.

        Args:
            model       : The VeriNetNN model
            input_shape : The shape of the input, either 1d or 3d
        """
        # Each child of the model is assumed to be a Sequential whose first
        # entry is the parameterized layer (activation, if any, comes second).
        layers = [sequential[0] for sequential in list(model.children())[0]]
        self._layer_sizes = []
        self._layer_types = []
        self._params = []
        self._weights = []
        self._biases = []
        # layer_shapes tracks the (possibly multi-dimensional) output shape of
        # each layer; layer_sizes stores the flattened element count.
        layer_shapes = []
        self._layer_sizes.append(np.prod(input_shape))
        layer_shapes.append(input_shape)
        for i, layer in enumerate(layers):
            self._weights.append(layer.weight.data.detach().numpy())
            self._biases.append(layer.bias.data.detach().numpy())
            if isinstance(layer, nn.Linear):
                self._layer_sizes.append(layer.out_features)
                layer_shapes.append(np.array(self._layer_sizes[-1]))
                self._layer_types.append(0)
            elif isinstance(layer, nn.Conv2d):
                assert len(layer_shapes[-1]) == 3, f"Layer {i} was Conv2d, but shape of last layer was not 3"
                kernel_size = np.array(layer.kernel_size)
                padding = np.array(layer.padding)
                stride = np.array(layer.stride)
                # nnet can only describe square kernels with symmetric stride/padding.
                assert kernel_size[0] == kernel_size[1], "Only square kernels are supported by nnet"
                assert padding[0] == padding[1], "Only equal padding, vertical and horizontal, is supported by nnet"
                assert stride[0] == stride[1], "Only equal stride, vertical and horizontal, is supported by nnet"
                # Standard convolution output-size formula per spatial dimension.
                img_size = (layer_shapes[-1][1:] + 2*padding - kernel_size) / stride + 1
                layer_shapes.append(np.array((layer.out_channels, *img_size), dtype=int))
                self._layer_sizes.append(np.prod(layer_shapes[-1]))
                self._layer_types.append(1)
                self._params.append({"out_channels": layer.out_channels,
                                     "in_channels": layer_shapes[-2][0],
                                     "kernel_size": kernel_size[0],
                                     "stride": stride[0],
                                     "padding": padding[0]})
            elif isinstance(layer, nn.BatchNorm2d):
                # Batch norm preserves the shape of the previous layer.
                self._layer_sizes.append(self._layer_sizes[-1])
                layer_shapes.append(layer_shapes[-1])
                self._layer_types.append(2)
                self._params.append({"num_features": layer.num_features,
                                     "running_mean": layer.running_mean.detach().numpy(),
                                     "running_var": layer.running_var.detach().numpy()})
def _get_verinet_nn_layer_activations(self, model: VeriNetNN):
"""
Gets the activation functions from the VeriNetNN model
Args:
model: The VeriNetNN model
"""
sequentials = [sequential for sequential in list(model.children())[0]]
self._layer_activations = []
for sequential in sequentials:
if len(list(sequential)) == 1:
self.layer_activations.append(-1)
elif isinstance(sequential[1], nn.ReLU):
self.layer_activations.append(0)
elif isinstance(sequential[1], nn.Sigmoid):
self.layer_activations.append(1)
elif isinstance(sequential[1], nn.Tanh):
self.layer_activations.append(2)
else:
msg = f"Activation function {sequential[1]} not recognized, should be nn.Relu, nn.Sigmoid or nn.Tanh"
raise ValueError(msg)
    # noinspection PyArgumentList
    def from_nnet_to_verinet_nn(self) -> VeriNetNN:
        """
        Converts the stored nnet parameters to a VeriNetNN(torch.nn.Module) object.

        Returns:
            The VeriNetNN model
        Raises:
            AssertionError: If a stored activation or layer-type code is not recognized.
        """
        layers = []
        for layer_num in range(self.num_layers):
            act_num = self.layer_activations[layer_num]
            try:
                # NOTE: the same activation-module instance from
                # self.activation_map_torch is reused across layers.
                act = self.activation_map_torch[act_num]
            except KeyError:
                raise AssertionError(f"Didn't recognize activation function {act_num} for layer {layer_num}")
            layer_type_num = self.layer_types[layer_num]
            if layer_type_num == 0:
                layer = nn.Linear(self.layer_sizes[layer_num], self.layer_sizes[layer_num + 1])
                layer.weight.data = torch.Tensor(self.weights[layer_num])
                layer.bias.data = torch.Tensor(self.biases[layer_num])
            elif layer_type_num == 1:
                params = self.params[layer_num]
                layer = nn.Conv2d(params["in_channels"], params["out_channels"], params["kernel_size"],
                                  params["stride"], params["padding"])
                layer.weight.data = torch.Tensor(self.weights[layer_num])
                layer.bias.data = torch.Tensor(self.biases[layer_num])
            elif layer_type_num == 2:
                params = self.params[layer_num]
                num_features = params["num_features"]
                layer = nn.BatchNorm2d(num_features=num_features)
                # Restore the stored running statistics and affine parameters.
                layer.running_mean = torch.Tensor(params["running_mean"])
                layer.running_var = torch.Tensor(params["running_var"])
                layer.weight.data = torch.FloatTensor(self.weights[layer_num])
                layer.bias.data = torch.FloatTensor(self.biases[layer_num])
            else:
                raise AssertionError(f"Didn't recognize layer type {layer_type_num} for layer {layer_num}")
            # Wrap each layer (plus its activation, if any) in a Sequential block.
            if act is not None:
                layers.append(nn.Sequential(layer, act))
            else:
                layers.append(nn.Sequential(layer))
        return VeriNetNN(layers)
def write_nnet_to_file(self, filepath: str):
with open(filepath, "w") as f:
f.write("// A neural network in nnet format\n")
f.write("// The documentation can be found in the src/data_loader/readme.md file of the Verinet project\n")
self._write_main_info(f)
self._write_layer_size(f)
self._write_min_max(f)
self._write_mean_range(f)
self._write_layer_activations(f)
self._write_layer_types(f)
self._write_layer_params(f)
self._write_layer_weights_biases(f)
    def _write_main_info(self, f):
        """
        Writes the first header line: num_layers, num_inputs, num_outputs and
        max_layer_size as comma-separated ints.
        Args:
            f: The open file with write permission
        """
        num_layers = self._main_info["num_layers"]
        num_inputs = self._main_info["num_inputs"]
        num_outputs = self._main_info["num_outputs"]
        max_layer_size = self._main_info["max_layer_size"]
        f.write(f"{num_layers},{num_inputs},{num_outputs},{max_layer_size},\n")
    def _write_layer_size(self, f):
        """
        Writes the layer sizes as one comma-separated line.
        Args:
            f: The open file with write permission
        """
        for size in self.layer_sizes:
            f.write(f"{size},")
        f.write("\n")
    def _write_min_max(self, f):
        """
        Writes the min values and max values, one comma-separated line each.
        Args:
            f: The open file with write permission
        """
        for value in self.min_values:
            f.write(f"{value},")
        f.write("\n")
        for value in self.max_values:
            f.write(f"{value},")
        f.write("\n")
    def _write_mean_range(self, f):
        """
        Writes the mean and range values, one comma-separated line each.
        Args:
            f: The open file with write permission
        """
        for value in self.mean:
            f.write(f"{value},")
        f.write("\n")
        for value in self.range:
            f.write(f"{value},")
        f.write("\n")
    def _write_layer_activations(self, f):
        """
        Writes the layer activation codes as one comma-separated line.
        Args:
            f: The open file with write permission
        """
        for act in self.layer_activations:
            f.write(f"{act},")
        f.write("\n")
    def _write_layer_types(self, f):
        """
        Writes the layer type codes as one comma-separated line.
        Args:
            f: The open file with write permission
        """
        for layer_type in self.layer_types:
            f.write(f"{layer_type},")
        f.write("\n")
    def _write_layer_params(self, f):
        """
        Writes the per-layer parameter lines; fully-connected layers (type 0)
        contribute nothing, mirroring _read_nnet_params.
        Args:
            f: The open file with write permission
        """
        for layer_num, layer_type in enumerate(self._layer_types):
            if layer_type == 0:
                continue
            params = self._params[layer_num]
            if layer_type == 1:
                # Conv2d: one line of five ints.
                f.write(f"{params['out_channels']},")
                f.write(f"{params['in_channels']},")
                f.write(f"{params['kernel_size']},")
                f.write(f"{params['stride']},")
                f.write(f"{params['padding']},")
                f.write("\n")
            elif layer_type == 2:
                # BatchNorm2d: num_features, then running_mean and running_var lines.
                f.write(f"{params['num_features']},\n")
                for mean in params['running_mean']:
                    f.write(f"{mean},")
                f.write("\n")
                for var in params['running_var']:
                    f.write(f"{var},")
                f.write("\n")
    def _write_layer_weights_biases(self, f):
        """
        Writes each layer's weights and biases, flattened to comma-separated
        lines according to the layer type (values keep a trailing comma so the
        reader's split(",")[:-1] convention holds).
        Args:
            f: The open file with write permission
        """
        for layer_num in range(len(self.layer_types)):
            weights = self._weights[layer_num]
            biases = self._biases[layer_num]
            if self._layer_types[layer_num] == 0:
                # FC: one line per weight-matrix row, then one line per bias.
                for row in weights:
                    for weight in row:
                        f.write(f"{weight},")
                    f.write("\n")
                for bias in biases:
                    f.write(f"{bias}, \n")
            if self._layer_types[layer_num] == 1:
                # Conv2d: one line per (out_channel, in_channel) kernel slab.
                for out_channel in weights:
                    for in_channel in out_channel:
                        for row in in_channel:
                            for weight in row:
                                f.write(f"{weight},")
                        f.write("\n")
                for bias in biases:
                    f.write(f"{bias}, \n")
            if self._layer_types[layer_num] == 2:
                # BatchNorm2d: one line of weights, then one line per bias.
                for weight in weights:
                    f.write(f"{weight},")
                f.write("\n")
                for bias in biases:
                    f.write(f"{bias}, \n")
def normalize_input(self, x: np.array) -> np.array:
"""
Uses the range, mean, max_values and min_values read from the nnet file to normalize the given input
Args:
x: The input, should be either 1D or a 2D batch, with of size: (batch_size, input_dim)
Returns:
The normalized input
"""
x = self._clip_min(x)
x = self._clip_max(x)
x = self._subtract_mean(x)
x = self._divide_range(x)
return x
def _clip_min(self, x: np.array):
"""
Clips the input to the stored min values
Args:
x: The input, should be either 1D or a 2D batch, with of size: (batch_size, input_dim)
Returns:
The clipped input
"""
x = x.copy()
if (self.min_values.shape[0] == 1) or (len(x.shape) == 1 and x.shape[0] == self.num_inputs):
x[x < self.min_values] = self.min_values
elif (len(x.shape) == 2) and x.shape[1] == self.num_inputs:
for row in range(x.shape[0]):
x[row, :][x[row, :] < self.min_values] = self.min_values[x[row, :] < self.min_values]
else:
raise ValueError(f"Expected input to be 1D or 2D with size (batch_size, input_dim), is {x.shape}")
return x
def _clip_max(self, x: np.array):
"""
Clips the input to the stored max values
Args:
x: The input, should be either 1D or a 2D batch, with of size: (batch_size, input_dim)
Returns:
The clipped input
"""
x = x.copy()
if (self.max_values.shape[0] == 1) or (len(x.shape) == 1 and x.shape[0] == self.num_inputs):
x[x > self.max_values] = self.max_values
elif (len(x.shape) == 2) and x.shape[1] == self.num_inputs:
for row in range(x.shape[0]):
x[row, :][x[row, :] > self.max_values] = self.max_values[x[row, :] > self.max_values]
else:
raise ValueError(f"Expected input to be 1D or 2D with size (batch_size, input_dim), is {x.shape}")
return x
def _subtract_mean(self, x: np.array):
"""
Subtracts the mean
Args:
x: The input, should be either 1D or a 2D batch, with of size: (batch_size, input_dim)
Returns:
x-self.mean
"""
x = x.copy()
if (self.max_values.shape[0] == 1) or (len(x.shape) == 1 and x.shape[0] == self.num_inputs):
x -= self._mean
elif (len(x.shape) == 2) and x.shape[1] == self.num_inputs:
for row in range(x.shape[0]):
x[row, :] -= self.mean
else:
raise ValueError(f"Expected input to be 1D or 2D with size (batch_size, input_dim), is {x.shape}")
return x
def _divide_range(self, x: np.array):
"""
Subtracts the mean
Args:
x: The input, should be either 1D or a 2D batch, with of size: (batch_size, input_dim)
Returns:
x-self.range
"""
x = x.copy()
if (self.max_values.shape[0] == 1) or (len(x.shape) == 1 and x.shape[0] == self.num_inputs):
x /= self._range
elif (len(x.shape) == 2) and x.shape[1] == self.num_inputs:
for row in range(x.shape[0]):
x[row, :] /= self._range
else:
raise ValueError(f"Expected input to be 1D or 2D with size (batch_size, input_dim), is {x.shape}")
return x
| [
"torch.nn.Sigmoid",
"torch.nn.ReLU",
"numpy.prod",
"torch.nn.BatchNorm2d",
"torch.nn.Tanh",
"torch.nn.Sequential",
"numpy.float64",
"torch.Tensor",
"src.neural_networks.verinet_nn.VeriNetNN",
"torch.nn.Conv2d",
"numpy.array",
"numpy.empty",
"torch.nn.Linear",
"torch.FloatTensor",
"numpy.... | [((9765, 9794), 'numpy.empty', 'np.empty', (['(out_size, in_size)'], {}), '((out_size, in_size))\n', (9773, 9794), True, 'import numpy as np\n'), ((9810, 9828), 'numpy.empty', 'np.empty', (['out_size'], {}), '(out_size)\n', (9818, 9828), True, 'import numpy as np\n'), ((10700, 10827), 'numpy.empty', 'np.empty', (["(conv_params['out_channels'], conv_params['in_channels'], conv_params[\n 'kernel_size'], conv_params['kernel_size'])"], {}), "((conv_params['out_channels'], conv_params['in_channels'],\n conv_params['kernel_size'], conv_params['kernel_size']))\n", (10708, 10827), True, 'import numpy as np\n'), ((10867, 10904), 'numpy.empty', 'np.empty', (["conv_params['out_channels']"], {}), "(conv_params['out_channels'])\n", (10875, 10904), True, 'import numpy as np\n'), ((11899, 11920), 'numpy.empty', 'np.empty', (['feature_num'], {}), '(feature_num)\n', (11907, 11920), True, 'import numpy as np\n'), ((11936, 11957), 'numpy.empty', 'np.empty', (['feature_num'], {}), '(feature_num)\n', (11944, 11957), True, 'import numpy as np\n'), ((13367, 13388), 'numpy.array', 'np.array', (['input_shape'], {}), '(input_shape)\n', (13375, 13388), True, 'import numpy as np\n'), ((13857, 13882), 'numpy.atleast_1d', 'np.atleast_1d', (['min_values'], {}), '(min_values)\n', (13870, 13882), True, 'import numpy as np\n'), ((13904, 13929), 'numpy.atleast_1d', 'np.atleast_1d', (['max_values'], {}), '(max_values)\n', (13917, 13929), True, 'import numpy as np\n'), ((13951, 13976), 'numpy.atleast_1d', 'np.atleast_1d', (['input_mean'], {}), '(input_mean)\n', (13964, 13976), True, 'import numpy as np\n'), ((13999, 14025), 'numpy.atleast_1d', 'np.atleast_1d', (['input_range'], {}), '(input_range)\n', (14012, 14025), True, 'import numpy as np\n'), ((20475, 20492), 'src.neural_networks.verinet_nn.VeriNetNN', 'VeriNetNN', (['layers'], {}), '(layers)\n', (20484, 20492), False, 'from src.neural_networks.verinet_nn import VeriNetNN\n'), ((1202, 1211), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), 
'()\n', (1209, 1211), True, 'import torch.nn as nn\n'), ((1253, 1265), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1263, 1265), True, 'import torch.nn as nn\n'), ((1307, 1316), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (1314, 1316), True, 'import torch.nn as nn\n'), ((15197, 15217), 'numpy.prod', 'np.prod', (['input_shape'], {}), '(input_shape)\n', (15204, 15217), True, 'import numpy as np\n'), ((19038, 19109), 'torch.nn.Linear', 'nn.Linear', (['self.layer_sizes[layer_num]', 'self.layer_sizes[layer_num + 1]'], {}), '(self.layer_sizes[layer_num], self.layer_sizes[layer_num + 1])\n', (19047, 19109), True, 'import torch.nn as nn\n'), ((19146, 19183), 'torch.Tensor', 'torch.Tensor', (['self.weights[layer_num]'], {}), '(self.weights[layer_num])\n', (19158, 19183), False, 'import torch\n'), ((19218, 19254), 'torch.Tensor', 'torch.Tensor', (['self.biases[layer_num]'], {}), '(self.biases[layer_num])\n', (19230, 19254), False, 'import torch\n'), ((9058, 9074), 'numpy.float64', 'np.float64', (['mean'], {}), '(mean)\n', (9068, 9074), True, 'import numpy as np\n'), ((9179, 9194), 'numpy.float64', 'np.float64', (['var'], {}), '(var)\n', (9189, 9194), True, 'import numpy as np\n'), ((11502, 11517), 'numpy.float64', 'np.float64', (['num'], {}), '(num)\n', (11512, 11517), True, 'import numpy as np\n'), ((15584, 15615), 'numpy.array', 'np.array', (['self._layer_sizes[-1]'], {}), '(self._layer_sizes[-1])\n', (15592, 15615), True, 'import numpy as np\n'), ((15851, 15878), 'numpy.array', 'np.array', (['layer.kernel_size'], {}), '(layer.kernel_size)\n', (15859, 15878), True, 'import numpy as np\n'), ((15905, 15928), 'numpy.array', 'np.array', (['layer.padding'], {}), '(layer.padding)\n', (15913, 15928), True, 'import numpy as np\n'), ((15954, 15976), 'numpy.array', 'np.array', (['layer.stride'], {}), '(layer.stride)\n', (15962, 15976), True, 'import numpy as np\n'), ((19366, 19487), 'torch.nn.Conv2d', 'nn.Conv2d', (["params['in_channels']", "params['out_channels']", 
"params['kernel_size']", "params['stride']", "params['padding']"], {}), "(params['in_channels'], params['out_channels'], params[\n 'kernel_size'], params['stride'], params['padding'])\n", (19375, 19487), True, 'import torch.nn as nn\n'), ((19553, 19590), 'torch.Tensor', 'torch.Tensor', (['self.weights[layer_num]'], {}), '(self.weights[layer_num])\n', (19565, 19590), False, 'import torch\n'), ((19625, 19661), 'torch.Tensor', 'torch.Tensor', (['self.biases[layer_num]'], {}), '(self.biases[layer_num])\n', (19637, 19661), False, 'import torch\n'), ((20362, 20387), 'torch.nn.Sequential', 'nn.Sequential', (['layer', 'act'], {}), '(layer, act)\n', (20375, 20387), True, 'import torch.nn as nn\n'), ((20437, 20457), 'torch.nn.Sequential', 'nn.Sequential', (['layer'], {}), '(layer)\n', (20450, 20457), True, 'import torch.nn as nn\n'), ((16437, 16489), 'numpy.array', 'np.array', (['(layer.out_channels, *img_size)'], {'dtype': 'int'}), '((layer.out_channels, *img_size), dtype=int)\n', (16445, 16489), True, 'import numpy as np\n'), ((16532, 16557), 'numpy.prod', 'np.prod', (['layer_shapes[-1]'], {}), '(layer_shapes[-1])\n', (16539, 16557), True, 'import numpy as np\n'), ((19829, 19870), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'num_features'}), '(num_features=num_features)\n', (19843, 19870), True, 'import torch.nn as nn\n'), ((19908, 19944), 'torch.Tensor', 'torch.Tensor', (["params['running_mean']"], {}), "(params['running_mean'])\n", (19920, 19944), False, 'import torch\n'), ((19981, 20016), 'torch.Tensor', 'torch.Tensor', (["params['running_var']"], {}), "(params['running_var'])\n", (19993, 20016), False, 'import torch\n'), ((20053, 20095), 'torch.FloatTensor', 'torch.FloatTensor', (['self.weights[layer_num]'], {}), '(self.weights[layer_num])\n', (20070, 20095), False, 'import torch\n'), ((20130, 20171), 'torch.FloatTensor', 'torch.FloatTensor', (['self.biases[layer_num]'], {}), '(self.biases[layer_num])\n', (20147, 20171), False, 'import torch\n'), 
((11057, 11072), 'numpy.float64', 'np.float64', (['num'], {}), '(num)\n', (11067, 11072), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from numpy import array
from bruges.models import reconcile, interpolate, panel
from bruges.models import wedge
class ModelTest(unittest.TestCase):
    """
    Unit tests for the bruges.models helpers.
    """
    def test_reconcile(self):
        """reconcile() resamples both logs onto a common length."""
        upper = np.array([2, 6, 7, 7, 3])
        lower = np.array([3, 7, 3])
        A, B = reconcile(upper, lower, order=0)
        self.assertTrue(np.array_equal(A, array([2, 6, 7, 7, 3])))
        self.assertTrue(np.array_equal(B, array([3, 7, 7, 3, 3])))
    def test_interpolate(self):
        """interpolate() returns num traces between the two logs."""
        left = np.array([2, 6, 7, 7, 3])
        right = np.array([3, 7, 7, 3, 3])
        result = interpolate(left, right, num=10)
        self.assertEqual(result.shape, (5, 10))
    def test_panel(self):
        """panel() builds an interpolated section honouring dists."""
        left = np.array([2, 6, 7, 7, 3])
        right = np.array([3, 7, 3])
        out = panel(left, right, num=15, dists=(10,))
        trace = out[:, 7]
        self.assertTrue(np.all(trace[:4] == array([2.5, 6.5, 5., 3.])))
        self.assertTrue(np.isnan(trace[-1]))
    def test_wedge(self):
        """wedge() produces the expected geometry and reference trace."""
        w, top, base, ref = wedge(depth=10, width=7, strat=(10, (20, 30), 40))
        expected_col = array([10, 10, 10, 20, 20, 30, 40, 40, 40, 40])
        self.assertTrue(np.all(w[:, -1] == expected_col))
        self.assertEqual(w.sum(), 1990)
        self.assertTrue(np.allclose(top, array([3., 3., 3., 3., 3., 3., 3.])))
        self.assertTrue(np.allclose(base, array([3., 3., 3.6, 4.2, 4.8, 5.4, 6.])))
        self.assertEqual(ref, 6)
    def test_netgross(self):
        """wedge() with breadth returns a 3D net-to-gross model."""
        w, top, *_ = wedge(depth=10, width=7, breadth=3, strat=(10, (20, 30), 40))
        self.assertEqual(w.sum(), 6003)
        self.assertEqual(w.shape, (10, 7, 3))
        self.assertEqual(top.sum(), 63.0)
if __name__ == '__main__':
    # Build and run the suite for this module's test case only.
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(loader.loadTestsFromTestCase(ModelTest))
| [
"numpy.allclose",
"unittest.TextTestRunner",
"bruges.models.wedge",
"numpy.array",
"bruges.models.panel",
"numpy.array_equal",
"numpy.isnan",
"numpy.all",
"bruges.models.reconcile",
"bruges.models.interpolate",
"unittest.TestLoader"
] | [((262, 287), 'numpy.array', 'np.array', (['[2, 6, 7, 7, 3]'], {}), '([2, 6, 7, 7, 3])\n', (270, 287), True, 'import numpy as np\n'), ((300, 319), 'numpy.array', 'np.array', (['[3, 7, 3]'], {}), '([3, 7, 3])\n', (308, 319), True, 'import numpy as np\n'), ((335, 359), 'bruges.models.reconcile', 'reconcile', (['a', 'b'], {'order': '(0)'}), '(a, b, order=0)\n', (344, 359), False, 'from bruges.models import reconcile, interpolate, panel\n'), ((563, 588), 'numpy.array', 'np.array', (['[2, 6, 7, 7, 3]'], {}), '([2, 6, 7, 7, 3])\n', (571, 588), True, 'import numpy as np\n'), ((601, 626), 'numpy.array', 'np.array', (['[3, 7, 7, 3, 3]'], {}), '([3, 7, 7, 3, 3])\n', (609, 626), True, 'import numpy as np\n'), ((644, 669), 'bruges.models.interpolate', 'interpolate', (['a', 'b'], {'num': '(10)'}), '(a, b, num=10)\n', (655, 669), False, 'from bruges.models import reconcile, interpolate, panel\n'), ((758, 783), 'numpy.array', 'np.array', (['[2, 6, 7, 7, 3]'], {}), '([2, 6, 7, 7, 3])\n', (766, 783), True, 'import numpy as np\n'), ((796, 815), 'numpy.array', 'np.array', (['[3, 7, 3]'], {}), '([3, 7, 3])\n', (804, 815), True, 'import numpy as np\n'), ((852, 884), 'bruges.models.panel', 'panel', (['a', 'b'], {'num': '(15)', 'dists': 'dists'}), '(a, b, num=15, dists=dists)\n', (857, 884), False, 'from bruges.models import reconcile, interpolate, panel\n'), ((1086, 1136), 'bruges.models.wedge', 'wedge', ([], {'depth': '(10)', 'width': '(7)', 'strat': '(10, (20, 30), 40)'}), '(depth=10, width=7, strat=(10, (20, 30), 40))\n', (1091, 1136), False, 'from bruges.models import wedge\n'), ((1151, 1198), 'numpy.array', 'array', (['[10, 10, 10, 20, 20, 30, 40, 40, 40, 40]'], {}), '([10, 10, 10, 20, 20, 30, 40, 40, 40, 40])\n', (1156, 1198), False, 'from numpy import array\n'), ((1211, 1253), 'numpy.array', 'array', (['[3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0]'], {}), '([3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0])\n', (1216, 1253), False, 'from numpy import array\n'), ((1259, 1301), 'numpy.array', 
'array', (['[3.0, 3.0, 3.6, 4.2, 4.8, 5.4, 6.0]'], {}), '([3.0, 3.0, 3.6, 4.2, 4.8, 5.4, 6.0])\n', (1264, 1301), False, 'from numpy import array\n'), ((1566, 1627), 'bruges.models.wedge', 'wedge', ([], {'depth': '(10)', 'width': '(7)', 'breadth': '(3)', 'strat': '(10, (20, 30), 40)'}), '(depth=10, width=7, breadth=3, strat=(10, (20, 30), 40))\n', (1571, 1627), False, 'from bruges.models import wedge\n'), ((377, 399), 'numpy.array', 'array', (['[2, 6, 7, 7, 3]'], {}), '([2, 6, 7, 7, 3])\n', (382, 399), False, 'from numpy import array\n'), ((401, 423), 'numpy.array', 'array', (['[3, 7, 7, 3, 3]'], {}), '([3, 7, 7, 3, 3])\n', (406, 423), False, 'from numpy import array\n'), ((448, 469), 'numpy.array_equal', 'np.array_equal', (['A', 'A_'], {}), '(A, A_)\n', (462, 469), True, 'import numpy as np\n'), ((495, 516), 'numpy.array_equal', 'np.array_equal', (['B', 'B_'], {}), '(B, B_)\n', (509, 516), True, 'import numpy as np\n'), ((1009, 1029), 'numpy.isnan', 'np.isnan', (['sample[-1]'], {}), '(sample[-1])\n', (1017, 1029), True, 'import numpy as np\n'), ((1324, 1347), 'numpy.all', 'np.all', (['(w[:, -1] == col)'], {}), '(w[:, -1] == col)\n', (1330, 1347), True, 'import numpy as np\n'), ((1414, 1433), 'numpy.allclose', 'np.allclose', (['top', 't'], {}), '(top, t)\n', (1425, 1433), True, 'import numpy as np\n'), ((1459, 1479), 'numpy.allclose', 'np.allclose', (['base', 'b'], {}), '(base, b)\n', (1470, 1479), True, 'import numpy as np\n'), ((1800, 1821), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (1819, 1821), False, 'import unittest\n'), ((1859, 1895), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (1882, 1895), False, 'import unittest\n'), ((957, 984), 'numpy.array', 'array', (['[2.5, 6.5, 5.0, 3.0]'], {}), '([2.5, 6.5, 5.0, 3.0])\n', (962, 984), False, 'from numpy import array\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 23 12:22:37 2020
@author: ninjaac
"""
#############################################################################################
#Array manupulation
"""def matchingStrings(s, q):
for query in q:
lenth = 0
for strings in s:
if query == strings:
lenth +=1
print(lenth)"""
# this can be shorten as
def matchingStrings(s, q):
    """For each query in q, print how many strings in s are equal to it.

    First prints the flat 0/1 match list (kept from the original version),
    then one count per query, one per line.
    """
    # Query-major flag list: len(q) consecutive chunks of len(s) flags each.
    a = [1 if query == string else 0 for query in q for string in s]
    print(a)
    # Bug fix: the chunk step was len(s)+1, which misaligned every chunk after
    # the first (the demo input only worked by coincidence). Each query owns
    # exactly len(s) consecutive flags.
    step = len(s) or 1  # guard the degenerate empty-s case (range step must be non-zero)
    print(*[sum(a[i:i + step]) for i in range(0, len(a), step)], sep='\n')
matchingStrings(['aba', 'baba', 'aba', 'xzxb'], ['aba', 'xzxb', 'ab'])
##############################################################################################
import numpy as np
# Range-update via a difference array: add the amount at the start index,
# subtract it just past the end index, then prefix-sum to recover the fully
# updated array.
q = [[1, 5, 3], [4, 8, 7], [6, 9, 1]]
delta = np.zeros(11, dtype='int')
for start, stop, amount in q:
    delta[start - 1] += amount
    delta[stop] -= amount
a = delta.cumsum()[:10]
print(a.max())
| [
"numpy.zeros"
] | [((827, 852), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': '"""int"""'}), "(10, dtype='int')\n", (835, 852), True, 'import numpy as np\n')] |
#
# Copyright (c) 2016 - 2022 Deephaven Data Labs and Patent Pending
#
""" Utilities for gathering Deephaven table data into Python objects """
import enum
from typing import Any, Type
import jpy
import numpy as np
from deephaven import DHError
_JGatherer = jpy.get_type("io.deephaven.integrations.learn.gather.NumPy")
class MemoryLayout(enum.Enum):
    """Memory layouts for an array.

    ``C`` and ``ROW_MAJOR`` share the value ``True`` (likewise ``FORTRAN`` and
    ``COLUMN_MAJOR`` share ``False``), so the C/Fortran names are enum aliases
    of the row/column-major members.
    """

    ROW_MAJOR = True      # rows are stored contiguously
    COLUMN_MAJOR = False  # columns are stored contiguously
    C = True              # alias: C arrays are row-major
    FORTRAN = False       # alias: Fortran arrays are column-major

    @property
    def is_row_major(self):
        """True when this layout stores rows contiguously."""
        return self.value
def _convert_to_numpy_dtype(np_type: Type) -> Type:
""" Converts an input type to the corresponding NumPy data type. """
if np_type.__module__ == np.__name__:
return np_type
elif np_type == bool:
np_type = np.bool_
elif np_type == float:
np_type = np.double
elif np_type == int:
np_type = np.intc
else:
raise ValueError(f"{np_type} is not a data type that can be converted to a NumPy dtype.")
return np_type
def table_to_numpy_2d(row_set, col_set, order: MemoryLayout = MemoryLayout.ROW_MAJOR, np_type: Type = np.intc) -> np.ndarray:
    """ Converts Deephaven table data to a 2d NumPy array of the appropriate size

    Args:
        row_set: a RowSequence describing the number of rows in the table
        col_set: ColumnSources describing which columns to copy
        order (MemoryLayout): the desired memory layout of the output array
        np_type: the desired NumPy data type of the output NumPy array

    Returns
        a np.ndarray

    Raises:
        DHError
    """
    # Suffix of the Java gatherer method matching each supported dtype.
    gatherer_suffixes = {
        np.byte: "Byte",
        np.short: "Short",
        np.intc: "Int",
        np.int_: "Long",
        np.single: "Float",
        np.double: "Double",
    }
    try:
        np_type = _convert_to_numpy_dtype(np_type)
        if np_type not in gatherer_suffixes:
            raise ValueError(f"Data type {np_type} is not supported.")
        # Resolve only the one gatherer we need (e.g. tensorBuffer2DInt).
        gather = getattr(_JGatherer, "tensorBuffer2D" + gatherer_suffixes[np_type])
        buffer = gather(row_set, col_set, order.is_row_major)
        tensor = np.frombuffer(buffer, dtype=np_type)
        n_rows, n_cols = row_set.intSize(), len(col_set)
        if order.is_row_major:
            # Reshape as (cols, rows) and transpose, as the original did.
            tensor.shape = (n_cols, n_rows)
            return tensor.T
        tensor.shape = (n_rows, n_cols)
        return tensor
    except Exception as e:
        raise DHError(e, f"failed to convert rows: {row_set} and cols: {col_set} to a 2D NumPy array") from e
| [
"numpy.frombuffer",
"jpy.get_type",
"deephaven.DHError"
] | [((264, 324), 'jpy.get_type', 'jpy.get_type', (['"""io.deephaven.integrations.learn.gather.NumPy"""'], {}), "('io.deephaven.integrations.learn.gather.NumPy')\n", (276, 324), False, 'import jpy\n'), ((2727, 2763), 'numpy.frombuffer', 'np.frombuffer', (['buffer'], {'dtype': 'np_type'}), '(buffer, dtype=np_type)\n', (2740, 2763), True, 'import numpy as np\n'), ((3027, 3124), 'deephaven.DHError', 'DHError', (['e', 'f"""failed to convert rows: {row_set} and cols: {col_set} to a 2D NumPy array"""'], {}), "(e,\n f'failed to convert rows: {row_set} and cols: {col_set} to a 2D NumPy array'\n )\n", (3034, 3124), False, 'from deephaven import DHError\n')] |
import os
import numpy as np
import tensorflow as tf
from depth.self_supervised_sfm.utils import readlines
AUTOTUNE = tf.data.experimental.AUTOTUNE
########################
# Constants
#########################
# Normalized KITTI camera intrinsics (fx and fy as fractions of the image
# width/height, principal point at the image centre); scaled to pixel units
# per dataset resolution in KittiSFMDataset.load_tfdataset.
# Fix: np.float was deprecated and removed in NumPy 1.24; np.float64 is the
# dtype it always aliased.
KITTI_K = np.array([[0.58, 0, 0.5, 0],  # fx/width
                    [0, 1.92, 0.5, 0],
                    [0, 0, 1, 0],
                    [0, 0, 0, 1]], dtype=np.float64)
class KittiSFMDataset:
    """KITTI dataset wrapper for self-supervised structure-from-motion.

    Resolves, for every sample of the chosen split, one image path per
    temporal offset in ``frame_idx`` and exposes them as a shuffled, batched
    ``tf.data`` pipeline via :meth:`load_tfdataset`.
    """
    def __init__(self, dataset_dir, load_option,
                 img_size, batch_size,
                 split='eigen_zhou',
                 frame_idx=(0, -1, 1)):
        """
        Args:
            dataset_dir: root directory of the raw KITTI data.
            load_option: split-file prefix (used as '{load_option}_files.txt').
            img_size: (height, width) the decoded images are resized to.
            batch_size: batch size used by load_tfdataset().
            split: only 'eigen_zhou' is implemented; anything else raises
                NotImplementedError.
            frame_idx: temporal frame offsets loaded per sample (0 = current).
        """
        self.h, self.w = img_size
        self.split = split
        self.batch_size = batch_size
        self.load_option = load_option
        self.dataset_dir = dataset_dir
        self.frame_idx = frame_idx
        self.side_map = {"2": 2, "3": 3, "l": 2, "r": 3} # Correspond to image folder
        # Check that the folder exists
        assert os.path.exists(dataset_dir) and os.path.isdir(dataset_dir), f"Dataset {dataset_dir} does not exist !"
        if self.split == 'eigen_zhou':
            filename = os.path.join('splits', f'eigen_zhou/{load_option}_files.txt')
        else:
            raise NotImplementedError
        print(f'Loading from: (unknown)')
        data_paths = readlines(filename)
        self.img_paths = []
        for i, line in enumerate(data_paths):
            # Image files
            # Each split line is "<folder> <frame index> <side>"; the loop
            # variable frame_idx deliberately shadows the ctor argument here.
            folder, frame_idx, side = line.split()
            per_sample_imgs = []
            # Load sequence img
            # One zero-padded .png path per temporal offset in self.frame_idx.
            for t in self.frame_idx:
                f_str = f"{int(frame_idx) + t:010d}"
                image_path = os.path.join(dataset_dir, folder, f"image_0{self.side_map[side]}/data", f_str + '.png')
                per_sample_imgs.append(image_path)
            self.img_paths.append(per_sample_imgs)
        print(f'Total Images for {load_option}: {len(self.img_paths)}')
        self.num_samples = len(self.img_paths)
    def load_tfdataset(self):
        """Build the tf.data pipeline of decoded, resized frame tuples.

        Returns a shuffled, batched, prefetched dataset whose elements are a
        dict holding the frames at offsets frame_idx ('img', 'img-1', 'img1')
        plus the pixel-scaled intrinsics 'K' and their inverse 'K_inv'.
        """
        inputs = {}
        # Intrinsic
        # Scale the normalized KITTI_K intrinsics to this dataset's resolution.
        intrinsic = KITTI_K.copy()
        intrinsic[0, :] *= self.w
        intrinsic[1, :] *= self.h
        inputs['K'] = tf.convert_to_tensor(intrinsic, tf.float32)
        inputs['K_inv'] = tf.linalg.inv(inputs['K'])
        dataset = tf.data.Dataset.from_tensor_slices(self.img_paths)
        dataset = dataset.shuffle(self.num_samples)
        # Load data
        def load_sample(img_paths):
            # NOTE(review): this closure mutates the `inputs` dict captured
            # from the enclosing scope instead of building a fresh dict per
            # sample — confirm this is intended under tf.data graph tracing.
            # load the raw data from the file as a string
            image_cur = tf.io.read_file(img_paths[0])
            image_prev = tf.io.read_file(img_paths[1])
            image_next = tf.io.read_file(img_paths[2])
            image_cur = tf.image.decode_png(image_cur)
            image_prev = tf.image.decode_png(image_prev)
            image_next = tf.image.decode_png(image_next)
            # Resize to (h, w) and normalize pixel values to [0, 1].
            image_cur = tf.cast(tf.image.resize(image_cur, [self.h, self.w]), tf.float32) / 255.
            image_prev = tf.cast(tf.image.resize(image_prev, [self.h, self.w]), tf.float32) / 255.
            image_next = tf.cast(tf.image.resize(image_next, [self.h, self.w]), tf.float32) / 255.
            if self.load_option == "train":
                # Random horizontal flip, applied jointly to all three frames.
                if tf.random.uniform(()) > 0.5:
                    image_cur = tf.image.flip_left_right(image_cur)
                    image_prev = tf.image.flip_left_right(image_prev)
                    image_next = tf.image.flip_left_right(image_next)
            inputs['img'] = image_cur
            inputs['img-1'] = image_prev
            inputs['img1'] = image_next
            return inputs
        dataset = dataset.map(load_sample, num_parallel_calls=AUTOTUNE)
        dataset = dataset.batch(self.batch_size, drop_remainder=True)
        dataset = dataset.prefetch(buffer_size=AUTOTUNE)
        return dataset
| [
"tensorflow.random.uniform",
"os.path.exists",
"tensorflow.image.decode_png",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.image.resize",
"tensorflow.convert_to_tensor",
"tensorflow.io.read_file",
"os.path.join",
"tensorflow.linalg.inv",
"numpy.array",
"os.path.isdir",
"depth.self... | [((225, 321), 'numpy.array', 'np.array', (['[[0.58, 0, 0.5, 0], [0, 1.92, 0.5, 0], [0, 0, 1, 0], [0, 0, 0, 1]]'], {'dtype': 'np.float'}), '([[0.58, 0, 0.5, 0], [0, 1.92, 0.5, 0], [0, 0, 1, 0], [0, 0, 0, 1]],\n dtype=np.float)\n', (233, 321), True, 'import numpy as np\n'), ((1276, 1295), 'depth.self_supervised_sfm.utils.readlines', 'readlines', (['filename'], {}), '(filename)\n', (1285, 1295), False, 'from depth.self_supervised_sfm.utils import readlines\n'), ((2139, 2182), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['intrinsic', 'tf.float32'], {}), '(intrinsic, tf.float32)\n', (2159, 2182), True, 'import tensorflow as tf\n'), ((2209, 2235), 'tensorflow.linalg.inv', 'tf.linalg.inv', (["inputs['K']"], {}), "(inputs['K'])\n", (2222, 2235), True, 'import tensorflow as tf\n'), ((2255, 2305), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['self.img_paths'], {}), '(self.img_paths)\n', (2289, 2305), True, 'import tensorflow as tf\n'), ((933, 960), 'os.path.exists', 'os.path.exists', (['dataset_dir'], {}), '(dataset_dir)\n', (947, 960), False, 'import os\n'), ((965, 991), 'os.path.isdir', 'os.path.isdir', (['dataset_dir'], {}), '(dataset_dir)\n', (978, 991), False, 'import os\n'), ((1097, 1158), 'os.path.join', 'os.path.join', (['"""splits"""', 'f"""eigen_zhou/{load_option}_files.txt"""'], {}), "('splits', f'eigen_zhou/{load_option}_files.txt')\n", (1109, 1158), False, 'import os\n'), ((2497, 2526), 'tensorflow.io.read_file', 'tf.io.read_file', (['img_paths[0]'], {}), '(img_paths[0])\n', (2512, 2526), True, 'import tensorflow as tf\n'), ((2552, 2581), 'tensorflow.io.read_file', 'tf.io.read_file', (['img_paths[1]'], {}), '(img_paths[1])\n', (2567, 2581), True, 'import tensorflow as tf\n'), ((2607, 2636), 'tensorflow.io.read_file', 'tf.io.read_file', (['img_paths[2]'], {}), '(img_paths[2])\n', (2622, 2636), True, 'import tensorflow as tf\n'), ((2662, 2692), 'tensorflow.image.decode_png', 
'tf.image.decode_png', (['image_cur'], {}), '(image_cur)\n', (2681, 2692), True, 'import tensorflow as tf\n'), ((2718, 2749), 'tensorflow.image.decode_png', 'tf.image.decode_png', (['image_prev'], {}), '(image_prev)\n', (2737, 2749), True, 'import tensorflow as tf\n'), ((2775, 2806), 'tensorflow.image.decode_png', 'tf.image.decode_png', (['image_next'], {}), '(image_next)\n', (2794, 2806), True, 'import tensorflow as tf\n'), ((1631, 1723), 'os.path.join', 'os.path.join', (['dataset_dir', 'folder', 'f"""image_0{self.side_map[side]}/data"""', "(f_str + '.png')"], {}), "(dataset_dir, folder, f'image_0{self.side_map[side]}/data', \n f_str + '.png')\n", (1643, 1723), False, 'import os\n'), ((2840, 2884), 'tensorflow.image.resize', 'tf.image.resize', (['image_cur', '[self.h, self.w]'], {}), '(image_cur, [self.h, self.w])\n', (2855, 2884), True, 'import tensorflow as tf\n'), ((2938, 2983), 'tensorflow.image.resize', 'tf.image.resize', (['image_prev', '[self.h, self.w]'], {}), '(image_prev, [self.h, self.w])\n', (2953, 2983), True, 'import tensorflow as tf\n'), ((3037, 3082), 'tensorflow.image.resize', 'tf.image.resize', (['image_next', '[self.h, self.w]'], {}), '(image_next, [self.h, self.w])\n', (3052, 3082), True, 'import tensorflow as tf\n'), ((3167, 3188), 'tensorflow.random.uniform', 'tf.random.uniform', (['()'], {}), '(())\n', (3184, 3188), True, 'import tensorflow as tf\n'), ((3228, 3263), 'tensorflow.image.flip_left_right', 'tf.image.flip_left_right', (['image_cur'], {}), '(image_cur)\n', (3252, 3263), True, 'import tensorflow as tf\n'), ((3297, 3333), 'tensorflow.image.flip_left_right', 'tf.image.flip_left_right', (['image_prev'], {}), '(image_prev)\n', (3321, 3333), True, 'import tensorflow as tf\n'), ((3367, 3403), 'tensorflow.image.flip_left_right', 'tf.image.flip_left_right', (['image_next'], {}), '(image_next)\n', (3391, 3403), True, 'import tensorflow as tf\n')] |
import ecoblock_test.simulation as sim
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# Sweep dimensions: sim_number runs 1..NUMBER_OF_SIMULATIONS for each
# sim_id in 1..NUMBER_OF_SIMULATIONS_ID (see the nested loop below).
NUMBER_OF_SIMULATIONS = 20
NUMBER_OF_SIMULATIONS_ID = 28
# Accumulators filled once per simulation run.
cost_record = []
flywheel_final_soc = []
def plot_hist(data):
    """Draw a 30-bin histogram of the per-simulation daily cost and save it
    to 'hist_cost.png'."""
    plt.figure()
    data.hist(bins=30)
    plt.grid(True)
    plt.xlabel('Cost in $/day')
    plt.ylabel('Simulation results')
    plt.savefig('hist_cost.png')
# One entry per simulation run, aligned index-for-index with cost_record
# and flywheel_final_soc.
sim_id_list = []
sim_number_list = []
for sim_id in range(1, NUMBER_OF_SIMULATIONS_ID + 1):
    for sim_number in range(1, NUMBER_OF_SIMULATIONS + 1):
        print('sim_id:', sim_id, 'and sim_number:', sim_number)
        sim_id_list.append(sim_id)
        sim_number_list.append(sim_number)
        # Build, load and run one simulation for this (sim_number, sim_id) pair.
        system = sim.System(sim_number, sim_id)
        system.load_data()
        system.run_simulation()
        cost_record.append(system.get_cost())
        # NOTE(review): this sums the whole SOC trace, although the variable
        # name suggests only the final value — confirm intended.
        flywheel_final_soc.append(np.sum(system.flywheel.soc_record))
        print('Is at cost:', system.get_cost())
        # Save the figure produced by plot_results() under a per-run name.
        system.plot_results()
        file_name = 'normal' + str(sim_number) + '-' + str(sim_id) + '.png'
        plt.savefig(file_name)
# Assemble one row per run and persist everything to CSV, then plot the
# overall cost distribution.
data_result = pd.DataFrame(sim_id_list, columns=['sim_id'])
data_result['sim_num'] = sim_number_list
data_result['cost'] = cost_record
data_result['flywheel_final_soc'] = flywheel_final_soc
data_result.to_csv('data_result.csv')
cost_record_df = pd.DataFrame(cost_record, columns=['cost'])
plot_hist(cost_record_df['cost'])
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"ecoblock_test.simulation.System",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.sum",
"matplotlib.pyplot.figure",
"pandas.DataFrame"
] | [((1136, 1181), 'pandas.DataFrame', 'pd.DataFrame', (['sim_id_list'], {'columns': "['sim_id']"}), "(sim_id_list, columns=['sim_id'])\n", (1148, 1181), True, 'import pandas as pd\n'), ((1367, 1410), 'pandas.DataFrame', 'pd.DataFrame', (['cost_record'], {'columns': "['cost']"}), "(cost_record, columns=['cost'])\n", (1379, 1410), True, 'import pandas as pd\n'), ((236, 248), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (246, 248), True, 'import matplotlib.pyplot as plt\n'), ((300, 327), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Cost in $/day"""'], {}), "('Cost in $/day')\n", (310, 327), True, 'import matplotlib.pyplot as plt\n'), ((332, 364), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Simulation results"""'], {}), "('Simulation results')\n", (342, 364), True, 'import matplotlib.pyplot as plt\n'), ((369, 383), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (377, 383), True, 'import matplotlib.pyplot as plt\n'), ((388, 416), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""hist_cost.png"""'], {}), "('hist_cost.png')\n", (399, 416), True, 'import matplotlib.pyplot as plt\n'), ((729, 759), 'ecoblock_test.simulation.System', 'sim.System', (['sim_number', 'sim_id'], {}), '(sim_number, sim_id)\n', (739, 759), True, 'import ecoblock_test.simulation as sim\n'), ((1098, 1120), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_name'], {}), '(file_name)\n', (1109, 1120), True, 'import matplotlib.pyplot as plt\n'), ((899, 933), 'numpy.sum', 'np.sum', (['system.flywheel.soc_record'], {}), '(system.flywheel.soc_record)\n', (905, 933), True, 'import numpy as np\n')] |
import glob
from pathlib import Path
import json
import re
import os
import numpy as np
import pandas as pd
from isochrones.models import StellarModelGrid, ModelGridInterpolator
from isochrones.mist.models import MISTEvolutionTrackGrid
from isochrones.mist.bc import MISTBolometricCorrectionGrid
from isochrones.mags import interp_mag_4d, interp_mags_4d
from isochrones.priors import FlatPrior
class AlphaMLTGrid(MISTEvolutionTrackGrid):
    """MIST evolution-track grid extended with a variable mixing-length
    parameter (alpha_mlt) as a fourth interpolation dimension.

    File names under ``dataroot`` encode integer tokens that ``index.json``
    maps onto physical (mass, feh, alpha_mlt) values.
    """
    name = "mist_mlt"
    index_cols = ("initial_feh", "initial_mass", "alpha_mlt", "EEP")
    ndim = 4
    # Raw string: "\d" in a plain literal triggers invalid-escape warnings.
    filename_pattern = r"\d+_\d+_\d+\.eep"
    feh_col = "initial_feh"
    default_columns = StellarModelGrid.default_columns + ("phase",)
    dataroot = Path("~/alpha_mlt/trimmed").expanduser()
    # Read the token->value lookup without leaking an open file handle.
    index = json.loads((dataroot / "index.json").read_text())
    fehs = np.array(list(index["feh"].values()))
    n_fehs = len(fehs)
    bounds = (
        ("eep", (0, 450)),
        ("age", (5, 10.3)),
        ("alpha_mlt", (0.31623, 3.16228)),
        ("feh", (-4.105, 0.395)),
        ("mass", (0.1, 0.775)),
    )
    @property
    def kwarg_tag(self):
        """Suffix used by the parent grid machinery for generated file names."""
        return "_test"
    @classmethod
    def parse_filename(cls, path):
        """Return (mass, feh, alpha_mlt) decoded from an eep file name.

        Raises:
            ValueError: if *path* does not match '<mass>_<feh>_<alpha>.eep'.
        """
        index = cls.index
        m = re.search("([0-9]+)_([0-9]+)_([0-9]+).eep", str(path))
        if m:
            mass = float(index["mass"][m.group(1)])
            feh = float(index["feh"][m.group(2)])
            alpha_mlt = float(index["alpha"][m.group(3)])
            return (mass, feh, alpha_mlt)
        else:
            raise ValueError(f"Cannot parse {path}!")
    @classmethod
    def get_mass(cls, filename):
        """Return only the initial mass encoded in *filename*."""
        mass, _, _ = cls.parse_filename(filename)
        return mass
    @classmethod
    def get_feh(cls, filename):
        """Return only the metallicity encoded in *filename*."""
        # Bug fix: this classmethod referenced the undefined name `self`,
        # raising NameError on every call; use `cls` instead.
        _, feh, _ = cls.parse_filename(filename)
        return feh
    def get_directory_path(self):
        """All track files live directly under the data root."""
        return self.dataroot
    def compute_surf_feh(self, df):
        """Surface metallicity is taken equal to the initial metallicity."""
        return df["initial_feh"]
    def df_all(self):
        # Deliberately call the StellarModelGrid implementation directly,
        # bypassing any MISTEvolutionTrackGrid override — TODO confirm intent.
        return StellarModelGrid.df_all(self)
    @classmethod
    def to_df(cls, filename):
        """Load one track file and attach the (feh, alpha_mlt) decoded from
        its file name as extra columns."""
        df = super().to_df(filename)
        _, feh, alpha_mlt = cls.parse_filename(filename)
        df["alpha_mlt"] = alpha_mlt
        df["feh"] = feh
        df["initial_feh"] = feh
        return df
class AlphaMLTInterpolator(ModelGridInterpolator):
    """Interpolator over the 4-D (mass, eep, feh, alpha_mlt) track grid,
    combined with MIST bolometric corrections to produce magnitudes.
    """
    grid_type = AlphaMLTGrid
    bc_type = MISTBolometricCorrectionGrid
    # External parameter order accepted by __call__ / interp_value / interp_mag.
    param_names = ("mass", "eep", "feh", "alpha_mlt", "distance", "AV")
    eep_bounds = (0, 450)
    alpha_mlt_bounds = (0.31623, 3.16228)
    eep_replaces = "age"
    # desired: mass, eep, feh, alpha_mlt, distance, AV
    # Index permutation applied to incoming parameter tuples before hitting
    # the grid (see interp_value / interp_mag below).
    _param_index_order = (
        2,
        0,
        3,
        1,
        4,
        5,
    )
    def __call__(self, p1, p2, p3, p4, distance=10.0, AV=0.0):
        """Evaluate grid properties and magnitudes at the given parameters.

        p1..p4 presumably follow param_names order (mass, eep, feh,
        alpha_mlt) — TODO confirm. Returns a DataFrame with one column per
        grid property plus '{band}_mag' columns and the input alpha_mlt.
        """
        # Broadcast all six inputs to a common shape, as 1-D float arrays.
        p1, p2, p3, p4, dist, AV = [
            np.atleast_1d(a).astype(float).squeeze()
            for a in np.broadcast_arrays(p1, p2, p3, p4, distance, AV)
        ]
        pars = [p1, p2, p3, p4, dist, AV]
        # print(pars)
        prop_cols = self.model_grid.df.columns
        props = self.interp_value(pars, prop_cols)
        # Only the magnitudes are used; the first three returns are discarded.
        _, _, _, mags = self.interp_mag(pars, self.bands)
        cols = list(prop_cols) + ["{}_mag".format(b) for b in self.bands]
        values = np.concatenate([np.atleast_2d(props), np.atleast_2d(mags)], axis=1)
        df = pd.DataFrame(values, columns=cols)
        df["alpha_mlt"] = p4
        return df
    def interp_value(self, pars, props):
        """
        pars : age, feh, eep, [distance, AV]
        """
        try:
            # Fancy-index reorder works when pars is an indexable array-like.
            pars = np.atleast_1d(pars[self.param_index_order])
        except TypeError:
            # NOTE(review): only the first four indices are used here, so
            # distance/AV are dropped in this fallback — confirm this is
            # intended for plain 4-element sequences.
            i0, i1, i2, i3, i4, i5 = self.param_index_order
            pars = [pars[i0], pars[i1], pars[i2], pars[i3]]
        # print(pars)
        return self.model_grid.interp(pars, props)
    def interp_mag(self, pars, bands):
        """
        pars : age, feh, eep, distance, AV
        """
        # Translate band names into column indices of the BC grid.
        if not bands:
            i_bands = np.array([], dtype=int)
        else:
            i_bands = [self.bc_grid.interp.columns.index(b) for b in bands]
        try:
            pars = np.atleast_1d(pars).astype(float).squeeze()
            if pars.ndim > 1:
                # Multi-point input: fall through to the vectorized path.
                raise ValueError
            # Single parameter tuple: scalar 4-D interpolation.
            return interp_mag_4d(
                pars,
                self.param_index_order,
                self.model_grid.interp.grid,
                self.model_grid.interp.column_index["Teff"],
                self.model_grid.interp.column_index["logg"],
                self.model_grid.interp.column_index["feh"],
                self.model_grid.interp.column_index["Mbol"],
                *self.model_grid.interp.index_columns,
                self.bc_grid.interp.grid,
                i_bands,
                *self.bc_grid.interp.index_columns,
            )
        except (TypeError, ValueError):
            # Broadcast appropriately.
            b = np.broadcast(*pars)
            pars = np.array([np.resize(x, b.shape).astype(float) for x in pars])
            return interp_mags_4d(
                pars,
                self.param_index_order,
                self.model_grid.interp.grid,
                self.model_grid.interp.column_index["Teff"],
                self.model_grid.interp.column_index["logg"],
                self.model_grid.interp.column_index["feh"],
                self.model_grid.interp.column_index["Mbol"],
                *self.model_grid.interp.index_columns,
                self.bc_grid.interp.grid,
                i_bands,
                *self.bc_grid.interp.index_columns,
            )
| [
"numpy.atleast_2d",
"pathlib.Path",
"os.path.join",
"numpy.broadcast",
"numpy.array",
"numpy.resize",
"isochrones.mags.interp_mag_4d",
"isochrones.mags.interp_mags_4d",
"pandas.DataFrame",
"isochrones.models.StellarModelGrid.df_all",
"numpy.broadcast_arrays",
"numpy.atleast_1d"
] | [((1999, 2028), 'isochrones.models.StellarModelGrid.df_all', 'StellarModelGrid.df_all', (['self'], {}), '(self)\n', (2022, 2028), False, 'from isochrones.models import StellarModelGrid, ModelGridInterpolator\n'), ((3353, 3387), 'pandas.DataFrame', 'pd.DataFrame', (['values'], {'columns': 'cols'}), '(values, columns=cols)\n', (3365, 3387), True, 'import pandas as pd\n'), ((705, 732), 'pathlib.Path', 'Path', (['"""~/alpha_mlt/trimmed"""'], {}), "('~/alpha_mlt/trimmed')\n", (709, 732), False, 'from pathlib import Path\n'), ((773, 809), 'os.path.join', 'os.path.join', (['dataroot', '"""index.json"""'], {}), "(dataroot, 'index.json')\n", (785, 809), False, 'import os\n'), ((3579, 3622), 'numpy.atleast_1d', 'np.atleast_1d', (['pars[self.param_index_order]'], {}), '(pars[self.param_index_order])\n', (3592, 3622), True, 'import numpy as np\n'), ((4006, 4029), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (4014, 4029), True, 'import numpy as np\n'), ((4279, 4664), 'isochrones.mags.interp_mag_4d', 'interp_mag_4d', (['pars', 'self.param_index_order', 'self.model_grid.interp.grid', "self.model_grid.interp.column_index['Teff']", "self.model_grid.interp.column_index['logg']", "self.model_grid.interp.column_index['feh']", "self.model_grid.interp.column_index['Mbol']", '*self.model_grid.interp.index_columns', 'self.bc_grid.interp.grid', 'i_bands', '*self.bc_grid.interp.index_columns'], {}), "(pars, self.param_index_order, self.model_grid.interp.grid,\n self.model_grid.interp.column_index['Teff'], self.model_grid.interp.\n column_index['logg'], self.model_grid.interp.column_index['feh'], self.\n model_grid.interp.column_index['Mbol'], *self.model_grid.interp.\n index_columns, self.bc_grid.interp.grid, i_bands, *self.bc_grid.interp.\n index_columns)\n", (4292, 4664), False, 'from isochrones.mags import interp_mag_4d, interp_mags_4d\n'), ((2901, 2950), 'numpy.broadcast_arrays', 'np.broadcast_arrays', (['p1', 'p2', 'p3', 'p4', 'distance', 'AV'], {}), 
'(p1, p2, p3, p4, distance, AV)\n', (2920, 2950), True, 'import numpy as np\n'), ((3288, 3308), 'numpy.atleast_2d', 'np.atleast_2d', (['props'], {}), '(props)\n', (3301, 3308), True, 'import numpy as np\n'), ((3310, 3329), 'numpy.atleast_2d', 'np.atleast_2d', (['mags'], {}), '(mags)\n', (3323, 3329), True, 'import numpy as np\n'), ((4927, 4946), 'numpy.broadcast', 'np.broadcast', (['*pars'], {}), '(*pars)\n', (4939, 4946), True, 'import numpy as np\n'), ((5047, 5433), 'isochrones.mags.interp_mags_4d', 'interp_mags_4d', (['pars', 'self.param_index_order', 'self.model_grid.interp.grid', "self.model_grid.interp.column_index['Teff']", "self.model_grid.interp.column_index['logg']", "self.model_grid.interp.column_index['feh']", "self.model_grid.interp.column_index['Mbol']", '*self.model_grid.interp.index_columns', 'self.bc_grid.interp.grid', 'i_bands', '*self.bc_grid.interp.index_columns'], {}), "(pars, self.param_index_order, self.model_grid.interp.grid,\n self.model_grid.interp.column_index['Teff'], self.model_grid.interp.\n column_index['logg'], self.model_grid.interp.column_index['feh'], self.\n model_grid.interp.column_index['Mbol'], *self.model_grid.interp.\n index_columns, self.bc_grid.interp.grid, i_bands, *self.bc_grid.interp.\n index_columns)\n", (5061, 5433), False, 'from isochrones.mags import interp_mag_4d, interp_mags_4d\n'), ((2839, 2855), 'numpy.atleast_1d', 'np.atleast_1d', (['a'], {}), '(a)\n', (2852, 2855), True, 'import numpy as np\n'), ((4153, 4172), 'numpy.atleast_1d', 'np.atleast_1d', (['pars'], {}), '(pars)\n', (4166, 4172), True, 'import numpy as np\n'), ((4976, 4997), 'numpy.resize', 'np.resize', (['x', 'b.shape'], {}), '(x, b.shape)\n', (4985, 4997), True, 'import numpy as np\n')] |
import sys
from cv2 import cv2
import numpy as np
import mss
from pynput.mouse import Button, Controller
while True:
    # Grab a fixed 420x240 screen region each iteration.
    # NOTE(review): the mss instance is re-created every loop; could be hoisted.
    stc = mss.mss()
    scr = stc.grab(
        {
            "left": 744,
            "top": 152,
            "width": 420,
            "height": 240,
        }
    )
    frame = np.array(scr)
    hsvframe = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # NOTE(review): despite the "white" names, the code below treats this HSV
    # range as green (see x_green/y_green and the "click green" branch).
    lower_white = np.array([36, 25, 25], dtype=np.uint8)
    upper_white = np.array([86, 255, 255], dtype=np.uint8)
    white_mask = cv2.inRange(hsvframe, lower_white, upper_white)
    res_white = cv2.bitwise_and(frame, frame, mask=white_mask)  # NOTE(review): unused result
    countours = cv2.findContours(white_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[0]
    # more sensitive contours
    # Keep only the 100 largest contours, biggest first.
    countours = sorted(countours, key=cv2.contourArea, reverse=True)[:100]
    for contour in countours:
        area = cv2.contourArea(contour)
        if area > 150:  # ignore small blobs
            x1, y1, w1, h1 = cv2.boundingRect(contour)
            # Centre of the contour's bounding box.
            x_green = int(x1 + w1 / 2)
            y_green = int(y1 + h1 / 2)
            if y_green > 50:
                # click green
                mouse = Controller()
                # Fixed click target (800, 200), not the contour centre.
                mouse.position = (800, 200)
                mouse.click(Button.left, 1)
    cv2.imshow("main", frame)
    cv2.setWindowProperty("main", cv2.WND_PROP_TOPMOST, 1)
    # Quit on 'q': tear down the window and exit the process.
    if cv2.waitKey(1) & 0xFF == ord("q"):
        cv2.destroyAllWindows()
        cv2.waitKey(1)
        flag2 = False  # NOTE(review): flag2 is not read anywhere visible
        sys.exit()
| [
"cv2.cv2.setWindowProperty",
"cv2.cv2.inRange",
"sys.exit",
"mss.mss",
"cv2.cv2.waitKey",
"cv2.cv2.bitwise_and",
"cv2.cv2.destroyAllWindows",
"numpy.array",
"cv2.cv2.contourArea",
"cv2.cv2.findContours",
"pynput.mouse.Controller",
"cv2.cv2.cvtColor",
"cv2.cv2.boundingRect",
"cv2.cv2.imshow... | [((125, 134), 'mss.mss', 'mss.mss', ([], {}), '()\n', (132, 134), False, 'import mss\n'), ((238, 251), 'numpy.array', 'np.array', (['scr'], {}), '(scr)\n', (246, 251), True, 'import numpy as np\n'), ((264, 302), 'cv2.cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (276, 302), False, 'from cv2 import cv2\n'), ((318, 356), 'numpy.array', 'np.array', (['[36, 25, 25]'], {'dtype': 'np.uint8'}), '([36, 25, 25], dtype=np.uint8)\n', (326, 356), True, 'import numpy as np\n'), ((372, 412), 'numpy.array', 'np.array', (['[86, 255, 255]'], {'dtype': 'np.uint8'}), '([86, 255, 255], dtype=np.uint8)\n', (380, 412), True, 'import numpy as np\n'), ((427, 474), 'cv2.cv2.inRange', 'cv2.inRange', (['hsvframe', 'lower_white', 'upper_white'], {}), '(hsvframe, lower_white, upper_white)\n', (438, 474), False, 'from cv2 import cv2\n'), ((489, 535), 'cv2.cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'white_mask'}), '(frame, frame, mask=white_mask)\n', (504, 535), False, 'from cv2 import cv2\n'), ((1034, 1059), 'cv2.cv2.imshow', 'cv2.imshow', (['"""main"""', 'frame'], {}), "('main', frame)\n", (1044, 1059), False, 'from cv2 import cv2\n'), ((1061, 1115), 'cv2.cv2.setWindowProperty', 'cv2.setWindowProperty', (['"""main"""', 'cv2.WND_PROP_TOPMOST', '(1)'], {}), "('main', cv2.WND_PROP_TOPMOST, 1)\n", (1082, 1115), False, 'from cv2 import cv2\n'), ((549, 617), 'cv2.cv2.findContours', 'cv2.findContours', (['white_mask', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(white_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (565, 617), False, 'from cv2 import cv2\n'), ((757, 781), 'cv2.cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (772, 781), False, 'from cv2 import cv2\n'), ((1158, 1181), 'cv2.cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1179, 1181), False, 'from cv2 import cv2\n'), ((1184, 1198), 'cv2.cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', 
(1195, 1198), False, 'from cv2 import cv2\n'), ((1217, 1227), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1225, 1227), False, 'import sys\n'), ((819, 844), 'cv2.cv2.boundingRect', 'cv2.boundingRect', (['contour'], {}), '(contour)\n', (835, 844), False, 'from cv2 import cv2\n'), ((1121, 1135), 'cv2.cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1132, 1135), False, 'from cv2 import cv2\n'), ((955, 967), 'pynput.mouse.Controller', 'Controller', ([], {}), '()\n', (965, 967), False, 'from pynput.mouse import Button, Controller\n')] |
# coding=utf-8
###
# Patient.py -
# This file contains the definition of class Patient used to handle patients scans, store their lesions
# and survival time.
# Patient.py also implements handy function to pre process a pile of dicom image e.g. conversion to SUV
#
# Author : <NAME> - <EMAIL>
#
# Created on : 16/02/2018
###
import numpy as np
import pydicom
import os
import SimpleITK as sitk
import dicom_numpy
# --------------------------
# Patient class definition
# --------------------------
class Patient:
    """One patient: reference id, list of lesions, and the SUV-ready 3D scan."""

    def __init__(self, ref, PATH_TO_DATA):
        """Build a patient from its reference string ("xxx-xxx"); the image
        attribute is a SimpleITK image loaded from PATH_TO_DATA/ref/dcm."""
        self.ref = ref
        self.list_lesions = []
        self.image = initializePatientImage(os.path.join(PATH_TO_DATA, ref, "dcm"))
# --------------------------
# Set of functions used to pre process dcm slice
# --------------------------
def isSliceUnitSUV(dcmSlice):
    """Take a dcm slice in input and returns True if the voxels are expressed
    in SUV - Standardized uptake value. Returns False otherwise."""
    # 00541001 corresponds to the tag index of the voxel unit in the dcm file
    units = dcmSlice[0x00541001].value.lower()
    # Idiom fix: return the boolean test directly instead of
    # `if x: return True else: return False`.
    return "suv" in units
def setSliceUnitToSUV(pathToDcmSlice):
    """Take an absolute path to a dcm file and change its dicom tag related to units [0054,1001] to 'suv' and save the
    dcm."""
    dcmSlice = pydicom.dcmread(pathToDcmSlice)
    # 0x00541001 is the Units tag that isSliceUnitSUV() checks.
    dcmSlice[0x00541001].value = 'suv'
    # Overwrites the original file in place.
    dcmSlice.save_as(pathToDcmSlice)
def multiplySlice(scalar, pathToDcmSlice):
    """WARNING : Deprecated function. This function is no longer used in the extraction pipe and it's expected behavior
    is not sure to be verified. To delete.
    Take a scalar value and the absolute path of a dcm slice.
    Multiply all pixels in the slice per the scalar value and save the slice under the same slice name.
    Warning : saving erase the original data for the new multiplied ones."""
    dcmSlice = pydicom.dcmread(pathToDcmSlice)
    # NOTE(review): element-wise writes through pixel_array.flat rely on
    # pydicom caching the decoded array — confirm before reviving this code.
    for n, val in enumerate(dcmSlice.pixel_array.flat):
        dcmSlice.pixel_array.flat[n] = scalar * dcmSlice.pixel_array.flat[n]
    # Warning: passing to string may change the value of the voxels.
    # To debug : try printing the values of a voxel, and its dtype, before and after the multiplication
    # and before / after the tostring() method (pretty sure this is buggy part)
    dcmSlice.PixelData = dcmSlice.pixel_array.tostring()
    dcmSlice.save_as(pathToDcmSlice)
def dcmToSimpleITK(dcmDirectory):
    """Return a simple ITK image from a pile of dcm files. The returned sITK image has been rescaled based on the
    value of the rescale slope on the dicom tag. Array-like data of the 3D image can be obtained with the
    GetArrayFromImage() method"""
    # Collect every DICOM file found anywhere below dcmDirectory.
    dcmPaths = []
    for root, _subDirs, fileNames in os.walk(dcmDirectory):
        dcmPaths.extend(
            os.path.join(root, name) for name in fileNames if '.dcm' in name.lower()
        )
    # Read all slices and fuse them into a single rescaled 3D volume.
    slices = [pydicom.dcmread(path) for path in dcmPaths]
    voxel_ndarray, _ijk_to_xyz = dicom_numpy.combine_slices(slices)
    return sitk.GetImageFromArray(voxel_ndarray)
def convertToSUV(dcmDirectory, sITKImage):
    """Return a new simple ITK image where the voxels have been converted to SUV.
    Converts the voxels data from a simple ITK image to SUV, based on the SUV factor found (or computed if not) in the
    matching dicom slices from the dcmDirectory. The dicom slices and input simple ITK image are not modified.
    Warning 1: This function assumes all the patient DCM tags used below are defined and set to their correct value.
    Warning 2: No sanity check is done on the delay between injection and acquisition to see if belongs
    to the range of EANM guidelines.
    Warning 3: This function assumes all the slices are in the same unit (e.g. all slices' voxels are in SUV).
    Warning 4: simple ITK image passed as input are assumed to have been rescaled properly with the matching rescale
    slope found in the dicom tag of the matching dicom file."""
    # Get the pile of dcm slices
    list_dcmFiles = []
    for directory, subDirectory, list_dcmFileNames in os.walk(dcmDirectory):
        for fileName in list_dcmFileNames:
            if '.dcm' in fileName.lower(): # check whether file is a dicom
                list_dcmFiles.append(os.path.join(directory, fileName))
    # Choose the first slice to check for voxel unit
    refDicomSlice = pydicom.dcmread(list_dcmFiles[0])
    # Compute the suvFactor
    manufacturer = refDicomSlice[0x00080070].value.lower()
    manufacturerIsPhilips = "philips" in manufacturer
    units = refDicomSlice[0x00541001].value.lower()
    unitIsNotBqml = "bqml" not in units
    # Philips machines have specific tags
    if manufacturerIsPhilips and unitIsNotBqml:
        # Philips private tag 0x70531000 stores the SUV factor directly.
        suvFactor = float(refDicomSlice[0x00531000 + 0x70000000].value) if False else float(refDicomSlice[0x70531000].value)
    else:
        # Get infos from the patient dcm tags
        # Acquisition (0x00080031) and injection (0x00181072) wall-clock times,
        # read as HHMM strings per the slicing below.
        acquisitionHour = int(refDicomSlice[0x00080031].value[0:2])
        acquisitionMinute = int(refDicomSlice[0x00080031].value[2:4])
        injectionHour = int(refDicomSlice[0x00540016].value[0][0x00181072].value[0:2])
        injectionMinute = int(refDicomSlice[0x00540016].value[0][0x00181072].value[2:4])
        deltaHour = acquisitionHour - injectionHour
        deltaMinute = acquisitionMinute - injectionMinute
        # Borrow an hour when the minute difference is negative.
        if (deltaMinute < 0):
            deltaMinute = 60 + deltaMinute
            deltaHour = deltaHour - 1
        # Computing of the suvFactor from bqml
        # 109.8 min is presumably the fluorine-18 half-life — confirm tracer.
        decayFactor = np.exp(-np.log(2) * ((60 * deltaHour) + deltaMinute) / 109.8)
        radioNuclideTotalDose = float(refDicomSlice[0x00540016].value[0][0x00181074].value)
        correctedActivity = decayFactor * radioNuclideTotalDose
        patientMass = float(refDicomSlice[0x00101030].value)
        # SUV factor: body mass in grams over decay-corrected injected activity.
        suvFactor = (patientMass * 1000) / correctedActivity
    # All slices are multiplied per the same suv factor. We assume it's a constant for a patient
    voxels = sitk.GetArrayFromImage(sITKImage).astype('float32')
    SUVVoxels = suvFactor * voxels
    SUVsITKImage = sitk.GetImageFromArray(SUVVoxels)
    return SUVsITKImage
def initializePatientImage(dcmDirectory):
"""From a dicom directory, output the rescaled and converted to SUV simple ITK image used to compute features"""
# Compute the simple ITK image rescaled per the rescale slope
rescaledImage = dcmToSimpleITK(dcmDirectory)
list_dcmFiles = []
for directory, subDirectory, list_dcmFileNames in os.walk(dcmDirectory):
for fileName in list_dcmFileNames:
if '.dcm' in fileName.lower(): # check whether file is a dicom
list_dcmFiles.append(os.path.join(directory, fileName))
# Choose the first slice to check for voxel unit
refDicomSlice = pydicom.dcmread(list_dcmFiles[0])
# If the slice is already in SUV we assume all the dcm pile for a same patient is in SUV, otherwise we convert
if isSliceUnitSUV(refDicomSlice):
print(" Patient's voxels value are already in SUV. No conversion needed.")
return rescaledImage
else:
print(" Patient's voxels value are not in SUV. Converting the patient's voxels in SUV ...")
image3D = convertToSUV(dcmDirectory, rescaledImage)
print(" Conversion to SUV done")
# return image3D
return rescaledImage | [
"SimpleITK.GetImageFromArray",
"pydicom.dcmread",
"dicom_numpy.combine_slices",
"numpy.log",
"os.path.join",
"SimpleITK.GetArrayFromImage",
"os.walk"
] | [((1532, 1563), 'pydicom.dcmread', 'pydicom.dcmread', (['pathToDcmSlice'], {}), '(pathToDcmSlice)\n', (1547, 1563), False, 'import pydicom\n'), ((2107, 2138), 'pydicom.dcmread', 'pydicom.dcmread', (['pathToDcmSlice'], {}), '(pathToDcmSlice)\n', (2122, 2138), False, 'import pydicom\n'), ((2988, 3009), 'os.walk', 'os.walk', (['dcmDirectory'], {}), '(dcmDirectory)\n', (2995, 3009), False, 'import os\n'), ((3279, 3315), 'dicom_numpy.combine_slices', 'dicom_numpy.combine_slices', (['dcmImage'], {}), '(dcmImage)\n', (3305, 3315), False, 'import dicom_numpy\n'), ((3337, 3374), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['voxel_ndarray'], {}), '(voxel_ndarray)\n', (3359, 3374), True, 'import SimpleITK as sitk\n'), ((4419, 4440), 'os.walk', 'os.walk', (['dcmDirectory'], {}), '(dcmDirectory)\n', (4426, 4440), False, 'import os\n'), ((4707, 4740), 'pydicom.dcmread', 'pydicom.dcmread', (['list_dcmFiles[0]'], {}), '(list_dcmFiles[0])\n', (4722, 4740), False, 'import pydicom\n'), ((6353, 6386), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['SUVVoxels'], {}), '(SUVVoxels)\n', (6375, 6386), True, 'import SimpleITK as sitk\n'), ((6767, 6788), 'os.walk', 'os.walk', (['dcmDirectory'], {}), '(dcmDirectory)\n', (6774, 6788), False, 'import os\n'), ((7055, 7088), 'pydicom.dcmread', 'pydicom.dcmread', (['list_dcmFiles[0]'], {}), '(list_dcmFiles[0])\n', (7070, 7088), False, 'import pydicom\n'), ((735, 773), 'os.path.join', 'os.path.join', (['PATH_TO_DATA', 'ref', '"""dcm"""'], {}), "(PATH_TO_DATA, ref, 'dcm')\n", (747, 773), False, 'import os\n'), ((3182, 3211), 'pydicom.dcmread', 'pydicom.dcmread', (['dcmSliceFile'], {}), '(dcmSliceFile)\n', (3197, 3211), False, 'import pydicom\n'), ((6243, 6276), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['sITKImage'], {}), '(sITKImage)\n', (6265, 6276), True, 'import SimpleITK as sitk\n'), ((3132, 3164), 'os.path.join', 'os.path.join', (['directory', 'dcmFile'], {}), '(directory, dcmFile)\n', (3144, 
3164), False, 'import os\n'), ((4598, 4631), 'os.path.join', 'os.path.join', (['directory', 'fileName'], {}), '(directory, fileName)\n', (4610, 4631), False, 'import os\n'), ((6946, 6979), 'os.path.join', 'os.path.join', (['directory', 'fileName'], {}), '(directory, fileName)\n', (6958, 6979), False, 'import os\n'), ((5798, 5807), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (5804, 5807), True, 'import numpy as np\n')] |
import numpy as np
import open3d as o3d
from argparse import ArgumentParser
import os
parser = ArgumentParser()
parser.add_argument("--red",type=float, default= 0.5)
parser.add_argument("--blue", type=float, default = 0.4)
parser.add_argument("--green", type=float, default = 0.4)
parser.add_argument("--scan_folder",type=str, default = "scatters")
parser.add_argument("--extrinsic_name", type = str, default = "extrinsic")
args = parser.parse_args()
def computeTF(pointcloud_registers):
top = np.zeros(3,dtype=np.float)
bottom = np.zeros(3,dtype=np.float)
left = np.zeros(3,dtype=np.float)
right = np.zeros(3,dtype=np.float)
for pc in pointcloud_registers:
pc.remove_statistical_outlier(nb_neighbors=20, std_ratio=0.04)
points = np.asarray(pc.points)
top += points[np.argmax(points[:,1])]
bottom += points[np.argmin(points[:,1])]
left += points[np.argmin(points[:,0])]
right += points[np.argmax(points[:,0])]
top /= len(pointcloud_registers)
bottom /= len(pointcloud_registers)
left /= len(pointcloud_registers)
right /= len(pointcloud_registers)
R = np.zeros((3,3))
R[:,0] = (right - left)/np.linalg.norm(right-left,2)
R[:,1] = (top - bottom)/np.linalg.norm(top - bottom,2)
R[:,2] = np.cross(R[:,0],R[:,1])
R = R.T
# set the point with minimum x then minimum y as origin
T = -(top + bottom+left+right)/4
return R, T
def process_point_cloud(pcd):
color = np.asarray(pcd.colors)
color[:,[0,2]] = color[:,[2,0]]
pcd.colors = o3d.utility.Vector3dVector(color)
mask = (color[:,0] > args.red) * (color[:,1] < args.green) * (color[:,2] < args.blue)
points = np.asarray(pcd.points)
truncated_pcd = o3d.geometry.PointCloud()
truncated_pcd.points = o3d.utility.Vector3dVector(points[mask])
truncated_pcd.colors = o3d.utility.Vector3dVector(color[mask])
return truncated_pcd
filelist = os.listdir(f"./pointcloud_raw/{args.scan_folder}/")
truncated_pcds = []
for file in filelist:
filename = f"./pointcloud_raw/{args.scan_folder}/{file}"
pcd = o3d.io.read_point_cloud(filename)
truncated_pcd = process_point_cloud(pcd)
truncated_pcds.append(truncated_pcd)
R, T = computeTF(truncated_pcds)
np.savez(f"./extrinsic/{args.extrinsic_name}.npz", R = R, T = T)
| [
"numpy.savez",
"os.listdir",
"numpy.cross",
"argparse.ArgumentParser",
"numpy.asarray",
"numpy.linalg.norm",
"numpy.argmax",
"numpy.zeros",
"open3d.geometry.PointCloud",
"open3d.io.read_point_cloud",
"numpy.argmin",
"open3d.utility.Vector3dVector"
] | [((96, 112), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (110, 112), False, 'from argparse import ArgumentParser\n'), ((1944, 1995), 'os.listdir', 'os.listdir', (['f"""./pointcloud_raw/{args.scan_folder}/"""'], {}), "(f'./pointcloud_raw/{args.scan_folder}/')\n", (1954, 1995), False, 'import os\n'), ((2263, 2323), 'numpy.savez', 'np.savez', (['f"""./extrinsic/{args.extrinsic_name}.npz"""'], {'R': 'R', 'T': 'T'}), "(f'./extrinsic/{args.extrinsic_name}.npz', R=R, T=T)\n", (2271, 2323), True, 'import numpy as np\n'), ((500, 527), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.float'}), '(3, dtype=np.float)\n', (508, 527), True, 'import numpy as np\n'), ((540, 567), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.float'}), '(3, dtype=np.float)\n', (548, 567), True, 'import numpy as np\n'), ((578, 605), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.float'}), '(3, dtype=np.float)\n', (586, 605), True, 'import numpy as np\n'), ((617, 644), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.float'}), '(3, dtype=np.float)\n', (625, 644), True, 'import numpy as np\n'), ((1152, 1168), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (1160, 1168), True, 'import numpy as np\n'), ((1297, 1323), 'numpy.cross', 'np.cross', (['R[:, 0]', 'R[:, 1]'], {}), '(R[:, 0], R[:, 1])\n', (1305, 1323), True, 'import numpy as np\n'), ((1490, 1512), 'numpy.asarray', 'np.asarray', (['pcd.colors'], {}), '(pcd.colors)\n', (1500, 1512), True, 'import numpy as np\n'), ((1566, 1599), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['color'], {}), '(color)\n', (1592, 1599), True, 'import open3d as o3d\n'), ((1703, 1725), 'numpy.asarray', 'np.asarray', (['pcd.points'], {}), '(pcd.points)\n', (1713, 1725), True, 'import numpy as np\n'), ((1746, 1771), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (1769, 1771), True, 'import open3d as o3d\n'), ((1799, 1839), 'open3d.utility.Vector3dVector', 
'o3d.utility.Vector3dVector', (['points[mask]'], {}), '(points[mask])\n', (1825, 1839), True, 'import open3d as o3d\n'), ((1867, 1906), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['color[mask]'], {}), '(color[mask])\n', (1893, 1906), True, 'import open3d as o3d\n'), ((2109, 2142), 'open3d.io.read_point_cloud', 'o3d.io.read_point_cloud', (['filename'], {}), '(filename)\n', (2132, 2142), True, 'import open3d as o3d\n'), ((768, 789), 'numpy.asarray', 'np.asarray', (['pc.points'], {}), '(pc.points)\n', (778, 789), True, 'import numpy as np\n'), ((1196, 1227), 'numpy.linalg.norm', 'np.linalg.norm', (['(right - left)', '(2)'], {}), '(right - left, 2)\n', (1210, 1227), True, 'import numpy as np\n'), ((1253, 1284), 'numpy.linalg.norm', 'np.linalg.norm', (['(top - bottom)', '(2)'], {}), '(top - bottom, 2)\n', (1267, 1284), True, 'import numpy as np\n'), ((812, 835), 'numpy.argmax', 'np.argmax', (['points[:, 1]'], {}), '(points[:, 1])\n', (821, 835), True, 'import numpy as np\n'), ((861, 884), 'numpy.argmin', 'np.argmin', (['points[:, 1]'], {}), '(points[:, 1])\n', (870, 884), True, 'import numpy as np\n'), ((908, 931), 'numpy.argmin', 'np.argmin', (['points[:, 0]'], {}), '(points[:, 0])\n', (917, 931), True, 'import numpy as np\n'), ((956, 979), 'numpy.argmax', 'np.argmax', (['points[:, 0]'], {}), '(points[:, 0])\n', (965, 979), True, 'import numpy as np\n')] |
'''
A simple neural network to solve 2 input XOR
'''
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
X = np.array([
[0, 0],
[0, 1],
[1, 0],
[1, 1]
], "float32")
y = np.array([
[0],
[1],
[1],
[0]
], "int32")
ohe = OneHotEncoder()
y = ohe.fit_transform(y).toarray()
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=1, random_state=42)
inp = tf.placeholder(tf.float32, shape=[None, 2])
expected = tf.placeholder(tf.float32, shape=[None, 2])
# layer 1 parameters
weights1 = tf.Variable(tf.truncated_normal([2, 3], stddev=0.01))
biases1 = tf.Variable(tf.zeros([3]))
hidden1 = tf.nn.sigmoid(tf.matmul(inp, weights1) + biases1)
# layer 2 (ouput layer) parameters
weights2 = tf.Variable(tf.truncated_normal([3, 2], stddev=0.01))
biases2 = tf.Variable(tf.zeros([2]))
logits = tf.matmul(hidden1, weights2) + biases2
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, expected)
loss = tf.reduce_mean(cross_entropy)
train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(expected, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('Cross Entropy Loss', loss)
tf.summary.scalar('Accuracy', accuracy)
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("./logs/2-layer")
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
print("Training")
for i in range(5000):
sess.run(train_step, feed_dict={inp: X_train,
expected: y_train})
summary, acc, err = sess.run([merged, accuracy, loss], feed_dict={inp: X_train,
expected: y_train})
writer.add_summary(summary, i + 1)
if (i + 1) % 1000 == 0:
print("Epoch: {:5d}\tAcc: {:6.2f}%\tErr: {:6.2f}".format(i + 1, acc * 100, err))
print("\nValidation")
acc_test = sess.run(accuracy, feed_dict={inp: X_test,
expected: y_test})
acc_train = sess.run(accuracy, feed_dict={inp: X_train,
expected: y_train})
print("Accuracy on validation data = {:.2f}%".format(acc_test * 100))
print("Accuracy on training data = {:.2f}%".format(acc_train * 100))
| [
"tensorflow.cast",
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.merge_all",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.OneHotEncoder",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.global_variables_initializer",
"numpy.array",
"tensorflow.argmax",
"... | [((204, 257), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [1, 0], [1, 1]]', '"""float32"""'], {}), "([[0, 0], [0, 1], [1, 0], [1, 1]], 'float32')\n", (212, 257), True, 'import numpy as np\n'), ((281, 320), 'numpy.array', 'np.array', (['[[0], [1], [1], [0]]', '"""int32"""'], {}), "([[0], [1], [1], [0]], 'int32')\n", (289, 320), True, 'import numpy as np\n'), ((346, 361), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (359, 361), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((433, 485), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(1)', 'random_state': '(42)'}), '(X, y, test_size=1, random_state=42)\n', (449, 485), False, 'from sklearn.model_selection import train_test_split\n'), ((499, 542), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 2]'}), '(tf.float32, shape=[None, 2])\n', (513, 542), True, 'import tensorflow as tf\n'), ((554, 597), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 2]'}), '(tf.float32, shape=[None, 2])\n', (568, 597), True, 'import tensorflow as tf\n'), ((988, 1045), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', (['logits', 'expected'], {}), '(logits, expected)\n', (1027, 1045), True, 'import tensorflow as tf\n'), ((1053, 1082), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {}), '(cross_entropy)\n', (1067, 1082), True, 'import tensorflow as tf\n'), ((1295, 1340), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Cross Entropy Loss"""', 'loss'], {}), "('Cross Entropy Loss', loss)\n", (1312, 1340), True, 'import tensorflow as tf\n'), ((1341, 1380), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Accuracy"""', 'accuracy'], {}), "('Accuracy', accuracy)\n", (1358, 1380), True, 'import tensorflow as tf\n'), ((1390, 1412), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (1410, 1412), 
True, 'import tensorflow as tf\n'), ((1422, 1461), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""./logs/2-layer"""'], {}), "('./logs/2-layer')\n", (1443, 1461), True, 'import tensorflow as tf\n'), ((1471, 1504), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1502, 1504), True, 'import tensorflow as tf\n'), ((644, 684), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[2, 3]'], {'stddev': '(0.01)'}), '([2, 3], stddev=0.01)\n', (663, 684), True, 'import tensorflow as tf\n'), ((708, 721), 'tensorflow.zeros', 'tf.zeros', (['[3]'], {}), '([3])\n', (716, 721), True, 'import tensorflow as tf\n'), ((843, 883), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[3, 2]'], {'stddev': '(0.01)'}), '([3, 2], stddev=0.01)\n', (862, 883), True, 'import tensorflow as tf\n'), ((907, 920), 'tensorflow.zeros', 'tf.zeros', (['[2]'], {}), '([2])\n', (915, 920), True, 'import tensorflow as tf\n'), ((931, 959), 'tensorflow.matmul', 'tf.matmul', (['hidden1', 'weights2'], {}), '(hidden1, weights2)\n', (940, 959), True, 'import tensorflow as tf\n'), ((1181, 1201), 'tensorflow.argmax', 'tf.argmax', (['logits', '(1)'], {}), '(logits, 1)\n', (1190, 1201), True, 'import tensorflow as tf\n'), ((1203, 1225), 'tensorflow.argmax', 'tf.argmax', (['expected', '(1)'], {}), '(expected, 1)\n', (1212, 1225), True, 'import tensorflow as tf\n'), ((1253, 1292), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (1260, 1292), True, 'import tensorflow as tf\n'), ((1511, 1523), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1521, 1523), True, 'import tensorflow as tf\n'), ((747, 771), 'tensorflow.matmul', 'tf.matmul', (['inp', 'weights1'], {}), '(inp, weights1)\n', (756, 771), True, 'import tensorflow as tf\n'), ((1097, 1126), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.001)'], {}), '(0.001)\n', (1119, 1126), True, 'import tensorflow as 
tf\n')] |
'''
Applying Stochastic Gradient Descent for Linear Regression
'''
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
X = 2 * np.random.rand(100,1)
y = 4 + 3 * X + np.random.randn(100,1)
X_b = np.c_[np.ones((100,1)), X]
eta = .1
m = 100
n_epochs = 50
# Learning schedule hyperparameters
t0, t1 = 5, 50
def learning_schedule(t):
return t0 / (t + t1)
# Random initialization for theta
theta = np.random.randn(2,1)
for epoch in range(n_epochs):
for i in range(m):
random_index = np.random.randint(m)
xi = X_b[random_index:random_index+1]
yi = y[random_index:random_index+1]
gradients = 2 * xi.T.dot(xi.dot(theta) - yi)
eta = learning_schedule(epoch * m + i)
theta = theta - eta * gradients
print('Theta: ', theta)
# Using Scikit-Learn
from sklearn.linear_model import SGDRegressor
sgd_reg = SGDRegressor(max_iter=50, penalty=None, eta0=.1)
sgd_reg.fit(X, y.ravel())
print('Scikit-Learn: ', sgd_reg.intercept_, sgd_reg.coef_)
| [
"numpy.ones",
"numpy.random.rand",
"sklearn.linear_model.SGDRegressor",
"numpy.random.randint",
"numpy.random.randn"
] | [((420, 441), 'numpy.random.randn', 'np.random.randn', (['(2)', '(1)'], {}), '(2, 1)\n', (435, 441), True, 'import numpy as np\n'), ((871, 920), 'sklearn.linear_model.SGDRegressor', 'SGDRegressor', ([], {'max_iter': '(50)', 'penalty': 'None', 'eta0': '(0.1)'}), '(max_iter=50, penalty=None, eta0=0.1)\n', (883, 920), False, 'from sklearn.linear_model import SGDRegressor\n'), ((146, 168), 'numpy.random.rand', 'np.random.rand', (['(100)', '(1)'], {}), '(100, 1)\n', (160, 168), True, 'import numpy as np\n'), ((184, 207), 'numpy.random.randn', 'np.random.randn', (['(100)', '(1)'], {}), '(100, 1)\n', (199, 207), True, 'import numpy as np\n'), ((220, 237), 'numpy.ones', 'np.ones', (['(100, 1)'], {}), '((100, 1))\n', (227, 237), True, 'import numpy as np\n'), ((518, 538), 'numpy.random.randint', 'np.random.randint', (['m'], {}), '(m)\n', (535, 538), True, 'import numpy as np\n')] |
#!/usr/bin/env python
### shared_qvm.py
###
### Author: <NAME>
###
### Copyright (c) 2017 Rigetti Computing
### This file shows a minimal example of how to use the --shared
### option with QVM from Python.
from __future__ import print_function
import posix_ipc as pos
import mmap
import ctypes
import numpy as np
import socket
import json
import sys
from pyquil.api import QVMConnection
from pyquil.quil import Program
from pyquil.gates import X
def query_length_offset(name):
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect('/tmp/' + name)
s.sendall("?")
message, peer = s.recvfrom(4096)
length, offset = message.split(',')
return int(length), int(offset)
def retrieve_wavefunction(name):
length, offset = query_length_offset(name)
shm = pos.SharedMemory(name)
m = mmap.mmap(shm.fd, shm.size)
# get the pointer to what appear to be an array of bytes
ptr = ctypes.POINTER(ctypes.c_ubyte)(ctypes.c_void_p.from_buffer(m, offset))
# cast to array of complex double floats
ptr = ctypes.cast(ptr, np.ctypeslib.ndpointer(shape=(length,), dtype=np.complex128))
return np.ctypeslib.as_array(ptr)
# Example use of this interface.
if __name__ == '__main__':
if len(sys.argv) != 2:
print('Syntax: shared_qvm.py <name>')
sys.exit(1)
name = sys.argv[1]
cxn = QVMConnection(sync_endpoint='http://127.0.0.1:5000')
wf = retrieve_wavefunction(name)
print("Initial wavefunction:")
print(wf)
print("Initializing to W state.")
wf[0b0000] = 0j
wf[0b0001] = (1+0j)/np.sqrt(4)
wf[0b0010] = (1+0j)/np.sqrt(4)
wf[0b0100] = (1+0j)/np.sqrt(4)
wf[0b1000] = (1+0j)/np.sqrt(4)
print(wf)
print("Evolving with X3X2X1X0 via QVM. Quil program is:")
p = Program().inst([X(q) for q in range(4)])
print(p)
cxn.run(p, [0])
print("Printing evolved state.")
for b in range(len(wf)):
if not np.isclose(wf[b], 0j):
print("{0:04b} => {1}".format(b, wf[b]))
| [
"pyquil.api.QVMConnection",
"mmap.mmap",
"ctypes.POINTER",
"numpy.sqrt",
"socket.socket",
"numpy.isclose",
"numpy.ctypeslib.as_array",
"posix_ipc.SharedMemory",
"pyquil.quil.Program",
"ctypes.c_void_p.from_buffer",
"pyquil.gates.X",
"numpy.ctypeslib.ndpointer",
"sys.exit"
] | [((491, 540), 'socket.socket', 'socket.socket', (['socket.AF_UNIX', 'socket.SOCK_STREAM'], {}), '(socket.AF_UNIX, socket.SOCK_STREAM)\n', (504, 540), False, 'import socket\n'), ((794, 816), 'posix_ipc.SharedMemory', 'pos.SharedMemory', (['name'], {}), '(name)\n', (810, 816), True, 'import posix_ipc as pos\n'), ((825, 852), 'mmap.mmap', 'mmap.mmap', (['shm.fd', 'shm.size'], {}), '(shm.fd, shm.size)\n', (834, 852), False, 'import mmap\n'), ((1140, 1166), 'numpy.ctypeslib.as_array', 'np.ctypeslib.as_array', (['ptr'], {}), '(ptr)\n', (1161, 1166), True, 'import numpy as np\n'), ((1354, 1406), 'pyquil.api.QVMConnection', 'QVMConnection', ([], {'sync_endpoint': '"""http://127.0.0.1:5000"""'}), "(sync_endpoint='http://127.0.0.1:5000')\n", (1367, 1406), False, 'from pyquil.api import QVMConnection\n'), ((924, 954), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_ubyte'], {}), '(ctypes.c_ubyte)\n', (938, 954), False, 'import ctypes\n'), ((955, 993), 'ctypes.c_void_p.from_buffer', 'ctypes.c_void_p.from_buffer', (['m', 'offset'], {}), '(m, offset)\n', (982, 993), False, 'import ctypes\n'), ((1067, 1127), 'numpy.ctypeslib.ndpointer', 'np.ctypeslib.ndpointer', ([], {'shape': '(length,)', 'dtype': 'np.complex128'}), '(shape=(length,), dtype=np.complex128)\n', (1089, 1127), True, 'import numpy as np\n'), ((1309, 1320), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1317, 1320), False, 'import sys\n'), ((1576, 1586), 'numpy.sqrt', 'np.sqrt', (['(4)'], {}), '(4)\n', (1583, 1586), True, 'import numpy as np\n'), ((1611, 1621), 'numpy.sqrt', 'np.sqrt', (['(4)'], {}), '(4)\n', (1618, 1621), True, 'import numpy as np\n'), ((1646, 1656), 'numpy.sqrt', 'np.sqrt', (['(4)'], {}), '(4)\n', (1653, 1656), True, 'import numpy as np\n'), ((1681, 1691), 'numpy.sqrt', 'np.sqrt', (['(4)'], {}), '(4)\n', (1688, 1691), True, 'import numpy as np\n'), ((1776, 1785), 'pyquil.quil.Program', 'Program', ([], {}), '()\n', (1783, 1785), False, 'from pyquil.quil import Program\n'), ((1792, 1796), 
'pyquil.gates.X', 'X', (['q'], {}), '(q)\n', (1793, 1796), False, 'from pyquil.gates import X\n'), ((1931, 1954), 'numpy.isclose', 'np.isclose', (['wf[b]', '(0.0j)'], {}), '(wf[b], 0.0j)\n', (1941, 1954), True, 'import numpy as np\n')] |
import numpy as np
n = int(input().strip())
array = np.array([[float(x) for x in input().strip().split()] for _ in range(n)], dtype = float)
print(np.linalg.det(array)) | [
"numpy.linalg.det"
] | [((149, 169), 'numpy.linalg.det', 'np.linalg.det', (['array'], {}), '(array)\n', (162, 169), True, 'import numpy as np\n')] |
#Created by JetBrains PyCharm
#Project Name: SoundAnalyzer with RaspberryPi
#Author: <NAME>
#University: Cergy-Pontoise
#E-mail : <EMAIL>
import asyncio
import os, errno
import pyaudio
import spl_lib as spl
from scipy.signal import lfilter
import numpy
import time
class MicUSB:
#CHUNKS[1] was 9600
CHUNKS = [4096, 1024]
CHUNK = CHUNKS[1]
FORMAT = pyaudio.paInt16
CHANNEL = 1
#RATES[1] was 48000
RATES = [44300, 44100]
RATE = RATES[1]
NUMERATOR, DENOMINATOR = spl.A_weighting(RATE)
def __init__(self):
self.__pa = pyaudio.PyAudio()
self.__stream = self.__pa.open(format=MicUSB.FORMAT,
channels=MicUSB.CHANNEL,
rate=MicUSB.RATE,
input=True,
frames_per_buffer=MicUSB.CHUNK)
self.__currentdB = 0
self.__Init()
def __Init(self):
self.__Listen(45)
def __fire_and_forget(f):
def wrapped(*args, **kwargs):
return asyncio.get_event_loop().run_in_executor(None, f, *args, *kwargs)
return wrapped
@__fire_and_forget
def __Listen(self, duration):
endTime = time.time() + duration
print("Listening...")
error_count = 0
while True:
try:
block = self.__stream.read(MicUSB.CHUNK, exception_on_overflow=False)
except IOError as e:
error_count += 1
print(" (%d) Error recording: %s" % (error_count, e))
else:
decoded_block = numpy.fromstring(block, 'Int16')
y = lfilter(MicUSB.NUMERATOR, MicUSB.DENOMINATOR, decoded_block)
self.__currentdB = 20 * numpy.log10(spl.rms_flat(y))
#print(new_decibel)
self.__stream.stop_stream()
self.__stream.close()
self.__pa.terminate()
# def __OpenStream(self):
def GetdB(self):
return self.__currentdB
def __Update_dB(self, new_dB):
if abs(self.__currentdB - new_dB) > 2:
self.__currentdB = new_dB
# if __name__ == "__main__":
# a = MicUSB()
# a.Listen(60)
# while (True):
# print(a.GetdB())
# time.sleep(.5)
# print("slept")
| [
"spl_lib.rms_flat",
"asyncio.get_event_loop",
"spl_lib.A_weighting",
"scipy.signal.lfilter",
"pyaudio.PyAudio",
"numpy.fromstring",
"time.time"
] | [((500, 521), 'spl_lib.A_weighting', 'spl.A_weighting', (['RATE'], {}), '(RATE)\n', (515, 521), True, 'import spl_lib as spl\n'), ((567, 584), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (582, 584), False, 'import pyaudio\n'), ((1245, 1256), 'time.time', 'time.time', ([], {}), '()\n', (1254, 1256), False, 'import time\n'), ((1631, 1663), 'numpy.fromstring', 'numpy.fromstring', (['block', '"""Int16"""'], {}), "(block, 'Int16')\n", (1647, 1663), False, 'import numpy\n'), ((1684, 1744), 'scipy.signal.lfilter', 'lfilter', (['MicUSB.NUMERATOR', 'MicUSB.DENOMINATOR', 'decoded_block'], {}), '(MicUSB.NUMERATOR, MicUSB.DENOMINATOR, decoded_block)\n', (1691, 1744), False, 'from scipy.signal import lfilter\n'), ((1078, 1102), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1100, 1102), False, 'import asyncio\n'), ((1797, 1812), 'spl_lib.rms_flat', 'spl.rms_flat', (['y'], {}), '(y)\n', (1809, 1812), True, 'import spl_lib as spl\n')] |
import six
from collections import deque, defaultdict
import numpy as np
from pybot.utils.itertools_recipes import chunks
def concat_chunked_dicts(dlist):
"""
Concatenate individual arrays in dictionary
TODO: defaultdict is the right way to do it, except for
conversion to dict in the final return call. Keras requires
type as dict
"""
batch = defaultdict(list)
for item in dlist:
for k,v in six.iteritems(item):
batch[k].append(v)
for k,v in six.iteritems(batch):
batch[k] = np.concatenate(v)
return dict(batch)
def chunked_data(iterable, batch_size=10):
"""
For tuples:
arg = ([np.array, np.array], {'output': np.array})
For dictionaries:
arg = ({'input': np.array, 'input2': np.array}, {'output': np.array})
"""
for batch in chunks(iterable, batch_size):
args = list(zip(*batch))
# (arg), (arg), ...
# arg = ([x1,x2], y)
# type(args[0]) = tuple
# type(args[0][0]) = list
if isinstance(args[0][0], dict):
items = [concat_chunked_dicts(arg) for arg in args]
elif isinstance(args[0][0], np.ndarray):
items = [np.concatenate(arg) for arg in args]
elif isinstance(args[0][0], list) and isinstance(args[0][0][0], np.ndarray):
items = [[np.concatenate(item) for item in arg] for arg in args]
else:
raise TypeError('''Unknown type: either dict, np.array, or list of np.arrays can be batched'''
'''Type is {}'''.format(type(args[0][0])))
yield tuple(items)
def get_dataset_generator(datagen, batch_size=1):
if batch_size > 1:
datagen = chunked_data(datagen, batch_size=batch_size)
return datagen
| [
"six.iteritems",
"collections.defaultdict",
"numpy.concatenate",
"pybot.utils.itertools_recipes.chunks"
] | [((384, 401), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (395, 401), False, 'from collections import deque, defaultdict\n'), ((511, 531), 'six.iteritems', 'six.iteritems', (['batch'], {}), '(batch)\n', (524, 531), False, 'import six\n'), ((851, 879), 'pybot.utils.itertools_recipes.chunks', 'chunks', (['iterable', 'batch_size'], {}), '(iterable, batch_size)\n', (857, 879), False, 'from pybot.utils.itertools_recipes import chunks\n'), ((444, 463), 'six.iteritems', 'six.iteritems', (['item'], {}), '(item)\n', (457, 463), False, 'import six\n'), ((552, 569), 'numpy.concatenate', 'np.concatenate', (['v'], {}), '(v)\n', (566, 569), True, 'import numpy as np\n'), ((1236, 1255), 'numpy.concatenate', 'np.concatenate', (['arg'], {}), '(arg)\n', (1250, 1255), True, 'import numpy as np\n'), ((1381, 1401), 'numpy.concatenate', 'np.concatenate', (['item'], {}), '(item)\n', (1395, 1401), True, 'import numpy as np\n')] |
from unityagents import UnityEnvironment
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import torch
from collections import deque
from maddpg import MADDPG
env = UnityEnvironment(file_name="Tennis_Linux/Tennis.x86_64")
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]
# number of agents
num_agents = len(env_info.agents)
print('Number of agents:', num_agents)
# size of each action
action_size = brain.vector_action_space_size
print('Size of each action:', action_size)
# examine the state space
states = env_info.vector_observations
state_size = states.shape[1]
print('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))
print('The state for the first agent looks like:', states[0])
'''
count = 0
import time
for i in range(1, 6): # play game for 5 episodes
env_info = env.reset(train_mode=False)[brain_name] # reset the environment
states = env_info.vector_observations # get the current state (for each agent)
scores = np.zeros(num_agents) # initialize the score (for each agent)
while True:
count += 1
#time.sleep(3)
actions = np.random.randn(num_agents, action_size) # select an action (for each agent)
actions = np.clip(actions, -1, 1) # all actions between -1 and 1
env_info = env.step(actions)[brain_name] # send all actions to tne environment
next_states = env_info.vector_observations # get next state (for each agent)
#print("Agent 1:")
#print(str(next_states[0]))
#print("Agent 2:")
#print(str(next_states[1]))
#print()
rewards = env_info.rewards # get reward (for each agent)
dones = env_info.local_done # see if episode finished
scores += env_info.rewards # update the score (for each agent)
states = next_states # roll over states to next time step
if np.any(dones) or count==10: # exit loop if episode finished
break
print('Score (max over agents) from episode {}: {}'.format(i, np.max(scores)))
env.close()
'''
# Create an agent, pass a desired size for the hiden layers.
agent = MADDPG(state_size, action_size, seed=10, a_check=None, c_check=None, gamma=0.995, tau=1e-3, add_noise=False, mu=0.,
theta=0.15, sigma=0.2, lr_actor=1e-4, lr_critic=4.2e-3,buffer_size=1e5, batch_size=200, update_every = 4,
low_action=-1, high_action=1, num_agents=2, warm_up=0, consecutive_learns=3, clip_critic_grad=0)
# Define dqn algorithm
def maddpg(n_episodes=10000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995):
"""Deep Q-Learning.
Params
======
n_episodes (int): maximum number of training episodes
max_t (int): maximum number of timesteps per episode
eps_start (float): starting value of epsilon, for epsilon-greedy action selection
eps_end (float): minimum value of epsilon
eps_decay (float): multiplicative factor (per episode) for decreasing epsilon
"""
scores = [] # list containing scores from each episode
scores_window = deque(maxlen=100) # last 100 scores
eps = eps_start # initialize epsilon
for i_episode in range(1, n_episodes + 1):
env_info = env.reset(train_mode=True)[brain_name]
states = env_info.vector_observations
agent.reset()
score = np.zeros(2)
while True:
actions = agent.act(states, random=False)
env_info = env.step(actions)[brain_name]
next_states, rewards, dones = env_info.vector_observations, env_info.rewards, env_info.local_done
#next_state = agent.state_normalizer(next_state)
#reward = agent.reward_normalizer(reward)
agent.step(states, actions, rewards, next_states, dones, i_episode)
states = next_states
score += rewards
if np.any(dones):
break
episode_score = np.max(score)
scores_window.append(episode_score) # save most recent score
scores.append(episode_score) # save most recent score
eps = max(eps_end, eps_decay * eps) # decrease epsilon
print('\rEpisode {}\tAverage Score: {:.2f}\tlast score: {:.2f}'.format(i_episode, np.mean(scores_window), episode_score), end="")
if i_episode % 100 == 0:
print('\nEpisode {}\tAverage Score: {:.2f}\tlast score: {:.2f}'.format(i_episode, np.mean(scores_window), episode_score), end="")
if np.mean(scores_window) >= 0.5 and i_episode > 50:
print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode - 100,
np.mean(scores_window)))
torch.save(agent.critic.state_dict(), 'my_critic.pth')
torch.save(agent.actor.state_dict(), 'my_actor.pth')
break
# A small step in learning rate to allow for quicker convergence with above set parameters
#if i_episode == 1200:
# agent.adjust_learning_rate(1200, 2E-5)
return scores
scores = maddpg()
env.close()
# plot the scores
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(len(scores)), scores)
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.show()
# Save scores
with open('scores.txt', 'w') as f:
for item in scores:
f.write("%f\n" % item)
'''
agent = MADDPG(state_size, action_size, seed=10, a_check=None, c_check=None, gamma=0.995, tau=1e-3, add_noise=False, mu=0.,
theta=0.15, sigma=0.2, lr_actor=1e-4, lr_critic=4e-3,buffer_size=1e5, batch_size=200, update_every = 4,
low_action=-1, high_action=1, num_agents=2, warm_up=0, consecutive_learns=3, clip_critic_grad=0)
Episode 100 Average Score: 0.03 last score: 0.00
Episode 200 Average Score: 0.00 last score: 0.00
Episode 300 Average Score: 0.02 last score: 0.00
Episode 400 Average Score: 0.01 last score: 0.00
Episode 500 Average Score: 0.01 last score: 0.00
Episode 600 Average Score: 0.00 last score: 0.00
Episode 700 Average Score: 0.01 last score: 0.00
Episode 800 Average Score: 0.04 last score: 0.00
Episode 900 Average Score: 0.01 last score: 0.00
Episode 1000 Average Score: 0.07 last score: 0.10
Episode 1100 Average Score: 0.09 last score: 0.09
Episode 1200 Average Score: 0.08 last score: 0.10
Episode 1300 Average Score: 0.08 last score: 0.10
Episode 1400 Average Score: 0.11 last score: 0.10
Episode 1500 Average Score: 0.10 last score: 0.10
Episode 1600 Average Score: 0.12 last score: 0.10
Episode 1700 Average Score: 0.08 last score: 0.00
Episode 1800 Average Score: 0.08 last score: 0.10
Episode 1900 Average Score: 0.11 last score: 0.20
Episode 2000 Average Score: 0.08 last score: 0.10
Episode 2100 Average Score: 0.23 last score: 0.10
Episode 2200 Average Score: 0.14 last score: 0.10
Episode 2300 Average Score: 0.10 last score: 0.10
Episode 2400 Average Score: 0.24 last score: 0.20
Episode 2500 Average Score: 0.31 last score: 0.10
Episode 2600 Average Score: 0.29 last score: 1.00
Episode 2700 Average Score: 0.54 last score: 0.00
Episode 2703 Average Score: 0.51 last score: 0.00
''' | [
"numpy.mean",
"collections.deque",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.style.use",
"numpy.any",
"numpy.max",
"unityagents.UnityEnvironment",
"matplotlib.pyplot.figure",
"numpy.zeros",
"maddpg.MADDPG",
"matplotlib.pyplot.show"
] | [((92, 115), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (105, 115), True, 'import matplotlib.pyplot as plt\n'), ((192, 248), 'unityagents.UnityEnvironment', 'UnityEnvironment', ([], {'file_name': '"""Tennis_Linux/Tennis.x86_64"""'}), "(file_name='Tennis_Linux/Tennis.x86_64')\n", (208, 248), False, 'from unityagents import UnityEnvironment\n'), ((2503, 2846), 'maddpg.MADDPG', 'MADDPG', (['state_size', 'action_size'], {'seed': '(10)', 'a_check': 'None', 'c_check': 'None', 'gamma': '(0.995)', 'tau': '(0.001)', 'add_noise': '(False)', 'mu': '(0.0)', 'theta': '(0.15)', 'sigma': '(0.2)', 'lr_actor': '(0.0001)', 'lr_critic': '(0.0042)', 'buffer_size': '(100000.0)', 'batch_size': '(200)', 'update_every': '(4)', 'low_action': '(-1)', 'high_action': '(1)', 'num_agents': '(2)', 'warm_up': '(0)', 'consecutive_learns': '(3)', 'clip_critic_grad': '(0)'}), '(state_size, action_size, seed=10, a_check=None, c_check=None, gamma=\n 0.995, tau=0.001, add_noise=False, mu=0.0, theta=0.15, sigma=0.2,\n lr_actor=0.0001, lr_critic=0.0042, buffer_size=100000.0, batch_size=200,\n update_every=4, low_action=-1, high_action=1, num_agents=2, warm_up=0,\n consecutive_learns=3, clip_critic_grad=0)\n', (2509, 2846), False, 'from maddpg import MADDPG\n'), ((5518, 5530), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5528, 5530), True, 'import matplotlib.pyplot as plt\n'), ((5598, 5617), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Score"""'], {}), "('Score')\n", (5608, 5617), True, 'import matplotlib.pyplot as plt\n'), ((5618, 5641), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode #"""'], {}), "('Episode #')\n", (5628, 5641), True, 'import matplotlib.pyplot as plt\n'), ((5642, 5652), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5650, 5652), True, 'import matplotlib.pyplot as plt\n'), ((3453, 3470), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (3458, 3470), False, 'from collections 
import deque\n'), ((3721, 3732), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (3729, 3732), True, 'import numpy as np\n'), ((4305, 4318), 'numpy.max', 'np.max', (['score'], {}), '(score)\n', (4311, 4318), True, 'import numpy as np\n'), ((4244, 4257), 'numpy.any', 'np.any', (['dones'], {}), '(dones)\n', (4250, 4257), True, 'import numpy as np\n'), ((4606, 4628), 'numpy.mean', 'np.mean', (['scores_window'], {}), '(scores_window)\n', (4613, 4628), True, 'import numpy as np\n'), ((4840, 4862), 'numpy.mean', 'np.mean', (['scores_window'], {}), '(scores_window)\n', (4847, 4862), True, 'import numpy as np\n'), ((4781, 4803), 'numpy.mean', 'np.mean', (['scores_window'], {}), '(scores_window)\n', (4788, 4803), True, 'import numpy as np\n'), ((5085, 5107), 'numpy.mean', 'np.mean', (['scores_window'], {}), '(scores_window)\n', (5092, 5107), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 21 15:50:29 2017
@author: BallBlueMeercat
"""
#import csv
#with open('Amanullah.txt') as f:
# reader = csv.reader(f, delimiter="\t")
# d = list(reader)
#print(d[5][:]) # 248
#import pandas as pd
#pd.read_csv('Amanullah.txt', delim_whitespace=True)
import numpy as np
zpicks1 =[0.028488
,0.050043
,0.052926
,0.070086
,0.062668
,0.087589
,0.078577
,0.017227
,0.042233
,0.045295
,0.03648
,0.019599
,0.100915
,0.027342
,0.074605
,0.026489
,0.049922
,0.030604
,0.016345641
,0.0154363
,0.030529
,0.024525
,0.023953
,0.026038
,0.048948
,0.024314
,0.015166
,0.03572
,0.048818
,0.0219800059146
,0.0275
,0.1244
,0.036
,0.01673
,0.016321
,0.021793
,0.01645
,0.023208
,0.036457
,0.019264
,0.017605
,0.031528
,0.023536
,0.016743
,0.05371
,0.016991
,0.027865
,0.017173
,0.029955
,0.016559
,0.015
,0.0544
,0.1561
,0.0393
,0.1241
,0.1441
,0.1299
,0.0784
,0.0583
,0.0309
,0.0406
,0.0152
,0.0224
,0.016
,0.0362
,0.0173
,0.0312
,0.0221
,0.016
,0.0249
,0.0303
,0.0283
,0.0152
,0.0345
,0.036
,0.0248
,0.0292
,0.0163
,0.0187
,0.0195
,0.0256
,0.0337
,0.0546
,0.024
,0.0336
,0.0341
,0.0261
,0.0211
,0.0321
,0.0221
,0.0298
,0.0334
,0.0284
,0.0341
,0.045
,0.0421
,0.0576
,0.033
,0.0753
,0.0204
,0.0205
,0.0402
,0.026
,0.0259
,0.0268
,0.0239
,0.069
,0.0651
,0.0229
,0.018
,0.0315
,0.0215
,0.0255
,0.0325
,0.0843
,0.0308
,0.0327
,0.0423
,0.0684
,0.0153
,0.0233
,0.0491
,0.0425
,0.0192
,0.0308
,0.0212
,0.0277
,0.0335
,0.0208
,0.0173
,0.036
,0.0233
,0.0589
,0.0583
,0.0688
,0.0321
,0.0522
,0.0308
,0.0329
,0.023
,0.015
,0.0321
,0.0643
,0.032
,0.0209
,0.0219
,0.032
,0.0151
,0.0192
,0.0266
,0.0377
,0.0247
,0.0242
,0.0366
,0.0229
,0.0312
,0.015
,0.0341
,0.0251
,0.0189
,0.065
,0.147
,0.13
,0.104
,0.244
,0.3
,0.045
,0.114
,0.258
,0.297
,0.382
,0.147
,0.274
,0.299
,0.38
,0.382
,0.303
,0.35
,0.087
,0.262
,0.217
,0.119
,0.183
,0.359
,0.143
,0.262
,0.234
,0.153
,0.095
,0.288
,0.195
,0.148
,0.213
,0.181
,0.265
,0.193
,0.34
,0.118
,0.143
,0.162
,0.29
,0.124
,0.265
,0.127
,0.174
,0.165
,0.251
,0.259
,0.108
,0.161
,0.206
,0.245
,0.25
,0.23
,0.087
,0.063
,0.279
,0.277
,0.156
,0.332
,0.363
,0.332
,0.146
,0.39
,0.175
,0.301
,0.117
,0.22
,0.121
,0.156
,0.41
,0.179
,0.252
,0.253
,0.393
,0.13
,0.401
,0.311
,0.172
,0.28
,0.31
,0.187
,0.067
,0.318
,0.259
,0.3
,0.272
,0.281
,0.294
,0.19
,0.267
,0.125
,0.184
,0.314
,0.311
,0.09
,0.404
,0.202
,0.328
,0.213
,0.181
,0.094
,0.304
,0.11
,0.087
,0.204
,0.198
,0.128
,0.216
,0.322
,0.219
,0.191
,0.094
,0.381
,0.212
,0.368
,0.422
,0.259
,0.185
,0.214
,0.361
,0.395
,0.116
,0.145
,0.254
,0.389
,0.35
,0.257
,0.218
,0.43
,0.62
,0.57
,0.3
,0.38
,0.43
,0.24
,0.44
,0.5
,0.97
,0.479
,0.83
,0.416
,0.581
,0.45
,0.579
,0.32
,0.657
,0.43
,0.472
,0.374
,0.18
,0.55
,0.592
,0.172
,0.526
,0.763
,0.58
,0.43
,0.45
,0.656
,0.495
,0.49
,0.57
,0.388
,0.45
,0.48
,0.615
,0.4
,0.655
,0.498
,0.465
,0.453
,0.425
,0.514
,0.423
,0.859
,0.936
,0.528
,0.978
,0.885
,0.815
,0.698
,0.568
,0.711
,0.3396
,0.3965
,0.812
,0.799
,0.882
,0.833
,0.874
,0.772
,0.178
,0.26
,0.186
,0.269
,0.215
,0.543
,0.75
,0.64
,0.43
,0.64
,0.497
,0.44
,0.355
,0.78
,0.54
,0.86
,0.468
,0.84
,0.96
,0.8218
,0.93
,0.451
,0.61
,0.83
,0.707
,0.415
,0.557
,0.791
,0.695
,0.633
,0.2486
,0.532
,0.331
,0.346
,0.961
,0.613
,0.3402
,0.983
,0.71
,0.73
,0.47
,0.62
,0.521
,0.369
,0.571
,0.604
,0.9271
,0.285
,0.2912
,0.548
,0.868
,0.496
,0.811
,0.756
,0.817
,0.752
,0.5516
,0.3578
,1.01
,0.741
,0.43
,0.526
,0.592
,0.905
,0.949
,0.4607
,0.3709
,0.8
,0.679
,0.5817
,0.55
,0.81
,0.95
,0.3373
,0.91
,0.263
,0.643
,0.691
,0.357
,0.721
,0.581
,0.6268
,0.818
,0.4627
,0.449
,0.688
,0.87
,0.5043
,0.591
,0.426
,0.329
,0.583
,0.519
,0.401
,0.205
,0.34
,0.436
,0.363
,0.436
,0.309
,0.342
,0.159
,0.332
,0.469
,0.239
,0.352
,0.612
,0.631
,0.645
,0.429
,0.497
,0.539
,0.561
,0.41
,0.412
,0.599
,0.619
,0.422
,0.54
,0.401
,0.218
,0.633
,0.383
,0.302
,0.34
,0.51
,0.421
,0.399
,0.493
,0.687
,0.687
,0.495
,0.603
,0.421
,0.348
,0.213
,0.344
,0.271
,0.564
,0.274
,0.181
,0.582
,0.68
,0.401
,0.416
,0.286
,0.562
,0.266
,0.314
,0.581
,0.463
,0.341
,0.631
,0.522
,0.368
,0.309
,0.528
,0.216
,0.284
,0.508
,0.781
,0.613
,0.278
,0.477
,0.95
,1.057
,0.816
,0.455
,1.02
,1.14
,0.854
,1.37
,0.975
,0.97
,0.74
,1.39
,0.46
,1.02
,1.12
,1.23
,1.19
,0.839
,1.01
,0.521
,0.475
,0.95
,1.3
,1.4
,1.305
,0.216
,0.735
,1.14
,1.307
,1.265
,0.67
,0.64
,1.34
,0.84
,0.935
,0.953
,1.124
,0.552
,0.671
,0.511
,1.03]
zpicks2 = [0.226143925635
,0.167114251513
,0.155866408242
,0.158669048234
,0.156270379521
,0.189597973592
,0.155790553548
,0.199535140164
,0.167648033398
,0.165271499987
,0.170400148449
,0.184736519598
,0.167818465426
,0.175738040875
,0.160110641392
,0.19147874559
,0.162480236617
,0.173537588946
,0.14402997371
,0.149773066605
,0.0905547451176
,0.10395529982
,0.108670309216
,0.110164462529
,0.175878781605
,0.184772695826
,0.21780640315
,0.17455423199
,0.163660035553
,0.190634594378
,0.179523627555
,0.167742220247
,0.171121785922
,0.212053128352
,0.207622958381
,0.231469837469
,0.250007930671
,0.230453431037
,0.216601021623
,0.239750047895
,0.270777214079
,0.224419994663
,0.233717264297
,0.247451826306
,0.220796341332
,0.308392297269
,0.221390433935
,0.247302669853
,0.223360506658
,0.25040542174
,0.160930618134
,0.0851859276563
,0.0831803243516
,0.0983616480332
,0.110959981085
,0.156387993518
,0.129040616704
,0.0863760987398
,0.199697931638
,0.176677068679
,0.164649900237
,0.209050001205
,0.232580853069
,0.215338744788
,0.164155647932
,0.210066600914
,0.173133567729
,0.183198718389
,0.201710026772
,0.187033320457
,0.169350596107
,0.172739361028
,0.236356060735
,0.205416560749
,0.171522593533
,0.177535782419
,0.167312607114
,0.206892325236
,0.190583331504
,0.191040914154
,0.179228108521
,0.172930780679
,0.16950794454
,0.189072007408
,0.181737645865
,0.167946113517
,0.185600393489
,0.18146401803
,0.166313999169
,0.182214269008
,0.209882805071
,0.168522663763
,0.168292526178
,0.165324028128
,0.155012862381
,0.162357046505
,0.153692702221
,0.164165671037
,0.152378958895
,0.188558589288
,0.184564810559
,0.162967150273
,0.176878840961
,0.171154394323
,0.17097815303
,0.174177026864
,0.169085209011
,0.154689426092
,0.178350941227
,0.190930640521
,0.163304382923
,0.180036324178
,0.189515900553
,0.162662308732
,0.193449865152
,0.171179303431
,0.161712348214
,0.16403631389
,0.162821861461
,0.207155778089
,0.176317048358
,0.168279067414
,0.188970108359
,0.191424646957
,0.165778427196
,0.18474579249
,0.176243118914
,0.162762916979
,0.195361399576
,0.215091501139
,0.169897387897
,0.178698403151
,0.157634614976
,0.160351225061
,0.191426088364
,0.167577472063
,0.182864275271
,0.164890394315
,0.162681757891
,0.179840694371
,0.211404589303
,0.166296974492
,0.158132776968
,0.186494110705
,0.187650195008
,0.181514604995
,0.165934850147
,0.207587590171
,0.190576794911
,0.170306255652
,0.290373531881
,0.174379321042
,0.172887454113
,0.16122683881
,0.175847772194
,0.166284241086
,0.207266601483
,0.170422924262
,0.171895520078
,0.20380344245
,0.120378117628
,0.121080634003
,0.135784293243
,0.12064387065
,0.150780305961
,0.215824546738
,0.127260196442
,0.120017551192
,0.157083138554
,0.215462354547
,0.220813701877
,0.126442356391
,0.164190866001
,0.204243298637
,0.197647141922
,0.199423960287
,0.246333608197
,0.218357189469
,0.117239118491
,0.142760435008
,0.140960118616
,0.114480614211
,0.120171462823
,0.202932960788
,0.12033040226
,0.149344667277
,0.152379820049
,0.114718538439
,0.118788973643
,0.134501367232
,0.127181263793
,0.112499009039
,0.16065563488
,0.116442235995
,0.128294661027
,0.121960815999
,0.17167768821
,0.114590986149
,0.114524011818
,0.119565350744
,0.150676952536
,0.122100457699
,0.140962475796
,0.120227176144
,0.114282533804
,0.113957220599
,0.133182501728
,0.136851565925
,0.118564236278
,0.115619678811
,0.124355782411
,0.12515209647
,0.126539062794
,0.122886919759
,0.113367509506
,0.116292465656
,0.170515825332
,0.143749208555
,0.116301202618
,0.157824605437
,0.167338045039
,0.153791748359
,0.113634577753
,0.202376651268
,0.129902757627
,0.152785577721
,0.113090217803
,0.117918087643
,0.111553736186
,0.112833879905
,0.208370772279
,0.125612605347
,0.121669684179
,0.119228104024
,0.171808246679
,0.110314772736
,0.20749905315
,0.135622441007
,0.117223091751
,0.13095448346
,0.177971613506
,0.12145112418
,0.118306255874
,0.192756486723
,0.132143781265
,0.155174252389
,0.146266612538
,0.139677729246
,0.142023222928
,0.120475217309
,0.125621852074
,0.11193654747
,0.114893026219
,0.140263606877
,0.154509467451
,0.118243252284
,0.212015765825
,0.133809098038
,0.144862922481
,0.11609784707
,0.117868646113
,0.115343190964
,0.135822682884
,0.120481707591
,0.118814561252
,0.129617980863
,0.13579473086
,0.124812931046
,0.117827563103
,0.135100374854
,0.12534004292
,0.120534772995
,0.119858620288
,0.148158100576
,0.121635351197
,0.199892747232
,0.203900920127
,0.1381090777
,0.124767998966
,0.132159093773
,0.176630102674
,0.152039612546
,0.114183512043
,0.117222451448
,0.145717293983
,0.188189991384
,0.178077262302
,0.152073477348
,0.154102535988
,0.36344840931
,0.395579278552
,0.395227786467
,0.320340946007
,0.333518201962
,0.466023796791
,0.408470067632
,0.32528185368
,0.322646999342
,0.818432977093
,0.363960881605
,0.476221516489
,0.566269702273
,0.511751148392
,0.45952604589
,0.653193752978
,0.426633125026
,0.668671821574
,0.619060682936
,0.522213466148
,0.943967733848
,0.452691548423
,1.02320057236
,0.727363486932
,0.436282215602
,0.521010819611
,0.909043307134
,0.526483896531
,0.465243749852
,0.598757469559
,0.635534544683
,0.449365583478
,0.452457762216
,0.476394763516
,0.471680427567
,0.51587600329
,0.529360126953
,0.564010349483
,0.483871492231
,0.503703328639
,0.652414722917
,0.610115606889
,0.532970328234
,0.494543625705
,0.506403802203
,0.2534244988
,0.302526442652
,0.717608197762
,0.250124518086
,0.298988010763
,0.29259857127
,0.746941209578
,0.440949943442
,0.295573375025
,0.37487817081
,0.232651192244
,0.207508245548
,0.386258428909
,0.238374333285
,0.597150330975
,0.531348208869
,0.395564414276
,0.223384059982
,0.243792731173
,0.207808738715
,0.197085550242
,0.264144092838
,0.201887086183
,0.0965091021297
,0.137773519339
,0.189960701162
,0.148525009826
,0.194442979286
,0.168225439785
,0.106490971154
,0.203457956158
,0.171350462953
,0.11058193483
,0.172897098508
,0.165250052616
,0.225125911159
,0.278620780131
,0.215627670046
,0.293679174465
,0.144017146978
,0.15737107879
,0.211176417394
,0.259277417037
,0.141995153329
,0.158128342293
,0.203347956975
,0.207672991039
,0.171303725502
,0.156001611413
,0.167913176451
,0.13852607255
,0.153627203737
,0.345401297016
,0.166247667787
,0.136350904009
,0.441320917548
,0.185960509148
,0.196706401553
,0.154395011149
,0.171049818244
,0.162167199818
,0.15383794776
,0.175419559321
,0.160870192314
,0.290419777723
,0.137402734867
,0.147860252643
,0.190445811927
,0.249257746544
,0.154020408449
,0.212840468683
,0.194569634979
,0.212381883478
,0.20904577048
,0.150278973145
,0.136956742249
,0.380891045794
,0.225350070805
,0.152228805806
,0.168434413662
,0.173662227482
,0.260816306234
,0.288339749303
,0.180730669073
,0.155882980598
,0.217280907113
,0.197639177097
,0.160984148284
,0.156209026367
,0.215181910354
,0.29513371916
,0.138055963607
,0.270806993269
,0.135415118781
,0.167688951886
,0.256534768993
,0.13857499344
,0.188595219767
,0.156453875009
,0.158306777046
,0.27125777036
,0.148626981409
,0.158124901298
,0.166956993834
,0.281601232201
,0.15061141667
,0.308183442634
,0.22595475705
,0.273050416654
,0.288536338632
,0.316956305777
,0.23053144449
,0.219199816494
,0.229672711685
,0.217391698785
,0.20891536552
,0.2294063655
,0.238102446996
,0.215459944632
,0.246358907571
,0.240709017167
,0.283065430909
,0.205242395733
,0.225170628892
,0.284735745242
,0.245972086709
,0.23539370649
,0.216241856053
,0.222751847877
,0.243223346147
,0.307329015793
,0.259435785458
,0.315186986894
,0.386382183638
,0.249438141428
,0.232044816512
,0.284960259957
,0.371779826612
,0.224122410473
,0.261565759588
,0.253453216931
,0.275320135034
,0.232764787176
,0.227776883863
,0.325882220619
,0.296231218043
,0.265794738732
,0.286826360161
,0.274033552724
,0.233155755193
,0.280738355979
,0.23465390113
,0.218430762841
,0.225411924471
,0.210577314192
,0.217182931008
,0.296179216596
,0.209762707255
,0.229723298662
,0.327843886551
,0.292732159101
,0.336024813008
,0.304687254477
,0.260077754247
,0.336087850286
,0.2525260008
,0.243685251902
,0.394367550265
,0.268565103689
,0.225338954944
,0.235138397674
,0.248621683092
,0.207836005267
,0.215775825102
,0.303416211397
,0.217189018845
,0.207444974165
,0.232554896589
,0.325104262678
,0.261568700364
,0.20080587099
,0.185467817051
,0.30169867661
,0.240606195136
,0.466035003635
,0.232259103398
,0.208732914262
,0.216820067565
,0.213134015411
,0.244915950922
,0.198885573363
,0.282255398884
,0.183311419886
,0.238383165249
,0.196808697491
,0.2262922033
,0.21187088553
,0.222899260788
,0.236655407187
,0.209549982904
,0.322525273833
,0.184037573567
,0.248548172881
,0.222117064104
,0.217324926249
,1.07792101581
,0.249576425145
,0.231050242645
,0.185908921392
,0.364467879127
,0.265400797402
,0.224124266884
,0.186583218843
,0.186744572784
,0.267856909216
,0.194732639155
,0.215358476354
,0.971680645776
,0.198690905622
,0.105933336834
,0.121926562564
,0.0899118005512
,0.142160782046]
mag = [
35.3355510594
,36.6754415683
,36.8168806729
,37.4403208027
,37.4803312569
,38.2312880884
,37.4880153326
,34.6528548205
,36.3351409577
,36.6329198059
,35.9028802813
,34.5849105142
,38.4564792878
,35.0742634622
,37.5857951062
,35.4739511994
,36.5637135925
,35.5474875749
,34.0357608093
,33.9288622487
,35.5987234724
,35.0588114943
,34.9628931602
,35.3604256119
,36.7224923466
,35.0989067311
,34.0945752677
,35.9784201813
,36.3736376383
,34.8477997627
,35.6443099344
,39.0474440635
,35.8166920463
,34.2137334689
,33.9968666808
,34.9662747364
,34.1782747061
,35.0760231692
,36.1310606977
,34.9290818261
,34.3341878854
,35.7209576638
,35.1655185904
,34.0023106368
,36.47359007
,34.3727214476
,35.079691262
,34.2567548437
,35.9695297947
,34.3359548469
,34.1611242178
,36.9482868597
,39.227736887
,36.3296338828
,38.8089373233
,38.8286718588
,38.9767208561
,37.6784334873
,37.0227081887
,35.9151154293
,36.3635574234
,34.0128062018
,34.9604436109
,34.1665118574
,35.9818330401
,34.2481675909
,35.6190668111
,34.8880671183
,33.8142603068
,34.7844502384
,35.6227445028
,35.5095000297
,34.2611733597
,35.9597476335
,35.6729412003
,35.2550362369
,35.9909847314
,34.428922484
,35.0398652996
,34.7556470033
,35.6821627437
,35.8268020378
,36.6034159881
,35.1708581962
,36.0054974141
,35.8323574252
,35.3540611638
,34.6494717372
,35.8862461317
,34.9213754902
,35.506995688
,35.8612155366
,35.5704019595
,35.942753551
,36.5991688426
,36.4032354598
,37.1076369832
,35.9894165447
,37.6492880383
,34.7125139232
,34.5973526852
,36.3669644792
,35.3618288289
,35.4093397274
,35.3032674902
,35.0233196812
,37.5632893538
,37.3353162366
,35.1846393132
,34.3947741135
,35.6474839975
,34.9211199992
,35.6747779958
,35.7976725473
,38.0527926414
,35.6124448286
,36.0723318412
,36.3883376162
,37.7252349619
,34.6566486547
,34.8677153894
,36.7199743885
,35.9144831533
,34.7285878322
,35.7737272495
,34.8346147484
,35.685542453
,35.9646644449
,34.7963237588
,34.2290778645
,36.1419516923
,35.192149373
,37.1375683747
,37.0511345128
,37.4743372416
,35.624004108
,36.6643549794
,35.5802476433
,35.9289937858
,35.066402753
,34.3704032433
,35.8655549482
,37.1708016197
,35.8135243381
,34.6890069038
,34.8412253332
,35.5819126039
,34.5091434423
,34.4813665248
,35.3141855009
,35.7917282129
,34.9096518953
,35.197313794
,35.969795049
,35.130731713
,35.880989798
,34.0863548459
,35.7604333404
,34.9393048575
,34.3767780517
,37.3262250234
,39.5592698132
,38.9057647374
,38.5020998563
,40.1393375624
,41.0666243477
,36.393636275
,38.5632028326
,40.6227568239
,41.1521879097
,41.6800684404
,39.0624777172
,40.7342862612
,40.786167145
,41.5943818036
,41.2798844609
,41.4958048947
,41.3186791849
,37.9849524764
,40.5403068912
,40.3106668232
,38.5741014523
,39.6049215973
,41.3392339457
,39.2819503516
,40.8215974583
,40.2129770735
,39.1872750155
,38.1877012201
,41.0542374079
,39.9663270466
,39.3027549618
,40.5699103652
,39.6471841809
,40.7841092312
,40.0333916493
,41.3167357808
,38.7531726515
,39.1375855648
,39.3037024249
,40.8596631914
,38.8075567467
,40.5592893806
,38.7165439115
,39.5156068266
,39.4205055796
,40.7874215484
,40.6700406667
,38.6596242466
,39.3773509139
,40.0315745848
,40.2148561368
,40.2851903681
,40.2680210961
,37.9819349544
,37.1535798244
,40.8489855129
,40.7532140999
,39.3285968248
,41.2409920908
,41.396805305
,41.0563346847
,39.3150656135
,41.6178771066
,39.5466952992
,40.8576976883
,38.7723677225
,40.2077951229
,38.7645868009
,39.3526221585
,41.8584359733
,40.0988547901
,40.7555023446
,40.5530673076
,41.5147227504
,38.8646925888
,41.7503175801
,41.0341628563
,39.4855106255
,40.7832381081
,40.9195402038
,39.8366280383
,37.3759156237
,41.4006012087
,40.6017587956
,41.045986724
,40.6829741028
,40.5154942024
,40.9271885736
,39.7779949203
,40.5175395642
,38.7102646797
,39.8151415007
,41.0285113011
,41.210916386
,37.8397856085
,41.9224021245
,39.8812126015
,40.9849963429
,39.9909774034
,39.7491170234
,38.2966544602
,41.0429300945
,38.6660439788
,38.0070616987
,39.9944125695
,39.9334159336
,38.9203810159
,40.2168645316
,41.1205696642
,40.2294909454
,39.9496020384
,38.1541733477
,41.4455391404
,39.9869791346
,41.8848542117
,42.1513901027
,40.6523430162
,39.7133625937
,40.0983712623
,41.1643837956
,41.7971037106
,38.7000698518
,39.1892893551
,40.5264639914
,41.9150400049
,41.3091071435
,40.4975321919
,40.3796999961
,41.3250227805
,43.2414539718
,42.5033997022
,40.978473029
,42.0839004042
,42.4243368959
,40.7410932476
,42.0682788206
,42.3772235339
,42.8096709251
,42.3725071675
,43.5528793931
,42.4507674693
,42.0658249423
,41.8473340603
,43.2099933879
,41.2583245282
,42.984995669
,41.7782581789
,41.9753335602
,43.2038030699
,40.2161567859
,44.3775825486
,44.1658958719
,39.3060887655
,41.9606135946
,44.5058457473
,43.3165540203
,41.8021270066
,42.2798419395
,43.163757796
,42.1349159624
,41.7890062516
,42.6854322147
,42.2304967413
,42.4252888348
,42.1742925555
,42.5543829775
,42.3331570972
,42.3212910372
,42.9963723488
,41.8196468352
,42.8441573957
,41.2107609985
,42.8072073246
,41.5784932046
,44.1117949333
,43.3110517598
,42.4736471276
,43.5101906132
,44.207875513
,44.0859183588
,43.7942852172
,42.7280926842
,43.5871786373
,41.0979300094
,41.4943125288
,43.6655368654
,43.3737484585
,43.3697792983
,43.6994307822
,43.2887235894
,43.5326743178
,39.4462680572
,40.8362001457
,39.7205596334
,40.7878025381
,40.3913025086
,42.4924155173
,43.2578441337
,42.7777247214
,42.1996725697
,43.1807709647
,42.3326264116
,42.012529312
,41.3484969437
,43.6120619833
,42.4390506937
,43.9375562435
,42.5493325602
,43.9018471523
,43.6192033448
,43.8124852428
,43.560407391
,41.7962473711
,42.9023716786
,44.0535603584
,43.3156514414
,41.8744251286
,42.5769689992
,43.5871216709
,43.2116658024
,43.0819715543
,40.6239072462
,42.5758707909
,41.0697589526
,41.3543412802
,44.2645401128
,42.9845810521
,41.3549032931
,44.1893249256
,43.0248980987
,43.3070687497
,42.1421923658
,43.0397842919
,42.1985723808
,41.6294017233
,42.4289650526
,42.5524984575
,43.9666494559
,40.8511146401
,40.8220972223
,42.2963775812
,43.5171093106
,42.2096742771
,43.4274096531
,43.841972097
,43.6787360537
,43.3334915384
,42.2898806519
,41.4214717885
,44.0371993632
,43.7160273544
,41.8054832197
,42.4095309248
,42.5652698448
,43.6621900525
,43.4319714347
,42.0972776673
,41.6640301668
,43.7102538752
,43.4861035947
,42.6983720673
,42.2953525972
,43.3737778575
,43.9924930818
,41.3217710555
,44.3268198751
,40.6244185037
,43.0199766985
,43.0987782021
,41.4503101861
,43.1874760163
,42.7733707088
,42.787445377
,43.4040974169
,42.0541814484
,42.0196027616
,43.0704958563
,44.2406074622
,42.365523828
,43.2254104475
,41.7735214129
,41.3426796309
,42.4054033796
,43.0843340799
,41.7052599054
,39.9080744849
,41.2288219413
,41.8946716216
,41.5682803687
,41.9353580779
,41.1833648256
,41.3961782167
,39.4107379798
,41.2676756269
,42.3702690971
,40.2574128205
,41.4318861711
,42.8246855768
,42.3865265731
,42.8351146004
,41.9059782715
,42.0884631073
,42.2377984208
,42.885843734
,41.3581075779
,41.4232510759
,42.7610577503
,43.0710915246
,41.7458788277
,42.5245932467
,42.5627722403
,40.0720006034
,42.210722156
,41.6620032351
,41.2957407355
,41.0820238262
,41.8887223665
,42.1362463861
,41.4864269384
,42.1528623126
,43.0153077416
,42.8464372814
,42.259591209
,42.657813727
,42.1973742088
,41.5970773005
,40.1001985435
,41.1905064352
,40.5328864644
,42.3924561345
,40.7263236177
,39.6956112205
,43.1797675167
,42.9107934322
,41.9343745013
,41.5401633967
,41.2113198625
,43.069622149
,40.3605248738
,41.2482461469
,43.6987176723
,41.9498405771
,41.0072769026
,42.8981977617
,42.6951558939
,41.4880607389
,40.876793648
,42.3751743659
,40.4037818654
,40.8493617042
,42.2037743798
,43.4540256112
,42.6305005668
,40.5752670828
,42.0650099983
,43.6483165605
,44.1666838278
,43.7075622463
,42.3383621215
,44.2946061494
,44.2601276384
,43.6206227915
,45.0317825245
,44.2680083865
,44.481706307
,43.3016559562
,44.8354256258
,42.143982369
,44.0808776922
,44.438447316
,44.9361444895
,44.2836832301
,43.4104143734
,44.835860554
,42.387525323
,42.1139203511
,43.8364762181
,44.8687556563
,42.9259149014
,44.6420998553
,40.5426584948
,43.1000774659
,44.2011896159
,45.1191615825
,44.8535903247
,43.1550267844
,42.9104214351
,45.0024532701
,43.5102466754
,43.5500467052
,44.2912529866
,44.5764033273
,42.5302511229
,42.9914166016
,42.388163454
,44.2516558833]
import matplotlib.pyplot as plt
plt.figure()
plt.scatter(zpicks1,mag)
plt.title('zpicks1')
plt.figure()
plt.scatter(zpicks2,mag)
plt.title('zpicks2')
zpicks1, mag = (list(t) for t in zip(*sorted(zip(zpicks1, mag))))
plt.figure()
plt.plot(zpicks1,mag)
plt.title('zpicks1 sorted')
mag = np.asarray(mag)
output1 = mag, zpicks1
# Relative path of output folder.
save_path = './data/'+'sorted1'
import pickle
pickle.dump(output1, open(save_path, 'wb'))
import results
mag1, zpicks1 = results.load('./data', 'sorted1')
plt.figure()
plt.title('model: sorted1'
+'\n Evolution of magnitude with redshift')
#data = plt.errorbar(zpicks1, mag2, yerr=0.7, fmt='.', alpha=0.3)
best_fit = plt.scatter(zpicks1, mag1, marker='.', lw='1', c='xkcd:tomato')
plt.ylabel('magnitude')
plt.xlabel('z')
plt.show(block=False) | [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.asarray",
"results.load",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((36606, 36618), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (36616, 36618), True, 'import matplotlib.pyplot as plt\n'), ((36619, 36644), 'matplotlib.pyplot.scatter', 'plt.scatter', (['zpicks1', 'mag'], {}), '(zpicks1, mag)\n', (36630, 36644), True, 'import matplotlib.pyplot as plt\n'), ((36644, 36664), 'matplotlib.pyplot.title', 'plt.title', (['"""zpicks1"""'], {}), "('zpicks1')\n", (36653, 36664), True, 'import matplotlib.pyplot as plt\n'), ((36666, 36678), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (36676, 36678), True, 'import matplotlib.pyplot as plt\n'), ((36679, 36704), 'matplotlib.pyplot.scatter', 'plt.scatter', (['zpicks2', 'mag'], {}), '(zpicks2, mag)\n', (36690, 36704), True, 'import matplotlib.pyplot as plt\n'), ((36704, 36724), 'matplotlib.pyplot.title', 'plt.title', (['"""zpicks2"""'], {}), "('zpicks2')\n", (36713, 36724), True, 'import matplotlib.pyplot as plt\n'), ((36794, 36806), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (36804, 36806), True, 'import matplotlib.pyplot as plt\n'), ((36807, 36829), 'matplotlib.pyplot.plot', 'plt.plot', (['zpicks1', 'mag'], {}), '(zpicks1, mag)\n', (36815, 36829), True, 'import matplotlib.pyplot as plt\n'), ((36829, 36856), 'matplotlib.pyplot.title', 'plt.title', (['"""zpicks1 sorted"""'], {}), "('zpicks1 sorted')\n", (36838, 36856), True, 'import matplotlib.pyplot as plt\n'), ((36864, 36879), 'numpy.asarray', 'np.asarray', (['mag'], {}), '(mag)\n', (36874, 36879), True, 'import numpy as np\n'), ((37067, 37100), 'results.load', 'results.load', (['"""./data"""', '"""sorted1"""'], {}), "('./data', 'sorted1')\n", (37079, 37100), False, 'import results\n'), ((37103, 37115), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (37113, 37115), True, 'import matplotlib.pyplot as plt\n'), ((37116, 37190), 'matplotlib.pyplot.title', 'plt.title', (['(\'model: sorted1\' + """\n Evolution of magnitude with redshift""")'], {}), '(\'model: sorted1\' + """\n Evolution of 
magnitude with redshift""")\n', (37125, 37190), True, 'import matplotlib.pyplot as plt\n'), ((37274, 37337), 'matplotlib.pyplot.scatter', 'plt.scatter', (['zpicks1', 'mag1'], {'marker': '"""."""', 'lw': '"""1"""', 'c': '"""xkcd:tomato"""'}), "(zpicks1, mag1, marker='.', lw='1', c='xkcd:tomato')\n", (37285, 37337), True, 'import matplotlib.pyplot as plt\n'), ((37338, 37361), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""magnitude"""'], {}), "('magnitude')\n", (37348, 37361), True, 'import matplotlib.pyplot as plt\n'), ((37362, 37377), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""z"""'], {}), "('z')\n", (37372, 37377), True, 'import matplotlib.pyplot as plt\n'), ((37378, 37399), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (37386, 37399), True, 'import matplotlib.pyplot as plt\n')] |
# %% [markdown]
# ##
import os
import time
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib.patches import Circle
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from scipy.integrate import tplquad
from scipy.special import comb
from scipy.stats import gaussian_kde
from sklearn.metrics import pairwise_distances
import pymaid
from graspy.utils import pass_to_ranks
from hyppo.ksample import KSample
from src.data import load_metagraph
from src.graph import MetaGraph, preprocess
from src.hierarchy import signal_flow
from src.io import readcsv, savecsv, savefig
from src.visualization import (
CLASS_COLOR_DICT,
adjplot,
barplot_text,
get_mid_map,
gridmap,
matrixplot,
remove_axis,
remove_spines,
set_axes_equal,
stacked_barplot,
)
from joblib import Parallel, delayed
np.random.seed(8888)
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
def stashfig(name, **kws):
savefig(name, foldername=FNAME, save_on=True, fmt="pdf", **kws)
def stashcsv(df, name, **kws):
    # Save a DataFrame as CSV under this script's output folder (FNAME).
    savecsv(df, name, foldername=FNAME, **kws)
# params
level = 4  # hierarchy level of the clustering to compare
class_key = f"lvl{level}_labels"  # metadata column holding the cluster labels
metric = "bic"
bic_ratio = 1
d = 8  # embedding dimension
method = "color_iso"  # embedding method used by the upstream clustering run
basename = f"-method={method}-d={d}-bic_ratio={bic_ratio}"
title = f"Method={method}, d={d}, BIC ratio={bic_ratio}"
exp = "137.2-BDP-omni-clust"  # experiment folder with the precomputed clustering
# load data
pair_meta = readcsv("meta" + basename, foldername=exp, index_col=0)
pair_meta["lvl0_labels"] = pair_meta["lvl0_labels"].astype(str)
pair_adj = readcsv("adj" + basename, foldername=exp, index_col=0)
pair_adj = pair_adj.values
mg = MetaGraph(pair_adj, pair_meta)
meta = mg.meta
# load connectors
connector_path = "maggot_models/data/processed/2020-05-08/connectors.csv"
connectors = pd.read_csv(connector_path)
# compare dendrite inputs
compartment = "dendrite"  # which neuron compartment's synapses to compare
direction = "postsynaptic"  # compare inputs (postsynaptic side) of each cluster
max_samples = 500  # max points per cluster fed to a single dcorr test
n_subsamples = 48 * 2  # number of random subsample repetitions
method = "subsample"  # NOTE: reuses/overwrites `method` (was the embedding method above)
def filter_connectors(connectors, ids, direction, compartment):
    """Select the connectors attached to `ids` on a given compartment.

    Keeps rows whose `{direction}_to` target is in `ids` and whose
    `{direction}_type` equals `compartment`, then drops duplicate
    connector_ids (keeping the first occurrence).
    """
    on_target = connectors[f"{direction}_to"].isin(ids)
    on_compartment = connectors[f"{direction}_type"] == compartment
    candidates = connectors[on_target & on_compartment]
    first_occurrence = ~candidates["connector_id"].duplicated(keep="first")
    return candidates[first_occurrence]
def euclidean(x):
    """Default euclidean distance function calculation"""
    # Full n-by-n pairwise Euclidean distance matrix via sklearn, using all
    # available cores; passed to hyppo's KSample as the distance computation.
    return pairwise_distances(X=x, metric="euclidean", n_jobs=-1)
def run_dcorr(data1, data2):
    # Two-sample distance-correlation (Dcorr) test from hyppo.
    # auto=True lets hyppo choose a fast approximation for large samples;
    # workers=-1 uses all cores. Returns (test statistic, p-value).
    ksamp = KSample("Dcorr", compute_distance=euclidean)
    stat, pval = ksamp.test(data1, data2, auto=True, workers=-1)
    return stat, pval
def spatial_dcorr(data1, data2, method="full", max_samples=1000, n_subsamples=5):
    """Distance-correlation test between two spatial point clouds.

    Parameters
    ----------
    data1, data2 : ndarray, shape (n_points, n_dims)
        Point coordinates for the two groups.
    method : {"full", "subsample", "max-d"}
        "full" tests the complete point clouds; "subsample" repeats the
        test on random subsets of at most `max_samples` points and returns
        the median statistic/p-value; "max-d" tests each coordinate
        dimension separately and returns the best one.
    max_samples : int
        Maximum points per group for the "subsample" method.
    n_subsamples : int
        Number of random repetitions for the "subsample" method.

    Returns
    -------
    (stat, p_val) : tuple of floats
        Test statistic and p-value; (nan, nan) if either input is empty.
    """
    if (len(data1) == 0) or (len(data2) == 0):
        return np.nan, np.nan
    if method == "subsample":
        if max(len(data1), len(data2)) < max_samples:
            # Both clouds are small enough to test directly.
            method = "full"
        else:
            all_shuffles = []
            for _ in range(n_subsamples):
                subsampled_data = []
                for data in [data1, data2]:
                    # Draw up to max_samples points uniformly from the FULL
                    # point cloud. (The original code clobbered the
                    # n_subsamples parameter here and sampled indices only
                    # from range(max_samples), i.e. only ever from the first
                    # max_samples rows of each cloud.)
                    n_draw = min(len(data), max_samples)
                    inds = np.random.choice(len(data), size=n_draw, replace=False)
                    subsampled_data.append(data[inds])
                all_shuffles.append(subsampled_data)
            # Run the repetitions in parallel and take the medians.
            outs = Parallel(n_jobs=-1)(delayed(run_dcorr)(*s) for s in all_shuffles)
            stats, p_vals = zip(*outs)
            stat = np.median(stats)
            p_val = np.median(p_vals)
    if method == "max-d":
        # Test each coordinate dimension independently; keep the strongest.
        max_dim_stat = -np.inf
        best_p_val = np.nan
        for dim in range(data1.shape[1]):
            dim_stat, dim_p_val = run_dcorr(data1[:, dim], data2[:, dim])
            if dim_stat > max_dim_stat:
                max_dim_stat = dim_stat
                best_p_val = dim_p_val
        stat = max_dim_stat
        p_val = best_p_val
    if method == "full":
        stat, p_val = run_dcorr(data1, data2)
    return stat, p_val
# %% [markdown]
# ##
# Pairwise spatial comparison: for every pair of clusters, test whether their
# dendritic input synapse locations come from the same spatial distribution.
class_labels = meta[class_key].unique()
p_vals = np.zeros((len(class_labels), len(class_labels)))
stats = np.zeros_like(p_vals)
cluster_meta = pd.DataFrame(index=class_labels)
total = comb(len(class_labels), k=2, exact=True)  # number of unordered pairs
count = 0
currtime = time.time()
for i, label1 in enumerate(class_labels):
    label1_meta = meta[meta[class_key] == label1]
    label1_ids = label1_meta.index.values
    label1_connectors = filter_connectors(
        connectors, label1_ids, direction, compartment
    )
    cluster_meta.loc[label1, "n_samples"] = len(label1_connectors)
    for j, label2 in enumerate(class_labels):
        if i < j:  # upper triangle only; the test is symmetric in its inputs
            print(f"Progress: {count / total:.2f}")
            label2_meta = meta[meta[class_key] == label2]
            label2_ids = label2_meta.index.values
            label2_connectors = filter_connectors(
                connectors, label2_ids, direction, compartment
            )
            # 3D synapse coordinates for each cluster
            data1 = label1_connectors[["x", "y", "z"]].values
            data2 = label2_connectors[["x", "y", "z"]].values
            stat, p_val = spatial_dcorr(
                data1,
                data2,
                method=method,
                max_samples=max_samples,
                n_subsamples=n_subsamples,
            )
            stats[i, j] = stat
            p_vals[i, j] = p_val
            count += 1
print(f"\n{time.time() - currtime} elapsed\n")
basename = (
    f"lvl={level}-compartment={compartment}-direction={direction}-method={method}"
)
if method == "subsample":
    basename += f"-n_sub={n_subsamples}-max_samp={max_samples}"
# save the (upper-triangular) p-value and test-statistic matrices
p_val_df = pd.DataFrame(
    data=p_vals, index=cluster_meta.index, columns=cluster_meta.index
)
stashcsv(p_val_df, "p-vals" + basename)
stats_df = pd.DataFrame(
    data=stats, index=cluster_meta.index, columns=cluster_meta.index
)
stashcsv(stats_df, "test-stats" + basename)
# plot -log10(p); infinite entries are masked out of the color scale
plot_p_vals = -np.log10(p_vals)
plt.figure()
adjplot(
    plot_p_vals,
    meta=cluster_meta,
    vmax=np.nanmax(plot_p_vals[~np.isinf(plot_p_vals)]),
    cbar_kws=dict(shrink=0.7),
    cbar=True,
    cmap="Reds",
)
stashfig("p-val-plot" + basename)
plt.figure(figsize=(10, 10))
sns.heatmap(
    stats,
    cmap="Reds",
    cbar_kws=dict(shrink=0.7),
    square=True,
    xticklabels=False,
    yticklabels=False,
)
stashfig("stats-plot" + basename)
| [
"numpy.log10",
"pandas.read_csv",
"src.io.savecsv",
"numpy.empty",
"numpy.random.seed",
"pandas.DataFrame",
"numpy.isinf",
"src.io.savefig",
"numpy.random.choice",
"hyppo.ksample.KSample",
"time.time",
"numpy.median",
"src.io.readcsv",
"sklearn.metrics.pairwise_distances",
"joblib.Parall... | [((962, 982), 'numpy.random.seed', 'np.random.seed', (['(8888)'], {}), '(8888)\n', (976, 982), True, 'import numpy as np\n'), ((1522, 1577), 'src.io.readcsv', 'readcsv', (["('meta' + basename)"], {'foldername': 'exp', 'index_col': '(0)'}), "('meta' + basename, foldername=exp, index_col=0)\n", (1529, 1577), False, 'from src.io import readcsv, savecsv, savefig\n'), ((1653, 1707), 'src.io.readcsv', 'readcsv', (["('adj' + basename)"], {'foldername': 'exp', 'index_col': '(0)'}), "('adj' + basename, foldername=exp, index_col=0)\n", (1660, 1707), False, 'from src.io import readcsv, savecsv, savefig\n'), ((1740, 1770), 'src.graph.MetaGraph', 'MetaGraph', (['pair_adj', 'pair_meta'], {}), '(pair_adj, pair_meta)\n', (1749, 1770), False, 'from src.graph import MetaGraph, preprocess\n'), ((1893, 1920), 'pandas.read_csv', 'pd.read_csv', (['connector_path'], {}), '(connector_path)\n', (1904, 1920), True, 'import pandas as pd\n'), ((4461, 4482), 'numpy.zeros_like', 'np.zeros_like', (['p_vals'], {}), '(p_vals)\n', (4474, 4482), True, 'import numpy as np\n'), ((4498, 4530), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'class_labels'}), '(index=class_labels)\n', (4510, 4530), True, 'import pandas as pd\n'), ((4602, 4613), 'time.time', 'time.time', ([], {}), '()\n', (4611, 4613), False, 'import time\n'), ((5949, 6028), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'p_vals', 'index': 'cluster_meta.index', 'columns': 'cluster_meta.index'}), '(data=p_vals, index=cluster_meta.index, columns=cluster_meta.index)\n', (5961, 6028), True, 'import pandas as pd\n'), ((6087, 6165), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'stats', 'index': 'cluster_meta.index', 'columns': 'cluster_meta.index'}), '(data=stats, index=cluster_meta.index, columns=cluster_meta.index)\n', (6099, 6165), True, 'import pandas as pd\n'), ((6249, 6261), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6259, 6261), True, 'import matplotlib.pyplot as plt\n'), ((6468, 6496), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (6478, 6496), True, 'import matplotlib.pyplot as plt\n'), ((992, 1018), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (1008, 1018), False, 'import os\n'), ((1070, 1133), 'src.io.savefig', 'savefig', (['name'], {'foldername': 'FNAME', 'save_on': '(True)', 'fmt': '"""pdf"""'}), "(name, foldername=FNAME, save_on=True, fmt='pdf', **kws)\n", (1077, 1133), False, 'from src.io import readcsv, savecsv, savefig\n'), ((1171, 1213), 'src.io.savecsv', 'savecsv', (['df', 'name'], {'foldername': 'FNAME'}), '(df, name, foldername=FNAME, **kws)\n', (1178, 1213), False, 'from src.io import readcsv, savecsv, savefig\n'), ((2543, 2597), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', ([], {'X': 'x', 'metric': '"""euclidean"""', 'n_jobs': '(-1)'}), "(X=x, metric='euclidean', n_jobs=-1)\n", (2561, 2597), False, 'from sklearn.metrics import pairwise_distances\n'), ((2641, 2685), 'hyppo.ksample.KSample', 'KSample', (['"""Dcorr"""'], {'compute_distance': 'euclidean'}), "('Dcorr', compute_distance=euclidean)\n", (2648, 2685), False, 'from hyppo.ksample import KSample\n'), ((6232, 6248), 'numpy.log10', 'np.log10', (['p_vals'], {}), '(p_vals)\n', (6240, 6248), True, 'import numpy as np\n'), ((3081, 3103), 'numpy.empty', 'np.empty', (['n_subsamples'], {}), '(n_subsamples)\n', (3089, 3103), True, 'import numpy as np\n'), ((3125, 3147), 'numpy.empty', 'np.empty', (['n_subsamples'], {}), '(n_subsamples)\n', (3133, 3147), True, 'import numpy as np\n'), ((3807, 3823), 'numpy.median', 'np.median', (['stats'], {}), '(stats)\n', (3816, 3823), True, 'import numpy as np\n'), ((3844, 3861), 'numpy.median', 'np.median', (['p_vals'], {}), '(p_vals)\n', (3853, 3861), True, 'import numpy as np\n'), ((3629, 3648), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (3637, 3648), False, 'from joblib import Parallel, delayed\n'), ((5711, 5722), 
'time.time', 'time.time', ([], {}), '()\n', (5720, 5722), False, 'import time\n'), ((3391, 3455), 'numpy.random.choice', 'np.random.choice', (['n_subsamples'], {'size': 'n_subsamples', 'replace': '(False)'}), '(n_subsamples, size=n_subsamples, replace=False)\n', (3407, 3455), True, 'import numpy as np\n'), ((6343, 6364), 'numpy.isinf', 'np.isinf', (['plot_p_vals'], {}), '(plot_p_vals)\n', (6351, 6364), True, 'import numpy as np\n'), ((3649, 3667), 'joblib.delayed', 'delayed', (['run_dcorr'], {}), '(run_dcorr)\n', (3656, 3667), False, 'from joblib import Parallel, delayed\n')] |
import sys, os, random, json, uuid, time, argparse, logging, logging.config
import numpy as np
from random import randint
from collections import defaultdict as ddict, Counter
from orderedset import OrderedSet
from pprint import pprint
# PyTorch related imports
import torch
from torch.nn import functional as F
from torch.nn.parameter import Parameter
from torch.nn.init import xavier_normal_, xavier_uniform_
from torch.nn import Parameter as Param
from torch.utils.data import DataLoader
from torch_scatter import scatter_add
np.set_printoptions(precision=4)  # compact numpy printing for logs/debug output
def set_gpu(gpus):
	"""
	Restrict CUDA to the given devices for this process.

	Parameters
	----------
	gpus: Comma-separated GPU id string (e.g. "0" or "0,1")

	Returns
	-------
	None
	"""
	os.environ.update({
		"CUDA_DEVICE_ORDER": "PCI_BUS_ID",
		"CUDA_VISIBLE_DEVICES": gpus,
	})
def get_logger(name, log_dir, config_dir):
	"""
	Creates a logger object
	Parameters
	----------
	name:           Name of the logger file
	log_dir:        Directory where logger file needs to be stored
	config_dir:     Directory from where log_config.json needs to be read
	Returns
	-------
	A logger object which writes to both file and stdout
	"""
	config_dict = json.load(open(config_dir + 'log_config.json'))
	# Route the configured file handler to <log_dir>/<name>, flattening any
	# '/' in the logger name so it stays a single file name.
	config_dict['handlers']['file_handler']['filename'] = log_dir + name.replace('/', '-')
	logging.config.dictConfig(config_dict)
	logger = logging.getLogger(name)
	# Also mirror all records to stdout in addition to the file handler.
	std_out_format = '%(asctime)s - [%(levelname)s] - %(message)s'
	consoleHandler = logging.StreamHandler(sys.stdout)
	consoleHandler.setFormatter(logging.Formatter(std_out_format))
	logger.addHandler(consoleHandler)
	return logger
def get_param(shape):
	"""Create a trainable Parameter of `shape`, Xavier-normal initialized."""
	tensor = torch.Tensor(*shape)
	weight = Parameter(tensor)
	xavier_normal_(weight.data)
	return weight
def com_mult(a, b):
	"""Complex multiplication; the last dim holds (real, imag) pairs."""
	a_re, a_im = a[..., 0], a[..., 1]
	b_re, b_im = b[..., 0], b[..., 1]
	real = a_re * b_re - a_im * b_im
	imag = a_re * b_im + a_im * b_re
	return torch.stack([real, imag], dim=-1)
def conj(a):
	"""Complex conjugate IN PLACE: negate the imaginary part (last-dim index 1)."""
	a[..., 1] *= -1
	return a
def cconv(a, b):
	# Circular convolution via FFT: irfft(rfft(a) * rfft(b)) along the last dim.
	# NOTE(review): torch.rfft/torch.irfft were removed in PyTorch 1.8; this
	# requires an older pinned torch (or porting to torch.fft) -- confirm the
	# project's torch version.
	return torch.irfft(com_mult(torch.rfft(a, 1), torch.rfft(b, 1)), 1, signal_sizes=(a.shape[-1],))
def ccorr(a, b):
#torch.irfft:从复到实的反离散傅里叶变换
#torch.rfft:从实到复的离散傅里叶变换
return torch.irfft(com_mult(conj(torch.rfft(a, 1)), torch.rfft(b, 1)), 1, signal_sizes=(a.shape[-1],)) | [
"logging.getLogger",
"logging.StreamHandler",
"logging.config.dictConfig",
"logging.Formatter",
"torch.stack",
"torch.Tensor",
"torch.rfft",
"torch.nn.init.xavier_normal_",
"numpy.set_printoptions"
] | [((531, 563), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(4)'}), '(precision=4)\n', (550, 563), True, 'import numpy as np\n'), ((1400, 1438), 'logging.config.dictConfig', 'logging.config.dictConfig', (['config_dict'], {}), '(config_dict)\n', (1425, 1438), False, 'import sys, os, random, json, uuid, time, argparse, logging, logging.config\n'), ((1452, 1475), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (1469, 1475), False, 'import sys, os, random, json, uuid, time, argparse, logging, logging.config\n'), ((1565, 1598), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (1586, 1598), False, 'import sys, os, random, json, uuid, time, argparse, logging, logging.config\n'), ((1795, 1821), 'torch.nn.init.xavier_normal_', 'xavier_normal_', (['param.data'], {}), '(param.data)\n', (1809, 1821), False, 'from torch.nn.init import xavier_normal_, xavier_uniform_\n'), ((1967, 2026), 'torch.stack', 'torch.stack', (['[r1 * r2 - i1 * i2, r1 * i2 + i1 * r2]'], {'dim': '(-1)'}), '([r1 * r2 - i1 * i2, r1 * i2 + i1 * r2], dim=-1)\n', (1978, 2026), False, 'import torch\n'), ((1631, 1664), 'logging.Formatter', 'logging.Formatter', (['std_out_format'], {}), '(std_out_format)\n', (1648, 1664), False, 'import sys, os, random, json, uuid, time, argparse, logging, logging.config\n'), ((1769, 1789), 'torch.Tensor', 'torch.Tensor', (['*shape'], {}), '(*shape)\n', (1781, 1789), False, 'import torch\n'), ((2141, 2157), 'torch.rfft', 'torch.rfft', (['a', '(1)'], {}), '(a, 1)\n', (2151, 2157), False, 'import torch\n'), ((2159, 2175), 'torch.rfft', 'torch.rfft', (['b', '(1)'], {}), '(b, 1)\n', (2169, 2175), False, 'import torch\n'), ((2336, 2352), 'torch.rfft', 'torch.rfft', (['b', '(1)'], {}), '(b, 1)\n', (2346, 2352), False, 'import torch\n'), ((2317, 2333), 'torch.rfft', 'torch.rfft', (['a', '(1)'], {}), '(a, 1)\n', (2327, 2333), False, 'import torch\n')] |
"""
This module provides a function to evaluate potential outliers in the aseg.stats
values.
"""
# ------------------------------------------------------------------------------
# subfunctions
def readAsegStats(path_aseg_stats):
    """
    Read an aseg.stats file into a dict of measure/structure name -> value.

    '# Measure <name>, ...' header lines contribute the global volume
    measures (value = 4th comma-separated field); segmentation table rows
    contribute per-structure volumes (Volume_mm3, 4th whitespace-separated
    field) keyed by the exact StructName (5th field).

    The previous implementation matched structure names by substring, so
    e.g. a 'Left-WM-hypointensities' row was captured by the earlier
    'WM-hypointensities' branch (overwriting its value) and the
    Left-/Right-/non- variant keys were never recorded. Exact field
    matching fixes that.

    Parameters
    ----------
    path_aseg_stats : str
        Path to an aseg.stats file produced by FreeSurfer.

    Returns
    -------
    dict
        Mapping of measure/structure name to volume (float).
    """
    # Output keys match the original implementation; only the two
    # '-to-eTIV' ratios are renamed with underscores.
    measure_keys = {
        'BrainSeg': 'BrainSeg',
        'BrainSegNotVent': 'BrainSegNotVent',
        'BrainSegNotVentSurf': 'BrainSegNotVentSurf',
        'VentricleChoroidVol': 'VentricleChoroidVol',
        'lhCortex': 'lhCortex',
        'rhCortex': 'rhCortex',
        'Cortex': 'Cortex',
        'lhCerebralWhiteMatter': 'lhCerebralWhiteMatter',
        'rhCerebralWhiteMatter': 'rhCerebralWhiteMatter',
        'CerebralWhiteMatter': 'CerebralWhiteMatter',
        'SubCortGray': 'SubCortGray',
        'TotalGray': 'TotalGray',
        'SupraTentorial': 'SupraTentorial',
        'SupraTentorialNotVent': 'SupraTentorialNotVent',
        'SupraTentorialNotVentVox': 'SupraTentorialNotVentVox',
        'Mask': 'Mask',
        'BrainSegVol-to-eTIV': 'BrainSegVol_to_eTIV',
        'MaskVol-to-eTIV': 'MaskVol_to_eTIV',
        'lhSurfaceHoles': 'lhSurfaceHoles',
        'rhSurfaceHoles': 'rhSurfaceHoles',
        'SurfaceHoles': 'SurfaceHoles',
        'EstimatedTotalIntraCranialVol': 'EstimatedTotalIntraCranialVol',
    }
    # Structures read from the segmentation table, matched exactly against
    # the StructName column.
    struct_names = {
        'Left-Lateral-Ventricle', 'Left-Inf-Lat-Vent',
        'Left-Cerebellum-White-Matter', 'Left-Cerebellum-Cortex',
        'Left-Thalamus-Proper', 'Left-Caudate', 'Left-Putamen',
        'Left-Pallidum', '3rd-Ventricle', '4th-Ventricle', 'Brain-Stem',
        'Left-Hippocampus', 'Left-Amygdala', 'CSF', 'Left-Accumbens-area',
        'Left-VentralDC', 'Left-vessel', 'Left-choroid-plexus',
        'Right-Lateral-Ventricle', 'Right-Inf-Lat-Vent',
        'Right-Cerebellum-White-Matter', 'Right-Cerebellum-Cortex',
        'Right-Thalamus-Proper', 'Right-Caudate', 'Right-Putamen',
        'Right-Pallidum', 'Right-Hippocampus', 'Right-Amygdala',
        'Right-Accumbens-area', 'Right-VentralDC', 'Right-vessel',
        'Right-choroid-plexus', '5th-Ventricle', 'WM-hypointensities',
        'Left-WM-hypointensities', 'Right-WM-hypointensities',
        'non-WM-hypointensities', 'Left-non-WM-hypointensities',
        'Right-non-WM-hypointensities', 'Optic-Chiasm', 'CC_Posterior',
        'CC_Mid_Posterior', 'CC_Central', 'CC_Mid_Anterior', 'CC_Anterior',
    }
    aseg = dict()
    with open(path_aseg_stats) as stats_file:
        for line in stats_file.read().splitlines():
            if line.startswith('# Measure '):
                fields = line.split(',')
                name = fields[0][len('# Measure '):].strip()
                if name in measure_keys:
                    aseg.update({measure_keys[name]: float(fields[3])})
            elif not line.startswith('#'):
                # Table rows: Index SegId NVoxels Volume_mm3 StructName ...
                fields = line.split()
                if len(fields) > 4 and fields[4] in struct_names:
                    aseg.update({fields[4]: float(fields[3])})
    # return
    return aseg
# ------------------------------------------------------------------------------
# outlier table
def outlierTable():
    """
    A function to provide normative values for Freesurfer segmentations and
    parcellations.
    """
    # Normative lower/upper volume bounds (as used downstream) per structure.
    bounds = {
        'Left-Accumbens-area':   (210.87844594754, 718.01022026916),
        'Right-Accumbens-area':  (304.86134907845, 751.63838456345),
        'Left-Amygdala':         (1179.73655974083, 1935.09415214717),
        'Right-Amygdala':        (1161.54746836742, 2002.14187676668),
        'Brain-Stem':            (18048.54263155760, 25300.51090318110),
        'Left-Caudate':          (2702.73311142764, 4380.54479618196),
        'Right-Caudate':         (2569.61140834210, 4412.61035536070),
        'Left-Hippocampus':      (3432.26483953083, 4934.43236139507),
        'Right-Hippocampus':     (3580.74371035841, 5067.49668145829),
        'Left-Pallidum':         (935.47686324176, 1849.42861796994),
        'Right-Pallidum':        (1078.14975428593, 1864.08951102817),
        'Left-Putamen':          (3956.23134409153, 6561.97642872937),
        'Right-Putamen':         (3768.88684356957, 6142.52870810603),
        'Left-Thalamus-Proper':  (6483.36121320953, 9489.46749012527),
        'Right-Thalamus-Proper': (6065.70220487045, 8346.88382091555),
        'Left-VentralDC':        (3182.42264293449, 4495.77412707751),
        'Right-VentralDC':       (3143.88280953869, 4407.63641978371),
    }
    # Expand to the {'lower': ..., 'upper': ...} layout callers expect.
    return {name: {'lower': lo, 'upper': hi} for name, (lo, hi) in bounds.items()}
# ------------------------------------------------------------------------------
# main function
def outlierDetection(subjects, subjects_dir, output_dir, outlierDict, min_no_subjects=10):
    """
    A function to evaluate potential outliers in the aseg.stats values.

    Three complementary checks are run for every subject:
    (1) non-parametric sample outliers (outside 1.5*IQR beyond the quartiles),
    (2) parametric sample outliers (outside mean +/- 2 SD of the group),
    (3) comparison against the normative ranges given in ``outlierDict``.
    Checks (1) and (2) are only run when at least ``min_no_subjects``
    subjects are available; otherwise their results are NaN.

    Four CSV files are written to ``output_dir``; the function returns a
    tuple of three dicts mapping subject -> number of flagged measures
    (non-parametric, parametric, norms).
    """
    # imports
    import os
    import csv
    import numpy as np
    import pandas as pd
    from qatoolspython.outlierDetection import readAsegStats
    # create a dictionary with all data from all subjects, and create a list of all available keys
    aseg = dict()
    all_aseg_keys = list()
    for subject in subjects:
        path_aseg_stats = os.path.join(subjects_dir, subject, "stats", "aseg.stats")
        aseg_stats = readAsegStats(path_aseg_stats)
        aseg.update({subject : aseg_stats})
        all_aseg_keys.extend(list(aseg_stats.keys()))
    all_aseg_keys = list(sorted(set(all_aseg_keys)))
    # compare individual data against sample statistics (if more than min_no_subjects cases)
    outlierSampleNonpar = dict()
    outlierSampleParam = dict()
    outlierSampleNonparNum = dict()
    outlierSampleParamNum = dict()
    if len(subjects) >= min_no_subjects:
        # compute means, sd, medians, and quantiles based on sample
        df = pd.DataFrame.from_dict(aseg).transpose()
        iqr = np.percentile(df, 75, axis=0) - np.percentile(df, 25, axis=0)
        # Tukey fences: quartiles +/- 1.5 * IQR
        sample_nonpar_lower = dict(zip(df.columns, np.percentile(df, 25, axis=0) - 1.5 * iqr))
        sample_nonpar_upper = dict(zip(df.columns, np.percentile(df, 75, axis=0) + 1.5 * iqr))
        # parametric fences: mean +/- 2 standard deviations
        sample_param_lower = dict(np.mean(df, axis=0) - 2 * np.std(df, axis=0))
        sample_param_upper = dict(np.mean(df, axis=0) + 2 * np.std(df, axis=0))
        # compare individual data against sample statistics
        for subject in aseg:
            nonparDict = dict()
            paramDict = dict()
            for key in aseg[subject]:
                if (aseg[subject][key] < sample_nonpar_lower[key]) or (aseg[subject][key] > sample_nonpar_upper[key]):
                    nonparDict.update({key : True })
                else:
                    nonparDict.update({key : False })
                if (aseg[subject][key] < sample_param_lower[key]) or (aseg[subject][key] > sample_param_upper[key]):
                    paramDict.update({key : True })
                else:
                    paramDict.update({key : False })
            outlierSampleNonpar.update({subject : nonparDict})
            outlierSampleParam.update({subject: paramDict})
            outlierSampleNonparNum.update({subject : np.sum(list(nonparDict.values()))})
            outlierSampleParamNum.update({subject : np.sum(list(paramDict.values()))})
    else:
        # too few subjects for sample statistics: record NaN everywhere
        for subject in aseg:
            nonparDict = dict()
            paramDict = dict()
            for key in aseg[subject]:
                nonparDict.update({key : np.nan })
                paramDict.update({key : np.nan })
            outlierSampleNonpar.update({subject : nonparDict})
            outlierSampleParam.update({subject: paramDict})
            outlierSampleNonparNum.update({subject: np.nan})
            outlierSampleParamNum.update({subject: np.nan})
    # compare individual data against normative values
    outlierNorms = dict()
    outlierNormsNum = dict()
    for subject in aseg:
        normsDict = dict()
        for key in aseg[subject]:
            if key in outlierDict:
                if (aseg[subject][key] < outlierDict[key]['lower']) or (aseg[subject][key] > outlierDict[key]['upper']):
                    normsDict.update({key: True})
                else:
                    normsDict.update({key: False})
            else:
                # no normative range for this measure
                normsDict.update({key: np.nan})
        outlierNorms.update({subject : normsDict})
        outlierNormsNum.update({subject: np.nansum(list(normsDict.values()))})
    # write to csv files
    asegFieldnames = ['subject']
    asegFieldnames.extend(all_aseg_keys)
    with open(os.path.join(output_dir, 'all.aseg.stats'), 'w') as datafile:
        csvwriter = csv.DictWriter(datafile, fieldnames=asegFieldnames, delimiter=',',quotechar='"', quoting=csv.QUOTE_MINIMAL)
        csvwriter.writeheader()
        for subject in sorted(list(aseg.keys())):
            # NOTE: tmp aliases aseg[subject]; the update below also adds the
            # 'subject' key to the stored dict itself.
            tmp = aseg[subject]
            tmp.update({'subject' : subject})
            csvwriter.writerow(tmp)
    with open(os.path.join(output_dir, 'all.outliers.sample.nonpar.stats'), 'w') as datafile:
        csvwriter = csv.DictWriter(datafile, fieldnames=asegFieldnames, delimiter=',',quotechar='"', quoting=csv.QUOTE_MINIMAL)
        csvwriter.writeheader()
        for subject in sorted(list(outlierSampleNonpar.keys())):
            tmp = outlierSampleNonpar[subject]
            tmp.update({'subject' : subject})
            csvwriter.writerow(tmp)
    with open(os.path.join(output_dir, 'all.outliers.sample.param.stats'), 'w') as datafile:
        csvwriter = csv.DictWriter(datafile, fieldnames=asegFieldnames, delimiter=',',quotechar='"', quoting=csv.QUOTE_MINIMAL)
        csvwriter.writeheader()
        for subject in sorted(list(outlierSampleParam.keys())):
            tmp = outlierSampleParam[subject]
            tmp.update({'subject' : subject})
            csvwriter.writerow(tmp)
    with open(os.path.join(output_dir, 'all.outliers.norms.stats'), 'w') as datafile:
        csvwriter = csv.DictWriter(datafile, fieldnames=asegFieldnames, delimiter=',',quotechar='"', quoting=csv.QUOTE_MINIMAL)
        csvwriter.writeheader()
        for subject in sorted(list(outlierNorms.keys())):
            tmp = outlierNorms[subject]
            tmp.update({'subject' : subject})
            csvwriter.writerow(tmp)
    # return
    return outlierSampleNonparNum, outlierSampleParamNum, outlierNormsNum
| [
"csv.DictWriter",
"numpy.mean",
"os.path.join",
"pandas.DataFrame.from_dict",
"numpy.std",
"numpy.percentile",
"qatoolspython.outlierDetection.readAsegStats"
] | [((11106, 11164), 'os.path.join', 'os.path.join', (['subjects_dir', 'subject', '"""stats"""', '"""aseg.stats"""'], {}), "(subjects_dir, subject, 'stats', 'aseg.stats')\n", (11118, 11164), False, 'import os\n'), ((11186, 11216), 'qatoolspython.outlierDetection.readAsegStats', 'readAsegStats', (['path_aseg_stats'], {}), '(path_aseg_stats)\n', (11199, 11216), False, 'from qatoolspython.outlierDetection import readAsegStats\n'), ((14560, 14672), 'csv.DictWriter', 'csv.DictWriter', (['datafile'], {'fieldnames': 'asegFieldnames', 'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(datafile, fieldnames=asegFieldnames, delimiter=\',\',\n quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (14574, 14672), False, 'import csv\n'), ((14979, 15091), 'csv.DictWriter', 'csv.DictWriter', (['datafile'], {'fieldnames': 'asegFieldnames', 'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(datafile, fieldnames=asegFieldnames, delimiter=\',\',\n quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (14993, 15091), False, 'import csv\n'), ((15427, 15539), 'csv.DictWriter', 'csv.DictWriter', (['datafile'], {'fieldnames': 'asegFieldnames', 'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(datafile, fieldnames=asegFieldnames, delimiter=\',\',\n quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (15441, 15539), False, 'import csv\n'), ((15866, 15978), 'csv.DictWriter', 'csv.DictWriter', (['datafile'], {'fieldnames': 'asegFieldnames', 'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(datafile, fieldnames=asegFieldnames, delimiter=\',\',\n quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (15880, 15978), False, 'import csv\n'), ((11784, 11813), 'numpy.percentile', 'np.percentile', (['df', '(75)'], {'axis': '(0)'}), '(df, 75, axis=0)\n', (11797, 11813), True, 'import numpy as np\n'), ((11816, 11845), 'numpy.percentile', 'np.percentile', (['df', '(25)'], {'axis': 
'(0)'}), '(df, 25, axis=0)\n', (11829, 11845), True, 'import numpy as np\n'), ((14478, 14520), 'os.path.join', 'os.path.join', (['output_dir', '"""all.aseg.stats"""'], {}), "(output_dir, 'all.aseg.stats')\n", (14490, 14520), False, 'import os\n'), ((14879, 14939), 'os.path.join', 'os.path.join', (['output_dir', '"""all.outliers.sample.nonpar.stats"""'], {}), "(output_dir, 'all.outliers.sample.nonpar.stats')\n", (14891, 14939), False, 'import os\n'), ((15328, 15387), 'os.path.join', 'os.path.join', (['output_dir', '"""all.outliers.sample.param.stats"""'], {}), "(output_dir, 'all.outliers.sample.param.stats')\n", (15340, 15387), False, 'import os\n'), ((15774, 15826), 'os.path.join', 'os.path.join', (['output_dir', '"""all.outliers.norms.stats"""'], {}), "(output_dir, 'all.outliers.norms.stats')\n", (15786, 15826), False, 'import os\n'), ((11728, 11756), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['aseg'], {}), '(aseg)\n', (11750, 11756), True, 'import pandas as pd\n'), ((12072, 12091), 'numpy.mean', 'np.mean', (['df'], {'axis': '(0)'}), '(df, axis=0)\n', (12079, 12091), True, 'import numpy as np\n'), ((12152, 12171), 'numpy.mean', 'np.mean', (['df'], {'axis': '(0)'}), '(df, axis=0)\n', (12159, 12171), True, 'import numpy as np\n'), ((11898, 11927), 'numpy.percentile', 'np.percentile', (['df', '(25)'], {'axis': '(0)'}), '(df, 25, axis=0)\n', (11911, 11927), True, 'import numpy as np\n'), ((11993, 12022), 'numpy.percentile', 'np.percentile', (['df', '(75)'], {'axis': '(0)'}), '(df, 75, axis=0)\n', (12006, 12022), True, 'import numpy as np\n'), ((12098, 12116), 'numpy.std', 'np.std', (['df'], {'axis': '(0)'}), '(df, axis=0)\n', (12104, 12116), True, 'import numpy as np\n'), ((12178, 12196), 'numpy.std', 'np.std', (['df'], {'axis': '(0)'}), '(df, axis=0)\n', (12184, 12196), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding:utf8
# -*- coding: utf-8 -*-
"""
Main Program: Run MODIS AGGREGATION IN PARALLEL
Created on 2019
@author: <NAME>
"""
import os
import sys
import h5py
import timeit
import random
import numpy as np
from mpi4py import MPI
from netCDF4 import Dataset
def read_filelist(loc_dir, prefix, yr, day, fileformat):
	"""
	List the files in `loc_dir` matching <prefix><yr><day>*.<fileformat>.

	Replaces the original `os.popen("ls ...")` shell call with glob:
	portable (no `ls` dependency), no shell involved, and safe for paths
	the shell would mangle. Results are sorted lexicographically, matching
	the order `ls` produced.

	Returns
	-------
	numpy.ndarray of matching file paths (empty array if none match).
	"""
	import glob
	pattern = loc_dir + prefix + yr + day + "*." + fileformat
	return np.array(sorted(glob.glob(pattern)))
def read_MODIS(fname1,fname2,verbose=False): # READ THE HDF FILE
    """Read one MODIS granule pair.

    Parameters
    ----------
    fname1 : str
        Path to the MYD06_L2 (cloud product) granule; supplies 'Cloud_Mask_1km'.
    fname2 : str
        Path to the matching MYD03 (geolocation) granule; supplies lat/lon.
    verbose : bool
        Currently unused.

    Returns
    -------
    (lat, lon, CM) : tuple of 2-D numpy arrays
        Latitude, longitude, and the 2-bit cloud-mask category extracted
        from bits 1-2 of the first cloud-mask byte.
    """
    # Read the cloud mask from MYD06_L2 product
    ncfile=Dataset(fname1,'r')
    CM1km = np.array(ncfile.variables['Cloud_Mask_1km'])
    # Keep only bits 1-2 of byte 0: the cloudiness category (integer 0..3).
    CM = (np.array(CM1km[:,:,0],dtype='byte') & 0b00000110) >>1
    ncfile.close()
    # Read the geolocation data from MYD03 product
    ncfile=Dataset(fname2,'r')
    lat = np.array(ncfile.variables['Latitude'])
    lon = np.array(ncfile.variables['Longitude'])
    attr_lat = ncfile.variables['Latitude']._FillValue
    attr_lon = ncfile.variables['Longitude']._FillValue
    # Use _FillValue to remove fill data in lat & lon.
    # NOTE(review): CM was built with an integer ('byte') dtype, so the
    # assignments of 0.5 below truncate to 0 — fill pixels land in category 0
    # instead of the intended out-of-range sentinel. Confirm whether CM
    # should be cast to float first so the 0.5 marker survives.
    lat[np.where(lat == attr_lat)] = 0.0
    lon[np.where(lat == attr_lat)] = 0.0
    CM [np.where(lat == attr_lat)] = 0.5 #which will not be identified by lines 80-83
    lat[np.where(lon == attr_lon)] = 0.0
    lon[np.where(lon == attr_lon)] = 0.0
    CM [np.where(lon == attr_lon)] = 0.5 #which will not be identified by lines 80-83
    ncfile.close()
    return lat,lon,CM
def run_modis_aggre(fname1,fname2,NTA_lats,NTA_lons,grid_lon,gap_x,gap_y,hdfs):
    # This function is the data aggregation loops by number of files.
    #
    # Reads each (MYD06, MYD03) granule pair indexed by `hdfs`, bins every
    # pixel into a regular lat/lon grid, and accumulates per-grid-cell cloud
    # fraction statistics.
    #
    # NOTE(review): this function reads AND mutates the module-level arrays
    # Count, Fraction_Min, Fraction_Max, TOT_Fraction and TOT_Fraction_sq
    # (created in the __main__ block) and then returns them — it only works
    # when called after that setup. Consider passing them in explicitly.
    #
    # Returns
    # -------
    # (Count, Fraction_Min, Fraction_Max, TOT_Fraction, TOT_Fraction_sq)
    #     Flat per-grid-cell accumulators (1-D, length grid_lat*grid_lon).
    hdfs = np.array(hdfs)
    for j in hdfs:
        print("File Number: {} / {}".format(j,hdfs[-1]))
        # Read Level-2 MODIS data
        lat,lon,CM = read_MODIS(fname1[j],fname2[j])
        #print(lat.shape,lon.shape,CM.shape)
        # Restrain lat & lon & variables in the required region
        res_idx = np.where((lat > NTA_lats[0]) & (lat < NTA_lats[1]) & (lon > NTA_lons[0]) & (lon < NTA_lons[1]))
        #print(res_idx)
        CM  = CM [res_idx]
        lat = lat[res_idx]
        lon = lon[res_idx]
        # Ravel the 2-D data to 1-D array
        lat = lat.ravel()
        lon = lon.ravel()
        CM  = CM.ravel()
        # Locate the lat lon index into 3-Level frid box
        idx_lon = ((lon-NTA_lons[0])/gap_x).astype(int)
        idx_lat = ((lat-NTA_lats[0])/gap_y).astype(int)
        # Flatten the (row, col) grid index into a single cell id.
        latlon_index=(idx_lat*grid_lon)+idx_lon
        latlon_index_unique = np.unique(latlon_index)
        for i in np.arange(latlon_index_unique.size):
            #-----loop through all the grid boxes ocupied by this granule------#
            z=latlon_index_unique[i]
            if((z >= 0) & (z < len(Count))):
                # Pixels with CM in 0..3 count toward the total; CM <= 1
                # (cloudy / probably cloudy) count as cloud.
                TOT_pix = np.sum(CM[np.where(latlon_index == z)]>=0).astype(float)
                CLD_pix = np.sum(CM[np.where(latlon_index == z)]<=1).astype(float)
                Fraction = (CLD_pix / TOT_pix)
                # Min and Max seen so far for this grid cell
                if Fraction_Min[z] > Fraction:
                    Fraction_Min[z] = Fraction
                if Fraction_Max[z] < Fraction:
                    Fraction_Max[z] = Fraction
                # Total and Count for Mean
                TOT_Fraction[z] += Fraction
                Count[z] += 1
                # Sum of squares, for the standard deviation later
                TOT_Fraction_sq[z] += Fraction**2
    return (Count,Fraction_Min,Fraction_Max,TOT_Fraction,TOT_Fraction_sq)
#Mean and std. computations. minmax() is called inside this function
#def MeanStd(data,z,latlon_index,M):
# #Both mean and stdd
# #print(key)
# val=data[np.where(latlon_index == z)]
# M.XXX_pix[key][z]=M.XXX_pix[key][z]+np.sum(val)
# M.XXX_pixSq[key][z]=M.XXX_pixSq[key][z]+np.sum(val**2)
# minmax(val,z,M)
def addGridEntry(f,name,units,long_name,data):
    """Write one gridded variable into an open HDF5 file.

    Creates a dataset called ``name`` holding ``data``, labels its two
    dimensions ``lat_bnd``/``lon_bnd``, and attaches ``units`` and
    ``long_name`` attributes.

    Example
    -------
    addGridEntry(f, 'CF', 'Fraction', 'Cloud_Fraction', total_cloud_fraction)
    """
    dataset = f.create_dataset(name, data=data)
    # Label the two axes so downstream tools can match them to the
    # lat_bnd / lon_bnd coordinate datasets.
    for axis, label in enumerate(('lat_bnd', 'lon_bnd')):
        dataset.dims[axis].label = label
    dataset.attrs['units'] = units
    dataset.attrs['long_name'] = long_name
if __name__ =='__main__':
    # This is the main program for using concurrent to speed up the whole process.
    # Each MPI rank aggregates a slice of the day's granules; rank 0 collects
    # the partial sums and writes the Level-3 HDF5 product.
    #-------------STEP 0: Read the input from User --------
    # checking user input
    #if len(sys.argv) != 7:
    #    print("Wrong user input")
    #    print("usage: python deliverable_code_3_test.py <True/False> <True/False> <True/False> <True/False> <True/False> <Bin Size>")
    #    sys.exit()
    #else:
    #    # pass system arguments to the function
    #    minimum = sys.argv[1]
    #    maximum = sys.argv[2]
    #    mean = sys.argv[3]
    #    std = sys.argv[4]
    #    count = sys.argv[5]
    #    binsize = sys.argv[6]
    #-------------STEP 1: Set up the specific directory --------
    MYD06_dir= '/umbc/xfs1/cybertrn/common/Data/Satellite_Observations/MODIS/MYD06_L2/'
    MYD06_prefix = 'MYD06_L2.A'
    MYD03_dir= '/umbc/xfs1/cybertrn/common/Data/Satellite_Observations/MODIS/MYD03/'
    MYD03_prefix = 'MYD03.A'
    fileformat = 'hdf'
    #-------------STEP 2: Set up spactial and temporal resolution----------
    NTA_lats = [-90,90]   #[ 0,40] #[-90,90] #[-30,30]
    NTA_lons = [-180,180] #[-40,60] #[-180,180] #[-60,60]
    gap_x, gap_y = 1,1 #0.625,0.5
    # The grid spacing must divide the region size exactly.
    if ((NTA_lons[-1]-NTA_lons[0])%gap_x != 0) | ((NTA_lats[-1]-NTA_lats[0])%gap_y != 0):
        print("Grid size should be dividable by the dimension of the selected region.")
        print("If you choose the region of latitude from -40 to 40, then you gird size (gap_y) should be dividable by 80.")
        print("If you choose the region of longitude from 20 to 35, then you gird size (gap_x) should be dividable by 55.")
        print("Please try again!")
        sys.exit()
    map_lon = np.arange(NTA_lons[0],NTA_lons[1],gap_x)
    map_lat = np.arange(NTA_lats[0],NTA_lats[1],gap_y)
    Lon,Lat = np.meshgrid(map_lon,map_lat)
    # NOTE(review): np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # these two lines will raise AttributeError on modern NumPy — use int().
    grid_lon=np.int((NTA_lons[-1]-NTA_lons[0])/gap_x)
    grid_lat=np.int((NTA_lats[-1]-NTA_lats[0])/gap_y)
    #print(grid_lon,grid_lat,grid_lat*grid_lon)
    # Flat per-grid-cell accumulators, mutated inside run_modis_aggre().
    Count = np.zeros(grid_lat*grid_lon)
    Fraction_Min = np.zeros(grid_lat*grid_lon) + np.inf
    Fraction_Max = np.zeros(grid_lat*grid_lon) - np.inf
    TOT_Fraction = np.zeros(grid_lat*grid_lon)
    TOT_Fraction_sq = np.zeros(grid_lat*grid_lon)
    fname1,fname2 = [],[]
    # Read all files in a month (in this case: January)
    # Read the filename list for different time period
    years = np.array([2008])
    months = np.array([1])
    days = np.arange(1,2,dtype=np.int)
    for yr,day in zip(years,days):
        yc ='%04i' % yr
        dc ='%03i' % day
        fname_tmp1 = read_filelist(MYD06_dir,MYD06_prefix,yc,dc,fileformat)
        fname_tmp2 = read_filelist(MYD03_dir,MYD03_prefix,yc,dc,fileformat)
        fname1 = np.append(fname1,fname_tmp1)
        fname2 = np.append(fname2,fname_tmp2)
    # Initiate MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    random.seed(rank)
    # Distribute the number of files into ppns for MPI.
    # `remain` pads (or trims) the file count so it splits evenly by rank;
    # the last rank drops the padded / picks up the leftover indices.
    remain = size-len(fname1)%size
    ppn_file = (len(fname1)+remain)/size
    if ppn_file >= remain:
        # Distribute the day's loops into MPI ppns
        files = np.arange(len(fname1)+remain)
        tasks = np.array(np.split(files,size))
        hdfs = tasks[rank]
        if rank == (size-1):
            hdfs = np.delete(hdfs, np.arange(len(hdfs)-remain,len(hdfs)))
    else:
        # Distribute the day's loops into MPI ppns
        files = np.arange(len(fname1)-len(fname1)%size)
        tasks = np.array(np.split(files,size))
        hdfs = tasks[rank]
        if rank == (size-1):
            hdfs = np.append(hdfs, np.arange(len(files),len(files)+len(fname1)%size))
    print("process {} aggregating files from {} to {}...".format(rank, hdfs[0],hdfs[-1]))
    # Start counting operation time
    start_time = timeit.default_timer()
    # results rows follow run_modis_aggre's return order:
    # 0=Count, 1=Fraction_Min, 2=Fraction_Max, 3=TOT_Fraction, 4=TOT_Fraction_sq
    results = np.asarray(run_modis_aggre(fname1,fname2,NTA_lats,NTA_lons,grid_lon,gap_x,gap_y,hdfs))
    if rank == 0:
        Count += results[0,:]
        Fraction_Min = results[1,:]
        Fraction_Max = results[2,:]
        TOT_Fraction += results[3,:]
        TOT_Fraction_sq += results[4,:]
        # NOTE(review): the row indices inside this receive loop disagree with
        # the block just above (Count uses row 1 here but row 0 above;
        # TOT_Fraction uses row 0 here but row 3 above; Min/Max are shifted
        # by one). One of the two mappings must be wrong — verify against
        # run_modis_aggre's return order before trusting multi-rank output.
        for i in range(1,size):
            recv_req = comm.Irecv(results,source=i, tag=0)
            recv_req.wait()
            Count = Count + results[1,:]
            Fraction_Min = np.dstack((Fraction_Min,results[2,:]))
            Fraction_Max = np.dstack((Fraction_Max,results[3,:]))
            TOT_Fraction = TOT_Fraction + results[0,:]
            TOT_Fraction_sq = TOT_Fraction_sq + results[4,:]
        # Compute the mean cloud fraction & Statistics (Include Min & Max & Standard deviation)
        Mean_Fraction = (TOT_Fraction / Count)
        # NOTE(review): despite the name, this is the variance
        # (E[x^2] - mean^2); no square root is taken anywhere.
        Std_Fraction = (TOT_Fraction_sq / Count) - Mean_Fraction**2
        Count = Count.reshape([grid_lat,grid_lon])
        Mean_Fraction = Mean_Fraction.reshape([grid_lat,grid_lon])
        Std_Fraction = Std_Fraction.reshape([grid_lat,grid_lon])
        # NOTE(review): np.min/np.max with axis=2 assume the dstack above ran
        # at least once (size >= 2). With a single MPI rank Fraction_Min/Max
        # are still 1-D and these lines raise an axis error.
        Fraction_Min = np.min(Fraction_Min,axis=2).reshape([grid_lat,grid_lon])
        Fraction_Max = np.max(Fraction_Max,axis=2).reshape([grid_lat,grid_lon])
        end_time = timeit.default_timer()
        print('Mean_Fraction:')
        print( Mean_Fraction )
        print ("Operation Time in {:7.2f} seconds".format(end_time - start_time))
        # Create file to store the result
        #np.savetxt("cloud_fraction_mean.dat", Mean_Fraction, fmt="%10.4f")
        #np.savetxt("cloud_fraction_min.dat" , Fraction_Min , fmt="%10.4f")
        #np.savetxt("cloud_fraction_max.dat" , Fraction_Max , fmt="%10.4f")
        #np.savetxt("cloud_fraction_std.dat" , Std_Fraction , fmt="%10.4f")
        #np.savetxt("cloud_fraction_pix_count.dat", Count , fmt="%10d")
        #np.savetxt("test_geolocation_lat.dat" , Lat, fmt="%10.4f")
        #np.savetxt("test_geolocation_lon.dat" , Lon, fmt="%10.4f")
        # Create HDF5 file to store the result
        l3name='MOD08_M3'+'A{:04d}{:02d}'.format(years[0],months[0])
        ff=h5py.File(l3name+'.hdf5','w')
        PC=ff.create_dataset('lat_bnd',data=map_lat)
        PC.attrs['units']='degrees'
        PC.attrs['long_name']='Latitude_boundaries'
        PC=ff.create_dataset('lon_bnd',data=map_lon)
        PC.attrs['units']='degrees'
        PC.attrs['long_name']='Longitude_boundaries'
        addGridEntry(ff,'Cloud_Fraction_Mean'    ,'none','Cloud Fraction from Cloud Mask (cloudy & prob cloudy)',Mean_Fraction)
        addGridEntry(ff,'Cloud_Fraction_Standard_Deviation','none','Cloud Fraction from Cloud Mask (cloudy & prob cloudy)',Std_Fraction )
        addGridEntry(ff,'Cloud_Fraction_Minimum' ,'none','Cloud Fraction from Cloud Mask (cloudy & prob cloudy)',Fraction_Min )
        addGridEntry(ff,'Cloud_Fraction_Maximum' ,'none','Cloud Fraction from Cloud Mask (cloudy & prob cloudy)',Fraction_Max )
        addGridEntry(ff,'Cloud_Fraction_Pixel_Counts'  ,'none','Cloud Fraction from Cloud Mask (cloudy & prob cloudy)',Count)
        ff.close()
        print(l3name+'.hdf5 Saved!')
    else:
        # Worker ranks ship their partial accumulators to rank 0.
        print("Process {} finished".format(rank))
        send_req = comm.Isend(results, dest=0, tag=0)
        send_req.wait()
#def main():
#
# # checking user input
# if len(sys.argv) != 6:
# print("Wrong user input")
# print("usage: python format_meteo_forcing.py <template raster> <met data input path> <forcing file outpath> <start year> <end year>")
# #print "DIR INPUTS SHOULD CONTAIN TRAILING /"
# sys.exit()
#
# else:
# if sys.argv[2][-1] != '/':
# print("Input met data dir should contain trailing '/'")
# print("fixing it for you...")
# sys.argv[2] = sys.argv[2] + "/"
#
# if sys.argv[3][-1] != '/':
# print("Output forcing data dir should contain trailing '/'")
# print("fixing it for you...")
# sys.argv[3] = sys.argv[3] + "/"
#
# # pass system arguments to the function
# t1 = datetime.now()
# format_meteo_forcing(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4],sys.argv[5])
# dt = datetime.now()-t1
# print ('Processing time: {0}'.format(dt))
#
# return
#def save_level3_hdf5(self,Agg):
# '''
# To save aggregated data products.
# Agg: MODIS_L2toL3 object
# '''
# self.MODIS_L2toL3=Agg
# self.fname=Agg.l3name
# ff=h5py.File(self.fname+'.hdf5','w')
# self.addGridEntry(ff,'CF','Fraction','Cloud_Fraction',Agg.M.total_cloud_fraction)
# self.addGridEntry(ff,'PC','Count','Pixel_Count',Agg.M.pixel_count)
# for key in Agg.variables:
# for st in Agg.M.stt:
# self.addGridEntry(ff, key+'_'+st, Agg.variables[key][1], Agg.variables[key][0]+'_'+self.get_long_name(st), \
# Agg.M.stt[st][key])
# PC=ff.create_dataset('lat_bnd',data=Agg.lat_bnd)
# PC.attrs['units']='degrees'
# PC.attrs['long_name']='Latitude_boundaries'
#
# PC=ff.create_dataset('lon_bnd',data=Agg.lon_bnd)
# PC.attrs['units']='degrees'
# PC.attrs['long_name']='Longitude_boundaries'
# ff.close()
# print(self.fname+'.hdf5 Saved!') | [
"numpy.dstack",
"numpy.unique",
"numpy.where",
"timeit.default_timer",
"netCDF4.Dataset",
"numpy.min",
"random.seed",
"h5py.File",
"numpy.append",
"numpy.array",
"numpy.zeros",
"os.popen",
"numpy.split",
"numpy.max",
"sys.exit",
"numpy.meshgrid",
"numpy.int",
"numpy.arange"
] | [((673, 693), 'netCDF4.Dataset', 'Dataset', (['fname1', '"""r"""'], {}), "(fname1, 'r')\n", (680, 693), False, 'from netCDF4 import Dataset\n'), ((702, 746), 'numpy.array', 'np.array', (["ncfile.variables['Cloud_Mask_1km']"], {}), "(ncfile.variables['Cloud_Mask_1km'])\n", (710, 746), True, 'import numpy as np\n'), ((885, 905), 'netCDF4.Dataset', 'Dataset', (['fname2', '"""r"""'], {}), "(fname2, 'r')\n", (892, 905), False, 'from netCDF4 import Dataset\n'), ((913, 951), 'numpy.array', 'np.array', (["ncfile.variables['Latitude']"], {}), "(ncfile.variables['Latitude'])\n", (921, 951), True, 'import numpy as np\n'), ((960, 999), 'numpy.array', 'np.array', (["ncfile.variables['Longitude']"], {}), "(ncfile.variables['Longitude'])\n", (968, 999), True, 'import numpy as np\n'), ((1667, 1681), 'numpy.array', 'np.array', (['hdfs'], {}), '(hdfs)\n', (1675, 1681), True, 'import numpy as np\n'), ((5518, 5560), 'numpy.arange', 'np.arange', (['NTA_lons[0]', 'NTA_lons[1]', 'gap_x'], {}), '(NTA_lons[0], NTA_lons[1], gap_x)\n', (5527, 5560), True, 'import numpy as np\n'), ((5570, 5612), 'numpy.arange', 'np.arange', (['NTA_lats[0]', 'NTA_lats[1]', 'gap_y'], {}), '(NTA_lats[0], NTA_lats[1], gap_y)\n', (5579, 5612), True, 'import numpy as np\n'), ((5622, 5651), 'numpy.meshgrid', 'np.meshgrid', (['map_lon', 'map_lat'], {}), '(map_lon, map_lat)\n', (5633, 5651), True, 'import numpy as np\n'), ((5663, 5707), 'numpy.int', 'np.int', (['((NTA_lons[-1] - NTA_lons[0]) / gap_x)'], {}), '((NTA_lons[-1] - NTA_lons[0]) / gap_x)\n', (5669, 5707), True, 'import numpy as np\n'), ((5714, 5758), 'numpy.int', 'np.int', (['((NTA_lats[-1] - NTA_lats[0]) / gap_y)'], {}), '((NTA_lats[-1] - NTA_lats[0]) / gap_y)\n', (5720, 5758), True, 'import numpy as np\n'), ((5822, 5851), 'numpy.zeros', 'np.zeros', (['(grid_lat * grid_lon)'], {}), '(grid_lat * grid_lon)\n', (5830, 5851), True, 'import numpy as np\n'), ((5981, 6010), 'numpy.zeros', 'np.zeros', (['(grid_lat * grid_lon)'], {}), '(grid_lat * grid_lon)\n', 
(5989, 6010), True, 'import numpy as np\n'), ((6028, 6057), 'numpy.zeros', 'np.zeros', (['(grid_lat * grid_lon)'], {}), '(grid_lat * grid_lon)\n', (6036, 6057), True, 'import numpy as np\n'), ((6197, 6213), 'numpy.array', 'np.array', (['[2008]'], {}), '([2008])\n', (6205, 6213), True, 'import numpy as np\n'), ((6224, 6237), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (6232, 6237), True, 'import numpy as np\n'), ((6246, 6275), 'numpy.arange', 'np.arange', (['(1)', '(2)'], {'dtype': 'np.int'}), '(1, 2, dtype=np.int)\n', (6255, 6275), True, 'import numpy as np\n'), ((6655, 6672), 'random.seed', 'random.seed', (['rank'], {}), '(rank)\n', (6666, 6672), False, 'import random\n'), ((7467, 7489), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (7487, 7489), False, 'import timeit\n'), ((1161, 1186), 'numpy.where', 'np.where', (['(lat == attr_lat)'], {}), '(lat == attr_lat)\n', (1169, 1186), True, 'import numpy as np\n'), ((1199, 1224), 'numpy.where', 'np.where', (['(lat == attr_lat)'], {}), '(lat == attr_lat)\n', (1207, 1224), True, 'import numpy as np\n'), ((1237, 1262), 'numpy.where', 'np.where', (['(lat == attr_lat)'], {}), '(lat == attr_lat)\n', (1245, 1262), True, 'import numpy as np\n'), ((1322, 1347), 'numpy.where', 'np.where', (['(lon == attr_lon)'], {}), '(lon == attr_lon)\n', (1330, 1347), True, 'import numpy as np\n'), ((1360, 1385), 'numpy.where', 'np.where', (['(lon == attr_lon)'], {}), '(lon == attr_lon)\n', (1368, 1385), True, 'import numpy as np\n'), ((1398, 1423), 'numpy.where', 'np.where', (['(lon == attr_lon)'], {}), '(lon == attr_lon)\n', (1406, 1423), True, 'import numpy as np\n'), ((1937, 2036), 'numpy.where', 'np.where', (['((lat > NTA_lats[0]) & (lat < NTA_lats[1]) & (lon > NTA_lons[0]) & (lon <\n NTA_lons[1]))'], {}), '((lat > NTA_lats[0]) & (lat < NTA_lats[1]) & (lon > NTA_lons[0]) &\n (lon < NTA_lons[1]))\n', (1945, 2036), True, 'import numpy as np\n'), ((2432, 2455), 'numpy.unique', 'np.unique', (['latlon_index'], {}), 
'(latlon_index)\n', (2441, 2455), True, 'import numpy as np\n'), ((2468, 2503), 'numpy.arange', 'np.arange', (['latlon_index_unique.size'], {}), '(latlon_index_unique.size)\n', (2477, 2503), True, 'import numpy as np\n'), ((5495, 5505), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5503, 5505), False, 'import sys\n'), ((5869, 5898), 'numpy.zeros', 'np.zeros', (['(grid_lat * grid_lon)'], {}), '(grid_lat * grid_lon)\n', (5877, 5898), True, 'import numpy as np\n'), ((5925, 5954), 'numpy.zeros', 'np.zeros', (['(grid_lat * grid_lon)'], {}), '(grid_lat * grid_lon)\n', (5933, 5954), True, 'import numpy as np\n'), ((6496, 6525), 'numpy.append', 'np.append', (['fname1', 'fname_tmp1'], {}), '(fname1, fname_tmp1)\n', (6505, 6525), True, 'import numpy as np\n'), ((6536, 6565), 'numpy.append', 'np.append', (['fname2', 'fname_tmp2'], {}), '(fname2, fname_tmp2)\n', (6545, 6565), True, 'import numpy as np\n'), ((8664, 8686), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (8684, 8686), False, 'import timeit\n'), ((9440, 9472), 'h5py.File', 'h5py.File', (["(l3name + '.hdf5')", '"""w"""'], {}), "(l3name + '.hdf5', 'w')\n", (9449, 9472), False, 'import h5py\n'), ((391, 456), 'os.popen', 'os.popen', (["('ls ' + loc_dir + prefix + yr + day + '*.' + fileformat)"], {}), "('ls ' + loc_dir + prefix + yr + day + '*.' 
+ fileformat)\n", (399, 456), False, 'import os\n'), ((756, 794), 'numpy.array', 'np.array', (['CM1km[:, :, 0]'], {'dtype': '"""byte"""'}), "(CM1km[:, :, 0], dtype='byte')\n", (764, 794), True, 'import numpy as np\n'), ((6929, 6950), 'numpy.split', 'np.split', (['files', 'size'], {}), '(files, size)\n', (6937, 6950), True, 'import numpy as np\n'), ((7184, 7205), 'numpy.split', 'np.split', (['files', 'size'], {}), '(files, size)\n', (7192, 7205), True, 'import numpy as np\n'), ((7928, 7968), 'numpy.dstack', 'np.dstack', (['(Fraction_Min, results[2, :])'], {}), '((Fraction_Min, results[2, :]))\n', (7937, 7968), True, 'import numpy as np\n'), ((7985, 8025), 'numpy.dstack', 'np.dstack', (['(Fraction_Max, results[3, :])'], {}), '((Fraction_Max, results[3, :]))\n', (7994, 8025), True, 'import numpy as np\n'), ((8519, 8547), 'numpy.min', 'np.min', (['Fraction_Min'], {'axis': '(2)'}), '(Fraction_Min, axis=2)\n', (8525, 8547), True, 'import numpy as np\n'), ((8593, 8621), 'numpy.max', 'np.max', (['Fraction_Max'], {'axis': '(2)'}), '(Fraction_Max, axis=2)\n', (8599, 8621), True, 'import numpy as np\n'), ((2664, 2691), 'numpy.where', 'np.where', (['(latlon_index == z)'], {}), '(latlon_index == z)\n', (2672, 2691), True, 'import numpy as np\n'), ((2735, 2762), 'numpy.where', 'np.where', (['(latlon_index == z)'], {}), '(latlon_index == z)\n', (2743, 2762), True, 'import numpy as np\n')] |
import argparse
import datetime
import numpy as np
import os
import optuna
import pandas as pd
import sys
import tensorflow as tf
from optuna.integration import TFKerasPruningCallback
from pathlib import Path
# global variables
DATA_SET_PATH = ""
def parse_split(split_str):
    """Parse a "train,val,test" fraction string such as "0.8,0.1,0.1".

    Returns
    -------
    list[float]
        The three fractions, in order.

    Raises
    ------
    AssertionError
        If the string does not contain exactly three numbers, or the
        numbers do not sum to 1 (within floating-point tolerance).
    """
    import math

    split = [float(piece) for piece in split_str.split(",")]
    # Use a tolerant comparison: the previous exact equality
    # `sum(split) == 1.0` rejected valid splits such as "0.7,0.2,0.1",
    # whose parts sum to 0.9999999999999999 in binary floating point.
    if len(split) == 3 and math.isclose(sum(split), 1.0):
        return split
    raise AssertionError("Argument split is invalid")
def get_dataset(path: str):
    """Load the serialized tf.data dataset stored at *path*.

    The element spec mirrors the layout written by the preprocessing step:
    ragged tweet features (10 wide), ragged idea features (9 wide), a
    fixed-size company-info vector (44 wide), and a scalar float label.
    """
    tweet_spec = tf.RaggedTensorSpec(tf.TensorShape([None, 10]),
                                      tf.float32, 0, tf.int64)
    idea_spec = tf.RaggedTensorSpec(tf.TensorShape([None, 9]),
                                     tf.float32, 0, tf.int64)
    company_spec = tf.TensorSpec(shape=(44,), dtype=tf.float32)
    label_spec = tf.TensorSpec(shape=(), dtype=tf.float32)
    features = {"tweets": tweet_spec,
                "ideas": idea_spec,
                "company_info": company_spec}
    return tf.data.experimental.load(path, (features, label_spec))
def create_model(trial):
    """Build and compile the two-branch LSTM regression model.

    Hyper-parameters (learning rate, LSTM width, dense width) are sampled
    from *trial*; the loss function is chosen via the global ``args.loss``
    ("mse" or "mae").

    Raises
    ------
    ValueError
        If ``args.loss`` is neither "mse" nor "mae". (Previously an unknown
        loss only printed a message and then crashed with an unrelated
        NameError on the unbound ``loss_tf``.)
    """
    learning_rate = trial.suggest_float("lr", 1e-5, 1e-3, log=True)
    # units = trial.suggest_categorical("units", [16, 32, 64, 128, 256])
    units_lstm = trial.suggest_int("units_lstm", 16, 512)
    units_dense = trial.suggest_int("units_dense", 16, 512)
    # Compose neural network: one LSTM branch per ragged text-feature
    # stream, concatenated with the static company-info vector.
    num_twitter_features = 10
    num_stocktwits_features = 9
    num_company_infos = 44
    input_twitter = tf.keras.Input(shape=(None, num_twitter_features),
                                   name="tweets")
    input_stocktwits = tf.keras.Input(shape=(None, num_stocktwits_features),
                                      name="ideas")
    input_company_info = tf.keras.Input(shape=(num_company_infos,),
                                       name="company_info")
    lstm_twitter = tf.keras.layers.LSTM(units_lstm)(input_twitter)
    lstm_stocktwits = tf.keras.layers.LSTM(units_lstm)(input_stocktwits)
    dense_input = tf.keras.layers.concatenate([lstm_twitter,
                                              lstm_stocktwits,
                                              input_company_info])
    dense_a = tf.keras.layers.Dense(units_dense,
                                    activation="tanh")(dense_input)
    dense_b = tf.keras.layers.Dense(units_dense,
                                    activation="tanh")(dense_a)
    dense_c = tf.keras.layers.Dense(1)(dense_b)
    model = tf.keras.Model(inputs=[input_twitter,
                                  input_stocktwits,
                                  input_company_info],
                           outputs=dense_c)
    # Compile model with the loss selected on the command line.
    loss = args.loss
    if loss == "mse":  # mean squared error
        loss_tf = tf.keras.losses.MeanSquaredError()
    elif loss == "mae":  # mean absolute error
        loss_tf = tf.keras.losses.MeanAbsoluteError()
    else:
        # Fail fast with a clear message instead of falling through to an
        # unbound-variable NameError at compile time.
        raise ValueError("Loss {} not possible.".format(args.loss))
    model.compile(loss=loss_tf,
                  optimizer=tf.keras.optimizers.Adam(learning_rate))
    return model
def objective(trial):
    """Optuna objective: train one model configuration and return its
    final validation loss (the quantity being minimized).

    Side effects per trial: TensorBoard logs under ./tensor_board_log/<date>,
    a saved model under ./checkpoint/checkpoint-<trial>, and a
    test_stats.txt summary in the log directory.
    """
    # batch_size = trial.suggest_categorical("batch_size", [16, 32, 64])
    batch_size = trial.suggest_int("batch_size", 16, 128)
    # tweets_threshold = trial.suggest_int("tweets_threshold", 240, 960)
    # Clear clutter from previous TensorFlow graphs.
    tf.keras.backend.clear_session()
    # Create dataset instance.
    data_set = get_dataset(DATA_SET_PATH)
    # NOTE(review): shuffle() reshuffles on every iteration by default, so
    # the take/skip splits below may not stay disjoint across epochs —
    # confirm whether reshuffle_each_iteration=False is wanted here.
    data_set = data_set.shuffle(8192)
    split = parse_split(args.split)
    data_set_size = int(data_set.cardinality())
    train_data_set_size = int(split[0] * data_set_size)
    val_data_set_size = int(split[1] * data_set_size)
    test_data_set_size = int(split[2] * data_set_size)
    # Carve train / test / val out of the shuffled stream with take/skip.
    train_data_set = data_set.take(train_data_set_size)
    val_test_data_set = data_set.skip(train_data_set_size)
    test_data_set = val_test_data_set.take(test_data_set_size)
    val_data_set = val_test_data_set.skip(test_data_set_size)
    train_data_set = train_data_set.batch(batch_size)
    val_data_set = val_data_set.batch(batch_size)
    test_data_set = test_data_set.batch(batch_size)
    # Create tf.keras model instance.
    model = create_model(trial)
    # Create callbacks for early stopping and pruning.
    log_path = os.path.join(".", "tensor_board_log")
    os.makedirs(log_path, exist_ok=True)
    date = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    log_dir = os.path.join(log_path, date)
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                   profile_batch=0,
                                                   update_freq="batch")
    monitor = "val_loss"
    callbacks = [
        tf.keras.callbacks.EarlyStopping(patience=3),
        TFKerasPruningCallback(trial, monitor),
        tensorboard_callback
    ]
    # Train model.
    history = model.fit(
        train_data_set,
        epochs=args.epochs,
        validation_data=val_data_set,
        callbacks=callbacks,
    )
    # Persist this trial's model so the best one can be recovered later.
    checkpoint_path = os.path.join(".", "checkpoint")
    os.makedirs(checkpoint_path, exist_ok=True)
    checkpoint_name = "checkpoint-{}".format(trial.number)
    file_name = os.path.join(checkpoint_path, checkpoint_name)
    model.save(file_name)
    # Predict
    predictions = model.predict(test_data_set)
    # calculate R^2
    # NOTE(review): model.predict typically returns shape (N, 1) while the
    # concatenated labels are shape (N,); if so, the subtractions below
    # broadcast to (N, N) and silently corrupt every metric — verify the
    # shapes or flatten `predictions` first.
    true_labels = np.concatenate([y for x, y in test_data_set], axis=0)
    residual_sum = np.sum(np.square(true_labels - predictions))
    true_labels_mean = np.mean(true_labels)
    total_sum = np.sum(np.square(true_labels - true_labels_mean))
    r_squared = 1 - (residual_sum / total_sum)
    # calculate mean absolute error
    mae = np.mean(np.abs(true_labels - predictions))
    # calculate mean squared error
    mse = np.mean(np.square(true_labels - predictions))
    # calculate accuracy if there would be only stock_up stock_down
    num_samples = true_labels.shape[0]
    num_correctly_classified = 0
    for true_label, prediction in zip(true_labels, predictions):
        if ((true_label > 0 and prediction > 0) or
                (true_label <= 0 and prediction <= 0)):
            num_correctly_classified = num_correctly_classified + 1
    accuracy = num_correctly_classified / num_samples
    # Write the test-set summary next to the TensorBoard logs.
    test_stats_path = os.path.join(log_dir, "test_stats.txt")
    with open(test_stats_path, "w") as stats_file:
        stats_file.write("true labels mean: {}\n".format(true_labels_mean))
        stats_file.write("residual sum: {}\n".format(residual_sum))
        stats_file.write("total sum: {}\n".format(total_sum))
        stats_file.write("r squared: {}\n".format(r_squared))
        stats_file.write("mean absolute error: {}\n".format(mae))
        stats_file.write("mean squared error: {}\n".format(mse))
        stats_file.write("accuracy (up, down): {}\n".format(accuracy))
        stats_file.write("training history: {}\n".format(history.history))
        stats_file.write("training dataset size: {}\n".format(
            tf.data.experimental.cardinality(train_data_set).numpy()))
        stats_file.write("validation dataset size: {}\n".format(
            tf.data.experimental.cardinality(val_data_set).numpy()))
        stats_file.write("test dataset size: {}\n".format(
            tf.data.experimental.cardinality(test_data_set).numpy()))
    # Optuna minimizes this: the monitored metric from the last epoch.
    return history.history[monitor][-1]
def log_study_as_csv(study):
    """Dump the study's trial table to ``study.csv`` in the working directory."""
    study.trials_dataframe().to_csv("study.csv")
def main():
    """Entry point: record the run configuration, run the Optuna study,
    and save its trial table as study.csv."""
    global DATA_SET_PATH
    DATA_SET_PATH = os.path.join(".", "dataset/Ava")
    # NOTE(review): this only creates the parent directory ("./dataset");
    # the dataset itself must already exist for get_dataset() to load it.
    os.makedirs(os.path.dirname(DATA_SET_PATH), exist_ok=True)
    # log arguments
    with open("study_args.txt", "w") as args_file:
        args_file.write("dataset: {}\n".format("Ava")) # HARD
        args_file.write("loss: {}\n".format(args.loss))
        args_file.write("epochs: {}\n".format(args.epochs))
        args_file.write("split: {}\n".format(args.split))
        args_file.write("trials: {}\n".format(args.num_trials))
    # Minimize the validation loss; Hyperband prunes unpromising trials early.
    study = optuna.create_study(
        direction="minimize",
        pruner=optuna.pruners.HyperbandPruner()
    )
    study.optimize(objective, n_trials=args.num_trials)
    log_study_as_csv(study)
if __name__ == "__main__":
    # Parse command-line configuration into the module-level `args`
    # (read by create_model, objective and main).
    parser = argparse.ArgumentParser(description="Training parameters")
    parser.add_argument("--loss", type=str,
                        help="Loss mse or mae")
    parser.add_argument("--epochs", type=int,
                        help="Number of epochs")
    parser.add_argument("--split", type=str,
                        help="Dataset split; train,val,test; e.g. 0.8,0.1,0,1")
    parser.add_argument("--num_trials", type=int,
                        help="Number of trials")
    args = parser.parse_args()
    main()
| [
"tensorflow.data.experimental.cardinality",
"tensorflow.keras.losses.MeanSquaredError",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.layers.Dense",
"optuna.integration.TFKerasPruningCallback",
"numpy.mean",
"argparse.ArgumentParser",
"numpy.concatenate",
"numpy.abs",
"tensorflow.k... | [((1230, 1275), 'tensorflow.data.experimental.load', 'tf.data.experimental.load', (['path', 'element_spec'], {}), '(path, element_spec)\n', (1255, 1275), True, 'import tensorflow as tf\n'), ((1723, 1788), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(None, num_twitter_features)', 'name': '"""tweets"""'}), "(shape=(None, num_twitter_features), name='tweets')\n", (1737, 1788), True, 'import tensorflow as tf\n'), ((1847, 1914), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(None, num_stocktwits_features)', 'name': '"""ideas"""'}), "(shape=(None, num_stocktwits_features), name='ideas')\n", (1861, 1914), True, 'import tensorflow as tf\n'), ((1978, 2041), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(num_company_infos,)', 'name': '"""company_info"""'}), "(shape=(num_company_infos,), name='company_info')\n", (1992, 2041), True, 'import tensorflow as tf\n'), ((2240, 2325), 'tensorflow.keras.layers.concatenate', 'tf.keras.layers.concatenate', (['[lstm_twitter, lstm_stocktwits, input_company_info]'], {}), '([lstm_twitter, lstm_stocktwits, input_company_info]\n )\n', (2267, 2325), True, 'import tensorflow as tf\n'), ((2705, 2802), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '[input_twitter, input_stocktwits, input_company_info]', 'outputs': 'dense_c'}), '(inputs=[input_twitter, input_stocktwits, input_company_info],\n outputs=dense_c)\n', (2719, 2802), True, 'import tensorflow as tf\n'), ((3619, 3651), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (3649, 3651), True, 'import tensorflow as tf\n'), ((4554, 4591), 'os.path.join', 'os.path.join', (['"""."""', '"""tensor_board_log"""'], {}), "('.', 'tensor_board_log')\n", (4566, 4591), False, 'import os\n'), ((4596, 4632), 'os.makedirs', 'os.makedirs', (['log_path'], {'exist_ok': '(True)'}), '(log_path, exist_ok=True)\n', (4607, 4632), False, 'import os\n'), ((4708, 4736), 'os.path.join', 'os.path.join', 
(['log_path', 'date'], {}), '(log_path, date)\n', (4720, 4736), False, 'import os\n'), ((4764, 4853), 'tensorflow.keras.callbacks.TensorBoard', 'tf.keras.callbacks.TensorBoard', ([], {'log_dir': 'log_dir', 'profile_batch': '(0)', 'update_freq': '"""batch"""'}), "(log_dir=log_dir, profile_batch=0,\n update_freq='batch')\n", (4794, 4853), True, 'import tensorflow as tf\n'), ((5339, 5370), 'os.path.join', 'os.path.join', (['"""."""', '"""checkpoint"""'], {}), "('.', 'checkpoint')\n", (5351, 5370), False, 'import os\n'), ((5375, 5418), 'os.makedirs', 'os.makedirs', (['checkpoint_path'], {'exist_ok': '(True)'}), '(checkpoint_path, exist_ok=True)\n', (5386, 5418), False, 'import os\n'), ((5494, 5540), 'os.path.join', 'os.path.join', (['checkpoint_path', 'checkpoint_name'], {}), '(checkpoint_path, checkpoint_name)\n', (5506, 5540), False, 'import os\n'), ((5668, 5721), 'numpy.concatenate', 'np.concatenate', (['[y for x, y in test_data_set]'], {'axis': '(0)'}), '([y for x, y in test_data_set], axis=0)\n', (5682, 5721), True, 'import numpy as np\n'), ((5809, 5829), 'numpy.mean', 'np.mean', (['true_labels'], {}), '(true_labels)\n', (5816, 5829), True, 'import numpy as np\n'), ((6582, 6621), 'os.path.join', 'os.path.join', (['log_dir', '"""test_stats.txt"""'], {}), "(log_dir, 'test_stats.txt')\n", (6594, 6621), False, 'import os\n'), ((7823, 7855), 'os.path.join', 'os.path.join', (['"""."""', '"""dataset/Ava"""'], {}), "('.', 'dataset/Ava')\n", (7835, 7855), False, 'import os\n'), ((8536, 8594), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Training parameters"""'}), "(description='Training parameters')\n", (8559, 8594), False, 'import argparse\n'), ((1170, 1211), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '()', 'dtype': 'tf.float32'}), '(shape=(), dtype=tf.float32)\n', (1183, 1211), True, 'import tensorflow as tf\n'), ((2101, 2133), 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', (['units_lstm'], {}), '(units_lstm)\n', 
(2121, 2133), True, 'import tensorflow as tf\n'), ((2171, 2203), 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', (['units_lstm'], {}), '(units_lstm)\n', (2191, 2203), True, 'import tensorflow as tf\n'), ((2429, 2482), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['units_dense'], {'activation': '"""tanh"""'}), "(units_dense, activation='tanh')\n", (2450, 2482), True, 'import tensorflow as tf\n'), ((2546, 2599), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['units_dense'], {'activation': '"""tanh"""'}), "(units_dense, activation='tanh')\n", (2567, 2599), True, 'import tensorflow as tf\n'), ((2659, 2683), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), '(1)\n', (2680, 2683), True, 'import tensorflow as tf\n'), ((3001, 3035), 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), '()\n', (3033, 3035), True, 'import tensorflow as tf\n'), ((5017, 5061), 'tensorflow.keras.callbacks.EarlyStopping', 'tf.keras.callbacks.EarlyStopping', ([], {'patience': '(3)'}), '(patience=3)\n', (5049, 5061), True, 'import tensorflow as tf\n'), ((5071, 5109), 'optuna.integration.TFKerasPruningCallback', 'TFKerasPruningCallback', (['trial', 'monitor'], {}), '(trial, monitor)\n', (5093, 5109), False, 'from optuna.integration import TFKerasPruningCallback\n'), ((5748, 5784), 'numpy.square', 'np.square', (['(true_labels - predictions)'], {}), '(true_labels - predictions)\n', (5757, 5784), True, 'import numpy as np\n'), ((5853, 5894), 'numpy.square', 'np.square', (['(true_labels - true_labels_mean)'], {}), '(true_labels - true_labels_mean)\n', (5862, 5894), True, 'import numpy as np\n'), ((5998, 6031), 'numpy.abs', 'np.abs', (['(true_labels - predictions)'], {}), '(true_labels - predictions)\n', (6004, 6031), True, 'import numpy as np\n'), ((6086, 6122), 'numpy.square', 'np.square', (['(true_labels - predictions)'], {}), '(true_labels - predictions)\n', (6095, 6122), True, 'import numpy as np\n'), 
((7872, 7902), 'os.path.dirname', 'os.path.dirname', (['DATA_SET_PATH'], {}), '(DATA_SET_PATH)\n', (7887, 7902), False, 'import os\n'), ((1052, 1096), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(44,)', 'dtype': 'tf.float32'}), '(shape=(44,), dtype=tf.float32)\n', (1065, 1096), True, 'import tensorflow as tf\n'), ((3101, 3136), 'tensorflow.keras.losses.MeanAbsoluteError', 'tf.keras.losses.MeanAbsoluteError', ([], {}), '()\n', (3134, 3136), True, 'import tensorflow as tf\n'), ((3272, 3311), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['learning_rate'], {}), '(learning_rate)\n', (3296, 3311), True, 'import tensorflow as tf\n'), ((4644, 4667), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4665, 4667), False, 'import datetime\n'), ((8371, 8403), 'optuna.pruners.HyperbandPruner', 'optuna.pruners.HyperbandPruner', ([], {}), '()\n', (8401, 8403), False, 'import optuna\n'), ((555, 581), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 10]'], {}), '([None, 10])\n', (569, 581), True, 'import tensorflow as tf\n'), ((812, 837), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, 9]'], {}), '([None, 9])\n', (826, 837), True, 'import tensorflow as tf\n'), ((7293, 7341), 'tensorflow.data.experimental.cardinality', 'tf.data.experimental.cardinality', (['train_data_set'], {}), '(train_data_set)\n', (7325, 7341), True, 'import tensorflow as tf\n'), ((7429, 7475), 'tensorflow.data.experimental.cardinality', 'tf.data.experimental.cardinality', (['val_data_set'], {}), '(val_data_set)\n', (7461, 7475), True, 'import tensorflow as tf\n'), ((7557, 7604), 'tensorflow.data.experimental.cardinality', 'tf.data.experimental.cardinality', (['test_data_set'], {}), '(test_data_set)\n', (7589, 7604), True, 'import tensorflow as tf\n')] |
from collections import Iterable
from typing import Union, List, Tuple
import numpy as np
from functools import partial
from scipy import ndimage as ndi
import cv2
import random
from .utils import clipBBoxes
from .base import AugBase
# -------------- channel aug_cuda --------------- #
class RGB2Gray(AugBase):
    """Collapse a 3-channel 2D RGB image to a single gray channel.

    Forward: (3, H, W) RGB -> (1, H, W) gray via OpenCV.
    Backward: (1, H, W) gray -> (3, H, W) uint8 by channel repetition.
    """

    def __init__(self):
        super().__init__()
        self.always = True

    @property
    def canBackward(self):
        return True

    def _backward_params(self, result):
        self._init_params(result)
        self.params = True

    def apply_to_img(self, result):
        img = result['img']
        if self.isForwarding:
            assert self.channels == 3 and self.dim == 2, f"{self.channels} {self.dim}"
            # cv2 expects channel-last layout, so move axis 0 to the end,
            # then restore a leading channel axis on the gray output
            gray = cv2.cvtColor(np.moveaxis(img, 0, -1), cv2.COLOR_RGB2GRAY)
            result['img'] = gray[None, ...]
        else:
            assert self.channels == 1
            result['img'] = np.repeat(img, 3, axis=0).astype(np.uint8)
        result['img_shape'] = result['img'].shape
        return result
class Gray2RGB(AugBase):
    """Expand a single gray channel into 3 identical channels.

    Forward: (1, ...) -> (3, ...) uint8 by repetition.
    Backward: (3, H, W) -> (1, H, W) by keeping only channel 0.
    """

    def __init__(self):
        super().__init__()
        self.always = True

    @property
    def canBackward(self):
        return True

    def _backward_params(self, result):
        self._init_params(result)
        self.params = True

    def apply_to_img(self, result):
        if self.isForwarding:
            assert self.channels == 1
            result['img'] = np.repeat(result['img'], 3, axis=0).astype(np.uint8)
        else:
            assert self.channels == 3 and self.dim == 2
            # fancy-index with [0] (not plain 0) to keep the channel axis
            result['img'] = result['img'][[0], ...]
        result['img_shape'] = result['img'].shape
        return result
class ChannelSelect(AugBase):
    """Keep only the given channel indices of the image.

    Forward stores ``(indices, original_channel_count)`` under the aug
    key; backward restores the original channel count by repeating the
    selected data.
    """

    def __init__(self, index: (list, tuple, int)):
        super().__init__()
        self.always = True
        # normalise a bare number to a one-element list of int
        self.index = [int(index)] if isinstance(index, (int, float)) else index
        assert isinstance(self.index, (list, tuple))

    def __repr__(self):
        return '{}(channel_index={})'.format(self.__class__.__name__, self.index)

    @property
    def canBackward(self):
        return True

    def _forward_params(self, result):
        self._init_params(result)
        self.params = (self.index, self.channels)
        result[self.key_name] = self.params

    def _backward_params(self, result):
        self._init_params(result)
        params = result.pop(self.key_name, None)
        if params:
            self.params = params

    def apply_to_img(self, result):
        if self.isForwarding:
            index, _ = self.params
            result['img'] = result['img'].take(indices=index, axis=0)
        else:
            _, channels = self.params
            # backward cannot recover dropped channels; repeat what we kept
            result['img'] = np.repeat(result['img'], channels, axis=0)
        result['img_shape'] = result['img'].shape
        return result
class AnnotationMap(AugBase):
    """Remap integer annotation labels according to ``mapping``.

    Applies to classification labels, segmentation maps and the label
    column of detection bboxes.  All entries are matched against the
    *original* values, so chained mappings such as ``{1: 2, 2: 3}``
    cannot remap a value twice (the old per-entry loop rewrote the
    result in place, so 1 would end up as 3).

    Args:
        mapping (dict): ``{old_label (int): new_label (int)}``.
    """

    def __init__(self, mapping: dict):
        super().__init__()
        self.always = True
        self.mapping = mapping
        assert isinstance(self.mapping, dict)
        assert all([isinstance(i, int) for i in self.mapping.keys()])
        assert all([isinstance(i, int) for i in self.mapping.values()])

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += '(mapping={})'.format(self.mapping)
        return repr_str

    @property
    def canBackward(self):
        # invertible only when the mapping is a bijection
        flag = len(set(self.mapping.values())) == len(self.mapping.values())
        flag = flag and len(set(self.mapping.keys())) == len(self.mapping.keys())
        return flag

    def _forward_params(self, result):
        self._init_params(result)
        self.params = self.mapping.copy()
        result[self.key_name] = self.params

    def _backward_params(self, result):
        self._init_params(result)
        params = result.pop(self.key_name, None)
        # guard against a missing forward record (siblings do the same);
        # the old code crashed with AttributeError on None
        if params is not None:
            self.params = dict((v, k) for k, v in params.items())

    def apply_to_cls(self, result):
        for key in result.get('cls_fields', []):
            # single dict lookup so each value is remapped at most once
            result[key] = self.params.get(result[key], result[key])
        return result

    def apply_to_seg(self, result):
        for key in result.get('seg_fields', []):
            src = result[key]
            dst = src.copy()
            for prev, curr in self.params.items():
                # index into the untouched source so already-remapped
                # values cannot be remapped again
                dst[src == prev] = curr
            result[key] = dst
        return result

    def apply_to_det(self, result):
        for key in result.get('det_fields', []):
            labels = result[key][:, 2 * self.dim]
            remapped = labels.copy()
            for prev, curr in self.params.items():
                remapped[labels == prev] = curr
            result[key][:, 2 * self.dim] = remapped
        # return the result dict for consistency with apply_to_cls/seg
        # (the old code returned None here)
        return result
# -------------- normalization --------------- #
class Normalize(AugBase):
    """Normalize the image with fixed per-channel mean/std to [-1.0, 1.0].

    Supports segmentation, detection and classification tasks, 2D and 3D
    images, and both forward and backward application.

    Args:
        mean (sequence): Mean values of each channel.
        std (sequence): Std values of each channel.
        clip (bool): clip the forward result into [-1, 1].
    """

    def __init__(self, mean, std, clip=True):
        super().__init__()
        self.always = True
        self.mean = mean
        self.std = std
        self.clip = clip

    def __repr__(self):
        return '{}(mean={}, std={}, clip={})'.format(
            self.__class__.__name__, self.mean, self.std, self.clip)

    @property
    def canBackward(self):
        return True

    def _forward_params(self, result):
        self._init_params(result)
        # e.g. 3 channels -> [(128, 128, 128), (128, 128, 128)]
        self.params = [tuple(self.mean), tuple(self.std)]
        result[self.key_name] = self.params

    def _backward_params(self, result):
        self._init_params(result)
        params = result.pop(self.key_name, None)
        if params is not None:
            # invert x' = (x - m) / s via m' = -m / s and s' = 1 / s,
            # e.g. [(-1, -1, -1), (1/128, 1/128, 1/128)]
            fwd_mean = np.array(params[0])
            fwd_std = np.array(params[1])
            self.params = [tuple(-fwd_mean / fwd_std), tuple(1 / fwd_std)]

    def apply_to_img(self, result):
        mean, std = self.params
        assert self.channels == len(mean) == len(std), f"channels = {self.channels}"
        assert result['img'].shape == result['img_shape']
        # broadcast the (C,) stats over the spatial axes
        expand = (slice(None),) + (None,) * self.dim
        shifted = result['img'] - np.array(mean, dtype=np.float32)[expand]
        result['img'] = shifted / np.array(std, dtype=np.float32)[expand]
        if self.clip and self.isForwarding:
            result['img'] = np.clip(result['img'], -1.0, 1.0)
class MultiNormalize(AugBase):
    """Normalize one single-channel image with several mean/std pairs,
    stacking each normalized copy into a separate output channel.

    Forward turns a (1, ...) image into a (len(means), ...) image in
    roughly [-1.0, 1.0]; backward un-normalizes each channel and averages
    them back into one channel.
    support segmentation, detection and classification tasks
    support 2D and 3D images
    support forward and backward
    Args:
        means (sequence): one-element mean tuples, one per output channel.
        stds (sequence): one-element std tuples, one per output channel.
        clip (bool): clip the forward result into [-1, 1].
    """
    def __init__(self, means, stds, clip=True):
        super().__init__()
        self.always = True
        self.means = means
        self.stds = stds
        self.clip = clip
        assert len(means[0]) == 1, 'only support one channel image'
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += '(means={}, stds={}, clip={})'.format(self.means, self.stds, self.clip)
        return repr_str
    @property
    def canBackward(self):
        return True
    def _forward_params(self, result):
        self._init_params(result)
        # e.g. [[(128, ), (192, )], [(128, ), (192, )]]
        self.params = [self.means, self.stds]
        result[self.key_name] = self.params
    def _backward_params(self, result):
        self._init_params(result)
        params = result.pop(self.key_name, None)
        if params is not None:
            # invert each pair: m' = -m/s, s' = 1/s,
            # e.g. [[-128/128, -192/192], [1/128, 1/192]]
            # note the backward params are flat scalars (one per channel),
            # not one-element tuples like the forward params
            self.params = [[], []]
            for mean, std in zip(params[0], params[1]):
                r_mean = - np.array(mean) / np.array(std)
                r_std = 1 / np.array(std)
                self.params[0].append(r_mean[0])
                self.params[1].append(r_std[0])
    def apply_to_img(self, result):
        if self.isForwarding:
            img = result['img'].astype(np.float32)
            means, stds = self.params
            assert self.channels == len(means[0]) == len(stds[0]), f"channels = {self.channels}, it should be 1"
            assert img.shape == result['img_shape']
            # broadcast each scalar stat over the spatial axes
            expand = (slice(None),) + (None,) * self.dim
            imgs = [(img - np.array(mean, dtype=np.float32)[expand]) / np.array(std, dtype=np.float32)[expand]
                    for mean, std in zip(means, stds)]
            # one output channel per mean/std pair
            img = np.concatenate(imgs, axis=0)
            if self.clip and self.isForwarding:
                img = np.clip(img, -1.0, 1.0)
            result['img'] = img
            result['img_shape'] = img.shape
        else:
            img = result['img'].astype(np.float32)
            mean, std = self.params
            assert self.channels == len(mean) == len(std), f"channels={self.channels}, mean={mean}"
            assert img.shape == result['img_shape']
            expand = (slice(None),) + (None,) * self.dim
            img = (img - np.array(mean, dtype=np.float32)[expand]) / np.array(std, dtype=np.float32)[expand]
            # collapse the per-pair channels back into one channel
            img = np.mean(img, axis=0, keepdims=True)
            result['img'] = img
            result['img_shape'] = img.shape
class AutoNormalize(AugBase):
    """Normalize the image to roughly [-1.0, 1.0] using statistics
    computed from the image itself.

    Args:
        method (str): 'norm' uses per-channel mean/std;
            'minmax' uses (max+min)/2 as the center and (max-min)/2 as
            the scale so the forward output lands in [-1, 1].
        clip (bool): clip the forward result into [-1, 1].
    """

    def __init__(self, method='norm', clip=False):
        super().__init__()
        self.always = True
        self.method = method
        self.clip = clip
        assert method in ['norm', 'minmax'], "method is one of ['norm', 'minmax']"

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += '(method={}, clip={})'.format(self.method, self.clip)
        return repr_str

    @property
    def canBackward(self):
        return True

    def _forward_params(self, result):
        self._init_params(result)
        if self.method == 'norm':
            mean = np.mean(result['img'], axis=self.image_axes)
            std = np.std(result['img'], axis=self.image_axes)
        else:
            M = np.max(result['img'], axis=self.image_axes)
            m = np.min(result['img'], axis=self.image_axes)
            mean = (M + m) / 2
            std = (M - m) / 2
        # np.atleast_1d promotes the 0-d scalar produced for a
        # single-channel image; this replaces the old
        # `isinstance(mean, Iterable)` check, which depended on
        # `from collections import Iterable` (removed in Python 3.10).
        mean = np.atleast_1d(mean)
        std = np.atleast_1d(std)
        self.params = [tuple(mean), tuple(std)]
        result[self.key_name] = self.params

    def _backward_params(self, result):
        self._init_params(result)
        params = result.pop(self.key_name, None)
        if params is not None:
            # invert x' = (x - m) / s via m' = -m / s and s' = 1 / s
            r_mean = - np.array(params[0]) / np.array(params[1])
            r_std = 1 / np.array(params[1])
            self.params = [tuple(r_mean), tuple(r_std)]

    def apply_to_img(self, result):
        img = result['img'].astype(np.float32)
        mean, std = self.params
        assert self.channels == len(mean) == len(std), f"channels = {self.channels}, mean = len({len(mean)})"
        assert img.shape == result['img_shape']
        # broadcast the (C,) stats over the spatial axes
        expand = (slice(None),) + (None,) * self.dim
        img = (img - np.array(mean, dtype=np.float32)[expand]) / np.array(std, dtype=np.float32)[expand]
        if self.clip and self.isForwarding:
            img = np.clip(img, -1.0, 1.0)
        result['img'] = img
# ------------- intensity ---------------- #
class RandomGamma(AugBase):
    """Random per-channel gamma (power-law) intensity transform.

    Each channel is min-max normalised into (0, 1), raised to the
    sampled gamma, then mapped back to its original range.  Backward
    applies the reciprocal exponents.

    Args:
        p (float): probability of applying the transform.
        gamma: a (min, max) range to sample the exponent from, or a
            plain number used as a fixed exponent.
    """

    def __init__(self, p, gamma):
        super().__init__()
        self.p = p
        self.gamma = gamma

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += '(p={}, gamma={})'.format(self.p, self.gamma)
        return repr_str

    @property
    def canBackward(self):
        return True

    def _forward_params(self, result):
        self._init_params(result)
        if isinstance(self.gamma, (list, tuple)):
            assert len(self.gamma) == 2 and self.gamma[0] < self.gamma[1], \
                "gamma is [min, max] format or just a number"
            gamma = tuple([self.get_range(self.gamma, 1)] * self.channels)
        else:
            # BUGFIX: a plain-number gamma previously fell through with
            # `gamma` unbound -> NameError on `self.params = gamma`.
            # A scalar is treated as a fixed exponent (per the assert
            # message above); confirm against AugBase.get_range if a
            # randomized scalar was intended.
            gamma = tuple([float(self.gamma)] * self.channels)
        self.params = gamma
        result[self.key_name] = self.params

    def _backward_params(self, result):
        self._init_params(result)
        params = result.pop(self.key_name, None)
        if params is not None:
            # the inverse of x**g is x**(1/g)
            self.params = tuple([1 / p for p in params])

    def apply_to_img(self, result):
        image = result['img']
        new_image = np.zeros_like(image)
        for c in range(self.channels):
            c_image = image[c]
            # small offsets keep the normalisation strictly inside (0, 1)
            temp_min, temp_max = np.min(c_image) - 1e-5, np.max(c_image) + 1e-5
            c_image = (c_image - temp_min) / (temp_max - temp_min)
            c_image = np.power(c_image, self.params[c])
            new_image[c] = c_image * (temp_max - temp_min) + temp_min
        result['img'] = new_image
class RandomBlur(AugBase):
    """Gaussian blur with a randomly chosen sigma (shared by all channels).

    Blurring is not invertible, so backward leaves the image untouched.
    """

    def __init__(self, p, sigma):
        super().__init__()
        self.p = p
        self.sigma = sigma

    def __repr__(self):
        return '{}(p={}, sigma={})'.format(self.__class__.__name__, self.p, self.sigma)

    @property
    def canBackward(self):
        return True

    def _forward_params(self, result):
        self._init_params(result)
        if isinstance(self.sigma, (list, tuple)):
            # sigma given as a [min, max] range to sample from
            assert len(self.sigma) == 2 and self.sigma[0] <= self.sigma[1]
            chosen = self.get_range(self.sigma)
        else:
            # scalar sigma acts as an upper bound for a uniform draw
            chosen = self.sigma * random.random()
        self.params = [chosen] * self.channels
        result[self.key_name] = self.params

    def _backward_params(self, result):
        super()._backward_params(result)
        self.params = [True]

    def apply_to_img(self, result):
        if not self.isForwarding:
            return
        blurred = np.zeros_like(result['img'])
        for c in range(self.channels):
            blurred[c] = ndi.gaussian_filter(result['img'][c], sigma=self.params[c])
        result['img'] = blurred
class RandomNoise(AugBase):
    """Additive random noise, uniform or gaussian.

    Forward stores the exact sampled noise field under the aug key so
    that backward can subtract it and recover the input exactly.

    Args:
        p (float): probability of applying the transform.
        method (str): 'uniform' or 'normal'.
        mean (float): noise offset.
        std (float): noise scale (half-width for 'uniform').
    """

    def __init__(self,
                 p: float,
                 method: str = 'uniform',
                 mean: float = 0,
                 std: float = 0.1):
        super().__init__()
        self.supported = ['uniform', 'normal']
        assert method in self.supported, f"method should be one of {self.supported}"
        self.p = p
        self.method = method
        self.mean = mean
        self.std = std

    def __repr__(self):
        return '{}(p={}, method={}, mean={}, std={})'.format(
            self.__class__.__name__, self.p, self.method, self.mean, self.std)

    @property
    def canBackward(self):
        return True

    def _forward_params(self, result):
        self._init_params(result)
        if self.method == 'uniform':
            # uniform in [mean - std, mean + std)
            noise = ((np.random.rand(*self.array_shape) - 0.5) / 0.5) * self.std + self.mean
        else:
            noise = np.random.randn(*self.array_shape) * self.std + self.mean
        self.params = noise.astype(np.float32)
        result[self.key_name] = self.params

    def _backward_params(self, result):
        self._init_params(result)
        params = result.pop(self.key_name, None)
        if params is not None:
            # undoing additive noise is just adding its negation
            self.params = -params

    def apply_to_img(self, result):
        result['img'] = result['img'] + self.params
class RandomSpike(AugBase):
    """Add MRI spike (herringbone) artifacts by injecting energy at
    random positions in k-space.

    Args:
        p: probability of applying the transform.
        num_spikes: number of spikes, or a (min, max) range; a bare int
            n becomes the range (1, n).
        intensity: spike strength relative to the spectrum maximum, or a
            (min, max) range to sample from.
    """
    def __init__(self,
                 p,
                 num_spikes: Union[int, Tuple[int, int]] = 1,
                 intensity: Union[float, Tuple[float, float]] = (0.5, 1)
                 ):
        super().__init__()
        self.p = p
        if isinstance(num_spikes, int):
            num_spikes = (1, num_spikes)
        self.num_spikes = num_spikes
        self.intensity = intensity
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += '(num_spikes={})'.format(self.num_spikes)
        return repr_str
    @property
    def canBackward(self):
        return True
    def _forward_params(self, result):
        self._init_params(result)
        num_spikes_param = int(self.get_range(self.num_spikes))
        intensity_param = self.get_range(self.intensity)
        # spike positions as fractions of the image extent, one row per spike
        spikes_positions = np.random.rand(num_spikes_param, self.dim)
        self.params = spikes_positions.tolist(), intensity_param
        result[self.key_name] = self.params
    def _backward_params(self, result):
        self._init_params(result)
        params = result.pop(self.key_name, None)
        if params is not None:
            # backward reuses the same positions with negated intensity,
            # subtracting the previously added k-space energy
            spikes_positions, intensity_param = params
            self.params = spikes_positions, - intensity_param
    def apply_to_img(self, result):
        image = result['img']
        spikes_positions, intensity = self.params
        transformed_result = []
        # process each channel's spectrum independently
        for c in image:
            spectrum = self.fourier_transform(c)
            if intensity >= 1 and not self.isForwarding:
                tmp = spectrum.max() / intensity
            else:
                tmp = spectrum.max()
            spikes_positions = np.array(spikes_positions)
            shape = np.array(self.image_shape)
            mid_shape = shape // 2
            # map fractional positions to integer k-space indices
            indices = np.floor(spikes_positions * shape).astype(int)
            for index in indices:
                diff = index - mid_shape
                idx = mid_shape + diff
                spectrum[tuple(idx)] += tmp * intensity
                # If we wanted to add a pure cosine, we should add spikes to both
                # sides of k-space. However, having only one is a better
                # representation of the actual cause of the artifact in real
                # scans. Therefore the next two lines have been removed.
                # #i, j, k = mid_shape - diff
                # #spectrum[i, j, k] = spectrum.max() * intensity_factor
            cc = np.real(self.inv_fourier_transform(spectrum))
            transformed_result.append(cc)
        result['img'] = np.stack(transformed_result, axis=0)
class RandomBiasField(AugBase):
    """Multiply the image by a smooth random bias field, simulating the
    low-frequency intensity inhomogeneity of MRI scans.

    The field is exp(P(x, y[, z])) where P is a first-order polynomial
    whose coefficients are sampled from ``coefficients``; backward
    divides by the same (recorded) field.
    """
    def __init__(self, p, coefficients):
        super().__init__()
        self.p = p
        self.coefficients = coefficients
        # polynomial order of the bias field (fixed to 1)
        self.order = 1
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += '(coefficients={})'.format(self.coefficients)
        return repr_str
    @property
    def canBackward(self):
        return True
    def _forward_params(self, result):
        self._init_params(result)
        # sample one coefficient per monomial x^i * y^j (* z^k) with
        # total degree <= self.order, in the same nesting order used by
        # apply_to_img below
        random_coefficients = []
        if self.dim == 3:
            for x_order in range(0, self.order + 1):
                for y_order in range(0, self.order + 1 - x_order):
                    for _ in range(0, self.order + 1 - (x_order + y_order)):
                        number = self.get_range(self.coefficients)
                        random_coefficients.append(number)
        else:
            for x_order in range(0, self.order + 1):
                for y_order in range(0, self.order + 1 - x_order):
                    number = self.get_range(self.coefficients)
                    random_coefficients.append(number)
        random_coefficients = np.array(random_coefficients)
        self.params = random_coefficients
        result[self.key_name] = random_coefficients.tolist()
    def _backward_params(self, result):
        self._init_params(result)
        params = result.pop(self.key_name, None)
        if params is not None:
            # same coefficients as forward; apply_to_img divides instead
            self.params = params
    def apply_to_img(self, result):
        image = result['img']
        transformed_result = []
        for component in image:
            # coordinate grids centred on the image, scaled to [-1, 1]
            half_shape = np.array(component.shape) / 2
            ranges = [np.arange(-n, n) for n in half_shape]
            bias_field = np.zeros(component.shape)
            if self.dim == 3:
                x_mesh, y_mesh, z_mesh = np.asarray(np.meshgrid(*ranges, indexing='ij'))
                x_mesh /= x_mesh.max()
                y_mesh /= y_mesh.max()
                z_mesh /= z_mesh.max()
                # accumulate coefficient * x^i * y^j * z^k; the loop
                # nesting must match _forward_params so params[i] pairs
                # with the right monomial
                i = 0
                for x_order in range(self.order + 1):
                    for y_order in range(self.order + 1 - x_order):
                        for z_order in range(self.order + 1 - (x_order + y_order)):
                            random_coefficient = self.params[i]
                            new_map = (
                                random_coefficient
                                * x_mesh ** x_order
                                * y_mesh ** y_order
                                * z_mesh ** z_order
                            )
                            bias_field += new_map
                            i += 1
            else:
                x_mesh, y_mesh = np.asarray(np.meshgrid(*ranges, indexing='ij'))
                x_mesh /= x_mesh.max()
                y_mesh /= y_mesh.max()
                i = 0
                for x_order in range(self.order + 1):
                    for y_order in range(self.order + 1 - x_order):
                        random_coefficient = self.params[i]
                        new_map = (
                            random_coefficient
                            * x_mesh ** x_order
                            * y_mesh ** y_order
                        )
                        bias_field += new_map
                        i += 1
            # exponentiate and normalise so the field peaks at 1
            bias_field = np.exp(bias_field).astype(np.float32)
            bias_field = bias_field / np.max(bias_field)
            if self.isForwarding:
                component = component * bias_field
            else:
                component = component / bias_field
            transformed_result.append(component)
        result['img'] = np.stack(transformed_result, axis=0)
class RandomCutout(AugBase):
    """Cut ``num_holes`` axis-aligned holes of side ``size`` out of the
    image, filling each with a reduction of its own content (or a
    constant), and record the holes as a 'cutout_mask' seg field.

    Args:
        p: probability of applying the transform.
        num_holes: number of holes to cut.
        size: hole side length (added to the sampled corner).
        apply_to: names of seg fields whose hole regions are zeroed.
        fill: 'mean' / 'min' / 'max' reduction, or a constant number.
    """
    # name -> numpy reduction used to compute the fill value
    FUSION = {
        'mean': np.mean,
        'min': np.min,
        'max': np.max
    }
    def __init__(self, p,
                 num_holes: int,
                 size: int,
                 apply_to: Union[tuple, list] = (),
                 fill='mean'):
        super(RandomCutout, self).__init__()
        self.p = p
        self.num_holes = num_holes
        self.size = size
        self.apply_to = apply_to
        if isinstance(fill, (int, float)):
            # constant fill: ignore the patch content entirely
            self.fusion_fun = partial(lambda a, constant: constant, constant=fill)
        else:
            self.fusion_fun = RandomCutout.FUSION[str(fill)]
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += '(p={}, num_holes={}, size={}, apply_to={})' \
            .format(self.p, self.num_holes, self.size, self.apply_to)
        return repr_str
    @property
    def canBackward(self):
        return True
    def _forward_params(self, result):
        self._init_params(result)
        # sample hole corners uniformly; stored in xyz order, while the
        # image itself is indexed zyx (hence the [::-1] flips below)
        ctr = np.random.randint(0, self.image_shape[::-1], size=(self.num_holes, self.dim))  # xyz
        bboxes = np.concatenate([ctr, ctr + self.size], axis=1)
        bboxes = clipBBoxes(self.dim, bboxes, self.image_shape)
        self.params = bboxes
        result[self.key_name] = self.params
    def _backward_params(self, result):
        self._init_params(result)
        # cutout is not invertible; backward is a no-op
        self.params = True
    def apply_to_img(self, result: dict):
        if self.isForwarding:
            # single-channel mask marking all cut regions
            mask = np.zeros_like(result['img'])[[0], ...]
            for hole in self.params:
                # hole is (x1..., x2...) in xyz; build zyx slices plus a
                # full slice over the channel axis
                slices = (slice(None),) + tuple(map(slice, hole[:self.dim][::-1], hole[-self.dim:][::-1]))
                mean_val = self.fusion_fun(result['img'][slices])
                result['img'][slices] = mean_val
                mask[slices] = 1.0
            result['cutout_mask'] = mask
            result['seg_fields'].append('cutout_mask')
    def apply_to_seg(self, result: dict):
        if self.isForwarding:
            for key in result['seg_fields']:
                if key in self.apply_to:
                    # zero the same hole regions in the selected seg maps
                    for hole in self.params:
                        slices = (slice(None),) + tuple(map(slice, hole[:self.dim][::-1], hole[-self.dim:][::-1]))
                        result[key][slices] = 0
class ForegroundCutout(RandomCutout):
    """Cutout whose holes are anchored on foreground voxels.

    Hole corners are drawn from the non-zero voxels of
    'gt_seg_skeleton' (preferred) or 'gt_seg'; when no foreground
    exists, it falls back to RandomCutout's uniform sampling.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _forward_params(self, result):
        self._init_params(result)
        seg_key = 'gt_seg_skeleton' if 'gt_seg_skeleton' in result.keys() else 'gt_seg'
        foreground = result[seg_key]
        candidates = np.argwhere(foreground[0] > 0)  # zyx order
        if not len(candidates):
            # no foreground voxels: sample holes uniformly instead
            RandomCutout._forward_params(self, result)
            return
        picks = np.random.choice(np.arange(len(candidates)), self.num_holes)
        ctr = candidates[picks][:, ::-1]  # flip zyx -> xyz
        bboxes = np.concatenate([ctr, ctr + self.size], axis=1)
        self.params = clipBBoxes(self.dim, bboxes, self.image_shape)
        result[self.key_name] = self.params
| [
"numpy.clip",
"numpy.random.rand",
"numpy.array",
"scipy.ndimage.gaussian_filter",
"numpy.moveaxis",
"numpy.arange",
"numpy.mean",
"numpy.repeat",
"numpy.where",
"numpy.max",
"numpy.exp",
"numpy.stack",
"numpy.concatenate",
"numpy.min",
"numpy.meshgrid",
"numpy.floor",
"numpy.std",
... | [((13049, 13069), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (13062, 13069), True, 'import numpy as np\n'), ((17227, 17269), 'numpy.random.rand', 'np.random.rand', (['num_spikes_param', 'self.dim'], {}), '(num_spikes_param, self.dim)\n', (17241, 17269), True, 'import numpy as np\n'), ((18967, 19003), 'numpy.stack', 'np.stack', (['transformed_result'], {'axis': '(0)'}), '(transformed_result, axis=0)\n', (18975, 19003), True, 'import numpy as np\n'), ((20147, 20176), 'numpy.array', 'np.array', (['random_coefficients'], {}), '(random_coefficients)\n', (20155, 20176), True, 'import numpy as np\n'), ((22698, 22734), 'numpy.stack', 'np.stack', (['transformed_result'], {'axis': '(0)'}), '(transformed_result, axis=0)\n', (22706, 22734), True, 'import numpy as np\n'), ((23765, 23842), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.image_shape[::-1]'], {'size': '(self.num_holes, self.dim)'}), '(0, self.image_shape[::-1], size=(self.num_holes, self.dim))\n', (23782, 23842), True, 'import numpy as np\n'), ((23867, 23913), 'numpy.concatenate', 'np.concatenate', (['[ctr, ctr + self.size]'], {'axis': '(1)'}), '([ctr, ctr + self.size], axis=1)\n', (23881, 23913), True, 'import numpy as np\n'), ((25446, 25476), 'numpy.argwhere', 'np.argwhere', (['(foreground[0] > 0)'], {}), '(foreground[0] > 0)\n', (25457, 25476), True, 'import numpy as np\n'), ((2953, 2995), 'numpy.repeat', 'np.repeat', (["result['img']", 'channels'], {'axis': '(0)'}), "(result['img'], channels, axis=0)\n", (2962, 2995), True, 'import numpy as np\n'), ((6819, 6852), 'numpy.clip', 'np.clip', (["result['img']", '(-1.0)', '(1.0)'], {}), "(result['img'], -1.0, 1.0)\n", (6826, 6852), True, 'import numpy as np\n'), ((8954, 8982), 'numpy.concatenate', 'np.concatenate', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (8968, 8982), True, 'import numpy as np\n'), ((9590, 9625), 'numpy.mean', 'np.mean', (['img'], {'axis': '(0)', 'keepdims': '(True)'}), '(img, axis=0, 
keepdims=True)\n', (9597, 9625), True, 'import numpy as np\n'), ((10383, 10427), 'numpy.mean', 'np.mean', (["result['img']"], {'axis': 'self.image_axes'}), "(result['img'], axis=self.image_axes)\n", (10390, 10427), True, 'import numpy as np\n'), ((10446, 10489), 'numpy.std', 'np.std', (["result['img']"], {'axis': 'self.image_axes'}), "(result['img'], axis=self.image_axes)\n", (10452, 10489), True, 'import numpy as np\n'), ((10520, 10563), 'numpy.max', 'np.max', (["result['img']"], {'axis': 'self.image_axes'}), "(result['img'], axis=self.image_axes)\n", (10526, 10563), True, 'import numpy as np\n'), ((10580, 10623), 'numpy.min', 'np.min', (["result['img']"], {'axis': 'self.image_axes'}), "(result['img'], axis=self.image_axes)\n", (10586, 10623), True, 'import numpy as np\n'), ((11684, 11707), 'numpy.clip', 'np.clip', (['img', '(-1.0)', '(1.0)'], {}), '(img, -1.0, 1.0)\n', (11691, 11707), True, 'import numpy as np\n'), ((13309, 13342), 'numpy.power', 'np.power', (['c_image', 'self.params[c]'], {}), '(c_image, self.params[c])\n', (13317, 13342), True, 'import numpy as np\n'), ((14737, 14757), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (14750, 14757), True, 'import numpy as np\n'), ((18066, 18092), 'numpy.array', 'np.array', (['spikes_positions'], {}), '(spikes_positions)\n', (18074, 18092), True, 'import numpy as np\n'), ((18113, 18139), 'numpy.array', 'np.array', (['self.image_shape'], {}), '(self.image_shape)\n', (18121, 18139), True, 'import numpy as np\n'), ((20741, 20766), 'numpy.zeros', 'np.zeros', (['component.shape'], {}), '(component.shape)\n', (20749, 20766), True, 'import numpy as np\n'), ((23258, 23310), 'functools.partial', 'partial', (['(lambda a, constant: constant)'], {'constant': 'fill'}), '(lambda a, constant: constant, constant=fill)\n', (23265, 23310), False, 'from functools import partial\n'), ((25628, 25674), 'numpy.concatenate', 'np.concatenate', (['[ctr, ctr + self.size]'], {'axis': '(1)'}), '([ctr, ctr + self.size], 
axis=1)\n', (25642, 25674), True, 'import numpy as np\n'), ((4532, 4580), 'numpy.where', 'np.where', (['(result[key] == prev)', 'curr', 'result[key]'], {}), '(result[key] == prev, curr, result[key])\n', (4540, 4580), True, 'import numpy as np\n'), ((4833, 4885), 'numpy.where', 'np.where', (['(bboxes_labels == prev)', 'curr', 'bboxes_labels'], {}), '(bboxes_labels == prev, curr, bboxes_labels)\n', (4841, 4885), True, 'import numpy as np\n'), ((6224, 6243), 'numpy.array', 'np.array', (['params[1]'], {}), '(params[1])\n', (6232, 6243), True, 'import numpy as np\n'), ((6268, 6287), 'numpy.array', 'np.array', (['params[1]'], {}), '(params[1])\n', (6276, 6287), True, 'import numpy as np\n'), ((6694, 6725), 'numpy.array', 'np.array', (['std'], {'dtype': 'np.float32'}), '(std, dtype=np.float32)\n', (6702, 6725), True, 'import numpy as np\n'), ((9053, 9076), 'numpy.clip', 'np.clip', (['img', '(-1.0)', '(1.0)'], {}), '(img, -1.0, 1.0)\n', (9060, 9076), True, 'import numpy as np\n'), ((11070, 11089), 'numpy.array', 'np.array', (['params[1]'], {}), '(params[1])\n', (11078, 11089), True, 'import numpy as np\n'), ((11114, 11133), 'numpy.array', 'np.array', (['params[1]'], {}), '(params[1])\n', (11122, 11133), True, 'import numpy as np\n'), ((11582, 11613), 'numpy.array', 'np.array', (['std'], {'dtype': 'np.float32'}), '(std, dtype=np.float32)\n', (11590, 11613), True, 'import numpy as np\n'), ((14862, 14912), 'scipy.ndimage.gaussian_filter', 'ndi.gaussian_filter', (['c_image'], {'sigma': 'self.params[c]'}), '(c_image, sigma=self.params[c])\n', (14881, 14912), True, 'from scipy import ndimage as ndi\n'), ((20624, 20649), 'numpy.array', 'np.array', (['component.shape'], {}), '(component.shape)\n', (20632, 20649), True, 'import numpy as np\n'), ((20677, 20693), 'numpy.arange', 'np.arange', (['(-n)', 'n'], {}), '(-n, n)\n', (20686, 20693), True, 'import numpy as np\n'), ((22452, 22470), 'numpy.max', 'np.max', (['bias_field'], {}), '(bias_field)\n', (22458, 22470), True, 'import 
numpy as np\n'), ((24282, 24310), 'numpy.zeros_like', 'np.zeros_like', (["result['img']"], {}), "(result['img'])\n", (24295, 24310), True, 'import numpy as np\n'), ((752, 785), 'numpy.moveaxis', 'np.moveaxis', (["result['img']", '(0)', '(-1)'], {}), "(result['img'], 0, -1)\n", (763, 785), True, 'import numpy as np\n'), ((952, 987), 'numpy.repeat', 'np.repeat', (["result['img']", '(3)'], {'axis': '(0)'}), "(result['img'], 3, axis=0)\n", (961, 987), True, 'import numpy as np\n'), ((1483, 1518), 'numpy.repeat', 'np.repeat', (["result['img']", '(3)'], {'axis': '(0)'}), "(result['img'], 3, axis=0)\n", (1492, 1518), True, 'import numpy as np\n'), ((6202, 6221), 'numpy.array', 'np.array', (['params[0]'], {}), '(params[0])\n', (6210, 6221), True, 'import numpy as np\n'), ((6650, 6682), 'numpy.array', 'np.array', (['mean'], {'dtype': 'np.float32'}), '(mean, dtype=np.float32)\n', (6658, 6682), True, 'import numpy as np\n'), ((8239, 8252), 'numpy.array', 'np.array', (['std'], {}), '(std)\n', (8247, 8252), True, 'import numpy as np\n'), ((8281, 8294), 'numpy.array', 'np.array', (['std'], {}), '(std)\n', (8289, 8294), True, 'import numpy as np\n'), ((9532, 9563), 'numpy.array', 'np.array', (['std'], {'dtype': 'np.float32'}), '(std, dtype=np.float32)\n', (9540, 9563), True, 'import numpy as np\n'), ((11048, 11067), 'numpy.array', 'np.array', (['params[0]'], {}), '(params[0])\n', (11056, 11067), True, 'import numpy as np\n'), ((11538, 11570), 'numpy.array', 'np.array', (['mean'], {'dtype': 'np.float32'}), '(mean, dtype=np.float32)\n', (11546, 11570), True, 'import numpy as np\n'), ((13173, 13188), 'numpy.min', 'np.min', (['c_image'], {}), '(c_image)\n', (13179, 13188), True, 'import numpy as np\n'), ((13197, 13212), 'numpy.max', 'np.max', (['c_image'], {}), '(c_image)\n', (13203, 13212), True, 'import numpy as np\n'), ((15935, 15969), 'numpy.random.randn', 'np.random.randn', (['*self.array_shape'], {}), '(*self.array_shape)\n', (15950, 15969), True, 'import numpy as np\n'), 
((18197, 18231), 'numpy.floor', 'np.floor', (['(spikes_positions * shape)'], {}), '(spikes_positions * shape)\n', (18205, 18231), True, 'import numpy as np\n'), ((20850, 20885), 'numpy.meshgrid', 'np.meshgrid', (['*ranges'], {'indexing': '"""ij"""'}), "(*ranges, indexing='ij')\n", (20861, 20885), True, 'import numpy as np\n'), ((21737, 21772), 'numpy.meshgrid', 'np.meshgrid', (['*ranges'], {'indexing': '"""ij"""'}), "(*ranges, indexing='ij')\n", (21748, 21772), True, 'import numpy as np\n'), ((22376, 22394), 'numpy.exp', 'np.exp', (['bias_field'], {}), '(bias_field)\n', (22382, 22394), True, 'import numpy as np\n'), ((8222, 8236), 'numpy.array', 'np.array', (['mean'], {}), '(mean)\n', (8230, 8236), True, 'import numpy as np\n'), ((8841, 8872), 'numpy.array', 'np.array', (['std'], {'dtype': 'np.float32'}), '(std, dtype=np.float32)\n', (8849, 8872), True, 'import numpy as np\n'), ((9488, 9520), 'numpy.array', 'np.array', (['mean'], {'dtype': 'np.float32'}), '(mean, dtype=np.float32)\n', (9496, 9520), True, 'import numpy as np\n'), ((14396, 14411), 'random.random', 'random.random', ([], {}), '()\n', (14409, 14411), False, 'import random\n'), ((8797, 8829), 'numpy.array', 'np.array', (['mean'], {'dtype': 'np.float32'}), '(mean, dtype=np.float32)\n', (8805, 8829), True, 'import numpy as np\n'), ((15830, 15863), 'numpy.random.rand', 'np.random.rand', (['*self.array_shape'], {}), '(*self.array_shape)\n', (15844, 15863), True, 'import numpy as np\n')] |
## ========================================================================== ##
## Copyright (c) 2019 The University of Texas at Austin. ##
## All rights reserved. ##
## ##
## Licensed under the Apache License, Version 2.0 (the "License"); ##
## you may not use this file except in compliance with the License. ##
## A copy of the License is included with this software in the file LICENSE. ##
## If your copy does not contain the License, you may obtain a copy of the ##
## License at: ##
## ##
## https://www.apache.org/licenses/LICENSE-2.0 ##
## ##
## Unless required by applicable law or agreed to in writing, software ##
## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ##
## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ##
## See the License for the specific language governing permissions and ##
## limitations under the License. ##
## ##
## ========================================================================== ##
# ParaView python-filter metadata: these module-level constants describe
# the filter to the plugin generator (name/label shown in the UI, the
# number and VTK types of accepted inputs, and the output type).
Name = 'MH3'
Label = 'MH3'
Help = ''
NumberOfInputs = 2
InputDataType = ['vtkPolyData', 'vtkUnstructuredGrid', 'vtkImageData']
OutputDataType = 'vtkUnstructuredGrid'
ExtraXml = ''
# Default values of the filter's user-editable properties.
# `power` is the exponent applied in P(v) inside RequestData;
# presumably `arrayName` selects the input point-data array and
# `sscale`/`loop_count`/`target`/`spread` steer the sampling loop —
# their exact use is in the body of RequestData (confirm there).
Properties = dict(
  arrayName = 'timeMonthly_avg_ecosysTracers_DON',
  power = 1.0,
  sscale = 10000,
  loop_count = 1,
  target = 999999.0,
  spread = -1.0,
)
def RequestData():
    """ParaView programmable-filter body: Metropolis-Hastings-style point
    sampling of a scalar field, biased towards values near `target`.

    NOTE(review): `inputs`, `output` and the Property names (arrayName,
    power, sscale, loop_count, target, spread) are not defined here -- they
    are injected by ParaView when this body is inlined as the filter script.
    In particular, the `target = np.max(...)` assignment below would raise
    UnboundLocalError if this were called as a plain Python function; it
    relies on the generator pasting the body at script scope -- confirm.
    """
    import vtk
    import random
    import numpy as np
    from vtk.numpy_interface import dataset_adapter as dsa
    import paraview.vtk.util.numpy_support as vnp
    def P(v):
        # Power transform of a sample value; negative values and failed
        # pow() calls map to 0.
        if v < 0.0:
            return 0
        if power != 1.0:
            try:
                pv = pow(v, power)
                return pv
            except:
                print('E', v, power)
                return 0
        else:
            return v
    class Interpolator:
        # Point-location + linear interpolation on the input dataset.
        # Uses a vtkCellTreeLocator for unstructured grids and the dataset's
        # own FindAndGetCell otherwise (e.g. image data).
        def __init__(self, dset):
            self.dset = dset.VTKObject
            # Sentinel "last located" position guaranteed not to match a query.
            self.xyz = [-10000, -20000, -30000]
            self.pids = vtk.reference([0]*10)
            self.nverts = -1
            self.pc = [0]*3     # parametric coordinates (filled by VTK)
            self.wts = [0]*10   # interpolation weights (filled by VTK)
            self.gc = vtk.vtkGenericCell()
            self.sid = 2
            if self.dset.IsA('vtkUnstructuredGrid'):
                self.locator = vtk.vtkCellTreeLocator()
                self.locator.SetDataSet(dset.VTKObject)
                self.locator.BuildLocator()
                self.is_vtu = True
            else:
                self.is_vtu = False
        def Locate(self, xyz):
            # Find the cell containing xyz; on success caches xyz and the
            # cell's point ids in self.ids and returns True. On a miss the
            # cache is invalidated and a falsy value is returned.
            if self.is_vtu:
                cid = self.locator.FindCell(xyz, 0.0, self.gc, self.pc, self.wts)
                if cid < 0:
                    self.xyz = []
                    return False
                idl = vtk.vtkIdList()
                self.dset.GetCellPoints(cid, idl)
                self.ids = [idl.GetId(i) for i in range(idl.GetNumberOfIds())]
            else:
                vox = self.dset.FindAndGetCell(xyz, None, 0, 0.0, vtk.reference(self.sid), self.pc, self.wts)
                if vox == None:
                    self.xyz = []
                    return None
                self.ids = [vox.GetPointId(i) for i in range(vox.GetNumberOfPoints())]
            self.xyz = xyz
            return True
        def Interpolate(self, xyz, a):
            # Weighted interpolation of array `a` at xyz; None when xyz is
            # outside the dataset. Re-locates only when xyz changed.
            if list(xyz) != list(self.xyz):
                if not self.Locate(xyz):
                    return None
            if len(a.shape) == 1:
                return sum(self.wts[i]*a[self.ids[i]] for i in range(len(self.ids)))
            else:
                return [sum(self.wts[i]*a[self.ids[i]][j] for i in range(len(self.ids))) for j in range(a.shape[1])]
    class Samples:
        # Accumulates accepted sample points plus interpolated values of
        # every input point-data array, and writes them out as a vertex-only
        # vtkUnstructuredGrid.
        def __init__(self, dset):
            self.points = []
            self.vars = []   # [name, source array, collected values] triples
            self.V = []
            self.PV = []
            self.I = []
            for i in dset.PointData.keys():
                self.vars.append([i, dset.PointData[i], []])
        def num(self):
            return len(self.points)
        def add(self, I, p, v, pv, i):
            # Interpolate every tracked array at p (missing -> -99999),
            # then record the point and its bookkeeping values.
            vals = []
            for var in self.vars:
                value = I.Interpolate(p, var[1])
                if value == None:
                    value = -99999
                vals.append(value)
            self.points.append(p)
            self.V.append(v)
            self.PV.append(pv)
            self.I.append(i)
            for j,var in enumerate(self.vars):
                var[2].append(vals[j])
        def stuff_vtu(self, outpt):
            # Emit one VTK_VERTEX cell per sample point and attach all
            # collected arrays as float32 point data.
            outpt.SetPoints(dsa.VTKArray(np.array(self.points).astype('f4')))
            outpt.PointData.append(dsa.VTKArray(np.array(self.V).astype('f4')), 'V')
            outpt.PointData.append(dsa.VTKArray(np.array(self.PV).astype('f4')), 'PV')
            outpt.PointData.append(dsa.VTKArray(np.array(self.I).astype('f4')), 'I')
            ct = dsa.numpyTovtkDataArray(np.array([vtk.VTK_VERTEX]*outpt.GetNumberOfPoints()).astype('u1'))
            co = dsa.numpy_support.numpy_to_vtkIdTypeArray(np.array(range(0, 2*outpt.GetNumberOfPoints(), 2)).astype('i8'))
            ca = vtk.vtkCellArray()
            for i in range(outpt.GetNumberOfPoints()):
                ca.InsertNextCell(1, [i])
            outpt.VTKObject.SetCells(ct, co, ca)
            for v in self.vars:
                outpt.PointData.append(dsa.VTKArray(np.array(v[2]).astype('f4')), v[0])
    np.random.seed(12346)  # fixed seed: deterministic sampling between runs
    volume = inputs[0]
    if volume.VTKObject.IsA('vtkImageData'):
        is_vtu = False
    elif volume.VTKObject.IsA('vtkUnstructuredGrid'):
        is_vtu = True
    else:
        print('wha?')
        return
    components = volume
    samples = Samples(volume)
    interp = Interpolator(volume)
    # Resolve the "auto" settings from the data range.
    if target == 999999.0:
        target = np.max(volume.PointData[arrayName])
    if spread < 0:
        a = target - np.min(volume.PointData[arrayName])
        b = np.max(components.PointData[arrayName]) - target
        if a > b:
            spread = a
        else:
            spread = b
    # Acceptance probability per input point: 1 at the target value,
    # falling linearly to 0 at a distance of `spread`.
    array = 1.0 - np.minimum(np.abs(volume.PointData[arrayName] - target) / spread, 1.0)
    initial_points = []
    initial_pqs = []
    # Rejection-sample seed points proportionally to `array`.
    r = np.random.rand(len(array))
    pts = volume.Points[array > r]
    vs = array[array > r]
    js = np.array(range(len(array)))
    js = js[array > r]
    for p,v,j in zip(pts, vs, js):
        initial_points.append(p)
        initial_pqs.append(v)
    print('target', target, 'spread', spread, 'seeds', len(initial_points))
    # NOTE(review): the variables below are initialized but never used later.
    current_points = list(initial_points)
    current_pqs = list(initial_pqs)
    misses = [0]*len(initial_points)
    steps = [0]*len(initial_points)
    done = False
    indx = 0
    accept_count = 0
    # Metropolis-Hastings-style acceptance: perturb each seed, always accept
    # uphill moves, accept downhill moves with probability v1/v0.
    for l in range(loop_count):
        for p0, v0 in zip(initial_points, initial_pqs):
            p1 = p0 + np.random.normal(loc=0.0, scale=sscale, size=3)
            v1 = interp.Interpolate(p1, array)
            if not v1:
                continue  # outside the dataset (None) or zero probability
            accept = 0
            if v1 >= v0:
                accept = 1
            else:
                u = np.random.rand()
                if u < v1/v0:
                    accept = 1
            if accept:
                samples.add(interp, p1, v1, v1, 0)
    samples.stuff_vtu(output)
    return
| [
"numpy.random.normal",
"numpy.abs",
"numpy.random.rand",
"vtk.vtkIdList",
"vtk.reference",
"vtk.vtkGenericCell",
"vtk.vtkCellArray",
"numpy.max",
"numpy.array",
"vtk.vtkCellTreeLocator",
"numpy.random.seed",
"numpy.min"
] | [((5405, 5426), 'numpy.random.seed', 'np.random.seed', (['(12346)'], {}), '(12346)\n', (5419, 5426), True, 'import numpy as np\n'), ((5753, 5788), 'numpy.max', 'np.max', (['volume.PointData[arrayName]'], {}), '(volume.PointData[arrayName])\n', (5759, 5788), True, 'import numpy as np\n'), ((2417, 2440), 'vtk.reference', 'vtk.reference', (['([0] * 10)'], {}), '([0] * 10)\n', (2430, 2440), False, 'import vtk\n'), ((2524, 2544), 'vtk.vtkGenericCell', 'vtk.vtkGenericCell', ([], {}), '()\n', (2542, 2544), False, 'import vtk\n'), ((5149, 5167), 'vtk.vtkCellArray', 'vtk.vtkCellArray', ([], {}), '()\n', (5165, 5167), False, 'import vtk\n'), ((5826, 5861), 'numpy.min', 'np.min', (['volume.PointData[arrayName]'], {}), '(volume.PointData[arrayName])\n', (5832, 5861), True, 'import numpy as np\n'), ((5870, 5909), 'numpy.max', 'np.max', (['components.PointData[arrayName]'], {}), '(components.PointData[arrayName])\n', (5876, 5909), True, 'import numpy as np\n'), ((2634, 2658), 'vtk.vtkCellTreeLocator', 'vtk.vtkCellTreeLocator', ([], {}), '()\n', (2656, 2658), False, 'import vtk\n'), ((3017, 3032), 'vtk.vtkIdList', 'vtk.vtkIdList', ([], {}), '()\n', (3030, 3032), False, 'import vtk\n'), ((6008, 6052), 'numpy.abs', 'np.abs', (['(volume.PointData[arrayName] - target)'], {}), '(volume.PointData[arrayName] - target)\n', (6014, 6052), True, 'import numpy as np\n'), ((6747, 6794), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': 'sscale', 'size': '(3)'}), '(loc=0.0, scale=sscale, size=3)\n', (6763, 6794), True, 'import numpy as np\n'), ((6953, 6969), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (6967, 6969), True, 'import numpy as np\n'), ((3315, 3338), 'vtk.reference', 'vtk.reference', (['self.sid'], {}), '(self.sid)\n', (3328, 3338), False, 'import vtk\n'), ((4642, 4663), 'numpy.array', 'np.array', (['self.points'], {}), '(self.points)\n', (4650, 4663), True, 'import numpy as np\n'), ((4721, 4737), 'numpy.array', 'np.array', (['self.V'], {}), 
'(self.V)\n', (4729, 4737), True, 'import numpy as np\n'), ((4800, 4817), 'numpy.array', 'np.array', (['self.PV'], {}), '(self.PV)\n', (4808, 4817), True, 'import numpy as np\n'), ((4881, 4897), 'numpy.array', 'np.array', (['self.I'], {}), '(self.I)\n', (4889, 4897), True, 'import numpy as np\n'), ((5364, 5378), 'numpy.array', 'np.array', (['v[2]'], {}), '(v[2])\n', (5372, 5378), True, 'import numpy as np\n')] |
import requests, json
import numpy as np
from ftplib import FTP
from osgeo import ogr
from osgeo import osr
import os.path
import zipfile
import time
from math import floor, ceil, atan2, pi, sqrt
import pickle
import simplekml
from osgeo import gdal
import urllib.request
def loadMapsWCS(bbox, dim, mapType):
    """Download one elevation tile from the Kortforsyningen WCS service.

    Parameters
    ----------
    bbox : sequence of 4 numbers
        [xmin, ymin, xmax, ymax] in EPSG:25832 coordinates.
    dim : sequence of 2 ints
        Requested raster size as [width, height] in pixels.
    mapType : str
        't' for the terrain model (DTM), 's' for the surface model (DSM).

    Returns
    -------
    numpy.ndarray
        The downloaded tile as a 2-D array of elevations.

    Raises
    ------
    ValueError
        If mapType is neither 't' nor 's' (previously this failed later
        with a NameError because `coverage` was never assigned).
    """
    # Validate before touching the filesystem or the network.
    if mapType == 't':
        coverage = "dhm_terraen"
    elif mapType == 's':
        coverage = "dhm_overflade"
    else:
        raise ValueError("mapType must be 't' or 's', got %r" % (mapType,))
    mapFile = 'wcsMap.tif'
    # Drop a stale download left over from a previous call.
    if os.path.isfile(mapFile):
        os.remove(mapFile)
    BBOX = "BBOX="+str(bbox[0])+","+str(bbox[1])+","+str(bbox[2])+","+str(bbox[3])
    DIM = "WIDTH="+str(dim[0])+"&HEIGHT="+str(dim[1])
    url = "http://services.kortforsyningen.dk/dhm?REQUEST=GetCoverage&SERVICE=WCS&VERSION=1.0.0&COVERAGE="+coverage+"&CRS=EPSG:25832&"+BBOX+"&"+DIM+"&FORMAT=image/gtiff&login=ASN&password=<PASSWORD>"
    print(url)
    urllib.request.urlretrieve(url, mapFile)
    m = gdal.Open(mapFile)
    try:
        dtmMap = np.array(m.GetRasterBand(1).ReadAsArray())
    finally:
        m = None  # GDAL datasets are closed by dropping the reference
    return dtmMap
def loadMapsHybrid(nRange, mRange, mapType):
    """Assemble a mosaic of 1 km elevation tiles, reading local GeoTIFFs
    when present and falling back to a WCS download otherwise.

    Parameters
    ----------
    nRange, mRange : sequences of int
        Easting / northing tile indices in km (EPSG:25832 / 1000).
    mapType : str
        't' -> DTM tiles ('Kort/DTM_1km_*'), 's' -> DSM tiles.

    Returns
    -------
    numpy.ndarray
        The mosaic, flipped/transposed so it indexes as [easting, northing].

    Raises
    ------
    ValueError
        If mapType is neither 't' nor 's' (previously this failed later
        with a NameError because `prefix` was never assigned).
    """
    inputSize = 2500  # pixels per 1 km tile (0.4 m resolution)
    scaledSize = inputSize
    if mapType == 't':
        prefix = 'Kort/DTM_1km_'
    elif mapType == 's':
        prefix = 'Kort/DSM_1km_'
    else:
        raise ValueError("mapType must be 't' or 's', got %r" % (mapType,))
    Map = np.zeros([scaledSize*len(mRange), scaledSize*(len(nRange))])
    for n in nRange:
        for m in mRange:
            nStart = (n-nRange[0])*inputSize
            mStart = (mRange[-1]-m)*inputSize  # northing axis is stored flipped
            nEnd = nStart+inputSize
            mEnd = mStart+inputSize
            fileName = prefix+str(m)+'_'+str(n)+'.tif'
            if os.path.isfile(fileName):
                dt = gdal.Open(fileName)
                Map[mStart:mEnd, nStart:nEnd] = np.array(dt.GetRasterBand(1).ReadAsArray())
                dt = None  # release the GDAL dataset
            else:
                # Tile missing locally: fetch it from the WCS service.
                bbox = [n*1000, m*1000, (n+1)*1000, (m+1)*1000]
                print(bbox)
                Map[mStart:mEnd, nStart:nEnd] = loadMapsWCS(bbox, [2500, 2500], mapType)
    return np.fliplr(np.transpose(Map))
def getMapExtents(BasePosition, windowSize):
    """Return [n, m] km-tile indices covering the base station's radius.

    BasePosition is a dict with 'long'/'lat' (WGS84 degrees) and 'radius'
    (metres); windowSize is the step between returned tile indices.
    """
    # Build a WGS84 -> UTM32/ETRS89 (EPSG:25832) transformation.
    wgs84 = osr.SpatialReference()
    wgs84.ImportFromEPSG(4326)   # coordinate system of DAWA results
    utm32 = osr.SpatialReference()
    utm32.ImportFromEPSG(25832)  # coordinate system of the maps
    to_utm = osr.CoordinateTransformation(wgs84, utm32)
    # Project the base position into map coordinates.
    base = ogr.Geometry(ogr.wkbPoint)
    base.AddPoint(BasePosition.get('long'), BasePosition.get('lat'))
    base.Transform(to_utm)
    # Tile index bounds (km) reached by the coverage radius.
    radius = BasePosition.get('radius')
    x, y = base.GetX(), base.GetY()
    n_lo = int(floor((x - radius) / 1e3))
    n_hi = int(floor((x + radius) / 1e3))
    m_lo = int(floor((y - radius) / 1e3))
    m_hi = int(floor((y + radius) / 1e3))
    tiles = []
    for n in range(n_lo, n_hi + 1, windowSize):
        for m in range(m_lo, m_hi + 1, windowSize):
            tiles.append([n, m])
    return tiles
def saveKML(name, coordinateList, addressList, LoSList):
    """Write two Google Earth KML files: addresses with and without line
    of sight.

    Each address becomes a placemark named "<street> <number>"; LoSList[i]
    truthy routes it to <name>_Google_Earth_med_LoS.kml, otherwise to
    <name>_Google_Earth_uden_LoS.kml. Returns 0.
    """
    with_los = simplekml.Kml()
    without_los = simplekml.Kml()
    for idx in range(len(LoSList)):
        entry = addressList[idx]
        label = entry.get('addvejnavn') + ' ' + entry.get('husnr')
        lon = coordinateList[idx][0]
        lat = coordinateList[idx][1]
        if LoSList[idx]:
            with_los.newpoint(name=label, description="med LoS", coords=[(lon, lat)])
        else:
            without_los.newpoint(name=label, description="uden LoS", coords=[(lon, lat)])
    with_los.save(name + '_Google_Earth_med_LoS.kml')
    without_los.save(name + '_Google_Earth_uden_LoS.kml')
    return 0
def saveData(name, addressList, LoSList, BW, coveredBase):
    """Write the coverage result as two latin-1 encoded CSV files.

    The run settings are re-read from the pickled 'indstillinger' file;
    note that the pickled name overrides the *name* argument (preserved
    original behavior). Rows with line of sight go to <name>_med_LoS.csv,
    the rest to <name>_uden_LoS.csv. Returns 0.

    Parameters
    ----------
    name : str
        Output prefix (overridden by the pickled settings).
    addressList : list of dict
        DAWA address records ('uid', 'kommunenavn', ...).
    LoSList : list of int
        1 when the address has line of sight, else 0.
    BW : list of [download, upload]
        Bandwidth figures per address.
    coveredBase : list of list
        Names of the base stations covering each address.
    """
    with open('indstillinger', 'rb') as f:
        name, basePosition, logon, mapRange = pickle.load(f)
    keys = ['name','long','lat','radius','hbase', 'hclient', 'freq', 'download', 'upload', 'thetamin', 'thetamax']
    # Context managers guarantee the CSV handles are closed even when a
    # write fails (the original leaked them on exception).
    with open(name+'_med_LoS.csv', 'wb') as covered, open(name+'_uden_LoS.csv', 'wb') as notCovered:
        # Echo every base station's settings into both files.
        for n in range(0, len(basePosition)):
            BS = [str(basePosition[n].get(key)) for key in keys]
            s = str(BS)
            covered.write(bytes(s, 'latin-1'))
            notCovered.write(bytes(s, 'latin-1'))
        # Coverage summary: "<addresses with LoS>/<total>".
        covLevel = str(len([n for n in LoSList if n==1]))+'/'+str(len(LoSList))
        covered.write(bytes(covLevel, 'latin-1'))
        notCovered.write(bytes(covLevel, 'latin-1'))
        s='\nid;kvh;kommunenavn;kommunekode;postnr;vejnavn;vejkode;husnr;bogstav;download_tek_mbits;upload_tek_mbits;download_udt_privat_mbits;upload_udt_privat_mbits;download_udt_erhverv_mbits;upload_udt_erhverv_mbits;base(r)\n'
        covered.write(bytes(s, 'latin-1'))
        notCovered.write(bytes(s, 'latin-1'))
        for m in range(0, len(LoSList)):
            # ", "-joined list of covering base stations (same output as the
            # original hand-rolled loop).
            coveredBasesStr = ', '.join(str(cb) for cb in coveredBase[m])
            adr = addressList[m]
            addStr = (adr.get('uid')+';;'+adr.get('kommunenavn')+';'+adr.get('kommunekode')+';'
                      +adr.get('postnr')+';'+adr.get('addvejnavn')+';'+adr.get('vejkode')+';'
                      +adr.get('husnr')+';;'+str(BW[m][0])+';'+str(BW[m][1])+';'+str(BW[m][0])+';'+str(BW[m][1])+';'
                      +str(BW[m][0])+';'+str(BW[m][1])+';'+coveredBasesStr+'\n')
            if LoSList[m]:
                covered.write(bytes(addStr, 'latin-1'))
            else:
                notCovered.write(bytes(addStr, 'latin-1'))
    return 0
def findAddresses(BasePosition):
    """Query DAWA for all access addresses inside the base station's radius.

    BasePosition supplies 'long', 'lat' (WGS84) and 'radius' (metres).
    Returns (coordinates, address): parallel lists of [lon, lat] pairs and
    address-detail dicts; both empty when the response has no features.
    """
    DAWA_URL = 'http://dawa.aws.dk/adgangsadresser?cirkel={},{},{}&format=geojson'.format(
        BasePosition.get('long'), BasePosition.get('lat'), BasePosition.get('radius'))
    print(DAWA_URL)
    response = requests.get(DAWA_URL)
    try:
        features = json.loads(response.text)['features']
    except KeyError:
        features = []
    coordinates = []
    uid = []
    address = []
    for feat in features:
        props = feat['properties']
        coordinates.append(feat['geometry']['coordinates'])
        uid.append(props['id'])
        address.append({
            'uid': props['id'],
            'kommunenavn': props['kommunenavn'],
            'kommunekode': props['kommunekode'],
            'postnr': props['postnr'],
            'addvejnavn': props['vejnavn'],
            'vejkode': props['vejkode'],
            'husnr': props['husnr'],
        })
    return coordinates, address
def getBuildingCoords(address_uid, numElems=None):
    """Fetch the building footprint polygon for a DAWA access-address id.

    Parameters
    ----------
    address_uid : str
        DAWA access-address id.
    numElems : int, optional
        When given, thin the vertex list to at most this many points
        via shrink_list().

    Returns
    -------
    list of [lon, lat]
        The outer-ring vertices with the repeated closing vertex dropped;
        [] when the service reports no building or every retry fails
        (previously a total failure crashed with an AttributeError on the
        {} placeholder response).
    """
    DAWA_URL = 'http://dawa.aws.dk/bygninger?adgangsadresseid='+str(address_uid)+'&format=geojson'
    r = None
    # Up to five attempts; the service is occasionally flaky.
    for attempt in range(5):
        try:
            r = requests.get(DAWA_URL, timeout=10)
            break
        except Exception:  # narrowed from a bare except
            print('BBR points error')
            print(str(address_uid))
            time.sleep(0.5)
    if r is None:
        return []
    try:
        features = json.loads(r.text)['features']
    except (KeyError, ValueError):  # ValueError also covers invalid JSON
        features = []
    if len(features) == 0:
        return features
    ring = features[0]['geometry']['coordinates'][0]
    arr = ring[0:-1]  # drop the repeated closing vertex of the ring
    if numElems is None:
        return arr
    return shrink_list(arr, numElems)
def shrink_list(arr, numElems):
    """Thin *arr* to at most *numElems* entries, keeping the endpoints.

    Indices are spread evenly over the list and rounded with numpy's
    round-half-to-even; a list that is already short enough is returned
    unchanged (same object).
    """
    if len(arr) <= numElems:
        return arr
    picks = np.round(np.linspace(0, len(arr) - 1, numElems)).astype(int)
    return [arr[k] for k in picks]
def downloadMaps(self, logon, BasePosition):
    """Download and unzip the 10 km DTM/DSM tiles covering the base station.

    logon is a (user, password) pair for ftp.kortforsyningen.dk;
    BasePosition is a dict with 'long'/'lat' (WGS84) and 'radius' (metres).
    After unzipping, each archive is replaced by an empty marker file in
    Kort/; markers older than one year trigger a refresh. Returns 0.

    Fixes relative to the original: the logged-on flag was never set, so a
    new FTP session was opened for every tile and ftp.quit() was never
    reached; refreshing a stale marker hit an unbound `ftp`; the download
    file handle was never closed.
    """
    print('Downloading maps')
    self.statusSig.sig.emit("Downloader kort")
    # Project the base position from WGS84 into the map grid (EPSG:25832).
    source = osr.SpatialReference()
    source.ImportFromEPSG(4326)   # coordinate system of DAWA results
    target = osr.SpatialReference()
    target.ImportFromEPSG(25832)  # coordinate system of the maps
    transform = osr.CoordinateTransformation(source, target)
    point = ogr.Geometry(ogr.wkbPoint)
    point.AddPoint(BasePosition.get('long'), BasePosition.get('lat'))
    point.Transform(transform)
    # 10 km zip-tile index bounds reached by the coverage radius.
    radius = BasePosition.get('radius')
    nZipMin = int(floor((point.GetX() - radius) / 1e4))
    nZipMax = int(floor((point.GetX() + radius) / 1e4))
    mZipMin = int(floor((point.GetY() - radius) / 1e4))
    mZipMax = int(floor((point.GetY() + radius) / 1e4))
    nZipRange = list(range(nZipMin, nZipMax + 1))
    mZipRange = list(range(mZipMin, mZipMax + 1))
    state = {'ftp': None}

    def _connect():
        # Open (and afterwards reuse) a single FTP session.
        if state['ftp'] is None:
            state['ftp'] = FTP('ftp.kortforsyningen.dk', user=logon[0], passwd=logon[1])
        return state['ftp']

    def _sync_zip(zipName, remoteDir):
        # Download + unzip one tile archive unless a fresh (<1 year) local
        # marker exists; leaves an empty marker file behind.
        local = 'Kort/' + zipName
        if os.path.isfile(local):
            download = (time.time() - os.stat(local).st_mtime) > (60*60*24*365)
        else:
            ftp = _connect()
            ftp.cwd('/dhm_danmarks_hoejdemodel/' + remoteDir)
            download = zipName in ftp.nlst()
        if download:
            ftp = _connect()  # also covers the stale-marker refresh path
            ftp.cwd('/dhm_danmarks_hoejdemodel/' + remoteDir)
            with open(local, 'wb') as fh:
                ftp.retrbinary('RETR ' + zipName, fh.write, 102400)
            zf = zipfile.ZipFile(local)
            zf.extractall('Kort')
            zf.close()
            os.remove(local)
            open(local, 'a').close()  # empty marker so the tile is not re-fetched

    if not os.path.isdir("Kort"):
        os.mkdir("Kort")
    for n in nZipRange:
        for m in mZipRange:
            terrainZipName = 'DTM_' + str(m) + '_' + str(n) + '_TIF_UTM32-ETRS89.zip'
            self.statusSig.sig.emit("Downloader kort - " + terrainZipName)
            _sync_zip(terrainZipName, 'DTM')
            _sync_zip('DSM_' + str(m) + '_' + str(n) + '_TIF_UTM32-ETRS89.zip', 'DSM')
    if state['ftp'] is not None:
        state['ftp'].quit()
    return 0
def findCoveredAddresses(BasePosition, inCoordinates, inAddress, clientHeights=None, BPHeight=None):
    """Find the addresses within range of a base station and compute their
    line of sight (LoS) against the surface model.

    BasePosition is a dict with keys: 'name'; 'long','lat' (WGS84);
    'radius' (max coverage radius, m); 'hbase' (base antenna height, m);
    'hclient' (client antenna height over ground, m); 'rhclient' (client
    antenna height over roof, m); 'download'; 'upload'; 'freq' (MHz);
    'thetamin','thetamax' (antenna sector, degrees clockwise from North).

    Parameters
    ----------
    inCoordinates : list of [lon, lat]
        Candidate address coordinates (WGS84, from DAWA).
    inAddress : list of dict
        Matching DAWA address records.
    clientHeights : list of float, optional
        Precomputed client antenna heights; derived from the maps when None.
    BPHeight : float, optional
        Precomputed base antenna height; derived from the maps when None.

    Returns
    -------
    (coordinates, address, LoS, clientHeights, BPHeight)
        The in-sector, in-range subset of the inputs; LoS[i] is 1 when the
        first Fresnel zone to address i is clear, else 0.

    Call once per base station, then aggregate the results by address uid
    and export with saveData()/saveKML().

    Fixes relative to the original: np.linspace was called with a float
    sample count (rejected by numpy >= 1.18), .astype(np.int) used the
    alias removed in numpy 1.24, and `clientHeights == None` is now an
    identity test.
    """
    print('Calculating for '+str(len(inAddress))+' addresses')
    # Set parameters
    windowSize = 1    # size of calculation window (in no. of map tiles)
    inputSize = 2500  # side length of map tiles (in pixels)
    resolution = 0.4  # map resolution in m
    Kfactor = 1       # 1 is a conservative value
    Rearth = 6371e3*Kfactor  # effective earth radius
    source = osr.SpatialReference()
    source.ImportFromEPSG(4326)   # coordinate system of DAWA results
    target = osr.SpatialReference()
    target.ImportFromEPSG(25832)  # coordinate system of maps
    # Find the necessary map file range.
    mapFiles = getMapExtents(BasePosition, windowSize)
    nMin = min([n for [n, _] in mapFiles])
    nMax = max([n for [n, _] in mapFiles])
    mMin = min([m for [n, m] in mapFiles])
    mMax = max([m for [n, m] in mapFiles])
    # Overview rasters at 100 px/km: map image, station/address markers,
    # and rasterized sight lines.
    imgDim = [nMax-nMin+1, mMax-mMin+1]
    imgDim = [(n+n%windowSize)*100 for n in imgDim]
    mapMat = np.zeros(imgDim)
    posMat = np.zeros(imgDim)
    slMat = np.zeros(imgDim)
    wl = 3e2/BasePosition.get('freq')  # wavelength c/f (0.06 m @ 5 GHz)
    transform = osr.CoordinateTransformation(source, target)
    basePos = ogr.Geometry(ogr.wkbPoint)
    basePos.AddPoint(BasePosition.get('long'), BasePosition.get('lat'))
    basePos.Transform(transform)
    BPCoords = [basePos.GetX()/1e3, basePos.GetY()/1e3]  # base position, km
    thetamin = BasePosition.get('thetamin')
    thetamax = BasePosition.get('thetamax')
    # ------------------------------------------------------------------
    # Keep only addresses inside the antenna sector and coverage radius.
    coordinates = []
    CPCoords = []
    address = []
    angle = []
    if clientHeights is None:
        clientHeights = []
        clientHeights_stat = True   # heights must be computed from the maps
    else:
        clientHeights_stat = False
    totDist = []
    LoS = []
    for n in range(0, len(inAddress)):
        point = ogr.Geometry(ogr.wkbPoint)
        point.AddPoint(inCoordinates[n][0], inCoordinates[n][1])
        point.Transform(transform)
        # Bearing from base to client, degrees clockwise from North.
        inAngle = (pi+atan2(basePos.GetX()-point.GetX(), basePos.GetY()-point.GetY()))*180/pi
        distance = sqrt((basePos.GetX()-point.GetX())**2+(basePos.GetY()-point.GetY())**2)
        if (inAngle>thetamin) & (inAngle<thetamax) & (distance<=BasePosition.get('radius')):
            coordinates.append(inCoordinates[n])
            address.append(inAddress[n])
            angle.append(inAngle)
            CPCoords.append([point.GetX()/1e3, point.GetY()/1e3])
            if clientHeights_stat == True:
                clientHeights.append(-999)  # placeholder, filled in below
            totDist.append(sqrt((basePos.GetX()-point.GetX())**2+(basePos.GetY()-point.GetY())**2))
            LoS.append(1)  # assume clear until a blocked Fresnel zone is found
    terrainMap = np.zeros([inputSize, inputSize])
    sSquare = 5  # half-size (px) of the roof-search square around a client
    maxDiff = 5  # max roof-height difference (m) still counted as same roof
    heights = []
    if len(coordinates):
        # --------------------------------------------------------------
        # Determine client and base antenna heights from the maps.
        if clientHeights_stat == True:
            for [n, m] in mapFiles:
                CPInside = any([(N>=n) & (N<=n+windowSize) & (M>=m) & (M<=m+windowSize) for [N, M] in CPCoords])
                BPInside = any([(N>=n) & (N<=n+windowSize) & (M>=m) & (M<=m+windowSize) for [N, M] in [BPCoords]])
                if CPInside | BPInside:  # only load a tile with stations/addresses inside
                    print("Loaded "+str([n, m]))
                    terrainMap = loadMapsHybrid(range(n, n+windowSize), range(m, m+windowSize), 't')
                    surfMap = loadMapsHybrid(range(n, n+windowSize), range(m, m+windowSize), 's')
                    for nClient in range(0, len(address)):
                        if ((CPCoords[nClient][0]>n) & (CPCoords[nClient][0]<n+windowSize) & (CPCoords[nClient][1]>m) & (CPCoords[nClient][1]<m+windowSize)):
                            CPMatrixCoords = [floor((CPCoords[nClient][0]-n)*inputSize), floor((CPCoords[nClient][1]-m)*inputSize)]
                            tHeight = terrainMap[CPMatrixCoords[0], CPMatrixCoords[1]] + BasePosition.get('hclient')
                            sHeight = surfMap[CPMatrixCoords[0], CPMatrixCoords[1]]
                            posMat[floor((CPCoords[nClient][0]-nMin)*100), floor((CPCoords[nClient][1]-mMin)*100)] = 255
                            heights = []
                            if BasePosition.get('rhclient') > 0:
                                # Candidate roof-mounted heights within a small
                                # square around the address point.
                                NRange = range(max([0, CPMatrixCoords[0]-sSquare]), min([inputSize, CPMatrixCoords[0]+sSquare]))
                                MRange = range(max([0, CPMatrixCoords[1]-sSquare]), min([inputSize, CPMatrixCoords[1]+sSquare]))
                                heights.extend([surfMap[N, M]+BasePosition.get('rhclient') for N in NRange for M in MRange if surfMap[N, M] <= (sHeight+maxDiff)])
                            heights.append(tHeight)
                            maxH = max(heights)
                            clientHeights[nClient] = maxH
                    if BPInside:
                        BPMatrixCoords = [floor((BPCoords[0]-n)*inputSize), floor((BPCoords[1]-m)*inputSize)]
                        BPHeight = terrainMap[BPMatrixCoords[0], BPMatrixCoords[1]] + BasePosition.get('hbase')
                        posMat[floor((BPCoords[0]-nMin)*100), floor((BPCoords[1]-mMin)*100)] = 255
                # Release the large map arrays before the next tile. (The
                # original duplicated this cleanup in both branches of the
                # `if` above; hoisted here once, same effect.)
                terrainMap = None
                surfMap = None
                maxH = None
                NRange = None
                MRange = None
                sHeight = None
                tHeight = None
                CPMatrixCoords = None
                CPInside = None
                BPInside = None
        # --------------------------------------------------------------
        # LoS calculation, map subsection by subsection.
        hCentre = []
        hFresnel = []
        hTerrain = []
        sortedMapFiles = []
        # Process border tiles first, spiralling inwards.
        for offset in range(0, len(mapFiles)):
            sortedMapFiles.extend([[n, m] for [n, m] in mapFiles if ((n==nMin+offset)|(n==nMax-offset)|(m==mMin+offset)|(m==mMax-offset)) & ([n, m] not in sortedMapFiles)])
        for [n, m] in sortedMapFiles:
            dist = []
            hFresnel = []
            hTerrain = []
            hCentre = []
            sMapLoaded = 0  # never load the same subsection twice
            cornerPoints = [[n, m], [n+windowSize, m], [n+windowSize, m+windowSize], [n, m+windowSize]]
            cornerMatrixCoords = [[floor((corner[0]-n)*inputSize), floor((corner[1]-m)*inputSize)] for corner in cornerPoints]
            BPMatrixCoords = [floor((BPCoords[0]-n)*inputSize), floor((BPCoords[1]-m)*inputSize)]
            # Only clients whose LoS has not already been ruled out.
            for nClient in [k for k in range(0, len(address)) if LoS[k]]:
                CPMatrixCoords = [floor((CPCoords[nClient][0]-n)*inputSize), floor((CPCoords[nClient][1]-m)*inputSize)]
                CPInside = any([(N>=n) & (N<n+windowSize) & (M>=m) & (M<m+windowSize) for [N, M] in [CPCoords[nClient]]])
                BPInside = any([(N>=n) & (N<n+windowSize) & (M>=m) & (M<m+windowSize) for [N, M] in [BPCoords]])
                # Intersections of the base->client sight line with each of
                # the four tile-border edges (parametric line-line test).
                intPoints = []
                for corner in range(-1, 3):
                    x00 = BPMatrixCoords[0]
                    y00 = BPMatrixCoords[1]
                    x10 = cornerMatrixCoords[corner][0]
                    y10 = cornerMatrixCoords[corner][1]
                    x01 = CPMatrixCoords[0]-BPMatrixCoords[0]
                    y01 = CPMatrixCoords[1]-BPMatrixCoords[1]
                    x11 = cornerMatrixCoords[corner+1][0]-cornerMatrixCoords[corner][0]
                    y11 = cornerMatrixCoords[corner+1][1]-cornerMatrixCoords[corner][1]
                    d = x11*y01 - x01*y11  # zero when sight line and edge are parallel
                    if d != 0:
                        s = (1/d) * ((x00-x10)*y01-(y00-y10)*x01)
                        t = -(1/d) * (-(x00-x10)*y11+(y00-y10)*x11)
                    else:
                        s = -1
                        t = -1
                    if (s>=0) & (s<=1) & (t>=0) & (t<=1):
                        intPoints.append([x10+s*x11, y10+s*y11])
                if len(intPoints) > 2:
                    print("Error: Sight line intercepts bounding box at more than 2 points!")
                # Load the map section if a sight line crosses its border,
                # or if either endpoint is inside.
                if (len(intPoints)>1) | CPInside | BPInside:
                    if not sMapLoaded:
                        surfaceMap = loadMapsHybrid(range(n, n+windowSize), range(m, m+windowSize), 's')
                        print("Loaded "+str([n, m]))
                        sMapLoaded = 1
                        mapMat[(floor(n-nMin)*100):(floor(n-nMin)*100+100*windowSize), (floor(m-mMin)*100):(floor(m-mMin)*100+100*windowSize)] = surfaceMap[::25, ::25]
                    # Pick the segment endpoints of the sight line inside
                    # this tile.
                    if len(intPoints) == 2:
                        x0, y0 = intPoints[0]
                        x1, y1 = intPoints[1]
                    if CPInside & BPInside:
                        [x0, y0] = BPMatrixCoords
                        [x1, y1] = CPMatrixCoords
                    elif CPInside:
                        # NOTE(review): assumes intPoints is non-empty here;
                        # an endpoint exactly on the border could break this.
                        x0, y0 = intPoints[0]
                        [x1, y1] = CPMatrixCoords
                    elif BPInside:
                        [x0, y0] = BPMatrixCoords
                        x1, y1 = intPoints[0]
                    # Sample the surface model along the segment, roughly
                    # one sample per pixel.
                    num = sqrt((x0-x1)**2+(y0-y1)**2)
                    x, y = np.linspace(x0, x1, int(num)), np.linspace(y0, y1, int(num))  # int(): numpy rejects float counts
                    x = x.astype(int)  # np.int alias removed in numpy 1.24
                    y = y.astype(int)
                    xn = [x[k] for k in range(0, len(x)) if ((x[k]>0) & (x[k]<(windowSize*inputSize)) & (y[k]>0) & (y[k]<(windowSize*inputSize)))]
                    yn = [y[k] for k in range(0, len(x)) if ((x[k]>0) & (x[k]<(windowSize*inputSize)) & (y[k]>0) & (y[k]<(windowSize*inputSize)))]
                    x = xn
                    y = yn
                    zi = surfaceMap[x, y]
                    dist = [sqrt((x[k]-CPMatrixCoords[0])**2 + (y[k]-CPMatrixCoords[1])**2)*resolution for k in range(0, len(x))]
                    dist = [dist[k] for k in range(0, len(dist)) if ((totDist[nClient]>dist[k]) & (dist[k]>0))]
                    # NOTE(review): zi is not filtered together with dist, so
                    # out-of-range samples can misalign zi and dist below;
                    # preserved from the original -- verify intent.
                    # Effective-earth-curvature correction along the path.
                    earthCurvature = [Rearth*(np.cos((distance-totDist[nClient]/2)/Rearth)-np.cos(totDist[nClient]/(2*Rearth))) for distance in dist]
                    earthCurvature = [ec-Rearth*(np.cos((-totDist[nClient]/2)/Rearth)-np.cos(totDist[nClient]/(2*Rearth))) for ec in earthCurvature]
                    hTerrain = [zi[k]+earthCurvature[k] for k in range(0, len(dist))]
                    # Straight line between the antennas and the lower edge
                    # of the first Fresnel zone beneath it.
                    hCentre = [clientHeights[nClient]+(BPHeight-clientHeights[nClient])*distance/totDist[nClient] for distance in dist]
                    hFresnel = [hCentre[k] - sqrt(wl*dist[k]*(totDist[nClient]-dist[k])/totDist[nClient]) for k in range(0, len(dist))]
                    # Rasterize the sight line into the overview image.
                    xp, yp = np.linspace(x0/25+(n-nMin)*100, x1/25+(n-nMin)*100, ceil(num/25)), np.linspace(y0/25+(m-mMin)*100, y1/25+(m-mMin)*100, ceil(num/25))
                    xp = xp.astype(int)
                    yp = yp.astype(int)
                    xp1 = [xp[k] for k in range(0, len(xp)) if ((xp[k]>0) & (xp[k]<len(posMat)) & (yp[k]>0) & (yp[k]<len(posMat)))]
                    yp1 = [yp[k] for k in range(0, len(xp)) if ((xp[k]>0) & (xp[k]<len(posMat)) & (yp[k]>0) & (yp[k]<len(posMat)))]
                    slMat[xp1, yp1] = np.ones(len(xp1))
                    # Blocked when terrain pokes into the first Fresnel zone.
                    if any([hFresnel[k]<hTerrain[k] for k in range(0, len(hFresnel))]):
                        LoS[nClient] = 0
                        if ((dist[0]<2.0) & (hFresnel[0]<hTerrain[0])) | ((dist[-1]<2.0) & (hFresnel[-1]<hTerrain[-1])):
                            print("Warning: Client below terrain!")
    print('Clients processed:')
    print(len(address))
    return coordinates, address, LoS, clientHeights, BPHeight
# Quick manual smoke test: thin a 7-element list down to 6 entries and
# print the result.
if __name__=='__main__':
    arr = [1,2,3,4,5,6,7]
    a = shrink_list(arr, 6)
    print(a)
"osgeo.gdal.Open",
"zipfile.ZipFile",
"math.floor",
"math.sqrt",
"time.sleep",
"osgeo.osr.CoordinateTransformation",
"ftplib.FTP",
"numpy.linspace",
"json.loads",
"osgeo.ogr.Geometry",
"pickle.load",
"requests.get",
"numpy.cos",
"numpy.transpose",
"time.time",
"math.ceil",
"osgeo.osr... | [((1140, 1158), 'osgeo.gdal.Open', 'gdal.Open', (['mapFile'], {}), '(mapFile)\n', (1149, 1158), False, 'from osgeo import gdal\n'), ((2462, 2484), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (2482, 2484), False, 'from osgeo import osr\n'), ((2572, 2594), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (2592, 2594), False, 'from osgeo import osr\n'), ((2680, 2724), 'osgeo.osr.CoordinateTransformation', 'osr.CoordinateTransformation', (['source', 'target'], {}), '(source, target)\n', (2708, 2724), False, 'from osgeo import osr\n'), ((2749, 2775), 'osgeo.ogr.Geometry', 'ogr.Geometry', (['ogr.wkbPoint'], {}), '(ogr.wkbPoint)\n', (2761, 2775), False, 'from osgeo import ogr\n'), ((3406, 3421), 'simplekml.Kml', 'simplekml.Kml', ([], {}), '()\n', (3419, 3421), False, 'import simplekml\n'), ((3435, 3450), 'simplekml.Kml', 'simplekml.Kml', ([], {}), '()\n', (3448, 3450), False, 'import simplekml\n'), ((6516, 6538), 'requests.get', 'requests.get', (['DAWA_URL'], {}), '(DAWA_URL)\n', (6528, 6538), False, 'import requests, json\n'), ((8472, 8494), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (8492, 8494), False, 'from osgeo import osr\n'), ((8579, 8601), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (8599, 8601), False, 'from osgeo import osr\n'), ((8681, 8725), 'osgeo.osr.CoordinateTransformation', 'osr.CoordinateTransformation', (['source', 'target'], {}), '(source, target)\n', (8709, 8725), False, 'from osgeo import osr\n'), ((8749, 8775), 'osgeo.ogr.Geometry', 'ogr.Geometry', (['ogr.wkbPoint'], {}), '(ogr.wkbPoint)\n', (8761, 8775), False, 'from osgeo import ogr\n'), ((14050, 14072), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (14070, 14072), False, 'from osgeo import osr\n'), ((14164, 14186), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (14184, 14186), False, 'from osgeo import osr\n'), ((14688, 
14704), 'numpy.zeros', 'np.zeros', (['imgDim'], {}), '(imgDim)\n', (14696, 14704), True, 'import numpy as np\n'), ((14719, 14735), 'numpy.zeros', 'np.zeros', (['imgDim'], {}), '(imgDim)\n', (14727, 14735), True, 'import numpy as np\n'), ((14749, 14765), 'numpy.zeros', 'np.zeros', (['imgDim'], {}), '(imgDim)\n', (14757, 14765), True, 'import numpy as np\n'), ((14857, 14901), 'osgeo.osr.CoordinateTransformation', 'osr.CoordinateTransformation', (['source', 'target'], {}), '(source, target)\n', (14885, 14901), False, 'from osgeo import osr\n'), ((14923, 14949), 'osgeo.ogr.Geometry', 'ogr.Geometry', (['ogr.wkbPoint'], {}), '(ogr.wkbPoint)\n', (14935, 14949), False, 'from osgeo import ogr\n'), ((16812, 16844), 'numpy.zeros', 'np.zeros', (['[inputSize, inputSize]'], {}), '([inputSize, inputSize])\n', (16820, 16844), True, 'import numpy as np\n'), ((2382, 2399), 'numpy.transpose', 'np.transpose', (['Map'], {}), '(Map)\n', (2394, 2399), True, 'import numpy as np\n'), ((4326, 4340), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4337, 4340), False, 'import pickle\n'), ((15781, 15807), 'osgeo.ogr.Geometry', 'ogr.Geometry', (['ogr.wkbPoint'], {}), '(ogr.wkbPoint)\n', (15793, 15807), False, 'from osgeo import ogr\n'), ((6575, 6593), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (6585, 6593), False, 'import requests, json\n'), ((7485, 7519), 'requests.get', 'requests.get', (['DAWA_URL'], {'timeout': '(10)'}), '(DAWA_URL, timeout=10)\n', (7497, 7519), False, 'import requests, json\n'), ((7745, 7763), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (7755, 7763), False, 'import requests, json\n'), ((1928, 1947), 'osgeo.gdal.Open', 'gdal.Open', (['fileName'], {}), '(fileName)\n', (1937, 1947), False, 'from osgeo import gdal\n'), ((7667, 7682), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (7677, 7682), False, 'import time\n'), ((10231, 10272), 'zipfile.ZipFile', 'zipfile.ZipFile', (["('Kort/' + terrainZipName)"], {}), "('Kort/' + 
terrainZipName)\n", (10246, 10272), False, 'import zipfile\n'), ((11193, 11234), 'zipfile.ZipFile', 'zipfile.ZipFile', (["('Kort/' + surfaceZipName)"], {}), "('Kort/' + surfaceZipName)\n", (11208, 11234), False, 'import zipfile\n'), ((21715, 21751), 'math.floor', 'floor', (['((BPCoords[0] - n) * inputSize)'], {}), '((BPCoords[0] - n) * inputSize)\n', (21720, 21751), False, 'from math import floor, ceil, atan2, pi, sqrt\n'), ((21749, 21785), 'math.floor', 'floor', (['((BPCoords[1] - m) * inputSize)'], {}), '((BPCoords[1] - m) * inputSize)\n', (21754, 21785), False, 'from math import floor, ceil, atan2, pi, sqrt\n'), ((9843, 9904), 'ftplib.FTP', 'FTP', (['"""ftp.kortforsyningen.dk"""'], {'user': 'logon[0]', 'passwd': 'logon[1]'}), "('ftp.kortforsyningen.dk', user=logon[0], passwd=logon[1])\n", (9846, 9904), False, 'from ftplib import FTP\n'), ((10787, 10848), 'ftplib.FTP', 'FTP', (['"""ftp.kortforsyningen.dk"""'], {'user': 'logon[0]', 'passwd': 'logon[1]'}), "('ftp.kortforsyningen.dk', user=logon[0], passwd=logon[1])\n", (10790, 10848), False, 'from ftplib import FTP\n'), ((21590, 21624), 'math.floor', 'floor', (['((corner[0] - n) * inputSize)'], {}), '((corner[0] - n) * inputSize)\n', (21595, 21624), False, 'from math import floor, ceil, atan2, pi, sqrt\n'), ((21622, 21656), 'math.floor', 'floor', (['((corner[1] - m) * inputSize)'], {}), '((corner[1] - m) * inputSize)\n', (21627, 21656), False, 'from math import floor, ceil, atan2, pi, sqrt\n'), ((21894, 21939), 'math.floor', 'floor', (['((CPCoords[nClient][0] - n) * inputSize)'], {}), '((CPCoords[nClient][0] - n) * inputSize)\n', (21899, 21939), False, 'from math import floor, ceil, atan2, pi, sqrt\n'), ((21937, 21982), 'math.floor', 'floor', (['((CPCoords[nClient][1] - m) * inputSize)'], {}), '((CPCoords[nClient][1] - m) * inputSize)\n', (21942, 21982), False, 'from math import floor, ceil, atan2, pi, sqrt\n'), ((24690, 24727), 'math.sqrt', 'sqrt', (['((x0 - x1) ** 2 + (y0 - y1) ** 2)'], {}), '((x0 - x1) ** 2 + 
(y0 - y1) ** 2)\n', (24694, 24727), False, 'from math import floor, ceil, atan2, pi, sqrt\n'), ((9692, 9703), 'time.time', 'time.time', ([], {}), '()\n', (9701, 9703), False, 'import time\n'), ((10636, 10647), 'time.time', 'time.time', ([], {}), '()\n', (10645, 10647), False, 'import time\n'), ((24814, 24838), 'numpy.linspace', 'np.linspace', (['x0', 'x1', 'num'], {}), '(x0, x1, num)\n', (24825, 24838), True, 'import numpy as np\n'), ((24840, 24864), 'numpy.linspace', 'np.linspace', (['y0', 'y1', 'num'], {}), '(y0, y1, num)\n', (24851, 24864), True, 'import numpy as np\n'), ((19545, 19581), 'math.floor', 'floor', (['((BPCoords[0] - n) * inputSize)'], {}), '((BPCoords[0] - n) * inputSize)\n', (19550, 19581), False, 'from math import floor, ceil, atan2, pi, sqrt\n'), ((19579, 19615), 'math.floor', 'floor', (['((BPCoords[1] - m) * inputSize)'], {}), '((BPCoords[1] - m) * inputSize)\n', (19584, 19615), False, 'from math import floor, ceil, atan2, pi, sqrt\n'), ((25385, 25456), 'math.sqrt', 'sqrt', (['((x[n] - CPMatrixCoords[0]) ** 2 + (y[n] - CPMatrixCoords[1]) ** 2)'], {}), '((x[n] - CPMatrixCoords[0]) ** 2 + (y[n] - CPMatrixCoords[1]) ** 2)\n', (25389, 25456), False, 'from math import floor, ceil, atan2, pi, sqrt\n'), ((26239, 26307), 'math.sqrt', 'sqrt', (['(wl * dist[n] * (totDist[nClient] - dist[n]) / totDist[nClient])'], {}), '(wl * dist[n] * (totDist[nClient] - dist[n]) / totDist[nClient])\n', (26243, 26307), False, 'from math import floor, ceil, atan2, pi, sqrt\n'), ((26417, 26431), 'math.ceil', 'ceil', (['(num / 25)'], {}), '(num / 25)\n', (26421, 26431), False, 'from math import floor, ceil, atan2, pi, sqrt\n'), ((26484, 26498), 'math.ceil', 'ceil', (['(num / 25)'], {}), '(num / 25)\n', (26488, 26498), False, 'from math import floor, ceil, atan2, pi, sqrt\n'), ((18259, 18304), 'math.floor', 'floor', (['((CPCoords[nClient][0] - n) * inputSize)'], {}), '((CPCoords[nClient][0] - n) * inputSize)\n', (18264, 18304), False, 'from math import floor, ceil, atan2, pi, 
sqrt\n'), ((18302, 18347), 'math.floor', 'floor', (['((CPCoords[nClient][1] - m) * inputSize)'], {}), '((CPCoords[nClient][1] - m) * inputSize)\n', (18307, 18347), False, 'from math import floor, ceil, atan2, pi, sqrt\n'), ((19757, 19790), 'math.floor', 'floor', (['((BPCoords[0] - nMin) * 100)'], {}), '((BPCoords[0] - nMin) * 100)\n', (19762, 19790), False, 'from math import floor, ceil, atan2, pi, sqrt\n'), ((19788, 19821), 'math.floor', 'floor', (['((BPCoords[1] - mMin) * 100)'], {}), '((BPCoords[1] - mMin) * 100)\n', (19793, 19821), False, 'from math import floor, ceil, atan2, pi, sqrt\n'), ((25670, 25720), 'numpy.cos', 'np.cos', (['((distance - totDist[nClient] / 2) / Rearth)'], {}), '((distance - totDist[nClient] / 2) / Rearth)\n', (25676, 25720), True, 'import numpy as np\n'), ((25715, 25754), 'numpy.cos', 'np.cos', (['(totDist[nClient] / (2 * Rearth))'], {}), '(totDist[nClient] / (2 * Rearth))\n', (25721, 25754), True, 'import numpy as np\n'), ((18582, 18624), 'math.floor', 'floor', (['((CPCoords[nClient][0] - nMin) * 100)'], {}), '((CPCoords[nClient][0] - nMin) * 100)\n', (18587, 18624), False, 'from math import floor, ceil, atan2, pi, sqrt\n'), ((18622, 18664), 'math.floor', 'floor', (['((CPCoords[nClient][1] - mMin) * 100)'], {}), '((CPCoords[nClient][1] - mMin) * 100)\n', (18627, 18664), False, 'from math import floor, ceil, atan2, pi, sqrt\n'), ((25824, 25862), 'numpy.cos', 'np.cos', (['(-totDist[nClient] / 2 / Rearth)'], {}), '(-totDist[nClient] / 2 / Rearth)\n', (25830, 25862), True, 'import numpy as np\n'), ((25861, 25900), 'numpy.cos', 'np.cos', (['(totDist[nClient] / (2 * Rearth))'], {}), '(totDist[nClient] / (2 * Rearth))\n', (25867, 25900), True, 'import numpy as np\n'), ((23918, 23933), 'math.floor', 'floor', (['(n - nMin)'], {}), '(n - nMin)\n', (23923, 23933), False, 'from math import floor, ceil, atan2, pi, sqrt\n'), ((23974, 23989), 'math.floor', 'floor', (['(m - mMin)'], {}), '(m - mMin)\n', (23979, 23989), False, 'from math import floor, 
ceil, atan2, pi, sqrt\n'), ((23938, 23953), 'math.floor', 'floor', (['(n - nMin)'], {}), '(n - nMin)\n', (23943, 23953), False, 'from math import floor, ceil, atan2, pi, sqrt\n'), ((23994, 24009), 'math.floor', 'floor', (['(m - mMin)'], {}), '(m - mMin)\n', (23999, 24009), False, 'from math import floor, ceil, atan2, pi, sqrt\n')] |
# Copyright (c) 2020 zfit
# noinspection PyUnresolvedReferences
from zfit.core.testing import setup_function, teardown_function, tester
# deactivating CUDA capable gpus
from zfit.z.tools import _auto_upcast
suppress_gpu = False
if suppress_gpu:
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = ""
import pytest
import tensorflow as tf
import numpy as np
import zfit.z.math
prec = 0.00001
def test_polynomial():
coeffs = [5.3, 1.2, complex(1.3, 0.4), -42, 32.4, 529.3, -0.93]
x = tf.constant(5.)
polynom_tf = zfit.z.math.poly_complex(*(coeffs + [x])) # py34 comp: *x, y does not work
polynom_np = np.polyval(coeffs[::-1], 5.)
result = polynom_tf.numpy()
assert result == pytest.approx(polynom_np, rel=prec)
def test_auto_upcast():
tensor_from_f32 = _auto_upcast(tf.constant(5, dtype=tf.float32))
tensor_from_f64 = _auto_upcast(tf.constant(5, dtype=tf.float64))
assert tensor_from_f32.dtype == tf.float64
assert tensor_from_f64.dtype == tf.float64
tensor_from_i32 = _auto_upcast(tf.constant(5, dtype=tf.int32))
tensor_from_i64 = _auto_upcast(tf.constant(5, dtype=tf.int64))
assert tensor_from_i32.dtype == tf.int64
assert tensor_from_i64.dtype == tf.int64
tensor_from_c64 = _auto_upcast(tf.constant(5., dtype=tf.complex64))
tensor_from_c128 = _auto_upcast(tf.constant(5., dtype=tf.complex128))
assert tensor_from_c64.dtype == tf.complex128
assert tensor_from_c128.dtype == tf.complex128
| [
"pytest.approx",
"tensorflow.constant",
"numpy.polyval"
] | [((572, 588), 'tensorflow.constant', 'tf.constant', (['(5.0)'], {}), '(5.0)\n', (583, 588), True, 'import tensorflow as tf\n'), ((698, 727), 'numpy.polyval', 'np.polyval', (['coeffs[::-1]', '(5.0)'], {}), '(coeffs[::-1], 5.0)\n', (708, 727), True, 'import numpy as np\n'), ((781, 816), 'pytest.approx', 'pytest.approx', (['polynom_np'], {'rel': 'prec'}), '(polynom_np, rel=prec)\n', (794, 816), False, 'import pytest\n'), ((878, 910), 'tensorflow.constant', 'tf.constant', (['(5)'], {'dtype': 'tf.float32'}), '(5, dtype=tf.float32)\n', (889, 910), True, 'import tensorflow as tf\n'), ((947, 979), 'tensorflow.constant', 'tf.constant', (['(5)'], {'dtype': 'tf.float64'}), '(5, dtype=tf.float64)\n', (958, 979), True, 'import tensorflow as tf\n'), ((1111, 1141), 'tensorflow.constant', 'tf.constant', (['(5)'], {'dtype': 'tf.int32'}), '(5, dtype=tf.int32)\n', (1122, 1141), True, 'import tensorflow as tf\n'), ((1178, 1208), 'tensorflow.constant', 'tf.constant', (['(5)'], {'dtype': 'tf.int64'}), '(5, dtype=tf.int64)\n', (1189, 1208), True, 'import tensorflow as tf\n'), ((1336, 1372), 'tensorflow.constant', 'tf.constant', (['(5.0)'], {'dtype': 'tf.complex64'}), '(5.0, dtype=tf.complex64)\n', (1347, 1372), True, 'import tensorflow as tf\n'), ((1409, 1446), 'tensorflow.constant', 'tf.constant', (['(5.0)'], {'dtype': 'tf.complex128'}), '(5.0, dtype=tf.complex128)\n', (1420, 1446), True, 'import tensorflow as tf\n')] |
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tracks the order of alchemy events and resulting stones and potions."""
import abc
import collections
import copy
import itertools
import random
from typing import Any, Dict, List, Mapping, Optional, Sequence, Set, Tuple, Union
from dm_alchemy.types import graphs
from dm_alchemy.types import stones_and_potions
from dm_alchemy.types import utils
import numpy as np
Stone = stones_and_potions.Stone
Potion = stones_and_potions.Potion
LatentStone = stones_and_potions.LatentStone
LatentPotion = stones_and_potions.LatentPotion
AlignedStone = stones_and_potions.AlignedStone
PerceivedPotion = stones_and_potions.PerceivedPotion
StoneMap = stones_and_potions.StoneMap
PotionMap = stones_and_potions.PotionMap
CAULDRON = stones_and_potions.CAULDRON
RewardWeights = stones_and_potions.RewardWeights
Graph = graphs.Graph
NEVER_USED = -1
NO_OUTCOME = -1
UNKNOWN_TYPE = -3
class EventTracker(abc.ABC):
"""Base class for things that track alchemy events."""
def __init__(self, name):
self.name = name
@abc.abstractmethod
def potion_used(
self, stone_ind: int, potion_ind: int, val: int, start_stone: graphs.Node,
stone_inst: int, potion: Potion, end_stone: graphs.Node) -> None:
pass
def failed_potion_use(
self, stone_ind: int, start_stone: graphs.Node, stone_inst: int) -> None:
"""Optional callback when a potion use is attempted but fails."""
pass
class GameState:
"""Keeps track of the symbolic state of an alchemy game."""
def __init__(
self, graph: graphs.Graph, trial_items: utils.TrialItems,
event_trackers: Optional[Sequence[EventTracker]] = None
):
self._stones = copy.deepcopy(trial_items.stones)
self._stone_idx_to_ind = {p.idx: i for i, p in enumerate(self._stones)}
self._stone_ind_to_idx = {i: p.idx for i, p in enumerate(self._stones)}
self._potions = copy.deepcopy(trial_items.potions)
self._potion_idx_to_ind = {p.idx: i for i, p in enumerate(self._potions)}
self._graph = graph
num_stones = len(self._stones)
num_potions = len(self._potions)
self._existing_stones = set(range(num_stones))
self._existing_potions = set(range(num_potions))
trackers = event_trackers if event_trackers is not None else []
self.trackers = {tracker.name: tracker for tracker in trackers}
self._count = 0
def add_event_trackers(self, event_trackers: Sequence[EventTracker]) -> None:
"""Adds event trackers if they are not already there."""
self.trackers.update({tracker.name: tracker for tracker in event_trackers})
def get_stone_ind(
self, stone_inst: Optional[int] = None,
stone: Optional[Union[graphs.Node, LatentStone]] = None
) -> Optional[int]:
"""Gets a stone referred to through a variety of methods.
The caller must pass exactly one of stone_inst and stone.
Args:
stone_inst: The instance id of the stone used in the potion.
stone: The stone used.
Returns:
The index (into the list of stones originally passed to the EventTracker
in construction) for the stone used in the potion or None if no match can
be found.
"""
if len([e for e in [stone_inst, stone] if e is not None]) != 1:
raise ValueError('Exactly one of stone inst and stone must be given.')
if stone_inst is not None:
return self._stone_idx_to_ind[stone_inst]
if isinstance(stone, LatentStone):
stone_node = graphs.Node(-1, stone.latent_coords)
else:
stone_node = stone
matches = self._matching_stones(stone_node)
if not matches:
return None
return matches[0]
def get_potion_ind(
self, potion_inst: Optional[int] = None,
potion: Optional[Union[Potion, LatentPotion]] = None) -> Optional[int]:
"""Gets a potion referred to through a variety of methods.
The caller must pass exactly one of potion_inst and potion.
Args:
potion_inst: The instance id of the potion used.
potion: The potion used.
Returns:
The index (into the list of potions originally passed to the EventTracker
in construction) for the potion used or None if no match can be found.
-1 refers to the cauldron.
"""
if len([e for e in [potion_inst, potion] if e is not None]) != 1:
raise ValueError('Exactly one of potion inst and potion must be given.')
if potion_inst is not None:
return self._potion_idx_to_ind[potion_inst]
if isinstance(potion, LatentPotion):
potion = Potion(-1, potion.latent_dim, potion.latent_dir)
matches = self._matching_potions(potion)
if not matches:
return None
return matches[0]
def _stone_node(self, ind: int) -> graphs.Node:
node_ = self._graph.node_list.get_node_by_coords(
list(self._stones[ind].latent))
assert node_ is not None
node: graphs.Node = node_
return node
def _matching_potions(self, potion: Potion) -> List[int]:
return [p for p in self._existing_potions
if self._potions[p].as_index == potion.as_index]
def _matching_stones(self, stone_node: graphs.Node) -> List[int]:
return [i for i in self._existing_stones
if tuple(self._stone_node(i).coords) == tuple(stone_node.coords)]
def has_stone_ind(self, stone_ind: int) -> bool:
return stone_ind in self._existing_stones
def has_potion_ind(self, potion_ind: int) -> bool:
return potion_ind in self._existing_potions
def _remove_potion(self, potion_ind: int) -> None:
self._existing_potions.remove(potion_ind)
def _remove_stone(self, stone_ind: int) -> None:
self._existing_stones.remove(stone_ind)
def potion_used(
self, stone_ind: int, potion_ind: int,
val: Optional[int] = None
) -> int:
"""Records that a potion has been used.
The caller must pass exactly one of stone_ind, stone_inst and stone, and
exactly one of potion_ind, potion_inst and potion.
Args:
stone_ind: The index (into the list of stones originally passed to the
EventTracker in construction) for the stone used in the potion.
potion_ind: The index (into the list of potions originally passed to the
EventTracker in construction) for the potion used. -1 refers to the
cauldron.
val: The value to record in this event (typically the frame number that
this event occurs). If this is not set then the value set will be
arbitrary but will preserve the order in which the potion_used and
stone_used functions are called.
Returns:
The index (into the list of stones originally passed to the EventTracker
in construction) for the stone used in the potion. This may not have been
passed into the function (if stone_inst or stone was passed instead).
"""
# -1 corresponds to the cauldron and so there is no potion to remove and the
# stone does not change
old_node = self._stone_node(stone_ind)
outcome_stone = None
potion = None
if potion_ind != CAULDRON:
outcome_stone = copy.deepcopy(old_node)
potion = self._potions[potion_ind]
# Change the stone in _stones
if old_node in self._graph.edge_list.edges:
outcome_stone = [end_node for end_node, v in
self._graph.edge_list.edges[old_node].items()
if potion.same_effect(v[1])]
if outcome_stone:
assert len(outcome_stone) == 1
outcome_stone = outcome_stone[0]
self._stones[stone_ind].latent = np.array(list(outcome_stone.coords))
else:
outcome_stone = old_node
self._remove_potion(potion_ind)
if self.trackers:
if val is None:
val = self._count
self._count += 1
for event_tracker in self.trackers.values():
event_tracker.potion_used(
stone_ind, potion_ind, val, old_node,
self._stone_ind_to_idx[stone_ind], potion, outcome_stone)
return stone_ind
def stone_used(self, stone_ind: int, val: Optional[int] = None) -> None:
"""Records that a stone has been used (placed in the cauldron).
The caller must pass exactly one of stone_ind, stone_inst and stone.
Args:
stone_ind: The index (into the list of stones originally passed to the
EventTracker in construction) for the stone used in the potion.
val: The value to record in this event (typically the frame number that
this event occurs). If this is not set then the value set will be
arbitrary but will preserve the order in which the potion_used and
stone_used functions are called.
"""
self.potion_used(
stone_ind=stone_ind, potion_ind=CAULDRON, val=val)
self._remove_stone(stone_ind)
def failed_potion_use(self, stone_ind: int) -> None:
old_node = self._stone_node(stone_ind)
for event_tracker in self.trackers.values():
event_tracker.failed_potion_use(
stone_ind, old_node, self._stone_ind_to_idx[stone_ind])
def has_stones(self) -> bool:
return bool(self._existing_stones)
def has_potions(self) -> bool:
return bool(self._existing_potions)
def has_stones_and_potions(self) -> bool:
return self.has_stones() and self.has_potions()
def rand_stone_ind(self) -> int:
return random.sample(self._existing_stones, 1)[0]
def rand_potion_ind(self) -> int:
return random.sample(self._existing_potions, 1)[0]
def use_rand_stone_potion_pair(self) -> Tuple[Stone, int]:
"""Uses a random stone with a random potion.
Returns:
The new value of the stone and the index of that stone.
"""
stone_index = self.rand_stone_ind()
return self.use_rand_potion(stone_index)
def use_rand_potion(self, stone_ind: int) -> Tuple[Stone, int]:
"""Uses the stone passed with a random potion.
Args:
stone_ind: The index (into the list of stones originally passed to the
EventTracker in construction) for the stone to use in a random potion.
Returns:
The new value of the stone and the index of that stone.
"""
potion_index = self.rand_potion_ind()
self.potion_used(stone_ind, potion_index)
return self._stones[stone_ind], stone_ind
def existing_stone_nodes(self) -> List[graphs.Node]:
"""Returns a list of nodes for the remaining existing stones."""
return [self._stone_node(i) for i in self._existing_stones]
def existing_stones(self) -> List[Stone]:
"""Returns a list of the remaining existing stones."""
return [self._stones[i] for i in self._existing_stones]
def existing_potions(self) -> List[Potion]:
"""Returns a list of the remaining existing potions."""
return [self._potions[i] for i in self._existing_potions]
def existing_items(self) -> utils.TrialItems:
return utils.TrialItems(
stones=self.existing_stones(), potions=self.existing_potions())
@property
def num_stones(self) -> int:
return len(self._existing_stones)
@property
def num_potions(self) -> int:
return len(self._existing_potions)
def check_have_potions(self, needed_potions: Sequence[Potion]) -> bool:
"""Checks that we have all the potions we need."""
need = collections.Counter([p.as_index for p in needed_potions])
have = collections.Counter([self._potions[p].as_index
for p in self._existing_potions])
for k in need.keys():
if k not in have.keys():
return False
else:
if have[k] < need[k]:
return False
return True
def get_stones_above_thresh(
self, reward_weights: RewardWeights, threshold: int) -> List[int]:
"""Gets all the stones whose value exceeds the threshold passed in."""
current_vals = {i: reward_weights(self._stones[i].latent)
for i in self._existing_stones}
return [i for i, current_val in current_vals.items()
if current_val > threshold]
def use_stones_above_thresh(
self, reward_weights: RewardWeights, threshold: int) -> None:
"""Uses all the stones whose value exceeds the threshold passed in."""
for i in self.get_stones_above_thresh(reward_weights, threshold):
self.stone_used(i)
def get_stone(self, ind: int) -> Stone:
return self._stones[ind]
def get_potion(self, ind: int) -> Potion:
return self._potions[ind]
@property
def node_list(self) -> graphs.NodeList:
return self._graph.node_list
@property
def edge_list(self) -> graphs.EdgeList:
return self._graph.edge_list
@property
def stone_ind_to_idx(self) -> Dict[int, int]:
return self._stone_ind_to_idx
@property
def stone_idx_to_ind(self) -> Dict[int, int]:
return self._stone_idx_to_ind
@property
def potion_idx_to_ind(self) -> Dict[int, int]:
return self._potion_idx_to_ind
class TrialTracker(EventTracker):
"""Type which tracks all events in a trial."""
@abc.abstractmethod
def events_list(self) -> List[Tuple[int, int, int]]:
"""Returns a list of stone index, potion index, val for the trial events."""
pass
class MatrixEventTracker(TrialTracker):
"""Tracks the order of potion used and stone used events in matrix."""
def __init__(self, num_stones: int, num_potions: int):
self.events = np.full(
shape=(num_stones, num_potions + 1), fill_value=-1, dtype=np.int)
super().__init__(name='matrix_event')
def potion_used(
self, stone_ind: int, potion_ind: int, val: int,
start_stone: graphs.Node, stone_inst: int, potion: Potion,
end_stone: graphs.Node) -> None:
"""Records that a potion has been used.
Args:
stone_ind: The index (into the list of stones originally passed to the
EventTracker in construction) for the stone used in the potion.
potion_ind: The index (into the list of potions originally passed to the
EventTracker in construction) for the potion used. -1 refers to the
cauldron.
val: The value to record in this event (typically the frame number that
this event occurs). If this is not set then the value set will be
arbitrary but will preserve the order in which the potion_used and
stone_used functions are called.
start_stone: The stone node before the potion is used.
stone_inst: The instance id for the stone we are using.
potion: The potion used.
end_stone: The stone node after the potion is used.
"""
self.events[stone_ind, potion_ind] = val
def events_list(self) -> List[Tuple[int, int, int]]:
stone_used, potion_used = np.where(self.events != -1)
frame = [self.events[x, y] for (x, y) in zip(stone_used, potion_used)]
num_potions = self.events.shape[1] - 1
events = sorted(zip(stone_used, potion_used, frame), key=lambda x: x[2])
return [
(stone_ind, CAULDRON if potion_ind == num_potions else potion_ind,
frame) for stone_ind, potion_ind, frame in events]
ActionSequenceElement = Tuple[int, Mapping[str, Any], int, int]
class ActionSequenceTracker(TrialTracker):
"""Tracks the order of potion used and stone used events in matrix."""
def __init__(self):
self._action_sequence = []
super().__init__(name='action_sequence')
def potion_used(
self, stone_ind: int, potion_ind: int, val: int,
start_stone: graphs.Node, stone_inst: int, potion: Potion,
end_stone: graphs.Node) -> None:
"""Records that a potion has been used.
Args:
stone_ind: The index (into the list of stones originally passed to the
EventTracker in construction) for the stone used in the potion.
potion_ind: The index (into the list of potions originally passed to the
EventTracker in construction) for the potion used. -1 refers to the
cauldron.
val: The value to record in this event (typically the frame number that
this event occurs). If this is not set then the value set will be
arbitrary but will preserve the order in which the potion_used and
stone_used functions are called.
start_stone: The stone node before the potion is used.
stone_inst: The instance id for the stone we are using.
potion: The potion used.
end_stone: The stone node after the potion is used.
"""
# add to action sequence
action_dict = {'node': (start_stone.idx, start_stone.coords),
'stone_idx': stone_inst}
# -1 corresponds to the cauldron and so there is no potion to remove and the
# stone does not change
if potion_ind == CAULDRON:
action_dict['action'] = 'cauldron'
else:
# Change the stone in _stones
action_dict['action'] = (potion.as_index,
(potion.dimension, potion.direction))
action_dict['potion_idx'] = potion.idx
action_dict['outcome_node'] = (end_stone.idx, end_stone.coords)
self._action_sequence.append((val, action_dict, stone_ind, potion_ind))
@property
def action_sequence(self) -> List[Tuple[int, Dict[str, Any], int, int]]:
self._action_sequence.sort(key=lambda x: x[0])
return self._action_sequence
def events_list(self) -> List[Tuple[int, int, int]]:
return [(stone_ind, potion_ind, val)
for val, _, stone_ind, potion_ind in self.action_sequence]
class LatestOutcomeTracker(EventTracker):
"""Tracks the most recent outcome of using a potion."""
def __init__(
self, potion_map: PotionMap, stone_map: StoneMap, rotation: np.ndarray):
# -1 represents no change and is the default value for outcome.
self.outcome = None
self.type_based_action = None
self._potion_map, self._stone_map = potion_map, stone_map
self._rotation = rotation
super().__init__(name='latest_outcome')
def reset(self) -> None:
self.outcome = None
self.type_based_action = None
def _perceived_stone(self, stone: graphs.Node):
aligned_stone = self._stone_map.apply_inverse(LatentStone(np.array(
stone.coords)))
return stones_and_potions.unalign(aligned_stone, self._rotation)
def potion_used(
self, stone_ind: int, potion_ind: int, val: int,
start_stone: graphs.Node, stone_inst: int, potion: Potion,
end_stone: Optional[graphs.Node]) -> None:
if end_stone is not None:
aligned_stone = self._stone_map.apply_inverse(LatentStone(np.array(
end_stone.coords)))
self.outcome = stones_and_potions.unalign(aligned_stone, self._rotation)
perceived_stone = self._perceived_stone(start_stone)
if potion_ind == CAULDRON:
self.type_based_action = utils.TypeBasedAction(
stone=perceived_stone, cauldron=True)
else:
perceived_potion = self._potion_map.apply_inverse(LatentPotion(
potion.dimension, potion.direction))
self.type_based_action = utils.TypeBasedAction(
stone=perceived_stone, potion=perceived_potion)
def failed_potion_use(
self, stone_ind: int, start_stone: graphs.Node, stone_inst: int):
"""Optional callback when a potion use is attempted but fails."""
self.outcome = None
perceived_stone = self._perceived_stone(start_stone)
# This is an invalid action but the stone type can be used for
# visualization.
self.type_based_action = utils.TypeBasedAction(stone=perceived_stone)
class RewardTracker(EventTracker):
"""Tracks the reward obtained."""
def __init__(self, reward_weights: RewardWeights):
self._reward = 0
self._reward_weights = reward_weights
super().__init__(name='reward')
def potion_used(
self, stone_ind: int, potion_ind: int, val: int,
start_stone: graphs.Node, stone_inst: int, potion: Potion,
end_stone: graphs.Node) -> None:
"""Adds reward when a potion has been used.
Args:
stone_ind: The index (into the list of stones originally passed to the
EventTracker in construction) for the stone used in the potion.
potion_ind: The index (into the list of potions originally passed to the
EventTracker in construction) for the potion used. -1 refers to the
cauldron.
val: The value to record in this event (typically the frame number that
this event occurs). If this is not set then the value set will be
arbitrary but will preserve the order in which the potion_used and
stone_used functions are called.
start_stone: The stone node before the potion is used.
stone_inst: The instance id for the stone we are using.
potion: The potion used.
end_stone: The stone node after the potion is used.
"""
if potion_ind == CAULDRON:
self._reward += self._reward_weights(start_stone.coords)
@property
def reward(self) -> int:
return self._reward
class ItemsUsedTracker(EventTracker):
"""Tracks the stones and potions used."""
def __init__(self):
self.potions_used = []
self.stones_used = []
super().__init__(name='items_used')
def potion_used(
self, stone_ind: int, potion_ind: int, val: int,
start_stone: graphs.Node, stone_inst: int, potion: Potion,
end_stone: graphs.Node) -> None:
"""Keeps lists of potions and stones which have been used.
Args:
stone_ind: The index (into the list of stones originally passed to the
EventTracker in construction) for the stone used in the potion.
potion_ind: The index (into the list of potions originally passed to the
EventTracker in construction) for the potion used. -1 refers to the
cauldron.
val: The value to record in this event (typically the frame number that
this event occurs). This is not relevant for this tracker.
start_stone: The stone node before the potion is used.
stone_inst: The instance id for the stone we are using.
potion: The potion used.
end_stone: The stone node after the potion is used.
"""
if potion_ind == CAULDRON:
self.stones_used.append(stone_ind)
else:
self.potions_used.append(potion_ind)
@property
def num_potions_used(self) -> int:
return len(self.potions_used)
@property
def num_stones_used(self) -> int:
return len(self.stones_used)
class Event(abc.ABC):
"""Abstract base class for events we want to check in the event tracker."""
@abc.abstractmethod
def next_occurrence(
self, events: np.ndarray) -> Tuple[int, int, Optional[Set[int]]]:
pass
def occurs(self, events: np.ndarray) -> bool:
event_start, _, _ = self.next_occurrence(events)
not_occurred = event_start == NEVER_USED
return not not_occurred
class SingleEvent(Event):
"""A single event where a stone is used with one of a set of potions."""
def __init__(self, stone_ind: int, potion_inds: Set[int]):
self.stone_ind = stone_ind
self.potion_inds = potion_inds
def next_occurrence(
self, events: np.ndarray) -> Tuple[int, int, Optional[Set[int]]]:
"""Gets the next occurrence of this event.
Args:
events: numpy array of stones against potions with the last entry
corresponding to the cauldron with a -1 in places where that stone was
never used with that potion and the time of usage otherwise.
Returns:
When event starts, when event ends, which potions were used by event.
"""
frames_potions = [(events[self.stone_ind, p], p) for p in self.potion_inds
if events[self.stone_ind, p] >= 0]
if not frames_potions:
return NEVER_USED, NEVER_USED, None
frame, potion_used = min(frames_potions, key=lambda v: v[0])
return frame, frame, {potion_used}
class AnyOrderEvents(Event):
"""A set of events which can happen in any order."""
def __init__(self, set_events: Set[Event]):
self.set_events = set_events
def next_occurrence(
self, events: np.ndarray) -> Tuple[int, int, Optional[Set[int]]]:
"""Gets the next occurrence of this event.
Args:
events: numpy array of stones against potions with the last entry
corresponding to the cauldron with a -1 in places where that stone was
never used with that potion and the time of usage otherwise.
Returns:
When event starts, when event ends, which potions were used by event.
"""
results = [e.next_occurrence(events) for e in self.set_events]
if any(v[0] == NEVER_USED for v in results):
return NEVER_USED, NEVER_USED, None
return (min(v[0] for v in results), max(v[1] for v in results),
set(itertools.chain.from_iterable([v[2] for v in results])))
class OrderedEvents(Event):
"""A list of events which must happen in the order passed in."""
def __init__(self, iter_events: Sequence[Event]):
self.iter_events = iter_events
def next_occurrence(
self, events: np.ndarray) -> Tuple[int, int, Optional[Set[int]]]:
"""Gets the next occurrence of this event.
Args:
events: numpy array of stones against potions with the last entry
corresponding to the cauldron with a -1 in places where that stone was
never used with that potion and the time of usage otherwise.
Returns:
When event starts, when event ends, which potions were used by event.
"""
results = [e.next_occurrence(events) for e in self.iter_events]
if any(v[0] == NEVER_USED for v in results):
return NEVER_USED, NEVER_USED, None
for end_first, start_next in zip([v[1] for v in results[:-1]],
[v[0] for v in results[1:]]):
# If the events happen on the same step this is allowed.
if end_first > start_next:
return NEVER_USED, NEVER_USED, None
return (results[0][0], results[-1][1],
set(itertools.chain.from_iterable([v[2] for v in results])))
def replay_events(game_state: GameState, event_tracker: TrialTracker) -> None:
for stone_ind, potion_ind, val in event_tracker.events_list():
if potion_ind == CAULDRON:
game_state.stone_used(stone_ind=stone_ind, val=val)
else:
game_state.potion_used(
stone_ind=stone_ind, potion_ind=potion_ind, val=val)
def matrix_events_to_action_sequence(
graph: Graph, items: utils.TrialItems, matrix_events: MatrixEventTracker
) -> List[ActionSequenceElement]:
"""Takes events/output of evaluation analysis and creates an event tracker."""
action_sequence_tracker = ActionSequenceTracker()
game_state = GameState(
trial_items=items, graph=graph, event_trackers=[action_sequence_tracker])
if matrix_events.events.shape != (items.num_stones, items.num_potions + 1):
raise ValueError(
'Matrix of events shape does not match the number of stones and '
'potions present.')
replay_events(game_state, matrix_events)
return action_sequence_tracker.action_sequence
| [
"dm_alchemy.types.stones_and_potions.unalign",
"random.sample",
"numpy.where",
"dm_alchemy.types.utils.TypeBasedAction",
"collections.Counter",
"numpy.array",
"itertools.chain.from_iterable",
"dm_alchemy.types.graphs.Node",
"copy.deepcopy",
"numpy.full"
] | [((2362, 2395), 'copy.deepcopy', 'copy.deepcopy', (['trial_items.stones'], {}), '(trial_items.stones)\n', (2375, 2395), False, 'import copy\n'), ((2568, 2602), 'copy.deepcopy', 'copy.deepcopy', (['trial_items.potions'], {}), '(trial_items.potions)\n', (2581, 2602), False, 'import copy\n'), ((11844, 11901), 'collections.Counter', 'collections.Counter', (['[p.as_index for p in needed_potions]'], {}), '([p.as_index for p in needed_potions])\n', (11863, 11901), False, 'import collections\n'), ((11913, 11998), 'collections.Counter', 'collections.Counter', (['[self._potions[p].as_index for p in self._existing_potions]'], {}), '([self._potions[p].as_index for p in self._existing_potions]\n )\n', (11932, 11998), False, 'import collections\n'), ((13900, 13973), 'numpy.full', 'np.full', ([], {'shape': '(num_stones, num_potions + 1)', 'fill_value': '(-1)', 'dtype': 'np.int'}), '(shape=(num_stones, num_potions + 1), fill_value=-1, dtype=np.int)\n', (13907, 13973), True, 'import numpy as np\n'), ((15200, 15227), 'numpy.where', 'np.where', (['(self.events != -1)'], {}), '(self.events != -1)\n', (15208, 15227), True, 'import numpy as np\n'), ((18618, 18675), 'dm_alchemy.types.stones_and_potions.unalign', 'stones_and_potions.unalign', (['aligned_stone', 'self._rotation'], {}), '(aligned_stone, self._rotation)\n', (18644, 18675), False, 'from dm_alchemy.types import stones_and_potions\n'), ((19873, 19917), 'dm_alchemy.types.utils.TypeBasedAction', 'utils.TypeBasedAction', ([], {'stone': 'perceived_stone'}), '(stone=perceived_stone)\n', (19894, 19917), False, 'from dm_alchemy.types import utils\n'), ((4125, 4161), 'dm_alchemy.types.graphs.Node', 'graphs.Node', (['(-1)', 'stone.latent_coords'], {}), '(-1, stone.latent_coords)\n', (4136, 4161), False, 'from dm_alchemy.types import graphs\n'), ((7702, 7725), 'copy.deepcopy', 'copy.deepcopy', (['old_node'], {}), '(old_node)\n', (7715, 7725), False, 'import copy\n'), ((9945, 9984), 'random.sample', 'random.sample', 
(['self._existing_stones', '(1)'], {}), '(self._existing_stones, 1)\n', (9958, 9984), False, 'import random\n'), ((10036, 10076), 'random.sample', 'random.sample', (['self._existing_potions', '(1)'], {}), '(self._existing_potions, 1)\n', (10049, 10076), False, 'import random\n'), ((19020, 19077), 'dm_alchemy.types.stones_and_potions.unalign', 'stones_and_potions.unalign', (['aligned_stone', 'self._rotation'], {}), '(aligned_stone, self._rotation)\n', (19046, 19077), False, 'from dm_alchemy.types import stones_and_potions\n'), ((19197, 19256), 'dm_alchemy.types.utils.TypeBasedAction', 'utils.TypeBasedAction', ([], {'stone': 'perceived_stone', 'cauldron': '(True)'}), '(stone=perceived_stone, cauldron=True)\n', (19218, 19256), False, 'from dm_alchemy.types import utils\n'), ((19426, 19495), 'dm_alchemy.types.utils.TypeBasedAction', 'utils.TypeBasedAction', ([], {'stone': 'perceived_stone', 'potion': 'perceived_potion'}), '(stone=perceived_stone, potion=perceived_potion)\n', (19447, 19495), False, 'from dm_alchemy.types import utils\n'), ((18573, 18595), 'numpy.array', 'np.array', (['stone.coords'], {}), '(stone.coords)\n', (18581, 18595), True, 'import numpy as np\n'), ((25077, 25131), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['[v[2] for v in results]'], {}), '([v[2] for v in results])\n', (25106, 25131), False, 'import itertools\n'), ((26283, 26337), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['[v[2] for v in results]'], {}), '([v[2] for v in results])\n', (26312, 26337), False, 'import itertools\n'), ((18959, 18985), 'numpy.array', 'np.array', (['end_stone.coords'], {}), '(end_stone.coords)\n', (18967, 18985), True, 'import numpy as np\n')] |
# Begin: Python 2/3 compatibility header small
# Get Python 3 functionality:
from __future__ import\
absolute_import, print_function, division, unicode_literals
from future.utils import raise_with_traceback, raise_from
# catch exception with: except Exception as e
from builtins import range, map, zip, filter
from io import open
import six
# End: Python 2/3 compatibility header small
import lasagne.layers as L
import numpy as np
import theano
import theano.tensor as T
import unittest
from . import networks
# Names exported on `from <module> import *`.
# NOTE(review): PatternComputerTestCase (defined below) is absent from this
# list — confirm whether that omission is intentional before exporting it.
__all__ = [
    "BaseTestCase",
    "ExplainerTestCase",
]
class BaseTestCase(unittest.TestCase):
"""
A dryrun test on various networks for an explanation method.
For each network the test check that the generated network
has the right output shape, can be compiled
and executed with random inputs.
"""
def _apply_test(self, method, network):
raise NotImplementedError("Set in subclass.")
def test_dryrun(self):
for network in networks.iterator():
if six.PY2:
self._apply_test(self._method, network)
else:
with self.subTest(network_name=network["name"]):
self._apply_test(self._method, network)
pass
class ExplainerTestCase(BaseTestCase):
    """Dry-run test for explainer objects produced by ``_method``."""
    def _method(self, output_layer):
        # Concrete subclasses return the explainer under test.
        raise NotImplementedError("Set in subclass.")
    def _assert(self, method, network, x, explanation):
        # Hook for extra, method-specific checks; default accepts everything.
        pass
    def _apply_test(self, method, network):
        # Build the explainer for this network's output layer.
        explainer = method(network["out"])
        # Execute once on a random single-sample batch.
        sample_shape = tuple(network["input_shape"][1:])
        x = np.random.rand(1, *sample_shape)
        explanation = explainer.explain(x)
        # The explanation must keep the per-sample input shape.
        self.assertEqual(tuple(explanation.shape[1:]), sample_shape)
        self._assert(method, network, x, explanation)
class PatternComputerTestCase(BaseTestCase):
    """Dry-run test for pattern computers produced by ``_method``."""
    def _method(self, output_layer):
        # Concrete subclasses return the pattern computer under test.
        raise NotImplementedError("Set in subclass.")
    def _assert(self, method, network, x, explanation):
        # Hook for extra, method-specific checks; default accepts everything.
        pass
    def _apply_test(self, method, network):
        # Build the pattern computer for this network's output layer.
        computer = method(network["out"])
        # Compute patterns once on a random ten-sample batch.
        x = np.random.rand(10, *tuple(network["input_shape"][1:]))
        patterns = computer.compute_patterns(x, 2)
        self._assert(method, network, x, patterns)
| [
"numpy.random.rand"
] | [((1603, 1649), 'numpy.random.rand', 'np.random.rand', (['(1)', "*network['input_shape'][1:]"], {}), "(1, *network['input_shape'][1:])\n", (1617, 1649), True, 'import numpy as np\n'), ((2228, 2275), 'numpy.random.rand', 'np.random.rand', (['(10)', "*network['input_shape'][1:]"], {}), "(10, *network['input_shape'][1:])\n", (2242, 2275), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# @file homography.py
# @Author <NAME> (adityavaishampayan)
# @copyright MIT
# @brief wrapper file for calling the functions in scripts folder
import numpy as np
def compute_view_based_homography(correspondence):
    """Estimate one view's homography via the DLT step of Zhang's method.

    Parameters
    ----------
    correspondence : sequence
        Packed per-view data: [0] image points, [1] object points,
        [2] normalized image points, [3] normalized object points,
        [5] object-point normalization matrix N_x, [6] inverse of the
        image-point normalization matrix N_u.

    Returns
    -------
    numpy.ndarray
        3x3 homography, scaled so that h[2, 2] == 1.
    """
    img_pts = correspondence[0]
    obj_pts = correspondence[1]
    norm_img_pts = correspondence[2]
    norm_obj_pts = correspondence[3]
    N_x = correspondence[5]
    N_u_inv = correspondence[6]

    num_pts = len(img_pts)
    design = np.zeros((2 * num_pts, 9), dtype=np.float64)
    for idx in range(num_pts):
        # Normalized image point (u, v) and object point (x, y).
        u, v = norm_img_pts[idx]
        x, y = norm_obj_pts[idx]
        design[2 * idx] = np.array([-x, -y, -1, 0, 0, 0, x * u, y * u, u])
        design[2 * idx + 1] = np.array([0, 0, 0, -x, -y, -1, x * v, y * v, v])

    # Homogeneous system design @ h = 0: the solution is the right singular
    # vector associated with the smallest singular value.
    _, sing_vals, vt = np.linalg.svd(design)
    h = vt[np.argmin(sing_vals)].reshape(3, 3)

    # De-normalize (eq. 68 of Burger's write-up of Zhang's calibration
    # algorithm) and fix the scale so the bottom-right entry is 1.
    h = np.matmul(np.matmul(N_u_inv, h), N_x)
    h = h[:, :] / h[2, 2]

    reprojection_error = 0
    for idx in range(num_pts):
        # Project the object point through h and compare with the image point.
        obj_h = np.array([[obj_pts[idx][0]], [obj_pts[idx][1]], [1.0]])
        proj = np.matmul(h, obj_h).reshape(1, 3)
        proj = proj / proj[0][-1]
        reprojection_error += np.sum(np.abs(img_pts[idx] - proj[0][:-1]))
    reprojection_error = np.sqrt(reprojection_error / num_pts)
    print("Reprojection error : ", reprojection_error)
    return h
"numpy.abs",
"numpy.sqrt",
"numpy.array",
"numpy.zeros",
"numpy.matmul",
"numpy.argmin",
"numpy.linalg.svd"
] | [((1542, 1580), 'numpy.zeros', 'np.zeros', (['(2 * N, 9)'], {'dtype': 'np.float64'}), '((2 * N, 9), dtype=np.float64)\n', (1550, 1580), True, 'import numpy as np\n'), ((2053, 2069), 'numpy.linalg.svd', 'np.linalg.svd', (['M'], {}), '(M)\n', (2066, 2069), True, 'import numpy as np\n'), ((2624, 2655), 'numpy.sqrt', 'np.sqrt', (['(reprojection_error / N)'], {}), '(reprojection_error / N)\n', (2631, 2655), True, 'import numpy as np\n'), ((1788, 1836), 'numpy.array', 'np.array', (['[-x, -y, -1, 0, 0, 0, x * u, y * u, u]'], {}), '([-x, -y, -1, 0, 0, 0, x * u, y * u, u])\n', (1796, 1836), True, 'import numpy as np\n'), ((1850, 1898), 'numpy.array', 'np.array', (['[0, 0, 0, -x, -y, -1, x * v, y * v, v]'], {}), '([0, 0, 0, -x, -y, -1, x * v, y * v, v])\n', (1858, 1898), True, 'import numpy as np\n'), ((2128, 2140), 'numpy.argmin', 'np.argmin', (['s'], {}), '(s)\n', (2137, 2140), True, 'import numpy as np\n'), ((2275, 2301), 'numpy.matmul', 'np.matmul', (['N_u_inv', 'h_norm'], {}), '(N_u_inv, h_norm)\n', (2284, 2301), True, 'import numpy as np\n'), ((2409, 2460), 'numpy.array', 'np.array', (['[[obj_pts[i][0]], [obj_pts[i][1]], [1.0]]'], {}), '([[obj_pts[i][0]], [obj_pts[i][1]], [1.0]])\n', (2417, 2460), True, 'import numpy as np\n'), ((2566, 2596), 'numpy.abs', 'np.abs', (['(img_pts[i] - t[0][:-1])'], {}), '(img_pts[i] - t[0][:-1])\n', (2572, 2596), True, 'import numpy as np\n'), ((2473, 2489), 'numpy.matmul', 'np.matmul', (['h', 't1'], {}), '(h, t1)\n', (2482, 2489), True, 'import numpy as np\n')] |
"""Unit tests for modified Helmholtz operators."""
# pylint: disable=redefined-outer-name
# pylint: disable=C0103
import numpy as _np
import pytest
pytestmark = pytest.mark.usefixtures("default_parameters", "helpers")
def test_maxwell_electric_field_sphere(
    default_parameters, helpers, device_interface, precision
):
    """Test Maxwell electric field on sphere."""
    from bempp.api import function_space
    from bempp.api.operators.boundary.maxwell import electric_field

    grid = helpers.load_grid("sphere")
    domain = function_space(grid, "RWG", 0)
    dual = function_space(grid, "SNC", 0)

    # Dense assembly of the electric field operator at wavenumber 2.5.
    operator = electric_field(
        domain,
        domain,
        dual,
        2.5,
        assembler="dense",
        device_interface=device_interface,
        precision=precision,
        parameters=default_parameters,
    )
    actual = operator.weak_form().to_dense()

    # Looser tolerances for single-precision assembly.
    rtol, atol = (1e-5, 1e-7) if precision == "single" else (1e-10, 1e-14)

    expected = helpers.load_npy_data("maxwell_electric_field_boundary")
    _np.testing.assert_allclose(actual, expected, rtol=rtol, atol=atol)
def test_maxwell_electric_field_rbc_bc_sphere(
    default_parameters, helpers, device_interface, precision, skip
):
    """Test Maxwell electric field on sphere with RBC/BC basis."""
    if skip == "circleci":
        pytest.skip()
    import bempp.api
    from bempp.api import function_space
    from bempp.api.operators.boundary.maxwell import electric_field

    grid = helpers.load_grid("sphere")
    domain = function_space(grid, "BC", 0)
    dual = function_space(grid, "RBC", 0)

    # Fixed seed keeps the probe vector reproducible across runs.
    vec = _np.random.RandomState(0).rand(domain.global_dof_count)

    # Force the FMM assembler through its dense-evaluation path.
    bempp.api.GLOBAL_PARAMETERS.fmm.dense_evaluation = True
    weak_op = electric_field(
        domain,
        domain,
        dual,
        2.5,
        assembler="fmm",
        device_interface=device_interface,
        precision=precision,
        parameters=default_parameters,
    ).weak_form()
    actual = weak_op @ vec
    bempp.api.GLOBAL_PARAMETERS.fmm.dense_evaluation = False

    # Looser tolerances for single-precision assembly.
    rtol, atol = (5e-5, 5e-6) if precision == "single" else (1e-10, 1e-14)

    mat = helpers.load_npy_data("maxwell_electric_field_boundary_rbc_bc")
    _np.testing.assert_allclose(actual, mat @ vec, rtol=rtol, atol=atol)

    bempp.api.clear_fmm_cache()
def test_maxwell_electric_field_bc_sphere(
    default_parameters, helpers, device_interface, precision, skip
):
    """Test Maxwell electric field on sphere with BC basis."""
    if skip == "circleci":
        pytest.skip()
    import bempp.api
    from bempp.api import function_space
    from bempp.api.operators.boundary.maxwell import electric_field

    grid = helpers.load_grid("sphere")
    domain = function_space(grid, "BC", 0)
    dual = function_space(grid, "SNC", 0)

    # Fixed seed keeps the probe vector reproducible across runs.
    vec = _np.random.RandomState(0).rand(domain.global_dof_count)

    # Force the FMM assembler through its dense-evaluation path.
    bempp.api.GLOBAL_PARAMETERS.fmm.dense_evaluation = True
    weak_op = electric_field(
        domain,
        domain,
        dual,
        2.5,
        assembler="fmm",
        device_interface=device_interface,
        precision=precision,
        parameters=default_parameters,
    ).weak_form()
    actual = weak_op @ vec
    bempp.api.GLOBAL_PARAMETERS.fmm.dense_evaluation = False

    # Looser tolerances for single-precision assembly.
    rtol, atol = (1e-4, 5e-6) if precision == "single" else (1e-10, 1e-14)

    mat = helpers.load_npy_data("maxwell_electric_field_boundary_bc")
    _np.testing.assert_allclose(actual, mat @ vec, rtol=rtol, atol=atol)

    bempp.api.clear_fmm_cache()
def test_maxwell_magnetic_field_sphere(
    default_parameters, helpers, device_interface, precision
):
    """Test Maxwell magnetic field on sphere."""
    from bempp.api import function_space
    from bempp.api.operators.boundary.maxwell import magnetic_field

    grid = helpers.load_grid("sphere")
    domain = function_space(grid, "RWG", 0)
    dual = function_space(grid, "SNC", 0)

    # Dense assembly of the magnetic field operator at wavenumber 2.5.
    operator = magnetic_field(
        domain,
        domain,
        dual,
        2.5,
        assembler="dense",
        device_interface=device_interface,
        precision=precision,
        parameters=default_parameters,
    )
    actual = operator.weak_form().to_dense()

    # Looser tolerances for single-precision assembly.
    rtol, atol = (1e-5, 1e-7) if precision == "single" else (1e-10, 1e-14)

    expected = helpers.load_npy_data("maxwell_magnetic_field_boundary")
    _np.testing.assert_allclose(actual, expected, rtol=rtol, atol=atol)
| [
"numpy.testing.assert_allclose",
"bempp.api.operators.boundary.maxwell.electric_field",
"bempp.api.operators.boundary.maxwell.magnetic_field",
"pytest.mark.usefixtures",
"bempp.api.function_space",
"pytest.skip",
"numpy.random.RandomState"
] | [((164, 220), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""default_parameters"""', '"""helpers"""'], {}), "('default_parameters', 'helpers')\n", (187, 220), False, 'import pytest\n'), ((539, 569), 'bempp.api.function_space', 'function_space', (['grid', '"""RWG"""', '(0)'], {}), "(grid, 'RWG', 0)\n", (553, 569), False, 'from bempp.api import function_space\n'), ((583, 613), 'bempp.api.function_space', 'function_space', (['grid', '"""SNC"""', '(0)'], {}), "(grid, 'SNC', 0)\n", (597, 613), False, 'from bempp.api import function_space\n'), ((1570, 1599), 'bempp.api.function_space', 'function_space', (['grid', '"""BC"""', '(0)'], {}), "(grid, 'BC', 0)\n", (1584, 1599), False, 'from bempp.api import function_space\n'), ((1613, 1643), 'bempp.api.function_space', 'function_space', (['grid', '"""RBC"""', '(0)'], {}), "(grid, 'RBC', 0)\n", (1627, 1643), False, 'from bempp.api import function_space\n'), ((1656, 1681), 'numpy.random.RandomState', '_np.random.RandomState', (['(0)'], {}), '(0)\n', (1678, 1681), True, 'import numpy as _np\n'), ((2361, 2428), 'numpy.testing.assert_allclose', '_np.testing.assert_allclose', (['actual', 'expected'], {'rtol': 'rtol', 'atol': 'atol'}), '(actual, expected, rtol=rtol, atol=atol)\n', (2388, 2428), True, 'import numpy as _np\n'), ((2874, 2903), 'bempp.api.function_space', 'function_space', (['grid', '"""BC"""', '(0)'], {}), "(grid, 'BC', 0)\n", (2888, 2903), False, 'from bempp.api import function_space\n'), ((2917, 2947), 'bempp.api.function_space', 'function_space', (['grid', '"""SNC"""', '(0)'], {}), "(grid, 'SNC', 0)\n", (2931, 2947), False, 'from bempp.api import function_space\n'), ((2960, 2985), 'numpy.random.RandomState', '_np.random.RandomState', (['(0)'], {}), '(0)\n', (2982, 2985), True, 'import numpy as _np\n'), ((3659, 3726), 'numpy.testing.assert_allclose', '_np.testing.assert_allclose', (['actual', 'expected'], {'rtol': 'rtol', 'atol': 'atol'}), '(actual, expected, rtol=rtol, atol=atol)\n', (3686, 3726), 
True, 'import numpy as _np\n'), ((4078, 4108), 'bempp.api.function_space', 'function_space', (['grid', '"""RWG"""', '(0)'], {}), "(grid, 'RWG', 0)\n", (4092, 4108), False, 'from bempp.api import function_space\n'), ((4122, 4152), 'bempp.api.function_space', 'function_space', (['grid', '"""SNC"""', '(0)'], {}), "(grid, 'SNC', 0)\n", (4136, 4152), False, 'from bempp.api import function_space\n'), ((1371, 1384), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (1382, 1384), False, 'import pytest\n'), ((2675, 2688), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (2686, 2688), False, 'import pytest\n'), ((633, 791), 'bempp.api.operators.boundary.maxwell.electric_field', 'electric_field', (['space1', 'space1', 'space2', '(2.5)'], {'assembler': '"""dense"""', 'device_interface': 'device_interface', 'precision': 'precision', 'parameters': 'default_parameters'}), "(space1, space1, space2, 2.5, assembler='dense',\n device_interface=device_interface, precision=precision, parameters=\n default_parameters)\n", (647, 791), False, 'from bempp.api.operators.boundary.maxwell import electric_field\n'), ((1807, 1963), 'bempp.api.operators.boundary.maxwell.electric_field', 'electric_field', (['space1', 'space1', 'space2', '(2.5)'], {'assembler': '"""fmm"""', 'device_interface': 'device_interface', 'precision': 'precision', 'parameters': 'default_parameters'}), "(space1, space1, space2, 2.5, assembler='fmm',\n device_interface=device_interface, precision=precision, parameters=\n default_parameters)\n", (1821, 1963), False, 'from bempp.api.operators.boundary.maxwell import electric_field\n'), ((3111, 3267), 'bempp.api.operators.boundary.maxwell.electric_field', 'electric_field', (['space1', 'space1', 'space2', '(2.5)'], {'assembler': '"""fmm"""', 'device_interface': 'device_interface', 'precision': 'precision', 'parameters': 'default_parameters'}), "(space1, space1, space2, 2.5, assembler='fmm',\n device_interface=device_interface, precision=precision, parameters=\n 
default_parameters)\n", (3125, 3267), False, 'from bempp.api.operators.boundary.maxwell import electric_field\n'), ((4172, 4330), 'bempp.api.operators.boundary.maxwell.magnetic_field', 'magnetic_field', (['space1', 'space1', 'space2', '(2.5)'], {'assembler': '"""dense"""', 'device_interface': 'device_interface', 'precision': 'precision', 'parameters': 'default_parameters'}), "(space1, space1, space2, 2.5, assembler='dense',\n device_interface=device_interface, precision=precision, parameters=\n default_parameters)\n", (4186, 4330), False, 'from bempp.api.operators.boundary.maxwell import magnetic_field\n')] |
import sys
import typing
from pathlib import Path
import numpy as np
import pandas as pd
import mpmp.config as cfg
import mpmp.utilities.data_utilities as du
from mpmp.utilities.tcga_utilities import (
process_y_matrix,
process_y_matrix_cancertype,
align_matrices,
filter_to_cross_data_samples,
)
class TCGADataModel():
    """
    Class containing data necessary to run TCGA mutation prediction experiments.
    Provides an interface to load and preprocess mutation data and training data
    modalities, and to split data into train/test sets for each target gene.
    """
    def __init__(self,
                 seed=cfg.default_seed,
                 subset_mad_genes=-1,
                 training_data='expression',
                 overlap_data_types=None,
                 load_compressed_data=False,
                 standardize_input=False,
                 n_dim=None,
                 sample_info_df=None,
                 verbose=False,
                 debug=False,
                 test=False):
        """
        Initialize mutation prediction model/data
        Arguments
        ---------
        seed (int): seed for random number generator
        subset_mad_genes (int): how many genes to keep (top by mean absolute deviation).
                                -1 doesn't do any filtering (all genes will be kept).
        training_data (str): what data type to train the model on
        overlap_data_types (list): what data types to use to determine sample set
        load_compressed_data (bool): whether or not to use compressed data
        standardize_input (bool): whether to standardize compressed input features
        n_dim (int): how many dimensions to use for compression algorithm
        verbose (bool): whether or not to write verbose output
        sample_info_df (pd.DataFrame): dataframe containing info about TCGA samples
        debug (bool): if True, use a subset of expression data for quick debugging
        test (bool): if True, don't save results to files
        """
        # save relevant parameters
        np.random.seed(seed)
        self.seed = seed
        self.subset_mad_genes = subset_mad_genes
        self.compressed_data = load_compressed_data
        self.overlap_data_types = overlap_data_types
        self.n_dim = n_dim
        self.verbose = verbose
        self.debug = debug
        self.test = test
        # load and store data in memory
        self._load_data(train_data_type=training_data,
                        compressed_data=load_compressed_data,
                        standardize_input=standardize_input,
                        n_dim=n_dim,
                        sample_info_df=sample_info_df,
                        debug=debug,
                        test=self.test)
    def load_gene_set(self, gene_set='top_50'):
        """
        Load gene set data from previous GitHub repos.
        Arguments
        ---------
        gene_set (str): which predefined gene set to use, or a list of gene names
                        to use a custom list.
        Returns
        -------
        genes_df (pd.DataFrame): list of genes to run cross-validation experiments for,
                                 contains gene names and oncogene/TSG classifications
        Raises
        ------
        GenesNotFoundError: if a custom gene list is not a subset of either
                            the Vogelstein or top-50 gene sets.
        """
        if self.verbose:
            print('Loading gene label data...', file=sys.stderr)
        if gene_set == 'top_50':
            genes_df = du.load_top_genes()
        elif gene_set == 'vogelstein':
            genes_df = du.load_vogelstein()
        elif gene_set == '50_random':
            genes_df = du.load_random_genes()
        else:
            # custom gene list: it must be a subset of one of the known sets,
            # which carry the oncogene/TSG classifications we need
            from mpmp.exceptions import GenesNotFoundError
            assert isinstance(gene_set, typing.List)
            genes_df = du.load_vogelstein()
            # if all genes in gene_set are in vogelstein dataset, use it
            if set(gene_set).issubset(set(genes_df.gene.values)):
                genes_df = genes_df[genes_df.gene.isin(gene_set)]
            # else if all genes in gene_set are in top50 dataset, use it
            else:
                genes_df = du.load_top_50()
                if set(gene_set).issubset(set(genes_df.gene.values)):
                    genes_df = genes_df[genes_df.gene.isin(gene_set)]
                else:
                    # else throw an error
                    raise GenesNotFoundError(
                        'Gene list was not a subset of Vogelstein or top50'
                    )
        return genes_df
    def process_data_for_cancer_type(self,
                                     cancer_type,
                                     cancer_type_dir):
        """
        Prepare to run cancer type prediction experiments.
        This has to be rerun to generate the labels for each cancer type.
        Sets self.X_df, self.y_df, self.gene_features.
        Arguments
        ---------
        cancer_type (str): cancer type to predict (one vs. rest binary)
        cancer_type_dir (str): directory to write output to, if None don't
                               write output
        """
        # NOTE(review): cancer_type_dir is currently unused here — confirm
        # whether labels were ever meant to be written out.
        y_df_raw = self._generate_cancer_type_labels(cancer_type)
        filtered_data = self._filter_data(
            self.data_df,
            y_df_raw
        )
        train_filtered_df, y_filtered_df, gene_features = filtered_data
        # restrict to samples present in all overlap_data_types modalities
        train_filtered_df, y_filtered_df = filter_to_cross_data_samples(
            train_filtered_df,
            y_filtered_df,
            data_types=self.overlap_data_types,
            use_subsampled=(self.debug or self.test),
            verbose=self.verbose
        )
        self.X_df = train_filtered_df
        self.y_df = y_filtered_df
        self.gene_features = gene_features
    def process_data_for_gene(self,
                              gene,
                              classification,
                              gene_dir,
                              use_pancancer=False):
        """
        Prepare to run mutation prediction experiments for a given gene.
        Sets self.X_df, self.y_df, self.gene_features.
        Arguments
        ---------
        gene (str): gene to run experiments for
        classification (str): 'oncogene' or 'TSG'; most likely cancer function for
                              the given gene
        gene_dir (str): directory to write output to, if None don't write output
        use_pancancer (bool): whether or not to use pancancer data
                              (NOTE(review): currently unused in this method)
        """
        y_df_raw, valid_samples = self._generate_gene_labels(
            gene, classification, gene_dir)
        filtered_data = self._filter_data(
            self.data_df,
            y_df_raw,
            add_cancertype_covariate=True
        )
        train_filtered_df, y_filtered_df, gene_features = filtered_data
        # add non-gene features to data_types array if necessary
        # this is used when building multi-omics models
        if hasattr(self, 'data_types'):
            # this has to have a different name than the general data_types
            # array, since this preprocessing may happen multiple times (for
            # each gene) in the same script call
            self.gene_data_types = np.concatenate(
                (self.data_types, np.array([cfg.NONGENE_FEATURE] *
                                          np.count_nonzero(~gene_features)))
            )
            assert self.gene_data_types.shape[0] == gene_features.shape[0]
        # restrict to samples present in all overlap_data_types modalities
        train_filtered_df, y_filtered_df = filter_to_cross_data_samples(
            train_filtered_df,
            y_filtered_df,
            valid_samples=valid_samples,
            data_types=self.overlap_data_types,
            n_dim=self.n_dim,
            use_subsampled=(self.debug or self.test),
            verbose=self.verbose
        )
        self.X_df = train_filtered_df
        self.y_df = y_filtered_df
        self.gene_features = gene_features
        # sanity check: no duplicated sample indices after filtering
        assert np.count_nonzero(self.X_df.index.duplicated()) == 0
        assert np.count_nonzero(self.y_df.index.duplicated()) == 0
    def process_purity_data(self,
                            output_dir,
                            classify=False):
        """Prepare to run experiments predicting tumor purity.
        Sets self.X_df, self.y_df, self.gene_features.
        Arguments
        ---------
        output_dir (str): directory to write output to, if None don't write output
                          (NOTE(review): currently unused in this method)
        classify (bool): if True do classification, else regression
        """
        y_df_raw = du.load_purity(self.mut_burden_df,
                                  self.sample_info_df,
                                  classify=classify,
                                  verbose=self.verbose)
        filtered_data = self._filter_data(
            self.data_df,
            y_df_raw,
            add_cancertype_covariate=True
        )
        train_filtered_df, y_filtered_df, gene_features = filtered_data
        train_filtered_df, y_filtered_df = filter_to_cross_data_samples(
            train_filtered_df,
            y_filtered_df,
            data_types=self.overlap_data_types,
            n_dim=self.n_dim,
            use_subsampled=(self.debug or self.test),
            verbose=self.verbose
        )
        # filter to samples in common between training data and tumor purity
        self.X_df = train_filtered_df
        self.y_df = y_filtered_df
        self.gene_features = gene_features
        # sanity check: no duplicated sample indices after filtering
        assert np.count_nonzero(self.X_df.index.duplicated()) == 0
        assert np.count_nonzero(self.y_df.index.duplicated()) == 0
    def process_msi_data(self, cancer_type, output_dir):
        """Prepare to run experiments predicting microsatellite instability status.
        Sets self.X_df, self.y_df, self.gene_features.
        Arguments
        ---------
        cancer_type (str): cancer type to generate MSI labels for
                           ('pancancer' adds a cancer-type covariate)
        output_dir (str): directory to write output to, if None don't write output
                          (NOTE(review): currently unused in this method)
        """
        y_df_raw = du.load_msi(cancer_type,
                               self.mut_burden_df,
                               self.sample_info_df,
                               verbose=self.verbose)
        filtered_data = self._filter_data(
            self.data_df,
            y_df_raw,
            add_cancertype_covariate=(cancer_type == 'pancancer')
        )
        train_filtered_df, y_filtered_df, gene_features = filtered_data
        train_filtered_df, y_filtered_df = filter_to_cross_data_samples(
            train_filtered_df,
            y_filtered_df,
            data_types=self.overlap_data_types,
            n_dim=self.n_dim,
            use_subsampled=(self.debug or self.test),
            verbose=self.verbose
        )
        # filter to samples in common between training data and MSI labels
        self.X_df = train_filtered_df
        self.y_df = y_filtered_df
        self.gene_features = gene_features
        # sanity check: no duplicated sample indices after filtering
        assert np.count_nonzero(self.X_df.index.duplicated()) == 0
        assert np.count_nonzero(self.y_df.index.duplicated()) == 0
    def process_survival_data(self,
                              output_dir,
                              cancer_type):
        """Prepare to run experiments predicting survival from omics data.
        Sets self.X_df, self.y_df, self.gene_features.
        Arguments
        ---------
        output_dir (str): directory to write output to, if None don't write output
                          (NOTE(review): currently unused in this method)
        cancer_type (str): cancer type to generate survival labels for
                           ('pancancer' adds a cancer-type covariate)
        """
        y_df_raw = du.load_survival_labels(cancer_type,
                                           self.mut_burden_df,
                                           self.sample_info_df,
                                           verbose=self.verbose)
        filtered_data = self._filter_data(
            self.data_df,
            y_df_raw,
            # add cancer type covariate only in pan-cancer prediction case
            add_cancertype_covariate=(cancer_type == 'pancancer'),
            add_age_covariate=True
        )
        train_filtered_df, y_filtered_df, gene_features = filtered_data
        train_filtered_df, y_filtered_df = filter_to_cross_data_samples(
            train_filtered_df,
            y_filtered_df,
            data_types=self.overlap_data_types,
            n_dim=self.n_dim,
            use_subsampled=(self.debug or self.test),
            verbose=self.verbose
        )
        # filter to samples in common between training data and survival labels
        self.X_df = train_filtered_df
        self.y_df = y_filtered_df
        self.gene_features = gene_features
        # sanity check: no duplicated sample indices after filtering
        assert np.count_nonzero(self.X_df.index.duplicated()) == 0
        assert np.count_nonzero(self.y_df.index.duplicated()) == 0
    def _load_data(self,
                   train_data_type,
                   compressed_data=False,
                   standardize_input=False,
                   n_dim=None,
                   sample_info_df=None,
                   debug=False,
                   test=False):
        """Load and store relevant data.
        This data does not vary based on the gene/cancer type being considered
        (i.e. it can be loaded only once when the class is instantiated).
        Arguments:
        ----------
        debug (bool): whether or not to subset data for faster debugging
        test (bool): whether or not to subset columns in mutation data, for testing
        """
        # load training data
        if not isinstance(train_data_type, str):
            # if a list of train data types is provided, we have to load each
            # of them and concatenate columns
            # n_dim should be a list here
            self.data_df, self.data_types = du.load_multiple_data_types(
                train_data_type,
                n_dims=n_dim,
                verbose=self.verbose)
        elif compressed_data:
            self.data_df = du.load_compressed_data(train_data_type,
                                                   n_dim=n_dim,
                                                   verbose=self.verbose,
                                                   standardize_input=standardize_input,
                                                   load_subset=(debug or test))
        else:
            self.data_df = du.load_raw_data(train_data_type,
                                            verbose=self.verbose,
                                            load_subset=(debug or test))
        if sample_info_df is None:
            self.sample_info_df = du.load_sample_info(train_data_type,
                                                     verbose=self.verbose)
        else:
            # sometimes we load sample info in the calling script as part of
            # argument processing, etc
            # in that case, we don't need to load it again
            self.sample_info_df = sample_info_df
        # load and unpack pancancer mutation/CNV/TMB data
        # this data is described in more detail in the load_pancancer_data docstring
        if test:
            # for testing, just load a subset of pancancer data,
            # this is much faster than loading mutation data for all genes
            import mpmp.test_config as tcfg
            pancan_data = du.load_pancancer_data(verbose=self.verbose,
                                                test=True,
                                                subset_columns=tcfg.test_genes)
        else:
            pancan_data = du.load_pancancer_data(verbose=self.verbose)
        (self.sample_freeze_df,
         self.mutation_df,
         self.copy_loss_df,
         self.copy_gain_df,
         self.mut_burden_df) = pancan_data
    def _generate_cancer_type_labels(self, cancer_type):
        """Build one-vs-rest binary cancer type labels for the given acronym."""
        # count_df (per-cancer-type sample counts) is intentionally discarded
        y_df, count_df = process_y_matrix_cancertype(
            acronym=cancer_type,
            sample_freeze=self.sample_freeze_df,
            mutation_burden=self.mut_burden_df,
            hyper_filter=5,
        )
        return y_df
    def _generate_gene_labels(self, gene, classification, gene_dir):
        """Build mutation labels for a gene, optionally folding in CNV calls."""
        # process the y matrix for the given gene or pathway
        y_mutation_df = self.mutation_df.loc[:, gene]
        # include copy number gains for oncogenes
        # and copy number loss for tumor suppressor genes (TSG)
        include_copy = True
        if classification == "Oncogene":
            y_copy_number_df = self.copy_gain_df.loc[:, gene]
        elif classification == "TSG":
            y_copy_number_df = self.copy_loss_df.loc[:, gene]
        else:
            y_copy_number_df = pd.DataFrame()
            include_copy = False
        # construct labels from mutation/CNV information, and filter for
        # cancer types without an extreme label imbalance
        y_df, valid_samples = process_y_matrix(
            y_mutation=y_mutation_df,
            y_copy=y_copy_number_df,
            include_copy=include_copy,
            gene=gene,
            sample_freeze=self.sample_freeze_df,
            mutation_burden=self.mut_burden_df,
            filter_count=cfg.filter_count,
            filter_prop=cfg.filter_prop,
            output_directory=gene_dir,
            hyper_filter=5,
            test=self.test,
            overlap_data_types=self.overlap_data_types
        )
        return y_df, valid_samples
    def _filter_data(self,
                     data_df,
                     y_df,
                     add_cancertype_covariate=False,
                     add_age_covariate=False):
        """Align feature/label matrices and append covariate columns.
        Returns (data_df, y_df, gene_features); the retained-sample list from
        align_matrices is discarded.
        """
        use_samples, data_df, y_df, gene_features = align_matrices(
            x_file_or_df=data_df,
            y=y_df,
            add_cancertype_covariate=add_cancertype_covariate,
            add_mutation_covariate=True,
            add_age_covariate=add_age_covariate
        )
        return data_df, y_df, gene_features
| [
"mpmp.utilities.tcga_utilities.process_y_matrix_cancertype",
"numpy.count_nonzero",
"mpmp.utilities.data_utilities.load_multiple_data_types",
"mpmp.utilities.data_utilities.load_random_genes",
"mpmp.utilities.tcga_utilities.process_y_matrix",
"mpmp.utilities.data_utilities.load_top_50",
"mpmp.utilities.... | [((1997, 2017), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2011, 2017), True, 'import numpy as np\n'), ((5226, 5395), 'mpmp.utilities.tcga_utilities.filter_to_cross_data_samples', 'filter_to_cross_data_samples', (['train_filtered_df', 'y_filtered_df'], {'data_types': 'self.overlap_data_types', 'use_subsampled': '(self.debug or self.test)', 'verbose': 'self.verbose'}), '(train_filtered_df, y_filtered_df, data_types=\n self.overlap_data_types, use_subsampled=self.debug or self.test,\n verbose=self.verbose)\n', (5254, 5395), False, 'from mpmp.utilities.tcga_utilities import process_y_matrix, process_y_matrix_cancertype, align_matrices, filter_to_cross_data_samples\n'), ((7264, 7480), 'mpmp.utilities.tcga_utilities.filter_to_cross_data_samples', 'filter_to_cross_data_samples', (['train_filtered_df', 'y_filtered_df'], {'valid_samples': 'valid_samples', 'data_types': 'self.overlap_data_types', 'n_dim': 'self.n_dim', 'use_subsampled': '(self.debug or self.test)', 'verbose': 'self.verbose'}), '(train_filtered_df, y_filtered_df,\n valid_samples=valid_samples, data_types=self.overlap_data_types, n_dim=\n self.n_dim, use_subsampled=self.debug or self.test, verbose=self.verbose)\n', (7292, 7480), False, 'from mpmp.utilities.tcga_utilities import process_y_matrix, process_y_matrix_cancertype, align_matrices, filter_to_cross_data_samples\n'), ((8221, 8321), 'mpmp.utilities.data_utilities.load_purity', 'du.load_purity', (['self.mut_burden_df', 'self.sample_info_df'], {'classify': 'classify', 'verbose': 'self.verbose'}), '(self.mut_burden_df, self.sample_info_df, classify=classify,\n verbose=self.verbose)\n', (8235, 8321), True, 'import mpmp.utilities.data_utilities as du\n'), ((8680, 8867), 'mpmp.utilities.tcga_utilities.filter_to_cross_data_samples', 'filter_to_cross_data_samples', (['train_filtered_df', 'y_filtered_df'], {'data_types': 'self.overlap_data_types', 'n_dim': 'self.n_dim', 'use_subsampled': '(self.debug or self.test)', 
'verbose': 'self.verbose'}), '(train_filtered_df, y_filtered_df, data_types=\n self.overlap_data_types, n_dim=self.n_dim, use_subsampled=self.debug or\n self.test, verbose=self.verbose)\n', (8708, 8867), False, 'from mpmp.utilities.tcga_utilities import process_y_matrix, process_y_matrix_cancertype, align_matrices, filter_to_cross_data_samples\n'), ((9632, 9724), 'mpmp.utilities.data_utilities.load_msi', 'du.load_msi', (['cancer_type', 'self.mut_burden_df', 'self.sample_info_df'], {'verbose': 'self.verbose'}), '(cancer_type, self.mut_burden_df, self.sample_info_df, verbose=\n self.verbose)\n', (9643, 9724), True, 'import mpmp.utilities.data_utilities as du\n'), ((10097, 10284), 'mpmp.utilities.tcga_utilities.filter_to_cross_data_samples', 'filter_to_cross_data_samples', (['train_filtered_df', 'y_filtered_df'], {'data_types': 'self.overlap_data_types', 'n_dim': 'self.n_dim', 'use_subsampled': '(self.debug or self.test)', 'verbose': 'self.verbose'}), '(train_filtered_df, y_filtered_df, data_types=\n self.overlap_data_types, n_dim=self.n_dim, use_subsampled=self.debug or\n self.test, verbose=self.verbose)\n', (10125, 10284), False, 'from mpmp.utilities.tcga_utilities import process_y_matrix, process_y_matrix_cancertype, align_matrices, filter_to_cross_data_samples\n'), ((11037, 11141), 'mpmp.utilities.data_utilities.load_survival_labels', 'du.load_survival_labels', (['cancer_type', 'self.mut_burden_df', 'self.sample_info_df'], {'verbose': 'self.verbose'}), '(cancer_type, self.mut_burden_df, self.\n sample_info_df, verbose=self.verbose)\n', (11060, 11141), True, 'import mpmp.utilities.data_utilities as du\n'), ((11661, 11848), 'mpmp.utilities.tcga_utilities.filter_to_cross_data_samples', 'filter_to_cross_data_samples', (['train_filtered_df', 'y_filtered_df'], {'data_types': 'self.overlap_data_types', 'n_dim': 'self.n_dim', 'use_subsampled': '(self.debug or self.test)', 'verbose': 'self.verbose'}), '(train_filtered_df, y_filtered_df, data_types=\n 
self.overlap_data_types, n_dim=self.n_dim, use_subsampled=self.debug or\n self.test, verbose=self.verbose)\n', (11689, 11848), False, 'from mpmp.utilities.tcga_utilities import process_y_matrix, process_y_matrix_cancertype, align_matrices, filter_to_cross_data_samples\n'), ((15374, 15516), 'mpmp.utilities.tcga_utilities.process_y_matrix_cancertype', 'process_y_matrix_cancertype', ([], {'acronym': 'cancer_type', 'sample_freeze': 'self.sample_freeze_df', 'mutation_burden': 'self.mut_burden_df', 'hyper_filter': '(5)'}), '(acronym=cancer_type, sample_freeze=self.\n sample_freeze_df, mutation_burden=self.mut_burden_df, hyper_filter=5)\n', (15401, 15516), False, 'from mpmp.utilities.tcga_utilities import process_y_matrix, process_y_matrix_cancertype, align_matrices, filter_to_cross_data_samples\n'), ((16377, 16736), 'mpmp.utilities.tcga_utilities.process_y_matrix', 'process_y_matrix', ([], {'y_mutation': 'y_mutation_df', 'y_copy': 'y_copy_number_df', 'include_copy': 'include_copy', 'gene': 'gene', 'sample_freeze': 'self.sample_freeze_df', 'mutation_burden': 'self.mut_burden_df', 'filter_count': 'cfg.filter_count', 'filter_prop': 'cfg.filter_prop', 'output_directory': 'gene_dir', 'hyper_filter': '(5)', 'test': 'self.test', 'overlap_data_types': 'self.overlap_data_types'}), '(y_mutation=y_mutation_df, y_copy=y_copy_number_df,\n include_copy=include_copy, gene=gene, sample_freeze=self.\n sample_freeze_df, mutation_burden=self.mut_burden_df, filter_count=cfg.\n filter_count, filter_prop=cfg.filter_prop, output_directory=gene_dir,\n hyper_filter=5, test=self.test, overlap_data_types=self.overlap_data_types)\n', (16393, 16736), False, 'from mpmp.utilities.tcga_utilities import process_y_matrix, process_y_matrix_cancertype, align_matrices, filter_to_cross_data_samples\n'), ((17145, 17315), 'mpmp.utilities.tcga_utilities.align_matrices', 'align_matrices', ([], {'x_file_or_df': 'data_df', 'y': 'y_df', 'add_cancertype_covariate': 'add_cancertype_covariate', 
'add_mutation_covariate': '(True)', 'add_age_covariate': 'add_age_covariate'}), '(x_file_or_df=data_df, y=y_df, add_cancertype_covariate=\n add_cancertype_covariate, add_mutation_covariate=True,\n add_age_covariate=add_age_covariate)\n', (17159, 17315), False, 'from mpmp.utilities.tcga_utilities import process_y_matrix, process_y_matrix_cancertype, align_matrices, filter_to_cross_data_samples\n'), ((3342, 3361), 'mpmp.utilities.data_utilities.load_top_genes', 'du.load_top_genes', ([], {}), '()\n', (3359, 3361), True, 'import mpmp.utilities.data_utilities as du\n'), ((13226, 13311), 'mpmp.utilities.data_utilities.load_multiple_data_types', 'du.load_multiple_data_types', (['train_data_type'], {'n_dims': 'n_dim', 'verbose': 'self.verbose'}), '(train_data_type, n_dims=n_dim, verbose=self.verbose\n )\n', (13253, 13311), True, 'import mpmp.utilities.data_utilities as du\n'), ((14139, 14197), 'mpmp.utilities.data_utilities.load_sample_info', 'du.load_sample_info', (['train_data_type'], {'verbose': 'self.verbose'}), '(train_data_type, verbose=self.verbose)\n', (14158, 14197), True, 'import mpmp.utilities.data_utilities as du\n'), ((14861, 14953), 'mpmp.utilities.data_utilities.load_pancancer_data', 'du.load_pancancer_data', ([], {'verbose': 'self.verbose', 'test': '(True)', 'subset_columns': 'tcfg.test_genes'}), '(verbose=self.verbose, test=True, subset_columns=tcfg\n .test_genes)\n', (14883, 14953), True, 'import mpmp.utilities.data_utilities as du\n'), ((15087, 15131), 'mpmp.utilities.data_utilities.load_pancancer_data', 'du.load_pancancer_data', ([], {'verbose': 'self.verbose'}), '(verbose=self.verbose)\n', (15109, 15131), True, 'import mpmp.utilities.data_utilities as du\n'), ((3424, 3444), 'mpmp.utilities.data_utilities.load_vogelstein', 'du.load_vogelstein', ([], {}), '()\n', (3442, 3444), True, 'import mpmp.utilities.data_utilities as du\n'), ((13509, 13652), 'mpmp.utilities.data_utilities.load_compressed_data', 'du.load_compressed_data', (['train_data_type'], 
{'n_dim': 'n_dim', 'verbose': 'self.verbose', 'standardize_input': 'standardize_input', 'load_subset': '(debug or test)'}), '(train_data_type, n_dim=n_dim, verbose=self.verbose,\n standardize_input=standardize_input, load_subset=debug or test)\n', (13532, 13652), True, 'import mpmp.utilities.data_utilities as du\n'), ((13896, 13982), 'mpmp.utilities.data_utilities.load_raw_data', 'du.load_raw_data', (['train_data_type'], {'verbose': 'self.verbose', 'load_subset': '(debug or test)'}), '(train_data_type, verbose=self.verbose, load_subset=debug or\n test)\n', (13912, 13982), True, 'import mpmp.utilities.data_utilities as du\n'), ((16167, 16181), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (16179, 16181), True, 'import pandas as pd\n'), ((3506, 3528), 'mpmp.utilities.data_utilities.load_random_genes', 'du.load_random_genes', ([], {}), '()\n', (3526, 3528), True, 'import mpmp.utilities.data_utilities as du\n'), ((3678, 3698), 'mpmp.utilities.data_utilities.load_vogelstein', 'du.load_vogelstein', ([], {}), '()\n', (3696, 3698), True, 'import mpmp.utilities.data_utilities as du\n'), ((4022, 4038), 'mpmp.utilities.data_utilities.load_top_50', 'du.load_top_50', ([], {}), '()\n', (4036, 4038), True, 'import mpmp.utilities.data_utilities as du\n'), ((4265, 4336), 'mpmp.exceptions.GenesNotFoundError', 'GenesNotFoundError', (['"""Gene list was not a subset of Vogelstein or top50"""'], {}), "('Gene list was not a subset of Vogelstein or top50')\n", (4283, 4336), False, 'from mpmp.exceptions import GenesNotFoundError\n'), ((7096, 7128), 'numpy.count_nonzero', 'np.count_nonzero', (['(~gene_features)'], {}), '(~gene_features)\n', (7112, 7128), True, 'import numpy as np\n')] |
from src.pyIsoFit.core.model_fit_def import get_guess_params, get_fit_tuples
import numpy as np
import pandas as pd
import unittest
class TestModelFitDef(unittest.TestCase):
    """Unit tests for the fitting helpers in ``src.pyIsoFit.core.model_fit_def``.

    Relies on the CSV dataset under ``../Datasets for testing``, so the
    suite must be run from the package's test directory.
    """
    def test_get_guess_params(self):
        """Check the initial-guess dictionary produced by ``get_guess_params``
        for every supported isotherm model against reference values computed
        from the 13X CO2 dataset.
        """
        df1 = pd.read_csv('../Datasets for testing/Computational Data (EPFL) CO2.csv')
        key_uptakes = ['Uptake (mmol/g)_13X_10 (°C)']
        key_pressures = ['Pressure (bar)']
        # langmuir
        result1 = get_guess_params("langmuir", df1, key_uptakes, key_pressures)
        guess_result_test1 = {'b': [155.72513521595482],
                              'q': [7.926677561000001]}
        for key in guess_result_test1:
            np.testing.assert_array_almost_equal(guess_result_test1[key], result1[key])
        # dsl and dsl nc: both variants must produce identical guesses
        guess_result_test2 = {
            'b1': [62.29005408638193],
            'b2': [93.4350811295729],
            'q1': [3.9633387805000004],
            'q2': [3.9633387805000004]}
        result2 = get_guess_params("dsl", df1, key_uptakes, key_pressures)
        result2_d = get_guess_params("dsl nc", df1, key_uptakes, key_pressures)
        for key in guess_result_test2:
            np.testing.assert_array_almost_equal(guess_result_test2[key], result2[key])
            np.testing.assert_array_almost_equal(guess_result_test2[key], result2_d[key])
        # langmuir td (temperature-dependent)
        guess_result_test3 = {
            'b0': [155.72513521595482],
            'q': [7.926677561000001],
            'h': [-5000]
        }
        result3 = get_guess_params("langmuir td", df1, key_uptakes, key_pressures)
        for key in guess_result_test3:
            np.testing.assert_array_almost_equal(guess_result_test3[key], result3[key])
        # gab
        guess_result_test4 = {
            'n': [1.585335512],
            'ka': [1],
            'ca': [0.45]
        }
        result4 = get_guess_params("gab", df1, key_uptakes, key_pressures)
        for key in guess_result_test4:
            np.testing.assert_array_almost_equal(guess_result_test4[key], result4[key])
        # mdr
        guess_result_test5 = {
            'n0': [7.926677561000001],
            'n1': [155.72513521595482],
            'a': [15.57251352],
            'c': [1557.251352]
        }
        result5 = get_guess_params("mdr", df1, key_uptakes, key_pressures)
        for key in guess_result_test5:
            np.testing.assert_array_almost_equal(guess_result_test5[key], result5[key])
        # sips
        guess_result_test6 = {
            'b': [155.72513521595482],
            'q': [7.926677561000001],
            'n': [1]
        }
        result6 = get_guess_params("sips", df1, key_uptakes, key_pressures)
        for key in guess_result_test6:
            np.testing.assert_array_almost_equal(guess_result_test6[key], result6[key])
        # toth
        guess_result_test7 = {
            'b': [155.72513521595482],
            'q': [7.926677561000001],
            't': [0.5]
        }
        result7 = get_guess_params("toth", df1, key_uptakes, key_pressures)
        for key in guess_result_test7:
            np.testing.assert_array_almost_equal(guess_result_test7[key], result7[key])
        # bddt
        guess_result_test8 = {
            "c": [155.72513521595482],
            "n": [10],
            "g": [100],
            "q": [3.963338781]
        }
        result8 = get_guess_params("bddt", df1, key_uptakes, key_pressures)
        for key in guess_result_test8:
            np.testing.assert_array_almost_equal(guess_result_test8[key], result8[key])
        # dodo
        guess_result_test9 = {
            "ns": [7.926677561000001],
            "kf": [155.72513521595482],
            "nu": [79.26677561000001],
            "ku": [155.72513521595482],
            "m": [5]
        }
        result9 = get_guess_params("dodo", df1, key_uptakes, key_pressures)
        for key in guess_result_test9:
            np.testing.assert_array_almost_equal(guess_result_test9[key], result9[key])
        # bet
        guess_result_test10 = {
            'c': [155.72513521595482],
            'n': [7.926677561000001]
        }
        result10 = get_guess_params("bet", df1, key_uptakes, key_pressures)
        for key in guess_result_test10:
            np.testing.assert_array_almost_equal(guess_result_test10[key], result10[key])
    def test_get_fit_tuples(self):
        """Check the parameter tuples produced by ``get_fit_tuples`` for each
        model and each combination of the ``cond``/``henry_off`` flags.

        The expected tuples appear to follow lmfit's
        ``(name, value, vary, min, max[, expr])`` parameter format --
        confirm against the ``get_fit_tuples`` implementation.
        """
        guess1 = {'b': [155.7, 23.8, 1.9],
                  'q': [7, 7.25, 5.75]}
        # Keyword arguments shared by every call to get_fit_tuples below.
        std_data = {
            'temps': [10, 40, 100],
            'cust_bounds': None,
            'henry_constants': [1234, 172, 11],
            'q_fix': 7
        }
        def assert_tuples(left, **kwargs):
            # Compare the expected tuple sets in `left` (one set per dataset
            # index i) element-by-element against what get_fit_tuples returns.
            for i, tuple_set in enumerate(left):
                test = get_fit_tuples(i=i, **kwargs, **std_data)
                for j, tup in enumerate(tuple_set):
                    tup_test = test[j]
                    for k, item in enumerate(tup):
                        self.assertEqual(item, tup_test[k])
        # langmuir, cond=True, henry_off=False: q of later datasets is pinned
        # near q_fix and b is tied to the Henry constant via the 'delta/q'
        # expression
        result1 = (('q', 7, True, 0, None), ('delta', 1234, False), ('b', None, None, None, None, 'delta/q')), \
                  (('q', 7, True, 7, 7.001), ('delta', 172, False), ('b', None, None, None, None, 'delta/q')),\
                  (('q', 7, True, 7, 7.001), ('delta', 11, False), ('b', None, None, None, None, 'delta/q'))
        test1_kwargs = {
            'model': "langmuir",
            'guess': guess1,
            'cond': True,
            'henry_off': False}
        assert_tuples(result1, **test1_kwargs)
        # langmuir, henry_off=True: b is fitted directly from the guesses
        result2 = (('q', 7, True, 0, None), ('b', 155.7, True, 0, None)), \
                  (('q', 7, True, 7, 7.001), ('b', 23.8, True, 0, None)), \
                  (('q', 7, True, 7, 7.001), ('b', 1.9, True, 0, None))
        test2_kwargs = {
            'model': "langmuir",
            'guess': guess1,
            'cond': True,
            'henry_off': True}
        assert_tuples(result2, **test2_kwargs)
        # langmuir, cond=False: every dataset keeps its own guess values
        result3 = (('q', 7, True, 0, None), ('b', 155.7, True, 0, None)), \
                  (('q', 7.25, True, 0, None), ('b', 23.8, True, 0, None)), \
                  (('q', 5.75, True, 0, None), ('b', 1.9, True, 0, None))
        test3_kwargs = {
            'model': "langmuir",
            'guess': guess1,
            'cond': False,
            'henry_off': False}
        assert_tuples(result3, **test3_kwargs)
        # dsl
        guess2 = {
            'q1': [3, 2],
            'q2': [3, 2],
            'b1': [62, 32],
            'b2': [93, 32]
        }
        result4 = (('q1', 3, True, 0, None), ('q2', 3, True, 0, None), ('b1', 62, True, 0, None), ('b2', 93, True, 0, None)), \
                  (('q1', 2, True, 0, None), ('q2', 2, True, 0, None), ('b1', 32, True, 0, None),\
                   ('b2', 32, True, 0, None))
        test4_kwargs = {
            'model': "dsl",
            'guess': guess2
        }
        assert_tuples(result4, **test4_kwargs)
        # langmuir td: the temperature 't' enters as a fixed (vary=False)
        # parameter taken from std_data['temps']
        guess3 = {
            'b0': [155, 140],
            'q': [7, 6],
            'h': [-5000, -5000]
        }
        result5 = (('t', 10, False), ('q', 7, True, 0, None), ('h', -5000, True, None, None), ('b0', 155, True, 0, None)),\
                  (('t', 40, False), ('q', 7, True, 7, 7.001), ('h', -5000, True, None, None), ('b0', 140, True, 0, None))
        test5_kwargs = {
            'model': "langmuir td",
            'guess': guess3,
            'cond': True,
            'henry_off': False}
        assert_tuples(result5, **test5_kwargs)
        result6 = (('t', 10, False), ('q', 7, True, 0, None), ('h', -5000, True, None, None), ('b0', 155, True, 0, None)),\
                  (('t', 40, False), ('q', 6, True, 0, None), ('h', -5000, True, None, None), ('b0', 140, True, 0, None))
        test6_kwargs = {
            'model': "langmuir td",
            'guess': guess3,
            'cond': False,
            'henry_off': False}
        assert_tuples(result6, **test6_kwargs)
        # gab
        guess4 = {
            'n': [15, 14],
            'ka': [7, 6],
            'ca': [1, 2]
        }
        result7 = (('n', 15, True, 0, None), ('ka', 7, True, 0, None), ('ca', 1, True, 0, None)), \
                  (('n', 14, True, 0, None), ('ka', 6, True, 0, None), ('ca', 2, True, 0, None))
        test7_kwargs = {
            'model': "gab",
            'guess': guess4}
        assert_tuples(result7, **test7_kwargs)
        # mdr
        guess5 = {
            'n0': [7, 6],
            'n1': [100, 120],
            'a': [1, 2],
            'c': [3, 4]
        }
        result8 = (('n0', 7, True, 0, None), ('n1', 100, True, 0, None), ('a', 1, True, 0, None),
                   ('c', 3, True, 0, None)), \
                  (('n0', 6, True, 0, None), ('n1', 120, True, 0, None), ('a', 2, True, 0, None), \
                   ('c', 4, True, 0, None))
        test8_kwargs = {
            'model': "mdr",
            'guess': guess5,
            'cond': False,
            'henry_off': False}
        assert_tuples(result8, **test8_kwargs)
        result9 = (('n0', 7, True, 0, None), ('n1', 100, True, 0, None), ('a', 1, True, 0, None),
                   ('c', 3, True, 0, None)), \
                  (('n0', 7, True, 7, 7.001), ('n1', 120, True, 0, None), ('a', 2, True, 0, None), \
                   ('c', 4, True, 0, None))
        test9_kwargs = {
            'model': "mdr",
            'guess': guess5,
            'cond': True,
            'henry_off': True}
        assert_tuples(result9, **test9_kwargs)
        # mdr with cond=True and henry_off=False uses the 'delta/n0'
        # expression to tie n1 to the Henry constant
        result10 = (('n0', 7, True, 0, None),('delta', 1234, False), ('n1', None, None, None, None, 'delta/n0'), ('a', 1, True, 0, None),
                   ('c', 3, True, 0, None)), \
                   (('n0', 7, True, 7, 7.001),('delta', 172, False), ('n1', None, None, None, None, 'delta/n0'), ('a', 2, True, 0, None), \
                   ('c', 4, True, 0, None))
        test10_kwargs = {
            'model': "mdr",
            'guess': guess5,
            'cond': True,
            'henry_off': False}
        assert_tuples(result10, **test10_kwargs)
        # sips
        guess6 = {
            'q': [5, 4],
            'b': [100, 90],
            'n': [1, 0.9]
        }
        result11 = (('q', 5, True, 0, None), ('b', 100, True, 0, None), ('n', 1, True, 0, None)), \
                   (('q', 4, True, 0, None), ('b', 90, True, 0, None), ('n', 0.9, True, 0, None))
        test11_kwargs = {
            'model': "sips",
            'guess': guess6
        }
        assert_tuples(result11, **test11_kwargs)
        # toth
        guess7 = {
            'q': [5, 4],
            'b': [100, 90],
            't': [1, 0.9]
        }
        result12 = (('q', 5, True, 0, None), ('b', 100, True, 0, None), ('t', 1, True, 0, None)), \
                   (('q', 4, True, 0, None), ('b', 90, True, 0, None), ('t', 0.9, True, 0, None))
        test12_kwargs = {
            'model': "toth",
            'guess': guess7
        }
        assert_tuples(result12, **test12_kwargs)
        # bddt (default flags); result14 repeats with cond=True, which
        # bounds the 'c' parameter to [0, 1]
        guess8 = {
            'c': [0.1, 1],
            'n': [10, 20],
            'g': [99, 100],
            'q': [6, 5]
        }
        result13 = (('c', 0.1, True, 0, None), ('n', 10, True, 0, None), ('g', 99, True, 0, None),
                    ('q', 6, True, 0, None)), \
                   (('c', 1, True, 0, None), ('n', 20, True, 0, None), ('g', 100, True, 0, None), \
                    ('q', 5, True, 0, None))
        test13_kwargs = {
            'model': "bddt",
            'guess': guess8
        }
        assert_tuples(result13, **test13_kwargs)
        result14 = (('c', 0.1, True, 0, 1), ('n', 10, True, 0, None), ('g', 99, True, 0, None),
                    ('q', 6, True, 0, None)), \
                   (('c', 1, True, 0, 1), ('n', 20, True, 0, None), ('g', 100, True, 0, None), \
                    ('q', 5, True, 0, None))
        test14_kwargs = {
            'model': "bddt",
            'guess': guess8,
            'cond': True
        }
        assert_tuples(result14, **test14_kwargs)
        # dodo
        guess9 = {
            'ns': [1, 2],
            'kf': [3, 4],
            'nu': [5, 6],
            'ku': [7, 8],
            'm': [9, 10]
        }
        result15 = (('ns', 1, True, 0, None), ('kf', 3, True, 0, None), ('nu', 5, True, 0, None),
                    ('ku', 7, True, 0, None), ('m', 9, True, 0, None)), \
                   (('ns', 2, True, 0, None), ('kf', 4, True, 0, None), ('nu', 6, True, 0, None),
                    ('ku', 8, True, 0, None), ('m', 10, True, 0, None))
        test15_kwargs = {
            'model': "dodo",
            'guess': guess9
        }
        assert_tuples(result15, **test15_kwargs)
        # bet
        guess10 = {
            'n': [10, 11],
            'c': [12, 13]
        }
        result16 = (('n', 10, True, 0, None), ('c', 12, True, 0, None)), \
                   (('n', 11, True, 0, None), ('c', 13, True, 0, None))
        test16_kwargs = {
            'model': "bet",
            'guess': guess10
        }
        assert_tuples(result16, **test16_kwargs)
| [
"numpy.testing.assert_array_almost_equal",
"src.pyIsoFit.core.model_fit_def.get_guess_params",
"pandas.read_csv",
"src.pyIsoFit.core.model_fit_def.get_fit_tuples"
] | [((226, 298), 'pandas.read_csv', 'pd.read_csv', (['"""../Datasets for testing/Computational Data (EPFL) CO2.csv"""'], {}), "('../Datasets for testing/Computational Data (EPFL) CO2.csv')\n", (237, 298), True, 'import pandas as pd\n'), ((415, 476), 'src.pyIsoFit.core.model_fit_def.get_guess_params', 'get_guess_params', (['"""langmuir"""', 'df1', 'key_uptakes', 'key_pressures'], {}), "('langmuir', df1, key_uptakes, key_pressures)\n", (431, 476), False, 'from src.pyIsoFit.core.model_fit_def import get_guess_params, get_fit_tuples\n'), ((931, 987), 'src.pyIsoFit.core.model_fit_def.get_guess_params', 'get_guess_params', (['"""dsl"""', 'df1', 'key_uptakes', 'key_pressures'], {}), "('dsl', df1, key_uptakes, key_pressures)\n", (947, 987), False, 'from src.pyIsoFit.core.model_fit_def import get_guess_params, get_fit_tuples\n'), ((1008, 1067), 'src.pyIsoFit.core.model_fit_def.get_guess_params', 'get_guess_params', (['"""dsl nc"""', 'df1', 'key_uptakes', 'key_pressures'], {}), "('dsl nc', df1, key_uptakes, key_pressures)\n", (1024, 1067), False, 'from src.pyIsoFit.core.model_fit_def import get_guess_params, get_fit_tuples\n'), ((1450, 1514), 'src.pyIsoFit.core.model_fit_def.get_guess_params', 'get_guess_params', (['"""langmuir td"""', 'df1', 'key_uptakes', 'key_pressures'], {}), "('langmuir td', df1, key_uptakes, key_pressures)\n", (1466, 1514), False, 'from src.pyIsoFit.core.model_fit_def import get_guess_params, get_fit_tuples\n'), ((1784, 1840), 'src.pyIsoFit.core.model_fit_def.get_guess_params', 'get_guess_params', (['"""gab"""', 'df1', 'key_uptakes', 'key_pressures'], {}), "('gab', df1, key_uptakes, key_pressures)\n", (1800, 1840), False, 'from src.pyIsoFit.core.model_fit_def import get_guess_params, get_fit_tuples\n'), ((2172, 2228), 'src.pyIsoFit.core.model_fit_def.get_guess_params', 'get_guess_params', (['"""mdr"""', 'df1', 'key_uptakes', 'key_pressures'], {}), "('mdr', df1, key_uptakes, key_pressures)\n", (2188, 2228), False, 'from src.pyIsoFit.core.model_fit_def 
import get_guess_params, get_fit_tuples\n'), ((2516, 2573), 'src.pyIsoFit.core.model_fit_def.get_guess_params', 'get_guess_params', (['"""sips"""', 'df1', 'key_uptakes', 'key_pressures'], {}), "('sips', df1, key_uptakes, key_pressures)\n", (2532, 2573), False, 'from src.pyIsoFit.core.model_fit_def import get_guess_params, get_fit_tuples\n'), ((2863, 2920), 'src.pyIsoFit.core.model_fit_def.get_guess_params', 'get_guess_params', (['"""toth"""', 'df1', 'key_uptakes', 'key_pressures'], {}), "('toth', df1, key_uptakes, key_pressures)\n", (2879, 2920), False, 'from src.pyIsoFit.core.model_fit_def import get_guess_params, get_fit_tuples\n'), ((3227, 3284), 'src.pyIsoFit.core.model_fit_def.get_guess_params', 'get_guess_params', (['"""bddt"""', 'df1', 'key_uptakes', 'key_pressures'], {}), "('bddt', df1, key_uptakes, key_pressures)\n", (3243, 3284), False, 'from src.pyIsoFit.core.model_fit_def import get_guess_params, get_fit_tuples\n'), ((3653, 3710), 'src.pyIsoFit.core.model_fit_def.get_guess_params', 'get_guess_params', (['"""dodo"""', 'df1', 'key_uptakes', 'key_pressures'], {}), "('dodo', df1, key_uptakes, key_pressures)\n", (3669, 3710), False, 'from src.pyIsoFit.core.model_fit_def import get_guess_params, get_fit_tuples\n'), ((3978, 4034), 'src.pyIsoFit.core.model_fit_def.get_guess_params', 'get_guess_params', (['"""bet"""', 'df1', 'key_uptakes', 'key_pressures'], {}), "('bet', df1, key_uptakes, key_pressures)\n", (3994, 4034), False, 'from src.pyIsoFit.core.model_fit_def import get_guess_params, get_fit_tuples\n'), ((643, 718), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['guess_result_test1[key]', 'result1[key]'], {}), '(guess_result_test1[key], result1[key])\n', (679, 718), True, 'import numpy as np\n'), ((1120, 1195), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['guess_result_test2[key]', 'result2[key]'], {}), '(guess_result_test2[key], result2[key])\n', (1156, 1195), True, 'import 
numpy as np\n'), ((1208, 1285), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['guess_result_test2[key]', 'result2_d[key]'], {}), '(guess_result_test2[key], result2_d[key])\n', (1244, 1285), True, 'import numpy as np\n'), ((1567, 1642), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['guess_result_test3[key]', 'result3[key]'], {}), '(guess_result_test3[key], result3[key])\n', (1603, 1642), True, 'import numpy as np\n'), ((1893, 1968), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['guess_result_test4[key]', 'result4[key]'], {}), '(guess_result_test4[key], result4[key])\n', (1929, 1968), True, 'import numpy as np\n'), ((2281, 2356), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['guess_result_test5[key]', 'result5[key]'], {}), '(guess_result_test5[key], result5[key])\n', (2317, 2356), True, 'import numpy as np\n'), ((2626, 2701), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['guess_result_test6[key]', 'result6[key]'], {}), '(guess_result_test6[key], result6[key])\n', (2662, 2701), True, 'import numpy as np\n'), ((2973, 3048), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['guess_result_test7[key]', 'result7[key]'], {}), '(guess_result_test7[key], result7[key])\n', (3009, 3048), True, 'import numpy as np\n'), ((3337, 3412), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['guess_result_test8[key]', 'result8[key]'], {}), '(guess_result_test8[key], result8[key])\n', (3373, 3412), True, 'import numpy as np\n'), ((3763, 3838), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['guess_result_test9[key]', 'result9[key]'], {}), '(guess_result_test9[key], result9[key])\n', (3799, 3838), True, 'import numpy as np\n'), ((4088, 4165), 'numpy.testing.assert_array_almost_equal', 
'np.testing.assert_array_almost_equal', (['guess_result_test10[key]', 'result10[key]'], {}), '(guess_result_test10[key], result10[key])\n', (4124, 4165), True, 'import numpy as np\n'), ((4573, 4614), 'src.pyIsoFit.core.model_fit_def.get_fit_tuples', 'get_fit_tuples', ([], {'i': 'i'}), '(i=i, **kwargs, **std_data)\n', (4587, 4614), False, 'from src.pyIsoFit.core.model_fit_def import get_guess_params, get_fit_tuples\n')] |
#Plot absorption and scattering power spectra of the crescent for a passing electron
import numpy as np
import matplotlib.pyplot as plt
from Calc_power_cresc import Pow_abs_rad, \
Pow_abs_rad_r,\
Pow_abs_rad_hori,\
Pow_sca_rad, Pow_sca_r,\
Pow_sca_hori
def Absorption(R1_cyl, R2_cyl, inv, epos, sca, vel, orientation):
    """Plot the absorbed-power spectrum Q(omega) for an electron passing the crescent.

    Parameters
    ----------
    R1_cyl, R2_cyl : float
        Radii of the two cylinders in the inverted (cylinder) frame.
    inv : float
        Inversion point of the conformal transformation.
    epos : float
        Electron offset from the crescent boundary.
    sca : float
        Scale factor of the conformal map.
    vel : float
        Electron velocity as a fraction of the speed of light.
    orientation : int
        1 -> electron beside the leftmost point of the inner boundary
        (``Pow_abs_rad``); 3 -> beside the rightmost point
        (``Pow_abs_rad_r``); anything else -> above the topmost point of
        the outer boundary, horizontal trajectory (``Pow_abs_rad_hori``).

    Draws the spectrum (in eV) on figure 4 with a logarithmic y-axis.
    """
    phi = np.arange(0, 2*np.pi, 0.001)
    # Crescent boundary in the physical frame: images of the two cylinders
    # under z = sca / (R*exp(i*phi) - inv)
    z_1 = sca / (R1_cyl*np.exp(1j*phi) - inv)
    z_2 = sca / (R2_cyl*np.exp(1j*phi) - inv)
    # Physical constants and frequency sweep
    c = 3e8                        # speed of light [m/s]
    omega = np.arange(0.01, 8, 0.01)  # photon energies [eV]
    c_e = vel*c                    # electron velocity [m/s]
    plt.figure(4)
    plt.clf()
    plt.subplot(111)
    Q = np.zeros(omega.size)
    if orientation == 1:
        # Electron beside the leftmost point of the inner boundary
        x_e0 = min(np.real(z_2))
        x_e = x_e0 + np.sign(x_e0)*epos
        for m, w in enumerate(omega):
            Q[m] = Pow_abs_rad(inv, x_e, c_e, sca, R1_cyl, R2_cyl, w)
    elif orientation == 3:
        # Electron beside the rightmost point of the inner boundary
        x_e0 = max(np.real(z_2))
        x_e = x_e0 + epos
        for m, w in enumerate(omega):
            Q[m] = Pow_abs_rad_r(inv, x_e, c_e, sca, R1_cyl, R2_cyl, w)
    else:
        # Electron travelling horizontally above the structure
        y_e0 = max(np.imag(z_1))
        x_e = y_e0 + epos
        for m, w in enumerate(omega):
            Q[m] = Pow_abs_rad_hori(inv, x_e, c_e, sca, R1_cyl, R2_cyl, w)
    # Convert J -> eV by dividing by the elementary charge
    plt.plot(omega, Q/1.6e-19)
    plt.yscale('log')
    plt.xlabel(r'$\omega/eV$')
    plt.ylabel('Q/eV')
    plt.gcf().tight_layout()
    plt.figure(4).canvas.draw()
def Scattering(R1_cyl, R2_cyl, inv, epos, sca, vel, orientation):
    """Plot the scattered-power spectrum S(omega) for an electron passing the crescent.

    Parameters
    ----------
    R1_cyl, R2_cyl : float
        Radii of the two cylinders in the inverted (cylinder) frame.
    inv : float
        Inversion point of the conformal transformation.
    epos : float
        Electron offset from the crescent boundary.
    sca : float
        Scale factor of the conformal map.
    vel : float
        Electron velocity as a fraction of the speed of light.
    orientation : int
        1 -> electron beside the leftmost point of the inner boundary
        (``Pow_sca_rad``); 3 -> beside the rightmost point
        (``Pow_sca_r``); anything else -> above the topmost point of the
        outer boundary, horizontal trajectory (``Pow_sca_hori``).

    Draws the spectrum (in eV) on figure 5 with a logarithmic y-axis.
    """
    phi = np.arange(0, 2*np.pi, 0.001)
    # Crescent boundary in the physical frame: images of the two cylinders
    # under z = sca / (R*exp(i*phi) - inv)
    z_1 = sca / (R1_cyl*np.exp(1j*phi) - inv)
    z_2 = sca / (R2_cyl*np.exp(1j*phi) - inv)
    # Physical constants and frequency sweep
    c = 3e8                        # speed of light [m/s]
    omega = np.arange(0.01, 8, 0.01)  # photon energies [eV]
    c_e = vel*c                    # electron velocity [m/s]
    plt.figure(5)
    plt.clf()
    plt.subplot(111)
    S = np.zeros(omega.size)
    if orientation == 1:
        # Electron beside the leftmost point of the inner boundary
        x_e0 = min(np.real(z_2))
        x_e = x_e0 + np.sign(x_e0)*epos
        for m, w in enumerate(omega):
            S[m] = Pow_sca_rad(inv, x_e, c_e, sca, R1_cyl, R2_cyl, w)
    elif orientation == 3:
        # Electron beside the rightmost point of the inner boundary
        x_e0 = max(np.real(z_2))
        x_e = x_e0 + epos
        for m, w in enumerate(omega):
            S[m] = Pow_sca_r(inv, x_e, c_e, sca, R1_cyl, R2_cyl, w)
    else:
        # Electron travelling horizontally above the structure
        y_e0 = max(np.imag(z_1))
        x_e = y_e0 + epos
        for m, w in enumerate(omega):
            S[m] = Pow_sca_hori(inv, x_e, c_e, sca, R1_cyl, R2_cyl, w)
    # Convert J -> eV by dividing by the elementary charge
    plt.plot(omega, S/1.6e-19)
    plt.yscale('log')
    plt.xlabel(r'$\omega/eV$')
    plt.ylabel('Scattering/eV')
    plt.gcf().tight_layout()
    plt.figure(5).canvas.draw()
| [
"matplotlib.pyplot.ylabel",
"Calc_power_cresc.Pow_abs_rad",
"Calc_power_cresc.Pow_sca_r",
"numpy.imag",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.exp",
"numpy.real",
"Calc_power_cresc.Pow_abs_rad_hori",
"Calc_power_cresc.Pow_abs_rad_r",
"Calc_power_cresc.Pow_... | [((460, 490), 'numpy.arange', 'np.arange', (['(0)', '(2 * np.pi)', '(0.001)'], {}), '(0, 2 * np.pi, 0.001)\n', (469, 490), True, 'import numpy as np\n'), ((738, 762), 'numpy.arange', 'np.arange', (['(0.01)', '(8)', '(0.01)'], {}), '(0.01, 8, 0.01)\n', (747, 762), True, 'import numpy as np\n'), ((804, 817), 'matplotlib.pyplot.figure', 'plt.figure', (['(4)'], {}), '(4)\n', (814, 817), True, 'import matplotlib.pyplot as plt\n'), ((822, 831), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (829, 831), True, 'import matplotlib.pyplot as plt\n'), ((836, 852), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (847, 852), True, 'import matplotlib.pyplot as plt\n'), ((1619, 1636), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (1629, 1636), True, 'import matplotlib.pyplot as plt\n'), ((1641, 1667), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\omega/eV$"""'], {}), "('$\\\\omega/eV$')\n", (1651, 1667), True, 'import matplotlib.pyplot as plt\n'), ((1671, 1689), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Q/eV"""'], {}), "('Q/eV')\n", (1681, 1689), True, 'import matplotlib.pyplot as plt\n'), ((1829, 1859), 'numpy.arange', 'np.arange', (['(0)', '(2 * np.pi)', '(0.001)'], {}), '(0, 2 * np.pi, 0.001)\n', (1838, 1859), True, 'import numpy as np\n'), ((2107, 2131), 'numpy.arange', 'np.arange', (['(0.01)', '(8)', '(0.01)'], {}), '(0.01, 8, 0.01)\n', (2116, 2131), True, 'import numpy as np\n'), ((2173, 2186), 'matplotlib.pyplot.figure', 'plt.figure', (['(5)'], {}), '(5)\n', (2183, 2186), True, 'import matplotlib.pyplot as plt\n'), ((2191, 2200), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2198, 2200), True, 'import matplotlib.pyplot as plt\n'), ((2205, 2221), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (2216, 2221), True, 'import matplotlib.pyplot as plt\n'), ((2980, 2997), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", 
(2990, 2997), True, 'import matplotlib.pyplot as plt\n'), ((3002, 3028), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\omega/eV$"""'], {}), "('$\\\\omega/eV$')\n", (3012, 3028), True, 'import matplotlib.pyplot as plt\n'), ((3032, 3059), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Scattering/eV"""'], {}), "('Scattering/eV')\n", (3042, 3059), True, 'import matplotlib.pyplot as plt\n'), ((871, 885), 'numpy.size', 'np.size', (['omega'], {}), '(omega)\n', (878, 885), True, 'import numpy as np\n'), ((1112, 1140), 'matplotlib.pyplot.plot', 'plt.plot', (['omega', '(Q / 1.6e-19)'], {}), '(omega, Q / 1.6e-19)\n', (1120, 1140), True, 'import matplotlib.pyplot as plt\n'), ((2240, 2254), 'numpy.size', 'np.size', (['omega'], {}), '(omega)\n', (2247, 2254), True, 'import numpy as np\n'), ((2481, 2509), 'matplotlib.pyplot.plot', 'plt.plot', (['omega', '(S / 1.6e-19)'], {}), '(omega, S / 1.6e-19)\n', (2489, 2509), True, 'import matplotlib.pyplot as plt\n'), ((929, 941), 'numpy.real', 'np.real', (['z_2'], {}), '(z_2)\n', (936, 941), True, 'import numpy as np\n'), ((1009, 1023), 'numpy.size', 'np.size', (['omega'], {}), '(omega)\n', (1016, 1023), True, 'import numpy as np\n'), ((1045, 1102), 'Calc_power_cresc.Pow_abs_rad', 'Pow_abs_rad', (['inv', 'x_e', 'c_e', 'sca', 'R1_cyl', 'R2_cyl', 'omega[m]'], {}), '(inv, x_e, c_e, sca, R1_cyl, R2_cyl, omega[m])\n', (1056, 1102), False, 'from Calc_power_cresc import Pow_abs_rad, Pow_abs_rad_r, Pow_abs_rad_hori, Pow_sca_rad, Pow_sca_r, Pow_sca_hori\n'), ((1355, 1383), 'matplotlib.pyplot.plot', 'plt.plot', (['omega', '(Q / 1.6e-19)'], {}), '(omega, Q / 1.6e-19)\n', (1363, 1383), True, 'import matplotlib.pyplot as plt\n'), ((1586, 1614), 'matplotlib.pyplot.plot', 'plt.plot', (['omega', '(Q / 1.6e-19)'], {}), '(omega, Q / 1.6e-19)\n', (1594, 1614), True, 'import matplotlib.pyplot as plt\n'), ((1694, 1703), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1701, 1703), True, 'import matplotlib.pyplot as plt\n'), ((2298, 2310), 
'numpy.real', 'np.real', (['z_2'], {}), '(z_2)\n', (2305, 2310), True, 'import numpy as np\n'), ((2378, 2392), 'numpy.size', 'np.size', (['omega'], {}), '(omega)\n', (2385, 2392), True, 'import numpy as np\n'), ((2414, 2471), 'Calc_power_cresc.Pow_sca_rad', 'Pow_sca_rad', (['inv', 'x_e', 'c_e', 'sca', 'R1_cyl', 'R2_cyl', 'omega[m]'], {}), '(inv, x_e, c_e, sca, R1_cyl, R2_cyl, omega[m])\n', (2425, 2471), False, 'from Calc_power_cresc import Pow_abs_rad, Pow_abs_rad_r, Pow_abs_rad_hori, Pow_sca_rad, Pow_sca_r, Pow_sca_hori\n'), ((2720, 2748), 'matplotlib.pyplot.plot', 'plt.plot', (['omega', '(S / 1.6e-19)'], {}), '(omega, S / 1.6e-19)\n', (2728, 2748), True, 'import matplotlib.pyplot as plt\n'), ((2947, 2975), 'matplotlib.pyplot.plot', 'plt.plot', (['omega', '(S / 1.6e-19)'], {}), '(omega, S / 1.6e-19)\n', (2955, 2975), True, 'import matplotlib.pyplot as plt\n'), ((3064, 3073), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3071, 3073), True, 'import matplotlib.pyplot as plt\n'), ((512, 530), 'numpy.exp', 'np.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (518, 530), True, 'import numpy as np\n'), ((558, 576), 'numpy.exp', 'np.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (564, 576), True, 'import numpy as np\n'), ((964, 977), 'numpy.sign', 'np.sign', (['x_e0'], {}), '(x_e0)\n', (971, 977), True, 'import numpy as np\n'), ((1184, 1196), 'numpy.real', 'np.real', (['z_2'], {}), '(z_2)\n', (1191, 1196), True, 'import numpy as np\n'), ((1250, 1264), 'numpy.size', 'np.size', (['omega'], {}), '(omega)\n', (1257, 1264), True, 'import numpy as np\n'), ((1286, 1345), 'Calc_power_cresc.Pow_abs_rad_r', 'Pow_abs_rad_r', (['inv', 'x_e', 'c_e', 'sca', 'R1_cyl', 'R2_cyl', 'omega[m]'], {}), '(inv, x_e, c_e, sca, R1_cyl, R2_cyl, omega[m])\n', (1299, 1345), False, 'from Calc_power_cresc import Pow_abs_rad, Pow_abs_rad_r, Pow_abs_rad_hori, Pow_sca_rad, Pow_sca_r, Pow_sca_hori\n'), ((1412, 1424), 'numpy.imag', 'np.imag', (['z_1'], {}), '(z_1)\n', (1419, 1424), True, 'import 
numpy as np\n'), ((1478, 1492), 'numpy.size', 'np.size', (['omega'], {}), '(omega)\n', (1485, 1492), True, 'import numpy as np\n'), ((1514, 1576), 'Calc_power_cresc.Pow_abs_rad_hori', 'Pow_abs_rad_hori', (['inv', 'x_e', 'c_e', 'sca', 'R1_cyl', 'R2_cyl', 'omega[m]'], {}), '(inv, x_e, c_e, sca, R1_cyl, R2_cyl, omega[m])\n', (1530, 1576), False, 'from Calc_power_cresc import Pow_abs_rad, Pow_abs_rad_r, Pow_abs_rad_hori, Pow_sca_rad, Pow_sca_r, Pow_sca_hori\n'), ((1723, 1736), 'matplotlib.pyplot.figure', 'plt.figure', (['(4)'], {}), '(4)\n', (1733, 1736), True, 'import matplotlib.pyplot as plt\n'), ((1881, 1899), 'numpy.exp', 'np.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (1887, 1899), True, 'import numpy as np\n'), ((1927, 1945), 'numpy.exp', 'np.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (1933, 1945), True, 'import numpy as np\n'), ((2333, 2346), 'numpy.sign', 'np.sign', (['x_e0'], {}), '(x_e0)\n', (2340, 2346), True, 'import numpy as np\n'), ((2553, 2565), 'numpy.real', 'np.real', (['z_2'], {}), '(z_2)\n', (2560, 2565), True, 'import numpy as np\n'), ((2619, 2633), 'numpy.size', 'np.size', (['omega'], {}), '(omega)\n', (2626, 2633), True, 'import numpy as np\n'), ((2655, 2710), 'Calc_power_cresc.Pow_sca_r', 'Pow_sca_r', (['inv', 'x_e', 'c_e', 'sca', 'R1_cyl', 'R2_cyl', 'omega[m]'], {}), '(inv, x_e, c_e, sca, R1_cyl, R2_cyl, omega[m])\n', (2664, 2710), False, 'from Calc_power_cresc import Pow_abs_rad, Pow_abs_rad_r, Pow_abs_rad_hori, Pow_sca_rad, Pow_sca_r, Pow_sca_hori\n'), ((2777, 2789), 'numpy.imag', 'np.imag', (['z_1'], {}), '(z_1)\n', (2784, 2789), True, 'import numpy as np\n'), ((2843, 2857), 'numpy.size', 'np.size', (['omega'], {}), '(omega)\n', (2850, 2857), True, 'import numpy as np\n'), ((2879, 2937), 'Calc_power_cresc.Pow_sca_hori', 'Pow_sca_hori', (['inv', 'x_e', 'c_e', 'sca', 'R1_cyl', 'R2_cyl', 'omega[m]'], {}), '(inv, x_e, c_e, sca, R1_cyl, R2_cyl, omega[m])\n', (2891, 2937), False, 'from Calc_power_cresc import Pow_abs_rad, Pow_abs_rad_r, 
Pow_abs_rad_hori, Pow_sca_rad, Pow_sca_r, Pow_sca_hori\n'), ((3093, 3106), 'matplotlib.pyplot.figure', 'plt.figure', (['(5)'], {}), '(5)\n', (3103, 3106), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python3
from pathlib import Path
import defopt
import numpy as np
import pandas as pd
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
from gp_ppc import load_data
from gp_model import extract_filters
from plot_orsolic_paper import plot_psycho, plot_chrono
from strenum import strenum
import seaborn as sb
def get_lick_stims(row, filter_idx):
    """
    Get filter activations at the time of licks.

    Parameters
    ----------
    row : object
        Row of a dataframe for a given trial; must expose ``rt`` (lick
        reaction time in samples, possibly NaN) and ``projected_stim``
        (2-d array indexed by time sample and filter).
    filter_idx : int or array-like
        Filter activations to extract.

    Returns
    -------
    Activation(s) of the selected filter(s) at the lick time, or
    ``np.nan`` when no lick occurred on this trial.
    """
    # Use `not` instead of the original bitwise `~`: `~` only happens to
    # work on numpy bool scalars and silently misbehaves on plain Python
    # bools (~True == -2, which is truthy).
    if not np.isnan(row.rt):
        return row.projected_stim[int(row.rt), filter_idx]
    else:
        return np.nan
def make_plots(model_dir, block, axes, axes_early, folds, n_samples):
    """Plot the effects of zeroing top filters.

    Loads the full model predictions plus two ablated prediction sets
    (filter 0 and filter 1 dropped), restricts them to one hazard-rate
    block, and draws psychometric / chronometric curves, lick-time filter
    activations, and early-lick proportions onto the supplied axes.

    Parameters are not validated here:
    - model_dir: path prefix containing the prediction pickles and model params
    - block: 'split' selects the hazard-rate-block trials, anything else
      selects the 'nonsplit' trials
    - axes: three axes (psychometric, chronometric, lick activations)
    - axes_early: single axis for the early-lick violin plot
    - folds, n_samples: forwarded to load_data
    """
    pred_filename = model_dir + 'predictions.pickle'
    pred_filename_0 = model_dir + 'predictions_drop_filter_0.pickle'
    pred_filename_1 = model_dir + 'predictions_drop_filter_1.pickle'
    # dset: observed data; dset_pred_*: model samples (full / ablated)
    dset, dset_pred_full = load_data(pred_filename, n_samples, folds)
    _, dset_pred_0 = load_data(pred_filename_0, n_samples, folds)
    _, dset_pred_1 = load_data(pred_filename_1, n_samples, folds)
    # keep only the trials belonging to the requested hazard-rate block
    if block == 'split':
        dset = dset[dset.hazard != 'nonsplit'].copy()
        dset_pred_full = dset_pred_full[dset_pred_full.hazard != 'nonsplit'].copy()
        dset_pred_0 = dset_pred_0[dset_pred_0.hazard != 'nonsplit'].copy()
        dset_pred_1 = dset_pred_1[dset_pred_1.hazard != 'nonsplit'].copy()
    else:
        dset = dset[dset.hazard == 'nonsplit'].copy()
        dset_pred_full = dset_pred_full[dset_pred_full.hazard == 'nonsplit'].copy()
        dset_pred_0 = dset_pred_0[dset_pred_0.hazard == 'nonsplit'].copy()
        dset_pred_1 = dset_pred_1[dset_pred_1.hazard == 'nonsplit'].copy()
    dsets = [ dset_pred_full, dset_pred_0, dset_pred_1 ]
    (f0_color, f1_color) = (sb.xkcd_rgb['mauve'], sb.xkcd_rgb['green'])
    colors = [ 'k', f0_color, f1_color ]
    labels = [ 'Full', '-Filter 1', '-Filter 2']
    # psychometric functions: hit rate vs signal for non-early trials
    hitlicks_test = (
        dset[~dset['early']]
        .groupby('sig').agg({'hit': 'mean'})
    )
    axes[0].plot(hitlicks_test, '--.r')
    # chronometric functions: mean RT change (in seconds) vs signal
    period = 0.05
    hitrt_test = period * (
        dset[dset['hit'] & (dset['sig'] > 0)]
        .groupby('sig').agg({'rt_change': 'mean'})
    )
    # NOTE(review): redundant re-assignment — period is already 0.05 above
    period = 0.05
    axes[1].plot(hitrt_test, '--.r')
    for (d, c, l) in zip(dsets, colors, labels):
        plot_psycho(d, axes[0], l, color=c)
        plot_chrono(d, period, axes[1], color=c)
    axes[0].set_ylim(0, 1)
    axes[0].set_xlim(0, 2)
    axes[1].set_xlim(0, 2)
    # plot filter activations at the time of licks
    model_params = dict(np.load(model_dir + 'model/model_params_best.npz'))
    filters, filters_idx, flip_mask = extract_filters(model_params)
    predictions = pd.read_pickle(pred_filename)
    # convert signal from nats to bits
    predictions['sig'] /= np.log(2)
    if block == 'split':
        predictions = predictions[predictions.hazard != 'nonsplit'].copy()
    else:
        predictions = predictions[predictions.hazard != 'split'].copy()
    for (f, c) in zip([0, 1], [f0_color, f1_color]):
        col_name = 'lick_stim_{}'.format(f)
        predictions[col_name] = predictions.apply(get_lick_stims,
                                                  args=(filters_idx[f],), axis=1)
        # make sure sign of filter activations is consistent
        if flip_mask[f]:
            predictions[col_name] = -predictions[col_name]
        hitlicks_pred = (
            predictions[predictions['outcome']=='Hit']
            .groupby(['sig']).agg({col_name: 'mean'})
        )
        # false alarms plotted as a single point left of the signal axis
        fa_licks = predictions[predictions['outcome']=='FA'][col_name].mean()
        axes[2].plot(hitlicks_pred, color=c)
        axes[2].plot(-.25, fa_licks, '.', color=c)
    axes[2].axhline(0, linestyle=':')
    # plot proportion of early licks
    early_licks_full = dset_pred_full.groupby('sample_id').agg({'early': np.mean})
    early_licks_full['dset'] = 'Full'
    early_licks_0 = dset_pred_0.groupby('sample_id').agg({'early': np.mean})
    early_licks_0['dset'] = 'Without filter 1'
    early_licks_1 = dset_pred_1.groupby('sample_id').agg({'early': np.mean})
    early_licks_1['dset'] = 'Without filter 2'
    my_pal = {"Full": "k",
              "Without filter 1": sb.xkcd_rgb['mauve'],
              "Without filter 2": sb.xkcd_rgb['green']}
    # observed early-lick rate as a red reference dot at x = -1
    axes_early.plot(-1, dset['early'].mean(), '.r')
    vp = sb.violinplot(x = 'dset', y = 'early',
                       data=pd.concat((early_licks_full, early_licks_0, early_licks_1)),
                       inner=None, ax=axes_early, palette=my_pal)
    vp.set(xlabel=None, ylabel=None)
    axes_early.set_xlim(-1.5, 2.5)
    axes_early.set_xticks(np.arange(-1, 3))
    axes_early.set_xticklabels([])
Fold = strenum('Fold', 'train val test')
def main(figure_dir, *, folds=('test','val','train'), n_samples=200):
    """Evaluate the contribution of the top two stimulus filters to model performance

    :param str figure_dir: directory for generated figures
    :param list[Fold] folds: data folds to use
    :param int n_samples: number of samples
    """
    # NOTE: the docstring above is parsed by defopt to build the CLI help,
    # so its :param: lines must stay in sync with the signature.
    # set seaborn style, fix sans-serif font to avoid missing minus sign in pdf
    rc_params = {
        'font.sans-serif': ['Arial'],
        'font.size': 8,
        'lines.linewidth': 0.5,
        'axes.linewidth': 0.5,
        'xtick.major.width': 0.5,
        'ytick.major.width': 0.5,
        'axes.titlesize': 8,
        'axes.labelsize': 8,
        'xtick.major.size': 1,
        'ytick.major.size': 1
    }
    sb.set(style='ticks')
    sb.set_context('paper', rc=rc_params)
    plt.rcParams['pdf.fonttype'] = 'truetype'
    # animals included in the manuscript figures
    mice = ['IO_075', 'IO_078', 'IO_079', 'IO_080', 'IO_081', 'IO_083']
    figure_path = Path(figure_dir)
    figure_path.mkdir(parents=True, exist_ok=True)
    # two multi-page PDFs: one for the curves, one for the early-lick violins
    with PdfPages(str(figure_path / 'drop_filters.pdf')) as pdf_1, \
            PdfPages(str(figure_path / 'early_licks.pdf')) as pdf_2 :
        fig_plots, axes_plots = plt.subplots(
            len(mice), 6, figsize=(20/2.54, 3/2.54 * len(mice))
        )
        fig_early, axes_early = plt.subplots(
            2, 6, figsize=(20/2.54, 6/2.54)
        )
        for ii, mouse in enumerate(mice):
            model_dir = 'manuscript/results/' + mouse + \
                '__constant__matern52__proj_wtime__ard/'
            # running version, no hazard rate blocks
            make_plots(model_dir, 'nonsplit',
                       axes_plots[ii,0:3], axes_early[0,ii], folds, n_samples)
            # stationary version with hazard rate blocks
            make_plots(model_dir, 'split',
                       axes_plots[ii,3:], axes_early[1,ii], folds, n_samples)
        sb.despine(fig_plots, offset=3, trim=False)
        fig_plots.tight_layout()
        pdf_1.savefig(fig_plots)
        plt.close(fig_plots)
        sb.despine(fig_early, offset=3, trim=False)
        fig_early.tight_layout()
        pdf_2.savefig(fig_early)
        plt.close(fig_early)
# Script entry point: defopt builds the command-line interface from
# main's signature and docstring.
if __name__ == "__main__":
    defopt.run(main)
| [
"pandas.read_pickle",
"gp_model.extract_filters",
"seaborn.set",
"plot_orsolic_paper.plot_psycho",
"pathlib.Path",
"numpy.arange",
"seaborn.despine",
"numpy.log",
"seaborn.set_context",
"gp_ppc.load_data",
"matplotlib.pyplot.close",
"numpy.isnan",
"pandas.concat",
"defopt.run",
"strenum.... | [((4748, 4781), 'strenum.strenum', 'strenum', (['"""Fold"""', '"""train val test"""'], {}), "('Fold', 'train val test')\n", (4755, 4781), False, 'from strenum import strenum\n'), ((1008, 1050), 'gp_ppc.load_data', 'load_data', (['pred_filename', 'n_samples', 'folds'], {}), '(pred_filename, n_samples, folds)\n', (1017, 1050), False, 'from gp_ppc import load_data\n'), ((1072, 1116), 'gp_ppc.load_data', 'load_data', (['pred_filename_0', 'n_samples', 'folds'], {}), '(pred_filename_0, n_samples, folds)\n', (1081, 1116), False, 'from gp_ppc import load_data\n'), ((1138, 1182), 'gp_ppc.load_data', 'load_data', (['pred_filename_1', 'n_samples', 'folds'], {}), '(pred_filename_1, n_samples, folds)\n', (1147, 1182), False, 'from gp_ppc import load_data\n'), ((2810, 2839), 'gp_model.extract_filters', 'extract_filters', (['model_params'], {}), '(model_params)\n', (2825, 2839), False, 'from gp_model import extract_filters\n'), ((2858, 2887), 'pandas.read_pickle', 'pd.read_pickle', (['pred_filename'], {}), '(pred_filename)\n', (2872, 2887), True, 'import pandas as pd\n'), ((2914, 2923), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (2920, 2923), True, 'import numpy as np\n'), ((5519, 5540), 'seaborn.set', 'sb.set', ([], {'style': '"""ticks"""'}), "(style='ticks')\n", (5525, 5540), True, 'import seaborn as sb\n'), ((5545, 5582), 'seaborn.set_context', 'sb.set_context', (['"""paper"""'], {'rc': 'rc_params'}), "('paper', rc=rc_params)\n", (5559, 5582), True, 'import seaborn as sb\n'), ((5721, 5737), 'pathlib.Path', 'Path', (['figure_dir'], {}), '(figure_dir)\n', (5725, 5737), False, 'from pathlib import Path\n'), ((6973, 6989), 'defopt.run', 'defopt.run', (['main'], {}), '(main)\n', (6983, 6989), False, 'import defopt\n'), ((560, 576), 'numpy.isnan', 'np.isnan', (['row.rt'], {}), '(row.rt)\n', (568, 576), True, 'import numpy as np\n'), ((2477, 2512), 'plot_orsolic_paper.plot_psycho', 'plot_psycho', (['d', 'axes[0]', 'l'], {'color': 'c'}), '(d, axes[0], l, 
color=c)\n', (2488, 2512), False, 'from plot_orsolic_paper import plot_psycho, plot_chrono\n'), ((2521, 2561), 'plot_orsolic_paper.plot_chrono', 'plot_chrono', (['d', 'period', 'axes[1]'], {'color': 'c'}), '(d, period, axes[1], color=c)\n', (2532, 2561), False, 'from plot_orsolic_paper import plot_psycho, plot_chrono\n'), ((2720, 2770), 'numpy.load', 'np.load', (["(model_dir + 'model/model_params_best.npz')"], {}), "(model_dir + 'model/model_params_best.npz')\n", (2727, 2770), True, 'import numpy as np\n'), ((4687, 4703), 'numpy.arange', 'np.arange', (['(-1)', '(3)'], {}), '(-1, 3)\n', (4696, 4703), True, 'import numpy as np\n'), ((6077, 6126), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(6)'], {'figsize': '(20 / 2.54, 6 / 2.54)'}), '(2, 6, figsize=(20 / 2.54, 6 / 2.54))\n', (6089, 6126), True, 'import matplotlib.pyplot as plt\n'), ((6653, 6696), 'seaborn.despine', 'sb.despine', (['fig_plots'], {'offset': '(3)', 'trim': '(False)'}), '(fig_plots, offset=3, trim=False)\n', (6663, 6696), True, 'import seaborn as sb\n'), ((6771, 6791), 'matplotlib.pyplot.close', 'plt.close', (['fig_plots'], {}), '(fig_plots)\n', (6780, 6791), True, 'import matplotlib.pyplot as plt\n'), ((6801, 6844), 'seaborn.despine', 'sb.despine', (['fig_early'], {'offset': '(3)', 'trim': '(False)'}), '(fig_early, offset=3, trim=False)\n', (6811, 6844), True, 'import seaborn as sb\n'), ((6919, 6939), 'matplotlib.pyplot.close', 'plt.close', (['fig_early'], {}), '(fig_early)\n', (6928, 6939), True, 'import matplotlib.pyplot as plt\n'), ((4477, 4536), 'pandas.concat', 'pd.concat', (['(early_licks_full, early_licks_0, early_licks_1)'], {}), '((early_licks_full, early_licks_0, early_licks_1))\n', (4486, 4536), True, 'import pandas as pd\n')] |
import numpy as np
from scipy.interpolate import LinearNDInterpolator, interp1d
from astropy import table
from astropy.table import Table, Column
import warnings
def get_track_meta(track, key="FeH"):
    """Return the metadata value stored under *key* for a track.

    Parameters
    ----------
    track : table-like
        Object exposing a ``meta`` mapping (e.g. an astropy Table).
    key : str
        Metadata key to look up (default ``"FeH"``).

    Raises
    ------
    AssertionError
        If *key* is not present in ``track.meta``.
    """
    # Membership test directly on the mapping instead of materializing
    # ``.keys()`` — same semantics, idiomatic.
    assert key in track.meta
    return track.meta[key]
def find_rank_1d(arr, val, sort=False):
    """Return the indices of the two consecutive elements of *arr* that
    bracket *val* (left element strictly below, right element at or above).

    Parameters
    ----------
    arr : 1d array
        Grid values; assumed monotonically increasing unless *sort* is True.
    val : float
        Value to bracket.
    sort : bool
        Sort a copy of *arr* first (the caller's array is not modified).

    Raises
    ------
    AssertionError
        If no bracketing pair exists (val outside the grid range).
    """
    if sort:
        arr = np.sort(arr)
    below = arr[:-1] < val
    at_or_above = arr[1:] >= val
    left = np.where(below & at_or_above)[0]
    assert len(left) > 0
    return np.hstack((left, left + 1))
def get_track_item_given_eeps(track, eeps):
    """Select the rows of *track* whose ``_eep`` value is one of *eeps*.

    Parameters
    ----------
    track : table-like
        Row-indexable table with an ``_eep`` column.
    eeps : iterable
        EEP values to keep.
    """
    mask = np.zeros(len(track), dtype=bool)
    for target in eeps:
        mask = mask | (track["_eep"] == target)
    return track[mask]
def calc_weight(arr, val, norm=1.):
    """Absolute distances of *arr* from *val*, rescaled so they sum to *norm*.

    Note: points farther from *val* get LARGER weight; callers invert
    with ``1 - calc_weight(...)`` to favor nearby points.
    """
    dist = np.abs(arr - val)
    scaled = dist * (norm / np.sum(dist))
    return np.array(scaled)
def table_linear_combination(t, weight):
    """Collapse table *t* to a single row: the weighted sum of each
    numeric column (non-numeric columns are dropped).

    Parameters
    ----------
    t : astropy.table.Table
        Input table.
    weight : array-like
        One weight per row of *t*.

    Returns
    -------
    astropy.table.Table
        Single-row table with one column per numeric column of *t*.
    """
    assert len(t) == len(weight)
    new_cols = []
    colnames = t.colnames
    ncols = len(colnames)
    for i in range(ncols):
        # np.issubdtype replaces the original ``in (np.int, np.float)``
        # test: those aliases were removed in NumPy 1.24 and raise
        # AttributeError on modern installations.
        if np.issubdtype(t.dtype[i], np.integer) or \
                np.issubdtype(t.dtype[i], np.floating):
            colname = colnames[i]
            new_cols.append(
                Column(np.array([np.sum(t[colname].data * weight)]), colname))
    return Table(new_cols)
class StarObject():
    """Lightweight record that exposes the single row of a table as
    plain attributes (one attribute per column)."""

    def __init__(self, t):
        # A StarObject represents exactly one star, i.e. one table row.
        assert len(t) == 1
        for name in t.colnames:
            setattr(self, name, t[name].data[0])
class TrackSet:
    """A set of stellar evolution tracks indexed by initial mass and [Fe/H].

    Each track is a table with (at least) the columns ``_eep``, ``_lgmass``,
    ``_feh`` and ``_lgage``, plus per-track metadata giving its initial mass
    and metallicity.  Provides neighbor lookup on the (mass, [Fe/H]) grid
    and tri-linear interpolation in (log mass, [Fe/H], EEP).
    """
    data = []
    eep_bounds = (1, 808)
    default_coord = ["_lgmass", "_feh", "_lgage", "_eep"]
    bci = None

    def __init__(self, tracks,
                 metadict=dict(minit="initial_mass",
                               feh="FEH",
                               eep="EEPS",
                               mbol="Mbol")):
        """Initialize the track set.

        Parameters
        ----------
        tracks : sequence of table-like
            The stellar tracks.
        metadict : dict
            Maps logical names (minit, feh, eep, mbol) to the metadata
            keys used by the track tables.
        """
        self.metadict = metadict
        self.data = np.array(tracks)
        # per-track grid coordinates read from metadata
        self.grid_minit = np.array(
            [get_track_meta(track, metadict["minit"]) for track in tracks])
        self.grid_feh = np.array(
            [get_track_meta(track, metadict["feh"]) for track in tracks])
        # first/last EEP actually present in each track (tracks may start
        # at different EEPs)
        self.grid_EEP0 = np.array([np.min(_["_eep"]) for _ in self.data])
        self.grid_EEP1 = np.array([np.max(_["_eep"]) for _ in self.data])
        self.u_minit = np.unique(self.grid_minit)
        self.u_feh = np.unique(self.grid_feh)
        self.min_minit = np.min(self.u_minit)
        self.max_minit = np.max(self.u_minit)
        self.min_feh = np.min(self.u_feh)
        self.max_feh = np.max(self.u_feh)
        self.min_eep = np.min(self.grid_EEP0)
        self.max_eep = np.max(self.grid_EEP1)

    def get_track4(self, mass_feh=(1.01, 0.01)):
        """Return the 4 neighboring tracks on a *structured* (rectangular)
        grid, or None if (minit, feh) is out of bounds."""
        # NOTE: np.float was removed in NumPy 1.24; use the builtin float.
        test_minit, test_feh = np.array(mass_feh, dtype=float)
        # assert Minit [Fe/H] in range
        try:
            assert self.min_minit < test_minit <= self.max_minit
            assert self.min_feh < test_feh <= self.max_feh
        except AssertionError:
            return None
        # 1. locate 4 tracks bracketing the test point on each axis
        ind_minit = find_rank_1d(self.u_minit, test_minit)
        ind_feh = find_rank_1d(self.u_feh, test_feh)
        val_minit = self.u_minit[ind_minit]
        val_feh = self.u_feh[ind_feh]
        ind_track = np.where(np.logical_and(
            (self.grid_minit == val_minit[0]) | (
                self.grid_minit == val_minit[1]),
            (self.grid_feh == val_feh[0]) | (self.grid_feh == val_feh[1])))[0]
        track4 = self.data[ind_track]
        return track4

    def get_track4_unstructured(self, mass_feh=(1.01, 0.01)):
        """Return the 4 neighboring tracks given an *unstructured* grid:
        the closest track in each of the four (mass, feh) quadrants around
        the test point, or None if any quadrant is empty."""
        test_minit, test_feh = np.array(mass_feh, dtype=float)
        # squared distance in (log mass, [Fe/H]) space
        d_minit_feh = (np.log10(self.grid_minit)-np.log10(test_minit))**2. + \
            (self.grid_feh - test_feh) ** 2.
        mask00 = (self.grid_minit < test_minit) & (self.grid_feh < test_feh)
        mask01 = (self.grid_minit < test_minit) & (self.grid_feh >= test_feh)
        mask10 = (self.grid_minit >= test_minit) & (self.grid_feh < test_feh)
        mask11 = (self.grid_minit >= test_minit) & (self.grid_feh >= test_feh)
        if np.any(np.array([np.sum(mask00), np.sum(mask01),
                            np.sum(mask10), np.sum(mask11)]) == 0):
            return None
        # nearest track within each quadrant (masked entries excluded)
        ind00 = np.argmin(np.ma.MaskedArray(d_minit_feh, ~mask00))
        ind01 = np.argmin(np.ma.MaskedArray(d_minit_feh, ~mask01))
        ind10 = np.argmin(np.ma.MaskedArray(d_minit_feh, ~mask10))
        ind11 = np.argmin(np.ma.MaskedArray(d_minit_feh, ~mask11))
        return self.data[[ind00, ind01, ind10, ind11]]

    def interp_mass_feh_eep(self, interp_colname="_lgage",
                            mfe=(1.01, 0.01, 503.2),
                            lndi=True, debug=False, raise_error=False):
        """Interpolate track quantities at (initial mass, [Fe/H], EEP).

        Parameters
        ----------
        interp_colname : str or list
            Column(s) to interpolate; int entries in a list return the
            corresponding test coordinate instead.
        mfe : tuple
            (initial mass, [Fe/H], EEP) test point.
        lndi : bool
            Use LinearNDInterpolator; otherwise use manual weights and
            return a single-row table.
        debug : bool
            With lndi=False, return the weights instead of the result.
        raise_error : bool
            Raise on out-of-bounds input instead of returning NaN.
        """
        test_minit, test_feh, test_eep = np.array(mfe, dtype=float)
        # 1. assert Minit [Fe/H] in range
        try:
            assert self.min_minit < test_minit <= self.max_minit
            assert self.min_feh < test_feh <= self.max_feh
        except AssertionError as ae:
            if not raise_error:
                return np.nan
            else:
                # Fixed: the original called the caught exception instance
                # (``raise ae("...")``), which raised TypeError instead.
                raise AssertionError(
                    "The test values are not in bounds!") from ae
        # 2. locate the 4 neighboring tracks
        track4 = self.get_track4_unstructured((test_minit, test_feh))
        if track4 is None:
            if raise_error:
                raise ValueError("Bad test values!")
            else:
                return np.nan
        # EEP range common to all four tracks
        eep_maxmin = np.max([_["_eep"][0] for _ in track4])
        eep_minmax = np.min([_["_eep"][-1] for _ in track4])
        # 3. assert EEP in range
        try:
            assert eep_maxmin < test_eep <= eep_minmax
        except AssertionError as ae:
            if not raise_error:
                return np.nan
            else:
                raise AssertionError("EEP value is not in bounds!") from ae
        # 4. locate EEP bracket
        eep_arr = np.arange(eep_maxmin, eep_minmax + 1)
        ind_eep = find_rank_1d(eep_arr, test_eep)
        val_eep = eep_arr[ind_eep]
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # 8 corner rows of the interpolation box (4 tracks x 2 EEPs)
            track_box = table.vstack([
                get_track_item_given_eeps(track, val_eep) for track in track4])
        # 5. interpolate
        if not lndi:
            # manual tri-linear weights: 1 - normalized distance per axis
            w_mfe = (1 - calc_weight(track_box["_lgmass"], np.log10(test_minit), 4)) * \
                (1 - calc_weight(track_box["_feh"], test_feh, 4)) * \
                (1 - calc_weight(track_box["_eep"], test_eep, 4))
            if debug:
                return w_mfe
            star_result = table_linear_combination(track_box, w_mfe)
            return star_result
        elif type(interp_colname) is not list:
            points = np.array(track_box["_lgmass", "_feh", "_eep"].to_pandas())
            values = track_box[interp_colname].data
            lndi = LinearNDInterpolator(points, values)
            test_points = np.array((np.log10(test_minit), test_feh, test_eep))
            return lndi(test_points)[0]
        elif type(interp_colname) is list:
            points = np.array(track_box["_lgmass", "_feh", "_eep"].to_pandas())
            test_points = np.array((np.log10(test_minit), test_feh, test_eep))
            results = []
            for _interp_colname in interp_colname:
                if type(_interp_colname) is int:
                    # directly return the input value if int
                    results.append(test_points[_interp_colname])
                else:
                    values = track_box[_interp_colname].data
                    results.append(
                        LinearNDInterpolator(points, values)(test_points)[0])
            return np.array(results)

    def calc_dlgagedeep(self, track, deep=0.1):
        """Append a ``dlgagedeep`` column: d(log age)/d(EEP) estimated by
        central differences of width *deep*."""
        I = interp1d(track["_eep"], track["_lgage"], kind="linear",
                     bounds_error=False, fill_value=-np.inf)
        dlgagedeep = (I(track["_eep"] + deep) - I(track["_eep"] - deep)) \
            / deep / 2.
        track.add_column(Column(dlgagedeep, "dlgagedeep"))
        return track

    def calc_dlgagedeep_for_all_tracks(self, deep=0.1):
        """Compute/refresh the ``dlgagedeep`` column on every track."""
        for track in self.data:
            I = interp1d(track["_eep"], track["_lgage"], kind="linear",
                         bounds_error=False, fill_value=-np.inf)
            dlgagedeep = (I(track["_eep"] + deep) -
                          I(track["_eep"] - deep)) / deep / 2.
            if "dlgagedeep" not in track.colnames:
                track.add_column(Column(dlgagedeep, "dlgagedeep"))
            else:
                track["dlgagedeep"] = dlgagedeep
        return

    def get_track(self, minit, feh):
        """Return the track closest to (minit, feh) in Euclidean distance."""
        ind_mindist = np.argmin(
            (self.grid_minit - minit) ** 2. + (self.grid_feh - feh) ** 2.)
        return self.data[ind_mindist]

    def get_track_minit(self, minit):
        """Return all tracks at the grid mass closest to *minit*."""
        chosen_minit = self.u_minit[np.argmin((self.u_minit - minit) ** 2)]
        ind_minit = self.grid_minit == chosen_minit
        return self.data[ind_minit]

    def get_track_feh(self, feh):
        """Return all tracks at the grid [Fe/H] closest to *feh*."""
        chosen_feh = self.u_feh[np.argmin((self.u_feh - feh) ** 2)]
        ind_feh = self.grid_feh == chosen_feh
        return self.data[ind_feh]

    def dtdeep(self):
        """Calculate dt/dEEP for each track (not yet implemented)."""
        pass
def lnprior(minit, feh, age):
    """Log-prior over (initial mass, [Fe/H], age).

    Currently a flat prior: always returns 0.  The dt/dEEP-based
    weighting hinted at below is not implemented yet.
    """
    # 1. determine deep = dt deep/
    return 0
return 0 | [
"numpy.log10",
"astropy.table.Table",
"numpy.hstack",
"scipy.interpolate.interp1d",
"numpy.array",
"numpy.arange",
"numpy.where",
"numpy.sort",
"numpy.max",
"numpy.min",
"warnings.simplefilter",
"numpy.argmin",
"numpy.abs",
"scipy.interpolate.LinearNDInterpolator",
"astropy.table.Column"... | [((554, 579), 'numpy.hstack', 'np.hstack', (['(sub, sub + 1)'], {}), '((sub, sub + 1))\n', (563, 579), True, 'import numpy as np\n'), ((894, 911), 'numpy.abs', 'np.abs', (['(arr - val)'], {}), '(arr - val)\n', (900, 911), True, 'import numpy as np\n'), ((959, 975), 'numpy.array', 'np.array', (['weight'], {}), '(weight)\n', (967, 975), True, 'import numpy as np\n'), ((1435, 1450), 'astropy.table.Table', 'Table', (['new_cols'], {}), '(new_cols)\n', (1440, 1450), False, 'from astropy.table import Table, Column\n'), ((446, 458), 'numpy.sort', 'np.sort', (['arr'], {}), '(arr)\n', (453, 458), True, 'import numpy as np\n'), ((469, 514), 'numpy.where', 'np.where', (['((arr[:-1] < val) & (arr[1:] >= val))'], {}), '((arr[:-1] < val) & (arr[1:] >= val))\n', (477, 514), True, 'import numpy as np\n'), ((933, 947), 'numpy.sum', 'np.sum', (['weight'], {}), '(weight)\n', (939, 947), True, 'import numpy as np\n'), ((2167, 2183), 'numpy.array', 'np.array', (['tracks'], {}), '(tracks)\n', (2175, 2183), True, 'import numpy as np\n'), ((2704, 2730), 'numpy.unique', 'np.unique', (['self.grid_minit'], {}), '(self.grid_minit)\n', (2713, 2730), True, 'import numpy as np\n'), ((2752, 2776), 'numpy.unique', 'np.unique', (['self.grid_feh'], {}), '(self.grid_feh)\n', (2761, 2776), True, 'import numpy as np\n'), ((2803, 2823), 'numpy.min', 'np.min', (['self.u_minit'], {}), '(self.u_minit)\n', (2809, 2823), True, 'import numpy as np\n'), ((2849, 2869), 'numpy.max', 'np.max', (['self.u_minit'], {}), '(self.u_minit)\n', (2855, 2869), True, 'import numpy as np\n'), ((2893, 2911), 'numpy.min', 'np.min', (['self.u_feh'], {}), '(self.u_feh)\n', (2899, 2911), True, 'import numpy as np\n'), ((2935, 2953), 'numpy.max', 'np.max', (['self.u_feh'], {}), '(self.u_feh)\n', (2941, 2953), True, 'import numpy as np\n'), ((2977, 2999), 'numpy.min', 'np.min', (['self.grid_EEP0'], {}), '(self.grid_EEP0)\n', (2983, 2999), True, 'import numpy as np\n'), ((3023, 3045), 'numpy.max', 
'np.max', (['self.grid_EEP1'], {}), '(self.grid_EEP1)\n', (3029, 3045), True, 'import numpy as np\n'), ((3183, 3217), 'numpy.array', 'np.array', (['mass_feh'], {'dtype': 'np.float'}), '(mass_feh, dtype=np.float)\n', (3191, 3217), True, 'import numpy as np\n'), ((4136, 4170), 'numpy.array', 'np.array', (['mass_feh'], {'dtype': 'np.float'}), '(mass_feh, dtype=np.float)\n', (4144, 4170), True, 'import numpy as np\n'), ((5320, 5349), 'numpy.array', 'np.array', (['mfe'], {'dtype': 'np.float'}), '(mfe, dtype=np.float)\n', (5328, 5349), True, 'import numpy as np\n'), ((6453, 6491), 'numpy.max', 'np.max', (["[_['_eep'][0] for _ in track4]"], {}), "([_['_eep'][0] for _ in track4])\n", (6459, 6491), True, 'import numpy as np\n'), ((6513, 6552), 'numpy.min', 'np.min', (["[_['_eep'][-1] for _ in track4]"], {}), "([_['_eep'][-1] for _ in track4])\n", (6519, 6552), True, 'import numpy as np\n'), ((6871, 6908), 'numpy.arange', 'np.arange', (['eep_maxmin', '(eep_minmax + 1)'], {}), '(eep_maxmin, eep_minmax + 1)\n', (6880, 6908), True, 'import numpy as np\n'), ((9204, 9303), 'scipy.interpolate.interp1d', 'interp1d', (["track['_eep']", "track['_lgage']"], {'kind': '"""linear"""', 'bounds_error': '(False)', 'fill_value': '(-np.inf)'}), "(track['_eep'], track['_lgage'], kind='linear', bounds_error=False,\n fill_value=-np.inf)\n", (9212, 9303), False, 'from scipy.interpolate import LinearNDInterpolator, interp1d\n'), ((10165, 10239), 'numpy.argmin', 'np.argmin', (['((self.grid_minit - minit) ** 2.0 + (self.grid_feh - feh) ** 2.0)'], {}), '((self.grid_minit - minit) ** 2.0 + (self.grid_feh - feh) ** 2.0)\n', (10174, 10239), True, 'import numpy as np\n'), ((4796, 4835), 'numpy.ma.MaskedArray', 'np.ma.MaskedArray', (['d_minit_feh', '(~mask00)'], {}), '(d_minit_feh, ~mask00)\n', (4813, 4835), True, 'import numpy as np\n'), ((4863, 4902), 'numpy.ma.MaskedArray', 'np.ma.MaskedArray', (['d_minit_feh', '(~mask01)'], {}), '(d_minit_feh, ~mask01)\n', (4880, 4902), True, 'import numpy as np\n'), 
((4930, 4969), 'numpy.ma.MaskedArray', 'np.ma.MaskedArray', (['d_minit_feh', '(~mask10)'], {}), '(d_minit_feh, ~mask10)\n', (4947, 4969), True, 'import numpy as np\n'), ((4997, 5036), 'numpy.ma.MaskedArray', 'np.ma.MaskedArray', (['d_minit_feh', '(~mask11)'], {}), '(d_minit_feh, ~mask11)\n', (5014, 5036), True, 'import numpy as np\n'), ((7008, 7033), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (7031, 7033), False, 'import warnings\n'), ((7047, 7078), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (7068, 7078), False, 'import warnings\n'), ((9454, 9486), 'astropy.table.Column', 'Column', (['dlgagedeep', '"""dlgagedeep"""'], {}), "(dlgagedeep, 'dlgagedeep')\n", (9460, 9486), False, 'from astropy.table import Table, Column\n'), ((9615, 9714), 'scipy.interpolate.interp1d', 'interp1d', (["track['_eep']", "track['_lgage']"], {'kind': '"""linear"""', 'bounds_error': '(False)', 'fill_value': '(-np.inf)'}), "(track['_eep'], track['_lgage'], kind='linear', bounds_error=False,\n fill_value=-np.inf)\n", (9623, 9714), False, 'from scipy.interpolate import LinearNDInterpolator, interp1d\n'), ((10411, 10449), 'numpy.argmin', 'np.argmin', (['((self.u_minit - minit) ** 2)'], {}), '((self.u_minit - minit) ** 2)\n', (10420, 10449), True, 'import numpy as np\n'), ((10651, 10685), 'numpy.argmin', 'np.argmin', (['((self.u_feh - feh) ** 2)'], {}), '((self.u_feh - feh) ** 2)\n', (10660, 10685), True, 'import numpy as np\n'), ((2567, 2584), 'numpy.min', 'np.min', (["_['_eep']"], {}), "(_['_eep'])\n", (2573, 2584), True, 'import numpy as np\n'), ((2641, 2658), 'numpy.max', 'np.max', (["_['_eep']"], {}), "(_['_eep'])\n", (2647, 2658), True, 'import numpy as np\n'), ((3710, 3866), 'numpy.logical_and', 'np.logical_and', (['((self.grid_minit == val_minit[0]) | (self.grid_minit == val_minit[1]))', '((self.grid_feh == val_feh[0]) | (self.grid_feh == val_feh[1]))'], {}), '((self.grid_minit == val_minit[0]) | (self.grid_minit 
==\n val_minit[1]), (self.grid_feh == val_feh[0]) | (self.grid_feh ==\n val_feh[1]))\n', (3724, 3866), True, 'import numpy as np\n'), ((8189, 8225), 'scipy.interpolate.LinearNDInterpolator', 'LinearNDInterpolator', (['points', 'values'], {}), '(points, values)\n', (8209, 8225), False, 'from scipy.interpolate import LinearNDInterpolator, interp1d\n'), ((4195, 4220), 'numpy.log10', 'np.log10', (['self.grid_minit'], {}), '(self.grid_minit)\n', (4203, 4220), True, 'import numpy as np\n'), ((4221, 4241), 'numpy.log10', 'np.log10', (['test_minit'], {}), '(test_minit)\n', (4229, 4241), True, 'import numpy as np\n'), ((9125, 9142), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (9133, 9142), True, 'import numpy as np\n'), ((9935, 9967), 'astropy.table.Column', 'Column', (['dlgagedeep', '"""dlgagedeep"""'], {}), "(dlgagedeep, 'dlgagedeep')\n", (9941, 9967), False, 'from astropy.table import Table, Column\n'), ((4646, 4660), 'numpy.sum', 'np.sum', (['mask00'], {}), '(mask00)\n', (4652, 4660), True, 'import numpy as np\n'), ((4662, 4676), 'numpy.sum', 'np.sum', (['mask01'], {}), '(mask01)\n', (4668, 4676), True, 'import numpy as np\n'), ((4706, 4720), 'numpy.sum', 'np.sum', (['mask10'], {}), '(mask10)\n', (4712, 4720), True, 'import numpy as np\n'), ((4722, 4736), 'numpy.sum', 'np.sum', (['mask11'], {}), '(mask11)\n', (4728, 4736), True, 'import numpy as np\n'), ((8262, 8282), 'numpy.log10', 'np.log10', (['test_minit'], {}), '(test_minit)\n', (8270, 8282), True, 'import numpy as np\n'), ((1378, 1410), 'numpy.sum', 'np.sum', (['(t[colname].data * weight)'], {}), '(t[colname].data * weight)\n', (1384, 1410), True, 'import numpy as np\n'), ((7579, 7599), 'numpy.log10', 'np.log10', (['test_minit'], {}), '(test_minit)\n', (7587, 7599), True, 'import numpy as np\n'), ((8615, 8635), 'numpy.log10', 'np.log10', (['test_minit'], {}), '(test_minit)\n', (8623, 8635), True, 'import numpy as np\n'), ((9052, 9088), 'scipy.interpolate.LinearNDInterpolator', 
'LinearNDInterpolator', (['points', 'values'], {}), '(points, values)\n', (9072, 9088), False, 'from scipy.interpolate import LinearNDInterpolator, interp1d\n')] |
#!/usr/bin/env python
# Family size distribution of tags which were aligned to the reference genome
#
# Author: <NAME> & <NAME>, Johannes-Kepler University Linz (Austria)
# Contact: <EMAIL>
#
# Takes at least one TABULAR file with tags before the alignment to the SSCS,
# a BAM file with tags of reads that overlap the regions of the reference genome and
# an optional BED file with chromosome, start and stop position of the regions as input.
# The program produces a plot which shows the distribution of family sizes of the tags from the input files and
# a tabular file with the data of the plot.
# USAGE: python FSD_regions.py --inputFile filenameSSCS --inputName1 filenameSSCS
# --bamFile DCSbamFile --rangesFile BEDfile --output_tabular outptufile_name_tabular
# --output_pdf outputfile_name_pdf
import argparse
import collections
import os.path
import re
import sys
import matplotlib.pyplot as plt
import numpy as np
import pysam
from matplotlib.backends.backend_pdf import PdfPages
plt.switch_backend('agg')
def readFileReferenceFree(file, delim):
    """Read a delimiter-separated text file into a string ndarray.

    Lines starting with '#' are treated as comments and skipped.
    """
    with open(file, 'r') as source:
        return np.genfromtxt(source, skip_header=0, delimiter=delim,
                             comments='#', dtype=str)
def make_argparser():
    """Build the command-line parser for the family-size-by-region script."""
    p = argparse.ArgumentParser(description='Family Size Distribution of tags which were aligned to regions of the reference genome')
    p.add_argument('--inputFile', help='Tabular File with three columns: ab or ba, tag and family size.')
    p.add_argument('--inputName1')
    p.add_argument('--bamFile', help='BAM file with aligned reads.')
    p.add_argument('--rangesFile', default=None, help='BED file with chromosome, start and stop positions.')
    p.add_argument('--output_pdf', default="data.pdf", type=str, help='Name of the pdf and tabular file.')
    p.add_argument('--output_tabular', default="data.tabular", type=str, help='Name of the pdf and tabular file.')
    return p
def compare_read_families_refGenome(argv):
    """Plot the family-size distribution of tags aligned to reference regions.

    Reads a tabular file of (family size, tag, ab/ba strand) rows, collects
    tags of mapped reads from a BAM file (optionally restricted to the
    regions of a BED file), draws a per-region family-size histogram into a
    PDF and writes the underlying counts to a tabular output file.

    Args:
        argv: full command-line argument vector (argv[0] is skipped).
    """
    parser = make_argparser()
    args = parser.parse_args(argv[1:])
    firstFile = args.inputFile
    name1 = args.inputName1
    name1 = name1.split(".tabular")[0]
    bamFile = args.bamFile
    rangesFile = args.rangesFile
    title_file = args.output_pdf
    title_file2 = args.output_tabular
    sep = "\t"
    with open(title_file2, "w") as output_file, PdfPages(title_file) as pdf:
        data_array = readFileReferenceFree(firstFile, "\t")
        # pysam.fetch() requires an index; build one on the fly if missing.
        bamIndex = f"{bamFile}.bai"
        if not os.path.exists(bamIndex):
            print(f"Info: Generating BAM index in {bamIndex}")
            pysam.index(bamFile)
        bam = pysam.AlignmentFile(bamFile, "rb")
        # Maps region label -> list of tag sequences of mapped reads.
        qname_dict = collections.OrderedDict()
        if rangesFile is not None:
            with open(rangesFile, 'r') as regs:
                range_array = np.genfromtxt(regs, skip_header=0, delimiter='\t', comments='#', dtype=str)
            if range_array.ndim == 0:
                print("Error: file has 0 lines")
                exit(2)
            if range_array.ndim == 1:
                # Single-line BED file: wrap scalars into one-element lists.
                chrList = range_array[0]
                start_posList = range_array[1].astype(int)
                stop_posList = range_array[2].astype(int)
                chrList = [chrList.tolist()]
                start_posList = [start_posList.tolist()]
                stop_posList = [stop_posList.tolist()]
            else:
                chrList = range_array[:, 0]
                start_posList = range_array[:, 1].astype(int)
                stop_posList = range_array[:, 2].astype(int)
            if len(start_posList) != len(stop_posList):
                print("start_positions and end_positions do not have the same length")
                exit(3)
            chrList = np.array(chrList)
            start_posList = np.array(start_posList).astype(int)
            stop_posList = np.array(stop_posList).astype(int)
            for chr, start_pos, stop_pos in zip(chrList, start_posList, stop_posList):
                chr_start_stop = "{}_{}_{}".format(chr, start_pos, stop_pos)
                qname_dict[chr_start_stop] = []
                for read in bam.fetch(chr, start_pos, stop_pos):
                    if not read.is_unmapped:
                        # Query names may carry a "_suffix"; keep only the tag part.
                        if re.search('_', read.query_name):
                            tags = re.split('_', read.query_name)[0]
                        else:
                            tags = read.query_name
                        qname_dict[chr_start_stop].append(tags)
        else:
            # No BED file: group tags by the reference contig they map to.
            for read in bam.fetch():
                if not read.is_unmapped:
                    if re.search(r'_', read.query_name):
                        tags = re.split('_', read.query_name)[0]
                    else:
                        tags = read.query_name
                    if read.reference_name not in qname_dict:
                        qname_dict[read.reference_name] = [tags]
                    else:
                        qname_dict[read.reference_name].append(tags)
        # Columns of the input table: 0 = family size, 1 = tag, 2 = ab/ba.
        seq = np.array(data_array[:, 1])
        tags = np.array(data_array[:, 2])
        quant = np.array(data_array[:, 0]).astype(int)
        group = np.array(list(qname_dict.keys()))
        all_ab = seq[np.where(tags == "ab")[0]]
        all_ba = seq[np.where(tags == "ba")[0]]
        quant_ab = quant[np.where(tags == "ab")[0]]
        quant_ba = quant[np.where(tags == "ba")[0]]
        seqDic_ab = dict(zip(all_ab, quant_ab))
        seqDic_ba = dict(zip(all_ba, quant_ba))
        lst_ab = []
        lst_ba = []
        quantAfterRegion = []
        length_regions = 0
        for i in group:
            lst_ab_r = []
            lst_ba_r = []
            seq_mut = qname_dict[i]
            if rangesFile is None:
                # Per-contig grouping can contain duplicates; count each tag once.
                seq_mut, seqMut_index = np.unique(np.array(seq_mut), return_index=True)
            length_regions = length_regions + len(seq_mut) * 2
            # NOTE: tags absent from one strand table yield None counts here —
            # assumes every region tag appears in both ab and ba tables.
            for r in seq_mut:
                count_ab = seqDic_ab.get(r)
                count_ba = seqDic_ba.get(r)
                lst_ab_r.append(count_ab)
                lst_ab.append(count_ab)
                lst_ba_r.append(count_ba)
                lst_ba.append(count_ba)
            dataAB = np.array(lst_ab_r)
            dataBA = np.array(lst_ba_r)
            # Family sizes above 20 are clipped into one ">20" bin (value 22).
            bigFamilies = np.where(dataAB > 20)[0]
            dataAB[bigFamilies] = 22
            bigFamilies = np.where(dataBA > 20)[0]
            dataBA[bigFamilies] = 22
            quantAll = np.concatenate((dataAB, dataBA))
            quantAfterRegion.append(quantAll)
        quant_ab = np.array(lst_ab)
        quant_ba = np.array(lst_ba)
        maximumX = np.amax(np.concatenate(quantAfterRegion))
        minimumX = np.amin(np.concatenate(quantAfterRegion))
        # PLOT
        plt.rc('figure', figsize=(11.69, 8.27))  # A4 format
        plt.rcParams['axes.facecolor'] = "E0E0E0"  # grey background color
        plt.rcParams['xtick.labelsize'] = 14
        plt.rcParams['ytick.labelsize'] = 14
        plt.rcParams['patch.edgecolor'] = "black"
        fig = plt.figure()
        plt.subplots_adjust(bottom=0.3)
        colors = ["#6E6E6E", "#0431B4", "#5FB404", "#B40431", "#F4FA58", "#DF7401", "#81DAF5"]
        col = []
        for i in range(0, len(group)):
            col.append(colors[i])
        counts = plt.hist(quantAfterRegion, bins=range(minimumX, maximumX + 1), stacked=False, label=group,
                          align="left", alpha=1, color=col, edgecolor="black", linewidth=1)
        ticks = np.arange(minimumX - 1, maximumX, 1)
        ticks1 = [str(_) for _ in ticks]
        ticks1[len(ticks1) - 1] = ">20"
        plt.xticks(np.array(ticks), ticks1)
        count = np.bincount([int(_) for _ in quant_ab])  # original counts
        legend = "max. family size:\nabsolute frequency:\nrelative frequency:\n\ntotal nr. of reads:\n(before SSCS building)"
        plt.text(0.15, 0.085, legend, size=11, transform=plt.gcf().transFigure)
        legend = "AB\n{}\n{}\n{:.5f}\n\n{:,}".format(max(map(int, quant_ab)), count[len(count) - 1], float(count[len(count) - 1]) / sum(count), sum(np.array(data_array[:, 0]).astype(int)))
        plt.text(0.35, 0.105, legend, size=11, transform=plt.gcf().transFigure)
        count2 = np.bincount([int(_) for _ in quant_ba])  # original counts
        legend = "BA\n{}\n{}\n{:.5f}" \
            .format(max(map(int, quant_ba)), count2[len(count2) - 1], float(count2[len(count2) - 1]) / sum(count2))
        plt.text(0.45, 0.1475, legend, size=11, transform=plt.gcf().transFigure)
        plt.text(0.55, 0.2125, "total nr. of tags:", size=11, transform=plt.gcf().transFigure)
        plt.text(0.8, 0.2125, "{:,} ({:,})".format(length_regions, length_regions / 2), size=11,
                 transform=plt.gcf().transFigure)
        legend4 = "* In the plot, both family sizes of the ab and ba strands were used.\nWhereas the total numbers indicate only the single count of the tags per region.\n"
        plt.text(0.1, 0.01, legend4, size=11, transform=plt.gcf().transFigure)
        space = 0
        # BUGFIX: this loop variable used to be named `count`, which clobbered
        # the AB bincount computed above and corrupted the absolute/relative
        # frequency rows written to the tabular output further down.
        for i, region_quants in zip(group, quantAfterRegion):
            plt.text(0.55, 0.15 - space, "{}:\n".format(i), size=11, transform=plt.gcf().transFigure)
            plt.text(0.8, 0.15 - space, "{:,}\n".format(len(region_quants) / 2), size=11, transform=plt.gcf().transFigure)
            space = space + 0.02
        plt.legend(loc='upper right', fontsize=14, bbox_to_anchor=(0.9, 1), frameon=True)
        plt.xlabel("Family size", fontsize=14)
        plt.ylabel("Absolute Frequency", fontsize=14)
        plt.grid(b=True, which="major", color="#424242", linestyle=":")
        plt.margins(0.01, None)
        pdf.savefig(fig, bbox_inch="tight")
        plt.close()
        # Write the same statistics to the tabular output file.
        output_file.write("Dataset:{}{}\n".format(sep, name1))
        output_file.write("{}AB{}BA\n".format(sep, sep))
        output_file.write("max. family size:{}{}{}{}\n".format(sep, max(map(int, quant_ab)), sep, max(map(int, quant_ba))))
        output_file.write("absolute frequency:{}{}{}{}\n".format(sep, count[len(count) - 1], sep, count2[len(count2) - 1]))
        output_file.write("relative frequency:{}{:.3f}{}{:.3f}\n\n".format(sep, float(count[len(count) - 1]) / sum(count), sep, float(count2[len(count2) - 1]) / sum(count2)))
        output_file.write("total nr. of reads{}{}\n".format(sep, sum(np.array(data_array[:, 0]).astype(int))))
        output_file.write("total nr. of tags{}{} ({})\n".format(sep, length_regions, length_regions / 2))
        output_file.write("\n\nValues from family size distribution\n")
        output_file.write("{}".format(sep))
        for i in group:
            output_file.write("{}{}".format(i, sep))
        output_file.write("\n")
        j = 0
        # counts comes from plt.hist: counts[0] = histogram values (per group
        # when multiple regions), counts[1] = bin edges.
        for fs in counts[1][0:len(counts[1]) - 1]:
            if fs == 21:
                fs = ">20"
            else:
                fs = "={}".format(fs)
            output_file.write("FS{}{}".format(fs, sep))
            if len(group) == 1:
                output_file.write("{}{}".format(int(counts[0][j]), sep))
            else:
                for n in range(len(group)):
                    output_file.write("{}{}".format(int(counts[0][n][j]), sep))
            output_file.write("\n")
            j += 1
        output_file.write("sum{}".format(sep))
        if len(group) == 1:
            output_file.write("{}{}".format(int(sum(counts[0])), sep))
        else:
            for i in counts[0]:
                output_file.write("{}{}".format(int(sum(i)), sep))
        output_file.write("\n")
        output_file.write("\n\nIn the plot, both family sizes of the ab and ba strands were used.\nWhereas the total numbers indicate only the single count of the tags per region.\n")
        output_file.write("Region{}total nr. of tags per region\n".format(sep))
        for i, region_quants in zip(group, quantAfterRegion):
            output_file.write("{}{}{}\n".format(i, sep, len(region_quants) / 2))
    print("Files successfully created!")
if __name__ == '__main__':
sys.exit(compare_read_families_refGenome(sys.argv))
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"pysam.AlignmentFile",
"numpy.array",
"matplotlib.pyplot.switch_backend",
"numpy.genfromtxt",
"numpy.arange",
"matplotlib.pyplot.margins",
"re.search",
"re.split",
"argparse.ArgumentParser",
"numpy.where",
"matplotlib.pyplot.xlabel",
"m... | [((995, 1020), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (1013, 1020), True, 'import matplotlib.pyplot as plt\n'), ((1262, 1397), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Family Size Distribution of tags which were aligned to regions of the reference genome"""'}), "(description=\n 'Family Size Distribution of tags which were aligned to regions of the reference genome'\n )\n", (1285, 1397), False, 'import argparse\n'), ((1120, 1198), 'numpy.genfromtxt', 'np.genfromtxt', (['dest_f'], {'skip_header': '(0)', 'delimiter': 'delim', 'comments': '"""#"""', 'dtype': 'str'}), "(dest_f, skip_header=0, delimiter=delim, comments='#', dtype=str)\n", (1133, 1198), True, 'import numpy as np\n'), ((2386, 2406), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['title_file'], {}), '(title_file)\n', (2394, 2406), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((2663, 2697), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['bamFile', '"""rb"""'], {}), "(bamFile, 'rb')\n", (2682, 2697), False, 'import pysam\n'), ((2719, 2744), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (2742, 2744), False, 'import collections\n'), ((5043, 5069), 'numpy.array', 'np.array', (['data_array[:, 1]'], {}), '(data_array[:, 1])\n', (5051, 5069), True, 'import numpy as np\n'), ((5085, 5111), 'numpy.array', 'np.array', (['data_array[:, 2]'], {}), '(data_array[:, 2])\n', (5093, 5111), True, 'import numpy as np\n'), ((6573, 6589), 'numpy.array', 'np.array', (['lst_ab'], {}), '(lst_ab)\n', (6581, 6589), True, 'import numpy as np\n'), ((6609, 6625), 'numpy.array', 'np.array', (['lst_ba'], {}), '(lst_ba)\n', (6617, 6625), True, 'import numpy as np\n'), ((6773, 6812), 'matplotlib.pyplot.rc', 'plt.rc', (['"""figure"""'], {'figsize': '(11.69, 8.27)'}), "('figure', figsize=(11.69, 8.27))\n", (6779, 6812), True, 'import matplotlib.pyplot as plt\n'), ((7055, 7067), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7065, 7067), True, 'import matplotlib.pyplot as plt\n'), ((7076, 7107), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.3)'}), '(bottom=0.3)\n', (7095, 7107), True, 'import matplotlib.pyplot as plt\n'), ((7512, 7548), 'numpy.arange', 'np.arange', (['(minimumX - 1)', 'maximumX', '(1)'], {}), '(minimumX - 1, maximumX, 1)\n', (7521, 7548), True, 'import numpy as np\n'), ((9370, 9456), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'fontsize': '(14)', 'bbox_to_anchor': '(0.9, 1)', 'frameon': '(True)'}), "(loc='upper right', fontsize=14, bbox_to_anchor=(0.9, 1), frameon\n =True)\n", (9380, 9456), True, 'import matplotlib.pyplot as plt\n'), ((9460, 9498), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Family size"""'], {'fontsize': '(14)'}), "('Family size', fontsize=14)\n", (9470, 9498), True, 'import matplotlib.pyplot as plt\n'), ((9507, 9552), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Absolute Frequency"""'], {'fontsize': '(14)'}), "('Absolute Frequency', fontsize=14)\n", (9517, 9552), True, 'import matplotlib.pyplot as plt\n'), ((9561, 9624), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'which': '"""major"""', 'color': '"""#424242"""', 'linestyle': '""":"""'}), "(b=True, which='major', color='#424242', linestyle=':')\n", (9569, 9624), True, 'import matplotlib.pyplot as plt\n'), ((9633, 9656), 'matplotlib.pyplot.margins', 'plt.margins', (['(0.01)', 'None'], {}), '(0.01, None)\n', (9644, 9656), True, 'import matplotlib.pyplot as plt\n'), ((9710, 9721), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9719, 9721), True, 'import matplotlib.pyplot as plt\n'), ((2627, 2647), 'pysam.index', 'pysam.index', (['bamFile'], {}), '(bamFile)\n', (2638, 2647), False, 'import pysam\n'), ((3777, 3794), 'numpy.array', 'np.array', (['chrList'], {}), '(chrList)\n', (3785, 3794), True, 'import numpy as np\n'), ((6215, 6233), 
'numpy.array', 'np.array', (['lst_ab_r'], {}), '(lst_ab_r)\n', (6223, 6233), True, 'import numpy as np\n'), ((6255, 6273), 'numpy.array', 'np.array', (['lst_ba_r'], {}), '(lst_ba_r)\n', (6263, 6273), True, 'import numpy as np\n'), ((6474, 6506), 'numpy.concatenate', 'np.concatenate', (['(dataAB, dataBA)'], {}), '((dataAB, dataBA))\n', (6488, 6506), True, 'import numpy as np\n'), ((6654, 6686), 'numpy.concatenate', 'np.concatenate', (['quantAfterRegion'], {}), '(quantAfterRegion)\n', (6668, 6686), True, 'import numpy as np\n'), ((6715, 6747), 'numpy.concatenate', 'np.concatenate', (['quantAfterRegion'], {}), '(quantAfterRegion)\n', (6729, 6747), True, 'import numpy as np\n'), ((7650, 7665), 'numpy.array', 'np.array', (['ticks'], {}), '(ticks)\n', (7658, 7665), True, 'import numpy as np\n'), ((2859, 2934), 'numpy.genfromtxt', 'np.genfromtxt', (['regs'], {'skip_header': '(0)', 'delimiter': '"""\t"""', 'comments': '"""#"""', 'dtype': 'str'}), "(regs, skip_header=0, delimiter='\\t', comments='#', dtype=str)\n", (2872, 2934), True, 'import numpy as np\n'), ((5128, 5154), 'numpy.array', 'np.array', (['data_array[:, 0]'], {}), '(data_array[:, 0])\n', (5136, 5154), True, 'import numpy as np\n'), ((5239, 5261), 'numpy.where', 'np.where', (["(tags == 'ab')"], {}), "(tags == 'ab')\n", (5247, 5261), True, 'import numpy as np\n'), ((5287, 5309), 'numpy.where', 'np.where', (["(tags == 'ba')"], {}), "(tags == 'ba')\n", (5295, 5309), True, 'import numpy as np\n'), ((5339, 5361), 'numpy.where', 'np.where', (["(tags == 'ab')"], {}), "(tags == 'ab')\n", (5347, 5361), True, 'import numpy as np\n'), ((5391, 5413), 'numpy.where', 'np.where', (["(tags == 'ba')"], {}), "(tags == 'ba')\n", (5399, 5413), True, 'import numpy as np\n'), ((6300, 6321), 'numpy.where', 'np.where', (['(dataAB > 20)'], {}), '(dataAB > 20)\n', (6308, 6321), True, 'import numpy as np\n'), ((6388, 6409), 'numpy.where', 'np.where', (['(dataBA > 20)'], {}), '(dataBA > 20)\n', (6396, 6409), True, 'import numpy as np\n'), 
((3823, 3846), 'numpy.array', 'np.array', (['start_posList'], {}), '(start_posList)\n', (3831, 3846), True, 'import numpy as np\n'), ((3886, 3908), 'numpy.array', 'np.array', (['stop_posList'], {}), '(stop_posList)\n', (3894, 3908), True, 'import numpy as np\n'), ((4633, 4664), 're.search', 're.search', (['"""_"""', 'read.query_name'], {}), "('_', read.query_name)\n", (4642, 4664), False, 'import re\n'), ((5810, 5827), 'numpy.array', 'np.array', (['seq_mut'], {}), '(seq_mut)\n', (5818, 5827), True, 'import numpy as np\n'), ((7934, 7943), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (7941, 7943), True, 'import matplotlib.pyplot as plt\n'), ((8204, 8213), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (8211, 8213), True, 'import matplotlib.pyplot as plt\n'), ((8519, 8528), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (8526, 8528), True, 'import matplotlib.pyplot as plt\n'), ((8615, 8624), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (8622, 8624), True, 'import matplotlib.pyplot as plt\n'), ((8762, 8771), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (8769, 8771), True, 'import matplotlib.pyplot as plt\n'), ((9015, 9024), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (9022, 9024), True, 'import matplotlib.pyplot as plt\n'), ((4270, 4301), 're.search', 're.search', (['"""_"""', 'read.query_name'], {}), "('_', read.query_name)\n", (4279, 4301), False, 'import re\n'), ((8106, 8132), 'numpy.array', 'np.array', (['data_array[:, 0]'], {}), '(data_array[:, 0])\n', (8114, 8132), True, 'import numpy as np\n'), ((9190, 9199), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (9197, 9199), True, 'import matplotlib.pyplot as plt\n'), ((9305, 9314), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (9312, 9314), True, 'import matplotlib.pyplot as plt\n'), ((4698, 4728), 're.split', 're.split', (['"""_"""', 'read.query_name'], {}), "('_', read.query_name)\n", (4706, 4728), False, 'import re\n'), ((10335, 10361), 
'numpy.array', 'np.array', (['data_array[:, 0]'], {}), '(data_array[:, 0])\n', (10343, 10361), True, 'import numpy as np\n'), ((4338, 4368), 're.split', 're.split', (['"""_"""', 'read.query_name'], {}), "('_', read.query_name)\n", (4346, 4368), False, 'import re\n')] |
#!/usr/bin/env python
# Usage:
# python figure_five.py inputfile outputfile
# optional args: [--target, --raw]
from sequential import optimizers
from objective_functions import objectives
import numpy as np
import argparse
import pickle
import pandas as pd
from collections import defaultdict
def compute_average(objectives, results, n_samples=10**6):
    """Estimate each objective's mean value via Monte Carlo sampling.

    For every function named in `results`, draws `n_samples` uniform points
    inside the bounds given by objectives[name]['bnds'], evaluates the
    objective on them and stores the sample mean under the 'avg' key.
    The `objectives` dict is mutated in place and also returned.
    `objectives` must have the same form as `objective_functions.objectives`;
    `results` must be output from `optimize.py`.
    """
    for name in results:
        spec = objectives[name]
        lows = np.array([b[0] for b in spec['bnds']])
        highs = np.array([b[1] for b in spec['bnds']])
        # Same RNG draw as before: one (n_samples, n_dims) uniform block.
        unit_points = np.random.uniform(size=(n_samples, len(spec['bnds'])))
        box_points = unit_points * (highs - lows) + lows
        # NOTE: evaluation passes a (dims, n_samples) array; this works for
        # the synthetic objective functions but may not for real-world ones.
        spec['avg'] = np.mean(spec['func'](box_points.T))
    return objectives
def compute_maximum(objectives, results):
    """Record each objective's best value seen across all simulations.

    Scans every optimizer's simulation records in `results[name]` and stores
    the overall maximum of the observed 'y' trajectories under
    objectives[name]['maximum']. Mutates and returns `objectives`.
    `objectives` must have the same form as `objective_functions.objectives`;
    `results` must be output from `optimize.py`.
    """
    for name, per_optimizer in results.items():
        peaks = []
        for runs in per_optimizer.values():
            # One peak per simulation record (indexed 0..len(runs)-1).
            peaks.extend(np.max(runs[k]['y']) for k in range(len(runs)))
        objectives[name]['maximum'] = np.max(peaks)
    return objectives
def table_to_df(table):
    """Convert the nested results table into a pandas DataFrame.

    `table` maps func_name -> optimizer_name -> target -> {'mean', 'std'}.
    The returned frame is indexed by (target, optimizer) pairs with one
    column per objective function; each cell is a 'mean +/- std' string.
    (Removed commented-out dead code from the original implementation.)
    """
    colnames = list(table.keys())
    csv_table = defaultdict(lambda: defaultdict(dict))
    for func_name, contents in table.items():
        for optimizer_name, opt_results in contents.items():
            for target, results in opt_results.items():
                cell = str(results['mean']) + ' +/- {0:.01f}'.format(results['std'])
                csv_table[target][optimizer_name][func_name] = cell
    # Flatten the two outer levels into a (target, optimizer) index.
    multi_ind = {(i, j): csv_table[i][j] for i in csv_table.keys()
                 for j in csv_table[i].keys()}
    df = pd.DataFrame.from_dict(multi_ind).T
    # Keep columns in the original insertion order of `table`.
    df = df[colnames]
    return df
def main(results, objectives, target):
    """Tabulate how many iterations each optimizer needs to reach a target.

    For every (objective function, optimizer) pair and every fraction in
    `target`, computes the mean and standard deviation (over simulations)
    of the first iteration whose objective value reaches
    max - (max - avg) * (1 - fraction), i.e. the given fraction of the gap
    between the objective's Monte Carlo average and its observed maximum.
    Returns a nested dict: func_name -> optimizer_name -> fraction ->
    {'mean', 'std'}.
    """
    objectives = compute_average(objectives, results)
    objectives = compute_maximum(objectives, results)
    table = dict()
    for func_name, func_results in results.items():
        table[func_name] = dict()
        for optimizer_name, opt_results in func_results.items():
            N = len(opt_results)  # number of simulations
            M = len(opt_results[0]['y'])  # number of iterations
            # populate the (simulation x iteration) array of observed values
            cur_array = np.zeros((N, M))
            for sim in np.arange(N):
                cur_array[sim,:] = opt_results[sim]['y']
            cur_max = objectives[func_name]['maximum']
            cur_avg = objectives[func_name]['avg']
            cur_results = dict()
            for each_target in target:
                # Interpolate between the average (fraction 0) and the
                # maximum (fraction 1) to get the absolute target value.
                cur_target = cur_max - (cur_max - cur_avg) * (1 - each_target)
                # find the first position at which we reach the target value.
                # if we never do, record M (the number of iterations).
                loc_pass_target = np.zeros(N)
                for i in range(N):
                    above = cur_array[i] >= cur_target
                    if not np.any(above):
                        loc_pass_target[i] = M
                    else:
                        loc_pass_target[i] = np.min(np.nonzero(above)[0])
                cur_results[each_target] = {'mean': np.mean(loc_pass_target), 'std': np.std(loc_pass_target)}
            # store the results
            table[func_name][optimizer_name] = cur_results
    return table
if __name__ == "__main__":
    # CLI: read pickled optimizer results, compute iterations-to-target
    # statistics, and write either a CSV table or a raw pickle.
    parser = argparse.ArgumentParser()
    parser.add_argument('inputfile', type=str,
                        help='input file (results from running optimize.py)')
    parser.add_argument('outputfile', type=str,
                        help='outputfile')
    parser.add_argument('--target', type=float,
                        help='''float between 0 and 1 indicating target value(s)
                        we seek to reach in optimization process''',
                        choices=[0.9, 0.95, 0.99])
    parser.add_argument('--raw', default=False, action='store_true',
                        help='indicate if we want a csv output or raw pickle')
    args = parser.parse_args()
    with open(args.inputfile , 'rb') as stuff:
        results = pickle.load(stuff)
    # A single --target restricts the analysis; otherwise use all three.
    if args.target:
        target = [args.target]
    else:
        target = [0.9, 0.95, 0.99]
    table = main(results, objectives, target)
    # --raw dumps the nested dict as a pickle; default is a CSV table.
    if args.raw:
        with open(args.outputfile + '.pkl', 'wb') as place:
            pickle.dump(table, place, protocol=pickle.HIGHEST_PROTOCOL)
    else:
        df = table_to_df(table)
        df.to_csv(args.outputfile + '.csv')
| [
"numpy.mean",
"pickle.dump",
"argparse.ArgumentParser",
"pickle.load",
"pandas.DataFrame.from_dict",
"numpy.max",
"numpy.any",
"numpy.array",
"numpy.zeros",
"collections.defaultdict",
"numpy.nonzero",
"numpy.std",
"numpy.arange"
] | [((4485, 4510), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4508, 4510), False, 'import argparse\n'), ((733, 780), 'numpy.array', 'np.array', (["[bnd[0] for bnd in objective['bnds']]"], {}), "([bnd[0] for bnd in objective['bnds']])\n", (741, 780), True, 'import numpy as np\n'), ((802, 849), 'numpy.array', 'np.array', (["[bnd[1] for bnd in objective['bnds']]"], {}), "([bnd[1] for bnd in objective['bnds']])\n", (810, 849), True, 'import numpy as np\n'), ((1220, 1238), 'numpy.mean', 'np.mean', (['y_samples'], {}), '(y_samples)\n', (1227, 1238), True, 'import numpy as np\n'), ((1936, 1951), 'numpy.max', 'np.max', (['all_max'], {}), '(all_max)\n', (1942, 1951), True, 'import numpy as np\n'), ((2724, 2757), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['multi_ind'], {}), '(multi_ind)\n', (2746, 2757), True, 'import pandas as pd\n'), ((5230, 5248), 'pickle.load', 'pickle.load', (['stuff'], {}), '(stuff)\n', (5241, 5248), False, 'import pickle\n'), ((1817, 1829), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1826, 1829), True, 'import numpy as np\n'), ((2139, 2156), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (2150, 2156), False, 'from collections import defaultdict\n'), ((3349, 3365), 'numpy.zeros', 'np.zeros', (['(N, M)'], {}), '((N, M))\n', (3357, 3365), True, 'import numpy as np\n'), ((3389, 3401), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (3398, 3401), True, 'import numpy as np\n'), ((5487, 5546), 'pickle.dump', 'pickle.dump', (['table', 'place'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(table, place, protocol=pickle.HIGHEST_PROTOCOL)\n', (5498, 5546), False, 'import pickle\n'), ((3897, 3908), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (3905, 3908), True, 'import numpy as np\n'), ((1862, 1891), 'numpy.max', 'np.max', (["opt_results[sim]['y']"], {}), "(opt_results[sim]['y'])\n", (1868, 1891), True, 'import numpy as np\n'), ((4257, 4281), 'numpy.mean', 'np.mean', 
(['loc_pass_target'], {}), '(loc_pass_target)\n', (4264, 4281), True, 'import numpy as np\n'), ((4290, 4313), 'numpy.std', 'np.std', (['loc_pass_target'], {}), '(loc_pass_target)\n', (4296, 4313), True, 'import numpy as np\n'), ((4026, 4039), 'numpy.any', 'np.any', (['above'], {}), '(above)\n', (4032, 4039), True, 'import numpy as np\n'), ((4166, 4183), 'numpy.nonzero', 'np.nonzero', (['above'], {}), '(above)\n', (4176, 4183), True, 'import numpy as np\n')] |
"""
<NAME> ETHZ, 2020
Script to compute dci score of learned representation.
"""
import warnings
from typing import Union, Iterable
from numpy.core._multiarray_umath import ndarray
warnings.simplefilter(action='ignore', category=FutureWarning)
import numpy as np
from absl import flags, app
from sklearn.model_selection import train_test_split
from disentanglement_lib.evaluation.metrics import dci
import os
FLAGS = flags.FLAGS
flags.DEFINE_string('z_name', '', 'Filename for underlying factors z')
flags.DEFINE_string('model_name', '', 'Name of model directory to get learned latent code')
flags.DEFINE_bool('save_score', False, 'Whether or not to save calculated score')
def load_z_c(z_path, c_path):
    """Load ground-truth factors and latent codes with matching lengths.

    The factors come from the 'factors_test' entry of the npz archive at
    `z_path`; they are truncated along the first axis to the number of
    latent-code samples in `c_path` (i.e. to the test split).
    Returns the pair (z, c).
    """
    latent_codes = np.load(c_path)
    factors = np.load(z_path)['factors_test'][:latent_codes.shape[0], :, :]
    assert factors.shape[0] == latent_codes.shape[0]
    return factors, latent_codes
def main(argv, model_dir=None):
    """Compute DCI scores for a learned representation and print them.

    Loads ground-truth factors (z) and learned latent means (c), flattens
    the (sample, dim, time) arrays to 2-D, drops ground-truth factor
    dimensions that are constant across all samples, and evaluates
    disentanglement (D), completeness (C) and informativeness (I) via
    disentanglement_lib's dci module. Saves the scores as an .npz file
    when --save_score is set.

    Args:
        argv: unused (absl passes leftover command-line args).
        model_dir: optional override for the FLAGS.model_name directory.
    """
    del argv  # Unused
    if model_dir is None:
        out_dir = FLAGS.model_name
    else:
        out_dir = model_dir
    c_path = '{}/z_mean.npy'.format(out_dir)
    # NOTE(review): hard-coded cluster data path; parameterize if reused.
    project_path = '/cluster/work/grlab/projects/projects2020_disentangled_gpvae/data/dsprites'
    z_path = os.path.join(project_path, FLAGS.z_name)
    z, c = load_z_c(z_path, c_path)
    z_shape = z.shape
    c_shape = c.shape
    # Flatten (N, dim, T) -> (N*T, dim) so each time step is one sample.
    z_reshape = np.reshape(np.transpose(z, (0, 2, 1)), (z_shape[0] * z_shape[2], z_shape[1]))
    c_reshape = np.reshape(np.transpose(c, (0, 2, 1)), (c_shape[0] * c_shape[2], c_shape[1]))
    # Drop factors that never change: they carry no information for DCI.
    # BUGFIX: the previous check used np.sum(np.diff(x)), which equals
    # x[-1] - x[0] and therefore also discarded varying factors whose first
    # and last values happen to coincide. np.any(np.diff(x)) is True exactly
    # when the factor actually varies.
    mask = np.ones(z_reshape.shape[1], dtype=bool)
    for i in range(z_reshape.shape[1]):
        if not np.any(np.diff(z_reshape[:, i])):
            mask[i] = False
    z_reshape = z_reshape[:, mask]
    c_train, c_test, z_train, z_test = train_test_split(c_reshape, z_reshape, test_size=0.2, shuffle=False)
    # dci expects (code_dim, n_samples) / (factor_dim, n_samples) matrices.
    scores = dci._compute_dci(c_train[:8000, :].transpose(), z_train[:8000, :].transpose(),
                             c_test[:2000, :].transpose(), z_test[:2000, :].transpose())
    print('D: {}'.format(scores['disentanglement']))
    print('C: {}'.format(scores['completeness']))
    print('I: {}'.format(scores['informativeness_test']))
    print("Evaluation finished")
    if FLAGS.save_score:
        np.savez('{}/dci_{}_{}_{}'.format(out_dir, z_shape[1], c_shape[1], z_shape[0]),
                 informativeness_train=scores['informativeness_train'],
                 informativeness_test=scores['informativeness_test'],
                 disentanglement=scores['disentanglement'],
                 completeness=scores['completeness'])
        print("Score saved")
if __name__ == '__main__':
app.run(main)
| [
"numpy.transpose",
"numpy.ones",
"absl.flags.DEFINE_bool",
"sklearn.model_selection.train_test_split",
"os.path.join",
"numpy.diff",
"absl.app.run",
"warnings.simplefilter",
"numpy.load",
"absl.flags.DEFINE_string"
] | [((184, 246), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (205, 246), False, 'import warnings\n'), ((435, 505), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""z_name"""', '""""""', '"""Filename for underlying factors z"""'], {}), "('z_name', '', 'Filename for underlying factors z')\n", (454, 505), False, 'from absl import flags, app\n'), ((506, 601), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""model_name"""', '""""""', '"""Name of model directory to get learned latent code"""'], {}), "('model_name', '',\n 'Name of model directory to get learned latent code')\n", (525, 601), False, 'from absl import flags, app\n'), ((598, 683), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""save_score"""', '(False)', '"""Whether or not to save calculated score"""'], {}), "('save_score', False,\n 'Whether or not to save calculated score')\n", (615, 683), False, 'from absl import flags, app\n'), ((764, 779), 'numpy.load', 'np.load', (['c_path'], {}), '(c_path)\n', (771, 779), True, 'import numpy as np\n'), ((1262, 1302), 'os.path.join', 'os.path.join', (['project_path', 'FLAGS.z_name'], {}), '(project_path, FLAGS.z_name)\n', (1274, 1302), False, 'import os\n'), ((1644, 1683), 'numpy.ones', 'np.ones', (['z_reshape.shape[1]'], {'dtype': 'bool'}), '(z_reshape.shape[1], dtype=bool)\n', (1651, 1683), True, 'import numpy as np\n'), ((1902, 1970), 'sklearn.model_selection.train_test_split', 'train_test_split', (['c_reshape', 'z_reshape'], {'test_size': '(0.2)', 'shuffle': '(False)'}), '(c_reshape, z_reshape, test_size=0.2, shuffle=False)\n', (1918, 1970), False, 'from sklearn.model_selection import train_test_split\n'), ((2746, 2759), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (2753, 2759), False, 'from absl import flags, app\n'), ((724, 739), 'numpy.load', 'np.load', (['z_path'], {}), '(z_path)\n', (731, 739), True, 'import numpy as 
np\n'), ((1413, 1439), 'numpy.transpose', 'np.transpose', (['z', '(0, 2, 1)'], {}), '(z, (0, 2, 1))\n', (1425, 1439), True, 'import numpy as np\n'), ((1501, 1527), 'numpy.transpose', 'np.transpose', (['c', '(0, 2, 1)'], {}), '(c, (0, 2, 1))\n', (1513, 1527), True, 'import numpy as np\n'), ((1750, 1774), 'numpy.diff', 'np.diff', (['z_reshape[:, i]'], {}), '(z_reshape[:, i])\n', (1757, 1774), True, 'import numpy as np\n')] |
import sys
import stable_baselines
sys.path.append("../../../k_road/")
import numpy as np
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.ddpg.noise import (
OrnsteinUhlenbeckActionNoise,
)
from stable_baselines import DDPG
# from cavs_environments.vehicle.deep_road.deep_road import DeepRoad
import scenario.road as road
# def make_target_env_with_baseline(
# observation_scaling = 1.0,
# action_scaling = 1.0 / 10.0,
# max_distance_from_target = 125,
# time_limit = 60):
# return framework.FactoredGym(
# targeting.TargetProcess(time_limit, max_distance_from_target),
# targeting.TargetObserver(observation_scaling),
# targeting.TargetTerminator(),
# targeting.TargetRewarder(),
# [framework.ActionScaler(action_scaling), targeting.TargetBaseline()]
# )
class ThisRoadEnv(factored_gym.FactoredGym):
    """Road-following gym environment assembled from k_road factored-gym parts.

    NOTE(review): `factored_gym` is referenced here but never imported in
    this file (the framework imports above are commented out) -- confirm
    the intended module and restore its import before running.
    """
    def __init__(self, env_config):
        # env_config is accepted (rllib-style constructor) but not used here.
        observation_scaling = 1.0  # 10.0
        ego_starting_distance = 200.0
        # Compose the environment from process/observer/terminator/rewarder
        # plus a list of action transforms (episode cap: 5 simulated minutes).
        super().__init__(
            road.RoadProcess(ego_starting_distance=ego_starting_distance),
            road.RoadObserver(observation_scaling),
            road.RoadTerminator(time_limit=5 * 60),
            road.RoadGoalRewarder(),
            # [framework.ActionScaler(1.0/10.0), framework.ActionCenterer([.001, 5], [0, 0])]
            [factored_gym.ActionCenterer([10, 10], [0, 0])]
        )
class CustomPolicy(stable_baselines.ddpg.policies.FeedForwardPolicy):
    """DDPG feed-forward policy: four 128-unit MLP layers with layer norm."""
    def __init__(self, *args, **kwargs):
        super(CustomPolicy, self).__init__(*args,
                                           layers=[128, 128, 128, 128],
                                           layer_norm=True,
                                           feature_extraction="mlp",
                                           **kwargs
                                           )
# class CustomPolicy(MlpPolicy):
# def __init__(self, *args, **kwargs):
# super(MlpPolicy, self).__init__(*args, act_fun=tf.nn.tanh, net_arch=[32, 32])
# register_policy('LargeMLP', LargeMLP)
# Wrap the single environment in a VecEnv (stable-baselines requirement).
env = DummyVecEnv([lambda: ThisRoadEnv(None)])
# the noise objects for DDPG
n_actions = env.action_space.shape[-1]
param_noise = None
# Temporally correlated exploration noise, sigma 0.5 per action dimension.
action_noise = OrnsteinUhlenbeckActionNoise(mean=np.zeros(n_actions), sigma=float(0.5) * np.ones(n_actions))
model = DDPG(CustomPolicy, env, verbose=1, tensorboard_log='/tmp/k_road_0/',
             gamma=.999, param_noise=param_noise, action_noise=action_noise)
# Train for 100k timesteps, then persist the trained model to disk.
model.learn(total_timesteps=int(100e3))
model.save('k_road_test')
print('done!')
| [
"stable_baselines.DDPG",
"scenario.road.RoadObserver",
"numpy.ones",
"scenario.road.RoadProcess",
"numpy.zeros",
"scenario.road.RoadTerminator",
"sys.path.append",
"scenario.road.RoadGoalRewarder"
] | [((37, 72), 'sys.path.append', 'sys.path.append', (['"""../../../k_road/"""'], {}), "('../../../k_road/')\n", (52, 72), False, 'import sys\n'), ((2359, 2497), 'stable_baselines.DDPG', 'DDPG', (['CustomPolicy', 'env'], {'verbose': '(1)', 'tensorboard_log': '"""/tmp/k_road_0/"""', 'gamma': '(0.999)', 'param_noise': 'param_noise', 'action_noise': 'action_noise'}), "(CustomPolicy, env, verbose=1, tensorboard_log='/tmp/k_road_0/', gamma=\n 0.999, param_noise=param_noise, action_noise=action_noise)\n", (2363, 2497), False, 'from stable_baselines import DDPG\n'), ((2290, 2309), 'numpy.zeros', 'np.zeros', (['n_actions'], {}), '(n_actions)\n', (2298, 2309), True, 'import numpy as np\n'), ((1068, 1129), 'scenario.road.RoadProcess', 'road.RoadProcess', ([], {'ego_starting_distance': 'ego_starting_distance'}), '(ego_starting_distance=ego_starting_distance)\n', (1084, 1129), True, 'import scenario.road as road\n'), ((1143, 1181), 'scenario.road.RoadObserver', 'road.RoadObserver', (['observation_scaling'], {}), '(observation_scaling)\n', (1160, 1181), True, 'import scenario.road as road\n'), ((1195, 1233), 'scenario.road.RoadTerminator', 'road.RoadTerminator', ([], {'time_limit': '(5 * 60)'}), '(time_limit=5 * 60)\n', (1214, 1233), True, 'import scenario.road as road\n'), ((1247, 1270), 'scenario.road.RoadGoalRewarder', 'road.RoadGoalRewarder', ([], {}), '()\n', (1268, 1270), True, 'import scenario.road as road\n'), ((2330, 2348), 'numpy.ones', 'np.ones', (['n_actions'], {}), '(n_actions)\n', (2337, 2348), True, 'import numpy as np\n')] |
"""FPS_receive_test.py -- receive (text, image) pairs & print FPS stats
A test program to provide FPS statistics as different imagenode algorithms are
being tested. This program receives images OR images that have been jpg
compressed, depending on the setting of the JPG option.
It computes and prints FPS statistics. It is designed to be the receiver for the
imagenode.py program or one of the test programs in the tests/unit_tests folder.
Be sure to run this program and the sending program in virtual environments
using Python 3.6 or newer.
1. Edit the options in this python program, such as the JPG option.
Save it.
2. Set the yaml options on the imagenode sending RPi in the imagenode.yaml
file at the home directory. Be sure that the jpg setting on the RPi matches
the setting of JPG below. (If using one of the test programs, use git
pull to bring a copy of the test program to the sending RPi)
2. Run this program in its own terminal window on the mac:
python receive_test.py.
This 'receive the images' program must be running before starting
the RPi image sending program.
2. Run the imagenode image sending program on the RPi:
python imagenode.py # OR run one of /tests/unit_tests programs on the RPi
A cv2.imshow() window will only appear on the Mac that is receiving the
tramsmitted images if the "SHOW_IMAGES" option below is set to True.
The receiving program will run until the "TEST_DURATION" number of seconds is
reached or until Ctrl-C is pressed. When the receiving program ends, it will
compute and print FPS statistics and it will stop receiving images and sending
ZMQ "REP" replies. That should cause the sending program on the RPi to stall
and stop. Or you can end the sending program running on the RPi by pressing
Ctrl-C.
"""
########################################################################
# EDIT THES OPTIONS BEFORE RUNNING PROGRAM
JPG = True # or False if receiving images
SHOW_IMAGES = True
TEST_DURATION = 30 # seconds or 0 to keep going until Ctrl-C
########################################################################
import cv2
import sys
import signal
import imagezmq
import traceback
import numpy as np
from time import sleep
from imutils.video import FPS
from threading import Event, Thread
from collections import defaultdict
# instantiate image_hub
image_hub = imagezmq.ImageHub()
def receive_image():
text, image = image_hub.recv_image()
return text, image
def receive_jpg():
text, jpg_buffer = image_hub.recv_jpg()
image = cv2.imdecode(np.frombuffer(jpg_buffer, dtype='uint8'), -1)
return text, image
if JPG:
receive_tuple = receive_jpg
receive_type = 'jpg'
else:
receive_tuple = receive_image
receive_type = 'native OpenCV'
image_count = 0
sender_image_counts = defaultdict(int) # dict for counts by sender
first_image = True
text = None
image = None
if TEST_DURATION <= 0:
TEST_DURATION = 999999 # a large number so Ctrl-C is only stopping method
def receive_images_forever():
global image_count, sender_image_counts, first_image, text, image, fps
keep_going = Event()
keep_going.set()
def timer(duration):
sleep(duration)
keep_going.clear()
sleep(10) # allow cleanup finally time
while keep_going.is_set(): # receive images until timer expires or Ctrl-C
text, image = receive_tuple()
if first_image:
print('First Image Received. Starting FPS timer.')
fps = FPS().start() # start FPS timer after first image is received
Thread(target=timer, daemon=True, args=(TEST_DURATION,)).start()
first_image = False
fps.update()
image_count += 1 # global count of all images received
sender_image_counts[text] += 1 # count images for each RPi name
if SHOW_IMAGES:
cv2.imshow(text, image) # display images 1 window per unique text
cv2.waitKey(1)
image_hub.send_reply(b'OK') # REP reply
try:
print('FPS Test Program: ', __file__)
print('Option settings:')
print(' Receive Image Type:', receive_type)
print(' Show Images:', SHOW_IMAGES)
print(' Test Duration:', TEST_DURATION, ' seconds')
receive_images_forever()
sys.exit()
except (KeyboardInterrupt, SystemExit):
pass # Ctrl-C was pressed to end program; FPS stats computed below
except Exception as ex:
print('Python error with no Exception handler:')
print('Traceback error:', ex)
traceback.print_exc()
finally:
# stop the timer and display FPS information
print()
print('Total Number of Images received: {:,g}'.format(image_count))
if first_image: # never got images from any sender
print('Never got any images from imagenode. Ending program.')
sys.exit()
fps.stop()
print('Number of Images received for each text message type:')
for text_message in sender_image_counts:
print(' ', text_message, ': {:,g}'.format(sender_image_counts[text_message]))
if JPG:
compressed_size = len(image)
print('Size of last jpg buffer received: {:,g} bytes'.format(compressed_size))
else:
compressed_size = 1
image_size = image.shape
print('Dimensions of last image received: ', image_size)
uncompressed_size = 1
for dimension in image_size:
uncompressed_size *= dimension
print(' = {:,} bytes'.format(uncompressed_size))
print('Compressed to Uncompressed ratio: {:.8f}'.format(compressed_size / uncompressed_size))
print('Elasped time: {:,.2f} seconds'.format(fps.elapsed()))
print('Approximate FPS: {:,.2f}'.format(fps.fps()))
cv2.destroyAllWindows() # closes the windows opened by cv2.imshow()
image_hub.close() # closes ZMQ socket and context
sys.exit()
| [
"time.sleep",
"cv2.imshow",
"threading.Event",
"imutils.video.FPS",
"cv2.destroyAllWindows",
"collections.defaultdict",
"sys.exit",
"numpy.frombuffer",
"traceback.print_exc",
"cv2.waitKey",
"imagezmq.ImageHub",
"threading.Thread"
] | [((2353, 2372), 'imagezmq.ImageHub', 'imagezmq.ImageHub', ([], {}), '()\n', (2370, 2372), False, 'import imagezmq\n'), ((2797, 2813), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2808, 2813), False, 'from collections import defaultdict\n'), ((3112, 3119), 'threading.Event', 'Event', ([], {}), '()\n', (3117, 3119), False, 'from threading import Event, Thread\n'), ((4262, 4272), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4270, 4272), False, 'import sys\n'), ((5666, 5689), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5687, 5689), False, 'import cv2\n'), ((5794, 5804), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5802, 5804), False, 'import sys\n'), ((2548, 2588), 'numpy.frombuffer', 'np.frombuffer', (['jpg_buffer'], {'dtype': '"""uint8"""'}), "(jpg_buffer, dtype='uint8')\n", (2561, 2588), True, 'import numpy as np\n'), ((3175, 3190), 'time.sleep', 'sleep', (['duration'], {}), '(duration)\n', (3180, 3190), False, 'from time import sleep\n'), ((3226, 3235), 'time.sleep', 'sleep', (['(10)'], {}), '(10)\n', (3231, 3235), False, 'from time import sleep\n'), ((4500, 4521), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (4519, 4521), False, 'import traceback\n'), ((4798, 4808), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4806, 4808), False, 'import sys\n'), ((3855, 3878), 'cv2.imshow', 'cv2.imshow', (['text', 'image'], {}), '(text, image)\n', (3865, 3878), False, 'import cv2\n'), ((3934, 3948), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3945, 3948), False, 'import cv2\n'), ((3489, 3494), 'imutils.video.FPS', 'FPS', ([], {}), '()\n', (3492, 3494), False, 'from imutils.video import FPS\n'), ((3564, 3620), 'threading.Thread', 'Thread', ([], {'target': 'timer', 'daemon': '(True)', 'args': '(TEST_DURATION,)'}), '(target=timer, daemon=True, args=(TEST_DURATION,))\n', (3570, 3620), False, 'from threading import Event, Thread\n')] |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, <NAME>PORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
import logging
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import math
from modules.until_config import PretrainedConfig
logger = logging.getLogger(__name__)
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(LayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class PreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
a simple interface for dowloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(PreTrainedModel, self).__init__()
if not isinstance(config, PretrainedConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, LayerNorm):
if 'beta' in dir(module) and 'gamma' in dir(module):
module.beta.data.zero_()
module.gamma.data.fill_(1.0)
else:
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def resize_token_embeddings(self, new_num_tokens=None):
raise NotImplementedError
@classmethod
def init_preweight(cls, model, state_dict, prefix=None, task_config=None):
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
if prefix is not None:
old_keys = []
new_keys = []
for key in state_dict.keys():
old_keys.append(key)
new_keys.append(prefix + key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix='')
if prefix is None and (task_config is None or task_config.local_rank == 0):
logger.info("-" * 20)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}"
.format(model.__class__.__name__, "\n " + "\n ".join(missing_keys)))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}"
.format(model.__class__.__name__, "\n " + "\n ".join(unexpected_keys)))
if len(error_msgs) > 0:
logger.error("Weights from pretrained model cause errors in {}: {}"
.format(model.__class__.__name__, "\n " + "\n ".join(error_msgs)))
return model
@property
def dtype(self):
"""
:obj:`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
"""
try:
return next(self.parameters()).dtype
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module):
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].dtype
@classmethod
def from_pretrained(cls, config, state_dict=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
"""
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None:
return model
model = cls.init_preweight(model, state_dict)
return model
##################################
###### LOSS FUNCTION #############
##################################
class CrossEn(nn.Module):
def __init__(self,):
super(CrossEn, self).__init__()
def forward(self, sim_matrix):
logpt = F.log_softmax(sim_matrix, dim=-1)
logpt = torch.diag(logpt)
nce_loss = -logpt
sim_loss = nce_loss.mean()
return sim_loss
class MILNCELoss(nn.Module):
def __init__(self, batch_size=1, n_pair=1,):
super(MILNCELoss, self).__init__()
self.batch_size = batch_size
self.n_pair = n_pair
torch_v = float(".".join(torch.__version__.split(".")[:2]))
self.bool_dtype = torch.bool if torch_v >= 1.3 else torch.uint8
def forward(self, sim_matrix):
mm_mask = np.eye(self.batch_size)
mm_mask = np.kron(mm_mask, np.ones((self.n_pair, self.n_pair)))
mm_mask = torch.tensor(mm_mask).float().to(sim_matrix.device)
from_text_matrix = sim_matrix + mm_mask * -1e12
from_video_matrix = sim_matrix.transpose(1, 0)
new_sim_matrix = torch.cat([from_video_matrix, from_text_matrix], dim=-1)
logpt = F.log_softmax(new_sim_matrix, dim=-1)
mm_mask_logpt = torch.cat([mm_mask, torch.zeros_like(mm_mask)], dim=-1)
masked_logpt = logpt + (torch.ones_like(mm_mask_logpt) - mm_mask_logpt) * -1e12
new_logpt = -torch.logsumexp(masked_logpt, dim=-1)
logpt_choice = torch.zeros_like(new_logpt)
mark_ind = torch.arange(self.batch_size).to(sim_matrix.device) * self.n_pair + (self.n_pair//2)
logpt_choice[mark_ind] = 1
sim_loss = new_logpt.masked_select(logpt_choice.to(dtype=self.bool_dtype)).mean()
return sim_loss
class MaxMarginRankingLoss(nn.Module):
def __init__(self,
margin=1.0,
negative_weighting=False,
batch_size=1,
n_pair=1,
hard_negative_rate=0.5,
):
super(MaxMarginRankingLoss, self).__init__()
self.margin = margin
self.n_pair = n_pair
self.batch_size = batch_size
easy_negative_rate = 1 - hard_negative_rate
self.easy_negative_rate = easy_negative_rate
self.negative_weighting = negative_weighting
if n_pair > 1 and batch_size > 1:
alpha = easy_negative_rate / ((batch_size - 1) * (1 - easy_negative_rate))
mm_mask = (1 - alpha) * np.eye(self.batch_size) + alpha
mm_mask = np.kron(mm_mask, np.ones((n_pair, n_pair)))
mm_mask = torch.tensor(mm_mask) * (batch_size * (1 - easy_negative_rate))
self.mm_mask = mm_mask.float()
def forward(self, x):
d = torch.diag(x)
max_margin = F.relu(self.margin + x - d.view(-1, 1)) + \
F.relu(self.margin + x - d.view(1, -1))
if self.negative_weighting and self.n_pair > 1 and self.batch_size > 1:
max_margin = max_margin * self.mm_mask.to(max_margin.device)
return max_margin.mean()
| [
"logging.getLogger",
"numpy.eye",
"torch.ones_like",
"numpy.ones",
"torch.__version__.split",
"torch.sigmoid",
"torch.sqrt",
"math.sqrt",
"torch.tensor",
"torch.is_tensor",
"torch.arange",
"torch.nn.functional.log_softmax",
"torch.logsumexp",
"torch.zeros",
"torch.zeros_like",
"torch.d... | [((903, 930), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (920, 930), False, 'import logging\n'), ((1296, 1312), 'torch.sigmoid', 'torch.sigmoid', (['x'], {}), '(x)\n', (1309, 1312), False, 'import torch\n'), ((7503, 7536), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['sim_matrix'], {'dim': '(-1)'}), '(sim_matrix, dim=-1)\n', (7516, 7536), True, 'import torch.nn.functional as F\n'), ((7553, 7570), 'torch.diag', 'torch.diag', (['logpt'], {}), '(logpt)\n', (7563, 7570), False, 'import torch\n'), ((8038, 8061), 'numpy.eye', 'np.eye', (['self.batch_size'], {}), '(self.batch_size)\n', (8044, 8061), True, 'import numpy as np\n'), ((8342, 8398), 'torch.cat', 'torch.cat', (['[from_video_matrix, from_text_matrix]'], {'dim': '(-1)'}), '([from_video_matrix, from_text_matrix], dim=-1)\n', (8351, 8398), False, 'import torch\n'), ((8415, 8452), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['new_sim_matrix'], {'dim': '(-1)'}), '(new_sim_matrix, dim=-1)\n', (8428, 8452), True, 'import torch.nn.functional as F\n'), ((8706, 8733), 'torch.zeros_like', 'torch.zeros_like', (['new_logpt'], {}), '(new_logpt)\n', (8722, 8733), False, 'import torch\n'), ((9969, 9982), 'torch.diag', 'torch.diag', (['x'], {}), '(x)\n', (9979, 9982), False, 'import torch\n'), ((1644, 1667), 'torch.ones', 'torch.ones', (['hidden_size'], {}), '(hidden_size)\n', (1654, 1667), False, 'import torch\n'), ((1702, 1726), 'torch.zeros', 'torch.zeros', (['hidden_size'], {}), '(hidden_size)\n', (1713, 1726), False, 'import torch\n'), ((1900, 1937), 'torch.sqrt', 'torch.sqrt', (['(s + self.variance_epsilon)'], {}), '(s + self.variance_epsilon)\n', (1910, 1937), False, 'import torch\n'), ((8097, 8132), 'numpy.ones', 'np.ones', (['(self.n_pair, self.n_pair)'], {}), '((self.n_pair, self.n_pair))\n', (8104, 8132), True, 'import numpy as np\n'), ((8644, 8681), 'torch.logsumexp', 'torch.logsumexp', (['masked_logpt'], {'dim': '(-1)'}), '(masked_logpt, dim=-1)\n', 
(8659, 8681), False, 'import torch\n'), ((8498, 8523), 'torch.zeros_like', 'torch.zeros_like', (['mm_mask'], {}), '(mm_mask)\n', (8514, 8523), False, 'import torch\n'), ((9774, 9799), 'numpy.ones', 'np.ones', (['(n_pair, n_pair)'], {}), '((n_pair, n_pair))\n', (9781, 9799), True, 'import numpy as np\n'), ((9823, 9844), 'torch.tensor', 'torch.tensor', (['mm_mask'], {}), '(mm_mask)\n', (9835, 9844), False, 'import torch\n'), ((1249, 1263), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (1258, 1263), False, 'import math\n'), ((7877, 7905), 'torch.__version__.split', 'torch.__version__.split', (['"""."""'], {}), "('.')\n", (7900, 7905), False, 'import torch\n'), ((8566, 8596), 'torch.ones_like', 'torch.ones_like', (['mm_mask_logpt'], {}), '(mm_mask_logpt)\n', (8581, 8596), False, 'import torch\n'), ((9703, 9726), 'numpy.eye', 'np.eye', (['self.batch_size'], {}), '(self.batch_size)\n', (9709, 9726), True, 'import numpy as np\n'), ((8152, 8173), 'torch.tensor', 'torch.tensor', (['mm_mask'], {}), '(mm_mask)\n', (8164, 8173), False, 'import torch\n'), ((8753, 8782), 'torch.arange', 'torch.arange', (['self.batch_size'], {}), '(self.batch_size)\n', (8765, 8782), False, 'import torch\n'), ((6567, 6585), 'torch.is_tensor', 'torch.is_tensor', (['v'], {}), '(v)\n', (6582, 6585), False, 'import torch\n')] |
from greengraph.ggraph import Greengraph
import numpy as np
from mock import Mock
from pytest import raises
graph = Greengraph("London", "Paris")
precision = 1e-3
def test_init():
assert graph.start == "London"
assert graph.end == "Paris"
def test_geolocate():
# test method returns correct latitude & longitude
graph_geolocate = graph.geolocate('London')
assert abs(graph_geolocate[0]- 51.5074) < precision
assert abs(graph_geolocate[1] + 0.1278) < precision
def test_location_sequence():
# test method returns the correct sequence of coordinates
# between 2 locations given an arbitrary step size
steps = 10
lond_coords = np.asarray(graph.geolocate("London"))
paris_coords = np.asarray(graph.geolocate("Paris"))
diff = (paris_coords - lond_coords)/(steps-1)
coord_seq = graph.location_sequence(lond_coords, paris_coords, steps)
for i in range(0, steps):
assert all(abs(coord_seq[i] - (lond_coords + i*diff)) < precision)
def test_green_between():
# we test the funnctionality of count_green in test_map
# here we look at the size of the returned array and
# that we accept positive integers only
steps = 10
green_between = graph.green_between(steps)
assert np.shape(green_between) == (10,)
with raises(ValueError):
assert graph.green_between(-1)
| [
"numpy.shape",
"pytest.raises",
"greengraph.ggraph.Greengraph"
] | [((118, 147), 'greengraph.ggraph.Greengraph', 'Greengraph', (['"""London"""', '"""Paris"""'], {}), "('London', 'Paris')\n", (128, 147), False, 'from greengraph.ggraph import Greengraph\n'), ((1257, 1280), 'numpy.shape', 'np.shape', (['green_between'], {}), '(green_between)\n', (1265, 1280), True, 'import numpy as np\n'), ((1300, 1318), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (1306, 1318), False, 'from pytest import raises\n')] |
import os
import json
from utils import load_datasets, load_target, save_submission
import models
from models.tuning import beyesian_optimization
from models.evaluation import cross_validation_score
from lightgbm.sklearn import LGBMClassifier
from sklearn.ensemble import StackingClassifier, RandomForestClassifier, ExtraTreesClassifier, VotingClassifier
from sklearn.linear_model import RidgeClassifier
from sklearn.model_selection import cross_val_score, StratifiedKFold
import json
from sklearn.preprocessing import StandardScaler
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.neural_network import MLPClassifier
from keras.models import Sequential, load_model
from keras.utils import np_utils
from keras.callbacks import EarlyStopping
from keras.layers.advanced_activations import PReLU
from keras.layers.core import Activation, Dense, Dropout
from tensorflow.keras.layers import BatchNormalization
from scikeras.wrappers import KerasClassifier
import warnings
# warnings.filterwarnings('ignore')
config = json.load(open('./config/default.json'))
# X_train, X_test = load_datasets(["Age", "AgeSplit", "EducationNum"])
X_train, X_test = load_datasets(config['features'])
y_train = load_target('Y')
n_jobs = 1
def nn_model(layers, meta):
"""
This function compiles and returns a Keras model.
Should be passed to KerasClassifier in the Keras scikit-learn API.
"""
X_shape_ = meta["X_shape_"]
dropout = 0.1
units = 1000
model = Sequential()
model.add(Dense(units, input_shape=(X_shape_[1], )))
model.add(PReLU())
model.add(BatchNormalization())
model.add(Dropout(dropout))
for l in range(layers - 1):
model.add(Dense(units))
model.add(PReLU())
model.add(BatchNormalization())
model.add(Dropout(dropout))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adadelta', metrics=['accuracy'])
return model
estimators = [
# ('lgbm-shallow', LGBMClassifier(max_depth=5, random_state=0)),
# ('lgbm-middle', LGBMClassifier(max_depth=8, random_state=0)),
# ('lgbm-deep', LGBMClassifier(max_depth=-1, random_state=0)),
# ('rf', RandomForestClassifier(random_state=0, n_jobs=n_jobs)),
# ('ert', ExtraTreesClassifier(random_state=0, n_jobs=n_jobs)),
# ('ridge', RidgeClassifier(random_state=0)),
('nn-shallow', make_pipeline(StandardScaler(),
KerasClassifier(model=nn_model, loss='binary_crossentropy',
batch_size=32, epochs=100, layers=3))),
('nn-deep', make_pipeline(StandardScaler(),
KerasClassifier(model=nn_model, loss='binary_crossentropy',
batch_size=32, epochs=100, layers=10)))
]
final_estimator = VotingClassifier(
estimators=[
('lgbm-shallow', LGBMClassifier(max_depth=5, random_state=0)),
('lgbm-middle', LGBMClassifier(max_depth=8, random_state=0)),
('lgbm-deep', LGBMClassifier(max_depth=-1, random_state=0)),
('rf', RandomForestClassifier(random_state=0, n_jobs=n_jobs)),
('ert', ExtraTreesClassifier(random_state=0, n_jobs=n_jobs)),
('ridge', RidgeClassifier(random_state=0)),
('nn-shallow', make_pipeline(StandardScaler(),
KerasClassifier(model=nn_model, batch_size=128, epochs=1000, random_state=0))),
('nn-deep', make_pipeline(StandardScaler(),
KerasClassifier(model=nn_model, batch_size=128, epochs=1000,random_state=0)))
],
voting='hard',
n_jobs=n_jobs
)
for model in estimators:
model = model[1]
print(model)
cv_score = cross_val_score(model, X_train, y_train, n_jobs=n_jobs, verbose=0,
cv=StratifiedKFold(n_splits=5, random_state=0, shuffle=True))
print(cv_score)
print(np.mean(cv_score))
| [
"numpy.mean",
"sklearn.linear_model.RidgeClassifier",
"lightgbm.sklearn.LGBMClassifier",
"keras.layers.core.Activation",
"sklearn.ensemble.ExtraTreesClassifier",
"keras.layers.advanced_activations.PReLU",
"utils.load_target",
"sklearn.ensemble.RandomForestClassifier",
"keras.models.Sequential",
"t... | [((1170, 1203), 'utils.load_datasets', 'load_datasets', (["config['features']"], {}), "(config['features'])\n", (1183, 1203), False, 'from utils import load_datasets, load_target, save_submission\n'), ((1214, 1230), 'utils.load_target', 'load_target', (['"""Y"""'], {}), "('Y')\n", (1225, 1230), False, 'from utils import load_datasets, load_target, save_submission\n'), ((1494, 1506), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1504, 1506), False, 'from keras.models import Sequential, load_model\n'), ((1521, 1561), 'keras.layers.core.Dense', 'Dense', (['units'], {'input_shape': '(X_shape_[1],)'}), '(units, input_shape=(X_shape_[1],))\n', (1526, 1561), False, 'from keras.layers.core import Activation, Dense, Dropout\n'), ((1578, 1585), 'keras.layers.advanced_activations.PReLU', 'PReLU', ([], {}), '()\n', (1583, 1585), False, 'from keras.layers.advanced_activations import PReLU\n'), ((1601, 1621), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1619, 1621), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((1637, 1653), 'keras.layers.core.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (1644, 1653), False, 'from keras.layers.core import Activation, Dense, Dropout\n'), ((1838, 1846), 'keras.layers.core.Dense', 'Dense', (['(1)'], {}), '(1)\n', (1843, 1846), False, 'from keras.layers.core import Activation, Dense, Dropout\n'), ((1862, 1883), 'keras.layers.core.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (1872, 1883), False, 'from keras.layers.core import Activation, Dense, Dropout\n'), ((3981, 3998), 'numpy.mean', 'np.mean', (['cv_score'], {}), '(cv_score)\n', (3988, 3998), True, 'import numpy as np\n'), ((1706, 1718), 'keras.layers.core.Dense', 'Dense', (['units'], {}), '(units)\n', (1711, 1718), False, 'from keras.layers.core import Activation, Dense, Dropout\n'), ((1738, 1745), 'keras.layers.advanced_activations.PReLU', 'PReLU', ([], {}), '()\n', (1743, 
1745), False, 'from keras.layers.advanced_activations import PReLU\n'), ((1765, 1785), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1783, 1785), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((1805, 1821), 'keras.layers.core.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (1812, 1821), False, 'from keras.layers.core import Activation, Dense, Dropout\n'), ((2454, 2470), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2468, 2470), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2506, 2606), 'scikeras.wrappers.KerasClassifier', 'KerasClassifier', ([], {'model': 'nn_model', 'loss': '"""binary_crossentropy"""', 'batch_size': '(32)', 'epochs': '(100)', 'layers': '(3)'}), "(model=nn_model, loss='binary_crossentropy', batch_size=32,\n epochs=100, layers=3)\n", (2521, 2606), False, 'from scikeras.wrappers import KerasClassifier\n'), ((2671, 2687), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2685, 2687), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2720, 2821), 'scikeras.wrappers.KerasClassifier', 'KerasClassifier', ([], {'model': 'nn_model', 'loss': '"""binary_crossentropy"""', 'batch_size': '(32)', 'epochs': '(100)', 'layers': '(10)'}), "(model=nn_model, loss='binary_crossentropy', batch_size=32,\n epochs=100, layers=10)\n", (2735, 2821), False, 'from scikeras.wrappers import KerasClassifier\n'), ((3892, 3949), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(5)', 'random_state': '(0)', 'shuffle': '(True)'}), '(n_splits=5, random_state=0, shuffle=True)\n', (3907, 3949), False, 'from sklearn.model_selection import cross_val_score, StratifiedKFold\n'), ((2948, 2991), 'lightgbm.sklearn.LGBMClassifier', 'LGBMClassifier', ([], {'max_depth': '(5)', 'random_state': '(0)'}), '(max_depth=5, random_state=0)\n', (2962, 2991), False, 'from lightgbm.sklearn import LGBMClassifier\n'), 
((3018, 3061), 'lightgbm.sklearn.LGBMClassifier', 'LGBMClassifier', ([], {'max_depth': '(8)', 'random_state': '(0)'}), '(max_depth=8, random_state=0)\n', (3032, 3061), False, 'from lightgbm.sklearn import LGBMClassifier\n'), ((3086, 3130), 'lightgbm.sklearn.LGBMClassifier', 'LGBMClassifier', ([], {'max_depth': '(-1)', 'random_state': '(0)'}), '(max_depth=-1, random_state=0)\n', (3100, 3130), False, 'from lightgbm.sklearn import LGBMClassifier\n'), ((3148, 3201), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(0)', 'n_jobs': 'n_jobs'}), '(random_state=0, n_jobs=n_jobs)\n', (3170, 3201), False, 'from sklearn.ensemble import StackingClassifier, RandomForestClassifier, ExtraTreesClassifier, VotingClassifier\n'), ((3220, 3271), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'random_state': '(0)', 'n_jobs': 'n_jobs'}), '(random_state=0, n_jobs=n_jobs)\n', (3240, 3271), False, 'from sklearn.ensemble import StackingClassifier, RandomForestClassifier, ExtraTreesClassifier, VotingClassifier\n'), ((3292, 3323), 'sklearn.linear_model.RidgeClassifier', 'RidgeClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (3307, 3323), False, 'from sklearn.linear_model import RidgeClassifier\n'), ((3364, 3380), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3378, 3380), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3420, 3496), 'scikeras.wrappers.KerasClassifier', 'KerasClassifier', ([], {'model': 'nn_model', 'batch_size': '(128)', 'epochs': '(1000)', 'random_state': '(0)'}), '(model=nn_model, batch_size=128, epochs=1000, random_state=0)\n', (3435, 3496), False, 'from scikeras.wrappers import KerasClassifier\n'), ((3535, 3551), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3549, 3551), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3588, 3664), 'scikeras.wrappers.KerasClassifier', 'KerasClassifier', ([], {'model': 
'nn_model', 'batch_size': '(128)', 'epochs': '(1000)', 'random_state': '(0)'}), '(model=nn_model, batch_size=128, epochs=1000, random_state=0)\n', (3603, 3664), False, 'from scikeras.wrappers import KerasClassifier\n')] |
#________HEADER FILES_______
import tkinter
from tkinter import*
#from tkvideo import tkvideo
from tkinter import ttk
from tkinter import filedialog
from _cffi_backend import callback
from PIL import ImageTk, Image
import cv2
from cv2 import *
import numpy as np
import sys
import time
import argparse
import imutils
from pathlib import Path
#________USER-DEFINED FUNCTIONS_______
kernel_d = np.ones((3,3), np.uint8)
kernel_e = np.ones((3,3), np.uint8)
kernel_gauss = (3,3)
dilate_times = 15 #initializing_integer_variables
erode_times = 10 #initializing_integer_variables
is_blur = True #initializing_boolean_variables
is_close = True #initializing_boolean_variables
is_draw_ct = False #initializing_boolean_variables
fac = 2 #initializing_integer_variables
#______INITALIZING THE GUI WINDOW_______
window=Tk()
window.configure(background="grey64");
window.title("BoSS")
window.resizable(0,0)
window.geometry('1300x680')
#______SETTING VARIBALES TO CHECK STATE OF BUTTON (CHECKED OR UNCHECKED)_______
clicked= StringVar()
chkValue1 = BooleanVar()
chkValue2 = BooleanVar()
current_value1 = IntVar()
current_value2 = IntVar()
def get_current_value1():
return '{}'.format(current_value1.get())
def slider_changed1(event1):
value_label1.configure(text=get_current_value1())
slider_label1 = Label(window,text='k Value:',font=("Times New Roman",12),fg="black",bg="grey64").place(x=832,y=52)
slider1 = ttk.Scale(window, from_=0,to=10, orient='horizontal', command=slider_changed1, variable=current_value1).place(x=890,y=50)
value_label1 = ttk.Label(window, text=get_current_value1())
value_label1.place(x=995,y=52)
'''def get_current_value2():
return '{}'.format(current_value2.get())
def slider_changed2(event2):
value_label.configure(text=get_current_value2())
slider_label2 = Label(window,text='Parameter:',font=("Times New Roman",12),fg="black",bg="grey64").place(x=1058,y=52)
slider2 = ttk.Scale(window, from_=0,to=10, orient='horizontal', command=slider_changed2, variable=current_value2).place(x=1135,y=50)
value_label2 = ttk.Label(window, text=get_current_value2())
value_label2.place(x=1240,y=52)'''
#________CREATING BUTTONS_______
title = Label(window, text = "Border Surveillance System",font=("Times New Roman",18, 'bold'),fg="black",bg="grey64").place(x=495, y=10)
label_file_explorer = Label(window, text = "", fg = "blue")
label_file_explorer.grid(column = 1, row = 1)
#_______ADDING FUNCTIONALITES________
def browseFiles():
source_file = filedialog.askopenfilename(initialdir = "/", title = "Select a File", filetypes =[('MP4 files', '.mp4'),('All Files', '.'),('ASF files', '.asf')],parent=window)
label_file_explorer.configure(text=""+source_file)
'''video_1 = Label(window)
video_1.place(x=100,y=100)
player1 = tkvideo(str(source_file), video_1, loop = 0, size = (500,500))
player1.play()'''
def drawRectangle(frame, minus_frame):
if(is_blur):
minus_frame = GaussianBlur(minus_frame, kernel_gauss, 0)
minus_Matrix = np.float32(minus_frame)
if(is_close):
for i in range(dilate_times):
minus_Matrix = dilate(minus_Matrix, kernel_d)
for i in range(erode_times):
minus_Matrix = erode(minus_Matrix, kernel_e)
minus_Matrix = np.clip(minus_Matrix, 0, 255)
minus_Matrix = np.array(minus_Matrix, np.uint8)
contours, hierarchy = findContours(minus_Matrix.copy(), RETR_TREE, CHAIN_APPROX_SIMPLE)
for c in contours:
(x, y, w, h) = boundingRect(c)
rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
if( is_draw_ct ):
drawContours(frame, contours, -1, (0, 255, 255), 2)
imshow('result', frame)
def objdetect():
capture = VideoCapture(str(source_file));
while(1):
(ret_old, old_frame) = capture.read()
gray_oldframe = cvtColor(old_frame, COLOR_BGR2GRAY)
if(is_blur):
gray_oldframe = GaussianBlur(gray_oldframe, kernel_gauss, 0)
oldBlurMatrix = np.float32(gray_oldframe)
accumulateWeighted(gray_oldframe, oldBlurMatrix, 0.003)
while(True):
ret, frame = capture.read()
gray_frame = cvtColor(frame, COLOR_BGR2GRAY)
if(is_blur):
newBlur_frame = GaussianBlur(gray_frame, kernel_gauss, 0)
else:
newBlur_frame = gray_frame
newBlurMatrix = np.float32(newBlur_frame)
minusMatrix = absdiff(newBlurMatrix, oldBlurMatrix)
ret, minus_frame = threshold(minusMatrix, 60, 255.0, THRESH_BINARY)
accumulateWeighted(newBlurMatrix,oldBlurMatrix,0.02)
imshow('Input', frame)
drawRectangle(frame, minus_frame)
if cv2.waitKey(60) & 0xFF == ord('q'):
break
capture.release()
cv2.destroyAllWindows()
objdetect()
'''video_2 = Label(window)
video_2.place(x=650,y=100)
player2 = tkvideo(objdetect(), video_2, loop = 0, size = (500,500))
player2.play()'''
C1=Button(window,text = "Browse",font=("Times New Roman",12, 'bold'),command=browseFiles).place(x=100,y=10)
C10=Checkbutton(window,text = "Input",font=("Times New Roman",12, 'bold'), background="grey64", foreground="black", var=chkValue1, state=DISABLED).place(x=140,y=50)
C2=Button(window,text="Live Input",font=("Times New Roman",12, 'bold'),state=DISABLED).place(x=300,y=10)
C20=Checkbutton(window,text = "Output",font=("Times New Roman",12, 'bold'), background="grey64", foreground="black", var=chkValue2, state=DISABLED).place(x=260,y=50)
C3=Button(window,text = "Object Detection",font=("Times New Roman",12, 'bold')).place(x=880,y=10)
C4=Button(window,text="Turbulence Mitigation",font=("Times New Roman",12, 'bold')).place(x=1090,y=10)
#______FOOTER OF THE GUI WINDOW_______
frame=LabelFrame(window,width=1300, height=50,fg="black",bg="aqua").place(x=0,y=630)
foot=Label(frame,text = "DIR/ECS/IRDE/PROC(BRR)/20-21/018",font=("Times New Roman",11),fg="black",bg="aqua").place(x=1010,y=645)
window.mainloop()
#_______END OF PROGRAM_______
| [
"numpy.clip",
"tkinter.ttk.Scale",
"numpy.ones",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.waitKey",
"numpy.float32",
"tkinter.filedialog.askopenfilename"
] | [((398, 423), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (405, 423), True, 'import numpy as np\n'), ((434, 459), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (441, 459), True, 'import numpy as np\n'), ((2645, 2812), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'initialdir': '"""/"""', 'title': '"""Select a File"""', 'filetypes': "[('MP4 files', '.mp4'), ('All Files', '.'), ('ASF files', '.asf')]", 'parent': 'window'}), "(initialdir='/', title='Select a File', filetypes\n =[('MP4 files', '.mp4'), ('All Files', '.'), ('ASF files', '.asf')],\n parent=window)\n", (2671, 2812), False, 'from tkinter import filedialog\n'), ((1569, 1678), 'tkinter.ttk.Scale', 'ttk.Scale', (['window'], {'from_': '(0)', 'to': '(10)', 'orient': '"""horizontal"""', 'command': 'slider_changed1', 'variable': 'current_value1'}), "(window, from_=0, to=10, orient='horizontal', command=\n slider_changed1, variable=current_value1)\n", (1578, 1678), False, 'from tkinter import ttk\n'), ((3197, 3220), 'numpy.float32', 'np.float32', (['minus_frame'], {}), '(minus_frame)\n', (3207, 3220), True, 'import numpy as np\n'), ((3563, 3592), 'numpy.clip', 'np.clip', (['minus_Matrix', '(0)', '(255)'], {}), '(minus_Matrix, 0, 255)\n', (3570, 3592), True, 'import numpy as np\n'), ((3620, 3652), 'numpy.array', 'np.array', (['minus_Matrix', 'np.uint8'], {}), '(minus_Matrix, np.uint8)\n', (3628, 3652), True, 'import numpy as np\n'), ((4479, 4504), 'numpy.float32', 'np.float32', (['gray_oldframe'], {}), '(gray_oldframe)\n', (4489, 4504), True, 'import numpy as np\n'), ((5641, 5664), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5662, 5664), False, 'import cv2\n'), ((5048, 5073), 'numpy.float32', 'np.float32', (['newBlur_frame'], {}), '(newBlur_frame)\n', (5058, 5073), True, 'import numpy as np\n'), ((5504, 5519), 'cv2.waitKey', 'cv2.waitKey', (['(60)'], {}), '(60)\n', (5515, 5519), False, 
'import cv2\n')] |
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.minimum_spanning_tree.html
# Euclidean distance
def dist(p1, p2):
return np.sqrt(sum([(a - b) ** 2 for a, b in zip(p1, p2)]))
# liste of points
dico = {
1:(0, 0, 0),
2:(0, -1, 0),
3:(0, 1, 1),
4:(5, 1, 1),
5:(6, 0, 1),
6:(6, 1, 1)
}
# upper triangular matrix containing the distance between each point
mat = {i:[dist(dico[i], p2) for p2 in list(dico.values())] for i in list(dico)}
df = pd.DataFrame(mat)
df.values[np.tril_indices_from(df, 0)] = 0
df.index = list(dico)
print('Distance matrix between each couple of points')
print(df)
# conversion into sparse matrix and scipy csgraph MST algo
X = csr_matrix(np.array(df))
Tcsr = minimum_spanning_tree(X)
# result into df
res = pd.DataFrame(Tcsr.toarray().astype(float))
res.columns = list(dico)
res.index = list(dico)
print('\nMST matrix')
print(res)
# Display of each couple of points
list_cpl = []
for row in list(res.index):
for col in list(res):
if res.loc[row][col] != 0:
list_cpl.append((row, col))
print('\nOptimal couples')
print(list_cpl)
| [
"pandas.DataFrame",
"numpy.array",
"numpy.tril_indices_from",
"scipy.sparse.csgraph.minimum_spanning_tree"
] | [((625, 642), 'pandas.DataFrame', 'pd.DataFrame', (['mat'], {}), '(mat)\n', (637, 642), True, 'import pandas as pd\n'), ((869, 893), 'scipy.sparse.csgraph.minimum_spanning_tree', 'minimum_spanning_tree', (['X'], {}), '(X)\n', (890, 893), False, 'from scipy.sparse.csgraph import minimum_spanning_tree\n'), ((653, 680), 'numpy.tril_indices_from', 'np.tril_indices_from', (['df', '(0)'], {}), '(df, 0)\n', (673, 680), True, 'import numpy as np\n'), ((848, 860), 'numpy.array', 'np.array', (['df'], {}), '(df)\n', (856, 860), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 17 13:38:07 2021
@author: zayn
"""
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 17 12:07:50 2021
@author: zayn
"""
"""
Linear regression implementation.
"""
import numpy as np
import matplotlib.pyplot as plt
class lgreg:
""" implementation of multi variable Logistic Regression,
with results derived using steepest descent
"""
def __init__(self, training_data=[], use_norm=True):
"""Create a logistic regression classifier.
:param training_data: training feature data.
:param use_norm: Whether to use normalizing when calculating linear regression.
"""
if use_norm:
self.normed_training_data, self.mean, self.std = self.featureNormalize(training_data)
else:
self.normed_training_data= training_data
self.mean=[]
self.std=[]
self.use_norm=use_norm
def plotData(self, X, y, xlabel='xlabel', ylabel='ylabel'):
Xpos=X[y>0.5]
Xneg=X[y<0.5]
fig1, ax1 = plt.subplots()
ax1.plot(Xpos[:,0],Xpos[:,1],'k+', label='Positive')
ax1.plot(Xneg[:,0],Xneg[:,1],'yo', label='Negetive')
ax1.set_xlabel(xlabel)
ax1.set_ylabel(ylabel)
handles, labels = ax1.get_legend_handles_labels()
ax1.legend(handles[::-1], labels[::-1])
return ax1
def sigmoid(self, X):
gden=1+np.exp(-X)
z=1.0/gden
return z
def CostFunction(self, X, y, theta, rlambda=0):
m=len(y)
preiction=X@theta
sig_preiction=self.sigmoid(preiction)
err=y*np.log(sig_preiction)+(1-y)*np.log(1-sig_preiction)
J1=-sum(err)/m
reg_lambda=rlambda*sum(theta[1:]**2)/2
J2=reg_lambda/m
J=J1+J2
grad=np.zeros(theta.shape)
grad[0]=sum(sig_preiction-y)/m
Xm=X[:,1:];
grad[1:]=Xm.T@(sig_preiction-y)/m+ rlambda/m*theta[1:]
return J, grad
def gradientDescent(self, X, y, theta, alpha, num_iters, rlambda=0):
# Initialize cost function history
J_history=np.zeros(num_iters)
for iter in range(num_iters):
J_history[iter], grad=self.CostFunction(X, y, theta, rlambda) #Save the cost J in every iteration
theta=theta-alpha*grad
return theta, J_history
def featureNormalize(self, X):
X=np.asarray(X)
XT=X.T;
XnT=np.zeros(XT.shape)
xmeanv=np.zeros([XT.shape[0],1])
xstdv=np.zeros([XT.shape[0],1])
for ii in range(XT.shape[0]):
xmean=np.mean(XT[ii,:]);
xstd=np.std(XT[ii,:])
Xub=XT[ii,:]-xmean;
XnT[ii,:]=Xub/xstd
xmeanv[ii,0]=xmean
xstdv[ii,0]=xstd
Xn=XnT.T
return Xn, xmeanv, xstdv
def predict(self, X, theta, softp=False):
preiction=X@theta
sig_preiction=self.sigmoid(preiction)
predico=sig_preiction if softp else np.round(sig_preiction)
return predico
def mapFeature(self, X1, X2):
m=len(X1)
degree=6
num_par=sum(range(degree+2))
out=np.zeros([m,num_par])
indx=0
for ii in range(degree+1):
for jj in range(ii+1):
out[:, indx] = (X1**(ii-jj))*(X2**jj)
indx+=1
return out
| [
"numpy.mean",
"numpy.log",
"numpy.asarray",
"numpy.exp",
"numpy.zeros",
"numpy.std",
"matplotlib.pyplot.subplots",
"numpy.round"
] | [((1122, 1136), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1134, 1136), True, 'import matplotlib.pyplot as plt\n'), ((1918, 1939), 'numpy.zeros', 'np.zeros', (['theta.shape'], {}), '(theta.shape)\n', (1926, 1939), True, 'import numpy as np\n'), ((2243, 2262), 'numpy.zeros', 'np.zeros', (['num_iters'], {}), '(num_iters)\n', (2251, 2262), True, 'import numpy as np\n'), ((2542, 2555), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (2552, 2555), True, 'import numpy as np\n'), ((2586, 2604), 'numpy.zeros', 'np.zeros', (['XT.shape'], {}), '(XT.shape)\n', (2594, 2604), True, 'import numpy as np\n'), ((2621, 2647), 'numpy.zeros', 'np.zeros', (['[XT.shape[0], 1]'], {}), '([XT.shape[0], 1])\n', (2629, 2647), True, 'import numpy as np\n'), ((2662, 2688), 'numpy.zeros', 'np.zeros', (['[XT.shape[0], 1]'], {}), '([XT.shape[0], 1])\n', (2670, 2688), True, 'import numpy as np\n'), ((3330, 3352), 'numpy.zeros', 'np.zeros', (['[m, num_par]'], {}), '([m, num_par])\n', (3338, 3352), True, 'import numpy as np\n'), ((1506, 1516), 'numpy.exp', 'np.exp', (['(-X)'], {}), '(-X)\n', (1512, 1516), True, 'import numpy as np\n'), ((2746, 2764), 'numpy.mean', 'np.mean', (['XT[ii, :]'], {}), '(XT[ii, :])\n', (2753, 2764), True, 'import numpy as np\n'), ((2783, 2800), 'numpy.std', 'np.std', (['XT[ii, :]'], {}), '(XT[ii, :])\n', (2789, 2800), True, 'import numpy as np\n'), ((3150, 3173), 'numpy.round', 'np.round', (['sig_preiction'], {}), '(sig_preiction)\n', (3158, 3173), True, 'import numpy as np\n'), ((1736, 1757), 'numpy.log', 'np.log', (['sig_preiction'], {}), '(sig_preiction)\n', (1742, 1757), True, 'import numpy as np\n'), ((1764, 1789), 'numpy.log', 'np.log', (['(1 - sig_preiction)'], {}), '(1 - sig_preiction)\n', (1770, 1789), True, 'import numpy as np\n')] |
import numpy as np
import deerlab as dl
#---------------------------------------------------------------------------------------
def assert_bgmodel(model,Bref):
"Check the correct behaviour of the core functionality of a background model"
t = np.linspace(-5,5,500)
# Extract model information
meta = model.getmetadata()
par0 = meta['par0']
lower = meta['lb']
upper = meta['ub']
paramnames = meta['names']
units = meta['units']
# Calculate under different conditions
B1 = model(t,*par0)
B2 = model(t.T,*par0)
B3 = model(t,*lower)
B4 = model(t,*upper)
B5 = model(2.5,*par0)
# Assert
assert all(B1 == B2)
assert all(~np.isnan(B1)) and all(~np.isnan(B2)) and all(~np.isnan(B3)) and all(~np.isnan(B4))
assert abs(B5 - Bref) < 1e-8
assert len(paramnames) == len(par0) and len(units) == len(par0)
#---------------------------------------------------------------------------------------
def test_bg_hom3d():
Bref = 0.882785339742350 # Reference from DeerLab 0.9.2 on MATLAB
assert_bgmodel(dl.bg_hom3d,Bref)
def test_bg_homfractal():
Bref = 0.882785339742350 # Reference from DeerLab 0.9.2 on MATLAB
assert_bgmodel(dl.bg_homfractal,Bref)
def test_bg_hom3dex():
Bref = 0.882896490000000 # Reference from DeerLab 0.9.2 on MATLAB
assert_bgmodel(dl.bg_hom3dex,Bref)
def test_bg_exp():
Bref = 0.416862019678508 # Reference from DeerLab 0.9.2 on MATLAB
assert_bgmodel(dl.bg_exp,Bref)
def test_bg_strexp():
Bref = 0.535261428518990 # Reference from DeerLab 0.9.2 on MATLAB
assert_bgmodel(dl.bg_strexp,Bref)
def test_bg_prodstrexp():
Bref = 0.286504796860190 # Reference from DeerLab 0.9.2 on MATLAB
assert_bgmodel(dl.bg_prodstrexp,Bref)
def test_bg_sumstrexp():
Bref = 0.535261428518990 # Reference from DeerLab 0.9.2 on MATLAB
assert_bgmodel(dl.bg_sumstrexp,Bref)
def test_bg_poly1():
Bref = -1.500000000000000 # Reference from DeerLab 0.9.2 on MATLAB
assert_bgmodel(dl.bg_poly1,Bref)
def test_bg_poly2():
Bref = -7.750000000000000 # Reference from DeerLab 0.9.2 on MATLAB
assert_bgmodel(dl.bg_poly2,Bref)
def test_bg_poly3():
Bref = -23.37500000000000 # Reference from DeerLab 0.9.2 on MATLAB
assert_bgmodel(dl.bg_poly3,Bref)
| [
"numpy.linspace",
"numpy.isnan"
] | [((263, 286), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(500)'], {}), '(-5, 5, 500)\n', (274, 286), True, 'import numpy as np\n'), ((724, 736), 'numpy.isnan', 'np.isnan', (['B1'], {}), '(B1)\n', (732, 736), True, 'import numpy as np\n'), ((747, 759), 'numpy.isnan', 'np.isnan', (['B2'], {}), '(B2)\n', (755, 759), True, 'import numpy as np\n'), ((770, 782), 'numpy.isnan', 'np.isnan', (['B3'], {}), '(B3)\n', (778, 782), True, 'import numpy as np\n'), ((793, 805), 'numpy.isnan', 'np.isnan', (['B4'], {}), '(B4)\n', (801, 805), True, 'import numpy as np\n')] |
import numpy as np
# from scp import SCP
from main_komo import run_komo_standalone
from utils_motion_primitives import sort_primitives, visualize_motion, plot_stats
import robots
import yaml
import msgpack
import multiprocessing as mp
import tqdm
import itertools
import argparse
import subprocess
import tempfile
from pathlib import Path
import psutil
import checker
import time
import sys, os
sys.path.append(os.getcwd())
def gen_motion(robot_type, start, goal, is2D, cfg):
dbg = False
with tempfile.TemporaryDirectory() as tmpdirname:
if dbg:
p = Path("../results/test")
else:
p = Path(tmpdirname)
env = {
"environment":{
"min": [-10, -10],
"max": [10, 10],
"obstacles": []
},
"robots": [{
"type": robot_type,
"start": list(start),
"goal": list(goal),
}]
}
if not is2D:
env["environment"]["min"].append(-10)
env["environment"]["max"].append(10)
filename_env = str(p / "env.yaml")
with open(filename_env, 'w') as f:
yaml.dump(env, f, Dumper=yaml.CSafeDumper)
filename_result = p / "result_komo.yaml"
# success = run_komo_standalone(filename_env, str(p), 120, "", search="linear", initialguess="none")
# use_T = np.random.randint(20, 100)
# success = run_komo_standalone(filename_env, str(p), 5 * 60, "soft_goal: 1", search="none", initialguess="none", use_T=use_T)
success = run_komo_standalone(filename_env, str(p), cfg['timelimit'], cfg['rai_cfg'], cfg['search'], initialguess="none", T_range_abs=[0, 200])
# print("SDF", success)
# if success:
# print("PPPPSDF")
# # read the result
# with open(filename_result) as f:
# result = yaml.load(f, Loader=yaml.CSafeLoader)
# xf = result["result"][0]["states"][-1]
# # update env
# env["robots"][0]["goal"] = xf
# with open(filename_env, 'w') as f:
# yaml.dump(env, f, Dumper=yaml.CSafeDumper)
# # try to find a solution with lower T
# success = run_komo_standalone(filename_env, str(p), 5 * 60, "", search="linearReverse", initialguess="none", T_range_abs=[1, use_T-1])
# else:
# return []
if not success:
return []
# checker.check(str(filename_env), str(filename_result))
if dbg:
subprocess.run(["python3",
"../benchmark/{}/visualize.py".format(robot_type),
str(filename_env),
"--result", str(filename_result),
"--video", str(filename_result.with_suffix(".mp4"))])
# read the result
with open(filename_result) as f:
result = yaml.load(f, Loader=yaml.CSafeLoader)
states = np.array(result["result"][0]["states"])
actions = np.array(result["result"][0]["actions"])
eucledian_distance = 0
split = [0]
for k in range(1, len(states)):
if is2D:
eucledian_distance += np.linalg.norm(states[k-1][0:2] - states[k][0:2])
else:
eucledian_distance += np.linalg.norm(states[k-1][0:3] - states[k][0:3])
if eucledian_distance >= 0.5:
split.append(k)
eucledian_distance = 0
# include last segment, if it not very short
if len(states) - split[-1] > 5:
split.append(len(states)-1)
# create motions
motions = []
for idx in range(1, len(split)):
start_k = split[idx-1]
k = split[idx]
# shift states
if is2D:
states[start_k:, 0:2] -= states[start_k, 0:2]
else:
states[start_k:, 0:3] -= states[start_k, 0:3]
# create motion
motion = dict()
motion['x0'] = states[start_k].tolist()
motion['xf'] = states[k].tolist()
motion['states'] = states[start_k:k+1].tolist()
motion['actions'] = actions[start_k:k].tolist()
motion['T'] = k-start_k
motions.append(motion)
# use the following break to only create the first motion
# this will create a nicer (uniform) distribution, but take
# much longer
# break
return motions
def gen_random_motion(robot_type):
# NOTE: It is *very* important to keep this as a local import, otherwise
# random numbers may repeat, when using multiprocessing
from motionplanningutils import RobotHelper
# load tuning settings for this case
tuning_path = Path("../tuning")
cfg = tuning_path / robot_type / "algorithms.yaml"
assert(cfg.is_file())
with open(cfg) as f:
cfg = yaml.safe_load(f)
# find cfg
mycfg = cfg['gen-motion']
rh = RobotHelper(robot_type, mycfg["env_limit"])
start = rh.sampleUniform()
# shift to center (at 0,0)
start[0] = 0
start[1] = 0
if not rh.is2D():
start[2] = 0
# if "quadrotor" in robot_type:
# goal = [0,0,0, 0,0,0,1, 0,0,0, 0,0,0]
# else:
# goal = rh.sampleUniform()
goal = rh.sampleUniform()
# print(start, goal)
# exit()
motions = gen_motion(robot_type, start, goal, rh.is2D(), mycfg)
for motion in motions:
motion['distance'] = rh.distance(motion['x0'], motion['xf'])
return motions
def main():
parser = argparse.ArgumentParser()
parser.add_argument("robot_type", help="name of robot type to generate motions for")
parser.add_argument("--N", help="number of motions", default=100, type=int)
args = parser.parse_args()
# rh = RobotHelper(args.robot_type)
motions = []
tasks = itertools.repeat(args.robot_type, args.N)
tmp_path = Path("../results/tmp/motions/{}".format(args.robot_type))
tmp_path.mkdir(parents=True, exist_ok=True)
def add_motions(additional_motions):
if len(additional_motions) > 0:
motions.extend(additional_motions)
print("Generated {} motions".format(len(motions)), flush=True)
# Store intermediate results, in case we need to interupt the generation
i = 0
while True:
p = tmp_path / "{}.yaml".format(i)
if not p.exists():
with open(p, 'w') as f:
yaml.dump(additional_motions, f)
break
i = i + 1
# if args.N <= 10:
if False:
while len(motions) < args.N:
multiple_motions = gen_random_motion(args.robot_type)
motions.extend(multiple_motions)
else:
# mp.set_start_method('spawn')
use_cpus = psutil.cpu_count(logical=False)
async_results = []
with mp.Pool(use_cpus) as p:
while len(motions) < args.N:
# clean up async_results
async_results = [x for x in async_results if not x.ready()]
# run some more workers
while len(async_results) < use_cpus:
ar = p.apply_async(gen_random_motion, (args.robot_type,), callback=add_motions)
async_results.append(ar)
time.sleep(1)
p.terminate()
for k, motion in enumerate(motions):
motion['name'] = 'm{}'.format(k)
out_path = Path("../cloud/motions")
out_path.mkdir(parents=True, exist_ok=True)
# with open(out_path / "{}.yaml".format(args.robot_type), 'w') as file:
# yaml.dump(motions, file, Dumper=yaml.CSafeDumper)
# now sort the primitives
sorted_motions = sort_primitives(motions, args.robot_type)
# with open(out_path / "{}_sorted.yaml".format(args.robot_type), 'w') as file:
# yaml.dump(sorted_motions, file, Dumper=yaml.CSafeDumper)
with open(out_path / "{}_sorted.msgpack".format(args.robot_type), 'wb') as file:
msgpack.pack(sorted_motions, file)
# visualize the top 100
for k, m in enumerate(sorted_motions[0:10]):
visualize_motion(m, args.robot_type, tmp_path / "top_{}.mp4".format(k))
# plot statistics
plot_stats(sorted_motions, args.robot_type, tmp_path / "stats.pdf")
if __name__ == '__main__':
main()
| [
"tempfile.TemporaryDirectory",
"utils_motion_primitives.sort_primitives",
"msgpack.pack",
"argparse.ArgumentParser",
"pathlib.Path",
"yaml.dump",
"utils_motion_primitives.plot_stats",
"yaml.load",
"numpy.linalg.norm",
"os.getcwd",
"time.sleep",
"numpy.array",
"yaml.safe_load",
"psutil.cpu_... | [((413, 424), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (422, 424), False, 'import sys, os\n'), ((4006, 4023), 'pathlib.Path', 'Path', (['"""../tuning"""'], {}), "('../tuning')\n", (4010, 4023), False, 'from pathlib import Path\n'), ((4196, 4239), 'motionplanningutils.RobotHelper', 'RobotHelper', (['robot_type', "mycfg['env_limit']"], {}), "(robot_type, mycfg['env_limit'])\n", (4207, 4239), False, 'from motionplanningutils import RobotHelper\n'), ((4724, 4749), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4747, 4749), False, 'import argparse\n'), ((5003, 5044), 'itertools.repeat', 'itertools.repeat', (['args.robot_type', 'args.N'], {}), '(args.robot_type, args.N)\n', (5019, 5044), False, 'import itertools\n'), ((6317, 6341), 'pathlib.Path', 'Path', (['"""../cloud/motions"""'], {}), "('../cloud/motions')\n", (6321, 6341), False, 'from pathlib import Path\n'), ((6561, 6602), 'utils_motion_primitives.sort_primitives', 'sort_primitives', (['motions', 'args.robot_type'], {}), '(motions, args.robot_type)\n', (6576, 6602), False, 'from utils_motion_primitives import sort_primitives, visualize_motion, plot_stats\n'), ((7030, 7097), 'utils_motion_primitives.plot_stats', 'plot_stats', (['sorted_motions', 'args.robot_type', "(tmp_path / 'stats.pdf')"], {}), "(sorted_motions, args.robot_type, tmp_path / 'stats.pdf')\n", (7040, 7097), False, 'from utils_motion_primitives import sort_primitives, visualize_motion, plot_stats\n'), ((499, 528), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (526, 528), False, 'import tempfile\n'), ((2503, 2542), 'numpy.array', 'np.array', (["result['result'][0]['states']"], {}), "(result['result'][0]['states'])\n", (2511, 2542), True, 'import numpy as np\n'), ((2555, 2595), 'numpy.array', 'np.array', (["result['result'][0]['actions']"], {}), "(result['result'][0]['actions'])\n", (2563, 2595), True, 'import numpy as np\n'), ((4131, 4148), 'yaml.safe_load', 
'yaml.safe_load', (['f'], {}), '(f)\n', (4145, 4148), False, 'import yaml\n'), ((5802, 5833), 'psutil.cpu_count', 'psutil.cpu_count', ([], {'logical': '(False)'}), '(logical=False)\n', (5818, 5833), False, 'import psutil\n'), ((6828, 6862), 'msgpack.pack', 'msgpack.pack', (['sorted_motions', 'file'], {}), '(sorted_motions, file)\n', (6840, 6862), False, 'import msgpack\n'), ((561, 584), 'pathlib.Path', 'Path', (['"""../results/test"""'], {}), "('../results/test')\n", (565, 584), False, 'from pathlib import Path\n'), ((600, 616), 'pathlib.Path', 'Path', (['tmpdirname'], {}), '(tmpdirname)\n', (604, 616), False, 'from pathlib import Path\n'), ((990, 1032), 'yaml.dump', 'yaml.dump', (['env', 'f'], {'Dumper': 'yaml.CSafeDumper'}), '(env, f, Dumper=yaml.CSafeDumper)\n', (999, 1032), False, 'import yaml\n'), ((2453, 2490), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.CSafeLoader'}), '(f, Loader=yaml.CSafeLoader)\n', (2462, 2490), False, 'import yaml\n'), ((5862, 5879), 'multiprocessing.Pool', 'mp.Pool', (['use_cpus'], {}), '(use_cpus)\n', (5869, 5879), True, 'import multiprocessing as mp\n'), ((2708, 2759), 'numpy.linalg.norm', 'np.linalg.norm', (['(states[k - 1][0:2] - states[k][0:2])'], {}), '(states[k - 1][0:2] - states[k][0:2])\n', (2722, 2759), True, 'import numpy as np\n'), ((2793, 2844), 'numpy.linalg.norm', 'np.linalg.norm', (['(states[k - 1][0:3] - states[k][0:3])'], {}), '(states[k - 1][0:3] - states[k][0:3])\n', (2807, 2844), True, 'import numpy as np\n'), ((6199, 6212), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6209, 6212), False, 'import time\n'), ((5535, 5567), 'yaml.dump', 'yaml.dump', (['additional_motions', 'f'], {}), '(additional_motions, f)\n', (5544, 5567), False, 'import yaml\n')] |
""" Unit tests for the finite_difference module
"""
import unittest
import numpy as np
from scipy.optimize import rosen, rosen_der, rosen_hess
from polynomials_on_simplices.calculus.finite_difference import (
central_difference, central_difference_jacobian, forward_difference, forward_difference_jacobian,
second_central_difference, second_forward_difference)
def is_equal(array1, array2):
""" Check if two numpy arrays are approximately equal
"""
try:
np.testing.assert_allclose(array1, array2, atol=1e-4, rtol=1e-4)
except AssertionError as ae:
print(ae)
return False
return True
class TestRosenbrockCD(unittest.TestCase):
def test1(self):
x = np.zeros(100)
gradient = rosen_der(x)
fd_gradient = central_difference(rosen, x)
self.assertTrue(is_equal(gradient, fd_gradient))
hessian = rosen_hess(x)
fd_hessian = second_central_difference(rosen, x)
self.assertTrue(np.allclose(hessian, fd_hessian, rtol=1e-5, atol=1e-4))
def test2(self):
x = np.ones(100)
gradient = rosen_der(x)
fd_gradient = central_difference(rosen, x)
self.assertTrue(is_equal(gradient, fd_gradient))
hessian = rosen_hess(x)
fd_hessian = second_central_difference(rosen, x)
self.assertTrue(np.allclose(hessian, fd_hessian, rtol=1e-5, atol=1e-4))
def test3(self):
x = np.random.rand(100)
gradient = rosen_der(x)
fd_gradient = central_difference(rosen, x)
self.assertTrue(is_equal(gradient, fd_gradient))
hessian = rosen_hess(x)
fd_hessian = second_central_difference(rosen, x)
self.assertTrue(np.allclose(hessian, fd_hessian, rtol=1e-5, atol=1e-2))
class TestRosenbrockFD(unittest.TestCase):
def test1(self):
x = np.zeros(100)
gradient = rosen_der(x)
fd_gradient = forward_difference(rosen, x)
self.assertTrue(is_equal(gradient, fd_gradient))
hessian = rosen_hess(x)
fd_hessian = second_forward_difference(rosen, x)
self.assertTrue(np.allclose(hessian, fd_hessian, rtol=1e-5, atol=1e-2))
def test2(self):
x = np.ones(100)
gradient = rosen_der(x)
fd_gradient = forward_difference(rosen, x)
self.assertTrue(is_equal(gradient, fd_gradient))
hessian = rosen_hess(x)
fd_hessian = second_forward_difference(rosen, x)
self.assertTrue(np.allclose(hessian, fd_hessian, rtol=1e-5, atol=1e-1))
def test3(self):
x = np.random.rand(100)
gradient = rosen_der(x)
fd_gradient = forward_difference(rosen, x)
self.assertTrue(is_equal(gradient, fd_gradient))
hessian = rosen_hess(x)
fd_hessian = second_forward_difference(rosen, x)
self.assertTrue(np.allclose(hessian, fd_hessian, rtol=1e-5, atol=1e-1))
class Test1D(unittest.TestCase):
def test_sin(self):
f = np.sin
x = np.random.rand()
d = forward_difference(f, x)
self.assertTrue(np.abs(d - np.cos(x)) < 1e-6)
d = central_difference(f, x)
self.assertTrue(np.abs(d - np.cos(x)) < 1e-6)
d2 = second_forward_difference(f, x)
self.assertTrue(np.abs(d2 - (-np.sin(x))) < 1e-4)
d2 = second_central_difference(f, x)
self.assertTrue(np.abs(d2 - (-np.sin(x))) < 1e-5)
class TestJacobian(unittest.TestCase):
def test_fd_1(self):
# f : R^2 -> R^2
def f(x):
return np.array([x[0]**2 * x[1], 5 * x[0] + np.sin(x[1])])
p = np.random.rand(2)
j_expected = np.array([
[2 * p[0] * p[1], p[0]**2],
[5, np.cos(p[1])]
])
j_fd = forward_difference_jacobian(f, 2, p)
assert j_fd.shape == (2, 2)
self.assertTrue(np.allclose(j_expected, j_fd))
def test_fd_2(self):
# f : R^3 -> R^4
def f(x):
return np.array([
x[0],
5 * x[2],
4 * x[1]**2 - 2 * x[2],
x[2] * np.sin(x[0])
])
p = np.random.rand(3)
j_expected = np.array([
[1.0, 0.0, 0.0],
[0.0, 0.0, 5.0],
[0.0, 8.0 * p[1], -2.0],
[p[2] * np.cos(p[0]), 0.0, np.sin(p[0])]
])
j_fd = forward_difference_jacobian(f, 4, p)
assert j_fd.shape == (4, 3)
self.assertTrue(np.allclose(j_expected, j_fd))
def test_fd_3(self):
# The Jacobian matrix should still be a matrix, even in the special case where the function is univariate
# f : R -> R^2
def f(x):
return np.array([x, x**2])
p = np.random.rand()
j_expected = np.array([[1], [2 * p]])
j_fd = forward_difference_jacobian(f, 2, p)
assert j_fd.shape == (2, 1)
self.assertTrue(np.allclose(j_expected, j_fd))
def test_fd_4(self):
# The Jacobian matrix should still be a matrix, even in the special case where the function is scalar valued
# f : R^2 -> R
def f(x):
return x[0] * x[1]**2
p = np.random.rand(2)
j_expected = np.array([[p[1]**2, 2 * p[0] * p[1]]])
j_fd = forward_difference_jacobian(f, 1, p)
assert j_fd.shape == (1, 2)
self.assertTrue(np.allclose(j_expected, j_fd))
def test_cd_1(self):
# f : R^2 -> R^2
def f(x):
return np.array([x[0]**2 * x[1], 5 * x[0] + np.sin(x[1])])
p = np.random.rand(2)
j_expected = np.array([
[2 * p[0] * p[1], p[0]**2],
[5, np.cos(p[1])]
])
j_fd = central_difference_jacobian(f, 2, p)
assert j_fd.shape == (2, 2)
self.assertTrue(np.allclose(j_expected, j_fd))
def test_cd_2(self):
# f : R^3 -> R^4
def f(x):
return np.array([
x[0],
5 * x[2],
4 * x[1]**2 - 2 * x[2],
x[2] * np.sin(x[0])
])
p = np.random.rand(3)
j_expected = np.array([
[1.0, 0.0, 0.0],
[0.0, 0.0, 5.0],
[0.0, 8.0 * p[1], -2.0],
[p[2] * np.cos(p[0]), 0.0, np.sin(p[0])]
])
j_fd = central_difference_jacobian(f, 4, p)
assert j_fd.shape == (4, 3)
self.assertTrue(np.allclose(j_expected, j_fd))
def test_cd_3(self):
# The Jacobian matrix should still be a matrix, even in the special case where the function is univariate
# f : R -> R^2
def f(x):
return np.array([x, x**2])
p = np.random.rand()
j_expected = np.array([[1], [2 * p]])
j_fd = central_difference_jacobian(f, 2, p)
assert j_fd.shape == (2, 1)
self.assertTrue(np.allclose(j_expected, j_fd))
def test_cd_4(self):
# The Jacobian matrix should still be a matrix, even in the special case where the function is scalar valued
# f : R^2 -> R
def f(x):
return x[0] * x[1]**2
p = np.random.rand(2)
j_expected = np.array([[p[1]**2, 2 * p[0] * p[1]]])
j_fd = central_difference_jacobian(f, 1, p)
assert j_fd.shape == (1, 2)
self.assertTrue(np.allclose(j_expected, j_fd))
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| [
"polynomials_on_simplices.calculus.finite_difference.forward_difference_jacobian",
"numpy.allclose",
"numpy.ones",
"numpy.random.rand",
"numpy.sin",
"numpy.testing.assert_allclose",
"scipy.optimize.rosen_der",
"polynomials_on_simplices.calculus.finite_difference.second_central_difference",
"polynomi... | [((7313, 7328), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7326, 7328), False, 'import unittest\n'), ((487, 555), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['array1', 'array2'], {'atol': '(0.0001)', 'rtol': '(0.0001)'}), '(array1, array2, atol=0.0001, rtol=0.0001)\n', (513, 555), True, 'import numpy as np\n'), ((718, 731), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (726, 731), True, 'import numpy as np\n'), ((751, 763), 'scipy.optimize.rosen_der', 'rosen_der', (['x'], {}), '(x)\n', (760, 763), False, 'from scipy.optimize import rosen, rosen_der, rosen_hess\n'), ((786, 814), 'polynomials_on_simplices.calculus.finite_difference.central_difference', 'central_difference', (['rosen', 'x'], {}), '(rosen, x)\n', (804, 814), False, 'from polynomials_on_simplices.calculus.finite_difference import central_difference, central_difference_jacobian, forward_difference, forward_difference_jacobian, second_central_difference, second_forward_difference\n'), ((891, 904), 'scipy.optimize.rosen_hess', 'rosen_hess', (['x'], {}), '(x)\n', (901, 904), False, 'from scipy.optimize import rosen, rosen_der, rosen_hess\n'), ((926, 961), 'polynomials_on_simplices.calculus.finite_difference.second_central_difference', 'second_central_difference', (['rosen', 'x'], {}), '(rosen, x)\n', (951, 961), False, 'from polynomials_on_simplices.calculus.finite_difference import central_difference, central_difference_jacobian, forward_difference, forward_difference_jacobian, second_central_difference, second_forward_difference\n'), ((1076, 1088), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (1083, 1088), True, 'import numpy as np\n'), ((1108, 1120), 'scipy.optimize.rosen_der', 'rosen_der', (['x'], {}), '(x)\n', (1117, 1120), False, 'from scipy.optimize import rosen, rosen_der, rosen_hess\n'), ((1143, 1171), 'polynomials_on_simplices.calculus.finite_difference.central_difference', 'central_difference', (['rosen', 'x'], {}), '(rosen, x)\n', 
(1161, 1171), False, 'from polynomials_on_simplices.calculus.finite_difference import central_difference, central_difference_jacobian, forward_difference, forward_difference_jacobian, second_central_difference, second_forward_difference\n'), ((1248, 1261), 'scipy.optimize.rosen_hess', 'rosen_hess', (['x'], {}), '(x)\n', (1258, 1261), False, 'from scipy.optimize import rosen, rosen_der, rosen_hess\n'), ((1283, 1318), 'polynomials_on_simplices.calculus.finite_difference.second_central_difference', 'second_central_difference', (['rosen', 'x'], {}), '(rosen, x)\n', (1308, 1318), False, 'from polynomials_on_simplices.calculus.finite_difference import central_difference, central_difference_jacobian, forward_difference, forward_difference_jacobian, second_central_difference, second_forward_difference\n'), ((1433, 1452), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (1447, 1452), True, 'import numpy as np\n'), ((1472, 1484), 'scipy.optimize.rosen_der', 'rosen_der', (['x'], {}), '(x)\n', (1481, 1484), False, 'from scipy.optimize import rosen, rosen_der, rosen_hess\n'), ((1507, 1535), 'polynomials_on_simplices.calculus.finite_difference.central_difference', 'central_difference', (['rosen', 'x'], {}), '(rosen, x)\n', (1525, 1535), False, 'from polynomials_on_simplices.calculus.finite_difference import central_difference, central_difference_jacobian, forward_difference, forward_difference_jacobian, second_central_difference, second_forward_difference\n'), ((1612, 1625), 'scipy.optimize.rosen_hess', 'rosen_hess', (['x'], {}), '(x)\n', (1622, 1625), False, 'from scipy.optimize import rosen, rosen_der, rosen_hess\n'), ((1647, 1682), 'polynomials_on_simplices.calculus.finite_difference.second_central_difference', 'second_central_difference', (['rosen', 'x'], {}), '(rosen, x)\n', (1672, 1682), False, 'from polynomials_on_simplices.calculus.finite_difference import central_difference, central_difference_jacobian, forward_difference, forward_difference_jacobian, 
second_central_difference, second_forward_difference\n'), ((1841, 1854), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (1849, 1854), True, 'import numpy as np\n'), ((1874, 1886), 'scipy.optimize.rosen_der', 'rosen_der', (['x'], {}), '(x)\n', (1883, 1886), False, 'from scipy.optimize import rosen, rosen_der, rosen_hess\n'), ((1909, 1937), 'polynomials_on_simplices.calculus.finite_difference.forward_difference', 'forward_difference', (['rosen', 'x'], {}), '(rosen, x)\n', (1927, 1937), False, 'from polynomials_on_simplices.calculus.finite_difference import central_difference, central_difference_jacobian, forward_difference, forward_difference_jacobian, second_central_difference, second_forward_difference\n'), ((2014, 2027), 'scipy.optimize.rosen_hess', 'rosen_hess', (['x'], {}), '(x)\n', (2024, 2027), False, 'from scipy.optimize import rosen, rosen_der, rosen_hess\n'), ((2049, 2084), 'polynomials_on_simplices.calculus.finite_difference.second_forward_difference', 'second_forward_difference', (['rosen', 'x'], {}), '(rosen, x)\n', (2074, 2084), False, 'from polynomials_on_simplices.calculus.finite_difference import central_difference, central_difference_jacobian, forward_difference, forward_difference_jacobian, second_central_difference, second_forward_difference\n'), ((2199, 2211), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (2206, 2211), True, 'import numpy as np\n'), ((2231, 2243), 'scipy.optimize.rosen_der', 'rosen_der', (['x'], {}), '(x)\n', (2240, 2243), False, 'from scipy.optimize import rosen, rosen_der, rosen_hess\n'), ((2266, 2294), 'polynomials_on_simplices.calculus.finite_difference.forward_difference', 'forward_difference', (['rosen', 'x'], {}), '(rosen, x)\n', (2284, 2294), False, 'from polynomials_on_simplices.calculus.finite_difference import central_difference, central_difference_jacobian, forward_difference, forward_difference_jacobian, second_central_difference, second_forward_difference\n'), ((2371, 2384), 
'scipy.optimize.rosen_hess', 'rosen_hess', (['x'], {}), '(x)\n', (2381, 2384), False, 'from scipy.optimize import rosen, rosen_der, rosen_hess\n'), ((2406, 2441), 'polynomials_on_simplices.calculus.finite_difference.second_forward_difference', 'second_forward_difference', (['rosen', 'x'], {}), '(rosen, x)\n', (2431, 2441), False, 'from polynomials_on_simplices.calculus.finite_difference import central_difference, central_difference_jacobian, forward_difference, forward_difference_jacobian, second_central_difference, second_forward_difference\n'), ((2556, 2575), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (2570, 2575), True, 'import numpy as np\n'), ((2595, 2607), 'scipy.optimize.rosen_der', 'rosen_der', (['x'], {}), '(x)\n', (2604, 2607), False, 'from scipy.optimize import rosen, rosen_der, rosen_hess\n'), ((2630, 2658), 'polynomials_on_simplices.calculus.finite_difference.forward_difference', 'forward_difference', (['rosen', 'x'], {}), '(rosen, x)\n', (2648, 2658), False, 'from polynomials_on_simplices.calculus.finite_difference import central_difference, central_difference_jacobian, forward_difference, forward_difference_jacobian, second_central_difference, second_forward_difference\n'), ((2735, 2748), 'scipy.optimize.rosen_hess', 'rosen_hess', (['x'], {}), '(x)\n', (2745, 2748), False, 'from scipy.optimize import rosen, rosen_der, rosen_hess\n'), ((2770, 2805), 'polynomials_on_simplices.calculus.finite_difference.second_forward_difference', 'second_forward_difference', (['rosen', 'x'], {}), '(rosen, x)\n', (2795, 2805), False, 'from polynomials_on_simplices.calculus.finite_difference import central_difference, central_difference_jacobian, forward_difference, forward_difference_jacobian, second_central_difference, second_forward_difference\n'), ((2976, 2992), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2990, 2992), True, 'import numpy as np\n'), ((3005, 3029), 
'polynomials_on_simplices.calculus.finite_difference.forward_difference', 'forward_difference', (['f', 'x'], {}), '(f, x)\n', (3023, 3029), False, 'from polynomials_on_simplices.calculus.finite_difference import central_difference, central_difference_jacobian, forward_difference, forward_difference_jacobian, second_central_difference, second_forward_difference\n'), ((3096, 3120), 'polynomials_on_simplices.calculus.finite_difference.central_difference', 'central_difference', (['f', 'x'], {}), '(f, x)\n', (3114, 3120), False, 'from polynomials_on_simplices.calculus.finite_difference import central_difference, central_difference_jacobian, forward_difference, forward_difference_jacobian, second_central_difference, second_forward_difference\n'), ((3189, 3220), 'polynomials_on_simplices.calculus.finite_difference.second_forward_difference', 'second_forward_difference', (['f', 'x'], {}), '(f, x)\n', (3214, 3220), False, 'from polynomials_on_simplices.calculus.finite_difference import central_difference, central_difference_jacobian, forward_difference, forward_difference_jacobian, second_central_difference, second_forward_difference\n'), ((3292, 3323), 'polynomials_on_simplices.calculus.finite_difference.second_central_difference', 'second_central_difference', (['f', 'x'], {}), '(f, x)\n', (3317, 3323), False, 'from polynomials_on_simplices.calculus.finite_difference import central_difference, central_difference_jacobian, forward_difference, forward_difference_jacobian, second_central_difference, second_forward_difference\n'), ((3575, 3592), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (3589, 3592), True, 'import numpy as np\n'), ((3722, 3758), 'polynomials_on_simplices.calculus.finite_difference.forward_difference_jacobian', 'forward_difference_jacobian', (['f', '(2)', 'p'], {}), '(f, 2, p)\n', (3749, 3758), False, 'from polynomials_on_simplices.calculus.finite_difference import central_difference, central_difference_jacobian, forward_difference, 
forward_difference_jacobian, second_central_difference, second_forward_difference\n'), ((4102, 4119), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (4116, 4119), True, 'import numpy as np\n'), ((4327, 4363), 'polynomials_on_simplices.calculus.finite_difference.forward_difference_jacobian', 'forward_difference_jacobian', (['f', '(4)', 'p'], {}), '(f, 4, p)\n', (4354, 4363), False, 'from polynomials_on_simplices.calculus.finite_difference import central_difference, central_difference_jacobian, forward_difference, forward_difference_jacobian, second_central_difference, second_forward_difference\n'), ((4689, 4705), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4703, 4705), True, 'import numpy as np\n'), ((4728, 4752), 'numpy.array', 'np.array', (['[[1], [2 * p]]'], {}), '([[1], [2 * p]])\n', (4736, 4752), True, 'import numpy as np\n'), ((4768, 4804), 'polynomials_on_simplices.calculus.finite_difference.forward_difference_jacobian', 'forward_difference_jacobian', (['f', '(2)', 'p'], {}), '(f, 2, p)\n', (4795, 4804), False, 'from polynomials_on_simplices.calculus.finite_difference import central_difference, central_difference_jacobian, forward_difference, forward_difference_jacobian, second_central_difference, second_forward_difference\n'), ((5128, 5145), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (5142, 5145), True, 'import numpy as np\n'), ((5168, 5208), 'numpy.array', 'np.array', (['[[p[1] ** 2, 2 * p[0] * p[1]]]'], {}), '([[p[1] ** 2, 2 * p[0] * p[1]]])\n', (5176, 5208), True, 'import numpy as np\n'), ((5222, 5258), 'polynomials_on_simplices.calculus.finite_difference.forward_difference_jacobian', 'forward_difference_jacobian', (['f', '(1)', 'p'], {}), '(f, 1, p)\n', (5249, 5258), False, 'from polynomials_on_simplices.calculus.finite_difference import central_difference, central_difference_jacobian, forward_difference, forward_difference_jacobian, second_central_difference, second_forward_difference\n'), ((5504, 5521), 
'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (5518, 5521), True, 'import numpy as np\n'), ((5651, 5687), 'polynomials_on_simplices.calculus.finite_difference.central_difference_jacobian', 'central_difference_jacobian', (['f', '(2)', 'p'], {}), '(f, 2, p)\n', (5678, 5687), False, 'from polynomials_on_simplices.calculus.finite_difference import central_difference, central_difference_jacobian, forward_difference, forward_difference_jacobian, second_central_difference, second_forward_difference\n'), ((6031, 6048), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (6045, 6048), True, 'import numpy as np\n'), ((6256, 6292), 'polynomials_on_simplices.calculus.finite_difference.central_difference_jacobian', 'central_difference_jacobian', (['f', '(4)', 'p'], {}), '(f, 4, p)\n', (6283, 6292), False, 'from polynomials_on_simplices.calculus.finite_difference import central_difference, central_difference_jacobian, forward_difference, forward_difference_jacobian, second_central_difference, second_forward_difference\n'), ((6618, 6634), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (6632, 6634), True, 'import numpy as np\n'), ((6657, 6681), 'numpy.array', 'np.array', (['[[1], [2 * p]]'], {}), '([[1], [2 * p]])\n', (6665, 6681), True, 'import numpy as np\n'), ((6697, 6733), 'polynomials_on_simplices.calculus.finite_difference.central_difference_jacobian', 'central_difference_jacobian', (['f', '(2)', 'p'], {}), '(f, 2, p)\n', (6724, 6733), False, 'from polynomials_on_simplices.calculus.finite_difference import central_difference, central_difference_jacobian, forward_difference, forward_difference_jacobian, second_central_difference, second_forward_difference\n'), ((7057, 7074), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (7071, 7074), True, 'import numpy as np\n'), ((7097, 7137), 'numpy.array', 'np.array', (['[[p[1] ** 2, 2 * p[0] * p[1]]]'], {}), '([[p[1] ** 2, 2 * p[0] * p[1]]])\n', (7105, 7137), True, 'import numpy as 
np\n'), ((7151, 7187), 'polynomials_on_simplices.calculus.finite_difference.central_difference_jacobian', 'central_difference_jacobian', (['f', '(1)', 'p'], {}), '(f, 1, p)\n', (7178, 7187), False, 'from polynomials_on_simplices.calculus.finite_difference import central_difference, central_difference_jacobian, forward_difference, forward_difference_jacobian, second_central_difference, second_forward_difference\n'), ((986, 1043), 'numpy.allclose', 'np.allclose', (['hessian', 'fd_hessian'], {'rtol': '(1e-05)', 'atol': '(0.0001)'}), '(hessian, fd_hessian, rtol=1e-05, atol=0.0001)\n', (997, 1043), True, 'import numpy as np\n'), ((1343, 1400), 'numpy.allclose', 'np.allclose', (['hessian', 'fd_hessian'], {'rtol': '(1e-05)', 'atol': '(0.0001)'}), '(hessian, fd_hessian, rtol=1e-05, atol=0.0001)\n', (1354, 1400), True, 'import numpy as np\n'), ((1707, 1762), 'numpy.allclose', 'np.allclose', (['hessian', 'fd_hessian'], {'rtol': '(1e-05)', 'atol': '(0.01)'}), '(hessian, fd_hessian, rtol=1e-05, atol=0.01)\n', (1718, 1762), True, 'import numpy as np\n'), ((2109, 2164), 'numpy.allclose', 'np.allclose', (['hessian', 'fd_hessian'], {'rtol': '(1e-05)', 'atol': '(0.01)'}), '(hessian, fd_hessian, rtol=1e-05, atol=0.01)\n', (2120, 2164), True, 'import numpy as np\n'), ((2466, 2520), 'numpy.allclose', 'np.allclose', (['hessian', 'fd_hessian'], {'rtol': '(1e-05)', 'atol': '(0.1)'}), '(hessian, fd_hessian, rtol=1e-05, atol=0.1)\n', (2477, 2520), True, 'import numpy as np\n'), ((2830, 2884), 'numpy.allclose', 'np.allclose', (['hessian', 'fd_hessian'], {'rtol': '(1e-05)', 'atol': '(0.1)'}), '(hessian, fd_hessian, rtol=1e-05, atol=0.1)\n', (2841, 2884), True, 'import numpy as np\n'), ((3820, 3849), 'numpy.allclose', 'np.allclose', (['j_expected', 'j_fd'], {}), '(j_expected, j_fd)\n', (3831, 3849), True, 'import numpy as np\n'), ((4425, 4454), 'numpy.allclose', 'np.allclose', (['j_expected', 'j_fd'], {}), '(j_expected, j_fd)\n', (4436, 4454), True, 'import numpy as np\n'), ((4656, 4677), 
'numpy.array', 'np.array', (['[x, x ** 2]'], {}), '([x, x ** 2])\n', (4664, 4677), True, 'import numpy as np\n'), ((4866, 4895), 'numpy.allclose', 'np.allclose', (['j_expected', 'j_fd'], {}), '(j_expected, j_fd)\n', (4877, 4895), True, 'import numpy as np\n'), ((5320, 5349), 'numpy.allclose', 'np.allclose', (['j_expected', 'j_fd'], {}), '(j_expected, j_fd)\n', (5331, 5349), True, 'import numpy as np\n'), ((5749, 5778), 'numpy.allclose', 'np.allclose', (['j_expected', 'j_fd'], {}), '(j_expected, j_fd)\n', (5760, 5778), True, 'import numpy as np\n'), ((6354, 6383), 'numpy.allclose', 'np.allclose', (['j_expected', 'j_fd'], {}), '(j_expected, j_fd)\n', (6365, 6383), True, 'import numpy as np\n'), ((6585, 6606), 'numpy.array', 'np.array', (['[x, x ** 2]'], {}), '([x, x ** 2])\n', (6593, 6606), True, 'import numpy as np\n'), ((6795, 6824), 'numpy.allclose', 'np.allclose', (['j_expected', 'j_fd'], {}), '(j_expected, j_fd)\n', (6806, 6824), True, 'import numpy as np\n'), ((7249, 7278), 'numpy.allclose', 'np.allclose', (['j_expected', 'j_fd'], {}), '(j_expected, j_fd)\n', (7260, 7278), True, 'import numpy as np\n'), ((3682, 3694), 'numpy.cos', 'np.cos', (['p[1]'], {}), '(p[1])\n', (3688, 3694), True, 'import numpy as np\n'), ((4287, 4299), 'numpy.sin', 'np.sin', (['p[0]'], {}), '(p[0])\n', (4293, 4299), True, 'import numpy as np\n'), ((5611, 5623), 'numpy.cos', 'np.cos', (['p[1]'], {}), '(p[1])\n', (5617, 5623), True, 'import numpy as np\n'), ((6216, 6228), 'numpy.sin', 'np.sin', (['p[0]'], {}), '(p[0])\n', (6222, 6228), True, 'import numpy as np\n'), ((3065, 3074), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (3071, 3074), True, 'import numpy as np\n'), ((3156, 3165), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (3162, 3165), True, 'import numpy as np\n'), ((3547, 3559), 'numpy.sin', 'np.sin', (['x[1]'], {}), '(x[1])\n', (3553, 3559), True, 'import numpy as np\n'), ((4061, 4073), 'numpy.sin', 'np.sin', (['x[0]'], {}), '(x[0])\n', (4067, 4073), True, 'import numpy as 
np\n'), ((4268, 4280), 'numpy.cos', 'np.cos', (['p[0]'], {}), '(p[0])\n', (4274, 4280), True, 'import numpy as np\n'), ((5476, 5488), 'numpy.sin', 'np.sin', (['x[1]'], {}), '(x[1])\n', (5482, 5488), True, 'import numpy as np\n'), ((5990, 6002), 'numpy.sin', 'np.sin', (['x[0]'], {}), '(x[0])\n', (5996, 6002), True, 'import numpy as np\n'), ((6197, 6209), 'numpy.cos', 'np.cos', (['p[0]'], {}), '(p[0])\n', (6203, 6209), True, 'import numpy as np\n'), ((3259, 3268), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (3265, 3268), True, 'import numpy as np\n'), ((3362, 3371), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (3368, 3371), True, 'import numpy as np\n')] |
from skfem import *
import numpy as np
import matplotlib.pyplot as plt
# Default triangular mesh (skfem's MeshTri - presumably the unit square;
# confirm against skfem docs), refined five times for resolution.
m = MeshTri()
m.refine(5)
@bilinear_form
def jacobian(u, du, v, dv, w):
    """Bilinear form: linearisation (Newton Jacobian) of the minimal-surface
    operator around the previous iterate.

    Following skfem's bilinear_form convention, ``(u, du)`` is the trial
    function and its gradient, ``(v, dv)`` the test function and its
    gradient, and ``w`` carries the previous solution and its gradient.
    """
    # Unpack the previous iterate: w = values, dw = gradient components.
    w, dw = w.w, w.dw
    # First term: grad(u).grad(v) scaled by 1/sqrt(1 + |grad w|^2).
    # Second term: contribution from differentiating that 1/sqrt(...) factor
    # with respect to w (the 2/2 factors cancel but are kept explicit).
    return 1.0/np.sqrt(1.0 + dw[0]**2 + dw[1]**2)*(du[0]*dv[0] + du[1]*dv[1])\
        -(2.0*du[1]*dw[1] + 2.0*du[0]*dw[0])*(dw[1]*dv[1] + dw[0]*dv[0])\
        /(2.0*(1 + dw[1]**2 + dw[0]**2)**(3./2.))
@linear_form
def rhs(v, dv, w):
    """Linear form: residual of the minimal-surface equation evaluated at
    the previous iterate ``w`` (tested against ``v``)."""
    # Unpack the previous iterate: w = values, dw = gradient components.
    w, dw = w.w, w.dw
    return 1.0/np.sqrt(1.0 + dw[0]**2 + dw[1]**2)*(dw[0]*dv[0] + dw[1]*dv[1])
# Piecewise-linear (P1) finite element basis on the mesh.
basis = InteriorBasis(m, ElementTriP1())
x = np.zeros(basis.N)  # solution vector, one entry per basis function
I = m.interior_nodes()  # free degrees of freedom
D = m.boundary_nodes()  # Dirichlet (fixed) degrees of freedom
# Dirichlet boundary data: sin(pi * first coordinate) on the boundary nodes.
x[D] = np.sin(np.pi * m.p[0, D])
# Damped Newton iteration: at most 100 steps, step size 0.7.
for itr in range(100):
    w = basis.interpolate(x)  # previous iterate passed to the forms
    J = asm(jacobian, basis, w=w)  # assemble Jacobian matrix
    F = asm(rhs, basis, w=w)  # assemble residual vector
    x_prev = x.copy()
    # Solve the linearised system restricted to the free nodes I
    # (condense enforces the Dirichlet conditions) and apply a damped update.
    x += 0.7 * solve(*condense(J, -F, I=I))
    # Stop once the update is below tolerance.
    if np.linalg.norm(x - x_prev) < 1e-8:
        break
# When run as a script: report the size of the final Newton update, then
# draw the computed surface. (Statement order matches the original pair of
# guards: print first, then plot.)
if __name__ == "__main__":
    print(np.linalg.norm(x - x_prev))
    m.plot3(x)
    m.show()
| [
"numpy.sin",
"numpy.zeros",
"numpy.sqrt",
"numpy.linalg.norm"
] | [((556, 573), 'numpy.zeros', 'np.zeros', (['basis.N'], {}), '(basis.N)\n', (564, 573), True, 'import numpy as np\n'), ((628, 653), 'numpy.sin', 'np.sin', (['(np.pi * m.p[0, D])'], {}), '(np.pi * m.p[0, D])\n', (634, 653), True, 'import numpy as np\n'), ((844, 870), 'numpy.linalg.norm', 'np.linalg.norm', (['(x - x_prev)'], {}), '(x - x_prev)\n', (858, 870), True, 'import numpy as np\n'), ((446, 484), 'numpy.sqrt', 'np.sqrt', (['(1.0 + dw[0] ** 2 + dw[1] ** 2)'], {}), '(1.0 + dw[0] ** 2 + dw[1] ** 2)\n', (453, 484), True, 'import numpy as np\n'), ((938, 964), 'numpy.linalg.norm', 'np.linalg.norm', (['(x - x_prev)'], {}), '(x - x_prev)\n', (952, 964), True, 'import numpy as np\n'), ((182, 220), 'numpy.sqrt', 'np.sqrt', (['(1.0 + dw[0] ** 2 + dw[1] ** 2)'], {}), '(1.0 + dw[0] ** 2 + dw[1] ** 2)\n', (189, 220), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# pylint: disable=unexpected-keyword-arg,too-few-public-methods
"""
Test suite for data processing, dummy data and input/output functions.
"""
import os
import sys
import shutil
import unittest
import warnings
import numpy as np
import numpy.testing
import nestcheck.data_processing
import nestcheck.diagnostics_tables
import nestcheck.dummy_data
import nestcheck.error_analysis
import nestcheck.io_utils
import nestcheck.ns_run_utils
import nestcheck.parallel_utils
import nestcheck.plots
import nestcheck.write_polychord_output
# Define a directory to output files produced by tests (this will be deleted
# when the tests finish).
TEST_CACHE_DIR = 'temp_test_data_to_delete'
def setUpModule():
    """Before running the test suite, check that TEST_CACHE_DIR does not
    already exist - as the tests will delete it.

    Raises
    ------
    AssertionError
        If TEST_CACHE_DIR already exists.
    """
    # Raise explicitly rather than via a bare ``assert`` so this safety
    # check still runs when python is invoked with -O (which strips
    # assert statements). The message is unchanged from the original.
    if os.path.exists(TEST_CACHE_DIR):
        raise AssertionError(
            'Directory ' + TEST_CACHE_DIR + ' exists! Tests use this directory to '
            'check caching then delete it afterwards, so its path should be left '
            'empty. You should manually delete or move ' + TEST_CACHE_DIR
            + ' before running the tests.')
class TestDataProcessing(unittest.TestCase):
    """Tests for data_processing.py"""
    def setUp(self):
        """Make a temporary directory for saving test results."""
        try:
            os.makedirs(TEST_CACHE_DIR)
        except FileExistsError:
            # Directory already present (e.g. left by another test); reuse it.
            pass
    def tearDown(self):
        """Remove any caches saved by the tests."""
        try:
            shutil.rmtree(TEST_CACHE_DIR)
        except OSError:
            # Nothing to clean up if the directory was never created.
            pass
    def test_batch_process_data_unexpected_kwarg(self):
        """Test unexpected kwargs checks."""
        self.assertRaises(
            TypeError, nestcheck.data_processing.batch_process_data,
            ['path'], base_dir=TEST_CACHE_DIR, unexpected=1)
    def test_birth_inds_given_contours_unexpected_kwarg(self):
        """Test unexpected kwargs checks."""
        self.assertRaises(
            TypeError, nestcheck.data_processing.birth_inds_given_contours,
            'birth_logl', 'logl', unexpected=1)
    def test_birth_inds_given_contours(self):
        """Check birth inds allocation function."""
        # Check birth_inds_given_contours works when points born and dying on same contour
        logl = np.asarray([1, 1, 3, 5])
        birth_logl = np.asarray([-1, 1, 1, 3])
        inds = nestcheck.data_processing.birth_inds_given_contours(birth_logl, logl)
        # NOTE(review): the -1 entry appears to mark a point whose birth
        # contour is below every logl value - confirm against the
        # birth_inds_given_contours docstring.
        numpy.testing.assert_array_equal(inds, np.asarray([-1, 0, 1, 2]))
        # Check error handling of PolyChord v1.13 bug with more births than
        # deaths on a contour
        logl = np.asarray([1, 1, 2, 3, 5])
        birth_logl = np.asarray([-1, 1, 1, 1, 3])
        with warnings.catch_warnings(record=True) as war:
            warnings.simplefilter("always")
            inds = nestcheck.data_processing.birth_inds_given_contours(birth_logl, logl)
        # Exactly one warning should be issued for the excess births.
        self.assertEqual(len(war), 1)
        numpy.testing.assert_array_equal(inds, np.asarray([-1, 0, 0, 1, 3]))
    def test_threads_given_birth_inds(self):
        """Check mapping from birth inds to threads."""
        birth_inds = np.array([-1, -1, 1, 2, 3, 7]).astype(int)
        # Check random assignment of leftover points to threads
        with warnings.catch_warnings(record=True) as war:
            warnings.simplefilter("always")
            numpy.testing.assert_array_equal(
                nestcheck.data_processing.threads_given_birth_inds(
                    birth_inds),
                np.array([0, 1, 1, 1, 1, 0]).astype(int))
        self.assertEqual(len(war), 1)
    def test_process_polychord_data(self):
        """Check processing some dummy PolyChord data."""
        file_root = 'dummy_run'
        run = nestcheck.dummy_data.get_dummy_dynamic_run(
            10, seed=0, nthread_init=2, nthread_dyn=3)
        # Write the run to disk in PolyChord's dead-birth text format so it
        # can be read back by process_polychord_run.
        dead = nestcheck.write_polychord_output.run_dead_birth_array(run)
        np.savetxt(os.path.join(
            TEST_CACHE_DIR, file_root + '_dead-birth.txt'), dead)
        with warnings.catch_warnings(record=True) as war:
            warnings.simplefilter("always")
            processed_run = nestcheck.data_processing.process_polychord_run(
                file_root, TEST_CACHE_DIR)
        self.assertEqual(len(war), 1)
        nestcheck.ns_run_utils.check_ns_run(processed_run)
        # The round-tripped run should match the original, key by key
        # (the 'output' dict is new, so it is excluded from the comparison).
        for key, value in processed_run.items():
            if key not in ['output']:
                numpy.testing.assert_array_equal(
                    value, run[key], err_msg=key + ' not the same')
        self.assertEqual(processed_run['output']['file_root'], file_root)
        self.assertEqual(processed_run['output']['base_dir'], TEST_CACHE_DIR)
    def test_process_polychord_stats_file(self):
        """Check reading in PolyChord's <root>.stats file by making and saving
        a dummy one."""
        file_root = 'temp'
        output = nestcheck.write_polychord_output.write_stats_file(
            {'file_root': file_root, 'base_dir': TEST_CACHE_DIR,
             'nlike': np.nan})
        # Reading the stats file back should reproduce what was written.
        self.assertEqual(nestcheck.data_processing.process_polychord_stats(
            file_root, TEST_CACHE_DIR), output)
    def test_process_multinest_data(self):
        """Check processing some dummy MultiNest data."""
        file_root = 'dummy_run'
        run = nestcheck.dummy_data.get_dummy_run(5, 10, seed=False)
        samples = nestcheck.write_polychord_output.run_dead_birth_array(run)
        # Replicate MultiNest's dead and live points files, including their
        # extra columns
        dead = samples[:-2, :]
        live = samples[-2:, :]
        dead = np.hstack((dead, np.zeros((dead.shape[0], 2))))
        live = np.hstack((live, np.zeros((live.shape[0], 1))))
        np.savetxt(os.path.join(
            TEST_CACHE_DIR, file_root + '-dead-birth.txt'), dead)
        np.savetxt(os.path.join(
            TEST_CACHE_DIR, file_root + '-phys_live-birth.txt'), live)
        processed_run = nestcheck.data_processing.process_multinest_run(
            file_root, TEST_CACHE_DIR)
        nestcheck.ns_run_utils.check_ns_run(processed_run)
        # The round-tripped run should match the original, key by key.
        for key, value in processed_run.items():
            if key not in ['output']:
                numpy.testing.assert_array_equal(
                    value, run[key], err_msg=key + ' not the same')
        self.assertEqual(processed_run['output']['file_root'], file_root)
        self.assertEqual(processed_run['output']['base_dir'], TEST_CACHE_DIR)
    def test_batch_process_data(self):
        """Test processing some dummy PolyChord data using
        batch_process_data."""
        file_root = 'dummy_run'
        run = nestcheck.dummy_data.get_dummy_dynamic_run(
            10, seed=0, nthread_init=2, nthread_dyn=3)
        dead = nestcheck.write_polychord_output.run_dead_birth_array(run)
        np.savetxt(os.path.join(
            TEST_CACHE_DIR, file_root + '_dead-birth.txt'), dead)
        # 'an_empty_path' does not exist; the OSError/IOError it triggers is
        # in errors_to_handle, so that run is skipped rather than raising.
        with warnings.catch_warnings(record=True) as war:
            warnings.simplefilter("always")
            run_list = nestcheck.data_processing.batch_process_data(
                [file_root, 'an_empty_path'], base_dir=TEST_CACHE_DIR,
                parallel=False, errors_to_handle=(OSError, IOError))
        self.assertEqual(len(war), 2)
        # Only the run which could be read should be returned.
        self.assertEqual(len(run_list), 1)
    def test_process_dynesty_run(self):
        """Test processing dynesty results into nestcheck format."""
        class DynestyResults(object):
            """A dummy dynesty results object for testing."""
            def __init__(self, run, dynamic=False):
                """Initialise dynesty-format attributes corresponding to the
                input run."""
                self.samples = run['theta']
                self.samples_id = run['thread_labels']
                self.logl = run['logl']
                if not dynamic:
                    # Static runs must have all threads starting at -inf.
                    assert np.all(run['thread_min_max'][:, 0] == -np.inf)
                    self.nlive = run['thread_min_max'].shape[0]
                else:
                    # Treat every thread as a separate batch
                    self.batch_bounds = run['thread_min_max']
                    self.batch_nlive = np.full(
                        run['thread_min_max'].shape[0], 1)
                    self.samples_batch = run['thread_labels']
        run = nestcheck.dummy_data.get_dummy_run(1, 10)
        # Check both the dynamic and static dynesty result layouts.
        for dynamic in [True, False]:
            results = DynestyResults(run, dynamic=dynamic)
            processed = nestcheck.data_processing.process_dynesty_run(results)
            for key, value in run.items():
                if key not in ['output']:
                    numpy.testing.assert_array_equal(
                        value, processed[key],
                        err_msg=('{0} not the same. dynamic={1}'
                                 .format(key, dynamic)))
    def test_process_samples_array(self):
        """Check the handling of duplicate loglikelihood values."""
        # Make a samples array with some duplicate logl values
        samples = np.zeros((4, 3))
        samples[:, -1] = np.asarray([-1e30, -1e30, 1, 1])  # births
        samples[:, -2] = np.asarray([1, 1, 2, 3])  # logls
        # Should raise warning if dup_warn is True
        with warnings.catch_warnings(record=True) as war:
            warnings.simplefilter("always")
            nestcheck.data_processing.process_samples_array(
                samples, dup_warn=True)
        self.assertEqual(len(war), 1)
        # Should raise AssertionError if dup_assert is True
        self.assertRaises(
            AssertionError, nestcheck.data_processing.process_samples_array,
            samples, dup_assert=True)
class TestWritePolyChordOutput(unittest.TestCase):
    """Tests for write_polychord_output.py."""
    def setUp(self):
        """Make a temporary directory for saving test results."""
        try:
            os.makedirs(TEST_CACHE_DIR)
        except FileExistsError:
            # Directory already present (e.g. left by another test); reuse it.
            pass
    def tearDown(self):
        """Remove any caches saved by the tests."""
        try:
            shutil.rmtree(TEST_CACHE_DIR)
        except OSError:
            # Nothing to clean up if the directory was never created.
            pass
    def test_write_run_output_unexpected_kwarg(self):
        """Check write_run_output raises TypeError with unexpected
        kwargs."""
        self.assertRaises(
            TypeError, nestcheck.write_polychord_output.write_run_output,
            {}, unexpected=1)
    def test_write_run_output(self):
        """Check writing PolyChord output files."""
        file_root = 'dummy_run'
        run = nestcheck.dummy_data.get_dummy_run(10, 10)
        run['output'] = {'file_root': file_root, 'base_dir': TEST_CACHE_DIR}
        # Run with and without equals=True and posterior=True to ensure full
        # coverage
        nestcheck.write_polychord_output.write_run_output(
            run, equals=True, posteriors=True)
        nestcheck.write_polychord_output.write_run_output(run)
        # Reading the files back should reproduce the original run.
        processed_run = nestcheck.data_processing.process_polychord_run(
            file_root, TEST_CACHE_DIR)
        self.assertEqual(set(run.keys()), set(processed_run.keys()))
        for key, value in processed_run.items():
            if key not in ['output']:
                numpy.testing.assert_allclose(
                    value, run[key], err_msg=key + ' not the same')
        self.assertEqual(processed_run['output']['file_root'], file_root)
        self.assertEqual(processed_run['output']['base_dir'], TEST_CACHE_DIR)
class TestDummyData(unittest.TestCase):
    """Tests for the dummy_data.py module."""
    def test_get_dummy_run_unexpected_kwarg(self):
        """get_dummy_run should raise TypeError when given an unknown
        keyword argument."""
        with self.assertRaises(TypeError):
            nestcheck.dummy_data.get_dummy_run(1, 2, unexpected=1)
    def test_get_dummy_thread_unexpected_kwarg(self):
        """get_dummy_thread should raise TypeError when given an unknown
        keyword argument."""
        with self.assertRaises(TypeError):
            nestcheck.dummy_data.get_dummy_thread(1, unexpected=1)
    def test_get_dummy_dynamic_run_unexpected_kwarg(self):
        """get_dummy_dynamic_run should raise TypeError when given an
        unknown keyword argument."""
        with self.assertRaises(TypeError):
            nestcheck.dummy_data.get_dummy_dynamic_run(1, unexpected=1)
class TestIOUtils(unittest.TestCase):
"""Tests for io_utils.py."""
    def setUp(self):
        """Get some data for io testing.

        Note that the saving function in io_utils makes the specified
        directory if it does not already exist, so there is no need to
        make it in setUp.
        """
        self.test_data = np.random.random(10)
        # io_func is an identity function wrapped by the save_load_result
        # decorator; the tests below use it to exercise the decorator's
        # save/load behaviour.
        @nestcheck.io_utils.save_load_result
        def io_func(data):
            """Helper for testing save and load functions via the
            io_utils.save_load_result decorator."""
            return data
        self.io_func = io_func
def tearDown(self):
"""Remove any caches saved by the tests."""
try:
shutil.rmtree(TEST_CACHE_DIR)
except OSError:
pass
def test_save_load_wrapper(self):
"""Try saving and loading some test data and check it dosnt change."""
# Without save_name (will neither save nor load)
with warnings.catch_warnings(record=True) as war:
warnings.simplefilter("always")
data_out = self.io_func(self.test_data, save=True, load=True)
self.assertEqual(len(war), 2)
self.assertTrue(np.array_equal(self.test_data, data_out))
# Before any data saved (will save but not load)
with warnings.catch_warnings(record=True) as war:
warnings.simplefilter("always")
data_out = self.io_func(
self.test_data, save=True, load=True, warn_if_error=True,
save_name=TEST_CACHE_DIR + '/io_test')
self.assertEqual(len(war), 1)
self.assertTrue(np.array_equal(self.test_data, data_out))
# After data saved (will load)
data_out = self.io_func(self.test_data, save=True, load=True,
save_name=TEST_CACHE_DIR + '/io_test')
self.assertTrue(np.array_equal(self.test_data, data_out))
# Check handling of permission and memory errors when saving
with warnings.catch_warnings(record=True) as war:
warnings.simplefilter("always")
nestcheck.io_utils.pickle_save(data_out, '//')
self.assertEqual(len(war), 1)
def test_load_filenotfound(self):
"""Test loading files which dont exist causes FileNotFoundError."""
if sys.version_info[0] >= 3:
self.assertRaises(
FileNotFoundError, nestcheck.io_utils.pickle_load,
TEST_CACHE_DIR + 'not_here')
else:
# FileNotFoundError not defined in python2 - use IOError instead
self.assertRaises(
IOError, nestcheck.io_utils.pickle_load,
TEST_CACHE_DIR + 'not_here')
def test_no_overwrite(self):
"""Check option to not overwrite existing files."""
# Save our test data
nestcheck.io_utils.pickle_save(
self.test_data, TEST_CACHE_DIR + '/io_test', print_time=True)
# Try saving some different data to same path
nestcheck.io_utils.pickle_save(
self.test_data - 100, TEST_CACHE_DIR + '/io_test',
overwrite_existing=False)
# Check the test data was not edited
data_out = nestcheck.io_utils.pickle_load(TEST_CACHE_DIR + '/io_test')
self.assertTrue(np.array_equal(self.test_data, data_out))
def test_save_load_unexpected_kwargs(self):
"""Unexpected kwarg should throw exception."""
self.assertRaises(
TypeError, nestcheck.io_utils.pickle_load,
self.test_data, TEST_CACHE_DIR + '/io_test', unexpected=1)
self.assertRaises(
TypeError, nestcheck.io_utils.pickle_save,
self.test_data, TEST_CACHE_DIR + '/io_test', unexpected=1)
| [
"os.path.exists",
"os.makedirs",
"numpy.random.random",
"numpy.asarray",
"warnings.catch_warnings",
"os.path.join",
"numpy.array",
"numpy.zeros",
"numpy.array_equal",
"shutil.rmtree",
"warnings.simplefilter",
"numpy.full",
"numpy.all"
] | [((862, 892), 'os.path.exists', 'os.path.exists', (['TEST_CACHE_DIR'], {}), '(TEST_CACHE_DIR)\n', (876, 892), False, 'import os\n'), ((2339, 2363), 'numpy.asarray', 'np.asarray', (['[1, 1, 3, 5]'], {}), '([1, 1, 3, 5])\n', (2349, 2363), True, 'import numpy as np\n'), ((2385, 2410), 'numpy.asarray', 'np.asarray', (['[-1, 1, 1, 3]'], {}), '([-1, 1, 1, 3])\n', (2395, 2410), True, 'import numpy as np\n'), ((2692, 2719), 'numpy.asarray', 'np.asarray', (['[1, 1, 2, 3, 5]'], {}), '([1, 1, 2, 3, 5])\n', (2702, 2719), True, 'import numpy as np\n'), ((2741, 2769), 'numpy.asarray', 'np.asarray', (['[-1, 1, 1, 1, 3]'], {}), '([-1, 1, 1, 1, 3])\n', (2751, 2769), True, 'import numpy as np\n'), ((9096, 9112), 'numpy.zeros', 'np.zeros', (['(4, 3)'], {}), '((4, 3))\n', (9104, 9112), True, 'import numpy as np\n'), ((9138, 9172), 'numpy.asarray', 'np.asarray', (['[-1e+30, -1e+30, 1, 1]'], {}), '([-1e+30, -1e+30, 1, 1])\n', (9148, 9172), True, 'import numpy as np\n'), ((9206, 9230), 'numpy.asarray', 'np.asarray', (['[1, 1, 2, 3]'], {}), '([1, 1, 2, 3])\n', (9216, 9230), True, 'import numpy as np\n'), ((12752, 12772), 'numpy.random.random', 'np.random.random', (['(10)'], {}), '(10)\n', (12768, 12772), True, 'import numpy as np\n'), ((1365, 1392), 'os.makedirs', 'os.makedirs', (['TEST_CACHE_DIR'], {}), '(TEST_CACHE_DIR)\n', (1376, 1392), False, 'import os\n'), ((1544, 1573), 'shutil.rmtree', 'shutil.rmtree', (['TEST_CACHE_DIR'], {}), '(TEST_CACHE_DIR)\n', (1557, 1573), False, 'import shutil\n'), ((2543, 2568), 'numpy.asarray', 'np.asarray', (['[-1, 0, 1, 2]'], {}), '([-1, 0, 1, 2])\n', (2553, 2568), True, 'import numpy as np\n'), ((2783, 2819), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (2806, 2819), False, 'import warnings\n'), ((2840, 2871), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (2861, 2871), False, 'import warnings\n'), ((3050, 3078), 'numpy.asarray', 'np.asarray', 
(['[-1, 0, 0, 1, 3]'], {}), '([-1, 0, 0, 1, 3])\n', (3060, 3078), True, 'import numpy as np\n'), ((3323, 3359), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (3346, 3359), False, 'import warnings\n'), ((3380, 3411), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (3401, 3411), False, 'import warnings\n'), ((3999, 4058), 'os.path.join', 'os.path.join', (['TEST_CACHE_DIR', "(file_root + '_dead-birth.txt')"], {}), "(TEST_CACHE_DIR, file_root + '_dead-birth.txt')\n", (4011, 4058), False, 'import os\n'), ((4092, 4128), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (4115, 4128), False, 'import warnings\n'), ((4149, 4180), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (4170, 4180), False, 'import warnings\n'), ((5813, 5872), 'os.path.join', 'os.path.join', (['TEST_CACHE_DIR', "(file_root + '-dead-birth.txt')"], {}), "(TEST_CACHE_DIR, file_root + '-dead-birth.txt')\n", (5825, 5872), False, 'import os\n'), ((5912, 5976), 'os.path.join', 'os.path.join', (['TEST_CACHE_DIR', "(file_root + '-phys_live-birth.txt')"], {}), "(TEST_CACHE_DIR, file_root + '-phys_live-birth.txt')\n", (5924, 5976), False, 'import os\n'), ((6893, 6952), 'os.path.join', 'os.path.join', (['TEST_CACHE_DIR', "(file_root + '_dead-birth.txt')"], {}), "(TEST_CACHE_DIR, file_root + '_dead-birth.txt')\n", (6905, 6952), False, 'import os\n'), ((6986, 7022), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (7009, 7022), False, 'import warnings\n'), ((7043, 7074), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (7064, 7074), False, 'import warnings\n'), ((9304, 9340), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (9327, 9340), False, 'import warnings\n'), ((9361, 9392), 
'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (9382, 9392), False, 'import warnings\n'), ((9952, 9979), 'os.makedirs', 'os.makedirs', (['TEST_CACHE_DIR'], {}), '(TEST_CACHE_DIR)\n', (9963, 9979), False, 'import os\n'), ((10131, 10160), 'shutil.rmtree', 'shutil.rmtree', (['TEST_CACHE_DIR'], {}), '(TEST_CACHE_DIR)\n', (10144, 10160), False, 'import shutil\n'), ((13121, 13150), 'shutil.rmtree', 'shutil.rmtree', (['TEST_CACHE_DIR'], {}), '(TEST_CACHE_DIR)\n', (13134, 13150), False, 'import shutil\n'), ((13380, 13416), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (13403, 13416), False, 'import warnings\n'), ((13437, 13468), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (13458, 13468), False, 'import warnings\n'), ((13609, 13649), 'numpy.array_equal', 'np.array_equal', (['self.test_data', 'data_out'], {}), '(self.test_data, data_out)\n', (13623, 13649), True, 'import numpy as np\n'), ((13721, 13757), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (13744, 13757), False, 'import warnings\n'), ((13778, 13809), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (13799, 13809), False, 'import warnings\n'), ((14042, 14082), 'numpy.array_equal', 'np.array_equal', (['self.test_data', 'data_out'], {}), '(self.test_data, data_out)\n', (14056, 14082), True, 'import numpy as np\n'), ((14288, 14328), 'numpy.array_equal', 'np.array_equal', (['self.test_data', 'data_out'], {}), '(self.test_data, data_out)\n', (14302, 14328), True, 'import numpy as np\n'), ((14412, 14448), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (14435, 14448), False, 'import warnings\n'), ((14469, 14500), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (14490, 14500), False, 'import 
warnings\n'), ((15701, 15741), 'numpy.array_equal', 'np.array_equal', (['self.test_data', 'data_out'], {}), '(self.test_data, data_out)\n', (15715, 15741), True, 'import numpy as np\n'), ((3203, 3233), 'numpy.array', 'np.array', (['[-1, -1, 1, 2, 3, 7]'], {}), '([-1, -1, 1, 2, 3, 7])\n', (3211, 3233), True, 'import numpy as np\n'), ((5700, 5728), 'numpy.zeros', 'np.zeros', (['(dead.shape[0], 2)'], {}), '((dead.shape[0], 2))\n', (5708, 5728), True, 'import numpy as np\n'), ((5763, 5791), 'numpy.zeros', 'np.zeros', (['(live.shape[0], 1)'], {}), '((live.shape[0], 1))\n', (5771, 5791), True, 'import numpy as np\n'), ((7938, 7984), 'numpy.all', 'np.all', (["(run['thread_min_max'][:, 0] == -np.inf)"], {}), "(run['thread_min_max'][:, 0] == -np.inf)\n", (7944, 7984), True, 'import numpy as np\n'), ((8233, 8275), 'numpy.full', 'np.full', (["run['thread_min_max'].shape[0]", '(1)'], {}), "(run['thread_min_max'].shape[0], 1)\n", (8240, 8275), True, 'import numpy as np\n'), ((3575, 3603), 'numpy.array', 'np.array', (['[0, 1, 1, 1, 1, 0]'], {}), '([0, 1, 1, 1, 1, 0])\n', (3583, 3603), True, 'import numpy as np\n')] |
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import platform
from pathlib import Path
import numpy as np
import torch
from spconv import ops, utils
from spconv.conv import (SparseConv2d, SparseConv3d, SparseConvTranspose2d,
SparseConvTranspose3d, SparseInverseConv2d,
SparseInverseConv3d, SubMConv2d, SubMConv3d)
from spconv.identity import Identity
from spconv.modules import SparseModule, SparseSequential
from spconv.ops import ConvAlgo
from spconv.pool import SparseMaxPool2d, SparseMaxPool3d
from spconv.tables import AddTable, ConcatTable, JoinTable
_LIB_FILE_NAME = "libspconv.dylib"
if platform.system() == "Windows":
_LIB_FILE_NAME = "spconv.dll"
_LIB_PATH = str(Path(__file__).parent / _LIB_FILE_NAME)
torch.ops.load_library(_LIB_PATH)
def scatter_nd(indices, updates, shape):
"""pytorch edition of tensorflow scatter_nd.
this function don't contain except handle code. so use this carefully
when indice repeats, don't support repeat add which is supported
in tensorflow.
"""
ret = torch.zeros(*shape, dtype=updates.dtype, device=updates.device)
ndim = indices.shape[-1]
output_shape = list(indices.shape[:-1]) + shape[indices.shape[-1]:]
flatted_indices = indices.view(-1, ndim)
slices = [flatted_indices[:, i] for i in range(ndim)]
slices += [Ellipsis]
ret[slices] = updates.view(*output_shape)
return ret
class SparseConvTensor(object):
def __init__(self, features, indices, spatial_shape, batch_size,
grid=None):
"""
Args:
features: [num_points, num_features] feature tensor
indices: [num_points, ndim + 1] indice tensor. batch index saved in indices[:, 0]
spatial_shape: spatial shape of your sparse data
batch_size: batch size of your sparse data
grid: pre-allocated grid tensor. should be used when the volume of spatial shape
is very large.
"""
self.features = features
self.indices = indices
self.spatial_shape = spatial_shape
self.batch_size = batch_size
self.indice_dict = {}
self.grid = grid
@classmethod
def from_dense(cls, x: torch.Tensor):
"""create sparse tensor fron channel last dense tensor by to_sparse
x must be NHWC tensor, channel last
"""
x = x.to_sparse(x.ndim - 1)
spatial_shape = x.shape[1:-1]
batch_size = x.shape[0]
indices_th = x.indices().permute(1, 0).contiguous().int()
features_th = x.values()
return cls(features_th, indices_th, spatial_shape, batch_size)
@property
def spatial_size(self):
return np.prod(self.spatial_shape)
def find_indice_pair(self, key):
if key is None:
return None
if key in self.indice_dict:
return self.indice_dict[key]
return None
def dense(self, channels_first=True):
output_shape = [self.batch_size] + list(
self.spatial_shape) + [self.features.shape[1]]
res = scatter_nd(
self.indices.to(self.features.device).long(), self.features,
output_shape)
if not channels_first:
return res
ndim = len(self.spatial_shape)
trans_params = list(range(0, ndim + 1))
trans_params.insert(1, ndim + 1)
return res.permute(*trans_params).contiguous()
@property
def sparity(self):
return self.indices.shape[0] / np.prod(
self.spatial_shape) / self.batch_size
class ToDense(SparseModule):
"""convert SparseConvTensor to NCHW dense tensor.
"""
def forward(self, x: SparseConvTensor):
return x.dense()
class RemoveGrid(SparseModule):
"""remove pre-allocated grid buffer.
"""
def forward(self, x: SparseConvTensor):
x.grid = None
return x
| [
"torch.ops.load_library",
"numpy.prod",
"pathlib.Path",
"platform.system",
"torch.zeros"
] | [((1295, 1328), 'torch.ops.load_library', 'torch.ops.load_library', (['_LIB_PATH'], {}), '(_LIB_PATH)\n', (1317, 1328), False, 'import torch\n'), ((1173, 1190), 'platform.system', 'platform.system', ([], {}), '()\n', (1188, 1190), False, 'import platform\n'), ((1601, 1664), 'torch.zeros', 'torch.zeros', (['*shape'], {'dtype': 'updates.dtype', 'device': 'updates.device'}), '(*shape, dtype=updates.dtype, device=updates.device)\n', (1612, 1664), False, 'import torch\n'), ((3248, 3275), 'numpy.prod', 'np.prod', (['self.spatial_shape'], {}), '(self.spatial_shape)\n', (3255, 3275), True, 'import numpy as np\n'), ((1255, 1269), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1259, 1269), False, 'from pathlib import Path\n'), ((4049, 4076), 'numpy.prod', 'np.prod', (['self.spatial_shape'], {}), '(self.spatial_shape)\n', (4056, 4076), True, 'import numpy as np\n')] |
# -- coding: utf-8 --
import os
from timeit import default_timer as timer
import numpy as np
from PIL import Image
from tensorflow.keras import backend, layers, models
from detection_yolo3.model import yolo_eval, yolo_body
from detection_yolo3.utils import get_anchors, draw_boxes
from util import check_or_makedirs
from config import BOX_CLASSES_ON_BOOK
from config import YOLO3_ANCHORS_FILE
from config import YOLO3_CLASS_SCORE_THRESH
from config import YOLO3_NMS_IOU_THRESH, YOLO3_NMS_MAX_BOXES_NUM
class YOLO(object):
def __init__(self, model_struc="densenet", model_path=""):
self.model_struc = model_struc
self.model_path = model_path
self.classes = BOX_CLASSES_ON_BOOK
self.anchors = get_anchors(YOLO3_ANCHORS_FILE)
self.score_thresh = YOLO3_CLASS_SCORE_THRESH
self.nms_iou_thresh = YOLO3_NMS_IOU_THRESH
self.max_boxes_num = YOLO3_NMS_MAX_BOXES_NUM
self.predict_model = self.build_and_load_model()
def build_and_load_model(self):
assert self.model_path.endswith('.h5'), "Keras model or weights must be a .h5 file."
image_input = layers.Input(shape=(None, None, 1), dtype="float32") # 图片输入格式
raw_img_shape = layers.Input(shape=(2,), dtype="int32")
num_anchors = len(self.anchors) # anchor的数量
num_classes = len(self.classes) # 类别数
self.yolo_model = yolo_body(image_input, num_anchors // 3, num_classes, self.model_struc)
self.yolo_model.load_weights(self.model_path) # 加载模型参数
print('{} model, {} anchors, and {} classes loaded.'.format(self.model_path, num_anchors, num_classes))
# 处理模型的输出,提取模型的预测结果。注意这里的设定:一个batch只包含一张图片
boxes, scores, classes = layers.Lambada(yolo_eval,
name='yolo_eval',
arguments={'anchors': self.anchors,
'num_classes': num_classes,
'score_thresh': self.score_thresh,
'iou_thresh': self.nms_iou_thresh,
'max_boxes': self.max_boxes_num}
)(self.yolo_model.outputs)
return models.Model(self.yolo_model.inputs, outputs=[boxes, scores, classes])
def detect_image(self, img_path, dest_dir, background="white"):
if not os.path.exists(img_path): return
img_name = os.path.basename(img_path)
check_or_makedirs(dest_dir)
PIL_img = Image.open(img_path)
if PIL_img.mode != "L":
PIL_img = PIL_img.convert("L")
np_img = np.array(PIL_img, dtype=np.uint8)
h, w = np_img.shape[:2]
new_h = -h % 32 + h
new_w = -w % 32 + w
batch_imgs = np.empty(shape=(1, new_h, new_w), dtype=np.float32)
if background == "white":
batch_imgs.fill(255)
elif background == "black":
batch_imgs.fill(0)
else:
ValueError("Optional image background: 'white', 'black'.")
batch_imgs[0, :h, :w] = np_img
batch_imgs = np.expand_dims(batch_imgs, axis=-1)
start = timer() # 起始时间
out_boxes, out_scores, out_classes = self.predict_model.predict(x=batch_imgs)
print('Time {:.2f}s, found {} boxes in {}'.format(timer() - start, len(out_boxes), img_name))
np_img_rgb = draw_boxes(np_img, out_boxes, out_scores, out_classes)
PIL_img = Image.fromarray(np_img_rgb)
PIL_img.save(os.path.join(dest_dir, img_name), format="jpeg")
def detect_images(self, src_dir, dest_dir, background="white"):
assert os.path.exists(src_dir)
img_paths = [os.path.join(src_dir, file) for file in os.listdir(src_dir)
if file.endswith(".jpg") or file.endswith(".png") or file.endswith(".gif")]
for img_path in img_paths:
self.detect_image(img_path, dest_dir, background)
if __name__ == '__main__':
print("Done !")
| [
"tensorflow.keras.layers.Input",
"PIL.Image.fromarray",
"PIL.Image.open",
"os.path.exists",
"os.listdir",
"detection_yolo3.utils.get_anchors",
"timeit.default_timer",
"os.path.join",
"detection_yolo3.model.yolo_body",
"util.check_or_makedirs",
"numpy.array",
"numpy.empty",
"os.path.basename"... | [((734, 765), 'detection_yolo3.utils.get_anchors', 'get_anchors', (['YOLO3_ANCHORS_FILE'], {}), '(YOLO3_ANCHORS_FILE)\n', (745, 765), False, 'from detection_yolo3.utils import get_anchors, draw_boxes\n'), ((1135, 1187), 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': '(None, None, 1)', 'dtype': '"""float32"""'}), "(shape=(None, None, 1), dtype='float32')\n", (1147, 1187), False, 'from tensorflow.keras import backend, layers, models\n'), ((1222, 1261), 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': '(2,)', 'dtype': '"""int32"""'}), "(shape=(2,), dtype='int32')\n", (1234, 1261), False, 'from tensorflow.keras import backend, layers, models\n'), ((1390, 1461), 'detection_yolo3.model.yolo_body', 'yolo_body', (['image_input', '(num_anchors // 3)', 'num_classes', 'self.model_struc'], {}), '(image_input, num_anchors // 3, num_classes, self.model_struc)\n', (1399, 1461), False, 'from detection_yolo3.model import yolo_eval, yolo_body\n'), ((2357, 2427), 'tensorflow.keras.models.Model', 'models.Model', (['self.yolo_model.inputs'], {'outputs': '[boxes, scores, classes]'}), '(self.yolo_model.inputs, outputs=[boxes, scores, classes])\n', (2369, 2427), False, 'from tensorflow.keras import backend, layers, models\n'), ((2564, 2590), 'os.path.basename', 'os.path.basename', (['img_path'], {}), '(img_path)\n', (2580, 2590), False, 'import os\n'), ((2599, 2626), 'util.check_or_makedirs', 'check_or_makedirs', (['dest_dir'], {}), '(dest_dir)\n', (2616, 2626), False, 'from util import check_or_makedirs\n'), ((2646, 2666), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (2656, 2666), False, 'from PIL import Image\n'), ((2759, 2792), 'numpy.array', 'np.array', (['PIL_img'], {'dtype': 'np.uint8'}), '(PIL_img, dtype=np.uint8)\n', (2767, 2792), True, 'import numpy as np\n'), ((2903, 2954), 'numpy.empty', 'np.empty', ([], {'shape': '(1, new_h, new_w)', 'dtype': 'np.float32'}), '(shape=(1, new_h, new_w), 
dtype=np.float32)\n', (2911, 2954), True, 'import numpy as np\n'), ((3234, 3269), 'numpy.expand_dims', 'np.expand_dims', (['batch_imgs'], {'axis': '(-1)'}), '(batch_imgs, axis=-1)\n', (3248, 3269), True, 'import numpy as np\n'), ((3287, 3294), 'timeit.default_timer', 'timer', ([], {}), '()\n', (3292, 3294), True, 'from timeit import default_timer as timer\n'), ((3513, 3567), 'detection_yolo3.utils.draw_boxes', 'draw_boxes', (['np_img', 'out_boxes', 'out_scores', 'out_classes'], {}), '(np_img, out_boxes, out_scores, out_classes)\n', (3523, 3567), False, 'from detection_yolo3.utils import get_anchors, draw_boxes\n'), ((3586, 3613), 'PIL.Image.fromarray', 'Image.fromarray', (['np_img_rgb'], {}), '(np_img_rgb)\n', (3601, 3613), False, 'from PIL import Image\n'), ((3768, 3791), 'os.path.exists', 'os.path.exists', (['src_dir'], {}), '(src_dir)\n', (3782, 3791), False, 'import os\n'), ((1723, 1943), 'tensorflow.keras.layers.Lambada', 'layers.Lambada', (['yolo_eval'], {'name': '"""yolo_eval"""', 'arguments': "{'anchors': self.anchors, 'num_classes': num_classes, 'score_thresh': self.\n score_thresh, 'iou_thresh': self.nms_iou_thresh, 'max_boxes': self.\n max_boxes_num}"}), "(yolo_eval, name='yolo_eval', arguments={'anchors': self.\n anchors, 'num_classes': num_classes, 'score_thresh': self.score_thresh,\n 'iou_thresh': self.nms_iou_thresh, 'max_boxes': self.max_boxes_num})\n", (1737, 1943), False, 'from tensorflow.keras import backend, layers, models\n'), ((2512, 2536), 'os.path.exists', 'os.path.exists', (['img_path'], {}), '(img_path)\n', (2526, 2536), False, 'import os\n'), ((3635, 3667), 'os.path.join', 'os.path.join', (['dest_dir', 'img_name'], {}), '(dest_dir, img_name)\n', (3647, 3667), False, 'import os\n'), ((3813, 3840), 'os.path.join', 'os.path.join', (['src_dir', 'file'], {}), '(src_dir, file)\n', (3825, 3840), False, 'import os\n'), ((3853, 3872), 'os.listdir', 'os.listdir', (['src_dir'], {}), '(src_dir)\n', (3863, 3872), False, 'import os\n'), ((3447, 3454), 
'timeit.default_timer', 'timer', ([], {}), '()\n', (3452, 3454), True, 'from timeit import default_timer as timer\n')] |
import cv2
from shutil import *
import os
from PIL import Image
import numpy as np
sampleNum = 0
folder = input("\nEnter your Registration number's numerical part : ")
user = input("\nEnter Your name : ")
folder1 = folder
user1 = user
copy2('C:\\Users\\MY PC\\PycharmProjects\\untitled\\try1.py','C:\\Users\\MY PC\\PycharmProjects\\untitled\\Images')
sampleNum = 0
facecascade = cv2.CascadeClassifier('D:\\UDAY\\SOFTWARES\\opencv\\sources\\OpenCV Master\\opencv-master\\data\\haarcascades\\haarcascade_frontalface_default.xml')
eyecascade = cv2.CascadeClassifier('D:\\UDAY\\SOFTWARES\\opencv\\sources\\OpenCV Master\\opencv-master\\data\\haarcascades\\haarcascade_eye.xml')
camera = cv2.VideoCapture(0)
count = 0
while(True):
ret,frame = camera.read()
gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
faces = facecascade.detectMultiScale(gray,1.3,5)
for(x,y,w,h) in faces:
sampleNum = sampleNum + 1
img = cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
f = cv2.resize(gray[y:y+h,x:x+h],(500,500))
cv2.imwrite("Images/"+user+"."+str(folder)+"."+str(sampleNum)+".jpg",f )
count+=1
cv2.waitKey(200)
cv2.imshow("camera : ",frame)
cv2.waitKey(1)
if sampleNum>25:
break
os.remove('C:\\Users\\MY PC\\PycharmProjects\\untitled\\Images\\try1.py')
camera.release()
cv2.destroyAllWindows()
#trainer code
recogniser = cv2.face.createLBPHFaceRecognizer()
path = 'C:\\Users\\MY PC\\PycharmProjects\\untitled\\Images'
def getimageIds(path):
imagepaths = [os.path.join(path,f) for f in os.listdir(path)]
faces = []
Ids = []
for imagepath in imagepaths:
faceimg = Image.open(imagepath).convert('L')
facenp = np.array(faceimg,'uint8')
Id = int(os.path.split(imagepath)[-1].split('.')[1])
faces.append(facenp)
print(Id)
Ids.append(Id)
cv2.imshow('Training the dataset',facenp)
cv2.waitKey(10)
return Ids,faces
Ids,faces = getimageIds(path)
recogniser.train(faces,np.array(Ids))
recogniser.save('recogniser/recogniser_all.yml')
cv2.destroyAllWindows()
#recognition code
facecascade = cv2.CascadeClassifier('D:\\UDAY\\SOFTWARES\\opencv\\sources\\OpenCV Master\\opencv-master\data\\haarcascades\\haarcascade_frontalface_default.xml')
eyecascade = cv2.CascadeClassifier('D:\\UDAY\\SOFTWARES\\opencv\\sources\\OpenCV Master\\opencv-master\\data\\haarcascades\\haarcascade_eye.xml')
rec = cv2.face.createLBPHFaceRecognizer()
rec.load('C:\\Users\\MY PC\\PycharmProjects\\untitled\\recogniser\\recogniser_all.yml')
camera = cv2.VideoCapture(0)
sampleNum = 0
id = 0
font = cv2.FONT_HERSHEY_COMPLEX
while(True):
ret,frame = camera.read()
gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
faces = facecascade.detectMultiScale(gray,1.3,5)
for(x,y,w,h) in faces:
sampleNum = sampleNum + 1
img = cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
id,conf = rec.predict(gray[y:y+h,x:x+w])
if(folder=='0656'):
folder = "UDAY"
if(folder=='0441'):
folder = "Dheeraj"
cv2.putText(frame,str(folder),(x,y+h),font,2,255)
f = cv2.resize(gray[y:y+h,x:x+h],(500,500))
cv2.waitKey(100)
cv2.imshow("camera : ",frame)
cv2.waitKey(1)
if sampleNum>25:
break
camera.release()
cv2.destroyAllWindows() | [
"cv2.rectangle",
"os.listdir",
"PIL.Image.open",
"os.path.join",
"cv2.imshow",
"os.path.split",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.CascadeClassifier",
"cv2.resize",
"cv2.waitKey",
"cv2.face.createLBPHFaceRecognizer",
"os.remove"
] | [((380, 538), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""D:\\\\UDAY\\\\SOFTWARES\\\\opencv\\\\sources\\\\OpenCV Master\\\\opencv-master\\\\data\\\\haarcascades\\\\haarcascade_frontalface_default.xml"""'], {}), "(\n 'D:\\\\UDAY\\\\SOFTWARES\\\\opencv\\\\sources\\\\OpenCV Master\\\\opencv-master\\\\data\\\\haarcascades\\\\haarcascade_frontalface_default.xml'\n )\n", (401, 538), False, 'import cv2\n'), ((542, 684), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""D:\\\\UDAY\\\\SOFTWARES\\\\opencv\\\\sources\\\\OpenCV Master\\\\opencv-master\\\\data\\\\haarcascades\\\\haarcascade_eye.xml"""'], {}), "(\n 'D:\\\\UDAY\\\\SOFTWARES\\\\opencv\\\\sources\\\\OpenCV Master\\\\opencv-master\\\\data\\\\haarcascades\\\\haarcascade_eye.xml'\n )\n", (563, 684), False, 'import cv2\n'), ((684, 703), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (700, 703), False, 'import cv2\n'), ((1247, 1320), 'os.remove', 'os.remove', (['"""C:\\\\Users\\\\MY PC\\\\PycharmProjects\\\\untitled\\\\Images\\\\try1.py"""'], {}), "('C:\\\\Users\\\\MY PC\\\\PycharmProjects\\\\untitled\\\\Images\\\\try1.py')\n", (1256, 1320), False, 'import os\n'), ((1338, 1361), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1359, 1361), False, 'import cv2\n'), ((1391, 1426), 'cv2.face.createLBPHFaceRecognizer', 'cv2.face.createLBPHFaceRecognizer', ([], {}), '()\n', (1424, 1426), False, 'import cv2\n'), ((2078, 2101), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2099, 2101), False, 'import cv2\n'), ((2138, 2296), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""D:\\\\UDAY\\\\SOFTWARES\\\\opencv\\\\sources\\\\OpenCV Master\\\\opencv-master\\\\data\\\\haarcascades\\\\haarcascade_frontalface_default.xml"""'], {}), "(\n 'D:\\\\UDAY\\\\SOFTWARES\\\\opencv\\\\sources\\\\OpenCV Master\\\\opencv-master\\\\data\\\\haarcascades\\\\haarcascade_frontalface_default.xml'\n )\n", (2159, 2296), False, 'import cv2\n'), ((2299, 2441), 
'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""D:\\\\UDAY\\\\SOFTWARES\\\\opencv\\\\sources\\\\OpenCV Master\\\\opencv-master\\\\data\\\\haarcascades\\\\haarcascade_eye.xml"""'], {}), "(\n 'D:\\\\UDAY\\\\SOFTWARES\\\\opencv\\\\sources\\\\OpenCV Master\\\\opencv-master\\\\data\\\\haarcascades\\\\haarcascade_eye.xml'\n )\n", (2320, 2441), False, 'import cv2\n'), ((2438, 2473), 'cv2.face.createLBPHFaceRecognizer', 'cv2.face.createLBPHFaceRecognizer', ([], {}), '()\n', (2471, 2473), False, 'import cv2\n'), ((2571, 2590), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (2587, 2590), False, 'import cv2\n'), ((3318, 3341), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3339, 3341), False, 'import cv2\n'), ((768, 807), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (780, 807), False, 'import cv2\n'), ((1163, 1193), 'cv2.imshow', 'cv2.imshow', (['"""camera : """', 'frame'], {}), "('camera : ', frame)\n", (1173, 1193), False, 'import cv2\n'), ((1197, 1211), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1208, 1211), False, 'import cv2\n'), ((2014, 2027), 'numpy.array', 'np.array', (['Ids'], {}), '(Ids)\n', (2022, 2027), True, 'import numpy as np\n'), ((2698, 2737), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (2710, 2737), False, 'import cv2\n'), ((3217, 3247), 'cv2.imshow', 'cv2.imshow', (['"""camera : """', 'frame'], {}), "('camera : ', frame)\n", (3227, 3247), False, 'import cv2\n'), ((3251, 3265), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3262, 3265), False, 'import cv2\n'), ((935, 995), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(255, 0, 0)', '(2)'], {}), '(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\n', (948, 995), False, 'import cv2\n'), ((996, 1042), 'cv2.resize', 'cv2.resize', (['gray[y:y + h, x:x + h]', '(500, 500)'], {}), '(gray[y:y + h, x:x + h], 
(500, 500))\n', (1006, 1042), False, 'import cv2\n'), ((1142, 1158), 'cv2.waitKey', 'cv2.waitKey', (['(200)'], {}), '(200)\n', (1153, 1158), False, 'import cv2\n'), ((1530, 1551), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (1542, 1551), False, 'import os\n'), ((1709, 1735), 'numpy.array', 'np.array', (['faceimg', '"""uint8"""'], {}), "(faceimg, 'uint8')\n", (1717, 1735), True, 'import numpy as np\n'), ((1874, 1916), 'cv2.imshow', 'cv2.imshow', (['"""Training the dataset"""', 'facenp'], {}), "('Training the dataset', facenp)\n", (1884, 1916), False, 'import cv2\n'), ((1924, 1939), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (1935, 1939), False, 'import cv2\n'), ((2865, 2925), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(255, 0, 0)', '(2)'], {}), '(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\n', (2878, 2925), False, 'import cv2\n'), ((3148, 3194), 'cv2.resize', 'cv2.resize', (['gray[y:y + h, x:x + h]', '(500, 500)'], {}), '(gray[y:y + h, x:x + h], (500, 500))\n', (3158, 3194), False, 'import cv2\n'), ((3196, 3212), 'cv2.waitKey', 'cv2.waitKey', (['(100)'], {}), '(100)\n', (3207, 3212), False, 'import cv2\n'), ((1560, 1576), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1570, 1576), False, 'import os\n'), ((1657, 1678), 'PIL.Image.open', 'Image.open', (['imagepath'], {}), '(imagepath)\n', (1667, 1678), False, 'from PIL import Image\n'), ((1752, 1776), 'os.path.split', 'os.path.split', (['imagepath'], {}), '(imagepath)\n', (1765, 1776), False, 'import os\n')] |
#<NAME>
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_predict
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, Imputer, StandardScaler
from sklearn.externals import joblib
from helper_functions import *
final_df = unpickle_object("analysis_dataframe.pkl")
def baseline_model(train_feature, test_feature, train_target, test_target):
"""
This function performs a baseline model assessment for our dataframe.
This function is primarily informative, although it does return a fitted model which could be used elsewhere
Inputs:
- train_feature - analogous to X_train
- test_feature - analogous to X_test
- train_target - analogous to y_train
- test_target - y_test
Returns:
- fittened Linear Regression Model
"""
top_3_coef = []
best_feature_index = []
lin_reg = LinearRegression()
fitted_model = lin_reg.fit(train_feature, train_target)
r2_sq = fitted_model.score(test_feature, test_target)
coef_array = fitted_model.coef_
print(f"The R2 score of a basline regression model is {r2_sq}")
print()
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((lin_reg.predict(test_feature) - test_target) ** 2))
print()
for index, value in enumerate(coef_array.reshape(-1,1)):
top_3_coef.append((index, value))
top_3_coef = sorted(top_3_coef, key=lambda x: x[1], reverse=True)[:3]
for i in top_3_coef:
best_feature_index.append(i[0])
feature_names = list(final_df.iloc[:, best_feature_index].columns)
print()
print(f"The top 3 features for predictive power according to the baseline model is {feature_names}")
joblib.dump(fitted_model, "baseline_linear_regression_model");
return fitted_model
def regular_grid(lst, features, target):
"""
This function will run a 'regular grid' meaning that no holdout sets should be passed to the function.
This function employs cross validation voa the GridSearchCV class.
Inputs:
- lst: lst of model names. Should be one of "Ride", "Lasso", "Elastic Net"
- features: Numpy array consisting of all features to be included in model.
- target: Numpy array consisting of entire target variable values to be included in model
Returns:
- all_results: Dictionary of results of each model specified.
"""
all_results = {}
models = {"Ridge": {"clf": Ridge(), "parameters": {'alpha': np.arange(0.1,1000,0.1)}},
"Lasso": {"clf": Lasso(),"parameters": {'alpha': np.arange(0.1,1000,0.1)}},
"Elastic Net": {"clf":ElasticNet(),"parameters": {'alpha': [0.01, 0.1, 0.5,1],'l1_ratio': [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]}}}
for i in lst:
top_3_coef = []
graphical_info = []
object_ = models[i]['clf']
params = models[i]['parameters']
grid = GridSearchCV(object_, params, cv=11)
fitted_model = grid.fit(features, target)
r_sq = fitted_model.score(features, target)
ideal_param = fitted_model.best_params_
best_cross_val_score = fitted_model.best_score_
best_model = fitted_model.best_estimator_
coef_array = best_model.coef_
for index, value in enumerate(coef_array.reshape(-1,1)):
top_3_coef.append((index, value))
top_3_coef = sorted(top_3_coef, key=lambda x: x[1], reverse=True)[:3]
for row in fitted_model.grid_scores_:
alpha = row[0]['alpha']
mean_score = row[1]
graphical_info.append((alpha, mean_score))
all_results[i] = {'R_sq': r_sq, "best_model": best_model,
"tuned_params": ideal_param,
"Best_cv_score": best_cross_val_score,
"important_features": top_3_coef,
"graphical_info": graphical_info,
"grid_object": fitted_model}
joblib.dump(fitted_model, i+"grid_search_model");
return all_results
def holdout_grid(lst, train_feature, test_feature, train_target, test_target):
"""
This function will run a holdout grid - meaning that train/test splits of the features and target should be passed.
Implementation of this function relies on teh GridSearchCV class!
Inputs:
- lst: lst of model names. Should be one of "Ride", "Lasso", "Elastic Net"
- train_feature - analogous to X_train
- test_feature - analogous to X_test
- train_target - analogous to y_train
- test_target - y_test
Returns:
- all_results: Dictionary of results of each model specified.
"""
all_results = {}
models = {"Ridge": {"clf": Ridge(), "parameters": {'alpha': np.arange(0.1,1000,0.1)}},
"Lasso": {"clf": Lasso(),"parameters": {'alpha': np.arange(0.1,1000,0.1)}},
"Elastic Net": {"clf":ElasticNet(),"parameters": {'alpha': [0.01, 0.1, 0.5,1],'l1_ratio': [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]}}}
for i in lst:
top_3_coef = []
graphical_info = []
object_ = models[i]['clf']
params = models[i]['parameters']
grid = GridSearchCV(object_, params, cv=11)
fitted_model = grid.fit(train_feature, train_target)
r_sq = fitted_model.score(test_feature, test_target)
ideal_param = fitted_model.best_params_
best_cross_val_score = fitted_model.best_score_
best_model = fitted_model.best_estimator_
coef_array = best_model.coef_
for index, value in enumerate(coef_array.reshape(-1,1)):
top_3_coef.append((index, value))
top_3_coef = sorted(top_3_coef, key=lambda x: x[1], reverse=True)[:3]
for row in fitted_model.grid_scores_:
alpha = row[0]['alpha']
mean_score = row[1]
graphical_info.append((alpha, mean_score))
all_results[i] = {'R_sq': r_sq, "best_model": best_model,
"tuned_params": ideal_param,
"Best_cv_score": best_cross_val_score,
"important_features": top_3_coef,
"graphical_info": graphical_info,
"grid_object": fitted_model}
joblib.dump(fitted_model, i+"grid_search_holdout_model");
return all_results
def extract_model_comparisons(model_1_dict, model_2_dict, model_name):
"""
This function will compare the results of models run through cross-validation with a holdout and without a holdout.
This function is informative only.
Inputs:
- model_1_dict: A dictionary relating to the CV holdout models
- model_2_dict: A dictionary relating to the CV non-holdout models
Returns:
- None
"""
model_1_feature_index = []
model_2_feature_index = []
r_sq_1 = model_1_dict[model_name]['R_sq']
optimal_params_1 = model_1_dict[model_name]['tuned_params']
mean_cross_val_score_1 = model_1_dict[model_name]['Best_cv_score']
model_1_graph_list = model_1_dict[model_name]['graphical_info']
for i in model_1_dict[model_name]['important_features']:
model_1_feature_index.append(i[0])
model_1_top_features = list(final_df.iloc[:, model_1_feature_index].columns)
r_sq_2 = model_2_dict[model_name]['R_sq']
optimal_params_2 = model_2_dict[model_name]['tuned_params']
mean_cross_val_score_2 = model_2_dict[model_name]['Best_cv_score']
model_2_graph_list = model_2_dict[model_name]['graphical_info']
for i in model_2_dict[model_name]['important_features']:
model_2_feature_index.append(i[0])
model_2_top_features = list(final_df.iloc[:, model_2_feature_index].columns)
print()
if r_sq_1 > r_sq_2:
print(f"The Model with the holdout set has a higher R2 of {r_sq_1}. This is higher by {r_sq_1-r_sq_2}")
print()
print(f"The optimal parameters for this model are {optimal_params_1}")
print()
print(f"The mean cross validation score on the test set is: {mean_cross_val_score_1}")
print()
print(f"The most important features accordning to this model is {model_1_top_features}")
else:
print(f"The Model with no holdout set has a higher R2 of {r_sq_2}. This is higher by {r_sq_2-r_sq_1}")
print()
print(f"The optimal parameters for this model are {optimal_params_2}")
print()
print(f"The mean cross validation score for all of the data is: {mean_cross_val_score_2}")
print()
print(f"The most important features accordning to this model is {model_2_top_features}")
print()
print("Graphical Comparison below: ")
print()
if model_name == "Lasso" or model_name == "Elastic Net":
x_vals = (-0.2, 1.25)
y_vals = (-1,1)
if model_name == "Ridge":
x_vals = (0,100)
y_vals = (0.3, 0.5)
plt.figure(figsize=(12,6))
plt.plot(*zip(*model_1_graph_list), '--r') #holdout
plt.plot(*zip(*model_2_graph_list), '--b') #no holdout
plt.ylim(y_vals)
plt.xlim(x_vals)
plt.xlabel("Alpha Value")
plt.ylabel("Mean Test Score")
plt.legend(['Holdout Cross-Validation', 'No Holdout Cross-Validation'])
plt.tight_layout() | [
"warnings.filterwarnings",
"sklearn.model_selection.GridSearchCV",
"sklearn.linear_model.ElasticNet",
"sklearn.linear_model.Lasso",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"sklearn.linear_model.Ridge",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
... | [((96, 158), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (119, 158), False, 'import warnings\n'), ((1118, 1136), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1134, 1136), False, 'from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet\n'), ((1964, 2025), 'sklearn.externals.joblib.dump', 'joblib.dump', (['fitted_model', '"""baseline_linear_regression_model"""'], {}), "(fitted_model, 'baseline_linear_regression_model')\n", (1975, 2025), False, 'from sklearn.externals import joblib\n'), ((9449, 9476), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (9459, 9476), True, 'import matplotlib.pyplot as plt\n'), ((9606, 9622), 'matplotlib.pyplot.ylim', 'plt.ylim', (['y_vals'], {}), '(y_vals)\n', (9614, 9622), True, 'import matplotlib.pyplot as plt\n'), ((9627, 9643), 'matplotlib.pyplot.xlim', 'plt.xlim', (['x_vals'], {}), '(x_vals)\n', (9635, 9643), True, 'import matplotlib.pyplot as plt\n'), ((9648, 9673), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Alpha Value"""'], {}), "('Alpha Value')\n", (9658, 9673), True, 'import matplotlib.pyplot as plt\n'), ((9678, 9707), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mean Test Score"""'], {}), "('Mean Test Score')\n", (9688, 9707), True, 'import matplotlib.pyplot as plt\n'), ((9713, 9784), 'matplotlib.pyplot.legend', 'plt.legend', (["['Holdout Cross-Validation', 'No Holdout Cross-Validation']"], {}), "(['Holdout Cross-Validation', 'No Holdout Cross-Validation'])\n", (9723, 9784), True, 'import matplotlib.pyplot as plt\n'), ((9794, 9812), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9810, 9812), True, 'import matplotlib.pyplot as plt\n'), ((3215, 3251), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['object_', 'params'], {'cv': '(11)'}), '(object_, params, cv=11)\n', (3227, 
3251), False, 'from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_predict\n'), ((4344, 4394), 'sklearn.externals.joblib.dump', 'joblib.dump', (['fitted_model', "(i + 'grid_search_model')"], {}), "(fitted_model, i + 'grid_search_model')\n", (4355, 4394), False, 'from sklearn.externals import joblib\n'), ((5616, 5652), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['object_', 'params'], {'cv': '(11)'}), '(object_, params, cv=11)\n', (5628, 5652), False, 'from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_predict\n'), ((6765, 6823), 'sklearn.externals.joblib.dump', 'joblib.dump', (['fitted_model', "(i + 'grid_search_holdout_model')"], {}), "(fitted_model, i + 'grid_search_holdout_model')\n", (6776, 6823), False, 'from sklearn.externals import joblib\n'), ((2719, 2726), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (2724, 2726), False, 'from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet\n'), ((2805, 2812), 'sklearn.linear_model.Lasso', 'Lasso', ([], {}), '()\n', (2810, 2812), False, 'from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet\n'), ((2895, 2907), 'sklearn.linear_model.ElasticNet', 'ElasticNet', ([], {}), '()\n', (2905, 2907), False, 'from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet\n'), ((5120, 5127), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (5125, 5127), False, 'from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet\n'), ((5206, 5213), 'sklearn.linear_model.Lasso', 'Lasso', ([], {}), '()\n', (5211, 5213), False, 'from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet\n'), ((5296, 5308), 'sklearn.linear_model.ElasticNet', 'ElasticNet', ([], {}), '()\n', (5306, 5308), False, 'from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet\n'), ((2752, 2777), 'numpy.arange', 'np.arange', (['(0.1)', '(1000)', '(0.1)'], {}), '(0.1, 1000, 0.1)\n', 
(2761, 2777), True, 'import numpy as np\n'), ((2837, 2862), 'numpy.arange', 'np.arange', (['(0.1)', '(1000)', '(0.1)'], {}), '(0.1, 1000, 0.1)\n', (2846, 2862), True, 'import numpy as np\n'), ((5153, 5178), 'numpy.arange', 'np.arange', (['(0.1)', '(1000)', '(0.1)'], {}), '(0.1, 1000, 0.1)\n', (5162, 5178), True, 'import numpy as np\n'), ((5238, 5263), 'numpy.arange', 'np.arange', (['(0.1)', '(1000)', '(0.1)'], {}), '(0.1, 1000, 0.1)\n', (5247, 5263), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import re
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.decomposition import PCA , TruncatedSVD
import joblib
from sklearn.manifold import TSNE
# import seaborn as sns
import matplotlib.pyplot as plt
class preprocessor() :
"""class to read the dataset and clean it for for furthur processsing
"""
def __init__(self , DATASET_PATH = '../data/dataset_' , from_file = True) :
"""constructor for the preprocessord class. It specifies the path to dataset
Parameters
----------
DATASET_PATH : str, optional
path to the dataset, by default 'data/dataset_' for all splitted files
"""
# If you want to use the single file i.e.DATASET_PATH = 'data/training.1600000.processed.noemoticon.csv'
# then self.df = self.read_dataset(DATASET_PATH, join_flag = False)
if(from_file == False) :
self.df = self.read_dataset(DATASET_PATH) # setter for the dataset
def read_dataset(self , DATASET_PATH , join_flag = True , ext = '.csv') :
"""function to extract the dataset (dataframe) from the specified file path
Parameters
----------
DATASET_PATH : string
path to dataset
join_flag : boolean
flag to indicate whether to extract data from splited files or a single file
ext : string
extenstion of splitted files (only to be used when join_flag = True), by default '.csv.'
Returns
-------
pandas dataframe
data extracted from file at specified path
"""
columns = [ 'target' , 'ID' , 'date' , 'flag' , 'user' , 'text' ] # naming the columns
if(join_flag) : # joining dataset
frames = [] # dataset was divided by 'split -C 20971520 -d training.1600000.processed.noemoticon.csv --additional-suffix=.csv dataset_' where 20971520 B = 20 MB
for i in range(12) :
frames.append(pd.read_csv(DATASET_PATH + str(i).zfill(2) + ext , encoding = "ISO-8859-1" , header=None , names=columns))
df = pd.concat(frames , ignore_index=True)
else :
df = pd.read_csv(DATASET_PATH , encoding = "ISO-8859-1" , header= None , names = columns)
df.loc[df['target'] == 4, 'target'] = 1 # changing the target value from 4 to 1
return df # returning dataset
def __get_day(self,x) :
"""function to tell the day (int) of tweat based upon date-time
Parameters
----------
x : string-like
Date-time of tweat
Returns
-------
int
number associated with the day i.e., (0: Monday, 1: Tuesday, 2: Wednessday, 3: Thursday, 4: Friday, 5: Saturday, 6: Sunday, -1: None)
"""
days = ['Mon' , 'Tue' , 'Wed' , 'Thu' , 'Fri' , 'Sat' , 'Sun']
d = x[:3] # sliciing the day string from date-time string
if(d in days) :
return days.index(d)
return -1 # -1 if day is not known
def get_user_resolved(self) :
"""function to make usefull feature out of user feature (which contain usernames)
We have appliad one-hot-encoding to extract the uniqueness and repeatition of a user.
Since the there were too many unique users (i.e. high dimensional data), hence dimension reduction is done
Returns
-------
numpy 2D array
Array contains resolved features of user column
"""
user_ndarr = self.df['user'].to_numpy().reshape(-1,1)
encoder = OneHotEncoder()
encoder = encoder.fit(user_ndarr)
hot_coded = encoder.transform(user_ndarr) # hot_coded is scipy.sparse.csr.csr_matrix, which is a memory efficent way of storing 1-hot-coded matrix
tsvd = TruncatedSVD(n_components= 50)
return tsvd.fit(hot_coded).transform(hot_coded)
# TODO : what to choose TVSD and PCA --> also add this docstring 'by using PCA'
# dim_red = PCA(n_components=2)
# dim_red.fit(hot_coded)
# return dim_red.transform(hot_coded)
def remove_pattern(self , text , pattern):
"""function to clean the tweats for furtur processing.
Here we are removing the specified pattern from text
Parameters
----------
text : string
the text of tweat
pattern : string
the pattern to be removed
Returns
-------
string
cleaned tweat
"""
r = re.findall(pattern,text) # finds the pattern i.e @user and puts it in a list for further task
for i in r:
text = re.sub(i,"",text)
return text
def preprocess(self , FILE_PATH = '../data/preprocessed') :
"""function to preprocess the dataframe and return dependent (X) and independent (y)
Parameters
----------
FILE_PATH : string
path to the pickle file
Returns
-------
X : numpy 2D array
it is the array of features
y : numpy 1D array
it is the array of labels
"""
try :
X , y = joblib.load(FILE_PATH)
print('Extracted from stored file')
except :
print('Preprocessing data')
day = self.df.date.apply(lambda x : self.__get_day(x))
self.df['date'] = pd.to_datetime(self.df['date'])
date = self.df.date.apply(lambda x : x.day)
month = self.df.date.apply(lambda x : x.month)
year = self.df.date.apply(lambda x : x.year)
time_in_minutes = self.df.date.apply(lambda x : x.minute + x.hour * 60)
usr = self.get_user_resolved()
self.df['Tidy_Tweets'] = np.vectorize(self.remove_pattern)(self.df['text'], "@[\w]*")
self.df['Tidy_Tweets'] = self.df['Tidy_Tweets'].str.replace("[^a-zA-Z#]", " ")
self.df['Tidy_Tweets'] = self.df['Tidy_Tweets'].apply(lambda x: ' '.join([w for w in x.split() if len(w)>3]))
# Bag of Words
bow_vectorizer = CountVectorizer(max_df=0.90, min_df=2, max_features=1000, stop_words='english')
# bag-of-words feature matrix
bow = bow_vectorizer.fit_transform(self.df['Tidy_Tweets'])
# df_bow = pd.DataFrame(bow.todense())
# train_bow = bow
tsvd = TruncatedSVD(n_components=200)
tweets_resolved = tsvd.fit_transform(bow)
usr = np.append(usr , tweets_resolved , axis=1)
X = pd.concat ([ day , date , month , year , time_in_minutes ] , axis = 1).to_numpy()
X = np.append(X,usr , axis=1)
# X = 1
y = self.df['target'].to_numpy()
joblib.dump((X,y) , FILE_PATH)
return X , y
class EDA() :
"""class to perform the exploratory data analysis on the data
"""
def scatter_plot(self, X, y):
"""function to apply tsne on the data to get reduce it to 2 dimension, and plot the resulted dimensions
Parameters
----------
X : Pandas dataframe
Dataframe of the preprocessed feature X
y : Pandas dataframe
Dataframe of the label for every corresponding datapoint in X
"""
tsvd = TruncatedSVD(n_components= 10)
X = tsvd.fit(X).transform(X)
print('starting TSNE')
NNfeatures = TSNE(n_components = 2).fit_transform(X)
print('ending TSNE')
self.__plot_cluster(NNfeatures , y , 'Scatter plot')
def __plot_cluster(self, X , y , title) :
"""function to plot the cluster with diffren colors associated with labels
Args:
X (numpy 2D array): It is the set of independent variable
y (numpy 1D aray): It is the dependent vaibale respective to X
"""
D1 = X[:,0] # extracting the dimension one
D2 = X[:,1] # extracting the dimension two
# print(D1.shape , D2.shape, y.shape)
plt.figure('Scatter plot')
plt.xlabel('D0')
plt.ylabel('D1')
a = plt.scatter(D1 , D2 , c = y) # ploting the scatter plot
plt.legend(*a.legend_elements(),loc = 'best')
plt.title(title)
# plt.show() # showing the plot
plt.savefig('../plot_images/'+title+'.png')
# def basic_info(self , ) :
# pass
# def
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"pandas.to_datetime",
"sklearn.feature_extraction.text.CountVectorizer",
"matplotlib.pyplot.xlabel",
"sklearn.manifold.TSNE",
"matplotlib.pyplot.scatter",
"joblib.load",
"joblib.dump",
"matplotlib.pyplot.savefig",
"sklearn.decomposition.TruncatedSVD... | [((3375, 3390), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (3388, 3390), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((3595, 3624), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': '(50)'}), '(n_components=50)\n', (3607, 3624), False, 'from sklearn.decomposition import PCA, TruncatedSVD\n'), ((4210, 4235), 're.findall', 're.findall', (['pattern', 'text'], {}), '(pattern, text)\n', (4220, 4235), False, 'import re\n'), ((6617, 6646), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': '(10)'}), '(n_components=10)\n', (6629, 6646), False, 'from sklearn.decomposition import PCA, TruncatedSVD\n'), ((7263, 7289), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Scatter plot"""'], {}), "('Scatter plot')\n", (7273, 7289), True, 'import matplotlib.pyplot as plt\n'), ((7293, 7309), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""D0"""'], {}), "('D0')\n", (7303, 7309), True, 'import matplotlib.pyplot as plt\n'), ((7313, 7329), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""D1"""'], {}), "('D1')\n", (7323, 7329), True, 'import matplotlib.pyplot as plt\n'), ((7337, 7361), 'matplotlib.pyplot.scatter', 'plt.scatter', (['D1', 'D2'], {'c': 'y'}), '(D1, D2, c=y)\n', (7348, 7361), True, 'import matplotlib.pyplot as plt\n'), ((7447, 7463), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (7456, 7463), True, 'import matplotlib.pyplot as plt\n'), ((7514, 7561), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../plot_images/' + title + '.png')"], {}), "('../plot_images/' + title + '.png')\n", (7525, 7561), True, 'import matplotlib.pyplot as plt\n'), ((2020, 2056), 'pandas.concat', 'pd.concat', (['frames'], {'ignore_index': '(True)'}), '(frames, ignore_index=True)\n', (2029, 2056), True, 'import pandas as pd\n'), ((2077, 2153), 'pandas.read_csv', 'pd.read_csv', (['DATASET_PATH'], {'encoding': '"""ISO-8859-1"""', 'header': 'None', 
'names': 'columns'}), "(DATASET_PATH, encoding='ISO-8859-1', header=None, names=columns)\n", (2088, 2153), True, 'import pandas as pd\n'), ((4334, 4353), 're.sub', 're.sub', (['i', '""""""', 'text'], {}), "(i, '', text)\n", (4340, 4353), False, 'import re\n'), ((4761, 4783), 'joblib.load', 'joblib.load', (['FILE_PATH'], {}), '(FILE_PATH)\n', (4772, 4783), False, 'import joblib\n'), ((4949, 4980), 'pandas.to_datetime', 'pd.to_datetime', (["self.df['date']"], {}), "(self.df['date'])\n", (4963, 4980), True, 'import pandas as pd\n'), ((5571, 5649), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'max_df': '(0.9)', 'min_df': '(2)', 'max_features': '(1000)', 'stop_words': '"""english"""'}), "(max_df=0.9, min_df=2, max_features=1000, stop_words='english')\n", (5586, 5649), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((5824, 5854), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': '(200)'}), '(n_components=200)\n', (5836, 5854), False, 'from sklearn.decomposition import PCA, TruncatedSVD\n'), ((5911, 5950), 'numpy.append', 'np.append', (['usr', 'tweets_resolved'], {'axis': '(1)'}), '(usr, tweets_resolved, axis=1)\n', (5920, 5950), True, 'import numpy as np\n'), ((6053, 6078), 'numpy.append', 'np.append', (['X', 'usr'], {'axis': '(1)'}), '(X, usr, axis=1)\n', (6062, 6078), True, 'import numpy as np\n'), ((6134, 6164), 'joblib.dump', 'joblib.dump', (['(X, y)', 'FILE_PATH'], {}), '((X, y), FILE_PATH)\n', (6145, 6164), False, 'import joblib\n'), ((6722, 6742), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)'}), '(n_components=2)\n', (6726, 6742), False, 'from sklearn.manifold import TSNE\n'), ((5273, 5306), 'numpy.vectorize', 'np.vectorize', (['self.remove_pattern'], {}), '(self.remove_pattern)\n', (5285, 5306), True, 'import numpy as np\n'), ((5963, 6023), 'pandas.concat', 'pd.concat', (['[day, date, month, year, time_in_minutes]'], {'axis': '(1)'}), '([day, date, month, year, 
time_in_minutes], axis=1)\n', (5972, 6023), True, 'import pandas as pd\n')] |
"""
Utility functions for domain decomposition.
"""
def lazy_reduce(reduction, block, launches, contexts):
"""
Applies a reduction over a sequence of parallelizable device operations.
The reduction can be something like built-in `max` or `min`. The
`launches` argument is a sequence of callables which trigger the
asynchronous calculation and return a "token" (in the case of a cupy
reduction, the token is a device array). The `block` argument is a
callable which operates on the token, blocking until the underlying
operation is completed, and then returns a value (`block` is probably
built-in `float`). The `contexts` argument is a sequence of execution
contexts which should switch to the device on which the respective launch
callable was executed.
"""
tokens = [launch() for launch in launches]
results = []
for token, context in zip(tokens, contexts):
with context:
results.append(block(token))
return reduction(results)
def partition(elements, num_parts):
"""
Equitably divide the given number of elements into `num_parts` partitions.
The sum of the partitions is `elements`. The number of partitions must be
less than or equal to the number of elements.
"""
n = elements // num_parts
r = elements % num_parts
for i in range(num_parts):
yield n + (1 if i < r else 0)
def subdivide(interval, num_parts):
"""
Divide an interval into non-overlapping contiguous sub-intervals.
"""
try:
a, b = interval
except TypeError:
a, b = 0, interval
for n in partition(b - a, num_parts):
yield a, a + n
a += n
def concat_on_host(arrays: list, num_guard=None):
"""
Concatenate a list of arrays, which may be allocated on different devices.
The array returned is allocated on the host. The arrays are either 2d or
3d, where the final axis contains fields. The concatenation is performed
on the first axis.
"""
import numpy as np
def all_equal(seq):
for x in seq:
try:
if x != y:
raise ValueError("got distinct values")
except UnboundLocalError:
y = x
return x
def to_host(a):
try:
return a.get()
except AttributeError:
return a
if len(arrays[0].shape) == 2:
ng = num_guard or 0
ni = sum(a.shape[0] - 2 * ng for a in arrays)
nq = all_equal(a.shape[1] for a in arrays)
si = slice(ng, -ng) if ng > 0 else slice(None)
result = np.zeros([ni, nq])
i = 0
for array in arrays:
a = i
b = i + array.shape[0] - 2 * ng
i += b - a
result[a:b] = to_host(array[si])
return result
if len(arrays[0].shape) == 3:
ngi, ngj = num_guard or (0, 0)
ni = sum(a.shape[0] - 2 * ngi for a in arrays)
nj = all_equal(a.shape[1] - 2 * ngj for a in arrays)
nq = all_equal(a.shape[2] for a in arrays)
si = slice(ngi, -ngi) if ngi > 0 else slice(None)
sj = slice(ngj, -ngj) if ngj > 0 else slice(None)
result = np.zeros([ni, nj, nq])
i = 0
for array in arrays:
a = i
b = i + array.shape[0] - 2 * ngi
i += b - a
result[a:b] = to_host(array[si, sj])
return result
| [
"numpy.zeros"
] | [((2623, 2641), 'numpy.zeros', 'np.zeros', (['[ni, nq]'], {}), '([ni, nq])\n', (2631, 2641), True, 'import numpy as np\n'), ((3212, 3234), 'numpy.zeros', 'np.zeros', (['[ni, nj, nq]'], {}), '([ni, nj, nq])\n', (3220, 3234), True, 'import numpy as np\n')] |
import random
import os
import numpy as np
from scipy.ndimage.filters import median_filter
import os
import random
import numpy as np
from scipy.ndimage.filters import median_filter
def gaussian_noise(img, mean=0, sigma=0.03):
img = img.copy()
noise = np.random.normal(mean, sigma, img.shape)
mask_overflow_upper = img+noise >= 1.0
mask_overflow_lower = img+noise < 0
noise[mask_overflow_upper] = 1.0
noise[mask_overflow_lower] = 0
img += noise
return img
def data_gen_training_segmentation(data_type, batch_size, output_depth, output_rows, output_cols,
shuffle, augment, smoothing=False, validation_split = 0.1):
data_path = "./data/segmentation/numpy/"
if data_type == "train":
train_input = np.load(os.path.join(data_path, 'imgs_train.npy'))
train_input = train_input[0:validation_split]
masks_input = np.load(os.path.join(data_path, 'imgs_mask_train.npy'))
masks_input = masks_input[0:validation_split]
elif data_type == "validation":
train_input = np.load(os.path.join(data_path, 'imgs_train.npy'))
train_input = train_input[validation_split:]
masks_input = np.load(os.path.join(data_path, 'imgs_mask_train.npy'))
masks_input = masks_input[validation_split:]
train_return = np.zeros((batch_size, output_depth, output_rows, output_cols)).astype('float32')
masks_return = np.zeros((batch_size, output_depth, output_rows, output_cols)).astype('float32')
visualise_count = 0
pred_dir_train = './results/generator_preprocessed/train'
if not os.path.exists(pred_dir_train):
os.mkdir(pred_dir_train)
pred_dir_masks = './results/generator_preprocessed/masks'
if not os.path.exists(pred_dir_masks):
os.mkdir(pred_dir_masks)
while (True):
for train_counter in range(0, train_input.shape[0]): # if shuffle=false then go through entire dataset
# start_time = time.time()
if shuffle:
random_shuffle = random.randint(0, train_input.shape[0])
else:
random_shuffle = 0
# Random numbers for data augmentation probability:
rotate = random.uniform(0.0, 1.0)
rotate_angle = random.randint(0, 4)
add_noise = random.uniform(0.0, 1.0)
image = train_input[(train_counter+depth_counter+random_shuffle)%train_input.shape[0]]
mask = masks_input[(train_counter+depth_counter+random_shuffle)%masks_input.shape[0]]
if(depth_counter==0 and data_type == "train" and select_masks==True and mask_select_probability < 0.3):
mask_sum = np.sum(mask)+1
mask_ratio = mask_sum / mask.shape[0] ** 2
while(mask_ratio < mask_threshold):
random_shuffle = random.randint(0, train_input.shape[0])
image = train_input[(train_counter + depth_counter + random_shuffle) % train_input.shape[0]]
mask = masks_input[(train_counter + depth_counter + random_shuffle) % masks_input.shape[0]]
mask_sum = np.sum(mask)+1
mask_ratio = mask_sum / mask.shape[0] ** 2
if augment:
if data_type == 'train':
# Vertical mirroring
# if mirror_vertical < 0.2:
# image = cv2.flip(image, 0)
# mask = cv2.flip(mask, 0)
# # Horizontal mirroring
# if mirror_horizontal < 0.2:
# image = cv2.flip(image, 1)
# mask = cv2.flip(mask, 1)
# # Random rotation
if rotate < 0.2:
image = rotate_img(image, rotate_angle)
mask = rotate_img(mask, rotate_angle)
mask[mask > 0.5] = 1
# Shearing
if shear < 0.2:
image = shear_img(image, shear_angle)
mask = shear_img(mask, shear_angle)
mask[mask > 0.5] = 1
# Adding gaussian noise
if add_noise < 0.4:
image = gaussian_noise(image)
# image = cv2.resize(image, (output_rows, output_cols), interpolation=cv2.INTER_CUBIC)
# mask = cv2.resize(mask, (output_rows, output_cols), interpolation=cv2.INTER_NEAREST)
if(blur==True and blur_number>0):
mask = median_filter(mask, size=blur_number)
# imsave(os.path.join(pred_dir_train, 'pre_processed_' + str(visualise_count) + '.png'), image)
# imsave(os.path.join(pred_dir_masks, 'pre_processed_' + str(visualise_count) + '.png'), mask)
# # # imsave(os.path.join(pred_dir_masks, 'pre_processed_' + str(visualise_count) + '.png'), cv2.resize(mask, (output_rows//16, output_cols//16), interpolation=cv2.INTER_NEAREST))
# # #
# visualise_count += 1
# #
# print(time.time()-start_time)
# train_return[batch_counter][depth_counter] = (image-0.5)*255
train_return[batch_counter][depth_counter] = image
masks_return[batch_counter][depth_counter] = mask
# masks_return_small[batch_counter][depth_counter] = cv2.resize(mask, (output_rows//2, output_cols//2), interpolation=cv2.INTER_NEAREST)
yield np.expand_dims(train_return, axis=4), np.expand_dims(masks_return, axis=4)
def data_gen_testing(dataset_project, batch_size, output_depth, output_rows, output_cols):
    """Infinite generator yielding test batches of shape
    (batch_size, output_depth, output_rows, output_cols, 1).

    NOTE(review): output_depth/output_rows/output_cols only size the output
    buffer; the volumes loaded from imgs_test.npy are assumed to already have
    that shape -- confirm against the .npy files.
    """
    # select the numpy data folder for the requested project
    if dataset_project == "segmentation":
        data_path = "./data/segmentation/numpy/"
    elif dataset_project == "denoising":
        data_path = "./data/denoising/numpy/"
    elif dataset_project == "detection":
        data_path = "./data/detection/numpy/"
    elif dataset_project == "volumetry":
        data_path = "./data/volumetry/numpy/"
    pred_input = np.load(os.path.join(data_path, 'imgs_test.npy')).astype('float32')
    # scale between [0, 1]
    pred_input /= pred_input.max()
    pred_return = np.zeros((batch_size, output_depth, output_rows, output_cols)).astype('float32')
    # visualise_count = 0
    #
    # pred_dir_test = './results/generator_preprocessed/test'
    # if not os.path.exists(pred_dir_train):
    #     os.mkdir(pred_dir_test)
    while (True):
        for train_counter in range(0, pred_input.shape[0]): # if shuffle=false then go through entire dataset
            pred_return[train_counter % batch_size] = pred_input[train_counter]
            # yield if the pred return is not 0, if it is filled with values and if the for cycle ends
            # NOTE(review): the condition fires when train_counter % batch_size == 0,
            # so the first yielded batch holds a single new sample -- verify intended.
            if(train_counter != 0 and train_counter % batch_size == 0 or train_counter == pred_input.shape[0]-1):
                yield np.expand_dims(pred_return, axis=4)
pred_return.fill(0) | [
"numpy.random.normal",
"os.path.exists",
"random.uniform",
"scipy.ndimage.filters.median_filter",
"os.path.join",
"numpy.sum",
"numpy.zeros",
"os.mkdir",
"numpy.expand_dims",
"random.randint"
] | [((263, 303), 'numpy.random.normal', 'np.random.normal', (['mean', 'sigma', 'img.shape'], {}), '(mean, sigma, img.shape)\n', (279, 303), True, 'import numpy as np\n'), ((1600, 1630), 'os.path.exists', 'os.path.exists', (['pred_dir_train'], {}), '(pred_dir_train)\n', (1614, 1630), False, 'import os\n'), ((1640, 1664), 'os.mkdir', 'os.mkdir', (['pred_dir_train'], {}), '(pred_dir_train)\n', (1648, 1664), False, 'import os\n'), ((1738, 1768), 'os.path.exists', 'os.path.exists', (['pred_dir_masks'], {}), '(pred_dir_masks)\n', (1752, 1768), False, 'import os\n'), ((1778, 1802), 'os.mkdir', 'os.mkdir', (['pred_dir_masks'], {}), '(pred_dir_masks)\n', (1786, 1802), False, 'import os\n'), ((778, 819), 'os.path.join', 'os.path.join', (['data_path', '"""imgs_train.npy"""'], {}), "(data_path, 'imgs_train.npy')\n", (790, 819), False, 'import os\n'), ((905, 951), 'os.path.join', 'os.path.join', (['data_path', '"""imgs_mask_train.npy"""'], {}), "(data_path, 'imgs_mask_train.npy')\n", (917, 951), False, 'import os\n'), ((1320, 1382), 'numpy.zeros', 'np.zeros', (['(batch_size, output_depth, output_rows, output_cols)'], {}), '((batch_size, output_depth, output_rows, output_cols))\n', (1328, 1382), True, 'import numpy as np\n'), ((1420, 1482), 'numpy.zeros', 'np.zeros', (['(batch_size, output_depth, output_rows, output_cols)'], {}), '((batch_size, output_depth, output_rows, output_cols))\n', (1428, 1482), True, 'import numpy as np\n'), ((2212, 2236), 'random.uniform', 'random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (2226, 2236), False, 'import random\n'), ((2264, 2284), 'random.randint', 'random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (2278, 2284), False, 'import random\n'), ((2309, 2333), 'random.uniform', 'random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (2323, 2333), False, 'import random\n'), ((6168, 6230), 'numpy.zeros', 'np.zeros', (['(batch_size, output_depth, output_rows, output_cols)'], {}), '((batch_size, output_depth, output_rows, 
output_cols))\n', (6176, 6230), True, 'import numpy as np\n'), ((1073, 1114), 'os.path.join', 'os.path.join', (['data_path', '"""imgs_train.npy"""'], {}), "(data_path, 'imgs_train.npy')\n", (1085, 1114), False, 'import os\n'), ((1199, 1245), 'os.path.join', 'os.path.join', (['data_path', '"""imgs_mask_train.npy"""'], {}), "(data_path, 'imgs_mask_train.npy')\n", (1211, 1245), False, 'import os\n'), ((2033, 2072), 'random.randint', 'random.randint', (['(0)', 'train_input.shape[0]'], {}), '(0, train_input.shape[0])\n', (2047, 2072), False, 'import random\n'), ((4563, 4600), 'scipy.ndimage.filters.median_filter', 'median_filter', (['mask'], {'size': 'blur_number'}), '(mask, size=blur_number)\n', (4576, 4600), False, 'from scipy.ndimage.filters import median_filter\n'), ((5479, 5515), 'numpy.expand_dims', 'np.expand_dims', (['train_return'], {'axis': '(4)'}), '(train_return, axis=4)\n', (5493, 5515), True, 'import numpy as np\n'), ((5517, 5553), 'numpy.expand_dims', 'np.expand_dims', (['masks_return'], {'axis': '(4)'}), '(masks_return, axis=4)\n', (5531, 5553), True, 'import numpy as np\n'), ((6025, 6065), 'os.path.join', 'os.path.join', (['data_path', '"""imgs_test.npy"""'], {}), "(data_path, 'imgs_test.npy')\n", (6037, 6065), False, 'import os\n'), ((2677, 2689), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (2683, 2689), True, 'import numpy as np\n'), ((2842, 2881), 'random.randint', 'random.randint', (['(0)', 'train_input.shape[0]'], {}), '(0, train_input.shape[0])\n', (2856, 2881), False, 'import random\n'), ((6875, 6910), 'numpy.expand_dims', 'np.expand_dims', (['pred_return'], {'axis': '(4)'}), '(pred_return, axis=4)\n', (6889, 6910), True, 'import numpy as np\n'), ((3138, 3150), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (3144, 3150), True, 'import numpy as np\n')] |
from collections import namedtuple
from scipy.special import expit
import numpy as np
from .mapping import Mapping
class Activation(Mapping):
    """Marker base class for activation functions.

    Activations expose ``forward``/``backward`` static methods (see ``Relu``
    below); this class adds no behaviour beyond the project ``Mapping`` base.
    """
    pass
# Activation = namedtuple("Activation", ["forward", "backward"])
class Relu(Activation):
    """Elementwise rectified linear unit."""

    @staticmethod
    def forward(x):
        # Pass positive entries through unchanged; clamp everything else to 0.
        return np.where(x > 0, x, 0)

    @staticmethod
    def backward(x):
        # Per-sample Jacobian (samples x out x in): a 0/1 activity mask
        # broadcast onto the identity, i.e. a diagonal matrix per sample.
        active = np.where(x.T > 0, 1, 0)
        return active[..., None] * np.eye(x.shape[0])[None, ...]
# Jacobian == samples X out X in
class Softmax:  # (Activation)
    """Column-wise softmax for inputs shaped (dim, nsamples)."""

    @staticmethod
    def forward(x):
        # Shift by the per-column max before exponentiating: prevents float
        # overflow (nan/inf) for large logits without changing the result.
        e = np.exp(x - np.max(x, axis=0, keepdims=True))
        return e/np.sum(e, axis=0, keepdims=True)

    @classmethod
    def backward(cls, x):
        """Jacobian of the softmax, shaped samples x out x in."""
        s = cls.forward(x)
        return s.T[..., None]*(np.identity(x.shape[0])[None, ...]-s.T[:,None,:])
# Jacobian == samples X out X in
class SeqSoftmax:  # (Activation)
    """Softmax applied independently at each sequence position, i.e. along
    axis 1 of an input shaped (nsamples, dim, L)."""

    @staticmethod
    def forward(x):
        """nsamples x dim x L -> per-position probabilities over dim."""
        # Shift by the per-position max before exponentiating: prevents float
        # overflow for large logits without changing the result.
        e = np.exp(x - np.max(x, axis=1, keepdims=True))
        return e/np.sum(e, axis=1, keepdims=True)

    @classmethod
    def backward(cls, x):
        """
        Out: nsamples x dim x dim x L

        BUG FIX: the parameter was declared ``X`` while the body used ``x``,
        so every call raised NameError; the parameter is now ``x`` (matching
        Softmax.backward).
        """
        s = cls.forward(x)
        eye = np.identity(x.shape[1])
        return s[:, :, None, :]*(eye[None, :, :, None]-s[:, None, :, :])
        #np.mul.outer(_softmax(x), np.identity(x.size)-_softmax(x)))
#np.mul.outer(_softmax(x), np.identity(x.size)-_softmax(x)))
| [
"numpy.identity",
"numpy.eye",
"numpy.where",
"numpy.exp",
"numpy.sum"
] | [((296, 317), 'numpy.where', 'np.where', (['(x > 0)', 'x', '(0)'], {}), '(x > 0, x, 0)\n', (304, 317), True, 'import numpy as np\n'), ((549, 558), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (555, 558), True, 'import numpy as np\n'), ((912, 921), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (918, 921), True, 'import numpy as np\n'), ((1119, 1142), 'numpy.identity', 'np.identity', (['x.shape[1]'], {}), '(x.shape[1])\n', (1130, 1142), True, 'import numpy as np\n'), ((576, 608), 'numpy.sum', 'np.sum', (['e'], {'axis': '(0)', 'keepdims': '(True)'}), '(e, axis=0, keepdims=True)\n', (582, 608), True, 'import numpy as np\n'), ((939, 971), 'numpy.sum', 'np.sum', (['e'], {'axis': '(1)', 'keepdims': '(True)'}), '(e, axis=1, keepdims=True)\n', (945, 971), True, 'import numpy as np\n'), ((371, 394), 'numpy.where', 'np.where', (['(x.T > 0)', '(1)', '(0)'], {}), '(x.T > 0, 1, 0)\n', (379, 394), True, 'import numpy as np\n'), ((404, 422), 'numpy.eye', 'np.eye', (['x.shape[0]'], {}), '(x.shape[0])\n', (410, 422), True, 'import numpy as np\n'), ((711, 734), 'numpy.identity', 'np.identity', (['x.shape[0]'], {}), '(x.shape[0])\n', (722, 734), True, 'import numpy as np\n')] |
from collections import OrderedDict
import torch
import torch.nn as nn
from gym import spaces
from rl.policies.utils import MLP, BC_Visual_Policy
from rl.policies.actor_critic import Actor, Critic
from util.gym import observation_size, action_size, goal_size, box_size, robot_state_size, image_size
import numpy as np
from rl.policies.distributions import (
FixedCategorical,
FixedNormal,
MixedDistribution,
FixedGumbelSoftmax,
)
from util.pytorch import to_tensor
import torch.nn.functional as F
class AsymActor(Actor):
    """Asymmetric actor: the policy consumes a camera image plus the robot
    proprioceptive state through a BC_Visual_Policy CNN trunk, with one MLP
    head per action-space key (mean, and optionally log-std for Box spaces).
    """

    def __init__(
        self,
        config,
        ob_space,
        ac_space,
        tanh_policy,
        ac_scale,
        deterministic=False,
        activation="relu",
        rl_hid_size=None,
    ):
        """Build the CNN trunk and the per-key mean / log-std MLP heads.

        deterministic: if True, no log-std heads are built and Box-space
        actions use a zero std (see ``act``/``act_log``).
        rl_hid_size: falls back to config.rl_hid_size when None.
        """
        super().__init__(config, ob_space, ac_space, tanh_policy, ac_scale)
        self._ac_space = ac_space
        self._deterministic = deterministic
        self.env = config.env
        self._ac_scale = ac_scale
        if rl_hid_size == None:
            rl_hid_size = config.rl_hid_size
        # The actor only sees robot state + image, so the CNN's state input
        # excludes the privileged parts of the observation.
        if config.env == 'PusherObstacle-v0':
            # observation (excluding goal information)
            input_dim = observation_size(ob_space) - goal_size(ob_space) - box_size(ob_space)
        elif 'Sawyer' in config.env:
            input_dim = robot_state_size(ob_space)
        else:
            raise NotImplementedError
        self.cnn = BC_Visual_Policy(robot_state=input_dim, num_classes=256, img_size=config.env_image_size)
        # self.cnn.load_pretrained(config.bc_checkpoint)
        # print('load pretrained BC weights to the actor from {}'.format(config.bc_checkpoint))
        self.fc_means = nn.ModuleDict()
        self.fc_log_stds = nn.ModuleDict()
        for k, space in ac_space.spaces.items():
            if isinstance(space, spaces.Box):
                self.fc_means.update(
                    {
                        k: MLP(
                            config,
                            rl_hid_size,
                            action_size(space),
                            activation=activation,
                        )
                    }
                )
                if not self._deterministic:
                    self.fc_log_stds.update(
                        {
                            k: MLP(
                                config,
                                rl_hid_size,
                                action_size(space),
                                activation=activation,
                            )
                        }
                    )
            elif isinstance(space, spaces.Discrete):
                self.fc_means.update(
                    {k: MLP(config, rl_hid_size, space.n, activation=activation)}
                )
            else:
                # NOTE(review): for non-Box/non-Discrete entries ``space``
                # itself is passed to MLP as the output size -- presumably a
                # plain int; confirm against the action-space construction.
                self.fc_means.update(
                    {k: MLP(config, rl_hid_size, space, activation=activation)}
                )

    def forward(self, ob, deterministic=False):
        """Run the CNN trunk on (image, robot state) and return per-key
        (means, stds) OrderedDicts; std is None for Discrete keys and when
        the actor is deterministic."""
        if self.env == 'PusherObstacle-v0':
            inp = list(ob.values())
            if self._config.obs_space == "all":
                # NOTE(review): positional layout assumed -- first two entries
                # are robot state, entry 5 is the image; verify against the
                # environment's observation ordering.
                inp_robot_state = inp[:2]
                if len(inp[0].shape) == 1:
                    # add a batch dimension for single observations
                    inp_robot_state = [x.unsqueeze(0) for x in inp_robot_state]
                inp_robot_state = torch.cat(inp_robot_state, dim=-1)
                inp_img = inp[5]
                if len(inp_img.shape) == 5:
                    inp_img = inp_img.squeeze(1) # remove unnecessary dimension
                out = self._activation_fn(self.cnn(inp_img, inp_robot_state))
            else:
                raise NotImplementedError
        elif 'Sawyer' in self.env:
            # 1-D robot-state tensors are a single observation; 2-D is a batch
            if len(ob['joint_pos'].shape) == 1:
                inp_robot_state = torch.cat([ob['joint_pos'], ob['joint_vel'], ob['gripper_qpos'], ob['gripper_qvel'], ob['eef_pos'], ob['eef_quat']])
                inp_robot_state = inp_robot_state[None, :]
                inp_img = ob['image']
            elif len(ob['joint_pos'].shape) == 2:
                inp_robot_state = torch.cat([ob['joint_pos'], ob['joint_vel'], ob['gripper_qpos'], ob['gripper_qvel'], ob['eef_pos'], ob['eef_quat']], axis=1)
                inp_img = ob['image']
            if len(inp_img.shape) == 5:
                inp_img = inp_img.squeeze(1) # remove unnecessary dimension
            out = self._activation_fn(self.cnn(inp_img, inp_robot_state))
        else:
            raise NotImplementedError
        out = torch.reshape(out, (out.shape[0], -1))
        means, stds = OrderedDict(), OrderedDict()
        for k, space in self._ac_space.spaces.items():
            mean = self.fc_means[k](out)
            if isinstance(space, spaces.Box) and not self._deterministic:
                if self._config.algo == "ppo":
                    # PPO: state-independent log-std (head is fed zeros)
                    zeros = torch.zeros(mean.size()).to(self._config.device)
                    log_std = self.fc_log_stds[k](zeros)
                else:
                    log_std = self.fc_log_stds[k](out)
                # clamp log-std to keep the Normal numerically sane
                log_std = torch.clamp(log_std, -10, 2)
                std = torch.exp(log_std.double())
            else:
                std = None
            means[k] = mean
            stds[k] = std
        return means, stds

    def act(self, ob, is_train=True, return_log_prob=False, return_stds=False):
        """Sample (or, at evaluation time, take the mode of) an action.

        Returns numpy actions/activations with the batch dimension squeezed
        away; optionally also the (tanh-corrected) log-probabilities or the
        per-key stds.
        """
        ob_copy = ob.copy()
        # an 'image' entry may be a path to a saved .npy array
        if 'image' in ob.keys() and isinstance(ob['image'], str):
            ob_copy['image'] = np.load(ob_copy['image'])
        ob_copy = to_tensor(ob_copy, self._config.device)
        means, stds = self.forward(ob_copy, self._deterministic)
        ob_copy.clear()
        dists = OrderedDict()
        for k, space in self._ac_space.spaces.items():
            if isinstance(space, spaces.Box):
                if self._deterministic:
                    stds[k] = torch.zeros_like(means[k])
                dists[k] = FixedNormal(means[k], stds[k])
            else:
                if self._config.algo == "sac" or "aac" in self._config.algo:
                    dists[k] = FixedGumbelSoftmax(
                        torch.tensor(self._config.temperature), logits=means[k]
                    )
                else:
                    dists[k] = FixedCategorical(logits=means[k])
        actions = OrderedDict()
        mixed_dist = MixedDistribution(dists)
        if not is_train or self._deterministic:
            activations = mixed_dist.mode()
        else:
            activations = mixed_dist.sample()
        if return_log_prob:
            log_probs = mixed_dist.log_probs(activations)
        for k, space in self._ac_space.spaces.items():
            z = activations[k]
            if self._tanh and isinstance(space, spaces.Box):
                action = torch.tanh(z)
                if return_log_prob:
                    # follow the Appendix C. Enforcing Action Bounds
                    log_det_jacobian = 2 * (np.log(2.0) - z - F.softplus(-2.0 * z)).sum(
                        dim=-1, keepdim=True
                    )
                    log_probs[k] = log_probs[k] - log_det_jacobian
            else:
                action = z
            action = torch.clip(action, -self._ac_scale, self._ac_scale)
            actions[k] = action.detach().cpu().numpy().squeeze(0)
            activations[k] = z.detach().cpu().numpy().squeeze(0)
        if return_log_prob:
            log_probs_ = torch.cat(list(log_probs.values()), -1).sum(-1, keepdim=True)
            # if log_probs_.min() < -100:
            #     print('sampling an action with a probability of 1e-100')
            #     import ipdb; ipdb.set_trace()
            log_probs_ = log_probs_.detach().cpu().numpy().squeeze(0)
            return actions, activations, log_probs_
        elif return_stds:
            return actions, activations, stds
        else:
            return actions, activations

    def act_log(self, ob, activations=None):
        """Differentiable counterpart of ``act`` used during optimization.

        With activations=None: reparameterized sample; returns (actions,
        log_probs). With given activations: returns (log_probs, entropies).
        """
        means, stds = self.forward(ob)
        dists = OrderedDict()
        actions = OrderedDict()
        for k, space in self._ac_space.spaces.items():
            if isinstance(space, spaces.Box):
                if self._deterministic:
                    stds[k] = torch.zeros_like(means[k])
                dists[k] = FixedNormal(means[k], stds[k])
            else:
                if self._config.algo == "sac" or "aac" in self._config.algo:
                    dists[k] = FixedGumbelSoftmax(
                        torch.tensor(self._config.temperature), logits=means[k]
                    )
                else:
                    dists[k] = FixedCategorical(logits=means[k])
        mixed_dist = MixedDistribution(dists)
        activations_ = mixed_dist.rsample() if activations is None else activations
        for k in activations_.keys():
            if len(activations_[k].shape) == 1:
                # ensure a batch dimension before computing log-probs
                activations_[k] = activations_[k].unsqueeze(0)
        log_probs = mixed_dist.log_probs(activations_)
        for k, space in self._ac_space.spaces.items():
            z = activations_[k]
            if self._tanh and isinstance(space, spaces.Box):
                action = torch.tanh(z)
                # follow the Appendix C. Enforcing Action Bounds
                log_det_jacobian = 2 * (np.log(2.0) - z - F.softplus(-2.0 * z)).sum(
                    dim=-1, keepdim=True
                )
                log_probs[k] = log_probs[k] - log_det_jacobian
            else:
                action = z
            action = torch.clip(action, -self._ac_scale, self._ac_scale)
            actions[k] = action
        log_probs_ = torch.cat(list(log_probs.values()), -1).sum(-1, keepdim=True)
        # if log_probs_.min() < -100:
        #     print(ob)
        #     print(log_probs_.min())
        #     import ipdb; ipdb.set_trace()
        if activations is None:
            return actions, log_probs_
        else:
            ents = mixed_dist.entropy()
            return log_probs_, ents

    def load_partial_layers(self, state_dict):
        """Load only the parameters whose names exist in this module;
        everything else in ``state_dict`` is silently skipped."""
        filtered_dict = {}
        for k, v in state_dict.items():
            if k in self.state_dict().keys():
                filtered_dict[k] = v
        self.load_state_dict(filtered_dict, strict=False)
        return None

    def load_state_dict_processed(self, state_dict):
        """Load a behavior-cloning checkpoint by remapping its key names to
        this module's layout ('cnn.' prefix for trunk layers, 'default' key
        inserted for the fc heads).  Exits the process on unknown keys."""
        processed_dict = {}
        for k, v in state_dict.items():
            if 'cnn' in k or 'linear_layers' in k:
                k = 'cnn.' + k
                assert k in self.state_dict().keys()
            elif 'fc' in k:
                tokens = k.split('.')
                k = tokens[0] + '.default.fc.' + tokens[1] + '.' + tokens[2]
                assert k in self.state_dict().keys()
            else:
                print('incorrect checkpoint')
                exit(1)
            processed_dict[k] = v
        self.load_state_dict(processed_dict)
        return True
class AsymCritic(Critic):
    """Critic for the asymmetric actor-critic setup.

    Unlike AsymActor, the value head consumes the state vector directly
    (the image entry of the observation is dropped in ``forward``).
    """

    def __init__(
        self, config, ob_space, ac_space=None, activation="relu", rl_hid_size=None
    ):
        super().__init__(config)
        self.env = config.env
        if config.env == 'PusherObstacle-v0':
            # observation (including goal information)
            input_dim = observation_size(ob_space)
        elif 'Sawyer' in config.env and 'image' not in ob_space:
            input_dim = observation_size(ob_space)
        elif 'Sawyer' in config.env and 'image' in ob_space:
            # the image is excluded from the critic's input vector
            input_dim = observation_size(ob_space) - image_size(ob_space)
        else:
            # BUG FIX: was `raise NotImplementedErro` (a typo), which raised
            # NameError instead of the intended built-in exception.
            raise NotImplementedError
        if ac_space is not None:
            # Q-function variant: the action is concatenated to the input
            input_dim += action_size(ac_space)
        if rl_hid_size is None:
            rl_hid_size = config.rl_hid_size
        self.fc = MLP(config, input_dim, 1, [rl_hid_size] * 2, activation=activation)

    def forward(self, ob, ac=None):
        """Return the value estimate, shaped (batch, 1)."""
        inp = list(ob.values())
        if self.env == 'PusherObstacle-v0':
            if self._config.obs_space == "all":
                # only use robot and environment state (env. image is not used)
                inp = inp[:5]
            else:
                raise NotImplementedError
        elif 'Sawyer' in self.env:
            # drop the last observation entry (the image) -- the critic is
            # state-only; see __init__'s input_dim computation
            inp = inp[:-1]
        else:
            raise NotImplementedError
        if len(inp[0].shape) == 1:
            # add a batch dimension for single observations
            inp = [x.unsqueeze(0) for x in inp]
        if ac is not None:
            ac = list(ac.values())
            if len(ac[0].shape) == 1:
                ac = [x.unsqueeze(0) for x in ac]
            inp.extend(ac)
        out = self.fc(torch.cat(inp, dim=-1))
        out = torch.reshape(out, (out.shape[0], 1))
        return out
# Necessary for my KFAC implementation.
class AddBias(nn.Module):
    """Learnable additive bias, broadcast across the batch dimension
    (and across the spatial dimensions for non-2-D inputs)."""

    def __init__(self, bias):
        super(AddBias, self).__init__()
        # stored as (n, 1) so that .t() yields a (1, n) row vector
        self._bias = nn.Parameter(bias.unsqueeze(1))

    def forward(self, x):
        # choose the broadcast shape by input rank: (1, n) for 2-D inputs,
        # (1, n, 1, 1) otherwise (e.g. conv feature maps)
        target_shape = (1, -1) if x.dim() == 2 else (1, -1, 1, 1)
        return x + self._bias.t().view(*target_shape)
| [
"rl.policies.distributions.FixedNormal",
"numpy.log",
"torch.tanh",
"rl.policies.distributions.MixedDistribution",
"rl.policies.distributions.FixedCategorical",
"util.gym.goal_size",
"util.gym.image_size",
"torch.nn.ModuleDict",
"torch.zeros_like",
"util.pytorch.to_tensor",
"collections.OrderedD... | [((1407, 1500), 'rl.policies.utils.BC_Visual_Policy', 'BC_Visual_Policy', ([], {'robot_state': 'input_dim', 'num_classes': '(256)', 'img_size': 'config.env_image_size'}), '(robot_state=input_dim, num_classes=256, img_size=config.\n env_image_size)\n', (1423, 1500), False, 'from rl.policies.utils import MLP, BC_Visual_Policy\n'), ((1674, 1689), 'torch.nn.ModuleDict', 'nn.ModuleDict', ([], {}), '()\n', (1687, 1689), True, 'import torch.nn as nn\n'), ((1717, 1732), 'torch.nn.ModuleDict', 'nn.ModuleDict', ([], {}), '()\n', (1730, 1732), True, 'import torch.nn as nn\n'), ((4471, 4509), 'torch.reshape', 'torch.reshape', (['out', '(out.shape[0], -1)'], {}), '(out, (out.shape[0], -1))\n', (4484, 4509), False, 'import torch\n'), ((5476, 5515), 'util.pytorch.to_tensor', 'to_tensor', (['ob_copy', 'self._config.device'], {}), '(ob_copy, self._config.device)\n', (5485, 5515), False, 'from util.pytorch import to_tensor\n'), ((5623, 5636), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5634, 5636), False, 'from collections import OrderedDict\n'), ((6247, 6260), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6258, 6260), False, 'from collections import OrderedDict\n'), ((6282, 6306), 'rl.policies.distributions.MixedDistribution', 'MixedDistribution', (['dists'], {}), '(dists)\n', (6299, 6306), False, 'from rl.policies.distributions import FixedCategorical, FixedNormal, MixedDistribution, FixedGumbelSoftmax\n'), ((7944, 7957), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7955, 7957), False, 'from collections import OrderedDict\n'), ((7976, 7989), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7987, 7989), False, 'from collections import OrderedDict\n'), ((8603, 8627), 'rl.policies.distributions.MixedDistribution', 'MixedDistribution', (['dists'], {}), '(dists)\n', (8620, 8627), False, 'from rl.policies.distributions import FixedCategorical, FixedNormal, MixedDistribution, FixedGumbelSoftmax\n'), 
((11665, 11732), 'rl.policies.utils.MLP', 'MLP', (['config', 'input_dim', '(1)', '([rl_hid_size] * 2)'], {'activation': 'activation'}), '(config, input_dim, 1, [rl_hid_size] * 2, activation=activation)\n', (11668, 11732), False, 'from rl.policies.utils import MLP, BC_Visual_Policy\n'), ((12500, 12537), 'torch.reshape', 'torch.reshape', (['out', '(out.shape[0], 1)'], {}), '(out, (out.shape[0], 1))\n', (12513, 12537), False, 'import torch\n'), ((4533, 4546), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4544, 4546), False, 'from collections import OrderedDict\n'), ((4548, 4561), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4559, 4561), False, 'from collections import OrderedDict\n'), ((5432, 5457), 'numpy.load', 'np.load', (["ob_copy['image']"], {}), "(ob_copy['image'])\n", (5439, 5457), True, 'import numpy as np\n'), ((7128, 7179), 'torch.clip', 'torch.clip', (['action', '(-self._ac_scale)', 'self._ac_scale'], {}), '(action, -self._ac_scale, self._ac_scale)\n', (7138, 7179), False, 'import torch\n'), ((9444, 9495), 'torch.clip', 'torch.clip', (['action', '(-self._ac_scale)', 'self._ac_scale'], {}), '(action, -self._ac_scale, self._ac_scale)\n', (9454, 9495), False, 'import torch\n'), ((11158, 11184), 'util.gym.observation_size', 'observation_size', (['ob_space'], {}), '(ob_space)\n', (11174, 11184), False, 'from util.gym import observation_size, action_size, goal_size, box_size, robot_state_size, image_size\n'), ((11546, 11567), 'util.gym.action_size', 'action_size', (['ac_space'], {}), '(ac_space)\n', (11557, 11567), False, 'from util.gym import observation_size, action_size, goal_size, box_size, robot_state_size, image_size\n'), ((12462, 12484), 'torch.cat', 'torch.cat', (['inp'], {'dim': '(-1)'}), '(inp, dim=-1)\n', (12471, 12484), False, 'import torch\n'), ((1228, 1246), 'util.gym.box_size', 'box_size', (['ob_space'], {}), '(ob_space)\n', (1236, 1246), False, 'from util.gym import observation_size, action_size, goal_size, box_size, 
robot_state_size, image_size\n'), ((1308, 1334), 'util.gym.robot_state_size', 'robot_state_size', (['ob_space'], {}), '(ob_space)\n', (1324, 1334), False, 'from util.gym import observation_size, action_size, goal_size, box_size, robot_state_size, image_size\n'), ((3306, 3340), 'torch.cat', 'torch.cat', (['inp_robot_state'], {'dim': '(-1)'}), '(inp_robot_state, dim=-1)\n', (3315, 3340), False, 'import torch\n'), ((5862, 5892), 'rl.policies.distributions.FixedNormal', 'FixedNormal', (['means[k]', 'stds[k]'], {}), '(means[k], stds[k])\n', (5873, 5892), False, 'from rl.policies.distributions import FixedCategorical, FixedNormal, MixedDistribution, FixedGumbelSoftmax\n'), ((6719, 6732), 'torch.tanh', 'torch.tanh', (['z'], {}), '(z)\n', (6729, 6732), False, 'import torch\n'), ((8215, 8245), 'rl.policies.distributions.FixedNormal', 'FixedNormal', (['means[k]', 'stds[k]'], {}), '(means[k], stds[k])\n', (8226, 8245), False, 'from rl.policies.distributions import FixedCategorical, FixedNormal, MixedDistribution, FixedGumbelSoftmax\n'), ((9091, 9104), 'torch.tanh', 'torch.tanh', (['z'], {}), '(z)\n', (9101, 9104), False, 'import torch\n'), ((11274, 11300), 'util.gym.observation_size', 'observation_size', (['ob_space'], {}), '(ob_space)\n', (11290, 11300), False, 'from util.gym import observation_size, action_size, goal_size, box_size, robot_state_size, image_size\n'), ((1177, 1203), 'util.gym.observation_size', 'observation_size', (['ob_space'], {}), '(ob_space)\n', (1193, 1203), False, 'from util.gym import observation_size, action_size, goal_size, box_size, robot_state_size, image_size\n'), ((1206, 1225), 'util.gym.goal_size', 'goal_size', (['ob_space'], {}), '(ob_space)\n', (1215, 1225), False, 'from util.gym import observation_size, action_size, goal_size, box_size, robot_state_size, image_size\n'), ((3753, 3874), 'torch.cat', 'torch.cat', (["[ob['joint_pos'], ob['joint_vel'], ob['gripper_qpos'], ob['gripper_qvel'],\n ob['eef_pos'], ob['eef_quat']]"], {}), 
"([ob['joint_pos'], ob['joint_vel'], ob['gripper_qpos'], ob[\n 'gripper_qvel'], ob['eef_pos'], ob['eef_quat']])\n", (3762, 3874), False, 'import torch\n'), ((5021, 5049), 'torch.clamp', 'torch.clamp', (['log_std', '(-10)', '(2)'], {}), '(log_std, -10, 2)\n', (5032, 5049), False, 'import torch\n'), ((5808, 5834), 'torch.zeros_like', 'torch.zeros_like', (['means[k]'], {}), '(means[k])\n', (5824, 5834), False, 'import torch\n'), ((6194, 6227), 'rl.policies.distributions.FixedCategorical', 'FixedCategorical', ([], {'logits': 'means[k]'}), '(logits=means[k])\n', (6210, 6227), False, 'from rl.policies.distributions import FixedCategorical, FixedNormal, MixedDistribution, FixedGumbelSoftmax\n'), ((8161, 8187), 'torch.zeros_like', 'torch.zeros_like', (['means[k]'], {}), '(means[k])\n', (8177, 8187), False, 'import torch\n'), ((8547, 8580), 'rl.policies.distributions.FixedCategorical', 'FixedCategorical', ([], {'logits': 'means[k]'}), '(logits=means[k])\n', (8563, 8580), False, 'from rl.policies.distributions import FixedCategorical, FixedNormal, MixedDistribution, FixedGumbelSoftmax\n'), ((4051, 4180), 'torch.cat', 'torch.cat', (["[ob['joint_pos'], ob['joint_vel'], ob['gripper_qpos'], ob['gripper_qvel'],\n ob['eef_pos'], ob['eef_quat']]"], {'axis': '(1)'}), "([ob['joint_pos'], ob['joint_vel'], ob['gripper_qpos'], ob[\n 'gripper_qvel'], ob['eef_pos'], ob['eef_quat']], axis=1)\n", (4060, 4180), False, 'import torch\n'), ((6063, 6101), 'torch.tensor', 'torch.tensor', (['self._config.temperature'], {}), '(self._config.temperature)\n', (6075, 6101), False, 'import torch\n'), ((8416, 8454), 'torch.tensor', 'torch.tensor', (['self._config.temperature'], {}), '(self._config.temperature)\n', (8428, 8454), False, 'import torch\n'), ((11386, 11412), 'util.gym.observation_size', 'observation_size', (['ob_space'], {}), '(ob_space)\n', (11402, 11412), False, 'from util.gym import observation_size, action_size, goal_size, box_size, robot_state_size, image_size\n'), ((11415, 11435), 
'util.gym.image_size', 'image_size', (['ob_space'], {}), '(ob_space)\n', (11425, 11435), False, 'from util.gym import observation_size, action_size, goal_size, box_size, robot_state_size, image_size\n'), ((2026, 2044), 'util.gym.action_size', 'action_size', (['space'], {}), '(space)\n', (2037, 2044), False, 'from util.gym import observation_size, action_size, goal_size, box_size, robot_state_size, image_size\n'), ((2699, 2755), 'rl.policies.utils.MLP', 'MLP', (['config', 'rl_hid_size', 'space.n'], {'activation': 'activation'}), '(config, rl_hid_size, space.n, activation=activation)\n', (2702, 2755), False, 'from rl.policies.utils import MLP, BC_Visual_Policy\n'), ((2855, 2909), 'rl.policies.utils.MLP', 'MLP', (['config', 'rl_hid_size', 'space'], {'activation': 'activation'}), '(config, rl_hid_size, space, activation=activation)\n', (2858, 2909), False, 'from rl.policies.utils import MLP, BC_Visual_Policy\n'), ((2431, 2449), 'util.gym.action_size', 'action_size', (['space'], {}), '(space)\n', (2442, 2449), False, 'from util.gym import observation_size, action_size, goal_size, box_size, robot_state_size, image_size\n'), ((9228, 9248), 'torch.nn.functional.softplus', 'F.softplus', (['(-2.0 * z)'], {}), '(-2.0 * z)\n', (9238, 9248), True, 'import torch.nn.functional as F\n'), ((6900, 6920), 'torch.nn.functional.softplus', 'F.softplus', (['(-2.0 * z)'], {}), '(-2.0 * z)\n', (6910, 6920), True, 'import torch.nn.functional as F\n'), ((9210, 9221), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (9216, 9221), True, 'import numpy as np\n'), ((6882, 6893), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (6888, 6893), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 3 09:30:54 2020
@author: jeremiasknoblauch
Description: Read in the results and produce plots. Before creating the plots
create .txt files holding the results to be plotted.
"""
import numpy as np
import matplotlib.pyplot as plt
# global variables
color_TVD = '#009E73'
color_KLD = '#56B4E9'
median_color = 'k'
linewidth_boxplot = 2
def aggregate_splits(path, data_name, split_num=50):
    """Concatenate the per-split result files of one data set.

    For every (criterion, inference-method) combination this writes a single
    file named ``aggregate<criterion><method>.txt`` containing the contents
    of all ``split_num`` zero-padded per-split files, in order.
    """
    folder = path + "/" + data_name + "/"
    # evaluation criteria and inference methods produced by the experiments
    criteria = ["_log_probs_", "_accuracy_", "_probabilistic_accuracy_",
                "_cross_entropy_"]
    methods = ["KLD", "TVD"]
    for criterion in criteria:
        for method in methods:
            # common filename suffix shared by split files and the aggregate
            suffix = criterion + method + ".txt"
            with open(folder + "aggregate" + suffix, 'w') as out_file:
                for split in range(split_num):
                    # split files are prefixed with a two-digit split index
                    split_file = folder + str(split).zfill(2) + suffix
                    with open(split_file) as split_results:
                        for line in split_results:
                            out_file.write(line)
def single_boxplot_grouped_comparison(base_path, fig, ax, data_name, criterion):
    """Create a single boxplot comparison between TVD and KLD on data set
    data_name with the given criterion.

    Plots the distribution of per-split means (50 splits, hard-coded below;
    NOTE(review): should presumably match aggregate_splits' split_num).
    Returns the (fig, ax) pair it was given.
    """
    # get the path at which relevant aggregates are stored
    path_name_TVD = (base_path + "/" + data_name + "/" +
                     "aggregate" + criterion + "TVD" + ".txt")
    path_name_KLD = (base_path + "/" + data_name + "/" +
                     "aggregate" + criterion + "KLD" + ".txt")
    # read in the aggregate data
    data_TVD = np.loadtxt(path_name_TVD)
    data_KLD = np.loadtxt(path_name_KLD)
    # if the data was stored as matrix
    if len(data_TVD.shape) > 1:
        n_total, B = data_KLD.shape
        n = int(n_total/50)
        # get the number of rows corresponding to a single iteration/split
        means_TVD = np.zeros(50)
        means_KLD = np.zeros(50)
        for i in range(0, 50):
            start = i*n
            stop = (i+1)*n
            means_TVD[i] = np.mean(data_TVD[start:stop,:])
            means_KLD[i] = np.mean(data_KLD[start:stop,:])
    # if the data was stored as a single vector, this is an NN result
    else:
        n_total = len(data_TVD)
        n_split = int(n_total/50)
        # get the number of rows corresponding to a single iteration/split
        means_TVD = np.zeros(50)
        means_KLD = np.zeros(50)
        for i in range(0, 50):
            start = i*n_split
            stop = (i+1)*n_split
            means_TVD[i] = np.mean(data_TVD[start:stop])
            means_KLD[i] = np.mean(data_KLD[start:stop])
    dats = [means_TVD, means_KLD]
    # if entropy, do log scale
    if criterion == "_cross_entropy_":
        # shift so all values are positive before taking logs
        max1, max2 = np.max(means_TVD), np.max(means_KLD)
        max_ = abs(max(max1, max2) + 100)
        dats = [np.log(-means_TVD+max_), np.log(-means_KLD+max_)]
    # group and plot the data
    medianprops = dict(linestyle='-', color='black')
    bp = ax.boxplot(dats, notch = False, showfliers = False, patch_artist=True,
                    medianprops = medianprops, widths=0.6)
    # set colors for boxplot outlines
    # NOTE(review): boxplot returns 2 whiskers/caps *per box*; this zip
    # truncates at 2 items, so only the first box's whiskers/caps are
    # recolored -- verify this is the intended styling.
    cols = [color_TVD, color_KLD]
    for box, whisker, cap, median, flier, i in zip(bp['boxes'], bp['whiskers'],
            bp['caps'], bp['medians'], bp['fliers'], range(0,2)):
        box.set( color=cols[i], linewidth=linewidth_boxplot)
        whisker.set( color=cols[i], linewidth=linewidth_boxplot)
        cap.set( color=cols[i], linewidth=linewidth_boxplot)
        median.set(color = "black")
        # NOTE(review): Line2D.set(False) is an odd call and fliers are
        # already hidden via showfliers=False -- confirm it is needed.
        flier.set(False)
    for patch, color in zip(bp['boxes'], cols):
        patch.set_facecolor(color)
    # set x-axis label
    ax.set_xticklabels(['TVD', 'KLD'], fontsize = 13)
    ax.yaxis.set_tick_params(labelsize=12)
    # remove top axes and right axes ticks
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    return fig, ax
def single_boxplot_comparison(base_path, fig, ax, data_name, criterion):
    """Create a single boxplot comparison between TVD and KLD on data set
    data_name with the given criterion.

    Unlike the grouped variant, this plots the raw flattened values rather
    than per-split means.  Returns the (fig, ax) pair it was given.
    """
    # get the path at which relevant aggregates are stored
    path_name_TVD = (base_path + "/" + data_name + "/" +
                     "aggregate" + criterion + "TVD" + ".txt")
    path_name_KLD = (base_path + "/" + data_name + "/" +
                     "aggregate" + criterion + "KLD" + ".txt")
    # read in the aggregate data
    data_TVD = np.loadtxt(path_name_TVD).flatten()
    data_KLD = np.loadtxt(path_name_KLD).flatten()
    # group and plot the data
    dats = [data_TVD, data_KLD]
    bp = ax.boxplot(dats, notch = False, showfliers = False, widths = 0.6)
    # set colors for boxplot outlines
    # NOTE(review): boxplot returns 2 whiskers/caps *per box*; this zip
    # truncates at 2 items, so only the first box's whiskers/caps are
    # recolored -- verify this is the intended styling.
    cols = [color_TVD, color_KLD]
    for box, whisker, cap, median, flier, i in zip(bp['boxes'], bp['whiskers'],
            bp['caps'], bp['medians'], bp['fliers'], range(0,2)):
        box.set( color=cols[i], linewidth=linewidth_boxplot)
        whisker.set( color=cols[i], linewidth=linewidth_boxplot)
        cap.set( color=cols[i], linewidth=linewidth_boxplot)
        median.set(color = median_color,linewidth=linewidth_boxplot)
        # NOTE(review): Line2D.set(False) is an odd call and fliers are
        # already hidden via showfliers=False -- confirm it is needed.
        flier.set(False)
    # set x-axis label
    ax.set_xticklabels(['TVD', 'KLD'])
    # remove top axes and right axes ticks
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    return fig, ax
def boxplot_comparison(base_path, list_of_data_names, list_of_criteria, fig_size):
    """Create a plot s.t. each row gives a criterion, each col a data set.

    Returns the figure and the array of axes.
    """
    num_rows = len(list_of_criteria)
    num_cols = len(list_of_data_names)
    fig, ax_array = plt.subplots(num_rows, num_cols, figsize = fig_size)
    # BUG FIX: the original indexed BOTH lists with the flat panel index i,
    # which over-runs them for any true grid (rows*cols panels vs. rows
    # criteria / cols data sets).  Decompose i into (row, col) instead, so
    # rows select criteria and columns select data sets, matching
    # boxplot_grouped_comparison.  np.ravel also handles the scalar-Axes
    # case that plt.subplots returns for a 1x1 grid.
    for i, ax in enumerate(np.ravel(ax_array)):
        row, col = divmod(i, num_cols)
        fig, _ = single_boxplot_comparison(base_path, fig, ax,
                        list_of_data_names[col], list_of_criteria[row])
    return fig, ax_array
def boxplot_grouped_comparison(base_path, list_of_data_names, list_of_plot_names,
                               list_of_criteria, fig_size, ylim=(0.48, 0.699)):
    """Create a plot s.t. each row gives a criterion, each col a data set.

    Parameters
    ----------
    base_path : str
        Root directory holding one sub-directory per data set.
    list_of_data_names : list of str
        One data set per column (sub-directory names).
    list_of_plot_names : list of str
        Human-readable column titles, same order as list_of_data_names.
    list_of_criteria : list of str
        One criterion per row (file-name infixes).
    fig_size : tuple
        Figure size passed to plt.subplots.
    ylim : sequence of 2 floats, optional
        Shared y-axis limits for every panel.  (Changed from a mutable
        list default to a tuple; behavior is identical.)

    Returns
    -------
    fig, ax_array : the Figure and the 2-D array of Axes.
    """
    # create panels; squeeze=False keeps ax_array 2-D so the
    # ax_array[row, col] indexing below also works for single-row grids
    num_rows = len(list_of_criteria)
    num_cols = len(list_of_data_names)
    fig, ax_array = plt.subplots(num_rows, num_cols, figsize = fig_size,
                                 squeeze = False)
    # row labels; assumes the criteria are (log probs, accuracy) in this order
    ylabel_names = ["predictive likelihood", "accuracy"]
    for row in range(0, num_rows):
        for col in range(0, num_cols):
            fig, ax_array[row, col] = single_boxplot_grouped_comparison(base_path,
                    fig, ax_array[row, col], list_of_data_names[col],
                    list_of_criteria[row])
            ax_array[row, col].set_ylim(ylim)
            if row == 0:
                # data set name as column title
                ax_array[row, col].set_title(list_of_plot_names[col], fontsize=15)
            if col == 0:
                # criterion name as row label
                ax_array[row, col].set_ylabel(ylabel_names[row], fontsize=15)
    return fig, ax_array
'''Aggregate the full experimental files into a single dataset'''
# note: this might take some time
# set the save paths (where are the results stored?)
probit_path = "data/probit"
nn_path = "data/NN"
# probit datasets to evaluate
probit_data = ["mammographic_mass", "fourclass", "heart", "haberman",
               "breast-cancer-wisconsin"]
# NN datasets evaluated
nn_data = ["pima", "diabetic",
           "banknote_authentication", "ilpd", "rice"]
# merge the per-split result files into one aggregate file per dataset
for d in probit_data:
    aggregate_splits(probit_path, d)
for d in nn_data:
    aggregate_splits(nn_path, d)
"""Plots"""
fig_path = "figures"
# evaluation criteria (file-name infixes of the aggregate files)
crits = ["_accuracy_", "_log_probs_", ]
# Probit results
# probit plot headers with padded white space
probit_headers = ["mammograph", "fourclass", "heart", "haberman ", "breast cancer "]
# create boxplots with one panel for each dataset,
# top row = predictive likelihood, bottom row = accuracy
fig, ax = boxplot_grouped_comparison(probit_path, probit_data, probit_headers, crits, (7.5,8))
fig.suptitle("Probit results", fontsize=18 )
fig.tight_layout()
fig.savefig(fig_path + "/" + "probit_results.pdf")
# NN results
# plot headers
nn_headers = ["pima", "diabetic",
              "banknote", "ilpd", "rice"]
# create boxplots with one panel for each dataset,
# top row = predictive likelihood, bottom row = accuracy
fig, ax = boxplot_grouped_comparison(nn_path, nn_data, nn_headers, crits, (7.5,8))
# FIX: the original called tight_layout() both before and after suptitle();
# set the title first, then lay out once, matching the probit figure above.
fig.suptitle("Neural Network results", fontsize=18 )
fig.tight_layout()
fig.savefig(fig_path + "/" + "NN_results.pdf")
| [
"numpy.mean",
"numpy.log",
"numpy.max",
"numpy.zeros",
"numpy.loadtxt",
"matplotlib.pyplot.subplots"
] | [((2559, 2584), 'numpy.loadtxt', 'np.loadtxt', (['path_name_TVD'], {}), '(path_name_TVD)\n', (2569, 2584), True, 'import numpy as np\n'), ((2600, 2625), 'numpy.loadtxt', 'np.loadtxt', (['path_name_KLD'], {}), '(path_name_KLD)\n', (2610, 2625), True, 'import numpy as np\n'), ((6767, 6817), 'matplotlib.pyplot.subplots', 'plt.subplots', (['num_rows', 'num_cols'], {'figsize': 'fig_size'}), '(num_rows, num_cols, figsize=fig_size)\n', (6779, 6817), True, 'import matplotlib.pyplot as plt\n'), ((7422, 7472), 'matplotlib.pyplot.subplots', 'plt.subplots', (['num_rows', 'num_cols'], {'figsize': 'fig_size'}), '(num_rows, num_cols, figsize=fig_size)\n', (7434, 7472), True, 'import matplotlib.pyplot as plt\n'), ((2875, 2887), 'numpy.zeros', 'np.zeros', (['(50)'], {}), '(50)\n', (2883, 2887), True, 'import numpy as np\n'), ((2908, 2920), 'numpy.zeros', 'np.zeros', (['(50)'], {}), '(50)\n', (2916, 2920), True, 'import numpy as np\n'), ((3376, 3388), 'numpy.zeros', 'np.zeros', (['(50)'], {}), '(50)\n', (3384, 3388), True, 'import numpy as np\n'), ((3409, 3421), 'numpy.zeros', 'np.zeros', (['(50)'], {}), '(50)\n', (3417, 3421), True, 'import numpy as np\n'), ((3030, 3062), 'numpy.mean', 'np.mean', (['data_TVD[start:stop, :]'], {}), '(data_TVD[start:stop, :])\n', (3037, 3062), True, 'import numpy as np\n'), ((3089, 3121), 'numpy.mean', 'np.mean', (['data_KLD[start:stop, :]'], {}), '(data_KLD[start:stop, :])\n', (3096, 3121), True, 'import numpy as np\n'), ((3543, 3572), 'numpy.mean', 'np.mean', (['data_TVD[start:stop]'], {}), '(data_TVD[start:stop])\n', (3550, 3572), True, 'import numpy as np\n'), ((3600, 3629), 'numpy.mean', 'np.mean', (['data_KLD[start:stop]'], {}), '(data_KLD[start:stop])\n', (3607, 3629), True, 'import numpy as np\n'), ((3778, 3795), 'numpy.max', 'np.max', (['means_TVD'], {}), '(means_TVD)\n', (3784, 3795), True, 'import numpy as np\n'), ((3797, 3814), 'numpy.max', 'np.max', (['means_KLD'], {}), '(means_KLD)\n', (3803, 3814), True, 'import numpy as np\n'), 
((3873, 3898), 'numpy.log', 'np.log', (['(-means_TVD + max_)'], {}), '(-means_TVD + max_)\n', (3879, 3898), True, 'import numpy as np\n'), ((3898, 3923), 'numpy.log', 'np.log', (['(-means_KLD + max_)'], {}), '(-means_KLD + max_)\n', (3904, 3923), True, 'import numpy as np\n'), ((5533, 5558), 'numpy.loadtxt', 'np.loadtxt', (['path_name_TVD'], {}), '(path_name_TVD)\n', (5543, 5558), True, 'import numpy as np\n'), ((5584, 5609), 'numpy.loadtxt', 'np.loadtxt', (['path_name_KLD'], {}), '(path_name_KLD)\n', (5594, 5609), True, 'import numpy as np\n')] |
from utils.prepare_data import get_training_data
from utils.prepare_plots import plot_results
from simpleencoderdecoder.build_simple_encoderdecoder_model import simple_encoderdecoder
import random
import numpy as np
if __name__ == "__main__":
    # Load (profile, midcurve) training pairs as 8-bit grayscale images.
    profile_gray_objs, midcurve_gray_objs = get_training_data()
    # Hold out a handful of raw profiles for a qualitative prediction check.
    test_gray_images = random.sample(profile_gray_objs, 5)

    def _to_unit_range(images):
        # Rescale 8-bit pixel intensities into [0, 1].
        return np.asarray(images) / 255.

    profile_gray_objs = _to_unit_range(profile_gray_objs)
    midcurve_gray_objs = _to_unit_range(midcurve_gray_objs)
    test_gray_images = _to_unit_range(test_gray_images)

    # Train (or re-train) the encoder-decoder on the normalized pairs.
    retrain_model = True
    autoencoder = simple_encoderdecoder()
    autoencoder.train(profile_gray_objs, midcurve_gray_objs, retrain_model)
    # Predict midcurves for the held-out profiles and visualize them.
    original_profile_imgs, predicted_midcurve_imgs = autoencoder.predict(test_gray_images)
    plot_results(original_profile_imgs, predicted_midcurve_imgs)
| [
"random.sample",
"utils.prepare_plots.plot_results",
"numpy.asarray",
"utils.prepare_data.get_training_data",
"simpleencoderdecoder.build_simple_encoderdecoder_model.simple_encoderdecoder"
] | [((288, 307), 'utils.prepare_data.get_training_data', 'get_training_data', ([], {}), '()\n', (305, 307), False, 'from utils.prepare_data import get_training_data\n'), ((331, 366), 'random.sample', 'random.sample', (['profile_gray_objs', '(5)'], {}), '(profile_gray_objs, 5)\n', (344, 366), False, 'import random\n'), ((589, 612), 'simpleencoderdecoder.build_simple_encoderdecoder_model.simple_encoderdecoder', 'simple_encoderdecoder', ([], {}), '()\n', (610, 612), False, 'from simpleencoderdecoder.build_simple_encoderdecoder_model import simple_encoderdecoder\n'), ((773, 833), 'utils.prepare_plots.plot_results', 'plot_results', (['original_profile_imgs', 'predicted_midcurve_imgs'], {}), '(original_profile_imgs, predicted_midcurve_imgs)\n', (785, 833), False, 'from utils.prepare_plots import plot_results\n'), ((392, 421), 'numpy.asarray', 'np.asarray', (['profile_gray_objs'], {}), '(profile_gray_objs)\n', (402, 421), True, 'import numpy as np\n'), ((454, 484), 'numpy.asarray', 'np.asarray', (['midcurve_gray_objs'], {}), '(midcurve_gray_objs)\n', (464, 484), True, 'import numpy as np\n'), ((515, 543), 'numpy.asarray', 'np.asarray', (['test_gray_images'], {}), '(test_gray_images)\n', (525, 543), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.