code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from __future__ import print_function
import sys
import random
import numpy as np
def set_random_seed(seed):
    """Sets the random seed.

    Seeds the ``random`` module, then seeds NumPy from a value drawn from
    the freshly seeded ``random`` generator, so one call makes both
    generators reproducible.

    :param seed: new random seed

    >>> set_random_seed(19)
    >>> random.randint(0, 10000)
    708
    >>> np.random.rand(3, 2)
    array([[0.6356515 , 0.15946741],
           [0.42432349, 0.93350408],
           [0.20335322, 0.5258474 ]])
    """
    random.seed(seed)
    # random.randint requires integer bounds: the former 1e8 (a float) was
    # deprecated in Python 3.10 and raises TypeError on Python >= 3.12.
    # 10**8 spans the same range, so the drawn value is unchanged.
    np.random.seed(random.randint(0, 10**8))
def rand_direction(n, d):
    """Draw ``n`` uniformly random unit directions in ``R^d``.

    Each row is an i.i.d. standard-normal vector rescaled to unit length,
    which gives a uniform distribution over the sphere's surface.

    :param n: number of random directions to generate
    :param d: dimension of the random direction vectors
    :return: matrix of random unit directions (size: n x d)
    """
    directions = np.random.randn(n, d)
    lengths = np.linalg.norm(directions, axis=1, keepdims=True)
    return directions / lengths
def eprint(*args, **kwargs):
    """Print to standard error, useful for debugging (standard error is not buffered).

    Accepts the same positional and keyword arguments as :func:`print`.
    Note that passing ``file=`` explicitly raises ``TypeError``, since it
    is already supplied here.
    """
    print(*args, file=sys.stderr, **kwargs)
| [
"numpy.random.randn",
"random.randint",
"random.seed",
"numpy.linalg.norm"
] | [((398, 415), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (409, 415), False, 'import random\n'), ((1395, 1416), 'numpy.random.randn', 'np.random.randn', (['n', 'd'], {}), '(n, d)\n', (1410, 1416), True, 'import numpy as np\n'), ((435, 465), 'random.randint', 'random.randint', (['(0)', '(100000000.0)'], {}), '(0, 100000000.0)\n', (449, 465), False, 'import random\n'), ((1426, 1451), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (1440, 1451), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import numpy as np
from pyscf import lib
from pyscf.lib import logger
from pyscf.cc import ccsd
from pyscf.cc import eom_rccsd
from pyscf.cc import gintermediates as imd
########################################
# EOM-IP-CCSD
########################################
def vector_to_amplitudes_ip(vector, nmo, nocc):
    """Unpack a flat IP-EOM vector into (r1, r2) amplitude tensors.

    ``r1`` has shape (nocc,).  ``r2`` has shape (nocc, nocc, nvir) and is
    antisymmetric in its two occupied indices, so only the strictly lower
    triangle is stored in ``vector``.
    """
    nvir = nmo - nocc
    npair = nocc * (nocc - 1) // 2
    r1 = vector[:nocc].copy()
    packed = vector[nocc:].reshape(npair, nvir)
    row, col = np.tril_indices(nocc, -1)
    r2 = np.zeros((nocc, nocc, nvir), dtype=vector.dtype)
    r2[row, col] = packed
    r2[col, row] = -packed
    return r1, r2
def amplitudes_to_vector_ip(r1, r2):
    """Pack (r1, r2) IP amplitudes into one flat vector.

    Only the strictly lower triangle of r2's occupied index pair is kept,
    since r2 is antisymmetric in those indices.
    """
    nocc = r1.size
    row, col = np.tril_indices(nocc, -1)
    lower = r2[row, col].ravel()
    return np.hstack((r1, lower))
def ipccsd_matvec(eom, vector, imds=None, diag=None):
    """Apply the IP-EOM-CCSD similarity-transformed Hamiltonian to ``vector``.

    :param eom: EOMIP object supplying nocc/nmo and the intermediates builder
    :param vector: packed (r1, r2) trial amplitude vector
    :param imds: precomputed intermediates (built on demand if None)
    :param diag: unused here; kept for interface compatibility with the
        generic iterative solver driver
    :returns: the packed product H*r as a flat vector
    """
    # Ref: Tu, Wang, and Li, J. Chem. Phys. 136, 174102 (2012) Eqs.(8)-(9)
    if imds is None: imds = eom.make_imds()
    nocc = eom.nocc
    nmo = eom.nmo
    r1, r2 = vector_to_amplitudes_ip(vector, nmo, nocc)

    # Eq. (8): 1h block
    Hr1 = -np.einsum('mi,m->i', imds.Foo, r1)
    Hr1 += np.einsum('me,mie->i', imds.Fov, r2)
    Hr1 += -0.5*np.einsum('nmie,mne->i', imds.Wooov, r2)
    # Eq. (9): 2h1p block
    Hr2 = lib.einsum('ae,ije->ija', imds.Fvv, r2)
    tmp1 = lib.einsum('mi,mja->ija', imds.Foo, r2)
    # tmp - tmp.transpose(1,0,2) antisymmetrizes over the i<->j pair
    Hr2 -= tmp1 - tmp1.transpose(1,0,2)
    Hr2 -= np.einsum('maji,m->ija', imds.Wovoo, r1)
    Hr2 += 0.5*lib.einsum('mnij,mna->ija', imds.Woooo, r2)
    tmp2 = lib.einsum('maei,mje->ija', imds.Wovvo, r2)
    Hr2 += tmp2 - tmp2.transpose(1,0,2)
    Hr2 += 0.5*lib.einsum('mnef,mnf,ijae->ija', imds.Woovv, r2, imds.t2)

    vector = amplitudes_to_vector_ip(Hr1, Hr2)
    return vector
def ipccsd_diag(eom, imds=None):
    """Diagonal of the IP-EOM-CCSD effective Hamiltonian.

    Used as the preconditioner for the iterative eigensolver.  Returns the
    packed diagonal with the same layout as the IP trial vectors.

    :param eom: EOMIP object (used only to build ``imds`` when absent)
    :param imds: precomputed intermediates (built on demand if None)
    """
    if imds is None: imds = eom.make_imds()
    t1, t2 = imds.t1, imds.t2
    nocc, nvir = t1.shape

    # 1h diagonal
    Hr1 = -np.diag(imds.Foo)
    # 2h1p diagonal, accumulated element by element
    Hr2 = np.zeros((nocc,nocc,nvir), dtype=t1.dtype)
    for i in range(nocc):
        for j in range(nocc):
            for a in range(nvir):
                Hr2[i,j,a] += imds.Fvv[a,a]
                Hr2[i,j,a] += -imds.Foo[i,i]
                Hr2[i,j,a] += -imds.Foo[j,j]
                Hr2[i,j,a] += 0.5*(imds.Woooo[i,j,i,j]-imds.Woooo[j,i,i,j])
                Hr2[i,j,a] += imds.Wovvo[i,a,a,i]
                Hr2[i,j,a] += imds.Wovvo[j,a,a,j]
                Hr2[i,j,a] += 0.5*(np.dot(imds.Woovv[i,j,:,a], t2[i,j,a,:])
                                   -np.dot(imds.Woovv[j,i,:,a], t2[i,j,a,:]))
    vector = amplitudes_to_vector_ip(Hr1, Hr2)
    return vector
class EOMIP(eom_rccsd.EOMIP):
    """EOM-IP-CCSD on a general (spin-orbital) CCSD reference."""

    matvec = ipccsd_matvec
    l_matvec = None
    get_diag = ipccsd_diag
    ipccsd_star = None

    def vector_to_amplitudes(self, vector, nmo=None, nocc=None):
        """Unpack ``vector`` into (r1, r2); orbital counts default to self's."""
        if nmo is None: nmo = self.nmo
        if nocc is None: nocc = self.nocc
        return vector_to_amplitudes_ip(vector, nmo, nocc)

    def amplitudes_to_vector(self, r1, r2):
        """Pack (r1, r2) into a flat vector."""
        return amplitudes_to_vector_ip(r1, r2)

    def vector_size(self):
        """Length of the packed IP vector: nocc (1h) + C(nocc,2)*nvir (2h1p)."""
        nocc = self.nocc
        nvir = self.nmo - nocc
        # Integer (floor) division: this size is used to dimension arrays,
        # so it must be an int.  The previous '/' returned a float; the
        # sibling EOMEA.vector_size already uses '//'.
        return nocc + nocc*(nocc-1)//2*nvir

    def make_imds(self, eris=None):
        """Build and return the IP intermediates container."""
        imds = _IMDS(self._cc, eris)
        imds.make_ip()
        return imds
########################################
# EOM-EA-CCSD
########################################
def vector_to_amplitudes_ea(vector, nmo, nocc):
    """Unpack a flat EA-EOM vector into (r1, r2) amplitude tensors.

    ``r1`` has shape (nvir,).  ``r2`` has shape (nocc, nvir, nvir) and is
    antisymmetric in its two virtual indices, so only the strictly lower
    triangle is stored in ``vector``.
    """
    nvir = nmo - nocc
    r1 = vector[:nvir].copy()
    packed = vector[nvir:].reshape(nocc, -1)
    row, col = np.tril_indices(nvir, -1)
    r2 = np.zeros((nocc, nvir, nvir), vector.dtype)
    r2[:, row, col] = packed
    r2[:, col, row] = -packed
    return r1, r2
def amplitudes_to_vector_ea(r1, r2):
    """Pack (r1, r2) EA amplitudes into one flat vector.

    Only the strictly lower triangle of r2's virtual index pair is kept,
    since r2 is antisymmetric in those indices.
    """
    nvir = r1.size
    tril = np.tril_indices(nvir, -1)
    packed_r2 = r2[:, tril[0], tril[1]].reshape(-1)
    return np.concatenate((r1, packed_r2))
def eaccsd_matvec(eom, vector, imds=None, diag=None):
    """Apply the EA-EOM-CCSD similarity-transformed Hamiltonian to ``vector``.

    :param eom: EOMEA object supplying nocc/nmo and the intermediates builder
    :param vector: packed (r1, r2) trial amplitude vector
    :param imds: precomputed intermediates (built on demand if None)
    :param diag: unused here; kept for interface compatibility with the
        generic iterative solver driver
    :returns: the packed product H*r as a flat vector
    """
    # Ref: Nooijen and Bartlett, J. Chem. Phys. 102, 3629 (1994) Eqs.(30)-(31)
    if imds is None: imds = eom.make_imds()
    nocc = eom.nocc
    nmo = eom.nmo
    r1, r2 = vector_to_amplitudes_ea(vector, nmo, nocc)

    # Eq. (30): 1p block
    Hr1 = np.einsum('ac,c->a', imds.Fvv, r1)
    Hr1 += np.einsum('ld,lad->a', imds.Fov, r2)
    Hr1 += 0.5*np.einsum('alcd,lcd->a', imds.Wvovv, r2)
    # Eq. (31): 2p1h block
    Hr2 = np.einsum('abcj,c->jab', imds.Wvvvo, r1)
    tmp1 = lib.einsum('ac,jcb->jab', imds.Fvv, r2)
    # tmp - tmp.transpose(0,2,1) antisymmetrizes over the a<->b pair
    Hr2 += tmp1 - tmp1.transpose(0,2,1)
    Hr2 -= lib.einsum('lj,lab->jab', imds.Foo, r2)
    tmp2 = lib.einsum('lbdj,lad->jab', imds.Wovvo, r2)
    Hr2 += tmp2 - tmp2.transpose(0,2,1)
    Hr2 += 0.5*lib.einsum('abcd,jcd->jab', imds.Wvvvv, r2)
    Hr2 -= 0.5*lib.einsum('klcd,lcd,kjab->jab', imds.Woovv, r2, imds.t2)

    vector = amplitudes_to_vector_ea(Hr1, Hr2)
    return vector
def eaccsd_diag(eom, imds=None):
    """Diagonal of the EA-EOM-CCSD effective Hamiltonian.

    Used as the preconditioner for the iterative eigensolver.  Returns the
    packed diagonal with the same layout as the EA trial vectors.  Only
    b < a entries are filled, matching the packed lower-triangle layout.

    :param eom: EOMEA object (used only to build ``imds`` when absent)
    :param imds: precomputed intermediates (built on demand if None)
    """
    if imds is None: imds = eom.make_imds()
    t1, t2 = imds.t1, imds.t2
    nocc, nvir = t1.shape

    # 1p diagonal
    Hr1 = np.diag(imds.Fvv)
    # 2p1h diagonal, accumulated element by element
    Hr2 = np.zeros((nocc,nvir,nvir),dtype=t1.dtype)
    for a in range(nvir):
        # Materialize one slice of Wvvvv at a time
        _Wvvvva = np.array(imds.Wvvvv[a])
        for b in range(a):
            for j in range(nocc):
                Hr2[j,a,b] += imds.Fvv[a,a]
                Hr2[j,a,b] += imds.Fvv[b,b]
                Hr2[j,a,b] += -imds.Foo[j,j]
                Hr2[j,a,b] += imds.Wovvo[j,b,b,j]
                Hr2[j,a,b] += imds.Wovvo[j,a,a,j]
                Hr2[j,a,b] += 0.5*(_Wvvvva[b,a,b]-_Wvvvva[b,b,a])
                Hr2[j,a,b] += -0.5*(np.dot(imds.Woovv[:,j,a,b], t2[:,j,a,b])
                                    -np.dot(imds.Woovv[:,j,b,a], t2[:,j,a,b]))
    vector = amplitudes_to_vector_ea(Hr1, Hr2)
    return vector
class EOMEA(eom_rccsd.EOMEA):
    """EOM-EA-CCSD on a general (spin-orbital) CCSD reference."""

    matvec = eaccsd_matvec
    l_matvec = None
    get_diag = eaccsd_diag
    eaccsd_star = None

    def vector_to_amplitudes(self, vector, nmo=None, nocc=None):
        """Unpack ``vector`` into (r1, r2); orbital counts default to self's."""
        nmo = self.nmo if nmo is None else nmo
        nocc = self.nocc if nocc is None else nocc
        return vector_to_amplitudes_ea(vector, nmo, nocc)

    def amplitudes_to_vector(self, r1, r2):
        """Pack (r1, r2) into a flat vector."""
        return amplitudes_to_vector_ea(r1, r2)

    def vector_size(self):
        """Length of the packed EA vector: nvir (1p) + nocc*C(nvir,2) (2p1h)."""
        nocc = self.nocc
        nvir = self.nmo - nocc
        return nvir + nocc*nvir*(nvir-1)//2

    def make_imds(self, eris=None):
        """Build and return the EA intermediates container."""
        imds = _IMDS(self._cc, eris)
        imds.make_ea()
        return imds
########################################
# EOM-EE-CCSD
########################################
# The EE amplitude (un)packing is identical to the ground-state CCSD
# "_s4" packed layout, so those helpers are reused directly.
vector_to_amplitudes_ee = ccsd.vector_to_amplitudes_s4
amplitudes_to_vector_ee = ccsd.amplitudes_to_vector_s4
def eeccsd_matvec(eom, vector, imds=None, diag=None):
    """Apply the EE-EOM-CCSD similarity-transformed Hamiltonian to ``vector``.

    :param eom: EOMEE object supplying nocc/nmo and the intermediates builder
    :param vector: packed (r1, r2) trial amplitude vector
    :param imds: precomputed intermediates (built on demand if None)
    :param diag: unused here; kept for interface compatibility with the
        generic iterative solver driver
    :returns: the packed product H*r as a flat vector
    """
    # Ref: Wang, Tu, and Wang, J. Chem. Theory Comput. 10, 5567 (2014) Eqs.(9)-(10)
    # Note: Last line in Eq. (10) is superfluous.
    # See, e.g. Gwaltney, Nooijen, and Barlett, Chem. Phys. Lett. 248, 189 (1996)
    if imds is None: imds = eom.make_imds()
    nocc = eom.nocc
    nmo = eom.nmo
    r1, r2 = vector_to_amplitudes_ee(vector, nmo, nocc)

    # Eq. (9): 1p1h block
    Hr1 = lib.einsum('ae,ie->ia', imds.Fvv, r1)
    Hr1 -= lib.einsum('mi,ma->ia', imds.Foo, r1)
    Hr1 += lib.einsum('me,imae->ia', imds.Fov, r2)
    Hr1 += lib.einsum('maei,me->ia', imds.Wovvo, r1)
    Hr1 -= 0.5*lib.einsum('mnie,mnae->ia', imds.Wooov, r2)
    Hr1 += 0.5*lib.einsum('amef,imef->ia', imds.Wvovv, r2)
    # Eq. (10): 2p2h block.  tmpab collects terms antisymmetrized over
    # a<->b, tmpij terms antisymmetrized over i<->j, and tmpabij terms
    # antisymmetrized over both index pairs.
    tmpab = lib.einsum('be,ijae->ijab', imds.Fvv, r2)
    tmpab -= 0.5*lib.einsum('mnef,ijae,mnbf->ijab', imds.Woovv, imds.t2, r2)
    tmpab -= lib.einsum('mbij,ma->ijab', imds.Wovoo, r1)
    tmpab -= lib.einsum('amef,ijfb,me->ijab', imds.Wvovv, imds.t2, r1)
    tmpij = lib.einsum('mj,imab->ijab', -imds.Foo, r2)
    tmpij -= 0.5*lib.einsum('mnef,imab,jnef->ijab', imds.Woovv, imds.t2, r2)
    tmpij += lib.einsum('abej,ie->ijab', imds.Wvvvo, r1)
    tmpij += lib.einsum('mnie,njab,me->ijab', imds.Wooov, imds.t2, r1)
    tmpabij = lib.einsum('mbej,imae->ijab', imds.Wovvo, r2)
    tmpabij = tmpabij - tmpabij.transpose(1,0,2,3)
    tmpabij = tmpabij - tmpabij.transpose(0,1,3,2)
    Hr2 = tmpabij
    Hr2 += tmpab - tmpab.transpose(0,1,3,2)
    Hr2 += tmpij - tmpij.transpose(1,0,2,3)
    Hr2 += 0.5*lib.einsum('mnij,mnab->ijab', imds.Woooo, r2)
    Hr2 += 0.5*lib.einsum('abef,ijef->ijab', imds.Wvvvv, r2)

    vector = amplitudes_to_vector_ee(Hr1, Hr2)
    return vector
def eeccsd_diag(eom, imds=None):
    """Diagonal of the EE-EOM-CCSD effective Hamiltonian.

    Used as the preconditioner for the iterative eigensolver.  Returns the
    packed diagonal with the same layout as the EE trial vectors.

    :param eom: EOMEE object (used only to build ``imds`` when absent)
    :param imds: precomputed intermediates (built on demand if None)
    """
    if imds is None: imds = eom.make_imds()
    t1, t2 = imds.t1, imds.t2
    nocc, nvir = t1.shape

    # 1p1h diagonal
    Hr1 = np.zeros((nocc,nvir), dtype=t1.dtype)
    Hr2 = np.zeros((nocc,nocc,nvir,nvir), dtype=t1.dtype)
    for i in range(nocc):
        for a in range(nvir):
            Hr1[i,a] = imds.Fvv[a,a] - imds.Foo[i,i] + imds.Wovvo[i,a,a,i]

    # 2p2h diagonal.  The two Woovv*t2 contractions below are independent
    # of the loop indices, so they are evaluated once instead of being
    # recomputed on every iteration as before (results are identical).
    tmp_vv = 0.5*(np.einsum('ijeb,ijbe->ijb', imds.Woovv, t2)
                  -np.einsum('jieb,ijbe->ijb', imds.Woovv, t2))
    tmp_oo = 0.5*(np.einsum('kjab,jkab->jab', imds.Woovv, t2)
                  -np.einsum('kjba,jkab->jab', imds.Woovv, t2))
    for a in range(nvir):
        Hr2[:,:,:,a] += imds.Fvv[a,a] + tmp_vv
        Hr2[:,:,a,:] += imds.Fvv[a,a] + tmp_vv
        # Materialize one slice of Wvvvv at a time
        _Wvvvva = np.array(imds.Wvvvv[a])
        for b in range(a):
            Hr2[:,:,a,b] += 0.5*(_Wvvvva[b,a,b]-_Wvvvva[b,b,a])
        for i in range(nocc):
            tmp = imds.Wovvo[i,a,a,i]
            Hr2[:,i,:,a] += tmp
            Hr2[i,:,:,a] += tmp
            Hr2[:,i,a,:] += tmp
            Hr2[i,:,a,:] += tmp
    for i in range(nocc):
        Hr2[:,i,:,:] += -imds.Foo[i,i] + tmp_oo
        Hr2[i,:,:,:] += -imds.Foo[i,i] + tmp_oo
        for j in range(i):
            Hr2[i,j,:,:] += 0.5*(imds.Woooo[i,j,i,j]-imds.Woooo[j,i,i,j])

    vector = amplitudes_to_vector_ee(Hr1, Hr2)
    return vector
def eeccsd(eom, nroots=1, koopmans=False, guess=None, eris=None, imds=None):
    '''Calculate N-electron neutral excitations via EOM-EE-CCSD.

    Kwargs:
        nroots : int
            Number of roots (eigenvalues) requested
        koopmans : bool
            Calculate Koopmans'-like (1p1h) excitations only, targeting via
            overlap.
        guess : list of ndarray
            List of guess vectors to use for targeting via overlap.

    Returns:
        Whatever the generic ``eom_rccsd.eomee_ccsd_singlet`` driver
        returns (excitation energies and eigenvectors; see the caller
        in the ``__main__`` block, which unpacks ``e, v``).
    '''
    return eom_rccsd.eomee_ccsd_singlet(eom, nroots, koopmans, guess, eris, imds)
class EOMEE(eom_rccsd.EOMEE):
    """EOM-EE-CCSD on a general (spin-orbital) CCSD reference."""

    kernel = eeccsd
    eeccsd = eeccsd
    matvec = eeccsd_matvec
    get_diag = eeccsd_diag

    def gen_matvec(self, imds=None, **kwargs):
        """Return the (matvec, diag) pair used by the iterative solver."""
        imds = self.make_imds() if imds is None else imds
        diag = self.get_diag(imds)

        def matvec(xs):
            return [self.matvec(x, imds) for x in xs]

        return matvec, diag

    def vector_to_amplitudes(self, vector, nmo=None, nocc=None):
        """Unpack ``vector`` into (r1, r2); orbital counts default to self's."""
        nmo = self.nmo if nmo is None else nmo
        nocc = self.nocc if nocc is None else nocc
        return vector_to_amplitudes_ee(vector, nmo, nocc)

    def amplitudes_to_vector(self, r1, r2):
        """Pack (r1, r2) into a flat vector."""
        return amplitudes_to_vector_ee(r1, r2)

    def vector_size(self):
        """Size of the packed vector in the spin-orbital basis."""
        nocc = self.nocc
        nvir = self.nmo - nocc
        return nocc*nvir + nocc*(nocc-1)//2*nvir*(nvir-1)//2

    def make_imds(self, eris=None):
        """Build and return the EE intermediates container."""
        imds = _IMDS(self._cc, eris)
        imds.make_ee()
        return imds
class _IMDS:
# Exactly the same as RCCSD IMDS except
# -- rintermediates --> gintermediates
# -- Loo, Lvv, cc_Fov --> Foo, Fvv, Fov
# -- One less 2-virtual intermediate
def __init__(self, cc, eris=None):
self.verbose = cc.verbose
self.stdout = cc.stdout
self.t1 = cc.t1
self.t2 = cc.t2
if eris is None:
eris = cc.ao2mo()
self.eris = eris
self._made_shared = False
self.made_ip_imds = False
self.made_ea_imds = False
self.made_ee_imds = False
def _make_shared(self):
cput0 = (time.clock(), time.time())
t1, t2, eris = self.t1, self.t2, self.eris
self.Foo = imd.Foo(t1, t2, eris)
self.Fvv = imd.Fvv(t1, t2, eris)
self.Fov = imd.Fov(t1, t2, eris)
# 2 virtuals
self.Wovvo = imd.Wovvo(t1, t2, eris)
self.Woovv = eris.oovv
self._made_shared = True
logger.timer_debug1(self, 'EOM-CCSD shared intermediates', *cput0)
return self
def make_ip(self):
if not self._made_shared:
self._make_shared()
cput0 = (time.clock(), time.time())
t1, t2, eris = self.t1, self.t2, self.eris
# 0 or 1 virtuals
self.Woooo = imd.Woooo(t1, t2, eris)
self.Wooov = imd.Wooov(t1, t2, eris)
self.Wovoo = imd.Wovoo(t1, t2, eris)
self.made_ip_imds = True
logger.timer_debug1(self, 'EOM-CCSD IP intermediates', *cput0)
return self
def make_ea(self):
if not self._made_shared:
self._make_shared()
cput0 = (time.clock(), time.time())
t1, t2, eris = self.t1, self.t2, self.eris
# 3 or 4 virtuals
self.Wvovv = imd.Wvovv(t1, t2, eris)
self.Wvvvv = imd.Wvvvv(t1, t2, eris)
self.Wvvvo = imd.Wvvvo(t1, t2, eris,self.Wvvvv)
self.made_ea_imds = True
logger.timer_debug1(self, 'EOM-CCSD EA intermediates', *cput0)
return self
def make_ee(self):
if not self._made_shared:
self._make_shared()
cput0 = (time.clock(), time.time())
t1, t2, eris = self.t1, self.t2, self.eris
if not self.made_ip_imds:
# 0 or 1 virtuals
self.Woooo = imd.Woooo(t1, t2, eris)
self.Wooov = imd.Wooov(t1, t2, eris)
self.Wovoo = imd.Wovoo(t1, t2, eris)
if not self.made_ea_imds:
# 3 or 4 virtuals
self.Wvovv = imd.Wvovv(t1, t2, eris)
self.Wvvvv = imd.Wvvvv(t1, t2, eris)
self.Wvvvo = imd.Wvvvo(t1, t2, eris,self.Wvvvv)
self.made_ee_imds = True
logger.timer(self, 'EOM-CCSD EE intermediates', *cput0)
return self
if __name__ == '__main__':
    from pyscf import scf
    from pyscf import gto
    from pyscf.cc import gccsd

    # Smoke test on water/cc-pVDZ: every printed difference against the
    # hard-coded reference values below should be ~0.
    mol = gto.Mole()
    mol.atom = [
        [8 , (0. , 0. , 0.)],
        [1 , (0. , -0.757 , 0.587)],
        [1 , (0. , 0.757 , 0.587)]]
    mol.basis = 'cc-pvdz'
    mol.spin = 0
    mol.build()
    mf = scf.UHF(mol).run()
    # Convert the UHF solution to a generalized (spin-orbital) reference
    # so the GCCSD/EOM machinery above can be exercised.
    mf = scf.addons.convert_to_ghf(mf)
    mycc = gccsd.GCCSD(mf)
    ecc, t1, t2 = mycc.kernel()
    print(ecc - -0.2133432712431435)
    e,v = mycc.ipccsd(nroots=8)
    print(e[0] - 0.4335604332073799)
    print(e[2] - 0.5187659896045407)
    print(e[4] - 0.6782876002229172)
    #mycc.verbose = 5
    e,v = mycc.eaccsd(nroots=8)
    print(e[0] - 0.16737886338859731)
    print(e[2] - 0.24027613852009164)
    print(e[4] - 0.51006797826488071)
    e,v = mycc.eeccsd(nroots=4)
    print(e[0] - 0.2757159395886167)
    print(e[1] - 0.2757159395886167)
    print(e[2] - 0.2757159395886167)
    print(e[3] - 0.3005716731825082)
| [
"pyscf.cc.gintermediates.Wvvvo",
"pyscf.cc.gintermediates.Foo",
"pyscf.lib.logger.timer",
"time.clock",
"numpy.array",
"numpy.einsum",
"pyscf.cc.gintermediates.Fvv",
"pyscf.cc.gintermediates.Wvvvv",
"pyscf.scf.UHF",
"pyscf.cc.gccsd.GCCSD",
"numpy.dot",
"pyscf.cc.gintermediates.Fov",
"numpy.t... | [((1025, 1073), 'numpy.zeros', 'np.zeros', (['(nocc, nocc, nvir)'], {'dtype': 'vector.dtype'}), '((nocc, nocc, nvir), dtype=vector.dtype)\n', (1033, 1073), True, 'import numpy as np\n'), ((1087, 1112), 'numpy.tril_indices', 'np.tril_indices', (['nocc', '(-1)'], {}), '(nocc, -1)\n', (1102, 1112), True, 'import numpy as np\n'), ((1720, 1756), 'numpy.einsum', 'np.einsum', (['"""me,mie->i"""', 'imds.Fov', 'r2'], {}), "('me,mie->i', imds.Fov, r2)\n", (1729, 1756), True, 'import numpy as np\n'), ((1839, 1878), 'pyscf.lib.einsum', 'lib.einsum', (['"""ae,ije->ija"""', 'imds.Fvv', 'r2'], {}), "('ae,ije->ija', imds.Fvv, r2)\n", (1849, 1878), False, 'from pyscf import lib\n'), ((1890, 1929), 'pyscf.lib.einsum', 'lib.einsum', (['"""mi,mja->ija"""', 'imds.Foo', 'r2'], {}), "('mi,mja->ija', imds.Foo, r2)\n", (1900, 1929), False, 'from pyscf import lib\n'), ((1981, 2021), 'numpy.einsum', 'np.einsum', (['"""maji,m->ija"""', 'imds.Wovoo', 'r1'], {}), "('maji,m->ija', imds.Wovoo, r1)\n", (1990, 2021), True, 'import numpy as np\n'), ((2092, 2135), 'pyscf.lib.einsum', 'lib.einsum', (['"""maei,mje->ija"""', 'imds.Wovvo', 'r2'], {}), "('maei,mje->ija', imds.Wovvo, r2)\n", (2102, 2135), False, 'from pyscf import lib\n'), ((2489, 2533), 'numpy.zeros', 'np.zeros', (['(nocc, nocc, nvir)'], {'dtype': 't1.dtype'}), '((nocc, nocc, nvir), dtype=t1.dtype)\n', (2497, 2533), True, 'import numpy as np\n'), ((4028, 4070), 'numpy.zeros', 'np.zeros', (['(nocc, nvir, nvir)', 'vector.dtype'], {}), '((nocc, nvir, nvir), vector.dtype)\n', (4036, 4070), True, 'import numpy as np\n'), ((4084, 4109), 'numpy.tril_indices', 'np.tril_indices', (['nvir', '(-1)'], {}), '(nvir, -1)\n', (4099, 4109), True, 'import numpy as np\n'), ((4302, 4327), 'numpy.tril_indices', 'np.tril_indices', (['nvir', '(-1)'], {}), '(nvir, -1)\n', (4317, 4327), True, 'import numpy as np\n'), ((4676, 4710), 'numpy.einsum', 'np.einsum', (['"""ac,c->a"""', 'imds.Fvv', 'r1'], {}), "('ac,c->a', imds.Fvv, r1)\n", (4685, 4710), 
True, 'import numpy as np\n'), ((4722, 4758), 'numpy.einsum', 'np.einsum', (['"""ld,lad->a"""', 'imds.Fov', 'r2'], {}), "('ld,lad->a', imds.Fov, r2)\n", (4731, 4758), True, 'import numpy as np\n'), ((4840, 4880), 'numpy.einsum', 'np.einsum', (['"""abcj,c->jab"""', 'imds.Wvvvo', 'r1'], {}), "('abcj,c->jab', imds.Wvvvo, r1)\n", (4849, 4880), True, 'import numpy as np\n'), ((4892, 4931), 'pyscf.lib.einsum', 'lib.einsum', (['"""ac,jcb->jab"""', 'imds.Fvv', 'r2'], {}), "('ac,jcb->jab', imds.Fvv, r2)\n", (4902, 4931), False, 'from pyscf import lib\n'), ((4983, 5022), 'pyscf.lib.einsum', 'lib.einsum', (['"""lj,lab->jab"""', 'imds.Foo', 'r2'], {}), "('lj,lab->jab', imds.Foo, r2)\n", (4993, 5022), False, 'from pyscf import lib\n'), ((5034, 5077), 'pyscf.lib.einsum', 'lib.einsum', (['"""lbdj,lad->jab"""', 'imds.Wovvo', 'r2'], {}), "('lbdj,lad->jab', imds.Wovvo, r2)\n", (5044, 5077), False, 'from pyscf import lib\n'), ((5461, 5478), 'numpy.diag', 'np.diag', (['imds.Fvv'], {}), '(imds.Fvv)\n', (5468, 5478), True, 'import numpy as np\n'), ((5489, 5533), 'numpy.zeros', 'np.zeros', (['(nocc, nvir, nvir)'], {'dtype': 't1.dtype'}), '((nocc, nvir, nvir), dtype=t1.dtype)\n', (5497, 5533), True, 'import numpy as np\n'), ((7487, 7524), 'pyscf.lib.einsum', 'lib.einsum', (['"""ae,ie->ia"""', 'imds.Fvv', 'r1'], {}), "('ae,ie->ia', imds.Fvv, r1)\n", (7497, 7524), False, 'from pyscf import lib\n'), ((7536, 7573), 'pyscf.lib.einsum', 'lib.einsum', (['"""mi,ma->ia"""', 'imds.Foo', 'r1'], {}), "('mi,ma->ia', imds.Foo, r1)\n", (7546, 7573), False, 'from pyscf import lib\n'), ((7585, 7624), 'pyscf.lib.einsum', 'lib.einsum', (['"""me,imae->ia"""', 'imds.Fov', 'r2'], {}), "('me,imae->ia', imds.Fov, r2)\n", (7595, 7624), False, 'from pyscf import lib\n'), ((7636, 7677), 'pyscf.lib.einsum', 'lib.einsum', (['"""maei,me->ia"""', 'imds.Wovvo', 'r1'], {}), "('maei,me->ia', imds.Wovvo, r1)\n", (7646, 7677), False, 'from pyscf import lib\n'), ((7823, 7864), 'pyscf.lib.einsum', 'lib.einsum', 
(['"""be,ijae->ijab"""', 'imds.Fvv', 'r2'], {}), "('be,ijae->ijab', imds.Fvv, r2)\n", (7833, 7864), False, 'from pyscf import lib\n'), ((7955, 7998), 'pyscf.lib.einsum', 'lib.einsum', (['"""mbij,ma->ijab"""', 'imds.Wovoo', 'r1'], {}), "('mbij,ma->ijab', imds.Wovoo, r1)\n", (7965, 7998), False, 'from pyscf import lib\n'), ((8012, 8069), 'pyscf.lib.einsum', 'lib.einsum', (['"""amef,ijfb,me->ijab"""', 'imds.Wvovv', 'imds.t2', 'r1'], {}), "('amef,ijfb,me->ijab', imds.Wvovv, imds.t2, r1)\n", (8022, 8069), False, 'from pyscf import lib\n'), ((8083, 8125), 'pyscf.lib.einsum', 'lib.einsum', (['"""mj,imab->ijab"""', '(-imds.Foo)', 'r2'], {}), "('mj,imab->ijab', -imds.Foo, r2)\n", (8093, 8125), False, 'from pyscf import lib\n'), ((8216, 8259), 'pyscf.lib.einsum', 'lib.einsum', (['"""abej,ie->ijab"""', 'imds.Wvvvo', 'r1'], {}), "('abej,ie->ijab', imds.Wvvvo, r1)\n", (8226, 8259), False, 'from pyscf import lib\n'), ((8273, 8330), 'pyscf.lib.einsum', 'lib.einsum', (['"""mnie,njab,me->ijab"""', 'imds.Wooov', 'imds.t2', 'r1'], {}), "('mnie,njab,me->ijab', imds.Wooov, imds.t2, r1)\n", (8283, 8330), False, 'from pyscf import lib\n'), ((8346, 8391), 'pyscf.lib.einsum', 'lib.einsum', (['"""mbej,imae->ijab"""', 'imds.Wovvo', 'r2'], {}), "('mbej,imae->ijab', imds.Wovvo, r2)\n", (8356, 8391), False, 'from pyscf import lib\n'), ((8934, 8972), 'numpy.zeros', 'np.zeros', (['(nocc, nvir)'], {'dtype': 't1.dtype'}), '((nocc, nvir), dtype=t1.dtype)\n', (8942, 8972), True, 'import numpy as np\n'), ((8982, 9032), 'numpy.zeros', 'np.zeros', (['(nocc, nocc, nvir, nvir)'], {'dtype': 't1.dtype'}), '((nocc, nocc, nvir, nvir), dtype=t1.dtype)\n', (8990, 9032), True, 'import numpy as np\n'), ((10611, 10681), 'pyscf.cc.eom_rccsd.eomee_ccsd_singlet', 'eom_rccsd.eomee_ccsd_singlet', (['eom', 'nroots', 'koopmans', 'guess', 'eris', 'imds'], {}), '(eom, nroots, koopmans, guess, eris, imds)\n', (10639, 10681), False, 'from pyscf.cc import eom_rccsd\n'), ((14502, 14512), 'pyscf.gto.Mole', 'gto.Mole', ([], {}), 
'()\n', (14510, 14512), False, 'from pyscf import gto\n'), ((14734, 14763), 'pyscf.scf.addons.convert_to_ghf', 'scf.addons.convert_to_ghf', (['mf'], {}), '(mf)\n', (14759, 14763), False, 'from pyscf import scf\n'), ((14776, 14791), 'pyscf.cc.gccsd.GCCSD', 'gccsd.GCCSD', (['mf'], {}), '(mf)\n', (14787, 14791), False, 'from pyscf.cc import gccsd\n'), ((1674, 1708), 'numpy.einsum', 'np.einsum', (['"""mi,m->i"""', 'imds.Foo', 'r1'], {}), "('mi,m->i', imds.Foo, r1)\n", (1683, 1708), True, 'import numpy as np\n'), ((1773, 1813), 'numpy.einsum', 'np.einsum', (['"""nmie,mne->i"""', 'imds.Wooov', 'r2'], {}), "('nmie,mne->i', imds.Wooov, r2)\n", (1782, 1813), True, 'import numpy as np\n'), ((2037, 2080), 'pyscf.lib.einsum', 'lib.einsum', (['"""mnij,mna->ija"""', 'imds.Woooo', 'r2'], {}), "('mnij,mna->ija', imds.Woooo, r2)\n", (2047, 2080), False, 'from pyscf import lib\n'), ((2191, 2248), 'pyscf.lib.einsum', 'lib.einsum', (['"""mnef,mnf,ijae->ija"""', 'imds.Woovv', 'r2', 'imds.t2'], {}), "('mnef,mnf,ijae->ija', imds.Woovv, r2, imds.t2)\n", (2201, 2248), False, 'from pyscf import lib\n'), ((2461, 2478), 'numpy.diag', 'np.diag', (['imds.Foo'], {}), '(imds.Foo)\n', (2468, 2478), True, 'import numpy as np\n'), ((4774, 4814), 'numpy.einsum', 'np.einsum', (['"""alcd,lcd->a"""', 'imds.Wvovv', 'r2'], {}), "('alcd,lcd->a', imds.Wvovv, r2)\n", (4783, 4814), True, 'import numpy as np\n'), ((5133, 5176), 'pyscf.lib.einsum', 'lib.einsum', (['"""abcd,jcd->jab"""', 'imds.Wvvvv', 'r2'], {}), "('abcd,jcd->jab', imds.Wvvvv, r2)\n", (5143, 5176), False, 'from pyscf import lib\n'), ((5192, 5249), 'pyscf.lib.einsum', 'lib.einsum', (['"""klcd,lcd,kjab->jab"""', 'imds.Woovv', 'r2', 'imds.t2'], {}), "('klcd,lcd,kjab->jab', imds.Woovv, r2, imds.t2)\n", (5202, 5249), False, 'from pyscf import lib\n'), ((5575, 5598), 'numpy.array', 'np.array', (['imds.Wvvvv[a]'], {}), '(imds.Wvvvv[a])\n', (5583, 5598), True, 'import numpy as np\n'), ((7693, 7736), 'pyscf.lib.einsum', 'lib.einsum', 
(['"""mnie,mnae->ia"""', 'imds.Wooov', 'r2'], {}), "('mnie,mnae->ia', imds.Wooov, r2)\n", (7703, 7736), False, 'from pyscf import lib\n'), ((7752, 7795), 'pyscf.lib.einsum', 'lib.einsum', (['"""amef,imef->ia"""', 'imds.Wvovv', 'r2'], {}), "('amef,imef->ia', imds.Wvovv, r2)\n", (7762, 7795), False, 'from pyscf import lib\n'), ((7882, 7941), 'pyscf.lib.einsum', 'lib.einsum', (['"""mnef,ijae,mnbf->ijab"""', 'imds.Woovv', 'imds.t2', 'r2'], {}), "('mnef,ijae,mnbf->ijab', imds.Woovv, imds.t2, r2)\n", (7892, 7941), False, 'from pyscf import lib\n'), ((8143, 8202), 'pyscf.lib.einsum', 'lib.einsum', (['"""mnef,imab,jnef->ijab"""', 'imds.Woovv', 'imds.t2', 'r2'], {}), "('mnef,imab,jnef->ijab', imds.Woovv, imds.t2, r2)\n", (8153, 8202), False, 'from pyscf import lib\n'), ((8616, 8661), 'pyscf.lib.einsum', 'lib.einsum', (['"""mnij,mnab->ijab"""', 'imds.Woooo', 'r2'], {}), "('mnij,mnab->ijab', imds.Woooo, r2)\n", (8626, 8661), False, 'from pyscf import lib\n'), ((8677, 8722), 'pyscf.lib.einsum', 'lib.einsum', (['"""abef,ijef->ijab"""', 'imds.Wvvvv', 'r2'], {}), "('abef,ijef->ijab', imds.Wvvvv, r2)\n", (8687, 8722), False, 'from pyscf import lib\n'), ((9420, 9443), 'numpy.array', 'np.array', (['imds.Wvvvv[a]'], {}), '(imds.Wvvvv[a])\n', (9428, 9443), True, 'import numpy as np\n'), ((12351, 12372), 'pyscf.cc.gintermediates.Foo', 'imd.Foo', (['t1', 't2', 'eris'], {}), '(t1, t2, eris)\n', (12358, 12372), True, 'from pyscf.cc import gintermediates as imd\n'), ((12392, 12413), 'pyscf.cc.gintermediates.Fvv', 'imd.Fvv', (['t1', 't2', 'eris'], {}), '(t1, t2, eris)\n', (12399, 12413), True, 'from pyscf.cc import gintermediates as imd\n'), ((12433, 12454), 'pyscf.cc.gintermediates.Fov', 'imd.Fov', (['t1', 't2', 'eris'], {}), '(t1, t2, eris)\n', (12440, 12454), True, 'from pyscf.cc import gintermediates as imd\n'), ((12498, 12521), 'pyscf.cc.gintermediates.Wovvo', 'imd.Wovvo', (['t1', 't2', 'eris'], {}), '(t1, t2, eris)\n', (12507, 12521), True, 'from pyscf.cc import gintermediates as 
imd\n'), ((12595, 12661), 'pyscf.lib.logger.timer_debug1', 'logger.timer_debug1', (['self', '"""EOM-CCSD shared intermediates"""', '*cput0'], {}), "(self, 'EOM-CCSD shared intermediates', *cput0)\n", (12614, 12661), False, 'from pyscf.lib import logger\n'), ((12917, 12940), 'pyscf.cc.gintermediates.Woooo', 'imd.Woooo', (['t1', 't2', 'eris'], {}), '(t1, t2, eris)\n', (12926, 12940), True, 'from pyscf.cc import gintermediates as imd\n'), ((12962, 12985), 'pyscf.cc.gintermediates.Wooov', 'imd.Wooov', (['t1', 't2', 'eris'], {}), '(t1, t2, eris)\n', (12971, 12985), True, 'from pyscf.cc import gintermediates as imd\n'), ((13007, 13030), 'pyscf.cc.gintermediates.Wovoo', 'imd.Wovoo', (['t1', 't2', 'eris'], {}), '(t1, t2, eris)\n', (13016, 13030), True, 'from pyscf.cc import gintermediates as imd\n'), ((13073, 13135), 'pyscf.lib.logger.timer_debug1', 'logger.timer_debug1', (['self', '"""EOM-CCSD IP intermediates"""', '*cput0'], {}), "(self, 'EOM-CCSD IP intermediates', *cput0)\n", (13092, 13135), False, 'from pyscf.lib import logger\n'), ((13391, 13414), 'pyscf.cc.gintermediates.Wvovv', 'imd.Wvovv', (['t1', 't2', 'eris'], {}), '(t1, t2, eris)\n', (13400, 13414), True, 'from pyscf.cc import gintermediates as imd\n'), ((13436, 13459), 'pyscf.cc.gintermediates.Wvvvv', 'imd.Wvvvv', (['t1', 't2', 'eris'], {}), '(t1, t2, eris)\n', (13445, 13459), True, 'from pyscf.cc import gintermediates as imd\n'), ((13481, 13516), 'pyscf.cc.gintermediates.Wvvvo', 'imd.Wvvvo', (['t1', 't2', 'eris', 'self.Wvvvv'], {}), '(t1, t2, eris, self.Wvvvv)\n', (13490, 13516), True, 'from pyscf.cc import gintermediates as imd\n'), ((13558, 13620), 'pyscf.lib.logger.timer_debug1', 'logger.timer_debug1', (['self', '"""EOM-CCSD EA intermediates"""', '*cput0'], {}), "(self, 'EOM-CCSD EA intermediates', *cput0)\n", (13577, 13620), False, 'from pyscf.lib import logger\n'), ((14304, 14359), 'pyscf.lib.logger.timer', 'logger.timer', (['self', '"""EOM-CCSD EE intermediates"""', '*cput0'], {}), "(self, 'EOM-CCSD EE 
intermediates', *cput0)\n", (14316, 14359), False, 'from pyscf.lib import logger\n'), ((12253, 12265), 'time.clock', 'time.clock', ([], {}), '()\n', (12263, 12265), False, 'import time\n'), ((12267, 12278), 'time.time', 'time.time', ([], {}), '()\n', (12276, 12278), False, 'import time\n'), ((12790, 12802), 'time.clock', 'time.clock', ([], {}), '()\n', (12800, 12802), False, 'import time\n'), ((12804, 12815), 'time.time', 'time.time', ([], {}), '()\n', (12813, 12815), False, 'import time\n'), ((13264, 13276), 'time.clock', 'time.clock', ([], {}), '()\n', (13274, 13276), False, 'import time\n'), ((13278, 13289), 'time.time', 'time.time', ([], {}), '()\n', (13287, 13289), False, 'import time\n'), ((13749, 13761), 'time.clock', 'time.clock', ([], {}), '()\n', (13759, 13761), False, 'import time\n'), ((13763, 13774), 'time.time', 'time.time', ([], {}), '()\n', (13772, 13774), False, 'import time\n'), ((13918, 13941), 'pyscf.cc.gintermediates.Woooo', 'imd.Woooo', (['t1', 't2', 'eris'], {}), '(t1, t2, eris)\n', (13927, 13941), True, 'from pyscf.cc import gintermediates as imd\n'), ((13967, 13990), 'pyscf.cc.gintermediates.Wooov', 'imd.Wooov', (['t1', 't2', 'eris'], {}), '(t1, t2, eris)\n', (13976, 13990), True, 'from pyscf.cc import gintermediates as imd\n'), ((14016, 14039), 'pyscf.cc.gintermediates.Wovoo', 'imd.Wovoo', (['t1', 't2', 'eris'], {}), '(t1, t2, eris)\n', (14025, 14039), True, 'from pyscf.cc import gintermediates as imd\n'), ((14129, 14152), 'pyscf.cc.gintermediates.Wvovv', 'imd.Wvovv', (['t1', 't2', 'eris'], {}), '(t1, t2, eris)\n', (14138, 14152), True, 'from pyscf.cc import gintermediates as imd\n'), ((14178, 14201), 'pyscf.cc.gintermediates.Wvvvv', 'imd.Wvvvv', (['t1', 't2', 'eris'], {}), '(t1, t2, eris)\n', (14187, 14201), True, 'from pyscf.cc import gintermediates as imd\n'), ((14227, 14262), 'pyscf.cc.gintermediates.Wvvvo', 'imd.Wvvvo', (['t1', 't2', 'eris', 'self.Wvvvv'], {}), '(t1, t2, eris, self.Wvvvv)\n', (14236, 14262), True, 'from pyscf.cc 
import gintermediates as imd\n'), ((14706, 14718), 'pyscf.scf.UHF', 'scf.UHF', (['mol'], {}), '(mol)\n', (14713, 14718), False, 'from pyscf import scf\n'), ((9206, 9249), 'numpy.einsum', 'np.einsum', (['"""ijeb,ijbe->ijb"""', 'imds.Woovv', 't2'], {}), "('ijeb,ijbe->ijb', imds.Woovv, t2)\n", (9215, 9249), True, 'import numpy as np\n'), ((9269, 9312), 'numpy.einsum', 'np.einsum', (['"""jieb,ijbe->ijb"""', 'imds.Woovv', 't2'], {}), "('jieb,ijbe->ijb', imds.Woovv, t2)\n", (9278, 9312), True, 'import numpy as np\n'), ((9776, 9819), 'numpy.einsum', 'np.einsum', (['"""kjab,jkab->jab"""', 'imds.Woovv', 't2'], {}), "('kjab,jkab->jab', imds.Woovv, t2)\n", (9785, 9819), True, 'import numpy as np\n'), ((9839, 9882), 'numpy.einsum', 'np.einsum', (['"""kjba,jkab->jab"""', 'imds.Woovv', 't2'], {}), "('kjba,jkab->jab', imds.Woovv, t2)\n", (9848, 9882), True, 'import numpy as np\n'), ((1343, 1368), 'numpy.tril_indices', 'np.tril_indices', (['nocc', '(-1)'], {}), '(nocc, -1)\n', (1358, 1368), True, 'import numpy as np\n'), ((2967, 3013), 'numpy.dot', 'np.dot', (['imds.Woovv[i, j, :, a]', 't2[i, j, a, :]'], {}), '(imds.Woovv[i, j, :, a], t2[i, j, a, :])\n', (2973, 3013), True, 'import numpy as np\n'), ((3043, 3089), 'numpy.dot', 'np.dot', (['imds.Woovv[j, i, :, a]', 't2[i, j, a, :]'], {}), '(imds.Woovv[j, i, :, a], t2[i, j, a, :])\n', (3049, 3089), True, 'import numpy as np\n'), ((5988, 6034), 'numpy.dot', 'np.dot', (['imds.Woovv[:, j, a, b]', 't2[:, j, a, b]'], {}), '(imds.Woovv[:, j, a, b], t2[:, j, a, b])\n', (5994, 6034), True, 'import numpy as np\n'), ((6064, 6110), 'numpy.dot', 'np.dot', (['imds.Woovv[:, j, b, a]', 't2[:, j, a, b]'], {}), '(imds.Woovv[:, j, b, a], t2[:, j, a, b])\n', (6070, 6110), True, 'import numpy as np\n')] |
# coding=utf-8
import numpy as np
import scipy as sp
import scipy.sparse as sparse
import scipy.sparse.linalg as sparse_alg
from time import time
import IEEE_cdf as cdf
from jacobian import jacobian
from P_Q import P_Q
class powerflow:
    '''Newton-Raphson power-flow solver for a case in IEEE common data format.

    Reads the network from an IEEE CDF file and stores the sparse bus
    admittance matrix, the net power injections and the bus-type
    bookkeeping (swing bus, PV buses) needed by the solver.
    '''
    def __init__(self, filename=''):
        # cdf.read returns: bus count, admittance matrix, complex load and
        # generation per bus, voltage/phase data, swing bus index, PV buses.
        n, mat_admitancia, load, generation, voltage_phase, swing_bus, PV_buses = cdf.read(filename)
        self.n = n
        self.Y = sparse.coo_matrix(mat_admitancia)
        self.swing_bus = swing_bus
        self.PV_buses = PV_buses
        # Net complex power injection per bus: generation minus load.
        delta_PQ = generation-load
        self.P = delta_PQ.real
        self.Q = delta_PQ.imag
        # Stacked injection vector [P_2..P_n, Q_2..Q_n]; bus 0 (the swing
        # bus, per the slicing below) is excluded.
        self.P_Q_inj = delta_PQ[1:].real
        self.P_Q_inj = np.append(self.P_Q_inj, delta_PQ[1:].imag)
    def J(self, x):
        '''Compute the Jacobian at a given voltage/angle state.

        :parameter x: a vector of size 2*(n-1), where n is the number of buses
        :returns jacobiano: a 2(n-1) x 2(n-1) matrix

        NOTE(review): uses ``self.last_P_Q``, which is only set by
        :meth:`f`; calling J before f raises AttributeError.
        '''
        return jacobian(self.Y, x, self.swing_bus, self.last_P_Q)
    def f(self, x):
        '''Compute the power mismatches (delta P and delta Q) at state ``x``.

        :parameter x: a vector of size 2*(n-1), where n is the number of buses
        :returns delta_PQ: a vector of size 2(n-1)'''
        self.last_P_Q = P_Q(self.Y, x)
        return (self.P_Q_inj - self.last_P_Q)
    def solve_newton(self, initial_voltage=1, initial_angle=0):
        '''Run the Newton-Raphson iteration from a flat start.

        NOTE(review): only a single iteration is performed (range(1));
        the convergence loop is still commented out below.
        '''
        theta = initial_angle * np.ones(self.n-1)
        v = initial_voltage * np.ones(self.n-1)
        # State vector x = [angles, voltage magnitudes] for all non-swing buses.
        x = np.append(theta, v)
        # TODO: replace the fixed single pass with a convergence test, e.g.
        # while norm(delta_x) > tolerance:
        for i in range(1):
            func = self.f(x)
            #self.disp_matrix(np.vstack([func, func]))
            jaco = self.J(x)
            J_to_disp = jaco.todense()
            #self.disp_matrix(J_to_disp[:self.n-1,:self.n-1])
            # Reduced Jacobian
            rJ = jaco.todense()
            # Rows to delete are those corresponding to dQ/dtheta of a PV bus
            filas_a_eliminar = self.PV_buses- 1 + self.n-1
            # Columns to delete are those corresponding to dP/dV of a PV bus
            columnas_a_eliminar = filas_a_eliminar
            rJ = np.delete(rJ, filas_a_eliminar, 0)
            rJ = np.delete(rJ, columnas_a_eliminar, 1)
            #self.disp_matrix(rJ)
            #a = sparse_alg.spsolve(jaco,-func)
            func_reducido = np.delete(func, columnas_a_eliminar)
            a = np.linalg.solve(rJ, -func_reducido)
            # NOTE(review): a standard Newton update would be x + delta_x;
            # 'a - x' negates the previous state instead of correcting it.
            # Verify against the sign conventions used in jacobian()/P_Q().
            xn = a - x
            x = xn
        return x
    def disp_matrix(self, mat):
        '''Plot the sparsity pattern (nonzero entries) of ``mat``.'''
        import matplotlib.pyplot as plt
        if sparse.issparse(mat):
            mat = mat.todense()
        mat_plot = mat != 0.0
        plt.matshow(mat_plot)
        plt.show()
#---------------------------------------------------------------------------------------------
if __name__ == '__main__':
    # Solve the IEEE 14-bus test case and report the wall-clock time.
    case = powerflow('IEEE14cdf.txt')
    t0 = time()
    solution = case.solve_newton()
    elapsed = time() - t0
    print('Tiempo: ', elapsed, 's')
| [
"P_Q.P_Q",
"numpy.linalg.solve",
"numpy.ones",
"numpy.delete",
"jacobian.jacobian",
"scipy.sparse.issparse",
"numpy.append",
"IEEE_cdf.read",
"scipy.sparse.coo_matrix",
"matplotlib.pyplot.matshow",
"time.time",
"matplotlib.pyplot.show"
] | [((2972, 2978), 'time.time', 'time', ([], {}), '()\n', (2976, 2978), False, 'from time import time\n'), ((3022, 3028), 'time.time', 'time', ([], {}), '()\n', (3026, 3028), False, 'from time import time\n'), ((373, 391), 'IEEE_cdf.read', 'cdf.read', (['filename'], {}), '(filename)\n', (381, 391), True, 'import IEEE_cdf as cdf\n'), ((429, 462), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['mat_admitancia'], {}), '(mat_admitancia)\n', (446, 462), True, 'import scipy.sparse as sparse\n'), ((694, 736), 'numpy.append', 'np.append', (['self.P_Q_inj', 'delta_PQ[1:].imag'], {}), '(self.P_Q_inj, delta_PQ[1:].imag)\n', (703, 736), True, 'import numpy as np\n'), ((1001, 1051), 'jacobian.jacobian', 'jacobian', (['self.Y', 'x', 'self.swing_bus', 'self.last_P_Q'], {}), '(self.Y, x, self.swing_bus, self.last_P_Q)\n', (1009, 1051), False, 'from jacobian import jacobian\n'), ((1309, 1323), 'P_Q.P_Q', 'P_Q', (['self.Y', 'x'], {}), '(self.Y, x)\n', (1312, 1323), False, 'from P_Q import P_Q\n'), ((1546, 1565), 'numpy.append', 'np.append', (['theta', 'v'], {}), '(theta, v)\n', (1555, 1565), True, 'import numpy as np\n'), ((2657, 2677), 'scipy.sparse.issparse', 'sparse.issparse', (['mat'], {}), '(mat)\n', (2672, 2677), True, 'import scipy.sparse as sparse\n'), ((2750, 2771), 'matplotlib.pyplot.matshow', 'plt.matshow', (['mat_plot'], {}), '(mat_plot)\n', (2761, 2771), True, 'import matplotlib.pyplot as plt\n'), ((2780, 2790), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2788, 2790), True, 'import matplotlib.pyplot as plt\n'), ((1467, 1486), 'numpy.ones', 'np.ones', (['(self.n - 1)'], {}), '(self.n - 1)\n', (1474, 1486), True, 'import numpy as np\n'), ((1515, 1534), 'numpy.ones', 'np.ones', (['(self.n - 1)'], {}), '(self.n - 1)\n', (1522, 1534), True, 'import numpy as np\n'), ((2223, 2257), 'numpy.delete', 'np.delete', (['rJ', 'filas_a_eliminar', '(0)'], {}), '(rJ, filas_a_eliminar, 0)\n', (2232, 2257), True, 'import numpy as np\n'), ((2275, 2312), 'numpy.delete', 
'np.delete', (['rJ', 'columnas_a_eliminar', '(1)'], {}), '(rJ, columnas_a_eliminar, 1)\n', (2284, 2312), True, 'import numpy as np\n'), ((2424, 2460), 'numpy.delete', 'np.delete', (['func', 'columnas_a_eliminar'], {}), '(func, columnas_a_eliminar)\n', (2433, 2460), True, 'import numpy as np\n'), ((2477, 2512), 'numpy.linalg.solve', 'np.linalg.solve', (['rJ', '(-func_reducido)'], {}), '(rJ, -func_reducido)\n', (2492, 2512), True, 'import numpy as np\n')] |
import numpy
import scipy.interpolate
import scipy.ndimage
import matplotlib.pyplot
import matplotlib.patches
import logging
def parseSpeedFlowsToCongestions(speeds, flows, speedThreshold, flowThreshold):
    """Convert raw speed/flow measurements into congestion values.

    Only the speed component is currently used: congestion = speed / speedThreshold.
    The flow term is kept in the signature (and commented out) for future use.
    """
    logging.debug("Starting parseSpeedFlowsToCongestions()")
    scaled = speeds / speedThreshold  # + flows / flowThreshold
    logging.debug("Ending parseSpeedFlowsToCongestions()")
    return scaled
def interpolateMissingValues(congestions):
    """Fill NaN entries of the 2-D congestion matrix by cubic interpolation.

    The valid (finite) cells are treated as scattered samples and a cubic
    interpolant is evaluated over the full grid.
    """
    logging.debug("Starting interpolateMissingValues()")
    cols = numpy.arange(0, congestions.shape[1])
    rows = numpy.arange(0, congestions.shape[0])
    masked = numpy.ma.masked_invalid(congestions)
    grid_x, grid_y = numpy.meshgrid(cols, rows)
    # Coordinates and values of the cells that are not NaN/Inf.
    known_x = grid_x[~masked.mask]
    known_y = grid_y[~masked.mask]
    known_values = congestions[~masked.mask]
    congestions = scipy.interpolate.griddata(
        (known_x, known_y), known_values.ravel(), (grid_x, grid_y), method="cubic")
    logging.debug("Ending interpolateMissingValues()")
    return congestions
def applySmoothingFilter(congestions, spaceSmoothing, timeSmoothing):
    """Smooth the congestion matrix with a uniform (moving-average) filter.

    Args:
        congestions: 2-D array of congestion values.
        spaceSmoothing: filter window size along axis 0.
        timeSmoothing: filter window size along axis 1.

    Returns:
        The smoothed 2-D array.
    """
    logging.debug("Starting applySmoothingFilter()")
    # congestions = scipy.ndimage.gaussian_filter(congestions, 5)
    # FIX: scipy.ndimage.filters is a deprecated alias (removed in SciPy 1.15);
    # the public function lives directly in scipy.ndimage.
    congestions = scipy.ndimage.uniform_filter(congestions, [spaceSmoothing, timeSmoothing])
    logging.debug("Ending applySmoothingFilter()")
    return congestions
def addBoundaries(ax, patch):  # TODO: move to docs/utils?
    """Draw a red hatched rectangle outlining *patch* on the axes *ax*.

    The rectangle's anchor swaps the patch's x/y (presumably to match the
    row/column orientation of the image plot -- verify against callers).
    """
    outline = matplotlib.patches.Rectangle(
        (patch.getYStart() - 0.5, patch.getXStart() - 0.5),
        patch.yLength(),
        patch.xLength(),
        linewidth=1,
        edgecolor="r",
        hatch="//",
        facecolor="none")
    ax.add_patch(outline)
def plotCongestionsWithPatches(congestions, patches):  # TODO: move to docs/utils?
    """Render the congestion matrix and overlay each patch's boundary rectangle."""
    figure, axis = matplotlib.pyplot.subplots(1)
    axis.imshow(congestions, aspect="auto")
    for current in patches:
        addBoundaries(axis, current)
    matplotlib.pyplot.show()
| [
"numpy.ma.masked_invalid",
"numpy.meshgrid",
"logging.debug",
"numpy.arange"
] | [((220, 276), 'logging.debug', 'logging.debug', (['"""Starting parseSpeedFlowsToCongestions()"""'], {}), "('Starting parseSpeedFlowsToCongestions()')\n", (233, 276), False, 'import logging\n'), ((352, 406), 'logging.debug', 'logging.debug', (['"""Ending parseSpeedFlowsToCongestions()"""'], {}), "('Ending parseSpeedFlowsToCongestions()')\n", (365, 406), False, 'import logging\n'), ((484, 536), 'logging.debug', 'logging.debug', (['"""Starting interpolateMissingValues()"""'], {}), "('Starting interpolateMissingValues()')\n", (497, 536), False, 'import logging\n'), ((546, 583), 'numpy.arange', 'numpy.arange', (['(0)', 'congestions.shape[1]'], {}), '(0, congestions.shape[1])\n', (558, 583), False, 'import numpy\n'), ((593, 630), 'numpy.arange', 'numpy.arange', (['(0)', 'congestions.shape[0]'], {}), '(0, congestions.shape[0])\n', (605, 630), False, 'import numpy\n'), ((654, 690), 'numpy.ma.masked_invalid', 'numpy.ma.masked_invalid', (['congestions'], {}), '(congestions)\n', (677, 690), False, 'import numpy\n'), ((705, 725), 'numpy.meshgrid', 'numpy.meshgrid', (['x', 'y'], {}), '(x, y)\n', (719, 725), False, 'import numpy\n'), ((972, 1022), 'logging.debug', 'logging.debug', (['"""Ending interpolateMissingValues()"""'], {}), "('Ending interpolateMissingValues()')\n", (985, 1022), False, 'import logging\n'), ((1127, 1175), 'logging.debug', 'logging.debug', (['"""Starting applySmoothingFilter()"""'], {}), "('Starting applySmoothingFilter()')\n", (1140, 1175), False, 'import logging\n'), ((1358, 1404), 'logging.debug', 'logging.debug', (['"""Ending applySmoothingFilter()"""'], {}), "('Ending applySmoothingFilter()')\n", (1371, 1404), False, 'import logging\n')] |
from CubeSolver import CubeSolver
import numpy as np
# disposition = np.array([[['U', 'G', 'Y'],
# ['U', 'W', 'O'],
# ['R', 'Y', 'W']],
# [['G', 'G', 'U'],
# ['Y', 'R', 'G'],
# ['O', 'Y', 'U']],
# [['R', 'R', 'O'],
# ['W', 'G', 'O'],
# ['O', 'Y', 'R']],
# [['G', 'G', 'Y'],
# ['W', 'O', 'O'],
# ['G', 'U', 'W']],
# [['R', 'U', 'Y'],
# ['U', 'U', 'R'],
# ['U', 'R', 'W']],
# [['G', 'R', 'W'],
# ['W', 'Y', 'W'],
# ['O', 'O', 'Y']]
# ])
# The cube state: six 3x3 faces of sticker-color initials, given in the
# order white, red, green, orange, blue, yellow (see the prints below).
disposition = np.array([[['G', 'U', 'W'],
                         ['Y', 'W', 'Y'],
                         ['U', 'G', 'Y']],
                        [['R', 'R', 'O'],
                         ['O', 'R', 'U'],
                         ['O', 'O', 'O']],
                        [['G', 'G', 'Y'],
                         ['Y', 'G', 'W'],
                         ['U', 'O', 'R']],
                        [['R', 'O', 'U'],
                         ['U', 'O', 'R'],
                         ['U', 'R', 'R']],
                        [['Y', 'G', 'G'],
                         ['U', 'U', 'G'],
                         ['W', 'R', 'Y']],
                        [['O', 'W', 'G'],
                         ['Y', 'Y', 'W'],
                         ['W', 'W', 'W']]
                        ])
# The sample shown to the user is the very same configuration as above.
sample_input = disposition.copy()
print('input must be a numpy array and contain faces in this order: white, red, green, orange, blue, yellow')
print(sample_input)
print('white face input considered as if you had red face on top')
print('yellow face input considered as if you had orange face on top')
print('the other faces are considered as if you had yellow face on top')
print('while you solve the cube, you should look to the red face with yellow face on top')
print('F = move front (red) face clockwise, F1 = move front (red) face anticlockwise')
print('R = move right (green) face clockwise, R1 = move right (green) face anticlockwise')
print('B = move back (orange) face clockwise, B1 = move back (orange) face anticlockwise')
print('L = move left (blue) face clockwise, L1 = move left (blue) face anticlockwise')
print('U = move up (yellow) face clockwise, U1 = move up (yellow) face anticlockwise')
print('D = move down (white) face clockwise, D1 = move down (white) face anticlockwise')
print('\n\n')
solver = CubeSolver(disposition)
print(solver.mover.moves)
print(solver.mover.cube)
print(solver.mover.initial_cube)
| [
"numpy.array",
"CubeSolver.CubeSolver"
] | [((895, 1239), 'numpy.array', 'np.array', (["[[['G', 'U', 'W'], ['Y', 'W', 'Y'], ['U', 'G', 'Y']], [['R', 'R', 'O'], [\n 'O', 'R', 'U'], ['O', 'O', 'O']], [['G', 'G', 'Y'], ['Y', 'G', 'W'], [\n 'U', 'O', 'R']], [['R', 'O', 'U'], ['U', 'O', 'R'], ['U', 'R', 'R']], [\n ['Y', 'G', 'G'], ['U', 'U', 'G'], ['W', 'R', 'Y']], [['O', 'W', 'G'], [\n 'Y', 'Y', 'W'], ['W', 'W', 'W']]]"], {}), "([[['G', 'U', 'W'], ['Y', 'W', 'Y'], ['U', 'G', 'Y']], [['R', 'R',\n 'O'], ['O', 'R', 'U'], ['O', 'O', 'O']], [['G', 'G', 'Y'], ['Y', 'G',\n 'W'], ['U', 'O', 'R']], [['R', 'O', 'U'], ['U', 'O', 'R'], ['U', 'R',\n 'R']], [['Y', 'G', 'G'], ['U', 'U', 'G'], ['W', 'R', 'Y']], [['O', 'W',\n 'G'], ['Y', 'Y', 'W'], ['W', 'W', 'W']]])\n", (903, 1239), True, 'import numpy as np\n'), ((1685, 2029), 'numpy.array', 'np.array', (["[[['G', 'U', 'W'], ['Y', 'W', 'Y'], ['U', 'G', 'Y']], [['R', 'R', 'O'], [\n 'O', 'R', 'U'], ['O', 'O', 'O']], [['G', 'G', 'Y'], ['Y', 'G', 'W'], [\n 'U', 'O', 'R']], [['R', 'O', 'U'], ['U', 'O', 'R'], ['U', 'R', 'R']], [\n ['Y', 'G', 'G'], ['U', 'U', 'G'], ['W', 'R', 'Y']], [['O', 'W', 'G'], [\n 'Y', 'Y', 'W'], ['W', 'W', 'W']]]"], {}), "([[['G', 'U', 'W'], ['Y', 'W', 'Y'], ['U', 'G', 'Y']], [['R', 'R',\n 'O'], ['O', 'R', 'U'], ['O', 'O', 'O']], [['G', 'G', 'Y'], ['Y', 'G',\n 'W'], ['U', 'O', 'R']], [['R', 'O', 'U'], ['U', 'O', 'R'], ['U', 'R',\n 'R']], [['Y', 'G', 'G'], ['U', 'U', 'G'], ['W', 'R', 'Y']], [['O', 'W',\n 'G'], ['Y', 'Y', 'W'], ['W', 'W', 'W']]])\n", (1693, 2029), True, 'import numpy as np\n'), ((3466, 3489), 'CubeSolver.CubeSolver', 'CubeSolver', (['disposition'], {}), '(disposition)\n', (3476, 3489), False, 'from CubeSolver import CubeSolver\n')] |
import matplotlib
matplotlib.use('Agg') #display backend
import matplotlib.pyplot as plt
import numpy as np
import os
from scipy.spatial import KDTree
import scipy.stats as st
from scipy.optimize import curve_fit as cu
from astropy.io import fits
import astropy.cosmology as co
from legacyanalysis.pathnames import get_indir,get_outdir,make_dir
# Compares repeat-observation photometry in the COSMOS field: if flux errors
# are well calibrated, normalized flux differences should follow N(0, 1).
indir= get_indir('cosmos')
#CREATES THE CATALOG LIST
catList = ["catalog-R3-R4.fits",
           "catalog-R2-R4.fits",
           "catalog-R2-R3.fits",
           "catalog-R1-R4.fits",
           "catalog-R1-R3.fits",
           "catalog-R1-R2.fits"]
for cnt,cat in enumerate(catList): catList[cnt]= os.path.join(indir,cat)
# EACH CATALOG NEEDS TO HAVE THE TYPICAL DECALS CATALOG ENTRIES WITH "_1" AND "_2" APPENDED FOR DR2 and DR3
# DEFINES THE GAUSSIAN FUNCTION
gfun = lambda x, m0, s0 : st.norm.pdf(x,loc=m0,scale=s0)
#OPENS A FILE TO WRITE OUTPUTS
f=open(os.path.join(get_outdir('cosmos'),"depth-comparisonp.txt"),"w")
f.write("20<g<21.5 \n")
# CREATES A FIGURE
plt.figure(2,(5,5))
plt.axes([0.17,0.15,0.75,0.75])
# PLOT THE EXPECTED NORMAL DISTRIBUTION
plt.plot(np.arange(-10,6,0.1), st.norm.pdf(np.arange(-10,6,0.1),loc=0,scale=1), 'k--', lw=2, label='N(0,1)')
# LOOPS OVER MATCHED CATALOGS
for ii, el in enumerate(catList):
    hdu=fits.open(el)
    dr2=hdu[1].data
    # DEFINES MAGNITUDES TO SELECT A MAGNITUDE BIN
    # decam_flux columns are indexed by band (1=g, 2=r, 4=z); fluxes are
    # corrected by the Milky Way transmission before conversion to magnitudes.
    g_mag_dr2 = 22.5 - 2.5 * np.log10(dr2['decam_flux_2'].T[1] / dr2['decam_mw_transmission_2'].T[1])
    r_mag_dr2 = 22.5 - 2.5 * np.log10(dr2['decam_flux_2'].T[2] / dr2['decam_mw_transmission_2'].T[2])
    z_mag_dr2 = 22.5 - 2.5 * np.log10(dr2['decam_flux_2'].T[4] / dr2['decam_mw_transmission_2'].T[4])
    # SELECT A POPULATION OF SOURCES
    sel = (dr2['type_2'] == "PSF")&(g_mag_dr2>20)&(g_mag_dr2<21.5)
    # COMPARES THE PHOTOMETRIC OUTPUTS
    # df_g is the g-band flux difference between the two reductions; sigma_g
    # is 1/sqrt(var_1 + var_2), so df_g * sigma_g is the normalized residual.
    df_g = dr2[sel]['decam_flux_1'].T[1] / dr2[sel]['decam_mw_transmission_1'].T[1] - dr2[sel]['decam_flux_2'].T[1] / dr2[sel]['decam_mw_transmission_2'].T[1]
    sigma_g = (1./dr2[sel]['decam_flux_ivar_1'].T[1] + 1./dr2[sel]['decam_flux_ivar_2'].T[1])**(-0.5)
    # CREATES THE HISTOGRAM
    area=1 #plot is normalized so does not matter
    # NOTE(review): plt.hist's 'normed' kwarg was removed in matplotlib 3.x;
    # newer matplotlib requires 'density=True' instead.
    nnn,bbb, ppp=plt.hist(df_g * sigma_g, bins=np.arange(-4,4.5,0.25), weights = np.ones_like(df_g)/area, histtype='step', label=str(ii), normed=True)
    #FITS A GAUSSIAN TO THE HISTOGRAM AND WRITES THE OUTPUT
    out = cu(gfun,(bbb[1:]+bbb[:-1])/2.,nnn,p0=(0,1))
    f.write(el + '\n')
    f.write(str(ii) + " " + str(out[0]))
    f.write('\n')
plt.xlabel('(g(Ri)-g(FD))/sqrt(var_g(Ri) + var_g(FD))')
plt.ylabel('Normed counts')
plt.xlim((-4,4))
plt.ylim((0,0.7))
gp = plt.legend(loc=2, fontsize=10)
gp.set_frame_on(False)
plt.title('20<g<21.5 type PSF')
plt.grid()
#SAVES THE PLOT AND CLOSES THE FILE WHERE THINGS WERE WRITTEN.
path=os.path.join(get_outdir('cosmos'),"plotsRc")
make_dir(path)
plt.savefig(os.path.join(path, "comparison-depth-normed-g-20-215-cosmos.png"))
plt.clf()
f.close()
print('finished comparison: cosmos')
| [
"matplotlib.pyplot.grid",
"numpy.log10",
"matplotlib.pyplot.ylabel",
"astropy.io.fits.open",
"legacyanalysis.pathnames.get_outdir",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylim",
"matplotlib.use",
"legacyanalysis.pathnames.get_indir",
"matplotlib.pyplot.axes",
"scipy.sta... | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((353, 372), 'legacyanalysis.pathnames.get_indir', 'get_indir', (['"""cosmos"""'], {}), "('cosmos')\n", (362, 372), False, 'from legacyanalysis.pathnames import get_indir, get_outdir, make_dir\n'), ((960, 981), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)', '(5, 5)'], {}), '(2, (5, 5))\n', (970, 981), True, 'import matplotlib.pyplot as plt\n'), ((980, 1014), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.17, 0.15, 0.75, 0.75]'], {}), '([0.17, 0.15, 0.75, 0.75])\n', (988, 1014), True, 'import matplotlib.pyplot as plt\n'), ((2482, 2537), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""(g(Ri)-g(FD))/sqrt(var_g(Ri) + var_g(FD))"""'], {}), "('(g(Ri)-g(FD))/sqrt(var_g(Ri) + var_g(FD))')\n", (2492, 2537), True, 'import matplotlib.pyplot as plt\n'), ((2538, 2565), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Normed counts"""'], {}), "('Normed counts')\n", (2548, 2565), True, 'import matplotlib.pyplot as plt\n'), ((2566, 2583), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-4, 4)'], {}), '((-4, 4))\n', (2574, 2583), True, 'import matplotlib.pyplot as plt\n'), ((2583, 2601), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 0.7)'], {}), '((0, 0.7))\n', (2591, 2601), True, 'import matplotlib.pyplot as plt\n'), ((2606, 2636), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)', 'fontsize': '(10)'}), '(loc=2, fontsize=10)\n', (2616, 2636), True, 'import matplotlib.pyplot as plt\n'), ((2660, 2691), 'matplotlib.pyplot.title', 'plt.title', (['"""20<g<21.5 type PSF"""'], {}), "('20<g<21.5 type PSF')\n", (2669, 2691), True, 'import matplotlib.pyplot as plt\n'), ((2692, 2702), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2700, 2702), True, 'import matplotlib.pyplot as plt\n'), ((2817, 2831), 'legacyanalysis.pathnames.make_dir', 'make_dir', (['path'], {}), '(path)\n', (2825, 2831), False, 'from legacyanalysis.pathnames import 
get_indir, get_outdir, make_dir\n'), ((2911, 2920), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2918, 2920), True, 'import matplotlib.pyplot as plt\n'), ((592, 616), 'os.path.join', 'os.path.join', (['indir', 'cat'], {}), '(indir, cat)\n', (604, 616), False, 'import os\n'), ((784, 816), 'scipy.stats.norm.pdf', 'st.norm.pdf', (['x'], {'loc': 'm0', 'scale': 's0'}), '(x, loc=m0, scale=s0)\n', (795, 816), True, 'import scipy.stats as st\n'), ((1061, 1083), 'numpy.arange', 'np.arange', (['(-10)', '(6)', '(0.1)'], {}), '(-10, 6, 0.1)\n', (1070, 1083), True, 'import numpy as np\n'), ((1234, 1247), 'astropy.io.fits.open', 'fits.open', (['el'], {}), '(el)\n', (1243, 1247), False, 'from astropy.io import fits\n'), ((2351, 2403), 'scipy.optimize.curve_fit', 'cu', (['gfun', '((bbb[1:] + bbb[:-1]) / 2.0)', 'nnn'], {'p0': '(0, 1)'}), '(gfun, (bbb[1:] + bbb[:-1]) / 2.0, nnn, p0=(0, 1))\n', (2353, 2403), True, 'from scipy.optimize import curve_fit as cu\n'), ((2785, 2805), 'legacyanalysis.pathnames.get_outdir', 'get_outdir', (['"""cosmos"""'], {}), "('cosmos')\n", (2795, 2805), False, 'from legacyanalysis.pathnames import get_indir, get_outdir, make_dir\n'), ((2844, 2909), 'os.path.join', 'os.path.join', (['path', '"""comparison-depth-normed-g-20-215-cosmos.png"""'], {}), "(path, 'comparison-depth-normed-g-20-215-cosmos.png')\n", (2856, 2909), False, 'import os\n'), ((866, 886), 'legacyanalysis.pathnames.get_outdir', 'get_outdir', (['"""cosmos"""'], {}), "('cosmos')\n", (876, 886), False, 'from legacyanalysis.pathnames import get_indir, get_outdir, make_dir\n'), ((1095, 1117), 'numpy.arange', 'np.arange', (['(-10)', '(6)', '(0.1)'], {}), '(-10, 6, 0.1)\n', (1104, 1117), True, 'import numpy as np\n'), ((1352, 1424), 'numpy.log10', 'np.log10', (["(dr2['decam_flux_2'].T[1] / dr2['decam_mw_transmission_2'].T[1])"], {}), "(dr2['decam_flux_2'].T[1] / dr2['decam_mw_transmission_2'].T[1])\n", (1360, 1424), True, 'import numpy as np\n'), ((1454, 1526), 'numpy.log10', 'np.log10', 
(["(dr2['decam_flux_2'].T[2] / dr2['decam_mw_transmission_2'].T[2])"], {}), "(dr2['decam_flux_2'].T[2] / dr2['decam_mw_transmission_2'].T[2])\n", (1462, 1526), True, 'import numpy as np\n'), ((1556, 1628), 'numpy.log10', 'np.log10', (["(dr2['decam_flux_2'].T[4] / dr2['decam_mw_transmission_2'].T[4])"], {}), "(dr2['decam_flux_2'].T[4] / dr2['decam_mw_transmission_2'].T[4])\n", (1564, 1628), True, 'import numpy as np\n'), ((2173, 2197), 'numpy.arange', 'np.arange', (['(-4)', '(4.5)', '(0.25)'], {}), '(-4, 4.5, 0.25)\n', (2182, 2197), True, 'import numpy as np\n'), ((2207, 2225), 'numpy.ones_like', 'np.ones_like', (['df_g'], {}), '(df_g)\n', (2219, 2225), True, 'import numpy as np\n')] |
import glob
import os
from typing import List, Callable
import cv2
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import to_rgb
from scipy.stats import wasserstein_distance
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from image_clustering.tiler import GridTiler
from mydeep_api.tensor import Tensor
# A TagComputer maps an image tile to an integer cluster tag.
TagComputer = Callable[[Tensor], int]
# A HistComputer maps an image tile to a histogram feature vector.
HistComputer = Callable[[Tensor], Tensor]
class Params(object):
    """Hyper-parameters for tile-based image clustering."""

    def __init__(self, bins: int = 64, pca_components: int = 64, tile_size: int = 64):
        """
        :param bins: number of histogram bins per channel
        :param pca_components: number of PCA components kept before clustering
        :param tile_size: side length in pixels of the square tiles
        """
        self.bins = bins
        self.pca_components = pca_components
        self.tiler = GridTiler(tile_size=tile_size)

    def hist_computer(self, img: Tensor):
        """Return the concatenated, L2-normalized histograms of img[2]/img[1]/img[0].

        NOTE(review): ``img[2]`` etc. index the FIRST axis (image rows), not
        the color channels; channel histograms would be ``img[:, :, c]``.
        Kept as-is to preserve existing behavior -- confirm the intent.
        """
        # FIX: the first histogram was computed twice in the original; the
        # duplicated (idempotent) line was removed.
        r, _ = np.histogram(img[2], bins=self.bins, range=[0, 256])
        r = r / np.linalg.norm(r)
        g, _ = np.histogram(img[1], bins=self.bins, range=[0, 256])
        g = g / np.linalg.norm(g)
        b, _ = np.histogram(img[0], bins=self.bins, range=[0, 256])
        b = b / np.linalg.norm(b)
        return np.hstack((r, g, b))
class ClusterTagComputer(TagComputer):
    """Tags a tile with the index of its closest reference cluster.

    Reference clusters are read from ``path``: one sub-directory of PNG tiles
    per cluster, each tile reduced to a histogram feature via *hist_computer*.
    """
    def __init__(self, path: str, hist_computer: HistComputer):
        self.hist_computer = hist_computer
        # One list of histogram features per sub-directory (= per cluster).
        self.clusters = [
            [hist_computer(cv2.imread(img_path)) for img_path in glob.glob('{}/{}/*.png'.format(path, _))]
            for _ in os.listdir(path)
        ]
        self.stats()
    def stats(self):
        """Visualize each cluster histogram as a synthetic sorted grayscale image.

        NOTE(review): plt.show() is never called here, so nothing is rendered
        unless a figure is displayed elsewhere -- confirm this is intentional.
        """
        for _ in self.clusters:
            for c in _:
                bins = np.array(range(len(c)))
                # Treat the histogram as a sampling distribution over bin indices.
                prob = c / np.sum(c)
                image = np.sort(np.random.choice(bins, size=128 * 128, replace=True, p=prob)).reshape((128, 128))
                plt.imshow(image, 'gray')
    def __call__(self, data: Tensor):
        """Return the index of the cluster with the smallest Wasserstein distance."""
        hist = self.hist_computer(data)
        # Distance to a cluster = distance to its nearest member histogram.
        d2 = [min([wasserstein_distance(hist, _) for _ in c]) for c in self.clusters]
        return int(np.argmin(d2))
class KmeanTagComputer(TagComputer):
    """Tags a tile with its KMeans cluster, fitted on tile histograms after PCA."""
    def __init__(self, p: Params, images: List[str], cluster_number: int):
        self.hist_computer = p.hist_computer
        self.model = KMeans(n_clusters=cluster_number, n_init=20)
        # Training set: one histogram feature per 32x32 tile of every input image.
        dataset = []
        for _ in images:
            img = cv2.imread(_)
            boxes = GridTiler(tile_size=32).tiles(img.shape[:2])
            histograms = [p.hist_computer(box.cut(img)) for box in boxes]
            dataset.extend(histograms)
        # Reduce dimensionality with PCA, then cluster with KMeans.
        self.pipeline = Pipeline(steps=[
            ('pca', PCA(n_components=p.pca_components)),
            ('clustering', self.model),
        ])
        self.pipeline.fit(dataset)
        # self.stats()
    def stats(self):
        """Visualize each KMeans centroid as a synthetic sorted grayscale image."""
        # Shift/scale the centroids so the sampling weights below are
        # non-negative (assumes components are >= -1 -- verify).
        centers = (self.model.cluster_centers_ + 1) / 2
        for c in centers:
            bins = np.array(range(len(c))) * 4
            prob = c / np.sum(c)
            image = np.sort(np.random.choice(bins, size=128 * 128, replace=True, p=prob)).reshape((128, 128))
            plt.imshow(image, 'gray')
    def __call__(self, data: Tensor):
        """Return the fitted pipeline's cluster label for *data*'s histogram."""
        hist = self.hist_computer(data)
        return self.pipeline.predict([hist])[0]
def tile_clustering(img: Tensor, tag_computer: TagComputer, tiler: GridTiler):
    """Return a copy of *img* with each tile outlined in its cluster's color."""
    annotated = img.copy()
    margin = 8
    for tile in tiler.tiles(img.shape[:2]):
        tag = tag_computer(tile.cut(img))
        color = tuple(256 * channel for channel in to_rgb(COLORS[tag]))
        top_left = (tile.left + margin, tile.top + margin)
        bottom_right = (tile.right - margin, tile.bottom - margin)
        cv2.rectangle(annotated, top_left, bottom_right, color, 2)
    return annotated
# Palette for tile outlines; the integer cluster tag indexes into this list.
COLORS = ['red', 'blue', 'green', 'white', 'yellow',
          'orange', 'purple', 'cyan', 'magenta', 'gray']
# Global clustering parameters shared by the script below.
P = Params(
    bins=128,
    pca_components=128,
    tile_size=128
)
if __name__ == '__main__':
    dataset = 'cs'
    # Collect the exported images for the chosen dataset.
    images = glob.glob('../tests/20190802_export_s2_it1/{}/*_?.png'.format(dataset))
    model_tag_computer = KmeanTagComputer(P, images, cluster_number=4)
    cluster_tag_computer = ClusterTagComputer('../image_editor/tiles', P.hist_computer)
    os.makedirs(dataset, exist_ok=True)
    for _ in images:
        name = os.path.basename(_).replace('.tif', '')
        img = cv2.imread(_)
        # Annotate with the KMeans-based tagger and save the result.
        img1 = tile_clustering(img, model_tag_computer, P.tiler)
        cv2.imwrite('{}/{}_model.png'.format(dataset, name), img1)
        # img2 = tile_clustering(img, cluster_tag_computer, P.tiler)
        # cv2.imwrite('{}/{}_clusters.png'.format(dataset, name), img2)
| [
"sklearn.cluster.KMeans",
"matplotlib.pyplot.imshow",
"numpy.histogram",
"os.listdir",
"os.makedirs",
"numpy.hstack",
"sklearn.decomposition.PCA",
"numpy.random.choice",
"numpy.sum",
"scipy.stats.wasserstein_distance",
"image_clustering.tiler.GridTiler",
"os.path.basename",
"numpy.linalg.nor... | [((4043, 4078), 'os.makedirs', 'os.makedirs', (['dataset'], {'exist_ok': '(True)'}), '(dataset, exist_ok=True)\n', (4054, 4078), False, 'import os\n'), ((678, 708), 'image_clustering.tiler.GridTiler', 'GridTiler', ([], {'tile_size': 'tile_size'}), '(tile_size=tile_size)\n', (687, 708), False, 'from image_clustering.tiler import GridTiler\n'), ((767, 819), 'numpy.histogram', 'np.histogram', (['img[2]'], {'bins': 'self.bins', 'range': '[0, 256]'}), '(img[2], bins=self.bins, range=[0, 256])\n', (779, 819), True, 'import numpy as np\n'), ((835, 887), 'numpy.histogram', 'np.histogram', (['img[2]'], {'bins': 'self.bins', 'range': '[0, 256]'}), '(img[2], bins=self.bins, range=[0, 256])\n', (847, 887), True, 'import numpy as np\n'), ((937, 989), 'numpy.histogram', 'np.histogram', (['img[1]'], {'bins': 'self.bins', 'range': '[0, 256]'}), '(img[1], bins=self.bins, range=[0, 256])\n', (949, 989), True, 'import numpy as np\n'), ((1039, 1091), 'numpy.histogram', 'np.histogram', (['img[0]'], {'bins': 'self.bins', 'range': '[0, 256]'}), '(img[0], bins=self.bins, range=[0, 256])\n', (1051, 1091), True, 'import numpy as np\n'), ((1141, 1161), 'numpy.hstack', 'np.hstack', (['(r, g, b)'], {}), '((r, g, b))\n', (1150, 1161), True, 'import numpy as np\n'), ((2209, 2253), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'cluster_number', 'n_init': '(20)'}), '(n_clusters=cluster_number, n_init=20)\n', (2215, 2253), False, 'from sklearn.cluster import KMeans\n'), ((4169, 4182), 'cv2.imread', 'cv2.imread', (['_'], {}), '(_)\n', (4179, 4182), False, 'import cv2\n'), ((904, 921), 'numpy.linalg.norm', 'np.linalg.norm', (['r'], {}), '(r)\n', (918, 921), True, 'import numpy as np\n'), ((1006, 1023), 'numpy.linalg.norm', 'np.linalg.norm', (['g'], {}), '(g)\n', (1020, 1023), True, 'import numpy as np\n'), ((1108, 1125), 'numpy.linalg.norm', 'np.linalg.norm', (['b'], {}), '(b)\n', (1122, 1125), True, 'import numpy as np\n'), ((2014, 2027), 'numpy.argmin', 'np.argmin', 
(['d2'], {}), '(d2)\n', (2023, 2027), True, 'import numpy as np\n'), ((2319, 2332), 'cv2.imread', 'cv2.imread', (['_'], {}), '(_)\n', (2329, 2332), False, 'import cv2\n'), ((3025, 3050), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image', '"""gray"""'], {}), "(image, 'gray')\n", (3035, 3050), True, 'import matplotlib.pyplot as plt\n'), ((1464, 1480), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1474, 1480), False, 'import os\n'), ((1804, 1829), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image', '"""gray"""'], {}), "(image, 'gray')\n", (1814, 1829), True, 'import matplotlib.pyplot as plt\n'), ((2893, 2902), 'numpy.sum', 'np.sum', (['c'], {}), '(c)\n', (2899, 2902), True, 'import numpy as np\n'), ((4115, 4134), 'os.path.basename', 'os.path.basename', (['_'], {}), '(_)\n', (4131, 4134), False, 'import os\n'), ((1363, 1383), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (1373, 1383), False, 'import cv2\n'), ((1664, 1673), 'numpy.sum', 'np.sum', (['c'], {}), '(c)\n', (1670, 1673), True, 'import numpy as np\n'), ((1928, 1957), 'scipy.stats.wasserstein_distance', 'wasserstein_distance', (['hist', '_'], {}), '(hist, _)\n', (1948, 1957), False, 'from scipy.stats import wasserstein_distance\n'), ((2353, 2376), 'image_clustering.tiler.GridTiler', 'GridTiler', ([], {'tile_size': '(32)'}), '(tile_size=32)\n', (2362, 2376), False, 'from image_clustering.tiler import GridTiler\n'), ((2573, 2607), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'p.pca_components'}), '(n_components=p.pca_components)\n', (2576, 2607), False, 'from sklearn.decomposition import PCA\n'), ((2931, 2991), 'numpy.random.choice', 'np.random.choice', (['bins'], {'size': '(128 * 128)', 'replace': '(True)', 'p': 'prob'}), '(bins, size=128 * 128, replace=True, p=prob)\n', (2947, 2991), True, 'import numpy as np\n'), ((3523, 3543), 'matplotlib.colors.to_rgb', 'to_rgb', (['COLORS[flag]'], {}), '(COLORS[flag])\n', (3529, 3543), False, 'from matplotlib.colors import 
to_rgb\n'), ((1706, 1766), 'numpy.random.choice', 'np.random.choice', (['bins'], {'size': '(128 * 128)', 'replace': '(True)', 'p': 'prob'}), '(bins, size=128 * 128, replace=True, p=prob)\n', (1722, 1766), True, 'import numpy as np\n')] |
"""Functions for reading and writing XDMF files."""
import logging
import os
from copy import deepcopy
import h5py
import lxml.etree as etree
import numpy as np
from mocmg.mesh import GridMesh, Mesh
module_log = logging.getLogger(__name__)
# numpy dtype name -> (XDMF DataType, precision in bytes).
numpy_to_xdmf_dtype = {
    "int32": ("Int", "4"),
    "int64": ("Int", "8"),
    "uint32": ("UInt", "4"),
    "uint64": ("UInt", "8"),
    "float32": ("Float", "4"),
    "float64": ("Float", "8"),
}
# mocmg topology name -> accepted XDMF topology type strings.
topo_to_xdmf_type = {
    "quad": ["Quadrilateral"],
    "quad8": ["Quadrilateral_8", "Quad_8"],
    "triangle": ["Triangle"],
    "triangle6": ["Triangle_6", "Tri_6"],
}
# XDMF mixed-topology integer code -> mocmg topology name.
xdmf_int_to_topo_type = {
    4: "triangle",
    5: "quad",
    36: "triangle6",
    37: "quad8",
}
# Inverse mapping: mocmg topology name -> XDMF integer code.
topo_type_to_xdmf_int = {v: k for k, v in xdmf_int_to_topo_type.items()}
def write_xdmf_file(filename, mesh, split_level=None, material_name_map=None, compression_opts=4):
    """Write a mesh object into an XDMF file.
    Note that if a mesh has any materials, it is assumed that every cell has a material.
    A :class:`mocmg.mesh.Mesh` is written as one uniform grid without a mesh hierarchy.
    A :class:`mocmg.mesh.GridMesh` is assumed to be partitioned by cell sets of the 'GRID' form.
    The GridMesh is written in the XDMF as a tree of the child meshes.
    See the GridMesh docstring for more info.
    Args:
        filename (str) : File name of the form 'name.xdmf'.
        mesh (mocmg.mesh.Mesh) : The mesh object to save as an XDMF file.
        split_level (int, optional) : Split the mesh into different files based on grid level provided.
        material_name_map (dict, optional) : Map from material name to global integer ID.
            Generated automatically when omitted and the mesh has materials.
        compression_opts (int, optional) : Compression level. May be an integer from 0 to 9, default is 4.
    """
    module_log.require(isinstance(mesh, Mesh), "Invalid type given as input.")
    # Build a consistent global material numbering unless the caller supplied one.
    if material_name_map is None and (isinstance(mesh, GridMesh) or mesh.cell_sets):
        module_log.info("Generating global material ID map.")
        material_name_map, material_ctr = _make_global_material_id_map(mesh)
    # Delegate to the split writer when a split level is requested.
    if split_level is not None:
        _handle_split_level(filename, mesh, split_level, material_name_map, compression_opts)
        return
    # NOTE(review): this f-string has no placeholder; it presumably was meant
    # to interpolate the file name -- confirm against the intended message.
    module_log.info(f"Writing mesh data to XDMF file '(unknown)'.")
    if isinstance(mesh, Mesh) and not isinstance(mesh, GridMesh):
        # Plain mesh: one uniform grid backed by a single HDF5 file.
        h5_filename = os.path.splitext(filename)[0] + ".h5"
        h5_file = h5py.File(h5_filename, "w")
        xdmf_file = etree.Element("Xdmf", Version="3.0")
        domain = etree.SubElement(xdmf_file, "Domain")
        vertices = mesh.vertices
        cells = mesh.cells
        cell_sets = mesh.cell_sets
        if material_name_map:
            # print the material names before any grids
            material_names = list(material_name_map.keys())
            material_information = etree.SubElement(domain, "Information", Name="MaterialNames")
            material_information.text = " ".join(material_names)
        # Fall back to the file stem when the mesh has no name.
        if mesh.name != "":
            name = mesh.name
        else:
            name = [os.path.splitext(filename)[0]][-1]
        _add_uniform_grid(
            name,
            domain,
            h5_filename,
            h5_file,
            vertices,
            cells,
            cell_sets,
            material_name_map,
            compression_opts,
        )
        tree = etree.ElementTree(xdmf_file)
        tree.write(filename, pretty_print=True, encoding="utf-8", xml_declaration=True)
        h5_file.close()
    else:
        # GridMesh: write the whole hierarchy as a tree of grids.
        module_log.require(isinstance(mesh, GridMesh), "Bad type.")
        h5_filename = os.path.splitext(filename)[0] + ".h5"
        h5_file = h5py.File(h5_filename, "w")
        xdmf_file = etree.Element("Xdmf", Version="3.0")
        domain = etree.SubElement(xdmf_file, "Domain")
        if material_name_map:
            # print the material names before any grids
            material_names = list(material_name_map.keys())
            material_information = etree.SubElement(domain, "Information", Name="MaterialNames")
            material_information.text = " ".join(material_names)
        # Add all grid levels
        _add_gridmesh_levels(
            [(domain, mesh)],
            h5_filename,
            h5_file,
            material_name_map,
            compression_opts=compression_opts,
        )
        tree = etree.ElementTree(xdmf_file)
        tree.write(filename, pretty_print=True, encoding="utf-8", xml_declaration=True)
        h5_file.close()
def _add_uniform_grid(
name,
xml_element,
h5_filename,
h5_group,
vertices,
cells,
cell_sets,
material_name_map,
compression_opts,
):
"""Add a uniform grid to the xml element and write the h5 data."""
# Name is basically group list
grid = etree.SubElement(xml_element, "Grid", Name=name, GridType="Uniform")
# Create group for name
material_names, material_cells = _get_material_sets(cell_sets)
this_h5_group = h5_group.create_group(name)
_add_geometry(grid, h5_filename, this_h5_group, vertices, compression_opts)
_add_topology(grid, h5_filename, this_h5_group, vertices, cells, compression_opts)
if cell_sets:
_add_cell_sets(grid, h5_filename, this_h5_group, cells, cell_sets, compression_opts)
if material_cells:
_add_materials(
grid,
h5_filename,
this_h5_group,
cells,
material_name_map,
material_names,
material_cells,
compression_opts,
)
def _add_geometry(grid, h5_filename, h5_group, vertices, compression_opts):
"""Add XYZ vertex locations in the geometry block."""
geom = etree.SubElement(grid, "Geometry", GeometryType="XYZ")
vert_ids = list(vertices.keys())
datatype, precision = numpy_to_xdmf_dtype[vertices[vert_ids[0]].dtype.name]
dim = "{} {}".format(len(vert_ids), 3)
vertices_data_item = etree.SubElement(
geom,
"DataItem",
DataType=datatype,
Dimensions=dim,
Format="HDF",
Precision=precision,
)
h5_group.create_dataset(
"vertices",
data=np.stack(list(vertices.values())),
compression="gzip",
compression_opts=compression_opts,
)
vertices_data_item.text = os.path.basename(h5_filename) + ":" + h5_group.name + "/vertices"
def _map_to_0_index(keys):
"""Map data from current ID to 0 index for hdf5."""
list_keys = list(keys)
key_map = {}
for i, key in enumerate(list_keys):
key_map[key] = i
return key_map
def _make_global_material_id_map(mesh):
"""Generate a map from material name to integer ID."""
material_name_map = {}
material_ctr = 0
if isinstance(mesh, GridMesh):
# Get the leaves
mesh_children = [mesh]
next_mesh_children = []
leaves_reached = False
while not leaves_reached:
for child_mesh in mesh_children:
if child_mesh.children is not None:
next_mesh_children.extend(child_mesh.children)
else:
leaves_reached = True
if not leaves_reached:
mesh_children = next_mesh_children
next_mesh_children = []
else:
mesh_children = [mesh]
for child_mesh in mesh_children:
cell_sets = child_mesh.cell_sets
if cell_sets:
set_names = list(cell_sets.keys())
for set_name in set_names:
if ("MATERIAL" in set_name.upper()) and (set_name.upper() not in material_name_map):
material_name_map[set_name.replace(" ", "_").upper()] = material_ctr
material_ctr = material_ctr + 1
_print_material_names_and_ids(material_ctr, material_name_map)
return material_name_map, material_ctr
def _print_material_names_and_ids(material_ctr, material_name_map):
if material_ctr > 0:
module_log.info("Material Name : Material ID")
module_log.info("==================================")
for mat_name in list(material_name_map.keys()):
module_log.info(f"{mat_name.ljust(20)} : {material_name_map[mat_name]}")
def _get_material_sets(cell_sets):
"""Get the cell sets that are materials."""
material_names = []
material_cells = []
if cell_sets:
set_names = list(cell_sets.keys())
for set_name in set_names:
if "MATERIAL" in set_name.upper():
material_names.append(set_name.replace(" ", "_").upper())
material_cells.append(cell_sets.pop(set_name))
return material_names, material_cells
def _add_topology(grid, h5_filename, h5_group, vertices, cells, compression_opts):
"""Add mesh cells in the topology block."""
# Get map of vertex IDs to 0 index hdf5 data
vert_map = _map_to_0_index(vertices.keys())
# Single topology
if len(cells) == 1:
topo_type = list(cells.keys())[0]
xdmf_type = topo_to_xdmf_type[topo_type][0]
cell_arrays = list(deepcopy(cells[topo_type]).values())
# convert vertices to hdf5 local 0 index
for i in range(len(cell_arrays)):
for j in range(len(cell_arrays[i])):
cell_arrays[i][j] = vert_map[cell_arrays[i][j]]
num_cells = len(cell_arrays)
verts_per_cell = len(cell_arrays[0])
topo = etree.SubElement(
grid,
"Topology",
TopologyType=xdmf_type,
NumberOfElements=str(num_cells),
NodesPerElement=str(verts_per_cell),
)
datatype, precision = numpy_to_xdmf_dtype[cell_arrays[0].dtype.name]
dim = "{} {}".format(num_cells, verts_per_cell)
topo_data_item = etree.SubElement(
topo,
"DataItem",
DataType=datatype,
Dimensions=dim,
Format="HDF",
Precision=precision,
)
h5_group.create_dataset(
"cells",
data=np.stack(cell_arrays),
compression="gzip",
compression_opts=compression_opts,
)
topo_data_item.text = os.path.basename(h5_filename) + ":" + h5_group.name + "/cells"
# Mixed topology
else:
total_num_cells = sum(len(cells[cell_type]) for cell_type in cells.keys())
topo = etree.SubElement(
grid,
"Topology",
TopologyType="Mixed",
NumberOfElements=str(total_num_cells),
)
vert_map = _map_to_0_index(vertices.keys())
topo_data = []
total_num_verts = 0
for cell_type in cells.keys():
first_array = cells[cell_type][list(cells[cell_type].keys())[0]]
verts_per_cell = len(first_array)
num_cells = len(cells[cell_type].keys())
total_num_verts += num_cells * verts_per_cell
xdmf_int = topo_type_to_xdmf_int[cell_type]
for cell in cells[cell_type].values():
new_cell_verts = np.zeros(verts_per_cell + 1, dtype=np.int64)
new_cell_verts[0] = xdmf_int
for i, vert in enumerate(cell):
new_cell_verts[i + 1] = vert_map[vert]
topo_data.append(new_cell_verts)
dim = str(total_num_cells + total_num_verts)
datatype, precision = numpy_to_xdmf_dtype[first_array.dtype.name]
topo_data_item = etree.SubElement(
topo,
"DataItem",
DataType=datatype,
Dimensions=dim,
Format="HDF",
Precision=precision,
)
h5_group.create_dataset(
"cells",
data=np.concatenate(topo_data),
compression="gzip",
compression_opts=compression_opts,
)
topo_data_item.text = os.path.basename(h5_filename) + ":" + h5_group.name + "/cells"
def _add_materials(
grid,
h5_filename,
h5_group,
cells,
material_name_map,
material_names,
material_cells,
compression_opts,
):
"""Add materials in an attribute block."""
material_attribute = etree.SubElement(
grid,
"Attribute",
Center="Cell",
Name="MaterialID",
)
total_num_cells = sum(len(cells[cell_type]) for cell_type in cells.keys())
material_array = np.zeros(total_num_cells, dtype=np.int64) - 1
mat_ctr = 0
# If any cell has multiple materials this is going to give index out of bounds.
# TODO: Add check for multiple mat cells, but no cell should have multiple materials
for cell_type in cells.keys():
for cell in cells[cell_type].keys():
for i, material in enumerate(material_names):
if cell in material_cells[i]:
material_array[mat_ctr] = material_name_map[material]
mat_ctr = mat_ctr + 1
module_log.require(
mat_ctr == total_num_cells,
f"Total number of cells ({total_num_cells}) not equal to "
+ f"number of cells with a material ({mat_ctr}).",
)
module_log.require(all(material_array >= 0), "A cell was not assigned a material.")
datatype, precision = numpy_to_xdmf_dtype[material_array[0].dtype.name]
material_id_data_item = etree.SubElement(
material_attribute,
"DataItem",
DataType=datatype,
Dimensions=str(total_num_cells),
Format="HDF",
Precision=precision,
)
h5_group.create_dataset(
"material_id",
data=material_array,
compression="gzip",
compression_opts=compression_opts,
)
material_id_data_item.text = (
os.path.basename(h5_filename) + ":" + h5_group.name + "/material_id"
)
def _add_cell_sets(grid, h5_filename, h5_group, cells, cell_sets, compression_opts):
"""Add cells_sets in set blocks."""
set_names = list(cell_sets.keys())
# Need to map the cell ids in cell sets to how the data appears in the h5 by mapping to
# a 0 index array.
cell_id_map = {}
cell_ctr = 0
for cell_type in cells.keys():
for cell_id in cells[cell_type].keys():
cell_id_map[cell_id] = cell_ctr
cell_ctr = cell_ctr + 1
for set_name in set_names:
set_block = etree.SubElement(grid, "Set", Name=set_name, SetType="Cell")
set_cells = cell_sets[set_name]
set_cells_post_map = np.zeros(len(set_cells), dtype=np.int64)
for i, cell_id in enumerate(set_cells):
set_cells_post_map[i] = cell_id_map[cell_id]
datatype, precision = numpy_to_xdmf_dtype[set_cells_post_map[0].dtype.name]
dim = str(len(set_cells_post_map))
set_data_item = etree.SubElement(
set_block,
"DataItem",
DataType=datatype,
Dimensions=dim,
Format="HDF",
Precision=precision,
)
h5_group.create_dataset(
set_name,
data=set_cells_post_map,
compression="gzip",
compression_opts=compression_opts,
)
set_data_item.text = os.path.basename(h5_filename) + ":" + h5_group.name + "/" + set_name
def _add_gridmesh_levels(
xml_mesh_list, h5_filename, h5_group, material_name_map, compression_opts=4
):
child_list = []
for parent_xml_tree, mesh in xml_mesh_list:
# If it has children, write the tree and add children to child list
if mesh.children is not None:
mesh_xml_tree = etree.SubElement(
parent_xml_tree, "Grid", Name=mesh.name, GridType="Tree"
)
for child_mesh in mesh.children:
child_list.append((mesh_xml_tree, child_mesh))
else:
# If there are not children, this must be the bottom level. Write the data
_add_uniform_grid(
mesh.name,
parent_xml_tree,
h5_filename,
h5_group,
mesh.vertices,
mesh.cells,
mesh.cell_sets,
material_name_map,
compression_opts,
)
if child_list:
_add_gridmesh_levels(child_list, h5_filename, h5_group, material_name_map, compression_opts)
def _handle_split_level(filename, mesh, split_level, material_name_map, compression_opts):
# Check that the level is appropriate
module_log.require(split_level >= 0, "split_level must be greater than or equal to 0.")
# If level is 0, write
if split_level == 0:
write_xdmf_file(
filename,
mesh,
split_level=None,
material_name_map=material_name_map,
compression_opts=compression_opts,
)
# Otherwise call next level
else:
next_mesh = mesh
for _i in range(split_level):
module_log.require(
next_mesh.children is not None,
"split_level is too high. Not enough grid levels in mesh.",
)
next_mesh = next_mesh.children[0]
for child in mesh.children:
new_filename = os.path.splitext(filename)[0] + "_" + child.name + ".xdmf"
write_xdmf_file(
new_filename,
child,
split_level=split_level - 1,
material_name_map=material_name_map,
compression_opts=compression_opts,
)
| [
"logging.getLogger",
"lxml.etree.Element",
"lxml.etree.SubElement",
"lxml.etree.ElementTree",
"os.path.splitext",
"h5py.File",
"numpy.stack",
"numpy.zeros",
"os.path.basename",
"numpy.concatenate",
"copy.deepcopy"
] | [((215, 242), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (232, 242), False, 'import logging\n'), ((4712, 4780), 'lxml.etree.SubElement', 'etree.SubElement', (['xml_element', '"""Grid"""'], {'Name': 'name', 'GridType': '"""Uniform"""'}), "(xml_element, 'Grid', Name=name, GridType='Uniform')\n", (4728, 4780), True, 'import lxml.etree as etree\n'), ((5613, 5667), 'lxml.etree.SubElement', 'etree.SubElement', (['grid', '"""Geometry"""'], {'GeometryType': '"""XYZ"""'}), "(grid, 'Geometry', GeometryType='XYZ')\n", (5629, 5667), True, 'import lxml.etree as etree\n'), ((5853, 5961), 'lxml.etree.SubElement', 'etree.SubElement', (['geom', '"""DataItem"""'], {'DataType': 'datatype', 'Dimensions': 'dim', 'Format': '"""HDF"""', 'Precision': 'precision'}), "(geom, 'DataItem', DataType=datatype, Dimensions=dim,\n Format='HDF', Precision=precision)\n", (5869, 5961), True, 'import lxml.etree as etree\n'), ((12047, 12116), 'lxml.etree.SubElement', 'etree.SubElement', (['grid', '"""Attribute"""'], {'Center': '"""Cell"""', 'Name': '"""MaterialID"""'}), "(grid, 'Attribute', Center='Cell', Name='MaterialID')\n", (12063, 12116), True, 'import lxml.etree as etree\n'), ((2358, 2385), 'h5py.File', 'h5py.File', (['h5_filename', '"""w"""'], {}), "(h5_filename, 'w')\n", (2367, 2385), False, 'import h5py\n'), ((2407, 2443), 'lxml.etree.Element', 'etree.Element', (['"""Xdmf"""'], {'Version': '"""3.0"""'}), "('Xdmf', Version='3.0')\n", (2420, 2443), True, 'import lxml.etree as etree\n'), ((2461, 2498), 'lxml.etree.SubElement', 'etree.SubElement', (['xdmf_file', '"""Domain"""'], {}), "(xdmf_file, 'Domain')\n", (2477, 2498), True, 'import lxml.etree as etree\n'), ((3294, 3322), 'lxml.etree.ElementTree', 'etree.ElementTree', (['xdmf_file'], {}), '(xdmf_file)\n', (3311, 3322), True, 'import lxml.etree as etree\n'), ((3592, 3619), 'h5py.File', 'h5py.File', (['h5_filename', '"""w"""'], {}), "(h5_filename, 'w')\n", (3601, 3619), False, 'import h5py\n'), ((3641, 
3677), 'lxml.etree.Element', 'etree.Element', (['"""Xdmf"""'], {'Version': '"""3.0"""'}), "('Xdmf', Version='3.0')\n", (3654, 3677), True, 'import lxml.etree as etree\n'), ((3695, 3732), 'lxml.etree.SubElement', 'etree.SubElement', (['xdmf_file', '"""Domain"""'], {}), "(xdmf_file, 'Domain')\n", (3711, 3732), True, 'import lxml.etree as etree\n'), ((4283, 4311), 'lxml.etree.ElementTree', 'etree.ElementTree', (['xdmf_file'], {}), '(xdmf_file)\n', (4300, 4311), True, 'import lxml.etree as etree\n'), ((9680, 9788), 'lxml.etree.SubElement', 'etree.SubElement', (['topo', '"""DataItem"""'], {'DataType': 'datatype', 'Dimensions': 'dim', 'Format': '"""HDF"""', 'Precision': 'precision'}), "(topo, 'DataItem', DataType=datatype, Dimensions=dim,\n Format='HDF', Precision=precision)\n", (9696, 9788), True, 'import lxml.etree as etree\n'), ((11345, 11453), 'lxml.etree.SubElement', 'etree.SubElement', (['topo', '"""DataItem"""'], {'DataType': 'datatype', 'Dimensions': 'dim', 'Format': '"""HDF"""', 'Precision': 'precision'}), "(topo, 'DataItem', DataType=datatype, Dimensions=dim,\n Format='HDF', Precision=precision)\n", (11361, 11453), True, 'import lxml.etree as etree\n'), ((12256, 12297), 'numpy.zeros', 'np.zeros', (['total_num_cells'], {'dtype': 'np.int64'}), '(total_num_cells, dtype=np.int64)\n', (12264, 12297), True, 'import numpy as np\n'), ((14177, 14237), 'lxml.etree.SubElement', 'etree.SubElement', (['grid', '"""Set"""'], {'Name': 'set_name', 'SetType': '"""Cell"""'}), "(grid, 'Set', Name=set_name, SetType='Cell')\n", (14193, 14237), True, 'import lxml.etree as etree\n'), ((14604, 14717), 'lxml.etree.SubElement', 'etree.SubElement', (['set_block', '"""DataItem"""'], {'DataType': 'datatype', 'Dimensions': 'dim', 'Format': '"""HDF"""', 'Precision': 'precision'}), "(set_block, 'DataItem', DataType=datatype, Dimensions=dim,\n Format='HDF', Precision=precision)\n", (14620, 14717), True, 'import lxml.etree as etree\n'), ((2777, 2838), 'lxml.etree.SubElement', 'etree.SubElement', 
(['domain', '"""Information"""'], {'Name': '"""MaterialNames"""'}), "(domain, 'Information', Name='MaterialNames')\n", (2793, 2838), True, 'import lxml.etree as etree\n'), ((3915, 3976), 'lxml.etree.SubElement', 'etree.SubElement', (['domain', '"""Information"""'], {'Name': '"""MaterialNames"""'}), "(domain, 'Information', Name='MaterialNames')\n", (3931, 3976), True, 'import lxml.etree as etree\n'), ((15397, 15471), 'lxml.etree.SubElement', 'etree.SubElement', (['parent_xml_tree', '"""Grid"""'], {'Name': 'mesh.name', 'GridType': '"""Tree"""'}), "(parent_xml_tree, 'Grid', Name=mesh.name, GridType='Tree')\n", (15413, 15471), True, 'import lxml.etree as etree\n'), ((2302, 2328), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (2318, 2328), False, 'import os\n'), ((3536, 3562), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (3552, 3562), False, 'import os\n'), ((6217, 6246), 'os.path.basename', 'os.path.basename', (['h5_filename'], {}), '(h5_filename)\n', (6233, 6246), False, 'import os\n'), ((9939, 9960), 'numpy.stack', 'np.stack', (['cell_arrays'], {}), '(cell_arrays)\n', (9947, 9960), True, 'import numpy as np\n'), ((10946, 10990), 'numpy.zeros', 'np.zeros', (['(verts_per_cell + 1)'], {'dtype': 'np.int64'}), '(verts_per_cell + 1, dtype=np.int64)\n', (10954, 10990), True, 'import numpy as np\n'), ((11604, 11629), 'numpy.concatenate', 'np.concatenate', (['topo_data'], {}), '(topo_data)\n', (11618, 11629), True, 'import numpy as np\n'), ((13568, 13597), 'os.path.basename', 'os.path.basename', (['h5_filename'], {}), '(h5_filename)\n', (13584, 13597), False, 'import os\n'), ((8983, 9009), 'copy.deepcopy', 'deepcopy', (['cells[topo_type]'], {}), '(cells[topo_type])\n', (8991, 9009), False, 'from copy import deepcopy\n'), ((10081, 10110), 'os.path.basename', 'os.path.basename', (['h5_filename'], {}), '(h5_filename)\n', (10097, 10110), False, 'import os\n'), ((11750, 11779), 'os.path.basename', 'os.path.basename', 
(['h5_filename'], {}), '(h5_filename)\n', (11766, 11779), False, 'import os\n'), ((2996, 3022), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (3012, 3022), False, 'import os\n'), ((15007, 15036), 'os.path.basename', 'os.path.basename', (['h5_filename'], {}), '(h5_filename)\n', (15023, 15036), False, 'import os\n'), ((17017, 17043), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (17033, 17043), False, 'import os\n')] |
import math
import numpy as np
#
# line segment intersection using vectors
# see Computer Graphics by <NAME>
#
def segPerp(a) :
b = np.empty_like(a)
b[0] = -a[1]
b[1] = a[0]
return b
# line segment a given by endpoints a1, a2
# line segment b given by endpoints b1, b2
# return
def seg_intersect(a1,a2, b1,b2):
da = a2-a1
db = b2-b1
dp = a1-b1
dap = segPerp(da)
denom = np.dot( dap, db)
num = np.dot( dap, dp )
if denom == 0:
return (False, (0,0))
return (True, (num / denom.astype(float))*db + b1)
# Find if two lines are more or less orthogonal - can't be too precise as the
# image may be warped by perspective etc
def isOrthogonal(l1, l2):
ang = (l1["theta"] - l2["theta"] + np.pi * 2) % np.pi
return abs(ang - np.pi/2) < np.pi/4
# Calculate square of distance from a point to a line (for sorting)
def distSqPtToLineXY(x1, y1, x2, y2, xPt, yPt):
dx = x2-x1
dy = y2-y1
sqdiff = dx*dx + dy*dy
u = ((xPt - x1) * dx + (yPt - y1) * dy) / float(sqdiff)
u = 0 if u < 0 else (1 if u > 1 else u)
x = x1 + u * dx
y = y1 + u * dy
ddx = x - xPt
ddy = y - yPt
distSq = ddx*ddx + ddy*ddy
return distSq
def distSqPtToLine(linePt1, linePt2, pt):
return distSqPtToLineXY(linePt1[0], linePt1[1], linePt2[0], linePt2[1], pt[0], pt[1])
# Find the mid point of a line
def lineMidPt(line):
return (((line["p1"][0]+line["p2"][0])/2, (line["p1"][1]+line["p2"][1])/2))
# Distance from pt to pt
def distPtToPt(p1, p2):
dx = p1[0]-p2[0]
dy = p1[1]-p2[1]
return math.sqrt(dx*dx+dy*dy)
# Find separation distance of the mid-points of two lines
def lineSeparation(l1, l2):
mid1 = lineMidPt(l1)
mid2 = lineMidPt(l2)
# print("Mids", l1, l2, mid1, mid2)
return distPtToPt(mid1, mid2) | [
"math.sqrt",
"numpy.dot",
"numpy.empty_like"
] | [((137, 153), 'numpy.empty_like', 'np.empty_like', (['a'], {}), '(a)\n', (150, 153), True, 'import numpy as np\n'), ((408, 423), 'numpy.dot', 'np.dot', (['dap', 'db'], {}), '(dap, db)\n', (414, 423), True, 'import numpy as np\n'), ((435, 450), 'numpy.dot', 'np.dot', (['dap', 'dp'], {}), '(dap, dp)\n', (441, 450), True, 'import numpy as np\n'), ((1573, 1601), 'math.sqrt', 'math.sqrt', (['(dx * dx + dy * dy)'], {}), '(dx * dx + dy * dy)\n', (1582, 1601), False, 'import math\n')] |
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
y = iris.target
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=1,gamma=0).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
h = (x_max / x_min)/100
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
plt.subplot(1, 1, 1)
Z = svc.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.title('SVC with linear kernel')
plt.show() | [
"sklearn.datasets.load_iris",
"matplotlib.pyplot.contourf",
"sklearn.svm.SVC",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((125, 145), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (143, 145), False, 'from sklearn import svm, datasets\n'), ((559, 579), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (570, 579), True, 'import matplotlib.pyplot as plt\n'), ((651, 705), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xx', 'yy', 'Z'], {'cmap': 'plt.cm.Paired', 'alpha': '(0.8)'}), '(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)\n', (663, 705), True, 'import matplotlib.pyplot as plt\n'), ((706, 760), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'c': 'y', 'cmap': 'plt.cm.Paired'}), '(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)\n', (717, 760), True, 'import matplotlib.pyplot as plt\n'), ((761, 787), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Sepal length"""'], {}), "('Sepal length')\n", (771, 787), True, 'import matplotlib.pyplot as plt\n'), ((788, 813), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Sepal width"""'], {}), "('Sepal width')\n", (798, 813), True, 'import matplotlib.pyplot as plt\n'), ((843, 878), 'matplotlib.pyplot.title', 'plt.title', (['"""SVC with linear kernel"""'], {}), "('SVC with linear kernel')\n", (852, 878), True, 'import matplotlib.pyplot as plt\n'), ((879, 889), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (887, 889), True, 'import matplotlib.pyplot as plt\n'), ((502, 528), 'numpy.arange', 'np.arange', (['x_min', 'x_max', 'h'], {}), '(x_min, x_max, h)\n', (511, 528), True, 'import numpy as np\n'), ((531, 557), 'numpy.arange', 'np.arange', (['y_min', 'y_max', 'h'], {}), '(y_min, y_max, h)\n', (540, 557), True, 'import numpy as np\n'), ((276, 314), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""linear"""', 'C': '(1)', 'gamma': '(0)'}), "(kernel='linear', C=1, gamma=0)\n", (283, 314), False, 'from sklearn import svm, datasets\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2019 <NAME>
# wwdtm_scoreimage is relased under the terms of the Apache License 2.0
"""Generate PNG image file based on WWDTM show score totals"""
import json
import math
import os
from typing import List
import mysql.connector
from mysql.connector.errors import DatabaseError, ProgrammingError
import numpy
from PIL import Image
BASE_IMAGE_WIDTH = 30
IMAGE_SCALE = 40
def retrieve_show_total_scores(database_connection: mysql.connector) -> List[int]:
"""Retrieve total scores for each show"""
cursor = database_connection.cursor()
query = ("select sum(pm.panelistscore) as total "
"from ww_showpnlmap pm "
"join ww_shows s on s.showid = pm.showid "
"where s.bestof = 0 and s.repeatshowid is null "
"group by s.showdate "
"having sum(pm.panelistscore) > 0")
cursor.execute(query)
result = cursor.fetchall()
if not result:
return None
scores = []
for row in result:
scores.append(int(row[0]))
return scores
def remap(in_value: int,
in_minimum: int,
in_maximum: int,
out_minimum: int,
out_maximum: int) -> int:
"""Remap a value from one value range to another value range
while maintaining ratio"""
new_value = (in_value - in_minimum) * (out_maximum - out_minimum) \
/ (in_maximum - in_minimum) + out_minimum
return math.floor(new_value)
def pad(list_object: List, content, width: int) -> List:
list_object.extend([content] * (width - len(list_object)))
return list_object
def split(values):
for i in range(0, len(values), BASE_IMAGE_WIDTH):
yield values[i:i+BASE_IMAGE_WIDTH]
def convert_list_to_pixels(values: List[int]) -> List[List]:
pixels = []
for row in values:
row_tuples = []
for value in row:
row_tuples.append((value, math.floor(value / 3), 0))
if len(row_tuples) < BASE_IMAGE_WIDTH:
pad(row_tuples, (0, 0, 0), BASE_IMAGE_WIDTH)
pixels.append(row_tuples)
return pixels
def generate_image(values, dimension_side: int):
"""Generate a PNG image based on a list of integers"""
image_size = dimension_side * IMAGE_SCALE
array = numpy.array(values, dtype=numpy.uint8)
image = Image.fromarray(array)
resized_image = image.resize((image_size, image_size), Image.NEAREST)
resized_image.save('output.png')
resized_image.show()
def load_config(app_environment):
"""Load configuration file from config.json"""
with open('config.json', 'r') as config_file:
config_dict = json.load(config_file)
if app_environment.startswith("develop"):
if "development" in config_dict:
config = config_dict["development"]
else:
raise Exception("Missing 'development' section in config file")
elif app_environment.startswith("prod"):
if "production" in config_dict:
config = config_dict['production']
else:
raise Exception("Missing 'production' section in config file")
else:
if "local" in config_dict:
config = config_dict["local"]
else:
raise Exception("Missing 'local' section in config file")
return config
def main():
"""Pull in scoring data and generate image based on the data"""
app_environment = os.getenv("APP_ENV", "local").strip().lower()
config = load_config(app_environment)
database_connection = mysql.connector.connect(**config["database"])
original_totals = retrieve_show_total_scores(database_connection)
if not original_totals:
print("No scores to process")
original_min_total = min(original_totals)
original_max_total = max(original_totals)
new_min_value = 0
new_max_value = 255
remapped_totals = []
for total in original_totals:
remapped_totals.append(remap(total,
original_min_total,
original_max_total,
new_min_value,
new_max_value))
list_values = list(split(remapped_totals))
pixels = list(convert_list_to_pixels(list_values))
side = math.ceil(math.sqrt(len(original_totals)))
generate_image(pixels, side)
# Only run if executed as a script and not imported
if __name__ == '__main__':
main()
| [
"PIL.Image.fromarray",
"os.getenv",
"math.floor",
"numpy.array",
"json.load"
] | [((1450, 1471), 'math.floor', 'math.floor', (['new_value'], {}), '(new_value)\n', (1460, 1471), False, 'import math\n'), ((2275, 2313), 'numpy.array', 'numpy.array', (['values'], {'dtype': 'numpy.uint8'}), '(values, dtype=numpy.uint8)\n', (2286, 2313), False, 'import numpy\n'), ((2326, 2348), 'PIL.Image.fromarray', 'Image.fromarray', (['array'], {}), '(array)\n', (2341, 2348), False, 'from PIL import Image\n'), ((2643, 2665), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (2652, 2665), False, 'import json\n'), ((1922, 1943), 'math.floor', 'math.floor', (['(value / 3)'], {}), '(value / 3)\n', (1932, 1943), False, 'import math\n'), ((3406, 3435), 'os.getenv', 'os.getenv', (['"""APP_ENV"""', '"""local"""'], {}), "('APP_ENV', 'local')\n", (3415, 3435), False, 'import os\n')] |
import numpy as np
class Perceptron:
@staticmethod
def step(z):
return 1 if z >= 0 else 0
def __init__(self, lr=0.01, epochs=100):
self.lr = lr
self.epochs = epochs
self.W = None
self.errors = None
@staticmethod
def weight_init(x):
a = 1 + x.shape[1]
sigma = np.sqrt(2/(a+1))
return np.random.normal(0, sigma, size=a)
def feed_forward(self, x):
z = np.dot(x, self.W[1:]) + self.W[0]
return self.step(z)
def loss(self, x, y):
error = 0
for j in range(len(y)):
y_hat = self.feed_forward(x[j])
err = (y[j] - y_hat) * self.lr
self.W[1:] += err * x[j]
self.W[0] += err
error += int(err != 0.0)
return error/np.shape(x)[0]
def fit(self, x, y):
self.W = self.weight_init(x)
self.errors = []
for _ in range(self.epochs):
self.errors.append(self.loss(x, y))
return self
def predict(self, x_test):
y_pred = []
for j in range(len(x_test)):
y_pred.append(self.step(np.dot(x_test[j], self.W[1:]) + self.W[0]))
return y_pred
| [
"numpy.random.normal",
"numpy.shape",
"numpy.dot",
"numpy.sqrt"
] | [((283, 303), 'numpy.sqrt', 'np.sqrt', (['(2 / (a + 1))'], {}), '(2 / (a + 1))\n', (290, 303), True, 'import numpy as np\n'), ((309, 343), 'numpy.random.normal', 'np.random.normal', (['(0)', 'sigma'], {'size': 'a'}), '(0, sigma, size=a)\n', (325, 343), True, 'import numpy as np\n'), ((379, 400), 'numpy.dot', 'np.dot', (['x', 'self.W[1:]'], {}), '(x, self.W[1:])\n', (385, 400), True, 'import numpy as np\n'), ((657, 668), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (665, 668), True, 'import numpy as np\n'), ((932, 961), 'numpy.dot', 'np.dot', (['x_test[j]', 'self.W[1:]'], {}), '(x_test[j], self.W[1:])\n', (938, 961), True, 'import numpy as np\n')] |
# +
import numpy as np
import tensorflow as tf
from gpflow import set_trainable
from gpflow.ci_utils import ci_niter
from gpflow.kernels import RBF
from gpflow.likelihoods import Gaussian
from matplotlib import pyplot as plt
from markovflow.kernels import Matern32
from markovflow.models import SparseSpatioTemporalVariational
from markovflow.ssm_natgrad import SSMNaturalGradient
np.random.seed(10)
# -
# Declaring the model
# +
M_time = 7
M_space = 4
kernel_space = RBF(variance=1.0, lengthscales=0.2)
kernel_time = Matern32(variance=1.0, lengthscale=0.2)
likelihood = Gaussian(variance=0.1)
inducing_space = np.linspace(0.1, 0.9, M_space).reshape(-1, 1)
inducing_time = np.linspace(0, 1, M_time).reshape(-1, )
model = SparseSpatioTemporalVariational(
inducing_time=tf.identity(inducing_time),
inducing_space=tf.identity(inducing_space),
kernel_space=kernel_space,
kernel_time=kernel_time,
likelihood=likelihood,
)
# -
# Creating data
num_data = 200
time_points = np.random.rand(num_data, 1)
space_points = np.random.rand(num_data, 1)
X = np.concatenate([space_points, time_points], -1)
f = lambda v: np.cos(5.0 * (v[..., 1:] + v[..., :1]))
F = f(X)
Y = F + np.random.randn(num_data, 1)
data = (X, Y)
# Creating a plotting grid and plotting function
# +
x_grid, t_grid = np.meshgrid(np.linspace(0, 1, 50), np.linspace(0, 1, 50))
X_grid = np.concatenate([x_grid.reshape(-1, 1), t_grid.reshape(-1, 1)], axis=-1)
def plot_model(model):
mu_f, var_f = model.space_time_predict_f(X_grid)
fig, axarr = plt.subplots(2, 1)
axarr[0].scatter(x=space_points, y=time_points, c=Y)
axarr[1].scatter(x=X_grid[..., :1], y=X_grid[..., 1:], c=mu_f.numpy())
for ax in axarr:
ax.hlines(model.inducing_space, xmin=time_points.min(), xmax=time_points.max(), colors="r")
ax.vlines(
model.inducing_space, ymin=space_points.min(), ymax=space_points.max(), colors="k"
)
plt.savefig("spatio_temporal.pdf", dpi=300)
plt.show()
# -
# Training
# +
# Start at a small learning rate
adam_learning_rate = 0.0001
natgrad_learning_rate = 0.5
adam_opt = tf.optimizers.Adam(learning_rate=adam_learning_rate)
natgrad_opt = SSMNaturalGradient(gamma=natgrad_learning_rate, momentum=False)
set_trainable(model.ssm_q, False)
adam_var_list = model.trainable_variables # trainable_variables
set_trainable(model.ssm_q, True)
# +
# tf.function
def loss(input_data):
return -model.elbo(input_data)
# tf.function
def opt_step(input_data):
natgrad_opt.minimize(lambda: loss(input_data), model.ssm_q)
adam_opt.minimize(lambda: loss(input_data), adam_var_list)
# +
max_iter = ci_niter(500)
for i in range(max_iter):
opt_step(data)
if i % 20 == 0:
plot_model(model)
print("Iteration:", i, ", Loss:", model.loss(data).numpy())
| [
"gpflow.ci_utils.ci_niter",
"matplotlib.pyplot.savefig",
"numpy.random.rand",
"markovflow.ssm_natgrad.SSMNaturalGradient",
"markovflow.kernels.Matern32",
"numpy.linspace",
"numpy.random.randn",
"tensorflow.optimizers.Adam",
"numpy.random.seed",
"numpy.concatenate",
"gpflow.kernels.RBF",
"numpy... | [((382, 400), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (396, 400), True, 'import numpy as np\n'), ((473, 508), 'gpflow.kernels.RBF', 'RBF', ([], {'variance': '(1.0)', 'lengthscales': '(0.2)'}), '(variance=1.0, lengthscales=0.2)\n', (476, 508), False, 'from gpflow.kernels import RBF\n'), ((523, 562), 'markovflow.kernels.Matern32', 'Matern32', ([], {'variance': '(1.0)', 'lengthscale': '(0.2)'}), '(variance=1.0, lengthscale=0.2)\n', (531, 562), False, 'from markovflow.kernels import Matern32\n'), ((576, 598), 'gpflow.likelihoods.Gaussian', 'Gaussian', ([], {'variance': '(0.1)'}), '(variance=0.1)\n', (584, 598), False, 'from gpflow.likelihoods import Gaussian\n'), ((994, 1021), 'numpy.random.rand', 'np.random.rand', (['num_data', '(1)'], {}), '(num_data, 1)\n', (1008, 1021), True, 'import numpy as np\n'), ((1037, 1064), 'numpy.random.rand', 'np.random.rand', (['num_data', '(1)'], {}), '(num_data, 1)\n', (1051, 1064), True, 'import numpy as np\n'), ((1069, 1116), 'numpy.concatenate', 'np.concatenate', (['[space_points, time_points]', '(-1)'], {}), '([space_points, time_points], -1)\n', (1083, 1116), True, 'import numpy as np\n'), ((2122, 2174), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', ([], {'learning_rate': 'adam_learning_rate'}), '(learning_rate=adam_learning_rate)\n', (2140, 2174), True, 'import tensorflow as tf\n'), ((2189, 2252), 'markovflow.ssm_natgrad.SSMNaturalGradient', 'SSMNaturalGradient', ([], {'gamma': 'natgrad_learning_rate', 'momentum': '(False)'}), '(gamma=natgrad_learning_rate, momentum=False)\n', (2207, 2252), False, 'from markovflow.ssm_natgrad import SSMNaturalGradient\n'), ((2254, 2287), 'gpflow.set_trainable', 'set_trainable', (['model.ssm_q', '(False)'], {}), '(model.ssm_q, False)\n', (2267, 2287), False, 'from gpflow import set_trainable\n'), ((2353, 2385), 'gpflow.set_trainable', 'set_trainable', (['model.ssm_q', '(True)'], {}), '(model.ssm_q, True)\n', (2366, 2385), False, 'from gpflow import 
set_trainable\n'), ((2649, 2662), 'gpflow.ci_utils.ci_niter', 'ci_niter', (['(500)'], {}), '(500)\n', (2657, 2662), False, 'from gpflow.ci_utils import ci_niter\n'), ((1131, 1170), 'numpy.cos', 'np.cos', (['(5.0 * (v[..., 1:] + v[..., :1]))'], {}), '(5.0 * (v[..., 1:] + v[..., :1]))\n', (1137, 1170), True, 'import numpy as np\n'), ((1188, 1216), 'numpy.random.randn', 'np.random.randn', (['num_data', '(1)'], {}), '(num_data, 1)\n', (1203, 1216), True, 'import numpy as np\n'), ((1315, 1336), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(50)'], {}), '(0, 1, 50)\n', (1326, 1336), True, 'import numpy as np\n'), ((1338, 1359), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(50)'], {}), '(0, 1, 50)\n', (1349, 1359), True, 'import numpy as np\n'), ((1537, 1555), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (1549, 1555), True, 'from matplotlib import pyplot as plt\n'), ((1939, 1982), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""spatio_temporal.pdf"""'], {'dpi': '(300)'}), "('spatio_temporal.pdf', dpi=300)\n", (1950, 1982), True, 'from matplotlib import pyplot as plt\n'), ((1987, 1997), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1995, 1997), True, 'from matplotlib import pyplot as plt\n'), ((617, 647), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.9)', 'M_space'], {}), '(0.1, 0.9, M_space)\n', (628, 647), True, 'import numpy as np\n'), ((679, 704), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'M_time'], {}), '(0, 1, M_time)\n', (690, 704), True, 'import numpy as np\n'), ((779, 805), 'tensorflow.identity', 'tf.identity', (['inducing_time'], {}), '(inducing_time)\n', (790, 805), True, 'import tensorflow as tf\n'), ((826, 853), 'tensorflow.identity', 'tf.identity', (['inducing_space'], {}), '(inducing_space)\n', (837, 853), True, 'import tensorflow as tf\n')] |
import h5py
import numpy as np
import torch
import cv2
from torch.utils.data import DataLoader, TensorDataset
def get_data(batch_size=64):
    """Load the SIGNS dataset from its HDF5 files and build DataLoaders.

    Parameters
    ----------
    batch_size : int
        Mini-batch size used by both loaders.

    Returns
    -------
    tuple
        (train_dataset, test_dataset, train_loader, test_loader, classes)
        where the datasets are TensorDatasets, the loaders shuffle, and
        ``classes`` is the array of class labels from the test file.
    """
    # Read everything eagerly inside `with` blocks so the HDF5 handles are
    # closed immediately (the original leaked both open files).
    with h5py.File('datasets/train_signs.h5', "r") as train_file:
        x_train = np.array(train_file["train_set_x"][:])  # train features (NHWC)
        y_train = np.array(train_file["train_set_y"][:])  # train labels
    with h5py.File('datasets/test_signs.h5', "r") as test_file:
        x_test = np.array(test_file["test_set_x"][:])    # test features (NHWC)
        y_test = np.array(test_file["test_set_y"][:])    # test labels
        classes = np.array(test_file["list_classes"][:])  # the list of classes

    # NHWC -> NCHW, the layout expected by PyTorch convolutions
    x_train = np.transpose(x_train, (0, 3, 1, 2))
    x_test = np.transpose(x_test, (0, 3, 1, 2))
    # Labels as column vectors of shape (N, 1)
    y_train = y_train.reshape((1, y_train.shape[0])).T
    y_test = y_test.reshape((1, y_test.shape[0])).T

    # Scale pixel values from [0, 255] to [0, 1]
    X_train_tensor = torch.tensor(x_train, dtype=torch.float) / 255
    Y_train_tensor = torch.tensor(y_train, dtype=torch.long)
    X_test_tensor = torch.tensor(x_test, dtype=torch.float) / 255
    Y_test_tensor = torch.tensor(y_test, dtype=torch.long)

    train_dataset = TensorDataset(X_train_tensor, Y_train_tensor)
    test_dataset = TensorDataset(X_test_tensor, Y_test_tensor)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)
    return train_dataset, test_dataset, train_loader, test_loader, classes
def path_to_input(image_path, input_size, device):
    """Load an image from disk and convert it into a 1xCxHxW float tensor.

    The image is resized to (input_size, input_size), converted from
    OpenCV's BGR/HWC layout to RGB/CHW, scaled to [0, 1] and given a
    leading batch dimension of 1.
    """
    bgr = cv2.imread(image_path)
    bgr = cv2.resize(bgr, (input_size, input_size))      # resize to model input
    rgb_chw = bgr[..., ::-1].transpose((2, 0, 1))      # BGR -> RGB, HxWxC -> CxHxW
    batch = rgb_chw[np.newaxis, ...] / 255.0           # add batch axis, scale to [0, 1]
    return torch.tensor(batch, dtype=torch.float, device=device)
| [
"torch.utils.data.TensorDataset",
"h5py.File",
"numpy.array",
"torch.tensor",
"torch.utils.data.DataLoader",
"cv2.resize",
"numpy.transpose",
"cv2.imread"
] | [((161, 202), 'h5py.File', 'h5py.File', (['"""datasets/train_signs.h5"""', '"""r"""'], {}), "('datasets/train_signs.h5', 'r')\n", (170, 202), False, 'import h5py\n'), ((217, 258), 'numpy.array', 'np.array', (["train_dataset['train_set_x'][:]"], {}), "(train_dataset['train_set_x'][:])\n", (225, 258), True, 'import numpy as np\n'), ((299, 334), 'numpy.transpose', 'np.transpose', (['x_train', '(0, 3, 1, 2)'], {}), '(x_train, (0, 3, 1, 2))\n', (311, 334), True, 'import numpy as np\n'), ((349, 390), 'numpy.array', 'np.array', (["train_dataset['train_set_y'][:]"], {}), "(train_dataset['train_set_y'][:])\n", (357, 390), True, 'import numpy as np\n'), ((490, 530), 'h5py.File', 'h5py.File', (['"""datasets/test_signs.h5"""', '"""r"""'], {}), "('datasets/test_signs.h5', 'r')\n", (499, 530), False, 'import h5py\n'), ((544, 583), 'numpy.array', 'np.array', (["test_dataset['test_set_x'][:]"], {}), "(test_dataset['test_set_x'][:])\n", (552, 583), True, 'import numpy as np\n'), ((622, 656), 'numpy.transpose', 'np.transpose', (['x_test', '(0, 3, 1, 2)'], {}), '(x_test, (0, 3, 1, 2))\n', (634, 656), True, 'import numpy as np\n'), ((670, 709), 'numpy.array', 'np.array', (["test_dataset['test_set_y'][:]"], {}), "(test_dataset['test_set_y'][:])\n", (678, 709), True, 'import numpy as np\n'), ((800, 841), 'numpy.array', 'np.array', (["test_dataset['list_classes'][:]"], {}), "(test_dataset['list_classes'][:])\n", (808, 841), True, 'import numpy as np\n'), ((952, 991), 'torch.tensor', 'torch.tensor', (['y_train'], {'dtype': 'torch.long'}), '(y_train, dtype=torch.long)\n', (964, 991), False, 'import torch\n'), ((1077, 1115), 'torch.tensor', 'torch.tensor', (['y_test'], {'dtype': 'torch.long'}), '(y_test, dtype=torch.long)\n', (1089, 1115), False, 'import torch\n'), ((1137, 1182), 'torch.utils.data.TensorDataset', 'TensorDataset', (['X_train_tensor', 'Y_train_tensor'], {}), '(X_train_tensor, Y_train_tensor)\n', (1150, 1182), False, 'from torch.utils.data import DataLoader, 
TensorDataset\n'), ((1202, 1245), 'torch.utils.data.TensorDataset', 'TensorDataset', (['X_test_tensor', 'Y_test_tensor'], {}), '(X_test_tensor, Y_test_tensor)\n', (1215, 1245), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((1266, 1328), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(train_dataset, batch_size=batch_size, shuffle=True)\n', (1276, 1328), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((1347, 1408), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(test_dataset, batch_size=batch_size, shuffle=True)\n', (1357, 1408), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((1549, 1571), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (1559, 1571), False, 'import cv2\n'), ((1582, 1623), 'cv2.resize', 'cv2.resize', (['img', '(input_size, input_size)'], {}), '(img, (input_size, input_size))\n', (1592, 1623), False, 'import cv2\n'), ((1872, 1923), 'torch.tensor', 'torch.tensor', (['img'], {'dtype': 'torch.float', 'device': 'device'}), '(img, dtype=torch.float, device=device)\n', (1884, 1923), False, 'import torch\n'), ((886, 926), 'torch.tensor', 'torch.tensor', (['x_train'], {'dtype': 'torch.float'}), '(x_train, dtype=torch.float)\n', (898, 926), False, 'import torch\n'), ((1013, 1052), 'torch.tensor', 'torch.tensor', (['x_test'], {'dtype': 'torch.float'}), '(x_test, dtype=torch.float)\n', (1025, 1052), False, 'import torch\n')] |
import app.ai.model as model
from app.ai.genetic import Genetic
import app.ai.plot as plot
import time
import datetime
import numpy as np
import torch as t
import threading
import sys
class Service():
    """A learnable model plus its training configuration and replay batch.

    The public attribute/method surface is identical to the original
    implementation; the concrete defects fixed here are:
      * ``train_on_batch`` called ``data_from_batch()`` without ``self.``
        (a guaranteed NameError at runtime),
      * ``np.float`` / ``np.int`` (removed in NumPy >= 1.20) replaced by
        the builtins ``float`` / ``int``,
      * hyper-parameter change detection used identity comparison
        (``is not``) on floats/strings/lists, which is unreliable --
        replaced with value comparison (``!=``),
      * the comprehensions in ``data_from_batch`` shadowed the ``t``
        alias used for torch.
    """

    def __init__(self, inputs=1, outputs=1, main_service=False):
        # identity / bookkeeping
        self.main_service = main_service
        self.uid = None
        self.description = None
        # online (gradient) learning configuration
        self.online_learning = True
        self.batch_size = 10
        self.lr = 0.0001
        self.GAMMA = 0.999
        self.opt = 'SGD'
        self.layers = []        # hidden layer sizes for the model
        self.active = True
        # genetic learning configuration
        self.genetic_learning = False
        self.mr = 0.1           # mutation rate
        self.population_size = 4
        self.genetic = None
        # training state
        self.reward_total = 0
        self.epoch = 0
        self.batch = []         # replay buffer of (state, action, reward)
        self.losses = []
        self.date = str(datetime.datetime.now())
        self.inputs = inputs
        self.outputs = outputs
        self.model = model.Model_deep(self.inputs, self.outputs)
        self.update_genetic()

    def use_token(self, token):
        """Forward token usage to the genetic trainer."""
        return self.genetic.use_token(token)

    def get_token(self):
        """Return a free token from the genetic trainer."""
        return self.genetic.free_token()

    def plot_losses(self):
        """Render the recorded training losses as a linear plot."""
        return plot.linear(self.losses)

    def copy(self):
        """Return a shallow working copy of this service (with a copied model)."""
        service = Service(self.inputs, self.outputs)
        service.layers = self.layers.copy()
        service.GAMMA = self.GAMMA
        service.batch_size = self.batch_size
        service.online_learning = self.online_learning
        service.date = self.date  # may be real date
        service.description = 'tmp'
        service.lr = self.lr
        service.opt = self.opt
        service.active = self.active
        service.update_service()
        service.model = self.model.copy()  # torch model must have copy()
        return service

    def init_genetic(self):
        """Create a genetic trainer bound to this service."""
        self.genetic = Genetic(service=self)

    def update_genetic(self):
        """Create or drop the genetic trainer to match the current flag."""
        if self.genetic_learning and not self.genetic:  # start genetic
            self.init_genetic()
        if not self.genetic_learning:  # remove genetic
            self.genetic = None

    def update_service(self, form=None):
        """Apply a submitted settings form (if any) and sync the model.

        ``form`` is a multi-dict (e.g. Flask request.form) or None; when
        None only the model is re-synchronized with the current settings.
        """
        self.update_genetic()
        if form is not None:
            # checklist options (multi-select checkboxes)
            self.options(form.getlist('options'))
            form = form.to_dict()
            # online learning hyper-parameters
            if self.online_learning:
                try:
                    self.lr_percent = form['lr_percent']
                    self.lr = float(form['lr'])
                    self.opt = form['opt']
                    self.GAMMA = float(form['GAMMA'])
                    self.batch_size = int(form['batch_size'])
                except (KeyError, ValueError):
                    # incomplete/invalid form -> keep current values
                    pass
            # genetic hyper-parameters (each field is optional)
            if self.genetic_learning:
                if 'mr' in form.keys():
                    self.mr = float(form['mr'])
                if 'psi' in form.keys():
                    self.genetic.psi = float(form['psi'])
                if 'childrens' in form.keys():
                    self.genetic.childrens = int(form['childrens'])
                if 'population_size' in form.keys():
                    self.population_size = int(form['population_size'])
            # hidden layer configuration from 'l0', 'l1', ... form fields
            for n in range(len(self.layers)):
                try:
                    l = form['l' + str(n)]
                    l = int(l)
                    if l <= 0:
                        # NOTE(review): original fell through after truncating
                        # (a bare `pass`); behaviour kept as-is.
                        self.layers = self.layers[:n - 1]
                    self.layers[n] = l
                except Exception:
                    self.layers = self.layers[:n - 1]
                    pass
        # re-sync the model with the (possibly changed) settings;
        # value comparison here, identity (`is not`) was unreliable
        if self.layers != self.model.layers:
            self.model = model.Model_deep(self.inputs, self.outputs,
                                          layers=self.layers.copy())
        if self.lr != self.model.lr:
            self.model.update_optimizer(lr=self.lr)
        if self.GAMMA != self.model.GAMMA:
            self.model.GAMMA = self.GAMMA
        if self.opt != self.model.opt:
            self.model.update_optimizer(opt=self.opt)

    def options(self, options):
        """Set the learning-mode flags from the submitted checklist."""
        self.online_learning = 'online_learning' in options
        self.genetic_learning = 'genetic_learning' in options
        self.update_genetic()

    def finish(self, token, data):
        """Report a finished genetic run: ``data`` is '...$<reward>' with a
        decimal comma. Returns 'null' when genetic learning is inactive."""
        if not self.genetic:
            return 'null'
        data = data.split('$')[1]
        data = data.replace(',', '.')
        reward = float(data)
        self.genetic.finish(token, reward)

    def forward(self, x):
        """Run one forward pass and serialize the result (see from_tensor)."""
        x = self.to_tensor(x)
        x = self.model.forward(x.view((1, -1)))
        return self.from_tensor(x)

    def add(self, state, action, reward):
        """Append one (state, action, reward) sample to the replay batch.

        No-op when online learning is off or this is the main service.
        """
        if not self.online_learning:
            return None
        if self.main_service:
            return None
        state = self.to_tensor(state)
        action = self.to_tensor(action)
        reward = self.to_tensor(reward)
        self.batch.append((state, action, reward))

    def train_on_batch(self):
        """Train the model on the collected batch and clear it.

        BUG FIX: the original called ``data_from_batch()`` without
        ``self.`` and raised NameError.
        """
        x, y, r = self.data_from_batch()
        loss = self.model.train(x, y, r)
        self.batch = []
        return loss

    def data_from_batch(self):
        """Stack the replay batch into (states, actions, rewards) tensors."""
        x = t.stack([sample[0] for sample in self.batch])
        y = t.stack([sample[1] for sample in self.batch])
        r = t.stack([sample[2] for sample in self.batch])
        return x, y, r

    def to_tensor(self, x):
        """Convert a (nested) sequence of numbers to a float tensor."""
        x = np.array(x).astype(float)  # np.float was removed in NumPy >= 1.20
        x = t.FloatTensor(x)
        return x

    def from_tensor(self, x):
        """Serialize a tensor as 'v1;v2;...' using decimal commas."""
        parts = [str(v.item()) for v in x.view(-1)]
        return ";".join(parts).replace(".", ",")

    def n_layers(self):
        """Number of configured hidden layers."""
        return len(self.layers)
| [
"numpy.float",
"torch.stack",
"datetime.datetime.now",
"numpy.array",
"app.ai.plot.linear",
"app.ai.genetic.Genetic",
"app.ai.model.Model_deep",
"numpy.int",
"torch.FloatTensor"
] | [((960, 1003), 'app.ai.model.Model_deep', 'model.Model_deep', (['self.inputs', 'self.outputs'], {}), '(self.inputs, self.outputs)\n', (976, 1003), True, 'import app.ai.model as model\n'), ((1271, 1295), 'app.ai.plot.linear', 'plot.linear', (['self.losses'], {}), '(self.losses)\n', (1282, 1295), True, 'import app.ai.plot as plot\n'), ((1937, 1958), 'app.ai.genetic.Genetic', 'Genetic', ([], {'service': 'self'}), '(service=self)\n', (1944, 1958), False, 'from app.ai.genetic import Genetic\n'), ((4868, 4882), 'numpy.float', 'np.float', (['data'], {}), '(data)\n', (4876, 4882), True, 'import numpy as np\n'), ((5827, 5862), 'torch.stack', 't.stack', (['[t[0] for t in self.batch]'], {}), '([t[0] for t in self.batch])\n', (5834, 5862), True, 'import torch as t\n'), ((5876, 5911), 'torch.stack', 't.stack', (['[t[1] for t in self.batch]'], {}), '([t[1] for t in self.batch])\n', (5883, 5911), True, 'import torch as t\n'), ((5925, 5960), 'torch.stack', 't.stack', (['[t[2] for t in self.batch]'], {}), '([t[2] for t in self.batch])\n', (5932, 5960), True, 'import torch as t\n'), ((6112, 6128), 'torch.FloatTensor', 't.FloatTensor', (['x'], {}), '(x)\n', (6125, 6128), True, 'import torch as t\n'), ((847, 870), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (868, 870), False, 'import datetime\n'), ((6029, 6040), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (6037, 6040), True, 'import numpy as np\n'), ((2598, 2618), 'numpy.float', 'np.float', (["form['lr']"], {}), "(form['lr'])\n", (2606, 2618), True, 'import numpy as np\n'), ((2721, 2744), 'numpy.float', 'np.float', (["form['GAMMA']"], {}), "(form['GAMMA'])\n", (2729, 2744), True, 'import numpy as np\n'), ((2802, 2828), 'numpy.int', 'np.int', (["form['batch_size']"], {}), "(form['batch_size'])\n", (2808, 2828), True, 'import numpy as np\n'), ((3014, 3034), 'numpy.float', 'np.float', (["form['mr']"], {}), "(form['mr'])\n", (3022, 3034), True, 'import numpy as np\n'), ((3117, 3138), 'numpy.float', 
'np.float', (["form['psi']"], {}), "(form['psi'])\n", (3125, 3138), True, 'import numpy as np\n'), ((3233, 3258), 'numpy.int', 'np.int', (["form['childrens']"], {}), "(form['childrens'])\n", (3239, 3258), True, 'import numpy as np\n'), ((3360, 3391), 'numpy.int', 'np.int', (["form['population_size']"], {}), "(form['population_size'])\n", (3366, 3391), True, 'import numpy as np\n'), ((3562, 3571), 'numpy.int', 'np.int', (['l'], {}), '(l)\n', (3568, 3571), True, 'import numpy as np\n')] |
"""
Reinforcement Learning Using Q-learning, Double Q-learning, and Dyna-Q.
Copyright (c) 2020 <NAME>
References
----------
- Based on project 7 in the Georgia Tech Spring 2020 course "Machine Learning
for Trading" by Prof. <NAME>.
- Course: http://quantsoftware.gatech.edu/CS7646_Spring_2020
- Project: http://quantsoftware.gatech.edu/Spring_2020_Project_7:_Qlearning_Robot
- Main book reference: Sutton and Barto, "Reinforcement Learning: An Introduction"
(http://incompleteideas.net/book/the-book-2nd.html)
Characteristics
---------------
- The code has been written and tested in Python 3.7.7.
- Q-learning implementation for reinforcement learning.
- Options: basic Q-learning, Dyna-Q (for model planning), double Q-learning (to
avoid maximization bias).
- Dyna-Q has been implemented with both a deterministic model and a probabilistic
model.
- The deterministic model and probabilistic model have both two versions, one
using dictionaries (less memory but slower) and one using arrays (more memory
but faster).
- Double Q-learning can be used with basic Q-learning as well as with Dyna-Q.
- The Q-learning class in <QLearner.py> can be used for any reinforcement learning
problem, while <robot.py> and <test.py> are specific for a grid-world type of
problem (i.e. finding the best policy to go from a start point to a goal point).
- Usage: python test.py <csv-filename>.
Parameters
----------
sys.argv[1]
File name with the map layout passed as argument. It must be in a csv file,
with the map elements specified using integer numbers.
map_elements
List of elements allowed in the map layout.
reward_list
List of rewards associated to each element in <map_elements>.
move_list
List of allowed moves for the robot.
episodes
Number of episodes (each episode is a trip from start to goal)
max_steps
Maximum number of steps allowed to reach the goal (for each episode).
0 <= random_rate <= 1
Probability the robot will move randomly instead to move as required.
0 <= alpha <= 1
Learning rate (used to vary the weight given to new experiences compared with
past Q-values).
0 <= gamma <= 1
Discount factor (used to progressively reduce the value of future rewards).
0 <= rar <= 1
Probability of selecting a random action instead of using the action derived
from the Q-table(s) (i.e. probability to explore).
0 <= radr <= 1
Rate decay for the probability to explore (used to reduce the probability to
explore with time).
dyna >= 0
Number of simulated updates in Dyna-Q (when equal to zero Dyna-Q is not used).
model_type = 1, 2, 3, 4
Type of model used for the simulation in Dyna-Q (1-2 are deterministic models,
3-4 are probabilistic models).
double_Q = True, False
Specifies if double Q-learning is used (to avoid maximization bias).
Examples
--------
All examples are for the map layout in `map.csv`. All initial data are as in this
file, except when differently specified.
- Basic Q-learning, episodes = 1000, dyna = 0
REWARDS: mean = -63.1, median = -32.0, std = 109.8
STEPS: mean = 62.1, median = 34.0, std = 96.3
Number of updates done: 62085
BEST PATH: rewards = -22.0, Steps = 24.0
- Double Q learning, episodes = 1000, dyna = 0
REWARDS: mean = -85.0, median = -40.0, std = 132.7
STEPS: mean = 85.5, median = 42.0, std = 130.5
Number of updates done: 85473
BEST PATH: rewards = -22.0, Steps = 24.0
- Double Q-learning, episodes = 50, dyna = 200, model_type = 1
REWARDS: mean = -70.7, median = -28.0, std = 158.5
STEPS: mean = 52.9, median = 30.0, std = 93.5
Number of updates done: 531243
BEST PATH: rewards = -22.0, Steps = 24.0
- Basic Q-learning, episodes = 50, dyna = 200, model_type = 4
REWARDS: mean = -92.7, median = -42.5, std = 183.9
STEPS: mean = 76.9, median = 44.5, std = 94.5
Number of updates done: 567340
Number of updates skipped: 205103
BEST PATH: rewards = -22.0, Steps = 24.0
- Basic Q-learning, episodes = 1000, dyna = 0, but using an 8-way robot
REWARDS: mean = -66.6, median = -25.0, std = 120.9
STEPS: mean = 63.3, median = 27.0, std = 100.1
Number of updates done: 63261
BEST PATH: rewards = -13.0, Steps = 15.0
"""
import sys
import numpy as np
import QLearner as ql
import robot as rb
# ---------------------------------------------------------------------------
# Grid-world definition
# ---------------------------------------------------------------------------
# Elements allowed in the map (integer codes used in the csv layout)
map_elements = [' ',            # 0 = empty space
                '#',            # 1 = wall/obstacle
                'S',            # 2 = start (must be defined)
                'G',            # 3 = goal (must be defined)
                '~']            # 4 = sand
# Rewards (must correspond, element by element, to <map_elements>)
reward_list = np.array([-1.0,      # empty space
                        -1.0,      # wall/obstacle
                        -1.0,      # start (walk-back)
                        +1.0,      # goal
                        -100.0])   # sand
# Directions of motion (4-way robot), one (row, col) step per action
move_list = np.array([[-1,  0],    # Go North one step
                      [ 0, +1],    # Go East one step
                      [+1,  0],    # Go South one step
                      [ 0, -1]])   # Go West one step
# Directions of motion (8-way robot) -- uncomment to allow diagonal moves
# move_list = np.array([[-1,  0],      # Go North one step
#                       [-1, +1],      # Go North-East one step
#                       [ 0, +1],      # Go East one step
#                       [+1, +1],      # Go South-East one step
#                       [+1,  0],      # Go South one step
#                       [+1, -1],      # Go South-West one step
#                       [ 0, -1],      # Go West one step
#                       [-1, -1]])     # Go North-West one step
# Other grid-world parameters
episodes = 1000         # Number of episodes (each episode is a start-to-goal trip)
max_steps = 10000       # Max. number of steps for each episode
random_rate = 0.2       # Probability the robot will move randomly
# Q-learner parameters
alpha = 0.2             # Learning rate
gamma = 0.9             # Discount factor
rar = 0.50              # Probability to explore
radr = 0.99             # Rate decay for the probability to explore
dyna = 0                # Number of simulated updates in Dyna-Q (not used if zero)
model_type = 1          # Type of model used for the simulation in Dyna-Q
                        # 1 = Deterministic model (T and R defined as dictionaries)
                        # 2 = Deterministic model (T and R defined as arrays)
                        # 3 = Probabilistic model (T and R defined as dictionaries)
                        # 4 = Probabilistic model (T and R defined as arrays)
double_Q = False        # True = use double Q-learning
                        # False = don't use double Q-learning
# ======= Main Code ======= #
np.random.seed(1)       # fixed seed for reproducible runs
# Read the map layout from the csv file specified on the command line
if (len(sys.argv) != 2):
    print("Usage: python test.py <csv-filename>")
    sys.exit(1)
map_layout = np.asarray(np.loadtxt(sys.argv[1], delimiter=','), dtype=int)
# Initialize robot and map quantities
bot = rb.robot(map_layout, map_elements, reward_list, move_list, max_steps=max_steps,
               random_rate=random_rate)
# Initialize the Q-learner (one state per map cell, one action per move)
num_states = map_layout.size
num_actions = move_list.shape[0]
learner = ql.QLearner(num_states, num_actions, alpha=alpha, gamma=gamma, rar=rar,
                       radr=radr, dyna=dyna, double_Q=double_Q, model_type=model_type)
# Build the Q-table(s) by running all the episodes
scores, steps = bot.optimize_path(learner, episodes)
# Print summary statistics over all episodes
print()
print("REWARDS: mean = {0:6.1f}, median = {1:6.1f}, std = {2:5.1f}"
      .format(np.mean(scores), np.median(scores), np.std(scores)))
print("STEPS: mean = {0:6.1f}, median = {1:6.1f}, std = {2:5.1f}"
      .format(np.mean(steps), np.median(steps), np.std(steps)))
print("Number of updates done: ", learner.count_update_Q)
if (dyna > 0 and (model_type == 2 or model_type == 4)):
    print("Number of updates skipped: ", learner.count_skip)
# Print best map and corresponding rewards and steps (greedy policy)
best_map, best_reward, best_step = bot.best_path(learner)
bot.show_map(best_map)
print("BEST PATH: rewards = {0:5.1f}, Steps = {1:5.1f}".
      format(best_reward, best_step))
| [
"numpy.mean",
"numpy.median",
"QLearner.QLearner",
"robot.robot",
"numpy.std",
"numpy.array",
"numpy.random.seed",
"sys.exit",
"numpy.loadtxt"
] | [((4817, 4859), 'numpy.array', 'np.array', (['[-1.0, -1.0, -1.0, +1.0, -100.0]'], {}), '([-1.0, -1.0, -1.0, +1.0, -100.0])\n', (4825, 4859), True, 'import numpy as np\n'), ((5097, 5143), 'numpy.array', 'np.array', (['[[-1, 0], [0, +1], [+1, 0], [0, -1]]'], {}), '([[-1, 0], [0, +1], [+1, 0], [0, -1]])\n', (5105, 5143), True, 'import numpy as np\n'), ((6932, 6949), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (6946, 6949), True, 'import numpy as np\n'), ((7232, 7341), 'robot.robot', 'rb.robot', (['map_layout', 'map_elements', 'reward_list', 'move_list'], {'max_steps': 'max_steps', 'random_rate': 'random_rate'}), '(map_layout, map_elements, reward_list, move_list, max_steps=\n max_steps, random_rate=random_rate)\n', (7240, 7341), True, 'import robot as rb\n'), ((7452, 7591), 'QLearner.QLearner', 'ql.QLearner', (['num_states', 'num_actions'], {'alpha': 'alpha', 'gamma': 'gamma', 'rar': 'rar', 'radr': 'radr', 'dyna': 'dyna', 'double_Q': 'double_Q', 'model_type': 'model_type'}), '(num_states, num_actions, alpha=alpha, gamma=gamma, rar=rar,\n radr=radr, dyna=dyna, double_Q=double_Q, model_type=model_type)\n', (7463, 7591), True, 'import QLearner as ql\n'), ((7100, 7111), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7108, 7111), False, 'import sys\n'), ((7136, 7174), 'numpy.loadtxt', 'np.loadtxt', (['sys.argv[1]'], {'delimiter': '""","""'}), "(sys.argv[1], delimiter=',')\n", (7146, 7174), True, 'import numpy as np\n'), ((7796, 7811), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (7803, 7811), True, 'import numpy as np\n'), ((7813, 7830), 'numpy.median', 'np.median', (['scores'], {}), '(scores)\n', (7822, 7830), True, 'import numpy as np\n'), ((7832, 7846), 'numpy.std', 'np.std', (['scores'], {}), '(scores)\n', (7838, 7846), True, 'import numpy as np\n'), ((7933, 7947), 'numpy.mean', 'np.mean', (['steps'], {}), '(steps)\n', (7940, 7947), True, 'import numpy as np\n'), ((7949, 7965), 'numpy.median', 'np.median', (['steps'], {}), 
'(steps)\n', (7958, 7965), True, 'import numpy as np\n'), ((7967, 7980), 'numpy.std', 'np.std', (['steps'], {}), '(steps)\n', (7973, 7980), True, 'import numpy as np\n')] |
import torch
from torch.utils.data import Dataset
import os
from PIL import Image
import numpy as np
import PIL
import torch.nn as nn
from config import opt
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
import random
import math
class TextureDataset(Dataset):
    """Dataset wrapping texture images loaded eagerly from a folder.

    Arguments:
        img_path: path to the image folder
        transform: optional transform applied to each image in __getitem__
        scale: optional rescaling factor applied once at load time

    Fixes over the original:
      * ``__getitem__`` raised NameError when ``transform`` was None
        (``img2`` was only bound inside the transform branch),
      * removed the dead ``if False:`` branch and the pointless
        ``if True:`` wrapper in ``__init__``.
    """

    def __init__(self, img_path, transform=None, scale=1):
        self.img_path = img_path
        self.transform = transform
        self.X_train = []
        for n in os.listdir(img_path):
            name = os.path.join(self.img_path, n)
            try:
                img = Image.open(name)
                try:
                    img = img.convert('RGB')  # fixes truncated images
                except Exception:
                    pass
                if scale != 1:
                    img = img.resize((int(img.size[0] * scale), int(img.size[1] * scale)),
                                     PIL.Image.LANCZOS)
            except Exception as e:
                # unreadable file -> report and skip it
                print(e, name)
                continue
            self.X_train += [img]
            print(n, "img added", img.size, "total length", len(self.X_train))
            if len(self.X_train) > 4000:
                break  # usually want to avoid so many files
        # replicate the list so an epoch has a reasonable length
        if len(self.X_train) < 2000:
            c = int(2000 / len(self.X_train))
            self.X_train *= c

    def __getitem__(self, index):
        img = self.X_train[index]
        # BUG FIX: with transform=None the original referenced an unbound name
        img = self.transform(img) if self.transform is not None else img
        label = 0
        return img, label

    def __len__(self):
        return len(self.X_train)
# Run on the first GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if opt.zPeriodic:
    # 2*nPeriodic initial spread values
    # slowest wave 0.5 pi -- full cycle after 4 steps in noise tensor
    # fastest wave 1.5 pi step -- full cycle in 0.66 steps
    def initWave(nPeriodic):
        # Build the initial bank of wave numbers; the returned tensor has
        # shape (1, 2*nPeriodic, 1, 1) so it broadcasts over batch and space.
        buf = []
        for i in range(nPeriodic // 4+1):
            # 1e-10 in the denominator avoids division by zero when nPeriodic < 4
            v = 0.5 + i / float(nPeriodic//4+1e-10)
            buf += [0, v, v, 0]
            buf += [0, -v, v, 0]  # so waves from the other quadrants as well
        buf = buf[:2*nPeriodic]
        awave = np.array(buf, dtype=np.float32) * np.pi
        awave = torch.FloatTensor(awave).unsqueeze(-1).unsqueeze(-1).unsqueeze(0)
        return awave
    waveNumbers = initWave(opt.zPeriodic).to(device)

    class Waver(nn.Module):
        """Turns coordinate grids into (possibly learned) wave numbers."""
        def __init__(self, input_size=(25, 20, 5, 5)):
            super(Waver, self).__init__()
            if opt.zGL > 0:
                K = 60
                # NOTE(review): NZ is unpacked twice -- assumes square spatial dims
                batch_size, zGl, NZ, NZ = input_size
                # small MLP that predicts wave-number offsets from the global latent
                layers = [nn.Flatten(start_dim=0, end_dim=-1)]
                layers += [nn.Linear(batch_size * zGl * NZ * NZ, K)]
                layers += [nn.ReLU(True)]
                layers += [nn.Linear(K, batch_size * 2 * opt.zPeriodic * NZ * NZ)]
                self.learnedWN = nn.Sequential(*layers)
            else:  # static learned wave numbers (no dependence on the global latent)
                self.learnedWN = nn.Parameter(torch.zeros(opt.zPeriodic * 2).uniform_(-1, 1).unsqueeze(-1).unsqueeze(-1).unsqueeze(0) * 0.2)

        def forward(self, c, zGL=None):
            if opt.zGL > 0:
                #c shape: (batch_size, 2*opt.zPeriodic, NZ, NZ)
                #waveNumbers shape: (1, 2*opt.zPeriodic, 1, 1)
                #self.learnedWN(zGL) output shape : (batch_size, 2*opt.zPeriodic, NZ, NZ)
                #returned shape will be : (batch_size, 2*opt.zPeriodic, NZ, NZ)
                learned_wavenumbers = self.learnedWN(zGL).view(opt.batch_size, 2*opt.zPeriodic, opt.NZ, opt.NZ)
                return (waveNumbers + 5*learned_wavenumbers) * c
            return (waveNumbers + self.learnedWN) * c
    learnedWN = Waver(input_size=(opt.batch_size, opt.zGL, opt.NZ, opt.NZ))
else:
    learnedWN = None
# Sample a fresh noise tensor (global + periodic parts).
# NOTE(review): despite the original "inplace" comment, `detach() * 1.0`
# copies the input, so the caller's tensor is NOT modified -- use the
# returned value.
def setNoise(noise):
    """Return a freshly sampled noise tensor shaped like *noise*.

    The first opt.zGL channels hold a spatially-constant "global" latent,
    the last opt.zPeriodic channels hold sinusoidal waves whose wave
    numbers come from the module-level `learnedWN`.
    """
    noise = noise.detach() * 1.0
    noise.uniform_(-1, 1)  # normal_(0, 1)
    if opt.zGL:
        # broadcast one sample over the full spatial grid -> globally shared latent
        noise[:, :opt.zGL] = noise[:, :opt.zGL, :1, :1].repeat(1, 1, noise.shape[2], noise.shape[3])
    if opt.zPeriodic:
        # build a canonical (x, y) coordinate grid matching the noise resolution
        xv, yv = torch.meshgrid(
            torch.arange(noise.shape[2], dtype=torch.float, device=device),
            torch.arange(noise.shape[3], dtype=torch.float, device=device),
        )
        c = torch.cat((xv.unsqueeze(0), yv.unsqueeze(0)), 0).unsqueeze(0)
        c = c.repeat(noise.shape[0], opt.zPeriodic, 1, 1)
        # now c has canonic coordinate system -- multiply by wave numbers
        raw = learnedWN(c, noise[:, :opt.zGL])
        # random phase offset, it mimics random positional extraction of patches from the real images
        offset = (noise[:, -opt.zPeriodic:, :1, :1] * 1.0).uniform_(-1, 1) * 6.28
        offset = offset.repeat(1, 1, noise.shape[2], noise.shape[3])
        # combine x- and y- components into one sinusoid per periodic channel
        wave = torch.sin(raw[:, ::2] + raw[:, 1::2] + offset)
        noise[:, -opt.zPeriodic:] = wave
    return noise
def save_model(epoch, generator, generator_optimizer, discriminator, discriminator_optimizer, output_folder):
    """Persist the generator weights and its optimizer state for *epoch*.

    Only the generator side is written; the discriminator arguments are
    accepted (to keep the call site uniform) but currently unused.
    """
    model_path = os.path.join(output_folder, f"generator_model_e{epoch}.pth")
    optim_path = os.path.join(output_folder, f"generator_optimizer_e{epoch}.pth")
    torch.save(generator.state_dict(), model_path)
    torch.save(generator_optimizer.state_dict(), optim_path)
def plot_loss(log_dir):
    """Render training-curve figures from the csv log found under *log_dir*.

    Two images are written to ``<log_dir>/plot``:
      * ``loss.jpg``     -- generator and discriminator loss curves
      * ``D_output.jpg`` -- discriminator outputs on real / fake images
    """
    plt.figure(figsize=(5, 5))
    # first csv file anywhere below log_dir holds the training history
    csv_file = list(Path(log_dir).rglob("*.csv"))[0]
    history = pd.read_csv(csv_file, index_col=None)
    plot_dir = os.path.join(log_dir, "plot")
    os.makedirs(plot_dir, exist_ok=True)

    # --- figure 1: generator / discriminator loss curves ---
    loss_panels = [("lossG", "r", "generator_loss"),
                   ("lossD", "b", "discriminator_loss_real&fakeimg")]
    for idx, (col, color, title) in enumerate(loss_panels, start=1):
        plt.subplot(2, 1, idx)
        plt.plot(history["epoch"], history[col], color=color)
        plt.xlabel("epoch")
        plt.title(title)
    plt.tight_layout(pad=2.0)
    plt.savefig(os.path.join(plot_dir, "loss.jpg"))
    plt.close()

    # --- figure 2: raw discriminator outputs ---
    output_panels = [("D_x", "D_output_on_realimgs"),
                     ("D_G_z1", "D_output_on_fakeimgs_fakelabel"),
                     ("D_G_z2", "D_output_on_fakeimgs_reallabel")]
    for idx, (col, title) in enumerate(output_panels, start=1):
        plt.subplot(3, 1, idx)
        plt.plot(history["epoch"], history[col], color="r")
        plt.xlabel("epoch")
        plt.title(title)
    plt.tight_layout(h_pad=1.0)
    plt.savefig(os.path.join(plot_dir, "D_output.jpg"))
    plt.close()
def smooth_real_labels(y, percentage=0.382):
    """One-sided label smoothing for the "real" labels of a GAN.

    A random subset (``percentage`` of all entries, rounded up) of *y* is
    replaced by values drawn uniformly from [0.7, 1.2), following the
    GAN-hacks recommendation (S. Chintala) of avoiding hard 1.0 labels.

    Note: the tensor is modified through a view, so *y* itself changes in
    place; the reshaped tensor is also returned for convenience.

    Parameters
    ----------
    y : torch.Tensor
        Tensor of real labels (any shape).
    percentage : float
        Fraction of entries to smooth, 0 < percentage <= 1.

    Returns
    -------
    torch.Tensor of the same shape as *y*.
    """
    flat = y.view(-1)
    total = flat.numel()
    amount = math.ceil(total * percentage)
    # PERF FIX: the original scanned every index and tested membership in a
    # list (O(n*m)); iterating the sampled indices directly is O(m).
    for i in random.sample(range(total), amount):
        flat[i] = 1 - 0.3 + (random.random() * 0.5)
    return flat.view(y.shape)
"torch.nn.ReLU",
"pandas.read_csv",
"torch.nn.Sequential",
"torch.sin",
"numpy.array",
"torch.cuda.is_available",
"torch.arange",
"os.listdir",
"pathlib.Path",
"torch.nn.Flatten",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.title",
... | [((6041, 6067), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (6051, 6067), True, 'import matplotlib.pyplot as plt\n'), ((6153, 6190), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {'index_col': 'None'}), '(csv_path, index_col=None)\n', (6164, 6190), True, 'import pandas as pd\n'), ((6205, 6234), 'os.path.join', 'os.path.join', (['log_dir', '"""plot"""'], {}), "(log_dir, 'plot')\n", (6217, 6234), False, 'import os\n'), ((6237, 6274), 'os.makedirs', 'os.makedirs', (['loss_path'], {'exist_ok': '(True)'}), '(loss_path, exist_ok=True)\n', (6248, 6274), False, 'import os\n'), ((6278, 6298), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (6289, 6298), True, 'import matplotlib.pyplot as plt\n'), ((6299, 6344), 'matplotlib.pyplot.plot', 'plt.plot', (["df['epoch']", "df['lossG']"], {'color': '"""r"""'}), "(df['epoch'], df['lossG'], color='r')\n", (6307, 6344), True, 'import matplotlib.pyplot as plt\n'), ((6347, 6366), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (6357, 6366), True, 'import matplotlib.pyplot as plt\n'), ((6369, 6396), 'matplotlib.pyplot.title', 'plt.title', (['"""generator_loss"""'], {}), "('generator_loss')\n", (6378, 6396), True, 'import matplotlib.pyplot as plt\n'), ((6402, 6422), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (6413, 6422), True, 'import matplotlib.pyplot as plt\n'), ((6423, 6468), 'matplotlib.pyplot.plot', 'plt.plot', (["df['epoch']", "df['lossD']"], {'color': '"""b"""'}), "(df['epoch'], df['lossD'], color='b')\n", (6431, 6468), True, 'import matplotlib.pyplot as plt\n'), ((6471, 6490), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (6481, 6490), True, 'import matplotlib.pyplot as plt\n'), ((6493, 6537), 'matplotlib.pyplot.title', 'plt.title', (['"""discriminator_loss_real&fakeimg"""'], {}), "('discriminator_loss_real&fakeimg')\n", 
(6502, 6537), True, 'import matplotlib.pyplot as plt\n'), ((6543, 6568), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(2.0)'}), '(pad=2.0)\n', (6559, 6568), True, 'import matplotlib.pyplot as plt\n'), ((6621, 6632), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6630, 6632), True, 'import matplotlib.pyplot as plt\n'), ((6701, 6721), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (6712, 6721), True, 'import matplotlib.pyplot as plt\n'), ((6722, 6765), 'matplotlib.pyplot.plot', 'plt.plot', (["df['epoch']", "df['D_x']"], {'color': '"""r"""'}), "(df['epoch'], df['D_x'], color='r')\n", (6730, 6765), True, 'import matplotlib.pyplot as plt\n'), ((6768, 6787), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (6778, 6787), True, 'import matplotlib.pyplot as plt\n'), ((6790, 6823), 'matplotlib.pyplot.title', 'plt.title', (['"""D_output_on_realimgs"""'], {}), "('D_output_on_realimgs')\n", (6799, 6823), True, 'import matplotlib.pyplot as plt\n'), ((6827, 6847), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (6838, 6847), True, 'import matplotlib.pyplot as plt\n'), ((6848, 6894), 'matplotlib.pyplot.plot', 'plt.plot', (["df['epoch']", "df['D_G_z1']"], {'color': '"""r"""'}), "(df['epoch'], df['D_G_z1'], color='r')\n", (6856, 6894), True, 'import matplotlib.pyplot as plt\n'), ((6897, 6916), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (6907, 6916), True, 'import matplotlib.pyplot as plt\n'), ((6919, 6962), 'matplotlib.pyplot.title', 'plt.title', (['"""D_output_on_fakeimgs_fakelabel"""'], {}), "('D_output_on_fakeimgs_fakelabel')\n", (6928, 6962), True, 'import matplotlib.pyplot as plt\n'), ((6966, 6986), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (6977, 6986), True, 'import matplotlib.pyplot as plt\n'), ((6987, 7033), 'matplotlib.pyplot.plot', 'plt.plot', 
(["df['epoch']", "df['D_G_z2']"], {'color': '"""r"""'}), "(df['epoch'], df['D_G_z2'], color='r')\n", (6995, 7033), True, 'import matplotlib.pyplot as plt\n'), ((7036, 7055), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (7046, 7055), True, 'import matplotlib.pyplot as plt\n'), ((7058, 7101), 'matplotlib.pyplot.title', 'plt.title', (['"""D_output_on_fakeimgs_reallabel"""'], {}), "('D_output_on_fakeimgs_reallabel')\n", (7067, 7101), True, 'import matplotlib.pyplot as plt\n'), ((7107, 7134), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'h_pad': '(1.0)'}), '(h_pad=1.0)\n', (7123, 7134), True, 'import matplotlib.pyplot as plt\n'), ((7191, 7202), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7200, 7202), True, 'import matplotlib.pyplot as plt\n'), ((7413, 7452), 'math.ceil', 'math.ceil', (['(len_unraveled_y * percentage)'], {}), '(len_unraveled_y * percentage)\n', (7422, 7452), False, 'import math\n'), ((2133, 2158), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2156, 2158), False, 'import torch\n'), ((4977, 5023), 'torch.sin', 'torch.sin', (['(raw[:, ::2] + raw[:, 1::2] + offset)'], {}), '(raw[:, ::2] + raw[:, 1::2] + offset)\n', (4986, 5023), False, 'import torch\n'), ((6583, 6618), 'os.path.join', 'os.path.join', (['loss_path', '"""loss.jpg"""'], {}), "(loss_path, 'loss.jpg')\n", (6595, 6618), False, 'import os\n'), ((7149, 7188), 'os.path.join', 'os.path.join', (['loss_path', '"""D_output.jpg"""'], {}), "(loss_path, 'D_output.jpg')\n", (7161, 7188), False, 'import os\n'), ((662, 682), 'os.listdir', 'os.listdir', (['img_path'], {}), '(img_path)\n', (672, 682), False, 'import os\n'), ((1753, 1769), 'PIL.Image.open', 'Image.open', (['name'], {}), '(name)\n', (1763, 1769), False, 'from PIL import Image\n'), ((2612, 2643), 'numpy.array', 'np.array', (['buf'], {'dtype': 'np.float32'}), '(buf, dtype=np.float32)\n', (2620, 2643), True, 'import numpy as np\n'), ((4340, 4402), 
'torch.arange', 'torch.arange', (['noise.shape[2]'], {'dtype': 'torch.float', 'device': 'device'}), '(noise.shape[2], dtype=torch.float, device=device)\n', (4352, 4402), False, 'import torch\n'), ((4411, 4473), 'torch.arange', 'torch.arange', (['noise.shape[3]'], {'dtype': 'torch.float', 'device': 'device'}), '(noise.shape[3], dtype=torch.float, device=device)\n', (4423, 4473), False, 'import torch\n'), ((763, 793), 'os.path.join', 'os.path.join', (['self.img_path', 'n'], {}), '(self.img_path, n)\n', (775, 793), False, 'import os\n'), ((3250, 3272), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (3263, 3272), True, 'import torch.nn as nn\n'), ((841, 857), 'PIL.Image.open', 'Image.open', (['name'], {}), '(name)\n', (851, 857), False, 'from PIL import Image\n'), ((3017, 3052), 'torch.nn.Flatten', 'nn.Flatten', ([], {'start_dim': '(0)', 'end_dim': '(-1)'}), '(start_dim=0, end_dim=-1)\n', (3027, 3052), True, 'import torch.nn as nn\n'), ((3073, 3113), 'torch.nn.Linear', 'nn.Linear', (['(batch_size * zGl * NZ * NZ)', 'K'], {}), '(batch_size * zGl * NZ * NZ, K)\n', (3082, 3113), True, 'import torch.nn as nn\n'), ((3134, 3147), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (3141, 3147), True, 'import torch.nn as nn\n'), ((3168, 3222), 'torch.nn.Linear', 'nn.Linear', (['K', '(batch_size * 2 * opt.zPeriodic * NZ * NZ)'], {}), '(K, batch_size * 2 * opt.zPeriodic * NZ * NZ)\n', (3177, 3222), True, 'import torch.nn as nn\n'), ((7607, 7622), 'random.random', 'random.random', ([], {}), '()\n', (7620, 7622), False, 'import random\n'), ((6113, 6126), 'pathlib.Path', 'Path', (['log_dir'], {}), '(log_dir)\n', (6117, 6126), False, 'from pathlib import Path\n'), ((2664, 2688), 'torch.FloatTensor', 'torch.FloatTensor', (['awave'], {}), '(awave)\n', (2681, 2688), False, 'import torch\n'), ((3331, 3361), 'torch.zeros', 'torch.zeros', (['(opt.zPeriodic * 2)'], {}), '(opt.zPeriodic * 2)\n', (3342, 3361), False, 'import torch\n')] |
from PIL import Image, ImageDraw
import sys
import math, random
from itertools import product
from utils.ufarray import *
import numpy as np
def perform_dws(dws_energy, class_map, bbox_map, cutoff=0, min_ccoponent_size=0, return_ccomp_img=False):
    """Convert deep-watershed network outputs into a list of bounding boxes.

    The energy map is thresholded at `cutoff`, connected components are
    found, undersized components are dropped, and each remaining component
    yields one box [xmin, ymin, xmax, ymax, class].

    :param dws_energy: watershed energy map (squeezed to 2-D)
    :param class_map: per-pixel class predictions (squeezed to 2-D)
    :param bbox_map: per-pixel box-size predictions (squeezed)
    :param cutoff: energy threshold; pixels <= cutoff are marked 255
    :param min_ccoponent_size: components with fewer pixels are discarded
    :param return_ccomp_img: if True, also return the colorized component image
    :return: list of boxes, optionally followed by the component image
    """
    dws_energy = np.squeeze(dws_energy)
    class_map = np.squeeze(class_map)
    bbox_map = np.squeeze(bbox_map)

    # Threshold and binarize the energy map.
    binar_energy = (dws_energy <= cutoff) * 255

    # Connected components; find_connected_comp works with inverted indices,
    # hence the transpose.
    labels, out_img = find_connected_comp(np.transpose(binar_energy))

    # Group pixel coordinates by component label (insertion order preserved).
    components = {}
    for coord, lbl in labels.items():
        components.setdefault(lbl, []).append(coord)

    bbox_list = []
    for pix in components.values():
        # Skip components that are too small.
        if len(pix) < min_ccoponent_size:
            continue
        coords = np.asanyarray(pix)
        # Component center: average of all pixel coordinates.
        center = np.average(coords, 0).astype(int)
        # Majority vote for the class --> note transposed indexing.
        cls = np.bincount(class_map[coords[:, 1], coords[:, 0]]).argmax()
        # Box size: per-axis maximum over the component --> transposed.
        size = np.amax(bbox_map[coords[:, 1], coords[:, 0]], 0).astype(int)
        half_w = size[1] / 2.0
        half_h = size[0] / 2.0
        bbox_list.append([
            int(np.round(center[0] - half_w, 0)),  # xmin
            int(np.round(center[1] - half_h, 0)),  # ymin
            int(np.round(center[0] + half_w, 0)),  # xmax
            int(np.round(center[1] + half_h, 0)),  # ymax
            int(cls),
        ])
    if return_ccomp_img:
        return bbox_list, out_img
    return bbox_list
def get_class(component, class_map):
    """Placeholder for per-component class lookup; not implemented, always returns None."""
    return None
def get_bbox(component):
    """Placeholder for per-component bounding-box computation; not implemented.

    :param component: connected component (unused)
    :return: always None
    """
    # Fixed: the original signature had a stray trailing comma
    # (`def get_bbox(component,):`); the one-positional-argument call
    # interface is unchanged.
    return None
#
# Implements 8-connectivity connected component labeling
#
# Algorithm obtained from "Optimizing Two-Pass Connected-Component Labeling
# Algorithms" by <NAME>, <NAME>, and <NAME>
#
def find_connected_comp(input):
    """Two-pass 8-connectivity connected-component labeling.

    `input` is a 2-D array in which 0 marks foreground (component) pixels
    and 255 marks background.  Returns (labels, output_img): a dict mapping
    (x, y) -> component label, and a PIL image with each component drawn in
    a random color.

    NOTE(review): the parameter name `input` shadows the builtin of the
    same name; left unchanged because this pass is documentation-only.
    """
    data = input
    width, height = input.shape
    # Union find data structure
    uf = UFarray()
    #
    # First pass
    #
    # Dictionary of point:label pairs
    labels = {}
    for y, x in product(range(height), range(width)):
        #
        # Pixel names were chosen as shown:
        #
        #   -------------
        #   | a | b | c |
        #   -------------
        #   | d | e |   |
        #   -------------
        #   |   |   |   |
        #   -------------
        #
        # The current pixel is e
        # a, b, c, and d are its neighbors of interest
        #
        # 255 is white, 0 is black
        # White pixels are part of the background, so they are ignored
        # If a pixel lies outside the bounds of the image, it defaults to white
        #
        # If the current pixel is white, it's obviously not a component...
        if data[x, y] == 255:
            pass
        # If pixel b is in the image and black:
        #    a, d, and c are its neighbors, so they are all part of the same component
        #    Therefore, there is no reason to check their labels
        #    so simply assign b's label to e
        elif y > 0 and data[x, y - 1] == 0:
            labels[x, y] = labels[(x, y - 1)]
        # If pixel c is in the image and black:
        #    b is its neighbor, but a and d are not
        #    Therefore, we must check a and d's labels
        elif x + 1 < width and y > 0 and data[x + 1, y - 1] == 0:
            c = labels[(x + 1, y - 1)]
            labels[x, y] = c
            # If pixel a is in the image and black:
            #    Then a and c are connected through e
            #    Therefore, we must union their sets
            if x > 0 and data[x - 1, y - 1] == 0:
                a = labels[(x - 1, y - 1)]
                uf.union(c, a)
            # If pixel d is in the image and black:
            #    Then d and c are connected through e
            #    Therefore we must union their sets
            elif x > 0 and data[x - 1, y] == 0:
                d = labels[(x - 1, y)]
                uf.union(c, d)
        # If pixel a is in the image and black:
        #    We already know b and c are white
        #    d is a's neighbor, so they already have the same label
        #    So simply assign a's label to e
        elif x > 0 and y > 0 and data[x - 1, y - 1] == 0:
            labels[x, y] = labels[(x - 1, y - 1)]
        # If pixel d is in the image and black
        #    We already know a, b, and c are white
        #    so simpy assign d's label to e
        elif x > 0 and data[x - 1, y] == 0:
            labels[x, y] = labels[(x - 1, y)]
        # All the neighboring pixels are white,
        # Therefore the current pixel is a new component
        else:
            labels[x, y] = uf.makeLabel()
    #
    # Second pass: flatten the union-find so every label maps to its root,
    # then rewrite the per-pixel labels with the root component names.
    #
    uf.flatten()
    colors = {}
    # Image to display the components in a nice, colorful way
    output_img = Image.new("RGB", (width, height))
    outdata = output_img.load()
    for (x, y) in labels:
        # Name of the component the current point belongs to
        component = uf.find(labels[(x, y)])
        # Update the labels with correct information
        labels[(x, y)] = component
        # Associate a random color with this component
        if component not in colors:
            colors[component] = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
        # Colorize the image
        outdata[x, y] = colors[component]
return (labels, output_img) | [
"numpy.amax",
"numpy.average",
"PIL.Image.new",
"numpy.asanyarray",
"numpy.squeeze",
"numpy.transpose",
"numpy.bincount",
"random.randint",
"numpy.round"
] | [((286, 308), 'numpy.squeeze', 'np.squeeze', (['dws_energy'], {}), '(dws_energy)\n', (296, 308), True, 'import numpy as np\n'), ((325, 346), 'numpy.squeeze', 'np.squeeze', (['class_map'], {}), '(class_map)\n', (335, 346), True, 'import numpy as np\n'), ((362, 382), 'numpy.squeeze', 'np.squeeze', (['bbox_map'], {}), '(bbox_map)\n', (372, 382), True, 'import numpy as np\n'), ((5825, 5858), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(width, height)'], {}), "('RGB', (width, height))\n", (5834, 5858), False, 'from PIL import Image, ImageDraw\n'), ((546, 572), 'numpy.transpose', 'np.transpose', (['binar_energy'], {}), '(binar_energy)\n', (558, 572), True, 'import numpy as np\n'), ((1147, 1177), 'numpy.asanyarray', 'np.asanyarray', (['labels_inv[key]'], {}), '(labels_inv[key])\n', (1160, 1177), True, 'import numpy as np\n'), ((1264, 1310), 'numpy.average', 'np.average', (["labels_inv[key]['pixel_coords']", '(0)'], {}), "(labels_inv[key]['pixel_coords'], 0)\n", (1274, 1310), True, 'import numpy as np\n'), ((1406, 1511), 'numpy.bincount', 'np.bincount', (["class_map[labels_inv[key]['pixel_coords'][:, 1], labels_inv[key][\n 'pixel_coords'][:, 0]]"], {}), "(class_map[labels_inv[key]['pixel_coords'][:, 1], labels_inv[key\n ]['pixel_coords'][:, 0]])\n", (1417, 1511), True, 'import numpy as np\n'), ((1754, 1857), 'numpy.amax', 'np.amax', (["bbox_map[labels_inv[key]['pixel_coords'][:, 1], labels_inv[key][\n 'pixel_coords'][:, 0]]", '(0)'], {}), "(bbox_map[labels_inv[key]['pixel_coords'][:, 1], labels_inv[key][\n 'pixel_coords'][:, 0]], 0)\n", (1761, 1857), True, 'import numpy as np\n'), ((1968, 2054), 'numpy.round', 'np.round', (["(labels_inv[key]['center'][0] - labels_inv[key]['bbox_size'][1] / 2.0)", '(0)'], {}), "(labels_inv[key]['center'][0] - labels_inv[key]['bbox_size'][1] / \n 2.0, 0)\n", (1976, 2054), True, 'import numpy as np\n'), ((2083, 2169), 'numpy.round', 'np.round', (["(labels_inv[key]['center'][1] - labels_inv[key]['bbox_size'][0] / 2.0)", '(0)'], {}), 
"(labels_inv[key]['center'][1] - labels_inv[key]['bbox_size'][0] / \n 2.0, 0)\n", (2091, 2169), True, 'import numpy as np\n'), ((2198, 2284), 'numpy.round', 'np.round', (["(labels_inv[key]['center'][0] + labels_inv[key]['bbox_size'][1] / 2.0)", '(0)'], {}), "(labels_inv[key]['center'][0] + labels_inv[key]['bbox_size'][1] / \n 2.0, 0)\n", (2206, 2284), True, 'import numpy as np\n'), ((2313, 2399), 'numpy.round', 'np.round', (["(labels_inv[key]['center'][1] + labels_inv[key]['bbox_size'][0] / 2.0)", '(0)'], {}), "(labels_inv[key]['center'][1] + labels_inv[key]['bbox_size'][0] / \n 2.0, 0)\n", (2321, 2399), True, 'import numpy as np\n'), ((6238, 6260), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (6252, 6260), False, 'import math, random\n'), ((6262, 6284), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (6276, 6284), False, 'import math, random\n'), ((6286, 6308), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (6300, 6308), False, 'import math, random\n')] |
"""
Code to extract a box-like region, typically for another modeler to use
as a boundary contition. In cases where it gets velocity in addition to
the rho-grid variables the grid limits mimic the standard ROMS organization,
with the outermost corners being on the rho-grid.
Job definitions are in LO_user/extract/box/job_definitions.py
Testing:
run extract_box -gtx cas6_v3_lo8b -job sequim0 -test True
same but with all flags:
run extract_box -gtx cas6_v3_lo8b -ro 2 -0 2019.07.04 -1 2019.07.06 -lt daily -job sequim0 -test True
this command replicates what post/surface0 does
run extract_box -gtx cas6_v3_lo8b -ro 2 -0 2019.07.04 -1 2019.07.04 -lt hourly -job surface0 -uv_to_rho True -surf True
or
python extract_box.py -gtx cas6_v3_lo8b -ro 2 -0 2019.07.04 -1 2019.07.04 -lt hourly -job surface0 -uv_to_rho True -surf True
Performance: this is very fast, takes just a few seconds for three days on boiler (for yang_sequim).
"""
# imports
import sys
import argparse
from lo_tools import Lfun, zfun, zrfun
from subprocess import Popen as Po
from subprocess import PIPE as Pi
import os
from time import time
import numpy as np
import xarray as xr
# Announce this job's PID so long-running extractions can be monitored/killed.
pid = os.getpid()
print(' extract_box '.center(60,'='))
print('PID for this job = ' + str(pid))
# command line arguments
parser = argparse.ArgumentParser()
# which run to use
parser.add_argument('-gtx', '--gtagex', type=str)   # e.g. cas6_v3_l08b
parser.add_argument('-ro', '--roms_out_num', type=int) # 2 = Ldir['roms_out2'], etc.
# select time period and frequency
parser.add_argument('-0', '--ds0', type=str) # e.g. 2019.07.04
parser.add_argument('-1', '--ds1', type=str) # e.g. 2019.07.06
parser.add_argument('-lt', '--list_type', type=str) # list type: hourly, daily, weekly
# select job name
parser.add_argument('-job', type=str) # job name
# these flags get only surface or bottom fields if True
# - cannot have both True -
parser.add_argument('-surf', default=False, type=Lfun.boolean_string)
parser.add_argument('-bot', default=False, type=Lfun.boolean_string)
# set this to True to interpolate all u, and v fields to the rho-grid
parser.add_argument('-uv_to_rho', default=False, type=Lfun.boolean_string)
# Optional: set max number of subprocesses to run at any time
parser.add_argument('-Nproc', type=int, default=10)
# Optional: for testing
parser.add_argument('-test', '--testing', default=False, type=Lfun.boolean_string)
# get the args and put into Ldir
args = parser.parse_args()
# test that main required arguments were provided
argsd = args.__dict__
for a in ['gtagex']:
    # NOTE(review): `== None` should idiomatically be `is None`; behavior
    # is the same here (argparse yields None for missing args).
    if argsd[a] == None:
        print('*** Missing required argument: ' + a)
        sys.exit()
# gtagex is expected to be '<gridname>_<tag>_<ex_name>'
gridname, tag, ex_name = args.gtagex.split('_')
# get the dict Ldir
Ldir = Lfun.Lstart(gridname=gridname, tag=tag, ex_name=ex_name)
# add more entries to Ldir (command-line args that Lstart did not set)
for a in argsd.keys():
    if a not in Ldir.keys():
        Ldir[a] = argsd[a]
# testing: hard-wire a short, known-good extraction period
if Ldir['testing']:
    Ldir['roms_out_num'] = 2
    Ldir['ds0'] = '2019.07.04'
    Ldir['ds1'] = '2019.07.06'
    Ldir['list_type'] = 'daily'
# set where to look for model output (roms_out_num selects among roms_out1/2/...)
if Ldir['roms_out_num'] == 0:
    pass
elif Ldir['roms_out_num'] > 0:
    Ldir['roms_out'] = Ldir['roms_out' + str(Ldir['roms_out_num'])]
# check for input conflicts: surf and bot are mutually exclusive
if Ldir['surf'] and Ldir['bot']:
    print('Error: cannot have surf and bot both True.')
    sys.exit()
# output location; the filename encodes job, surf/bot choice, and date range
out_dir = Ldir['LOo'] / 'extract' / Ldir['gtagex'] / 'box'
Lfun.make_dir(out_dir)
if Ldir['surf']:
    box_fn = out_dir / (Ldir['job'] + '_surf_' + Ldir['ds0'] + '_' + Ldir['ds1'] + '.nc')
elif Ldir['bot']:
    box_fn = out_dir / (Ldir['job'] + '_bot_' + Ldir['ds0'] + '_' + Ldir['ds1'] + '.nc')
else:
    box_fn = out_dir / (Ldir['job'] + '_' + Ldir['ds0'] + '_' + Ldir['ds1'] + '.nc')
# remove any previous result so ncrcat starts clean
box_fn.unlink(missing_ok=True)
# name the temp dir to accumulate individual extractions
temp_dir = out_dir / ('temp_' + Ldir['job'])
Lfun.make_dir(temp_dir, clean=True)
# get list of history files to work on, one per output time
fn_list = Lfun.get_fn_list(Ldir['list_type'], Ldir, Ldir['ds0'], Ldir['ds1'])
if Ldir['testing']:
    fn_list = fn_list[:5]
# grid (G), s-coordinate (S), and time (T) info from the first file;
# Lon/Lat are the rho-grid axes used for index lookup below.
G, S, T = zrfun.get_basic_info(fn_list[0])
Lon = G['lon_rho'][0,:]
Lat = G['lat_rho'][:,0]
def check_bounds(lon, lat):
    """Exit if (lon, lat) lies outside the model domain; otherwise return
    the (ilon, ilat) indices of the nearest rho-grid point.

    Uses the module-level Lon/Lat axes extracted from the grid file.
    """
    # Guard clauses: refuse coordinates outside the grid extent.
    if lon < Lon[0] or lon > Lon[-1]:
        print('ERROR: lon out of bounds ')
        sys.exit()
    if lat < Lat[0] or lat > Lat[-1]:
        print('ERROR: lat out of bounds ')
        sys.exit()
    # Nearest-neighbor indices on each axis.
    return zfun.find_nearest_ind(Lon, lon), zfun.find_nearest_ind(Lat, lat)
# get the indices and check that they are in the grid;
# job_definitions lives in LO_user and is imported dynamically by path
pth = Ldir['LOu'] / 'extract' / 'box'
if str(pth) not in sys.path:
    sys.path.append(str(pth))
import job_definitions
from importlib import reload
reload(job_definitions)
# the job defines the box corners (aa) and the variables to extract (vn_list)
aa, vn_list = job_definitions.get_box(Ldir['job'], Lon, Lat)
lon0, lon1, lat0, lat1 = aa
ilon0, ilat0 = check_bounds(lon0, lat0)
ilon1, ilat1 = check_bounds(lon1, lat1)
# NOTE: ncks indexing is zero-based but is INCLUSIVE of the last point.
# NOTE: ncks extractions retain singleton dimensions
# do the extractions
N = len(fn_list)
proc_list = []
tt0 = time()
print('Working on ' + box_fn.name + ' (' + str(N) + ' times)')
# Launch one ncks subprocess per history file, up to Nproc at a time.
for ii in range(N):
    fn = fn_list[ii]
    sys.stdout.flush()
    # extract one day at a time using ncks; zero-padded counter keeps
    # the temp files in sortable order for ncrcat
    count_str = ('000000' + str(ii))[-6:]
    out_fn = temp_dir / ('box_' + count_str + '.nc')
    # the u/v grids are staggered, hence the ilon1-1 / ilat1-1 upper limits
    cmd_list1 = ['ncks',
        '-v', vn_list,
        '-d', 'xi_rho,'+str(ilon0)+','+str(ilon1), '-d', 'eta_rho,'+str(ilat0)+','+str(ilat1),
        '-d', 'xi_u,'+str(ilon0)+','+str(ilon1-1), '-d', 'eta_u,'+str(ilat0)+','+str(ilat1),
        '-d', 'xi_v,'+str(ilon0)+','+str(ilon1), '-d', 'eta_v,'+str(ilat0)+','+str(ilat1-1)]
    # surf/bot restrict the s_rho dimension to the top or bottom layer
    if Ldir['surf']:
        cmd_list1 += ['-d','s_rho,'+str(S['N']-1)]
    elif Ldir['bot']:
        cmd_list1 += ['-d','s_rho,0']
    cmd_list1 += ['-O', str(fn), str(out_fn)]
    proc = Po(cmd_list1, stdout=Pi, stderr=Pi)
    proc_list.append(proc)
    # screen output about progress
    if (np.mod(ii,10) == 0) and ii>0:
        print(str(ii), end=', ')
        sys.stdout.flush()
    if (np.mod(ii,50) == 0) and (ii > 0):
        print('') # line feed
        sys.stdout.flush()
    if (ii == N-1):
        print(str(ii))
        sys.stdout.flush()
    # Nproc controls how many ncks subprocesses we allow to stack up
    # before we require them all to finish.
    if ((np.mod(ii,Ldir['Nproc']) == 0) and (ii > 0)) or (ii == N-1):
        for proc in proc_list:
            proc.communicate()
        # make sure everyone is finished before continuing
        proc_list = []
    # NOTE(review): this increment is a no-op — `ii` is reassigned by the
    # for loop on the next iteration.
    ii += 1
# Ensure that all days have the same fill value. This was required for cas6_v3_lo8b
# when passing from 2021.10.31 to 2021.11.01 because they had inconsistent fill values,
# which leaks through the ncrcat call below.
tt1 = time()
enc_dict = {'_FillValue':1e20}
vn_List = vn_list.split(',')
Enc_dict = {vn:enc_dict for vn in vn_List}
# Rewrite each temp file in place with the uniform fill-value encoding.
for out_fn in list(temp_dir.glob('box_*.nc')):
    ds = xr.load_dataset(out_fn) # need to load, not open, for overwrite
    ds.to_netcdf(out_fn, encoding=Enc_dict)
    ds.close()
print(' - Time for adding fill value = %0.2f sec' % (time()- tt1))
# concatenate the records into one file
# This bit of code is a nice example of how to replicate a bash pipe:
# `ls temp_dir | grep box | ncrcat ...`
pp1 = Po(['ls', str(temp_dir)], stdout=Pi)
pp2 = Po(['grep','box'], stdin=pp1.stdout, stdout=Pi)
cmd_list = ['ncrcat','-p', str(temp_dir), '-O', str(box_fn)]
proc = Po(cmd_list, stdin=pp2.stdout, stdout=Pi, stderr=Pi)
stdout, stderr = proc.communicate()
if Ldir['testing']:
    if len(stdout) > 0:
        print('\n'+stdout.decode())
    if len(stderr) > 0:
        print('\n'+stderr.decode())
print('Time for initial extraction = %0.2f sec' % (time()- tt0))
# add z variables (full-depth extractions only; surf/bot have a single layer)
if (Ldir['surf']==False) and (Ldir['bot']==False):
    tt0 = time()
    ds = xr.load_dataset(box_fn) # have to load in order to add new variables
    # NOTE(review): assumes 'salt' is always in vn_list — its shape defines
    # the dimensions; confirm against job_definitions.
    NT, N, NR, NC = ds.salt.shape
    ds['z_rho'] = (('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), np.nan*np.ones((NT, N, NR, NC)))
    ds['z_w'] = (('ocean_time', 's_w', 'eta_rho', 'xi_rho'), np.nan*np.ones((NT, N+1, NR, NC)))
    ds.z_rho.attrs = {'units':'m', 'long_name': 'vertical position on s_rho grid, positive up'}
    ds.z_w.attrs = {'units':'m', 'long_name': 'vertical position on s_w grid, positive up'}
    # z depends on the free surface, so recompute for each time step
    for ii in range(NT):
        # NOTE(review): h is time-invariant and could be hoisted out of the loop
        h = ds.h.values
        zeta = ds.zeta[ii,:,:].values
        z_rho, z_w = zrfun.get_z(h, zeta, S)
        ds['z_rho'][ii,:,:,:] = z_rho
        ds['z_w'][ii,:,:,:] = z_w
    ds.to_netcdf(box_fn)
    ds.close()
    print('Time to add z variables = %0.2f sec' % (time()- tt0))
if Ldir['uv_to_rho']:
    # interpolate anything on the u and v grids to the rho grid, assuming
    # zero values where masked, and leaving a masked ring around the outermost edge
    tt0 = time()
    ds = xr.load_dataset(box_fn) # have to load in order to add new variables
    Maskr = ds.mask_rho.values == 1 # True over water
    NR, NC = Maskr.shape
    for vn in ds.data_vars:
        # u-grid variables: average adjacent u points onto interior rho points
        if ('xi_u' in ds[vn].dims) and ('ocean_time' in ds[vn].dims):
            if len(ds[vn].dims) == 4:
                # 4-D case: (time, s_rho, eta, xi)
                uu = ds[vn].values
                NT, N, NRu, NCu = uu.shape
                uu[np.isnan(uu)] = 0
                UU = (uu[:,:,1:-1,1:]+uu[:,:,1:-1,:-1])/2
                uuu = np.nan * np.ones((NT, N, NR, NC))
                uuu[:,:,1:-1,1:-1] = UU
                # re-apply the land mask broadcast over time and depth
                Maskr3 = np.tile(Maskr.reshape(1,1,NR,NC),[NT,N,1,1])
                uuu[~Maskr3] = np.nan
                ds.update({vn:(('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), uuu)})
            elif len(ds[vn].dims) == 3:
                # 3-D case: (time, eta, xi), e.g. depth-averaged fields
                uu = ds[vn].values
                NT, NRu, NCu = uu.shape
                uu[np.isnan(uu)] = 0
                UU = (uu[:,1:-1,1:]+uu[:,1:-1,:-1])/2
                uuu = np.nan * np.ones((NT, NR, NC))
                uuu[:,1:-1,1:-1] = UU
                Maskr3 = np.tile(Maskr.reshape(1,NR,NC),[NT,1,1])
                uuu[~Maskr3] = np.nan
                ds.update({vn:(('ocean_time', 'eta_rho', 'xi_rho'), uuu)})
        # v-grid variables: same scheme, averaging along the eta axis
        elif ('xi_v' in ds[vn].dims) and ('ocean_time' in ds[vn].dims):
            if len(ds[vn].dims) == 4:
                vv = ds[vn].values
                NT, N, NRv, NCv = vv.shape
                vv[np.isnan(vv)] = 0
                VV = (vv[:,:,1:,1:-1]+vv[:,:,:-1,1:-1])/2
                vvv = np.nan * np.ones((NT, N, NR, NC))
                vvv[:,:,1:-1,1:-1] = VV
                Maskr3 = np.tile(Maskr.reshape(1,1,NR,NC),[NT,N,1,1])
                vvv[~Maskr3] = np.nan
                ds.update({vn:(('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), vvv)})
            elif len(ds[vn].dims) == 3:
                vv = ds[vn].values
                NT, NRv, NCv = vv.shape
                vv[np.isnan(vv)] = 0
                VV = (vv[:,1:,1:-1]+vv[:,:-1,1:-1])/2
                vvv = np.nan * np.ones((NT, NR, NC))
                vvv[:,1:-1,1:-1] = VV
                Maskr3 = np.tile(Maskr.reshape(1,NR,NC),[NT,1,1])
                vvv[~Maskr3] = np.nan
                ds.update({vn:(('ocean_time', 'eta_rho', 'xi_rho'), vvv)})
    ds.to_netcdf(box_fn)
    ds.close()
    print('Time to interpolate uv variables to rho grid = %0.2f sec' % (time()- tt0))
# squeeze and compress the resulting file
tt0 = time()
ds = xr.load_dataset(box_fn)
ds = ds.squeeze() # remove singleton dimensions
# zlib level 1: modest compression, fast; fill value matches earlier pass
enc_dict = {'zlib':True, 'complevel':1, '_FillValue':1e20}
Enc_dict = {vn:enc_dict for vn in ds.data_vars if 'ocean_time' in ds[vn].dims}
ds.to_netcdf(box_fn, encoding=Enc_dict)
ds.close()
print('Time to compress = %0.2f sec' % (time()- tt0))
# clean up the per-file temp extractions
Lfun.make_dir(temp_dir, clean=True)
temp_dir.rmdir()
print('Size of full rho-grid = %s' % (str(G['lon_rho'].shape)))
print(' Contents of extracted box file: '.center(60,'-'))
# check on the results: print shape and value range of every variable
ds = xr.open_dataset(box_fn)
for vn in ds.data_vars:
    print('%s %s max/min = %0.4f/%0.4f' % (vn, str(ds[vn].shape), ds[vn].max(), ds[vn].min()))
ds.close()
print('\nPath to file:\n%s' % (str(box_fn)))
| [
"sys.exit",
"job_definitions.get_box",
"lo_tools.Lfun.make_dir",
"numpy.mod",
"lo_tools.Lfun.Lstart",
"lo_tools.zrfun.get_z",
"argparse.ArgumentParser",
"subprocess.Popen",
"os.getpid",
"sys.stdout.flush",
"lo_tools.Lfun.get_fn_list",
"numpy.ones",
"numpy.isnan",
"xarray.open_dataset",
"... | [((1163, 1174), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1172, 1174), False, 'import os\n'), ((1288, 1313), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1311, 1313), False, 'import argparse\n'), ((2719, 2775), 'lo_tools.Lfun.Lstart', 'Lfun.Lstart', ([], {'gridname': 'gridname', 'tag': 'tag', 'ex_name': 'ex_name'}), '(gridname=gridname, tag=tag, ex_name=ex_name)\n', (2730, 2775), False, 'from lo_tools import Lfun, zfun, zrfun\n'), ((3425, 3447), 'lo_tools.Lfun.make_dir', 'Lfun.make_dir', (['out_dir'], {}), '(out_dir)\n', (3438, 3447), False, 'from lo_tools import Lfun, zfun, zrfun\n'), ((3887, 3922), 'lo_tools.Lfun.make_dir', 'Lfun.make_dir', (['temp_dir'], {'clean': '(True)'}), '(temp_dir, clean=True)\n', (3900, 3922), False, 'from lo_tools import Lfun, zfun, zrfun\n'), ((3965, 4032), 'lo_tools.Lfun.get_fn_list', 'Lfun.get_fn_list', (["Ldir['list_type']", 'Ldir', "Ldir['ds0']", "Ldir['ds1']"], {}), "(Ldir['list_type'], Ldir, Ldir['ds0'], Ldir['ds1'])\n", (3981, 4032), False, 'from lo_tools import Lfun, zfun, zrfun\n'), ((4089, 4121), 'lo_tools.zrfun.get_basic_info', 'zrfun.get_basic_info', (['fn_list[0]'], {}), '(fn_list[0])\n', (4109, 4121), False, 'from lo_tools import Lfun, zfun, zrfun\n'), ((4762, 4785), 'importlib.reload', 'reload', (['job_definitions'], {}), '(job_definitions)\n', (4768, 4785), False, 'from importlib import reload\n'), ((4800, 4846), 'job_definitions.get_box', 'job_definitions.get_box', (["Ldir['job']", 'Lon', 'Lat'], {}), "(Ldir['job'], Lon, Lat)\n", (4823, 4846), False, 'import job_definitions\n'), ((5141, 5147), 'time.time', 'time', ([], {}), '()\n', (5145, 5147), False, 'from time import time\n'), ((6870, 6876), 'time.time', 'time', ([], {}), '()\n', (6874, 6876), False, 'from time import time\n'), ((7385, 7433), 'subprocess.Popen', 'Po', (["['grep', 'box']"], {'stdin': 'pp1.stdout', 'stdout': 'Pi'}), "(['grep', 'box'], stdin=pp1.stdout, stdout=Pi)\n", (7387, 7433), True, 'from subprocess import 
Popen as Po\n'), ((7501, 7553), 'subprocess.Popen', 'Po', (['cmd_list'], {'stdin': 'pp2.stdout', 'stdout': 'Pi', 'stderr': 'Pi'}), '(cmd_list, stdin=pp2.stdout, stdout=Pi, stderr=Pi)\n', (7503, 7553), True, 'from subprocess import Popen as Po\n'), ((11335, 11341), 'time.time', 'time', ([], {}), '()\n', (11339, 11341), False, 'from time import time\n'), ((11347, 11370), 'xarray.load_dataset', 'xr.load_dataset', (['box_fn'], {}), '(box_fn)\n', (11362, 11370), True, 'import xarray as xr\n'), ((11674, 11709), 'lo_tools.Lfun.make_dir', 'Lfun.make_dir', (['temp_dir'], {'clean': '(True)'}), '(temp_dir, clean=True)\n', (11687, 11709), False, 'from lo_tools import Lfun, zfun, zrfun\n'), ((11878, 11901), 'xarray.open_dataset', 'xr.open_dataset', (['box_fn'], {}), '(box_fn)\n', (11893, 11901), True, 'import xarray as xr\n'), ((3332, 3342), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3340, 3342), False, 'import sys\n'), ((4461, 4492), 'lo_tools.zfun.find_nearest_ind', 'zfun.find_nearest_ind', (['Lon', 'lon'], {}), '(Lon, lon)\n', (4482, 4492), False, 'from lo_tools import Lfun, zfun, zrfun\n'), ((4504, 4535), 'lo_tools.zfun.find_nearest_ind', 'zfun.find_nearest_ind', (['Lat', 'lat'], {}), '(Lat, lat)\n', (4525, 4535), False, 'from lo_tools import Lfun, zfun, zrfun\n'), ((5256, 5274), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5272, 5274), False, 'import sys\n'), ((5931, 5966), 'subprocess.Popen', 'Po', (['cmd_list1'], {'stdout': 'Pi', 'stderr': 'Pi'}), '(cmd_list1, stdout=Pi, stderr=Pi)\n', (5933, 5966), True, 'from subprocess import Popen as Po\n'), ((7036, 7059), 'xarray.load_dataset', 'xr.load_dataset', (['out_fn'], {}), '(out_fn)\n', (7051, 7059), True, 'import xarray as xr\n'), ((7875, 7881), 'time.time', 'time', ([], {}), '()\n', (7879, 7881), False, 'from time import time\n'), ((7891, 7914), 'xarray.load_dataset', 'xr.load_dataset', (['box_fn'], {}), '(box_fn)\n', (7906, 7914), True, 'import xarray as xr\n'), ((8876, 8882), 'time.time', 'time', ([], {}), 
'()\n', (8880, 8882), False, 'from time import time\n'), ((8892, 8915), 'xarray.load_dataset', 'xr.load_dataset', (['box_fn'], {}), '(box_fn)\n', (8907, 8915), True, 'import xarray as xr\n'), ((2633, 2643), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2641, 2643), False, 'import sys\n'), ((4317, 4327), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4325, 4327), False, 'import sys\n'), ((4421, 4431), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4429, 4431), False, 'import sys\n'), ((6109, 6127), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6125, 6127), False, 'import sys\n'), ((6208, 6226), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6224, 6226), False, 'import sys\n'), ((6278, 6296), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6294, 6296), False, 'import sys\n'), ((8484, 8507), 'lo_tools.zrfun.get_z', 'zrfun.get_z', (['h', 'zeta', 'S'], {}), '(h, zeta, S)\n', (8495, 8507), False, 'from lo_tools import Lfun, zfun, zrfun\n'), ((6038, 6052), 'numpy.mod', 'np.mod', (['ii', '(10)'], {}), '(ii, 10)\n', (6044, 6052), True, 'import numpy as np\n'), ((6136, 6150), 'numpy.mod', 'np.mod', (['ii', '(50)'], {}), '(ii, 50)\n', (6142, 6150), True, 'import numpy as np\n'), ((7212, 7218), 'time.time', 'time', ([], {}), '()\n', (7216, 7218), False, 'from time import time\n'), ((7781, 7787), 'time.time', 'time', ([], {}), '()\n', (7785, 7787), False, 'from time import time\n'), ((8066, 8090), 'numpy.ones', 'np.ones', (['(NT, N, NR, NC)'], {}), '((NT, N, NR, NC))\n', (8073, 8090), True, 'import numpy as np\n'), ((8160, 8188), 'numpy.ones', 'np.ones', (['(NT, N + 1, NR, NC)'], {}), '((NT, N + 1, NR, NC))\n', (8167, 8188), True, 'import numpy as np\n'), ((11648, 11654), 'time.time', 'time', ([], {}), '()\n', (11652, 11654), False, 'from time import time\n'), ((6428, 6453), 'numpy.mod', 'np.mod', (['ii', "Ldir['Nproc']"], {}), "(ii, Ldir['Nproc'])\n", (6434, 6453), True, 'import numpy as np\n'), ((8671, 8677), 'time.time', 'time', ([], {}), '()\n', 
(8675, 8677), False, 'from time import time\n'), ((11272, 11278), 'time.time', 'time', ([], {}), '()\n', (11276, 11278), False, 'from time import time\n'), ((9273, 9285), 'numpy.isnan', 'np.isnan', (['uu'], {}), '(uu)\n', (9281, 9285), True, 'import numpy as np\n'), ((9380, 9404), 'numpy.ones', 'np.ones', (['(NT, N, NR, NC)'], {}), '((NT, N, NR, NC))\n', (9387, 9404), True, 'import numpy as np\n'), ((9771, 9783), 'numpy.isnan', 'np.isnan', (['uu'], {}), '(uu)\n', (9779, 9783), True, 'import numpy as np\n'), ((9874, 9895), 'numpy.ones', 'np.ones', (['(NT, NR, NC)'], {}), '((NT, NR, NC))\n', (9881, 9895), True, 'import numpy as np\n'), ((10320, 10332), 'numpy.isnan', 'np.isnan', (['vv'], {}), '(vv)\n', (10328, 10332), True, 'import numpy as np\n'), ((10427, 10451), 'numpy.ones', 'np.ones', (['(NT, N, NR, NC)'], {}), '((NT, N, NR, NC))\n', (10434, 10451), True, 'import numpy as np\n'), ((10818, 10830), 'numpy.isnan', 'np.isnan', (['vv'], {}), '(vv)\n', (10826, 10830), True, 'import numpy as np\n'), ((10921, 10942), 'numpy.ones', 'np.ones', (['(NT, NR, NC)'], {}), '((NT, NR, NC))\n', (10928, 10942), True, 'import numpy as np\n')] |
import argparse
import os
import random
from envs import MappingEnvironment, LocalISM
import numpy as np
# Evaluation script for a myopic (greedy one-step) exploration baseline on the
# mapping environment: at every step it picks the neighbouring cell whose
# expected information gain (entropy reduction) is highest.
parser = argparse.ArgumentParser()
# General Stuff
parser.add_argument('--experiment', default='runs/myopic', help='folder to put results of experiment in')
# Environment
parser.add_argument('--N', type=int, default=25, help='size of grid')
parser.add_argument('--map_p', type=float, default=.1, help='probability map location is occupied')
parser.add_argument('--prims', action='store_true', help='prims algorithm for filling in map')
parser.add_argument('--episode_length', type=int, default=300, help='length of episode')
# Sensor
parser.add_argument('--sensor_type', default='local', help='local | range')
parser.add_argument('--sensor_span', type=int, default=1, help='span of sensor')
parser.add_argument('--sensor_p', type=float, default=.8, help='probability sensor reading is correct')
parser.add_argument('--seed', type=int, default=random.randint(0, 10000), help='random seed')
opt = parser.parse_args()
print(opt)
# Seed both RNGs so a run is reproducible given the printed config.
random.seed(opt.seed)
np.random.seed(opt.seed)
# make experiment path and persist the exact configuration used for this run
os.makedirs(opt.experiment, exist_ok=True)
with open(os.path.join(opt.experiment, 'config.txt'), 'w') as f:
    f.write(str(opt))
# Initialize sensor
if opt.sensor_type == 'local':
    ism_proto = lambda x: LocalISM(x, span=opt.sensor_span, p_correct=opt.sensor_p)
else:
    raise Exception('sensor type not supported.')
# Initialize environment
env = MappingEnvironment(ism_proto, N=opt.N, p=opt.map_p, episode_length=opt.episode_length, prims=opt.prims)
# Test: run 1000 episodes with the greedy policy and record episode returns.
rewards = []
for k in range(1000):
    obs = env.reset()
    done = False
    R = 0
    while not done:
        # Perform a_t according to actor_criticb
        # Score each of the 4 moves (right, left, up, down) by expected
        # entropy of the 8 cells around the candidate position, weighted by
        # the probability the candidate cell is free.
        best_ent = 0
        best_action = 0
        for i, (x, y) in enumerate([[1, 0], [-1, 0], [0, 1], [0, -1]]):
            # NOTE(review): assumes obs channels are in [-1, 1] (occupancy in
            # channel 0, entropy in channel 1), agent-centred at (N-1, N-1)
            # — confirm against MappingEnvironment.
            p = (obs[opt.N-1+x, opt.N-1+y, 0]+1)/2
            mask = np.ones((3, 3))
            mask[1,1] = 0  # exclude the candidate cell itself
            ent = obs[opt.N-1-1+x:opt.N-1+2+x, opt.N-1-1+y:opt.N-1+2+y, 1]
            expected_ent = (1-p) * np.sum(mask * (ent+1)/2)
            if expected_ent > best_ent:
                best_ent = expected_ent
                best_action = i
        # NOTE(review): `random.random() < 0` is never true, so this
        # epsilon-greedy branch is dead code — presumably epsilon was zeroed
        # deliberately for the pure myopic baseline; confirm.
        if random.random() < 0:
            a = random.randint(0, 3)
        else:
            a = best_action
        # Receive reward r_t and new state s_t+1
        obs, reward, done, info = env.step(a)
        R += reward
    print (R)
    rewards.append(R)
# Persist per-episode returns for later analysis/plotting.
np.save(os.path.join(opt.experiment, 'rewards_test'), rewards)
| [
"numpy.ones",
"os.makedirs",
"argparse.ArgumentParser",
"envs.LocalISM",
"os.path.join",
"random.seed",
"envs.MappingEnvironment",
"numpy.sum",
"numpy.random.seed",
"random.random",
"random.randint"
] | [((117, 142), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (140, 142), False, 'import argparse\n'), ((1040, 1061), 'random.seed', 'random.seed', (['opt.seed'], {}), '(opt.seed)\n', (1051, 1061), False, 'import random\n'), ((1062, 1086), 'numpy.random.seed', 'np.random.seed', (['opt.seed'], {}), '(opt.seed)\n', (1076, 1086), True, 'import numpy as np\n'), ((1111, 1153), 'os.makedirs', 'os.makedirs', (['opt.experiment'], {'exist_ok': '(True)'}), '(opt.experiment, exist_ok=True)\n', (1122, 1153), False, 'import os\n'), ((1465, 1573), 'envs.MappingEnvironment', 'MappingEnvironment', (['ism_proto'], {'N': 'opt.N', 'p': 'opt.map_p', 'episode_length': 'opt.episode_length', 'prims': 'opt.prims'}), '(ism_proto, N=opt.N, p=opt.map_p, episode_length=opt.\n episode_length, prims=opt.prims)\n', (1483, 1573), False, 'from envs import MappingEnvironment, LocalISM\n'), ((2480, 2524), 'os.path.join', 'os.path.join', (['opt.experiment', '"""rewards_test"""'], {}), "(opt.experiment, 'rewards_test')\n", (2492, 2524), False, 'import os\n'), ((955, 979), 'random.randint', 'random.randint', (['(0)', '(10000)'], {}), '(0, 10000)\n', (969, 979), False, 'import random\n'), ((1164, 1206), 'os.path.join', 'os.path.join', (['opt.experiment', '"""config.txt"""'], {}), "(opt.experiment, 'config.txt')\n", (1176, 1206), False, 'import os\n'), ((1319, 1376), 'envs.LocalISM', 'LocalISM', (['x'], {'span': 'opt.sensor_span', 'p_correct': 'opt.sensor_p'}), '(x, span=opt.sensor_span, p_correct=opt.sensor_p)\n', (1327, 1376), False, 'from envs import MappingEnvironment, LocalISM\n'), ((1918, 1933), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (1925, 1933), True, 'import numpy as np\n'), ((2218, 2233), 'random.random', 'random.random', ([], {}), '()\n', (2231, 2233), False, 'import random\n'), ((2255, 2275), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (2269, 2275), False, 'import random\n'), ((2070, 2098), 'numpy.sum', 'np.sum', (['(mask 
* (ent + 1) / 2)'], {}), '(mask * (ent + 1) / 2)\n', (2076, 2098), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Meeko hydrate molecule
#
import numpy as np
from .utils import geomutils
from .utils import obutils
class HydrateMoleculeLegacy:
    def __init__(self, distance=3.0, charge=0, atom_type="W"):
        """Initialize the legacy hydrate typer for AutoDock 4.2.x

        Args:
            distance (float): distance between water molecules and ligand heavy atoms. (default: 3.0)
            charge (float): partial charge of the water molecule. Not used for the hydrated docking. (default: 0)
            atom_type (str): atom type of the water molecule. (default: W)

        """
        self._distance = distance
        self._charge = charge
        self._atom_type = atom_type
        self._bond_type = 1
        self._rotatable = False
        # For each H-bonding atom type, maps the number of heavy-atom
        # neighbors to (number of waters to place, hybridization state).
        self._hb_config = {'HD': {1: (1, 1)},  # neigh: 1, wat: 1, sp1
                           'OA': {
                                1: (2, 2),  # neigh: 1, wat: 2, sp2
                                2: (2, 3)   # neigh: 2, wat: 2, sp3
                                },
                           'SA': {
                                1: (2, 2),  # neigh: 1, wat: 2, sp2
                                2: (2, 3)   # neigh: 2, wat: 2, sp3
                                },
                           'NA': {
                                1: (1, 1),  # neigh: 1, wat: 3, sp1
                                2: (1, 2),  # neigh: 2, wat: 1, sp2
                                3: (1, 3)   # neigh: 3, wat: 1, sp3
                                }
                           }

    def _place_sp1_one_water(self, anchor_xyz, neighbor_xyz, hb_length=3.0):
        """Place one water along the neighbor->anchor axis, `hb_length`
        away from the anchor (sp1 geometry)."""
        position = anchor_xyz + geomutils.vector(neighbor_xyz, anchor_xyz)
        position = geomutils.resize_vector(position, hb_length, anchor_xyz)
        positions = np.array([position])
        return positions

    def _place_sp2_one_water(self, anchor_xyz, neighbor1_xyz, neighbor2_xyz, hb_length=3.0):
        """Place one water opposite the bisector of the two neighbors
        (sp2 geometry)."""
        position = geomutils.atom_to_move(anchor_xyz, [neighbor1_xyz, neighbor2_xyz])
        position = geomutils.resize_vector(position, hb_length, anchor_xyz)
        positions = np.array([position])
        return positions

    def _place_sp2_two_waters(self, anchor_xyz, neighbor1_xyz, neighbor2_xyz, hb_lengths, angles):
        """Place two waters in the sp2 plane by rotating the first neighbor
        around the axis perpendicular to the neighbor1-anchor-neighbor2 plane.

        Args:
            hb_lengths (list): exactly two anchor-water distances
            angles (list): exactly two rotation angles (radians)

        Raises:
            ValueError: if hb_lengths or angles does not contain 2 values
        """
        if len(hb_lengths) != 2:
            raise ValueError('hb_lengths must contain exactly 2 values')
        if len(angles) != 2:
            raise ValueError('angles must contain exactly 2 values')

        positions = []

        r = geomutils.rotation_axis(neighbor1_xyz, anchor_xyz, neighbor2_xyz, origin=anchor_xyz)
        p = neighbor1_xyz

        # We rotate p to get each vector if necessary
        for hb_length, angle in zip(hb_lengths, angles):
            # Bugfix: start from the unrotated reference on every iteration;
            # previously `position` was undefined (first pass) or stale
            # (later passes) when angle == 0.
            position = p
            if angle != 0.:
                position = geomutils.rotate_point(position, anchor_xyz, r, angle)
            position = geomutils.resize_vector(position, hb_length, anchor_xyz)
            positions.append(position)

        positions = np.array(positions)

        return positions

    def _place_sp3_one_water(self, anchor_xyz, neighbor1_xyz, neighbor2_xyz, neighbor3_xyz, hb_length):
        """Place one water opposite the centroid of the three neighbors
        (sp3 geometry)."""
        # We have to normalize bonds, otherwise the water molecule is not well placed
        v1 = anchor_xyz + geomutils.normalize(geomutils.vector(anchor_xyz, neighbor1_xyz))
        v2 = anchor_xyz + geomutils.normalize(geomutils.vector(anchor_xyz, neighbor2_xyz))
        v3 = anchor_xyz + geomutils.normalize(geomutils.vector(anchor_xyz, neighbor3_xyz))
        position = geomutils.atom_to_move(anchor_xyz, [v1, v2, v3])
        position = geomutils.resize_vector(position, hb_length, anchor_xyz)
        positions = np.array([position])
        return positions

    def _place_sp3_two_waters(self, anchor_xyz, neighbor1_xyz, neighbor2_xyz, hb_lengths, angles):
        """Place two waters opposite the two neighbors by rotating around
        the neighbor1-neighbor2 axis (sp3 geometry).

        Args:
            hb_lengths (list): exactly two anchor-water distances
            angles (list): exactly two rotation angles (radians)

        Raises:
            ValueError: if hb_lengths or angles does not contain 2 values
        """
        if len(hb_lengths) != 2:
            raise ValueError('hb_lengths must contain exactly 2 values')
        if len(angles) != 2:
            raise ValueError('angles must contain exactly 2 values')

        positions = []

        v1 = anchor_xyz + geomutils.normalize(geomutils.vector(anchor_xyz, neighbor1_xyz))
        v2 = anchor_xyz + geomutils.normalize(geomutils.vector(anchor_xyz, neighbor2_xyz))
        r = anchor_xyz + geomutils.normalize(geomutils.vector(v1, v2))
        p = geomutils.atom_to_move(anchor_xyz, [v1, v2])

        # We rotate p to get each vector if necessary
        for hb_length, angle in zip(hb_lengths, angles):
            # Bugfix: start from the unrotated reference on every iteration;
            # previously `position` was undefined (first pass) or stale
            # (later passes) when angle == 0.
            position = p
            if angle != 0.:
                position = geomutils.rotate_point(position, anchor_xyz, r, angle)
            position = geomutils.resize_vector(position, hb_length, anchor_xyz)
            positions.append(position)

        positions = np.array(positions)

        return positions

    def hydrate(self, mol):
        """Add water molecules to the ligand

        Args:
            mol (OBMol): input OBMol molecule object

        """
        setup = mol.setup
        water_anchors = []
        water_positions = []

        # It will be the same distance for all of the water molecules
        hb_length = self._distance

        for a, neighbors in setup.graph.items():
            atom_type = setup.get_atom_type(a)
            anchor_xyz = setup.get_coord(a)
            # NOTE(review): assumes every atom in setup.graph has at least
            # one neighbor — confirm upstream guarantees this.
            neighbor1_xyz = setup.get_coord(neighbors[0])
            positions = np.array([])
            n_wat = None
            hyb = None

            if atom_type in self._hb_config:
                try:
                    n_wat, hyb = self._hb_config[atom_type][len(neighbors)]
                except KeyError:
                    raise RuntimeError('Cannot place water molecules on atom %d of type %s with %d neighbors.' % (a, atom_type, len(neighbors)))

                if hyb == 1:
                    if n_wat == 1:
                        # Example: X-HD (HD waters sit 1 A closer)
                        positions = self._place_sp1_one_water(anchor_xyz,
                                                              neighbor1_xyz,
                                                              hb_length - 1.0)
                elif hyb == 2:
                    if n_wat == 1:
                        # Example: X-Nitrogen-X
                        neighbor2_xyz = setup.get_coord(neighbors[1])
                        positions = self._place_sp2_one_water(anchor_xyz,
                                                              neighbor1_xyz, neighbor2_xyz,
                                                              hb_length)
                    elif n_wat == 2:
                        # Example: C=O (backbone oxygen); the second reference
                        # atom is a neighbor-of-the-neighbor other than `a`.
                        tmp_neighbors = [x for x in setup.get_neigh(neighbors[0]) if not x == a]
                        neighbor2_xyz = setup.get_coord(tmp_neighbors[0])
                        positions = self._place_sp2_two_waters(anchor_xyz,
                                                               neighbor1_xyz, neighbor2_xyz,
                                                               [hb_length, hb_length],
                                                               [-np.radians(120), np.radians(120)])
                    elif n_wat == 3:
                        # NOTE(review): dead branch with the current
                        # _hb_config (no entry yields n_wat == 3 with hyb 2);
                        # reassigning hyb here does not re-enter the elif.
                        hyb = 3
                elif hyb == 3:
                    if n_wat == 1:
                        # Example: Ammonia
                        neighbor2_xyz = setup.get_coord(neighbors[1])
                        neighbor3_xyz = setup.get_coord(neighbors[2])
                        positions = self._place_sp3_one_water(anchor_xyz,
                                                              neighbor1_xyz, neighbor2_xyz, neighbor3_xyz,
                                                              hb_length)
                    elif n_wat == 2:
                        # Example: O-HD (Oxygen in hydroxyl group)
                        neighbor2_xyz = setup.get_coord(neighbors[1])
                        positions = self._place_sp3_two_waters(anchor_xyz,
                                                               neighbor1_xyz, neighbor2_xyz,
                                                               [hb_length, hb_length],
                                                               [-np.radians(60), np.radians(60)])
                    elif n_wat == 3:
                        positions = np.array([])

            # Bugfix: record the anchor only together with its (non-empty)
            # positions. Previously anchors were appended unconditionally
            # while positions were appended conditionally, so the zip below
            # could pair waters with the wrong anchor atom.
            if positions.size:
                water_anchors.append(a)
                water_positions.append(positions)

        for water_anchor, waters_on_anchor in zip(water_anchors, water_positions):
            for water_on_anchor in waters_on_anchor:
                tmp = setup.pdbinfo[water_anchor]
                pdbinfo = obutils.PDBAtomInfo('WAT', tmp.resName, tmp.resNum, tmp.chain)
                setup.add_pseudo(water_on_anchor, self._charge, [water_anchor], self._atom_type,
                                 self._bond_type, self._rotatable, pdbinfo)
| [
"numpy.radians",
"numpy.array"
] | [((1867, 1887), 'numpy.array', 'np.array', (['[position]'], {}), '([position])\n', (1875, 1887), True, 'import numpy as np\n'), ((2190, 2210), 'numpy.array', 'np.array', (['[position]'], {}), '([position])\n', (2198, 2210), True, 'import numpy as np\n'), ((2993, 3012), 'numpy.array', 'np.array', (['positions'], {}), '(positions)\n', (3001, 3012), True, 'import numpy as np\n'), ((3668, 3688), 'numpy.array', 'np.array', (['[position]'], {}), '([position])\n', (3676, 3688), True, 'import numpy as np\n'), ((4659, 4678), 'numpy.array', 'np.array', (['positions'], {}), '(positions)\n', (4667, 4678), True, 'import numpy as np\n'), ((5278, 5290), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5286, 5290), True, 'import numpy as np\n'), ((6978, 6993), 'numpy.radians', 'np.radians', (['(120)'], {}), '(120)\n', (6988, 6993), True, 'import numpy as np\n'), ((8093, 8105), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (8101, 8105), True, 'import numpy as np\n'), ((6961, 6976), 'numpy.radians', 'np.radians', (['(120)'], {}), '(120)\n', (6971, 6976), True, 'import numpy as np\n'), ((8011, 8025), 'numpy.radians', 'np.radians', (['(60)'], {}), '(60)\n', (8021, 8025), True, 'import numpy as np\n'), ((7995, 8009), 'numpy.radians', 'np.radians', (['(60)'], {}), '(60)\n', (8005, 8009), True, 'import numpy as np\n')] |
import streamlit as st
import json
from joblib import dump, load
import numpy as np
import glob
# Load the configuration written at training time (feature names/types,
# class labels, display name).
with open('params.json') as f:
    config = json.load(f)
features = config['feature_names']
# Pick up the first serialized model artifact, if any.
models = glob.glob('artifacts/*.joblib')+glob.glob('artifacts/*.pkl')
# Bugfix: `model` used to be left undefined when no artifact matched,
# causing a NameError below; default to None and guard.
model = load(models[0]) if models else None
st.title('Welcome to')
st.title(config['model_name'])
# show feature fields
if model is None:
    st.error('No model artifact found in artifacts/ (*.joblib or *.pkl).')
elif "sklearn" in str(type(model)): # tabular model
    feature_vals = []
    for i,feature in enumerate(features):
        f_type = config["feature_types"][i]
        # Only float features are supported for now; render a bounded input.
        if f_type["type"] == "float":
            feature_vals.append(st.number_input(f'Which {feature}?', float(f_type["limit"][0]), float(f_type["limit"][1])))
    # Model expects a single-row 2-D array; take the first (only) prediction.
    pred = model.predict(np.array(feature_vals).reshape(1, -1) )[0]
    st.title('prediction is')
    st.title(config['classes'][pred])
elif 'torch' in str(type(model)):
    # TODO: load image and run the prediction for torch models
    pass
| [
"numpy.array",
"joblib.load",
"json.load",
"glob.glob",
"streamlit.title"
] | [((302, 324), 'streamlit.title', 'st.title', (['"""Welcome to"""'], {}), "('Welcome to')\n", (310, 324), True, 'import streamlit as st\n'), ((325, 355), 'streamlit.title', 'st.title', (["config['model_name']"], {}), "(config['model_name'])\n", (333, 355), True, 'import streamlit as st\n'), ((138, 150), 'json.load', 'json.load', (['f'], {}), '(f)\n', (147, 150), False, 'import json\n'), ((197, 228), 'glob.glob', 'glob.glob', (['"""artifacts/*.joblib"""'], {}), "('artifacts/*.joblib')\n", (206, 228), False, 'import glob\n'), ((229, 257), 'glob.glob', 'glob.glob', (['"""artifacts/*.pkl"""'], {}), "('artifacts/*.pkl')\n", (238, 257), False, 'import glob\n'), ((285, 300), 'joblib.load', 'load', (['models[0]'], {}), '(models[0])\n', (289, 300), False, 'from joblib import dump, load\n'), ((740, 765), 'streamlit.title', 'st.title', (['"""prediction is"""'], {}), "('prediction is')\n", (748, 765), True, 'import streamlit as st\n'), ((767, 800), 'streamlit.title', 'st.title', (["config['classes'][pred]"], {}), "(config['classes'][pred])\n", (775, 800), True, 'import streamlit as st\n'), ((695, 717), 'numpy.array', 'np.array', (['feature_vals'], {}), '(feature_vals)\n', (703, 717), True, 'import numpy as np\n')] |
import numpy as np
import requests
import random
import pandas as pd
import time
import multiprocessing
# Download the full English word list once at import time, then keep only
# the 4-6 letter words — the only lengths this solver can produce/score.
url = 'https://raw.githubusercontent.com/dwyl/english-words/master/words_alpha.txt'
existingWords = requests.get(url)
existingWords = existingWords.text.split()
existingWords = [word for word in existingWords if (len(word) == 4 or len(word) == 5 or len(word) == 6)]
def genLetter():
    """Return one lowercase ASCII letter chosen uniformly at random."""
    return random.choice('abcdefghijklmnopqrstuvwxyz')
def createboard(rand=False, dimensions = '4x4'):
    """Build a 4x4 letter board as a pandas DataFrame.

    With ``rand=True`` the board is filled with random letters from
    ``genLetter``; otherwise four lines of letters are read from standard
    input.  (``dimensions`` is accepted but the size is fixed at 4x4.)
    """
    if rand == True:
        letters = [[genLetter() for _ in range(4)] for _ in range(4)]
    else:
        prompts = ["Input letters:\n\n", "", "", ""]
        raw_lines = [input(prompt).lower() for prompt in prompts]
        letters = [[line[0], line[1], line[2], line[3]] for line in raw_lines]
    return pd.DataFrame(data=np.array(letters))
def getSurrounding(df, row, column):
    """Return the in-bounds neighbour coordinates of (row, column) on the
    fixed 4x4 board, as [row, column] pairs.

    The order matches the original implementation: the four orthogonal
    neighbours first, then the four diagonals.  ``df`` is accepted for API
    compatibility but is not consulted.
    """
    offsets = [(1, 0), (-1, 0), (0, 1), (0, -1),
               (1, 1), (1, -1), (-1, 1), (-1, -1)]
    return [[row + dr, column + dc]
            for dr, dc in offsets
            if 0 <= row + dr < 4 and 0 <= column + dc < 4]
def solveBoard(df, beginningval, return_list):
    """Enumerate all 3-6 letter paths starting at one cell and append the
    dictionary words among them to `return_list`.

    Uses depth-first expansion up to depth 6; visited cells are marked NaN
    on a copy of the board so a path never reuses a cell.  Candidate
    strings are filtered against the module-level `existingWords` list
    (4-6 letter words only, so the 3-letter candidates never survive).

    Args:
        df: 4x4 pandas DataFrame of single letters (not modified; copied).
        beginningval: (row, column) starting cell for all paths.
        return_list: shared list (e.g. multiprocessing.Manager().list())
            that valid words are appended to.
    """
    words = []
    board = df.copy()
    row = beginningval[0]
    column = beginningval[1]
    surroundings = getSurrounding(board, row, column)
    letter1 = board.iloc[row, column]
    # Mark the start cell as consumed for every path from this cell.
    board.iloc[row, column] = np.NaN
    for level1 in surroundings:
        wordlist = ''
        wordlist+=letter1
        row = level1[0]
        column = level1[1]
        # A NaN cell (float) means "already used"; only str cells are live.
        if type(board.iloc[row, column]) == str:
            letter2 = board.iloc[row, column]
            surroundings1 = getSurrounding(board, row, column)
            # NOTE(review): this NaN is written to the shared `board` (not a
            # copy) and never restored, so later level-1 branches cannot
            # revisit this cell — deeper levels copy the board instead.
            # Looks like a latent bug that prunes some paths; confirm.
            board.iloc[row, column] = np.NaN
            wordlist+=letter2
            for level2 in surroundings1:
                board2 = board.copy()
                wordlist2 = wordlist
                row = level2[0]
                column = level2[1]
                if type(board2.iloc[row, column]) == str:
                    letter3 = board2.iloc[row, column]
                    wordlist2+=letter3
                    surroundings2 = getSurrounding(board2, row, column)
                    board2.iloc[row, column] = np.NaN
                    words.append(wordlist2)
                    for level3 in surroundings2:
                        board3 = board2.copy()
                        wordlist3 = wordlist2
                        row = level3[0]
                        column = level3[1]
                        if type(board3.iloc[row, column]) == str:
                            letter4 = board3.iloc[row, column]
                            wordlist3+=letter4
                            surroundings3 = getSurrounding(board3, row, column)
                            board3.iloc[row, column] = np.NaN
                            words.append(wordlist3)
                            for level4 in surroundings3:
                                board4 = board3.copy()
                                wordlist4 = wordlist3
                                row = level4[0]
                                column = level4[1]
                                if type(board4.iloc[row, column]) == str:
                                    letter5 = board4.iloc[row, column]
                                    wordlist4+=letter5
                                    surroundings4 = getSurrounding(board4, row, column)
                                    board4.iloc[row, column] = np.NaN
                                    words.append(wordlist4)
                                    for level5 in surroundings4:
                                        board5 = board4.copy()
                                        wordlist5 = wordlist4
                                        row = level5[0]
                                        column = level5[1]
                                        if type(board5.iloc[row, column]) == str:
                                            letter6 = board5.iloc[row, column]
                                            wordlist5+=letter6
                                            board5.iloc[row, column] = np.NaN
                                            words.append(wordlist5)
    #print(words)
    # Keep only real dictionary words and push them to the shared list.
    possible = [word for word in words if word in existingWords]
    return_list += possible
if __name__ == '__main__':
    # Build the 16 starting cells (every (row, col) of the 4x4 board).
    yes = []
    for i in range(4):
        for z in range(4):
            yes.append((i,z))
    processes = []
    # Read the board interactively from stdin.
    board = createboard(rand=False)
    startime = time.time()
    # Manager list so worker processes can append found words to shared state.
    manager = multiprocessing.Manager()
    return_list = manager.list()
    # One worker per starting cell; each solves independently.
    for m in yes:
        p = multiprocessing.Process(target=solveBoard, args=(board, m, return_list))
        processes.append(p)
        p.start()
    for process in processes:
        process.join()
    endtime = time.time()
    print(f'Finished successfully in {endtime-startime}')
    # Longest words first (duplicates from different paths are not removed).
    print(sorted(return_list, key=len, reverse=True))
print(sorted(return_list, key=len, reverse=True)) | [
"random.choice",
"pandas.DataFrame",
"multiprocessing.Process",
"requests.get",
"numpy.array",
"multiprocessing.Manager",
"time.time"
] | [((213, 230), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (225, 230), False, 'import requests\n'), ((460, 483), 'random.choice', 'random.choice', (['alphabet'], {}), '(alphabet)\n', (473, 483), False, 'import random\n'), ((4981, 4992), 'time.time', 'time.time', ([], {}), '()\n', (4990, 4992), False, 'import time\n'), ((5006, 5031), 'multiprocessing.Manager', 'multiprocessing.Manager', ([], {}), '()\n', (5029, 5031), False, 'import multiprocessing\n'), ((5278, 5289), 'time.time', 'time.time', ([], {}), '()\n', (5287, 5289), False, 'import time\n'), ((924, 957), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'indexableboard'}), '(data=indexableboard)\n', (936, 957), True, 'import pandas as pd\n'), ((1127, 1314), 'numpy.array', 'np.array', (['[[line1[0], line1[1], line1[2], line1[3]], [line2[0], line2[1], line2[2],\n line2[3]], [line3[0], line3[1], line3[2], line3[3]], [line4[0], line4[1\n ], line4[2], line4[3]]]'], {}), '([[line1[0], line1[1], line1[2], line1[3]], [line2[0], line2[1],\n line2[2], line2[3]], [line3[0], line3[1], line3[2], line3[3]], [line4[0\n ], line4[1], line4[2], line4[3]]])\n', (1135, 1314), True, 'import numpy as np\n'), ((1363, 1396), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'indexableboard'}), '(data=indexableboard)\n', (1375, 1396), True, 'import pandas as pd\n'), ((5092, 5164), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'solveBoard', 'args': '(board, m, return_list)'}), '(target=solveBoard, args=(board, m, return_list))\n', (5115, 5164), False, 'import multiprocessing\n')] |
#
# Progression of infection within individuals
#
import random
import numpy as np
import pyEpiabm as pe
from pyEpiabm.core import Person
from pyEpiabm.property import InfectionStatus
from pyEpiabm.utility import StateTransitionMatrix, TransitionTimeMatrix
from .abstract_sweep import AbstractSweep
class HostProgressionSweep(AbstractSweep):
    """Class for sweeping through population and updating host infection status
    and time to next infection status change.
    """

    def __init__(self):
        """Initialise parameters to be used in class methods. State
        transition matrix is set where each row of the matrix corresponds
        to a current infection status of a person. The columns of that
        row then indicate the transition probabilities to the remaining
        infection statuses. Number of infection states is set by
        taking the size of the InfectionStatus enum. Transition time matrix
        is also initialised and associated parameters are called from the
        parameters class.
        """
        # Instantiate state transition matrix
        matrix_object = StateTransitionMatrix()
        self.state_transition_matrix =\
            matrix_object.create_state_transition_matrix()

        self.number_of_states = len(InfectionStatus)
        assert self.state_transition_matrix.shape == \
            (self.number_of_states, self.number_of_states),\
            'Matrix dimensions must match number of infection states'

        # Instantiate transmission time matrix
        time_matrix_object = TransitionTimeMatrix()
        self.transition_time_matrix =\
            time_matrix_object.create_transition_time_matrix()

        # Instantiate parameters to be used in update transition time
        # method. The delay is the latent-to-symptom delay converted to a
        # whole number of simulation time steps (floored).
        self.latent_to_symptom_delay =\
            pe.Parameters.instance().latent_to_sympt_delay
        self.model_time_step = 1 / pe.Parameters.instance().time_steps_per_day
        self.delay = np.floor(self.latent_to_symptom_delay
                             / self.model_time_step)

    @staticmethod
    def set_infectiousness(person: Person):
        """Assigns the infectiousness of a person for when they go from
        the exposed infection state to the next state, either InfectAsympt,
        InfectMild or InfectGP.
        Called right after an exposed person has been given its
        new infection status in the call method below.
        This static method is non private as it is also used by the initial
        infected sweep to give new infected individuals an infectiousness.

        Parameters
        ----------
        Person : Person
            Instance of person class with infection status attributes

        Returns
        -------
        float
            Infectiousness of a person

        """
        # Base infectiousness is drawn from a Gamma(1, 1) distribution,
        # then scaled by the status-specific factor from Parameters.
        init_infectiousness = np.random.gamma(1, 1)
        # NOTE(review): if called with any status other than InfectASympt,
        # InfectMild or InfectGP, `infectiousness` is never assigned and a
        # NameError is raised — callers must guard (as __call__ does).
        if person.infection_status == InfectionStatus.InfectASympt:
            infectiousness = (init_infectiousness
                              * pe.Parameters.instance().asympt_infectiousness)
        elif (person.infection_status == InfectionStatus.InfectMild or
                person.infection_status == InfectionStatus.InfectGP):
            infectiousness = (init_infectiousness
                              * pe.Parameters.instance().sympt_infectiousness)
        person.infectiousness = infectiousness

    def _update_next_infection_status(self, person: Person):
        """Assigns next infection status based on current infection status
        and on probabilities of transition to different statuses. Weights
        are taken from row in state transition matrix that corresponds to
        the person's current infection status. Weights are then used in
        random.choices method to select person's next infection status.

        Parameters
        ----------
        Person : Person
            Instance of person class with infection status attributes

        """
        # Terminal states never transition again.
        if person.infection_status in [InfectionStatus.Recovered,
                                       InfectionStatus.Dead]:
            person.next_infection_status = None
        else:
            row_index = person.infection_status.name
            weights = self.state_transition_matrix.loc[row_index].to_numpy()
            # InfectionStatus enum values are 1-based, hence the offset.
            outcomes = range(1, self.number_of_states + 1)

            if len(weights) != len(outcomes):
                raise AssertionError('The number of infection statuses must' +
                                     'match the number of transition' +
                                     'probabilities')

            next_infection_status_number = random.choices(outcomes, weights)[0]
            next_infection_status =\
                InfectionStatus(next_infection_status_number)

            person.next_infection_status = next_infection_status

    def _update_time_status_change(self, person: Person, time: float):
        """Calculates transition time as calculated in CovidSim,
        and updates the time_of_status_change for the given
        Person, given as the time until next infection status
        for a person who has a new infection status. If it is expected that
        the person will not transition again (for example in Recovered or Dead
        statuses), then the time of status change is set to infinity.

        Parameters
        ----------
        Person : Person
            Instance of Person class with :class:`InfectionStatus` attributes
        time : float
            Current simulation time

        """
        # Defines the transition time. If the person will not transition again,
        # the transition time is set to infinity. Else, the transition time is
        # defined using the TransitionTimeMatrix class, with the method
        # `choose` from the InverseCdf class.
        if person.infection_status in [InfectionStatus.Recovered,
                                       InfectionStatus.Dead]:
            transition_time = np.inf
        else:
            row_index = person.infection_status.name
            column_index = person.next_infection_status.name
            transition_time_icdf_object =\
                self.transition_time_matrix.loc[row_index, column_index]
            # Checks for susceptible to exposed case
            # where transition time is zero
            try:
                transition_time =\
                    transition_time_icdf_object.icdf_choose_noexp()
            except AttributeError as e:
                # Matrix entries may be plain numbers instead of ICDF
                # objects (e.g. an instantaneous transition); use the
                # number directly in that case, re-raise anything else.
                if "object has no attribute 'icdf_choose_noexp'" in str(e):
                    transition_time = transition_time_icdf_object
                    assert isinstance(
                        transition_time_icdf_object,
                        (float, int)), \
                        ("Entries of transition time matrix" +
                         "must either be ICDF" + " objects or numbers")
                else:
                    raise

        # Adds delay to transition time for first level symptomatic infection
        # statuses (InfectMild or InfectGP), as is done in CovidSim.
        if person.infection_status in [InfectionStatus.InfectMild,
                                       InfectionStatus.InfectGP]:
            time += self.delay

        # Assigns the time of status change using current time and transition
        # time:
        person.time_of_status_change = time + transition_time

    def __call__(self, time: float):
        """Sweeps through all people in the population, updates
        their infection status if it is time and assigns them their
        next infection status and the time of their next status change.

        Parameters
        ----------
        time : float
            Current simulation time

        """
        for cell in self._population.cells:
            for person in cell.persons:
                # Only Susceptible people have no scheduled status change.
                if person.time_of_status_change is None:
                    assert person.infection_status \
                        in [InfectionStatus.Susceptible]
                    continue  # pragma: no cover
                # Loop: a person may pass through several statuses within
                # one sweep if multiple transition times have elapsed.
                while person.time_of_status_change <= time:
                    person.update_status(person.next_infection_status)
                    if person.infection_status in \
                            [InfectionStatus.InfectASympt,
                             InfectionStatus.InfectMild,
                             InfectionStatus.InfectGP]:
                        self.set_infectiousness(person)
                    self._update_next_infection_status(person)
                    self._update_time_status_change(person, time)
| [
"pyEpiabm.Parameters.instance",
"pyEpiabm.property.InfectionStatus",
"pyEpiabm.utility.TransitionTimeMatrix",
"numpy.floor",
"random.choices",
"numpy.random.gamma",
"pyEpiabm.utility.StateTransitionMatrix"
] | [((1116, 1139), 'pyEpiabm.utility.StateTransitionMatrix', 'StateTransitionMatrix', ([], {}), '()\n', (1137, 1139), False, 'from pyEpiabm.utility import StateTransitionMatrix, TransitionTimeMatrix\n'), ((1555, 1577), 'pyEpiabm.utility.TransitionTimeMatrix', 'TransitionTimeMatrix', ([], {}), '()\n', (1575, 1577), False, 'from pyEpiabm.utility import StateTransitionMatrix, TransitionTimeMatrix\n'), ((1967, 2028), 'numpy.floor', 'np.floor', (['(self.latent_to_symptom_delay / self.model_time_step)'], {}), '(self.latent_to_symptom_delay / self.model_time_step)\n', (1975, 2028), True, 'import numpy as np\n'), ((2834, 2855), 'numpy.random.gamma', 'np.random.gamma', (['(1)', '(1)'], {}), '(1, 1)\n', (2849, 2855), True, 'import numpy as np\n'), ((1820, 1844), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (1842, 1844), True, 'import pyEpiabm as pe\n'), ((4709, 4754), 'pyEpiabm.property.InfectionStatus', 'InfectionStatus', (['next_infection_status_number'], {}), '(next_infection_status_number)\n', (4724, 4754), False, 'from pyEpiabm.property import InfectionStatus\n'), ((1902, 1926), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (1924, 1926), True, 'import pyEpiabm as pe\n'), ((4619, 4652), 'random.choices', 'random.choices', (['outcomes', 'weights'], {}), '(outcomes, weights)\n', (4633, 4652), False, 'import random\n'), ((3006, 3030), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (3028, 3030), True, 'import pyEpiabm as pe\n'), ((3275, 3299), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (3297, 3299), True, 'import pyEpiabm as pe\n')] |
#!/usr/bin/env python3
import numpy as np
#############################################################
class Person():
    """A simulated agent that moves toward a destination.

    Attributes:
        id: identifier supplied by the caller.
        pos: current position.
        moveinterval: time between moves.
        destiny: target position.
        path: planned route as an empty int64 array until one is computed.
        stepstogoal: steps remaining to the goal; -1 means "not computed".
    """

    def __init__(self, _id, pos, moveinterval, destiny):
        self.id = _id
        self.pos = pos
        self.moveinterval = moveinterval
        self.destiny = destiny
        # No route has been planned yet.
        self.path = np.array([], dtype=np.int64)
        self.stepstogoal = -1
| [
"numpy.array"
] | [((317, 345), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (325, 345), True, 'import numpy as np\n')] |
import numpy as np
import cv2
import operator
import numpy as np
from matplotlib import pyplot as plt
def plot_many_images(images, titles, rows=1, columns=2):
    """Show each image (grayscale) with its matching title in a
    rows x columns Matplotlib grid, hiding the axis tick marks."""
    for index, image in enumerate(images):
        plt.subplot(rows, columns, index + 1)
        plt.imshow(image, 'gray')
        plt.title(titles[index])
        # Hide tick marks on both axes.
        plt.xticks([])
        plt.yticks([])
    plt.show()
def show_image(img):
    """Return *img* unchanged.

    This is a display stub: the interactive OpenCV display calls
    (``cv2.imshow``/``cv2.waitKey``/``cv2.destroyAllWindows``) were disabled
    so the pipeline can run headless. Callers still receive the image back,
    which the rest of this module relies on for chaining.
    """
    return img
def show_digits(digits, colour=255):
    """Tile the 81 extracted digit images into a 9x9 grid, giving each
    cell a 1-pixel constant-colour border, and pass the result through
    ``show_image``."""
    bordered = [cv2.copyMakeBorder(digit.copy(), 1, 1, 1, 1,
                                   cv2.BORDER_CONSTANT, None, colour)
                for digit in digits]
    grid_rows = [np.concatenate(bordered[r * 9:(r + 1) * 9], axis=1)
                 for r in range(9)]
    return show_image(np.concatenate(grid_rows))
def convert_when_colour(colour, img):
    """Promote a grayscale image to BGR when a 3-component colour was
    requested, so coloured drawing operations render correctly."""
    if len(colour) == 3 and (len(img.shape) == 2 or img.shape[2] == 1):
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    return img
def display_points(in_img, points, radius=5, colour=(0, 0, 255)):
    """Draw a filled circle at each point on a copy of the image.

    Consistency fix: reuses ``convert_when_colour`` for the grayscale-to-BGR
    promotion (as ``display_rects`` and ``display_contours`` already do)
    instead of duplicating that logic inline.

    Args:
        in_img: source image (not modified; a copy is drawn on).
        points: iterable of (x, y) coordinates (values are cast to int).
        radius: circle radius in pixels.
        colour: BGR colour of the circles.

    Returns:
        The annotated image copy.
    """
    img = convert_when_colour(colour, in_img.copy())
    for point in points:
        img = cv2.circle(img, tuple(int(x) for x in point), radius, colour, -1)
    show_image(img)
    return img
def display_rects(in_img, rects, colour=(0, 0, 255)):
    """Draw each rectangle (given as a (top-left, bottom-right) point pair)
    on a copy of the image, show it, and return the annotated copy."""
    img = convert_when_colour(colour, in_img.copy())
    for rect in rects:
        start = tuple(int(v) for v in rect[0])
        end = tuple(int(v) for v in rect[1])
        img = cv2.rectangle(img, start, end, colour)
    show_image(img)
    return img
def display_contours(in_img, contours, colour=(0, 0, 255), thickness=2):
    """Overlay every contour on a copy of the image and display it."""
    canvas = convert_when_colour(colour, in_img.copy())
    canvas = cv2.drawContours(canvas, contours, -1, colour, thickness)
    show_image(canvas)
def pre_process_image(img, skip_dilate=False):
    """Uses a blurring function, adaptive thresholding and dilation to expose
    the main features of an image.

    Args:
        img: grayscale source image (not modified; a copy is processed).
        skip_dilate (bool): if True, skip the final grid-line dilation step.

    Returns:
        Binary image with the features (e.g. grid lines) as white pixels.
    """

    # Gaussian blur with a kernel size (height, width) of 9.
    # Note that kernel sizes must be positive and odd and the kernel must be square.
    proc = cv2.GaussianBlur(img.copy(), (9, 9), 0)

    # Adaptive threshold using 11 nearest neighbour pixels
    proc = cv2.adaptiveThreshold(proc, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)

    # Invert colours, so gridlines have non-zero pixel values.
    # Necessary to dilate the image, otherwise will look like erosion instead.
    # (bitwise_not writes into `proc` in place via the second argument.)
    proc = cv2.bitwise_not(proc, proc)

    if not skip_dilate:
        # Dilate the image to increase the size of the grid lines,
        # using a plus-shaped 3x3 structuring element.
        kernel = np.array([[0., 1., 0.], [1., 1., 1.], [0., 1., 0.]],np.uint8)
        proc = cv2.dilate(proc, kernel)

    return proc
def find_corners_of_largest_polygon(img):
"""Finds the 4 extreme corners of the largest contour in the image."""
opencv_version = cv2.__version__.split('.')[0]
if opencv_version == '3':
_, contours, h = cv2.findContours(img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # Find contours
else:
contours, h = cv2.findContours(img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # Find contours
contours = sorted(contours, key=cv2.contourArea, reverse=True) # Sort by area, descending
polygon = contours[0] # Largest image
# Use of `operator.itemgetter` with `max` and `min` allows us to get the index of the point
# Each point is an array of 1 coordinate, hence the [0] getter, then [0] or [1] used to get x and y respectively.
# Bottom-right point has the largest (x + y) value
# Top-left has point smallest (x + y) value
# Bottom-left point has smallest (x - y) value
# Top-right point has largest (x - y) value
bottom_right, _ = max(enumerate([pt[0][0] + pt[0][1] for pt in polygon]), key=operator.itemgetter(1))
top_left, _ = min(enumerate([pt[0][0] + pt[0][1] for pt in polygon]), key=operator.itemgetter(1))
bottom_left, _ = min(enumerate([pt[0][0] - pt[0][1] for pt in polygon]), key=operator.itemgetter(1))
top_right, _ = max(enumerate([pt[0][0] - pt[0][1] for pt in polygon]), key=operator.itemgetter(1))
# Return an array of all 4 points using the indices
# Each point is in its own array of one coordinate
return [polygon[top_left][0], polygon[top_right][0], polygon[bottom_right][0], polygon[bottom_left][0]]
def distance_between(p1, p2):
"""Returns the scalar distance between two points"""
a = p2[0] - p1[0]
b = p2[1] - p1[1]
return np.sqrt((a ** 2) + (b ** 2))
def crop_and_warp(img, crop_rect):
"""Crops and warps a rectangular section from an image into a square of similar size."""
# Rectangle described by top left, top right, bottom right and bottom left points
top_left, top_right, bottom_right, bottom_left = crop_rect[0], crop_rect[1], crop_rect[2], crop_rect[3]
# Explicitly set the data type to float32 or `getPerspectiveTransform` will throw an error
src = np.array([top_left, top_right, bottom_right, bottom_left], dtype='float32')
# Get the longest side in the rectangle
side = max([
distance_between(bottom_right, top_right),
distance_between(top_left, bottom_left),
distance_between(bottom_right, bottom_left),
distance_between(top_left, top_right)
])
# Describe a square with side of the calculated length, this is the new perspective we want to warp to
dst = np.array([[0, 0], [side - 1, 0], [side - 1, side - 1], [0, side - 1]], dtype='float32')
# Gets the transformation matrix for skewing the image to fit a square by comparing the 4 before and after points
m = cv2.getPerspectiveTransform(src, dst)
# Performs the transformation on the original image
return cv2.warpPerspective(img, m, (int(side), int(side)))
def infer_grid(img):
"""Infers 81 cell grid from a square image."""
squares = []
side = img.shape[:1]
side = side[0] / 9
# Note that we swap j and i here so the rectangles are stored in the list reading left-right instead of top-down.
for j in range(9):
for i in range(9):
p1 = (i * side, j * side) # Top left corner of a bounding box
p2 = ((i + 1) * side, (j + 1) * side) # Bottom right corner of bounding box
squares.append((p1, p2))
return squares
def cut_from_rect(img, rect):
"""Cuts a rectangle from an image using the top left and bottom right points."""
return img[int(rect[0][1]):int(rect[1][1]), int(rect[0][0]):int(rect[1][0])]
def scale_and_centre(img, size, margin=0, background=0):
"""Scales and centres an image onto a new background square."""
h, w = img.shape[:2]
def centre_pad(length):
"""Handles centering for a given length that may be odd or even."""
if length % 2 == 0:
side1 = int((size - length) / 2)
side2 = side1
else:
side1 = int((size - length) / 2)
side2 = side1 + 1
return side1, side2
def scale(r, x):
return int(r * x)
if h > w:
t_pad = int(margin / 2)
b_pad = t_pad
ratio = (size - margin) / h
w, h = scale(ratio, w), scale(ratio, h)
l_pad, r_pad = centre_pad(w)
else:
l_pad = int(margin / 2)
r_pad = l_pad
ratio = (size - margin) / w
w, h = scale(ratio, w), scale(ratio, h)
t_pad, b_pad = centre_pad(h)
img = cv2.resize(img, (w, h))
img = cv2.copyMakeBorder(img, t_pad, b_pad, l_pad, r_pad, cv2.BORDER_CONSTANT, None, background)
return cv2.resize(img, (size, size))
def find_largest_feature(inp_img, scan_tl=None, scan_br=None):
"""
Uses the fact the `floodFill` function returns a bounding box of the area it filled to find the biggest
connected pixel structure in the image. Fills this structure in white, reducing the rest to black.
"""
img = inp_img.copy() # Copy the image, leaving the original untouched
height, width = img.shape[:2]
max_area = 0
seed_point = (None, None)
if scan_tl is None:
scan_tl = [0, 0]
if scan_br is None:
scan_br = [width, height]
# Loop through the image
for x in range(scan_tl[0], scan_br[0]):
for y in range(scan_tl[1], scan_br[1]):
# Only operate on light or white squares
if img.item(y, x) == 255 and x < width and y < height: # Note that .item() appears to take input as y, x
area = cv2.floodFill(img, None, (x, y), 64)
if area[0] > max_area: # Gets the maximum bound area which should be the grid
max_area = area[0]
seed_point = (x, y)
# Colour everything grey (compensates for features outside of our middle scanning range
for x in range(width):
for y in range(height):
if img.item(y, x) == 255 and x < width and y < height:
cv2.floodFill(img, None, (x, y), 64)
mask = np.zeros((height + 2, width + 2), np.uint8) # Mask that is 2 pixels bigger than the image
# Highlight the main feature
if all([p is not None for p in seed_point]):
cv2.floodFill(img, mask, seed_point, 255)
top, bottom, left, right = height, 0, width, 0
for x in range(width):
for y in range(height):
if img.item(y, x) == 64: # Hide anything that isn't the main feature
cv2.floodFill(img, mask, (x, y), 0)
# Find the bounding parameters
if img.item(y, x) == 255:
top = y if y < top else top
bottom = y if y > bottom else bottom
left = x if x < left else left
right = x if x > right else right
bbox = [[left, top], [right, bottom]]
return img, np.array(bbox, dtype='float32'), seed_point
def extract_digit(img, rect, size):
"""Extracts a digit (if one exists) from a Sudoku square."""
digit = cut_from_rect(img, rect) # Get the digit box from the whole square
# Use fill feature finding to get the largest feature in middle of the box
# Margin used to define an area in the middle we would expect to find a pixel belonging to the digit
h, w = digit.shape[:2]
margin = int(np.mean([h, w]) / 2.5)
_, bbox, seed = find_largest_feature(digit, [margin, margin], [w - margin, h - margin])
digit = cut_from_rect(digit, bbox)
# Scale and pad the digit so that it fits a square of the digit size we're using for machine learning
w = bbox[1][0] - bbox[0][0]
h = bbox[1][1] - bbox[0][1]
# Ignore any small bounding boxes
if w > 0 and h > 0 and (w * h) > 100 and len(digit) > 0:
return scale_and_centre(digit, size, 4)
else:
return np.zeros((size, size), np.uint8)
def get_digits(img, squares, size):
"""Extracts digits from their cells and builds an array"""
digits = []
img = pre_process_image(img.copy(), skip_dilate=True)
# cv2.imshow('img', img)
for square in squares:
digits.append(extract_digit(img, square, size))
return digits
def parse_grid(path):
original = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
processed = pre_process_image(original)
# cv2.namedWindow('processed',cv2.WINDOW_AUTOSIZE)
# processed_img = cv2.resize(processed, (500, 500)) # Resize image
# cv2.imshow('processed', processed_img)
corners = find_corners_of_largest_polygon(processed)
cropped = crop_and_warp(original, corners)
# cv2.namedWindow('cropped',cv2.WINDOW_AUTOSIZE)
# cropped_img = cv2.resize(cropped, (500, 500)) # Resize image
# cv2.imshow('cropped', cropped_img)
squares = infer_grid(cropped)
# print(squares)
digits = get_digits(cropped, squares, 28)
# print(digits)
final_image = show_digits(digits)
return final_image
def extract_sudoku(image_path):
final_image = parse_grid(image_path)
return final_image
#if __name__ == '__main__':
# main() | [
"numpy.sqrt",
"numpy.array",
"operator.itemgetter",
"matplotlib.pyplot.imshow",
"cv2.__version__.split",
"numpy.mean",
"matplotlib.pyplot.yticks",
"numpy.concatenate",
"cv2.drawContours",
"matplotlib.pyplot.xticks",
"cv2.getPerspectiveTransform",
"cv2.floodFill",
"cv2.cvtColor",
"matplotli... | [((414, 424), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (422, 424), True, 'from matplotlib import pyplot as plt\n'), ((2470, 2524), 'cv2.drawContours', 'cv2.drawContours', (['img', 'contours', '(-1)', 'colour', 'thickness'], {}), '(img, contours, -1, colour, thickness)\n', (2486, 2524), False, 'import cv2\n'), ((2954, 3049), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['proc', '(255)', 'cv2.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv2.THRESH_BINARY', '(11)', '(2)'], {}), '(proc, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.\n THRESH_BINARY, 11, 2)\n', (2975, 3049), False, 'import cv2\n'), ((3190, 3217), 'cv2.bitwise_not', 'cv2.bitwise_not', (['proc', 'proc'], {}), '(proc, proc)\n', (3205, 3217), False, 'import cv2\n'), ((5114, 5138), 'numpy.sqrt', 'np.sqrt', (['(a ** 2 + b ** 2)'], {}), '(a ** 2 + b ** 2)\n', (5121, 5138), True, 'import numpy as np\n'), ((5559, 5634), 'numpy.array', 'np.array', (['[top_left, top_right, bottom_right, bottom_left]'], {'dtype': '"""float32"""'}), "([top_left, top_right, bottom_right, bottom_left], dtype='float32')\n", (5567, 5634), True, 'import numpy as np\n'), ((5982, 6073), 'numpy.array', 'np.array', (['[[0, 0], [side - 1, 0], [side - 1, side - 1], [0, side - 1]]'], {'dtype': '"""float32"""'}), "([[0, 0], [side - 1, 0], [side - 1, side - 1], [0, side - 1]],\n dtype='float32')\n", (5990, 6073), True, 'import numpy as np\n'), ((6191, 6228), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['src', 'dst'], {}), '(src, dst)\n', (6218, 6228), False, 'import cv2\n'), ((7769, 7792), 'cv2.resize', 'cv2.resize', (['img', '(w, h)'], {}), '(img, (w, h))\n', (7779, 7792), False, 'import cv2\n'), ((7800, 7894), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['img', 't_pad', 'b_pad', 'l_pad', 'r_pad', 'cv2.BORDER_CONSTANT', 'None', 'background'], {}), '(img, t_pad, b_pad, l_pad, r_pad, cv2.BORDER_CONSTANT,\n None, background)\n', (7818, 7894), False, 'import cv2\n'), ((7899, 7928), 'cv2.resize', 'cv2.resize', 
(['img', '(size, size)'], {}), '(img, (size, size))\n', (7909, 7928), False, 'import cv2\n'), ((9136, 9179), 'numpy.zeros', 'np.zeros', (['(height + 2, width + 2)', 'np.uint8'], {}), '((height + 2, width + 2), np.uint8)\n', (9144, 9179), True, 'import numpy as np\n'), ((11101, 11139), 'cv2.imread', 'cv2.imread', (['path', 'cv2.IMREAD_GRAYSCALE'], {}), '(path, cv2.IMREAD_GRAYSCALE)\n', (11111, 11139), False, 'import cv2\n'), ((278, 311), 'matplotlib.pyplot.subplot', 'plt.subplot', (['rows', 'columns', '(i + 1)'], {}), '(rows, columns, i + 1)\n', (289, 311), True, 'from matplotlib import pyplot as plt\n'), ((312, 337), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image', '"""gray"""'], {}), "(image, 'gray')\n", (322, 337), True, 'from matplotlib import pyplot as plt\n'), ((340, 360), 'matplotlib.pyplot.title', 'plt.title', (['titles[i]'], {}), '(titles[i])\n', (349, 360), True, 'from matplotlib import pyplot as plt\n'), ((1058, 1112), 'numpy.concatenate', 'np.concatenate', (['with_border[i * 9:(i + 1) * 9]'], {'axis': '(1)'}), '(with_border[i * 9:(i + 1) * 9], axis=1)\n', (1072, 1112), True, 'import numpy as np\n'), ((1161, 1181), 'numpy.concatenate', 'np.concatenate', (['rows'], {}), '(rows)\n', (1175, 1181), True, 'import numpy as np\n'), ((3312, 3383), 'numpy.array', 'np.array', (['[[0.0, 1.0, 0.0], [1.0, 1.0, 1.0], [0.0, 1.0, 0.0]]', 'np.uint8'], {}), '([[0.0, 1.0, 0.0], [1.0, 1.0, 1.0], [0.0, 1.0, 0.0]], np.uint8)\n', (3320, 3383), True, 'import numpy as np\n'), ((3383, 3407), 'cv2.dilate', 'cv2.dilate', (['proc', 'kernel'], {}), '(proc, kernel)\n', (3393, 3407), False, 'import cv2\n'), ((3556, 3582), 'cv2.__version__.split', 'cv2.__version__.split', (['"""."""'], {}), "('.')\n", (3577, 3582), False, 'import cv2\n'), ((9306, 9347), 'cv2.floodFill', 'cv2.floodFill', (['img', 'mask', 'seed_point', '(255)'], {}), '(img, mask, seed_point, 255)\n', (9319, 9347), False, 'import cv2\n'), ((9824, 9855), 'numpy.array', 'np.array', (['bbox'], {'dtype': '"""float32"""'}), 
"(bbox, dtype='float32')\n", (9832, 9855), True, 'import numpy as np\n'), ((10725, 10757), 'numpy.zeros', 'np.zeros', (['(size, size)', 'np.uint8'], {}), '((size, size), np.uint8)\n', (10733, 10757), True, 'import numpy as np\n'), ((363, 377), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (373, 377), True, 'from matplotlib import pyplot as plt\n'), ((379, 393), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (389, 393), True, 'from matplotlib import pyplot as plt\n'), ((1402, 1439), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_GRAY2BGR'], {}), '(img, cv2.COLOR_GRAY2BGR)\n', (1414, 1439), False, 'import cv2\n'), ((1767, 1804), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_GRAY2BGR'], {}), '(img, cv2.COLOR_GRAY2BGR)\n', (1779, 1804), False, 'import cv2\n'), ((4446, 4468), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (4465, 4468), False, 'import operator\n'), ((4545, 4567), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (4564, 4567), False, 'import operator\n'), ((4647, 4669), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (4666, 4669), False, 'import operator\n'), ((4747, 4769), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (4766, 4769), False, 'import operator\n'), ((10263, 10278), 'numpy.mean', 'np.mean', (['[h, w]'], {}), '([h, w])\n', (10270, 10278), True, 'import numpy as np\n'), ((1475, 1512), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_GRAY2BGR'], {}), '(img, cv2.COLOR_GRAY2BGR)\n', (1487, 1512), False, 'import cv2\n'), ((1840, 1877), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_GRAY2BGR'], {}), '(img, cv2.COLOR_GRAY2BGR)\n', (1852, 1877), False, 'import cv2\n'), ((8719, 8755), 'cv2.floodFill', 'cv2.floodFill', (['img', 'None', '(x, y)', '(64)'], {}), '(img, None, (x, y), 64)\n', (8732, 8755), False, 'import cv2\n'), ((9090, 9126), 'cv2.floodFill', 'cv2.floodFill', (['img', 'None', '(x, y)', '(64)'], 
{}), '(img, None, (x, y), 64)\n', (9103, 9126), False, 'import cv2\n'), ((9525, 9560), 'cv2.floodFill', 'cv2.floodFill', (['img', 'mask', '(x, y)', '(0)'], {}), '(img, mask, (x, y), 0)\n', (9538, 9560), False, 'import cv2\n')] |
from ..tools.velocity_embedding import quiver_autoscale, velocity_embedding
from ..tools.utils import groups_to_bool
from .utils import *
from .scatter import scatter
from .docs import doc_scatter, doc_params
from sklearn.neighbors import NearestNeighbors
from scipy.stats import norm as normal
from matplotlib import rcParams
import matplotlib.pyplot as pl
import numpy as np
def compute_velocity_on_grid(
X_emb,
V_emb,
density=None,
smooth=None,
n_neighbors=None,
min_mass=None,
autoscale=True,
adjust_for_stream=False,
cutoff_perc=None,
):
# remove invalid cells
idx_valid = np.isfinite(X_emb.sum(1) + V_emb.sum(1))
X_emb = X_emb[idx_valid]
V_emb = V_emb[idx_valid]
# prepare grid
n_obs, n_dim = X_emb.shape
density = 1 if density is None else density
smooth = 0.5 if smooth is None else smooth
grs = []
for dim_i in range(n_dim):
m, M = np.min(X_emb[:, dim_i]), np.max(X_emb[:, dim_i])
m = m - 0.01 * np.abs(M - m)
M = M + 0.01 * np.abs(M - m)
gr = np.linspace(m, M, int(50 * density))
grs.append(gr)
meshes_tuple = np.meshgrid(*grs)
X_grid = np.vstack([i.flat for i in meshes_tuple]).T
# estimate grid velocities
if n_neighbors is None:
n_neighbors = int(n_obs / 50)
nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=-1)
nn.fit(X_emb)
dists, neighs = nn.kneighbors(X_grid)
scale = np.mean([(g[1] - g[0]) for g in grs]) * smooth
weight = normal.pdf(x=dists, scale=scale)
p_mass = weight.sum(1)
V_grid = (V_emb[neighs] * weight[:, :, None]).sum(1)
V_grid /= np.maximum(1, p_mass)[:, None]
if min_mass is None:
min_mass = 1
if adjust_for_stream:
X_grid = np.stack([np.unique(X_grid[:, 0]), np.unique(X_grid[:, 1])])
ns = int(np.sqrt(len(V_grid[:, 0])))
V_grid = V_grid.T.reshape(2, ns, ns)
mass = np.sqrt((V_grid ** 2).sum(0))
min_mass = 10 ** (min_mass - 6) # default min_mass = 1e-5
min_mass = np.clip(min_mass, None, np.max(mass) * 0.9)
cutoff = mass.reshape(V_grid[0].shape) < min_mass
if cutoff_perc is None:
cutoff_perc = 5
length = np.sum(np.mean(np.abs(V_emb[neighs]), axis=1), axis=1).T
length = length.reshape(ns, ns)
cutoff |= length < np.percentile(length, cutoff_perc)
V_grid[0][cutoff] = np.nan
else:
min_mass *= np.percentile(p_mass, 99) / 100
X_grid, V_grid = X_grid[p_mass > min_mass], V_grid[p_mass > min_mass]
if autoscale:
V_grid /= 3 * quiver_autoscale(X_grid, V_grid)
return X_grid, V_grid
@doc_params(scatter=doc_scatter)
def velocity_embedding_grid(
adata,
basis=None,
vkey="velocity",
density=None,
smooth=None,
min_mass=None,
arrow_size=None,
arrow_length=None,
arrow_color=None,
scale=None,
autoscale=True,
n_neighbors=None,
recompute=None,
X=None,
V=None,
X_grid=None,
V_grid=None,
principal_curve=False,
color=None,
use_raw=None,
layer=None,
color_map=None,
colorbar=True,
palette=None,
size=None,
alpha=0.2,
perc=None,
sort_order=True,
groups=None,
components=None,
projection="2d",
legend_loc="none",
legend_fontsize=None,
legend_fontweight=None,
xlabel=None,
ylabel=None,
title=None,
fontsize=None,
figsize=None,
dpi=None,
frameon=None,
show=None,
save=None,
ax=None,
ncols=None,
**kwargs,
):
"""\
Scatter plot of velocities on a grid.
Arguments
---------
adata: :class:`~anndata.AnnData`
Annotated data matrix.
density: `float` (default: 1)
Amount of velocities to show - 0 none to 1 all
arrow_size: `float` or triple `headlength, headwidth, headaxislength` (default: 1)
Size of arrows.
arrow_length: `float` (default: 1)
Length of arrows.
scale: `float` (default: 1)
Length of velocities in the embedding.
min_mass: `float` or `None` (default: `None`)
Minimum threshold for mass to be shown.
It can range between 0 (all velocities) and 100 (large velocities).
smooth: `float` (default: 0.5)
Multiplication factor for scale in Gaussian kernel around grid point.
n_neighbors: `int` (default: None)
Number of neighbors to consider around grid point.
X: `np.ndarray` (default: None)
embedding grid point coordinates
V: `np.ndarray` (default: None)
embedding grid velocity coordinates
{scatter}
Returns
-------
`matplotlib.Axis` if `show==False`
"""
basis = default_basis(adata, **kwargs) if basis is None else get_basis(adata, basis)
if vkey == "all":
lkeys = list(adata.layers.keys())
vkey = [key for key in lkeys if "velocity" in key and "_u" not in key]
color, color_map = kwargs.pop("c", color), kwargs.pop("cmap", color_map)
colors = make_unique_list(color, allow_array=True)
layers, vkeys = make_unique_list(layer), make_unique_list(vkey)
if V is None:
for key in vkeys:
if recompute or velocity_embedding_changed(adata, basis=basis, vkey=key):
velocity_embedding(adata, basis=basis, vkey=key)
color, layer, vkey = colors[0], layers[0], vkeys[0]
color = default_color(adata) if color is None else color
if X_grid is None or V_grid is None:
_adata = (
adata[groups_to_bool(adata, groups, groupby=color)]
if groups is not None and color in adata.obs.keys()
else adata
)
comps, obsm = get_components(components, basis), _adata.obsm
X_emb = np.array(obsm[f"X_{basis}"][:, comps]) if X is None else X[:, :2]
V_emb = np.array(obsm[f"{vkey}_{basis}"][:, comps]) if V is None else V[:, :2]
X_grid, V_grid = compute_velocity_on_grid(
X_emb=X_emb,
V_emb=V_emb,
density=density,
autoscale=autoscale,
smooth=smooth,
n_neighbors=n_neighbors,
min_mass=min_mass,
)
scatter_kwargs = {
"basis": basis,
"perc": perc,
"use_raw": use_raw,
"sort_order": sort_order,
"alpha": alpha,
"components": components,
"projection": projection,
"legend_loc": legend_loc,
"groups": groups,
"legend_fontsize": legend_fontsize,
"legend_fontweight": legend_fontweight,
"palette": palette,
"color_map": color_map,
"frameon": frameon,
"xlabel": xlabel,
"ylabel": ylabel,
"colorbar": colorbar,
"dpi": dpi,
"fontsize": fontsize,
"show": False,
"save": False,
}
multikey = (
colors
if len(colors) > 1
else layers
if len(layers) > 1
else vkeys
if len(vkeys) > 1
else None
)
if multikey is not None:
if title is None:
title = list(multikey)
elif isinstance(title, (list, tuple)):
title *= int(np.ceil(len(multikey) / len(title)))
ncols = len(multikey) if ncols is None else min(len(multikey), ncols)
nrows = int(np.ceil(len(multikey) / ncols))
figsize = rcParams["figure.figsize"] if figsize is None else figsize
figsize, dpi = get_figure_params(figsize, dpi, ncols)
gs_figsize = (figsize[0] * ncols, figsize[1] * nrows)
ax = []
for i, gs in enumerate(
pl.GridSpec(nrows, ncols, pl.figure(None, gs_figsize, dpi=dpi))
):
if i < len(multikey):
ax.append(
velocity_embedding_grid(
adata,
density=density,
scale=scale,
size=size,
min_mass=min_mass,
smooth=smooth,
n_neighbors=n_neighbors,
principal_curve=principal_curve,
ax=pl.subplot(gs),
arrow_size=arrow_size,
arrow_length=arrow_length,
color=colors[i] if len(colors) > 1 else color,
layer=layers[i] if len(layers) > 1 else layer,
vkey=vkeys[i] if len(vkeys) > 1 else vkey,
title=title[i] if isinstance(title, (list, tuple)) else title,
X_grid=None if len(vkeys) > 1 else X_grid,
V_grid=None if len(vkeys) > 1 else V_grid,
autoscale=False if len(vkeys) > 1 else autoscale,
**scatter_kwargs,
**kwargs,
)
)
savefig_or_show(dpi=dpi, save=save, show=show)
if show is False:
return ax
else:
ax, show = get_ax(ax, show, figsize, dpi)
hl, hw, hal = default_arrow(arrow_size)
if arrow_length is not None:
scale = 1 / arrow_length
if scale is None:
scale = 1
if arrow_color is None:
arrow_color = "grey"
quiver_kwargs = {"angles": "xy", "scale_units": "xy", "edgecolors": "k"}
quiver_kwargs.update({"scale": scale, "width": 0.001, "headlength": hl / 2})
quiver_kwargs.update({"headwidth": hw / 2, "headaxislength": hal / 2})
quiver_kwargs.update({"color": arrow_color, "linewidth": 0.2, "zorder": 3})
for arg in list(kwargs):
if arg in quiver_kwargs:
quiver_kwargs.update({arg: kwargs[arg]})
else:
scatter_kwargs.update({arg: kwargs[arg]})
ax.quiver(
X_grid[:, 0], X_grid[:, 1], V_grid[:, 0], V_grid[:, 1], **quiver_kwargs
)
if principal_curve:
curve = adata.uns["principal_curve"]["projections"]
pl.plot(curve[:, 0], curve[:, 1], c="w", lw=6, zorder=4)
pl.plot(curve[:, 0], curve[:, 1], c="k", lw=3, zorder=5)
size = 4 * default_size(adata) if size is None else size
ax = scatter(
adata,
layer=layer,
color=color,
size=size,
title=title,
ax=ax,
zorder=0,
**scatter_kwargs,
)
savefig_or_show(dpi=dpi, save=save, show=show)
if show is False:
return ax
| [
"numpy.mean",
"numpy.abs",
"numpy.unique",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.array",
"matplotlib.pyplot.figure",
"scipy.stats.norm.pdf",
"numpy.vstack",
"sklearn.neighbors.NearestNeighbors",
"numpy.min",
"numpy.percentile",
"numpy.meshgrid",
"numpy.maximum",
"matplotlib.pyplo... | [((1146, 1163), 'numpy.meshgrid', 'np.meshgrid', (['*grs'], {}), '(*grs)\n', (1157, 1163), True, 'import numpy as np\n'), ((1328, 1380), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'n_neighbors', 'n_jobs': '(-1)'}), '(n_neighbors=n_neighbors, n_jobs=-1)\n', (1344, 1380), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((1514, 1546), 'scipy.stats.norm.pdf', 'normal.pdf', ([], {'x': 'dists', 'scale': 'scale'}), '(x=dists, scale=scale)\n', (1524, 1546), True, 'from scipy.stats import norm as normal\n'), ((1177, 1218), 'numpy.vstack', 'np.vstack', (['[i.flat for i in meshes_tuple]'], {}), '([i.flat for i in meshes_tuple])\n', (1186, 1218), True, 'import numpy as np\n'), ((1454, 1491), 'numpy.mean', 'np.mean', (['[(g[1] - g[0]) for g in grs]'], {}), '([(g[1] - g[0]) for g in grs])\n', (1461, 1491), True, 'import numpy as np\n'), ((1646, 1667), 'numpy.maximum', 'np.maximum', (['(1)', 'p_mass'], {}), '(1, p_mass)\n', (1656, 1667), True, 'import numpy as np\n'), ((930, 953), 'numpy.min', 'np.min', (['X_emb[:, dim_i]'], {}), '(X_emb[:, dim_i])\n', (936, 953), True, 'import numpy as np\n'), ((955, 978), 'numpy.max', 'np.max', (['X_emb[:, dim_i]'], {}), '(X_emb[:, dim_i])\n', (961, 978), True, 'import numpy as np\n'), ((2354, 2388), 'numpy.percentile', 'np.percentile', (['length', 'cutoff_perc'], {}), '(length, cutoff_perc)\n', (2367, 2388), True, 'import numpy as np\n'), ((2455, 2480), 'numpy.percentile', 'np.percentile', (['p_mass', '(99)'], {}), '(p_mass, 99)\n', (2468, 2480), True, 'import numpy as np\n'), ((5750, 5788), 'numpy.array', 'np.array', (["obsm[f'X_{basis}'][:, comps]"], {}), "(obsm[f'X_{basis}'][:, comps])\n", (5758, 5788), True, 'import numpy as np\n'), ((5832, 5875), 'numpy.array', 'np.array', (["obsm[f'{vkey}_{basis}'][:, comps]"], {}), "(obsm[f'{vkey}_{basis}'][:, comps])\n", (5840, 5875), True, 'import numpy as np\n'), ((10010, 10066), 'matplotlib.pyplot.plot', 'pl.plot', (['curve[:, 
0]', 'curve[:, 1]'], {'c': '"""w"""', 'lw': '(6)', 'zorder': '(4)'}), "(curve[:, 0], curve[:, 1], c='w', lw=6, zorder=4)\n", (10017, 10066), True, 'import matplotlib.pyplot as pl\n'), ((10079, 10135), 'matplotlib.pyplot.plot', 'pl.plot', (['curve[:, 0]', 'curve[:, 1]'], {'c': '"""k"""', 'lw': '(3)', 'zorder': '(5)'}), "(curve[:, 0], curve[:, 1], c='k', lw=3, zorder=5)\n", (10086, 10135), True, 'import matplotlib.pyplot as pl\n'), ((1002, 1015), 'numpy.abs', 'np.abs', (['(M - m)'], {}), '(M - m)\n', (1008, 1015), True, 'import numpy as np\n'), ((1039, 1052), 'numpy.abs', 'np.abs', (['(M - m)'], {}), '(M - m)\n', (1045, 1052), True, 'import numpy as np\n'), ((1777, 1800), 'numpy.unique', 'np.unique', (['X_grid[:, 0]'], {}), '(X_grid[:, 0])\n', (1786, 1800), True, 'import numpy as np\n'), ((1802, 1825), 'numpy.unique', 'np.unique', (['X_grid[:, 1]'], {}), '(X_grid[:, 1])\n', (1811, 1825), True, 'import numpy as np\n'), ((2074, 2086), 'numpy.max', 'np.max', (['mass'], {}), '(mass)\n', (2080, 2086), True, 'import numpy as np\n'), ((7611, 7647), 'matplotlib.pyplot.figure', 'pl.figure', (['None', 'gs_figsize'], {'dpi': 'dpi'}), '(None, gs_figsize, dpi=dpi)\n', (7620, 7647), True, 'import matplotlib.pyplot as pl\n'), ((2245, 2266), 'numpy.abs', 'np.abs', (['V_emb[neighs]'], {}), '(V_emb[neighs])\n', (2251, 2266), True, 'import numpy as np\n'), ((8125, 8139), 'matplotlib.pyplot.subplot', 'pl.subplot', (['gs'], {}), '(gs)\n', (8135, 8139), True, 'import matplotlib.pyplot as pl\n')] |
import numpy as np
# NOTE(review): seeding at import time mutates NumPy's *global* RNG state for
# every consumer of this module — confirm this is intentional and not better
# placed in the training entry point.
np.random.seed(0)
import torch
# NOTE(review): same concern — torch's global RNG is seeded as a side effect
# of importing this module.
torch.manual_seed(0)
from torch.utils.data import Dataset, DataLoader, ConcatDataset, RandomSampler
import torchvision
import imageio
import importlib
import random
import glob
import os
import transforms_3d
class patch_DS(Dataset):
    """Implementation of torch.utils.data.Dataset for a set of .tiff files, which iterates over the raw and label
    datasets patch by patch with a given stride.

    Raw scans and masks are loaded eagerly into memory in ``__init__``; ``__getitem__`` then cuts out
    one patch, applies identically-seeded augmentations to image and mask, and returns both as tensors.
    """
    def __init__(self, root_dcm, root_mask, phase, transformer_config, patient_ids, patch_shape, stride_shape, patch_builder_cls,
                 voi_shape, precrop, seed_fn=None):
        """
        Args:
            root_dcm: path to directory containing raw data.
            root_mask: path to directory containing label data.
            phase: 'train' for training, 'val' for validation, 'test' for testing; data augmentation is performed
                only during the 'train' phase.
            transformer_config: dictionary of transformations and parameters for data augmentation.
            patient_ids: set of patients' ids for dataset during the phase.
            patch_shape: the shape of the patch DxHxW.
            stride_shape: the shape of the stride DxHxW.
            patch_builder_cls: defines how to sample patches from the image.
            voi_shape: shape of each image volume; note that the code below stacks
                slices along the LAST axis (``raw_img[:, :, idx]``) and crops with
                ``voi_shape[:2]`` as (H, W), so it is effectively (H, W, D).
            precrop: necessity of precroppping.
            seed_fn: optional callable invoked with the per-item seed in
                ``_set_seed`` (e.g. to seed additional RNGs); may be None.
        """
        self.root_dcm = root_dcm
        self.root_mask = root_mask
        assert phase in ['train', 'val', 'test']
        self.phase = phase
        self.transformer_config = transformer_config
        self.patient_ids = patient_ids
        self.patch_shape = patch_shape
        self.stride_shape = stride_shape
        self.patch_builder_cls = patch_builder_cls
        self.voi_shape = voi_shape
        self.precrop = precrop
        self.seed_fn = seed_fn
        self.to_tensor_transform = torchvision.transforms.ToTensor()
        self.filenames_dcm = []
        self.filenames_mask = []
        self.raws = []
        self.labels = []
        for i in patient_ids:
            # Mask filenames mirror the raw filenames with 'dicom' replaced
            # by 'mask' in the path.
            filenames_img = glob.glob(os.path.join(root_dcm+str(i), '*.tiff'))
            filenames_img.sort()
            filenames_m = [x.replace('dicom','mask') for x in filenames_img]
            self.filenames_dcm.append(filenames_img)
            self.filenames_mask.append(filenames_m)
            depth = len(filenames_img)
            if self.precrop:
                # Keep only the central voi_shape[2] slices along depth.
                z1 = (depth-self.voi_shape[2])//2
                z2 = z1+self.voi_shape[2]
            else:
                z1 = 0
                z2 = depth
            # read raw scan
            raw_img = np.zeros(self.voi_shape, dtype='uint8') # create zero image
            for fn in filenames_img[z1:z2]:
                img = imageio.imread(fn)
                if self.precrop:
                    img = self._center_crop(img,self.voi_shape[:2])
                # Slice position along the last (depth) axis.
                raw_img[:, :, filenames_img[z1:z2].index(fn)] = img
            self.raws.append(raw_img)
            # read mask for scan
            label_img = np.zeros(self.voi_shape, dtype='uint8') # create zero mask
            for fn in filenames_m[z1:z2]:
                m = imageio.imread(fn)
                if self.precrop:
                    m = self._center_crop(m,self.voi_shape[:2])
                label_img[:, :, filenames_m[z1:z2].index(fn)] = m
            self.labels.append(label_img)
        self.raws = np.array(self.raws)
        self.labels = np.array(self.labels)
        # Dataset-wide intensity statistics feed the normalisation transforms.
        min_value, max_value, mean, std = self._calculate_stats(self.raws)
        print (f'Input stats: min={min_value}, max={max_value}, mean={mean}, std={std}')
        self.transformer = transforms_3d.get_transformer(self.transformer_config, min_value=min_value, max_value=max_value,
                                                      mean=mean, std=std, phase=self.phase)
        self.raw_transform = self.transformer.raw_transform()
        self.label_transform = self.transformer.label_transform()
        # Precompute every patch position (slice tuples) for all patients.
        patch_builder = patch_builder_cls(self.raws, self.labels, patch_shape, stride_shape)
        self.raw_patches = patch_builder.raw_patches
        self.label_patches= patch_builder.label_patches
        self.len = len(self.raw_patches)
    def _set_seed(self, seed):
        """Seed Python's and torch's RNGs (and the optional ``seed_fn``) so the
        raw and label transforms draw identical random parameters."""
        random.seed(seed)
        torch.manual_seed(seed)
        if self.seed_fn:
            self.seed_fn(seed)
    @staticmethod
    def _calculate_stats(inputs):
        """Return (min, max, mean, std) of the given array."""
        return np.min(inputs), np.max(inputs), np.mean(inputs), np.std(inputs)
    def __getitem__(self, index):
        """Return the (image, mask) tensor pair at ``index`` with identically
        seeded augmentations applied to both."""
        raw_idx = self.raw_patches[index]
        label_idx = self.label_patches[index]
        image = self.raws[raw_idx]
        image = image.reshape(self.patch_shape)
        mask = self.labels[label_idx]
        mask = mask.reshape(self.patch_shape)
        # Re-seed before each transform with the SAME seed so random
        # augmentations stay spatially aligned between image and mask.
        seed = random.randint(0, 2**32)
        self._set_seed(seed)
        image = self.raw_transform(image)
        image = self.to_tensor_transform(image)
        self._set_seed(seed)
        mask = self.label_transform(mask)
        mask = self.to_tensor_transform(mask)
        return image, mask
    def _center_crop(self, img, roi_shape):
        """Crop the central ``roi_shape`` = (height, width) region from a 2D image."""
        y_size, x_size = roi_shape
        y1 = (img.shape[0]-y_size)//2
        x1 = (img.shape[1]-x_size)//2
        return img[y1:y1+y_size, x1:x1+x_size]
    def __len__(self):
        """Number of patches in the dataset."""
        return self.len
class PatchBuilder:
    """Builds the lists of slice positions used to sample patches from the
    raw and label volumes."""
    def __init__(self, raw_dataset, label_dataset, patch_shape, stride_shape):
        """
        Args:
            raw_dataset: 4D array (NxDxHxW) of raw data.
            label_dataset: 4D array (NxDxHxW) of label data, or None.
            patch_shape: the shape of the patch DxHxW.
            stride_shape: the shape of the stride DxHxW.
        """
        self._raw_patches = self._build_patches(raw_dataset, patch_shape, stride_shape)
        if label_dataset is None:
            self._label_patches = None
        else:
            self._label_patches = self._build_patches(label_dataset, patch_shape, stride_shape)
    @property
    def raw_patches(self):
        """List of slice tuples into the raw dataset."""
        return self._raw_patches
    @property
    def label_patches(self):
        """List of slice tuples into the label dataset (None if no labels)."""
        return self._label_patches
    @staticmethod
    def _build_patches(dataset, patch_shape, stride_shape):
        """Iterate over a given dataset patch-by-patch with a given stride and build an array of slice positions.
        Args:
            dataset: 4D array (NxDxHxW) of data.
            patch_shape: the shape of the patch DxHxW.
            stride_shape: the shape of the stride DxHxW.
        Returns:
            list of slices [(slice, slice, slice, slice), ...]
        """
        slices = []
        assert len(dataset.shape) == 4, 'Supports only 4D (NxDxHxW)'
        num_patients, i_z, i_y, i_x = dataset.shape
        k_z, k_y, k_x = patch_shape
        s_z, s_y, s_x = stride_shape
        for p in range(num_patients):
            for z in PatchBuilder._gen_indices(i_z, k_z, s_z):
                for y in PatchBuilder._gen_indices(i_y, k_y, s_y):
                    for x in PatchBuilder._gen_indices(i_x, k_x, s_x):
                        slice_idx = (
                            slice(p, p + 1),  # patient id
                            slice(z, z + k_z),
                            slice(y, y + k_y),
                            slice(x, x + k_x),
                        )
                        slices.append(slice_idx)
        return slices
    @staticmethod
    def _gen_indices(i, k, s):
        """Generate patch start positions along one axis.
        Args:
            i (int): image size.
            k (int): patch size.
            s (int): stride size.
        Yields:
            start positions 0, s, 2s, ..., plus (at most once) the tail
            position i - k when the last strided patch does not reach the end
            of the axis. The tail is skipped when the stride equals the axis
            size (i == s), matching the original sampling intent.
        """
        assert i >= k, 'Sample size should be bigger than the patch size'
        for j in range(0, i - k + 1, s):
            yield j
        # BUG FIX: this tail check previously sat INSIDE the loop, yielding
        # the same tail index on every iteration where j + k < i and thereby
        # duplicating boundary patches. It must run exactly once, after the
        # regular strided positions. (Also: `and`, not bitwise `&`.)
        if (j + k < i) and (i != s):
            yield i - k
def _get_patch_builder_cls(class_name):
m = importlib.import_module('dataloader')
clazz = getattr(m, class_name)
return clazz
def get_train_loaders(config):
"""Return dictionary containing the training and validation loaders (torch.utils.data.DataLoader).
Args:
config: a top level configuration object containing the 'loaders' key.
Returns: dict {'train': <train_loader>, 'val': <val_loader>}: dictionary containing the training and validation loaders.
"""
assert 'loaders' in config, 'Could not find data loaders configuration'
loaders_config = config['loaders']
print ('Creating training and validation set loaders...')
# get train and validation files
objects = loaders_config['objects']
assert isinstance(objects, list)
voi_shape = loaders_config['voi_shape']
dicom_path = loaders_config['dicom_path']
mask_path = loaders_config['mask_path']
train_ids = tuple(loaders_config['train_patient_ids'])
train_patch = tuple(loaders_config['train_patch'])
train_stride = tuple(loaders_config['train_stride'])
val_ids = tuple(loaders_config['val_patient_ids'])
val_patch = tuple(loaders_config['val_patch'])
val_stride = tuple(loaders_config['val_stride'])
transformer_config = loaders_config['transformer']
precrop = loaders_config['precrop']
# get train slice_builder_cls
train_patch_builder_str = loaders_config.get('train_patch_builder', 'PatchBuilder')
print (f'Train s builder class: {train_patch_builder_str}')
train_patch_builder_cls = _get_patch_builder_cls(train_patch_builder_str)
train_datasets = []
for obj in objects:
root_dcm = dicom_path+'_'+obj+ '/'
root_mask = mask_path+'_'+obj+ '/'
try:
print (f'Loading training set from: {root_dcm}...')
train_dataset = patch_DS(root_dcm, root_mask, 'train', transformer_config,
train_ids, train_patch, train_stride,
train_patch_builder_cls, voi_shape,precrop, seed_fn=None)
train_datasets.append(train_dataset)
except Exception:
print (f'Skipping training set: {root_dcm}')
# get val slice_builder_cls
val_patch_builder_str = loaders_config.get('val_patch_builder', 'PatchBuilder')
print (f'Val patch builder class: {val_patch_builder_str}')
val_patch_builder_cls = _get_patch_builder_cls(val_patch_builder_str)
val_datasets = []
for obj in objects:
root_dcm = dicom_path+'_'+obj+ '/'
root_mask = mask_path+'_'+obj+ '/'
try:
print (f'Loading val set from: {root_dcm}...')
val_dataset = patch_DS(root_dcm, root_mask, 'val', transformer_config,
val_ids, val_patch, val_stride,
val_patch_builder_cls, voi_shape, precrop, seed_fn=None)
val_datasets.append(val_dataset)
except Exception:
print(f'Skipping val set: {root_dcm}')
num_workers = loaders_config.get('num_workers', 1)
print (f'Number of workers for train/val dataloader: {num_workers}')
batch_size = loaders_config.get('batch_size', 1)
print (f'Batch size for train/val loader: {batch_size}')
train_dataset_size = loaders_config.get('train_dataset_size', 1)
train_rand_sampler = RandomSampler(ConcatDataset(train_datasets), replacement=True, num_samples=train_dataset_size)
return {'train': DataLoader(ConcatDataset(train_datasets), batch_size=batch_size, shuffle=False, sampler=train_rand_sampler,
num_workers=num_workers),
'val': DataLoader(ConcatDataset(val_datasets), batch_size=batch_size, shuffle=False, num_workers=num_workers)
}
| [
"torch.utils.data.ConcatDataset",
"torch.manual_seed",
"numpy.mean",
"importlib.import_module",
"numpy.std",
"random.seed",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.random.seed",
"numpy.min",
"imageio.imread",
"torchvision.transforms.ToTensor",
"transforms_3d.get_transformer",
"... | [((19, 36), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (33, 36), True, 'import numpy as np\n'), ((50, 70), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (67, 70), False, 'import torch\n'), ((8191, 8228), 'importlib.import_module', 'importlib.import_module', (['"""dataloader"""'], {}), "('dataloader')\n", (8214, 8228), False, 'import importlib\n'), ((1933, 1966), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (1964, 1966), False, 'import torchvision\n'), ((3461, 3480), 'numpy.array', 'np.array', (['self.raws'], {}), '(self.raws)\n', (3469, 3480), True, 'import numpy as np\n'), ((3503, 3524), 'numpy.array', 'np.array', (['self.labels'], {}), '(self.labels)\n', (3511, 3524), True, 'import numpy as np\n'), ((3725, 3863), 'transforms_3d.get_transformer', 'transforms_3d.get_transformer', (['self.transformer_config'], {'min_value': 'min_value', 'max_value': 'max_value', 'mean': 'mean', 'std': 'std', 'phase': 'self.phase'}), '(self.transformer_config, min_value=min_value,\n max_value=max_value, mean=mean, std=std, phase=self.phase)\n', (3754, 3863), False, 'import transforms_3d\n'), ((4329, 4346), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (4340, 4346), False, 'import random\n'), ((4355, 4378), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (4372, 4378), False, 'import torch\n'), ((4906, 4932), 'random.randint', 'random.randint', (['(0)', '(2 ** 32)'], {}), '(0, 2 ** 32)\n', (4920, 4932), False, 'import random\n'), ((11545, 11574), 'torch.utils.data.ConcatDataset', 'ConcatDataset', (['train_datasets'], {}), '(train_datasets)\n', (11558, 11574), False, 'from torch.utils.data import Dataset, DataLoader, ConcatDataset, RandomSampler\n'), ((2682, 2721), 'numpy.zeros', 'np.zeros', (['self.voi_shape'], {'dtype': '"""uint8"""'}), "(self.voi_shape, dtype='uint8')\n", (2690, 2721), True, 'import numpy as np\n'), ((3094, 3133), 'numpy.zeros', 'np.zeros', 
(['self.voi_shape'], {'dtype': '"""uint8"""'}), "(self.voi_shape, dtype='uint8')\n", (3102, 3133), True, 'import numpy as np\n'), ((4515, 4529), 'numpy.min', 'np.min', (['inputs'], {}), '(inputs)\n', (4521, 4529), True, 'import numpy as np\n'), ((4531, 4545), 'numpy.max', 'np.max', (['inputs'], {}), '(inputs)\n', (4537, 4545), True, 'import numpy as np\n'), ((4547, 4562), 'numpy.mean', 'np.mean', (['inputs'], {}), '(inputs)\n', (4554, 4562), True, 'import numpy as np\n'), ((4564, 4578), 'numpy.std', 'np.std', (['inputs'], {}), '(inputs)\n', (4570, 4578), True, 'import numpy as np\n'), ((11658, 11687), 'torch.utils.data.ConcatDataset', 'ConcatDataset', (['train_datasets'], {}), '(train_datasets)\n', (11671, 11687), False, 'from torch.utils.data import Dataset, DataLoader, ConcatDataset, RandomSampler\n'), ((11839, 11866), 'torch.utils.data.ConcatDataset', 'ConcatDataset', (['val_datasets'], {}), '(val_datasets)\n', (11852, 11866), False, 'from torch.utils.data import Dataset, DataLoader, ConcatDataset, RandomSampler\n'), ((2810, 2828), 'imageio.imread', 'imageio.imread', (['fn'], {}), '(fn)\n', (2824, 2828), False, 'import imageio\n'), ((3216, 3234), 'imageio.imread', 'imageio.imread', (['fn'], {}), '(fn)\n', (3230, 3234), False, 'import imageio\n')] |
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_index_equal
from evalml.pipelines import RegressionPipeline
def test_regression_init():
clf = RegressionPipeline(
component_graph=["Imputer", "One Hot Encoder", "Random Forest Regressor"]
)
assert clf.parameters == {
"Imputer": {
"categorical_impute_strategy": "most_frequent",
"numeric_impute_strategy": "mean",
"categorical_fill_value": None,
"numeric_fill_value": None,
},
"One Hot Encoder": {
"top_n": 10,
"features_to_encode": None,
"categories": None,
"drop": "if_binary",
"handle_unknown": "ignore",
"handle_missing": "error",
},
"Random Forest Regressor": {"n_estimators": 100, "max_depth": 6, "n_jobs": -1},
}
assert clf.name == "Random Forest Regressor w/ Imputer + One Hot Encoder"
assert clf.random_seed == 0
parameters = {"One Hot Encoder": {"top_n": 20}}
clf = RegressionPipeline(
component_graph=["Imputer", "One Hot Encoder", "Random Forest Regressor"],
parameters=parameters,
custom_name="Custom Pipeline",
random_seed=42,
)
assert clf.parameters == {
"Imputer": {
"categorical_impute_strategy": "most_frequent",
"numeric_impute_strategy": "mean",
"categorical_fill_value": None,
"numeric_fill_value": None,
},
"One Hot Encoder": {
"top_n": 20,
"features_to_encode": None,
"categories": None,
"drop": "if_binary",
"handle_unknown": "ignore",
"handle_missing": "error",
},
"Random Forest Regressor": {"n_estimators": 100, "max_depth": 6, "n_jobs": -1},
}
assert clf.name == "Custom Pipeline"
assert clf.random_seed == 42
@pytest.mark.parametrize("target_type", ["category", "string", "bool"])
def test_invalid_targets_regression_pipeline(
breast_cancer_local, wine_local, target_type, dummy_regression_pipeline_class
):
X, y = wine_local
if target_type == "category":
y = pd.Series(y).astype("category")
if target_type == "bool":
X, y = breast_cancer_local
y = y.map({"malignant": False, "benign": True})
mock_regression_pipeline = dummy_regression_pipeline_class(parameters={})
with pytest.raises(
ValueError, match="Regression pipeline can only handle numeric target data"
):
mock_regression_pipeline.fit(X, y)
def test_woodwork_regression_pipeline(diabetes_local, linear_regression_pipeline_class):
X, y = diabetes_local
regression_pipeline = linear_regression_pipeline_class(
parameters={"Linear Regressor": {"n_jobs": 1}}
)
regression_pipeline.fit(X, y)
assert not pd.isnull(regression_pipeline.predict(X)).any()
@pytest.mark.parametrize(
"index",
[
list(range(-5, 0)),
list(range(100, 105)),
[f"row_{i}" for i in range(5)],
pd.date_range("2020-09-08", periods=5),
],
)
def test_pipeline_transform_and_predict_with_custom_index(
index,
linear_regression_pipeline_class,
):
X = pd.DataFrame(
{"categories": [f"cat_{i}" for i in range(5)], "numbers": np.arange(5)},
index=index,
)
X.ww.init(logical_types={"categories": "categorical"})
y = pd.Series([0, 1.0, 1, 1, 0], index=index)
pipeline = linear_regression_pipeline_class(
parameters={"Linear Regressor": {"n_jobs": 1}}
)
pipeline.fit(X, y)
predictions = pipeline.predict(X)
assert_index_equal(predictions.index, X.index)
| [
"pandas.Series",
"evalml.pipelines.RegressionPipeline",
"numpy.arange",
"pandas.testing.assert_index_equal",
"pytest.mark.parametrize",
"pytest.raises",
"pandas.date_range"
] | [((1943, 2013), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""target_type"""', "['category', 'string', 'bool']"], {}), "('target_type', ['category', 'string', 'bool'])\n", (1966, 2013), False, 'import pytest\n'), ((188, 285), 'evalml.pipelines.RegressionPipeline', 'RegressionPipeline', ([], {'component_graph': "['Imputer', 'One Hot Encoder', 'Random Forest Regressor']"}), "(component_graph=['Imputer', 'One Hot Encoder',\n 'Random Forest Regressor'])\n", (206, 285), False, 'from evalml.pipelines import RegressionPipeline\n'), ((1065, 1237), 'evalml.pipelines.RegressionPipeline', 'RegressionPipeline', ([], {'component_graph': "['Imputer', 'One Hot Encoder', 'Random Forest Regressor']", 'parameters': 'parameters', 'custom_name': '"""Custom Pipeline"""', 'random_seed': '(42)'}), "(component_graph=['Imputer', 'One Hot Encoder',\n 'Random Forest Regressor'], parameters=parameters, custom_name=\n 'Custom Pipeline', random_seed=42)\n", (1083, 1237), False, 'from evalml.pipelines import RegressionPipeline\n'), ((3449, 3490), 'pandas.Series', 'pd.Series', (['[0, 1.0, 1, 1, 0]'], {'index': 'index'}), '([0, 1.0, 1, 1, 0], index=index)\n', (3458, 3490), True, 'import pandas as pd\n'), ((3666, 3712), 'pandas.testing.assert_index_equal', 'assert_index_equal', (['predictions.index', 'X.index'], {}), '(predictions.index, X.index)\n', (3684, 3712), False, 'from pandas.testing import assert_index_equal\n'), ((2453, 2548), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Regression pipeline can only handle numeric target data"""'}), "(ValueError, match=\n 'Regression pipeline can only handle numeric target data')\n", (2466, 2548), False, 'import pytest\n'), ((3091, 3129), 'pandas.date_range', 'pd.date_range', (['"""2020-09-08"""'], {'periods': '(5)'}), "('2020-09-08', periods=5)\n", (3104, 3129), True, 'import pandas as pd\n'), ((3339, 3351), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (3348, 3351), True, 'import numpy as np\n'), ((2213, 
2225), 'pandas.Series', 'pd.Series', (['y'], {}), '(y)\n', (2222, 2225), True, 'import pandas as pd\n')] |
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from RK_Driver import dcm_from_q
# Make a 2D plot of the results for the angular velocity
# Inputs:
# - Time array (time[ N ]) [sec]
# - Angular rate array (om_arr[ N ][ 3 ]) [rad/s]
# - fig-name:
# -- "0" if no figure is to be saved
# -- string which is the fig name otherwise
def plot_2D( time , om_arr , fig_name ):
# Plot the results
fig, ax = plt.subplots()
ax.plot( time , np.transpose( om_arr )[ 0 ] , linestyle = "solid" , label = r"\omega_x" )
ax.plot( time , np.transpose( om_arr )[ 1 ] , linestyle = "solid" , label = r"\omega_y" )
ax.plot( time , np.transpose( om_arr )[ 2 ] , linestyle = "solid" , label = r"\omega_z" )
plt.xlabel( "Time (s)" )
plt.ylabel( "Angular Rate [rad/s]" )
plt.title( "Intermediate Axis Plot" )
plt.grid()
if fig_name != "0":
fig.savefig( fig_name + ".png" , format = "png" )
plt.show( )
# Make a 2D plot comparing 2 angular velocity results
# Inputs:
# - First time array (time_1[ N ]) [sec]
# - Second time array (time_2[ N ]) [sec]
# - First angular rate array (om_arr_1[ N ][ 3 ]) [rad/s]
# - Second angular rate array (om_arr_2[ N ][ 3 ]) [rad/s]
# - fig-name:
# -- "0" if no figure is to be saved
# -- string which is the fig name otherwise
# NOTE: The first figure will be with solid line, second one will be with dots on top!
def plot_2D_comparison( time_1 , time_2 , om_arr_1 , om_arr_2 , fig_name ):
# Plot the results
fig, ax = plt.subplots()
ax.plot( time_1 , np.transpose( om_arr_1 )[ 0 ] , linestyle = "solid" , label = r"$\omega_x$ numerical" , color = "red" )
ax.plot( time_1 , np.transpose( om_arr_1 )[ 1 ] , linestyle = "solid" , label = r"$\omega_y$ numerical" , color = "green" )
ax.plot( time_1 , np.transpose( om_arr_1 )[ 2 ] , linestyle = "solid" , label = r"$\omega_z$ numerical" , color = "blue" )
ax.plot( time_2 , np.transpose( om_arr_2 )[ 0 ] , linestyle = "dotted" , linewidth = 3 , label = r"$\omega_x$ perturbed" , color = "forestgreen" )
ax.plot( time_2 , np.transpose( om_arr_2 )[ 1 ] , linestyle = "dotted" , linewidth = 3 , label = r"$\omega_y$ perturbed" , color = "royalblue" )
ax.plot( time_2 , np.transpose( om_arr_2 )[ 2 ] , linestyle = "dotted" , linewidth = 3 , label = r"$\omega_z$ perturbed" , color = "brown" )
plt.xlabel( "Time (s)" )
plt.ylabel( "Angular Rate [rad/s]" )
plt.title( "Exact vs. Perturbative comparison" )
plt.legend( loc = "upper right" )
plt.grid()
if fig_name != "0":
fig.savefig( fig_name + ".eps" , format = "eps" )
plt.show( )
# Make a 3D animation of the results for the angular velocity
# Inputs:
# - Time array (time[ N ]) [sec]
# - Angular rate array (om_arr[ N ][ 3 ]) [rad/s]
def animate_3D( time , om_arr ):
ax = plt.figure( ).add_subplot( projection = "3d" )
for i in range( 0 , len( time ) ):
plt.cla( )
ax.quiver( 0 , 0 , 0 , om_arr[ i ][ 0 ] , om_arr[ i ][ 1 ] , om_arr[ i ][ 2 ] , normalize = True )
ax.set_xlim3d( -1.2 , 1.2 )
ax.set_ylim3d( -1.2 , 1.2 )
ax.set_zlim3d( -1.2 , 1.2 )
plt.pause( 0.1 )
# Make a 3D animation of 3 basis vectors (dreibein) as the body-fixed axes
# Inputs:
# - Time array (time[ N ]) [sec]
# - Unit quaternion array (q_arr[ N ][ 4 ]) [-]
def animate_dreibein_old( time , q_arr ):
# Define the initial vectors for [ 1 , 0 , 0 , 0 ]
e_1 = [ 1.0 , 0.0 , 0.0 ]
e_2 = [ 0.0 , 1.0 , 0.0 ]
e_3 = [ 0.0 , 0.0 , 1.0 ]
# These forma triad (dreibein) of vectors { e_1 , e_2 , e_3 } which are orthonormal
# They will be rotated to visualize a body's motion
# Initialize 3 vectors which will hold the rotated dreibein for each step
v_1 = np.zeros( 3 )
v_2 = np.zeros( 3 )
v_3 = np.zeros( 3 )
ax = plt.figure( ).add_subplot( projection = "3d" )
for i in range( 0 , len( time ) ):
# Compute the DCM from the current quaternion
dcm_q = dcm_from_q( q_arr[ i ] )
v_1 = np.matmul( dcm_q , e_1 )
v_2 = np.matmul( dcm_q , e_2 )
v_3 = np.matmul( dcm_q , e_3 )
plt.cla( )
ax.quiver( 0 , 0 , 0 , v_1[ 0 ] , v_1[ 1 ] , v_1[ 2 ] , normalize = True , color = "red" )
ax.quiver( 0 , 0 , 0 , v_2[ 0 ] , v_2[ 1 ] , v_2[ 2 ] , normalize = True , color = "green" )
ax.quiver( 0 , 0 , 0 , v_3[ 0 ] , v_3[ 1 ] , v_3[ 2 ] , normalize = True , color = "blue" )
ax.set_xlim3d( -1.2 , 1.2 )
ax.set_ylim3d( -1.2 , 1.2 )
ax.set_zlim3d( -1.2 , 1.2 )
plt.pause( 0.01 )
# Update the arrows for the dreibein
# Inputs:
# - Unit quaternion at current time step q_arr[ 4 ] [-]
# Output:
# - 3D array with zeros (the origin of the vectors)
# - 3D array containing v_i (the three vectors) for i = 0, 1, 2 TRANSPOSED
def get_arrows( q_arr ):
# Define the initial vectors for [ 1 , 0 , 0 , 0 ]
e_1 = [ 1.0 , 0.0 , 0.0 ]
e_2 = [ 0.0 , 1.0 , 0.0 ]
e_3 = [ 0.0 , 0.0 , 1.0 ]
# These forma triad (dreibein) of vectors { e_1 , e_2 , e_3 } which are orthonormal
# They will be rotated to visualize a body's motion
# Initialize 3 vectors which will hold the rotated dreibein for each step
v_1 = np.zeros( 3 )
v_2 = np.zeros( 3 )
v_3 = np.zeros( 3 )
# Compute the DCM from the current quaternion
dcm_q = dcm_from_q( q_arr )
v_1 = np.matmul( dcm_q , e_1 )
v_2 = np.matmul( dcm_q , e_2 )
v_3 = np.matmul( dcm_q , e_3 )
o_vec = np.zeros( ( 3 , 3 ) ) # Origin of the vectors
v_vec = np.transpose( np.array( [ v_1 , v_2 , v_3 ] ) ) # Matrix indexing each of the vector components
return o_vec[ 0 ], o_vec[ 1 ], o_vec[ 2 ], v_vec[ 0 ], v_vec[ 1 ], v_vec[ 2 ]
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"RK_Driver.dcm_from_q",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.matmul",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.title",
"numpy.transpose",
"matplotlib.pyplot.cla",
"matplotlib.py... | [((496, 510), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (508, 510), True, 'import matplotlib.pyplot as plt\n'), ((799, 821), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (809, 821), True, 'import matplotlib.pyplot as plt\n'), ((828, 862), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Angular Rate [rad/s]"""'], {}), "('Angular Rate [rad/s]')\n", (838, 862), True, 'import matplotlib.pyplot as plt\n'), ((869, 904), 'matplotlib.pyplot.title', 'plt.title', (['"""Intermediate Axis Plot"""'], {}), "('Intermediate Axis Plot')\n", (878, 904), True, 'import matplotlib.pyplot as plt\n'), ((912, 922), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (920, 922), True, 'import matplotlib.pyplot as plt\n'), ((1011, 1021), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1019, 1021), True, 'import matplotlib.pyplot as plt\n'), ((1584, 1598), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1596, 1598), True, 'import matplotlib.pyplot as plt\n'), ((2432, 2454), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (2442, 2454), True, 'import matplotlib.pyplot as plt\n'), ((2461, 2495), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Angular Rate [rad/s]"""'], {}), "('Angular Rate [rad/s]')\n", (2471, 2495), True, 'import matplotlib.pyplot as plt\n'), ((2502, 2548), 'matplotlib.pyplot.title', 'plt.title', (['"""Exact vs. Perturbative comparison"""'], {}), "('Exact vs. 
Perturbative comparison')\n", (2511, 2548), True, 'import matplotlib.pyplot as plt\n'), ((2555, 2584), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (2565, 2584), True, 'import matplotlib.pyplot as plt\n'), ((2594, 2604), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2602, 2604), True, 'import matplotlib.pyplot as plt\n'), ((2693, 2703), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2701, 2703), True, 'import matplotlib.pyplot as plt\n'), ((3848, 3859), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3856, 3859), True, 'import numpy as np\n'), ((3872, 3883), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3880, 3883), True, 'import numpy as np\n'), ((3896, 3907), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3904, 3907), True, 'import numpy as np\n'), ((5330, 5341), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (5338, 5341), True, 'import numpy as np\n'), ((5354, 5365), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (5362, 5365), True, 'import numpy as np\n'), ((5378, 5389), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (5386, 5389), True, 'import numpy as np\n'), ((5455, 5472), 'RK_Driver.dcm_from_q', 'dcm_from_q', (['q_arr'], {}), '(q_arr)\n', (5465, 5472), False, 'from RK_Driver import dcm_from_q\n'), ((5486, 5507), 'numpy.matmul', 'np.matmul', (['dcm_q', 'e_1'], {}), '(dcm_q, e_1)\n', (5495, 5507), True, 'import numpy as np\n'), ((5521, 5542), 'numpy.matmul', 'np.matmul', (['dcm_q', 'e_2'], {}), '(dcm_q, e_2)\n', (5530, 5542), True, 'import numpy as np\n'), ((5556, 5577), 'numpy.matmul', 'np.matmul', (['dcm_q', 'e_3'], {}), '(dcm_q, e_3)\n', (5565, 5577), True, 'import numpy as np\n'), ((5594, 5610), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (5602, 5610), True, 'import numpy as np\n'), ((3007, 3016), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (3014, 3016), True, 'import matplotlib.pyplot as plt\n'), ((3243, 3257), 
'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (3252, 3257), True, 'import matplotlib.pyplot as plt\n'), ((4086, 4106), 'RK_Driver.dcm_from_q', 'dcm_from_q', (['q_arr[i]'], {}), '(q_arr[i])\n', (4096, 4106), False, 'from RK_Driver import dcm_from_q\n'), ((4126, 4147), 'numpy.matmul', 'np.matmul', (['dcm_q', 'e_1'], {}), '(dcm_q, e_1)\n', (4135, 4147), True, 'import numpy as np\n'), ((4165, 4186), 'numpy.matmul', 'np.matmul', (['dcm_q', 'e_2'], {}), '(dcm_q, e_2)\n', (4174, 4186), True, 'import numpy as np\n'), ((4204, 4225), 'numpy.matmul', 'np.matmul', (['dcm_q', 'e_3'], {}), '(dcm_q, e_3)\n', (4213, 4225), True, 'import numpy as np\n'), ((4238, 4247), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (4245, 4247), True, 'import matplotlib.pyplot as plt\n'), ((4667, 4682), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (4676, 4682), True, 'import matplotlib.pyplot as plt\n'), ((5666, 5691), 'numpy.array', 'np.array', (['[v_1, v_2, v_3]'], {}), '([v_1, v_2, v_3])\n', (5674, 5691), True, 'import numpy as np\n'), ((532, 552), 'numpy.transpose', 'np.transpose', (['om_arr'], {}), '(om_arr)\n', (544, 552), True, 'import numpy as np\n'), ((626, 646), 'numpy.transpose', 'np.transpose', (['om_arr'], {}), '(om_arr)\n', (638, 646), True, 'import numpy as np\n'), ((720, 740), 'numpy.transpose', 'np.transpose', (['om_arr'], {}), '(om_arr)\n', (732, 740), True, 'import numpy as np\n'), ((1622, 1644), 'numpy.transpose', 'np.transpose', (['om_arr_1'], {}), '(om_arr_1)\n', (1634, 1644), True, 'import numpy as np\n'), ((1748, 1770), 'numpy.transpose', 'np.transpose', (['om_arr_1'], {}), '(om_arr_1)\n', (1760, 1770), True, 'import numpy as np\n'), ((1876, 1898), 'numpy.transpose', 'np.transpose', (['om_arr_1'], {}), '(om_arr_1)\n', (1888, 1898), True, 'import numpy as np\n'), ((2004, 2026), 'numpy.transpose', 'np.transpose', (['om_arr_2'], {}), '(om_arr_2)\n', (2016, 2026), True, 'import numpy as np\n'), ((2155, 2177), 
'numpy.transpose', 'np.transpose', (['om_arr_2'], {}), '(om_arr_2)\n', (2167, 2177), True, 'import numpy as np\n'), ((2304, 2326), 'numpy.transpose', 'np.transpose', (['om_arr_2'], {}), '(om_arr_2)\n', (2316, 2326), True, 'import numpy as np\n'), ((2904, 2916), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2914, 2916), True, 'import matplotlib.pyplot as plt\n'), ((3920, 3932), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3930, 3932), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 8 20:58:20 2021
@author: <NAME>
@email: <EMAIL>
Conjunto básico de testes de unidade para avaliar as instâncias utilizadas.
"""
from unittest import TestCase, main
import os
import numpy as np
import pandas as pd
class BasicTests(TestCase):
#instancia sem problema
def test_file_analysis_instance1(self):
path = os.path.join('data', 'instances', 'instance-1.csv')
df = pd.read_csv(path, header=None)
values_init_phase0 = df.iloc[0][:10].values
values_final_phase0 = df.iloc[0][799999:800003].values
values_init_phase1 = df.iloc[1][:10].values
values_final_phase1 = df.iloc[1][799999:800003].values
values_init_phase2 = df.iloc[2][:10].values
values_final_phase2 = df.iloc[2][799999:800003].values
serie_init_phase0 = np.array([18, 18, 17, 18, 18, 18, 19, 18, 18, 17])
serie_final_phase0 = np.array([17, 0, 0, 0])
serie_init_phase1 = np.array([1, 0, -1, 1, 0, 0, 1, 0, 0, 0])
serie_final_phase1 = np.array([0, 1, 1, 0])
serie_init_phase2 = np.array([-19, -19, -20, -19, -19, -20, -18, -19, -20, -19])
serie_final_phase2 = np.array([-19, 2, 2, 0])
self.assertEqual((values_init_phase0 == serie_init_phase0).all(), True)
self.assertEqual((values_final_phase0 == serie_final_phase0).all(), True)
self.assertEqual((values_init_phase1 == serie_init_phase1).all(), True)
self.assertEqual((values_final_phase1 == serie_final_phase1).all(), True)
self.assertEqual((values_init_phase2 == serie_init_phase2).all(), True)
self.assertEqual((values_final_phase2 == serie_final_phase2).all(), True)
#instancia com problema
def test_file_analysis_instance153(self):
path = os.path.join('data', 'instances', 'instance-153.csv')
df = pd.read_csv(path, header=None)
values_init_phase0 = df.iloc[0][:10].values
values_final_phase0 = df.iloc[0][799999:800003].values
values_init_phase1 = df.iloc[1][:10].values
values_final_phase1 = df.iloc[1][799999:800003].values
values_init_phase2 = df.iloc[2][:10].values
values_final_phase2 = df.iloc[2][799999:800003].values
serie_init_phase0 = np.array([-15, -13, -13, -13, -13, -14, -14, -15, -16, -16])
serie_final_phase0 = np.array([-11, 456, 0, 1])
serie_init_phase1 = np.array([18, 22, 21, 20, 22, 19, 20, 20, 16, 20])
serie_final_phase1 = np.array([21, 457, 1, 1])
serie_init_phase2 = np.array([-7, -3, -4, -6, -3, -6, -5, -5, -8, -5])
serie_final_phase2 = np.array([-3, 458, 2, 1])
self.assertEqual((values_init_phase0 == serie_init_phase0).all(), True)
self.assertEqual((values_final_phase0 == serie_final_phase0).all(), True)
self.assertEqual((values_init_phase1 == serie_init_phase1).all(), True)
self.assertEqual((values_final_phase1 == serie_final_phase1).all(), True)
self.assertEqual((values_init_phase2 == serie_init_phase2).all(), True)
self.assertEqual((values_final_phase2 == serie_final_phase2).all(), True)
if __name__ == '__main__':
main()
| [
"unittest.main",
"numpy.array",
"os.path.join",
"pandas.read_csv"
] | [((3205, 3211), 'unittest.main', 'main', ([], {}), '()\n', (3209, 3211), False, 'from unittest import TestCase, main\n'), ((410, 461), 'os.path.join', 'os.path.join', (['"""data"""', '"""instances"""', '"""instance-1.csv"""'], {}), "('data', 'instances', 'instance-1.csv')\n", (422, 461), False, 'import os\n'), ((475, 505), 'pandas.read_csv', 'pd.read_csv', (['path'], {'header': 'None'}), '(path, header=None)\n', (486, 505), True, 'import pandas as pd\n'), ((879, 929), 'numpy.array', 'np.array', (['[18, 18, 17, 18, 18, 18, 19, 18, 18, 17]'], {}), '([18, 18, 17, 18, 18, 18, 19, 18, 18, 17])\n', (887, 929), True, 'import numpy as np\n'), ((959, 982), 'numpy.array', 'np.array', (['[17, 0, 0, 0]'], {}), '([17, 0, 0, 0])\n', (967, 982), True, 'import numpy as np\n'), ((1011, 1052), 'numpy.array', 'np.array', (['[1, 0, -1, 1, 0, 0, 1, 0, 0, 0]'], {}), '([1, 0, -1, 1, 0, 0, 1, 0, 0, 0])\n', (1019, 1052), True, 'import numpy as np\n'), ((1082, 1104), 'numpy.array', 'np.array', (['[0, 1, 1, 0]'], {}), '([0, 1, 1, 0])\n', (1090, 1104), True, 'import numpy as np\n'), ((1133, 1193), 'numpy.array', 'np.array', (['[-19, -19, -20, -19, -19, -20, -18, -19, -20, -19]'], {}), '([-19, -19, -20, -19, -19, -20, -18, -19, -20, -19])\n', (1141, 1193), True, 'import numpy as np\n'), ((1223, 1247), 'numpy.array', 'np.array', (['[-19, 2, 2, 0]'], {}), '([-19, 2, 2, 0])\n', (1231, 1247), True, 'import numpy as np\n'), ((1828, 1881), 'os.path.join', 'os.path.join', (['"""data"""', '"""instances"""', '"""instance-153.csv"""'], {}), "('data', 'instances', 'instance-153.csv')\n", (1840, 1881), False, 'import os\n'), ((1895, 1925), 'pandas.read_csv', 'pd.read_csv', (['path'], {'header': 'None'}), '(path, header=None)\n', (1906, 1925), True, 'import pandas as pd\n'), ((2299, 2359), 'numpy.array', 'np.array', (['[-15, -13, -13, -13, -13, -14, -14, -15, -16, -16]'], {}), '([-15, -13, -13, -13, -13, -14, -14, -15, -16, -16])\n', (2307, 2359), True, 'import numpy as np\n'), ((2389, 2415), 
'numpy.array', 'np.array', (['[-11, 456, 0, 1]'], {}), '([-11, 456, 0, 1])\n', (2397, 2415), True, 'import numpy as np\n'), ((2444, 2494), 'numpy.array', 'np.array', (['[18, 22, 21, 20, 22, 19, 20, 20, 16, 20]'], {}), '([18, 22, 21, 20, 22, 19, 20, 20, 16, 20])\n', (2452, 2494), True, 'import numpy as np\n'), ((2524, 2549), 'numpy.array', 'np.array', (['[21, 457, 1, 1]'], {}), '([21, 457, 1, 1])\n', (2532, 2549), True, 'import numpy as np\n'), ((2578, 2628), 'numpy.array', 'np.array', (['[-7, -3, -4, -6, -3, -6, -5, -5, -8, -5]'], {}), '([-7, -3, -4, -6, -3, -6, -5, -5, -8, -5])\n', (2586, 2628), True, 'import numpy as np\n'), ((2658, 2683), 'numpy.array', 'np.array', (['[-3, 458, 2, 1]'], {}), '([-3, 458, 2, 1])\n', (2666, 2683), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import math
from pynverse import inversefunc
from IPython import get_ipython
get_ipython().magic('reset -sf')
import pandas as pd
from scipy.optimize import leastsq, least_squares, curve_fit
import os
from scipy import interpolate
import scipy.integrate as integrate
def theta1_func(H_value,R,n1,n2):
if n1>n2:
tc=np.arcsin(n2/n1)
H=lambda theta1 :R*np.sin(theta1)/np.cos(np.arcsin(n1/n2*np.sin(theta1))-theta1)*1/(1-np.tan(np.arcsin(n1/n2*np.sin(theta1))-theta1)/np.tan(np.arcsin(n1/n2*np.sin(theta1))))
theta=inversefunc(H,y_values=H_value,domain=[-tc, tc])
if H_value>=0:
h=R*np.sin(theta)/np.cos(np.arcsin(n1/n2*np.sin(theta))-theta)
theta_scattering=np.arcsin(R*np.sin(theta)/h)
else:
h=R*np.sin(theta)/np.cos(np.arcsin(n1/n2*np.sin(theta))-theta)
theta_scattering=math.pi-np.arcsin(R*np.sin(theta)/h)
else:
tc=np.arcsin(n1/n2)
H=lambda theta1 :R*np.sin(theta1)/np.cos(np.arcsin(n1/n2*np.sin(theta1))-theta1)*1/(1+np.tan(np.arcsin(n1/n2*np.sin(theta1))-theta1)/np.tan(np.arcsin(n1/n2*np.sin(theta1))))
theta=inversefunc(H,y_values=H_value,domain=[-tc, tc])
if H_value<=0:
h=R*np.sin(theta)/np.cos(np.arcsin(n1/n2*np.sin(theta))-theta)
theta_scattering=np.arcsin(R*np.sin(theta)/h)
else:
h=R*np.sin(theta)/np.cos(np.arcsin(n1/n2*np.sin(theta))-theta)
theta_scattering=math.pi-np.arcsin(R*np.sin(theta)/h)
return h,theta_scattering
def SingExp(x, amp, decay, baseline):
    """Squared single-exponential decay: (amp * e^(-x/decay))^2 + baseline."""
    envelope = amp * np.exp(-x / decay)
    return envelope ** 2 + baseline
def DoubleExp(x, amp1, decay1, amp2, decay2, baseline):
    """Squared sum of two exponential decays plus a constant baseline."""
    total = amp1 * np.exp(-x / decay1) + amp2 * np.exp(-x / decay2)
    return total * total + baseline
def DoubleStretchExp(x, amp1, decay1, amp2, decay2, baseline, beta, gamma):
    """Sum of two stretched-exponential decays (exponents 2*beta and 2*gamma) plus baseline."""
    first = (amp1 * np.exp(-x / decay1)) ** (2 * beta)
    second = (amp2 * np.exp(-x / decay2)) ** (2 * gamma)
    return first + second + baseline
def SingleStretchExp(x, amp1, decay1, baseline, beta):
    """Stretched exponential: amp1 * exp(-x/decay1)^beta + baseline (not squared)."""
    stretched = np.exp(-(x / decay1)) ** beta
    return amp1 * stretched + baseline
def TripleExp(x, amp1, decay1, amp2, decay2, amp3, decay3, baseline):
    """Squared sum of three exponential decays plus a constant baseline."""
    amps = (amp1, amp2, amp3)
    decays = (decay1, decay2, decay3)
    total = sum(a * np.exp(-x / d) for a, d in zip(amps, decays))
    return total ** 2 + baseline
def StretchExp(x, amp, decay, baseline, beta):
    """Stretched squared exponential: (amp * e^(-x/decay))^(2*beta) + baseline."""
    envelope = amp * np.exp(-x / decay)
    return envelope ** (2 * beta) + baseline
def FromTautoTheta(tau,tau_err,T,R_particle,wavelength,nu,n):
    """Convert a DLS relaxation time to a scattering angle with error propagation.

    Parameters
    ----------
    tau, tau_err : relaxation time and its uncertainty (same units; seconds assumed -- confirm)
    T            : temperature in kelvin (used with kb)
    R_particle   : particle radius in nanometres (converted via *1e-9)
    wavelength   : laser wavelength (same length units as implied by D*tau -- confirm)
    nu           : solvent viscosity
    n            : refractive index of the medium

    Returns
    -------
    D         : Stokes-Einstein diffusion coefficient
    theta     : scattering angle in degrees (the *360/(2*math.pi) factor converts rad -> deg)
    theta_err : propagated uncertainty on theta, in degrees
    """
    # Boltzmann constant (J/K).
    kb=1.38064852*10**-23
    # Stokes-Einstein: D = kb*T / (6*pi*nu*R), with R given in nm.
    D = kb*T/(6*math.pi*nu*(R_particle*10**-9))
    # Invert q^2*D = 1/tau with q = 4*pi*n*sin(theta/2)/wavelength.
    theta = 2* np.arcsin( (1/(D*tau))**0.5*wavelength/(4*math.pi*n) ) *360/(2*math.pi)
    # Propagate tau_err through d(theta)/d(tau); precedence makes the inner term
    # wavelength*sqrt(D*tau)/(4*n*pi) -- NOTE(review): that is the reciprocal of the
    # arcsin argument above; confirm the derivative formula is the intended one.
    theta_err = 2 * 1 / ( 1- ( wavelength / (4 * n * math.pi * 1 / ( D * tau )**0.5 ) )**2 )**0.5 * wavelength / (8 * n * math.pi) * 1 / D**0.5 * tau**-1.5 * tau_err *360/(2*math.pi)
    return D, theta, theta_err
def SFinterpolation(x, y):
    """Return a (linear) scipy interpolant of the sampled curve (x, y)."""
    return interpolate.interp1d(x, y)
def SFintegration(x, y, x0, xmax):
    """Integrate the linear interpolant of (x, y) from x0 to xmax.

    Returns the (value, abserr) tuple produced by scipy.integrate.quad.
    """
    interpolant = interpolate.interp1d(x, y)
    return integrate.quad(interpolant, x0, xmax)
def AsymmetryCalculator(func):
    """Sum over the first half of |func[i] - func[-i]| / 2, normalized by mean(func).

    NOTE(review): for i == 0 the term func[-0] is func[0], so the first pair
    contributes exactly zero; func[-1 - i] may have been intended -- confirm.
    """
    mean_value = np.mean(np.asarray(func))
    half = int(len(func) / 2)
    terms = [np.abs(func[i] - func[-i]) / 2 / mean_value for i in range(half)]
    return np.sum(np.asarray(terms))
def truncate(n, decimals=0):
    """Truncate n toward zero to the given number of decimal places."""
    scale = 10 ** decimals
    return int(n * scale) / scale
| [
"IPython.get_ipython",
"numpy.abs",
"scipy.integrate.quad",
"numpy.arcsin",
"numpy.asarray",
"scipy.interpolate.interp1d",
"numpy.exp",
"pynverse.inversefunc",
"numpy.sin"
] | [((3502, 3528), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['x', 'y'], {}), '(x, y)\n', (3522, 3528), False, 'from scipy import interpolate\n'), ((3587, 3613), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['x', 'y'], {}), '(x, y)\n', (3607, 3613), False, 'from scipy import interpolate\n'), ((3623, 3650), 'scipy.integrate.quad', 'integrate.quad', (['f', 'x0', 'xmax'], {}), '(f, x0, xmax)\n', (3637, 3650), True, 'import scipy.integrate as integrate\n'), ((135, 148), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (146, 148), False, 'from IPython import get_ipython\n'), ((398, 416), 'numpy.arcsin', 'np.arcsin', (['(n2 / n1)'], {}), '(n2 / n1)\n', (407, 416), True, 'import numpy as np\n'), ((613, 663), 'pynverse.inversefunc', 'inversefunc', (['H'], {'y_values': 'H_value', 'domain': '[-tc, tc]'}), '(H, y_values=H_value, domain=[-tc, tc])\n', (624, 663), False, 'from pynverse import inversefunc\n'), ((1012, 1030), 'numpy.arcsin', 'np.arcsin', (['(n1 / n2)'], {}), '(n1 / n2)\n', (1021, 1030), True, 'import numpy as np\n'), ((1227, 1277), 'pynverse.inversefunc', 'inversefunc', (['H'], {'y_values': 'H_value', 'domain': '[-tc, tc]'}), '(H, y_values=H_value, domain=[-tc, tc])\n', (1238, 1277), False, 'from pynverse import inversefunc\n'), ((3899, 3914), 'numpy.asarray', 'np.asarray', (['ass'], {}), '(ass)\n', (3909, 3914), True, 'import numpy as np\n'), ((1780, 1798), 'numpy.exp', 'np.exp', (['(-x / decay)'], {}), '(-x / decay)\n', (1786, 1798), True, 'import numpy as np\n'), ((2476, 2497), 'numpy.exp', 'np.exp', (['(-(x / decay1))'], {}), '(-(x / decay1))\n', (2482, 2497), True, 'import numpy as np\n'), ((2948, 2966), 'numpy.exp', 'np.exp', (['(-x / decay)'], {}), '(-x / decay)\n', (2954, 2966), True, 'import numpy as np\n'), ((3174, 3240), 'numpy.arcsin', 'np.arcsin', (['((1 / (D * tau)) ** 0.5 * wavelength / (4 * math.pi * n))'], {}), '((1 / (D * tau)) ** 0.5 * wavelength / (4 * math.pi * n))\n', (3183, 3240), True, 'import numpy as 
np\n'), ((713, 726), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (719, 726), True, 'import numpy as np\n'), ((863, 876), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (869, 876), True, 'import numpy as np\n'), ((1327, 1340), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1333, 1340), True, 'import numpy as np\n'), ((1477, 1490), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1483, 1490), True, 'import numpy as np\n'), ((1982, 2001), 'numpy.exp', 'np.exp', (['(-x / decay1)'], {}), '(-x / decay1)\n', (1988, 2001), True, 'import numpy as np\n'), ((2009, 2028), 'numpy.exp', 'np.exp', (['(-x / decay2)'], {}), '(-x / decay2)\n', (2015, 2028), True, 'import numpy as np\n'), ((2230, 2249), 'numpy.exp', 'np.exp', (['(-x / decay1)'], {}), '(-x / decay1)\n', (2236, 2249), True, 'import numpy as np\n'), ((2269, 2288), 'numpy.exp', 'np.exp', (['(-x / decay2)'], {}), '(-x / decay2)\n', (2275, 2288), True, 'import numpy as np\n'), ((2752, 2771), 'numpy.exp', 'np.exp', (['(-x / decay3)'], {}), '(-x / decay3)\n', (2758, 2771), True, 'import numpy as np\n'), ((3796, 3822), 'numpy.abs', 'np.abs', (['(func[i] - func[-i])'], {}), '(func[i] - func[-i])\n', (3802, 3822), True, 'import numpy as np\n'), ((3839, 3855), 'numpy.asarray', 'np.asarray', (['func'], {}), '(func)\n', (3849, 3855), True, 'import numpy as np\n'), ((814, 827), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (820, 827), True, 'import numpy as np\n'), ((1428, 1441), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1434, 1441), True, 'import numpy as np\n'), ((2698, 2717), 'numpy.exp', 'np.exp', (['(-x / decay1)'], {}), '(-x / decay1)\n', (2704, 2717), True, 'import numpy as np\n'), ((2725, 2744), 'numpy.exp', 'np.exp', (['(-x / decay2)'], {}), '(-x / decay2)\n', (2731, 2744), True, 'import numpy as np\n'), ((443, 457), 'numpy.sin', 'np.sin', (['theta1'], {}), '(theta1)\n', (449, 457), True, 'import numpy as np\n'), ((972, 985), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', 
(978, 985), True, 'import numpy as np\n'), ((1057, 1071), 'numpy.sin', 'np.sin', (['theta1'], {}), '(theta1)\n', (1063, 1071), True, 'import numpy as np\n'), ((1586, 1599), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1592, 1599), True, 'import numpy as np\n'), ((750, 763), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (756, 763), True, 'import numpy as np\n'), ((900, 913), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (906, 913), True, 'import numpy as np\n'), ((1364, 1377), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1370, 1377), True, 'import numpy as np\n'), ((1514, 1527), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1520, 1527), True, 'import numpy as np\n'), ((580, 594), 'numpy.sin', 'np.sin', (['theta1'], {}), '(theta1)\n', (586, 594), True, 'import numpy as np\n'), ((1194, 1208), 'numpy.sin', 'np.sin', (['theta1'], {}), '(theta1)\n', (1200, 1208), True, 'import numpy as np\n'), ((481, 495), 'numpy.sin', 'np.sin', (['theta1'], {}), '(theta1)\n', (487, 495), True, 'import numpy as np\n'), ((533, 547), 'numpy.sin', 'np.sin', (['theta1'], {}), '(theta1)\n', (539, 547), True, 'import numpy as np\n'), ((1095, 1109), 'numpy.sin', 'np.sin', (['theta1'], {}), '(theta1)\n', (1101, 1109), True, 'import numpy as np\n'), ((1147, 1161), 'numpy.sin', 'np.sin', (['theta1'], {}), '(theta1)\n', (1153, 1161), True, 'import numpy as np\n')] |
import json
import os.path
import time
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict
import torch
from torch import nn, optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from PIL import Image
# TODO: Fix bug with the epoch print in the last batch of the epochs
# TODO: Change steps to number of images
# TODO: Extract dataset name from path
class ImageClassifier:
    """Transfer-learning image classifier built on torchvision models.

    Wraps the full workflow: loading an ImageFolder dataset (train/valid/test),
    building a pre-trained backbone with a custom classifier head, training
    with periodic validation and best/last checkpointing, testing, single-image
    prediction and matplotlib visualization of results.
    """

    def __init__(self):
        """Set default hyper-parameters, device selection and empty trainer state."""
        # Device to use
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # Device to bring back to cpu
        self.device_cpu = torch.device("cpu")
        # data_directory expects: /train /valid /test
        self.data_directory = None
        # dataset folder name
        self.dataset = 'Flowers'
        # Transforms for the training, validation, and testing sets
        self.transform = {}
        # ImageFolder data training, validation, and testing sets
        self.data = {}
        # Data loaders for the training, validation, and testing sets
        self.batch_size = 32
        self.loader = {}
        # Normalization parameters
        self.norm_mean = [0.485, 0.456, 0.406]
        self.norm_std = [0.229, 0.224, 0.225]
        # DL model
        self.model = None
        # DL architecture to load (default value)
        self.arch = 'vgg13'
        # Hidden units of the classifier (default value)
        self.hidden_units = [512, 256]
        # Number of classes of the classifier (set from data['train'].class_to_idx)
        self.nclasses = None
        # Criterion and probability function
        self.criterion = nn.CrossEntropyLoss()
        self.prob_func = nn.Softmax(dim=1)
        # Criterion and probability function
        #self.criterion = nn.NLLLoss()
        #self.prob_func = torch.exp()
        # Optimizer (Adam)
        self.optimizer = None
        # Optimizer learning_rate (default value)
        self.learning_rate = 0.001
        # Training settings
        self.trainer = { 'epochs_to_train': 10,
                         'print_every': 10,
                         'mute': False,
                         'train_losses': [],
                         'validation_losses': [],
                         'accuracy_partials': [],
                         'valid_loss_min': np.inf,
                         'epoch_best': -1,
                         'epoch': [],
                         'step': [],
                         'epochs_acum': 0 }
        # Training stats
        self.running_train_loss = 0
        self.step_cur = 0
        self.step_last = 0
        self.epochs_start = 0
        self.epochs_last = 0
        self.training_start_time = 0
        self.training_last_time = 0
        self.valid_time = 0
        # Dictionaries
        self.class_to_idx = None
        self.idx_to_class = None
        self.class_to_name = None
        # Default checkpoint values
        self.save_directory = 'checkpoints'
        self.get_default_ckeckpoint_name = lambda: ('ckp_' + self.dataset
                                                    + '_' + self.arch
                                                    + '_' + "_".join(str(x) for x in self.hidden_units)
                                                    + '_' + str(self.learning_rate))
                                                    #+ '_' + str(self.trainer['epochs_acum']))

    def use_gpu(self, gpu):
        """Select CUDA (when available) or CPU as the working device."""
        if gpu:
            self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        else:
            self.device = torch.device("cpu")

    def load_data(self, data_directory):
        """Build transforms, ImageFolder datasets and DataLoaders from data_directory.

        Returns True on success, False (with an error print) on any failure.
        """
        try:
            self.data_directory = os.path.expanduser(data_directory)
            self.dataset = self.dataset # TODO: get dataset name as the folder name of the dataset
            print("Loading dataset {} from {}".format(self.dataset, self.data_directory))
            train_dir = os.path.join(self.data_directory, 'train')
            valid_dir = os.path.join(self.data_directory, 'valid')
            test_dir = os.path.join(self.data_directory, 'test')
            # Define your transforms for the training, validation, and testing sets
            self.transform['train'] = transforms.Compose([transforms.RandomRotation(30),
                                                          transforms.RandomResizedCrop(224),
                                                          transforms.RandomHorizontalFlip(),
                                                          transforms.ToTensor(),
                                                          transforms.Normalize(self.norm_mean, self.norm_std)])
            self.transform['valid'] = transforms.Compose([transforms.Resize(255),
                                                          transforms.CenterCrop(224),
                                                          transforms.ToTensor(),
                                                          transforms.Normalize(self.norm_mean, self.norm_std)])
            self.transform['test'] = transforms.Compose([transforms.Resize(255),
                                                         transforms.CenterCrop(224),
                                                         transforms.ToTensor(),
                                                         transforms.Normalize(self.norm_mean, self.norm_std)])
            # Load the datasets with ImageFolder
            self.data['train'] = datasets.ImageFolder(train_dir, transform=self.transform['train'])
            self.data['valid'] = datasets.ImageFolder(valid_dir, transform=self.transform['valid'])
            self.data['test'] = datasets.ImageFolder(test_dir, transform=self.transform['test'])
            # Using the image datasets and the trainforms, define the dataloaders
            self.loader['train'] = torch.utils.data.DataLoader(self.data['train'], batch_size=self.batch_size, shuffle=True)
            self.loader['valid'] = torch.utils.data.DataLoader(self.data['valid'], batch_size=self.batch_size)
            self.loader['test'] = torch.utils.data.DataLoader(self.data['test'], batch_size=self.batch_size, shuffle=True)
            # Save class_to_idx and idx_to_class
            self.class_to_idx = self.data['train'].class_to_idx
            self.idx_to_class = { v: k for k, v in self.class_to_idx.items() }
            # set classifier number of classes
            self.nclasses = len(self.data['train'].class_to_idx)
            return True
        except Exception as e:
            print("[ERR] Loading data:", str(e))
            return False

    def load_class_names(self, filepath):
        """Load the class-index -> human-readable-name mapping from a JSON file."""
        filepath = os.path.expanduser(filepath)
        try:
            with open(filepath, 'r') as f:
                self.class_to_name = json.load(f)
            return True
        except Exception as e:
            print("[ERR] Loading class names json:", str(e))
            return False

    def process_image(self, image):
        ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
            returns an Numpy array
        '''
        # Resize the images where the shortest side is 256 pixels, keeping the aspect ratio (thumbnail or resize)
        image.thumbnail((256, 256))
        # Crop Center
        width, height = image.size
        new_width, new_height = 224, 224
        left = (width - new_width) / 2
        top = (height - new_height) / 2
        right = (width + new_width) / 2
        bottom = (height + new_height) / 2
        image = image.crop((left, top, right, bottom))
        # Convert to numpy array
        image_np = np.array(image)
        # Color channels of images are typically encoded as integers 0-255, but the model expected floats 0-1:
        image_np = image_np / image_np.max()
        # Normalize
        mean = np.array([0.485, 0.456, 0.406])
        std = np.array([0.229, 0.224, 0.225])
        image_np = (image_np - mean) / std
        # Move color channel from 3rd to 1st
        image_np = image_np.transpose(2, 1, 0)
        return image_np

    def crete_output_classifier(self, in_units):
        """Build the fully-connected classifier head (Linear/ReLU/Dropout stack)."""
        units = self.hidden_units.copy()
        units.insert(0,in_units)
        layers_dict = OrderedDict([])
        for i in range(len(units)-1):
            layers_dict['fc'+str(i+1)] = nn.Linear(units[i], units[i+1])
            #layers_dict['relu'+str(i+1)] = nn.ReLU(inplace=True)
            layers_dict['relu'+str(i+1)] = nn.ReLU()
            layers_dict['drop'+str(i+1)] = nn.Dropout(0.2)
        layers_dict['fc'+str(len(self.hidden_units)+1)] = nn.Linear(units[-1], self.nclasses)
        #layers_dict['output'] = nn.LogSoftmax(dim=1)
        return nn.Sequential(layers_dict)

    def create_model(self, arch=None, hidden_units=None):
        """Create a pre-trained backbone, freeze it, and attach the custom head.

        Supported arch values: vgg13, vgg16_bn, densenet121, resnet101.
        Returns the model (moved to self.device), or None for an unknown arch.
        """
        self.arch = arch if arch is not None else self.arch
        self.hidden_units = hidden_units if hidden_units is not None else self.hidden_units
        print('Creating model:', self.arch, 'with hidden_units:', " ".join(str(x) for x in self.hidden_units), 'and nclass:', self.nclasses)
        # create self.model from pre-trained network
        if self.arch == 'vgg13':
            self.model = models.vgg13(pretrained=True)
            # Freeze parameters so we don't backprop through them
            for param in self.model.parameters():
                param.requires_grad = False
            # Replace last part of the pre-trained network
            start_units = self.model.classifier[0].in_features #25088
            self.model.classifier = self.crete_output_classifier(start_units)
            self.model.param_to_optimize = self.model.classifier.parameters()
        elif self.arch == 'vgg16_bn':
            self.model = models.vgg16_bn(pretrained=True)
            # Freeze parameters so we don't backprop through them
            for param in self.model.parameters():
                param.requires_grad = False
            # Replace last part of the pre-trained network
            start_units = self.model.classifier[0].in_features #25088
            self.model.classifier = self.crete_output_classifier(start_units)
            self.model.param_to_optimize = self.model.classifier.parameters()
        elif self.arch == 'densenet121':
            self.model = models.densenet121(pretrained=True)
            # Freeze parameters so we don't backprop through them
            for param in self.model.parameters():
                param.requires_grad = False
            # Replace last part of the pre-trained network
            start_units = self.model.classifier.in_features #1024
            self.model.classifier = self.crete_output_classifier(start_units)
            self.model.param_to_optimize = self.model.classifier.parameters()
        elif self.arch == 'resnet101':
            self.model = models.resnet101(pretrained=True)
            # Freeze parameters so we don't backprop through them
            for param in self.model.parameters():
                param.requires_grad = False
            # Replace last part of the pre-trained network
            start_units = self.model.fc.in_features #2048
            self.model.fc = self.crete_output_classifier(start_units)
            self.model.param_to_optimize = self.model.fc.parameters()
        else:
            print("[ERR] creating_model invalid arch:", self.arch)
            return None
        self.model = self.model.to(self.device)
        return self.model

    def create_optimizer(self, lr=None):
        """Create the Adam optimizer over the (unfrozen) classifier parameters."""
        self.learning_rate = lr if lr is not None else self.learning_rate
        # Only train the classifier parameters, feature parameters are frozen
        self.optimizer = optim.Adam(self.model.param_to_optimize, lr=self.learning_rate)
        return self.optimizer

    def print_stats(self):
        """Print the latest train/valid losses, accuracy and timing estimates."""
        e = self.trainer['epochs_acum']
        nstep = len(self.loader['train'])
        step_remaining = (self.epochs_last - e)*nstep - self.step_cur
        time_spend = time.time() - self.training_last_time
        speed = (self.step_cur - self.step_last)/time_spend
        time_remaining = step_remaining / speed
        print("Epoch: {}/{}.. ".format(e+1, self.epochs_last),
              "Step: {}/{}.. ".format(self.step_cur, nstep),
              "Train Loss: {:.3f}.. ".format(self.trainer['train_losses'][-1]),
              "Valid Loss: {:.3f}.. ".format(self.trainer['validation_losses'][-1]),
              "Valid Accuracy: {:.3f}.. ".format(self.trainer['accuracy_partials'][-1]),
              "Time: {}s/{}s/{}m{}s".format(int(self.valid_time), int(time_spend), int(time_remaining//60), int(time_remaining%60)) )
        #"Time: {}s".format(int(time_spend)),
        #"Remainig: {}m{}s".format(int(time_remaining//60), int(time_remaining%60)))

    def validation(self, save_ckp=False):
        """Run a full pass over the validation loader, record stats, optionally checkpoint."""
        valid_loss, accuracy = 0, 0
        self.valid_time = time.time()
        self.model.eval() # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
        with torch.no_grad():
            for images, labels in self.loader['valid']:
                # Move input and label tensors to the default self.device
                images, labels = images.to(self.device), labels.to(self.device)
                outputs = self.model.forward(images)
                valid_loss += self.criterion(outputs, labels).item()
                # Calculate accuracy
                _, top_class = torch.max(outputs, 1)
                accuracy += torch.mean((top_class == labels.data).type(torch.FloatTensor)).item()
        self.model.train() # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
        self.valid_time = time.time()-self.valid_time
        valid_loss /= len(self.loader['valid'])
        accuracy /= len(self.loader['valid'])
        # Save results
        self.trainer['train_losses'].append(self.running_train_loss/(self.step_cur-self.step_last))
        self.trainer['validation_losses'].append(valid_loss)
        self.trainer['accuracy_partials'].append(accuracy)
        self.trainer['epoch'].append(self.trainer['epochs_acum'])
        self.trainer['step'].append(self.step_cur)
        # Print results
        if not self.trainer['mute']:
            self.print_stats()
        # Save checkpoint
        if save_ckp:
            if valid_loss < self.trainer['valid_loss_min']:
                self.trainer['valid_loss_min'] = valid_loss
                self.trainer['epoch_best'] = self.trainer['epochs_acum']
                self.save_checkpoint(best=True)
            else:
                self.save_checkpoint(best=False)

    def train(self, epochs_to_train = None, save_directory = None, print_every = None):
        """Train for epochs_to_train epochs with periodic validation and checkpoints.

        KeyboardInterrupt is caught so an interrupted run still validates and
        checkpoints; the loss/accuracy curves are plotted and saved at the end.
        """
        self.trainer['epochs_to_train'] = epochs_to_train if epochs_to_train is not None else self.trainer['epochs_to_train']
        self.trainer['print_every'] = print_every if print_every is not None else self.trainer['print_every']
        self.save_directory = save_directory if save_directory is not None else self.save_directory
        self.verify_directory()
        print("Training {} epoch using {}".format(self.trainer['epochs_to_train'], self.device))
        # Set variables for the training
        self.running_train_loss, self.step_cur, self.step_last = 0, 0, 0
        self.training_start_time, self.training_last_time = time.time(), time.time()
        self.epochs_start = self.trainer['epochs_acum']
        self.epochs_last = self.trainer['epochs_acum'] + self.trainer['epochs_to_train']
        try:
            # model in training mode, dropout is on
            self.model.train() # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
            for e in range(self.epochs_start, self.epochs_last):
                for images, labels in self.loader['train']:
                    self.step_cur += 1
                    # Move input and label tensors to the default self.device
                    images, labels = images.to(self.device), labels.to(self.device)
                    self.optimizer.zero_grad()
                    output = self.model.forward(images)
                    loss = self.criterion(output, labels)
                    loss.backward()
                    self.optimizer.step()
                    self.running_train_loss += loss.item()
                    if self.step_cur % self.trainer['print_every'] == 0:
                        # Run validation pass, save and results
                        self.validation(save_ckp=False)
                        # Reset variables per end of print
                        self.running_train_loss = 0
                        self.step_last = self.step_cur
                        self.training_last_time = time.time()
                # End of epoch
                else:
                    self.trainer['epochs_acum'] += 1
                    # Run validation pass, save and results
                    self.validation(save_ckp=True)
                    # Reset variables per end of epoch
                    self.running_train_loss = 0
                    self.step_cur = 0
                    self.step_last = 0
                    self.training_last_time = time.time()
        except KeyboardInterrupt:
            print("Exiting training: KeyboardInterrupt")
            # Run validation pass, save and results
            print("Running final validation step...")
            self.validation(save_ckp=True)
        finally:
            # Print the training time
            time_duration = time.time()-self.training_start_time
            print("Training duration: {}m{}s".format(int(time_duration//60), int(time_duration%60)))
            # Plot the results
            plt.plot(self.trainer['train_losses'], label='Training loss')
            plt.plot(self.trainer['validation_losses'], label='Validation loss')
            plt.plot(self.trainer['accuracy_partials'], label='Acuracy')
            plt.legend(frameon=False)
            plt.savefig(os.path.join(self.save_directory, self.get_default_ckeckpoint_name()+'.png'))
            plt.show()

    def test(self, topk = 2, show_failures=False):
        """Evaluate the model over the test loader, printing per-batch and total accuracy."""
        print(f"Testing using: {str(self.device)}")
        corrects_acum, accuracy_count = 0, 0
        #images, labels = next(iter(self.loader['test']))
        for images, labels in self.loader['test']:
            # Move input and label tensors to the default self.device
            images, labels = images.to(self.device), labels.to(self.device)
            # Disable dropouts and turn off gradients to speed up this part
            self.model.eval() # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
            with torch.no_grad():
                outputs = self.model.forward(images)
                _, top_class = outputs.topk(1, dim=1)
                equals = top_class == labels.view(*top_class.shape)
                corrects = equals.sum()
                corrects_acum += corrects
                accuracy_count += images.size(0)
                accuracy = float(corrects) / images.size(0) * 100
                print('Accuracy partial: {}/{}[{:.2f}%]'.format(corrects, images.size(0), accuracy))
            # if show_failures and accuracy < 1:
            #     print("The following are the mistakes:")
            #     # Bring back to cpu
            #     images, ps_all = images.to(self.device_cpu), ps_all.to(self.device_cpu)
            #     top_class, labels = top_class.to(self.device_cpu), labels.to(self.device_cpu)
            #     # Show the failures
            #     for i,img in enumerate(images):
            #         if top_class[i] != labels[i]:
            #             top_p_i, top_class_i = ps_all[i].topk(topk)
            #             self.view_classify(img, top_p_i, top_class_i, correct=labels[i].item())
        accuracy_total = float(corrects_acum) / accuracy_count * 100
        print('\nAccuracy total: {}/{}[{:.2f}%]'.format(corrects_acum, accuracy_count, accuracy_total))

    def predict(self, image_path, topk=1, show_image=True):
        ''' Predict the class (or classes) of an image using a trained deep learning self.model.
        '''
        image_path = os.path.expanduser(image_path)
        # Load image
        try:
            img_pil = Image.open(image_path)
        except Exception as e:
            print('[ERR] In predict opening image: ' + str(e))
            return None, None
        # Process image
        image_np = self.process_image(img_pil)
        image = torch.from_numpy(image_np).unsqueeze(0)
        print("Predict using: ", self.device)
        # input and label tensors to the default self.device
        image = image.to(self.device, dtype=torch.float)
        # Turn off gradients to speed up this part
        self.model.eval() # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
        with torch.no_grad():
            # If model's output is log-softmax, take exponential to get the probabilities
            # If model's output is linear, take softmax to get the probabilities
            ps = self.prob_func(self.model.forward(image))
        top_p, top_class = ps.topk(topk, dim=1)
        # Bring back to CPU
        image, top_p, top_class = image.to(self.device_cpu), top_p.to(self.device_cpu), top_class.to(self.device_cpu)
        image, top_p, top_class = image.squeeze(0), top_p.squeeze(0), top_class.squeeze(0)
        if show_image:
            self.view_classify(image, top_p, top_class)
        return {'top_p': top_p, 'top_class': top_class}

    def print_predictions(self, predictions):
        """Print the topk classes (names when class_to_name is loaded) with probabilities."""
        top_class = predictions['top_class'].numpy()
        top_p = predictions['top_p'].numpy()
        top_class_print = [self.idx_to_class[i] for i in top_class]
        if self.class_to_name is not None:
            top_class_print = [self.class_to_name[i] for i in top_class_print]
        for p, c in zip(top_p, top_class_print):
            print('[{}]: {:.2f}%'.format(c, p*100))

    def view_classify(self, img, top_p, top_class, correct=None):
        ''' Function for viewing an image and it's predicted classes.
        '''
        topk = len(top_p)
        fig, (ax1, ax2) = plt.subplots(figsize=(6,9), ncols=2)
        ax1 = self.imshow(img, ax1)
        ax1.axis('off')
        ax2.barh(np.arange(topk), top_p)
        ax2.set_yticks(np.arange(topk))
        ax2.set_aspect(0.1)
        if self.class_to_name is None:
            ax2.set_yticklabels(["{}[{}]".format(self.idx_to_class.get(i),i) for i in top_class.numpy()], size='small')
        else:
            ax2.set_yticklabels(["{}[{}]".format(self.class_to_name.get(self.idx_to_class.get(i)),i) for i in top_class.numpy()], size='small')
        if correct is not None:
            if self.class_to_name is None:
                ax2.set_title('Class Prob. [correct:{}[{}]]'.format(self.idx_to_class.get(correct),correct))
            else:
                ax2.set_title('Class Prob. [correct:{}[{}]]'.format(self.class_to_name.get(self.idx_to_class.get(correct)),correct))
        else:
            ax2.set_title('Class Probability')
        ax2.set_xlim(0, 1.1)
        plt.tight_layout()
        plt.show()

    def imshow(self, image, ax=None, title=None, normalize=True):
        """Imshow for Tensor."""
        if ax is None:
            fig, ax = plt.subplots()
        image = image.numpy().transpose((1, 2, 0))
        if normalize:
            mean = np.array(self.norm_mean)
            std = np.array(self.norm_std)
            image = std * image + mean
            image = np.clip(image, 0, 1)
        ax.imshow(image)
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.spines['left'].set_visible(False)
        ax.spines['bottom'].set_visible(False)
        ax.tick_params(axis='both', length=0)
        ax.set_xticklabels('')
        ax.set_yticklabels('')
        return ax

    def verify_directory(self):
        """Create the checkpoint save directory when it does not yet exist."""
        if not os.path.isdir(self.save_directory):
            os.mkdir(self.save_directory)

    def save_checkpoint(self, save_directory=None, best=False):
        """Save model/optimizer/trainer state to *_last.pth (and *_best.pth when best)."""
        filepath_base = os.path.join(self.save_directory, self.get_default_ckeckpoint_name())
        filepath = filepath_base + "_last.pth"
        checkpoint = {
            'arch': self.arch,
            'hidden_units': self.hidden_units,
            'nclasses': self.nclasses,
            'class_to_idx': self.class_to_idx,
            'idx_to_class': self.idx_to_class,
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'learning_rate': self.learning_rate,
            'trainer': self.trainer
        }
        torch.save(checkpoint, filepath)
        print("Checkpoint saved:", filepath)
        if best:
            filepath = filepath_base + "_best.pth"
            torch.save(checkpoint, filepath)
            print("Checkpoint saved:", filepath)

    def load_checkpoint(self, filepath='checkpoint.pth'):
        """Restore model, optimizer and trainer state from a checkpoint file.

        Returns True on success, False when the file does not exist.
        """
        filepath = os.path.expanduser(filepath)
        if os.path.isfile(filepath):
            print("Loading checkpoint '{}'".format(filepath))
            checkpoint = torch.load(filepath)
            # Build a model with the checkpoint data
            self.arch = checkpoint['arch']
            self.hidden_units = checkpoint['hidden_units']
            self.nclasses = checkpoint['nclasses']
            self.class_to_idx = checkpoint['class_to_idx']
            self.idx_to_class = checkpoint['idx_to_class']
            # create_model() needs to be here for model.to(device) be called before creating the optimizer...
            self.create_model()
            self.model.load_state_dict(checkpoint['model_state_dict'])
            print(self.model)
            self.learning_rate = checkpoint['learning_rate']
            print("Optimizer:\n", self.create_optimizer())
            self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            self.trainer = checkpoint['trainer']
            # Print checkpoint info
            print('Trainer epoch_best/epochs_acum: {}/{}'.format(self.trainer['epoch_best'],
                                                                 self.trainer['epochs_acum']))
            print('Trainer min_loss:', self.trainer['valid_loss_min'])
            print('Trainer accuracy Last/Max: {:.2f}%/{:.2f}%'.format(self.trainer['accuracy_partials'][-1]*100,
                                                                      np.max(self.trainer['accuracy_partials'])*100))
            return True
        else:
            print("[ERR] Loading checkpoint path '{}'".format(filepath))
            return False
| [
"numpy.clip",
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.nn.Sequential",
"torch.max",
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"numpy.arange",
"matplotlib.pyplot.plot",
"numpy.max",
"torchvision.datasets.ImageFolder",
"torchvision.models.v... | [((646, 665), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (658, 665), False, 'import torch\n'), ((1694, 1715), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1713, 1715), False, 'from torch import nn, optim\n'), ((1742, 1759), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (1752, 1759), False, 'from torch import nn, optim\n'), ((7757, 7772), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (7765, 7772), True, 'import numpy as np\n'), ((7983, 8014), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (7991, 8014), True, 'import numpy as np\n'), ((8029, 8060), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (8037, 8060), True, 'import numpy as np\n'), ((8378, 8393), 'collections.OrderedDict', 'OrderedDict', (['[]'], {}), '([])\n', (8389, 8393), False, 'from collections import OrderedDict\n'), ((8742, 8777), 'torch.nn.Linear', 'nn.Linear', (['units[-1]', 'self.nclasses'], {}), '(units[-1], self.nclasses)\n', (8751, 8777), False, 'from torch import nn, optim\n'), ((8851, 8877), 'torch.nn.Sequential', 'nn.Sequential', (['layers_dict'], {}), '(layers_dict)\n', (8864, 8877), False, 'from torch import nn, optim\n'), ((11869, 11932), 'torch.optim.Adam', 'optim.Adam', (['self.model.param_to_optimize'], {'lr': 'self.learning_rate'}), '(self.model.param_to_optimize, lr=self.learning_rate)\n', (11879, 11932), False, 'from torch import nn, optim\n'), ((13064, 13075), 'time.time', 'time.time', ([], {}), '()\n', (13073, 13075), False, 'import time\n'), ((22772, 22809), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 9)', 'ncols': '(2)'}), '(figsize=(6, 9), ncols=2)\n', (22784, 22809), True, 'import matplotlib.pyplot as plt\n'), ((23736, 23754), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (23752, 23754), True, 'import matplotlib.pyplot as plt\n'), ((23763, 
23773), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23771, 23773), True, 'import matplotlib.pyplot as plt\n'), ((25457, 25489), 'torch.save', 'torch.save', (['checkpoint', 'filepath'], {}), '(checkpoint, filepath)\n', (25467, 25489), False, 'import torch\n'), ((3680, 3699), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3692, 3699), False, 'import torch\n'), ((5565, 5631), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['train_dir'], {'transform': "self.transform['train']"}), "(train_dir, transform=self.transform['train'])\n", (5585, 5631), False, 'from torchvision import datasets, transforms, models\n'), ((5665, 5731), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['valid_dir'], {'transform': "self.transform['valid']"}), "(valid_dir, transform=self.transform['valid'])\n", (5685, 5731), False, 'from torchvision import datasets, transforms, models\n'), ((5764, 5828), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['test_dir'], {'transform': "self.transform['test']"}), "(test_dir, transform=self.transform['test'])\n", (5784, 5828), False, 'from torchvision import datasets, transforms, models\n'), ((5947, 6040), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (["self.data['train']"], {'batch_size': 'self.batch_size', 'shuffle': '(True)'}), "(self.data['train'], batch_size=self.batch_size,\n shuffle=True)\n", (5974, 6040), False, 'import torch\n'), ((6072, 6147), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (["self.data['valid']"], {'batch_size': 'self.batch_size'}), "(self.data['valid'], batch_size=self.batch_size)\n", (6099, 6147), False, 'import torch\n'), ((6182, 6274), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (["self.data['test']"], {'batch_size': 'self.batch_size', 'shuffle': '(True)'}), "(self.data['test'], batch_size=self.batch_size,\n shuffle=True)\n", (6209, 6274), False, 'import torch\n'), ((8473, 8506), 'torch.nn.Linear', 
'nn.Linear', (['units[i]', 'units[i + 1]'], {}), '(units[i], units[i + 1])\n', (8482, 8506), False, 'from torch import nn, optim\n'), ((8614, 8623), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (8621, 8623), False, 'from torch import nn, optim\n'), ((8667, 8682), 'torch.nn.Dropout', 'nn.Dropout', (['(0.2)'], {}), '(0.2)\n', (8677, 8682), False, 'from torch import nn, optim\n'), ((9352, 9381), 'torchvision.models.vgg13', 'models.vgg13', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (9364, 9381), False, 'from torchvision import datasets, transforms, models\n'), ((12170, 12181), 'time.time', 'time.time', ([], {}), '()\n', (12179, 12181), False, 'import time\n'), ((13187, 13202), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13200, 13202), False, 'import torch\n'), ((13886, 13897), 'time.time', 'time.time', ([], {}), '()\n', (13895, 13897), False, 'import time\n'), ((15575, 15586), 'time.time', 'time.time', ([], {}), '()\n', (15584, 15586), False, 'import time\n'), ((15588, 15599), 'time.time', 'time.time', ([], {}), '()\n', (15597, 15599), False, 'import time\n'), ((18165, 18226), 'matplotlib.pyplot.plot', 'plt.plot', (["self.trainer['train_losses']"], {'label': '"""Training loss"""'}), "(self.trainer['train_losses'], label='Training loss')\n", (18173, 18226), True, 'import matplotlib.pyplot as plt\n'), ((18239, 18307), 'matplotlib.pyplot.plot', 'plt.plot', (["self.trainer['validation_losses']"], {'label': '"""Validation loss"""'}), "(self.trainer['validation_losses'], label='Validation loss')\n", (18247, 18307), True, 'import matplotlib.pyplot as plt\n'), ((18320, 18380), 'matplotlib.pyplot.plot', 'plt.plot', (["self.trainer['accuracy_partials']"], {'label': '"""Acuracy"""'}), "(self.trainer['accuracy_partials'], label='Acuracy')\n", (18328, 18380), True, 'import matplotlib.pyplot as plt\n'), ((18393, 18418), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(False)'}), '(frameon=False)\n', (18403, 18418), True, 'import matplotlib.pyplot as 
plt\n'), ((18533, 18543), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18541, 18543), True, 'import matplotlib.pyplot as plt\n'), ((20822, 20844), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (20832, 20844), False, 'from PIL import Image\n'), ((21438, 21453), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (21451, 21453), False, 'import torch\n'), ((22889, 22904), 'numpy.arange', 'np.arange', (['topk'], {}), '(topk)\n', (22898, 22904), True, 'import numpy as np\n'), ((22936, 22951), 'numpy.arange', 'np.arange', (['topk'], {}), '(topk)\n', (22945, 22951), True, 'import numpy as np\n'), ((23920, 23934), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (23932, 23934), True, 'import matplotlib.pyplot as plt\n'), ((24028, 24052), 'numpy.array', 'np.array', (['self.norm_mean'], {}), '(self.norm_mean)\n', (24036, 24052), True, 'import numpy as np\n'), ((24071, 24094), 'numpy.array', 'np.array', (['self.norm_std'], {}), '(self.norm_std)\n', (24079, 24094), True, 'import numpy as np\n'), ((24154, 24174), 'numpy.clip', 'np.clip', (['image', '(0)', '(1)'], {}), '(image, 0, 1)\n', (24161, 24174), True, 'import numpy as np\n'), ((25623, 25655), 'torch.save', 'torch.save', (['checkpoint', 'filepath'], {}), '(checkpoint, filepath)\n', (25633, 25655), False, 'import torch\n'), ((25954, 25974), 'torch.load', 'torch.load', (['filepath'], {}), '(filepath)\n', (25964, 25974), False, 'import torch\n'), ((544, 569), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (567, 569), False, 'import torch\n'), ((6906, 6918), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6915, 6918), False, 'import json\n'), ((9896, 9928), 'torchvision.models.vgg16_bn', 'models.vgg16_bn', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (9911, 9928), False, 'from torchvision import datasets, transforms, models\n'), ((13639, 13660), 'torch.max', 'torch.max', (['outputs', '(1)'], {}), '(outputs, 1)\n', (13648, 13660), False, 
'import torch\n'), ((17983, 17994), 'time.time', 'time.time', ([], {}), '()\n', (17992, 17994), False, 'import time\n'), ((19175, 19190), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (19188, 19190), False, 'import torch\n'), ((21057, 21083), 'torch.from_numpy', 'torch.from_numpy', (['image_np'], {}), '(image_np)\n', (21073, 21083), False, 'import torch\n'), ((3602, 3627), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3625, 3627), False, 'import torch\n'), ((4365, 4394), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(30)'], {}), '(30)\n', (4390, 4394), False, 'from torchvision import datasets, transforms, models\n'), ((4452, 4485), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(224)'], {}), '(224)\n', (4480, 4485), False, 'from torchvision import datasets, transforms, models\n'), ((4543, 4576), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (4574, 4576), False, 'from torchvision import datasets, transforms, models\n'), ((4634, 4655), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4653, 4655), False, 'from torchvision import datasets, transforms, models\n'), ((4713, 4764), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['self.norm_mean', 'self.norm_std'], {}), '(self.norm_mean, self.norm_std)\n', (4733, 4764), False, 'from torchvision import datasets, transforms, models\n'), ((4826, 4848), 'torchvision.transforms.Resize', 'transforms.Resize', (['(255)'], {}), '(255)\n', (4843, 4848), False, 'from torchvision import datasets, transforms, models\n'), ((4906, 4932), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (4927, 4932), False, 'from torchvision import datasets, transforms, models\n'), ((4990, 5011), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5009, 5011), False, 'from torchvision import datasets, 
transforms, models\n'), ((5069, 5120), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['self.norm_mean', 'self.norm_std'], {}), '(self.norm_mean, self.norm_std)\n', (5089, 5120), False, 'from torchvision import datasets, transforms, models\n'), ((5181, 5203), 'torchvision.transforms.Resize', 'transforms.Resize', (['(255)'], {}), '(255)\n', (5198, 5203), False, 'from torchvision import datasets, transforms, models\n'), ((5261, 5287), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (5282, 5287), False, 'from torchvision import datasets, transforms, models\n'), ((5345, 5366), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5364, 5366), False, 'from torchvision import datasets, transforms, models\n'), ((5424, 5475), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['self.norm_mean', 'self.norm_std'], {}), '(self.norm_mean, self.norm_std)\n', (5444, 5475), False, 'from torchvision import datasets, transforms, models\n'), ((10446, 10481), 'torchvision.models.densenet121', 'models.densenet121', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (10464, 10481), False, 'from torchvision import datasets, transforms, models\n'), ((17625, 17636), 'time.time', 'time.time', ([], {}), '()\n', (17634, 17636), False, 'import time\n'), ((11004, 11037), 'torchvision.models.resnet101', 'models.resnet101', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (11020, 11037), False, 'from torchvision import datasets, transforms, models\n'), ((17097, 17108), 'time.time', 'time.time', ([], {}), '()\n', (17106, 17108), False, 'import time\n'), ((27291, 27332), 'numpy.max', 'np.max', (["self.trainer['accuracy_partials']"], {}), "(self.trainer['accuracy_partials'])\n", (27297, 27332), True, 'import numpy as np\n')] |
import numpy.testing as npt
from handyspark import *
# boolean returns
def test_between(sdf, pdf):
    """`between` applied through HandySpark must match the pandas result."""
    expected = pdf['Age'].between(left=20, right=40)[:20]
    handy = sdf.toHandy()
    handy = handy.assign(newcol=handy.pandas['Age'].between(left=20, right=40))
    actual = handy.cols['newcol'][:20]
    npt.assert_array_equal(actual, expected)
def test_isin(sdf, pdf):
    """`isin` applied through HandySpark must match the pandas result."""
    expected = pdf['Age'].isin(values=[22, 40])[:20]
    handy = sdf.toHandy()
    handy = handy.assign(newcol=handy.pandas['Age'].isin(values=[22, 40]))
    actual = handy.cols['newcol'][:20]
    npt.assert_array_equal(actual, expected)
def test_isna(sdf, pdf):
    """`isna` applied through HandySpark must match the pandas result."""
    expected = pdf['Cabin'].isna()[:20]
    handy = sdf.toHandy()
    handy = handy.assign(newcol=handy.pandas['Cabin'].isna())
    actual = handy.cols['newcol'][:20]
    npt.assert_array_equal(actual, expected)
def test_notna(sdf, pdf):
    """`notna` applied through HandySpark must match the pandas result."""
    expected = pdf['Cabin'].notna()[:20]
    handy = sdf.toHandy()
    handy = handy.assign(newcol=handy.pandas['Cabin'].notna())
    actual = handy.cols['newcol'][:20]
    npt.assert_array_equal(actual, expected)
# same type returns
def test_clip(sdf, pdf):
    """`clip` applied through HandySpark must match the pandas result."""
    expected = pdf['Age'].clip(lower=5, upper=50)[:20]
    handy = sdf.toHandy()
    handy = handy.assign(newcol=handy.pandas['Age'].clip(lower=5, upper=50))
    actual = handy.cols['newcol'][:20]
    npt.assert_array_equal(actual, expected)
def test_replace(sdf, pdf):
    """`replace` applied through HandySpark must match the pandas result."""
    expected = pdf['Age'].replace(to_replace=5, value=0)[:20]
    handy = sdf.toHandy()
    handy = handy.assign(newcol=handy.pandas['Age'].replace(to_replace=5, value=0))
    actual = handy.cols['newcol'][:20]
    npt.assert_array_equal(actual, expected)
def test_round(sdf, pdf):
    """`round` applied through HandySpark must match the pandas result."""
    expected = pdf['Fare'].round(decimals=0)[:20]
    handy = sdf.toHandy()
    handy = handy.assign(newcol=handy.pandas['Fare'].round(decimals=0))
    actual = handy.cols['newcol'][:20]
    npt.assert_array_equal(actual, expected)
| [
"numpy.testing.assert_array_equal"
] | [((290, 323), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['hres', 'res'], {}), '(hres, res)\n', (312, 323), True, 'import numpy.testing as npt\n'), ((530, 563), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['hres', 'res'], {}), '(hres, res)\n', (552, 563), True, 'import numpy.testing as npt\n'), ((744, 777), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['hres', 'res'], {}), '(hres, res)\n', (766, 777), True, 'import numpy.testing as npt\n'), ((961, 994), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['hres', 'res'], {}), '(hres, res)\n', (983, 994), True, 'import numpy.testing as npt\n'), ((1225, 1258), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['hres', 'res'], {}), '(hres, res)\n', (1247, 1258), True, 'import numpy.testing as npt\n'), ((1486, 1519), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['hres', 'res'], {}), '(hres, res)\n', (1508, 1519), True, 'import numpy.testing as npt\n'), ((1721, 1754), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['hres', 'res'], {}), '(hres, res)\n', (1743, 1754), True, 'import numpy.testing as npt\n')] |
#################################################################################################################
#### GUI Interface for users
#################################################################################################################
from tkinter import *
import tkinter as tk
import tkinter.messagebox
import keras
import numpy as np
from sklearn.preprocessing import StandardScaler
# --------------------------------------------------------------- root window
root = Tk()
root.geometry('1500x1500')
root.title("Prediction Form")
# Banner title at the top of the form.
label = Label(root, text="Prediction form",width=20,font=("bold", 20))
label.place(x=375,y=20)
# ----------------------------------------------- free-text fields (name, age)
label_0 = Label(root, text="Full Name",width=20,font=("bold", 10))
label_0.place(x=0,y=50)
entry_0 = Entry(root)
entry_0.place(x=200,y=50)
label_1 = Label(root, text="Age",width=10,font=("bold", 10))
label_1.place(x=750,y=50)
entry_1 = Entry(root)
entry_1.place(x=950,y=50)
# --------------------------------------------------------- radio-button rows
# Each question binds a Tk IntVar (var2..var22); the selected button's numeric
# `value` is the feature encoding that show_result() reads.
# NOTE(review): `global` at module level is a no-op (and var16 never gets one);
# all of these names are module globals regardless.  Kept byte-identical.
label_2 = Label(root, text="Gender",width=20,font=("bold", 10))
label_2.place(x=0,y=80)
global var2
var2 = IntVar()
Radiobutton(root, text="Male",padx = 5, variable=var2, value=1).place(x=375,y=80)
Radiobutton(root, text="Female",padx = 20, variable=var2, value=0).place(x=750,y=80)
label_3 = Label(root, text="Address",width=20,font=("bold", 10))
label_3.place(x=0,y=110)
global var3
var3 = IntVar()
Radiobutton(root, text="Urban",padx = 5, variable=var3, value=0).place(x=375,y=110)
Radiobutton(root, text="Rural",padx = 20, variable=var3, value=1).place(x=750,y=110)
label_4 = Label(root, text="Parent's Cohabitation Status",width=24,font=("bold", 10))
label_4.place(x=0,y=140)
global var4
var4 = IntVar()
Radiobutton(root, text="Apart",padx = 5, variable=var4, value=1).place(x=375,y=140)
Radiobutton(root, text="Together",padx = 20, variable=var4, value=0).place(x=750,y=140)
# Parents' education: ordinal encoding 0 (none) .. 4 (higher education).
label_5 = Label(root, text="Mother's Education",width=20,font=("bold", 10))
label_5.place(x=0,y=170)
global var5
var5 = IntVar()
Radiobutton(root, text="none",padx = 5, variable=var5, value=0).place(x=250,y=170)
Radiobutton(root, text="primary education",padx = 20, variable=var5, value=1).place(x=400,y=170)
Radiobutton(root, text="5th to 9th grade",padx = 5, variable=var5, value=2).place(x=630,y=170)
Radiobutton(root, text="secondary education",padx = 20, variable=var5, value=3).place(x=820,y=170)
Radiobutton(root, text="higher education",padx = 20, variable=var5, value=4).place(x=1010,y=170)
label_6 = Label(root, text="Father's Education",width=20,font=("bold", 10))
label_6.place(x=0,y=200)
global var6
var6 = IntVar()
Radiobutton(root, text="none",padx = 5, variable=var6, value=0).place(x=250,y=200)
Radiobutton(root, text="primary education",padx = 20, variable=var6, value=1).place(x=400,y=200)
Radiobutton(root, text="5th to 9th grade",padx = 5, variable=var6, value=2).place(x=630,y=200)
Radiobutton(root, text="secondary education",padx = 20, variable=var6, value=3).place(x=820,y=200)
Radiobutton(root, text="higher education",padx = 20, variable=var6, value=4).place(x=1010,y=200)
# Parents' jobs (labels anonymised to "<NAME>" in this copy): categorical
# encoding at_home=0, health=1, other=2, services=3, teacher=4.
label_7 = Label(root, text="<NAME>",width=20,font=("bold", 10))
label_7.place(x=0,y=230)
global var7
var7 = IntVar()
Radiobutton(root, text="teacher",padx = 5, variable=var7, value=4).place(x=250,y=230)
Radiobutton(root, text="health care related",padx = 20, variable=var7, value=1).place(x=400,y=230)
Radiobutton(root, text="services",padx = 5, variable=var7, value=3).place(x=630,y=230)
Radiobutton(root, text="at_home",padx = 20, variable=var7, value=0).place(x=820,y=230)
Radiobutton(root, text="other",padx = 20, variable=var7, value=2).place(x=1010,y=230)
label_8 = Label(root, text="<NAME>",width=20,font=("bold", 10))
label_8.place(x=0,y=260)
global var8
var8 = IntVar()
Radiobutton(root, text="teacher",padx = 5, variable=var8, value=4).place(x=250,y=260)
Radiobutton(root, text="health care related",padx = 20, variable=var8, value=1).place(x=400,y=260)
Radiobutton(root, text="services",padx = 5, variable=var8, value=3).place(x=630,y=260)
Radiobutton(root, text="at_home",padx = 20, variable=var8, value=0).place(x=820,y=260)
Radiobutton(root, text="other",padx = 20, variable=var8, value=2).place(x=1010,y=260)
# Travel/study time buckets: ordinal 1..4.
label_9 = Label(root, text="Travel Time",width=20,font=("bold", 10))
label_9.place(x=0,y=290)
global var9
var9 = IntVar()
Radiobutton(root, text="<15 min",padx = 5, variable=var9, value=1).place(x=270,y=290)
Radiobutton(root, text="15-30 min",padx = 20, variable=var9, value=2).place(x=550,y=290)
Radiobutton(root, text="30-60 min",padx = 5, variable=var9, value=3).place(x=830,y=290)
Radiobutton(root, text=">60 min",padx = 20, variable=var9, value=4).place(x=1110,y=290)
label_10 = Label(root, text="Study Time",width=20,font=("bold", 10))
label_10.place(x=0,y=320)
global var10
var10 = IntVar()
Radiobutton(root, text="<2 hours",padx = 5, variable=var10, value=1).place(x=270,y=320)
Radiobutton(root, text="2 to 5 hours",padx = 20, variable=var10, value=2).place(x=550,y=320)
Radiobutton(root, text="5 to 10 hours",padx = 5, variable=var10, value=3).place(x=830,y=320)
Radiobutton(root, text=">10 hours",padx = 20, variable=var10, value=4).place(x=1110,y=320)
label_11 = Label(root, text="number of past class failures",width=24,font=("bold", 10))
label_11.place(x=0,y=350)
global var11
var11 = IntVar()
Radiobutton(root, text="0",padx = 5, variable=var11, value=0).place(x=270,y=350)
Radiobutton(root, text="1",padx = 20, variable=var11, value=1).place(x=550,y=350)
Radiobutton(root, text="2",padx = 5, variable=var11, value=2).place(x=830,y=350)
Radiobutton(root, text="higher",padx = 20, variable=var11, value=3).place(x=1110,y=350)
# Yes/No flags: No=0, Yes=1.
label_12 = Label(root, text="Extra Education Support",width=24,font=("bold", 10))
label_12.place(x=0,y=380)
global var12
var12 = IntVar()
Radiobutton(root, text="NO",padx = 5, variable=var12, value=0).place(x=200,y=380)
Radiobutton(root, text="Yes",padx = 20, variable=var12, value=1).place(x=250,y=380)
label_13 = Label(root, text="Extra Paid Classes",width=20,font=("bold", 10))
label_13.place(x=420,y=380)
global var13
var13 = IntVar()
Radiobutton(root, text="NO",padx = 5, variable=var13, value=0).place(x=620,y=380)
Radiobutton(root, text="Yes",padx = 20, variable=var13, value=1).place(x=670,y=380)
label_14 = Label(root, text="Want higher Education",width=20,font=("bold", 10))
label_14.place(x=910,y=380)
global var14
var14 = IntVar()
Radiobutton(root, text="NO",padx = 5, variable=var14, value=0).place(x=1110,y=380)
Radiobutton(root, text="Yes",padx = 20, variable=var14, value=1).place(x=1160,y=380)
label_15 = Label(root, text="Internet Access",width=20,font=("bold", 10))
label_15.place(x=0,y=410)
global var15
var15 = IntVar()
Radiobutton(root, text="NO",padx = 5, variable=var15, value=0).place(x=200,y=410)
Radiobutton(root, text="Yes",padx = 20, variable=var15, value=1).place(x=250,y=410)
label_16 = Label(root, text="Romantic Relationship",width=20,font=("bold", 10))
label_16.place(x=750,y=410)
var16 = IntVar()
Radiobutton(root, text="NO",padx = 5, variable=var16, value=0).place(x=950,y=410)
Radiobutton(root, text="Yes",padx = 20, variable=var16, value=1).place(x=1000,y=410)
# Likert-scale questions: ordinal 1..5.
label_17 = Label(root, text="Quality of family relationship",width=24,font=("bold", 10))
label_17.place(x=0,y=440)
global var17
var17 = IntVar()
Radiobutton(root, text="1(Very Bad)",padx = 5, variable=var17, value=1).place(x=250,y=440)
Radiobutton(root, text="2",padx = 20, variable=var17, value=2).place(x=410,y=440)
Radiobutton(root, text="3",padx = 5, variable=var17, value=3).place(x=560,y=440)
Radiobutton(root, text="4",padx = 20, variable=var17, value=4).place(x=710,y=440)
Radiobutton(root, text="5(Excellent)",padx = 20, variable=var17, value=5).place(x=860,y=440)
label_18 = Label(root, text="Free time after school",width=24,font=("bold", 10))
label_18.place(x=0,y=470)
global var18
var18 = IntVar()
Radiobutton(root, text="1(Very low)",padx = 5, variable=var18, value=1).place(x=250,y=470)
Radiobutton(root, text="2",padx = 20, variable=var18, value=2).place(x=410,y=470)
Radiobutton(root, text="3",padx = 5, variable=var18, value=3).place(x=560,y=470)
Radiobutton(root, text="4",padx = 20, variable=var18, value=4).place(x=710,y=470)
Radiobutton(root, text="5(Very High)",padx = 20, variable=var18, value=5).place(x=860,y=470)
label_19 = Label(root, text="Going out with friends",width=24,font=("bold", 10))
label_19.place(x=0,y=500)
global var19
var19 = IntVar()
Radiobutton(root, text="1(Very low)",padx = 5, variable=var19, value=1).place(x=250,y=500)
Radiobutton(root, text="2",padx = 20, variable=var19, value=2).place(x=410,y=500)
Radiobutton(root, text="3",padx = 5, variable=var19, value=3).place(x=560,y=500)
Radiobutton(root, text="4",padx = 20, variable=var19, value=4).place(x=710,y=500)
Radiobutton(root, text="5(Very high)",padx = 20, variable=var19, value=5).place(x=860,y=500)
label_20 = Label(root, text="Workday alcohol consumption",width=24,font=("bold", 10))
label_20.place(x=0,y=530)
global var20
var20 = IntVar()
Radiobutton(root, text="1(Very low)",padx = 5, variable=var20, value=1).place(x=250,y=530)
Radiobutton(root, text="2",padx = 20, variable=var20, value=2).place(x=410,y=530)
Radiobutton(root, text="3",padx = 5, variable=var20, value=3).place(x=560,y=530)
Radiobutton(root, text="4",padx = 20, variable=var20, value=4).place(x=710,y=530)
Radiobutton(root, text="5(Very High)",padx = 20, variable=var20, value=5).place(x=860,y=530)
label_21 = Label(root, text="Weekend alcohol consumption",width=24,font=("bold", 10))
label_21.place(x=0,y=560)
global var21
var21 = IntVar()
Radiobutton(root, text="1(Very low)",padx = 5, variable=var21, value=1).place(x=250,y=560)
Radiobutton(root, text="2",padx = 20, variable=var21, value=2).place(x=410,y=560)
Radiobutton(root, text="3",padx = 5, variable=var21, value=3).place(x=560,y=560)
Radiobutton(root, text="4",padx = 20, variable=var21, value=4).place(x=710,y=560)
Radiobutton(root, text="5(Very high)",padx = 20, variable=var21, value=5).place(x=860,y=560)
label_22 = Label(root, text="Current health status",width=24,font=("bold", 10))
label_22.place(x=0,y=590)
global var22
var22 = IntVar()
Radiobutton(root, text="1(Very Bad)",padx = 5, variable=var22, value=1).place(x=250,y=590)
Radiobutton(root, text="2",padx = 20, variable=var22, value=2).place(x=410,y=590)
Radiobutton(root, text="3",padx = 5, variable=var22, value=3).place(x=560,y=590)
Radiobutton(root, text="4",padx = 20, variable=var22, value=4).place(x=710,y=590)
Radiobutton(root, text="5(Very Good)",padx = 20, variable=var22, value=5).place(x=860,y=590)
# Absences count: free-text numeric entry read by show_result().
label_23 = Label(root, text="Absences (Range: 0 to 93) ",width=24,font=("bold", 10))
label_23.place(x=0,y=610)
entry_23 = Entry(root)
entry_23.place(x=375,y=610)
def client_exit():
    """Close the application window (callback for the EXIT button)."""
    root.destroy()
def show_result():
    """
    Collect every form field, run the saved ANN model on the feature
    vector and show the PASS/FAIL verdict in a message box.

    Side effects: prints the feature vector, probability and verdict to
    stdout and opens a tkinter message box.  The feature order below must
    match the column order used when the model was trained.
    """
    # Feature order: gender, age, address, cohabitation, mother/father
    # education, mother/father job, travel time, study time, failures,
    # support/paid/higher-ed/internet/romantic flags, family/free-time/
    # going-out/alcohol/health ratings, absences.
    features = [
        int(var2.get()), int(entry_1.get()), int(var3.get()), int(var4.get()),
        int(var5.get()), int(var6.get()), int(var7.get()), int(var8.get()),
        int(var9.get()), int(var10.get()), int(var11.get()), int(var12.get()),
        int(var13.get()), int(var14.get()), int(var15.get()), int(var16.get()),
        int(var17.get()), int(var18.get()), int(var19.get()), int(var20.get()),
        int(var21.get()), int(var22.get()), int(entry_23.get()),
    ]
    print(*features)
    sc = StandardScaler()
    classifier = keras.models.load_model('/home/niharika/Desktop/ML_Project/ANN_student(2).model')
    # NOTE(review): fitting a StandardScaler on this single row cannot
    # reproduce the scaling learned on the training set (the row becomes its
    # own mean); the scaler fitted during training should be persisted and
    # reused here.  Kept as-is to preserve existing behaviour -- confirm
    # against the training pipeline.
    # (A duplicate, unused np.array(...) statement was removed.)
    new_prediction = classifier.predict(sc.fit_transform(np.array([features])))
    probability = float(new_prediction)
    print(probability)
    # The model outputs a single pass-probability; 0.5 is the decision cut.
    if probability > 0.5:
        pred = "You will PASS the Examination"
    else:
        pred = "You will FAIL the Examination"
    print(pred)
    tk.messagebox.showinfo("Prediction", pred)
# Action buttons: Submit runs the prediction, EXIT closes the window.
Button(root, text='Submit',width=20,bg='brown',fg='white', command = show_result).place(x=550,y=670)
Button(root, text='EXIT',width=20,bg='brown',fg='white', command = client_exit).place(x=550,y=700)
# Start the Tk event loop (blocks until the window is closed).
root.mainloop()
| [
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"tkinter.messagebox.showinfo",
"keras.models.load_model"
] | [((11493, 11615), 'numpy.array', 'np.array', (['[[i2, i1, i3, i4, i5, i6, i7, i8, i9, i10, i11, i12, i13, i14, i15, i16,\n i17, i18, i19, i20, i21, i22, i23]]'], {}), '([[i2, i1, i3, i4, i5, i6, i7, i8, i9, i10, i11, i12, i13, i14, i15,\n i16, i17, i18, i19, i20, i21, i22, i23]])\n', (11501, 11615), True, 'import numpy as np\n'), ((11603, 11619), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (11617, 11619), False, 'from sklearn.preprocessing import StandardScaler\n'), ((11641, 11727), 'keras.models.load_model', 'keras.models.load_model', (['"""/home/niharika/Desktop/ML_Project/ANN_student(2).model"""'], {}), "(\n '/home/niharika/Desktop/ML_Project/ANN_student(2).model')\n", (11664, 11727), False, 'import keras\n'), ((12100, 12142), 'tkinter.messagebox.showinfo', 'tk.messagebox.showinfo', (['"""Prediction"""', 'pred'], {}), "('Prediction', pred)\n", (12122, 12142), True, 'import tkinter as tk\n'), ((11785, 11907), 'numpy.array', 'np.array', (['[[i2, i1, i3, i4, i5, i6, i7, i8, i9, i10, i11, i12, i13, i14, i15, i16,\n i17, i18, i19, i20, i21, i22, i23]]'], {}), '([[i2, i1, i3, i4, i5, i6, i7, i8, i9, i10, i11, i12, i13, i14, i15,\n i16, i17, i18, i19, i20, i21, i22, i23]])\n', (11793, 11907), True, 'import numpy as np\n')] |
'''
BSD 3-Clause License
Copyright (c) 2020, <NAME>, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import numpy as np
from numpy import ndarray
from cv2 import filter2D, imwrite
from PIL import Image, UnidentifiedImageError
import os
class ImageProcess:
    '''
    Basic image-processing helpers operating on ``numpy.ndarray`` images
    (PIL is used for loading, OpenCV for saving/filtering).

    Implemented:
        - Gaussian blur/smooth              (``gaussianBlur``)
        - RGB -> grayscale conversion       (``RGB2GRAY``)
        - Image inversion                   (``invertImage``)
        - Image blending via colour dodge   (``colorDodge``)
        - Zero padding (grayscale only)     (``_pad_image``)

    TODO:
        - average and lens blur
        - bicubic / bilinear resizing
        - make the working attributes private
    '''

    def __init__(self):
        pass

    # ------------------------------------------------------------------
    # utility helpers
    # ------------------------------------------------------------------
    def _rotate_image_90(self, img: ndarray, k: int) -> ndarray:
        """
        Rotate ``img`` by ``k * 90`` degrees, but only when the image is
        wider than it is tall (PIL yields arrays shaped ``[h, w, c]``).

        args:
            img - [ndarray] input image
            k   - [int] number of 90-degree rotations to apply
        returns:
            [ndarray] rotated image, or the untouched input
        """
        # TODO implement the rotation without np.rot90()
        if img.shape[0] < img.shape[1]:
            self.y = np.rot90(img, k)
            return self.y
        return img

    def _gaussian_distribution(self, x: ndarray, mu: float, sigma: float) -> ndarray:
        """
        Evaluate the Gaussian pdf with mean ``mu`` and standard deviation
        ``sigma`` element-wise on ``x``.  ``sigma`` must be non-zero.
        """
        return 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp(
            -np.power((x - mu) / sigma, 2) / 2)

    def _generate_gaussian_kernel(self, size: int, sigma: float = 1.0, mu: float = 0.0) -> ndarray:
        """
        Build a ``size x size`` Gaussian kernel whose peak is 1.0.

        args:
            size  - [int] kernel side length
            sigma - [float] standard deviation (must not be 0.0)
            mu    - [float] mean of the distribution
        return:
            kernel - [ndarray] 2-D kernel with values in (0, 1]
        """
        # 1-D grid of equally spaced sample points centred on zero.
        self.kernel_1d = np.linspace(-(size // 2), size // 2, size)
        self.kernel_1d = self._gaussian_distribution(self.kernel_1d, mu, sigma)
        # The outer product turns the 1-D profile into a separable 2-D kernel.
        self.kernel_2d = np.outer(self.kernel_1d.T, self.kernel_1d)
        # Normalise the *peak* to 1.0.  NOTE: a brightness-preserving blur
        # would normalise the sum instead; gaussianBlur re-normalises its
        # output afterwards, so behaviour is kept as-is.
        self.kernel_2d *= 1.0 / self.kernel_2d.max()
        return self.kernel_2d

    def _pad_image(self, img: ndarray, pad_width: int = 10) -> ndarray:
        """
        Surround a *grayscale* image with a zero border ``pad_width`` pixels
        wide on every side (``pad_width`` must be >= 1 for the slice below).

        TODO support RGB images as well.
        """
        self.padded_img = np.zeros(
            (img.shape[0] + pad_width * 2, img.shape[1] + pad_width * 2))
        self.padded_img[pad_width:-pad_width, pad_width:-pad_width] = img
        return self.padded_img

    def _normalize_img(self, img: ndarray, range_end: float = 1.0) -> ndarray:
        """
        Linearly rescale pixel values into ``[0, range_end]``
        (``range_end`` should be > 0.0).
        """
        peak = img.max()
        if peak == 0:
            # BUGFIX: an all-zero image previously produced 0/0 -> NaN.
            return img * range_end
        return (img / peak) * range_end

    def _isGrayscale(self, img: ndarray) -> bool:
        """Return True when ``img`` has only two (spatial) dimensions."""
        return len(np.squeeze(img).shape) == 2

    # ------------------------------------------------------------------
    # main operations
    # ------------------------------------------------------------------
    def loadImage(self, path: str) -> ndarray:
        """
        Read the image at ``path`` into a numpy array shaped ``[h, w, c]``
        (RGB) or ``[h, w]`` (grayscale).  Returns ``None`` when the file is
        missing or cannot be decoded as an image.
        """
        try:
            self.img = np.asarray(Image.open(path))
        except FileNotFoundError:
            print("NO such File {}".format(path))
            return None
        except UnidentifiedImageError:
            # File exists but PIL cannot decode it (the import was
            # previously unused).
            print("Cannot identify image file {}".format(path))
            return None
        return self.img

    def saveImage(self, img: ndarray, path: str, name: str) -> bool:
        """
        Write ``img`` to ``os.path.join(path, name)`` and return the
        success flag from ``cv2.imwrite``.

        NOTE(review): cv2.imwrite expects BGR channel order while loadImage
        yields RGB -- colour images may be saved with swapped channels;
        confirm and convert if needed.
        """
        self.__isSaved = imwrite(os.path.join(path, name), img)
        return self.__isSaved

    def RGB2GRAY(self, img: ndarray) -> ndarray:
        """
        Convert an RGB image to grayscale using the BT.709 luma weights;
        grayscale input is returned unchanged.
        """
        if self._isGrayscale(img):
            return img
        self.rgb_weights = np.array([0.2126, 0.7152, 0.0722])
        return np.dot(img[..., :3], self.rgb_weights)

    def invertImage(self, img: ndarray) -> ndarray:
        """Invert ``img`` relative to its own maximum value."""
        return img.max() - img

    def naiveConvolve2D(self, img: ndarray, kernel: ndarray) -> ndarray:
        """
        Correlate a grayscale image with ``kernel`` using a naive
        O(h*w*k*k) double loop; the output has the same shape as ``img``
        (the input is zero-padded).  ``kernel`` must be square with an odd
        size >= 3.

        TODO implement a faster version and add striding for downsampling.
        """
        self.kernel_size = kernel.shape[0]
        self.convolved_output = np.zeros_like(img)
        # BUGFIX: pad by kernel_size // 2 (half the window) so each window
        # is centred on its pixel; the previous kernel_size - 2 shifted the
        # result for any kernel larger than 3x3.
        self.padded_image = self._pad_image(img, pad_width=self.kernel_size // 2)
        for x in range(img.shape[1]):
            for y in range(img.shape[0]):
                self.convolved_output[y, x] = (
                    kernel * self.padded_image[y:y + self.kernel_size, x:x + self.kernel_size]).sum()
        return self.convolved_output

    def gaussianBlur(self, img: ndarray, kernel_size: int = 21, sigma: float = 10.0) -> ndarray:
        """
        Blur ``img`` with a Gaussian kernel (cv2.filter2D for speed) and
        re-normalise the result into [0, 255].

        raises:
            TypeError - when ``img`` is not a numpy array
        """
        if not isinstance(img, ndarray):
            # BUGFIX: raising a plain string is illegal in Python 3.
            raise TypeError("image should be in form of np.ndarray")
        self.__kernel = self._generate_gaussian_kernel(kernel_size, sigma)
        self.__blurrImg = filter2D(img, -1, self.__kernel)
        self.__blurrImg = self._normalize_img(self.__blurrImg, range_end=255.0)
        return self.__blurrImg

    def colorDodge(self, img1: ndarray, img2: ndarray) -> ndarray:
        """
        Blend ``img1`` over background ``img2`` with the colour-dodge
        formula ``img2 / (1 - img1)``, clipped and rescaled to [0, 255].
        Both inputs must be normalised into (0, 1).

        TODO implement other blend modes.

        raises:
            ValueError - when either input exceeds the (0, 1) range
        """
        # BUGFIX: the original used `and`, rejecting the input only when
        # *both* images were out of range, and raised a plain string
        # (illegal in Python 3).
        if img1.max() > 1.0 or img2.max() > 1.0:
            raise ValueError(
                "np.ndarray should be normalized within the range (0,1)")
        # The tiny epsilon avoids division by zero where img1 == 1.0.
        self.blended_img = img2 / ((1.0 - img1) + 10e-12)
        self.blended_img[self.blended_img > 1.0] = 1.0
        self.blended_img = self._normalize_img(
            self.blended_img, range_end=255.0)
        return self.blended_img
| [
"PIL.Image.open",
"numpy.sqrt",
"numpy.power",
"os.path.join",
"cv2.filter2D",
"numpy.squeeze",
"numpy.array",
"numpy.linspace",
"numpy.outer",
"numpy.zeros",
"numpy.dot",
"numpy.rot90",
"numpy.zeros_like"
] | [((4385, 4427), 'numpy.linspace', 'np.linspace', (['(-(size // 2))', '(size // 2)', 'size'], {}), '(-(size // 2), size // 2, size)\n', (4396, 4427), True, 'import numpy as np\n'), ((4669, 4711), 'numpy.outer', 'np.outer', (['self.kernel_1d.T', 'self.kernel_1d'], {}), '(self.kernel_1d.T, self.kernel_1d)\n', (4677, 4711), True, 'import numpy as np\n'), ((5357, 5427), 'numpy.zeros', 'np.zeros', (['(img.shape[0] + pad_width * 2, img.shape[1] + pad_width * 2)'], {}), '((img.shape[0] + pad_width * 2, img.shape[1] + pad_width * 2))\n', (5365, 5427), True, 'import numpy as np\n'), ((8341, 8359), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (8354, 8359), True, 'import numpy as np\n'), ((9430, 9462), 'cv2.filter2D', 'filter2D', (['img', '(-1)', 'self.__kernel'], {}), '(img, -1, self.__kernel)\n', (9438, 9462), False, 'from cv2 import filter2D, imwrite\n'), ((3040, 3056), 'numpy.rot90', 'np.rot90', (['img', 'k'], {}), '(img, k)\n', (3048, 3056), True, 'import numpy as np\n'), ((6904, 6928), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (6916, 6928), False, 'import os\n'), ((7406, 7440), 'numpy.array', 'np.array', (['[0.2126, 0.7152, 0.0722]'], {}), '([0.2126, 0.7152, 0.0722])\n', (7414, 7440), True, 'import numpy as np\n'), ((7460, 7498), 'numpy.dot', 'np.dot', (['img[..., :3]', 'self.rgb_weights'], {}), '(img[..., :3], self.rgb_weights)\n', (7466, 7498), True, 'import numpy as np\n'), ((6649, 6665), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (6659, 6665), False, 'from PIL import Image, UnidentifiedImageError\n'), ((3633, 3651), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (3640, 3651), True, 'import numpy as np\n'), ((6181, 6196), 'numpy.squeeze', 'np.squeeze', (['img'], {}), '(img)\n', (6191, 6196), True, 'import numpy as np\n'), ((3684, 3713), 'numpy.power', 'np.power', (['((x - mu) / sigma)', '(2)'], {}), '((x - mu) / sigma, 2)\n', (3692, 3713), True, 'import numpy as np\n')] |
# Collection of utilities for building the vocabulary dictionary and word
# embeddings, and for reading dialog data for training.
import tensorflow as tf
import numpy as np
import re
import codecs
from config import FLAGS
class Dialog():
    """Vocabulary + example management for a seq2seq chatbot dataset."""

    _PAD_ = "_PAD_"  # padding symbol
    _STA_ = "_STA_"  # start-of-sequence symbol for the decoder input
    _EOS_ = "_EOS_"  # end-of-sequence symbol for decoder input/output
    _UNK_ = "_UNK_"  # symbol for out-of-vocabulary words
    _PAD_ID_ = 0
    _STA_ID_ = 1
    _EOS_ID_ = 2
    _UNK_ID_ = 3
    _PRE_DEFINED_ = [_PAD_ID_, _STA_ID_, _EOS_ID_, _UNK_ID_]

    def __init__(self):
        self.vocab_list = []
        self.vocab_dict = {}
        self.vocab_size = 0
        self.examples = []
        self._index_in_epoch = 0

    def decode(self, indices, string=False):
        """Map a batch of id sequences back to tokens (or one joined string)."""
        tokens = [[self.vocab_list[i] for i in dec] for dec in indices]
        if string:
            return self.decode_to_string(tokens[0])
        else:
            return tokens

    def decode_to_string(self, tokens):
        """Join a token list into a single space-separated string."""
        text = ' '.join(tokens)
        return text.strip()

    def cut_eos(self, indices):
        """Truncate an id sequence at the first EOS symbol (EOS excluded)."""
        eos_idx = indices.index(self._EOS_ID_)
        return indices[:eos_idx]

    def is_eos(self, voc_id):
        """True if voc_id is the EOS symbol."""
        return voc_id == self._EOS_ID_

    def is_defined(self, voc_id):
        """True if voc_id is one of the reserved symbols (PAD/STA/EOS/UNK)."""
        return voc_id in self._PRE_DEFINED_

    def max_len(self, batch_set):
        """Return (max input length, max output length + 1) over a batch.

        Even indices are questions (encoder inputs), odd indices are answers
        (decoder outputs); +1 leaves room for the STA/EOS symbol.
        """
        max_len_input = 0
        max_len_output = 0
        for i in range(0, len(batch_set), 2):
            len_input = len(batch_set[i])
            len_output = len(batch_set[i+1])
            if len_input > max_len_input:
                max_len_input = len_input
            if len_output > max_len_output:
                max_len_output = len_output
        return max_len_input, max_len_output + 1

    def pad(self, seq, max_len, start=None, eos=None):
        """Right-pad a sequence to max_len, optionally prefixing STA or appending EOS."""
        if start:
            padded_seq = [self._STA_ID_] + seq
        elif eos:
            padded_seq = seq + [self._EOS_ID_]
        else:
            padded_seq = seq
        if len(padded_seq) < max_len:
            return padded_seq + ([self._PAD_ID_] * (max_len - len(padded_seq)))
        else:
            return padded_seq

    def pad_left(self, seq, max_len):
        """Left-pad a sequence with PAD symbols up to max_len."""
        if len(seq) < max_len:
            return ([self._PAD_ID_] * (max_len - len(seq))) + seq
        else:
            return seq

    def transform(self, input, output, input_max, output_max):
        """Build one (encoder input, decoder input, target) one-hot triple."""
        enc_input = self.pad(input, input_max)
        dec_input = self.pad(output, output_max, start=True)
        target = self.pad(output, output_max, eos=True)
        # Following Google's approach, feed the input to the encoder reversed.
        enc_input.reverse()
        enc_input = np.eye(self.vocab_size)[enc_input]
        dec_input = np.eye(self.vocab_size)[dec_input]
        return enc_input, dec_input, target

    def next_batch(self, batch_size):
        """Return the next (enc_input, dec_input, target) mini-batch."""
        enc_input = []
        dec_input = []
        target = []
        start = self._index_in_epoch
        if self._index_in_epoch + batch_size < len(self.examples) - 1:
            self._index_in_epoch = self._index_in_epoch + batch_size
        else:
            self._index_in_epoch = 0
        batch_set = self.examples[start:start+batch_size]
        # Hack for experimenting with small datasets:
        # augment by reusing the current answer as the next question and the
        # next question as its answer.
        if FLAGS.data_loop is True:
            batch_set = batch_set + batch_set[1:] + batch_set[0:1]
        # TODO: switch to a bucketing scheme like Google's implementation.
        # For simplicity, instead of buckets, pad every sequence in the batch
        # to the same size.
        max_len_input, max_len_output = self.max_len(batch_set)
        for i in range(0, len(batch_set) - 1, 2):
            enc, dec, tar = self.transform(batch_set[i], batch_set[i+1],
                                           max_len_input, max_len_output)
            enc_input.append(enc)
            dec_input.append(dec)
            target.append(tar)
        return enc_input, dec_input, target

    def tokens_to_ids(self, tokens):
        """Map tokens to vocabulary ids; unknown tokens become UNK."""
        ids = []
        for t in tokens:
            if t in self.vocab_dict:
                ids.append(self.vocab_dict[t])
            else:
                ids.append(self._UNK_ID_)
        return ids

    def ids_to_tokens(self, ids):
        """Map vocabulary ids back to tokens."""
        tokens = []
        for i in ids:
            tokens.append(self.vocab_list[i])
        return tokens

    def load_examples(self, data_path):
        """Read the data file and tokenize each line into id sequences."""
        self.examples = []
        with open(data_path, 'r', encoding='utf-8') as content_file:
            for line in content_file:
                tokens = self.tokenizer(line.strip())
                ids = self.tokens_to_ids(tokens)
                self.examples.append(ids)

    def tokenizer(self, sentence):
        """Split on whitespace and pull punctuation out as separate tokens."""
        words = []
        _TOKEN_RE_ = re.compile("([.,!?\"':;)(])")
        for fragment in sentence.strip().split():
            words.extend(_TOKEN_RE_.split(fragment))
        return [w for w in words if w]

    def build_vocab(self, data_path, vocab_path):
        """Build a vocabulary file (one token per line) from the data file."""
        with open(data_path, 'r', encoding='utf-8') as content_file:
            content = content_file.read()
            words = self.tokenizer(content)
            words = list(set(words))
        with open(vocab_path, 'w') as vocab_file:
            for w in words:
                vocab_file.write(w + '\n')

    def load_vocab(self, vocab_path):
        """Load the vocabulary file; reserved symbol ids come first."""
        self.vocab_list = self._PRE_DEFINED_ + []
        with open(vocab_path, 'r', encoding='utf-8') as vocab_file:
            for line in vocab_file:
                self.vocab_list.append(line.strip())
        # {'_PAD_': 0, '_STA_': 1, '_EOS_': 2, '_UNK_': 3, 'Hello': 4, 'World': 5, ...}
        self.vocab_dict = {n: i for i, n in enumerate(self.vocab_list)}
        self.vocab_size = len(self.vocab_list)
def main(_):
    """CLI entry point: build or smoke-test the vocabulary / data files per FLAGS."""
    dialog = Dialog()

    if FLAGS.data_path and FLAGS.voc_test:
        # Message: "Testing the vocabulary with the following data."
        print("다음 데이터로 어휘 사전을 테스트합니다.", FLAGS.data_path)
        dialog.load_vocab(FLAGS.voc_path)
        dialog.load_examples(FLAGS.data_path)
        enc, dec, target = dialog.next_batch(10)
        print(target)
        enc, dec, target = dialog.next_batch(10)
        print(target)
    elif FLAGS.data_path and FLAGS.voc_build:
        # Message: "Building the vocabulary from the following data."
        print("다음 데이터에서 어휘 사전을 생성합니다.", FLAGS.data_path)
        dialog.build_vocab(FLAGS.data_path, FLAGS.voc_path)
    elif FLAGS.voc_test:
        dialog.load_vocab(FLAGS.voc_path)
        print(dialog.vocab_dict)

if __name__ == "__main__":
    tf.app.run()
| [
"numpy.eye",
"re.compile",
"tensorflow.app.run"
] | [((6255, 6267), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (6265, 6267), True, 'import tensorflow as tf\n'), ((4594, 4623), 're.compile', 're.compile', (['"""([.,!?"\':;)(])"""'], {}), '(\'([.,!?"\\\':;)(])\')\n', (4604, 4623), False, 'import re\n'), ((2543, 2566), 'numpy.eye', 'np.eye', (['self.vocab_size'], {}), '(self.vocab_size)\n', (2549, 2566), True, 'import numpy as np\n'), ((2598, 2621), 'numpy.eye', 'np.eye', (['self.vocab_size'], {}), '(self.vocab_size)\n', (2604, 2621), True, 'import numpy as np\n')] |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import numpy as np
from tqdm import trange
import nnabla as nn
import nnabla.functions as F
from nnabla.logger import logger
from nnabla.utils.image_utils import imsave
from models import SpadeGenerator, encode_inputs
from utils import *
def get_config():
    """Parse ``--load_params`` and load the model config stored beside it.

    Returns the config read from config.yaml, augmented with ``save_path``
    (the parameter directory) and the parsed CLI arguments.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--load_params", "-L", required=True, type=str,
                        help="A path for parameter to load. "
                             "model config file (config.yaml) is automatically detected on the same directory."
                             "If you want to change model settings, you can edit this config.yaml manually.")
    args, _ = parser.parse_known_args()

    param_dir = os.path.dirname(args.load_params)
    conf = read_yaml(os.path.join(param_dir, "config.yaml"))
    conf.save_path = param_dir
    conf.update(args.__dict__)

    return conf
class Generator(object):
    """Wraps a SPADE generator graph; callable on numpy label maps."""

    def __init__(self, conf, use_inst):
        self.conf = conf
        self.use_inst = use_inst  # whether an instance mask accompanies the object mask
        self.ist_mask = nn.Variable(
            shape=(conf.batch_size, ) + conf.image_shape)
        self.obj_mask = nn.Variable(
            shape=(conf.batch_size, ) + conf.image_shape)
        # Build the static generation graph once; forward() is re-run per batch.
        self.fake = self.define_network()

    def define_network(self):
        """Build the SPADE generation graph and return the fake-image variable."""
        if self.use_inst:
            # Instance + object masks -> one-hot object map plus boundary map.
            obj_onehot, bm = encode_inputs(self.ist_mask, self.obj_mask,
                                           n_ids=self.conf.n_class)

            mask = F.concatenate(obj_onehot, bm, axis=1)
        else:
            om = self.obj_mask
            if len(om.shape) == 3:
                om = F.reshape(om, om.shape + (1,))
            obj_onehot = F.one_hot(om, shape=(self.conf.n_class, ))

            # to NCHW layout
            mask = F.transpose(obj_onehot, (0, 3, 1, 2))

        generator = SpadeGenerator(
            self.conf.g_ndf, image_shape=self.conf.image_shape)
        z = F.randn(shape=(self.conf.batch_size, self.conf.z_dim))
        fake = generator(z, mask)
        # Pixel intensities of fake are [-1, 1]. Rescale it to [0, 1]
        fake = (fake + 1) / 2

        return fake

    @staticmethod
    def _check_ndarray(x):
        if not isinstance(x, np.ndarray):
            raise ValueError("image must be np.ndarray.")

    def __call__(self, ist_label, obj_label):
        """Run one forward pass; returns the generated images as ndarray."""
        if self.use_inst and ist_label is not None:
            self._check_ndarray(ist_label)
            self.ist_mask.d = ist_label

        self._check_ndarray(obj_label)
        self.obj_mask.d = obj_label

        self.fake.forward(clear_buffer=True)

        return self.fake.d
def generate():
    """Generate images for the validation split of the configured dataset.

    Loads the trained SPADE generator parameters, iterates over the
    validation data, and writes generated + colorized-input PNGs under
    ``<save_path>/generated``.
    """
    rng = np.random.RandomState(803)

    conf = get_config()

    # set context
    comm = init_nnabla(conf)

    # find all test data
    if conf.dataset == "cityscapes":
        data_list = get_cityscape_datalist(
            conf.cityscapes, data_type="val", save_file=comm.rank == 0)
        conf.n_class = conf.cityscapes.n_label_ids
        use_inst = True

        data_iter = create_cityscapes_iterator(conf.batch_size, data_list, comm=comm,
                                                image_shape=conf.image_shape, rng=rng,
                                                flip=False)
    elif conf.dataset == "ade20k":
        data_list = get_ade20k_datalist(
            conf.ade20k, data_type="val", save_file=comm.rank == 0)
        conf.n_class = conf.ade20k.n_label_ids + 1  # class id + unknown
        use_inst = False
        load_shape = tuple(
            x + 30 for x in conf.image_shape) if conf.use_crop else conf.image_shape

        data_iter = create_ade20k_iterator(conf.batch_size, data_list, comm=comm,
                                            load_shape=load_shape, crop_shape=conf.image_shape,
                                            rng=rng, flip=False)
    else:
        raise NotImplementedError(
            "Currently dataset {} is not supported.".format(conf.dataset))

    # define generator
    generator = Generator(conf, use_inst)

    # load parameters
    if not os.path.exists(conf.load_params):
        logger.warn("Path to load params is not found."
                    " Loading params is skipped and generated result will be unreasonable. ({})".format(conf.load_params))
    else:
        print("load parameters from {}".format(conf.load_params))
        nn.load_parameters(conf.load_params)

    niter = get_iteration_per_epoch(
        data_iter._size, conf.batch_size, round="ceil")
    progress_iterator = trange(
        niter, desc="[Generating Images]", disable=comm.rank > 0)

    # for label2color
    label2color = Colorize(conf.n_class)

    save_path = os.path.join(conf.save_path, "generated")
    if not os.path.exists(save_path):
        os.makedirs(save_path, exist_ok=True)

    logger.info("Generated images will be saved on '{}'.".format(save_path))

    cnt = 0
    for i in progress_iterator:
        if conf.dataset == "cityscapes":
            _, instance_id, object_id = data_iter.next()
        elif conf.dataset == "ade20k":
            _, object_id = data_iter.next()
            instance_id = None
        else:
            # Fix: `raise NotImplemented()` raised a TypeError, because
            # NotImplemented is a constant, not an exception class.
            raise NotImplementedError()

        gen = generator(instance_id, object_id)
        id_colorized = label2color(object_id).astype(np.uint8)

        # last batch may be partially filled; only keep valid samples
        valid = conf.batch_size
        if cnt > data_iter._size:
            valid = data_iter._size - conf.batch_size * (i - 1)

        for j in range(valid):
            gen_image_path = os.path.join(
                save_path, "res_{}_{}.png".format(comm.rank, cnt + j))
            input_image_path = os.path.join(
                save_path, "input_{}_{}.png".format(comm.rank, cnt + j))
            imsave(gen_image_path, gen[j], channel_first=True)
            imsave(input_image_path, id_colorized[j])

        cnt += conf.batch_size

if __name__ == '__main__':
    generate()
| [
"models.encode_inputs",
"os.path.exists",
"nnabla.functions.randn",
"models.SpadeGenerator",
"nnabla.functions.one_hot",
"nnabla.functions.transpose",
"argparse.ArgumentParser",
"os.makedirs",
"nnabla.utils.image_utils.imsave",
"os.path.join",
"nnabla.load_parameters",
"nnabla.functions.concat... | [((846, 871), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (869, 871), False, 'import argparse\n'), ((1407, 1440), 'os.path.dirname', 'os.path.dirname', (['args.load_params'], {}), '(args.load_params)\n', (1422, 1440), False, 'import os\n'), ((3193, 3219), 'numpy.random.RandomState', 'np.random.RandomState', (['(803)'], {}), '(803)\n', (3214, 3219), True, 'import numpy as np\n'), ((5050, 5114), 'tqdm.trange', 'trange', (['niter'], {'desc': '"""[Generating Images]"""', 'disable': '(comm.rank > 0)'}), "(niter, desc='[Generating Images]', disable=comm.rank > 0)\n", (5056, 5114), False, 'from tqdm import trange\n'), ((5205, 5246), 'os.path.join', 'os.path.join', (['conf.save_path', '"""generated"""'], {}), "(conf.save_path, 'generated')\n", (5217, 5246), False, 'import os\n'), ((1304, 1337), 'os.path.dirname', 'os.path.dirname', (['args.load_params'], {}), '(args.load_params)\n', (1319, 1337), False, 'import os\n'), ((1640, 1696), 'nnabla.Variable', 'nn.Variable', ([], {'shape': '((conf.batch_size,) + conf.image_shape)'}), '(shape=(conf.batch_size,) + conf.image_shape)\n', (1651, 1696), True, 'import nnabla as nn\n'), ((1735, 1791), 'nnabla.Variable', 'nn.Variable', ([], {'shape': '((conf.batch_size,) + conf.image_shape)'}), '(shape=(conf.batch_size,) + conf.image_shape)\n', (1746, 1791), True, 'import nnabla as nn\n'), ((2384, 2450), 'models.SpadeGenerator', 'SpadeGenerator', (['self.conf.g_ndf'], {'image_shape': 'self.conf.image_shape'}), '(self.conf.g_ndf, image_shape=self.conf.image_shape)\n', (2398, 2450), False, 'from models import SpadeGenerator, encode_inputs\n'), ((2476, 2530), 'nnabla.functions.randn', 'F.randn', ([], {'shape': '(self.conf.batch_size, self.conf.z_dim)'}), '(shape=(self.conf.batch_size, self.conf.z_dim))\n', (2483, 2530), True, 'import nnabla.functions as F\n'), ((4596, 4628), 'os.path.exists', 'os.path.exists', (['conf.load_params'], {}), '(conf.load_params)\n', (4610, 4628), False, 
'import os\n'), ((4894, 4930), 'nnabla.load_parameters', 'nn.load_parameters', (['conf.load_params'], {}), '(conf.load_params)\n', (4912, 4930), True, 'import nnabla as nn\n'), ((5258, 5283), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (5272, 5283), False, 'import os\n'), ((5293, 5330), 'os.makedirs', 'os.makedirs', (['save_path'], {'exist_ok': '(True)'}), '(save_path, exist_ok=True)\n', (5304, 5330), False, 'import os\n'), ((1936, 2004), 'models.encode_inputs', 'encode_inputs', (['self.ist_mask', 'self.obj_mask'], {'n_ids': 'self.conf.n_class'}), '(self.ist_mask, self.obj_mask, n_ids=self.conf.n_class)\n', (1949, 2004), False, 'from models import SpadeGenerator, encode_inputs\n'), ((2068, 2105), 'nnabla.functions.concatenate', 'F.concatenate', (['obj_onehot', 'bm'], {'axis': '(1)'}), '(obj_onehot, bm, axis=1)\n', (2081, 2105), True, 'import nnabla.functions as F\n'), ((2263, 2304), 'nnabla.functions.one_hot', 'F.one_hot', (['om'], {'shape': '(self.conf.n_class,)'}), '(om, shape=(self.conf.n_class,))\n', (2272, 2304), True, 'import nnabla.functions as F\n'), ((2325, 2362), 'nnabla.functions.transpose', 'F.transpose', (['obj_onehot', '(0, 3, 1, 2)'], {}), '(obj_onehot, (0, 3, 1, 2))\n', (2336, 2362), True, 'import nnabla.functions as F\n'), ((6235, 6285), 'nnabla.utils.image_utils.imsave', 'imsave', (['gen_image_path', 'gen[j]'], {'channel_first': '(True)'}), '(gen_image_path, gen[j], channel_first=True)\n', (6241, 6285), False, 'from nnabla.utils.image_utils import imsave\n'), ((6298, 6339), 'nnabla.utils.image_utils.imsave', 'imsave', (['input_image_path', 'id_colorized[j]'], {}), '(input_image_path, id_colorized[j])\n', (6304, 6339), False, 'from nnabla.utils.image_utils import imsave\n'), ((2207, 2237), 'nnabla.functions.reshape', 'F.reshape', (['om', '(om.shape + (1,))'], {}), '(om, om.shape + (1,))\n', (2216, 2237), True, 'import nnabla.functions as F\n')] |
"""
Goal - Calculate distance travelled by each fish
Date - Mar 11 2021
"""
import os
import pathlib
from pprint import pprint
import numpy as np
from scipy import stats
from scipy.spatial import distance
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import trajectorytools as tt
import trajectorytools.plot as ttplot
import trajectorytools.socialcontext as ttsocial
from trajectorytools.constants import dir_of_data
import csv
import pickle
import argparse
import pandas as pd
def position(tr):
    """Return the raw trajectory positions stored on *tr* (shape: tr.s.shape)."""
    return tr.s
def speed(tr):
    """Per-frame speed magnitude via central differences, scaled by the
    60 fps frame rate (output length: tr.speed.shape - 2)."""
    displacement = (position(tr)[2:] - position(tr)[:-2]) / 2
    return np.linalg.norm(displacement, axis=-1) * 60
def acceleration(tr):
    """Per-frame acceleration magnitude via second differences, scaled by
    60^2 for the frame rate (output length: tr.acceleration.shape - 2)."""
    second_diff = position(tr)[2:] - 2 * position(tr)[1:-1] + position(tr)[:-2]
    return np.linalg.norm(second_diff, axis=-1) * 3600
def e(tr):
    """Unit heading (normalized velocity) vectors from central differences.

    Output shape matches tr.speed.shape - 2 frames, with a trailing
    coordinate axis.

    Fix: the norm was taken of an undefined name ``v`` (NameError at
    runtime); it must be the norm of ``vel`` computed just above.
    """
    vel = (position(tr)[2:] - position(tr)[:-2]) / 2
    n = np.linalg.norm(vel, axis=2)
    return vel / n[..., np.newaxis]
def filter_low_pass(tr, roi1 = 30, roi2 = 3340): #ind (for individual) starts from 0, roi - edge of region of interest
    """Mask x/y positions whose frame (or either neighbouring frame) exceeds
    the speed cutoff roi1 or the acceleration cutoff roi2.

    Returns (masked_x, masked_y) for frames [2:-2] of the trajectory.
    """
    # Column 0 = x coordinate; condition checks the frame plus both neighbours.
    position_mask0 = np.ma.masked_where((speed(tr)[1:-1] > roi1)|(speed(tr)[0:-2] > roi1)|(speed(tr)[2:] > roi1)|(acceleration(tr)[1:-1] > roi2)|(acceleration(tr)[0:-2] > roi2)|(acceleration(tr)[2:] > roi2), position(tr)[2:-2,:,0],copy=False)
    # Column 1 = y coordinate, same masking condition.
    position_mask1 = np.ma.masked_where((speed(tr)[1:-1] > roi1)|(speed(tr)[0:-2] > roi1)|(speed(tr)[2:] > roi1)|(acceleration(tr)[1:-1] > roi2)|(acceleration(tr)[0:-2] > roi2)|(acceleration(tr)[2:] > roi2), position(tr)[2:-2,:,1],copy=False)
    return(position_mask0,position_mask1)
def filter_speed_low_pass(tr, roi1 = 30, roi2 = 3340):
    """Speed series with implausible frames (speed > roi1 or
    acceleration > roi2) masked out."""
    implausible = (speed(tr) > roi1) | (acceleration(tr) > roi2)
    return np.ma.masked_where(implausible, speed(tr), copy=False)
def filter_acc_low_pass(tr, roi1 = 30, roi2 = 3340):
    """Acceleration series with implausible frames (speed > roi1 or
    acceleration > roi2) masked out."""
    implausible = (speed(tr) > roi1) | (acceleration(tr) > roi2)
    return np.ma.masked_where(implausible, acceleration(tr), copy=False)
def distance_loom(tr, looms, n, roi1 = 30, roi2 = 3340):
    """Mean distance per fish during frames [loom_n+500, loom_n+700),
    in body lengths (speed sum divided by the 60 fps frame rate)."""
    window = filter_speed_low_pass(tr, roi1, roi2)[(looms[n]+500):(looms[n] + 700)]
    per_fish_distance = window.sum(axis=0)
    return np.nanmean(per_fish_distance) / 60
def distance_before_loom(tr, looms, roi1 = 30, roi2 = 3340):
    """Mean distance per fish from the start of the trial up to 500 frames
    after the first loom, in body lengths."""
    window = filter_speed_low_pass(tr, roi1, roi2)[0:(looms[0]+500)]
    per_fish_distance = window.sum(axis=0)
    return np.nanmean(per_fish_distance) / 60
def distance_total(tr, roi1 = 30, roi2 = 3340):
    """Mean distance per fish over the first 80000 frames, in body lengths."""
    window = filter_speed_low_pass(tr, roi1, roi2)[0:80000]
    per_fish_distance = window.sum(axis=0)
    return np.nanmean(per_fish_distance) / 60
# Experimental design: temperatures x group sizes x replicates.
temperature = [9,13,17,21,25,29]#range(9,30,4)

group = [1,2,4,8,16]

replication = range(10) # number of replicates per treatment

# Trial metadata, including loom onset frames per trial.
met = pd.read_csv('../../data/temp_collective/roi/metadata_w_loom.csv')

# For every trial, compute the distance travelled before the first loom and
# over the whole trial, and write one CSV row per trial.
with open('../../data/temp_collective/roi/distance_wo_loom.csv', mode='w') as stats_speed:
    writer = csv.writer(stats_speed, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)

    writer.writerow([
        'Temperature', 'Groupsize', 'Replicate', 'Trial', 'Date', 'Subtrial',
        'Time_fish_in', 'Time_start_record','Distance before loom','Total distance'])

    for i in temperature:
        for j in group:
            for k in replication:
                #print(i,j,k+1)
                # Single-fish trials were tracked without gap interpolation.
                if j == 1:
                    trajectories_file_path = '../../data/temp_collective/roi/'+str(i)+'/' +str(j)+'/GS_'+str(j)+'_T_'+str(i)+'_roi_'+str(k+1)+'/trajectories.npy'
                else:
                    trajectories_file_path = '../../data/temp_collective/roi/'+str(i)+'/' +str(j)+'/GS_'+str(j)+'_T_'+str(i)+'_roi_'+str(k+1)+'/trajectories_wo_gaps.npy'
                try:
                    tr = tt.Trajectories.from_idtrackerai(trajectories_file_path, center=True).normalise_by('body_length')
                    tr.new_time_unit(tr.params['frame_rate'], 'seconds')
                except FileNotFoundError:
                    # Some treatment combinations have missing trials; skip them.
                    print(i,j,k)
                    print('File not found')
                    continue
                looms = []
                # Find this trial's row in the metadata and emit its results.
                for m in range(len(met.Temperature)):
                    if met.Temperature[m] == i and met.Groupsize[m] == j and met.Replicate[m] == (k+1):
                        looms.append(met['Loom 1'][m])
                        looms.append(met['Loom 2'][m])
                        looms.append(met['Loom 3'][m])
                        looms.append(met['Loom 4'][m])
                        looms.append(met['Loom 5'][m])
                        writer.writerow([
                            i,j,k+1,met.Trial[m],met.Date[m],met.Subtrial[m],
                            met.Time_fish_in[m],met.Time_start_record[m],
                            distance_before_loom(tr,looms), distance_total(tr)])
| [
"pandas.read_csv",
"csv.writer",
"numpy.nanmean",
"numpy.linalg.norm",
"trajectorytools.Trajectories.from_idtrackerai"
] | [((2854, 2919), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/temp_collective/roi/metadata_w_loom.csv"""'], {}), "('../../data/temp_collective/roi/metadata_w_loom.csv')\n", (2865, 2919), True, 'import pandas as pd\n'), ((687, 713), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {'axis': '(-1)'}), '(v, axis=-1)\n', (701, 713), True, 'import numpy as np\n'), ((874, 900), 'numpy.linalg.norm', 'np.linalg.norm', (['a'], {'axis': '(-1)'}), '(a, axis=-1)\n', (888, 900), True, 'import numpy as np\n'), ((1040, 1065), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {'axis': '(2)'}), '(v, axis=2)\n', (1054, 1065), True, 'import numpy as np\n'), ((3025, 3110), 'csv.writer', 'csv.writer', (['stats_speed'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(stats_speed, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_MINIMAL\n )\n', (3035, 3110), False, 'import csv\n'), ((2365, 2379), 'numpy.nanmean', 'np.nanmean', (['dd'], {}), '(dd)\n', (2375, 2379), True, 'import numpy as np\n'), ((2541, 2555), 'numpy.nanmean', 'np.nanmean', (['dd'], {}), '(dd)\n', (2551, 2555), True, 'import numpy as np\n'), ((2696, 2710), 'numpy.nanmean', 'np.nanmean', (['dd'], {}), '(dd)\n', (2706, 2710), True, 'import numpy as np\n'), ((3860, 3929), 'trajectorytools.Trajectories.from_idtrackerai', 'tt.Trajectories.from_idtrackerai', (['trajectories_file_path'], {'center': '(True)'}), '(trajectories_file_path, center=True)\n', (3892, 3929), True, 'import trajectorytools as tt\n')] |
# In this tutorial, you will learn how to use the for loop statement in Python.
import numpy as np

# Aim: We want to print "Hello" 10 times:

# np.arange creates a sequence from 0-9.
# In each loop, i is given a number in the sequence (in order).
# The ":" marks the beginning of the loop body.
# The moment you press enter after the ":", a tab space (indent) is created for you in Spyder (and most Python IDEs).
# All statements with an indent are considered to be a part of that loop.
# One should be careful not to create/delete extra spaces at the beginning of such statements. This will lead to problems. The indent space plays a role similar to that of curly brackets in C/C++.
# To come out of the loop, at the end of the last statement of the loop, press shift + tab.
for i in np.arange(0,10):
    print("{0} Hello".format(i)) # observe the space before this statement (don't delete it). It's called an indent, which is auto-created when you press enter after typing the ":" character. You can also press the tab key to create one.

print("loop over")

# Assignment
# Write a python code using loops to print out series like:
# 1,1
# 1,2
# 1,3
# 1,4
# 1,5
# 2,1
# 2,2
# 2,3
# 2,4
# 2,5
# Hint: You will require a loop inside a loop. The second for loop statement must be single-indented and the content of the second for loop must be double-indented.
# In case you could not do it, the answer is given below.
#
#
#
#
#
#
#
# for i in np.arange(1,3):
#     for j in np.arange(1,6):
#         print("{0},{1}".format(i,j))
#     print("loop over")
| [
"numpy.arange"
] | [((774, 790), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (783, 790), True, 'import numpy as np\n')] |
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Property data types.
Ability to import, etc. from text files is
part of the methods in the type.
Import property database from textfile(s):
* See :meth:`PropertyData.from_csv`, for the expected format for data.
* See :meth:`PropertyMetadata()` for the expected format for metadata.
"""
# stdlib
import csv
import json
import logging
# third-party
try:
import pandas as pd
import numpy as np
except ImportError:
np, pd = None, None
# local
from .util import get_file
from . import tabular
__author__ = '<NAME>'
_log = logging.getLogger(__name__)
class AddedCSVColumnError(KeyError):
    """Error for :meth:PropertyData.add_csv()
    """

    def __init__(self, names, how_bad, column_type=''):
        prefix = column_type + ' ' if column_type else ''
        name_list = list(names)
        if len(name_list) == 1:
            msg = 'Added CSV data {} {}column "{}"'.format(
                how_bad, prefix, name_list[0]
            )
        else:
            msg = 'Added CSV data {} {}columns: {}'.format(
                how_bad, prefix, ', '.join(name_list)
            )
        super().__init__(msg)
class Fields(tabular.Fields):
    """Constants for fields.

    Extends the base tabular field constants with the two values
    allowed in a property column's "type" field.
    """

    # Values for "type" field
    C_STATE, C_PROP = 'state', 'property'
class PropertyTable(tabular.Table):
    """Property data and metadata together (at last!)
    """

    def __init__(self, data=None, **kwargs):
        """Constructor.

        Args:
            data (PropertyData or list): Property data columns; a plain
                list is wrapped in a :class:`PropertyData`.
            **kwargs: Passed through to the :class:`tabular.Table` base.
        Raises:
            TypeError: if `data` is neither a list nor a PropertyData.
        """
        if isinstance(data, PropertyData):
            pdata = data
        elif isinstance(data, list):
            pdata = PropertyData(data)
        else:
            raise TypeError('list or PropertyData object required')
        super(PropertyTable, self).__init__(data=pdata, **kwargs)

    @classmethod
    def load(cls, file_or_path, validate=True):
        """Create PropertyTable from JSON input.

        Args:
            file_or_path (file or str): Filename or file object
                from which to read the JSON-formatted data.
            validate (bool): If true, apply validation to input JSON data.

        Example input::

            {
                "meta": [
                    {"datatype": "MEA",
                     "info": "J. Chem. Eng. Data, 2009, Vol 54, pg. 306-310",
                     "notes": "r is MEA weight fraction in aqueous soln.",
                     "authors": "<NAME>., <NAME>., <NAME>.",
                     "title": "Density and Viscosity of ..."}
                ],
                "data": [
                    {"name": "Viscosity Value",
                     "units": "mPa-s",
                     "values": [2.6, 6.2],
                     "error_type": "absolute",
                     "errors": [0.06, 0.004],
                     "type": "property"},
                    {"name": "r",
                     "units": "",
                     "values": [0.2, 1000],
                     "type": "state"}
                ]
            }
        """
        fp = get_file(file_or_path)
        d = json.load(fp)
        # Fix: the `validate` flag was accepted but ignored; honor it as
        # documented ("If true, apply validation to input JSON data").
        if validate:
            PropertyTable._validate_json(d)
        metalist = d[Fields.META]
        meta = [PropertyMetadata(m) for m in metalist]
        data = PropertyData(d[Fields.DATA])
        tbl = PropertyTable(data=data)
        for m in meta:
            tbl.add_metadata(m)
        return tbl
class PropertyData(tabular.TabularData):
"""Class representing property data that knows how to
construct itself from a CSV file.
You can build objects from multiple CSV files as well.
See the property database section of the API docs for
details, or read the code in :meth:`add_csv` and the
tests in :mod:`idaes_dmf.propdb.tests.test_mergecsv`.
"""
embedded_units = r'(.*)\((.*)\)'
    def __init__(self, data):
        """Construct new object from input list.

        Example input::

            [{
                "name": "Density Data",
                "units": "g/cm^3",
                "values": [1.0053, 1.0188, .., ],
                "errors": [.00005, .., .00005],
                "error_type": "absolute",
                "type": "property"
            }, ...etc...]

        Args:
            data (list): Input data columns
        Returns:
            (PropertyData) New instance.
        """
        super(PropertyData, self).__init__(data, error_column=True)
        # Cache the number of state columns at construction time.
        self._nstates = len(self.states)
    @property
    def states(self):
        """Columns whose "type" field marks them as state data."""
        return [c for c in self.columns if self._is_state(c)]
    @property
    def properties(self):
        """Columns whose "type" field marks them as property data."""
        return [c for c in self.columns if self._is_prop(c)]
    @staticmethod
    def _is_state(c):
        """True if column dict `c` has type "state"."""
        return c[Fields.COLTYPE] == Fields.C_STATE
    @staticmethod
    def _is_prop(c):
        """True if column dict `c` has type "property"."""
        return c[Fields.COLTYPE] == Fields.C_PROP
def names(self, states=True, properties=True):
"""Get column names.
Args:
states (bool): If False, exclude "state" data, e.g. the
ambient temperature, and only
include measured property values.
properties (bool): If False, excluse property data
Returns:
list[str]: List of column names.
"""
result = []
if states:
result.extend([v[Fields.DATA_NAME] for v in self.states])
if properties:
result.extend([v[Fields.DATA_NAME] for v in self.properties])
return result
    def is_state_column(self, index):
        """Whether given column is state.

        Args:
            index (int): Index of column
        Returns:
            (bool) State or property and the column number.
        Raises:
            IndexError: No column at that index.
        """
        # Indexing into self.columns raises IndexError for a bad index.
        col = self.columns[index]
        return self._is_state(col)
    def is_property_column(self, index):
        """Whether given column is a property. See :meth:`is_state_column`."""
        # A column is exactly one of {state, property}.
        return not self.is_state_column(index)
    def as_arr(self, states=True):
        """Export property data as arrays.

        Args:
            states (bool): If False, exclude "state" data, e.g. the
                           ambient temperature, and only
                           include measured property values.
        Returns:
            (values[M,N], errors[M,N]) Two arrays of floats,
            each with M columns having N values.
        Raises:
            ValueError if the columns are not all the same length
        """
        n, values, errors = None, [], []
        # extract state columns; states carry no error data, so zeros are used
        if states:
            for v in self.states:
                vals = v[Fields.DATA_VALUES]
                # All columns must share one length; first column sets it.
                if n is None:
                    n = len(vals)
                elif len(vals) != n:
                    raise ValueError(
                        'State values "{}" length {} != {}'.format(
                            v[Fields.DATA_NAME], len(vals), n
                        )
                    )
                values.append(vals)
                errors.append([0] * len(vals))
        # extract property columns, with their recorded errors
        for v in self.properties:
            vals = v[Fields.DATA_VALUES]
            if n is None:
                n = len(vals)
            elif len(vals) != n:
                raise ValueError(
                    'Property values "{}" length {} != {}'.format(
                        v[Fields.DATA_NAME], len(vals), n
                    )
                )
            values.append(v[Fields.DATA_VALUES])
            errors.append(v[Fields.DATA_ERRORS])
        return values, errors
    def values_dataframe(self, states=True):
        """Get values as a dataframe.

        Args:
            states (bool): see :meth:`names()`.
        Returns:
            (pd.DataFrame) Pandas dataframe for values.
        Raises:
            ImportError: If `pandas` or `numpy` were never
                successfully imported.
        """
        return self._get_prop_dataframe(Fields.DATA_VALUES, states)
    def errors_dataframe(self, states=False):
        """Get errors as a dataframe.

        Args:
            states (bool): If False, exclude state data.
                This is the default, because states do not
                normally have associated error information.
        Returns:
            pd.DataFrame: Pandas dataframe for values.
        Raises:
            ImportError: If `pandas` or `numpy` were never
                successfully imported.
        """
        return self._get_prop_dataframe(Fields.DATA_ERRORS, states)
def _get_prop_dataframe(self, field, states):
self._check_pandas_import()
a1, names = [], []
if states:
a1 = [v[field] for v in self.states]
names = [v[Fields.DATA_NAME] for v in self.states]
a1.extend([v[field] for v in self.properties])
names.extend([v[Fields.DATA_NAME] for v in self.properties])
a2 = np.array(a1).transpose()
return pd.DataFrame(a2, columns=names)
@staticmethod
def from_csv(file_or_path, nstates=0):
"""Import the CSV data.
Expected format of the files is a header plus data rows.
Header: Index-column, Column-name(1), Error-column(1), \
Column-name(2), Error-column(2), ..
Data: <index>, <val>, <errval>, <val>, <errval>, ..
Column-name is in the format "Name (units)"
Error-column is in the format "<type> Error", where "<type>" is
the error type.
Args:
file_or_path (file-like or str): Input file
nstates (int): Number of state columns, appearing
first before property columns.
Returns:
PropertyData: New properties instance
"""
input_file = get_file(file_or_path)
csv_file = csv.reader(input_file)
row = next(csv_file)
names, data = PropertyData._prop_parse_csv_headers(nstates, row)
for row in csv_file:
# print('@@ parse csv row: {}'.format(row))
PropertyData._parse_csv_row(data, row, error_column=True)
obj = PropertyData(data)
return obj
    def add_csv(self, file_or_path, strict=False):
        """Add to existing object from a new CSV file.

        Depending on the value of the `strict` argument (see
        below), the new file may or may not have the same
        properties as the object -- but it always needs to have
        the same number of state columns, and in the same order.

        .. note:: Data that is "missing" because of property columns in
           one CSV and not the other will be filled with `float(nan)` values.

        Args:
            file_or_path (file or str): Input file. This should be in exactly
                the same format as expected by :meth:from_csv().
            strict (bool): If true, require that the columns in the input
                CSV match columns in this object. Otherwise, only require
                that *state* columns in input CSV match columns in this
                object. New property columns are added, and matches
                to existing property columns will append the data.
        Raises:
            AddedCSVColumnError: If the new CSV column headers are not the
                same as the ones in this object.
        Returns:
            (int) Number of added rows
        """
        nstates = self._nstates
        input_file = get_file(file_or_path)
        csv_file = csv.reader(input_file)
        # Parse the header
        row = next(csv_file)
        hdr_names, hdr_data = PropertyData._prop_parse_csv_headers(nstates, row)
        # print('@@ add_csv, column names = {}, data columns = {}'
        #       .format(hdr_names, self.names()))
        # Check that set of keys in new data is the same
        cur_keys = set(self.names())
        new_keys = set(hdr_names)
        # This is used to re-order input data
        rowmap = None
        if strict:
            # Strict mode: the input column set must equal ours exactly.
            if cur_keys > new_keys:
                missing = cur_keys - new_keys
                raise AddedCSVColumnError(missing, 'is missing')
            elif new_keys > cur_keys:
                extra = new_keys - cur_keys
                raise AddedCSVColumnError(extra, 'has extra')
            elif new_keys != cur_keys:
                # Neither a subset nor a superset: report both sides.
                extra = new_keys - cur_keys
                missing = cur_keys - new_keys
                namelist = (
                    '(' + ','.join(extra) + ')',
                    'instead of',
                    '(' + ','.join(missing) + ')',
                )
                raise AddedCSVColumnError(namelist, 'has different')
        else:
            # check that all states are in common
            hdr_states = filter(self._is_state, hdr_data)
            new_states = [s[Fields.DATA_NAME] for s in hdr_states]
            new_states = set(new_states)
            cur_states = set(self.names(properties=False))
            if new_states != cur_states:
                extra = new_states - cur_states
                missing = cur_states - new_states
                if extra and missing:
                    namelist = (
                        '(' + ','.join(extra) + ')',
                        'instead of',
                        '(' + ','.join(missing) + ')',
                    )
                    raise AddedCSVColumnError(
                        namelist, 'has different', column_type='state'
                    )
                elif extra:
                    raise AddedCSVColumnError(extra, 'has extra', column_type='state')
                elif missing:
                    raise AddedCSVColumnError(
                        missing, 'is missing', column_type='state'
                    )
                else:
                    raise RuntimeError('unexpected branch')
            # check that at least one property is in common
            new_prop = new_keys - new_states
            if not new_prop:
                return 0  # no data
            cur_prop = set(self.names(states=False))
            # Add columns for all properties only found on the input,
            # and initialize values to a list of NaN's as long as the
            # current table, so data in all fields will be the same length.
            # Initialize rowmap with mapping for state columns
            rowmap = [-1] * len(hdr_names)
            idx = 0
            for i, s in enumerate(hdr_data):
                if s[Fields.COLTYPE] == Fields.C_PROP:
                    continue
                rowmap[i] = idx
                idx += 1
            nan_list = [float('nan')] * self.num_rows
            idx = 0
            for i, value in enumerate(hdr_data):
                if value[Fields.COLTYPE] == Fields.C_STATE:
                    continue
                name = value[Fields.DATA_NAME]
                if name not in cur_prop:
                    # New property: pad with NaN history and append it.
                    value[Fields.DATA_NAME] = name
                    value[Fields.DATA_VALUES] = nan_list[:]
                    value[Fields.DATA_ERRORS] = nan_list[:]
                    value[Fields.COLTYPE] = Fields.C_PROP
                    self._data.append(value)
                    # NOTE(review): the matching branch below maps existing
                    # properties to `idx + self._nstates`; this index omits
                    # the state offset -- confirm the intended ordering.
                    rowmap[i] = len(self.properties) - 1
                else:
                    rowmap[i] = idx + self._nstates
                idx += 1
        # print("@@ rowmap = {}".format(rowmap))
        # Parse the new data
        num_added = 0
        new_rowlen = 1 + 2 * len(self.names())
        for row in csv_file:
            if rowmap:
                # Re-order according to the rowmap.
                # By initializing with NaN, any columns not in the
                # input, but in the current data, will be replaced with NaN
                # values.
                row2 = [float('nan')] * new_rowlen
                # print('@@ row={} row2-init={}'.format(row, row2))
                for i, j in enumerate(rowmap):
                    row2[j * 2 + 1] = row[i * 2 + 1]  # value
                    row2[j * 2 + 2] = row[i * 2 + 2]  # error
                row = row2
            self._parse_csv_row(self._data, row, error_column=True)
            num_added += 1
            self._nrows += 1
        return num_added
@classmethod
def _prop_parse_csv_headers(cls, nstates, headers):
"""Parse a row of CSV headers which are pairs
of columns like "<name> [(units)], <error-type> Error".
Returns:
(names, data). Names is a list of all the column names.
Data is a dict with two keys, "properties" and "states".
Each value will be a list of property/state objects.
"""
names, data = cls._parse_csv_headers(headers, error_column=True)
for i in range(0, nstates):
data[i][Fields.COLTYPE] = Fields.C_STATE
for i in range(nstates, len(data)):
data[i][Fields.COLTYPE] = Fields.C_PROP
return names, data
class PropertyMetadata(tabular.Metadata):
    """Metadata importer for property tables.

    All behavior is inherited unchanged from :class:`tabular.Metadata`.
    """
class PropertyColumn(tabular.Column):
    """A data column for a measured property, including error data."""

    type_name = 'Property'

    def __init__(self, name, data):
        super().__init__(name, data)
        # Properties carry per-value errors and an error type.
        self.errors = data[Fields.DATA_ERRORS]
        self.error_type = data[Fields.DATA_ERRTYPE]

    def data(self):
        """Return the column contents keyed by the standard field names."""
        return {
            Fields.DATA_UNITS: self.units,
            Fields.DATA_VALUES: self.values,
            Fields.DATA_ERRORS: self.errors,
            Fields.DATA_ERRTYPE: self.error_type,
        }
class StateColumn(tabular.Column):
    """A data column for a state (e.g. ambient conditions)."""

    type_name = 'State'

    def __init__(self, name, data):
        super().__init__(name, data)
        # States have no measurement error: zero for every value.
        self.errors = [0.0] * len(self)
        self.error_type = 'none'

    def data(self):
        """Return the column contents keyed by the standard field names."""
        return {
            Fields.DATA_UNITS: self.units,
            Fields.DATA_VALUES: self.values,
        }
def convert_csv(meta_csv, datatype, data_csv, nstates, output):
    """Convert metadata and data CSV files into a serialized property table.

    Args:
        meta_csv: metadata CSV input (file or path).
        datatype: datatype tag to record on the metadata.
        data_csv: property data CSV input (file or path).
        nstates: number of leading state columns in the data CSV.
        output: destination (file or path) for the dumped table.
    """
    metadata = PropertyMetadata.from_csv(meta_csv)
    metadata.datatype = datatype
    table = PropertyTable(
        data=PropertyData.from_csv(data_csv, nstates),
        metadata=metadata,
    )
    table.dump(get_file(output, mode='w'))
| [
"logging.getLogger",
"pandas.DataFrame",
"numpy.array",
"json.load",
"csv.reader"
] | [((1314, 1341), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1331, 1341), False, 'import logging\n'), ((3757, 3770), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (3766, 3770), False, 'import json\n'), ((9668, 9699), 'pandas.DataFrame', 'pd.DataFrame', (['a2'], {'columns': 'names'}), '(a2, columns=names)\n', (9680, 9699), True, 'import pandas as pd\n'), ((10513, 10535), 'csv.reader', 'csv.reader', (['input_file'], {}), '(input_file)\n', (10523, 10535), False, 'import csv\n'), ((12220, 12242), 'csv.reader', 'csv.reader', (['input_file'], {}), '(input_file)\n', (12230, 12242), False, 'import csv\n'), ((9628, 9640), 'numpy.array', 'np.array', (['a1'], {}), '(a1)\n', (9636, 9640), True, 'import numpy as np\n')] |
import csv
import librosa
import numpy as np
import soundfile as sf
import torch
from torch import Tensor
from torch.utils.data import Dataset
import torchvision.transforms as transforms
from typing import Tuple
from src import constants
from src.model.config import Config, Input
from src.utils.split import Split
from src.utils.csv_info import STANDARDIZED_CSV_INFO
from src.utils.full_path import full_path
def _decode_non_mp3_file_like(file, new_sr):
    """Decode an audio file to a mono array, resampling when needed.

    Adapted from the HuggingFace `datasets` Audio feature:
    https://huggingface.co/docs/datasets/_modules/datasets/features/audio.html#Audio

    Args:
        file: path or file-like object readable by soundfile.
        new_sr: target sampling rate; falsy values keep the native rate.

    Returns:
        (samples, sampling_rate) tuple.
    """
    samples, sr = sf.read(file)
    # soundfile returns (frames, channels); librosa wants channels first.
    samples = librosa.to_mono(samples.T)
    if new_sr and new_sr != sr:
        samples = librosa.resample(
            samples,
            orig_sr=sr,
            target_sr=new_sr,
            res_type="kaiser_best"
        )
        sr = new_sr
    return samples, sr
def load_audio(file_path: str, sampling_rate: int) -> torch.Tensor:
    """Load an audio file as a float32 torch tensor at `sampling_rate`."""
    samples, _ = _decode_non_mp3_file_like(file_path, sampling_rate)
    return torch.from_numpy(np.float32(samples))
class MyCrop(torch.nn.Module):
    """Center-crop features to the configured sequence length.

    The crop width is the feature dimensionality of the configured
    input type (raw audio, MFCC, extended MFCC, or XLS-R features);
    the time axis is cropped to ``config.feat_seq_len``.
    """

    def __init__(self, config: Config) -> None:
        super().__init__()
        if config.input == Input.AUDIO:
            feat_dim = 1
        elif config.input == Input.MFCC:
            feat_dim = 40
        elif config.input == Input.MFCC_EXT:
            feat_dim = 120
        elif config.input == Input.XLSR:
            feat_dim = 1024
        else:
            # The original code left `center_crop` unbound here, producing a
            # confusing UnboundLocalError; fail fast with a clear message.
            raise ValueError(f"Unsupported input type: {config.input}")
        self.center_crop = transforms.CenterCrop((config.feat_seq_len, feat_dim))

    def forward(self, x):
        """Apply the configured center crop to `x`."""
        return self.center_crop(x)
class MyNormalize(torch.nn.Module):
    """Standardize features using a precomputed mean and variance."""

    def __init__(self, mean: torch.Tensor, var: torch.Tensor) -> None:
        super().__init__()
        self.mean = mean
        # Store the standard deviation so forward() is a single subtract/divide.
        self.std = torch.sqrt(var)

    def forward(self, x: torch.Tensor):
        """Return ``(x - mean) / sqrt(var)``."""
        return (x - self.mean) / self.std
def make_transform(
    config: Config,
    mean: torch.Tensor,
    var: torch.Tensor,
):
    """Compose the feature preprocessing pipeline: crop, then normalize."""
    steps = [MyCrop(config), MyNormalize(mean, var)]
    return transforms.Compose(steps)
class CsvDataset(Dataset):
    """Dataset of (features, normalized MOS) pairs described by a CSV file.

    Feature tensors are loaded lazily in ``__getitem__`` from the paths
    stored in the CSV; normalization statistics are taken from the
    `normalization_split` dataset's norm directory.
    """

    def __init__(self, config: Config, split: Split, normalization_split: Split, example: bool) -> None:
        super().__init__()
        self.config = config
        self.split = split
        self.normalization_split = normalization_split
        self.example = example
        # For printing...
        split_name = str(split).lower().split(".")[1]
        example_str = "(example) " if example else ""
        print(f"{example_str}Creating dataloader for {split_name} set.")
        # Select train, val or test dataset.
        dataset = constants.get_dataset(split, example)
        normalization_dataset = constants.get_dataset(normalization_split, example)
        file_name = str(config.input).lower().split(".")[1]  # audio, mfcc, ...
        # Load mean/var.
        mu_path = normalization_dataset.norm_dir.joinpath(f"{file_name}.mu.pt")
        var_path = normalization_dataset.norm_dir.joinpath(f"{file_name}.var.pt")
        if not mu_path.exists() or not var_path.exists():
            msg = f"Cannot find {file_name}.mu.pt and {file_name}.var.pt in {normalization_dataset.norm_dir}."
            raise Exception(msg)
        mean = torch.load(mu_path)
        var = torch.load(var_path)
        # Type to CSV column.
        types = {
            'audio': STANDARDIZED_CSV_INFO.col_audio_path,  # 1st column
            'mfcc': STANDARDIZED_CSV_INFO.col_mfcc_path,  # 2nd column
            'mfcc_ext': STANDARDIZED_CSV_INFO.col_mfcc_ext_path,  # 3rd column
            'xlsr': STANDARDIZED_CSV_INFO.col_xlsr_path,  # 4th column
        }
        col_path = types[config.input.name.lower()]
        # Load CSV.
        self.csv_data = []  # feature_path, norm_mos
        with open(dataset.csv_path, encoding="utf8", mode="r") as in_csv:
            csv_reader = csv.reader(in_csv)
            for idx, in_row in enumerate(csv_reader):
                # Skip header row.
                if idx == 0:
                    continue
                # Save feature_path, norm_mos
                file_path: str = in_row[col_path]
                norm_mos = torch.tensor(
                    float(in_row[STANDARDIZED_CSV_INFO.col_norm_mos]))
                self.csv_data.append([file_path, norm_mos])
        # Create transform.
        self.transform = make_transform(config, mean, var)

    def __len__(self):
        # One entry per (non-header) CSV row.
        return len(self.csv_data)

    def __getitem__(self, index) -> Tuple[Tensor, Tensor]:
        # Load features and convert to Tensor.
        file_path: str = self.csv_data[index][0]
        # NOTE(review): this checks `self.config.name`, while __init__ keys
        # off `config.input` -- confirm Config has a `name` attribute and
        # that "audio" is its value for raw-audio input.
        if self.config.name == "audio":
            features = load_audio(full_path(file_path), sampling_rate=16_000)
        else:
            # Non-audio features are stored as serialized tensors.
            features = torch.load(full_path(file_path))
        features = self.transform(features)
        norm_mos = self.csv_data[index][1]
        return (features, norm_mos)
| [
"torchvision.transforms.CenterCrop",
"torch.load",
"librosa.to_mono",
"torch.from_numpy",
"src.utils.full_path.full_path",
"librosa.resample",
"soundfile.read",
"csv.reader",
"numpy.float32",
"src.constants.get_dataset"
] | [((587, 600), 'soundfile.read', 'sf.read', (['file'], {}), '(file)\n', (594, 600), True, 'import soundfile as sf\n'), ((633, 655), 'librosa.to_mono', 'librosa.to_mono', (['array'], {}), '(array)\n', (648, 655), False, 'import librosa\n'), ((1074, 1091), 'numpy.float32', 'np.float32', (['array'], {}), '(array)\n', (1084, 1091), True, 'import numpy as np\n'), ((1103, 1126), 'torch.from_numpy', 'torch.from_numpy', (['array'], {}), '(array)\n', (1119, 1126), False, 'import torch\n'), ((715, 808), 'librosa.resample', 'librosa.resample', (['array'], {'orig_sr': 'sampling_rate', 'target_sr': 'new_sr', 'res_type': '"""kaiser_best"""'}), "(array, orig_sr=sampling_rate, target_sr=new_sr, res_type=\n 'kaiser_best')\n", (731, 808), False, 'import librosa\n'), ((2883, 2920), 'src.constants.get_dataset', 'constants.get_dataset', (['split', 'example'], {}), '(split, example)\n', (2904, 2920), False, 'from src import constants\n'), ((2953, 3004), 'src.constants.get_dataset', 'constants.get_dataset', (['normalization_split', 'example'], {}), '(normalization_split, example)\n', (2974, 3004), False, 'from src import constants\n'), ((3490, 3509), 'torch.load', 'torch.load', (['mu_path'], {}), '(mu_path)\n', (3500, 3509), False, 'import torch\n'), ((3524, 3544), 'torch.load', 'torch.load', (['var_path'], {}), '(var_path)\n', (3534, 3544), False, 'import torch\n'), ((1301, 1348), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(config.feat_seq_len, 1)'], {}), '((config.feat_seq_len, 1))\n', (1322, 1348), True, 'import torchvision.transforms as transforms\n'), ((4123, 4141), 'csv.reader', 'csv.reader', (['in_csv'], {}), '(in_csv)\n', (4133, 4141), False, 'import csv\n'), ((1417, 1465), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(config.feat_seq_len, 40)'], {}), '((config.feat_seq_len, 40))\n', (1438, 1465), True, 'import torchvision.transforms as transforms\n'), ((4936, 4956), 'src.utils.full_path.full_path', 'full_path', (['file_path'], {}), 
'(file_path)\n', (4945, 4956), False, 'from src.utils.full_path import full_path\n'), ((5028, 5048), 'src.utils.full_path.full_path', 'full_path', (['file_path'], {}), '(file_path)\n', (5037, 5048), False, 'from src.utils.full_path import full_path\n'), ((1538, 1587), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(config.feat_seq_len, 120)'], {}), '((config.feat_seq_len, 120))\n', (1559, 1587), True, 'import torchvision.transforms as transforms\n'), ((1656, 1706), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(config.feat_seq_len, 1024)'], {}), '((config.feat_seq_len, 1024))\n', (1677, 1706), True, 'import torchvision.transforms as transforms\n')] |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
import warnings
warnings.filterwarnings(action='once')
data = None;
matData = None;
def initData(csvName):
    """Load the raw test data and reset the material summary table.

    Args:
        csvName: path to a CSV file of raw test data.

    Side effects:
        Rebinds the module-level ``data`` and ``matData`` globals, which
        the other functions in this module read.
    """
    # Without this declaration the assignments below only create
    # function-local names and the module-level ``data`` / ``matData``
    # stay None (the original bug).
    global data, matData
    data = pd.read_csv(csvName)
    matData = pd.DataFrame(columns=['Name','Diameter','Length','Reduced Diamter','Area','Reduced Area','UTS','Elastic Modulus','Total Fail Strain','Plastic Strain Fail','Elastic Strain Fail','Offset Yield'])
def addMaterial(matInfo):
    """Append one row of material results to the global summary table.

    Rows shorter than the table's column count are rejected with a
    printed warning and no change to the table.
    """
    required = len(matData.columns)
    if len(matInfo) < required:
        print("Not enough entries in matInfo")
        return
    matData.loc[len(matData)] = matInfo
def area(diameter):
    """Cross-sectional area of a circle with the given diameter."""
    radius = diameter / 2
    return math.pi * radius ** 2
def findUTS(key):
    """Ultimate tensile strength: the peak value in column *key* of the global data."""
    column = data[key]
    return max(column)
def getYoungsModulus(stress,strain,plot=False):
    """Estimate Young's Modulus as the best linear fit over the curve.

    Searches every sub-range of at least `minFitLength` points within the
    first half of the (NaN-dropped) data for a linear fit, and keeps the
    fit with the smallest squared residual whose slope also exceeds the
    best slope seen so far.

    Args:
        stress, strain: array-like measured data (same length).
        plot: when True, plot the data and the chosen linear segment.

    Returns:
        The slope of the chosen fit, in the same units as *stress*.
    """
    # finds the Young's Modulus by finding the largest linear slope between 1/10 the number of data points
    # returns the Young's Modulus in the same units as the input stress
    dummyData = pd.DataFrame(data={'x':strain,'y':stress})
    dummyData.dropna(inplace=True)
    # Only the first half of the curve is searched (the elastic region).
    x=np.array(dummyData['x'][:int(len(dummyData['x'])/2)])
    y=np.array(dummyData['y'][:int(len(dummyData['x'])/2)])
    numPts = len(x)
    minFitLength = 8
    chi = 0
    chi_min = 10000
    i_best=0
    j_best=0
    m_best=0
    for i in range(numPts - minFitLength):
        for j in range(i+minFitLength, numPts):
            coefs = np.polyfit(x[i:j],y[i:j],1)
            y_lin = x * coefs[0] + coefs[1]
            chi=0
            # Squared residual over the fitted window only.
            for k in range(i,j):
                chi += (y_lin[k] - y[k])**2
            # NOTE(review): requiring BOTH a lower chi and a higher slope
            # can skip better fits with lower slopes -- confirm intended.
            if chi < chi_min and coefs[0] > m_best:
                i_best = i
                j_best = j
                chi_min = chi
                m_best = coefs[0]
    coefs = np.polyfit(x[i_best:j_best],y[i_best:j_best],1)
    y_lin = x[i_best:j_best] * coefs[0] + coefs[1]
    if(plot):
        plt.plot(x,y,'ro')
        plt.plot(x[i_best:j_best],y_lin,'b-')
    print("Young's Modulus (MPa): " + str(m_best))
    return m_best
def findElasticModulus(stressKey, strainKey):
    """Elastic modulus computed from the named columns of the global data."""
    return getYoungsModulus(data[stressKey], data[strainKey])
def getFailure(stress):
    """Locate the point of failure in a stress sequence.

    Scans for the largest decrease between stress[i-2] and stress[i]
    (the biggest downward jump) and returns the index two positions
    before that jump; returns -2 when no decrease is found.
    """
    biggest_drop = 0
    drop_index = 0
    for i in range(2, len(stress)):
        delta = stress[i] - stress[i - 2]
        if delta < 0 and abs(delta) > biggest_drop:
            biggest_drop = abs(delta)
            drop_index = i
    return drop_index - 2
def findFailure(stressKey, strainKey):
    """Strain value at the detected failure point of the named columns."""
    failure_index = getFailure(data[stressKey])
    return data[strainKey][failure_index]
def findPlasticElasticFailureStrain(stressKey, strainKey, elasticModulus, totFailStrain):
    """Split the total failure strain into elastic and plastic components.

    Args:
        stressKey, strainKey: column names in the global data.
        elasticModulus: elastic modulus, same units as the stress column.
        totFailStrain: total strain at failure.

    Returns:
        [elastic_strain_at_failure, plastic_strain_at_failure]
    """
    # The original called findFailure(data[stressKey]); findFailure takes
    # two column *keys* and returns a strain value, so the call raised a
    # TypeError (wrong arity) and would not yield an index anyway.
    # getFailure() is the helper that returns the failure index.
    failIndex = getFailure(data[stressKey])
    failStress = data[stressKey][failIndex]
    elasticStrain = failStress / elasticModulus
    return [elasticStrain, totFailStrain - elasticStrain]
def getYieldStress(strain, stress, offset, E):
    """Estimate the offset yield stress of a stress-strain curve.

    The yield stress is read where the measured curve comes closest to
    the offset line ``f(x) = E * (x - offset)`` (offset=0.002 gives the
    standard 0.2% offset yield).

    Args:
        strain, stress: array-like measured data (same length).
        offset: strain offset of the construction line.
        E: elastic modulus, same units as *stress*.

    Returns:
        The measured stress value nearest the offset line.
    """
    x = np.asarray(strain, dtype=float)
    y = np.asarray(stress, dtype=float)
    # Keep only physically meaningful points. The original applied
    # mismatched boolean masks (x[x > 0] then indexed by y > 0), which
    # fails for plain numpy arrays; combine the masks instead.
    mask = (x > 0) & (y > 0) & ~np.isnan(x) & ~np.isnan(y)
    x = x[mask]
    y = y[mask]
    # Restrict the search to the first half of the curve, as the original
    # did, so it stays near the elastic/plastic transition.
    half = len(x) // 2
    x, y = x[:half], y[:half]
    line = E * (x - offset)
    # The original tracked a closest index but then returned v[j] with the
    # leftover inner-loop variable -- i.e. always the LAST point of the
    # offset line -- and compared without abs(). Return the measured
    # stress nearest the offset line instead.
    idx = int(np.argmin(np.abs(y - line)))
    return y[idx]
def findYieldStress(stressKey, strainKey, elasticModulus, offset=.002):
    """Offset yield stress for the named columns of the global data set."""
    return getYieldStress(data[strainKey], data[stressKey], offset, elasticModulus)
def writeOut(fName):
    """Write one 4-line record (type, diameter, length, modulus) per material.

    Args:
        fName: path of the output text file (overwritten).
    """
    # NOTE(review): these column labels ('Type', 'Diameter (mm)', ...) do
    # not match the columns created in initData ('Name', 'Diameter', ...);
    # confirm which naming scheme matData actually uses.
    f = open(fName,'w')
    for i in range(matData.shape[0]):
        f.write(matData['Type'][i]+'\n')
        f.write(str(matData['Diameter (mm)'][i])+'\n')
        f.write(str(matData['Length (m)'][i])+'\n')
        f.write(str(matData["Young's Modulus (MPa)"][i])+'\n')
    f.close()
def plotData(stressKeys, strainKeys, names, totalFailureStrain, fName=None):
    """Plot stress-strain curves up to the total failure strain.

    Args:
        stressKeys, strainKeys: parallel lists of column names in the
            global data, one pair per curve.
        names: legend labels, one per curve.
        totalFailureStrain: strain value marking the end of each curve
            (matched exactly against the strain column).
        fName: when given, the figure is also saved to this path.
    """
    for i, (xKey, yKey) in enumerate(zip(strainKeys, stressKeys)):
        # The original `for i,xKey,yKey in enumerate(zip(...))` unpacked the
        # (index, pair) tuple into three names and raised a ValueError.
        x = data[xKey]
        y = data[yKey]
        index = 0
        for j in range(len(x)):
            # Exact float match; assumes totalFailureStrain was taken
            # from this same column.
            if x[j] == totalFailureStrain:
                index = j
        # np.array so column slicing works; the original indexed a plain
        # Python list with [:, 0], a TypeError.
        xy = np.array([[a, b] for k, (a, b) in enumerate(zip(x, y))
                  if a > 0 and b > 0 and k < index])
        plt.plot(xy[:, 0], xy[:, 1], label=names[i])
    plt.xlabel('Strain')
    plt.ylabel('Stress')
    plt.title('Stress-Strain Curve')
    plt.legend(loc=(1.05, .65))
    if fName is not None:
        plt.savefig(fName)
| [
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"numpy.polyfit",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"warnings.filterwarnings",
"matplotlib.pyplot.legend"
] | [((100, 138), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""once"""'}), "(action='once')\n", (123, 138), False, 'import warnings\n'), ((204, 224), 'pandas.read_csv', 'pd.read_csv', (['csvName'], {}), '(csvName)\n', (215, 224), True, 'import pandas as pd\n'), ((239, 451), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Name', 'Diameter', 'Length', 'Reduced Diamter', 'Area', 'Reduced Area',\n 'UTS', 'Elastic Modulus', 'Total Fail Strain', 'Plastic Strain Fail',\n 'Elastic Strain Fail', 'Offset Yield']"}), "(columns=['Name', 'Diameter', 'Length', 'Reduced Diamter',\n 'Area', 'Reduced Area', 'UTS', 'Elastic Modulus', 'Total Fail Strain',\n 'Plastic Strain Fail', 'Elastic Strain Fail', 'Offset Yield'])\n", (251, 451), True, 'import pandas as pd\n'), ((953, 998), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'x': strain, 'y': stress}"}), "(data={'x': strain, 'y': stress})\n", (965, 998), True, 'import pandas as pd\n'), ((1728, 1777), 'numpy.polyfit', 'np.polyfit', (['x[i_best:j_best]', 'y[i_best:j_best]', '(1)'], {}), '(x[i_best:j_best], y[i_best:j_best], 1)\n', (1738, 1777), True, 'import numpy as np\n'), ((3135, 3174), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'x': x_n, 'y': y_n}"}), "(data={'x': x_n, 'y': y_n})\n", (3147, 3174), True, 'import pandas as pd\n'), ((3369, 3393), 'numpy.linspace', 'np.linspace', (['(0)', '(0.2)', '(100)'], {}), '(0, 0.2, 100)\n', (3380, 3393), True, 'import numpy as np\n'), ((4587, 4607), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Strain"""'], {}), "('Strain')\n", (4597, 4607), True, 'import matplotlib.pyplot as plt\n'), ((4612, 4632), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Stress"""'], {}), "('Stress')\n", (4622, 4632), True, 'import matplotlib.pyplot as plt\n'), ((4637, 4669), 'matplotlib.pyplot.title', 'plt.title', (['"""Stress-Strain Curve"""'], {}), "('Stress-Strain Curve')\n", (4646, 4669), True, 'import matplotlib.pyplot as plt\n'), ((4674, 4702), 
'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(1.05, 0.65)'}), '(loc=(1.05, 0.65))\n', (4684, 4702), True, 'import matplotlib.pyplot as plt\n'), ((1850, 1870), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""ro"""'], {}), "(x, y, 'ro')\n", (1858, 1870), True, 'import matplotlib.pyplot as plt\n'), ((1877, 1916), 'matplotlib.pyplot.plot', 'plt.plot', (['x[i_best:j_best]', 'y_lin', '"""b-"""'], {}), "(x[i_best:j_best], y_lin, 'b-')\n", (1885, 1916), True, 'import matplotlib.pyplot as plt\n'), ((4542, 4586), 'matplotlib.pyplot.plot', 'plt.plot', (['xy[:, 0]', 'xy[:, 1]'], {'label': 'names[i]'}), '(xy[:, 0], xy[:, 1], label=names[i])\n', (4550, 4586), True, 'import matplotlib.pyplot as plt\n'), ((4731, 4749), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fName'], {}), '(fName)\n', (4742, 4749), True, 'import matplotlib.pyplot as plt\n'), ((1378, 1407), 'numpy.polyfit', 'np.polyfit', (['x[i:j]', 'y[i:j]', '(1)'], {}), '(x[i:j], y[i:j], 1)\n', (1388, 1407), True, 'import numpy as np\n')] |
import numpy as np
def _check_inverse(coeffs):
det = np.linalg.det(coeffs)
#import ipdb; ipdb.set_trace()
if np.isclose(det, 0.0):
raise ZeroDivisionError
def _matrix_sanity(coeffs):
assert(coeffs.ndim == 2)#, 'Input matrix must be 2 dimensional')
assert(coeffs.shape[0]+1 == coeffs.shape[1])
def _load_coefficients(coefficients_file):
    """Read an augmented coefficient matrix from a whitespace-delimited file.

    Each token is evaluated as a Python expression (e.g. ``1/2``) and the
    result is returned as a numpy matrix of shape (n, n+1).
    """
    coeffs = np.genfromtxt(coefficients_file, dtype=str)
    _matrix_sanity(coeffs)
    # SECURITY: eval() executes arbitrary Python read from the input file;
    # only load coefficient files from trusted sources.
    coeffs = np.matrix([[eval(x) for x in row] for row in coeffs])
    return coeffs
def _check_matrices(coeffs, consts):
assert(coeffs.shape[0] == coeffs.shape[1])
assert(coeffs.shape[0] == consts.shape[0])
assert(consts.shape[1] == 1)
def _list_to_matrix(coefficients):
    """Convert a list of equal-length rows into a validated numpy matrix."""
    first_len = len(coefficients[0])
    for row in coefficients:
        assert len(row) == first_len
    coeffs = np.matrix(coefficients)
    _matrix_sanity(coeffs)
    return coeffs
def handle_input(coefficients):
    """Normalize supported input types into a validated numpy matrix.

    Accepts a filename (str), a list of rows, a numpy matrix, or a numpy
    ndarray; anything else raises TypeError.
    """
    if isinstance(coefficients, str):
        return _load_coefficients(coefficients)
    if isinstance(coefficients, list):
        return _list_to_matrix(coefficients)
    if isinstance(coefficients, np.matrix):
        _matrix_sanity(coefficients)
        return coefficients
    if isinstance(coefficients, np.ndarray):
        _matrix_sanity(coefficients)
        return np.matrix(coefficients)
    # The original `raise(TypeError, '...')` raised a tuple, which itself
    # produces "exceptions must derive from BaseException" and hides the
    # real message; raise a proper exception instead.
    raise TypeError('Unsupported input type.')
def solve_linear_system(coefficients):
    """Solve A x = B given an augmented matrix [A | B].

    Accepts any input handle_input() supports. Returns the solution
    column vector, or None (with a printed message) when A is singular.
    """
    augmented = handle_input(coefficients)
    A = augmented[:, :-1]
    B = augmented[:, -1].reshape((-1, 1))
    _check_matrices(A, B)
    try:
        _check_inverse(A)
    except ZeroDivisionError:
        print('Determinant of coefficients matrix is 0. No unique solution.')
        return None
    return A.I * B
| [
"numpy.matrix",
"numpy.isclose",
"numpy.genfromtxt",
"numpy.linalg.det"
] | [((59, 80), 'numpy.linalg.det', 'np.linalg.det', (['coeffs'], {}), '(coeffs)\n', (72, 80), True, 'import numpy as np\n'), ((123, 143), 'numpy.isclose', 'np.isclose', (['det', '(0.0)'], {}), '(det, 0.0)\n', (133, 143), True, 'import numpy as np\n'), ((381, 424), 'numpy.genfromtxt', 'np.genfromtxt', (['coefficients_file'], {'dtype': 'str'}), '(coefficients_file, dtype=str)\n', (394, 424), True, 'import numpy as np\n'), ((825, 848), 'numpy.matrix', 'np.matrix', (['coefficients'], {}), '(coefficients)\n', (834, 848), True, 'import numpy as np\n'), ((1309, 1332), 'numpy.matrix', 'np.matrix', (['coefficients'], {}), '(coefficients)\n', (1318, 1332), True, 'import numpy as np\n')] |
'''
SVM2+
'''
# Author: <NAME> <<EMAIL>>
import numpy as np
import utils
from sklearn.base import BaseEstimator
from sklearn.svm import SVC
from sklearn.metrics.pairwise import (rbf_kernel,
linear_kernel,
polynomial_kernel,
sigmoid_kernel)
class SVC2Plus(BaseEstimator):
    '''
    SVM2+ for binary classification.

    This implementation is based on scikit-learn's `SVC` class.

    Parameters
    ----------
    lmbda : float
        Regularization parameter for the correcting (privileged) space.
    C : float
        Regularization parameter for the decision space.
    decision_kernel : 'rbf', 'linear', 'poly' or 'sigmoid'
        Kernel in the decision space.
    correcting_kernel : 'rbf', 'linear', 'poly' or 'sigmoid'
        Kernel in the correcting space.

    All other parameters are identical to those of sklearn.svm.SVC.

    Attributes
    ----------
    _positive_class_target : int
        Target value to denote membership in the positive class.
    _negative_class_target : int
        Target value to denote membership in the negative class.

    All other attributes are identical to those of sklearn.svm.SVC.

    References
    ----------
    Xu et al. (2016). Simple and Efficient Learning using Privileged
    Information.
    '''
    def __init__(self, lmbda=1.0, C=1.0, decision_kernel='rbf',
                 correcting_kernel='rbf', degree=3, gamma='auto',
                 coef0=0.0, shrinking=True, probability=False, tol=0.001,
                 cache_size=200, class_weight=None, verbose=False, max_iter=-1,
                 decision_function_shape='ovr', random_state=None):
        # sklearn estimator convention: __init__ only stores its arguments.
        self.lmbda = lmbda
        self.C = C
        self.decision_kernel = decision_kernel
        self.correcting_kernel = correcting_kernel
        self.degree = degree
        self.gamma = gamma
        self.coef0 = coef0
        self.shrinking = shrinking
        self.probability = probability
        self.tol = tol
        self.cache_size = cache_size
        self.class_weight = class_weight
        self.verbose = verbose
        self.max_iter = max_iter
        self.decision_function_shape = decision_function_shape
        self.random_state = random_state
        # Fitted attributes; populated by fit().
        self.support_ = None
        self.support_vectors_ = None
        self.n_support_ = None
        self.dual_coef_ = None
        self.intercept_ = None
        # Because SVM2+ relies on the positive and negative class target
        # values being 1 and -1, respectively, it is good to store these.
        self._positive_class_target = None
        self._negative_class_target = None

    def svm2plus_kernel(self, X, y, Z):
        '''
        Computes kernel (a.k.a. Gram matrix) for SVM2+, assuming a squared
        hinge loss. For more information, see Xu et al. (2016).

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Design matrix to compute SVM2+ kernel matrix for. If we wish to use
            a basis function (e.g. rbf, poly), we must transform X _prior_ to
            passing it to this function!
        Z : array-like, shape (n_samples, n_privileged_features)
            Privileged information.
        y : array-like, shape (n_samples, )
            Correct targets.
        '''
        n = X.shape[0]
        # Flatten trailing dimensions so each sample is a row vector.
        X = X.reshape(X.shape[0], -1)
        Z = Z.reshape(Z.shape[0], -1)
        y, _, _ = utils.binarize_targets(y)
        # Gram matrix in the decision space.
        if self.decision_kernel == 'rbf':
            K = rbf_kernel(X, X, gamma=self.gamma)
        elif self.decision_kernel == 'linear':
            K = linear_kernel(X, X)
        elif self.decision_kernel == 'poly':
            K = polynomial_kernel(X, X,
                                  degree=self.degree,
                                  gamma=self.gamma,
                                  coef0=self.coef0)
        elif self.decision_kernel == 'sigmoid':
            K = sigmoid_kernel(X, X,
                               gamma=self.gamma,
                               coef0=self.coef0)
        else:
            raise ValueError('''Kernel in decision space must be one of 'rbf',
                             'linear', 'poly' or 'sigmoid'.''')
        # Gram matrix in the correcting (privileged-information) space.
        if self.correcting_kernel == 'rbf':
            K_tilde = rbf_kernel(Z, Z, gamma=self.gamma)
        elif self.correcting_kernel == 'linear':
            K_tilde = linear_kernel(Z, Z)
        elif self.correcting_kernel == 'poly':
            K_tilde = polynomial_kernel(Z, Z,
                                        degree=self.degree,
                                        gamma=self.gamma,
                                        coef0=self.coef0)
        elif self.correcting_kernel == 'sigmoid':
            K_tilde = sigmoid_kernel(Z, Z,
                                     gamma=self.gamma,
                                     coef0=self.coef0)
        else:
            raise ValueError('''Kernel in correcting space must be one of 'rbf',
                             'linear', 'poly' or 'sigmoid'.''')
        # Correcting-space term from Xu et al. (2016).
        Q_lmbda = 1/self.lmbda * (K_tilde
                                  - np.dot(
                                      np.dot(K_tilde,
                                             np.linalg.inv(
                                                 (self.lmbda/self.C)
                                                 * np.identity(n)
                                                 + K_tilde)),
                                      K_tilde))
        # Combine with the label outer product y y^T, element-wise.
        return K + np.multiply(Q_lmbda,
                                np.outer(y, y.T))

    def fit(self, X, y, Z):
        '''
        Fits SVM2+ model to data.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples, )
            Targets of training data.
        Z : array-like, shape (n_samples, n_privileged_features)
            Privileged information.
        '''
        X = X.reshape(X.shape[0], -1)
        Z = Z.reshape(Z.shape[0], -1)
        # Remember the original class labels so predict() can map back.
        y_binarized, self._positive_class_target, \
            self._negative_class_target = utils.binarize_targets(y)
        if self.gamma == 'auto':
            self.gamma = 1.0 / X.shape[1]
        else:
            self.gamma = self.gamma
        # The parameters `degree`, `gamma` and `coef0` are irrelevant here, as
        # the kernel is precomputed, and is not linear, poly, sigmoid or rbf.
        clf = SVC(C=self.C, kernel='precomputed', shrinking=self.shrinking,
                  probability=self.probability, tol=self.tol,
                  cache_size=self.cache_size, class_weight=self.class_weight,
                  verbose=self.verbose, max_iter=self.max_iter,
                  decision_function_shape=self.decision_function_shape,
                  random_state=self.random_state)
        # Pass in precomputed Gram matrix, not data
        clf.fit(self.svm2plus_kernel(X, y_binarized, Z), y)
        # Copy the fitted state so this estimator is self-contained.
        self.support_ = clf.support_
        self.support_vectors_ = X[clf.support_]
        self.n_support_ = clf.n_support_
        self.dual_coef_ = clf.dual_coef_
        self.intercept_ = clf.intercept_

    def predict(self, X):
        '''
        Classifies new samples using the fitted SVM2+ coefficients
        (`dual_coef_`, `support_vectors_` and `intercept_` set by fit()).

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New samples to classify.
        '''
        X = X.reshape(X.shape[0], -1)
        # Kernel between new samples and the stored support vectors.
        if self.decision_kernel == 'rbf':
            kernel = rbf_kernel(X, self.support_vectors_, gamma=self.gamma)
        elif self.decision_kernel == 'linear':
            kernel = linear_kernel(X, self.support_vectors_)
        elif self.decision_kernel == 'poly':
            kernel = polynomial_kernel(X, self.support_vectors_,
                                       degree=self.degree,
                                       gamma=self.gamma,
                                       coef0=self.coef0)
        elif self.decision_kernel == 'sigmoid':
            kernel = sigmoid_kernel(X, self.support_vectors_,
                                    gamma=self.gamma,
                                    coef0=self.coef0)
        else:
            raise ValueError('''Kernel in decision space must be one of 'rbf',
                             'linear', 'poly' or 'sigmoid'.''')
        # Decision values, then sign, then map back to the original labels.
        predictions = np.dot(self.dual_coef_, kernel.T) + self.intercept_
        predictions = np.sign(predictions).flatten()
        predictions = utils.unbinarize_targets(predictions,
                                               self._positive_class_target,
                                               self._negative_class_target)
        return predictions
| [
"numpy.identity",
"sklearn.metrics.pairwise.sigmoid_kernel",
"sklearn.metrics.pairwise.rbf_kernel",
"sklearn.metrics.pairwise.polynomial_kernel",
"utils.unbinarize_targets",
"numpy.dot",
"numpy.outer",
"numpy.sign",
"sklearn.metrics.pairwise.linear_kernel",
"utils.binarize_targets",
"sklearn.svm... | [((3514, 3539), 'utils.binarize_targets', 'utils.binarize_targets', (['y'], {}), '(y)\n', (3536, 3539), False, 'import utils\n'), ((6224, 6249), 'utils.binarize_targets', 'utils.binarize_targets', (['y'], {}), '(y)\n', (6246, 6249), False, 'import utils\n'), ((6548, 6863), 'sklearn.svm.SVC', 'SVC', ([], {'C': 'self.C', 'kernel': '"""precomputed"""', 'shrinking': 'self.shrinking', 'probability': 'self.probability', 'tol': 'self.tol', 'cache_size': 'self.cache_size', 'class_weight': 'self.class_weight', 'verbose': 'self.verbose', 'max_iter': 'self.max_iter', 'decision_function_shape': 'self.decision_function_shape', 'random_state': 'self.random_state'}), "(C=self.C, kernel='precomputed', shrinking=self.shrinking, probability=\n self.probability, tol=self.tol, cache_size=self.cache_size,\n class_weight=self.class_weight, verbose=self.verbose, max_iter=self.\n max_iter, decision_function_shape=self.decision_function_shape,\n random_state=self.random_state)\n", (6551, 6863), False, 'from sklearn.svm import SVC\n'), ((8859, 8959), 'utils.unbinarize_targets', 'utils.unbinarize_targets', (['predictions', 'self._positive_class_target', 'self._negative_class_target'], {}), '(predictions, self._positive_class_target, self.\n _negative_class_target)\n', (8883, 8959), False, 'import utils\n'), ((3599, 3633), 'sklearn.metrics.pairwise.rbf_kernel', 'rbf_kernel', (['X', 'X'], {'gamma': 'self.gamma'}), '(X, X, gamma=self.gamma)\n', (3609, 3633), False, 'from sklearn.metrics.pairwise import rbf_kernel, linear_kernel, polynomial_kernel, sigmoid_kernel\n'), ((4367, 4401), 'sklearn.metrics.pairwise.rbf_kernel', 'rbf_kernel', (['Z', 'Z'], {'gamma': 'self.gamma'}), '(Z, Z, gamma=self.gamma)\n', (4377, 4401), False, 'from sklearn.metrics.pairwise import rbf_kernel, linear_kernel, polynomial_kernel, sigmoid_kernel\n'), ((7887, 7941), 'sklearn.metrics.pairwise.rbf_kernel', 'rbf_kernel', (['X', 'self.support_vectors_'], {'gamma': 'self.gamma'}), '(X, self.support_vectors_, 
gamma=self.gamma)\n', (7897, 7941), False, 'from sklearn.metrics.pairwise import rbf_kernel, linear_kernel, polynomial_kernel, sigmoid_kernel\n'), ((8731, 8764), 'numpy.dot', 'np.dot', (['self.dual_coef_', 'kernel.T'], {}), '(self.dual_coef_, kernel.T)\n', (8737, 8764), True, 'import numpy as np\n'), ((3697, 3716), 'sklearn.metrics.pairwise.linear_kernel', 'linear_kernel', (['X', 'X'], {}), '(X, X)\n', (3710, 3716), False, 'from sklearn.metrics.pairwise import rbf_kernel, linear_kernel, polynomial_kernel, sigmoid_kernel\n'), ((4473, 4492), 'sklearn.metrics.pairwise.linear_kernel', 'linear_kernel', (['Z', 'Z'], {}), '(Z, Z)\n', (4486, 4492), False, 'from sklearn.metrics.pairwise import rbf_kernel, linear_kernel, polynomial_kernel, sigmoid_kernel\n'), ((5642, 5658), 'numpy.outer', 'np.outer', (['y', 'y.T'], {}), '(y, y.T)\n', (5650, 5658), True, 'import numpy as np\n'), ((8010, 8049), 'sklearn.metrics.pairwise.linear_kernel', 'linear_kernel', (['X', 'self.support_vectors_'], {}), '(X, self.support_vectors_)\n', (8023, 8049), False, 'from sklearn.metrics.pairwise import rbf_kernel, linear_kernel, polynomial_kernel, sigmoid_kernel\n'), ((8806, 8826), 'numpy.sign', 'np.sign', (['predictions'], {}), '(predictions)\n', (8813, 8826), True, 'import numpy as np\n'), ((3778, 3857), 'sklearn.metrics.pairwise.polynomial_kernel', 'polynomial_kernel', (['X', 'X'], {'degree': 'self.degree', 'gamma': 'self.gamma', 'coef0': 'self.coef0'}), '(X, X, degree=self.degree, gamma=self.gamma, coef0=self.coef0)\n', (3795, 3857), False, 'from sklearn.metrics.pairwise import rbf_kernel, linear_kernel, polynomial_kernel, sigmoid_kernel\n'), ((4562, 4641), 'sklearn.metrics.pairwise.polynomial_kernel', 'polynomial_kernel', (['Z', 'Z'], {'degree': 'self.degree', 'gamma': 'self.gamma', 'coef0': 'self.coef0'}), '(Z, Z, degree=self.degree, gamma=self.gamma, coef0=self.coef0)\n', (4579, 4641), False, 'from sklearn.metrics.pairwise import rbf_kernel, linear_kernel, polynomial_kernel, 
sigmoid_kernel\n'), ((8116, 8220), 'sklearn.metrics.pairwise.polynomial_kernel', 'polynomial_kernel', (['X', 'self.support_vectors_'], {'degree': 'self.degree', 'gamma': 'self.gamma', 'coef0': 'self.coef0'}), '(X, self.support_vectors_, degree=self.degree, gamma=self.\n gamma, coef0=self.coef0)\n', (8133, 8220), False, 'from sklearn.metrics.pairwise import rbf_kernel, linear_kernel, polynomial_kernel, sigmoid_kernel\n'), ((4024, 4080), 'sklearn.metrics.pairwise.sigmoid_kernel', 'sigmoid_kernel', (['X', 'X'], {'gamma': 'self.gamma', 'coef0': 'self.coef0'}), '(X, X, gamma=self.gamma, coef0=self.coef0)\n', (4038, 4080), False, 'from sklearn.metrics.pairwise import rbf_kernel, linear_kernel, polynomial_kernel, sigmoid_kernel\n'), ((4834, 4890), 'sklearn.metrics.pairwise.sigmoid_kernel', 'sigmoid_kernel', (['Z', 'Z'], {'gamma': 'self.gamma', 'coef0': 'self.coef0'}), '(Z, Z, gamma=self.gamma, coef0=self.coef0)\n', (4848, 4890), False, 'from sklearn.metrics.pairwise import rbf_kernel, linear_kernel, polynomial_kernel, sigmoid_kernel\n'), ((8402, 8478), 'sklearn.metrics.pairwise.sigmoid_kernel', 'sigmoid_kernel', (['X', 'self.support_vectors_'], {'gamma': 'self.gamma', 'coef0': 'self.coef0'}), '(X, self.support_vectors_, gamma=self.gamma, coef0=self.coef0)\n', (8416, 8478), False, 'from sklearn.metrics.pairwise import rbf_kernel, linear_kernel, polynomial_kernel, sigmoid_kernel\n'), ((5445, 5459), 'numpy.identity', 'np.identity', (['n'], {}), '(n)\n', (5456, 5459), True, 'import numpy as np\n')] |
import prona2019Mod.utils as utils
import itertools as it
from six import iteritems, string_types, PY2, next
import numpy as np
import sys
def _is_single(obj):
"""
Check whether `obj` is a single document or an entire corpus.
Returns (is_single, new) 2-tuple, where `new` yields the same
sequence as `obj`.
`obj` is a single document if it is an iterable of strings. It
is a corpus if it is an iterable of documents.
"""
obj_iter = iter(obj)
temp_iter = obj_iter
try:
peek = next(obj_iter)
obj_iter = it.chain([peek], obj_iter)
except StopIteration:
# An empty object is a single document
return True, obj
if isinstance(peek, string_types):
# It's a document, return the iterator
return True, obj_iter
if temp_iter == obj:
# Checking for iterator to the object
return False, obj_iter
else:
# If the first item isn't a string, assume obj is a corpus
return False, obj
'''
def _apply(corpus, chunksize=None, **kwargs):
"""Apply the transformation to a whole corpus and get the result as another corpus.
Parameters
----------
corpus : iterable of list of (int, number)
Corpus in BoW format.
chunksize : int, optional
If provided - more effective processing (by group of documents) will performed.
kwargs
Arbitrary keyword arguments.
Returns
-------
:class:`~gensim.interfaces.TransformedCorpus`
Transformed corpus.
"""
return TransformedCorpus(self, corpus, chunksize, **kwargs)
'''
def score_item(worda, wordb, components, scorer, phrasegrams):
"""score is retained from original dataset
"""
try:
return phrasegrams[tuple(components)][1]
except KeyError:
return -1
def analyze_sentence(sentence, threshold, common_terms, scorer,phrasegrams):
"""Analyze a sentence
`sentence` a token list representing the sentence to be analyzed.
`threshold` the minimum score for a bigram to be taken into account
`common_terms` the list of common terms, they have a special treatment
`scorer` the scorer function, as given to Phrases
"""
s = [utils.any2utf8(w) for w in sentence]
last_uncommon = None
in_between = []
# adding None is a trick that helps getting an automatic happy ending
# has it won't be a common_word, nor score
for word in s + [None]:
is_common = word in common_terms
if not is_common and last_uncommon:
chain = [last_uncommon] + in_between + [word]
# test between last_uncommon
score = score_item(
worda=last_uncommon,
wordb=word,
components=chain,
scorer=scorer,
phrasegrams=phrasegrams
)
if score > threshold:
yield (chain, score)
last_uncommon = None
in_between = []
else:
# release words individually
for w in it.chain([last_uncommon], in_between):
yield (w, None)
in_between = []
last_uncommon = word
elif not is_common:
last_uncommon = word
else: # common term
if last_uncommon:
# wait for uncommon resolution
in_between.append(word)
else:
yield (word, None)
def get_phrase(sentence,phrase_model):
is_single, sentence = _is_single(sentence)
if not is_single:
# if the input is an entire corpus (rather than a single sentence),
# return an iterable stream.
sys.exit("It is not a protein sequence")
delimiter = phrase_model['delimiter']
bigrams = analyze_sentence(
sentence,
threshold=phrase_model['threshold'],
common_terms=phrase_model['common_terms'],
scorer=None,
phrasegrams=phrase_model['phrasegrams']) # we will use our score_item function redefinition
new_s = []
for words, score in bigrams:
if score is not None:
words = delimiter.join(words)
new_s.append(words)
return [utils.to_unicode(w) for w in new_s]
def split_ngrams(seq, n):
"""
'AGAMQSASM' => [['AGA', 'MQS', 'ASM'], ['GAM','QSA'], ['AMQ', 'SAS']]
"""
all_ngrams=[]
for x in range(n):
all_ngrams.append(zip(*[iter(seq[x:])]*n))
str_ngrams = []
for ngrams in all_ngrams:
x = []
for ngram in ngrams:
x.append("".join(ngram))
str_ngrams.append(x)
return str_ngrams
def to_vecs(seq,phrase_model,kmer,word2vec_index):
"""
convert sequence to three n-length vectors
e.g. 'AGAMQSASM' => [ array([ ... * 100 ], array([ ... * 100 ], array([ ... * 100 ] ]
"""
ngram_patterns = split_ngrams(seq, kmer)
protvecs = []
for ngrams in ngram_patterns:
ngram_vecs = []
if phrase_model=='none':
ngramss = ngrams
else:
ngramss=get_phrase(get_phrase(ngrams,phrase_model),phrase_model)
for ngram in ngramss:
try:
ngram_vecs.append(np.array(word2vec_index[ngram]))
except KeyError:
continue
protvecs.append(sum(ngram_vecs))
return protvecs
| [
"itertools.chain",
"prona2019Mod.utils.to_unicode",
"numpy.array",
"prona2019Mod.utils.any2utf8",
"sys.exit",
"six.next"
] | [((527, 541), 'six.next', 'next', (['obj_iter'], {}), '(obj_iter)\n', (531, 541), False, 'from six import iteritems, string_types, PY2, next\n'), ((561, 587), 'itertools.chain', 'it.chain', (['[peek]', 'obj_iter'], {}), '([peek], obj_iter)\n', (569, 587), True, 'import itertools as it\n'), ((2215, 2232), 'prona2019Mod.utils.any2utf8', 'utils.any2utf8', (['w'], {}), '(w)\n', (2229, 2232), True, 'import prona2019Mod.utils as utils\n'), ((3732, 3772), 'sys.exit', 'sys.exit', (['"""It is not a protein sequence"""'], {}), "('It is not a protein sequence')\n", (3740, 3772), False, 'import sys\n'), ((4296, 4315), 'prona2019Mod.utils.to_unicode', 'utils.to_unicode', (['w'], {}), '(w)\n', (4312, 4315), True, 'import prona2019Mod.utils as utils\n'), ((3074, 3111), 'itertools.chain', 'it.chain', (['[last_uncommon]', 'in_between'], {}), '([last_uncommon], in_between)\n', (3082, 3111), True, 'import itertools as it\n'), ((5301, 5332), 'numpy.array', 'np.array', (['word2vec_index[ngram]'], {}), '(word2vec_index[ngram])\n', (5309, 5332), True, 'import numpy as np\n')] |
"""
Judge
suit: 最好的一组5张牌
cards: 手牌
Card: size=2数组表示(kind, digit)
"""
from collections import defaultdict
import enum
import numpy as np
from .poker import PokerDigit, PokerKind, PokerCard
class TexasLevel(enum.IntEnum):
# 皇家同花顺 和 同花顺 可以一起比较
straight_flush = 9 # 同花顺
four = 8 # 4条
full_house = 7 # 葫芦(3 + 2)
flush = 6 # 同花
straight = 5 # 顺子
three = 4 # 3条
two_pairs = 3 # 两对
pair = 2 # 对子
high_card = 1 # 高牌
unknown = 0
class TexasJudge(object):
SUIT_SIZE = 5
MAX_CARD_SIZE = 7
def __init__(self, is_debug=False):
self.is_debug = is_debug
def argmax(self, card_lists):
if not card_lists:
return []
return self._arg_max([self._get_level_suit(cs) for cs in card_lists])
def rank(self, card_lists):
if not card_lists:
return []
return self._rank([self._get_level_suit(cs) for cs in card_lists])
def _arg_max(self, level_best_suits):
"""
:param level_best_suits: size >= 1
:return: best indexes
"""
if self.is_debug:
for l, suit in level_best_suits:
print(l, end=" ")
for j in range(len(suit)):
print(PokerCard.short_format(*suit[j]), end=" ")
print("\t", end=" ")
best_indexes = [0]
best_level, best_suit = level_best_suits[0]
for i in range(1, len(level_best_suits)):
level, suit = level_best_suits[i]
if level < best_level:
continue
if level > best_level:
best_level, best_suit = level, suit
best_indexes = [i]
continue
worse = False
better = False
for j in range(len(best_suit)):
if j >= len(suit):
print("error")
if suit[j][1] < best_suit[j][1]:
worse = True
break
elif suit[j][1] > best_suit[j][1]:
better = True
break
if worse:
continue
if better:
best_level, best_suit = level, suit
best_indexes = [i]
continue
best_indexes.append(i)
if self.is_debug:
print(best_indexes)
return best_indexes
def _rank(self, level_best_suits):
# level + five cards, packed together as hex
packed_results = np.zeros(len(level_best_suits))
for n, (level, cards) in enumerate(level_best_suits):
result = level
for m, c in enumerate(cards):
result = (result << 4) + c[1]
packed_results[n] = result
if self.is_debug:
for n, packed in enumerate(packed_results):
print(level_best_suits[n][0], "%X" % packed)
indexes = np.argsort(packed_results)
ranks = np.zeros(len(level_best_suits), dtype=int)
rank_level = 0
last_repr = None
for index in indexes[::-1]:
if last_repr is not None and packed_results[index] != last_repr:
rank_level += 1
ranks[index] = rank_level
last_repr = packed_results[index]
if self.is_debug:
print(' '.join(str(l) for l in rank_level))
return ranks
@staticmethod
def _select_suit(cards, first_digit, second_digit, left_num):
suit = []
first_cards = []
second_cards = []
for card in sorted(cards, key=lambda x: x[1], reverse=True):
if card[1] == first_digit:
first_cards.append(card)
continue
if card[1] == second_digit:
second_cards.append(card)
continue
if len(suit) < left_num:
suit.append(card)
suit = first_cards + second_cards + suit
if len(suit) > TexasJudge.SUIT_SIZE: # 3 + 3
suit = suit[:TexasJudge.SUIT_SIZE]
return suit
def _check_flush(self, cards):
# check straight & flush (except straight it self)
best_level = TexasLevel.unknown
best_flush = None
last_kind = PokerKind.unknown
last_digit = PokerDigit.unknown
straight_flush_cnt = 0
flush_cnt = 0
ace = None
cards = list(sorted(cards, key=lambda c: (c[0] << 4) + c[1], reverse=True))
for index, (kind, digit) in enumerate(cards):
if kind != last_kind:
flush_cnt = 1
straight_flush_cnt = 1
last_digit = digit
last_kind = kind
if digit == PokerDigit.A:
ace = cards[index]
else:
ace = None
continue
flush_cnt += 1
if digit == last_digit - 1:
straight_flush_cnt += 1
else:
straight_flush_cnt = 1
if digit == PokerDigit.A:
ace = cards[index]
last_digit = digit
if flush_cnt >= self.SUIT_SIZE:
if straight_flush_cnt == self.SUIT_SIZE:
# the first and the best
best_level = TexasLevel.straight_flush
best_flush = cards[index + 1 - self.SUIT_SIZE: index + 1]
break
elif straight_flush_cnt == self.SUIT_SIZE - 1 and ace is not None and last_digit == 2:
# the last and the best
best_level = TexasLevel.straight_flush
best_flush = [*(cards[index + 2 - self.SUIT_SIZE: index + 1]), ace]
break
elif best_level < TexasLevel.flush:
# might find straight flush later
best_level = TexasLevel.flush
best_flush = cards[index + 1 - self.SUIT_SIZE: index + 1]
else:
continue
return best_level, best_flush
def _check_straight(self, cards):
straight_cnt = 0
cards = list(sorted(cards, key=lambda x: x[1], reverse=True))
last_digit = PokerDigit.unknown
suit = []
ace = None
for index, c in enumerate(cards):
digit = c[1]
if digit == PokerDigit.A:
ace = c
if digit == last_digit - 1:
straight_cnt += 1
suit.append(c)
elif digit == last_digit:
pass
else:
straight_cnt = 1
suit.clear()
suit.append(c)
last_digit = digit
if straight_cnt == self.SUIT_SIZE:
return TexasLevel.straight, suit
if straight_cnt == self.SUIT_SIZE - 1 and last_digit == 2 and ace is not None:
straight_cnt += 1
suit.append(ace)
return TexasLevel.straight, suit
return TexasLevel.unknown, None
def _check_nums(self, cards):
# check others
digit_counts = defaultdict(int)
for _, digit in cards:
digit_counts[digit] += 1
digit_counts = list(sorted(digit_counts.items(), key=lambda dc: dc[1] * 100 + dc[0], reverse=True))
if digit_counts[0][1] >= 4: # four
return TexasLevel.four, self._select_suit(cards, digit_counts[0][0], None, 1)
if digit_counts[0][1] == 3 and len(digit_counts) >= 2 and digit_counts[1][1] >= 2: # full house
return TexasLevel.full_house, self._select_suit(cards, digit_counts[0][0], digit_counts[1][0], 0)
if digit_counts[0][1] == 3: # three
return TexasLevel.three, self._select_suit(cards, digit_counts[0][0], None, 2)
if digit_counts[0][1] == 2 and len(digit_counts) >= 2 and digit_counts[1][1] == 2: # two pairs
return TexasLevel.two_pairs, self._select_suit(cards, digit_counts[0][0], digit_counts[1][0], 1)
if digit_counts[0][1] == 2: # pair
return TexasLevel.pair, self._select_suit(cards, digit_counts[0][0], None, 3)
return TexasLevel.high_card, self._select_suit(cards, None, None, 5)
def _get_level_suit(self, cards):
assert len(cards) <= self.MAX_CARD_SIZE
best_level, best_suit = self._check_flush(cards)
if best_level == TexasLevel.straight_flush:
return best_level, best_suit
num_level, num_suit = self._check_nums(cards)
if num_level > best_level:
best_level, best_suit = num_level, num_suit
if best_level > TexasLevel.straight:
return best_level, best_suit
straight_level, straight_suit = self._check_straight(cards)
if straight_level > best_level:
best_level, best_suit = straight_level, straight_suit
return best_level, best_suit
| [
"numpy.argsort",
"collections.defaultdict"
] | [((2997, 3023), 'numpy.argsort', 'np.argsort', (['packed_results'], {}), '(packed_results)\n', (3007, 3023), True, 'import numpy as np\n'), ((7192, 7208), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (7203, 7208), False, 'from collections import defaultdict\n')] |
import sys
import os; os.umask(7) # group permisions but that's all
import os.path as osp
import pdb
import json
import tqdm
import numpy as np
import torch
import torch.nn.functional as F
from dirtorch.utils.convenient import mkdir
from dirtorch.utils import common
from dirtorch.utils.pytorch_loader import get_loader
import dirtorch.test_dir as test
import dirtorch.nets as nets
import dirtorch.datasets as datasets
import pickle as pkl
import hashlib
def hash(x):
m = hashlib.md5()
m.update(str(x).encode('utf-8'))
return m.hexdigest()
def typename(x):
return type(x).__module__
def tonumpy(x):
if typename(x) == torch.__name__:
return x.cpu().numpy()
else:
return x
def pool(x, pooling='mean', gemp=3):
if len(x) == 1: return x[0]
x = torch.stack(x, dim=0)
if pooling == 'mean':
return torch.mean(x, dim=0)
elif pooling == 'gem':
def sympow(x, p, eps=1e-6):
s = torch.sign(x)
return (x*s).clamp(min=eps).pow(p) * s
x = sympow(x,gemp)
x = torch.mean(x, dim=0)
return sympow(x, 1/gemp)
else:
raise ValueError("Bad pooling mode: "+str(pooling))
def extract_features(db, net, trfs, pooling='mean', gemp=3, detailed=False, whiten=None,
threads=8, batch_size=16, output=None, dbg=()):
""" Extract features from trained model (network) on a given dataset.
"""
print("\n>> Extracting features...")
try:
query_db = db.get_query_db()
except NotImplementedError:
query_db = None
# extract DB feats
bdescs = []
qdescs = []
trfs_list = [trfs] if isinstance(trfs, str) else trfs
for trfs in trfs_list:
kw = dict(iscuda=net.iscuda, threads=threads, batch_size=batch_size, same_size='Pad' in trfs or 'Crop' in trfs)
bdescs.append( test.extract_image_features(db, trfs, net, desc="DB", **kw) )
# extract query feats
if query_db is not None:
qdescs.append( bdescs[-1] if db is query_db else test.extract_image_features(query_db, trfs, net, desc="query", **kw) )
# pool from multiple transforms (scales)
bdescs = tonumpy(F.normalize(pool(bdescs, pooling, gemp), p=2, dim=1))
if query_db is not None:
qdescs = tonumpy(F.normalize(pool(qdescs, pooling, gemp), p=2, dim=1))
if whiten is not None:
bdescs = common.whiten_features(bdescs, net.pca, **whiten)
if query_db is not None:
qdescs = common.whiten_features(qdescs, net.pca, **whiten)
mkdir(output, isfile=True)
if query_db is db or query_db is None:
np.save(output, bdescs)
else:
o = osp.splitext(output)
np.save(o[0]+'.qdescs'+o[1], qdescs)
np.save(o[0]+'.dbdescs'+o[1], bdescs)
print('Features extracted.')
def load_model( path, iscuda, whiten=None ):
checkpoint = common.load_checkpoint(path, iscuda)
net = nets.create_model(pretrained="", **checkpoint['model_options'])
net = common.switch_model_to_cuda(net, iscuda, checkpoint)
net.load_state_dict(checkpoint['state_dict'])
net.preprocess = checkpoint.get('preprocess', net.preprocess)
if whiten is not None and 'pca' in checkpoint:
if whiten in checkpoint['pca']:
net.pca = checkpoint['pca'][whiten]
return net
def learn_whiten( dataset, net, trfs='', pooling='mean', threads=8, batch_size=16):
descs = []
trfs_list = [trfs] if isinstance(trfs, str) else trfs
for trfs in trfs_list:
kw = dict(iscuda=net.iscuda, threads=threads, batch_size=batch_size, same_size='Pad' in trfs or 'Crop' in trfs)
descs.append( extract_image_features(dataset, trfs, net, desc="PCA", **kw) )
# pool from multiple transforms (scales)
descs = F.normalize(pool(descs, pooling), p=2, dim=1)
# learn pca with whiten
pca = common.learn_pca(descs.cpu().numpy(), whiten=True)
return pca
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Evaluate a model')
parser.add_argument('--dataset', '-d', type=str, required=True, help='Command to load dataset')
parser.add_argument('--checkpoint', type=str, required=True, help='path to weights')
parser.add_argument('--trfs', type=str, required=False, default='', nargs='+', help='test transforms (can be several)')
parser.add_argument('--pooling', type=str, default="gem", help='pooling scheme if several trf chains')
parser.add_argument('--gemp', type=int, default=3, help='GeM pooling power')
parser.add_argument('--center-bias', type=float, default=0, help='enforce some center bias')
parser.add_argument('--out-json', type=str, default="", help='path to output json')
parser.add_argument('--detailed', action='store_true', help='return detailed evaluation')
parser.add_argument('--output', type=str, default="", help='path to output features')
parser.add_argument('--threads', type=int, default=8, help='number of thread workers')
parser.add_argument('--gpu', type=int, nargs='+', help='GPU ids')
parser.add_argument('--dbg', default=(), nargs='*', help='debugging options')
# post-processing
parser.add_argument('--whiten', type=str, default=None, help='applies whitening')
parser.add_argument('--whitenp', type=float, default=0.5, help='whitening power, default is 0.5 (i.e., the sqrt)')
parser.add_argument('--whitenv', type=int, default=None, help='number of components, default is None (i.e. all components)')
parser.add_argument('--whitenm', type=float, default=1.0, help='whitening multiplier, default is 1.0 (i.e. no multiplication)')
args = parser.parse_args()
args.iscuda = common.torch_set_gpu(args.gpu)
dataset = datasets.create(args.dataset)
print("Dataset:", dataset)
net = load_model(args.checkpoint, args.iscuda, args.whiten)
if args.center_bias:
assert hasattr(net,'center_bias')
net.center_bias = args.center_bias
if hasattr(net, 'module') and hasattr(net.module,'center_bias'):
net.module.center_bias = args.center_bias
if args.whiten and not hasattr(net, 'pca'):
# Learn PCA if necessary
if os.path.exists(args.whiten):
with open(args.whiten, 'rb') as f:
net.pca = pkl.load(f)
else:
pca_path = '_'.join([args.checkpoint, args.whiten, args.pooling, hash(args.trfs), 'pca.pkl'])
db = datasets.create(args.whiten)
print('Dataset for learning the PCA with whitening:', db)
pca = learn_whiten(db, net, pooling=args.pooling, trfs=args.trfs, threads=args.threads)
chk = torch.load(args.checkpoint, map_location=lambda storage, loc: storage)
if 'pca' not in chk: chk['pca'] = {}
chk['pca'][args.whiten] = pca
torch.save(chk, args.checkpoint)
net.pca = pca
if args.whiten:
args.whiten = {'whitenp': args.whitenp, 'whitenv': args.whitenv, 'whitenm': args.whitenm}
# Evaluate
res = extract_features(dataset, net, args.trfs, pooling=args.pooling, gemp=args.gemp, detailed=args.detailed,
threads=args.threads, dbg=args.dbg, whiten=args.whiten, output=args.output)
| [
"dirtorch.utils.common.load_checkpoint",
"dirtorch.nets.create_model",
"dirtorch.utils.common.torch_set_gpu",
"numpy.save",
"os.path.exists",
"argparse.ArgumentParser",
"dirtorch.datasets.create",
"torch.mean",
"os.umask",
"hashlib.md5",
"os.path.splitext",
"torch.sign",
"pickle.load",
"di... | [((22, 33), 'os.umask', 'os.umask', (['(7)'], {}), '(7)\n', (30, 33), False, 'import os\n'), ((482, 495), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (493, 495), False, 'import hashlib\n'), ((798, 819), 'torch.stack', 'torch.stack', (['x'], {'dim': '(0)'}), '(x, dim=0)\n', (809, 819), False, 'import torch\n'), ((2545, 2571), 'dirtorch.utils.convenient.mkdir', 'mkdir', (['output'], {'isfile': '(True)'}), '(output, isfile=True)\n', (2550, 2571), False, 'from dirtorch.utils.convenient import mkdir\n'), ((2878, 2914), 'dirtorch.utils.common.load_checkpoint', 'common.load_checkpoint', (['path', 'iscuda'], {}), '(path, iscuda)\n', (2900, 2914), False, 'from dirtorch.utils import common\n'), ((2925, 2988), 'dirtorch.nets.create_model', 'nets.create_model', ([], {'pretrained': '""""""'}), "(pretrained='', **checkpoint['model_options'])\n", (2942, 2988), True, 'import dirtorch.nets as nets\n'), ((2999, 3051), 'dirtorch.utils.common.switch_model_to_cuda', 'common.switch_model_to_cuda', (['net', 'iscuda', 'checkpoint'], {}), '(net, iscuda, checkpoint)\n', (3026, 3051), False, 'from dirtorch.utils import common\n'), ((3982, 4037), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluate a model"""'}), "(description='Evaluate a model')\n", (4005, 4037), False, 'import argparse\n'), ((5694, 5724), 'dirtorch.utils.common.torch_set_gpu', 'common.torch_set_gpu', (['args.gpu'], {}), '(args.gpu)\n', (5714, 5724), False, 'from dirtorch.utils import common\n'), ((5740, 5769), 'dirtorch.datasets.create', 'datasets.create', (['args.dataset'], {}), '(args.dataset)\n', (5755, 5769), True, 'import dirtorch.datasets as datasets\n'), ((861, 881), 'torch.mean', 'torch.mean', (['x'], {'dim': '(0)'}), '(x, dim=0)\n', (871, 881), False, 'import torch\n'), ((2386, 2435), 'dirtorch.utils.common.whiten_features', 'common.whiten_features', (['bdescs', 'net.pca'], {}), '(bdescs, net.pca, **whiten)\n', (2408, 2435), False, 'from dirtorch.utils import 
common\n'), ((2623, 2646), 'numpy.save', 'np.save', (['output', 'bdescs'], {}), '(output, bdescs)\n', (2630, 2646), True, 'import numpy as np\n'), ((2669, 2689), 'os.path.splitext', 'osp.splitext', (['output'], {}), '(output)\n', (2681, 2689), True, 'import os.path as osp\n'), ((2698, 2738), 'numpy.save', 'np.save', (["(o[0] + '.qdescs' + o[1])", 'qdescs'], {}), "(o[0] + '.qdescs' + o[1], qdescs)\n", (2705, 2738), True, 'import numpy as np\n'), ((2743, 2784), 'numpy.save', 'np.save', (["(o[0] + '.dbdescs' + o[1])", 'bdescs'], {}), "(o[0] + '.dbdescs' + o[1], bdescs)\n", (2750, 2784), True, 'import numpy as np\n'), ((6197, 6224), 'os.path.exists', 'os.path.exists', (['args.whiten'], {}), '(args.whiten)\n', (6211, 6224), False, 'import os\n'), ((1065, 1085), 'torch.mean', 'torch.mean', (['x'], {'dim': '(0)'}), '(x, dim=0)\n', (1075, 1085), False, 'import torch\n'), ((1854, 1913), 'dirtorch.test_dir.extract_image_features', 'test.extract_image_features', (['db', 'trfs', 'net'], {'desc': '"""DB"""'}), "(db, trfs, net, desc='DB', **kw)\n", (1881, 1913), True, 'import dirtorch.test_dir as test\n'), ((2490, 2539), 'dirtorch.utils.common.whiten_features', 'common.whiten_features', (['qdescs', 'net.pca'], {}), '(qdescs, net.pca, **whiten)\n', (2512, 2539), False, 'from dirtorch.utils import common\n'), ((6448, 6476), 'dirtorch.datasets.create', 'datasets.create', (['args.whiten'], {}), '(args.whiten)\n', (6463, 6476), True, 'import dirtorch.datasets as datasets\n'), ((6666, 6736), 'torch.load', 'torch.load', (['args.checkpoint'], {'map_location': '(lambda storage, loc: storage)'}), '(args.checkpoint, map_location=lambda storage, loc: storage)\n', (6676, 6736), False, 'import torch\n'), ((6840, 6872), 'torch.save', 'torch.save', (['chk', 'args.checkpoint'], {}), '(chk, args.checkpoint)\n', (6850, 6872), False, 'import torch\n'), ((961, 974), 'torch.sign', 'torch.sign', (['x'], {}), '(x)\n', (971, 974), False, 'import torch\n'), ((6299, 6310), 'pickle.load', 'pkl.load', 
(['f'], {}), '(f)\n', (6307, 6310), True, 'import pickle as pkl\n'), ((2041, 2109), 'dirtorch.test_dir.extract_image_features', 'test.extract_image_features', (['query_db', 'trfs', 'net'], {'desc': '"""query"""'}), "(query_db, trfs, net, desc='query', **kw)\n", (2068, 2109), True, 'import dirtorch.test_dir as test\n')] |
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
import warnings
from collections import defaultdict
from contextlib import ExitStack
from typing import Dict, List, Tuple
from uuid import uuid4
import numpy as np
from pyarrow import dataset as ds, parquet as pq
import cudf
from cudf._lib import parquet as libparquet
from cudf.api.types import is_list_like
from cudf.core.column import as_column, build_categorical_column
from cudf.utils import ioutils
from cudf.utils.utils import _cudf_nvtx_annotate
@_cudf_nvtx_annotate
def _write_parquet(
    df,
    paths,
    compression="snappy",
    index=None,
    statistics="ROWGROUP",
    metadata_file_path=None,
    int96_timestamps=False,
    row_group_size_bytes=None,
    row_group_size_rows=None,
    partitions_info=None,
    **kwargs,
):
    """Write ``df`` to one or more parquet paths/buffers.

    Thin wrapper over ``libparquet.write_parquet`` that (1) validates
    the pairing between ``paths`` and ``partitions_info``, (2) resolves
    each path into a writable file path or buffer, and (3) keeps
    fsspec-backed open files alive for the duration of the write.

    Parameters
    ----------
    df : cudf.DataFrame
        Frame to write.
    paths : list-like of str or file-like
        Output destination(s). Multiple paths require ``partitions_info``.
    partitions_info : list-like, optional
        Per-path partition (offset, size) info; must be the same length
        as ``paths`` when both describe multiple outputs.
    kwargs
        Remaining options are forwarded to
        ``ioutils.get_writer_filepath_or_buffer`` and the libparquet
        writer via ``common_args``.

    Returns
    -------
    The result of ``libparquet.write_parquet`` (file metadata blob when
    ``metadata_file_path`` is given, else ``None``).

    Raises
    ------
    ValueError
        If ``paths`` and ``partitions_info`` are inconsistent.
    """
    # Multiple output paths are only meaningful for a partitioned write,
    # and the partition metadata must line up with the paths one-to-one.
    if is_list_like(paths) and len(paths) > 1:
        if partitions_info is None:
            raise ValueError("partition info is required for multiple paths")
        elif not is_list_like(partitions_info):
            raise ValueError(
                "partition info must be list-like for multiple paths"
            )
        elif not len(paths) == len(partitions_info):
            raise ValueError("partitions_info and paths must be of same size")
    if is_list_like(partitions_info) and len(partitions_info) > 1:
        if not is_list_like(paths):
            raise ValueError(
                "paths must be list-like when partitions_info provided"
            )

    paths_or_bufs = [
        ioutils.get_writer_filepath_or_buffer(path, mode="wb", **kwargs)
        for path in paths
    ]
    common_args = {
        "index": index,
        "compression": compression,
        "statistics": statistics,
        "metadata_file_path": metadata_file_path,
        "int96_timestamps": int96_timestamps,
        "row_group_size_bytes": row_group_size_bytes,
        "row_group_size_rows": row_group_size_rows,
        "partitions_info": partitions_info,
    }
    if all(ioutils.is_fsspec_open_file(buf) for buf in paths_or_bufs):
        # fsspec OpenFile objects must stay open while libparquet writes;
        # ExitStack closes all of them once the write completes.
        with ExitStack() as stack:
            fsspec_objs = [stack.enter_context(file) for file in paths_or_bufs]
            file_objs = [
                ioutils.get_IOBase_writer(file_obj) for file_obj in fsspec_objs
            ]
            write_parquet_res = libparquet.write_parquet(
                df, filepaths_or_buffers=file_objs, **common_args
            )
    else:
        write_parquet_res = libparquet.write_parquet(
            df, filepaths_or_buffers=paths_or_bufs, **common_args
        )

    return write_parquet_res
return write_parquet_res
# Logic chosen to match: https://arrow.apache.org/
# docs/_modules/pyarrow/parquet.html#write_to_dataset
@_cudf_nvtx_annotate
def write_to_dataset(
    df,
    root_path,
    filename=None,
    partition_cols=None,
    fs=None,
    preserve_index=False,
    return_metadata=False,
    **kwargs,
):
    """Wraps `to_parquet` to write partitioned Parquet datasets.

    For each combination of partition group and value,
    subdirectories are created as follows:

    .. code-block:: bash

        root_dir/
            group=value1
                <filename>.parquet
            ...
            group=valueN
                <filename>.parquet

    Parameters
    ----------
    df : cudf.DataFrame
    root_path : string,
        The root directory of the dataset
    filename : string, default None
        The file name to use (within each partition directory). If None,
        a random uuid4 hex string will be used for each file name.
    fs : FileSystem, default None
        If nothing passed, paths assumed to be found in the local on-disk
        filesystem
    preserve_index : bool, default False
        Preserve index values in each parquet file.
    partition_cols : list,
        Column names by which to partition the dataset
        Columns are partitioned in the order they are given
    return_metadata : bool, default False
        Return parquet metadata for written data. Returned metadata will
        include the file-path metadata (relative to `root_path`).
    **kwargs : dict,
        kwargs for to_parquet function.
    """
    fs = ioutils._ensure_filesystem(fs, root_path, **kwargs)
    fs.mkdirs(root_path, exist_ok=True)

    if partition_cols is not None and len(partition_cols) > 0:
        # Partitioned write: group the frame by partition values and
        # write one file per partition directory in a single pass.
        (
            full_paths,
            metadata_file_paths,
            grouped_df,
            part_offsets,
            _,
        ) = _get_partitioned(
            df,
            root_path,
            partition_cols,
            filename,
            fs,
            preserve_index,
            **kwargs,
        )
        if return_metadata:
            kwargs["metadata_file_path"] = metadata_file_paths
        return to_parquet(
            grouped_df,
            full_paths,
            index=preserve_index,
            partition_offsets=part_offsets,
            **kwargs,
        )

    # Unpartitioned write: a single file directly under root_path.
    filename = filename or _generate_filename()
    full_path = fs.sep.join([root_path, filename])
    if return_metadata:
        kwargs["metadata_file_path"] = filename
    return df.to_parquet(full_path, index=preserve_index, **kwargs)
@ioutils.doc_read_parquet_metadata()
@_cudf_nvtx_annotate
def read_parquet_metadata(path):
    """{docstring}"""
    # Open the file footer only; no row data is read here.
    parquet_file = pq.ParquetFile(path)
    # (num_rows, num_row_groups, column_names)
    return (
        parquet_file.metadata.num_rows,
        parquet_file.num_row_groups,
        parquet_file.schema.names,
    )
@_cudf_nvtx_annotate
def _process_dataset(
    paths, fs, filters=None, row_groups=None, categorical_partitions=True,
):
    """Expand, filter, and discover partitioning for a parquet dataset.

    The general purpose of this function is to (1) expand directory
    input into a list of paths (using the pyarrow dataset API), (2)
    apply row-group filters, and (3) discover directory-partitioning
    ("hive") information.

    Parameters
    ----------
    paths : list
        Input paths. A single-element list naming a directory is
        expanded into the files it contains.
    fs : filesystem object
        Filesystem used to resolve ``paths``.
    filters : list or None
        Row-group filters (converted to a ``ds.Expression``).
    row_groups : list or None
        Per-path row-group selections; must map 1:1 onto ``paths``.
    categorical_partitions : bool, default True
        Whether to gather the full set of partition categories.

    Returns
    -------
    tuple of
        file_list - Expanded/filtered list of paths
        row_groups - Filtered list of row-group selections
        partition_keys - list of partition keys for each file
        partition_categories - Categories for each partition
    """
    # Deal with case that the user passed in a directory name
    file_list = paths
    if len(paths) == 1 and ioutils.is_directory(paths[0]):
        paths = ioutils.stringify_pathlike(paths[0])

    # Convert filters to ds.Expression
    if filters is not None:
        filters = pq._filters_to_expression(filters)

    # Initialize ds.FilesystemDataset
    dataset = ds.dataset(
        paths, filesystem=fs, format="parquet", partitioning="hive",
    )
    file_list = dataset.files
    if len(file_list) == 0:
        raise FileNotFoundError(f"{paths} could not be resolved to any files")

    # Deal with directory partitioning
    # Get all partition keys (without filters)
    partition_categories = defaultdict(list)
    file_fragment = None
    for file_fragment in dataset.get_fragments():
        keys = ds._get_partition_keys(file_fragment.partition_expression)
        if not (keys or partition_categories):
            # Bail - This is not a directory-partitioned dataset
            break
        for k, v in keys.items():
            if v not in partition_categories[k]:
                partition_categories[k].append(v)
        if not categorical_partitions:
            # Bail - We don't need to discover all categories.
            # We only need to save the partition keys from this
            # first `file_fragment`
            break

    if partition_categories and file_fragment is not None:
        # Check/correct order of `categories` using last file_frag,
        # because `_get_partition_keys` does NOT preserve the
        # partition-hierarchy order of the keys.
        cat_keys = [
            part.split("=")[0]
            for part in file_fragment.path.split(fs.sep)
            if "=" in part
        ]
        if set(partition_categories) == set(cat_keys):
            partition_categories = {
                k: partition_categories[k]
                for k in cat_keys
                if k in partition_categories
            }

    # If we do not have partitioned data and
    # are not filtering, we can return here
    if filters is None and not partition_categories:
        return file_list, row_groups, [], {}

    # Record initial row_groups input
    row_groups_map = {}
    if row_groups is not None:
        # Make sure paths and row_groups map 1:1
        # and save the initial mapping
        if len(paths) != len(file_list):
            raise ValueError(
                "Cannot specify a row_group selection for a directory path."
            )
        row_groups_map = {path: rgs for path, rgs in zip(paths, row_groups)}

    # Apply filters and discover partition columns
    partition_keys = []
    if partition_categories or filters is not None:
        file_list = []
        if filters is not None:
            row_groups = []
        for file_fragment in dataset.get_fragments(filter=filters):
            path = file_fragment.path

            # Extract hive-partition keys, and make sure they
            # are ordered the same as they are in `partition_categories`
            if partition_categories:
                raw_keys = ds._get_partition_keys(
                    file_fragment.partition_expression
                )
                partition_keys.append(
                    [
                        (name, raw_keys[name])
                        for name in partition_categories.keys()
                    ]
                )

            # Apply row-group filtering
            selection = row_groups_map.get(path, None)
            if selection is not None or filters is not None:
                filtered_row_groups = [
                    rg_info.id
                    for rg_fragment in file_fragment.split_by_row_group(
                        filters, schema=dataset.schema,
                    )
                    for rg_info in rg_fragment.row_groups
                ]
            file_list.append(path)
            if filters is not None:
                if selection is None:
                    row_groups.append(filtered_row_groups)
                else:
                    # Intersect the filter result with the user's
                    # original row-group selection for this path
                    row_groups.append(
                        [
                            rg_id
                            for rg_id in filtered_row_groups
                            if rg_id in selection
                        ]
                    )

    return (
        file_list,
        row_groups,
        partition_keys,
        partition_categories if categorical_partitions else {},
    )
@ioutils.doc_read_parquet()
@_cudf_nvtx_annotate
def read_parquet(
    filepath_or_buffer,
    engine="cudf",
    columns=None,
    filters=None,
    row_groups=None,
    skiprows=None,
    num_rows=None,
    strings_to_categorical=False,
    use_pandas_metadata=True,
    use_python_file_object=True,
    categorical_partitions=True,
    open_file_options=None,
    *args,
    **kwargs,
):
    """{docstring}"""
    # Do not allow the user to set file-opening options
    # when `use_python_file_object=False` is specified
    if use_python_file_object is False:
        if open_file_options:
            raise ValueError(
                "open_file_options is not currently supported when "
                "use_python_file_object is set to False."
            )
        open_file_options = {}

    # Multiple sources are passed as a list. If a single source is passed,
    # wrap it in a list for unified processing downstream.
    if not is_list_like(filepath_or_buffer):
        filepath_or_buffer = [filepath_or_buffer]

    # a list of row groups per source should be passed. make the list of
    # lists that is expected for multiple sources
    if row_groups is not None:
        if not is_list_like(row_groups):
            row_groups = [[row_groups]]
        elif not is_list_like(row_groups[0]):
            row_groups = [row_groups]

    # Check columns input
    if columns is not None:
        if not is_list_like(columns):
            raise ValueError("Expected list like for columns")

    # Start by trying construct a filesystem object, so we
    # can apply filters on remote file-systems
    fs, paths = ioutils._get_filesystem_and_paths(filepath_or_buffer, **kwargs)

    # Use pyarrow dataset to detect/process directory-partitioned
    # data and apply filters. Note that we can only support partitioned
    # data and filtering if the input is a single directory or list of
    # paths.
    partition_keys = []
    partition_categories = {}
    if fs and paths:
        (
            paths,
            row_groups,
            partition_keys,
            partition_categories,
        ) = _process_dataset(
            paths,
            fs,
            filters=filters,
            row_groups=row_groups,
            categorical_partitions=categorical_partitions,
        )
    elif filters is not None:
        # Open file objects (no resolvable paths) cannot be filtered
        raise ValueError("cudf cannot apply filters to open file objects.")
    filepath_or_buffer = paths if paths else filepath_or_buffer

    filepaths_or_buffers = []
    if use_python_file_object:
        open_file_options = _default_open_file_options(
            open_file_options, columns, row_groups, fs=fs,
        )
    for i, source in enumerate(filepath_or_buffer):
        tmp_source, compression = ioutils.get_filepath_or_buffer(
            path_or_data=source,
            compression=None,
            fs=fs,
            use_python_file_object=use_python_file_object,
            open_file_options=open_file_options,
            **kwargs,
        )
        if compression is not None:
            raise ValueError(
                "URL content-encoding decompression is not supported"
            )
        if isinstance(tmp_source, list):
            # NOTE(review): this extends the very list being enumerated,
            # so the appended entries are visited on later iterations
            # (each one resolved individually). Presumably intentional
            # (an expanded source re-queued for processing) — confirm it
            # should not be `filepaths_or_buffers.extend(tmp_source)`.
            filepath_or_buffer.extend(tmp_source)
        else:
            filepaths_or_buffers.append(tmp_source)

    # Warn user if they are not using cudf for IO
    # (There is a good chance this was not the intention)
    if engine != "cudf":
        warnings.warn(
            "Using CPU via PyArrow to read Parquet dataset. "
            "This option is both inefficient and unstable!"
        )
        if filters is not None:
            warnings.warn(
                "Parquet row-group filtering is only supported with "
                "'engine=cudf'. Use pandas or pyarrow API directly "
                "for full CPU-based filtering functionality."
            )

    return _parquet_to_frame(
        filepaths_or_buffers,
        engine,
        *args,
        columns=columns,
        row_groups=row_groups,
        skiprows=skiprows,
        num_rows=num_rows,
        strings_to_categorical=strings_to_categorical,
        use_pandas_metadata=use_pandas_metadata,
        partition_keys=partition_keys,
        partition_categories=partition_categories,
        **kwargs,
    )
@_cudf_nvtx_annotate
def _parquet_to_frame(
    paths_or_buffers,
    *args,
    row_groups=None,
    partition_keys=None,
    partition_categories=None,
    **kwargs,
):
    """Read one or more parquet sources into a single DataFrame.

    For hive-partitioned data, all sources sharing the same set of
    partition-key values are grouped into a single `_read_parquet`
    call, the partition columns are re-attached to each resulting
    frame, and the frames are concatenated at the end.
    """
    # If this is not a partitioned read, only need
    # one call to `_read_parquet`
    if not partition_keys:
        return _read_parquet(
            paths_or_buffers, *args, row_groups=row_groups, **kwargs,
        )

    # For partitioned data, we need a distinct read for each
    # unique set of partition keys. Therefore, we start by
    # aggregating all paths with matching keys using a dict
    plan = {}
    for i, (keys, path) in enumerate(zip(partition_keys, paths_or_buffers)):
        rgs = row_groups[i] if row_groups else None
        tkeys = tuple(keys)
        if tkeys in plan:
            plan[tkeys][0].append(path)
            if rgs is not None:
                plan[tkeys][1].append(rgs)
        else:
            plan[tkeys] = ([path], None if rgs is None else [rgs])

    dfs = []
    for part_key, (key_paths, key_row_groups) in plan.items():
        # Add new DataFrame to our list
        dfs.append(
            _read_parquet(
                key_paths, *args, row_groups=key_row_groups, **kwargs,
            )
        )
        # Add partition columns to the last DataFrame
        for (name, value) in part_key:
            if partition_categories and name in partition_categories:
                # Build the categorical column from `codes`
                # (a constant code per partition directory)
                codes = as_column(
                    partition_categories[name].index(value),
                    length=len(dfs[-1]),
                )
                dfs[-1][name] = build_categorical_column(
                    categories=partition_categories[name],
                    codes=codes,
                    size=codes.size,
                    offset=codes.offset,
                    ordered=False,
                )
            else:
                # Not building categorical columns, so
                # `value` is already what we want
                dfs[-1][name] = as_column(value, length=len(dfs[-1]))

    # Concatenate dfs and return.
    # Assume we can ignore the index if it has no name.
    return (
        cudf.concat(dfs, ignore_index=dfs[-1].index.name is None)
        if len(dfs) > 1
        else dfs[0]
    )
@_cudf_nvtx_annotate
def _read_parquet(
    filepaths_or_buffers,
    engine,
    columns=None,
    row_groups=None,
    skiprows=None,
    num_rows=None,
    strings_to_categorical=None,
    use_pandas_metadata=None,
    *args,
    **kwargs,
):
    # Dispatch helper: route the read either to the GPU-accelerated
    # libcudf reader (engine == "cudf") or to pyarrow + pandas metadata.
    if engine != "cudf":
        # CPU fallback: let pyarrow assemble the table, then convert.
        arrow_table = pq.ParquetDataset(filepaths_or_buffers).read_pandas(
            columns=columns, *args, **kwargs
        )
        return cudf.DataFrame.from_arrow(arrow_table)
    # GPU path: forward the read options straight to libcudf.
    return libparquet.read_parquet(
        filepaths_or_buffers,
        columns=columns,
        row_groups=row_groups,
        skiprows=skiprows,
        num_rows=num_rows,
        strings_to_categorical=strings_to_categorical,
        use_pandas_metadata=use_pandas_metadata,
    )
@ioutils.doc_to_parquet()
@_cudf_nvtx_annotate
def to_parquet(
    df,
    path,
    engine="cudf",
    compression="snappy",
    index=None,
    partition_cols=None,
    partition_file_name=None,
    partition_offsets=None,
    statistics="ROWGROUP",
    metadata_file_path=None,
    int96_timestamps=False,
    row_group_size_bytes=None,
    row_group_size_rows=None,
    *args,
    **kwargs,
):
    """{docstring}"""
    if engine == "cudf":
        # Ensure that no columns dtype is 'category'
        for col in df._column_names:
            if partition_cols is None or col not in partition_cols:
                if df[col].dtype.name == "category":
                    raise ValueError(
                        "'category' column dtypes are currently not "
                        + "supported by the gpu accelerated parquet writer"
                    )

        if partition_cols:
            if metadata_file_path is not None:
                warnings.warn(
                    "metadata_file_path will be ignored/overwritten when "
                    "partition_cols are provided. To request returning the "
                    "metadata binary blob, pass `return_metadata=True`"
                )
            kwargs.update(
                {
                    "compression": compression,
                    "statistics": statistics,
                    "int96_timestamps": int96_timestamps,
                    "row_group_size_bytes": row_group_size_bytes,
                    "row_group_size_rows": row_group_size_rows,
                }
            )
            return write_to_dataset(
                df,
                filename=partition_file_name,
                partition_cols=partition_cols,
                root_path=path,
                preserve_index=index,
                **kwargs,
            )

        if partition_offsets:
            # Convert row offsets into (offset, size) pairs; the final
            # rolled element is a sentinel and is dropped with [:-1]
            kwargs["partitions_info"] = list(
                zip(
                    partition_offsets,
                    np.roll(partition_offsets, -1) - partition_offsets,
                )
            )[:-1]

        return _write_parquet(
            df,
            paths=path if is_list_like(path) else [path],
            compression=compression,
            index=index,
            statistics=statistics,
            metadata_file_path=metadata_file_path,
            int96_timestamps=int96_timestamps,
            row_group_size_bytes=row_group_size_bytes,
            row_group_size_rows=row_group_size_rows,
            **kwargs,
        )

    else:
        if partition_offsets is not None:
            warnings.warn(
                "partition_offsets will be ignored when engine is not cudf"
            )

        # If index is empty set it to the expected default value of True
        if index is None:
            index = True

        # Convert partition_file_name to a call back.
        # BUGFIX: bind the original value eagerly. The previous
        # `lambda x: partition_file_name` resolved the name at call
        # time — i.e. *after* this rebinding — so the callback returned
        # the lambda object itself instead of the file name.
        if partition_file_name:
            _fixed_file_name = partition_file_name
            partition_file_name = lambda x: _fixed_file_name  # noqa: E731

        pa_table = df.to_arrow(preserve_index=index)
        return pq.write_to_dataset(
            pa_table,
            root_path=path,
            partition_filename_cb=partition_file_name,
            partition_cols=partition_cols,
            *args,
            **kwargs,
        )
@ioutils.doc_merge_parquet_filemetadata()
def merge_parquet_filemetadata(filemetadata_list):
    """{docstring}"""
    # Thin pass-through to the Cython-level metadata merger.
    merged = libparquet.merge_filemetadata(filemetadata_list)
    return merged
def _generate_filename():
return uuid4().hex + ".parquet"
@_cudf_nvtx_annotate
def _get_partitioned(
    df,
    root_path,
    partition_cols,
    filename=None,
    fs=None,
    preserve_index=False,
    **kwargs,
):
    """Group ``df`` by ``partition_cols`` and build hive-style paths.

    Creates one ``group=value`` subdirectory per unique combination of
    partition values under ``root_path``, and returns everything the
    writer needs: the full output paths, the metadata-relative paths,
    the grouped (partition-column-free) frame, the row offsets of each
    group, and the file name used.
    """
    fs = ioutils._ensure_filesystem(fs, root_path, **kwargs)
    fs.mkdirs(root_path, exist_ok=True)
    if not (set(df._data) - set(partition_cols)):
        raise ValueError("No data left to save outside partition columns")

    # Sort rows into partition groups; `part_offsets` gives the row
    # offset of each group within `grouped_df`
    part_names, part_offsets, _, grouped_df = df.groupby(
        partition_cols
    )._grouped()
    if not preserve_index:
        grouped_df.reset_index(drop=True, inplace=True)
    # Partition values live in the directory names, not in the data
    grouped_df.drop(columns=partition_cols, inplace=True)
    # Copy the entire keys df in one operation rather than using iloc
    part_names = part_names.to_pandas().to_frame(index=False)

    full_paths = []
    metadata_file_paths = []
    for keys in part_names.itertuples(index=False):
        # One `col=value` path segment per partition column
        subdir = fs.sep.join(
            [f"{name}={val}" for name, val in zip(partition_cols, keys)]
        )
        prefix = fs.sep.join([root_path, subdir])
        fs.mkdirs(prefix, exist_ok=True)
        # Reuse the same file name across partitions (generate it once)
        filename = filename or _generate_filename()
        full_path = fs.sep.join([prefix, filename])
        full_paths.append(full_path)
        metadata_file_paths.append(fs.sep.join([subdir, filename]))

    return full_paths, metadata_file_paths, grouped_df, part_offsets, filename
# Re-export the Cython-level chunked parquet writer under the public name.
ParquetWriter = libparquet.ParquetWriter
class ParquetDatasetWriter:
    @_cudf_nvtx_annotate
    def __init__(
        self,
        path,
        partition_cols,
        index=None,
        compression=None,
        statistics="ROWGROUP",
    ) -> None:
        """
        Write a parquet file or dataset incrementally

        Parameters
        ----------
        path : str
            File path or Root Directory path. Will be used as Root Directory
            path while writing a partitioned dataset.
        partition_cols : list
            Column names by which to partition the dataset
            Columns are partitioned in the order they are given
        index : bool, default None
            If ``True``, include the dataframe's index(es) in the file output.
            If ``False``, they will not be written to the file. If ``None``,
            index(es) other than RangeIndex will be saved as columns.
        compression : {'snappy', None}, default 'snappy'
            Name of the compression to use. Use ``None`` for no compression.
        statistics : {'ROWGROUP', 'PAGE', 'NONE'}, default 'ROWGROUP'
            Level at which column statistics should be included in file.

        Examples
        --------
        Using a context

        >>> df1 = cudf.DataFrame({"a": [1, 1, 2, 2, 1], "b": [9, 8, 7, 6, 5]})
        >>> df2 = cudf.DataFrame({"a": [1, 3, 3, 1, 3], "b": [4, 3, 2, 1, 0]})
        >>> with ParquetDatasetWriter("./dataset", partition_cols=["a"]) as cw:
        ...     cw.write_table(df1)
        ...     cw.write_table(df2)

        By manually calling ``close()``

        >>> cw = ParquetDatasetWriter("./dataset", partition_cols=["a"])
        >>> cw.write_table(df1)
        >>> cw.write_table(df2)
        >>> cw.close()

        Both the methods will generate the same directory structure

        .. code-block:: bash

            dataset/
                a=1
                    <filename>.parquet
                a=2
                    <filename>.parquet
                a=3
                    <filename>.parquet

        """
        self.path = path
        self.common_args = {
            "index": index,
            "compression": compression,
            "statistics": statistics,
        }
        self.partition_cols = partition_cols
        # Collection of `ParquetWriter`s, and the corresponding
        # partition_col values they're responsible for
        self._chunked_writers: List[
            Tuple[libparquet.ParquetWriter, List[str], str]
        ] = []
        # Map of partition_col values to their ParquetWriter's index
        # in self._chunked_writers for reverse lookup
        self.path_cw_map: Dict[str, int] = {}
        self.filename = None

    @_cudf_nvtx_annotate
    def write_table(self, df):
        """
        Write a dataframe to the file/dataset
        """
        (
            paths,
            metadata_file_paths,
            grouped_df,
            offsets,
            self.filename,
        ) = _get_partitioned(
            df,
            self.path,
            self.partition_cols,
            preserve_index=self.common_args["index"],
            filename=self.filename,
        )

        existing_cw_batch = defaultdict(dict)
        new_cw_paths = []

        # Sort each (path, (offset, size), meta_path) triple into either
        # the batch for an already-open writer or the list of new paths
        for path, part_info, meta_path in zip(
            paths,
            zip(offsets, np.roll(offsets, -1) - offsets),
            metadata_file_paths,
        ):
            if path in self.path_cw_map:  # path is a currently open file
                cw_idx = self.path_cw_map[path]
                existing_cw_batch[cw_idx][path] = part_info
            else:  # path not currently handled by any chunked writer
                new_cw_paths.append((path, part_info, meta_path))

        # Write out the parts of grouped_df currently handled by existing cw's
        for cw_idx, path_to_part_info_map in existing_cw_batch.items():
            cw = self._chunked_writers[cw_idx][0]
            # match found paths with this cw's paths and nullify partition info
            # for partition_col values not in this batch
            this_cw_part_info = [
                path_to_part_info_map.get(path, (0, 0))
                for path in self._chunked_writers[cw_idx][1]
            ]
            cw.write_table(grouped_df, this_cw_part_info)

        # Create new cw for unhandled paths encountered in this write_table.
        # BUGFIX: only do so when there actually are new paths — previously
        # a write containing only already-seen partition values crashed on
        # unpacking `zip(*new_cw_paths)` of an empty list (ValueError).
        if new_cw_paths:
            new_paths, part_info, meta_paths = zip(*new_cw_paths)
            self._chunked_writers.append(
                (
                    ParquetWriter(new_paths, **self.common_args),
                    new_paths,
                    meta_paths,
                )
            )
            new_cw_idx = len(self._chunked_writers) - 1
            self.path_cw_map.update({k: new_cw_idx for k in new_paths})
            self._chunked_writers[-1][0].write_table(grouped_df, part_info)

    @_cudf_nvtx_annotate
    def close(self, return_metadata=False):
        """
        Close all open files and optionally return footer metadata as a binary
        blob
        """
        metadata = [
            cw.close(metadata_file_path=meta_path if return_metadata else None)
            for cw, _, meta_path in self._chunked_writers
        ]

        if return_metadata:
            return (
                merge_parquet_filemetadata(metadata)
                if len(metadata) > 1
                else metadata[0]
            )

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
def _default_open_file_options(
open_file_options, columns, row_groups, fs=None
):
"""
Set default fields in open_file_options.
Copies and updates `open_file_options` to
include column and row-group information
under the "precache_options" key. By default,
we set "method" to "parquet", but precaching
will be disabled if the user chooses `method=None`
Parameters
----------
open_file_options : dict or None
columns : list
row_groups : list
fs : fsspec.AbstractFileSystem, Optional
"""
if fs and ioutils._is_local_filesystem(fs):
# Quick return for local fs
return open_file_options or {}
# Assume remote storage if `fs` was not specified
open_file_options = (open_file_options or {}).copy()
precache_options = open_file_options.pop("precache_options", {}).copy()
if precache_options.get("method", "parquet") == "parquet":
precache_options.update(
{
"method": "parquet",
"engine": precache_options.get("engine", "pyarrow"),
"columns": columns,
"row_groups": row_groups,
}
)
open_file_options["precache_options"] = precache_options
return open_file_options
| [
"cudf.utils.ioutils._get_filesystem_and_paths",
"cudf.utils.ioutils.stringify_pathlike",
"cudf.api.types.is_list_like",
"cudf.utils.ioutils.doc_to_parquet",
"pyarrow.parquet._filters_to_expression",
"cudf.utils.ioutils.get_filepath_or_buffer",
"pyarrow.parquet.write_to_dataset",
"cudf.utils.ioutils.ge... | [((5122, 5157), 'cudf.utils.ioutils.doc_read_parquet_metadata', 'ioutils.doc_read_parquet_metadata', ([], {}), '()\n', (5155, 5157), False, 'from cudf.utils import ioutils\n'), ((10517, 10543), 'cudf.utils.ioutils.doc_read_parquet', 'ioutils.doc_read_parquet', ([], {}), '()\n', (10541, 10543), False, 'from cudf.utils import ioutils\n'), ((17987, 18011), 'cudf.utils.ioutils.doc_to_parquet', 'ioutils.doc_to_parquet', ([], {}), '()\n', (18009, 18011), False, 'from cudf.utils import ioutils\n'), ((21254, 21294), 'cudf.utils.ioutils.doc_merge_parquet_filemetadata', 'ioutils.doc_merge_parquet_filemetadata', ([], {}), '()\n', (21292, 21294), False, 'from cudf.utils import ioutils\n'), ((4059, 4110), 'cudf.utils.ioutils._ensure_filesystem', 'ioutils._ensure_filesystem', (['fs', 'root_path'], {}), '(fs, root_path, **kwargs)\n', (4085, 4110), False, 'from cudf.utils import ioutils\n'), ((5249, 5269), 'pyarrow.parquet.ParquetFile', 'pq.ParquetFile', (['path'], {}), '(path)\n', (5263, 5269), True, 'from pyarrow import dataset as ds, parquet as pq\n'), ((6428, 6499), 'pyarrow.dataset.dataset', 'ds.dataset', (['paths'], {'filesystem': 'fs', 'format': '"""parquet"""', 'partitioning': '"""hive"""'}), "(paths, filesystem=fs, format='parquet', partitioning='hive')\n", (6438, 6499), True, 'from pyarrow import dataset as ds, parquet as pq\n'), ((6766, 6783), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6777, 6783), False, 'from collections import defaultdict\n'), ((12142, 12205), 'cudf.utils.ioutils._get_filesystem_and_paths', 'ioutils._get_filesystem_and_paths', (['filepath_or_buffer'], {}), '(filepath_or_buffer, **kwargs)\n', (12175, 12205), False, 'from cudf.utils import ioutils\n'), ((21380, 21428), 'cudf._lib.parquet.merge_filemetadata', 'libparquet.merge_filemetadata', (['filemetadata_list'], {}), '(filemetadata_list)\n', (21409, 21428), True, 'from cudf._lib import parquet as libparquet\n'), ((21665, 21716), 
'cudf.utils.ioutils._ensure_filesystem', 'ioutils._ensure_filesystem', (['fs', 'root_path'], {}), '(fs, root_path, **kwargs)\n', (21691, 21716), False, 'from cudf.utils import ioutils\n'), ((803, 822), 'cudf.api.types.is_list_like', 'is_list_like', (['paths'], {}), '(paths)\n', (815, 822), False, 'from cudf.api.types import is_list_like\n'), ((1210, 1239), 'cudf.api.types.is_list_like', 'is_list_like', (['partitions_info'], {}), '(partitions_info)\n', (1222, 1239), False, 'from cudf.api.types import is_list_like\n'), ((1417, 1481), 'cudf.utils.ioutils.get_writer_filepath_or_buffer', 'ioutils.get_writer_filepath_or_buffer', (['path'], {'mode': '"""wb"""'}), "(path, mode='wb', **kwargs)\n", (1454, 1481), False, 'from cudf.utils import ioutils\n'), ((2364, 2443), 'cudf._lib.parquet.write_parquet', 'libparquet.write_parquet', (['df'], {'filepaths_or_buffers': 'paths_or_bufs'}), '(df, filepaths_or_buffers=paths_or_bufs, **common_args)\n', (2388, 2443), True, 'from cudf._lib import parquet as libparquet\n'), ((6169, 6199), 'cudf.utils.ioutils.is_directory', 'ioutils.is_directory', (['paths[0]'], {}), '(paths[0])\n', (6189, 6199), False, 'from cudf.utils import ioutils\n'), ((6217, 6253), 'cudf.utils.ioutils.stringify_pathlike', 'ioutils.stringify_pathlike', (['paths[0]'], {}), '(paths[0])\n', (6243, 6253), False, 'from cudf.utils import ioutils\n'), ((6340, 6374), 'pyarrow.parquet._filters_to_expression', 'pq._filters_to_expression', (['filters'], {}), '(filters)\n', (6365, 6374), True, 'from pyarrow import dataset as ds, parquet as pq\n'), ((6874, 6932), 'pyarrow.dataset._get_partition_keys', 'ds._get_partition_keys', (['file_fragment.partition_expression'], {}), '(file_fragment.partition_expression)\n', (6896, 6932), True, 'from pyarrow import dataset as ds, parquet as pq\n'), ((11459, 11491), 'cudf.api.types.is_list_like', 'is_list_like', (['filepath_or_buffer'], {}), '(filepath_or_buffer)\n', (11471, 11491), False, 'from cudf.api.types import is_list_like\n'), 
((13260, 13439), 'cudf.utils.ioutils.get_filepath_or_buffer', 'ioutils.get_filepath_or_buffer', ([], {'path_or_data': 'source', 'compression': 'None', 'fs': 'fs', 'use_python_file_object': 'use_python_file_object', 'open_file_options': 'open_file_options'}), '(path_or_data=source, compression=None, fs=fs,\n use_python_file_object=use_python_file_object, open_file_options=\n open_file_options, **kwargs)\n', (13290, 13439), False, 'from cudf.utils import ioutils\n'), ((13964, 14083), 'warnings.warn', 'warnings.warn', (['"""Using CPU via PyArrow to read Parquet dataset. This option is both inefficient and unstable!"""'], {}), "(\n 'Using CPU via PyArrow to read Parquet dataset. This option is both inefficient and unstable!'\n )\n", (13977, 14083), False, 'import warnings\n'), ((16998, 17055), 'cudf.concat', 'cudf.concat', (['dfs'], {'ignore_index': '(dfs[-1].index.name is None)'}), '(dfs, ignore_index=dfs[-1].index.name is None)\n', (17009, 17055), False, 'import cudf\n'), ((17487, 17712), 'cudf._lib.parquet.read_parquet', 'libparquet.read_parquet', (['filepaths_or_buffers'], {'columns': 'columns', 'row_groups': 'row_groups', 'skiprows': 'skiprows', 'num_rows': 'num_rows', 'strings_to_categorical': 'strings_to_categorical', 'use_pandas_metadata': 'use_pandas_metadata'}), '(filepaths_or_buffers, columns=columns, row_groups=\n row_groups, skiprows=skiprows, num_rows=num_rows,\n strings_to_categorical=strings_to_categorical, use_pandas_metadata=\n use_pandas_metadata)\n', (17510, 17712), True, 'from cudf._lib import parquet as libparquet\n'), ((21031, 21172), 'pyarrow.parquet.write_to_dataset', 'pq.write_to_dataset', (['pa_table', '*args'], {'root_path': 'path', 'partition_filename_cb': 'partition_file_name', 'partition_cols': 'partition_cols'}), '(pa_table, *args, root_path=path, partition_filename_cb=\n partition_file_name, partition_cols=partition_cols, **kwargs)\n', (21050, 21172), True, 'from pyarrow import dataset as ds, parquet as pq\n'), ((26075, 26092), 
'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (26086, 26092), False, 'from collections import defaultdict\n'), ((28898, 28930), 'cudf.utils.ioutils._is_local_filesystem', 'ioutils._is_local_filesystem', (['fs'], {}), '(fs)\n', (28926, 28930), False, 'from cudf.utils import ioutils\n'), ((1285, 1304), 'cudf.api.types.is_list_like', 'is_list_like', (['paths'], {}), '(paths)\n', (1297, 1304), False, 'from cudf.api.types import is_list_like\n'), ((1892, 1924), 'cudf.utils.ioutils.is_fsspec_open_file', 'ioutils.is_fsspec_open_file', (['buf'], {}), '(buf)\n', (1919, 1924), False, 'from cudf.utils import ioutils\n'), ((1966, 1977), 'contextlib.ExitStack', 'ExitStack', ([], {}), '()\n', (1975, 1977), False, 'from contextlib import ExitStack\n'), ((2220, 2295), 'cudf._lib.parquet.write_parquet', 'libparquet.write_parquet', (['df'], {'filepaths_or_buffers': 'file_objs'}), '(df, filepaths_or_buffers=file_objs, **common_args)\n', (2244, 2295), True, 'from cudf._lib import parquet as libparquet\n'), ((11713, 11737), 'cudf.api.types.is_list_like', 'is_list_like', (['row_groups'], {}), '(row_groups)\n', (11725, 11737), False, 'from cudf.api.types import is_list_like\n'), ((11933, 11954), 'cudf.api.types.is_list_like', 'is_list_like', (['columns'], {}), '(columns)\n', (11945, 11954), False, 'from cudf.api.types import is_list_like\n'), ((14155, 14326), 'warnings.warn', 'warnings.warn', (['"""Parquet row-group filtering is only supported with \'engine=cudf\'. Use pandas or pyarrow API directly for full CPU-based filtering functionality."""'], {}), '(\n "Parquet row-group filtering is only supported with \'engine=cudf\'. 
Use pandas or pyarrow API directly for full CPU-based filtering functionality."\n )\n', (14168, 14326), False, 'import warnings\n'), ((20568, 20642), 'warnings.warn', 'warnings.warn', (['"""partition_offsets will be ignored when engine is not cudf"""'], {}), "('partition_offsets will be ignored when engine is not cudf')\n", (20581, 20642), False, 'import warnings\n'), ((21468, 21475), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (21473, 21475), False, 'from uuid import uuid4\n'), ((968, 997), 'cudf.api.types.is_list_like', 'is_list_like', (['partitions_info'], {}), '(partitions_info)\n', (980, 997), False, 'from cudf.api.types import is_list_like\n'), ((2110, 2145), 'cudf.utils.ioutils.get_IOBase_writer', 'ioutils.get_IOBase_writer', (['file_obj'], {}), '(file_obj)\n', (2135, 2145), False, 'from cudf.utils import ioutils\n'), ((9155, 9213), 'pyarrow.dataset._get_partition_keys', 'ds._get_partition_keys', (['file_fragment.partition_expression'], {}), '(file_fragment.partition_expression)\n', (9177, 9213), True, 'from pyarrow import dataset as ds, parquet as pq\n'), ((11796, 11823), 'cudf.api.types.is_list_like', 'is_list_like', (['row_groups[0]'], {}), '(row_groups[0])\n', (11808, 11823), False, 'from cudf.api.types import is_list_like\n'), ((16444, 16577), 'cudf.core.column.build_categorical_column', 'build_categorical_column', ([], {'categories': 'partition_categories[name]', 'codes': 'codes', 'size': 'codes.size', 'offset': 'codes.offset', 'ordered': '(False)'}), '(categories=partition_categories[name], codes=codes,\n size=codes.size, offset=codes.offset, ordered=False)\n', (16468, 16577), False, 'from cudf.core.column import as_column, build_categorical_column\n'), ((18940, 19122), 'warnings.warn', 'warnings.warn', (['"""metadata_file_path will be ignored/overwritten when partition_cols are provided. 
To request returning the metadata binary blob, pass `return_metadata=True`"""'], {}), "(\n 'metadata_file_path will be ignored/overwritten when partition_cols are provided. To request returning the metadata binary blob, pass `return_metadata=True`'\n )\n", (18953, 19122), False, 'import warnings\n'), ((17858, 17897), 'pyarrow.parquet.ParquetDataset', 'pq.ParquetDataset', (['filepaths_or_buffers'], {}), '(filepaths_or_buffers)\n', (17875, 17897), True, 'from pyarrow import dataset as ds, parquet as pq\n'), ((20136, 20154), 'cudf.api.types.is_list_like', 'is_list_like', (['path'], {}), '(path)\n', (20148, 20154), False, 'from cudf.api.types import is_list_like\n'), ((26211, 26231), 'numpy.roll', 'np.roll', (['offsets', '(-1)'], {}), '(offsets, -1)\n', (26218, 26231), True, 'import numpy as np\n'), ((19973, 20003), 'numpy.roll', 'np.roll', (['partition_offsets', '(-1)'], {}), '(partition_offsets, -1)\n', (19980, 20003), True, 'import numpy as np\n')] |
""" Benchmarks for QuickBundles
Run all benchmarks with::
import dipy.segment as dipysegment
dipysegment.bench()
With Pytest, Run this benchmark with:
pytest -svv -c bench.ini /path/to/bench_quickbundles.py
"""
import numpy as np
import nibabel as nib
from dipy.data import get_fnames
import dipy.tracking.streamline as streamline_utils
from dipy.segment.metric import Metric
from dipy.segment.quickbundles import QuickBundles as QB_Old
from dipy.segment.clustering import QuickBundles as QB_New
from numpy.testing import assert_equal
from dipy.testing import assert_arrays_equal
from numpy.testing import assert_array_equal, measure
class MDFpy(Metric):
    """Pure-Python average point-wise Euclidean distance metric."""

    def are_compatible(self, shape1, shape2):
        # Two feature arrays are comparable only when shapes match.
        return shape1 == shape2

    def dist(self, features1, features2):
        # Euclidean distance at each corresponding point, then the
        # mean over all points of the streamline.
        per_point = np.sqrt(((features1 - features2) ** 2).sum(axis=1))
        return np.sum(per_point / len(features1))
def bench_quickbundles():
    """Benchmark QuickBundles 1.0 against the 2.0 implementations.

    Loads the fornix bundle, resamples each streamline to 12 points,
    clusters eight translated copies (one per octant), and verifies
    that QB 2.0 (Cython metric and pure-Python ``MDFpy`` metric)
    reproduces QB 1.0's cluster count, sizes, and member indices,
    while timing each implementation.
    """
    dtype = "float32"
    repeat = 10
    nb_points = 12

    # NOTE(review): `nib.trackvis.read` is a legacy nibabel entry point —
    # confirm it still exists on the pinned nibabel version.
    streams, hdr = nib.trackvis.read(get_fnames('fornix'))
    fornix = [s[0].astype(dtype) for s in streams]
    fornix = streamline_utils.set_number_of_points(fornix, nb_points)

    # Create eight copies of the fornix to be clustered (one in each octant).
    streamlines = []
    streamlines += [s + np.array([100, 100, 100], dtype) for s in fornix]
    streamlines += [s + np.array([100, -100, 100], dtype) for s in fornix]
    streamlines += [s + np.array([100, 100, -100], dtype) for s in fornix]
    streamlines += [s + np.array([100, -100, -100], dtype) for s in fornix]
    streamlines += [s + np.array([-100, 100, 100], dtype) for s in fornix]
    streamlines += [s + np.array([-100, -100, 100], dtype) for s in fornix]
    streamlines += [s + np.array([-100, 100, -100], dtype) for s in fornix]
    streamlines += [s + np.array([-100, -100, -100], dtype) for s in fornix]

    # The expected number of clusters of the fornix using threshold=10 is 4.
    threshold = 10.
    expected_nb_clusters = 4 * 8

    print("Timing QuickBundles 1.0 vs. 2.0")

    # --- QuickBundles 1.0 (reference implementation) ---
    qb = QB_Old(streamlines, threshold, pts=None)
    qb1_time = measure("QB_Old(streamlines, threshold, nb_points)", repeat)
    print("QuickBundles time: {0:.4}sec".format(qb1_time))
    assert_equal(qb.total_clusters, expected_nb_clusters)
    sizes1 = [qb.partitions()[i]['N'] for i in range(qb.total_clusters)]
    indices1 = [qb.partitions()[i]['indices']
                for i in range(qb.total_clusters)]

    # --- QuickBundles 2.0 with the built-in (Cython) metric ---
    qb2 = QB_New(threshold)
    qb2_time = measure("clusters = qb2.cluster(streamlines)", repeat)
    print("QuickBundles2 time: {0:.4}sec".format(qb2_time))
    print("Speed up of {0}x".format(qb1_time / qb2_time))
    clusters = qb2.cluster(streamlines)

    sizes2 = map(len, clusters)
    indices2 = map(lambda c: c.indices, clusters)
    assert_equal(len(clusters), expected_nb_clusters)
    assert_array_equal(list(sizes2), sizes1)
    assert_arrays_equal(indices2, indices1)

    # --- QuickBundles 2.0 with the pure-Python MDFpy metric ---
    qb = QB_New(threshold, metric=MDFpy())
    qb3_time = measure("clusters = qb.cluster(streamlines)", repeat)
    print("QuickBundles2_python time: {0:.4}sec".format(qb3_time))
    print("Speed up of {0}x".format(qb1_time / qb3_time))
    clusters = qb.cluster(streamlines)

    sizes3 = map(len, clusters)
    indices3 = map(lambda c: c.indices, clusters)
    assert_equal(len(clusters), expected_nb_clusters)
    assert_array_equal(list(sizes3), sizes1)
    assert_arrays_equal(indices3, indices1)
| [
"dipy.segment.quickbundles.QuickBundles",
"numpy.testing.measure",
"dipy.segment.clustering.QuickBundles",
"numpy.testing.assert_equal",
"dipy.data.get_fnames",
"dipy.testing.assert_arrays_equal",
"dipy.tracking.streamline.set_number_of_points",
"numpy.sum",
"numpy.array"
] | [((1139, 1195), 'dipy.tracking.streamline.set_number_of_points', 'streamline_utils.set_number_of_points', (['fornix', 'nb_points'], {}), '(fornix, nb_points)\n', (1176, 1195), True, 'import dipy.tracking.streamline as streamline_utils\n'), ((2087, 2127), 'dipy.segment.quickbundles.QuickBundles', 'QB_Old', (['streamlines', 'threshold'], {'pts': 'None'}), '(streamlines, threshold, pts=None)\n', (2093, 2127), True, 'from dipy.segment.quickbundles import QuickBundles as QB_Old\n'), ((2143, 2203), 'numpy.testing.measure', 'measure', (['"""QB_Old(streamlines, threshold, nb_points)"""', 'repeat'], {}), "('QB_Old(streamlines, threshold, nb_points)', repeat)\n", (2150, 2203), False, 'from numpy.testing import assert_array_equal, measure\n'), ((2267, 2320), 'numpy.testing.assert_equal', 'assert_equal', (['qb.total_clusters', 'expected_nb_clusters'], {}), '(qb.total_clusters, expected_nb_clusters)\n', (2279, 2320), False, 'from numpy.testing import assert_equal\n'), ((2502, 2519), 'dipy.segment.clustering.QuickBundles', 'QB_New', (['threshold'], {}), '(threshold)\n', (2508, 2519), True, 'from dipy.segment.clustering import QuickBundles as QB_New\n'), ((2535, 2589), 'numpy.testing.measure', 'measure', (['"""clusters = qb2.cluster(streamlines)"""', 'repeat'], {}), "('clusters = qb2.cluster(streamlines)', repeat)\n", (2542, 2589), False, 'from numpy.testing import assert_array_equal, measure\n'), ((2933, 2972), 'dipy.testing.assert_arrays_equal', 'assert_arrays_equal', (['indices2', 'indices1'], {}), '(indices2, indices1)\n', (2952, 2972), False, 'from dipy.testing import assert_arrays_equal\n'), ((3032, 3085), 'numpy.testing.measure', 'measure', (['"""clusters = qb.cluster(streamlines)"""', 'repeat'], {}), "('clusters = qb.cluster(streamlines)', repeat)\n", (3039, 3085), False, 'from numpy.testing import assert_array_equal, measure\n'), ((3435, 3474), 'dipy.testing.assert_arrays_equal', 'assert_arrays_equal', (['indices3', 'indices1'], {}), '(indices3, indices1)\n', (3454, 
3474), False, 'from dipy.testing import assert_arrays_equal\n'), ((1053, 1073), 'dipy.data.get_fnames', 'get_fnames', (['"""fornix"""'], {}), "('fornix')\n", (1063, 1073), False, 'from dipy.data import get_fnames\n'), ((821, 865), 'numpy.sum', 'np.sum', (['((features1 - features2) ** 2)'], {'axis': '(1)'}), '((features1 - features2) ** 2, axis=1)\n', (827, 865), True, 'import numpy as np\n'), ((1320, 1352), 'numpy.array', 'np.array', (['[100, 100, 100]', 'dtype'], {}), '([100, 100, 100], dtype)\n', (1328, 1352), True, 'import numpy as np\n'), ((1394, 1427), 'numpy.array', 'np.array', (['[100, -100, 100]', 'dtype'], {}), '([100, -100, 100], dtype)\n', (1402, 1427), True, 'import numpy as np\n'), ((1469, 1502), 'numpy.array', 'np.array', (['[100, 100, -100]', 'dtype'], {}), '([100, 100, -100], dtype)\n', (1477, 1502), True, 'import numpy as np\n'), ((1544, 1578), 'numpy.array', 'np.array', (['[100, -100, -100]', 'dtype'], {}), '([100, -100, -100], dtype)\n', (1552, 1578), True, 'import numpy as np\n'), ((1620, 1653), 'numpy.array', 'np.array', (['[-100, 100, 100]', 'dtype'], {}), '([-100, 100, 100], dtype)\n', (1628, 1653), True, 'import numpy as np\n'), ((1695, 1729), 'numpy.array', 'np.array', (['[-100, -100, 100]', 'dtype'], {}), '([-100, -100, 100], dtype)\n', (1703, 1729), True, 'import numpy as np\n'), ((1771, 1805), 'numpy.array', 'np.array', (['[-100, 100, -100]', 'dtype'], {}), '([-100, 100, -100], dtype)\n', (1779, 1805), True, 'import numpy as np\n'), ((1847, 1882), 'numpy.array', 'np.array', (['[-100, -100, -100]', 'dtype'], {}), '([-100, -100, -100], dtype)\n', (1855, 1882), True, 'import numpy as np\n')] |
import datetime
import os
import copy
import json
import numpy as np
from pytz import timezone
from gamified_squad import GamifiedSquad
from agent import CustomAgent
import generic
import evaluate
# Save an intermediate model checkpoint every this-many training episodes.
SAVE_CHECKPOINT = 100000
def train():
    """Main training loop for the gamified-SQuAD RL agent.

    Plays batches of episodes against the environment, pushes interaction
    and question-answering experience into the agent's replay memories,
    periodically updates the networks, evaluates on the "valid" split,
    keeps the best checkpoint on disk, and logs progress to stdout, an
    optional visdom server, and a per-experiment JSON file.
    All settings come from ``generic.load_config()``.
    """
    time_1 = datetime.datetime.now()
    config = generic.load_config()
    env = GamifiedSquad(config)
    env.split_reset("train")
    agent = CustomAgent(config, env.has_token_set)
    if config["general"]["visdom"]:
        # visdom — lazily imported so the dependency is optional;
        # window handles start as None and are created on first plot.
        import visdom
        viz = visdom.Visdom()
        plt_win = None
        eval_plt_win = None
        plt_q_value_win = None
        plt_steps_win = None
        eval_plt_steps_win = None
        viz_avg_ig_acc, viz_avg_qa_acc = [], []
        viz_avg_ig_q_value = []
        viz_eval_ig_acc, viz_eval_qa_acc, viz_eval_steps = [], [], []
        viz_avg_steps = []
    # global counters: env steps across all episodes, batches, episodes
    step_in_total = 0
    batch_no = 0
    episode_no = 0
    # sliding-window (last 50) averages used for console reporting
    running_avg_qa_acc = generic.HistoryScoreCache(capacity=50)
    running_avg_ig_acc = generic.HistoryScoreCache(capacity=50)
    running_avg_qa_loss = generic.HistoryScoreCache(capacity=50)
    running_avg_ig_loss = generic.HistoryScoreCache(capacity=50)
    running_avg_ig_q_value = generic.HistoryScoreCache(capacity=50)
    running_avg_steps = generic.HistoryScoreCache(capacity=50)
    # checkpoints and the JSON log are written to the working directory
    output_dir = "."
    data_dir = "."
    json_file_name = agent.experiment_tag.replace(" ", "_")
    best_qa_acc_so_far = 0.0
    prev_performance = 0.0
    i_am_patient = 0
    # load model from checkpoint
    if os.path.exists(output_dir + "/" + agent.experiment_tag + "_model.pt"):
        # refuse to overwrite an existing experiment checkpoint
        print("checkpoint already exist.")
        exit(0)
    if os.path.exists(data_dir + "/" + agent.load_graph_generation_model_from_tag + ".pt"):
        agent.load_pretrained_graph_generation_model(data_dir + "/" + agent.load_graph_generation_model_from_tag + ".pt")
    if agent.load_pretrained:
        if os.path.exists(data_dir + "/" + agent.load_from_tag + ".pt"):
            agent.load_pretrained_model(data_dir + "/" + agent.load_from_tag + ".pt")  # load partial graph
            agent.update_target_net()
    # ---- main episode loop: one iteration plays one batch of episodes ----
    while(True):
        if episode_no > agent.max_episode:
            break
        # seed env and numpy per episode for reproducibility
        np.random.seed(episode_no)
        env.seed(episode_no)
        obs, infos = env.reset()
        batch_size = len(obs)
        # `report` fires for the first batch of each report_frequency window;
        # `__save__` likewise fires once per SAVE_CHECKPOINT window
        report = agent.report_frequency > 0 and (episode_no % agent.report_frequency <= max(episode_no - batch_size, 0) % agent.report_frequency)
        __save__ = episode_no % SAVE_CHECKPOINT <= max(episode_no - batch_size, 0) % SAVE_CHECKPOINT
        if report:
            print("====================================================================================", episode_no)
            print("-- Q: %s" % (agent.bert_tokenizer.decode(infos[0]["q"]).encode('utf-8')))
            print("-- A: %s" % (infos[0]["a_string"][0].encode('utf-8')))
        agent.train()
        agent.init(obs, infos)
        quest_list = agent.get_game_quest_info(infos)
        agent.kg.push_batch_question(quest_list, [item["q_srl"] for item in infos])
        previous_dynamics = None
        previous_belief = None
        input_quest, input_quest_mask, quest_id_list = agent.get_agent_inputs(quest_list)
        tmp_replay_buffer = []
        print_cmds = []
        prev_commands = ["restart" for _ in range(batch_size)]
        belief_buffer = []
        # act randomly before learning starts unless noisy-net exploration is on
        act_randomly = False if agent.noisy_net else episode_no < agent.learn_start_from_this_episode
        # ---- interaction phase: roll the batch forward step by step ----
        for _ in range(agent.max_nb_steps_per_episode):
            # generate commands
            if agent.noisy_net:
                agent.reset_noise()  # Draw a new set of noisy weights
            commands, replay_info, current_dynamics, current_belief = agent.act(obs, infos, input_quest, input_quest_mask, quest_id_list, prev_commands, previous_dynamics, previous_belief, random=act_randomly)
            tmp_replay_buffer.append(replay_info)
            obs, infos = env.step(commands)
            prev_commands = commands
            previous_dynamics = current_dynamics
            previous_belief = current_belief
            belief_buffer.append(current_belief)
            if agent.noisy_net and step_in_total % agent.update_per_k_game_steps == 0:
                agent.reset_noise()  # Draw a new set of noisy weights
            if episode_no >= agent.learn_start_from_this_episode and step_in_total % agent.update_per_k_game_steps == 0:
                interaction_loss, interaction_q_value = agent.update_interaction()
                if interaction_loss is not None:
                    running_avg_ig_loss.push(interaction_loss)
                    running_avg_ig_q_value.push(interaction_q_value)
                qa_loss = agent.update_qa()
                if qa_loss is not None:
                    running_avg_qa_loss.push(qa_loss)
            step_in_total += 1
            # last element of replay_info is the still-running mask for the batch
            still_running = generic.to_np(replay_info[-1])
            print_cmds.append(commands[0] if still_running[0] else "--")
            if np.sum(still_running) == 0:
                break
        if report:
            print(" / ".join(print_cmds).encode('utf-8'))
        # The agent has exhausted all steps, now answer question.
        chosen_head_tails = agent.answer_question_act(agent.naozi.get(), quest_list, current_belief)  # batch
        chosen_head_tails_np = generic.to_np(chosen_head_tails)
        chosen_answer_strings = generic.get_answer_strings(agent.naozi.get(), chosen_head_tails_np, agent.bert_tokenizer, agent.special_token_ids)
        answer_strings = [item["a_string"] for item in infos]
        answer_token_ids = [item["a"] for item in infos]
        qa_reward_np = generic.get_qa_reward(chosen_answer_strings, answer_strings)
        obs_strings = [agent.bert_tokenizer.decode(agent.naozi.get(i)) for i in range(batch_size)]
        ig_reward_np = generic.get_sufficient_info_reward(agent.naozi.get(), answer_token_ids)
        ig_reward = generic.to_pt(ig_reward_np, enable_cuda=False, type='float')  # batch
        # push qa experience into qa replay buffer
        replay_node_vocab = agent.kg.get_node_vocabulary()
        replay_relation_vocab = agent.kg.get_relation_vocabulary()
        replay_triplets = agent.kg.get_triplets()
        for b in range(batch_size):  # data points in batch
            is_prior = qa_reward_np[b] > agent.qa_reward_prior_threshold * agent.qa_replay_memory.avg_rewards()
            # if the agent is not in the correct state, do not push it into replay buffer
            if np.mean(ig_reward_np[b]) == 0.0:
                continue
            agent.qa_replay_memory.push(is_prior, qa_reward_np[b], agent.naozi.get_sentence_lists(b), quest_list[b], replay_node_vocab[b], replay_relation_vocab[b], replay_triplets[b], answer_token_ids[b], belief_buffer[-1][b].cpu() if belief_buffer[-1][b] is not None else None)
        # small positive reward whenever it answers question correctly
        masks_np = [generic.to_np(item[-1]) for item in tmp_replay_buffer]
        command_rewards_np = []
        for i in range(len(tmp_replay_buffer)):
            if i == len(tmp_replay_buffer) - 1:
                r = ig_reward * tmp_replay_buffer[i][-1]
                r_np = ig_reward_np * masks_np[i]
            else:
                # give reward only at that one game step, not all
                r = ig_reward * (tmp_replay_buffer[i][-1] - tmp_replay_buffer[i + 1][-1])
                r_np = ig_reward_np * (masks_np[i] - masks_np[i + 1])
            tmp_replay_buffer[i].append(r)
            command_rewards_np.append(r_np)
        command_rewards_np = np.array(command_rewards_np)
        if report:
            print(command_rewards_np[:, 0])
        # push experience into replay buffer
        for b in range(len(ig_reward_np)):
            # prioritize trajectories that earned any reward
            is_prior = np.sum(command_rewards_np, 0)[b] > 0.0
            mem = []
            for i in range(len(tmp_replay_buffer)):
                batch_description_list, batch_chosen_indices, batch_chosen_ctrlf_indices, batch_graph_node_vocabulary, batch_graph_relation_vocabulary, batch_graph_triplets, _, batch_rewards = tmp_replay_buffer[i]
                mem.append([copy.deepcopy(batch_description_list[b]),
                            copy.deepcopy(quest_list[b]),
                            batch_chosen_indices[b],
                            batch_chosen_ctrlf_indices[b],
                            copy.deepcopy(batch_graph_node_vocabulary[b]),
                            copy.deepcopy(batch_graph_relation_vocabulary[b]),
                            copy.deepcopy(batch_graph_triplets[b]),
                            copy.deepcopy(belief_buffer[i][b].cpu()) if belief_buffer[i][b] is not None else None,
                            batch_rewards[b]])
                if masks_np[i][b] == 0.0:
                    # episode b already finished at step i; drop the padding tail
                    break
            agent.replay_memory.push(is_prior, mem)
        qa_acc = np.mean(qa_reward_np)
        ig_acc = np.mean(ig_reward_np)
        step_masks_np = np.sum(np.array(masks_np), 0)  # batch
        for i in range(len(qa_reward_np)):
            # if the answer is totally wrong, we assume it used all steps
            if qa_reward_np[i] == 0.0:
                step_masks_np[i] = agent.max_nb_steps_per_episode
        used_steps = np.mean(step_masks_np)
        running_avg_qa_acc.push(qa_acc)
        running_avg_ig_acc.push(ig_acc)
        running_avg_steps.push(used_steps)
        print_rewards = np.sum(np.mean(command_rewards_np, -1))
        if report:
            print("-- OBS: %s" % (obs_strings[0].encode('utf-8')))
            print("-- PRED: %s" % (chosen_answer_strings[0].encode('utf-8')))
        # finish game
        agent.finish_of_episode(episode_no, batch_no, batch_size)
        time_2 = datetime.datetime.now()
        eastern_time = datetime.datetime.now(timezone('US/Eastern')).strftime("%b %d %Y %H:%M:%S")
        if report:
            print("Episode: {:3d} | {:s} | time spent: {:s} | interaction loss: {:2.3f} | interaction qvalue: {:2.3f} | qa loss: {:2.3f} | rewards: {:2.3f} | qa acc: {:2.3f}/{:2.3f} | sufficient info: {:2.3f}/{:2.3f} | used steps: {:2.3f}".format(episode_no, eastern_time, str(time_2 - time_1).rsplit(".")[0], running_avg_ig_loss.get_avg(), running_avg_ig_q_value.get_avg(), running_avg_qa_loss.get_avg(), print_rewards, qa_acc, running_avg_qa_acc.get_avg(), ig_acc, running_avg_ig_acc.get_avg(), running_avg_steps.get_avg()))
        if __save__:
            agent.save_model_to_path(output_dir + "/" + agent.experiment_tag + "_ep" + str(episode_no) + "_model.pt")
        if not report or episode_no < agent.learn_start_from_this_episode:
            # not a reporting boundary (or still warming up): skip eval/plot/log
            episode_no += batch_size
            batch_no += 1
            continue
        eval_qa_acc, eval_ig_acc, eval_used_steps = 0.0, 0.0, 0.0
        # evaluate
        if agent.run_eval:
            eval_qa_acc, eval_ig_acc, eval_used_steps = evaluate.evaluate(env, agent, "valid")
            env.split_reset("train")
            # if run eval, then save model by eval accucacy
            if eval_qa_acc >= best_qa_acc_so_far:
                best_qa_acc_so_far = eval_qa_acc
                agent.save_model_to_path(output_dir + "/" + agent.experiment_tag + "_model.pt")
            curr_performance = eval_qa_acc
        else:
            # no eval split: fall back to the running train QA accuracy
            if running_avg_qa_acc.get_avg() >= best_qa_acc_so_far:
                best_qa_acc_so_far = running_avg_qa_acc.get_avg()
                agent.save_model_to_path(output_dir + "/" + agent.experiment_tag + "_model.pt")
            curr_performance = running_avg_qa_acc.get_avg()
        if prev_performance <= curr_performance:
            i_am_patient = 0
        else:
            i_am_patient += 1
        prev_performance = curr_performance
        # if patient >= patience, resume from checkpoint
        if agent.patience > 0 and i_am_patient >= agent.patience:
            if os.path.exists(output_dir + "/" + agent.experiment_tag + "_model.pt"):
                print('reload from a good checkpoint...')
                agent.load_pretrained_model(output_dir + "/" + agent.experiment_tag + "_model.pt", load_partial_graph=False)
                agent.update_target_net()
                i_am_patient = 0
        # plot using visdom
        if config["general"]["visdom"] and not agent.debug_mode:
            viz_avg_ig_acc.append(running_avg_ig_acc.get_avg())
            viz_avg_qa_acc.append(running_avg_qa_acc.get_avg())
            viz_avg_ig_q_value.append(running_avg_ig_q_value.get_avg())
            viz_eval_ig_acc.append(eval_ig_acc)
            viz_eval_qa_acc.append(eval_qa_acc)
            viz_eval_steps.append(eval_used_steps)
            viz_avg_steps.append(running_avg_steps.get_avg())
            viz_x = np.arange(len(viz_avg_ig_acc)).tolist()
            # first call creates each window; later calls append a single point
            if plt_win is None:
                plt_win = viz.line(X=viz_x, Y=viz_avg_ig_acc,
                                   opts=dict(title=agent.experiment_tag + "_train"),
                                   name="sufficient info")
                viz.line(X=viz_x, Y=viz_avg_qa_acc,
                         opts=dict(title=agent.experiment_tag + "_train"),
                         win=plt_win, update='append', name="qa")
            else:
                viz.line(X=[len(viz_avg_ig_acc) - 1], Y=[viz_avg_ig_acc[-1]],
                         opts=dict(title=agent.experiment_tag + "_train"),
                         win=plt_win,
                         update='append', name="sufficient info")
                viz.line(X=[len(viz_avg_qa_acc) - 1], Y=[viz_avg_qa_acc[-1]],
                         opts=dict(title=agent.experiment_tag + "_train"),
                         win=plt_win,
                         update='append', name="qa")
            if plt_q_value_win is None:
                plt_q_value_win = viz.line(X=viz_x, Y=viz_avg_ig_q_value,
                                           opts=dict(title=agent.experiment_tag + "_train_q_value"),
                                           name="sufficient info")
            else:
                viz.line(X=[len(viz_avg_ig_q_value) - 1], Y=[viz_avg_ig_q_value[-1]],
                         opts=dict(title=agent.experiment_tag + "_train_q_value"),
                         win=plt_q_value_win,
                         update='append', name="sufficient info")
            if plt_steps_win is None:
                plt_steps_win = viz.line(X=viz_x, Y=viz_avg_steps,
                                         opts=dict(title=agent.experiment_tag + "_train_step"),
                                         name="used steps")
            else:
                viz.line(X=[len(viz_avg_steps) - 1], Y=[viz_avg_steps[-1]],
                         opts=dict(title=agent.experiment_tag + "_train_step"),
                         win=plt_steps_win,
                         update='append', name="used steps")
            if agent.run_eval:
                if eval_plt_win is None:
                    eval_plt_win = viz.line(X=viz_x, Y=viz_eval_ig_acc,
                                            opts=dict(title=agent.experiment_tag + "_eval"),
                                            name="sufficient info")
                    viz.line(X=viz_x, Y=viz_eval_qa_acc,
                             opts=dict(title=agent.experiment_tag + "_eval"),
                             win=eval_plt_win, update='append', name="qa")
                else:
                    viz.line(X=[len(viz_eval_ig_acc) - 1], Y=[viz_eval_ig_acc[-1]],
                             opts=dict(title=agent.experiment_tag + "_eval"),
                             win=eval_plt_win,
                             update='append', name="sufficient info")
                    viz.line(X=[len(viz_eval_qa_acc) - 1], Y=[viz_eval_qa_acc[-1]],
                             opts=dict(title=agent.experiment_tag + "_eval"),
                             win=eval_plt_win,
                             update='append', name="qa")
                if eval_plt_steps_win is None:
                    eval_plt_steps_win = viz.line(X=viz_x, Y=viz_eval_steps,
                                                  opts=dict(title=agent.experiment_tag + "_eval_step"),
                                                  name="used steps")
                else:
                    # NOTE(review): X uses len(viz_avg_steps) while Y plots
                    # viz_eval_steps — likely should be len(viz_eval_steps);
                    # confirm before relying on this curve.
                    viz.line(X=[len(viz_avg_steps) - 1], Y=[viz_eval_steps[-1]],
                             opts=dict(title=agent.experiment_tag + "_eval_step"),
                             win=eval_plt_steps_win,
                             update='append', name="used steps")
        # write accucacies down into file
        _s = json.dumps({"time spent": str(time_2 - time_1).rsplit(".")[0],
                         "sufficient info": str(running_avg_ig_acc.get_avg()),
                         "qa": str(running_avg_qa_acc.get_avg()),
                         "sufficient qvalue": str(running_avg_ig_q_value.get_avg()),
                         "eval sufficient info": str(eval_ig_acc),
                         "eval qa": str(eval_qa_acc),
                         "eval steps": str(eval_used_steps),
                         "used steps": str(running_avg_steps.get_avg())})
        with open(output_dir + "/" + json_file_name + '.json', 'a+') as outfile:
            outfile.write(_s + '\n')
            outfile.flush()
        episode_no += batch_size
        batch_no += 1
# Script entry point: start training when executed directly.
if __name__ == '__main__':
    train()
| [
"generic.HistoryScoreCache",
"os.path.exists",
"agent.CustomAgent",
"numpy.mean",
"pytz.timezone",
"generic.to_np",
"datetime.datetime.now",
"numpy.array",
"generic.to_pt",
"numpy.sum",
"numpy.random.seed",
"gamified_squad.GamifiedSquad",
"copy.deepcopy",
"evaluate.evaluate",
"generic.lo... | [((252, 275), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (273, 275), False, 'import datetime\n'), ((289, 310), 'generic.load_config', 'generic.load_config', ([], {}), '()\n', (308, 310), False, 'import generic\n'), ((321, 342), 'gamified_squad.GamifiedSquad', 'GamifiedSquad', (['config'], {}), '(config)\n', (334, 342), False, 'from gamified_squad import GamifiedSquad\n'), ((384, 422), 'agent.CustomAgent', 'CustomAgent', (['config', 'env.has_token_set'], {}), '(config, env.has_token_set)\n', (395, 422), False, 'from agent import CustomAgent\n'), ((936, 974), 'generic.HistoryScoreCache', 'generic.HistoryScoreCache', ([], {'capacity': '(50)'}), '(capacity=50)\n', (961, 974), False, 'import generic\n'), ((1000, 1038), 'generic.HistoryScoreCache', 'generic.HistoryScoreCache', ([], {'capacity': '(50)'}), '(capacity=50)\n', (1025, 1038), False, 'import generic\n'), ((1065, 1103), 'generic.HistoryScoreCache', 'generic.HistoryScoreCache', ([], {'capacity': '(50)'}), '(capacity=50)\n', (1090, 1103), False, 'import generic\n'), ((1130, 1168), 'generic.HistoryScoreCache', 'generic.HistoryScoreCache', ([], {'capacity': '(50)'}), '(capacity=50)\n', (1155, 1168), False, 'import generic\n'), ((1198, 1236), 'generic.HistoryScoreCache', 'generic.HistoryScoreCache', ([], {'capacity': '(50)'}), '(capacity=50)\n', (1223, 1236), False, 'import generic\n'), ((1261, 1299), 'generic.HistoryScoreCache', 'generic.HistoryScoreCache', ([], {'capacity': '(50)'}), '(capacity=50)\n', (1286, 1299), False, 'import generic\n'), ((1523, 1592), 'os.path.exists', 'os.path.exists', (["(output_dir + '/' + agent.experiment_tag + '_model.pt')"], {}), "(output_dir + '/' + agent.experiment_tag + '_model.pt')\n", (1537, 1592), False, 'import os\n'), ((1661, 1748), 'os.path.exists', 'os.path.exists', (["(data_dir + '/' + agent.load_graph_generation_model_from_tag + '.pt')"], {}), "(data_dir + '/' + agent.load_graph_generation_model_from_tag +\n '.pt')\n", (1675, 1748), 
False, 'import os\n'), ((514, 529), 'visdom.Visdom', 'visdom.Visdom', ([], {}), '()\n', (527, 529), False, 'import visdom\n'), ((1909, 1969), 'os.path.exists', 'os.path.exists', (["(data_dir + '/' + agent.load_from_tag + '.pt')"], {}), "(data_dir + '/' + agent.load_from_tag + '.pt')\n", (1923, 1969), False, 'import os\n'), ((2204, 2230), 'numpy.random.seed', 'np.random.seed', (['episode_no'], {}), '(episode_no)\n', (2218, 2230), True, 'import numpy as np\n'), ((5347, 5379), 'generic.to_np', 'generic.to_np', (['chosen_head_tails'], {}), '(chosen_head_tails)\n', (5360, 5379), False, 'import generic\n'), ((5670, 5730), 'generic.get_qa_reward', 'generic.get_qa_reward', (['chosen_answer_strings', 'answer_strings'], {}), '(chosen_answer_strings, answer_strings)\n', (5691, 5730), False, 'import generic\n'), ((5945, 6005), 'generic.to_pt', 'generic.to_pt', (['ig_reward_np'], {'enable_cuda': '(False)', 'type': '"""float"""'}), "(ig_reward_np, enable_cuda=False, type='float')\n", (5958, 6005), False, 'import generic\n'), ((7600, 7628), 'numpy.array', 'np.array', (['command_rewards_np'], {}), '(command_rewards_np)\n', (7608, 7628), True, 'import numpy as np\n'), ((8900, 8921), 'numpy.mean', 'np.mean', (['qa_reward_np'], {}), '(qa_reward_np)\n', (8907, 8921), True, 'import numpy as np\n'), ((8939, 8960), 'numpy.mean', 'np.mean', (['ig_reward_np'], {}), '(ig_reward_np)\n', (8946, 8960), True, 'import numpy as np\n'), ((9267, 9289), 'numpy.mean', 'np.mean', (['step_masks_np'], {}), '(step_masks_np)\n', (9274, 9289), True, 'import numpy as np\n'), ((9750, 9773), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9771, 9773), False, 'import datetime\n'), ((4892, 4922), 'generic.to_np', 'generic.to_np', (['replay_info[-1]'], {}), '(replay_info[-1])\n', (4905, 4922), False, 'import generic\n'), ((6950, 6973), 'generic.to_np', 'generic.to_np', (['item[-1]'], {}), '(item[-1])\n', (6963, 6973), False, 'import generic\n'), ((8992, 9010), 'numpy.array', 'np.array', 
(['masks_np'], {}), '(masks_np)\n', (9000, 9010), True, 'import numpy as np\n'), ((9445, 9476), 'numpy.mean', 'np.mean', (['command_rewards_np', '(-1)'], {}), '(command_rewards_np, -1)\n', (9452, 9476), True, 'import numpy as np\n'), ((10888, 10926), 'evaluate.evaluate', 'evaluate.evaluate', (['env', 'agent', '"""valid"""'], {}), "(env, agent, 'valid')\n", (10905, 10926), False, 'import evaluate\n'), ((11871, 11940), 'os.path.exists', 'os.path.exists', (["(output_dir + '/' + agent.experiment_tag + '_model.pt')"], {}), "(output_dir + '/' + agent.experiment_tag + '_model.pt')\n", (11885, 11940), False, 'import os\n'), ((5011, 5032), 'numpy.sum', 'np.sum', (['still_running'], {}), '(still_running)\n', (5017, 5032), True, 'import numpy as np\n'), ((6520, 6544), 'numpy.mean', 'np.mean', (['ig_reward_np[b]'], {}), '(ig_reward_np[b])\n', (6527, 6544), True, 'import numpy as np\n'), ((7805, 7834), 'numpy.sum', 'np.sum', (['command_rewards_np', '(0)'], {}), '(command_rewards_np, 0)\n', (7811, 7834), True, 'import numpy as np\n'), ((9819, 9841), 'pytz.timezone', 'timezone', (['"""US/Eastern"""'], {}), "('US/Eastern')\n", (9827, 9841), False, 'from pytz import timezone\n'), ((8159, 8199), 'copy.deepcopy', 'copy.deepcopy', (['batch_description_list[b]'], {}), '(batch_description_list[b])\n', (8172, 8199), False, 'import copy\n'), ((8230, 8258), 'copy.deepcopy', 'copy.deepcopy', (['quest_list[b]'], {}), '(quest_list[b])\n', (8243, 8258), False, 'import copy\n'), ((8403, 8448), 'copy.deepcopy', 'copy.deepcopy', (['batch_graph_node_vocabulary[b]'], {}), '(batch_graph_node_vocabulary[b])\n', (8416, 8448), False, 'import copy\n'), ((8479, 8528), 'copy.deepcopy', 'copy.deepcopy', (['batch_graph_relation_vocabulary[b]'], {}), '(batch_graph_relation_vocabulary[b])\n', (8492, 8528), False, 'import copy\n'), ((8559, 8597), 'copy.deepcopy', 'copy.deepcopy', (['batch_graph_triplets[b]'], {}), '(batch_graph_triplets[b])\n', (8572, 8597), False, 'import copy\n')] |
import argparse
import os
import traceback
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
import scipy.io
import scipy.misc
import numpy as np
import pandas as pd
import PIL
from cv2 import cv2
import time
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Lambda, Conv2D
from keras.models import load_model, Model
from ObjectDetection.yad2k.models.keras_yolo import yolo_head, yolo_boxes_to_corners, preprocess_true_boxes, yolo_loss, yolo_body
from ObjectDetection.Preprocessing import ReadAnchors, ReadClasses, PreprocessImageHybrid, ScaleBoxes, GenerateColors, DrawBoxes
from ObjectDetection.Postprocessing import YoloEval, YoloFilterBoxes, YoloNonMaxSuppression
from ObjectDetection.ExceptionHandler import RetryError
# Main Utility Functions
def PredictNetwork(sess, yoloModel, FRModel, database, image, classNames, scores, boxes, classes):
    '''Run one detection pass over a single image and draw the results.

    Arguments:
        sess {keras sess} -- active Keras/TensorFlow session
        yoloModel {model} -- loaded YOLO model
        FRModel {model} -- face recognition model
        database {dict} -- known face encodings
        image {image} -- input image to process
        classNames {list} -- names of all predictable classes
        scores {tensor} -- per-prediction probability tensor
        boxes {tensor} -- bounding-box tensor
        classes {tensor} -- predicted-class tensor
    Return:
        cv2image -- BGR cv2 image with predictions drawn on it
    '''
    try:
        # Resize/normalize for the network; keep a PIL copy for drawing.
        pil_image, model_input = PreprocessImageHybrid(image, modelImageSize = (608, 608))
        # Run inference (learning phase 0 = inference mode).
        feed = {yoloModel.input: model_input, K.learning_phase(): 0}
        out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict = feed)
        # One color per class, then render boxes/labels onto the PIL image.
        box_colors = GenerateColors(classNames)
        DrawBoxes(pil_image, out_scores, out_boxes, out_classes, classNames, box_colors, FRModel, database)
        # Hand back a BGR array so cv2 callers can display/save it directly.
        return cv2.cvtColor(np.asarray(pil_image), cv2.COLOR_RGB2BGR)
    except Exception as err:
        print("Fatal Error: ", err)
        traceback.print_exc()
        exit(1)
def PredictNodeCam(sess, yoloModel, FRmodel, database, classNames, scores, boxes, classes):
    '''Realtime prediction loop on the local camera using cv2.

    Press 'q' in the output window to stop.

    Arguments:
        sess {Keras session} -- session with the model loaded
        yoloModel {model} -- loaded YOLO model
        FRmodel {model} -- face recognition model
        database {dict} -- known face encodings
        classNames {list} -- names of all predictable classes
        scores {tensor} -- per-prediction probability tensor
        boxes {tensor} -- bounding-box tensor
        classes {tensor} -- predicted-class tensor
    Return:
        outScores -- scores of the last frame's predicted boxes (None if no
                     frame was ever processed)
        outBoxes -- coordinates of the last frame's predicted boxes
        outClasses -- class indices of the last frame's predicted boxes
    '''
    camera = cv2.VideoCapture(cv2.CAP_DSHOW)
    # Fix: initialize the results up front. Previously, a failure before the
    # first successful sess.run (e.g. camera read error on the first frame)
    # raised UnboundLocalError at the final return.
    outScores, outBoxes, outClasses = None, None, None
    try:
        while True:
            # Exiting mechanism: 'q' key stops the loop.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                raise KeyboardInterrupt
            # Read video frame by frame
            status, image = camera.read()
            if not status:
                raise IOError
            # Preprocess the frame with cv2 and Pillow
            image, imageData = PreprocessImageHybrid(image, modelImageSize = (608, 608))
            # Feed the image into the model (learning phase 0 = inference)
            outScores, outBoxes, outClasses = sess.run(
                [scores, boxes, classes],
                feed_dict = {yoloModel.input: imageData, K.learning_phase(): 0})
            # Generate one color per class and draw the prediction boxes
            colors = GenerateColors(classNames)
            DrawBoxes(image, outScores, outBoxes, outClasses, classNames, colors, FRmodel, database)
            # Convert back to a BGR cv2 image and display it
            cv2image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
            cv2.imshow("output", cv2image)
    except KeyboardInterrupt:
        print("[+] Releasing camera and shuting it down")
    except IOError:
        print("[+] Read Camera error")
    except Exception as err:
        print("[+] This is bad, we don't what error is this?!!")
        print("[+] Send us a mail to check it out")
        print("[+] You Faced the following error: ", err)
        check = str(input("[+] Do you want to print the traceback error? (Y/N): ")).lower()
        if check == "y":
            traceback.print_exc()
    finally:
        # Always release the camera handle and close cv2 windows.
        camera.release()
        cv2.destroyAllWindows()
    return outScores, outBoxes, outClasses
return outScores, outBoxes, outClasses
def ModelLoader(modelPath: str):
    '''Loads the trained Keras model.

    Arguments:
        modelPath {str} -- file path of the trained model (.h5)
    Return:
        yoloModel -- loaded yolo model, or None when loading failed
        status {bool} -- True iff the model was loaded successfully
        retry {bool} -- True when the user asked to try again after a failure

    Fixes over the previous version: (1) ``return`` inside ``finally``
    silently swallowed the RetryError raised in the handlers; (2) on every
    failure path ``yoloModel``/``retry`` were unbound, so the function
    crashed with UnboundLocalError instead of reporting the error; (3) the
    sibling ``except RetryError`` clause was unreachable for exceptions
    raised inside the other handlers. All three results are now initialized
    up front and the retry prompt is handled inline, so the function always
    returns a well-defined (model, status, retry) triple.
    '''
    yoloModel = None
    status = False
    retry = False
    try:
        print("[+] Loading Trained Yolo Model")
        yoloModel = load_model(modelPath)
        status = True
    except ValueError:
        # load_model raises ValueError for files that are not valid models
        print("[+] Invalid model file type please enter .h5 type file")
        check = input("[+] You Can Try again. Do you wish to?(Y/N): ").lower()
        retry = check == "y"
    except (ImportError, IOError, OSError):
        # missing or unreadable file path
        print("[+] Invalid file path, please check if file path exist")
        check = input("[+] You Can Try again. Do you wish to?(Y/N): ").lower()
        retry = check == "y"
    except Exception as err:
        print("[+] This is bad, we don't what error is this?!!")
        print("[+] Send us a mail to check it out")
        print("[+] You Faced the following error: ", err)
        check = str(input("[+] Do you want to print the traceback error? (Y/N): ")).lower()
        if check == "y":
            traceback.print_exc()
    return yoloModel, status, retry
| [
"cv2.cv2.VideoCapture",
"keras.models.load_model",
"ObjectDetection.Preprocessing.GenerateColors",
"keras.backend.learning_phase",
"cv2.cv2.waitKey",
"ObjectDetection.Preprocessing.DrawBoxes",
"numpy.asarray",
"cv2.cv2.destroyAllWindows",
"traceback.print_exc",
"ObjectDetection.Preprocessing.Prepr... | [((3303, 3334), 'cv2.cv2.VideoCapture', 'cv2.VideoCapture', (['cv2.CAP_DSHOW'], {}), '(cv2.CAP_DSHOW)\n', (3319, 3334), False, 'from cv2 import cv2\n'), ((1563, 1618), 'ObjectDetection.Preprocessing.PreprocessImageHybrid', 'PreprocessImageHybrid', (['image'], {'modelImageSize': '(608, 608)'}), '(image, modelImageSize=(608, 608))\n', (1584, 1618), False, 'from ObjectDetection.Preprocessing import ReadAnchors, ReadClasses, PreprocessImageHybrid, ScaleBoxes, GenerateColors, DrawBoxes\n'), ((1840, 1866), 'ObjectDetection.Preprocessing.GenerateColors', 'GenerateColors', (['classNames'], {}), '(classNames)\n', (1854, 1866), False, 'from ObjectDetection.Preprocessing import ReadAnchors, ReadClasses, PreprocessImageHybrid, ScaleBoxes, GenerateColors, DrawBoxes\n'), ((1905, 1997), 'ObjectDetection.Preprocessing.DrawBoxes', 'DrawBoxes', (['image', 'outScores', 'outBoxes', 'outClasses', 'classNames', 'colors', 'FRModel', 'database'], {}), '(image, outScores, outBoxes, outClasses, classNames, colors,\n FRModel, database)\n', (1914, 1997), False, 'from ObjectDetection.Preprocessing import ReadAnchors, ReadClasses, PreprocessImageHybrid, ScaleBoxes, GenerateColors, DrawBoxes\n'), ((4919, 4942), 'cv2.cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4940, 4942), False, 'from cv2 import cv2\n'), ((5414, 5435), 'keras.models.load_model', 'load_model', (['modelPath'], {}), '(modelPath)\n', (5424, 5435), False, 'from keras.models import load_model, Model\n'), ((2062, 2079), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (2072, 2079), True, 'import numpy as np\n'), ((2198, 2219), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2217, 2219), False, 'import traceback\n'), ((3739, 3794), 'ObjectDetection.Preprocessing.PreprocessImageHybrid', 'PreprocessImageHybrid', (['image'], {'modelImageSize': '(608, 608)'}), '(image, modelImageSize=(608, 608))\n', (3760, 3794), False, 'from 
ObjectDetection.Preprocessing import ReadAnchors, ReadClasses, PreprocessImageHybrid, ScaleBoxes, GenerateColors, DrawBoxes\n'), ((4033, 4059), 'ObjectDetection.Preprocessing.GenerateColors', 'GenerateColors', (['classNames'], {}), '(classNames)\n', (4047, 4059), False, 'from ObjectDetection.Preprocessing import ReadAnchors, ReadClasses, PreprocessImageHybrid, ScaleBoxes, GenerateColors, DrawBoxes\n'), ((4107, 4199), 'ObjectDetection.Preprocessing.DrawBoxes', 'DrawBoxes', (['image', 'outScores', 'outBoxes', 'outClasses', 'classNames', 'colors', 'FRmodel', 'database'], {}), '(image, outScores, outBoxes, outClasses, classNames, colors,\n FRmodel, database)\n', (4116, 4199), False, 'from ObjectDetection.Preprocessing import ReadAnchors, ReadClasses, PreprocessImageHybrid, ScaleBoxes, GenerateColors, DrawBoxes\n'), ((4324, 4354), 'cv2.cv2.imshow', 'cv2.imshow', (['"""output"""', 'cv2image'], {}), "('output', cv2image)\n", (4334, 4354), False, 'from cv2 import cv2\n'), ((4273, 4290), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (4283, 4290), True, 'import numpy as np\n'), ((4846, 4867), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (4865, 4867), False, 'import traceback\n'), ((6308, 6329), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (6327, 6329), False, 'import traceback\n'), ((1773, 1791), 'keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (1789, 1791), True, 'from keras import backend as K\n'), ((3412, 3426), 'cv2.cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3423, 3426), False, 'from cv2 import cv2\n'), ((3958, 3976), 'keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (3974, 3976), True, 'from keras import backend as K\n')] |
import numpy as np
from phonopy import Phonopy
from phonopy.interface.vasp import read_vasp
from phonopy.file_IO import parse_FORCE_SETS, parse_BORN
from phonopy.structure.atoms import PhonopyAtoms
def append_band(bands, q_start, q_end):
band = []
for i in range(51):
band.append(np.array(q_start) +
(np.array(q_end) - np.array(q_start)) / 50 * i)
bands.append(band)
# NaCl crystal structure is read from POSCAR.
unitcell = read_vasp("POSCAR")
# This can be given via a PhonopyAtoms class as follows:
# unitcell = PhonopyAtoms(symbols=(['Na'] * 4 + ['Cl'] * 4),
# cell=(np.eye(3) * 5.6903014761756712),
# scaled_positions=[[0, 0, 0],
# [0, 0.5, 0.5],
# [0.5, 0, 0.5],
# [0.5, 0.5, 0],
# [0.5, 0.5, 0.5],
# [0.5, 0, 0],
# [0, 0.5, 0],
# [0, 0, 0.5]])
phonon = Phonopy(unitcell,
[[2, 0, 0],
[0, 2, 0],
[0, 0, 2]],
primitive_matrix=[[0, 0.5, 0.5],
[0.5, 0, 0.5],
[0.5, 0.5, 0]])
symmetry = phonon.get_symmetry()
print("Space group: %s" % symmetry.get_international_table())
force_sets = parse_FORCE_SETS()
phonon.set_displacement_dataset(force_sets)
phonon.produce_force_constants()
primitive = phonon.get_primitive()
# Born effective charges and dielectric constants are read from BORN file.
nac_params = parse_BORN(primitive, filename="BORN")
# Or it can be of course given by hand as follows:
# born = [[[1.08703, 0, 0],
# [0, 1.08703, 0],
# [0, 0, 1.08703]],
# [[-1.08672, 0, 0],
# [0, -1.08672, 0],
# [0, 0, -1.08672]]]
# epsilon = [[2.43533967, 0, 0],
# [0, 2.43533967, 0],
# [0, 0, 2.43533967]]
# factors = 14.400
# nac_params = {'born': born,
# 'factor': factors,
# 'dielectric': epsilon}
phonon.set_nac_params(nac_params)
# BAND = 0.0 0.0 0.0 0.5 0.0 0.0 0.5 0.5 0.0 0.0 0.0 0.0 0.5 0.5 0.5
bands = []
append_band(bands, [0.0, 0.0, 0.0], [0.5, 0.0, 0.0])
append_band(bands, [0.5, 0.0, 0.0], [0.5, 0.5, 0.0])
append_band(bands, [0.5, 0.5, 0.0], [0.0, 0.0, 0.0])
append_band(bands, [0.0, 0.0, 0.0], [0.5, 0.5, 0.5])
phonon.set_band_structure(bands)
q_points, distances, frequencies, eigvecs = phonon.get_band_structure()
for q, d, freq in zip(q_points, distances, frequencies):
print("%s %s %s" % (q, d, freq))
phonon.plot_band_structure().show()
# Mesh sampling 20x20x20
phonon.set_mesh([20, 20, 20])
phonon.set_thermal_properties(t_step=10,
t_max=1000,
t_min=0)
# DOS
phonon.set_total_DOS(sigma=0.1)
for omega, dos in np.array(phonon.get_total_DOS()).T:
print("%15.7f%15.7f" % (omega, dos))
phonon.plot_total_DOS().show()
# Thermal properties
for t, free_energy, entropy, cv in np.array(phonon.get_thermal_properties()).T:
print(("%12.3f " + "%15.7f" * 3) % ( t, free_energy, entropy, cv))
phonon.plot_thermal_properties().show()
# PDOS
phonon.set_mesh([10, 10, 10],
is_mesh_symmetry=False,
is_eigenvectors=True)
phonon.set_partial_DOS(tetrahedron_method=True)
omegas, pdos = phonon.get_partial_DOS()
pdos_indices = [[0], [1]]
phonon.plot_partial_DOS(pdos_indices=pdos_indices,
legend=pdos_indices).show()
| [
"phonopy.file_IO.parse_BORN",
"phonopy.Phonopy",
"phonopy.interface.vasp.read_vasp",
"numpy.array",
"phonopy.file_IO.parse_FORCE_SETS"
] | [((467, 486), 'phonopy.interface.vasp.read_vasp', 'read_vasp', (['"""POSCAR"""'], {}), "('POSCAR')\n", (476, 486), False, 'from phonopy.interface.vasp import read_vasp\n'), ((1145, 1266), 'phonopy.Phonopy', 'Phonopy', (['unitcell', '[[2, 0, 0], [0, 2, 0], [0, 0, 2]]'], {'primitive_matrix': '[[0, 0.5, 0.5], [0.5, 0, 0.5], [0.5, 0.5, 0]]'}), '(unitcell, [[2, 0, 0], [0, 2, 0], [0, 0, 2]], primitive_matrix=[[0, \n 0.5, 0.5], [0.5, 0, 0.5], [0.5, 0.5, 0]])\n', (1152, 1266), False, 'from phonopy import Phonopy\n'), ((1512, 1530), 'phonopy.file_IO.parse_FORCE_SETS', 'parse_FORCE_SETS', ([], {}), '()\n', (1528, 1530), False, 'from phonopy.file_IO import parse_FORCE_SETS, parse_BORN\n'), ((1732, 1770), 'phonopy.file_IO.parse_BORN', 'parse_BORN', (['primitive'], {'filename': '"""BORN"""'}), "(primitive, filename='BORN')\n", (1742, 1770), False, 'from phonopy.file_IO import parse_FORCE_SETS, parse_BORN\n'), ((297, 314), 'numpy.array', 'np.array', (['q_start'], {}), '(q_start)\n', (305, 314), True, 'import numpy as np\n'), ((338, 353), 'numpy.array', 'np.array', (['q_end'], {}), '(q_end)\n', (346, 353), True, 'import numpy as np\n'), ((356, 373), 'numpy.array', 'np.array', (['q_start'], {}), '(q_start)\n', (364, 373), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import patches
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib.figure import Figure
from matplotlib import rcParams
def dBofHz(inputHz):
'''
this function will simply print to console...
'''
outValue = 20*np.log10( inputHz )
print(outValue, " dB")
return outValue
def calculateForZ(Coefficient_Array = [-0.2,0.3,0.1],Z_Input = 0.3-0.3j):
'''
this function will simply print to console...
an evaluation of x[n] for a specific value of z
'''
summR = 0
summI = 0
for index in range(len(Coefficient_Array)):
#print("x[n] ", Coefficient_Array[index])
z_n_value = pow(Z_Input,-index)
print("z^-n = ", z_n_value)
print("np.real(Coefficient_Array[index] * z_n_value ", np.real(Coefficient_Array[index] * z_n_value))
print("np.imag(Coefficient_Array[index] * z_n_value ", np.imag(Coefficient_Array[index] * z_n_value))
summR += np.real(Coefficient_Array[index] * z_n_value)
summI += np.imag(Coefficient_Array[index] * z_n_value)
print("summR ", summR)
print("summI ", summI)
H_z = np.sqrt((summR**2) + (summI**2))
#print(H_z, " H_z")
return H_z
def zplot(x=[-0.2,0.3,0.1]):
"""
Plot the complex z-plane given an FIR filter impulse response.
- give plot as a surface
"""
# create the mesh in polar coordinates and compute corresponding Z.
radiusArray = np.linspace(0, 1, 50)
freqArray = np.linspace(0, 2*np.pi,120)
magnitudeArrayMesh, phaseangleArrayMesh = np.meshgrid(radiusArray, freqArray)
#print("radiusArrayMesh ", radiusArrayMesh)
#print("freqArrayMesh ", freqArrayMesh)
coordinateX = magnitudeArrayMesh * np.cos(phaseangleArrayMesh)
coordinateY = magnitudeArrayMesh * np.sin(phaseangleArrayMesh)
#print("coordinateX ", coordinateX)
#print("coordinateY ", coordinateY)
Z = magnitudeArrayMesh*(np.e**(1j*phaseangleArrayMesh))
#print("Z ", Z)
H_z = 0 + 0j
for index in range(len(x)):
H_z += x[index] * (Z**-index)
#print("H_z ", H_z)
H_z_real = np.real(H_z)
H_z_imag = np.imag(H_z)
#print("H_z_real ", H_z_real)
#print("H_z_imag ", H_z_imag)
FreqZ = []
Z_dB = 20*np.log10(np.sqrt((H_z_real**2) + (H_z_imag**2)))
for row in Z_dB:
FreqZ.append(row[len(row)-1])
#print("Z_dB ", Z_dB)
# Plot the 3D surface
fig = plt.figure(figsize=(16, 10))
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(coordinateX, coordinateY, Z_dB, rstride=1, cstride=1, alpha=0.8, cmap='brg', vmin=-24., vmax=24.)
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=10)
# Customize the z axis.
ax.view_init(45, -120)
ax.set_zlim(-36, 36)
ax.set_ylim([-1, 1])
ax.set_xlim([-1, 1])
ax.zaxis.set_major_locator(LinearLocator(9))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.set_xlabel("Real")
ax.set_ylabel("Imaginary")
ax.set_zlabel("Magnitude [dB]")
plt.savefig('ZSurfaceSide.png')
# Plot the 3D surface
fig = plt.figure(figsize=(16, 10))
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(coordinateX, coordinateY, Z_dB, rstride=1, cstride=1, alpha=0.8, cmap='brg', vmin=-24., vmax=24.)
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=10)
# Customize the z axis.
ax.view_init(90, -90)
ax.set_zlim(-36, 36)
ax.set_ylim([-1, 1])
ax.set_xlim([-1, 1])
ax.zaxis.set_major_locator(LinearLocator(9))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.set_xlabel("Real")
ax.set_ylabel("Imaginary")
ax.set_zlabel("Magnitude [dB]")
plt.savefig('ZSurfaceTop.png')
plt.show()
fig, ax1 = plt.subplots()
ax1.set_title('Digital filter frequency response')
ax1.plot(freqArray,FreqZ, 'b')
ax1.set_ylabel('Amplitude [dB]')
ax1.set_ylim([-24, 12])
ax1.set_xlabel('Frequency [rad/sample]')
ax1.set_xlim([0, (np.pi)])
plt.savefig('ZFreq.png')
plt.grid()
plt.show()
return
x=[-0.2,0.3,0.1]
zplot(x=x)
v = calculateForZ()
dBofHz(v)
| [
"matplotlib.pyplot.grid",
"numpy.sqrt",
"matplotlib.pyplot.savefig",
"numpy.log10",
"matplotlib.ticker.LinearLocator",
"numpy.real",
"numpy.linspace",
"matplotlib.pyplot.figure",
"matplotlib.ticker.FormatStrFormatter",
"matplotlib.pyplot.subplots",
"numpy.cos",
"numpy.sin",
"numpy.meshgrid",... | [((1293, 1325), 'numpy.sqrt', 'np.sqrt', (['(summR ** 2 + summI ** 2)'], {}), '(summR ** 2 + summI ** 2)\n', (1300, 1325), True, 'import numpy as np\n'), ((1609, 1630), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(50)'], {}), '(0, 1, 50)\n', (1620, 1630), True, 'import numpy as np\n'), ((1648, 1678), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(120)'], {}), '(0, 2 * np.pi, 120)\n', (1659, 1678), True, 'import numpy as np\n'), ((1725, 1760), 'numpy.meshgrid', 'np.meshgrid', (['radiusArray', 'freqArray'], {}), '(radiusArray, freqArray)\n', (1736, 1760), True, 'import numpy as np\n'), ((2288, 2300), 'numpy.real', 'np.real', (['H_z'], {}), '(H_z)\n', (2295, 2300), True, 'import numpy as np\n'), ((2317, 2329), 'numpy.imag', 'np.imag', (['H_z'], {}), '(H_z)\n', (2324, 2329), True, 'import numpy as np\n'), ((2614, 2642), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 10)'}), '(figsize=(16, 10))\n', (2624, 2642), True, 'import matplotlib.pyplot as plt\n'), ((3265, 3296), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ZSurfaceSide.png"""'], {}), "('ZSurfaceSide.png')\n", (3276, 3296), True, 'import matplotlib.pyplot as plt\n'), ((3339, 3367), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 10)'}), '(figsize=(16, 10))\n', (3349, 3367), True, 'import matplotlib.pyplot as plt\n'), ((3989, 4019), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ZSurfaceTop.png"""'], {}), "('ZSurfaceTop.png')\n", (4000, 4019), True, 'import matplotlib.pyplot as plt\n'), ((4025, 4035), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4033, 4035), True, 'import matplotlib.pyplot as plt\n'), ((4054, 4068), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4066, 4068), True, 'import matplotlib.pyplot as plt\n'), ((4311, 4335), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ZFreq.png"""'], {}), "('ZFreq.png')\n", (4322, 4335), True, 'import matplotlib.pyplot as plt\n'), ((4341, 4351), 
'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4349, 4351), True, 'import matplotlib.pyplot as plt\n'), ((4357, 4367), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4365, 4367), True, 'import matplotlib.pyplot as plt\n'), ((400, 417), 'numpy.log10', 'np.log10', (['inputHz'], {}), '(inputHz)\n', (408, 417), True, 'import numpy as np\n'), ((1116, 1161), 'numpy.real', 'np.real', (['(Coefficient_Array[index] * z_n_value)'], {}), '(Coefficient_Array[index] * z_n_value)\n', (1123, 1161), True, 'import numpy as np\n'), ((1180, 1225), 'numpy.imag', 'np.imag', (['(Coefficient_Array[index] * z_n_value)'], {}), '(Coefficient_Array[index] * z_n_value)\n', (1187, 1225), True, 'import numpy as np\n'), ((1895, 1922), 'numpy.cos', 'np.cos', (['phaseangleArrayMesh'], {}), '(phaseangleArrayMesh)\n', (1901, 1922), True, 'import numpy as np\n'), ((1963, 1990), 'numpy.sin', 'np.sin', (['phaseangleArrayMesh'], {}), '(phaseangleArrayMesh)\n', (1969, 1990), True, 'import numpy as np\n'), ((3083, 3099), 'matplotlib.ticker.LinearLocator', 'LinearLocator', (['(9)'], {}), '(9)\n', (3096, 3099), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((3135, 3162), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.02f"""'], {}), "('%.02f')\n", (3153, 3162), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((3807, 3823), 'matplotlib.ticker.LinearLocator', 'LinearLocator', (['(9)'], {}), '(9)\n', (3820, 3823), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((3859, 3886), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.02f"""'], {}), "('%.02f')\n", (3877, 3886), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((940, 985), 'numpy.real', 'np.real', (['(Coefficient_Array[index] * z_n_value)'], {}), '(Coefficient_Array[index] * z_n_value)\n', (947, 985), True, 'import numpy as np\n'), ((1051, 1096), 'numpy.imag', 'np.imag', 
(['(Coefficient_Array[index] * z_n_value)'], {}), '(Coefficient_Array[index] * z_n_value)\n', (1058, 1096), True, 'import numpy as np\n'), ((2440, 2478), 'numpy.sqrt', 'np.sqrt', (['(H_z_real ** 2 + H_z_imag ** 2)'], {}), '(H_z_real ** 2 + H_z_imag ** 2)\n', (2447, 2478), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
from numpy import cos, sin, tanh, pi
# generate random synthetic 2D field
def deterministic_field(i, j, X, Y):
r = (i*2*pi)/X
t = (j*2*pi)/Y
return sin(r)*sin(t) + sin(2.1*r)*sin(2.1*t) \
+ sin(3.1*r)*sin(3.1*t) + tanh(r)*cos(t) \
+ tanh(2*r)*cos(2.1*t) + tanh(4*r)*cos(0.1*t) \
+ tanh(2.4*r)*cos(1.1*t) + tanh(r + t) \
+ tanh(r + 2*t)
# generate linear displacement field
def linear_field(obs, time):
field = np.zeros((obs,time))
for i in range(0, obs):
r = np.linspace(0, 10, obs)[i]
for j in range(0, time):
t = np.linspace(1, 100, time)[j]
field[i][j] = (1 - 2*r)*t
return field
def construct_field(m, n):
field = np.zeros((m,n))
for i in range(0, m):
x = (i*2*np.pi)/m
for j in range(0, n):
t = (j*2*np.pi)/n
field[i][j] = deterministic_field(x, t)
return field
| [
"numpy.tanh",
"numpy.zeros",
"numpy.linspace",
"numpy.cos",
"numpy.sin"
] | [((502, 523), 'numpy.zeros', 'np.zeros', (['(obs, time)'], {}), '((obs, time))\n', (510, 523), True, 'import numpy as np\n'), ((763, 779), 'numpy.zeros', 'np.zeros', (['(m, n)'], {}), '((m, n))\n', (771, 779), True, 'import numpy as np\n'), ((409, 424), 'numpy.tanh', 'tanh', (['(r + 2 * t)'], {}), '(r + 2 * t)\n', (413, 424), False, 'from numpy import cos, sin, tanh, pi\n'), ((385, 396), 'numpy.tanh', 'tanh', (['(r + t)'], {}), '(r + t)\n', (389, 396), False, 'from numpy import cos, sin, tanh, pi\n'), ((563, 586), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', 'obs'], {}), '(0, 10, obs)\n', (574, 586), True, 'import numpy as np\n'), ((639, 664), 'numpy.linspace', 'np.linspace', (['(1)', '(100)', 'time'], {}), '(1, 100, time)\n', (650, 664), True, 'import numpy as np\n'), ((360, 373), 'numpy.tanh', 'tanh', (['(2.4 * r)'], {}), '(2.4 * r)\n', (364, 373), False, 'from numpy import cos, sin, tanh, pi\n'), ((372, 384), 'numpy.cos', 'cos', (['(1.1 * t)'], {}), '(1.1 * t)\n', (375, 384), False, 'from numpy import cos, sin, tanh, pi\n'), ((327, 338), 'numpy.tanh', 'tanh', (['(4 * r)'], {}), '(4 * r)\n', (331, 338), False, 'from numpy import cos, sin, tanh, pi\n'), ((337, 349), 'numpy.cos', 'cos', (['(0.1 * t)'], {}), '(0.1 * t)\n', (340, 349), False, 'from numpy import cos, sin, tanh, pi\n'), ((304, 315), 'numpy.tanh', 'tanh', (['(2 * r)'], {}), '(2 * r)\n', (308, 315), False, 'from numpy import cos, sin, tanh, pi\n'), ((314, 326), 'numpy.cos', 'cos', (['(2.1 * t)'], {}), '(2.1 * t)\n', (317, 326), False, 'from numpy import cos, sin, tanh, pi\n'), ((277, 284), 'numpy.tanh', 'tanh', (['r'], {}), '(r)\n', (281, 284), False, 'from numpy import cos, sin, tanh, pi\n'), ((285, 291), 'numpy.cos', 'cos', (['t'], {}), '(t)\n', (288, 291), False, 'from numpy import cos, sin, tanh, pi\n'), ((253, 265), 'numpy.sin', 'sin', (['(3.1 * r)'], {}), '(3.1 * r)\n', (256, 265), False, 'from numpy import cos, sin, tanh, pi\n'), ((264, 276), 'numpy.sin', 'sin', (['(3.1 * t)'], {}), '(3.1 
* t)\n', (267, 276), False, 'from numpy import cos, sin, tanh, pi\n'), ((203, 209), 'numpy.sin', 'sin', (['r'], {}), '(r)\n', (206, 209), False, 'from numpy import cos, sin, tanh, pi\n'), ((210, 216), 'numpy.sin', 'sin', (['t'], {}), '(t)\n', (213, 216), False, 'from numpy import cos, sin, tanh, pi\n'), ((219, 231), 'numpy.sin', 'sin', (['(2.1 * r)'], {}), '(2.1 * r)\n', (222, 231), False, 'from numpy import cos, sin, tanh, pi\n'), ((230, 242), 'numpy.sin', 'sin', (['(2.1 * t)'], {}), '(2.1 * t)\n', (233, 242), False, 'from numpy import cos, sin, tanh, pi\n')] |
# Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import TypeVar
import numpy as np
import tensorflow as tf
import torch
Tensor = TypeVar('Tensor', tf.Tensor, torch.Tensor, np.ndarray)
def zscore(data: Tensor, epsilon: float = 1e-7) -> Tensor:
"""Apply Zscore processing to a given tensor or array.
This method can be used with Numpy data:
```python
n = np.array([[0,1],[2,3]])
b = fe.backend.zscore(n) # [[-1.34164079, -0.4472136 ],[0.4472136 , 1.34164079]]
```
This method can be used with TensorFlow tensors:
```python
t = tf.constant([[0,1],[2,3]])
b = fe.backend.zscore(t) # [[-1.34164079, -0.4472136 ],[0.4472136 , 1.34164079]]
```
This method can be used with PyTorch tensors:
```python
p = torch.tensor([[0,1],[2,3]])
b = fe.backend.zscore(p) # [[-1.34164079, -0.4472136 ],[0.4472136 , 1.34164079]]
```
Args:
data: The input tensor or array.
Returns:
Data after substracting mean and divided by standard deviation.
Raises:
ValueError: If `tensor` is an unacceptable data type.
"""
if tf.is_tensor(data):
data = tf.cast(data, tf.float32)
mean = tf.reduce_mean(data)
std = tf.keras.backend.std(data)
return (data - mean) / tf.maximum(std, epsilon)
elif isinstance(data, torch.Tensor):
data = data.type(torch.float32)
mean = torch.mean(data)
std = torch.std(data, unbiased=False)
return (data - mean) / torch.max(std, torch.tensor(epsilon))
elif isinstance(data, np.ndarray):
mean = np.mean(data)
std = np.std(data)
return (data - mean) / max(std, epsilon)
else:
raise ValueError("Unrecognized data type {}".format(type(data)))
| [
"numpy.mean",
"tensorflow.is_tensor",
"torch.mean",
"numpy.std",
"torch.tensor",
"tensorflow.maximum",
"tensorflow.reduce_mean",
"tensorflow.cast",
"torch.std",
"tensorflow.keras.backend.std",
"typing.TypeVar"
] | [((786, 840), 'typing.TypeVar', 'TypeVar', (['"""Tensor"""', 'tf.Tensor', 'torch.Tensor', 'np.ndarray'], {}), "('Tensor', tf.Tensor, torch.Tensor, np.ndarray)\n", (793, 840), False, 'from typing import TypeVar\n'), ((1767, 1785), 'tensorflow.is_tensor', 'tf.is_tensor', (['data'], {}), '(data)\n', (1779, 1785), True, 'import tensorflow as tf\n'), ((1802, 1827), 'tensorflow.cast', 'tf.cast', (['data', 'tf.float32'], {}), '(data, tf.float32)\n', (1809, 1827), True, 'import tensorflow as tf\n'), ((1843, 1863), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['data'], {}), '(data)\n', (1857, 1863), True, 'import tensorflow as tf\n'), ((1878, 1904), 'tensorflow.keras.backend.std', 'tf.keras.backend.std', (['data'], {}), '(data)\n', (1898, 1904), True, 'import tensorflow as tf\n'), ((1936, 1960), 'tensorflow.maximum', 'tf.maximum', (['std', 'epsilon'], {}), '(std, epsilon)\n', (1946, 1960), True, 'import tensorflow as tf\n'), ((2057, 2073), 'torch.mean', 'torch.mean', (['data'], {}), '(data)\n', (2067, 2073), False, 'import torch\n'), ((2088, 2119), 'torch.std', 'torch.std', (['data'], {'unbiased': '(False)'}), '(data, unbiased=False)\n', (2097, 2119), False, 'import torch\n'), ((2243, 2256), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (2250, 2256), True, 'import numpy as np\n'), ((2271, 2283), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (2277, 2283), True, 'import numpy as np\n'), ((2166, 2187), 'torch.tensor', 'torch.tensor', (['epsilon'], {}), '(epsilon)\n', (2178, 2187), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
# @Author: yulidong
# @Date: 2018-04-25 23:06:40
# @Last Modified by: yulidong
# @Last Modified time: 2018-11-20 00:11:31
import os
import torch
import numpy as np
import scipy.misc as m
import cv2
from torch.utils import data
from python_pfm import *
import torchvision.transforms as transforms
import torch.nn.functional as F
import random
path=os.path.join('/home/dataset/datasets/nyu2_depth/npy_data')
files=os.listdir(path)
alpha=100
beta=0
min=[]
max=[]
for i in range(len(files)):
data=np.load(os.path.join(path,files[i]))
depth = data[:,:,3]
depth=np.where(depth==0,np.mean(depth),depth)
depth=np.where(depth==10,np.mean(depth),depth)
alpha=np.min([alpha,np.max([0,np.min(depth)])])
beta=np.max([beta,np.max(depth)])
min.append(np.max([0,np.min(depth)]))
max.append(np.max(depth))
print(i,alpha,beta,min[-1],max[-1])
print(alpha,beta)
#0.7132995128631592 9.99547004699707
#0.014277142867333174 9.999999202576088 | [
"numpy.mean",
"os.listdir",
"os.path.join",
"numpy.max",
"numpy.min"
] | [((378, 436), 'os.path.join', 'os.path.join', (['"""/home/dataset/datasets/nyu2_depth/npy_data"""'], {}), "('/home/dataset/datasets/nyu2_depth/npy_data')\n", (390, 436), False, 'import os\n'), ((443, 459), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (453, 459), False, 'import os\n'), ((536, 564), 'os.path.join', 'os.path.join', (['path', 'files[i]'], {}), '(path, files[i])\n', (548, 564), False, 'import os\n'), ((617, 631), 'numpy.mean', 'np.mean', (['depth'], {}), '(depth)\n', (624, 631), True, 'import numpy as np\n'), ((668, 682), 'numpy.mean', 'np.mean', (['depth'], {}), '(depth)\n', (675, 682), True, 'import numpy as np\n'), ((837, 850), 'numpy.max', 'np.max', (['depth'], {}), '(depth)\n', (843, 850), True, 'import numpy as np\n'), ((764, 777), 'numpy.max', 'np.max', (['depth'], {}), '(depth)\n', (770, 777), True, 'import numpy as np\n'), ((805, 818), 'numpy.min', 'np.min', (['depth'], {}), '(depth)\n', (811, 818), True, 'import numpy as np\n'), ((724, 737), 'numpy.min', 'np.min', (['depth'], {}), '(depth)\n', (730, 737), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""Write out the KL distance between two kmer models
"""
from __future__ import print_function
import os, sys
import numpy as np
from vis_kmer_distributions import *
from scipy.stats import entropy
from scipy.spatial.distance import euclidean
from itertools import product
from argparse import ArgumentParser
def parse_args():
parser = ArgumentParser (description=__doc__)
parser.add_argument('--pk_dir', action='store', default=None, required=True, type=str, dest='pk_dir',
help="Path to experimental kmer distriutions")
parser.add_argument('--out', action='store', default=None, required=True, type=str, dest='out',
help="place to put result files")
args = parser.parse_args()
return args
_SQRT2 = np.sqrt(2)
def hellinger2(p, q):
return euclidean(np.sqrt(p), np.sqrt(q)) / _SQRT2
def main(args):
args = parse_args()
file_with_ont_model = "../../tests/minion_test_reads/C/" \
"makeson_PC_MA_286_R7.3_ZYMO_C_1_09_11_15_1714_1_ch1_file1_strand.fast5"
assert os.path.exists(file_with_ont_model), "Didn't find ONT model containing file"
kl_out_file_path = args.out + "kl_distance.txt"
hd_out_file_path = args.out + "hellinger_distance.txt"
assert os.path.exists(kl_out_file_path) is not True, "Out file {} already exists".format(kl_out_file_path)
assert os.path.exists(hd_out_file_path) is not True, "Out file {} already exists".format(hd_out_file_path)
kl_out = open(kl_out_file_path, 'w')
hd_out = open(hd_out_file_path, 'w')
x_vals = np.linspace(30, 90, 600)
print("Collecting distances for {pk} against ONT table\n".format(pk=args.pk_dir), file=sys.stdout)
for kmer in product("ACGT", repeat=6):
kmer = ''.join(kmer)
template_pdf, complement_pdf = plot_ont_distribution(kmer=kmer, fast5=file_with_ont_model, x_vals=x_vals)
hdp_distribution = KmerHdpDistribution(data_directory=args.pk_dir, kmer=kmer)
ent = entropy(pk=hdp_distribution.density, qk=template_pdf, base=2)
h_distance = hellinger2(p=hdp_distribution.density, q=template_pdf)
print("finished with kmer {kmer} entropy {ent} hellinger distance {hd}"
"".format(kmer=kmer, ent=ent, hd=h_distance), file=sys.stderr)
kl_out.write("{ent}\n".format(ent=ent))
hd_out.write("{hd}\n".format(hd=h_distance))
kl_out.close()
hd_out.close()
print("\nFinished collecting distances", file=sys.stdout)
if __name__ == "__main__":
sys.exit(main(sys.argv))
| [
"os.path.exists",
"scipy.stats.entropy",
"numpy.sqrt",
"argparse.ArgumentParser",
"itertools.product",
"numpy.linspace"
] | [((795, 805), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (802, 805), True, 'import numpy as np\n'), ((364, 399), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (378, 399), False, 'from argparse import ArgumentParser\n'), ((1099, 1134), 'os.path.exists', 'os.path.exists', (['file_with_ont_model'], {}), '(file_with_ont_model)\n', (1113, 1134), False, 'import os, sys\n'), ((1606, 1630), 'numpy.linspace', 'np.linspace', (['(30)', '(90)', '(600)'], {}), '(30, 90, 600)\n', (1617, 1630), True, 'import numpy as np\n'), ((1751, 1776), 'itertools.product', 'product', (['"""ACGT"""'], {'repeat': '(6)'}), "('ACGT', repeat=6)\n", (1758, 1776), False, 'from itertools import product\n'), ((1298, 1330), 'os.path.exists', 'os.path.exists', (['kl_out_file_path'], {}), '(kl_out_file_path)\n', (1312, 1330), False, 'import os, sys\n'), ((1409, 1441), 'os.path.exists', 'os.path.exists', (['hd_out_file_path'], {}), '(hd_out_file_path)\n', (1423, 1441), False, 'import os, sys\n'), ((2021, 2082), 'scipy.stats.entropy', 'entropy', ([], {'pk': 'hdp_distribution.density', 'qk': 'template_pdf', 'base': '(2)'}), '(pk=hdp_distribution.density, qk=template_pdf, base=2)\n', (2028, 2082), False, 'from scipy.stats import entropy\n'), ((851, 861), 'numpy.sqrt', 'np.sqrt', (['p'], {}), '(p)\n', (858, 861), True, 'import numpy as np\n'), ((863, 873), 'numpy.sqrt', 'np.sqrt', (['q'], {}), '(q)\n', (870, 873), True, 'import numpy as np\n')] |
import sys
sys.path.insert(0, '../../../src_python')
import nmpccodegen as nmpc
import nmpccodegen.tools as tools
import nmpccodegen.models as models
import nmpccodegen.controller as controller
import nmpccodegen.controller.obstacles as obstacles
import nmpccodegen.Cfunctions as cfunctions
import nmpccodegen.example_models as example_models
import math
import numpy as np
import matplotlib.pyplot as plt
import math
import sys
import time
def init_controller_files(controller_name):
## -- GENERATE STATIC FILES --
# start by generating the static files and folder of the controller
trailer_controller_location = "../../../test_controller_builds/" + controller_name
tools.Bootstrapper.bootstrap(trailer_controller_location, simulation_tools=True)
return trailer_controller_location
## -----------------------------------------------------------------
def generate_controller_with_obs(trailer_controller_location,reference_state,Q,R,rectangular_obstacle_1,obstacle_weight,horizon,display_figure=True,index_figure=0):
# get the continious system equations
(system_equations,number_of_states,number_of_inputs,coordinates_indices) = example_models.get_trailer_model(L=0.5)
step_size = 0.05
# simulation_time = 10
# number_of_steps = math.ceil(simulation_time / step_size)
integrator = "RK44"
constraint_input = cfunctions.IndicatorBoxFunction([-1,-1],[1,1]) # input needs stay within these borders
model = models.Model_continious(system_equations, constraint_input, step_size, number_of_states,\
number_of_inputs,coordinates_indices, integrator)
# reference_state=np.array([2,2,0])
stage_cost = controller.Stage_cost_QR(model, Q, R)
# define the controller
trailer_controller = controller.Nmpc_panoc(trailer_controller_location,model,stage_cost)
trailer_controller.horizon = horizon
trailer_controller.step_size = step_size
trailer_controller.integrator_casadi = True
trailer_controller.panoc_max_steps= 1000
trailer_controller._lbgfs_buffer_size = 20
trailer_controller.min_residual = -5
# add an obstacle
trailer_controller.add_obstacle(rectangular_obstacle_1)
# generate the code
trailer_controller.generate_code()
# -- simulate controller --
# setup a simulator to test
sim = tools.Simulator(trailer_controller.location)
initial_state=np.array([0.01,0.,0.])
state=initial_state
state_history = np.zeros((number_of_states,horizon))
sim.set_weight_obstacle(0,obstacle_weight)
reference_input = np.array([0, 0])
(sim_data, full_solution) = sim.simulate_nmpc_multistep_solution(initial_state, reference_state, reference_input,
number_of_inputs * horizon)
inputs = np.reshape(full_solution, (horizon, number_of_inputs))
print("solved NMPC problem time="+ sim_data.time_string + " number of panoc iterations=" + str(
sim_data.panoc_interations))
for i in range(0,horizon):
state = model.get_next_state_numpy(state,inputs[i,:])
state_history[:,i] = np.reshape(state[:],number_of_states)
print("Reference state:")
print(reference_state)
print("Final state:")
print(state)
if(display_figure==True):
plt.figure(index_figure)
example_models.trailer_print(state_history)
rectangular_obstacle_1.plot()
plt.xlim([-2.2, 2.2])
plt.ylim([-0.1, 2.2])
# plt.clf()
return state
def main():
# create static files
trailer_move_diag_obs_location_ = init_controller_files("trailer_move_diag_obs")
trailer_move_right_obs_location_ = init_controller_files("trailer_move_right_obs")
trailer_move_move_up_obs_location_ = init_controller_files("trailer_move_up_obs")
# Start simulating:
# TEST 1
rectangular_center_coordinates = np.array([0.75, 0.45])
rectangular_width = 0.5
rectangular_height = 0.3
rectangular_obstacle_1 = obstacles.Obstacle_rectangular(rectangular_center_coordinates, \
rectangular_width, rectangular_height)
Q = np.diag([10., 10., 1.])
R = np.diag([1., 1.]) * 0.01
obstacle_weight = 10000.
horizon = 50
reference_state = np.array([2, 0.5, 0])
current_state = generate_controller_with_obs(trailer_move_diag_obs_location_, reference_state, Q,R, \
rectangular_obstacle_1 , obstacle_weight,\
horizon,display_figure=True,index_figure=0)
# TEST 2
rectangular_center_coordinates_2 = np.array([1, 0.])
rectangular_width_2 = 0.5
rectangular_height_2 = 0.2
rectangular_obstacle_2 = obstacles.Obstacle_rectangular(rectangular_center_coordinates_2, \
rectangular_width_2, rectangular_height_2)
Q = np.diag([10., 10., 1.])*1.
R = np.diag([1., 1.]) * 0.01
obstacle_weight = 1000.
horizon = 50
reference_state = np.array([2, 0, 0])
current_state = generate_controller_with_obs(trailer_move_right_obs_location_, reference_state, Q, R,\
rectangular_obstacle_2,obstacle_weight,horizon,\
display_figure=True,index_figure=1)
# TEST 3
rectangular_center_coordinates = np.array([0.6, 0.5])
rectangular_width = 1.2
rectangular_height = 0.2
rectangular_obstacle_3 = obstacles.Obstacle_rectangular(rectangular_center_coordinates, \
rectangular_width, rectangular_height)
Q = np.diag([10., 10., 0.1])
R = np.diag([1., 1.]) * 0.01
obstacle_weight = 10000.
horizon = 50
reference_state = np.array([0, 2, 0])
current_state = generate_controller_with_obs(trailer_move_move_up_obs_location_, reference_state, Q, R,\
rectangular_obstacle_3,obstacle_weight,horizon,\
display_figure=True,index_figure=2)
plt.show()
if __name__ == '__main__':
main()
| [
"sys.path.insert",
"numpy.reshape",
"nmpccodegen.example_models.get_trailer_model",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylim",
"nmpccodegen.Cfunctions.IndicatorBoxFunction",
"nmpccodegen.controller.Stage_cost_QR",
"numpy.diag",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure",
... | [((11, 52), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../../../src_python"""'], {}), "(0, '../../../src_python')\n", (26, 52), False, 'import sys\n'), ((685, 770), 'nmpccodegen.tools.Bootstrapper.bootstrap', 'tools.Bootstrapper.bootstrap', (['trailer_controller_location'], {'simulation_tools': '(True)'}), '(trailer_controller_location, simulation_tools=True\n )\n', (713, 770), True, 'import nmpccodegen.tools as tools\n'), ((1166, 1205), 'nmpccodegen.example_models.get_trailer_model', 'example_models.get_trailer_model', ([], {'L': '(0.5)'}), '(L=0.5)\n', (1198, 1205), True, 'import nmpccodegen.example_models as example_models\n'), ((1366, 1415), 'nmpccodegen.Cfunctions.IndicatorBoxFunction', 'cfunctions.IndicatorBoxFunction', (['[-1, -1]', '[1, 1]'], {}), '([-1, -1], [1, 1])\n', (1397, 1415), True, 'import nmpccodegen.Cfunctions as cfunctions\n'), ((1465, 1608), 'nmpccodegen.models.Model_continious', 'models.Model_continious', (['system_equations', 'constraint_input', 'step_size', 'number_of_states', 'number_of_inputs', 'coordinates_indices', 'integrator'], {}), '(system_equations, constraint_input, step_size,\n number_of_states, number_of_inputs, coordinates_indices, integrator)\n', (1488, 1608), True, 'import nmpccodegen.models as models\n'), ((1699, 1736), 'nmpccodegen.controller.Stage_cost_QR', 'controller.Stage_cost_QR', (['model', 'Q', 'R'], {}), '(model, Q, R)\n', (1723, 1736), True, 'import nmpccodegen.controller as controller\n'), ((1791, 1860), 'nmpccodegen.controller.Nmpc_panoc', 'controller.Nmpc_panoc', (['trailer_controller_location', 'model', 'stage_cost'], {}), '(trailer_controller_location, model, stage_cost)\n', (1812, 1860), True, 'import nmpccodegen.controller as controller\n'), ((2348, 2392), 'nmpccodegen.tools.Simulator', 'tools.Simulator', (['trailer_controller.location'], {}), '(trailer_controller.location)\n', (2363, 2392), True, 'import nmpccodegen.tools as tools\n'), ((2413, 2439), 'numpy.array', 'np.array', (['[0.01, 0.0, 
0.0]'], {}), '([0.01, 0.0, 0.0])\n', (2421, 2439), True, 'import numpy as np\n'), ((2480, 2517), 'numpy.zeros', 'np.zeros', (['(number_of_states, horizon)'], {}), '((number_of_states, horizon))\n', (2488, 2517), True, 'import numpy as np\n'), ((2587, 2603), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (2595, 2603), True, 'import numpy as np\n'), ((2802, 2856), 'numpy.reshape', 'np.reshape', (['full_solution', '(horizon, number_of_inputs)'], {}), '(full_solution, (horizon, number_of_inputs))\n', (2812, 2856), True, 'import numpy as np\n'), ((3881, 3903), 'numpy.array', 'np.array', (['[0.75, 0.45]'], {}), '([0.75, 0.45])\n', (3889, 3903), True, 'import numpy as np\n'), ((3990, 4095), 'nmpccodegen.controller.obstacles.Obstacle_rectangular', 'obstacles.Obstacle_rectangular', (['rectangular_center_coordinates', 'rectangular_width', 'rectangular_height'], {}), '(rectangular_center_coordinates,\n rectangular_width, rectangular_height)\n', (4020, 4095), True, 'import nmpccodegen.controller.obstacles as obstacles\n'), ((4153, 4179), 'numpy.diag', 'np.diag', (['[10.0, 10.0, 1.0]'], {}), '([10.0, 10.0, 1.0])\n', (4160, 4179), True, 'import numpy as np\n'), ((4279, 4300), 'numpy.array', 'np.array', (['[2, 0.5, 0]'], {}), '([2, 0.5, 0])\n', (4287, 4300), True, 'import numpy as np\n'), ((4645, 4663), 'numpy.array', 'np.array', (['[1, 0.0]'], {}), '([1, 0.0])\n', (4653, 4663), True, 'import numpy as np\n'), ((4753, 4864), 'nmpccodegen.controller.obstacles.Obstacle_rectangular', 'obstacles.Obstacle_rectangular', (['rectangular_center_coordinates_2', 'rectangular_width_2', 'rectangular_height_2'], {}), '(rectangular_center_coordinates_2,\n rectangular_width_2, rectangular_height_2)\n', (4783, 4864), True, 'import nmpccodegen.controller.obstacles as obstacles\n'), ((5060, 5079), 'numpy.array', 'np.array', (['[2, 0, 0]'], {}), '([2, 0, 0])\n', (5068, 5079), True, 'import numpy as np\n'), ((5409, 5429), 'numpy.array', 'np.array', (['[0.6, 0.5]'], {}), '([0.6, 0.5])\n', 
(5417, 5429), True, 'import numpy as np\n'), ((5516, 5621), 'nmpccodegen.controller.obstacles.Obstacle_rectangular', 'obstacles.Obstacle_rectangular', (['rectangular_center_coordinates', 'rectangular_width', 'rectangular_height'], {}), '(rectangular_center_coordinates,\n rectangular_width, rectangular_height)\n', (5546, 5621), True, 'import nmpccodegen.controller.obstacles as obstacles\n'), ((5689, 5715), 'numpy.diag', 'np.diag', (['[10.0, 10.0, 0.1]'], {}), '([10.0, 10.0, 0.1])\n', (5696, 5715), True, 'import numpy as np\n'), ((5816, 5835), 'numpy.array', 'np.array', (['[0, 2, 0]'], {}), '([0, 2, 0])\n', (5824, 5835), True, 'import numpy as np\n'), ((6125, 6135), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6133, 6135), True, 'import matplotlib.pyplot as plt\n'), ((3117, 3155), 'numpy.reshape', 'np.reshape', (['state[:]', 'number_of_states'], {}), '(state[:], number_of_states)\n', (3127, 3155), True, 'import numpy as np\n'), ((3295, 3319), 'matplotlib.pyplot.figure', 'plt.figure', (['index_figure'], {}), '(index_figure)\n', (3305, 3319), True, 'import matplotlib.pyplot as plt\n'), ((3328, 3371), 'nmpccodegen.example_models.trailer_print', 'example_models.trailer_print', (['state_history'], {}), '(state_history)\n', (3356, 3371), True, 'import nmpccodegen.example_models as example_models\n'), ((3418, 3439), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-2.2, 2.2]'], {}), '([-2.2, 2.2])\n', (3426, 3439), True, 'import matplotlib.pyplot as plt\n'), ((3448, 3469), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-0.1, 2.2]'], {}), '([-0.1, 2.2])\n', (3456, 3469), True, 'import matplotlib.pyplot as plt\n'), ((4185, 4204), 'numpy.diag', 'np.diag', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (4192, 4204), True, 'import numpy as np\n'), ((4932, 4958), 'numpy.diag', 'np.diag', (['[10.0, 10.0, 1.0]'], {}), '([10.0, 10.0, 1.0])\n', (4939, 4958), True, 'import numpy as np\n'), ((4967, 4986), 'numpy.diag', 'np.diag', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (4974, 4986), True, 
'import numpy as np\n'), ((5722, 5741), 'numpy.diag', 'np.diag', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (5729, 5741), True, 'import numpy as np\n')] |
import torch
from torch import nn
from torch.nn import functional as F
from torch import optim
from torch.autograd import Variable
import numpy as np
class ConcreteDropout(nn.Module):
    """Concrete Dropout layer (Gal, Hron & Kendall, 2017).

    Learns the dropout probability jointly with the network weights by
    relaxing the discrete Bernoulli mask to a differentiable "concrete"
    distribution. The probability is parametrised through its logit so it
    always stays in (0, 1).
    """

    def __init__(self, weight_regularizer=1e-7,
                 dropout_regularizer=1e-6, init_min=0.1, init_max=0.1):
        super(ConcreteDropout, self).__init__()
        self.weight_regularizer = weight_regularizer
        self.dropout_regularizer = dropout_regularizer
        # Map the initial probabilities to logit space before sampling the
        # initial value of the learnable parameter.
        logit_min = np.log(init_min) - np.log(1. - init_min)
        logit_max = np.log(init_max) - np.log(1. - init_max)
        self.p_logit = nn.Parameter(
            torch.empty(1).uniform_(logit_min, logit_max))

    def forward(self, x, layer):
        """Apply concrete dropout to ``x`` and feed the result through ``layer``.

        Returns:
            (layer output, scalar regularisation term).
        """
        p = torch.sigmoid(self.p_logit)
        out = layer(self._concrete_dropout(x, p))

        # L2 norm of the wrapped layer's parameters. NOTE: this term is
        # computed but deliberately excluded from the returned regulariser
        # (see the commented-out line below).
        sum_of_square = 0
        for param in layer.parameters():
            sum_of_square = sum_of_square + (param ** 2).sum()
        weights_regularizer = self.weight_regularizer * sum_of_square / (1 - p)

        # Entropy-style penalty on the dropout probability, scaled by the
        # number of elements in a single sample.
        dropout_regularizer = p * torch.log(p) + (1. - p) * torch.log(1. - p)
        input_dimensionality = x[0].numel()  # Number of elements of first item in batch
        dropout_regularizer *= self.dropout_regularizer * input_dimensionality

        # regularization = weights_regularizer + dropout_regularizer
        regularization = dropout_regularizer
        return out, regularization

    def _concrete_dropout(self, x, p):
        """Multiply ``x`` by a relaxed (soft) dropout mask with drop prob ``p``."""
        eps = 1e-7
        temp = 0.1

        noise = torch.rand_like(x)
        # Logit of the relaxed Bernoulli sample (Gumbel-softmax style).
        mask_logit = (torch.log(p + eps)
                      - torch.log(1 - p + eps)
                      + torch.log(noise + eps)
                      - torch.log(1 - noise + eps))
        drop_prob = torch.sigmoid(mask_logit / temp)

        keep_mask = 1 - drop_prob
        # Scale by the retain probability so the expected activation is unchanged.
        x = torch.mul(x, keep_mask) / (1 - p)
        return x
| [
"torch.mul",
"torch.log",
"torch.rand_like",
"numpy.log",
"torch.sigmoid",
"torch.pow",
"torch.empty"
] | [((712, 739), 'torch.sigmoid', 'torch.sigmoid', (['self.p_logit'], {}), '(self.p_logit)\n', (725, 739), False, 'import torch\n'), ((1525, 1543), 'torch.rand_like', 'torch.rand_like', (['x'], {}), '(x)\n', (1540, 1543), False, 'import torch\n'), ((1756, 1787), 'torch.sigmoid', 'torch.sigmoid', (['(drop_prob / temp)'], {}), '(drop_prob / temp)\n', (1769, 1787), False, 'import torch\n'), ((1868, 1895), 'torch.mul', 'torch.mul', (['x', 'random_tensor'], {}), '(x, random_tensor)\n', (1877, 1895), False, 'import torch\n'), ((483, 499), 'numpy.log', 'np.log', (['init_min'], {}), '(init_min)\n', (489, 499), True, 'import numpy as np\n'), ((502, 524), 'numpy.log', 'np.log', (['(1.0 - init_min)'], {}), '(1.0 - init_min)\n', (508, 524), True, 'import numpy as np\n'), ((543, 559), 'numpy.log', 'np.log', (['init_max'], {}), '(init_max)\n', (549, 559), True, 'import numpy as np\n'), ((562, 584), 'numpy.log', 'np.log', (['(1.0 - init_max)'], {}), '(1.0 - init_max)\n', (568, 584), True, 'import numpy as np\n'), ((1035, 1047), 'torch.log', 'torch.log', (['p'], {}), '(p)\n', (1044, 1047), False, 'import torch\n'), ((1090, 1108), 'torch.log', 'torch.log', (['(1.0 - p)'], {}), '(1.0 - p)\n', (1099, 1108), False, 'import torch\n'), ((1702, 1733), 'torch.log', 'torch.log', (['(1 - unif_noise + eps)'], {}), '(1 - unif_noise + eps)\n', (1711, 1733), False, 'import torch\n'), ((898, 917), 'torch.pow', 'torch.pow', (['param', '(2)'], {}), '(param, 2)\n', (907, 917), False, 'import torch\n'), ((1652, 1679), 'torch.log', 'torch.log', (['(unif_noise + eps)'], {}), '(unif_noise + eps)\n', (1661, 1679), False, 'import torch\n'), ((621, 635), 'torch.empty', 'torch.empty', (['(1)'], {}), '(1)\n', (632, 635), False, 'import torch\n'), ((1566, 1584), 'torch.log', 'torch.log', (['(p + eps)'], {}), '(p + eps)\n', (1575, 1584), False, 'import torch\n'), ((1607, 1629), 'torch.log', 'torch.log', (['(1 - p + eps)'], {}), '(1 - p + eps)\n', (1616, 1629), False, 'import torch\n')] |
# Streng kopi af tds artikel
import numpy as np
import pandas as pd
import datetime
import matplotlib.pyplot as plt
import ipywidgets as widgets
import scipy.stats as scs
import scipy.optimize as sco
import statsmodels.api as sm
import scipy.interpolate as sci
from pandas_datareader import data as pdr
import yfinance as yf
import seaborn as sns
# Analysis window: ten years of daily prices.
start_date = datetime.datetime(2010,1,1)
end_date = datetime.datetime(2020,1,1)
# Ticker universe for the portfolio (8 assets).
sym = ["RICK","PM","AVAV", "RACE","LVS","CGC","TIF","TSLA"]
# Adjusted close accounts for splits/dividends, so pct_change yields total returns.
data = pdr.get_data_yahoo(sym, start=start_date, end=end_date)["Adj Close"]
data.head()
data.info()
table = data
table.head()
# Price history: one line per ticker.
plt.figure(figsize=(14, 7))
for c in table.columns.values:
    plt.plot(table.index, table[c], lw=1,alpha=0.8,label=c)
plt.legend(fontsize=10)
plt.ylabel('Price in USD')
# Daily simple returns and their plot.
returns = table.pct_change()
plt.figure(figsize=(14, 7))
for c in returns.columns.values:
    plt.plot(returns.index, returns[c], lw=1,alpha=0.8,label=c)
plt.legend(fontsize=10)
plt.ylabel('Daily returns')
def portfolio_annualised_performance(weights, mean_returns, cov_matrix):
    """Annualise a portfolio's volatility and return (252 trading days).

    Args:
        weights: asset weight vector (expected to sum to 1).
        mean_returns: mean daily return per asset.
        cov_matrix: covariance matrix of the daily returns.

    Returns:
        (annualised standard deviation, annualised return).
    """
    annual_return = np.sum(mean_returns * weights) * 252
    daily_variance = np.dot(weights.T, np.dot(cov_matrix, weights))
    annual_std = np.sqrt(daily_variance) * np.sqrt(252)
    return annual_std, annual_return
def random_portfolios(num_portfolios, mean_returns, cov_matrix, risk_free_rate):
    """Simulate random long-only portfolios for the efficient frontier.

    Args:
        num_portfolios: number of random weight vectors to draw.
        mean_returns: mean daily return per asset (length defines #assets).
        cov_matrix: covariance matrix of the daily returns.
        risk_free_rate: risk-free rate used for the Sharpe ratio.

    Returns:
        (results, weights_record) where results is a (3, num_portfolios)
        array holding [annualised std, annualised return, Sharpe ratio]
        per column, and weights_record is the list of weight vectors.
    """
    # BUG FIX: the weight vector was hard-coded to 4 assets, which breaks
    # (and mislabels allocations) whenever the universe has a different
    # size -- e.g. the 8-ticker `sym` list used in this script. Derive the
    # asset count from the inputs instead.
    num_assets = len(mean_returns)
    results = np.zeros((3,num_portfolios))
    weights_record = []
    for i in range(num_portfolios):
        weights = np.random.random(num_assets)
        weights /= np.sum(weights)  # normalise so the weights sum to 1
        weights_record.append(weights)
        portfolio_std_dev, portfolio_return = portfolio_annualised_performance(weights, mean_returns, cov_matrix)
        results[0,i] = portfolio_std_dev
        results[1,i] = portfolio_return
        results[2,i] = (portfolio_return - risk_free_rate) / portfolio_std_dev
    return results, weights_record
# Inputs for the Monte-Carlo efficient-frontier simulation.
returns = table.pct_change()
mean_returns = returns.mean()
cov_matrix = returns.cov()
num_portfolios = 10000
# NOTE(review): presumably the annualised risk-free rate used in the
# Sharpe ratio -- confirm the intended convention.
risk_free_rate = 0.00618
def display_simulated_ef_with_random(mean_returns, cov_matrix, num_portfolios, risk_free_rate):
    """Run the random-portfolio simulation and print the two key portfolios.

    Prints allocation, annualised return and annualised volatility of
    (a) the maximum-Sharpe-ratio portfolio and (b) the minimum-volatility
    portfolio among ``num_portfolios`` random weight vectors.

    NOTE(review): relies on the module-level ``table`` for the allocation
    labels, so the weight vectors returned by random_portfolios must have
    one entry per column of ``table`` -- verify the two stay in sync.
    """
    results, weights = random_portfolios(num_portfolios,mean_returns, cov_matrix, risk_free_rate)
    # Row 2 of `results` holds the Sharpe ratio; pick its maximum.
    max_sharpe_idx = np.argmax(results[2])
    sdp, rp = results[0,max_sharpe_idx], results[1,max_sharpe_idx]
    max_sharpe_allocation = pd.DataFrame(weights[max_sharpe_idx],index=table.columns,columns=['allocation'])
    max_sharpe_allocation.allocation = [round(i*100,2)for i in max_sharpe_allocation.allocation]
    max_sharpe_allocation = max_sharpe_allocation.T
    # Row 0 holds the annualised volatility; pick its minimum.
    min_vol_idx = np.argmin(results[0])
    sdp_min, rp_min = results[0,min_vol_idx], results[1,min_vol_idx]
    min_vol_allocation = pd.DataFrame(weights[min_vol_idx],index=table.columns,columns=['allocation'])
    min_vol_allocation.allocation = [round(i*100,2)for i in min_vol_allocation.allocation]
    min_vol_allocation = min_vol_allocation.T
    print("Maximum Sharpe Ratio Portfolio Allocation\n")
    print("Annualised Return:", round(rp,2))
    print("Annualised Volatility:", round(sdp,2))
    print("\n")
    print(max_sharpe_allocation)
    print("Minimum Volatility Portfolio Allocation\n")
    print("Annualised Return:", round(rp_min,2))
    print("Annualised Volatility:", round(sdp_min,2))
    print("\n")
    print(min_vol_allocation)
display_simulated_ef_with_random(mean_returns, cov_matrix, num_portfolios, risk_free_rate)
| [
"datetime.datetime",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.random.random",
"matplotlib.pyplot.plot",
"numpy.argmax",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.dot",
"numpy.argmin",
"pandas.DataFrame",
"matplotlib.pyplot.legend",
"pandas_datareader.data.get_d... | [((363, 392), 'datetime.datetime', 'datetime.datetime', (['(2010)', '(1)', '(1)'], {}), '(2010, 1, 1)\n', (380, 392), False, 'import datetime\n'), ((402, 431), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (419, 431), False, 'import datetime\n'), ((619, 646), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 7)'}), '(figsize=(14, 7))\n', (629, 646), True, 'import matplotlib.pyplot as plt\n'), ((739, 762), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (749, 762), True, 'import matplotlib.pyplot as plt\n'), ((763, 789), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price in USD"""'], {}), "('Price in USD')\n", (773, 789), True, 'import matplotlib.pyplot as plt\n'), ((820, 847), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 7)'}), '(figsize=(14, 7))\n', (830, 847), True, 'import matplotlib.pyplot as plt\n'), ((946, 969), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (956, 969), True, 'import matplotlib.pyplot as plt\n'), ((970, 997), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Daily returns"""'], {}), "('Daily returns')\n", (980, 997), True, 'import matplotlib.pyplot as plt\n'), ((497, 552), 'pandas_datareader.data.get_data_yahoo', 'pdr.get_data_yahoo', (['sym'], {'start': 'start_date', 'end': 'end_date'}), '(sym, start=start_date, end=end_date)\n', (515, 552), True, 'from pandas_datareader import data as pdr\n'), ((683, 740), 'matplotlib.pyplot.plot', 'plt.plot', (['table.index', 'table[c]'], {'lw': '(1)', 'alpha': '(0.8)', 'label': 'c'}), '(table.index, table[c], lw=1, alpha=0.8, label=c)\n', (691, 740), True, 'import matplotlib.pyplot as plt\n'), ((886, 947), 'matplotlib.pyplot.plot', 'plt.plot', (['returns.index', 'returns[c]'], {'lw': '(1)', 'alpha': '(0.8)', 'label': 'c'}), '(returns.index, returns[c], lw=1, alpha=0.8, label=c)\n', (894, 947), True, 'import 
matplotlib.pyplot as plt\n'), ((1368, 1397), 'numpy.zeros', 'np.zeros', (['(3, num_portfolios)'], {}), '((3, num_portfolios))\n', (1376, 1397), True, 'import numpy as np\n'), ((2234, 2255), 'numpy.argmax', 'np.argmax', (['results[2]'], {}), '(results[2])\n', (2243, 2255), True, 'import numpy as np\n'), ((2351, 2438), 'pandas.DataFrame', 'pd.DataFrame', (['weights[max_sharpe_idx]'], {'index': 'table.columns', 'columns': "['allocation']"}), "(weights[max_sharpe_idx], index=table.columns, columns=[\n 'allocation'])\n", (2363, 2438), True, 'import pandas as pd\n'), ((2604, 2625), 'numpy.argmin', 'np.argmin', (['results[0]'], {}), '(results[0])\n', (2613, 2625), True, 'import numpy as np\n'), ((2720, 2799), 'pandas.DataFrame', 'pd.DataFrame', (['weights[min_vol_idx]'], {'index': 'table.columns', 'columns': "['allocation']"}), "(weights[min_vol_idx], index=table.columns, columns=['allocation'])\n", (2732, 2799), True, 'import pandas as pd\n'), ((1132, 1162), 'numpy.sum', 'np.sum', (['(mean_returns * weights)'], {}), '(mean_returns * weights)\n', (1138, 1162), True, 'import numpy as np\n'), ((1235, 1247), 'numpy.sqrt', 'np.sqrt', (['(252)'], {}), '(252)\n', (1242, 1247), True, 'import numpy as np\n'), ((1475, 1494), 'numpy.random.random', 'np.random.random', (['(4)'], {}), '(4)\n', (1491, 1494), True, 'import numpy as np\n'), ((1514, 1529), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (1520, 1529), True, 'import numpy as np\n'), ((1203, 1230), 'numpy.dot', 'np.dot', (['cov_matrix', 'weights'], {}), '(cov_matrix, weights)\n', (1209, 1230), True, 'import numpy as np\n')] |
"""
Here, the code to get the heatmap of individual channels is present.
It assumes that a pretrained model of type GazeStaticSineAndCosineModel is passed on.
One has the option to choose the layer of which the heatmap is desired.
"""
from typing import List, Tuple
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torchvision.transforms.functional as F
from PIL import Image
from tqdm import tqdm
from eye_model.data_loader_static_sinecosine import DictEyeImgLoader, remove_eyeless_imgs
from backbones.resnet import ResNet
from sinecosine_model.data_loader_static_sinecosine import ImageLoaderStaticSineCosine
from sinecosine_model.static_sinecosine_model import GazeStaticSineAndCosineModel
class ResNetHeatmap(nn.Module):
    """Truncated ResNet feature extractor.

    Unwraps a GazeStaticSineAndCosineModel to reach its ResNet backbone and
    keeps only the first ``idx`` layers, so forward() returns intermediate
    feature maps instead of gaze predictions.
    """

    def __init__(self, model, idx):
        super().__init__()
        self._idx = idx
        children = list(model.children())
        assert isinstance(children[0], GazeStaticSineAndCosineModel)
        inner = list(children[0].children())
        assert isinstance(inner[0], ResNet)
        backbone_layers = list(inner[0].children())
        # Log the backbone structure so the chosen cut index is visible.
        print('-->'.join(f'{i}.{type(x).__name__}' for i, x in enumerate(backbone_layers)))
        self.fmap = nn.Sequential(*backbone_layers[:idx])

    def forward(self, input):
        """Return the feature map of ``input`` after the first ``idx`` layers."""
        return self.fmap(input)
class ImageLoaderHeatMap(ImageLoaderStaticSineCosine):
    """Image loader that pairs each image with its eye-annotation dict.

    Extends ImageLoaderStaticSineCosine by dropping images without eye
    annotations and returning, for each index, the image tensor together
    with the dict loaded from the image's source path (e.g. eye bounding
    boxes).
    """
    def __init__(
            self,
            source_path,
            file_name=None,
            transform=None,
            g_z_min_val=None,
    ):
        # NOTE(review): `g_z_min_val` is accepted but never used or forwarded
        # to the parent class -- confirm whether it can be removed.
        super().__init__(source_path, file_name=file_name, transform=transform)
        loader_obj = DictEyeImgLoader()
        # Keep only images for which eye annotations exist.
        self.imgs = remove_eyeless_imgs(self.imgs, loader_obj)
        self._dict_loader = loader_obj.load
    def __getitem__(self, index):
        """Return (image, annotation dict) for the sample at `index`."""
        img, _ = super().__getitem__(index)
        path_source, _ = self.imgs[index]
        return (img, self._dict_loader(path_source))
    def __len__(self):
        return len(self.imgs)
def get_blended_img(img: Image.Image, fmap: torch.Tensor, alpha: float) -> Image.Image:
    """Overlay a feature map on top of an image.

    The feature map is converted to a PIL image, both images are resized to
    the larger of the two side lengths, and the pair is alpha-blended with
    transparency ``alpha``.
    """
    heat = F.to_pil_image(torch.Tensor(fmap.cpu().numpy()))
    side = max(heat.size[0], img.size[0])
    return Image.blend(img.resize((side, side)), heat.resize((side, side)), alpha)
def blend_heatmap(img: Image.Image,
                  fmaps: np.ndarray,
                  blend_alpha: float = 0.8,
                  nrows: int = 8,
                  ncols: int = 8,
                  img_idx_list: List[int] = None):
    """Plot a grid of channel heatmaps blended onto the original image.

    When ``img_idx_list`` is None, nrows*ncols channels are sampled at
    random (without replacement) from ``fmaps``; otherwise exactly those
    channels are plotted, 8 per row.

    Args:
        fmaps: feature maps of shape (#Channels, height, width).
    """
    if img_idx_list is not None:
        cnt = len(img_idx_list)
        ncols = 8
        nrows = int(np.ceil(cnt / ncols))
    else:
        cnt = nrows * ncols
        img_idx_list = np.random.choice(np.arange(fmaps.shape[0]), size=cnt, replace=False)
    _, ax = plt.subplots(figsize=(20, 3 * nrows), nrows=nrows, ncols=ncols)
    for pos, channel in enumerate(img_idx_list):
        blended = get_blended_img(img, fmaps[channel], blend_alpha)
        panel = ax[pos // ncols, pos % ncols] if nrows > 1 else ax[pos]
        panel.imshow(blended)
        panel.set_title(f'#Channel:{channel}')
def get_eye_region_activation(activation: np.ndarray, eye_bbox: np.ndarray) -> np.ndarray:
    """Average activation inside a rectangular eye region, per channel.

    Args:
        activation: feature maps of shape (#Channels, height, width); the
            spatial dimensions must be square.
        eye_bbox: length-4 array of normalized box coordinates as fractions
            of the side length.

    Returns:
        1-D array with one mean activation value per channel.
    """
    assert activation.shape[1] == activation.shape[2]
    # Scale the normalized box to pixel coordinates.
    # NOTE(review): the unpacking order (y, x, h, w) differs from the
    # documented (x, y, h, w) -- confirm the bbox convention upstream.
    y, x, h, w = (activation.shape[1] * eye_bbox).astype(int)
    region = activation[:, x:x + w, y:y + h]
    return region.reshape(activation.shape[0], -1).mean(axis=1)
def get_eye_region_activation_df(model: ResNetHeatmap,
                                 img_loader: ImageLoaderHeatMap) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """
    For each image present in img_loader, compute the average activation of
    the left- and right-eye regions, per channel, then summarize with
    several quantiles over the channel dimension.

    Returns:
        (left-eye DataFrame, right-eye DataFrame), one row per image and
        one column per quantile (Q0.5, Q0.8, Q0.9, Q1.0).
    """
    quantiles = [0.5, 0.8, 0.9, 1.0]
    ldata = []
    rdata = []
    with torch.no_grad():
        for i in tqdm(range(len(img_loader))):
            img, img_dict = img_loader[i]
            # Assumes 3x224x224 input images -- the model expects a batch.
            activation = model(img.view((1, 3, 224, 224)))[0]
            l_activation = get_eye_region_activation(activation.cpu().numpy(), img_dict['bbox']['left'])
            r_activation = get_eye_region_activation(activation.cpu().numpy(), img_dict['bbox']['right'])
            ldata.append(np.quantile(l_activation, quantiles))
            rdata.append(np.quantile(r_activation, quantiles))
    leye_df = pd.DataFrame(ldata, columns=[f'Q{c}' for c in quantiles])
    # BUG FIX: the right-eye frame was previously built from `ldata`,
    # silently duplicating the left-eye statistics.
    reye_df = pd.DataFrame(rdata, columns=[f'Q{c}' for c in quantiles])
    return (leye_df, reye_df)
| [
"numpy.ceil",
"torch.nn.Sequential",
"PIL.Image.blend",
"eye_model.data_loader_static_sinecosine.remove_eyeless_imgs",
"eye_model.data_loader_static_sinecosine.DictEyeImgLoader",
"numpy.quantile",
"pandas.DataFrame",
"torch.no_grad",
"matplotlib.pyplot.subplots",
"numpy.arange"
] | [((2458, 2488), 'PIL.Image.blend', 'Image.blend', (['img', 'h_img', 'alpha'], {}), '(img, h_img, alpha)\n', (2469, 2488), False, 'from PIL import Image\n'), ((3300, 3363), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 3 * nrows)', 'nrows': 'nrows', 'ncols': 'ncols'}), '(figsize=(20, 3 * nrows), nrows=nrows, ncols=ncols)\n', (3312, 3363), True, 'import matplotlib.pyplot as plt\n'), ((5207, 5264), 'pandas.DataFrame', 'pd.DataFrame', (['ldata'], {'columns': "[f'Q{c}' for c in quantiles]"}), "(ldata, columns=[f'Q{c}' for c in quantiles])\n", (5219, 5264), True, 'import pandas as pd\n'), ((5279, 5336), 'pandas.DataFrame', 'pd.DataFrame', (['ldata'], {'columns': "[f'Q{c}' for c in quantiles]"}), "(ldata, columns=[f'Q{c}' for c in quantiles])\n", (5291, 5336), True, 'import pandas as pd\n'), ((1237, 1272), 'torch.nn.Sequential', 'nn.Sequential', (['*resnet_layers[:idx]'], {}), '(*resnet_layers[:idx])\n', (1250, 1272), True, 'import torch.nn as nn\n'), ((1648, 1666), 'eye_model.data_loader_static_sinecosine.DictEyeImgLoader', 'DictEyeImgLoader', ([], {}), '()\n', (1664, 1666), False, 'from eye_model.data_loader_static_sinecosine import DictEyeImgLoader, remove_eyeless_imgs\n'), ((1687, 1729), 'eye_model.data_loader_static_sinecosine.remove_eyeless_imgs', 'remove_eyeless_imgs', (['self.imgs', 'loader_obj'], {}), '(self.imgs, loader_obj)\n', (1706, 1729), False, 'from eye_model.data_loader_static_sinecosine import DictEyeImgLoader, remove_eyeless_imgs\n'), ((4688, 4703), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4701, 4703), False, 'import torch\n'), ((3134, 3159), 'numpy.arange', 'np.arange', (['fmaps.shape[0]'], {}), '(fmaps.shape[0])\n', (3143, 3159), True, 'import numpy as np\n'), ((3266, 3286), 'numpy.ceil', 'np.ceil', (['(cnt / ncols)'], {}), '(cnt / ncols)\n', (3273, 3286), True, 'import numpy as np\n'), ((5092, 5128), 'numpy.quantile', 'np.quantile', (['l_activation', 'quantiles'], {}), '(l_activation, quantiles)\n', (5103, 
5128), True, 'import numpy as np\n'), ((5155, 5191), 'numpy.quantile', 'np.quantile', (['r_activation', 'quantiles'], {}), '(r_activation, quantiles)\n', (5166, 5191), True, 'import numpy as np\n')] |
import torch
import matplotlib.pyplot as plt
from torch.nn import functional as F
import numpy as np
from seqwise_cont_skillspace.algo.algo_cont_skillspace import \
SeqwiseAlgoRevisedContSkills
import self_supervised.utils.typed_dicts as td
from self_supervised.base.replay_buffer.env_replay_buffer import \
NormalSequenceReplayBuffer
import rlkit.torch.pytorch_util as ptu
from seqwise_cont_skillspace.utils.get_colors import get_colors
class SeqwiseAlgoRevisedContSkillsHighdimusingvae(SeqwiseAlgoRevisedContSkills):
    """Variant of SeqwiseAlgoRevisedContSkills whose classifier-performance
    evaluation goes through a VAE-style discriminator (``self.trainer.df``)
    that returns both a per-step and a per-sequence skill reconstruction.
    """

    @torch.no_grad()
    def _classfier_perf_eval(self):
        """Evaluate the skill classifier on freshly rolled-out paths.

        Returns:
            (per-step MSE, per-sequence MSE, dict of posterior scatter figures).
        """
        num_paths = 2
        eval_paths = self._get_paths_mode_influence_test(
            num_paths=num_paths,
            seq_len=self.seq_len,
        )
        assert type(eval_paths[0]) == td.TransitonModeMappingDiscreteSkills
        obs_dim = eval_paths[0].obs.shape[0]
        # Stack the per-path arrays and convert to (batch, seq, dim) tensors
        # (the rollouts store (dim, seq), hence the transpose).
        next_obs = []
        mode = []
        skill_id = []
        for path in eval_paths:
            next_obs.append(path.next_obs)
            mode.append(path.mode)
            skill_id.append(path.skill_id)
        next_obs = ptu.from_numpy(
            np.stack(next_obs, axis=0)
        ).transpose(-1, -2)
        mode = ptu.from_numpy(
            np.stack(mode, axis=0)
        ).transpose(-1, -2)
        skill_id = ptu.from_numpy(
            np.stack(skill_id, axis=0)
        ).transpose(-1, -2)
        assert next_obs.shape \
               == torch.Size((len(eval_paths), self.seq_len, obs_dim))
        ret_dict = self.trainer.df(
            next_obs, train=True
        )
        pred_skill_dist = ret_dict['skill_recon']['dist']
        pred_skill_dist_seq = ret_dict['classified_seqs']
        # Per-step accuracy: reconstructed skill at every transition vs the
        # ground-truth mode.
        df_accuracy = F.mse_loss(
            pred_skill_dist.loc.reshape(*mode.shape),
            mode
        )
        # Per-sequence accuracy: the mode is constant over a sequence, so
        # any timestep (here t=0) serves as the ground truth.
        df_accuracy_seq = F.mse_loss(pred_skill_dist_seq, mode[:, 0, :])
        figs = self._plot_posterior(
            post_dist=pred_skill_dist,
            skill_id_seq=skill_id
        )
        return df_accuracy, df_accuracy_seq, figs

    @torch.no_grad()
    def _classfier_perf_on_memory(self):
        """Evaluate the skill classifier on a batch from the replay buffer."""
        batch_size = self.batch_size
        assert isinstance(self.replay_buffer, NormalSequenceReplayBuffer)
        batch = self.replay_buffer.random_batch_bsd_format(
            batch_size=batch_size)
        ret_dict = self.trainer.df(
            ptu.from_numpy(batch.next_obs),
            train=True
        )
        df_accuracy = F.mse_loss(
            ret_dict['skill_recon']['dist'].loc.reshape(*batch.mode.shape),
            ptu.from_numpy(batch.mode)
        )
        return df_accuracy

    def _plot_posterior(self,
                        post_dist,
                        skill_id_seq: torch.Tensor,
                        skills_gt_seq=None,
                        ):
        """
        Scatter-plot the first two dimensions of the posterior means,
        color-coded by skill id.

        Args:
            post_dist : (N * S, skill_dim) distribution
            skill_id_seq : (N, S, 1)
            skills_gt_seq : unsupported; must be None

        Returns:
            dict with an unclipped figure (``no_lim``) and one clipped to
            [-3, 3] on both axes (``lim``).
        """
        if skills_gt_seq is not None:
            raise ValueError
        # Reshape the flat posterior means back to (N, S, skill_dim).
        post_dist.loc = post_dist.loc.reshape(
            *skill_id_seq.shape[:-1],
            post_dist.batch_shape[-1]
        )
        assert isinstance(self.replay_buffer, NormalSequenceReplayBuffer)
        # BUG FIX: ``np.int`` was removed in NumPy 1.24 (deprecated 1.20) --
        # astype(np.int) raises AttributeError there. Use the builtin int.
        skill_ids = ptu.get_numpy(skill_id_seq[:, 0]).astype(int)
        skill_ids_unique = np.unique(ptu.get_numpy(skill_id_seq[:, 0])).astype(int)
        color_array = get_colors()
        plt.clf()
        plt.interactive(False)
        # Figure 1: free axis limits. (The first figure previously used the
        # legend label "skill_{}_" with a stray trailing underscore; labels
        # are now consistent across both figures.)
        _, axes = plt.subplots()
        for skill in skill_ids_unique:
            self._scatter_skill(post_dist, skill_ids, skill, color_array)
        axes.grid(True)
        axes.legend()
        fig_without_lim = plt.gcf()
        # Figure 2: same scatter, clipped to [-3, 3] on both axes.
        plt.close()
        _, axes = plt.subplots()
        for skill in skill_ids_unique:
            self._scatter_skill(post_dist, skill_ids, skill, color_array)
        lim = [-3., 3.]
        axes.set_ylim(lim)
        axes.set_xlim(lim)
        axes.legend()
        fig_with_lim = plt.gcf()
        return dict(
            no_lim=fig_without_lim,
            lim=fig_with_lim
        )

    @staticmethod
    def _scatter_skill(post_dist, skill_ids, skill, color_array):
        """Scatter the posterior means belonging to one skill id onto the
        current matplotlib axes."""
        mask = (skill_ids == skill).squeeze()
        plt.scatter(
            ptu.get_numpy(post_dist.loc[mask, :, 0].reshape(-1)),
            ptu.get_numpy(post_dist.loc[mask, :, 1].reshape(-1)),
            label="skill_{}".format(skill),
            c=color_array[skill]
        )
| [
"torch.nn.functional.mse_loss",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.clf",
"seqwise_cont_skillspace.utils.get_colors.get_colors",
"matplotlib.pyplot.close",
"rlkit.torch.pytorch_util.from_numpy",
"rlkit.torch.pytorch_util.get_numpy",
"numpy.stack",
"matplotlib.pyplot.interactive",
"torch.no... | [((539, 554), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (552, 554), False, 'import torch\n'), ((2175, 2190), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2188, 2190), False, 'import torch\n'), ((1950, 1996), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['pred_skill_dist_seq', 'mode[:, 0, :]'], {}), '(pred_skill_dist_seq, mode[:, 0, :])\n', (1960, 1996), True, 'from torch.nn import functional as F\n'), ((3715, 3727), 'seqwise_cont_skillspace.utils.get_colors.get_colors', 'get_colors', ([], {}), '()\n', (3725, 3727), False, 'from seqwise_cont_skillspace.utils.get_colors import get_colors\n'), ((3736, 3745), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3743, 3745), True, 'import matplotlib.pyplot as plt\n'), ((3754, 3776), 'matplotlib.pyplot.interactive', 'plt.interactive', (['(False)'], {}), '(False)\n', (3769, 3776), True, 'import matplotlib.pyplot as plt\n'), ((3795, 3809), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3807, 3809), True, 'import matplotlib.pyplot as plt\n'), ((4325, 4334), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4332, 4334), True, 'import matplotlib.pyplot as plt\n'), ((4343, 4354), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4352, 4354), True, 'import matplotlib.pyplot as plt\n'), ((4374, 4388), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4386, 4388), True, 'import matplotlib.pyplot as plt\n'), ((4887, 4896), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4894, 4896), True, 'import matplotlib.pyplot as plt\n'), ((2560, 2590), 'rlkit.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (['batch.next_obs'], {}), '(batch.next_obs)\n', (2574, 2590), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((2748, 2774), 'rlkit.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (['batch.mode'], {}), '(batch.mode)\n', (2762, 2774), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((3556, 3589), 'rlkit.torch.pytorch_util.get_numpy', 
'ptu.get_numpy', (['skill_id_seq[:, 0]'], {}), '(skill_id_seq[:, 0])\n', (3569, 3589), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((1136, 1162), 'numpy.stack', 'np.stack', (['next_obs'], {'axis': '(0)'}), '(next_obs, axis=0)\n', (1144, 1162), True, 'import numpy as np\n'), ((1234, 1256), 'numpy.stack', 'np.stack', (['mode'], {'axis': '(0)'}), '(mode, axis=0)\n', (1242, 1256), True, 'import numpy as np\n'), ((1332, 1358), 'numpy.stack', 'np.stack', (['skill_id'], {'axis': '(0)'}), '(skill_id, axis=0)\n', (1340, 1358), True, 'import numpy as np\n'), ((3642, 3675), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['skill_id_seq[:, 0]'], {}), '(skill_id_seq[:, 0])\n', (3655, 3675), True, 'import rlkit.torch.pytorch_util as ptu\n')] |
import os, pickle, re, subprocess, itertools
import numpy as np, pandas as pd, matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from datetime import datetime
from numpy import array as nparr
from astropy.io import fits
def read_fits(fits_file,ext=0):
    '''
    Shortcut function to get the header and data from a fits file and a given
    extension.

    Args:
        fits_file: path to the FITS file.
        ext: extension (HDU) index to read. Default 0 (primary HDU).

    Returns:
        (img_data, img_header) tuple for the requested extension.
    '''
    # The context manager guarantees the file handle is closed even if the
    # header/data access raises (the previous open/close pair leaked the
    # handle on error).
    with fits.open(fits_file) as hdulist:
        img_header = hdulist[ext].header
        img_data = hdulist[ext].data
    return img_data, img_header
def plot_before_after_difference_images(subimgfile, calfile, outdir, trim=False):
    """Plot a calibrated image next to its difference image and save a PNG.

    Args:
        subimgfile: path to the subtracted (difference) FITS image.
        calfile: path to the calibrated FITS image.
        outdir: directory the PNG is written to.
        trim: if True, plot only a fixed 600x600 cutout of both images
            (output filename gains a 'TRIM_' prefix).
    """
    trimstr = '' if not trim else 'TRIM_'
    outpath = ( os.path.join( outdir, ('before_after_'+trimstr+
        os.path.basename(subimgfile).replace('.fits','.png'))))
    # this would be 100000x better as an actual python package. (todo)
    sub_img, _ = read_fits(subimgfile)
    cal_img, _ = read_fits(calfile)
    if trim:
        # Hard-coded cutout region (pixel coordinates).
        xlow, xhigh = 400, 1000
        ylow, yhigh = 0, 600
        sub_img = sub_img[ylow:yhigh,xlow:xhigh]
        cal_img = cal_img[ylow:yhigh,xlow:xhigh]
    plt.close('all')
    plt.rcParams['xtick.direction'] = 'in'
    plt.rcParams['ytick.direction'] = 'in'
    fig, axs = plt.subplots(ncols=2, nrows=1, figsize=(6,4))
    # calibrated image: log-stretched grayscale.
    vmin, vmax = 10, int(1e3)
    norm = colors.LogNorm(vmin=vmin, vmax=vmax)
    cset1 = axs[0].imshow(cal_img, cmap='binary_r', vmin=vmin, vmax=vmax,
                          norm=norm)
    # difference image: symmetric log stretch so both signs are visible.
    diff_vmin, diff_vmax = -1000, 1000
    diffnorm = colors.SymLogNorm(linthresh=0.03, linscale=0.03, vmin=diff_vmin,
                                  vmax=diff_vmax)
    # Custom diverging colormap: Oranges_r for the low end, Blues for the
    # high end, and a fully transparent band (RGBA zeros) around zero.
    # NOTE(review): cm.get_cmap was removed in matplotlib 3.9; if upgrading,
    # switch to matplotlib.colormaps / Colormap.resampled.
    toplen = 57
    top = cm.get_cmap('Oranges_r', toplen)
    bottom = cm.get_cmap('Blues', toplen)
    newcolors = np.vstack((top(np.linspace(0, 1, toplen)),
                           np.zeros(((256-2*toplen),4)),
                           bottom(np.linspace(0, 1, toplen))))
    newcmp = ListedColormap(newcolors, name='lgb_cmap')
    cset2 = axs[1].imshow(sub_img, cmap=newcmp, vmin=diff_vmin,
                          vmax=diff_vmax, norm=diffnorm)
    # looked pretty good
    #cset2 = axs[1].imshow(sub_img, cmap='RdBu_r', vmin=diff_vmin,
    #                      vmax=diff_vmax, norm=diffnorm)
    # tweaking: strip tick labels and marks from both panels.
    for ax in axs.flatten():
        ax.set_xticklabels('')
        ax.set_yticklabels('')
        ax.get_xaxis().set_tick_params(which='both', direction='in')
        ax.get_yaxis().set_tick_params(which='both', direction='in')
        ax.xaxis.set_ticks_position('none')
        ax.yaxis.set_ticks_position('none')
    # colorbars: one per panel, attached to the right of each axes.
    divider0 = make_axes_locatable(axs[0])
    divider1 = make_axes_locatable(axs[1])
    cax0 = divider0.append_axes('right', size='5%', pad=0.05)
    cax1 = divider1.append_axes('right', size='5%', pad=0.05)
    cb1 = fig.colorbar(cset1, ax=axs[0], cax=cax0, extend='both')
    cb2 = fig.colorbar(cset2, ax=axs[1], cax=cax1, extend='both')
    # Symmetric decade ticks for the difference-image colorbar.
    cb2.set_ticks([-1e3,-1e2,-1e1,0,1e1,1e2,1e3])
    cb2.set_ticklabels(['-$10^3$','-$10^2$','-$10^1$','0',
                        '$10^1$','$10^2$','$10^3$'])
    for cb in [cb1, cb2]:
        cb.ax.tick_params(direction='in')
        cb.ax.tick_params(labelsize='small')
    fig.tight_layout(h_pad=0.1, w_pad=0.1, pad=-1)
    fig.savefig(outpath, bbox_inches='tight', dpi=400)
    print('{}: made {}'.format(datetime.utcnow().isoformat(), outpath))
if __name__ == "__main__":
datadir = '../data/subtracted_demo_images/projid_1378_cam4_ccd3/'
subimgfile = os.path.join(
datadir, 'rsub-d2f9343c-tess2018230145941-s0001-4-3-0120_cal_img_bkgdsub-xtrns.fits')
calfile = os.path.join(
datadir, 'tess2018230145941-s0001-4-3-0120_cal_img.fits')
plot_before_after_difference_images(subimgfile, calfile, datadir, trim=True)
plot_before_after_difference_images(subimgfile, calfile, datadir)
| [
"matplotlib.cm.get_cmap",
"datetime.datetime.utcnow",
"os.path.join",
"matplotlib.colors.ListedColormap",
"matplotlib.pyplot.close",
"numpy.zeros",
"numpy.linspace",
"os.path.basename",
"mpl_toolkits.axes_grid1.make_axes_locatable",
"astropy.io.fits.open",
"matplotlib.colors.SymLogNorm",
"matp... | [((541, 561), 'astropy.io.fits.open', 'fits.open', (['fits_file'], {}), '(fits_file)\n', (550, 561), False, 'from astropy.io import fits\n'), ((1266, 1282), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1275, 1282), True, 'import numpy as np, pandas as pd, matplotlib.pyplot as plt\n'), ((1384, 1430), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(2)', 'nrows': '(1)', 'figsize': '(6, 4)'}), '(ncols=2, nrows=1, figsize=(6, 4))\n', (1396, 1430), True, 'import numpy as np, pandas as pd, matplotlib.pyplot as plt\n'), ((1495, 1531), 'matplotlib.colors.LogNorm', 'colors.LogNorm', ([], {'vmin': 'vmin', 'vmax': 'vmax'}), '(vmin=vmin, vmax=vmax)\n', (1509, 1531), True, 'import matplotlib.colors as colors\n'), ((1723, 1808), 'matplotlib.colors.SymLogNorm', 'colors.SymLogNorm', ([], {'linthresh': '(0.03)', 'linscale': '(0.03)', 'vmin': 'diff_vmin', 'vmax': 'diff_vmax'}), '(linthresh=0.03, linscale=0.03, vmin=diff_vmin, vmax=diff_vmax\n )\n', (1740, 1808), True, 'import matplotlib.colors as colors\n'), ((1864, 1896), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""Oranges_r"""', 'toplen'], {}), "('Oranges_r', toplen)\n", (1875, 1896), False, 'from matplotlib import cm\n'), ((1910, 1938), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""Blues"""', 'toplen'], {}), "('Blues', toplen)\n", (1921, 1938), False, 'from matplotlib import cm\n'), ((2131, 2173), 'matplotlib.colors.ListedColormap', 'ListedColormap', (['newcolors'], {'name': '"""lgb_cmap"""'}), "(newcolors, name='lgb_cmap')\n", (2145, 2173), False, 'from matplotlib.colors import ListedColormap, LinearSegmentedColormap\n'), ((2811, 2838), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['axs[0]'], {}), '(axs[0])\n', (2830, 2838), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((2854, 2881), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['axs[1]'], {}), '(axs[1])\n', (2873, 2881), False, 'from 
mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((3710, 3817), 'os.path.join', 'os.path.join', (['datadir', '"""rsub-d2f9343c-tess2018230145941-s0001-4-3-0120_cal_img_bkgdsub-xtrns.fits"""'], {}), "(datadir,\n 'rsub-d2f9343c-tess2018230145941-s0001-4-3-0120_cal_img_bkgdsub-xtrns.fits'\n )\n", (3722, 3817), False, 'import os, pickle, re, subprocess, itertools\n'), ((3832, 3902), 'os.path.join', 'os.path.join', (['datadir', '"""tess2018230145941-s0001-4-3-0120_cal_img.fits"""'], {}), "(datadir, 'tess2018230145941-s0001-4-3-0120_cal_img.fits')\n", (3844, 3902), False, 'import os, pickle, re, subprocess, itertools\n'), ((2025, 2056), 'numpy.zeros', 'np.zeros', (['(256 - 2 * toplen, 4)'], {}), '((256 - 2 * toplen, 4))\n', (2033, 2056), True, 'import numpy as np, pandas as pd, matplotlib.pyplot as plt\n'), ((1970, 1995), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'toplen'], {}), '(0, 1, toplen)\n', (1981, 1995), True, 'import numpy as np, pandas as pd, matplotlib.pyplot as plt\n'), ((2089, 2114), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'toplen'], {}), '(0, 1, toplen)\n', (2100, 2114), True, 'import numpy as np, pandas as pd, matplotlib.pyplot as plt\n'), ((884, 912), 'os.path.basename', 'os.path.basename', (['subimgfile'], {}), '(subimgfile)\n', (900, 912), False, 'import os, pickle, re, subprocess, itertools\n'), ((3552, 3569), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (3567, 3569), False, 'from datetime import datetime\n')] |
#!/usr/bin/env python
"""
Show distribution after a change of variables with y = x^(1/2), where the pdf for x is Gaussian
"""
import matplotlib.pyplot as pl
from scipy.stats import norm
import numpy as np
# normal distribution
mu = 5. # the mean, mu
sigma = 1 # standard deviations, sigma
x = np.linspace(0, 10, 1000) # x
# set plot to render labels using latex
pl.rc('text', usetex=True)
pl.rc('font', family='serif')
pl.rc('font', size=14)
fig = pl.figure(figsize=(6,5), dpi=100)
# plot pdfs
pl.plot(x, norm.pdf(x, mu, sigma), 'b--', label='$p(z=x)$')
pl.plot(np.sqrt(x), 2.*np.sqrt(x)*norm.pdf(x, mu, sigma), 'r', label='$p(z=y=x^{1/2})$')
ax = pl.gca()
ax.set_xlabel('$z$', fontsize=14)
ax.set_ylabel('$p(z)$', fontsize=14)
ax.legend(loc='upper right', frameon=False)
fig.subplots_adjust(bottom=0.15)
pl.savefig('../change_of_variables_1d.pdf')
pl.show()
| [
"matplotlib.pyplot.savefig",
"numpy.sqrt",
"matplotlib.pyplot.gca",
"numpy.linspace",
"matplotlib.pyplot.figure",
"scipy.stats.norm.pdf",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.show"
] | [((297, 321), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(1000)'], {}), '(0, 10, 1000)\n', (308, 321), True, 'import numpy as np\n'), ((367, 393), 'matplotlib.pyplot.rc', 'pl.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (372, 393), True, 'import matplotlib.pyplot as pl\n'), ((394, 423), 'matplotlib.pyplot.rc', 'pl.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (399, 423), True, 'import matplotlib.pyplot as pl\n'), ((424, 446), 'matplotlib.pyplot.rc', 'pl.rc', (['"""font"""'], {'size': '(14)'}), "('font', size=14)\n", (429, 446), True, 'import matplotlib.pyplot as pl\n'), ((453, 487), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(6, 5)', 'dpi': '(100)'}), '(figsize=(6, 5), dpi=100)\n', (462, 487), True, 'import matplotlib.pyplot as pl\n'), ((655, 663), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (661, 663), True, 'import matplotlib.pyplot as pl\n'), ((815, 858), 'matplotlib.pyplot.savefig', 'pl.savefig', (['"""../change_of_variables_1d.pdf"""'], {}), "('../change_of_variables_1d.pdf')\n", (825, 858), True, 'import matplotlib.pyplot as pl\n'), ((859, 868), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (866, 868), True, 'import matplotlib.pyplot as pl\n'), ((511, 533), 'scipy.stats.norm.pdf', 'norm.pdf', (['x', 'mu', 'sigma'], {}), '(x, mu, sigma)\n', (519, 533), False, 'from scipy.stats import norm\n'), ((568, 578), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (575, 578), True, 'import numpy as np\n'), ((594, 616), 'scipy.stats.norm.pdf', 'norm.pdf', (['x', 'mu', 'sigma'], {}), '(x, mu, sigma)\n', (602, 616), False, 'from scipy.stats import norm\n'), ((583, 593), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (590, 593), True, 'import numpy as np\n')] |
from scipy.stats.stats import pearsonr
import matplotlib.pyplot as plt
import numpy as np
# compute correlation between features
def compute_correlation(Xtrain):
for i in range(0, Xtrain.shape[1]):
for j in range(i+1, Xtrain.shape[1]):
correlation = pearsonr(Xtrain[:, i], Xtrain[:, j])[0]
if correlation > 0.3 or correlation < -0.3:
print ('correlation between', i, "and", j, " feature is", correlation)
# plot features with y, x sorted
def plotFeatures (X, Y):
MAX_FEATURES = 15
Y = np.exp(Y)
permY = np.argsort(Y, axis=0)
plt.title("Y")
plt.plot(Y[permY])
plt.show()
for i in range(0, np.min(MAX_FEATURES, X.shape[1])):
column = X[:, i]
perm = np.argsort(column, axis=0)
plt.title("feature " + str(i))
plt.plot(column[perm], Y[perm], 'bo')
plt.show()
| [
"matplotlib.pyplot.plot",
"numpy.exp",
"numpy.argsort",
"scipy.stats.stats.pearsonr",
"numpy.min",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((550, 559), 'numpy.exp', 'np.exp', (['Y'], {}), '(Y)\n', (556, 559), True, 'import numpy as np\n'), ((572, 593), 'numpy.argsort', 'np.argsort', (['Y'], {'axis': '(0)'}), '(Y, axis=0)\n', (582, 593), True, 'import numpy as np\n'), ((598, 612), 'matplotlib.pyplot.title', 'plt.title', (['"""Y"""'], {}), "('Y')\n", (607, 612), True, 'import matplotlib.pyplot as plt\n'), ((617, 635), 'matplotlib.pyplot.plot', 'plt.plot', (['Y[permY]'], {}), '(Y[permY])\n', (625, 635), True, 'import matplotlib.pyplot as plt\n'), ((640, 650), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (648, 650), True, 'import matplotlib.pyplot as plt\n'), ((674, 706), 'numpy.min', 'np.min', (['MAX_FEATURES', 'X.shape[1]'], {}), '(MAX_FEATURES, X.shape[1])\n', (680, 706), True, 'import numpy as np\n'), ((749, 775), 'numpy.argsort', 'np.argsort', (['column'], {'axis': '(0)'}), '(column, axis=0)\n', (759, 775), True, 'import numpy as np\n'), ((823, 860), 'matplotlib.pyplot.plot', 'plt.plot', (['column[perm]', 'Y[perm]', '"""bo"""'], {}), "(column[perm], Y[perm], 'bo')\n", (831, 860), True, 'import matplotlib.pyplot as plt\n'), ((869, 879), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (877, 879), True, 'import matplotlib.pyplot as plt\n'), ((275, 311), 'scipy.stats.stats.pearsonr', 'pearsonr', (['Xtrain[:, i]', 'Xtrain[:, j]'], {}), '(Xtrain[:, i], Xtrain[:, j])\n', (283, 311), False, 'from scipy.stats.stats import pearsonr\n')] |
#贪心法
import pandas as pd
import numpy as np
import math
import torch
import time
def getset(citynumber,samples):
torch.manual_seed(66)
data_set = []
for l in range(samples):
#生成在坐标在0 1 之间的
x = torch.FloatTensor(2, citynumber*2).uniform_(0, 1)
data_set.append(x)
return data_set
trainset=getset(10,100)
data_set=[]
for i in range(100):
data_set.append(np.array(trainset[i]))
#print(data_set)
print(data_set[0][1])
dist=np.zeros((10,10))
total=0
for p in range(100):
for i in range(10):
for j in range(10):
dist[i][j]=math.sqrt((data_set[p][1][i]-data_set[p][1][j])**2+(data_set[p][0][i]-data_set[p][0][j])**2)
i=1
n=10
j=0
sumpath=0
s=[]
s.append(0)
start = time.clock()
while True:
k=1
Detemp=10000000
while True:
l=0
flag=0
if k in s:
flag = 1
if (flag==0) and (dist[k][s[i-1]] < Detemp):
j = k
Detemp=dist[k][s[i - 1]]
k+=1
if k>=n:
break
s.append(j)
i+=1
sumpath+=Detemp
if i>=n:
break
sumpath+=dist[0][j]
end = time.clock()
print("结果:")
total+=sumpath
print(sumpath)
for m in range(n):
print("%s "%(s[m]),end='')
print(total/100) | [
"torch.manual_seed",
"time.clock",
"math.sqrt",
"numpy.array",
"numpy.zeros",
"torch.FloatTensor"
] | [((482, 500), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (490, 500), True, 'import numpy as np\n'), ((124, 145), 'torch.manual_seed', 'torch.manual_seed', (['(66)'], {}), '(66)\n', (141, 145), False, 'import torch\n'), ((785, 797), 'time.clock', 'time.clock', ([], {}), '()\n', (795, 797), False, 'import time\n'), ((1280, 1292), 'time.clock', 'time.clock', ([], {}), '()\n', (1290, 1292), False, 'import time\n'), ((412, 433), 'numpy.array', 'np.array', (['trainset[i]'], {}), '(trainset[i])\n', (420, 433), True, 'import numpy as np\n'), ((609, 715), 'math.sqrt', 'math.sqrt', (['((data_set[p][1][i] - data_set[p][1][j]) ** 2 + (data_set[p][0][i] -\n data_set[p][0][j]) ** 2)'], {}), '((data_set[p][1][i] - data_set[p][1][j]) ** 2 + (data_set[p][0][i] -\n data_set[p][0][j]) ** 2)\n', (618, 715), False, 'import math\n'), ((232, 268), 'torch.FloatTensor', 'torch.FloatTensor', (['(2)', '(citynumber * 2)'], {}), '(2, citynumber * 2)\n', (249, 268), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""Generating the training data.
This script generates the training data according to the config specifications.
Example
-------
To run this script, pass in the desired config file as argument::
$ generate baobab/configs/tdlmc_diagonal_config.py --n_data 1000
"""
import os, sys
import random
import argparse
import gc
from types import SimpleNamespace
from tqdm import tqdm
import numpy as np
import pandas as pd
# Lenstronomy modules
import lenstronomy
print("Lenstronomy path being used: {:s}".format(lenstronomy.__path__[0]))
from lenstronomy.LensModel.lens_model import LensModel
from lenstronomy.LensModel.Solver.lens_equation_solver import LensEquationSolver
from lenstronomy.LightModel.light_model import LightModel
from lenstronomy.PointSource.point_source import PointSource
from lenstronomy.SimulationAPI.data_api import DataAPI
import lenstronomy.Util.util as util
# Baobab modules
from baobab.configs import BaobabConfig
import baobab.bnn_priors as bnn_priors
from baobab.sim_utils import instantiate_PSF_models, get_PSF_model, generate_image, Selection
def parse_args():
"""Parse command-line arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument('config', help='train config file path')
parser.add_argument('--n_data', default=None, dest='n_data', type=int,
help='size of dataset to generate (overrides config file)')
args = parser.parse_args()
# sys.argv rerouting for setuptools entry point
if args is None:
args = SimpleNamespace()
args.config = sys.argv[0]
args.n_data = sys.argv[1]
return args
def main():
args = parse_args()
cfg = BaobabConfig.from_file(args.config)
if args.n_data is not None:
cfg.n_data = args.n_data
# Seed for reproducibility
np.random.seed(cfg.seed)
random.seed(cfg.seed)
# Create data directory
save_dir = cfg.out_dir
if not os.path.exists(save_dir):
os.makedirs(save_dir)
print("Destination folder path: {:s}".format(save_dir))
print("Log path: {:s}".format(cfg.log_path))
cfg.export_log()
else:
raise OSError("Destination folder already exists.")
# Instantiate PSF models
psf_models = instantiate_PSF_models(cfg.psf, cfg.instrument.pixel_scale)
n_psf = len(psf_models)
# Instantiate density models
kwargs_model = dict(
lens_model_list=[cfg.bnn_omega.lens_mass.profile, cfg.bnn_omega.external_shear.profile],
source_light_model_list=[cfg.bnn_omega.src_light.profile],
)
lens_mass_model = LensModel(lens_model_list=kwargs_model['lens_model_list'])
src_light_model = LightModel(light_model_list=kwargs_model['source_light_model_list'])
lens_eq_solver = LensEquationSolver(lens_mass_model)
lens_light_model = None
ps_model = None
if 'lens_light' in cfg.components:
kwargs_model['lens_light_model_list'] = [cfg.bnn_omega.lens_light.profile]
lens_light_model = LightModel(light_model_list=kwargs_model['lens_light_model_list'])
if 'agn_light' in cfg.components:
kwargs_model['point_source_model_list'] = [cfg.bnn_omega.agn_light.profile]
ps_model = PointSource(point_source_type_list=kwargs_model['point_source_model_list'], fixed_magnification_list=[False])
# Instantiate Selection object
selection = Selection(cfg.selection, cfg.components)
# Initialize BNN prior
bnn_prior = getattr(bnn_priors, cfg.bnn_prior_class)(cfg.bnn_omega, cfg.components)
# Initialize empty metadata dataframe
metadata = pd.DataFrame()
metadata_path = os.path.join(save_dir, 'metadata.csv')
current_idx = 0 # running idx of dataset
pbar = tqdm(total=cfg.n_data)
while current_idx < cfg.n_data:
sample = bnn_prior.sample() # FIXME: sampling in batches
# Selections on sampled parameters
if selection.reject_initial(sample):
continue
psf_model = get_PSF_model(psf_models, n_psf, current_idx)
# Instantiate the image maker data_api with detector and observation conditions
kwargs_detector = util.merge_dicts(cfg.instrument, cfg.bandpass, cfg.observation)
kwargs_detector.update(seeing=cfg.psf.fwhm,
psf_type=cfg.psf.type,
kernel_point_source=psf_model,
background_noise=0.0)
data_api = DataAPI(cfg.image.num_pix, **kwargs_detector)
# Generate the image
img, img_features = generate_image(sample, psf_model, data_api, lens_mass_model, src_light_model, lens_eq_solver, cfg.instrument.pixel_scale, cfg.image.num_pix, cfg.components, cfg.numerics, min_magnification=cfg.selection.magnification.min, lens_light_model=lens_light_model, ps_model=ps_model)
if img is None: # couldn't make the magnification cut
continue
# Save image file
img_filename = 'X_{0:07d}.npy'.format(current_idx)
img_path = os.path.join(save_dir, img_filename)
np.save(img_path, img)
# Save labels
meta = {}
for comp in cfg.components:
for param_name, param_value in sample[comp].items():
meta['{:s}_{:s}'.format(comp, param_name)] = param_value
#if cfg.bnn_prior_class in ['DiagonalCosmoBNNPrior']:
# if cfg.bnn_omega.time_delays.calculate_time_delays:
# # Order time delays in increasing dec
# unordered_td = sample['misc']['true_td'] # np array
# increasing_dec_i = np.argsort(img_features['y_image'])
# td = unordered_td[increasing_dec_i]
# td = td[1:] - td[0] # take BCD - A
# sample['misc']['true_td'] = list(td)
# img_features['x_image'] = img_features['x_image'][increasing_dec_i]
# img_features['y_image'] = img_features['y_image'][increasing_dec_i]
if cfg.bnn_prior_class in ['EmpiricalBNNPrior', 'DiagonalCosmoBNNPrior']:
for misc_name, misc_value in sample['misc'].items():
meta['{:s}'.format(misc_name)] = misc_value
if 'agn_light' in cfg.components:
x_image = np.zeros(4)
y_image = np.zeros(4)
n_img = len(img_features['x_image'])
meta['n_img'] = n_img
x_image[:n_img] = img_features['x_image']
y_image[:n_img] = img_features['y_image']
for i in range(4):
meta['x_image_{:d}'.format(i)] = x_image[i]
meta['y_image_{:d}'.format(i)] = y_image[i]
meta['total_magnification'] = img_features['total_magnification']
meta['img_filename'] = img_filename
metadata = metadata.append(meta, ignore_index=True)
# Export metadata.csv for the first time
if current_idx == 0:
# Sort columns lexicographically
metadata = metadata.reindex(sorted(metadata.columns), axis=1)
# Export to csv
metadata.to_csv(metadata_path, index=None)
# Initialize empty dataframe for next checkpoint chunk
metadata = pd.DataFrame()
gc.collect()
# Export metadata every checkpoint interval
if (current_idx + 1)%cfg.checkpoint_interval == 0:
# Export to csv
metadata.to_csv(metadata_path, index=None, mode='a', header=None)
# Initialize empty dataframe for next checkpoint chunk
metadata = pd.DataFrame()
gc.collect()
# Update progress
current_idx += 1
pbar.update(1)
# Export to csv
metadata.to_csv(metadata_path, index=None, mode='a', header=None)
pbar.close()
if __name__ == '__main__':
main()
| [
"lenstronomy.LensModel.Solver.lens_equation_solver.LensEquationSolver",
"numpy.save",
"os.path.exists",
"baobab.sim_utils.get_PSF_model",
"argparse.ArgumentParser",
"lenstronomy.LensModel.lens_model.LensModel",
"numpy.random.seed",
"pandas.DataFrame",
"lenstronomy.SimulationAPI.data_api.DataAPI",
... | [((1177, 1202), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1200, 1202), False, 'import argparse\n'), ((1695, 1730), 'baobab.configs.BaobabConfig.from_file', 'BaobabConfig.from_file', (['args.config'], {}), '(args.config)\n', (1717, 1730), False, 'from baobab.configs import BaobabConfig\n'), ((1831, 1855), 'numpy.random.seed', 'np.random.seed', (['cfg.seed'], {}), '(cfg.seed)\n', (1845, 1855), True, 'import numpy as np\n'), ((1860, 1881), 'random.seed', 'random.seed', (['cfg.seed'], {}), '(cfg.seed)\n', (1871, 1881), False, 'import random\n'), ((2262, 2321), 'baobab.sim_utils.instantiate_PSF_models', 'instantiate_PSF_models', (['cfg.psf', 'cfg.instrument.pixel_scale'], {}), '(cfg.psf, cfg.instrument.pixel_scale)\n', (2284, 2321), False, 'from baobab.sim_utils import instantiate_PSF_models, get_PSF_model, generate_image, Selection\n'), ((2647, 2705), 'lenstronomy.LensModel.lens_model.LensModel', 'LensModel', ([], {'lens_model_list': "kwargs_model['lens_model_list']"}), "(lens_model_list=kwargs_model['lens_model_list'])\n", (2656, 2705), False, 'from lenstronomy.LensModel.lens_model import LensModel\n'), ((2728, 2796), 'lenstronomy.LightModel.light_model.LightModel', 'LightModel', ([], {'light_model_list': "kwargs_model['source_light_model_list']"}), "(light_model_list=kwargs_model['source_light_model_list'])\n", (2738, 2796), False, 'from lenstronomy.LightModel.light_model import LightModel\n'), ((2818, 2853), 'lenstronomy.LensModel.Solver.lens_equation_solver.LensEquationSolver', 'LensEquationSolver', (['lens_mass_model'], {}), '(lens_mass_model)\n', (2836, 2853), False, 'from lenstronomy.LensModel.Solver.lens_equation_solver import LensEquationSolver\n'), ((3457, 3497), 'baobab.sim_utils.Selection', 'Selection', (['cfg.selection', 'cfg.components'], {}), '(cfg.selection, cfg.components)\n', (3466, 3497), False, 'from baobab.sim_utils import instantiate_PSF_models, get_PSF_model, generate_image, Selection\n'), ((3670, 3684), 
'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3682, 3684), True, 'import pandas as pd\n'), ((3705, 3743), 'os.path.join', 'os.path.join', (['save_dir', '"""metadata.csv"""'], {}), "(save_dir, 'metadata.csv')\n", (3717, 3743), False, 'import os, sys\n'), ((3800, 3822), 'tqdm.tqdm', 'tqdm', ([], {'total': 'cfg.n_data'}), '(total=cfg.n_data)\n', (3804, 3822), False, 'from tqdm import tqdm\n'), ((1546, 1563), 'types.SimpleNamespace', 'SimpleNamespace', ([], {}), '()\n', (1561, 1563), False, 'from types import SimpleNamespace\n'), ((1948, 1972), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (1962, 1972), False, 'import os, sys\n'), ((1982, 2003), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (1993, 2003), False, 'import os, sys\n'), ((3088, 3154), 'lenstronomy.LightModel.light_model.LightModel', 'LightModel', ([], {'light_model_list': "kwargs_model['lens_light_model_list']"}), "(light_model_list=kwargs_model['lens_light_model_list'])\n", (3098, 3154), False, 'from lenstronomy.LightModel.light_model import LightModel\n'), ((3296, 3409), 'lenstronomy.PointSource.point_source.PointSource', 'PointSource', ([], {'point_source_type_list': "kwargs_model['point_source_model_list']", 'fixed_magnification_list': '[False]'}), "(point_source_type_list=kwargs_model['point_source_model_list'],\n fixed_magnification_list=[False])\n", (3307, 3409), False, 'from lenstronomy.PointSource.point_source import PointSource\n'), ((4053, 4098), 'baobab.sim_utils.get_PSF_model', 'get_PSF_model', (['psf_models', 'n_psf', 'current_idx'], {}), '(psf_models, n_psf, current_idx)\n', (4066, 4098), False, 'from baobab.sim_utils import instantiate_PSF_models, get_PSF_model, generate_image, Selection\n'), ((4214, 4277), 'lenstronomy.Util.util.merge_dicts', 'util.merge_dicts', (['cfg.instrument', 'cfg.bandpass', 'cfg.observation'], {}), '(cfg.instrument, cfg.bandpass, cfg.observation)\n', (4230, 4277), True, 'import lenstronomy.Util.util as util\n'), 
((4518, 4563), 'lenstronomy.SimulationAPI.data_api.DataAPI', 'DataAPI', (['cfg.image.num_pix'], {}), '(cfg.image.num_pix, **kwargs_detector)\n', (4525, 4563), False, 'from lenstronomy.SimulationAPI.data_api import DataAPI\n'), ((4621, 4910), 'baobab.sim_utils.generate_image', 'generate_image', (['sample', 'psf_model', 'data_api', 'lens_mass_model', 'src_light_model', 'lens_eq_solver', 'cfg.instrument.pixel_scale', 'cfg.image.num_pix', 'cfg.components', 'cfg.numerics'], {'min_magnification': 'cfg.selection.magnification.min', 'lens_light_model': 'lens_light_model', 'ps_model': 'ps_model'}), '(sample, psf_model, data_api, lens_mass_model,\n src_light_model, lens_eq_solver, cfg.instrument.pixel_scale, cfg.image.\n num_pix, cfg.components, cfg.numerics, min_magnification=cfg.selection.\n magnification.min, lens_light_model=lens_light_model, ps_model=ps_model)\n', (4635, 4910), False, 'from baobab.sim_utils import instantiate_PSF_models, get_PSF_model, generate_image, Selection\n'), ((5084, 5120), 'os.path.join', 'os.path.join', (['save_dir', 'img_filename'], {}), '(save_dir, img_filename)\n', (5096, 5120), False, 'import os, sys\n'), ((5129, 5151), 'numpy.save', 'np.save', (['img_path', 'img'], {}), '(img_path, img)\n', (5136, 5151), True, 'import numpy as np\n'), ((6291, 6302), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (6299, 6302), True, 'import numpy as np\n'), ((6325, 6336), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (6333, 6336), True, 'import numpy as np\n'), ((7227, 7241), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7239, 7241), True, 'import pandas as pd\n'), ((7254, 7266), 'gc.collect', 'gc.collect', ([], {}), '()\n', (7264, 7266), False, 'import gc\n'), ((7575, 7589), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7587, 7589), True, 'import pandas as pd\n'), ((7602, 7614), 'gc.collect', 'gc.collect', ([], {}), '()\n', (7612, 7614), False, 'import gc\n')] |
"""The entrance tank of an AguaClara water treatment plant
#. removes large grit particles using plate settlers,
#. contains the :ref:`design-lfom`, which maintains a linear relation between flow and water level, and
#. introduces chemical dosing through the CDC <add link> using the water level set by the :ref:`design-lfom`.
Example:
>>> from aguaclara.design.ent import *
>>> ent_tank = EntranceTank(q = 20 * u.L / u.s, floc_chan_w = 42.0 * u.inch)
>>> ent_tank.plate_n
<Quantity(11.0, 'dimensionless')>
"""
import aguaclara.core.constants as con
import aguaclara.core.head_loss as hl
import aguaclara.core.materials as mat
import aguaclara.core.physchem as pc
import aguaclara.core.pipes as pipe
from aguaclara.core.units import u
import aguaclara.core.utility as ut
from aguaclara.design.component import Component
from aguaclara.design.pipeline import Pipe
import numpy as np
class EntranceTank(Component):
"""Design an AguaClara plant's entrance tank.
An entrance tank's design relies on the LFOM's and flocculator's design in
the same plant, but assumed/default values may be used to design an
entrance tank by itself. To design these components in tandem, use
:class:`aguaclara.design.ent_floc.EntTankFloc`.
Design Inputs:
- ``q (float * u.L / u.s)``: Flow rate (recommended, defaults to 20L/s)
- ``temp (float * u.degC)``: Water temperature (recommended, defaults to
20°C)
- ``lfom_nd (float * u.inch)``: The LFOM's nominal diameter (recommended,
defaults to 2")
- ``floc_chan_w (float * u.inch)``: The flocculator's channel width
(recommended, defaults to 42")
- ``floc_chan_depth (float * u.m)``: The flocculator's channel depth
(recommended, defaults to 2m)
- ``plate_s (float * u.cm)``: The spacing between plates in a plate
settler (optional, defaults to 2.5cm)
- ``plate_thickness (float * u.deg)``: The thickness of a plate in a
plate settler (optional, defaults to 2mm)
- ``plate_angle (float * u.deg)``: The angle of the plate settler
(optional, defaults to 60 degrees)
- ``plate_capture_vel (float * u.mm / u.s)``: The capture velocity of the
plate settler (optional, defaults to 8m/s)
- ``fab_s(float * u.cm)``: The space needed for a person to remove
the drain pipe (optional, defaults to 5cm)
- ``sdr (float)``: Standard demension ratio (optional,
defaults to 41)
"""
def __init__(self, **kwargs):
self.lfom_nd = 2.0 * u.inch # May be innacurate, check with Monroe -<NAME>., oal22, 4 Jun '19
self.floc_chan_w = 42.0 * u.inch
self.floc_end_depth = 2.0 * u.m
self.plate_s = 2.5 * u.cm
self.plate_thickness = 2.0 * u.mm
self.plate_angle = 50.0 * u.deg
self.plate_capture_vel = 8.0 * u.mm / u.s
self.fab_s = 5.0 * u.cm
self.spec = 'sdr41'
self.drain_pipe = Pipe()
self.subcomponents = [self.drain_pipe]
super().__init__(**kwargs)
self._set_drain_pipe()
super().set_subcomponents()
def _set_drain_pipe(self):
"""The inner diameter of the entrance tank drain pipe."""
drain_pipe_k_minor = \
hl.PIPE_ENTRANCE_K_MINOR + hl.PIPE_EXIT_K_MINOR + hl.EL90_K_MINOR
nu = pc.viscosity_kinematic_water(self.temp)
drain_id = pc.diam_pipe(self.q,
self.floc_end_depth,
self.floc_end_depth,
nu,
mat.PVC_PIPE_ROUGH,
drain_pipe_k_minor)
self.drain_pipe = Pipe(
id = drain_id,
k_minor = drain_pipe_k_minor,
spec = self.spec
)
@property
def plate_n(self):
"""The number of plates in the plate settlers."""
num_plates_as_float = \
np.sqrt(
(self.q / (
(self.plate_s + self.plate_thickness) * self.floc_chan_w *
self.plate_capture_vel *
np.sin(self.plate_angle.to(u.rad)).item()
)).to(u.dimensionless)
)
num_plates_as_int = np.ceil(num_plates_as_float)
return num_plates_as_int # This calculates to be too low. -Oliver
@property
def plate_l(self):
"""The length of the plates in the plate settlers."""
plate_l = (
self.q / (
self.plate_n * self.floc_chan_w * self.plate_capture_vel *
np.cos(self.plate_angle.to(u.rad))
)
) - (self.plate_s * np.tan(self.plate_angle.to(u.rad)))
plate_l_rounded = ut.ceil_step(plate_l.to(u.cm), 1.0 * u.cm)
return plate_l_rounded
@property
def l(self):
"""The length of the entrance tank."""
plate_array_thickness = \
(self.plate_thickness * self.plate_n) + \
(self.plate_s * (self.plate_n - 1))
l = self.drain_pipe.od + (self.fab_s * 2) + \
(
plate_array_thickness * np.cos(((90 * u.deg) -
self.plate_angle).to(u.rad))
) + \
(self.plate_l * np.cos(self.plate_angle.to(u.rad))) + \
(self.lfom_nd * 2)
return l
| [
"numpy.ceil",
"aguaclara.design.pipeline.Pipe",
"aguaclara.core.physchem.viscosity_kinematic_water",
"aguaclara.core.physchem.diam_pipe"
] | [((3005, 3011), 'aguaclara.design.pipeline.Pipe', 'Pipe', ([], {}), '()\n', (3009, 3011), False, 'from aguaclara.design.pipeline import Pipe\n'), ((3383, 3422), 'aguaclara.core.physchem.viscosity_kinematic_water', 'pc.viscosity_kinematic_water', (['self.temp'], {}), '(self.temp)\n', (3411, 3422), True, 'import aguaclara.core.physchem as pc\n'), ((3442, 3553), 'aguaclara.core.physchem.diam_pipe', 'pc.diam_pipe', (['self.q', 'self.floc_end_depth', 'self.floc_end_depth', 'nu', 'mat.PVC_PIPE_ROUGH', 'drain_pipe_k_minor'], {}), '(self.q, self.floc_end_depth, self.floc_end_depth, nu, mat.\n PVC_PIPE_ROUGH, drain_pipe_k_minor)\n', (3454, 3553), True, 'import aguaclara.core.physchem as pc\n'), ((3736, 3797), 'aguaclara.design.pipeline.Pipe', 'Pipe', ([], {'id': 'drain_id', 'k_minor': 'drain_pipe_k_minor', 'spec': 'self.spec'}), '(id=drain_id, k_minor=drain_pipe_k_minor, spec=self.spec)\n', (3740, 3797), False, 'from aguaclara.design.pipeline import Pipe\n'), ((4294, 4322), 'numpy.ceil', 'np.ceil', (['num_plates_as_float'], {}), '(num_plates_as_float)\n', (4301, 4322), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 1 21:33:40 2018
@author: ivan
"""
import os
import numpy as np
import scipy
import matplotlib.pyplot as plt
class time_series():
"""
Create a time series object.
"""
def __init__(self, file_path):
"""
data is required to be a numpy series or a list
:param file_path:
"""
self.basename, self.file_extension = os.path.splitext(os.path.basename(file_path))
self.file_path = file_path
def import_data(self, square=True):
if self.file_extension == ".wav":
_, data_set = scipy.io.wavfile.read(self.file_path)
elif self.file_extension == ".txt":
data_set = np.loadtxt(self.file_path)
if square:
data_set2 = np.power(data_set.astype(float), 2)
else:
data_set2 = data_set
self.plot_data(data_set)
self.plot_data(data_set2)
def plot_data(self, data):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(data)
plt.show()
fig.savefig(self.basename + ".pdf")
| [
"matplotlib.pyplot.figure",
"scipy.io.wavfile.read",
"os.path.basename",
"numpy.loadtxt",
"matplotlib.pyplot.show"
] | [((1005, 1017), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1015, 1017), True, 'import matplotlib.pyplot as plt\n'), ((1082, 1092), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1090, 1092), True, 'import matplotlib.pyplot as plt\n'), ((456, 483), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (472, 483), False, 'import os\n'), ((630, 667), 'scipy.io.wavfile.read', 'scipy.io.wavfile.read', (['self.file_path'], {}), '(self.file_path)\n', (651, 667), False, 'import scipy\n'), ((736, 762), 'numpy.loadtxt', 'np.loadtxt', (['self.file_path'], {}), '(self.file_path)\n', (746, 762), True, 'import numpy as np\n')] |
'''
This script is to get anchors and pos/neg weights
'''
import os
import h5py
import json
import math
import numpy as np
import h5py
import random
import time
import threading
from sklearn.cluster import KMeans
# --- Configuration ----------------------------------------------------------
sample_ratio = 1.0  # fraction of each video's feature stream to sample
c3d_resolution = 16  # C3D frame resolution (not used below)
stride = 4  # feature stride (not used below)
sample_num = 1  # number of sampling threads / repetitions
n_anchors = 128  # requested anchor cluster count (recomputed after dedup)
tiou_threshold = 0.5  # temporal IoU threshold for a positive anchor match
num_scale = 1  # (not used below)
feature_path = '../../features/sub_activitynet_v1-3_stride_64frame.c3d.hdf5'
# NOTE(review): the HDF5 handle is never closed; acceptable for a one-shot script.
features = h5py.File(feature_path)
splits = {'train':'train', 'val':'val_1', 'test':'val_2'}
out_proposal_source = '../'
out_anchor_file = 'anchors.txt'
# Ground-truth annotations for the training split.
train_data = json.load(open('../../%s.json'%'train'))
video_ids = open(os.path.join(out_proposal_source, 'train', 'ids.txt')).readlines()
video_ids = [video_id.strip() for video_id in video_ids]
# Collect each video's feature length and the duration (seconds) of every
# ground-truth proposal; the durations are what gets clustered.
feature_lengths = dict()
proposal_lengths = []
for video_id in video_ids:
    data = train_data[video_id]
    timestamps = data['timestamps']
    duration = data['duration']
    # NOTE(review): `.values()[0]` only subscripts on Python 2; Python 3
    # would need list(...) around the view -- confirm target interpreter.
    feature = features[video_id].values()[0]
    feature_len = feature.shape[0]
    feature_lengths[video_id] = feature_len
    for stamp in timestamps:
        t1 = stamp[0]
        t2 = stamp[1]
        # swap so that t1 <= t2 (annotations may be reversed)
        if t1 > t2:
            temp = t1
            t1 = t2
            t2 = temp
        clip_len = t2-t1
        proposal_lengths.append(clip_len)
proposal_lengths = np.array(proposal_lengths).reshape(len(proposal_lengths), 1)
# Cluster the 1-D proposal durations; the cluster centres become the anchors.
print('Clustering all proposals ...')
kmeans = KMeans(n_clusters=n_anchors, random_state=0).fit(proposal_lengths)
anchors = kmeans.cluster_centers_
anchors = np.array(anchors.reshape(anchors.shape[0],), dtype=np.float32)
anchors = list(anchors)
# remove duplicate centres (float32 rounding can merge near-identical ones)
anchors = sorted(list(set(anchors)))
print('Number of anchors: %d'%len(anchors))
# keep n_anchors consistent with the deduplicated list
n_anchors = len(anchors)
print('Writing anchors ...')
with open(out_anchor_file, 'w') as fid:
    fid.writelines([str(anchor)+'\n' for anchor in anchors])
# a thread to count the sampled video stream proposal
class get_proposal_label_thread (threading.Thread):
    """Worker thread that samples one contiguous stream per video and counts,
    per anchor, how often a prediction ending at each feature step scores
    tIoU > tiou_threshold against a ground-truth proposal.

    Each thread writes only to count_anchors[threadID], so threads never
    touch the same row and no locking is needed. Reads the module globals
    sample_ratio and tiou_threshold.
    """
    def __init__(self, threadID, name, feature_lengths, train_data, video_ids, n_anchors, anchors, count_anchors):
        """Store the shared inputs and this thread's identity.

        :param threadID: index of this thread's row in count_anchors
        :param name: human-readable thread name for log messages
        :param feature_lengths: dict mapping video id -> feature length
        :param train_data: ground-truth annotations keyed by video id
        :param video_ids: videos to process
        :param n_anchors: number of anchors (stored, not used in run)
        :param anchors: list of anchor lengths in seconds
        :param count_anchors: shared per-thread list of per-anchor counts
        """
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.feature_lengths = feature_lengths
        self.train_data = train_data
        self.video_ids = video_ids
        self.n_anchors = n_anchors
        self.anchors = anchors
        self.count_anchors = count_anchors
    def run(self):
        """Sample a random stream per video and accumulate positive counts."""
        print('%s start ...'%self.name)
        for index, vid in enumerate(self.video_ids):
            print('Processing video id: %s'%vid)
            data = self.train_data[vid]
            timestamps = data['timestamps']
            duration = data['duration']
            feature_len = self.feature_lengths[vid]
            print('feat len: %d'%feature_len)
            # choose a random contiguous window covering sample_ratio of the
            # feature stream, then convert its bounds to seconds
            stream_len = int(math.ceil(sample_ratio*feature_len))
            start_feature_id = random.randint(0, feature_len-stream_len)
            end_feature_id = start_feature_id + stream_len
            start_time = (float(start_feature_id) / feature_len) * duration
            end_time = (float(end_feature_id) / feature_len) * duration
            print('sample stream: (%f, %f)'%(start_time, end_time))
            for stamp_id, stamp in enumerate(timestamps):
                t1 = stamp[0]
                t2 = stamp[1]
                # swap so that t1 <= t2 (annotations may be reversed)
                if t1 > t2:
                    temp = t1
                    t1 = t2
                    t2 = temp
                start = t1
                end = t2
                start = max(start, start_time)
                # skip proposals that end after, or start after, the window
                if end > end_time or start > end_time:
                    continue
                # earliest feature step whose prediction could still reach
                # the tIoU threshold against this proposal
                mid_feature_id = int(((1.-tiou_threshold)*end + tiou_threshold*start) * feature_len / duration)
                for i in range(mid_feature_id, stream_len):
                    overlap = False
                    for anchor_id, anchor in enumerate(self.anchors):
                        # predicted segment: ends at feature step i, spans one anchor length
                        end_pred = (float(i+1)/feature_len) * duration
                        start_pred = end_pred - anchor
                        # require the prediction to cover the proposal's end,
                        # and stay within 5 feature steps past it
                        if end_pred < end or i - int(end*feature_len/duration) > 5:
                            continue
                        intersection = max(0, min(end, end_pred) - max(start, start_pred))
                        union = min(max(end, end_pred) - min(start, start_pred), end-start + end_pred-start_pred)
                        iou = float(intersection) / (union + 1e-8)
                        if iou > tiou_threshold:
                            self.count_anchors[self.threadID][anchor_id] += 1
                            overlap = True
                        elif overlap:
                            # anchors are sorted, so once matches stop, longer
                            # anchors cannot match either
                            break
def get_proposal_label(num_thread, feature_lengths, train_data, video_ids, n_anchors, achors, count_anchors):
    """Launch `num_thread` counting threads and block until all finish.

    Each worker accumulates positive anchor counts into its own row of
    `count_anchors` (indexed by thread id), so no locking is required.

    :param num_thread: number of worker threads to spawn
    :param feature_lengths: dict mapping video id -> feature length
    :param train_data: ground-truth annotations keyed by video id
    :param video_ids: list of video ids to process
    :param n_anchors: number of anchors (forwarded to the workers)
    :param achors: list of anchor lengths in seconds
    :param count_anchors: per-thread list of per-anchor positive counts (mutated)
    """
    threads = []
    for i in range(num_thread):
        # BUG FIX: the module-level global `anchors` was passed here instead
        # of the `achors` parameter, so the argument was silently ignored.
        this_thread = get_proposal_label_thread(i, 'Thread-%d'%i, feature_lengths, train_data, video_ids, n_anchors, achors, count_anchors)
        this_thread.start()
        threads.append(this_thread)
    for thread in threads:
        thread.join()
    print('Exiting main thread.')
# One row of per-anchor positive counts per sampling thread/repetition.
count_anchors = [[0 for _ in range(n_anchors)] for _ in range(sample_num)]
# Total sampled feature steps across all videos: the denominator for the
# per-anchor positive frequency.
sum_video_length = sample_ratio*sum(feature_lengths.values())
weights = [[0., 0.] for _ in range(n_anchors)]
out_weight_path = 'weights.json'
# sample the streams to estimate per-anchor positive/negative weights
print('Get anchor weights by sampling ...')
get_proposal_label(sample_num, feature_lengths, train_data, video_ids, n_anchors, anchors, count_anchors)
# average the counts over the sampling repetitions
count_anchors = np.mean(np.array(count_anchors), axis=0)
for i in range(n_anchors):
    # weight for negative label: fraction of steps where the anchor is positive
    weights[i][1] = count_anchors[i] / float(sum_video_length)
    # weight for positive label: the complement, so rarer positives weigh more
    weights[i][0] = 1. - weights[i][1]
print('The weights are:')
print(weights)
print('Writing ...')
with open(out_weight_path, 'w') as fid:
    json.dump(weights, fid)
with open('weights.txt', 'w') as fid:
    for w in weights:
        fid.write('%.4f\t%.4f\n'%(w[0], w[1]))
| [
"sklearn.cluster.KMeans",
"threading.Thread.__init__",
"math.ceil",
"os.path.join",
"h5py.File",
"numpy.array",
"random.randint",
"json.dump"
] | [((445, 468), 'h5py.File', 'h5py.File', (['feature_path'], {}), '(feature_path)\n', (454, 468), False, 'import h5py\n'), ((6046, 6069), 'numpy.array', 'np.array', (['count_anchors'], {}), '(count_anchors)\n', (6054, 6069), True, 'import numpy as np\n'), ((6392, 6415), 'json.dump', 'json.dump', (['weights', 'fid'], {}), '(weights, fid)\n', (6401, 6415), False, 'import json\n'), ((1390, 1416), 'numpy.array', 'np.array', (['proposal_lengths'], {}), '(proposal_lengths)\n', (1398, 1416), True, 'import numpy as np\n'), ((1500, 1544), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_anchors', 'random_state': '(0)'}), '(n_clusters=n_anchors, random_state=0)\n', (1506, 1544), False, 'from sklearn.cluster import KMeans\n'), ((2231, 2262), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (2256, 2262), False, 'import threading\n'), ((673, 726), 'os.path.join', 'os.path.join', (['out_proposal_source', '"""train"""', '"""ids.txt"""'], {}), "(out_proposal_source, 'train', 'ids.txt')\n", (685, 726), False, 'import os\n'), ((3074, 3117), 'random.randint', 'random.randint', (['(0)', '(feature_len - stream_len)'], {}), '(0, feature_len - stream_len)\n', (3088, 3117), False, 'import random\n'), ((3005, 3042), 'math.ceil', 'math.ceil', (['(sample_ratio * feature_len)'], {}), '(sample_ratio * feature_len)\n', (3014, 3042), False, 'import math\n')] |
"""lake/utils.py"""
import os
import math
import random
import datetime
import torch
import numpy as np
import matplotlib.pyplot as plt
def get_summary_dir():
  """Build a fresh, timestamped summary directory path under ./runs."""
  timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
  return os.path.join('.', 'runs', timestamp)
def set_seed(seed):
  """Seed the Python, NumPy and PyTorch random number generators."""
  for seed_fn in (random.seed, np.random.seed, torch.manual_seed):
    seed_fn(seed)
def find_json_value(key_path, json, delimiter='.'):
  """Follow a delimiter-separated key path into a nested mapping and return
  the value found at its end."""
  node = json
  for key in key_path.split(delimiter):
    node = node[key]
  return node
def mse(x, y):
  """Mean squared error between x and y."""
  return ((x - y) ** 2).mean()
def overlap_match(a, b):
  """ a, b = (0,1)
  Overlap is where tensors have 1's in the same position.
  Return number of bits that overlap """
  return (a * b).sum()
def compute_matrix_prep(primary_features, secondary_features):
  """Unpack the two feature sets and allocate an empty square comparison
  matrix of size [num labels, num labels]."""
  num_labels = primary_features.shape[0]  # one row/column per label
  empty_matrix = np.zeros((num_labels, num_labels))
  return primary_features, secondary_features, num_labels, empty_matrix
def compute_matrix(primary_features, secondary_features, comparison_type_='mse'):
  """
  Build a 'confusion matrix' style table comparing primary against secondary
  feature sets with the chosen comparison function.

  Cell [i, j] holds comparison(primary label i, secondary label j), e.g.:

                          Secondary Label
  Primary Label  |  Label 1            Label 2            Label 3
  ----------------------------------------------------------------------
  Label 1        |  mse(trn_1, tst_1)  mse(trn_1, tst_2)  mse(trn_1, tst_3)
  Label 2        |  mse(trn_2, tst_1)  ...                ...
  """
  primary_ftrs, secondary_ftrs, num_labels, matrix = compute_matrix_prep(primary_features, secondary_features)
  # pick the comparison once, outside the loops
  compare = mse if comparison_type_ == 'mse' else overlap_match
  for row in range(num_labels):
    for col in range(num_labels):
      matrix[row, col] = compare(primary_ftrs[row], secondary_ftrs[col])
  return matrix
def compute_accuracy(similarity, truth_matrix, comparison_type_='mse'):
  """
  Score nearest-neighbour predictions against the truth matrix.

  :param similarity: matrix [train x test], size=[num labels x num labels], elements=similarity score
  :param truth_matrix: binary matrix; 1.0 marks a matching train/test pair
  :param comparison_type_: function type used for comparisons, could be 'mse' or 'overlap'
  :return: (accuracy over rows that have a match, or -1.0 if none do;
            count of rows whose best score was tied between columns)
  """

  dbug = False
  if dbug:
    print("------------ COMPUTE ACCURACY ---------------")
    predictions = np.argmin(similarity, axis=1)  # argmin for each train sample, rows (across test samples, cols)
    truth = np.argmax(truth_matrix, axis=1)
    # BUG FIX: previously called metrics.accuracy_score although sklearn was
    # never imported, so enabling dbug raised NameError; compute directly.
    acc = np.mean(predictions == truth)
    print(similarity)
    print(truth_matrix)
    print(predictions)
    print(truth)
    print("Accuracy = {0}".format(acc))

  num_labels = similarity.shape[0]

  correct = 0
  max_correct = 0
  sum_ambiguous_ = 0
  for i in range(num_labels):
    if comparison_type_ == 'mse':
      best_test_idx = np.argmin(similarity[i, :])  # lower error = more similar
    else:  # overlap
      best_test_idx = np.argmax(similarity[i, :])  # higher overlap = more similar

    # count rows where the winning score is tied across several columns
    best_val = similarity[i, best_test_idx]
    bast_val_indices = np.where(similarity[i] == best_val)
    if len(bast_val_indices[0]) > 1:
      sum_ambiguous_ += 1

    num_correct = np.sum(truth_matrix[i, :])  # is there a correct answer (i.e. was there a matching class?)
    if num_correct > 0:
      max_correct = max_correct + 1

    val = truth_matrix[i, best_test_idx]  # was this one of the matching classes (there may be more than 1)
    if val == 1.0:
      correct = correct + 1

  if max_correct == 0:
    accuracy = -1.0
  else:
    accuracy = correct / max_correct

  return accuracy, sum_ambiguous_
def compute_truth_matrix(primary_labels, secondary_labels):
  """
  Build the truth matrix for the oneshot test: cell [i, j] is 1.0 where the
  i-th primary label equals the j-th secondary label, and 0.0 elsewhere.
  """
  primary, secondary, num_labels, matrix = compute_matrix_prep(primary_labels, secondary_labels)
  for row in range(num_labels):
    for col in range(num_labels):
      matrix[row, col] = 1.0 if primary[row] == secondary[col] else 0.0
  return matrix
def add_completion_summary(summary_images, folder, batch, save_figs=True, plot_encoding=True, plot_diff=False):
  """
  Show all the relevant images put in _summary_images by the _prep methods.
  They are collected during training and testing.
  NOTE: summary_images is a LIST and is plotted in subfigure in the given order

  :param summary_images: list of (name, images, images_shape) tuples; one grid row each
  :param folder: directory the figure is written to when save_figs is True
  :param batch: batch number, used as the figure id and in the output filename
  :param save_figs: write a PNG to disk instead of showing the figure
  :param plot_encoding: colour-map rows other than inputs/recon as encodings
  :param plot_diff: add two extra rows with the squared error image and its MSE
  """
  if len(summary_images) == 3:  # 3 images -> train_input, test_input, recon (i.e. no encoding)
    plot_encoding = False
  if save_figs:
    # non-interactive backend so the figure can be rendered headless
    plt.switch_backend('agg')
  col_nums = True
  row_nums = True
  # first_image
  (name, image, image_shape) = summary_images[0]
  rows = len(summary_images)
  rows = rows + (2 if plot_diff else 0)
  rows = rows + (1 if col_nums else 0)
  # NOTE(review): parses as (image_shape[0] + 1) if row_nums else 0 -- fine
  # while row_nums is hardcoded True, but the intent may have been
  # image_shape[0] + (1 if row_nums else 0).
  cols = image_shape[0] + 1 if row_nums else 0  # number of samples in batch
  if plot_encoding:
    # figsize = [18, 5.4]  # figure size, inches
    figsize = [10, 3]  # figure size, inches
  else:
    figsize = [10, 2]
  # create figure (fig), and array of axes (ax)
  fig, ax = plt.subplots(nrows=rows, ncols=cols, figsize=figsize, num=batch)
  plt.subplots_adjust(left=0, right=1.0, bottom=0, top=1.0, hspace=0.1, wspace=0.1)
  # plot simple raster image on each sub-plot
  for i, ax in enumerate(ax.flat):
    # i runs from 0 to (nrows*ncols-1)
    # axi is equivalent with ax[rowid][colid]
    # get indices of row/column
    row_idx = i // cols
    col_idx = i % cols
    if col_idx == 0:
      # first column: row labels (blank on the final, column-number row)
      if row_idx == rows - 1:
        ax.text(0.3, 0.3, ' ')
      else:
        ax.text(0.3, 0.3, str(row_idx+1))
    elif plot_diff and row_idx in [rows - 2, rows - 3]:
      # optional diff rows: squared-error image and its formatted MSE
      if col_idx == 0:
        ax.text(0.3, 0.3, ' ')
      else:
        img_idx = col_idx - 1
        _, target_imgs, target_shape = summary_images[0]
        target_shape = [target_shape[1], target_shape[2]]
        _, output_imgs, _ = summary_images[-1]
        target_img = np.reshape(target_imgs[img_idx], target_shape)
        output_img = np.reshape(output_imgs[img_idx], target_shape)
        output_img = np.clip(output_img, 0.0, 1.0)
        sq_err = np.square(target_img - output_img)
        # NOTE(review): this local `mse` shadows the module-level mse() helper
        mse = sq_err.mean()
        mse = '{:.2E}'.format(mse)
        if row_idx == rows - 2:
          ax.text(0.3, 0.3, mse, fontsize=5)
        elif row_idx == rows - 3:
          ax.imshow(sq_err)
    elif row_idx == rows - 1:
      # final row: column numbers
      if col_idx == 0:
        ax.text(0.3, 0.3, ' ')
      else:
        ax.text(0.3, 0.3, str(col_idx))
    else:
      # image rows: one summary entry per row, one sample per column
      (name, image, image_shape) = summary_images[row_idx]
      image_shape = [image_shape[1], image_shape[2]]
      img_idx = col_idx - 1
      img = np.reshape(image[img_idx], image_shape)
      if not plot_encoding or 'inputs' in name or 'recon' in name:
        ax.imshow(img, cmap='binary', vmin=0, vmax=1)
      else:
        ax.imshow(img, vmin=-1, vmax=1)
    ax.axis('off')
  if save_figs:
    filetype = 'png'
    filename = 'completion_summary_' + str(batch) + '.' + filetype
    filepath = os.path.join(folder, filename)
    plt.savefig(filepath, dpi=300, format=filetype)
  else:
    plt.show()
def square_image_shape_from_1d(filters):
  """
  Make 1d tensor as square as possible. If the length is a prime, the worst
  case, it will remain 1d. Assumes and retains first dimension as batches.
  """
  # largest divisor of `filters` that does not exceed sqrt(filters)
  height = int(math.sqrt(filters))
  while height > 1 and filters % height != 0:
    height -= 1
  width = filters // height
  lost_pixels = filters - height * width
  return [-1, height, width, 1], lost_pixels
| [
"numpy.clip",
"math.sqrt",
"torch.sum",
"matplotlib.pyplot.switch_backend",
"numpy.reshape",
"numpy.where",
"numpy.random.seed",
"numpy.argmin",
"matplotlib.pyplot.savefig",
"numpy.argmax",
"numpy.square",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show",
"torch.manual_seed",
... | [((171, 194), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (192, 194), False, 'import datetime\n'), ((313, 330), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (324, 330), False, 'import random\n'), ((333, 353), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (347, 353), True, 'import numpy as np\n'), ((356, 379), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (373, 379), False, 'import torch\n'), ((761, 777), 'torch.sum', 'torch.sum', (['(a * b)'], {}), '(a * b)\n', (770, 777), False, 'import torch\n'), ((1038, 1072), 'numpy.zeros', 'np.zeros', (['[num_labels, num_labels]'], {}), '([num_labels, num_labels])\n', (1046, 1072), True, 'import numpy as np\n'), ((5523, 5587), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'rows', 'ncols': 'cols', 'figsize': 'figsize', 'num': 'batch'}), '(nrows=rows, ncols=cols, figsize=figsize, num=batch)\n', (5535, 5587), True, 'import matplotlib.pyplot as plt\n'), ((5590, 5675), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0)', 'right': '(1.0)', 'bottom': '(0)', 'top': '(1.0)', 'hspace': '(0.1)', 'wspace': '(0.1)'}), '(left=0, right=1.0, bottom=0, top=1.0, hspace=0.1,\n wspace=0.1)\n', (5609, 5675), True, 'import matplotlib.pyplot as plt\n'), ((2665, 2694), 'numpy.argmin', 'np.argmin', (['similarity'], {'axis': '(1)'}), '(similarity, axis=1)\n', (2674, 2694), True, 'import numpy as np\n'), ((2775, 2806), 'numpy.argmax', 'np.argmax', (['truth_matrix'], {'axis': '(1)'}), '(truth_matrix, axis=1)\n', (2784, 2806), True, 'import numpy as np\n'), ((3400, 3435), 'numpy.where', 'np.where', (['(similarity[i] == best_val)'], {}), '(similarity[i] == best_val)\n', (3408, 3435), True, 'import numpy as np\n'), ((3518, 3544), 'numpy.sum', 'np.sum', (['truth_matrix[i, :]'], {}), '(truth_matrix[i, :])\n', (3524, 3544), True, 'import numpy as np\n'), ((5001, 5026), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', 
(['"""agg"""'], {}), "('agg')\n", (5019, 5026), True, 'import matplotlib.pyplot as plt\n'), ((7470, 7500), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (7482, 7500), False, 'import os\n'), ((7505, 7552), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filepath'], {'dpi': '(300)', 'format': 'filetype'}), '(filepath, dpi=300, format=filetype)\n', (7516, 7552), True, 'import matplotlib.pyplot as plt\n'), ((7565, 7575), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7573, 7575), True, 'import matplotlib.pyplot as plt\n'), ((7797, 7815), 'math.sqrt', 'math.sqrt', (['filters'], {}), '(filters)\n', (7806, 7815), False, 'import math\n'), ((583, 599), 'numpy.square', 'np.square', (['(x - y)'], {}), '(x - y)\n', (592, 599), True, 'import numpy as np\n'), ((3162, 3189), 'numpy.argmin', 'np.argmin', (['similarity[i, :]'], {}), '(similarity[i, :])\n', (3171, 3189), True, 'import numpy as np\n'), ((3269, 3296), 'numpy.argmax', 'np.argmax', (['similarity[i, :]'], {}), '(similarity[i, :])\n', (3278, 3296), True, 'import numpy as np\n'), ((6393, 6439), 'numpy.reshape', 'np.reshape', (['target_imgs[img_idx]', 'target_shape'], {}), '(target_imgs[img_idx], target_shape)\n', (6403, 6439), True, 'import numpy as np\n'), ((6461, 6507), 'numpy.reshape', 'np.reshape', (['output_imgs[img_idx]', 'target_shape'], {}), '(output_imgs[img_idx], target_shape)\n', (6471, 6507), True, 'import numpy as np\n'), ((6529, 6558), 'numpy.clip', 'np.clip', (['output_img', '(0.0)', '(1.0)'], {}), '(output_img, 0.0, 1.0)\n', (6536, 6558), True, 'import numpy as np\n'), ((6577, 6611), 'numpy.square', 'np.square', (['(target_img - output_img)'], {}), '(target_img - output_img)\n', (6586, 6611), True, 'import numpy as np\n'), ((7116, 7155), 'numpy.reshape', 'np.reshape', (['image[img_idx]', 'image_shape'], {}), '(image[img_idx], image_shape)\n', (7126, 7155), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2019 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for NeighbourSelection class"""
import unittest
import cartopy.crs as ccrs
import iris
import numpy as np
import scipy
from iris.tests import IrisTest
from improver.spotdata.neighbour_finding import NeighbourSelection
from improver.utilities.cube_metadata import create_coordinate_hash
from improver.utilities.warnings_handler import ManageWarnings
class Test_NeighbourSelection(IrisTest):

    """Test class for the NeighbourSelection tests, setting up inputs."""

    def setUp(self):
        """Set up cubes and sitelists for use in testing NeighbourSelection"""
        # Set up orography and land mask data on a 9x9 grid: a small land
        # strip with a matching orography bump
        land_data = np.zeros((9, 9))
        land_data[0:2, 4] = 1
        land_data[4, 4] = 1

        orography_data = np.zeros((9, 9))
        orography_data[0, 4] = 1
        orography_data[1, 4] = 5

        # Global coordinates and cubes (geographic lat/lon on a sphere)
        projection = iris.coord_systems.GeogCS(6371229.0)
        xcoord = iris.coords.DimCoord(
            np.linspace(-160, 160, 9), standard_name='longitude',
            units='degrees', coord_system=projection,
            circular=True)
        xcoord.guess_bounds()
        ycoord = iris.coords.DimCoord(
            np.linspace(-80, 80, 9), standard_name='latitude',
            units='degrees', coord_system=projection,
            circular=False)
        ycoord.guess_bounds()

        global_land_mask = iris.cube.Cube(
            land_data, standard_name="land_binary_mask", units=1,
            dim_coords_and_dims=[(ycoord, 1), (xcoord, 0)])
        global_orography = iris.cube.Cube(
            orography_data, standard_name="surface_altitude", units='m',
            dim_coords_and_dims=[(ycoord, 1), (xcoord, 0)])

        # Regional grid coordinates and cubes (equal-area projection);
        # note projection/xcoord/ycoord are deliberately rebound here
        projection = iris.coord_systems.LambertAzimuthalEqualArea(
            ellipsoid=iris.coord_systems.GeogCS(
                semi_major_axis=6378137.0, semi_minor_axis=6356752.314140356))
        xcoord = iris.coords.DimCoord(
            np.linspace(-1E5, 1E5, 9), standard_name='projection_x_coordinate',
            units='m', coord_system=projection)
        xcoord.guess_bounds()
        # NOTE(review): units='degrees' on a projection_y_coordinate looks
        # inconsistent with the x coordinate's 'm' -- possible typo, confirm.
        ycoord = iris.coords.DimCoord(
            np.linspace(-5E4, 5E4, 9), standard_name='projection_y_coordinate',
            units='degrees', coord_system=projection)
        ycoord.guess_bounds()

        region_land_mask = iris.cube.Cube(
            land_data, standard_name="land_binary_mask", units=1,
            dim_coords_and_dims=[(ycoord, 1), (xcoord, 0)])
        region_orography = iris.cube.Cube(
            orography_data, standard_name="surface_altitude", units='m',
            dim_coords_and_dims=[(ycoord, 1), (xcoord, 0)])

        # Create site lists: one site per coordinate system
        self.global_sites = [
            {'altitude': 2.0, 'latitude': 0.0, 'longitude': -64.0,
             'wmo_id': 1}]
        self.region_sites = [
            {'altitude': 2.0, 'projection_x_coordinate': -4.0E4,
             'projection_y_coordinate': 0.0, 'wmo_id': 1}]

        self.global_land_mask = global_land_mask
        self.global_orography = global_orography
        self.region_land_mask = region_land_mask
        self.region_orography = region_orography
        # keep the regional projection for coordinate-transform tests
        self.region_projection = projection
class Test__repr__(IrisTest):

    """Tests the class __repr__ function."""

    def test_basic(self):
        """Test that the __repr__ returns the expected string with defaults."""
        plugin = NeighbourSelection()
        result = str(plugin)
        # the expected string pins every default argument of the plugin
        msg = ("<NeighbourSelection: land_constraint: False, minimum_dz: False"
               ", search_radius: 10000.0, site_coordinate_system: <class "
               "'cartopy.crs.PlateCarree'>, site_x_coordinate:longitude, "
               "site_y_coordinate: latitude, node_limit: 36>")
        self.assertEqual(result, msg)

    def test_non_default(self):
        """Test that the __repr__ returns the expected string when every
        argument is set to a non-default value."""
        plugin = NeighbourSelection(land_constraint=True, minimum_dz=True,
                                    search_radius=1000,
                                    site_coordinate_system=ccrs.Mercator(),
                                    site_x_coordinate='x_axis',
                                    site_y_coordinate='y_axis',
                                    node_limit=100)
        result = str(plugin)
        msg = ("<NeighbourSelection: land_constraint: True, minimum_dz: True,"
               " search_radius: 1000, site_coordinate_system: <class "
               "'cartopy.crs.Mercator'>, site_x_coordinate:x_axis, "
               "site_y_coordinate: y_axis, node_limit: 100>")
        self.assertEqual(result, msg)
class Test_neighbour_finding_method_name(IrisTest):

    """Test generation of the name that describes the neighbour finding
    method in use."""

    def test_nearest(self):
        """The default configuration should be described as 'nearest'."""
        plugin = NeighbourSelection()
        self.assertEqual(plugin.neighbour_finding_method_name(), 'nearest')

    def test_nearest_land(self):
        """A land constraint alone should be described as 'nearest_land'."""
        plugin = NeighbourSelection(land_constraint=True)
        self.assertEqual(plugin.neighbour_finding_method_name(),
                         'nearest_land')

    def test_nearest_land_minimum_dz(self):
        """A land constraint combined with minimum vertical displacement
        should be described as 'nearest_land_minimum_dz'."""
        plugin = NeighbourSelection(land_constraint=True, minimum_dz=True)
        self.assertEqual(plugin.neighbour_finding_method_name(),
                         'nearest_land_minimum_dz')

    def test_nearest_minimum_dz(self):
        """Minimum vertical displacement alone should be described as
        'nearest_minimum_dz'."""
        plugin = NeighbourSelection(minimum_dz=True)
        self.assertEqual(plugin.neighbour_finding_method_name(),
                         'nearest_minimum_dz')
class Test__transform_sites_coordinate_system(Test_NeighbourSelection):

    """Test the function for converting arrays of site coordinates into the
    correct coordinate system for the model/grid cube."""

    def test_global_to_region(self):
        """Test coordinates generated when transforming from a global to
        regional coordinate system, in this case PlateCarree to Lambert
        Azimuthal Equal Areas."""
        plugin = NeighbourSelection()
        x_points = np.array([0, 10, 20])
        y_points = np.array([0, 0, 10])
        # regression values (metres in the equal-area projection)
        expected = [[0., 0.], [1111782.53516264, 0.],
                    [2189747.33076441, 1121357.32401753]]
        result = plugin._transform_sites_coordinate_system(
            x_points, y_points, self.region_orography)
        self.assertArrayAlmostEqual(result, expected)

    def test_region_to_global(self):
        """Test coordinates generated when transforming from a regional to
        global coordinate system, in this case Lambert Azimuthal Equal Areas
        to PlateCarree."""
        plugin = NeighbourSelection(
            site_coordinate_system=self.region_projection.as_cartopy_crs())
        x_points = np.array([0, 1, 2])
        y_points = np.array([0, 0, 1])
        # regression values (degrees); metre offsets map to tiny lat/lons
        expected = [[0., 0.], [8.98315284e-06, 0.],
                    [1.79663057e-05, 9.04369476e-06]]
        result = plugin._transform_sites_coordinate_system(
            x_points, y_points, self.global_orography)
        self.assertArrayAlmostEqual(result, expected)

    def test_global_to_global(self):
        """Test coordinates generated when the input and output coordinate
        systems are the same, in this case Plate-Carree."""
        plugin = NeighbourSelection()
        x_points = np.array([0, 10, 20])
        y_points = np.array([0, 0, 10])
        # identity transform: points are returned unchanged
        expected = np.stack((x_points, y_points), axis=1)
        result = plugin._transform_sites_coordinate_system(
            x_points, y_points, self.global_orography)
        self.assertArrayAlmostEqual(result, expected)

    def test_region_to_region(self):
        """Test coordinates generated when the input and output coordinate
        systems are the same, in this case Lambert Azimuthal Equal Areas."""
        plugin = NeighbourSelection(
            site_coordinate_system=self.region_projection.as_cartopy_crs())
        x_points = np.array([0, 1, 2])
        y_points = np.array([0, 0, 1])
        # identity transform: points are returned unchanged
        expected = np.stack((x_points, y_points), axis=1)
        result = plugin._transform_sites_coordinate_system(
            x_points, y_points, self.region_orography)
        self.assertArrayAlmostEqual(result, expected)
class Test_check_sites_are_within_domain(Test_NeighbourSelection):

    """Test the function that removes sites falling outside the model domain
    from the site list and raises a warning."""

    def test_all_valid(self):
        """Test case in which all sites are valid and fall within domain."""
        plugin = NeighbourSelection()
        sites = [{'projection_x_coordinate': 1.0E4,
                  'projection_y_coordinate': 1.0E4},
                 {'projection_x_coordinate': 1.0E5,
                  'projection_y_coordinate': 5.0E4}]

        x_points = np.array(
            [site['projection_x_coordinate'] for site in sites])
        y_points = np.array(
            [site['projection_y_coordinate'] for site in sites])
        site_coords = np.stack((x_points, y_points), axis=1)
        sites_out, site_coords_out, out_x, out_y = (
            plugin.check_sites_are_within_domain(
                sites, site_coords, x_points, y_points,
                self.region_orography))
        # all inputs are inside the domain, so everything passes through
        self.assertArrayEqual(sites_out, sites)
        self.assertArrayEqual(site_coords_out, site_coords)
        self.assertArrayEqual(out_x, x_points)
        self.assertArrayEqual(out_y, y_points)

    @ManageWarnings(record=True)
    def test_some_invalid(self, warning_list=None):
        """Test case with some sites falling outside the regional domain."""
        plugin = NeighbourSelection()
        # the third site's x coordinate (1.0E6 m) is beyond the grid extent
        sites = [{'projection_x_coordinate': 1.0E4,
                  'projection_y_coordinate': 1.0E4},
                 {'projection_x_coordinate': 1.0E5,
                  'projection_y_coordinate': 5.0E4},
                 {'projection_x_coordinate': 1.0E6,
                  'projection_y_coordinate': 1.0E5}]

        x_points = np.array(
            [site['projection_x_coordinate'] for site in sites])
        y_points = np.array(
            [site['projection_y_coordinate'] for site in sites])
        site_coords = np.stack((x_points, y_points), axis=1)
        sites_out, site_coords_out, out_x, out_y = (
            plugin.check_sites_are_within_domain(
                sites, site_coords, x_points, y_points,
                self.region_orography))
        # only the first two sites survive, and a warning is recorded
        self.assertArrayEqual(sites_out, sites[0:2])
        self.assertArrayEqual(site_coords_out[0:2], site_coords[0:2])
        self.assertArrayEqual(out_x, x_points[0:2])
        self.assertArrayEqual(out_y, y_points[0:2])
        msg = "1 spot sites fall outside the grid"
        self.assertTrue(any([msg in str(warning) for warning in warning_list]))
        self.assertTrue(any(item.category == UserWarning
                            for item in warning_list))

    @ManageWarnings(record=True)
    def test_global_invalid(self, warning_list=None):
        """Test case with some sites falling outside the global domain."""
        plugin = NeighbourSelection()
        # the third site's latitude (100 degrees) is not physically valid
        sites = [
            {'latitude': 0.0, 'longitude': 0.0},
            {'latitude': 50.0, 'longitude': 0.0},
            {'latitude': 100.0, 'longitude': 0.0}]

        x_points = np.array(
            [site['longitude'] for site in sites])
        y_points = np.array(
            [site['latitude'] for site in sites])
        site_coords = np.stack((x_points, y_points), axis=1)
        plugin.global_coordinate_system = True
        sites_out, site_coords_out, out_x, out_y = (
            plugin.check_sites_are_within_domain(
                sites, site_coords, x_points, y_points,
                self.global_orography))
        # only the first two sites survive, and a warning is recorded
        self.assertArrayEqual(sites_out, sites[0:2])
        self.assertArrayEqual(site_coords_out[0:2], site_coords[0:2])
        self.assertArrayEqual(out_x, x_points[0:2])
        self.assertArrayEqual(out_y, y_points[0:2])
        msg = "1 spot sites fall outside the grid"
        self.assertTrue(any([msg in str(warning) for warning in warning_list]))
        self.assertTrue(any(item.category == UserWarning
                            for item in warning_list))

    def test_global_circular_valid(self):
        """Test case with a site defined using a longitide exceeding 180
        degrees (e.g. with longitudes that run 0 to 360) is still included
        as the circular x-coordinate means it will still be used correctly."""
        plugin = NeighbourSelection()
        sites = [
            {'latitude': 0.0, 'longitude': 100.0},
            {'latitude': 30.0, 'longitude': 200.0},
            {'latitude': 60.0, 'longitude': 300.0}]

        x_points = np.array(
            [site['longitude'] for site in sites])
        y_points = np.array(
            [site['latitude'] for site in sites])
        site_coords = np.stack((x_points, y_points), axis=1)
        plugin.global_coordinate_system = True
        sites_out, site_coords_out, out_x, out_y = (
            plugin.check_sites_are_within_domain(
                sites, site_coords, x_points, y_points,
                self.global_orography))
        # the circular longitude coordinate wraps, so no site is rejected
        self.assertArrayEqual(sites_out, sites)
        self.assertArrayEqual(site_coords_out, site_coords)
        self.assertArrayEqual(out_x, x_points)
        self.assertArrayEqual(out_y, y_points)
class Test_get_nearest_indices(Test_NeighbourSelection):

    """Test the wrapper around iris functionality that returns the nearest
    grid point indices to arbitrary coordinates."""

    def test_basic(self):
        """Check the grid indices returned for the regional site list."""
        plugin = NeighbourSelection()
        xs = np.array([site['projection_x_coordinate']
                       for site in self.region_sites])
        ys = np.array([site['projection_y_coordinate']
                       for site in self.region_sites])
        coords = np.stack((xs, ys), axis=1)
        result = plugin.get_nearest_indices(coords, self.region_orography)
        self.assertArrayEqual(result, [[2, 4]])
class Test_geocentric_cartesian(Test_NeighbourSelection):
"""Test conversion of global coordinates to geocentric cartesians. In this
coordinate system, x and y are in the equitorial plane, and z is towards
the poles."""
    def test_basic(self):
        """Test a (0, 0) coordinate conversion to geocentric cartesian. This is
        expected to give an x coordinate which is the semi-major axis of the
        globe defined in the global coordinate system."""
        plugin = NeighbourSelection()
        x_points = np.array([0])
        y_points = np.array([0])
        result = plugin.geocentric_cartesian(self.global_orography,
                                             x_points, y_points)
        # at the equator/prime-meridian intersection the whole Earth radius
        # projects onto the x axis alone
        radius = self.global_orography.coord_system().semi_major_axis
        expected = [[radius, 0, 0]]
        self.assertArrayAlmostEqual(result, expected)
def test_north_pole(self):
"""Test a (0, 90) coordinate conversion to geocentric cartesian, this
being the north pole. This is expected to give an x coordinate which 0
and a z coordinate equivalent to the semi-major axis of the globe
defined in the global coordinate system."""
plugin = NeighbourSelection()
x_points = np.array([0])
y_points = np.array([90])
result = plugin.geocentric_cartesian(self.global_orography,
x_points, y_points)
radius = self.global_orography.coord_system().semi_major_axis
expected = [[0, 0, radius]]
self.assertArrayAlmostEqual(result, expected)
def test_45_degrees_latitude(self):
"""Test a (0, 45) coordinate conversion to geocentric cartesian. In
this case the components of the semi-major axis of the globe
defined in the global coordinate system should be shared between the
resulting x and z coordinates."""
plugin = NeighbourSelection()
x_points = np.array([0])
y_points = np.array([45])
result = plugin.geocentric_cartesian(self.global_orography,
x_points, y_points)
radius = self.global_orography.coord_system().semi_major_axis
component = radius/np.sqrt(2.)
expected = [[component, 0, component]]
self.assertArrayAlmostEqual(result, expected)
def test_45_degrees_longitude(self):
"""Test a (45, 0) coordinate conversion to geocentric cartesian. In
this case the components of the semi-major axis of the globe
defined in the global coordinate system should be shared between the
resulting x and y coordinates."""
plugin = NeighbourSelection()
x_points = np.array([45])
y_points = np.array([0])
result = plugin.geocentric_cartesian(self.global_orography,
x_points, y_points)
radius = self.global_orography.coord_system().semi_major_axis
component = radius/np.sqrt(2.)
expected = [[component, component, 0]]
self.assertArrayAlmostEqual(result, expected)
def test_45_degrees_latitude_and_longitude(self):
"""Test a (45, 45) coordinate conversion to geocentric cartesian. In
this case the z component should be a cos(45) component of the
semi-major axis of the globe defined in the global coordinate system.
The x and y coordinates should be cos(45) components of the remaining
cos(45) component of the semi-major axis."""
plugin = NeighbourSelection()
x_points = np.array([45])
y_points = np.array([45])
result = plugin.geocentric_cartesian(self.global_orography,
x_points, y_points)
radius = self.global_orography.coord_system().semi_major_axis
component = radius/np.sqrt(2.)
sub_component = component/np.sqrt(2.)
expected = [[sub_component, sub_component, component]]
self.assertArrayAlmostEqual(result, expected)
def test_negative_45_degrees_latitude_and_longitude(self):
"""Test a (-45, -45) coordinate conversion to geocentric cartesian.
In this case the x is expected to remain positive, whilst y and z
become negative."""
plugin = NeighbourSelection()
x_points = np.array([-45])
y_points = np.array([-45])
result = plugin.geocentric_cartesian(self.global_orography,
x_points, y_points)
radius = self.global_orography.coord_system().semi_major_axis
component = radius/np.sqrt(2.)
sub_component = component/np.sqrt(2.)
expected = [[sub_component, -sub_component, -component]]
self.assertArrayAlmostEqual(result, expected)
class Test_build_KDTree(Test_NeighbourSelection):
    """Test construction of a KDTree with scipy."""

    def test_basic(self):
        """Test that the expected number of nodes are created and that a tree
        is returned; this should be the lengths of the x and y coordinates
        multiplied in the simple case."""
        plugin = NeighbourSelection()
        result, result_nodes = plugin.build_KDTree(self.region_land_mask)
        expected_length = (self.region_land_mask.shape[0] *
                           self.region_land_mask.shape[1])
        self.assertEqual(result_nodes.shape[0], expected_length)
        # Use the public scipy.spatial.cKDTree class; the former
        # scipy.spatial.ckdtree module path is private and is deprecated /
        # removed in recent scipy releases.
        self.assertIsInstance(result, scipy.spatial.cKDTree)

    def test_only_land(self):
        """Test that the expected number of nodes are created and that a tree
        is returned. In this case the number of nodes should be equal to the
        number of land points."""
        plugin = NeighbourSelection(land_constraint=True)
        result, result_nodes = plugin.build_KDTree(self.region_land_mask)
        # count_nonzero gives the number of land (non-zero mask) points
        # directly, replacing the previous np.nonzero(...)[0].shape[0] form.
        expected_length = np.count_nonzero(self.region_land_mask.data)
        self.assertEqual(result_nodes.shape[0], expected_length)
        self.assertIsInstance(result, scipy.spatial.cKDTree)
class Test_select_minimum_dz(Test_NeighbourSelection):
    """Test extraction of the minimum height difference points from a provided
    array of neighbours. Note that the region orography has a series of islands
    at a y index of 4, changing elevation with x. As such the nodes are chosen
    along this line, e.g. [0, 4], [1, 4], etc."""

    @staticmethod
    def _island_nodes():
        """Return the grid nodes along the line of islands at a y index
        of 4."""
        return np.array([[index, 4] for index in range(5)])

    @ManageWarnings(ignored_messages=["Limit on number of nearest neighbours"])
    def test_basic(self):
        """Test a simple case where the first element in the provided lists
        has the smallest vertical displacement to the site. Expect the
        coordinates of the first node to be returned."""
        nodes = self._island_nodes()
        result = NeighbourSelection().select_minimum_dz(
            self.region_orography, 3., nodes, np.arange(5), np.arange(5))
        self.assertArrayEqual(result, nodes[0])

    def test_some_invalid_points(self):
        """Test a case where some nodes are beyond the imposed search_radius,
        which means they have a distance of np.inf, ensuring this is handled.
        Also change the site height so the second node is the expected
        result."""
        nodes = self._island_nodes()
        distances = np.array([0, 1, 2, 3, np.inf])
        result = NeighbourSelection().select_minimum_dz(
            self.region_orography, 5., nodes, distances, np.arange(5))
        self.assertArrayEqual(result, nodes[1])

    def test_all_invalid_points(self):
        """Test a case where all nodes are beyond the imposed search_radius,
        so the returned value should be None."""
        nodes = self._island_nodes()
        result = NeighbourSelection().select_minimum_dz(
            self.region_orography, 5., nodes, np.full(5, np.inf),
            np.arange(5))
        self.assertEqual(result, None)

    @ManageWarnings(record=True)
    def test_incomplete_search(self, warning_list=None):
        """Test a warning is raised when the number of nearest neighbours
        searched for the minimum dz neighbour does not exhaust the
        search_radius."""
        plugin = NeighbourSelection(search_radius=6)
        plugin.select_minimum_dz(self.region_orography, 3.,
                                 self._island_nodes(), np.arange(5),
                                 np.arange(5))
        msg = "Limit on number of nearest neighbours"
        self.assertTrue(any(msg in str(warning) for warning in warning_list))
        self.assertTrue(any(item.category == UserWarning
                            for item in warning_list))
class Test_process(Test_NeighbourSelection):
    """Test the process method of the NeighbourSelection class."""
    # NOTE: the neighbour cubes returned by process contain, per site, a
    # triple asserted below as [[[x_index, y_index, vertical_displacement]]].
    def test_non_metre_spatial_dimensions(self):
        """Test an error is raised if a regional grid is provided for which the
        spatial coordinates do not have units of metres."""
        # Deliberately corrupt the x coordinate units to trigger the error.
        self.region_orography.coord(axis='x').convert_units('feet')
        msg = 'Cube spatial coordinates for regional grids'
        plugin = NeighbourSelection()
        with self.assertRaisesRegex(ValueError, msg):
            plugin.process(self.region_sites, self.region_orography,
                           self.region_land_mask)

    def test_different_cube_grids(self):
        """Test an error is raised if the land mask and orography cubes
        provided are on different grids."""
        # Regional orography paired with the global land mask: grids differ.
        msg = 'Orography and land_mask cubes are not on the same'
        plugin = NeighbourSelection()
        with self.assertRaisesRegex(ValueError, msg):
            plugin.process(self.region_sites, self.region_orography,
                           self.global_land_mask)

    def test_global_attribute(self):
        """Test that a cube is returned with a model_grid_hash that matches
        that of the global input grids."""
        expected = create_coordinate_hash(self.global_orography)
        plugin = NeighbourSelection()
        result = plugin.process(self.global_sites, self.global_orography,
                                self.global_land_mask)
        self.assertIsInstance(result, iris.cube.Cube)
        self.assertEqual(result.attributes['model_grid_hash'], expected)

    def test_wmo_ids(self):
        """Test that the returned cube has the wmo_ids present when they are
        available. Should be None when they are not provided."""
        plugin = NeighbourSelection()
        # The second site is a copy of the first with its wmo_id unset.
        sites = self.global_sites + [self.global_sites.copy()[0].copy()]
        sites[1]['wmo_id'] = None
        expected = ['1', 'None']
        result = plugin.process(sites, self.global_orography,
                                self.global_land_mask)
        self.assertArrayEqual(result.coord('wmo_id').points, expected)

    def test_global_nearest(self):
        """Test that a cube is returned, here using a conventional site list
        with lat/lon site coordinates. Neighbour coordinates of [2, 4] are
        expected, with a vertical displacement of 2."""
        plugin = NeighbourSelection()
        result = plugin.process(self.global_sites, self.global_orography,
                                self.global_land_mask)
        expected = [[[2, 4, 2]]]
        self.assertIsInstance(result, iris.cube.Cube)
        self.assertArrayEqual(result.data, expected)

    def test_global_land(self):
        """Test how the neighbour index changes when a land constraint is
        imposed. Here we expect to go 'west' to the first band of land
        which has an altitude of 5m. So we expect [1, 4, -3]."""
        plugin = NeighbourSelection(land_constraint=True, search_radius=1E7)
        result = plugin.process(self.global_sites, self.global_orography,
                                self.global_land_mask)
        expected = [[[1, 4, -3]]]
        self.assertArrayEqual(result.data, expected)

    def test_global_land_minimum_dz(self):
        """Test how the neighbour index changes when a land constraint is
        imposed and a minimum height difference condition. Here we expect to go
        'west' to the second band of land that we encounter, which has an
        altitude closer to that of the site. So we expect [0, 4, 1]."""
        plugin = NeighbourSelection(land_constraint=True, minimum_dz=True,
                                    search_radius=1E8)
        result = plugin.process(self.global_sites, self.global_orography,
                                self.global_land_mask)
        expected = [[[0, 4, 1]]]
        self.assertArrayEqual(result.data, expected)

    def test_global_dateline(self):
        """Test that for a global grid with a circular longitude coordinate,
        the code selects the nearest neighbour matching constraints even if it
        falls at the opposite edge of the grid. The spot site is nearest to
        grid point [6, 4], and the nearest land point is at [4, 4]. However
        by imposing a minimum vertical displacement constraint the code will
        return a point across the dateline at [0, 4]. We can be sure we have
        crossed the dateline by the fact that there is an island of land with
        the same vertical displacment to the spot site between the point and
        the grid point returned. Therefore, the short path must be across the
        dateline, rather than across this island travelling west."""
        # Move the single test site near the eastern grid edge.
        self.global_sites[0]['longitude'] = 64.
        self.global_sites[0]['altitude'] = 3.
        plugin = NeighbourSelection(land_constraint=True, minimum_dz=True,
                                    search_radius=1E8)
        result = plugin.process(self.global_sites, self.global_orography,
                                self.global_land_mask)
        expected = [[[0, 4, 2]]]
        self.assertArrayEqual(result.data, expected)

    def test_region_attribute(self):
        """Test that a cube is returned with a model_grid_hash that matches
        that of the regional input grids."""
        expected = create_coordinate_hash(self.region_orography)
        plugin = NeighbourSelection(
            site_coordinate_system=self.region_projection.as_cartopy_crs(),
            site_x_coordinate='projection_x_coordinate',
            site_y_coordinate='projection_y_coordinate')
        result = plugin.process(self.region_sites, self.region_orography,
                                self.region_land_mask)
        self.assertIsInstance(result, iris.cube.Cube)
        self.assertEqual(result.attributes['model_grid_hash'], expected)

    def test_region_nearest(self):
        """Test that a cube is returned, this time using the site list in
        which site coordinates are defined in metres in an equal areas
        projection. Neighbour coordinates of [2, 4] are expected, with a
        vertical displacement of 2."""
        plugin = NeighbourSelection(
            site_coordinate_system=self.region_projection.as_cartopy_crs(),
            site_x_coordinate='projection_x_coordinate',
            site_y_coordinate='projection_y_coordinate')
        result = plugin.process(self.region_sites, self.region_orography,
                                self.region_land_mask)
        expected = [[[2, 4, 2]]]
        self.assertIsInstance(result, iris.cube.Cube)
        self.assertArrayEqual(result.data, expected)

    def test_region_land(self):
        """Test how the neighbour index changes when a land constraint is
        imposed. Here we expect to go 'west' to the first island of land
        which has an altitude of 5m. So we expect [1, 4, -3]."""
        plugin = NeighbourSelection(
            land_constraint=True, search_radius=2E5,
            site_coordinate_system=self.region_projection.as_cartopy_crs(),
            site_x_coordinate='projection_x_coordinate',
            site_y_coordinate='projection_y_coordinate')
        result = plugin.process(self.region_sites, self.region_orography,
                                self.region_land_mask)
        expected = [[[1, 4, -3]]]
        self.assertArrayEqual(result.data, expected)

    def test_region_land_minimum_dz(self):
        """Test how the neighbour index changes when a land constraint is
        imposed and a minimum height difference condition. Here we expect to go
        'west' to the second band of land that we encounter, which has an
        altitude closer to that of the site. So we expect [0, 4, 1]."""
        plugin = NeighbourSelection(
            land_constraint=True, minimum_dz=True, search_radius=2E5,
            site_coordinate_system=self.region_projection.as_cartopy_crs(),
            site_x_coordinate='projection_x_coordinate',
            site_y_coordinate='projection_y_coordinate')
        result = plugin.process(self.region_sites, self.region_orography,
                                self.region_land_mask)
        expected = [[[0, 4, 1]]]
        self.assertArrayEqual(result.data, expected)

    def test_global_tied_case_nearest(self):
        """Test which neighbour is returned in an artificial case in which two
        neighbouring grid points are identically close. First with no
        constraints using the iris method. We put a site exactly half way
        between the two islands at -60 degrees longitude ([1, 4] and [4, 4] are
        equal distances either side of the site). This consistently returns the
        western island. Note that nudging the value to -59.9 will return the
        island to the east as expected."""
        self.global_sites[0]['longitude'] = -60.
        plugin = NeighbourSelection()
        result = plugin.process(self.global_sites, self.global_orography,
                                self.global_land_mask)
        expected = [[[2, 4, 2]]]
        self.assertArrayEqual(result.data, expected)

    def test_global_tied_case_nearest_land(self):
        """Test which neighbour is returned in an artificial case in which two
        neighbouring grid points are identically close. Identical to the test
        above except for the land constraint is now applied, so the neigbour is
        found using the KDTree. Using the KDTree the neighbour to the east is
        returned everytime the test is run."""
        self.global_sites[0]['longitude'] = -60.0
        plugin = NeighbourSelection(land_constraint=True, search_radius=1E8)
        result = plugin.process(self.global_sites, self.global_orography,
                                self.global_land_mask)
        expected = [[[4, 4, 2]]]
        self.assertArrayEqual(result.data, expected)

    def test_global_tied_case_nearest_land_min_dz(self):
        """Test which neighbour is returned in an artificial case in which two
        neighbouring grid points are identically close. Identical to the test
        above except for now with both a land constraint and minimum dz
        constraint. The neighbouring islands have been set to have the
        same vertical displacement as each other from the spot site. The
        neigbour is found using the KDTree. Using the KDTree the neighbour to
        the east is returned everytime the test is run."""
        self.global_sites[0]['longitude'] = -60.0
        self.global_sites[0]['altitude'] = 5.
        # Level the eastern island so both islands have equal dz to the site.
        self.global_orography.data[4, 4] = 5.
        plugin = NeighbourSelection(land_constraint=True, search_radius=1E8,
                                    minimum_dz=True)
        result = plugin.process(self.global_sites, self.global_orography,
                                self.global_land_mask)
        expected = [[[4, 4, 0]]]
        self.assertArrayEqual(result.data, expected)
# Allow the tests in this module to be run directly as a script.
if __name__ == '__main__':
    unittest.main()
| [
"improver.spotdata.neighbour_finding.NeighbourSelection",
"cartopy.crs.Mercator",
"numpy.sqrt",
"improver.utilities.cube_metadata.create_coordinate_hash",
"numpy.array",
"numpy.zeros",
"numpy.stack",
"numpy.linspace",
"iris.coord_systems.GeogCS",
"numpy.nonzero",
"unittest.main",
"numpy.full",... | [((11680, 11707), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'record': '(True)'}), '(record=True)\n', (11694, 11707), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((13118, 13145), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'record': '(True)'}), '(record=True)\n', (13132, 13145), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((22794, 22868), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'ignored_messages': "['Limit on number of nearest neighbours']"}), "(ignored_messages=['Limit on number of nearest neighbours'])\n", (22808, 22868), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((24878, 24905), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'record': '(True)'}), '(record=True)\n', (24892, 24905), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((36721, 36736), 'unittest.main', 'unittest.main', ([], {}), '()\n', (36734, 36736), False, 'import unittest\n'), ((2313, 2329), 'numpy.zeros', 'np.zeros', (['(9, 9)'], {}), '((9, 9))\n', (2321, 2329), True, 'import numpy as np\n'), ((2413, 2429), 'numpy.zeros', 'np.zeros', (['(9, 9)'], {}), '((9, 9))\n', (2421, 2429), True, 'import numpy as np\n'), ((2557, 2593), 'iris.coord_systems.GeogCS', 'iris.coord_systems.GeogCS', (['(6371229.0)'], {}), '(6371229.0)\n', (2582, 2593), False, 'import iris\n'), ((3052, 3172), 'iris.cube.Cube', 'iris.cube.Cube', (['land_data'], {'standard_name': '"""land_binary_mask"""', 'units': '(1)', 'dim_coords_and_dims': '[(ycoord, 1), (xcoord, 0)]'}), "(land_data, standard_name='land_binary_mask', units=1,\n dim_coords_and_dims=[(ycoord, 1), (xcoord, 0)])\n", (3066, 3172), False, 'import iris\n'), ((3221, 3348), 'iris.cube.Cube', 'iris.cube.Cube', (['orography_data'], {'standard_name': '"""surface_altitude"""', 'units': '"""m"""', 
'dim_coords_and_dims': '[(ycoord, 1), (xcoord, 0)]'}), "(orography_data, standard_name='surface_altitude', units='m',\n dim_coords_and_dims=[(ycoord, 1), (xcoord, 0)])\n", (3235, 3348), False, 'import iris\n'), ((4040, 4160), 'iris.cube.Cube', 'iris.cube.Cube', (['land_data'], {'standard_name': '"""land_binary_mask"""', 'units': '(1)', 'dim_coords_and_dims': '[(ycoord, 1), (xcoord, 0)]'}), "(land_data, standard_name='land_binary_mask', units=1,\n dim_coords_and_dims=[(ycoord, 1), (xcoord, 0)])\n", (4054, 4160), False, 'import iris\n'), ((4209, 4336), 'iris.cube.Cube', 'iris.cube.Cube', (['orography_data'], {'standard_name': '"""surface_altitude"""', 'units': '"""m"""', 'dim_coords_and_dims': '[(ycoord, 1), (xcoord, 0)]'}), "(orography_data, standard_name='surface_altitude', units='m',\n dim_coords_and_dims=[(ycoord, 1), (xcoord, 0)])\n", (4223, 4336), False, 'import iris\n'), ((5108, 5128), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {}), '()\n', (5126, 5128), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((6630, 6650), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {}), '()\n', (6648, 6650), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((6918, 6958), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {'land_constraint': '(True)'}), '(land_constraint=True)\n', (6936, 6958), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((7277, 7334), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {'land_constraint': '(True)', 'minimum_dz': '(True)'}), '(land_constraint=True, minimum_dz=True)\n', (7295, 7334), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((7658, 7693), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {'minimum_dz': '(True)'}), 
'(minimum_dz=True)\n', (7676, 7693), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((8276, 8296), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {}), '()\n', (8294, 8296), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((8316, 8337), 'numpy.array', 'np.array', (['[0, 10, 20]'], {}), '([0, 10, 20])\n', (8324, 8337), True, 'import numpy as np\n'), ((8357, 8377), 'numpy.array', 'np.array', (['[0, 0, 10]'], {}), '([0, 0, 10])\n', (8365, 8377), True, 'import numpy as np\n'), ((9008, 9027), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (9016, 9027), True, 'import numpy as np\n'), ((9047, 9066), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (9055, 9066), True, 'import numpy as np\n'), ((9532, 9552), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {}), '()\n', (9550, 9552), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((9572, 9593), 'numpy.array', 'np.array', (['[0, 10, 20]'], {}), '([0, 10, 20])\n', (9580, 9593), True, 'import numpy as np\n'), ((9613, 9633), 'numpy.array', 'np.array', (['[0, 0, 10]'], {}), '([0, 0, 10])\n', (9621, 9633), True, 'import numpy as np\n'), ((9653, 9691), 'numpy.stack', 'np.stack', (['(x_points, y_points)'], {'axis': '(1)'}), '((x_points, y_points), axis=1)\n', (9661, 9691), True, 'import numpy as np\n'), ((10184, 10203), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (10192, 10203), True, 'import numpy as np\n'), ((10223, 10242), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (10231, 10242), True, 'import numpy as np\n'), ((10262, 10300), 'numpy.stack', 'np.stack', (['(x_points, y_points)'], {'axis': '(1)'}), '((x_points, y_points), axis=1)\n', (10270, 10300), True, 'import numpy as np\n'), ((10791, 10811), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {}), 
'()\n', (10809, 10811), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((11041, 11102), 'numpy.array', 'np.array', (["[site['projection_x_coordinate'] for site in sites]"], {}), "([site['projection_x_coordinate'] for site in sites])\n", (11049, 11102), True, 'import numpy as np\n'), ((11135, 11196), 'numpy.array', 'np.array', (["[site['projection_y_coordinate'] for site in sites]"], {}), "([site['projection_y_coordinate'] for site in sites])\n", (11143, 11196), True, 'import numpy as np\n'), ((11232, 11270), 'numpy.stack', 'np.stack', (['(x_points, y_points)'], {'axis': '(1)'}), '((x_points, y_points), axis=1)\n', (11240, 11270), True, 'import numpy as np\n'), ((11854, 11874), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {}), '()\n', (11872, 11874), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((12210, 12271), 'numpy.array', 'np.array', (["[site['projection_x_coordinate'] for site in sites]"], {}), "([site['projection_x_coordinate'] for site in sites])\n", (12218, 12271), True, 'import numpy as np\n'), ((12304, 12365), 'numpy.array', 'np.array', (["[site['projection_y_coordinate'] for site in sites]"], {}), "([site['projection_y_coordinate'] for site in sites])\n", (12312, 12365), True, 'import numpy as np\n'), ((12401, 12439), 'numpy.stack', 'np.stack', (['(x_points, y_points)'], {'axis': '(1)'}), '((x_points, y_points), axis=1)\n', (12409, 12439), True, 'import numpy as np\n'), ((13292, 13312), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {}), '()\n', (13310, 13312), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((13501, 13548), 'numpy.array', 'np.array', (["[site['longitude'] for site in sites]"], {}), "([site['longitude'] for site in sites])\n", (13509, 13548), True, 'import numpy as np\n'), ((13581, 13627), 'numpy.array', 'np.array', (["[site['latitude'] for site in sites]"], {}), 
"([site['latitude'] for site in sites])\n", (13589, 13627), True, 'import numpy as np\n'), ((13663, 13701), 'numpy.stack', 'np.stack', (['(x_points, y_points)'], {'axis': '(1)'}), '((x_points, y_points), axis=1)\n', (13671, 13701), True, 'import numpy as np\n'), ((14709, 14729), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {}), '()\n', (14727, 14729), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((14923, 14970), 'numpy.array', 'np.array', (["[site['longitude'] for site in sites]"], {}), "([site['longitude'] for site in sites])\n", (14931, 14970), True, 'import numpy as np\n'), ((15003, 15049), 'numpy.array', 'np.array', (["[site['latitude'] for site in sites]"], {}), "([site['latitude'] for site in sites])\n", (15011, 15049), True, 'import numpy as np\n'), ((15085, 15123), 'numpy.stack', 'np.stack', (['(x_points, y_points)'], {'axis': '(1)'}), '((x_points, y_points), axis=1)\n', (15093, 15123), True, 'import numpy as np\n'), ((15855, 15875), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {}), '()\n', (15873, 15875), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((15896, 15969), 'numpy.array', 'np.array', (["[site['projection_x_coordinate'] for site in self.region_sites]"], {}), "([site['projection_x_coordinate'] for site in self.region_sites])\n", (15904, 15969), True, 'import numpy as np\n'), ((16018, 16091), 'numpy.array', 'np.array', (["[site['projection_y_coordinate'] for site in self.region_sites]"], {}), "([site['projection_y_coordinate'] for site in self.region_sites])\n", (16026, 16091), True, 'import numpy as np\n'), ((16143, 16181), 'numpy.stack', 'np.stack', (['(x_points, y_points)'], {'axis': '(1)'}), '((x_points, y_points), axis=1)\n', (16151, 16181), True, 'import numpy as np\n'), ((16879, 16899), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {}), '()\n', (16897, 
16899), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((16919, 16932), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (16927, 16932), True, 'import numpy as np\n'), ((16952, 16965), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (16960, 16965), True, 'import numpy as np\n'), ((17592, 17612), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {}), '()\n', (17610, 17612), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((17632, 17645), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (17640, 17645), True, 'import numpy as np\n'), ((17665, 17679), 'numpy.array', 'np.array', (['[90]'], {}), '([90])\n', (17673, 17679), True, 'import numpy as np\n'), ((18296, 18316), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {}), '()\n', (18314, 18316), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((18336, 18349), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (18344, 18349), True, 'import numpy as np\n'), ((18369, 18383), 'numpy.array', 'np.array', (['[45]'], {}), '([45])\n', (18377, 18383), True, 'import numpy as np\n'), ((19052, 19072), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {}), '()\n', (19070, 19072), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((19092, 19106), 'numpy.array', 'np.array', (['[45]'], {}), '([45])\n', (19100, 19106), True, 'import numpy as np\n'), ((19126, 19139), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (19134, 19139), True, 'import numpy as np\n'), ((19914, 19934), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {}), '()\n', (19932, 19934), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((19954, 19968), 'numpy.array', 'np.array', (['[45]'], {}), '([45])\n', (19962, 19968), True, 'import numpy as np\n'), 
((19988, 20002), 'numpy.array', 'np.array', (['[45]'], {}), '([45])\n', (19996, 20002), True, 'import numpy as np\n'), ((20669, 20689), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {}), '()\n', (20687, 20689), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((20709, 20724), 'numpy.array', 'np.array', (['[-45]'], {}), '([-45])\n', (20717, 20724), True, 'import numpy as np\n'), ((20744, 20759), 'numpy.array', 'np.array', (['[-45]'], {}), '([-45])\n', (20752, 20759), True, 'import numpy as np\n'), ((21513, 21533), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {}), '()\n', (21531, 21533), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((22115, 22155), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {'land_constraint': '(True)'}), '(land_constraint=True)\n', (22133, 22155), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((23117, 23137), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {}), '()\n', (23135, 23137), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((23181, 23231), 'numpy.array', 'np.array', (['[[0, 4], [1, 4], [2, 4], [3, 4], [4, 4]]'], {}), '([[0, 4], [1, 4], [2, 4], [3, 4], [4, 4]])\n', (23189, 23231), True, 'import numpy as np\n'), ((23251, 23263), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (23260, 23263), True, 'import numpy as np\n'), ((23282, 23294), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (23291, 23294), True, 'import numpy as np\n'), ((23839, 23859), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {}), '()\n', (23857, 23859), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((23903, 23953), 'numpy.array', 'np.array', (['[[0, 4], [1, 4], [2, 4], [3, 4], [4, 4]]'], {}), '([[0, 4], [1, 
4], [2, 4], [3, 4], [4, 4]])\n', (23911, 23953), True, 'import numpy as np\n'), ((23973, 24003), 'numpy.array', 'np.array', (['[0, 1, 2, 3, np.inf]'], {}), '([0, 1, 2, 3, np.inf])\n', (23981, 24003), True, 'import numpy as np\n'), ((24022, 24034), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (24031, 24034), True, 'import numpy as np\n'), ((24458, 24478), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {}), '()\n', (24476, 24478), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((24522, 24572), 'numpy.array', 'np.array', (['[[0, 4], [1, 4], [2, 4], [3, 4], [4, 4]]'], {}), '([[0, 4], [1, 4], [2, 4], [3, 4], [4, 4]])\n', (24530, 24572), True, 'import numpy as np\n'), ((24592, 24610), 'numpy.full', 'np.full', (['(5)', 'np.inf'], {}), '(5, np.inf)\n', (24599, 24610), True, 'import numpy as np\n'), ((24629, 24641), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (24638, 24641), True, 'import numpy as np\n'), ((25148, 25183), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {'search_radius': '(6)'}), '(search_radius=6)\n', (25166, 25183), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((25227, 25277), 'numpy.array', 'np.array', (['[[0, 4], [1, 4], [2, 4], [3, 4], [4, 4]]'], {}), '([[0, 4], [1, 4], [2, 4], [3, 4], [4, 4]])\n', (25235, 25277), True, 'import numpy as np\n'), ((25297, 25309), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (25306, 25309), True, 'import numpy as np\n'), ((25328, 25340), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (25337, 25340), True, 'import numpy as np\n'), ((26204, 26224), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {}), '()\n', (26222, 26224), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((26640, 26660), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {}), '()\n', 
(26658, 26660), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((27011, 27056), 'improver.utilities.cube_metadata.create_coordinate_hash', 'create_coordinate_hash', (['self.global_orography'], {}), '(self.global_orography)\n', (27033, 27056), False, 'from improver.utilities.cube_metadata import create_coordinate_hash\n'), ((27074, 27094), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {}), '()\n', (27092, 27094), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((27541, 27561), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {}), '()\n', (27559, 27561), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((28155, 28175), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {}), '()\n', (28173, 28175), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((28707, 28773), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {'land_constraint': '(True)', 'search_radius': '(10000000.0)'}), '(land_constraint=True, search_radius=10000000.0)\n', (28725, 28773), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((29346, 29435), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {'land_constraint': '(True)', 'minimum_dz': '(True)', 'search_radius': '(100000000.0)'}), '(land_constraint=True, minimum_dz=True, search_radius=\n 100000000.0)\n', (29364, 29435), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((30589, 30678), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {'land_constraint': '(True)', 'minimum_dz': '(True)', 'search_radius': '(100000000.0)'}), '(land_constraint=True, minimum_dz=True, search_radius=\n 100000000.0)\n', (30607, 30678), False, 'from 
improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((31097, 31142), 'improver.utilities.cube_metadata.create_coordinate_hash', 'create_coordinate_hash', (['self.region_orography'], {}), '(self.region_orography)\n', (31119, 31142), False, 'from improver.utilities.cube_metadata import create_coordinate_hash\n'), ((34635, 34655), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {}), '()\n', (34653, 34655), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((35353, 35420), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {'land_constraint': '(True)', 'search_radius': '(100000000.0)'}), '(land_constraint=True, search_radius=100000000.0)\n', (35371, 35420), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((36359, 36447), 'improver.spotdata.neighbour_finding.NeighbourSelection', 'NeighbourSelection', ([], {'land_constraint': '(True)', 'search_radius': '(100000000.0)', 'minimum_dz': '(True)'}), '(land_constraint=True, search_radius=100000000.0,\n minimum_dz=True)\n', (36377, 36447), False, 'from improver.spotdata.neighbour_finding import NeighbourSelection\n'), ((2645, 2670), 'numpy.linspace', 'np.linspace', (['(-160)', '(160)', '(9)'], {}), '(-160, 160, 9)\n', (2656, 2670), True, 'import numpy as np\n'), ((2861, 2884), 'numpy.linspace', 'np.linspace', (['(-80)', '(80)', '(9)'], {}), '(-80, 80, 9)\n', (2872, 2884), True, 'import numpy as np\n'), ((3663, 3698), 'numpy.linspace', 'np.linspace', (['(-100000.0)', '(100000.0)', '(9)'], {}), '(-100000.0, 100000.0, 9)\n', (3674, 3698), True, 'import numpy as np\n'), ((3860, 3893), 'numpy.linspace', 'np.linspace', (['(-50000.0)', '(50000.0)', '(9)'], {}), '(-50000.0, 50000.0, 9)\n', (3871, 3893), True, 'import numpy as np\n'), ((18614, 18626), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (18621, 18626), True, 'import numpy as np\n'), ((19370, 19382), 'numpy.sqrt', 'np.sqrt', 
(['(2.0)'], {}), '(2.0)\n', (19377, 19382), True, 'import numpy as np\n'), ((20233, 20245), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (20240, 20245), True, 'import numpy as np\n'), ((20279, 20291), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (20286, 20291), True, 'import numpy as np\n'), ((20990, 21002), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (20997, 21002), True, 'import numpy as np\n'), ((21036, 21048), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (21043, 21048), True, 'import numpy as np\n'), ((3506, 3598), 'iris.coord_systems.GeogCS', 'iris.coord_systems.GeogCS', ([], {'semi_major_axis': '(6378137.0)', 'semi_minor_axis': '(6356752.314140356)'}), '(semi_major_axis=6378137.0, semi_minor_axis=\n 6356752.314140356)\n', (3531, 3598), False, 'import iris\n'), ((5792, 5807), 'cartopy.crs.Mercator', 'ccrs.Mercator', ([], {}), '()\n', (5805, 5807), True, 'import cartopy.crs as ccrs\n'), ((22256, 22294), 'numpy.nonzero', 'np.nonzero', (['self.region_land_mask.data'], {}), '(self.region_land_mask.data)\n', (22266, 22294), True, 'import numpy as np\n')] |
from rdkit import Chem
from rdkit.Chem import AllChem
import numpy as np
def reset_ids(mol):
    """Renumber the conformers on *mol* so their IDs run 0..N-1 in iteration order."""
    for new_id, conformer in enumerate(mol.GetConformers()):
        conformer.SetId(new_id)
class EnergyFilter:
    """Prunes conformers whose energy lies more than ``energy_diff`` above the minimum.

    Attributes:
        energy_diff (float): maximum allowed energy above the lowest-energy conformer.
    """

    def __init__(self, energy_diff):
        self.energy_diff = energy_diff

    def filter(self, mol, energies, min_energy=None):
        """Remove high-energy conformers from ``mol`` and their entries from ``energies``.

        Args:
            mol (RDKit Mol): molecule whose conformers are pruned in place.
            energies (list): conformer energies ordered by conformer ID; edited in place.
            min_energy (float, optional): reference minimum energy. Defaults to
                ``min(energies)``.
        """
        if min_energy is None:
            min_energy = np.min(energies)
        cutoff = min_energy + self.energy_diff
        # Fix: the original used energies.remove(energy), which deletes the first
        # occurrence *by value*; with duplicate energies this removes the wrong
        # list entry and desynchronizes energies from the conformer IDs.
        # Delete by index instead, iterating in reverse so earlier positions
        # (which still match the not-yet-renumbered conformer IDs) stay valid.
        for conf_id in range(len(energies) - 1, -1, -1):
            if energies[conf_id] > cutoff:
                mol.RemoveConformer(conf_id)
                del energies[conf_id]
        reset_ids(mol)
class StructureFilter:
    """Drops conformers whose 3-D geometry no longer round-trips to the reference SMILES."""

    def filter(self, mol, smiles=None):
        """Remove conformers of ``mol`` that do not reproduce ``smiles`` when
        written to a mol block and re-parsed.

        Args:
            mol (RDKit Mol): molecule whose conformers are checked in place.
            smiles (str, optional): reference SMILES; derived from ``mol``
                (hydrogens removed) when not given.
        """
        if smiles is None:
            smiles = Chem.MolToSmiles(Chem.RemoveHs(mol))
        for conf_id in range(mol.GetNumConformers()):
            mol_block = Chem.MolToMolBlock(Chem.RemoveHs(mol), confId=conf_id)
            conf_smiles = Chem.MolToSmiles(Chem.MolFromMolBlock(mol_block))
            if conf_smiles != smiles:
                mol.RemoveConformer(conf_id)
        reset_ids(mol)
class ConformerEvaluator:
    # Accepts/rejects candidate conformers against an accumulating "optimal" set,
    # using an energy window above the global minimum and a pairwise RMSD
    # threshold (keeping the lower-energy member of any too-similar pair).

    def __init__(self, energy_diff, min_rmsd, max_iters):
        # energy_diff: maximum energy above the minimum-energy conformer
        # min_rmsd: conformers closer than this RMSD are treated as duplicates
        # max_iters: maximum alignment iterations passed to AllChem.AlignMol
        self.energy_diff = energy_diff
        self.min_rmsd = min_rmsd
        self.max_iters = max_iters

    def evaluate(self, mol, energies, opt_mol, opt_energies, min_energy):
        """
        Determines if the conformers on mol are accepted in the final set of conformers or are rejected based on energy
        difference from the minimum energy conformer and whether conformers are greater than the RMSD threshold apart
        from each other. In the latter case, if they are not, then the lowest energy conformer out of the two is kept.
        Args:
            mol (RDKit Mol): The molecule containing the candidate conformers.
            energies (list): The list of energies of the candidate conformers.
            opt_mol (RDKit Mol): The molecule containing the final set of conformers.
            opt_energies (list): The energies of the final set of conformers.
            min_energy (int): The lowest energy in the final set of conformers.
        """
        for i, macro_conf in enumerate(mol.GetConformers()):
            # skip if energy is too high
            if energies[i] > min_energy + self.energy_diff:
                continue
            similar_confs = []
            # NOTE(review): conformers are removed from opt_mol while iterating
            # over opt_mol.GetConformers(); confirm this pattern is safe with
            # the RDKit version in use.
            for opt_conf in opt_mol.GetConformers():
                # remove conformer if energy is too high
                if opt_energies[opt_conf.GetId()] > min_energy + self.energy_diff:
                    del opt_energies[opt_conf.GetId()]
                    opt_mol.RemoveConformer(opt_conf.GetId())
                    continue
                # RMSD after aligning candidate onto the accepted conformer
                rmsd = AllChem.AlignMol(mol, opt_mol, macro_conf.GetId(), opt_conf.GetId(), maxIters=self.max_iters)
                if rmsd < self.min_rmsd:
                    similar_confs.append(opt_conf.GetId())
            similar_energies = [opt_energies[conf_id] for conf_id in similar_confs]
            similar_energies.append(energies[i])
            # Keep the candidate only if it is the strict lowest-energy member of
            # its similarity cluster (np.argmin returns the first occurrence, so
            # ties favor the already-accepted conformers); it then replaces every
            # similar accepted conformer.
            if np.argmin(similar_energies) == len(similar_energies) - 1:
                for conf_id in similar_confs:
                    opt_mol.RemoveConformer(conf_id)
                    del opt_energies[conf_id]
                conf_id = opt_mol.AddConformer(macro_conf, assignId=True)
                opt_energies[conf_id] = energies[i]
| [
"numpy.argmin",
"rdkit.Chem.RemoveHs",
"numpy.min"
] | [((379, 395), 'numpy.min', 'np.min', (['energies'], {}), '(energies)\n', (385, 395), True, 'import numpy as np\n'), ((751, 769), 'rdkit.Chem.RemoveHs', 'Chem.RemoveHs', (['mol'], {}), '(mol)\n', (764, 769), False, 'from rdkit import Chem\n'), ((3011, 3038), 'numpy.argmin', 'np.argmin', (['similar_energies'], {}), '(similar_energies)\n', (3020, 3038), True, 'import numpy as np\n'), ((908, 926), 'rdkit.Chem.RemoveHs', 'Chem.RemoveHs', (['mol'], {}), '(mol)\n', (921, 926), False, 'from rdkit import Chem\n')] |
import numpy as np
from utils.bit_tools import parity, int_to_bin
class eigenstate:
    """Class for constructing the n-th +1-eigenstate of A

    Attributes
    ----------
    A : dict
        Dictionary containing two items A = {P_1: r_1, P_2: r_2}, mapping
        Pauli strings to their coefficients
    n : int
        The eigenstate index
    num_qubits : int
        The number of qubits in the Hamiltonian

    Methods
    -------
    P_index
        Indexes the qubit positions acted upon by each Pauli operator X, Y, Z
    t_val
        Calculates the eigenstate parameter t that satisfies the required amplitude constraint
    construct
        Constructs the eigenstate, stored as a numpy array
    """

    def __init__(self, A, n, num_qubits):
        self.A = A
        self.n = n
        self.num_qubits = num_qubits

    def P_index(self, q_pos=False):
        """Indexes the qubit positions acted upon by each Pauli operator X, Y, Z

        Parameters
        ----------
        q_pos: bool, optional
            if True, indices are mirrored as (num_qubits - 1 - index) so they
            are compatible with qiskit's qubit ordering

        Returns
        -------
        dict
            Dictionary of qubit indices acted upon by a Pauli P. Accessed via keys 'Pj' where j=1,2.
        """
        P_index={}
        for P in ['X', 'Y', 'Z']:
            for index, a in enumerate(self.A.keys()):
                index_key = '%s%i' % (P, index+1)
                # mirror qubit indices when qiskit-compatible positions are requested
                offset = 0
                if q_pos:
                    offset = self.num_qubits-1
                # positions within the Pauli string 'a' where the character equals P
                P_index[index_key] = [abs(index-offset) for index, p in enumerate(list(a)) if p==P]
        return P_index

    def t_val(self, alt=False):
        """Calculates the eigenstate parameter t that satisfies the required amplitude constraint

        Parameters
        ----------
        alt : bool, optional
            if True, also return the parameter for the alternative sign choice

        Returns
        -------
        float, or tuple of float when ``alt`` is True
            The eigenstate parameter t (and its alternative when requested)
        """
        r1 = list(self.A.values())[0]
        r2 = list(self.A.values())[1]
        P_index = self.P_index()
        init_state = int_to_bin(self.n, self.num_qubits)
        # compute the parities of relevant qubit subsets
        sgn = (-1)**(parity(init_state, P_index['Z1'] + P_index['Y1']) + len(P_index['Y1'])/2) # check the initial minus
        # calculate the quotient constraint on +1-eigenstates of A
        quotient = r1 / (1 - r2*(-1)**(parity(init_state, P_index['Z2'])))
        alt_quotient = r1 / (1 - r2*(-1)**(1 + parity(init_state, P_index['Z2'])))
        # define t such that |psi_n> := sin(t)|b_n> + cos(t)|b_n'> is a +1-eigenstate of A
        t1 = sgn * np.arctan(quotient)
        t2 = sgn * np.arctan(alt_quotient)
        if alt:
            return t1, t2
        else:
            return t1

    def construct(self):
        """Constructs the eigenstate, stored as a numpy array

        Returns
        -------
        numpy.array
            Normalised +1-eigenstate of A, a column vector of 2**num_qubits elements
        """
        # initiate blank state as a list
        psi = [0 for i in range(2**self.num_qubits)]
        P_index = self.P_index()
        t = self.t_val()
        # binary representation of the eigenstate index
        init_state = int_to_bin(self.n, self.num_qubits)
        # determine the index of the basis vector that is paired with the initial state
        # (flip the bits acted on by X1/Y1, adjusting the integer index accordingly)
        n_prime = self.n + sum([((-1)**int(init_state[i])) * 2**(self.num_qubits-1 - i) for i in P_index['X1'] + P_index['Y1']])
        # corresponding entries in psi are set as necessary
        psi[self.n] = np.sin(t)
        psi[n_prime] = np.cos(t)
        return np.array(psi)
"utils.bit_tools.int_to_bin",
"numpy.array",
"numpy.cos",
"utils.bit_tools.parity",
"numpy.sin",
"numpy.arctan"
] | [((1932, 1967), 'utils.bit_tools.int_to_bin', 'int_to_bin', (['self.n', 'self.num_qubits'], {}), '(self.n, self.num_qubits)\n', (1942, 1967), False, 'from utils.bit_tools import parity, int_to_bin\n'), ((3114, 3149), 'utils.bit_tools.int_to_bin', 'int_to_bin', (['self.n', 'self.num_qubits'], {}), '(self.n, self.num_qubits)\n', (3124, 3149), False, 'from utils.bit_tools import parity, int_to_bin\n'), ((3462, 3471), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (3468, 3471), True, 'import numpy as np\n'), ((3495, 3504), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (3501, 3504), True, 'import numpy as np\n'), ((3521, 3534), 'numpy.array', 'np.array', (['psi'], {}), '(psi)\n', (3529, 3534), True, 'import numpy as np\n'), ((2491, 2510), 'numpy.arctan', 'np.arctan', (['quotient'], {}), '(quotient)\n', (2500, 2510), True, 'import numpy as np\n'), ((2530, 2553), 'numpy.arctan', 'np.arctan', (['alt_quotient'], {}), '(alt_quotient)\n', (2539, 2553), True, 'import numpy as np\n'), ((2056, 2105), 'utils.bit_tools.parity', 'parity', (['init_state', "(P_index['Z1'] + P_index['Y1'])"], {}), "(init_state, P_index['Z1'] + P_index['Y1'])\n", (2062, 2105), False, 'from utils.bit_tools import parity, int_to_bin\n'), ((2262, 2295), 'utils.bit_tools.parity', 'parity', (['init_state', "P_index['Z2']"], {}), "(init_state, P_index['Z2'])\n", (2268, 2295), False, 'from utils.bit_tools import parity, int_to_bin\n'), ((2345, 2378), 'utils.bit_tools.parity', 'parity', (['init_state', "P_index['Z2']"], {}), "(init_state, P_index['Z2'])\n", (2351, 2378), False, 'from utils.bit_tools import parity, int_to_bin\n')] |
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import trange
from torch import nn
from ..metrics import Metric, MultipleMetrics
from ..wdtypes import *
# Detected once at import time; used below to move input/target tensors to the GPU.
use_cuda = torch.cuda.is_available()
class WarmUp(object):
    r"""
    'Warm up' methods to be applied to the individual models before the joined
    training. There are 3 warm up routines available:
    1) Warm up all trainable layers at once
    2) Gradual warm up inspired by the work of Felbo et al., 2017
    3) Gradual warm up inspired by the work of Howard & Ruder, 2018
    The structure of the code in this class is designed to be instantiated within
    the class WideDeep. This is not ideal, but represents a compromise towards
    implementing a 'warm up' functionality for the current overall structure of
    the package without having to re-structure most of the existing code.
    Parameters
    ----------
    loss_fn: Any
        any function with the same strucure as '_loss_fn' in the main class WideDeep
        at pytorch_widedeep.models.wide_deep
    metric: Metric
        object of class Metric (see Metric in pytorch_widedeep.metrics)
    method: str
        one of 'binary', 'regression' or 'multiclass'
    verbose: Boolean
    """

    def __init__(
        self,
        loss_fn: Any,
        metric: Union[Metric, MultipleMetrics],
        method: str,
        verbose: int,
    ):
        super(WarmUp, self).__init__()
        self.loss_fn = loss_fn
        self.metric = metric
        self.method = method
        self.verbose = verbose

    def warm_all(
        self,
        model: nn.Module,
        model_name: str,
        loader: DataLoader,
        n_epochs: int,
        max_lr: float,
    ):
        r"""
        Warm up all trainable layers in a model using a one cyclic learning rate
        with a triangular pattern. This is referred to as Slanted Triangular
        learning rate in Howard & Ruder 2018
        (https://arxiv.org/abs/1801.06146). The cycle is described as follows:
        1-The learning rate will gradually increase for 10% of the training steps
        from max_lr/10 to max_lr.
        2-It will then gradually decrease to max_lr/10 for the remaining 90% of the
        steps.
        The optimizer used in the process is AdamW
        Parameters:
        ----------
        model: nn.Module
            nn.Module object containing one the WideDeep model components (wide,
            deepdense, deeptext or deepimage)
        model_name: Str
            string indicating the model name to access the corresponding parameters.
            One of 'wide', 'deepdense', 'deeptext' or 'deepimage'
        loader: DataLoader
            Pytorch DataLoader containing the data used to warm up
        n_epochs: Int
            number of epochs used to warm up the model
        max_lr: Float
            maximum learning rate value during the triangular cycle.
        """
        if self.verbose:
            print("Warming up {} for {} epochs".format(model_name, n_epochs))
        model.train()
        # start at the base of the triangular cycle; CyclicLR drives it to max_lr
        optimizer = torch.optim.AdamW(model.parameters(), lr=max_lr / 10.0)  # type: ignore
        step_size_up, step_size_down = self._steps_up_down(len(loader), n_epochs)
        scheduler = torch.optim.lr_scheduler.CyclicLR(
            optimizer,
            base_lr=max_lr / 10.0,
            max_lr=max_lr,
            step_size_up=step_size_up,
            step_size_down=step_size_down,
            cycle_momentum=False,
        )
        self._warm(model, model_name, loader, optimizer, scheduler, n_epochs=n_epochs)

    def warm_gradual(
        self,
        model: nn.Module,
        model_name: str,
        loader: DataLoader,
        last_layer_max_lr: float,
        layers: List[nn.Module],
        routine: str,
    ):
        r"""
        Warm up certain layers within the model following a gradual warm up routine.
        The approaches implemented in this method are inspired by the work of Felbo
        et al., 2017 in their DeepEmoji paper (https://arxiv.org/abs/1708.00524) and
        Howard & Ruder 2018 ULMFiT paper
        (https://arxiv.org/abs/1801.06146).
        A one cycle triangular learning rate is used. In both Felbo's and Howard's
        routines a gradually decreasing learning rate is used as we go deeper into
        the network. The 'closest' layer to the output neuron(s) will use a maximum
        learning rate of 'last_layer_max_lr'. The learning rate will then decrease
        for the deeper layers.
        NOTE(review): the code below uses last_layer_max_lr / (2.5 * n) for the
        n-th deeper layer (a harmonic decay), not a fixed factor of 2.5 per layer
        (which would be last_layer_max_lr / 2.5**n) — confirm which is intended.
        1) The 'Felbo' routine:
        warm up the first layer in 'layers' for one epoch. Then warm up the next
        layer in 'layers' for one epoch freezing the already warmed up layer(s).
        Repeat untill all individual layers are warmed. Then warm one last epoch
        with all warmed layers trainable
        2) The 'Howard' routine:
        warm up the first layer in 'layers' for one epoch. Then warm the next layer
        in the model for one epoch while keeping the already warmed up layer(s)
        trainable. Repeat.
        Parameters:
        ----------
        model: nn.Module
            nn.Module object containing one the WideDeep model components (wide,
            deepdense, deeptext or deepimage)
        model_name: Str
            string indicating the model name to access the corresponding parameters.
            One of 'wide', 'deepdense', 'deeptext' or 'deepimage'
        loader: DataLoader
            Pytorch DataLoader containing the data to warm up with.
        last_layer_max_lr: Float
            maximum learning rate value during the triangular cycle for the layer
            closest to the output neuron(s). Deeper layers in 'model' will be trained
            with a gradually descending learning rate.
        layers: List
            List of nn.Module objects containing the layers that will be warmed up.
            This must be in 'WARM-UP ORDER'.
        routine: str
            one of 'howard' or 'felbo'
        """
        model.train()
        step_size_up, step_size_down = self._steps_up_down(len(loader))
        # remember each parameter's requires_grad so it can be restored at the end
        original_setup = {}
        for n, p in model.named_parameters():
            original_setup[n] = p.requires_grad
        # per-layer maximum learning rates, decaying with depth (see NOTE above)
        layers_max_lr = [last_layer_max_lr] + [
            last_layer_max_lr / (2.5 * n) for n in range(1, len(layers))
        ]
        # freeze everything; layers are unfrozen one at a time below
        for layer in layers:
            for p in layer.parameters():
                p.requires_grad = False
        if routine == "howard":
            # howard accumulates param groups across iterations
            params: List = []
            max_lr: List = []
            base_lr: List = []
        for i, (lr, layer) in enumerate(zip(layers_max_lr, layers)):
            if self.verbose:
                print(
                    "Warming up {}, layer {} of {}".format(
                        model_name, i + 1, len(layers)
                    )
                )
            for p in layer.parameters():
                p.requires_grad = True
            if routine == "felbo":
                # felbo: train only the current layer
                params, max_lr, base_lr = layer.parameters(), lr, lr / 10.0  # type: ignore
            elif routine == "howard":
                # howard: train the current layer plus all previously unfrozen ones
                params += [{"params": layer.parameters(), "lr": lr / 10.0}]
                max_lr += [lr]
                base_lr += [lr / 10.0]
            optimizer = torch.optim.AdamW(params)  # type: ignore
            scheduler = torch.optim.lr_scheduler.CyclicLR(
                optimizer,
                base_lr=base_lr,  # type: ignore
                max_lr=max_lr,  # type: ignore
                step_size_up=step_size_up,
                step_size_down=step_size_down,  # type: ignore
                cycle_momentum=False,
            )
            self._warm(model, model_name, loader, optimizer, scheduler)
            if routine == "felbo":
                # felbo: re-freeze the layer once its individual epoch is done
                for p in layer.parameters():
                    p.requires_grad = False
        if routine == "felbo":
            # felbo: final epoch with every warmed layer trainable at once
            if self.verbose:
                print("Warming up one last epoch with all warmed up layers trainable")
            for layer in layers:
                for p in layer.parameters():
                    p.requires_grad = True
            params, max_lr, base_lr = [], [], []
            for lr, layer in zip(layers_max_lr, layers):
                params += [{"params": layer.parameters(), "lr": lr / 10.0}]
                max_lr += [lr]
                base_lr += [lr / 10.0]
            optimizer = torch.optim.AdamW(params)  # type: ignore
            scheduler = torch.optim.lr_scheduler.CyclicLR(
                optimizer,
                base_lr=base_lr,  # type: ignore
                max_lr=max_lr,  # type: ignore
                step_size_up=step_size_up,
                step_size_down=step_size_down,  # type: ignore
                cycle_momentum=False,
            )
            self._warm(model, model_name, loader, optimizer, scheduler)
        # restore the original trainability of every parameter
        for n, p in model.named_parameters():
            p.requires_grad = original_setup[n]

    def _warm(
        self,
        model: nn.Module,
        model_name: str,
        loader: DataLoader,
        optimizer: Optimizer,
        scheduler: LRScheduler,
        n_epochs: int = 1,
    ):
        r"""
        Standard Pytorch training loop over ``loader`` with a per-batch
        scheduler step, reporting running loss (and metric, if any) via tqdm.
        """
        steps = len(loader)
        for epoch in range(n_epochs):
            running_loss = 0.0
            with trange(steps, disable=self.verbose != 1) as t:
                for batch_idx, (data, target) in zip(t, loader):
                    t.set_description("epoch %i" % (epoch + 1))
                    X = data[model_name].cuda() if use_cuda else data[model_name]
                    # multiclass targets stay integer-typed for the loss
                    y = target.float() if self.method != "multiclass" else target
                    y = y.cuda() if use_cuda else y
                    optimizer.zero_grad()
                    y_pred = model(X)
                    loss = self.loss_fn(y_pred, y)
                    loss.backward()
                    optimizer.step()
                    scheduler.step()  # type: ignore
                    running_loss += loss.item()
                    avg_loss = running_loss / (batch_idx + 1)
                    if self.metric is not None:
                        if self.method == "binary":
                            acc = self.metric(torch.sigmoid(y_pred), y)
                        if self.method == "multiclass":
                            acc = self.metric(F.softmax(y_pred, dim=1), y)
                        # NOTE(review): if method is 'regression' and a metric is
                        # set, 'acc' is never assigned and this line raises a
                        # NameError — confirm callers never hit that combination.
                        t.set_postfix(metrics=acc, loss=avg_loss)
                    else:
                        # without a metric, report RMSE-style loss
                        t.set_postfix(loss=np.sqrt(avg_loss))

    def _steps_up_down(self, steps: int, n_epochs: int = 1) -> Tuple[int, int]:
        r"""
        Calculate the number of steps up and down during the one cycle warm up for a
        given number of epochs
        Parameters:
        ----------
        steps: Int
            steps per epoch
        n_epochs: Int. Default=1
            number of warm up epochs
        Returns:
        -------
        up, down: Tuple, Int
            number of steps increasing/decreasing the learning rate during the cycle
        """
        # 10% of the total steps ramp up, the remaining 90% ramp down
        up = round((steps * n_epochs) * 0.1)
        down = (steps * n_epochs) - up
        return up, down
| [
"torch.nn.functional.softmax",
"numpy.sqrt",
"torch.sigmoid",
"torch.optim.lr_scheduler.CyclicLR",
"torch.cuda.is_available",
"tqdm.trange",
"torch.optim.AdamW"
] | [((192, 217), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (215, 217), False, 'import torch\n'), ((3245, 3416), 'torch.optim.lr_scheduler.CyclicLR', 'torch.optim.lr_scheduler.CyclicLR', (['optimizer'], {'base_lr': '(max_lr / 10.0)', 'max_lr': 'max_lr', 'step_size_up': 'step_size_up', 'step_size_down': 'step_size_down', 'cycle_momentum': '(False)'}), '(optimizer, base_lr=max_lr / 10.0, max_lr=\n max_lr, step_size_up=step_size_up, step_size_down=step_size_down,\n cycle_momentum=False)\n', (3278, 3416), False, 'import torch\n'), ((7333, 7358), 'torch.optim.AdamW', 'torch.optim.AdamW', (['params'], {}), '(params)\n', (7350, 7358), False, 'import torch\n'), ((7399, 7563), 'torch.optim.lr_scheduler.CyclicLR', 'torch.optim.lr_scheduler.CyclicLR', (['optimizer'], {'base_lr': 'base_lr', 'max_lr': 'max_lr', 'step_size_up': 'step_size_up', 'step_size_down': 'step_size_down', 'cycle_momentum': '(False)'}), '(optimizer, base_lr=base_lr, max_lr=max_lr,\n step_size_up=step_size_up, step_size_down=step_size_down,\n cycle_momentum=False)\n', (7432, 7563), False, 'import torch\n'), ((8456, 8481), 'torch.optim.AdamW', 'torch.optim.AdamW', (['params'], {}), '(params)\n', (8473, 8481), False, 'import torch\n'), ((8522, 8686), 'torch.optim.lr_scheduler.CyclicLR', 'torch.optim.lr_scheduler.CyclicLR', (['optimizer'], {'base_lr': 'base_lr', 'max_lr': 'max_lr', 'step_size_up': 'step_size_up', 'step_size_down': 'step_size_down', 'cycle_momentum': '(False)'}), '(optimizer, base_lr=base_lr, max_lr=max_lr,\n step_size_up=step_size_up, step_size_down=step_size_down,\n cycle_momentum=False)\n', (8555, 8686), False, 'import torch\n'), ((9388, 9428), 'tqdm.trange', 'trange', (['steps'], {'disable': '(self.verbose != 1)'}), '(steps, disable=self.verbose != 1)\n', (9394, 9428), False, 'from tqdm import trange\n'), ((10296, 10317), 'torch.sigmoid', 'torch.sigmoid', (['y_pred'], {}), '(y_pred)\n', (10309, 10317), False, 'import torch\n'), ((10424, 10448), 
'torch.nn.functional.softmax', 'F.softmax', (['y_pred'], {'dim': '(1)'}), '(y_pred, dim=1)\n', (10433, 10448), True, 'import torch.nn.functional as F\n'), ((10588, 10605), 'numpy.sqrt', 'np.sqrt', (['avg_loss'], {}), '(avg_loss)\n', (10595, 10605), True, 'import numpy as np\n')] |
import numpy as np
import scipy.integrate
import sys
import Functional
from scipy import signal
class MFA1d(Functional.Functional):
    # Mean-field approximation (MFA) functional on a 1-D planar grid: the
    # attractive contribution is evaluated by convolving the density profile
    # with a tabulated kernel stored in self.DCF.
    # NOTE(review): self.maxNum, self.gridWidth and densityIntegrateFFT come
    # from the Functional base class (not visible here) — confirm their
    # semantics against that file.

    def __init__(self, fluid, system):
        # fluid: dict of per-component parameters (sigma, epsilon, temperature, ...)
        # system: dict describing the grid and cutoff (grid, size, cutoff, ...)
        super(MFA1d, self).__init__(fluid, system)
        # ============ init DCF ============ #
        # DCF[z, i, j]: laterally integrated attraction kernel between
        # components i and j at perpendicular offset z*gridWidth.
        self.DCF = np.zeros((self.maxNum*2+1, self.fluid["component"], self.fluid["component"]))
        # print(self.DCF)
        for i in range(self.fluid["component"]):
            for j in range(i, self.fluid["component"]):
                # Lorentz-Berthelot combining rules; epsilon is reduced by temperature
                sigma = (self.fluid["sigma"][i] + self.fluid["sigma"][j]) / 2
                epsilon = np.sqrt(self.fluid["epsilon"][i] * self.fluid["epsilon"][j])/ self.fluid["temperature"]
                # in-plane integral over rr of the attractive potential, one entry per z-offset
                u = [scipy.integrate.quad(self.uattz,0,np.inf,args=(z*self.gridWidth,epsilon,sigma, \
                    self.system["cutoff"]))[0] for z in range(-self.maxNum, self.maxNum+1)]
                u = np.array(u)
                u *= 2 * np.pi
                # kernel is symmetric under exchange of the component indices
                if i == j:
                    self.DCF[:,i,j] = u
                else:
                    self.DCF[:,i,j] = u
                    self.DCF[:,j,i] = u
        # print(self.DCF.T)
        # print(np.sum(self.DCF))

    @property
    def density(self):
        # density profile; set externally with shape (components, grid)
        return self._density

    @density.setter
    def density(self, density):
        self._density = density

    def uattPY(self, rr, epsilon, sigma, eta):
        # Percus-Yevick-style hard-sphere direct correlation function at packing
        # fraction eta, using a temperature-dependent effective diameter d(T*);
        # TODO(review): confirm the source of the d(T*) fit coefficients.
        Tstar = 1.0/epsilon
        d = (1+0.2977*Tstar) / (1 + 0.33163*Tstar + 0.0010477*Tstar**2) * sigma
        r = rr/d
        if r > 1:
            u = 0
        else:
            u = -eta*(1+2*eta)**2 * r**4 / ( 2*(1-eta)**4 )
            u += 6*eta*(1+eta+eta**2/4) * r**2 / ( 1-eta )**4
            u -= (1+2*eta)**2 * r / ( (1-eta)**4 )
        return u/r

    def uattz(self, rr, z, epsilon, sigma, cutoff):
        # Integrand for the lateral integral at fixed perpendicular offset z:
        # returns u_att(sqrt(rr^2 + z^2)) * rr, where u_att is the
        # cut-and-shifted Lennard-Jones attraction with a WCA-style split
        # (constant -epsilon inside the potential minimum at 2^(1/6)*sigma).
        r = np.sqrt(rr**2 + z**2)
        # shift so the potential vanishes continuously at the cutoff
        ucutoff = 4*epsilon * ((sigma/cutoff)**12 - (sigma/cutoff)**6)
        # print(ucutoff)
        if r < (2**(1/6)) * sigma:
            u = -epsilon - ucutoff
        # if r < sigma:
        #     u = 0
        elif r < cutoff:
            u = 4*epsilon * ((sigma/r)**12 - (sigma/r)**6) - ucutoff
        else:
            u = 0
        return u * rr

    '''
    [old version] In this version, I create a matrix to achieve the convolution.
    '''
    # def exChemicalPotential(self):
    #     densityMatrix = self.densityIntegrate(self._density, self.fluid["component"], self.maxNum, self.system["grid"])
    #     exChemP = np.zeros((self.fluid["component"], self.system["grid"]))
    #     for i in range(self.fluid["component"]):
    #         x = np.sum(densityMatrix * self.DCF[:,:,i].reshape((-1,self.fluid["component"],1)), axis = 0)
    #         exChemP[i, :] = np.sum(x, axis = 0)
    #     exChemP *= self.gridWidth
    #     return exChemP
    '''
    In this version, I finished the convolution by using FFT (from scipy)
    '''
    def exChemicalPotential(self):
        # Excess chemical potential from the mean-field convolution, done via FFT.
        # When system["Cor"] is falsy, the bulk density is subtracted first so
        # the result is measured relative to the bulk.
        # density = self._density
        if self.system["Cor"] == True:
            density = self._density
        else:
            density = self._density - self.system["bulkDensity"].reshape((self.fluid["component"], -1))
        return self.densityIntegrateFFT(density, self.DCF, self.fluid["component"], self.maxNum, self.system["grid"])
if __name__ == "__main__":
    # Smoke test / comparison plot: evaluate the attraction kernel for a
    # one-component Lennard-Jones fluid and compare against reference data.
    import matplotlib.pyplot as plt
    import MBWR_EOS
    import FMT1d
    # ---- fluid definition: single-component LJ in reduced units ----
    fluid = {}
    fluid["type"] = "LJ"
    fluid["component"] = 1
    fluid["sigma"] = np.array([1.0])
    fluid["epsilon"] = np.array([1.0])
    fluid["diameter"] = np.array([1.0])
    fluid["temperature"] = 1
    # ---- system / grid definition ----
    system = {}
    system["grid"] = 600
    system["bulkDensity"] = np.array([0.2])
    system["boundaryCondition"] = 1
    system["size"] = 30
    system["cutoff"] = np.array([6])
    # testFMT = FMT1d.FMT1d(fluid, system)
    testMFA = MFA1d(fluid, system)
    # uniform profile at the bulk density
    testMFA.density = np.zeros((fluid["component"], system["grid"])) + system["bulkDensity"]
    # print(testMFA.exChemicalPotential())
    # -u_att(r)/kT sampled on r in (0, 3]; epsilon=1/1.5 corresponds to T*=1.5
    x = [[x, -testMFA.uattz(x, 0, 1/1.5, 1, 5)/x] for x in np.linspace(0.001, 3, 600)]
    x = np.array(x)
    # PY hard-sphere contribution at packing fraction eta = 0.4*pi/6
    y = [[0, testMFA.uattPY(x, 1/1.5, 1, 0.4*np.pi/6)] for x in np.linspace(0.001, 3, 600)]
    y = np.array(y)
    z = x + y
    # reference direct-correlation data for comparison (y is reused here)
    y = np.loadtxt("./Comparison/LJ_cDFT/cr_att.dat")
    plt.figure()
    plt.xlim((0,2.0))
    plt.ylim((-5,1.2))
    plt.plot(z[:,0], z[:,1])
    plt.scatter(y[:,0], y[:,1])
    plt.savefig("./Comparison/LJ_cDFT/cr_att_MFA.jpg")
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlim",
"numpy.sqrt",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylim",
"numpy.loadtxt"
] | [((3477, 3492), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (3485, 3492), True, 'import numpy as np\n'), ((3516, 3531), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (3524, 3531), True, 'import numpy as np\n'), ((3556, 3571), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (3564, 3571), True, 'import numpy as np\n'), ((3671, 3686), 'numpy.array', 'np.array', (['[0.2]'], {}), '([0.2])\n', (3679, 3686), True, 'import numpy as np\n'), ((3770, 3783), 'numpy.array', 'np.array', (['[6]'], {}), '([6])\n', (3778, 3783), True, 'import numpy as np\n'), ((4095, 4106), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (4103, 4106), True, 'import numpy as np\n'), ((4208, 4219), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (4216, 4219), True, 'import numpy as np\n'), ((4244, 4289), 'numpy.loadtxt', 'np.loadtxt', (['"""./Comparison/LJ_cDFT/cr_att.dat"""'], {}), "('./Comparison/LJ_cDFT/cr_att.dat')\n", (4254, 4289), True, 'import numpy as np\n'), ((4294, 4306), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4304, 4306), True, 'import matplotlib.pyplot as plt\n'), ((4311, 4329), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0, 2.0)'], {}), '((0, 2.0))\n', (4319, 4329), True, 'import matplotlib.pyplot as plt\n'), ((4333, 4352), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-5, 1.2)'], {}), '((-5, 1.2))\n', (4341, 4352), True, 'import matplotlib.pyplot as plt\n'), ((4356, 4382), 'matplotlib.pyplot.plot', 'plt.plot', (['z[:, 0]', 'z[:, 1]'], {}), '(z[:, 0], z[:, 1])\n', (4364, 4382), True, 'import matplotlib.pyplot as plt\n'), ((4385, 4414), 'matplotlib.pyplot.scatter', 'plt.scatter', (['y[:, 0]', 'y[:, 1]'], {}), '(y[:, 0], y[:, 1])\n', (4396, 4414), True, 'import matplotlib.pyplot as plt\n'), ((4417, 4467), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./Comparison/LJ_cDFT/cr_att_MFA.jpg"""'], {}), "('./Comparison/LJ_cDFT/cr_att_MFA.jpg')\n", (4428, 4467), True, 'import matplotlib.pyplot as plt\n'), ((294, 380), 'numpy.zeros', 
'np.zeros', (["(self.maxNum * 2 + 1, self.fluid['component'], self.fluid['component'])"], {}), "((self.maxNum * 2 + 1, self.fluid['component'], self.fluid[\n 'component']))\n", (302, 380), True, 'import numpy as np\n'), ((1822, 1847), 'numpy.sqrt', 'np.sqrt', (['(rr ** 2 + z ** 2)'], {}), '(rr ** 2 + z ** 2)\n', (1829, 1847), True, 'import numpy as np\n'), ((3885, 3931), 'numpy.zeros', 'np.zeros', (["(fluid['component'], system['grid'])"], {}), "((fluid['component'], system['grid']))\n", (3893, 3931), True, 'import numpy as np\n'), ((4059, 4085), 'numpy.linspace', 'np.linspace', (['(0.001)', '(3)', '(600)'], {}), '(0.001, 3, 600)\n', (4070, 4085), True, 'import numpy as np\n'), ((4172, 4198), 'numpy.linspace', 'np.linspace', (['(0.001)', '(3)', '(600)'], {}), '(0.001, 3, 600)\n', (4183, 4198), True, 'import numpy as np\n'), ((912, 923), 'numpy.array', 'np.array', (['u'], {}), '(u)\n', (920, 923), True, 'import numpy as np\n'), ((609, 669), 'numpy.sqrt', 'np.sqrt', (["(self.fluid['epsilon'][i] * self.fluid['epsilon'][j])"], {}), "(self.fluid['epsilon'][i] * self.fluid['epsilon'][j])\n", (616, 669), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.constants import k,h,c
from scipy.optimize import curve_fit
from numba import njit,jit
import emcee
@njit
def Planck(lamb,T):
    """
    Black-body spectral radiance per unit wavelength, B_lambda.
    (The original docstring said "Bnu", but the 2*h*c**2/lambda**5 prefactor
    is the wavelength form B_lambda, not the frequency form B_nu.)
    Works elementwise when given an array of wavelengths.
    Args:
        lamb: (float or ndarray) wavelength [m]
        T: (float) temperature [K]
    Returns:
        flux: (float or ndarray) spectral radiance at the given wavelength and temperature.
    """
    flux = 2*h*c**2/lamb**5 * 1/ (np.exp(h*c/(lamb*k*T))-1)
    return flux
def get_mag_filtered(filter_data, band, T2, b):
    """
    Vega-calibrated magnitude of a black body of temperature ``T2`` in one band.
    The magnitude is the ratio of the band-integrated (response-weighted)
    Planck fluxes of the target and of Vega (T = 9602 K); distance differences
    are absorbed into the free offset ``b``.
    Args:
        filter_data: (dict) filter data; keys are band names, values are
            (wavelengths [angstrom], response weights) pairs.
        band: (str) filter name.
        T2: (float) target temperature to test [K].
        b: (float) free magnitude offset [mag].
    Returns:
        (float) calibrated magnitude in the requested band.
    """
    response = filter_data[band]
    wavelengths = response[0] * 1e-10  # angstrom -> metre
    throughput = response[1]
    # band-integrated (weighted) fluxes for Vega and for the trial temperature
    vega_binned = (Planck(wavelengths, 9602) * throughput).sum()
    target_binned = (Planck(wavelengths, T2) * throughput).sum()
    return -2.5 * np.log10(target_binned / vega_binned) + b
def calc_loglik(T_test,filter_data,bands,mag_obs,mag_err):
    """
    Calculate log-likelihood
    Args:
        T_test: (float) temperature at which to evaluate the log-likelihood. [K]
        filter_data: (dictionary) filter data. keys correspond to band, values to data.
        bands: (list) list of strings corresponding to filter names.
        mag_obs: (array) observed magnitudes of target. [mags]
        mag_err: (array) error on observed magnitude [mags]
    Returns:
        (float) log-likelihood of the observed magnitudes at T_test.
    """
    # Model magnitudes in each band with the offset b fixed at 0; the
    # best-fit offset is then solved for analytically below.
    mag_fit = np.asarray(list(map(
                lambda band: get_mag_filtered(filter_data,band,T_test,0),
                bands)))
    # Inverse-variance weighted estimate of the magnitude offset.
    b = np.average(mag_obs-mag_fit, weights=1/mag_err**2)
    chi2 = (mag_obs-mag_fit-b)**2/mag_err**2
    # NOTE(review): a Gaussian log-likelihood normalization usually enters as
    # log(2*pi*sigma**2); here 2*np.pi*mag_err**2 is added without the log.
    # For fixed mag_err this only shifts the likelihood by a constant and does
    # not move the maximum -- confirm intent before reusing elsewhere.
    loglik = -0.5* (chi2 + 2*np.pi*mag_err**2).sum()
    return loglik
def log_Gaussian(x,mu,sigma,offset):
    """
    Logarithm of an (unnormalised) Gaussian plus a constant offset.

    Args:
        x: (array) input array on which to evaluate the log-Gaussian.
        mu: (float) center of the Gaussian.
        sigma: (float) standard deviation of the Gaussian.
        offset: (float) vertical offset of the log-Gaussian.
    Returns:
        (array) offset - 0.5 * ((x - mu) / sigma)**2.
    """
    z = (x - mu) / sigma
    return offset - 0.5 * z * z
def get_Temp(filter_data,bands,m_obs,m_err,T_min=3000,T_max=15000,dT=10,cut_range=1000,R_peak=500,Nsigma_range=2,multiprocessing=True):
    """
    Estimate the best-fit temperature and its 1-sigma uncertainty.

    A coarse grid scan of the log-likelihood is followed by a Gaussian fit
    around the peak, then a finer scan within +/- Nsigma_range sigma and a
    second Gaussian fit.

    Args:
        filter_data: (dictionary) filter data. keys correspond to band, values to data.
        bands: (list) list of strings corresponding to filter names.
        m_obs: (array) observed magnitudes of target. [mags]
        m_err: (array) error on observed magnitude [mags]
        T_min: (float) minimum temperature to explore. [K]
        T_max: (float) maximum temperature to explore. [K]
        dT: (float) increment of temperature in grid exploration. [K]
        cut_range: (float) region around the peak of the temperature likelihood function at which the Gaussian fit is performed. [K]
        R_peak: (int) number of samples (resolution) for second, finer fitting.
        Nsigma_range: (float) number of sigma around best fit to search for the second, finer fitting.
        multiprocessing: (bool) whether or not to use the multiprocessing module to speed up code execution.
    Returns:
        (float, float) most probable temperature and its 1-sigma uncertainty. [K]
    """
    # Bug fix: Pool was used below but never imported, so the default
    # multiprocessing=True path raised a NameError.
    from multiprocessing import Pool
    # helper must live at module scope so Pool.map can pickle it.
    global helper
    def helper(Temp):
        return calc_loglik(Temp,filter_data,bands,m_obs,m_err)

    def scan(temps):
        # Evaluate the log-likelihood on a temperature grid, optionally in parallel.
        if multiprocessing:
            with Pool() as pool:
                loglik = pool.map(helper, temps)
        else:
            loglik = list(map(helper, temps))
        return np.asarray(loglik)

    ## coarse search over the full [T_min, T_max] range
    temps = np.linspace(T_min, T_max, int((T_max - T_min) / dT))
    loglik = scan(temps)
    ## estimate uncertainty from a Gaussian fit around the peak
    mpv = temps[loglik == loglik.max()][0]
    cut = (temps > mpv - cut_range) & (temps < mpv + cut_range)
    popt, _ = curve_fit(lambda x, sigma, offset: log_Gaussian(x, mpv, sigma, offset),
                       temps[cut], loglik[cut])
    T_sigma = popt[0]
    if T_sigma < dT:
        print('warning: results may not be accurate. Try again with smaller dT (default 10)')
    ## fine search within +/- Nsigma_range sigma of the coarse peak
    temps = np.linspace(mpv - Nsigma_range * T_sigma,
                        mpv + Nsigma_range * T_sigma,
                        R_peak)
    loglik = scan(temps)
    popt, _ = curve_fit(log_Gaussian, temps, loglik, p0=[mpv, T_sigma, loglik.max()])
    T_mpv = popt[0]
    T_sigma = popt[1]
    return T_mpv, T_sigma
| [
"numpy.log10",
"numpy.average",
"numpy.asarray",
"numpy.exp",
"numpy.linspace"
] | [((2274, 2329), 'numpy.average', 'np.average', (['(mag_obs - mag_fit)'], {'weights': '(1 / mag_err ** 2)'}), '(mag_obs - mag_fit, weights=1 / mag_err ** 2)\n', (2284, 2329), True, 'import numpy as np\n'), ((4364, 4382), 'numpy.asarray', 'np.asarray', (['loglik'], {}), '(loglik)\n', (4374, 4382), True, 'import numpy as np\n'), ((4778, 4857), 'numpy.linspace', 'np.linspace', (['(mpv - Nsigma_range * T_sigma)', '(mpv + Nsigma_range * T_sigma)', 'R_peak'], {}), '(mpv - Nsigma_range * T_sigma, mpv + Nsigma_range * T_sigma, R_peak)\n', (4789, 4857), True, 'import numpy as np\n'), ((5108, 5126), 'numpy.asarray', 'np.asarray', (['loglik'], {}), '(loglik)\n', (5118, 5126), True, 'import numpy as np\n'), ((487, 517), 'numpy.exp', 'np.exp', (['(h * c / (lamb * k * T))'], {}), '(h * c / (lamb * k * T))\n', (493, 517), True, 'import numpy as np\n'), ((1604, 1649), 'numpy.log10', 'np.log10', (['(flux_test_binned / flux_vega_binned)'], {}), '(flux_test_binned / flux_vega_binned)\n', (1612, 1649), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script saves bid and ask data for specified ETFs to files for each day
during market open hours.
It assumes the computer is at US East Coast Time.
@author: mark
"""
import os
import pandas as pd
import numpy as np
from itertools import product
import streamlit as st
from bokeh.plotting import figure
from bokeh.models.tools import HoverTool
from bokeh.models import NumeralTickFormatter, DatetimeTickFormatter, Rect, ColumnDataSource, VBar, LabelSet
from streamlit_metrics import metric_row
def display_method_to_choose_etfs(selected_method_choose_dates, all_etfs, etf_data, sl_obj):
    """
    Generates various streamlit options for selecting which ETFs to display.

    Parameters
    ----------
    selected_method_choose_dates : list of str
        Strings of the various methods of selecting ETFs.
    all_etfs : list of str
        List of all ETF tickers.
    etf_data : pd.DataFrame
        Dataframe containing bulk data about ETFs.
    sl_obj : streamlit
        Streamlit object to place the elements.

    Returns
    -------
    selected_etfs : list of str
        List of str tickers chosen by users.
    """
    selected_etfs = all_etfs
    # The volume and market-cap filters share identical log-range-slider
    # logic; it is factored out into _log_slider_filter below.
    if 'By volume traded' in selected_method_choose_dates:
        selected_etfs = _log_slider_filter(sl_obj, 'Average Volume (shares/day)',
                                           etf_data['volume (shares/day)'], selected_etfs)
    if 'By market cap' in selected_method_choose_dates:
        selected_etfs = _log_slider_filter(sl_obj, 'Market Cap as of 2021-02-21 (million USD)',
                                           etf_data['net assets (million USD)'], selected_etfs)
    if 'Only ESG ETFs' in selected_method_choose_dates:
        esg_etfs = etf_data[etf_data['esg'] == True].index
        selected_etfs = list(set(selected_etfs) & set(esg_etfs))
    if 'choose specific ETFs' in selected_method_choose_dates:
        selected_etfs = sl_obj.multiselect('Which ETFs do you want to look at', list(selected_etfs), ['ESGV','VTI','BND', 'VCEB', 'VSGX'])
    return selected_etfs


def _log_slider_filter(sl_obj, label, selection_data, selected_etfs):
    """
    Render a log10-scale range slider for a numeric ETF attribute and
    intersect the current selection with the ETFs in the chosen range.

    Parameters
    ----------
    sl_obj : streamlit
        Streamlit object to place the slider.
    label : str
        Slider label shown to the user.
    selection_data : pd.Series
        Numeric attribute indexed by ETF ticker.
    selected_etfs : list of str
        Current ticker selection to intersect with.

    Returns
    -------
    list of str
        Tickers still selected after the range filter.
    """
    log_min = float(np.floor(np.log10(selection_data.min())))
    log_max = float(np.ceil(np.log10(selection_data.max())))
    min_vol, max_vol = sl_obj.slider(label,
                              min_value=float(log_min),
                              max_value=float(log_max),
                              value=(float(log_min), float(log_max)),
                              step=float(log_min - log_max) / 100,
                              format='10^%.1f'
                              )
    selected = (selection_data >= 10**min_vol) & (selection_data <= 10**max_vol)
    return list(set(selected_etfs) & set(selection_data[selected].index))
def get_averages(data, selected_dates, selected_etfs):
    """
    Average the per-day curves of each ETF over the chosen dates.

    Parameters
    ----------
    data : pd.DataFrame
        Data of various days and ETFs; columns are (date, etf) pairs.
    selected_dates : list of str
        List of dates in format YYYY-MM-DD.
    selected_etfs : list of str
        List of ETF tickers.

    Returns
    -------
    pd.DataFrame
        One column per ETF containing the mean over the selected dates.
    """
    # Keep only the (date, etf) pairs that actually exist in the data.
    wanted = [(date, etf)
              for date in selected_dates
              for etf in selected_etfs
              if (date, etf) in data.columns]
    return data[wanted].T.groupby(level=['etf']).mean().T
def add_trade_windows(p, t_new, t_old, ymax):
    """
    Add shaded rectangles marking the two trading windows to a plot.

    Parameters
    ----------
    p : Bokeh figure
        Figure to add trading windows to.
    t_new : tuple of timestamps
        Starting and ending timestamp of the new trading window.
    t_old : tuple of timestamps
        Starting and ending timestamp of the old trading window.
    ymax : float
        Max data value, used to position/size the window rectangles.
    Returns
    -------
    None.
    """
    # One rectangle per window: x/y are the rectangle centers, w/h the sizes.
    source = ColumnDataSource(dict(x=[t_old[0]+0.5*(t_old[1]-t_old[0]),t_new[0]+0.5*(t_new[1]-t_new[0])],
                               y=[ymax-0.0002, ymax-0.0002 ],
                               w=[t_old[1]-t_old[0], t_new[1]-t_new[0]],
                               h =[2,2],
                               desc=['Old', 'New']))
    # NOTE(review): ymax > 2 appears to distinguish dollar-scale plots
    # (bid/ask prices) from spread-scale plots (ymax ~1e-3), stretching the
    # rectangles to full height for the former -- confirm before reusing.
    if ymax > 2:
        patch = {'h' : [ (0, ymax), (1, ymax) ],}
        source.patch(patch)
    # Faint rectangles that darken slightly on hover.
    boxes = Rect(x='x',y='y',width='w', height='h', fill_color='grey', fill_alpha=0.1,
                       line_width=0)
    boxes_select = Rect(x='x',y='y',width='w', height='h', fill_color='grey', fill_alpha=.2,
                       line_width=0)
    box_rend = p.add_glyph(source, boxes)
    box_rend.hover_glyph = boxes_select
    tooltips = [('trade window','@desc')]
    p.add_tools(HoverTool(tooltips=tooltips, renderers=[box_rend]))
def format_plots(p, ymax=None):
    """
    Apply the shared axis, grid, and toolbar styling to a spread figure.

    Parameters
    ----------
    p : Bokeh figure plot
        Bokeh plot object to format in place.
    ymax : float, optional
        Max y-axis value; when given, the number of decimal places on the
        percent axis is scaled to suit the data range. The default is None.

    Returns
    -------
    None
    """
    if ymax is None:
        percent_format = '0.00%'
    else:
        # Scale decimal places so small spreads still show significant digits.
        decimals = int(np.log10(1/ymax)-.4)
        percent_format = '0.' + '0' * decimals + '%'
    p.yaxis.formatter = NumeralTickFormatter(format=percent_format)
    p.xaxis.formatter = DatetimeTickFormatter(hours='%H:%M')
    p.xaxis.axis_label = 'Market Time'
    p.xgrid.grid_line_color = None
    p.ygrid.grid_line_color = None
    p.toolbar.autohide = True
def make_multi_etf_plot(selected_etfs, selected_dates, t_new, t_old, quoted_spread):
    """
    Make plot with multiple ETF averages
    Parameters
    ----------
    selected_etfs : list of str
        List of ETF tickers
    selected_dates : list of str
        List of dates to obtain averages of. In format YYYY-MM-DD.
    t_new : tuple of timestamps
        Starting and ending timestamp of the new trading window.
    t_old : tuple of timestamps
        Starting and ending timestamp of the old trading window.
    quoted_spread : pd.DataFrame
        Quoted spread data for various times, days, and ETFs.
    Returns
    -------
    p : Bokeh figure
        Plot of multiple ETF averages.
    """
    t_all = t_new + t_old
    # Per-ETF curves averaged over the selected dates.
    average_data = get_averages(quoted_spread, selected_dates, selected_etfs)
    # x-range runs from market open to 1.5 h past the latest window edge.
    p = figure(plot_width=400, plot_height=400, x_axis_type="datetime",
           toolbar_location='below', title='quoted Bid-Ask Spread for various ETFs',
           x_range=(pd.Timestamp('2021-01-01 9:30'), max(t_all)+pd.Timedelta(hours=1.5)),
           y_range=(0, average_data.max().max()+0.0001))
    # shaded old/new trading windows
    add_trade_windows(p, t_new, t_old, average_data.max().max())
    # one grey line per ETF; the hovered line is highlighted in red
    renders = []
    for etf in selected_etfs:
        renders.append(p.line(average_data.index, average_data[etf],# set visual properties for selected glyphs
                    hover_color="firebrick",
                    hover_alpha=1,
                    # set visual properties for non-selected glyphs
                    color="grey",
                    alpha=0.5,
                    name=etf))
    tooltips = [('etf','$name'),
                ('time','$x{%H:%M}'),
                ('Bid-Ask spread', '$y{"0.00%"}')]
    formatters = { "$x": "datetime",}
    p.add_tools(HoverTool(tooltips=tooltips, renderers=renders, formatters=formatters))
    format_plots(p, ymax=average_data.max().max()+0.0001)
    return p
def make_single_etf_plot(selected_etf, selected_dates, t_new, t_old, quoted_spread, supress_hover_after= 10000):
    """
    Plots data for a single ETF for multiple days.
    Parameters
    ----------
    selected_etf : str
        Single ETF ticker to plot.
    selected_dates : list of str
        List of dates to plot. In format YYYY-MM-DD.
    t_new : tuple of timestamps
        Starting and ending timestamp of the new trading window.
    t_old : tuple of timestamps
        Starting and ending timestamp of the old trading window.
    quoted_spread : pd.DataFrame
        Quoted spread data for various times, days, and ETFs.
    supress_hover_after : int, optional
        Do not show hover functionality if there are more than this number of days. The default is 10000.
    Returns
    -------
    p : Bokeh figure
        Plot of single ETF over various days.
    """
    t_all = t_new + t_old
    average_data = get_averages(quoted_spread, selected_dates, [selected_etf])
    p = figure(plot_width=400, plot_height=400, x_axis_type="datetime",
           toolbar_location='below', title='Quoted spread for {}'.format(selected_etf),
           x_range=(pd.Timestamp('2021-01-01 9:30'), max(t_all)+pd.Timedelta(hours=1.5)),
           y_range=(0, average_data.max().max()+0.0001))
    # shaded old/new trading windows
    add_trade_windows(p, t_new, t_old, average_data.max().max())
    # one faint grey line per day (skipping days without data for this ETF),
    # plus a black average line drawn on top
    renders = []
    if len(selected_dates) > 1:
        for date in selected_dates:
            try:
                render = p.line(quoted_spread.index, quoted_spread.loc[:,(date,selected_etf)],# set visual properties for selected glyphs
                            hover_color="firebrick",
                            hover_alpha=0.33,
                            color="grey",
                            alpha=0.25,
                            name=date)
            except KeyError:
                # this ETF has no data for this date
                continue
            if len(selected_dates) < supress_hover_after:
                renders.append(render)
        average_name = 'average'
    else:
        average_name = selected_dates[0]
    renders.append(p.line(average_data.index, average_data[selected_etf],# set visual properties for selected glyphs
                hover_color="firebrick",
                hover_alpha=0.75,
                color="black",
                alpha=0.5,
                name=average_name))
    tooltips = [('date','$name'),
                ('time','$x{%H:%M}'),
                ('Bid-Ask spread', '$y{"0.00%"}')]
    formatters = { "$x": "datetime",}
    p.add_tools(HoverTool(tooltips=tooltips, renderers=renders, formatters=formatters))
    format_plots(p)
    return p
def make_bid_ask_plot(selected_etf, selected_date, t_new, t_old, directory):
    """
    Plots bid and ask prices over one trading day for one ETF.
    Parameters
    ----------
    selected_etf : str
        ETF ticker of data to show.
    selected_date : str
        Date of data to show. In format YYYY-MM-DD.
    t_new : tuple of timestamps
        Starting and ending timestamp of the new trading window.
    t_old : tuple of timestamps
        Starting and ending timestamp of the old trading window.
    directory : str
        Folder containing ETF bid and ask price data. File must be in format date_etf.csv.
    Returns
    -------
    p : Bokeh figure
        Plot of bid and ask prices.
    """
    data = pd.read_csv(os.path.join(directory, '{}_{}.csv'.format(selected_date, selected_etf)), index_col=0)
    # The csv index is seconds since market open; convert it to timestamps
    # anchored at 9:30 on a reference date so it lines up with t_new/t_old.
    basetime = pd.to_datetime('2021-01-01') + pd.Timedelta(hours=9, minutes=30)
    timedeltas = pd.TimedeltaIndex([pd.Timedelta(seconds=x) for x in data.index])
    data.index = timedeltas + basetime
    t_all = t_new + t_old
    bid = data.bid
    ask = data.ask
    # y-range padded by $0.20 on each side of the observed prices
    p = figure(plot_width=400, plot_height=400, x_axis_type="datetime",
           toolbar_location='below', title='Bid & ask prices for {} on {}'.format(selected_etf, selected_date),
           x_range=(pd.Timestamp('2021-01-01 9:30'), max(t_all)+pd.Timedelta(hours=1.5)),
           y_range=(min(bid.min(),ask.min())-0.2, max(bid.max(),ask.max())+0.2))
    # shaded old/new trading windows
    add_trade_windows(p, t_new, t_old, max(bid.max(),ask.max()))
    # blue bid line and red ask line, both highlighted on hover
    renders = []
    renders.append(p.line(bid.index, bid.values,# set visual properties for selected glyphs
                hover_color="blue",
                hover_alpha=1,
                color="blue",
                alpha=.5,
                name='bid'))
    renders.append(p.line(ask.index, ask.values,# set visual properties for selected glyphs
                hover_color="firebrick",
                hover_alpha=1,
                color="firebrick",
                alpha=0.5,
                name='ask'))
    tooltips = [('type','$name'),
                ('time','$x{%H:%M}'),
                ('price', '$y{"$0.00"}')]
    formatters = { "$x": "datetime",}
    p.add_tools(HoverTool(tooltips=tooltips, renderers=renders, formatters=formatters))
    format_plots(p)
    # prices are in dollars, so override the percent formatter set above
    p.yaxis.formatter = NumeralTickFormatter(format="$0.00")
    return p
def make_relative_fee_amount(selected_ratios, t_new_text = ''):
    """
    Generate a bar plot for the ratio of quoted spread to expense ratio.

    Parameters
    ----------
    selected_ratios : pd.Series
        Data of ratio of quoted spread to expense ratio, indexed by ETF ticker.
    t_new_text : str
        Time range to place in title of plot.

    Returns
    -------
    p : Bokeh figure
        Produced plot.
    """
    p = figure(plot_width=400, plot_height=400,
           x_axis_label="ETFs", x_minor_ticks=len(selected_ratios),
           toolbar_location='below', title='Ratio of quoted spread to expense ratio {}'.format(t_new_text))
    source = ColumnDataSource(dict(x=range(len(selected_ratios)),
                               top=selected_ratios.values,
                               desc=selected_ratios.index,))
    # Grey bars that highlight in red on hover.
    glyph = VBar(x='x', top='top', bottom=0, width=0.5, fill_color='grey',
                       line_width=0, fill_alpha=0.5)
    glyph_hover = VBar(x='x', top='top', bottom=0, width=0.5, fill_color='firebrick',
                       line_width=0, fill_alpha=1)
    rend = p.add_glyph(source, glyph)
    rend.hover_glyph = glyph_hover
    # (fix: removed an unused LabelSet that was created but never attached
    # to the figure via p.add_layout, so it had no visible effect)
    tooltips = [('etf','@desc'),
                ('ratio','@top')]
    p.add_tools(HoverTool(tooltips=tooltips, renderers=[rend]))
    # Scale the number of decimal places on the percent axis to the data range.
    num_zeros = int(np.log10(1/selected_ratios.max())-.4)
    num_formatter = '0.'+''.join(['0' for x in range(num_zeros)])+'%'
    p.yaxis.formatter = NumeralTickFormatter(format=num_formatter)
    p.xgrid.grid_line_color = None
    p.ygrid.grid_line_color = None
    p.toolbar.autohide = True
    # Label each bar position with its ETF ticker, rotated vertically.
    p.xaxis.bounds = (-.5,len(selected_ratios)-.5)
    p.xaxis.ticker = list(range(len(selected_ratios)))
    p.xaxis.major_label_overrides = dict(zip(range(len(selected_ratios)), list(selected_ratios.index)))
    p.xaxis.major_label_orientation = 3.14/2
    return p
def get_quoted_spread_change(selected_etfs, selected_dates, t_old, t_new, quoted_spread):
    """
    Relative change in average quoted spread between the two trade windows.

    Parameters
    ----------
    selected_etfs : list of str
        List of ETF tickers
    selected_dates : list of str
        List of dates to obtain averages of. In format YYYY-MM-DD.
    t_old : tuple of timestamps
        Starting and ending timestamp of the old trading window.
    t_new : tuple of timestamps
        Starting and ending timestamp of the new trading window.
    quoted_spread : pd.DataFrame
        Quoted spread data for various times, days, and ETFs.

    Returns
    -------
    pd.Series
        Per-ETF ratio of new-window to old-window mean quoted spread,
        sorted in descending order.
    """
    averages = get_averages(quoted_spread, selected_dates, selected_etfs)

    def window_mean(window):
        # Mean quoted spread per ETF, restricted to times inside the window.
        inside = (averages.index > window[0]) & (averages.index < window[1])
        return averages[inside].mean(0)

    ratio = window_mean(t_new) / window_mean(t_old)
    return ratio.sort_values(ascending=False)
def create_metrics(fractional_increase, nwide=4, container=st, max_rows=2):
    """
    Display fractional quoted-spread changes as rows of streamlit metrics.

    Parameters
    ----------
    fractional_increase : pd.Series
        Data of the increase in fees between two windows.
    nwide : int, optional
        Number of metrics to print side-by-side. The default is 4.
    container : streamlit object, optional
        Object to display metrics. The default is st.
    max_rows : int, optional
        Max number of rows to present data for. The default is 2.

    Returns
    -------
    None.
    """
    pending = {}
    rows_shown = 0
    for etf, val in dict(fractional_increase).items():
        # Flush a full row before adding the next metric.
        if len(pending) == nwide:
            with container:
                metric_row(pending)
            pending = {}
            rows_shown += 1
            if rows_shown == max_rows:
                break
        pending[etf] = '{:.0f}%'.format((val - 1) * 100)
    # Show any partially-filled final row.
    if pending:
        with container:
            metric_row(pending)
# --- Page header and collapsible sections ---
st.write("# Bid-Ask spreads. Does time of day matter?")
st.write("#### By <NAME>")
st.write('first published March 10, 2021')
intro = st.beta_expander("Introduction")
data_selection = st.beta_expander("Data selection")
results = st.beta_expander("Results")
conclusion = st.beta_expander("Conclusion")
methods = st.beta_expander("Methods")
disclaimer = st.beta_expander("Disclaimer")
# --- Load precomputed quoted-spread data (columns are (date, etf) pairs) ---
quoted_spread = pd.read_pickle('data/quoted_spread.pkl')
# remove outliers that impact average
del quoted_spread[('2020-12-16', 'SPCX')] # high value on second day of trading
del quoted_spread[('2020-03-12', 'ESGU')] # short high value on during large uncertainty
del quoted_spread[('2020-03-17', 'DRIV')] # short high value on during large uncertainty
del quoted_spread[('2020-02-03', 'EAGG')] # short high value on during large uncertainty
all_dates = list(quoted_spread.columns.levels[0])
all_dates.sort()
all_etfs = list(quoted_spread.columns.levels[1])
# --- ETF metadata; keep only rows flagged for use in this analysis ---
etf_data = pd.read_csv('etf.csv', index_col='Symbol')
etf_data = etf_data[etf_data['for_data'] == True]
# --- User controls: date range, ETF filters, and the two trade windows ---
start, end = data_selection.select_slider('Dates to analyze', all_dates, (all_dates[0], all_dates[-1]))
selected_dates = all_dates[all_dates.index(start):all_dates.index(end)]
method_choose_etfs = data_selection.multiselect('Methods for selecting ETFs',
               ['By volume traded', 'By market cap', 'Only ESG ETFs', 'choose specific ETFs'], ['choose specific ETFs'])
selected_etfs = display_method_to_choose_etfs(method_choose_etfs, all_etfs,etf_data,sl_obj=data_selection)
left_column, right_column = data_selection.beta_columns(2)
t_old = right_column.slider('Old trading window timing',
                  min_value=pd.Timestamp('2021-01-01 9:30').to_pydatetime(),
                  max_value=pd.Timestamp('2021-01-01 16:00').to_pydatetime(),
                  value=(pd.Timestamp('2021-01-01 10:00').to_pydatetime(), pd.Timestamp('2021-01-01 10:15').to_pydatetime()),
                  step=pd.Timedelta(minutes=5).to_pytimedelta(),
                  format='H:mm'
                  )
t_new = left_column.slider('New trading window timing',
                  min_value=pd.Timestamp('2021-01-01 9:30').to_pydatetime(),
                  max_value=pd.Timestamp('2021-01-01 16:00').to_pydatetime(),
                  value=(pd.Timestamp('2021-01-01 9:30').to_pydatetime(), pd.Timestamp('2021-01-01 9:45').to_pydatetime()),
                  step=pd.Timedelta(minutes=5).to_pytimedelta(),
                  format='H:mm'
                  )
# --- Results section: spread plots, change metrics, and fee comparison ---
# NOTE(review): when no dates are selected only a message is shown, but
# execution still falls through to the plotting/metrics code below --
# confirm whether an early stop was intended.
if len(selected_dates) == 0:
    results.write("Please select at least one date.")
if len(selected_etfs) == 0:
    results.write("Please select at least one ETF.")
elif len(selected_etfs) == 1:
    results.bokeh_chart(make_single_etf_plot(selected_etfs[0],selected_dates,t_new, t_old, quoted_spread, supress_hover_after=50))
else:
    results.bokeh_chart(make_multi_etf_plot(selected_etfs,selected_dates, t_new, t_old, quoted_spread))
results.write(r"Quoted spreads $\left(\frac{ask - bid}{(ask + bid)/2}\right)$ were obtained from full volume stock market data")
results.write("#### Relative increase in Bid-Ask spread when moving to new time window:")
relative_spreads = get_quoted_spread_change(selected_etfs, selected_dates, t_old, t_new, quoted_spread)
create_metrics(relative_spreads, container = results)
results.write("""This spread is not the only fee that ETF investors might face. Another prominent one
              is an annual fee that ETF funds charge, known as the [expense ratio](https://en.wikipedia.org/wiki/Expense_ratio). Let's compare
              it with quoted spreads in the new trade window by taking the ratio of the two.""")
# Ratio of new-window quoted spread to each ETF's annual expense ratio.
df = get_averages(quoted_spread, selected_dates, selected_etfs)
new_quotes = df[(df.index > t_new[0]) & (df.index < t_new[1])].mean(0)
t_new_text = '{}:{}-{}:{}'.format(t_new[0].hour, t_new[0].minute,t_new[1].hour, t_new[1].minute)
ratio = (new_quotes / (etf_data.loc[selected_etfs,'expense ratio']/100)).sort_values(ascending=False)
results.bokeh_chart(make_relative_fee_amount(ratio,t_new_text))
results.write("""To put this ratio in perspective, a ratio of 100% in the plot above indicates that if:
1. you were to buy and one year later sell that ETF with this bid-ask spread,
2. the market maker doesn't give a significant reduction in bid-ask spread, and
3. the real value of the fund is halfway between the bid and ask price
then the cost due to the bid-ask spread is approximately equal to the expense
ratio that you paid to the fund.
""")
def write_intro():
    """Populate the 'Introduction' expander with background text and an example bid/ask plot."""
    intro.write("""
    One investment cost that investors may overlook is the [bid-ask spread](https://en.wikipedia.org/wiki/Bid%E2%80%93ask_spread),
    which is the difference between the selling and buying price for a stock. When executing trades, your broker will send
    your order to a [market maker](https://en.wikipedia.org/wiki/Market_maker), which makes money by giving the seller less money
    than they take from the buyer and keeping the difference (known as the effective spread).
    U.S. [regulations](https://www.schwab.com/execution-quality/price-improvement)
    prevent this difference from being greater than the bid-ask spread listed on the exchange (known as the quoted spread).
    The higher the bid-ask spread, the higher the cut the market maker may take.
    Market makers often pass part of the cut back to the brokerage that sent them your trades (known as payment for order flow).
    While this incentivizes brokers to send trades to places that give them a larger cut, they
    are also regulated to ensure the [best execution](https://www.finra.org/rules-guidance/guidance/reports/2019-report-exam-findings-and-observations/best-execution)
    of trades for investors when deciding which market maker
    to send trades to.
    From a broker's perspective, this can be a significant revenue source.
    Barrons recently cited a CFRA analyst that [estimated](https://www.barrons.com/articles/after-the-gamestop-frenzy-robinhood-faces-a-new-set-of-risks-51612573317?mod=hp_minor_pos17)
    80% of Robinhood's revenue came from payment for order flow.
    Since [volatility](https://www.investopedia.com/ask/answers/06/bidaskspread.asp) increases bid-ask spread and one period of higher volatility is when a market opens, I wondered how much buying at the start of the trading day influences this cost. This is quite relevant for investors using [M1 Finance LLC](https://www.m1finance.com/), a company that offers automatic rebalancing but restricts users to set trade times.
    On July 1 2020, M1 shifted their morning trading window from starting at 10:00 am to the market opening time of 9:30 am.
    The reasons behind this, according to [M1 press release](https://www.m1finance.com/blog/trade-window-change/)
    was to enable customers to invest when market volume is high, when the prices are similar to overnight
    prices, because customers have asked for it, and because they can.
    What was not mentioned in the press release was how the timing change will affect costs of trading.
    I wanted to understand how much the bid-ask spread changes over the course of the day and if that even was significant to investors using M1. Below is one example showing quoted bid and ask prices, where you can see the higher gap during
    the start of trading.
    """)
    # Example day: quoted bid/ask prices for ESGV on 2021-02-03.
    intro.bokeh_chart(make_bid_ask_plot('ESGV','2021-02-03',t_new, t_old, 'data/'))
    intro.write("""
    Unfortunately unlike commissions or expense ratios, the bid-ask
    spread costs are less transparent. The quoted prices on the exchange are not the same as the prices the market makers give.
    While most brokers must report how much they receive in payment from market makers,
    they do not have to publish price improvement data, which is what more directly impacts investors.
    M1 is also less transparent than many other brokers. Since M1 [doesn't hold](https://m1-production-agreements.s3.amazonaws.com/documents/M1+Rules+606+%26+607+Disclosures.pdf)
    investors' assets, they are not required to, nor do they voluntarily, provide regular reports of their payment from order flow. They do provide payment for order flow data for an individual trader's trades upon request, but this does not allow comparison on an aggregate basis. In addition, M1, unlike [other](https://www.fidelity.com/trading/execution-quality/overview) [brokers](https://www.schwab.com/execution-quality/price-improvement), does not show clients how much better than the quoted bid-ask spread their trade executed for, reducing transparancy when executing trades.
    Without adequate M1 specific data, I scraped bid and ask prices for 41 ETFs between July 1 2019 and Feb 19 2021. Click the results tab to see how much the change in trade time affected bid-ask spreads for a few of Vanguard's ETFs and how this compares to the expense ratio, another significant fee when investing in ETFs.
    Then check out the 'Data selection' tab to view different ETFs, dates, and trading window timings. If you want to dig
    deeper than this analysis, read the methods section, download the [repository](https://github.com/goldmanm/bid-ask-visualization), and start playing with your own data.
    """)
def write_methods():
    """Populate the 'Methods' expander with the data-processing methodology text."""
    methods.write(r"""
    Raw bid and ask prices, while important, are not the most useful quantity to consider.
    Derived from these values is the [quoted spread](https://en.wikipedia.org/wiki/Bid%E2%80%93ask_spread),
    which gives an indication of the percent
    cost an investor might face when trading. Another metric, the effective spread, would give
    a better indication of the cost (by taking into account market makers' price improvements), but
    M1 does not make this data available, so this can't be used.
    Quoted spread is defined as the ask price minus the bid price,
    divided by the midpoint between the two. Its usefulness is best shown with an example.
    Let's say you chose to change your investment allocation and need to sell \$10,000 of stock A,
    which has a quoted spread of 0.2%, and buy \$10,000 of stock B, which has a quoted
    spread of 0.1%, that transaction would cost you up to \$15 (depending how much of a price improvement
    the market makers give),
    assuming the actual value of the stock is halfway between the two.
    (maximum calculated by $\frac{10,000\times 0.002}{2} + \frac{10,000\times 0.001}{2}$).
    Bid and ask price data originated from full-volume historical quotes obtained from [polygon.io](https://polygon.io/).
    For each data point, the quoted bid-ask spread was calculated by subtracting the bid from
    the ask and dividing by the midpoint of the two. The quoted spreads were
    consolidated into 5 second, time-weighted averages and stored locally. Daily volume data
    also comes from [polygon.io](https://polygon.io/).
    When plotting, times were further consolidated into 1 minute chunks. The points on the graph
    are shown at the midpoint of the averaged region (e.g. data from 9:30:00 to 9:31:00 would be shown at 9:30:30).
    The quoted spread for a particular trading window is the average of the values within that
    window. The default trading window used in this analysis ends 15 minutes after the start
    of the window. The relative cost of moving the trading window (shown by the numbers below the first graph in the results section) is the difference
    between the the new and old quoted spreads divided by the old quoted spread.
    Four days were removed from the data set since they had large bid-ask spread outliers which noticeably
    impacted the average values. The four removed days are:
    1. EAGG on 2020-02-03
    2. ESGU on 2020-03-12
    3. DRIV on 2020-03-17
    4. SPCX on 2020-12-16
    If anyone is curious about these specific days, they can download the [repository](https://github.com/goldmanm/bid-ask-visualization), check out the data, and remove the lines in `app.py` which exclude these days.
    Data about market cap and expense ratio were obtained after trading hours on 24 Feb. 2021 from Yahoo Finance.
    ETFs were chosen
    because either they are commonly traded, they screen for Environmental, Social, or Governance
    (ESG) qualities, or they cover specific sectors. ETFs were not added nor removed based on expected change in price ratio."
    """)
def write_conclusion():
    """Populate the 'Conclusion' expander with the summary and discussion text."""
    conclusion.write("""
    For the vast majority of ETFs evaluated here, trading at the market opening window had substantially wider quoted spreads.
    This is true for both ETF behemoths (e.g. VTI) and newcomers (e.g. VCEB) across a wide range of sectors.
    Some of the additional spread at market opening can be taken by the market makers, leading traders to pay a higher cost. Investors should take note of this cost when deciding when to execute trades.
    M1 finance moved the trading window to a time where the customers may be paying more, and this does not seem to be in the customer's interest (which I believe should have been disclosed when [announcing](https://www.m1finance.com/blog/trade-window-change/) the change),
    and appears to go against the spirit of FINRA's [best execution](https://www.finra.org/rules-guidance/rulebooks/finra-rules/5310) requirement,
    though it may follow the letter of the requirement.
    If M1's revenue from order flow is proportional to bid-ask spreads, it also seems like there could be an unmitigated conflict of interest at play
    when M1 decided to move its trading window. Without full information, it is impossible to evaluate how much M1's revenue increased from changing the trading window.
    When I reached out to M1 referencing their [607 disclosure document](https://m1-production-agreements.s3.amazonaws.com/documents/M1+Rules+606+%26+607+Disclosures.pdf), M1 told me how much they receive in payment for order flow for my trades
    over the past 6 months. They made 15.5 cents per hundred shares on my trades (which involved buying eight distinct ETFs).
    This is typically within the average payments that Apex Clearing (which is where M1 holds the shares) [receives](https://public.s3.com/rule606/apex/), and is
    significantly lower than [Robinhood](https://cdn.robinhood.com/assets/robinhood/legal/RHS%20SEC%20Rule%20606a%20and%20607%20Disclosure%20Report%20Q4%202020.pdf) and
    higher than [TD Ameritrade](https://www.tdameritrade.com/content/dam/tda/retail/marketing/en/pdf/cftc/tdainc-TDA2055-q2-2021.pdf) and [Schwab](https://content.schwab.com/drupal_dependencies/psr/606/2020-Q4-Schwab-Quarterly-Report.pdf).
    This comparison isn't perfect given that my eight ETFs are not representative of all the non-S&P 500 equities (which is the lumped category which companies report payment for order flow from). Given the lack of data, it is unclear whether M1 actually increased revenue from moving the trading time.
    Like any project, this analysis leaves many unanswered questions:
    1. What other data sources and reasoning led M1 to move the market window?
    2. How does the price improvement that market makers offer change between the two windows?
    3. Did the changed window timing increase M1's revenue for order flow?
    4. If there is a larger effective spread at the start of the trade day, why did M1 not inform investors of the potential increase in spread when moving to the new trading window?
    Time may help answer some of these questions, though it's unlikely to happen without significant transparency on M1's part.
    I truly hope that the change in timing was in the best interest of investors, but I have yet to see much evidence of that.
    """)
def write_disclaimer():
    """Render the disclaimer section: no-compensation statement, licence, and bug-report pointer."""
    # `disclaimer` is presumably a page section/expander created earlier in
    # the file -- TODO confirm; this writes markdown into it.
    disclaimer.write("""I received no compensation for working on this project, nor do I hold a stake in
M1 or its competitors (except for what is in the broad-based ETFs that I invest in). 
This analysis and code is listed under an [MIT licence](https://mit-license.org/), which does not include any warranty of any kind. 
This information is not intended to inform investment decisions. If you notice any mistakes, feel free to post an issue on [github](https://github.com/goldmanm/bid-ask-visualization).""")
# Render the report sections in order; each write_* helper fills its
# matching page section (presumably created earlier in the file).
write_intro()
write_methods()
write_conclusion()
write_disclaimer()
| [
"pandas.read_pickle",
"bokeh.models.DatetimeTickFormatter",
"numpy.log10",
"pandas.read_csv",
"streamlit_metrics.metric_row",
"bokeh.models.VBar",
"itertools.product",
"streamlit.write",
"bokeh.models.Rect",
"pandas.to_datetime",
"pandas.Timedelta",
"bokeh.models.tools.HoverTool",
"bokeh.mod... | [((17120, 17175), 'streamlit.write', 'st.write', (['"""# Bid-Ask spreads. Does time of day matter?"""'], {}), "('# Bid-Ask spreads. Does time of day matter?')\n", (17128, 17175), True, 'import streamlit as st\n'), ((17176, 17202), 'streamlit.write', 'st.write', (['"""#### By <NAME>"""'], {}), "('#### By <NAME>')\n", (17184, 17202), True, 'import streamlit as st\n'), ((17203, 17245), 'streamlit.write', 'st.write', (['"""first published March 10, 2021"""'], {}), "('first published March 10, 2021')\n", (17211, 17245), True, 'import streamlit as st\n'), ((17255, 17287), 'streamlit.beta_expander', 'st.beta_expander', (['"""Introduction"""'], {}), "('Introduction')\n", (17271, 17287), True, 'import streamlit as st\n'), ((17305, 17339), 'streamlit.beta_expander', 'st.beta_expander', (['"""Data selection"""'], {}), "('Data selection')\n", (17321, 17339), True, 'import streamlit as st\n'), ((17350, 17377), 'streamlit.beta_expander', 'st.beta_expander', (['"""Results"""'], {}), "('Results')\n", (17366, 17377), True, 'import streamlit as st\n'), ((17391, 17421), 'streamlit.beta_expander', 'st.beta_expander', (['"""Conclusion"""'], {}), "('Conclusion')\n", (17407, 17421), True, 'import streamlit as st\n'), ((17432, 17459), 'streamlit.beta_expander', 'st.beta_expander', (['"""Methods"""'], {}), "('Methods')\n", (17448, 17459), True, 'import streamlit as st\n'), ((17473, 17503), 'streamlit.beta_expander', 'st.beta_expander', (['"""Disclaimer"""'], {}), "('Disclaimer')\n", (17489, 17503), True, 'import streamlit as st\n'), ((17521, 17561), 'pandas.read_pickle', 'pd.read_pickle', (['"""data/quoted_spread.pkl"""'], {}), "('data/quoted_spread.pkl')\n", (17535, 17561), True, 'import pandas as pd\n'), ((18076, 18118), 'pandas.read_csv', 'pd.read_csv', (['"""etf.csv"""'], {'index_col': '"""Symbol"""'}), "('etf.csv', index_col='Symbol')\n", (18087, 18118), True, 'import pandas as pd\n'), ((3738, 3776), 'itertools.product', 'product', (['selected_dates', 'selected_etfs'], 
{}), '(selected_dates, selected_etfs)\n', (3745, 3776), False, 'from itertools import product\n'), ((4818, 4912), 'bokeh.models.Rect', 'Rect', ([], {'x': '"""x"""', 'y': '"""y"""', 'width': '"""w"""', 'height': '"""h"""', 'fill_color': '"""grey"""', 'fill_alpha': '(0.1)', 'line_width': '(0)'}), "(x='x', y='y', width='w', height='h', fill_color='grey', fill_alpha=0.1,\n line_width=0)\n", (4822, 4912), False, 'from bokeh.models import NumeralTickFormatter, DatetimeTickFormatter, Rect, ColumnDataSource, VBar, LabelSet\n'), ((4943, 5037), 'bokeh.models.Rect', 'Rect', ([], {'x': '"""x"""', 'y': '"""y"""', 'width': '"""w"""', 'height': '"""h"""', 'fill_color': '"""grey"""', 'fill_alpha': '(0.2)', 'line_width': '(0)'}), "(x='x', y='y', width='w', height='h', fill_color='grey', fill_alpha=0.2,\n line_width=0)\n", (4947, 5037), False, 'from bokeh.models import NumeralTickFormatter, DatetimeTickFormatter, Rect, ColumnDataSource, VBar, LabelSet\n'), ((5755, 5797), 'bokeh.models.NumeralTickFormatter', 'NumeralTickFormatter', ([], {'format': 'num_formatter'}), '(format=num_formatter)\n', (5775, 5797), False, 'from bokeh.models import NumeralTickFormatter, DatetimeTickFormatter, Rect, ColumnDataSource, VBar, LabelSet\n'), ((5822, 5858), 'bokeh.models.DatetimeTickFormatter', 'DatetimeTickFormatter', ([], {'hours': '"""%H:%M"""'}), "(hours='%H:%M')\n", (5843, 5858), False, 'from bokeh.models import NumeralTickFormatter, DatetimeTickFormatter, Rect, ColumnDataSource, VBar, LabelSet\n'), ((13020, 13056), 'bokeh.models.NumeralTickFormatter', 'NumeralTickFormatter', ([], {'format': '"""$0.00"""'}), "(format='$0.00')\n", (13040, 13056), False, 'from bokeh.models import NumeralTickFormatter, DatetimeTickFormatter, Rect, ColumnDataSource, VBar, LabelSet\n'), ((13905, 14001), 'bokeh.models.VBar', 'VBar', ([], {'x': '"""x"""', 'top': '"""top"""', 'bottom': '(0)', 'width': '(0.5)', 'fill_color': '"""grey"""', 'line_width': '(0)', 'fill_alpha': '(0.5)'}), "(x='x', top='top', bottom=0, 
width=0.5, fill_color='grey', line_width=0,\n fill_alpha=0.5)\n", (13909, 14001), False, 'from bokeh.models import NumeralTickFormatter, DatetimeTickFormatter, Rect, ColumnDataSource, VBar, LabelSet\n'), ((14033, 14132), 'bokeh.models.VBar', 'VBar', ([], {'x': '"""x"""', 'top': '"""top"""', 'bottom': '(0)', 'width': '(0.5)', 'fill_color': '"""firebrick"""', 'line_width': '(0)', 'fill_alpha': '(1)'}), "(x='x', top='top', bottom=0, width=0.5, fill_color='firebrick',\n line_width=0, fill_alpha=1)\n", (14037, 14132), False, 'from bokeh.models import NumeralTickFormatter, DatetimeTickFormatter, Rect, ColumnDataSource, VBar, LabelSet\n'), ((14232, 14299), 'bokeh.models.LabelSet', 'LabelSet', ([], {'x': '"""x"""', 'level': '"""glyph"""', 'source': 'source', 'render_mode': '"""canvas"""'}), "(x='x', level='glyph', source=source, render_mode='canvas')\n", (14240, 14299), False, 'from bokeh.models import NumeralTickFormatter, DatetimeTickFormatter, Rect, ColumnDataSource, VBar, LabelSet\n'), ((14590, 14632), 'bokeh.models.NumeralTickFormatter', 'NumeralTickFormatter', ([], {'format': 'num_formatter'}), '(format=num_formatter)\n', (14610, 14632), False, 'from bokeh.models import NumeralTickFormatter, DatetimeTickFormatter, Rect, ColumnDataSource, VBar, LabelSet\n'), ((5188, 5238), 'bokeh.models.tools.HoverTool', 'HoverTool', ([], {'tooltips': 'tooltips', 'renderers': '[box_rend]'}), '(tooltips=tooltips, renderers=[box_rend])\n', (5197, 5238), False, 'from bokeh.models.tools import HoverTool\n'), ((7839, 7909), 'bokeh.models.tools.HoverTool', 'HoverTool', ([], {'tooltips': 'tooltips', 'renderers': 'renders', 'formatters': 'formatters'}), '(tooltips=tooltips, renderers=renders, formatters=formatters)\n', (7848, 7909), False, 'from bokeh.models.tools import HoverTool\n'), ((10554, 10624), 'bokeh.models.tools.HoverTool', 'HoverTool', ([], {'tooltips': 'tooltips', 'renderers': 'renders', 'formatters': 'formatters'}), '(tooltips=tooltips, renderers=renders, 
formatters=formatters)\n', (10563, 10624), False, 'from bokeh.models.tools import HoverTool\n'), ((11498, 11526), 'pandas.to_datetime', 'pd.to_datetime', (['"""2021-01-01"""'], {}), "('2021-01-01')\n", (11512, 11526), True, 'import pandas as pd\n'), ((11529, 11562), 'pandas.Timedelta', 'pd.Timedelta', ([], {'hours': '(9)', 'minutes': '(30)'}), '(hours=9, minutes=30)\n', (11541, 11562), True, 'import pandas as pd\n'), ((12904, 12974), 'bokeh.models.tools.HoverTool', 'HoverTool', ([], {'tooltips': 'tooltips', 'renderers': 'renders', 'formatters': 'formatters'}), '(tooltips=tooltips, renderers=renders, formatters=formatters)\n', (12913, 12974), False, 'from bokeh.models.tools import HoverTool\n'), ((14384, 14430), 'bokeh.models.tools.HoverTool', 'HoverTool', ([], {'tooltips': 'tooltips', 'renderers': '[rend]'}), '(tooltips=tooltips, renderers=[rend])\n', (14393, 14430), False, 'from bokeh.models.tools import HoverTool\n'), ((11599, 11622), 'pandas.Timedelta', 'pd.Timedelta', ([], {'seconds': 'x'}), '(seconds=x)\n', (11611, 11622), True, 'import pandas as pd\n'), ((17099, 17118), 'streamlit_metrics.metric_row', 'metric_row', (['metrics'], {}), '(metrics)\n', (17109, 17118), False, 'from streamlit_metrics import metric_row\n'), ((5636, 5654), 'numpy.log10', 'np.log10', (['(1 / ymax)'], {}), '(1 / ymax)\n', (5644, 5654), True, 'import numpy as np\n'), ((7002, 7033), 'pandas.Timestamp', 'pd.Timestamp', (['"""2021-01-01 9:30"""'], {}), "('2021-01-01 9:30')\n", (7014, 7033), True, 'import pandas as pd\n'), ((9164, 9195), 'pandas.Timestamp', 'pd.Timestamp', (['"""2021-01-01 9:30"""'], {}), "('2021-01-01 9:30')\n", (9176, 9195), True, 'import pandas as pd\n'), ((11960, 11991), 'pandas.Timestamp', 'pd.Timestamp', (['"""2021-01-01 9:30"""'], {}), "('2021-01-01 9:30')\n", (11972, 11991), True, 'import pandas as pd\n'), ((16863, 16882), 'streamlit_metrics.metric_row', 'metric_row', (['metrics'], {}), '(metrics)\n', (16873, 16882), False, 'from streamlit_metrics import 
metric_row\n'), ((18825, 18856), 'pandas.Timestamp', 'pd.Timestamp', (['"""2021-01-01 9:30"""'], {}), "('2021-01-01 9:30')\n", (18837, 18856), True, 'import pandas as pd\n'), ((18909, 18941), 'pandas.Timestamp', 'pd.Timestamp', (['"""2021-01-01 16:00"""'], {}), "('2021-01-01 16:00')\n", (18921, 18941), True, 'import pandas as pd\n'), ((19122, 19145), 'pandas.Timedelta', 'pd.Timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (19134, 19145), True, 'import pandas as pd\n'), ((19321, 19352), 'pandas.Timestamp', 'pd.Timestamp', (['"""2021-01-01 9:30"""'], {}), "('2021-01-01 9:30')\n", (19333, 19352), True, 'import pandas as pd\n'), ((19405, 19437), 'pandas.Timestamp', 'pd.Timestamp', (['"""2021-01-01 16:00"""'], {}), "('2021-01-01 16:00')\n", (19417, 19437), True, 'import pandas as pd\n'), ((19616, 19639), 'pandas.Timedelta', 'pd.Timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (19628, 19639), True, 'import pandas as pd\n'), ((7046, 7069), 'pandas.Timedelta', 'pd.Timedelta', ([], {'hours': '(1.5)'}), '(hours=1.5)\n', (7058, 7069), True, 'import pandas as pd\n'), ((9208, 9231), 'pandas.Timedelta', 'pd.Timedelta', ([], {'hours': '(1.5)'}), '(hours=1.5)\n', (9220, 9231), True, 'import pandas as pd\n'), ((12004, 12027), 'pandas.Timedelta', 'pd.Timedelta', ([], {'hours': '(1.5)'}), '(hours=1.5)\n', (12016, 12027), True, 'import pandas as pd\n'), ((18991, 19023), 'pandas.Timestamp', 'pd.Timestamp', (['"""2021-01-01 10:00"""'], {}), "('2021-01-01 10:00')\n", (19003, 19023), True, 'import pandas as pd\n'), ((19041, 19073), 'pandas.Timestamp', 'pd.Timestamp', (['"""2021-01-01 10:15"""'], {}), "('2021-01-01 10:15')\n", (19053, 19073), True, 'import pandas as pd\n'), ((19487, 19518), 'pandas.Timestamp', 'pd.Timestamp', (['"""2021-01-01 9:30"""'], {}), "('2021-01-01 9:30')\n", (19499, 19518), True, 'import pandas as pd\n'), ((19536, 19567), 'pandas.Timestamp', 'pd.Timestamp', (['"""2021-01-01 9:45"""'], {}), "('2021-01-01 9:45')\n", (19548, 19567), True, 'import pandas as 
pd\n')] |
from scipy.io import loadmat
import numpy as np
import pyfftw
from scipy.special import erf
# Make `str(array)` print only the array's shape (repr is left untouched) --
# convenient when debugging large image stacks.
# NOTE(review): np.set_string_function is deprecated and removed in NumPy 2.0;
# confirm the pinned NumPy version before upgrading.
np.set_string_function(lambda a: str(a.shape), repr=False)
def mat_to_npy(file_name):
    """Load ``<file_name>.mat`` and return the variable stored under the key ``file_name``."""
    contents = loadmat(file_name + '.mat')
    return contents[file_name]
def mat_to_npy_vec(file_name):
    """Load ``<file_name>.mat`` and return its contents flattened to 1-D.

    Generalization: the original computed ``a.reshape(a.shape[0] * a.shape[1])``,
    which crashes for anything but exactly 2-D data; ``reshape(-1)`` flattens a
    MATLAB variable of any rank while producing identical results for 2-D input.
    """
    a = mat_to_npy(file_name)
    return a.reshape(-1)
def cart2rad(n):
    """Return the radius of every point of an n-by-n Cartesian grid centered on the origin."""
    n = np.floor(n)
    xs, ys = image_grid(n)
    return np.sqrt(xs ** 2 + ys ** 2)
def image_grid(n):
    """Coordinates of the points of an n-by-n Cartesian grid centered on the origin.

    The origin sits at the exact center for both odd and even n, so the
    coordinates run from -(n-1)/2 to +(n-1)/2 in unit steps.
    """
    half = (n - 1.0) / 2.0
    axis = np.linspace(-half, half, n)
    return np.meshgrid(axis, axis)
def normalize_background(stack):
    """Normalize the background of each image in a stack to mean 0 and std 1.

    The background of an image is the set of pixels outside a circle of
    radius n/2 (half the image side).  Mean and standard deviation are
    estimated from those pixels only, and each image is corrected
    independently, in place.

    Parameters
    ----------
    stack : array-like of shape (n_images, n, n)
        Square images to normalize; modified in place.

    Returns
    -------
    stack : the (modified) input stack
    mean_bg : np.ndarray
        Per-image background mean before normalization.
    sd_bg : np.ndarray
        Per-image background standard deviation (ddof=1) before normalization.

    Raises
    ------
    ValueError
        If the images are not square.
    """
    n_images = len(stack)
    m = np.shape(stack)[1]
    n = np.shape(stack)[2]
    if m != n:
        # Bug fix: the original constructed this exception without raising it.
        raise ValueError('Images in the stack must be square.')
    r = np.floor(n / 2)
    # Indices of background pixels: everything outside radius r from the center.
    ctr = (n + 1) / 2
    xv, yv = np.meshgrid(np.arange(1, n + 1), np.arange(1, n + 1))
    radii_sq = (xv - ctr) ** 2 + (yv - ctr) ** 2
    background_pixels_mask = radii_sq > r * r
    sd_bg = np.zeros(n_images)
    mean_bg = np.zeros(n_images)
    for kk in np.arange(n_images):
        proj = stack[kk]
        background_pixels = proj[background_pixels_mask]
        # Mean and sample standard deviation of the background pixels only.
        mm = np.mean(background_pixels)
        sd = np.std(background_pixels, ddof=1)
        stack[kk] = (proj - mm) / sd
        sd_bg[kk] = sd
        mean_bg[kk] = mm
    return stack, mean_bg, sd_bg
# TODO: remove this decorator once the underlying `mask` follows python
# conventions -- it exists because `mask` expects MATLAB-style input and does
# not support a single 2-D image directly.
def mask_decorator(images, is_stack=False, r=None, rise_time=None):
    """Apply a fuzzy circular mask, additionally accepting a single 2-D image.

    A lone 2-D image passed with is_stack=True is temporarily promoted to a
    one-image stack (trailing image axis) so `mask` can process it, then
    squeezed back to 2-D.

    Parameters mirror `mask`: `r` is the mask radius and `rise_time` the
    soft-edge width, both defaulting to fractions of the image side.
    """
    promote = images.ndim == 2 and is_stack  # idiom fix: was `is_stack == True`
    if promote:
        images = images[:, :, np.newaxis]
    images_masked = mask(images, is_stack, r, rise_time)
    if promote:
        images_masked = images_masked[:, :, 0]
    return images_masked
def mask(images, is_stack=False, r=None, rise_time=None):
    """Multiply images by a radially symmetric fuzzy (soft-edged) mask.

    Parameters
    ----------
    images : ndarray
        3-D array: with is_stack=True the last axis indexes the images of a
        stack, otherwise the array is treated as a volume.  (The final
        transpose assumes three axes.)
    r : int, optional
        Mask radius in pixels; defaults to floor(0.45 * n).
    rise_time : int, optional
        Width of the soft edge; defaults to floor(0.05 * n).

    Raises
    ------
    ValueError
        For unsupported dimensionality or non-square/non-cubic input.  The
        original code only marked these cases with `pass # raise error`,
        silently continuing into undefined behavior; this implements the
        clearly intended raises.
    """
    num_dims = images.ndim
    if num_dims < 2 or num_dims > 3:
        raise ValueError('images must be 2-D or 3-D, got %d-D' % num_dims)
    if is_stack and num_dims == 2:
        raise ValueError('a stack of images must be 3-D')
    if is_stack:
        num_dims = 2
    shape = images.shape[:num_dims]
    if num_dims == 2:
        if shape[0] != shape[1]:
            raise ValueError('images must be square')
    if num_dims == 3:
        if shape[0] != shape[1] or shape[0] != shape[2] or shape[1] != shape[2]:
            raise ValueError('volume must be cubic')
    n = shape[0]
    if r is None:
        r = int(np.floor(0.45 * n))
    if rise_time is None:
        rise_time = int(np.floor(0.05 * n))
    m = fuzzymask(n, num_dims, r, rise_time)
    # Broadcast the mask over the trailing image axis.
    out = (images.transpose(2, 0, 1) * m).transpose(1, 2, 0)
    return out
def fuzzymask(n, dims, r0, risetime, origin=None):
    """Build a soft-edged (error-function) radial mask of dimension `dims`.

    The mask is ~1 inside radius `r0`, ~0 outside, with an edge whose width
    is set by `risetime`.  Supplying several entries in `r0` produces an
    elliptical/ellipsoidal mask.

    Parameters
    ----------
    n : int or size-1/len-dims array, grid size per axis
    dims : int, 1, 2 or 3
    r0 : int or array, mask radius (per axis when len(r0) == dims)
    risetime : number, edge width scale
    origin : array, optional
        Center of the mask; defaults to the grid center (NOTE: for dims == 2
        the historical default is floor(n/2)+1 rather than (n+1)/2 -- kept
        for backward compatibility).

    Raises
    ------
    ValueError
        If `dims` is not 1, 2 or 3 (bug fix: the original silently
        returned 0 here, marked `# raise error`).
    """
    if isinstance(n, int):
        n = np.array([n])
    if isinstance(r0, int):
        r0 = np.array([r0])
    center = (n + 1.0) / 2
    k = 1.782 / risetime
    if dims == 1:
        if origin is None:
            origin = center
        origin = origin.astype('int')
        # NOTE(review): `n` is a size-1 array here; np.arange with an array
        # stop relies on size-1-array-to-scalar conversion -- verify on
        # recent NumPy versions.
        r = np.abs(np.arange(1 - origin[0], n - origin[0] + 1))
    elif dims == 2:
        if origin is None:
            origin = np.floor(n / 2) + 1
        origin = origin.astype('int')
        if len(n) == 1:
            x, y = np.mgrid[1 - origin[0]:n[0] - origin[0] + 1, 1 - origin[0]:n[0] - origin[0] + 1]
        else:
            x, y = np.mgrid[1 - origin[0]:n[0] - origin[0] + 1, 1 - origin[1]:n[1] - origin[1] + 1]
        if len(r0) < 2:
            r = np.sqrt(np.square(x) + np.square(y))
        else:
            # Elliptical mask: scale the y axis so the r0[0] contour matches.
            r = np.sqrt(np.square(x) + np.square(y * r0[0] / r0[1]))
    elif dims == 3:
        if origin is None:
            origin = center
        origin = origin.astype('int')
        if len(n) == 1:
            x, y, z = np.mgrid[1 - origin[0]:n[0] - origin[0] + 1, 1 - origin[0]:n[0] - origin[0] + 1, 1 - origin[0]:n[0] - origin[0] + 1]
        else:
            x, y, z = np.mgrid[1 - origin[0]:n[0] - origin[0] + 1, 1 - origin[1]:n[1] - origin[1] + 1, 1 - origin[2]:n[2] - origin[2] + 1]
        if len(r0) < 3:
            r = np.sqrt(np.square(x) + np.square(y) + np.square(z))
        else:
            r = np.sqrt(np.square(x) + np.square(y * r0[0] / r0[1]) + np.square(z * r0[0] / r0[2]))
    else:
        raise ValueError('dims must be 1, 2 or 3')
    # Smooth 1 -> 0 transition centered on r0[0]; erf keeps m within (0, 1).
    m = 0.5 * (1 - erf(k * (r - r0[0])))
    return m
# TODO: remove once the underlying `downsample` follows the python
# (image-index-first) axis convention.
def downsample_decorator(images, out_size, is_stack=True):
    """Downsample a stack given in python convention (image index first).

    `downsample` expects MATLAB-style ordering (image index last), so a
    python-ordered 3-D stack is transposed on the way in and back on the way
    out.  A single 2-D image with is_stack=True is promoted to a one-image
    stack.
    """
    if images.ndim == 3 and is_stack:  # idiom fix: was `is_stack == True`
        # Move to MATLAB convention.  NOTE(review): np.transpose returns a
        # non-contiguous view; copy here if downstream code needs contiguity.
        images = np.transpose(images, axes=(1, 2, 0))
        images_down = downsample(images, out_size, is_stack)
        # Back to python convention.
        return np.transpose(images_down, axes=(2, 0, 1))
    if images.ndim == 2 and is_stack:
        # Add a trailing image axis, as in the MATLAB convention.
        return downsample(images[:, :, np.newaxis], out_size, is_stack)
    return downsample(images, out_size, is_stack)
def downsample(images, out_size, is_stack=True):
    """Resample images to `out_size` by cropping/padding in Fourier space.

    MATLAB-style layout: when is_stack=True the *last* axis indexes the
    images.  Each image is FFT'd, its centered spectrum is cropped (to
    shrink) or zero-padded (to enlarge) to `out_size`, and the inverse FFT
    is scaled by the size ratio.

    NOTE(review): the result is returned as complex128 (no .real is taken),
    and the `down`/`up` flag names look inverted (`down` is True when the
    input is *smaller* than the output) -- confirm intent.
    """
    if not is_stack:
        images = np.expand_dims(images, 0)
    in_size = np.array(images.shape[:-1])
    # Broadcast a scalar out_size to one entry per spatial dimension.
    out_size = np.zeros(in_size.shape, dtype='int') + out_size
    num_dim = len(in_size)
    down = all(in_size < out_size)
    up = all(in_size > out_size)
    if not (down or up):
        if all(in_size == out_size):
            return images
        pass # raise error (mixed shrink/grow across axes is unsupported)
    if num_dim > 3:
        pass # raise error
    # Move the image index to the leading axis so each image can be
    # transformed in turn.
    if num_dim == 1:
        images = images.swapaxes(0, 1)
    elif num_dim == 2:
        images = images.swapaxes(0, 2)
        images = images.swapaxes(1, 2)
    else:
        images = images.swapaxes(0, 3)
        images = images.swapaxes(1, 3)
        images = images.swapaxes(2, 3)
    out = np.zeros([images.shape[0]] + out_size.tolist(), dtype='complex128')
    for i, image in enumerate(images):
        # Center the spectrum, resize it, then invert the transform.
        tmp = pyfftw.interfaces.numpy_fft.fftshift(pyfftw.interfaces.numpy_fft.fftn(image))
        out_tmp = pyfftw.interfaces.numpy_fft.ifftshift(crop(tmp, out_size, False))
        out[i] = pyfftw.interfaces.numpy_fft.ifftn(out_tmp)
    # Move the image index back to the trailing axis.
    if num_dim == 1:
        out = out.swapaxes(0, 1)
    elif num_dim == 2:
        out = out.swapaxes(1, 2)
        out = out.swapaxes(0, 2)
    else:
        out = out.swapaxes(2, 3)
        out = out.swapaxes(1, 3)
        out = out.swapaxes(0, 3)
    # Rescale so intensity is preserved under the size change.
    out = out.squeeze() * np.prod(out_size) * 1.0 / np.prod(in_size)
    return out
def crop(images, out_size, is_stack, fillval=0.0):
    """Crop (or pad) data around its center to `out_size`.

    MATLAB-style layout: the last axis indexes the images; only the leading
    1/2/3 spatial axes are resized.  When `out_size` is smaller than the
    input along every axis a centered region is cut out; when larger along
    every axis the data is centered inside an array filled with `fillval`.

    Parameters
    ----------
    images : ndarray, spatial axes first, image index last
    out_size : sequence of int, target sizes of the spatial axes
    is_stack : bool, whether the last axis is an image index
    fillval : float, padding value (default 0.0)

    Returns
    -------
    ndarray with spatial axes resized to `out_size`.

    Raises
    ------
    ValueError
        If the spatial rank exceeds 3, or the resize mixes cropping and
        padding across axes (unsupported; the original silently returned 0
        in these cases, marked `# raise error`).

    Bug fixes vs. the original: `elif num_dim == e:` (undefined name `e`,
    NameError for 3-D input) is now `== 3`, and the 3-D padding slice used
    `y_size` where the z extent was intended.
    """
    if is_stack:
        in_size = images.shape[:-1]
    else:
        in_size = images.shape
    num_images = images.shape[-1]
    num_dim = len(in_size)
    out_shape = list(out_size) + [num_images]

    def _offset(size, out):
        # Signed shift from input center to output center; >= 0 means
        # cropping along that axis, < 0 means padding.
        return int(np.floor(size * 1.0 / 2) - np.floor(out * 1.0 / 2))

    if num_dim == 1:
        nx = _offset(in_size[0], out_size[0])
        if nx >= 0:
            nc = images[nx:nx + out_size[0]]
        else:
            nc = np.zeros(out_shape) + fillval
            nc[-nx:in_size[0] - nx] = images
    elif num_dim == 2:
        nx = _offset(in_size[0], out_size[0])
        ny = _offset(in_size[1], out_size[1])
        if nx >= 0 and ny >= 0:
            nc = images[nx:nx + out_size[0], ny:ny + out_size[1]]
        elif nx < 0 and ny < 0:
            nc = np.zeros(out_shape) + fillval
            nc[-nx:in_size[0] - nx, -ny:in_size[1] - ny] = images
        else:
            raise ValueError('cannot mix cropping and padding across axes')
    elif num_dim == 3:  # bug fix: was `num_dim == e` (NameError)
        nx = _offset(in_size[0], out_size[0])
        ny = _offset(in_size[1], out_size[1])
        nz = _offset(in_size[2], out_size[2])
        if nx >= 0 and ny >= 0 and nz >= 0:
            nc = images[nx:nx + out_size[0], ny:ny + out_size[1], nz:nz + out_size[2]]
        elif nx < 0 and ny < 0 and nz < 0:
            nc = np.zeros(out_shape) + fillval
            # bug fix: z slice previously used y_size instead of the z extent
            nc[-nx:in_size[0] - nx, -ny:in_size[1] - ny, -nz:in_size[2] - nz] = images
        else:
            raise ValueError('cannot mix cropping and padding across axes')
    else:
        raise ValueError('crop supports only 1-, 2- or 3-D data')
    return nc
| [
"numpy.mean",
"numpy.prod",
"pyfftw.interfaces.numpy_fft.fftn",
"scipy.io.loadmat",
"numpy.floor",
"numpy.square",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"pyfftw.interfaces.numpy_fft.ifftn",
"scipy.special.erf",
"numpy.expand_dims",
"numpy.std",
"numpy.shape",
"numpy.transpose",... | [((502, 513), 'numpy.floor', 'np.floor', (['n'], {}), '(n)\n', (510, 513), True, 'import numpy as np\n'), ((1526, 1541), 'numpy.floor', 'np.floor', (['(n / 2)'], {}), '(n / 2)\n', (1534, 1541), True, 'import numpy as np\n'), ((1798, 1816), 'numpy.zeros', 'np.zeros', (['n_images'], {}), '(n_images)\n', (1806, 1816), True, 'import numpy as np\n'), ((1831, 1849), 'numpy.zeros', 'np.zeros', (['n_images'], {}), '(n_images)\n', (1839, 1849), True, 'import numpy as np\n'), ((1864, 1883), 'numpy.arange', 'np.arange', (['n_images'], {}), '(n_images)\n', (1873, 1883), True, 'import numpy as np\n'), ((6076, 6103), 'numpy.array', 'np.array', (['images.shape[:-1]'], {}), '(images.shape[:-1])\n', (6084, 6103), True, 'import numpy as np\n'), ((192, 219), 'scipy.io.loadmat', 'loadmat', (["(file_name + '.mat')"], {}), "(file_name + '.mat')\n", (199, 219), False, 'from scipy.io import loadmat\n'), ((836, 857), 'numpy.linspace', 'np.linspace', (['(-p)', 'p', 'n'], {}), '(-p, p, n)\n', (847, 857), True, 'import numpy as np\n'), ((859, 880), 'numpy.linspace', 'np.linspace', (['(-p)', 'p', 'n'], {}), '(-p, p, n)\n', (870, 880), True, 'import numpy as np\n'), ((1397, 1412), 'numpy.shape', 'np.shape', (['stack'], {}), '(stack)\n', (1405, 1412), True, 'import numpy as np\n'), ((1424, 1439), 'numpy.shape', 'np.shape', (['stack'], {}), '(stack)\n', (1432, 1439), True, 'import numpy as np\n'), ((1645, 1664), 'numpy.arange', 'np.arange', (['(1)', '(n + 1)'], {}), '(1, n + 1)\n', (1654, 1664), True, 'import numpy as np\n'), ((1666, 1685), 'numpy.arange', 'np.arange', (['(1)', '(n + 1)'], {}), '(1, n + 1)\n', (1675, 1685), True, 'import numpy as np\n'), ((2048, 2074), 'numpy.mean', 'np.mean', (['background_pixels'], {}), '(background_pixels)\n', (2055, 2074), True, 'import numpy as np\n'), ((2088, 2121), 'numpy.std', 'np.std', (['background_pixels'], {'ddof': '(1)'}), '(background_pixels, ddof=1)\n', (2094, 2121), True, 'import numpy as np\n'), ((3592, 3605), 
'numpy.array', 'np.array', (['[n]'], {}), '([n])\n', (3600, 3605), True, 'import numpy as np\n'), ((3648, 3662), 'numpy.array', 'np.array', (['[r0]'], {}), '([r0])\n', (3656, 3662), True, 'import numpy as np\n'), ((5346, 5382), 'numpy.transpose', 'np.transpose', (['images'], {'axes': '(1, 2, 0)'}), '(images, axes=(1, 2, 0))\n', (5358, 5382), True, 'import numpy as np\n'), ((5541, 5582), 'numpy.transpose', 'np.transpose', (['images_down'], {'axes': '(2, 0, 1)'}), '(images_down, axes=(2, 0, 1))\n', (5553, 5582), True, 'import numpy as np\n'), ((6035, 6060), 'numpy.expand_dims', 'np.expand_dims', (['images', '(0)'], {}), '(images, 0)\n', (6049, 6060), True, 'import numpy as np\n'), ((6119, 6155), 'numpy.zeros', 'np.zeros', (['in_size.shape'], {'dtype': '"""int"""'}), "(in_size.shape, dtype='int')\n", (6127, 6155), True, 'import numpy as np\n'), ((7028, 7070), 'pyfftw.interfaces.numpy_fft.ifftn', 'pyfftw.interfaces.numpy_fft.ifftn', (['out_tmp'], {}), '(out_tmp)\n', (7061, 7070), False, 'import pyfftw\n'), ((7377, 7393), 'numpy.prod', 'np.prod', (['in_size'], {}), '(in_size)\n', (7384, 7393), True, 'import numpy as np\n'), ((555, 567), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (564, 567), True, 'import numpy as np\n'), ((570, 582), 'numpy.square', 'np.square', (['y'], {}), '(y)\n', (579, 582), True, 'import numpy as np\n'), ((3287, 3305), 'numpy.floor', 'np.floor', (['(0.45 * n)'], {}), '(0.45 * n)\n', (3295, 3305), True, 'import numpy as np\n'), ((3358, 3376), 'numpy.floor', 'np.floor', (['(0.05 * n)'], {}), '(0.05 * n)\n', (3366, 3376), True, 'import numpy as np\n'), ((3851, 3894), 'numpy.arange', 'np.arange', (['(1 - origin[0])', '(n - origin[0] + 1)'], {}), '(1 - origin[0], n - origin[0] + 1)\n', (3860, 3894), True, 'import numpy as np\n'), ((5129, 5149), 'scipy.special.erf', 'erf', (['(k * (r - r0[0]))'], {}), '(k * (r - r0[0]))\n', (5132, 5149), False, 'from scipy.special import erf\n'), ((6886, 6925), 'pyfftw.interfaces.numpy_fft.fftn', 
'pyfftw.interfaces.numpy_fft.fftn', (['image'], {}), '(image)\n', (6918, 6925), False, 'import pyfftw\n'), ((7351, 7368), 'numpy.prod', 'np.prod', (['out_size'], {}), '(out_size)\n', (7358, 7368), True, 'import numpy as np\n'), ((7769, 7795), 'numpy.floor', 'np.floor', (['(x_size * 1.0 / 2)'], {}), '(x_size * 1.0 / 2)\n', (7777, 7795), True, 'import numpy as np\n'), ((7798, 7828), 'numpy.floor', 'np.floor', (['(out_x_size * 1.0 / 2)'], {}), '(out_x_size * 1.0 / 2)\n', (7806, 7828), True, 'import numpy as np\n'), ((7925, 7944), 'numpy.zeros', 'np.zeros', (['out_shape'], {}), '(out_shape)\n', (7933, 7944), True, 'import numpy as np\n'), ((3965, 3980), 'numpy.floor', 'np.floor', (['(n / 2)'], {}), '(n / 2)\n', (3973, 3980), True, 'import numpy as np\n'), ((8098, 8124), 'numpy.floor', 'np.floor', (['(x_size * 1.0 / 2)'], {}), '(x_size * 1.0 / 2)\n', (8106, 8124), True, 'import numpy as np\n'), ((8127, 8157), 'numpy.floor', 'np.floor', (['(out_x_size * 1.0 / 2)'], {}), '(out_x_size * 1.0 / 2)\n', (8135, 8157), True, 'import numpy as np\n'), ((8237, 8263), 'numpy.floor', 'np.floor', (['(y_size * 1.0 / 2)'], {}), '(y_size * 1.0 / 2)\n', (8245, 8263), True, 'import numpy as np\n'), ((8266, 8296), 'numpy.floor', 'np.floor', (['(out_y_size * 1.0 / 2)'], {}), '(out_y_size * 1.0 / 2)\n', (8274, 8296), True, 'import numpy as np\n'), ((4314, 4326), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (4323, 4326), True, 'import numpy as np\n'), ((4329, 4341), 'numpy.square', 'np.square', (['y'], {}), '(y)\n', (4338, 4341), True, 'import numpy as np\n'), ((4381, 4393), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (4390, 4393), True, 'import numpy as np\n'), ((4396, 4424), 'numpy.square', 'np.square', (['(y * r0[0] / r0[1])'], {}), '(y * r0[0] / r0[1])\n', (4405, 4424), True, 'import numpy as np\n'), ((8443, 8462), 'numpy.zeros', 'np.zeros', (['out_shape'], {}), '(out_shape)\n', (8451, 8462), True, 'import numpy as np\n'), ((8683, 8709), 'numpy.floor', 'np.floor', (['(x_size * 
1.0 / 2)'], {}), '(x_size * 1.0 / 2)\n', (8691, 8709), True, 'import numpy as np\n'), ((8712, 8742), 'numpy.floor', 'np.floor', (['(out_x_size * 1.0 / 2)'], {}), '(out_x_size * 1.0 / 2)\n', (8720, 8742), True, 'import numpy as np\n'), ((8822, 8848), 'numpy.floor', 'np.floor', (['(y_size * 1.0 / 2)'], {}), '(y_size * 1.0 / 2)\n', (8830, 8848), True, 'import numpy as np\n'), ((8851, 8881), 'numpy.floor', 'np.floor', (['(out_y_size * 1.0 / 2)'], {}), '(out_y_size * 1.0 / 2)\n', (8859, 8881), True, 'import numpy as np\n'), ((8961, 8987), 'numpy.floor', 'np.floor', (['(z_size * 1.0 / 2)'], {}), '(z_size * 1.0 / 2)\n', (8969, 8987), True, 'import numpy as np\n'), ((8990, 9020), 'numpy.floor', 'np.floor', (['(out_z_size * 1.0 / 2)'], {}), '(out_z_size * 1.0 / 2)\n', (8998, 9020), True, 'import numpy as np\n'), ((4939, 4951), 'numpy.square', 'np.square', (['z'], {}), '(z)\n', (4948, 4951), True, 'import numpy as np\n'), ((5037, 5065), 'numpy.square', 'np.square', (['(z * r0[0] / r0[2])'], {}), '(z * r0[0] / r0[2])\n', (5046, 5065), True, 'import numpy as np\n'), ((9210, 9229), 'numpy.zeros', 'np.zeros', (['out_shape'], {}), '(out_shape)\n', (9218, 9229), True, 'import numpy as np\n'), ((4909, 4921), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (4918, 4921), True, 'import numpy as np\n'), ((4924, 4936), 'numpy.square', 'np.square', (['y'], {}), '(y)\n', (4933, 4936), True, 'import numpy as np\n'), ((4991, 5003), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (5000, 5003), True, 'import numpy as np\n'), ((5006, 5034), 'numpy.square', 'np.square', (['(y * r0[0] / r0[1])'], {}), '(y * r0[0] / r0[1])\n', (5015, 5034), True, 'import numpy as np\n')] |
import pandas as pd
import torch
import numpy as np
import torch.nn as nn
import Pre_processing
# NOTE(review): the path is missing a separator after the drive letter
# ('C:data/...'); on Windows this resolves relative to the drive's current
# directory -- probably should be 'C:/data/coords.csv'.
df = pd.read_csv(r'C:data/coords.csv')
# Drop the last 10 rows in place.
df.drop(df.tail(10).index,inplace=True)
print(df.shape)
df_model = Pre_processing.Pre_process(df)
# Keep every column from the 5th onward -- presumably the landmark
# coordinates, with the first 4 columns being metadata/labels (TODO confirm).
x = df_model.iloc[:,4:].to_numpy()
# Reshape into (samples, 50 time steps, 66 features = 33 joints x 2 coords).
# NOTE(review): np.float was removed in NumPy 1.24+; use float or np.float64.
X = np.reshape(x,(-1,50,66)).astype(np.float)
n_hidden = 128      # LSTM hidden-state size
n_joints = 33*2     # input features per time step (33 joints, x and y)
n_categories = 2    # output classes
n_layer = 3         # stacked LSTM layers
class LSTM(nn.Module):
    """Sequence classifier: BatchNorm over the 50 time-step channels,
    a (possibly stacked) LSTM, and a linear head.

    Expects input of shape (batch, 50, input_dim) -- BatchNorm1d(50) treats
    the 50 time steps as channels -- and reads the prediction from the LSTM
    output at the final time step.
    """

    def __init__(self, input_dim, hidden_dim, output_dim, layer_num):
        super(LSTM, self).__init__()
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        # Submodule creation order kept identical for reproducible init.
        self.lstm = nn.LSTM(input_dim, hidden_dim, layer_num, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)
        self.bn = nn.BatchNorm1d(50)

    def forward(self, inputs):
        normed = self.bn(inputs)
        sequence_out, _ = self.lstm(normed)
        last_step = sequence_out[:, -1, :]
        return self.fc(last_step)
# Run inference on the GPU when one is available.
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
rnn = LSTM(n_joints,n_hidden,n_categories,n_layer)
rnn.to(device)
# NOTE(review): X is still a NumPy array here (np.reshape(...).astype above),
# so X.to(device) raises AttributeError -- it should be converted first,
# e.g. X = torch.from_numpy(X).float().to(device).
X=X.to(device)
# Load trained weights from disk (hard-coded absolute path).
rnn.load_state_dict(torch.load('C:/Users/austi/datajam/python-computer-vision/scripts/lstm_6_bn.pkl'))
rnn.eval()
# NOTE(review): wrapping this in torch.no_grad() would avoid building an
# unnecessary autograd graph during inference.
prediction = rnn(X)
print(prediction)
"numpy.reshape",
"pandas.read_csv",
"Pre_processing.Pre_process",
"torch.nn.LSTM",
"torch.load",
"torch.nn.BatchNorm1d",
"torch.cuda.is_available",
"torch.nn.Linear",
"torch.device"
] | [((103, 135), 'pandas.read_csv', 'pd.read_csv', (['"""C:data/coords.csv"""'], {}), "('C:data/coords.csv')\n", (114, 135), True, 'import pandas as pd\n'), ((206, 236), 'Pre_processing.Pre_process', 'Pre_processing.Pre_process', (['df'], {}), '(df)\n', (232, 236), False, 'import Pre_processing\n'), ((964, 989), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (987, 989), False, 'import torch\n'), ((940, 960), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (952, 960), False, 'import torch\n'), ((995, 1014), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1007, 1014), False, 'import torch\n'), ((1118, 1204), 'torch.load', 'torch.load', (['"""C:/Users/austi/datajam/python-computer-vision/scripts/lstm_6_bn.pkl"""'], {}), "(\n 'C:/Users/austi/datajam/python-computer-vision/scripts/lstm_6_bn.pkl')\n", (1128, 1204), False, 'import torch\n'), ((278, 305), 'numpy.reshape', 'np.reshape', (['x', '(-1, 50, 66)'], {}), '(x, (-1, 50, 66))\n', (288, 305), True, 'import numpy as np\n'), ((607, 672), 'torch.nn.LSTM', 'torch.nn.LSTM', (['input_dim', 'hidden_dim', 'layer_num'], {'batch_first': '(True)'}), '(input_dim, hidden_dim, layer_num, batch_first=True)\n', (620, 672), False, 'import torch\n'), ((688, 727), 'torch.nn.Linear', 'torch.nn.Linear', (['hidden_dim', 'output_dim'], {}), '(hidden_dim, output_dim)\n', (703, 727), False, 'import torch\n'), ((745, 763), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(50)'], {}), '(50)\n', (759, 763), True, 'import torch.nn as nn\n')] |
from sklearn.metrics import mean_squared_error
import numpy as np
def mse(A, B):
    """Mean squared error between arrays A and B over all elements."""
    diff = A - B
    return (diff ** 2).mean(axis=None)
from scipy.stats import spearmanr
def spearman_rank(A, B):
    """Average Spearman rank correlation over paired rows of A and B.

    Each A[i] is correlated with the corresponding B[i] (flattened, via
    axis=None) and the correlations are averaged over all pairs.

    Raises
    ------
    ValueError
        If A and B have different lengths (robustness: the original indexed
        B[i] and would fail with an IndexError).
    ZeroDivisionError
        If A is empty (unchanged from the original).
    """
    if len(A) != len(B):
        raise ValueError('A and B must contain the same number of rows')
    total = 0.0
    # Idiomatic pairing instead of `for i in range(len(A))`.
    for a_row, b_row in zip(A, B):
        total += spearmanr(a_row, b_row, axis=None)[0]
    return total / len(A)
"scipy.stats.spearmanr",
"numpy.square"
] | [((94, 110), 'numpy.square', 'np.square', (['(A - B)'], {}), '(A - B)\n', (103, 110), True, 'import numpy as np\n'), ((252, 284), 'scipy.stats.spearmanr', 'spearmanr', (['A[i]', 'B[i]'], {'axis': 'None'}), '(A[i], B[i], axis=None)\n', (261, 284), False, 'from scipy.stats import spearmanr\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 5 06:36:36 2017
@author: Salem
This script takes the resulting mesh and spring constants from the design process and tests it by applying forces and checking if the
desired mode comes out.
The energy used here does not assume linear displacements so we only expect agreement for small enough forces. We will see what happens
when the force becomes too large.
"""
import numpy as np
import scipy.optimize as op
import numpy.random as npr
import numpy.linalg as la
import Many_Triangles as MT
import LatticeMaking as LM
import importlib
importlib.reload(LM)
importlib.reload(MT)
#====================================================================================================================================
# energy of the configuration given by vertices
#====================================================================================================================================
def elastic_energy(verts, edges, spring_constants, sqr_reference_lengths, force):
    """Elastic energy of a spring network, minus the work of an external force.

    Each edge e = (i, j) contributes (k_e**2 / 8) * (|r_j - r_i|**2 - L_e**2)**2,
    where L_e**2 is the squared rest length; the external force contributes
    -force . verts.

    Parameters
    ----------
    verts : flattened (2 * num_verts,) vertex positions
    edges : (num_edges, 2) integer pairs of vertex indices
    spring_constants, sqr_reference_lengths : per-edge values
    force : flattened (2 * num_verts,) external force

    Returns
    -------
    float : total energy.

    Improvements vs. the original: the per-edge python loop is vectorized
    over edges (same arithmetic, evaluated with numpy), and the
    commented-out debug prints were removed.
    """
    # vertices is the unflattened view of verts
    vertices = verts.reshape((verts.size // 2, 2))
    edges = np.asarray(edges)
    k = np.asarray(spring_constants)
    sqr_rest = np.asarray(sqr_reference_lengths)
    separations = vertices[edges[:, 1]] - vertices[edges[:, 0]]
    stretches = np.sum(separations ** 2, axis=1) - sqr_rest
    energy = np.sum((k ** 2 / 8.0) * stretches ** 2)
    # Subtract the work done by the applied external force.
    return energy - np.dot(force, verts)
#====================================================================================================================================
def apply_force(vertices, edges, spring_constants, force):
    '''
    Applies the force to the mesh given by vertices and edges and returns the
    new vertex positions, centered on their mean.

    The reference (rest) lengths are taken from the current vertex positions,
    so the mesh is stress-free before the force is applied.  The force is first
    projected onto the complement of the rigid-body (Euclidean) motions so the
    minimization is well posed, then the nonlinear elastic energy is minimized
    with BFGS.

    Bug fix: the rigid-transform helpers were previously called unqualified
    (``get_rigid_transformations`` / ``get_complement_space``), which raises
    NameError because this module only imports them via
    ``import LatticeMaking as LM``.  They are now called through ``LM``,
    consistent with the usage in test_wave_changer.
    '''
    # project out the euclidean transforms so the force cannot excite them
    euclid_transforms = LM.get_rigid_transformations(vertices)
    euclid_projector = LM.get_complement_space(euclid_transforms)

    projected_force = np.dot(euclid_projector, force.flatten())

    # squared distances between bonded vertices: the rest lengths of the bonds
    sqr_reference_lengths = np.sum((vertices[edges[:, 1]] - vertices[edges[:, 0]]) ** 2, axis=1)

    res = op.minimize(elastic_energy, vertices.flatten(), method='BFGS',
                      args=(edges, spring_constants, sqr_reference_lengths, projected_force),
                      options={'disp': False})

    new_verts = res.x.reshape(vertices.shape)
    # re-center so that rigid translations picked up by the optimizer vanish
    return new_verts - np.average(new_verts, axis=0)
def test_wave_changer(result=None):
    '''
    Sanity check for the designed mesh: push on it with a (mostly random)
    force and return the normalized, rigid-motion-free displacement field,
    printing the overlap of the force with the lowest retained mode.
    '''
    if result is None:
        result = MT.wave_changer()

    mesh_verts = result[1][0].copy()
    mesh_edges = result[1][1].copy()
    n_verts = mesh_verts.shape[0]

    # random unit force, with a fixed push/pull pattern on the first two vertices
    push = LM.normalizeVec(npr.rand(n_verts * 2))
    push[:4] = [0, 1, 0, -1]

    spring_k = result[2].copy()

    dyn_mat = LM.makeDynamicalMat(verts=mesh_verts, edgeArray=mesh_edges, springK=spring_k)
    lowest_mode = LM.normalizeVec(la.eigh(dyn_mat)[1][:, 3])
    print("force along lowest: ", np.dot(lowest_mode, push))

    push = push.reshape((n_verts, 2))
    displaced = apply_force(mesh_verts, mesh_edges, spring_k, push)
    disps = (displaced - mesh_verts).flatten()

    # project out the euclidean transforms before normalizing
    rigid_basis = LM.get_rigid_transformations(mesh_verts)
    rigid_projector = LM.get_complement_space(rigid_basis)
    return LM.normalizeVec(np.dot(rigid_projector, disps))
| [
"numpy.random.rand",
"numpy.average",
"numpy.sum",
"numpy.dot",
"LatticeMaking.get_complement_space",
"importlib.reload",
"numpy.linalg.eigh",
"Many_Triangles.wave_changer",
"LatticeMaking.get_rigid_transformations",
"LatticeMaking.makeDynamicalMat"
] | [((593, 613), 'importlib.reload', 'importlib.reload', (['LM'], {}), '(LM)\n', (609, 613), False, 'import importlib\n'), ((614, 634), 'importlib.reload', 'importlib.reload', (['MT'], {}), '(MT)\n', (630, 634), False, 'import importlib\n'), ((2833, 2901), 'numpy.sum', 'np.sum', (['((vertices[edges[:, 1]] - vertices[edges[:, 0]]) ** 2)'], {'axis': '(1)'}), '((vertices[edges[:, 1]] - vertices[edges[:, 0]]) ** 2, axis=1)\n', (2839, 2901), True, 'import numpy as np\n'), ((3576, 3639), 'LatticeMaking.makeDynamicalMat', 'LM.makeDynamicalMat', ([], {'verts': 'vertices', 'edgeArray': 'edges', 'springK': 'k'}), '(verts=vertices, edgeArray=edges, springK=k)\n', (3595, 3639), True, 'import LatticeMaking as LM\n'), ((4019, 4057), 'LatticeMaking.get_rigid_transformations', 'LM.get_rigid_transformations', (['vertices'], {}), '(vertices)\n', (4047, 4057), True, 'import LatticeMaking as LM\n'), ((4081, 4123), 'LatticeMaking.get_complement_space', 'LM.get_complement_space', (['euclid_transforms'], {}), '(euclid_transforms)\n', (4104, 4123), True, 'import LatticeMaking as LM\n'), ((1967, 1987), 'numpy.dot', 'np.dot', (['force', 'verts'], {}), '(force, verts)\n', (1973, 1987), True, 'import numpy as np\n'), ((3191, 3220), 'numpy.average', 'np.average', (['new_verts'], {'axis': '(0)'}), '(new_verts, axis=0)\n', (3201, 3220), True, 'import numpy as np\n'), ((3296, 3313), 'Many_Triangles.wave_changer', 'MT.wave_changer', ([], {}), '()\n', (3311, 3313), True, 'import Many_Triangles as MT\n'), ((3461, 3487), 'numpy.random.rand', 'npr.rand', (['(num_of_verts * 2)'], {}), '(num_of_verts * 2)\n', (3469, 3487), True, 'import numpy.random as npr\n'), ((3748, 3778), 'numpy.dot', 'np.dot', (['lowestEigVector', 'force'], {}), '(lowestEigVector, force)\n', (3754, 3778), True, 'import numpy as np\n'), ((4157, 4188), 'numpy.dot', 'np.dot', (['euclid_projector', 'disps'], {}), '(euclid_projector, disps)\n', (4163, 4188), True, 'import numpy as np\n'), ((3683, 3699), 'numpy.linalg.eigh', 'la.eigh', 
(['dyn_mat'], {}), '(dyn_mat)\n', (3690, 3699), True, 'import numpy.linalg as la\n'), ((1795, 1818), 'numpy.sum', 'np.sum', (['(separation ** 2)'], {}), '(separation ** 2)\n', (1801, 1818), True, 'import numpy as np\n')] |
# Dominant-spatial-frequency map of |D|: for every interior pixel of the
# field magnitude, a Welch power spectrum is computed over a small
# surrounding window and the peak frequency is recorded, then both the
# field and the frequency map are plotted.
import sys
cmd_folder = "../../../vis"
if cmd_folder not in sys.path:
    sys.path.insert(0, cmd_folder)
from get_hdf5_data import ReadHDF5
import numpy as np
from scipy import fftpack
from scipy import signal
import pylab as plt
from matplotlib.image import NonUniformImage
from multiprocessing import Pool
#==============================================================================
# Load the data
#==============================================================================
# plt_file = str(sys.argv[1])
plt_file = "TRMI"
# spatial window of interest: [[xmin, xmax], [ymin, ymax]]
window = [[-0.5, 1.5], [0,1]]
# get a list of all the files in this directory
files = ReadHDF5.get_files('.', include=[plt_file], exclude=["temp", ".png", "inputs"], times=[], tol=1e-4, get_all=True)
# read the three components of the D field from the most recent file
rh5 = ReadHDF5(files[-1], max_level=-1, limits=window)
x, y, Dx = rh5.expression("{x_D-field}")
x, y, Dy = rh5.expression("{y_D-field}")
x, y, Dz = rh5.expression("{z_D-field}")
# field magnitude |D|
z = np.sqrt(Dx**2 + Dy**2 + Dz**2)
rh5.close()
# =============================================================================
# Sliding-window Welch analysis
# =============================================================================
ni, nj = z.shape
n = 8  # window size in cells (also the Welch segment length)
dx = x[1]-x[0]
# NaN where the window does not fit (image borders stay unset)
ff = np.zeros(z.shape)*np.nan
o = int(n/2)
for i in range(o,ni-o):
    for j in range(o,nj-o):
        grab = z[i-o:i+o,j-o:j+o]
        # power spectra along each axis, then summed across the other axis
        f, wx = signal.welch(grab,dx,axis=0,nperseg=n)
        f, wy = signal.welch(grab,dx,axis=1,nperseg=n)
        wx = np.sum(wx,axis=1)
        wy = np.sum(wy,axis=0)
        w = wx+wy
        I = np.argmax(w)
        # print("w=",w)
        # print("I=",I)
        # frequency carrying the most power inside this window
        max_pwr_freq = f[I]
        # print("max of %g @ f = %g"%(w[I],f[I]))
        ff[i,j] = max_pwr_freq
# f = fftpack.fft2(z)
# f = np.log(np.abs(f))
# =============================================================================
# Plot |D| (top panel) and the dominant-frequency map (bottom panel)
# =============================================================================
fig = plt.figure(figsize=(6,6))
### top panel: field magnitude
ax = fig.add_subplot(211)
im = NonUniformImage(ax, interpolation='bilinear', extent=np.ravel(window),
                     cmap="viridis")
im.set_data(x, y, z.T)
ax.images.append(im)
plt.colorbar(im, label=r"$\left| \mathbf{D} \right|$", orientation="vertical")
ax.set_xlim(window[0][0], window[0][1])
ax.set_ylim(window[1][0], window[1][1])
ax.set_xlabel(r"$x$")
ax.set_ylabel(r"$y$")
ax.set_aspect(1)
### bottom panel: dominant frequency
ax = fig.add_subplot(212)
im = NonUniformImage(ax, interpolation='bilinear', extent=np.ravel(window),
                     cmap="viridis")
im.set_data(x, y, ff.T)
ax.images.append(im)
plt.colorbar(im, label=r"$\bar{f}$", orientation="vertical")
ax.set_xlim(window[0][0], window[0][1])
ax.set_ylim(window[1][0], window[1][1])
ax.set_xlabel(r"$x$")
ax.set_ylabel(r"$y$")
ax.set_aspect(1)
fig.tight_layout()
fig.savefig(plt_file+"_freq.png", dpi=300)
plt.close(fig)
print("DONE")
| [
"get_hdf5_data.ReadHDF5.get_files",
"sys.path.insert",
"numpy.sqrt",
"scipy.signal.welch",
"get_hdf5_data.ReadHDF5",
"numpy.argmax",
"pylab.close",
"pylab.figure",
"numpy.sum",
"numpy.zeros",
"pylab.colorbar",
"numpy.ravel"
] | [((617, 736), 'get_hdf5_data.ReadHDF5.get_files', 'ReadHDF5.get_files', (['"""."""'], {'include': '[plt_file]', 'exclude': "['temp', '.png', 'inputs']", 'times': '[]', 'tol': '(0.0001)', 'get_all': '(True)'}), "('.', include=[plt_file], exclude=['temp', '.png',\n 'inputs'], times=[], tol=0.0001, get_all=True)\n", (635, 736), False, 'from get_hdf5_data import ReadHDF5\n'), ((766, 814), 'get_hdf5_data.ReadHDF5', 'ReadHDF5', (['files[-1]'], {'max_level': '(-1)', 'limits': 'window'}), '(files[-1], max_level=-1, limits=window)\n', (774, 814), False, 'from get_hdf5_data import ReadHDF5\n'), ((944, 980), 'numpy.sqrt', 'np.sqrt', (['(Dx ** 2 + Dy ** 2 + Dz ** 2)'], {}), '(Dx ** 2 + Dy ** 2 + Dz ** 2)\n', (951, 980), True, 'import numpy as np\n'), ((1927, 1953), 'pylab.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (1937, 1953), True, 'import pylab as plt\n'), ((2142, 2227), 'pylab.colorbar', 'plt.colorbar', (['im'], {'label': '"""$\\\\left| \\\\mathbf{D} \\\\right|$"""', 'orientation': '"""vertical"""'}), "(im, label='$\\\\left| \\\\mathbf{D} \\\\right|$', orientation='vertical'\n )\n", (2154, 2227), True, 'import pylab as plt\n'), ((2553, 2613), 'pylab.colorbar', 'plt.colorbar', (['im'], {'label': '"""$\\\\bar{f}$"""', 'orientation': '"""vertical"""'}), "(im, label='$\\\\bar{f}$', orientation='vertical')\n", (2565, 2613), True, 'import pylab as plt\n'), ((2820, 2834), 'pylab.close', 'plt.close', (['fig'], {}), '(fig)\n', (2829, 2834), True, 'import pylab as plt\n'), ((75, 105), 'sys.path.insert', 'sys.path.insert', (['(0)', 'cmd_folder'], {}), '(0, cmd_folder)\n', (90, 105), False, 'import sys\n'), ((1199, 1216), 'numpy.zeros', 'np.zeros', (['z.shape'], {}), '(z.shape)\n', (1207, 1216), True, 'import numpy as np\n'), ((1343, 1384), 'scipy.signal.welch', 'signal.welch', (['grab', 'dx'], {'axis': '(0)', 'nperseg': 'n'}), '(grab, dx, axis=0, nperseg=n)\n', (1355, 1384), False, 'from scipy import signal\n'), ((1398, 1439), 'scipy.signal.welch', 
'signal.welch', (['grab', 'dx'], {'axis': '(1)', 'nperseg': 'n'}), '(grab, dx, axis=1, nperseg=n)\n', (1410, 1439), False, 'from scipy import signal\n'), ((1451, 1469), 'numpy.sum', 'np.sum', (['wx'], {'axis': '(1)'}), '(wx, axis=1)\n', (1457, 1469), True, 'import numpy as np\n'), ((1482, 1500), 'numpy.sum', 'np.sum', (['wy'], {'axis': '(0)'}), '(wy, axis=0)\n', (1488, 1500), True, 'import numpy as np\n'), ((1532, 1544), 'numpy.argmax', 'np.argmax', (['w'], {}), '(w)\n', (1541, 1544), True, 'import numpy as np\n'), ((2043, 2059), 'numpy.ravel', 'np.ravel', (['window'], {}), '(window)\n', (2051, 2059), True, 'import numpy as np\n'), ((2453, 2469), 'numpy.ravel', 'np.ravel', (['window'], {}), '(window)\n', (2461, 2469), True, 'import numpy as np\n')] |
import datetime
import os
from datetime import timedelta
import numpy
from esdl.cube_provider import NetCDFCubeSourceProvider
# Variable metadata for the GLEAM v3 dataset, keyed by the short variable code
# used in the source NetCDF file names (e.g. 'E' for evaporation).  Each entry
# maps the cube variable name to its CF-style attributes (units, long/standard
# name, provenance).  GleamProvider selects one entry via its ``var`` argument.
all_vars_descr = {'E': {
    'evaporation': {
        'source_name': 'E',
        'data_type': numpy.float32,
        'fill_value': numpy.nan,
        'units': 'mm/day',
        'long_name': 'Evaporation',
        'standard_name': 'water_evaporation_flux',
        'url': 'http://www.gleam.eu',
        'references': '<NAME>., <NAME>., <NAME>., <NAME> '
                      '<NAME>., <NAME>., <NAME>.,'
                      ' <NAME>., <NAME>., and <NAME>.: '
                      'GLEAM v3: satellite-based land evaporation and root-zone'
                      ' soil moisture, Geoscientific Model Development, '
                      '10, 1903–1925, 2017.',
        'project_name' : 'GLEAM',
    }},
    'S': {
    'evaporative_stress': {
        'source_name': 'S',
        'data_type': numpy.float32,
        'fill_value': numpy.nan,
        'units': '',
        'long_name': 'Evaporative Stress Factor',
        'standard_name': 'evaporative_stress_factor',
        'references': '<NAME>., <NAME>., <NAME>., <NAME> '
                      '<NAME>., <NAME>., <NAME>.,'
                      ' <NAME>., <NAME>., and <NAME>.: '
                      'GLEAM v3: satellite-based land evaporation and root-zone'
                      ' soil moisture, Geoscientific Model Development, '
                      '10, 1903–1925, 2017.',
        'url': 'http://www.gleam.eu',
        'project_name' : 'GLEAM',
    }},
    'Ep': {
    'potential_evaporation': {
        'source_name': 'Ep',
        'data_type': numpy.float32,
        'fill_value': numpy.nan,
        'units': 'mm/day',
        'long_name': 'Potential Evaporation',
        'standard_name': 'potential_water_evaporation_flux',
        'url': 'http://www.gleam.eu',
        'references': '<NAME>., <NAME>., <NAME>., <NAME> '
                      '<NAME>., <NAME>.,'
                      ' <NAME>., <NAME>., and <NAME>.: '
                      'GLEAM v3: satellite-based land evaporation and root-zone'
                      ' soil moisture, Geoscientific Model Development, '
                      '10, 1903–1925, 2017.',
        'project_name' : 'GLEAM',
    }},
    'Ei': {
    'interception_loss': {
        'source_name': 'Ei',
        'data_type': numpy.float32,
        'fill_value': numpy.nan,
        'units': 'mm/day',
        'long_name': 'Interception Loss',
        'standard_name': 'interception_loss',
        'url': 'http://www.gleam.eu',
        'references': '<NAME>., <NAME>., <NAME>., <NAME> '
                      '<NAME>., <NAME>.,'
                      ' <NAME>., <NAME>., and <NAME>.: '
                      'GLEAM v3: satellite-based land evaporation and root-zone'
                      ' soil moisture, Geoscientific Model Development, '
                      '10, 1903–1925, 2017.',
        'project_name' : 'GLEAM',
    }},
    'SMroot': {
    'root_moisture': {
        'source_name': 'SMroot',
        'data_type': numpy.float32,
        'fill_value': numpy.nan,
        'units': 'm3/m3',
        'long_name': 'Root-Zone Soil Moisture',
        'standard_name': 'soil_moisture_content',
        'url': 'http://www.gleam.eu',
        'references': '<NAME>., <NAME>., <NAME> '
                      '<NAME>., <NAME>.,'
                      ' <NAME>., <NAME>., and <NAME>.: '
                      'GLEAM v3: satellite-based land evaporation and root-zone'
                      ' soil moisture, Geoscientific Model Development, '
                      '10, 1903–1925, 2017.',
        'project_name' : 'GLEAM',
    }},
    'SMsurf': {
    'surface_moisture': {
        'source_name': 'SMsurf',
        'data_type': numpy.float32,
        'fill_value': numpy.nan,
        'units': 'mm3/mm3',
        'long_name': 'Surface Soil Moisture',
        'standard_name': 'soil_moisture_content',
        'url': 'http://www.gleam.eu',
        'references': '<NAME>., <NAME>., <NAME>., <NAME> '
                      '<NAME>., <NAME>.,'
                      ' <NAME>., <NAME>., and <NAME>.: '
                      'GLEAM v3: satellite-based land evaporation and root-zone'
                      ' soil moisture, Geoscientific Model Development, '
                      '10, 1903–1925, 2017.',
        'project_name' : 'GLEAM',
    }},
    'Eb': {
    'bare_soil_evaporation': {
        'source_name': 'Eb',
        'data_type': numpy.float32,
        'fill_value': numpy.nan,
        'units': 'mm/day',
        'long_name': 'Bare Soil Evaporation',
        'standard_name': 'bare_soil_water_evaporation_flux',
        'url': 'http://www.gleam.eu',
        'references': '<NAME>., <NAME>., <NAME>., <NAME> '
                      '<NAME>., <NAME>.,'
                      ' <NAME>., <NAME>., and <NAME>.: '
                      'GLEAM v3: satellite-based land evaporation and root-zone'
                      ' soil moisture, Geoscientific Model Development, '
                      '10, 1903–1925, 2017.',
        'project_name' : 'GLEAM',
    }},
    'Es': {
    'snow_sublimation': {
        'source_name': 'Es',
        'data_type': numpy.float32,
        'fill_value': numpy.nan,
        'units': 'mm/day',
        'long_name': 'Snow Sublimation',
        'standard_name': 'snow_sublimation_flux',
        'url': 'http://www.gleam.eu',
        'references': '<NAME>., <NAME>., <NAME>., <NAME> '
                      '<NAME>., <NAME>.,'
                      ' <NAME>., <NAME>., and <NAME>.: '
                      'GLEAM v3: satellite-based land evaporation and root-zone'
                      ' soil moisture, Geoscientific Model Development, '
                      '10, 1903–1925, 2017.',
        'project_name' : 'GLEAM',
    }},
    'Et': {
    'transpiration': {
        'source_name': 'Et',
        'data_type': numpy.float32,
        'fill_value': numpy.nan,
        'units': 'mm/day',
        'long_name': 'Transpiration',
        'standard_name': 'transpiration_flux',
        'url': 'http://www.gleam.eu',
        'references': '<NAME>., <NAME>., <NAME>., <NAME> '
                      '<NAME>., <NAME>.,'
                      ' <NAME>., <NAME>., and <NAME>.: '
                      'GLEAM v3: satellite-based land evaporation and root-zone'
                      ' soil moisture, Geoscientific Model Development, '
                      '10, 1903–1925, 2017.',
        'project_name' : 'GLEAM',
    }},
    'Ew': {
    'open_water_evaporation': {
        'source_name': 'Ew',
        'data_type': numpy.float32,
        'fill_value': numpy.nan,
        'units': 'mm/day',
        'long_name': 'Open-water Evaporation',
        'standard_name': 'water_evaporation_flux',
        'url': 'http://www.gleam.eu',
        'references': '<NAME>., <NAME>., <NAME>., <NAME> '
                      '<NAME>., <NAME>., <NAME>.,'
                      ' <NAME>., <NAME>., and <NAME>.: '
                      'GLEAM v3: satellite-based land evaporation and root-zone'
                      ' soil moisture, Geoscientific Model Development, '
                      '10, 1903–1925, 2017.',
        'project_name' : 'GLEAM',
    }},
}
class GleamProvider(NetCDFCubeSourceProvider):
    """Cube source provider for a single GLEAM v3 variable.

    The variable is selected with the ``var`` constructor argument, which
    must be one of the keys of ``all_vars_descr`` (e.g. 'E', 'SMroot').
    """

    def __init__(self, cube_config, name='GLEAM', dir=None, resampling_order=None, var=None):
        super(GleamProvider, self).__init__(cube_config, name, dir, resampling_order)
        # short GLEAM variable code, used to pick metadata and match file names
        self.var_name = var
        self.old_indices = None

    @property
    def variable_descriptors(self):
        # metadata dictionary for the configured variable only
        return all_vars_descr[self.var_name]

    def compute_source_time_ranges(self):
        """Scan the source directory tree and collect one
        (start_time, end_time, file, time_index) tuple per daily time step
        inside the configured cube period, sorted by start time.

        Source files are expected in sub-directories named after the year they
        contain, with the variable code followed by '_' in the file name.
        """
        source_time_ranges = []
        for root, sub_dirs, files in os.walk(self.dir_path):
            for sub_dir in sub_dirs:
                # NOTE(review): int(sub_dir) raises ValueError for any
                # non-numeric directory name -- confirm the tree is clean
                source_year = int(sub_dir)
                if self.cube_config.start_time.year <= source_year <= self.cube_config.end_time.year:
                    sub_dir_path = os.path.join(self.dir_path, sub_dir)
                    file_names = os.listdir(sub_dir_path)
                    for file_name in file_names:
                        if self.var_name + '_' in file_name:
                            # normalize Windows separators in the stored path
                            file = os.path.join(self.dir_path, sub_dir, file_name).replace("\\", "/")
                            dataset = self.dataset_cache.get_dataset(file)
                            tvar = dataset.variables['time']
                            # time axis values are interpreted as days since 1970-01-01
                            # (timedelta's first positional argument is days)
                            tref = datetime.datetime(1970,1,1)
                            tlist = tvar[:]
                            dates = [tref + datetime.timedelta(tlist[i]) for i in range(tlist.size)]
                            self.dataset_cache.close_dataset(file)
                            cnt = 0
                            for time in dates:
                                if self.cube_config.start_time <= time <= self.cube_config.end_time:
                                    # each time step covers one day
                                    source_time_ranges.append((time, time + timedelta(days=1), file, cnt))
                                cnt += 1
        return sorted(source_time_ranges, key=lambda item: item[0])

    def transform_source_image(self, source_image):
        """
        Transforms the source image, here by rotating (270 degrees) and
        flipping (left-right).
        :param source_image: 2D image
        :return: transformed source_image
        """
        return numpy.fliplr(numpy.rot90(source_image, 3))
| [
"datetime.datetime",
"os.listdir",
"os.path.join",
"numpy.rot90",
"datetime.timedelta",
"os.walk"
] | [((8232, 8254), 'os.walk', 'os.walk', (['self.dir_path'], {}), '(self.dir_path)\n', (8239, 8254), False, 'import os\n'), ((9837, 9865), 'numpy.rot90', 'numpy.rot90', (['source_image', '(3)'], {}), '(source_image, 3)\n', (9848, 9865), False, 'import numpy\n'), ((8473, 8509), 'os.path.join', 'os.path.join', (['self.dir_path', 'sub_dir'], {}), '(self.dir_path, sub_dir)\n', (8485, 8509), False, 'import os\n'), ((8543, 8567), 'os.listdir', 'os.listdir', (['sub_dir_path'], {}), '(sub_dir_path)\n', (8553, 8567), False, 'import os\n'), ((8951, 8980), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (8968, 8980), False, 'import datetime\n'), ((8713, 8760), 'os.path.join', 'os.path.join', (['self.dir_path', 'sub_dir', 'file_name'], {}), '(self.dir_path, sub_dir, file_name)\n', (8725, 8760), False, 'import os\n'), ((9067, 9095), 'datetime.timedelta', 'datetime.timedelta', (['tlist[i]'], {}), '(tlist[i])\n', (9085, 9095), False, 'import datetime\n'), ((9451, 9468), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (9460, 9468), False, 'from datetime import timedelta\n')] |
#!/usr/bin/env python
##########################################################################
# Copyright 2018 Kata.ai
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
import argparse
import json
import math
import numpy as np
def truncate_paragraphs(obj, length):
    """Trim the paragraph-aligned lists of *obj* to at most *length* items, in place."""
    for field in ('paragraphs', 'gold_labels', 'pred_labels'):
        if field in obj:
            obj[field] = obj[field][:length]
def create_outlier_detector(data):
    """Return a predicate flagging values outside Tukey's 1.5*IQR fences of *data*."""
    lower_q, upper_q = np.percentile(data, (25, 75))
    spread = upper_q - lower_q
    lo_fence = lower_q - 1.5 * spread
    hi_fence = upper_q + 1.5 * spread

    def is_outlier(x):
        # strict inequalities: values exactly on a fence are NOT outliers
        return x < lo_fence or x > hi_fence

    return is_outlier
def read_jsonl(path, encoding='utf-8'):
    """Read a JSON-Lines file and return the list of decoded objects.

    :param path: path of the JSONL file to read
    :param encoding: text encoding of the file (default: utf-8)
    :return: list of objects, one per non-padded line

    Bug fix: the original ignored both parameters and read the global
    ``args.path`` with ``args.encoding``, so every caller (including
    ``read_jsonl(args.train_path, ...)``) silently read the same file.
    """
    with open(path, encoding=encoding) as f:
        return [json.loads(line.strip()) for line in f]
def train_for_paras_length(train_objs):
    """Return (mean, std) of the paragraph counts over the training articles."""
    counts = np.array([len(article['paragraphs']) for article in train_objs])
    return counts.mean(), counts.std()
def train_for_summ_length(train_objs):
    """Fit an IQR-based outlier detector on the training summary lengths."""
    lengths = [len(article['summary']) for article in train_objs]
    return create_outlier_detector(lengths)
def main(args):
    """Clean the dataset at ``args.path`` using statistics from the train set.

    Two preprocessing steps are applied:
    1. Paragraph lists are truncated to floor(mean + 2*std) of the training
       paragraph counts.
    2. Articles whose summary length is a Tukey (1.5*IQR) outlier with
       respect to the training summary lengths are dropped.

    The surviving objects are printed to stdout, one JSON object per line.
    """
    train_objs = read_jsonl(args.train_path, encoding=args.encoding)
    objs = read_jsonl(args.path, encoding=args.encoding)

    # Truncate paragraphs length
    mean, std = train_for_paras_length(train_objs)
    for obj in objs:
        truncate_paragraphs(obj, math.floor(mean + 2 * std))

    # Remove articles whose summary length is an outlier
    is_outlier = train_for_summ_length(train_objs)
    objs = [obj for obj in objs if not is_outlier(len(obj['summary']))]

    for obj in objs:
        print(json.dumps(obj, sort_keys=True))
if __name__ == '__main__':
    # Command-line entry point: <train_path> <path> [--encoding ENC]
    parser = argparse.ArgumentParser(
        description='Preprocess outliers in a given JSONL file.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('train_path', help='path to the train JSONL file')
    parser.add_argument('path', help='path to the JSONL file to preprocess')
    parser.add_argument('--encoding', default='utf-8', help='file encoding')
    args = parser.parse_args()
    main(args)
| [
"numpy.mean",
"argparse.ArgumentParser",
"math.floor",
"json.dumps",
"numpy.std",
"numpy.percentile"
] | [((1019, 1048), 'numpy.percentile', 'np.percentile', (['data', '(25, 75)'], {}), '(data, (25, 75))\n', (1032, 1048), True, 'import numpy as np\n'), ((2244, 2391), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Preprocess outliers in a given JSONL file."""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description=\n 'Preprocess outliers in a given JSONL file.', formatter_class=argparse.\n ArgumentDefaultsHelpFormatter)\n", (2267, 2391), False, 'import argparse\n'), ((1442, 1464), 'numpy.mean', 'np.mean', (['paras_lengths'], {}), '(paras_lengths)\n', (1449, 1464), True, 'import numpy as np\n'), ((1466, 1487), 'numpy.std', 'np.std', (['paras_lengths'], {}), '(paras_lengths)\n', (1472, 1487), True, 'import numpy as np\n'), ((1924, 1950), 'math.floor', 'math.floor', (['(mean + 2 * std)'], {}), '(mean + 2 * std)\n', (1934, 1950), False, 'import math\n'), ((2169, 2200), 'json.dumps', 'json.dumps', (['obj'], {'sort_keys': '(True)'}), '(obj, sort_keys=True)\n', (2179, 2200), False, 'import json\n')] |
import h5py
import random
import numpy as np
import pdb
import torch
class DataLoaderSimple(object):
    """
    DataLoader class for abstracting the reading, batching and shuffling operations.
    Does not use expert rewards.

    Batches are 6-D arrays of shape (B, N, M, C, H, W); each next_batch_*
    method also returns a ``depleted`` flag telling the caller whether the
    current pass over that split just finished (indices wrap back to 0).
    """
    def __init__(self, opts):
        """
        Loads the dataset and saves settings needed:
        (1) dataset statistics (2) shuffle (3) debug statistics (4) iteration tracker
        Opts required: seed, h5_path, shuffle, batch_size, h5_path_unseen (optional)
                       mask_path (optional)
        """
        # ---- Load the dataset (fully into memory as numpy arrays) ----
        self.h5_file = h5py.File(opts.h5_path, 'r')
        self.data = {}
        self.data['train'] = np.array(self.h5_file['train'])
        self.data['val'] = np.array(self.h5_file['val'])
        self.data['test'] = np.array(self.h5_file['test'])
        # optional high-resolution copies of the val/test splits
        if 'val_highres' in self.h5_file.keys():
            self.data['val_highres'] = np.array(self.h5_file['val_highres'])
            self.data['test_highres'] = np.array(self.h5_file['test_highres'])
        # ---- Load the unseen classes ----
        if opts.h5_path_unseen != '':
            h5_file_unseen = h5py.File(opts.h5_path_unseen, 'r')
            self.data['test_unseen'] = np.array(h5_file_unseen['test'])
        # ---- Save settings needed for batching operations ----
        # Dataset statistics
        self.train_count = self.h5_file['train'].shape[0]
        self.val_count = self.h5_file['val'].shape[0]
        self.test_count = self.h5_file['test'].shape[0]
        if opts.h5_path_unseen != '':
            self.test_unseen_count = self.data['test_unseen'].shape[0]
        # optional per-view masks for the test splits
        if hasattr(opts, 'mask_path') and opts.mask_path != '':
            mask_file = h5py.File(opts.mask_path, 'r')
            self.masks = {}
            self.masks['test'] = np.array(mask_file['test_mask'])
            if opts.h5_path_unseen != '':
                self.masks['test_unseen'] = np.array(mask_file['test_unseen_mask'])
            self.hasmasks = True
        else:
            self.hasmasks = False
        self.pano_shape = self.h5_file['train'].shape[1:]
        # Iteration tracker: current read position within each split
        self.train_idx = 0
        self.val_idx = 0
        self.test_idx = 0
        if opts.h5_path_unseen != '':
            self.test_unseen_idx = 0
        self.batch_size = opts.batch_size
        # Shuffle the training data indices and access them in the shuffled order
        self.shuffle = opts.shuffle
        self.shuffled_idx = list(range(self.h5_file['train'].shape[0]))
        if self.shuffle:
            random.shuffle(self.shuffled_idx)
        # Debug mode: enables shape assertions in the next_batch_* methods
        self.debug = opts.debug
        # per-sample dimensions: N x M view grid, C channels, H x W pixels
        self.N = self.data['train'].shape[1]
        self.M = self.data['train'].shape[2]
        self.C = self.data['train'].shape[3]
        self.H = self.data['train'].shape[4]
        self.W = self.data['train'].shape[5]
        if 'val_highres' in self.data:
            self.H_highres = self.data['val_highres'].shape[4]
            self.W_highres = self.data['test_highres'].shape[5]

    def next_batch_train(self):
        """
        Returns the next training batch (indexed by self.shuffled_idx and starting at self.train_idx)
        out: BxNxMxCx32x32
        depleted: is the epoch over?
        """
        # last batch of the epoch may be smaller than self.batch_size
        batch_size = min(self.batch_size, self.train_count - self.train_idx)
        out = np.array(self.data['train'][self.shuffled_idx[self.train_idx:(self.train_idx+batch_size)], :, :, :, :, :])
        if self.debug:
            assert((batch_size == self.batch_size) or (self.train_idx + batch_size == self.train_count))
            assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
        if self.train_idx + batch_size == self.train_count:
            depleted = True
            self.train_idx = 0
        else:
            depleted = False
            self.train_idx = self.train_idx + batch_size
        return out, depleted

    def next_batch_val(self, highres=False):
        """
        Returns the next validation batch
        out: BxNxMxCx32x32
        out_highres: BxNxMxCx448x448 (optional)
        depleted: is the epoch over?
        """
        batch_size = min(self.batch_size, self.val_count - self.val_idx)
        out = np.array(self.data['val'][self.val_idx:(self.val_idx+batch_size), :, :, :, :, :])
        if highres:
            # requires 'val_highres' to be present in the h5 file
            out_highres = np.array(self.data['val_highres'][self.val_idx:(self.val_idx+batch_size), :, :, :, :, :])
        if self.debug:
            assert((batch_size == self.batch_size) or (self.val_idx + batch_size == self.val_count))
            assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
            if highres:
                assert(out_highres.shape == (batch_size, self.N, self.M, self.C, self.H_highres, self.W_highres))
        if self.val_idx + batch_size == self.val_count:
            depleted = True
            self.val_idx = 0
        else:
            depleted = False
            self.val_idx = self.val_idx + batch_size
        if not highres:
            return out, depleted
        else:
            return out, out_highres, depleted

    def next_batch_test(self, highres=False):
        """
        Returns the next testing batch
        out: BxNxMxCx32x32
        out_highres: BxNxMxCx448x448 (optional)
        depleted: is the epoch over?
        """
        batch_size = min(self.batch_size, self.test_count - self.test_idx)
        out = np.array(self.data['test'][self.test_idx:(self.test_idx+batch_size), :, :, :, :, :])
        if highres:
            out_highres = np.array(self.data['test_highres'][self.test_idx:(self.test_idx+batch_size), :, :, :, :, :])
        # masks are returned as None when no mask_path was configured
        if self.hasmasks:
            out_masks = self.masks['test'][self.test_idx:(self.test_idx+batch_size), :, :, :, :, :]
        else:
            out_masks = None
        if self.debug:
            assert((batch_size == self.batch_size) or (self.test_idx + batch_size == self.test_count))
            assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
            if highres:
                assert(out_highres.shape == (batch_size, self.N, self.M, self.C, self.H_highres, self.W_highres))
            if self.hasmasks:
                assert(out_masks.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
        if self.test_idx + batch_size == self.test_count:
            depleted = True
            self.test_idx = 0
        else:
            depleted = False
            self.test_idx = self.test_idx + batch_size
        if not highres:
            return out, out_masks, depleted
        else:
            return out, out_highres, out_masks, depleted

    def next_batch_test_unseen(self):
        """
        Returns the next unseen classes testing batch
        out: BxNxMxCx32x32
        out_masks: BxNxMxCx32x32 mask batch, or None if no masks were loaded
        depleted: is the epoch over?
        """
        batch_size = min(self.batch_size, self.test_unseen_count - self.test_unseen_idx)
        out = np.array(self.data['test_unseen'][self.test_unseen_idx:(self.test_unseen_idx+batch_size), :, :, :, :, :])
        if self.hasmasks:
            out_masks = self.masks['test_unseen'][self.test_unseen_idx:(self.test_unseen_idx+batch_size), :, :, :, :, :]
        else:
            out_masks = None
        if self.debug:
            assert((batch_size == self.batch_size) or (self.test_unseen_idx + batch_size == self.test_unseen_count))
            assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
        if self.test_unseen_idx + batch_size == self.test_unseen_count:
            depleted = True
            self.test_unseen_idx = 0
        else:
            depleted = False
            self.test_unseen_idx = self.test_unseen_idx + batch_size
        return out, out_masks, depleted
class DataLoaderExpert(DataLoaderSimple):
    """
    DataLoader class for abstracting the reading, batching and shuffling operations.
    Uses expert rewards: each batch additionally carries a (B, N, M) reward
    array aligned with the panorama views.
    """
    def __init__(self, opts):
        """
        Loads the dataset, rewards and saves settings needed:
        (1) dataset statistics (2) shuffle (3) debug statistics (4) iteration tracker
        Opts required: seed, h5_path, shuffle, batch_size, rewards_h5_path
        """
        # ---- Load the dataset, save settings ----
        super(DataLoaderExpert, self).__init__(opts)
        # ---- Load the rewards ----
        # NOTE(review): no file mode is passed here (unlike the 'r' used for
        # the data files); older h5py versions default to append mode --
        # confirm read-only access is intended
        rewards_file = h5py.File(opts.rewards_h5_path)
        self.rewards = {}
        # These are KxNxM arrays containing rewards corresponding to each views of
        # all panoramas in the train and val splits
        self.rewards['train'] = np.array(rewards_file['train/nms'])
        self.rewards['val'] = np.array(rewards_file['val/nms'])

    def next_batch_train(self):
        """
        Returns the next training batch (indexed by self.shuffled_idx and starting at self.train_idx)
        out: BxNxMxCx32x32
        out_rewards: BxNxM
        depleted: is the epoch over?
        """
        # last batch of the epoch may be smaller than self.batch_size
        batch_size = min(self.batch_size, self.train_count - self.train_idx)
        out = np.array(self.data['train'][self.shuffled_idx[self.train_idx:(self.train_idx+batch_size)], :, :, :, :, :])
        # rewards are indexed by the same shuffled sample order as the images
        out_rewards = self.rewards['train'][self.shuffled_idx[self.train_idx:(self.train_idx+batch_size)], :, :]
        if self.debug:
            assert((batch_size == self.batch_size) or (self.train_idx + batch_size == self.train_count))
            assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
            assert(out_rewards.shape == (batch_size, self.N, self.M))
        if self.train_idx + batch_size == self.train_count:
            depleted = True
            self.train_idx = 0
        else:
            depleted = False
            self.train_idx = self.train_idx + batch_size
        return out, out_rewards, depleted

    def next_batch_val(self):
        """
        Returns the next validation batch
        out: BxNxMxCx32x32
        out_rewards: BxNxM
        depleted: is the epoch over?
        """
        batch_size = min(self.batch_size, self.val_count - self.val_idx)
        out = self.data['val'][self.val_idx:(self.val_idx+batch_size), :, :, :, :, :]
        out_rewards = self.rewards['val'][self.val_idx:(self.val_idx+batch_size), :, :]
        if self.debug:
            assert((batch_size == self.batch_size) or (self.val_idx + batch_size == self.val_count))
            assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
            assert(out_rewards.shape == (batch_size, self.N, self.M))
        if self.val_idx + batch_size == self.val_count:
            depleted = True
            self.val_idx = 0
        else:
            depleted = False
            self.val_idx = self.val_idx + batch_size
        return out, out_rewards, depleted
class DataLoaderExpertPolicy(DataLoaderSimple):
    """
    DataLoader class for abstracting the reading, batching and shuffling operations.
    Expert supervision is served alongside the images, selected by
    opts.trajectories_type:
      - 'utility_maps': per-view KxNxMxNxM arrays loaded from an HDF5 file
      - 'expert_trajectories': dict keyed by view (i, j) loaded via torch.load
    """
    def __init__(self, opts):
        """
        Loads the dataset, expert supervision and saves settings needed:
        (1) dataset statistics (2) shuffle (3) debug statistics (4) iteration tracker
        Opts required: seed, h5_path, shuffle, batch_size, utility_h5_path,
        h5_path_unseen, debug, trajectories_type
        Raises:
            ValueError: if opts.trajectories_type is neither 'utility_maps'
                nor 'expert_trajectories'
        """
        # ---- Load the dataset, save the settings ----
        super(DataLoaderExpertPolicy, self).__init__(opts)
        self.trajectories_type = opts.trajectories_type
        if opts.trajectories_type == 'utility_maps':
            # ---- Load the utility maps (KxNxMxNxM arrays per split) ----
            utility_file = h5py.File(opts.utility_h5_path)
            self.utility_maps = {}
            for split in utility_file.keys():
                self.utility_maps[split] = np.array(utility_file[split]['utility_maps'])
        elif opts.trajectories_type == 'expert_trajectories':
            # ---- Load the trajectories ----
            # {'train': {(i, j): #train_samples x T-1 array, ...}, 'val': ...}
            self.trajectories = torch.load(opts.utility_h5_path)
        else:
            raise ValueError('Wrong trajectories_type!')
    def _gather_out_maps(self, split, sel):
        """
        Fetch the expert supervision for the rows of `split` selected by `sel`
        (a slice or an integer index array).
        Returns a (B, N, M, N, M) utility-map array, or a dict mapping each
        view (i, j) to its (B, T-1) trajectory rows.
        """
        if self.trajectories_type == 'utility_maps':
            return self.utility_maps[split][sel]
        out_maps = {}
        for i in range(self.N):
            for j in range(self.M):
                out_maps[(i, j)] = self.trajectories[split][(i, j)][sel, :]
        return out_maps
    def _debug_check_out_maps(self, out_maps, batch_size):
        """Debug-only shape checks for the supervision from _gather_out_maps."""
        if self.trajectories_type == 'utility_maps':
            assert(out_maps.shape == (batch_size, self.N, self.M, self.N, self.M))
        else:
            assert(len(out_maps.keys()) == self.M * self.N)
            assert(out_maps[(0, 0)].shape[0] == batch_size)
    def next_batch_train(self):
        """
        Returns the next training batch (indexed by self.shuffled_idx and starting at self.train_idx)
        out: BxNxMxCx32x32
        out_maps: BxNxMxNxM array or {(i, j): BxT-1} dict
        depleted: True when the epoch ended (cursor is reset to 0)
        """
        batch_size = min(self.batch_size, self.train_count - self.train_idx)
        sel = self.shuffled_idx[self.train_idx:(self.train_idx + batch_size)]
        out = np.array(self.data['train'][sel, :, :, :, :, :])
        out_maps = self._gather_out_maps('train', sel)
        if self.debug:
            assert((batch_size == self.batch_size) or (self.train_idx + batch_size == self.train_count))
            assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
            self._debug_check_out_maps(out_maps, batch_size)
        if self.train_idx + batch_size == self.train_count:
            depleted = True
            self.train_idx = 0
        else:
            depleted = False
            self.train_idx = self.train_idx + batch_size
        return out, out_maps, depleted
    def next_batch_val(self):
        """
        Returns the next validation batch
        out: BxNxMxCx32x32
        out_maps: BxNxMxNxM array or {(i, j): BxT-1} dict
        depleted: True when the epoch ended (cursor is reset to 0)
        """
        batch_size = min(self.batch_size, self.val_count - self.val_idx)
        sel = slice(self.val_idx, self.val_idx + batch_size)
        out = self.data['val'][sel, :, :, :, :, :]
        out_maps = self._gather_out_maps('val', sel)
        if self.debug:
            assert((batch_size == self.batch_size) or (self.val_idx + batch_size == self.val_count))
            assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
            self._debug_check_out_maps(out_maps, batch_size)
        if self.val_idx + batch_size == self.val_count:
            depleted = True
            self.val_idx = 0
        else:
            depleted = False
            self.val_idx = self.val_idx + batch_size
        return out, out_maps, depleted
    def next_batch_test(self, highres=False):
        """
        Returns the next testing batch
        out: BxNxMxCx32x32
        out_masks: mask array matching out, or None when no masks are loaded
        out_maps: BxNxMxNxM array or {(i, j): BxT-1} dict
        out_highres: BxNxMxCx448x448 (only when highres=True)
        depleted: is the epoch over?
        """
        batch_size = min(self.batch_size, self.test_count - self.test_idx)
        sel = slice(self.test_idx, self.test_idx + batch_size)
        out = np.array(self.data['test'][sel, :, :, :, :, :])
        if highres:
            out_highres = np.array(self.data['test_highres'][sel, :, :, :, :, :])
        if self.hasmasks:
            out_masks = self.masks['test'][sel, :, :, :, :, :]
        else:
            out_masks = None
        out_maps = self._gather_out_maps('test', sel)
        if self.debug:
            assert((batch_size == self.batch_size) or (self.test_idx + batch_size == self.test_count))
            assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
            if highres:
                assert(out_highres.shape == (batch_size, self.N, self.M, self.C, self.H_highres, self.W_highres))
            if self.hasmasks:
                assert(out_masks.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
            self._debug_check_out_maps(out_maps, batch_size)
        if self.test_idx + batch_size == self.test_count:
            depleted = True
            self.test_idx = 0
        else:
            depleted = False
            self.test_idx = self.test_idx + batch_size
        if not highres:
            return out, out_masks, out_maps, depleted
        else:
            return out, out_highres, out_masks, out_maps, depleted
    def next_batch_test_unseen(self):
        """
        Returns the next unseen classes testing batch
        out: BxNxMxCx32x32
        out_masks: mask array matching out, or None when no masks are loaded
        out_maps: BxNxMxNxM array or {(i, j): BxT-1} dict
        depleted: is the epoch over?
        """
        batch_size = min(self.batch_size, self.test_unseen_count - self.test_unseen_idx)
        sel = slice(self.test_unseen_idx, self.test_unseen_idx + batch_size)
        out = np.array(self.data['test_unseen'][sel, :, :, :, :, :])
        if self.hasmasks:
            out_masks = self.masks['test_unseen'][sel, :, :, :, :, :]
        else:
            out_masks = None
        # BUG FIX: the original indexed self.trajectories['test_unseen'] without
        # the per-view (i, j) key, unlike every other next_batch_* method; the
        # shared helper applies the correct indexing.
        out_maps = self._gather_out_maps('test_unseen', sel)
        if self.debug:
            assert((batch_size == self.batch_size) or (self.test_unseen_idx + batch_size == self.test_unseen_count))
            assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
            self._debug_check_out_maps(out_maps, batch_size)
        if self.test_unseen_idx + batch_size == self.test_unseen_count:
            depleted = True
            self.test_unseen_idx = 0
        else:
            depleted = False
            self.test_unseen_idx = self.test_unseen_idx + batch_size
        return out, out_masks, out_maps, depleted
class DataLoaderExpertBoth(DataLoaderSimple):
    # TODO: Need to update trajectories_type here
    # TODO: Add next_batch_test with expert trajectories option here
    """
    DataLoader abstracting the reading, batching and shuffling operations.
    Serves both expert utility maps and per-view rewards as supervision.
    """
    def __init__(self, opts):
        """
        Load the dataset plus the utility-map and reward files, and record the
        settings needed for iteration:
        (1) dataset statistics (2) shuffle (3) debug statistics (4) iteration tracker
        Opts required: seed, h5_path, shuffle, batch_size, utility_h5_path,
        rewards_h5_path, h5_path_unseen, debug
        """
        # ---- The image dataset itself is handled by the parent class ----
        super(DataLoaderExpertBoth, self).__init__(opts)
        # ---- Expert supervision: KxNxMxNxM utility maps and KxNxM rewards
        #      for every panorama in the train and val splits ----
        utility_file = h5py.File(opts.utility_h5_path)
        rewards_file = h5py.File(opts.rewards_h5_path)
        self.rewards = {}
        self.utility_maps = {}
        for split in ('train', 'val'):
            self.utility_maps[split] = np.array(utility_file[split + '/utility_maps'])
            self.rewards[split] = np.array(rewards_file[split + '/nms'])
    def next_batch_train(self):
        """
        Return the next training batch (shuffled order, starting at self.train_idx).
        out: BxNxMxCx32x32, out_maps: BxNxMxNxM, out_rewards: BxNxM,
        depleted: True once the epoch is exhausted (cursor resets to 0).
        """
        count = min(self.batch_size, self.train_count - self.train_idx)
        picks = self.shuffled_idx[self.train_idx:self.train_idx + count]
        out = np.array(self.data['train'][picks, :, :, :, :, :])
        out_maps = self.utility_maps['train'][picks]
        out_rewards = self.rewards['train'][picks, :, :]
        if self.debug:
            # Only the last batch of an epoch may be smaller than batch_size.
            assert count == self.batch_size or self.train_idx + count == self.train_count
            assert out.shape == (count, self.N, self.M, self.C, self.H, self.W)
            assert out_maps.shape == (count, self.N, self.M, self.N, self.M)
            assert out_rewards.shape == (count, self.N, self.M)
        depleted = (self.train_idx + count == self.train_count)
        self.train_idx = 0 if depleted else self.train_idx + count
        return out, out_maps, out_rewards, depleted
    def next_batch_val(self):
        """
        Return the next validation batch (sequential order).
        out: BxNxMxCx32x32, out_maps: BxNxMxNxM, out_rewards: BxNxM,
        depleted: True once the epoch is exhausted (cursor resets to 0).
        """
        count = min(self.batch_size, self.val_count - self.val_idx)
        lo, hi = self.val_idx, self.val_idx + count
        out = self.data['val'][lo:hi, :, :, :, :, :]
        out_maps = self.utility_maps['val'][lo:hi]
        out_rewards = self.rewards['val'][lo:hi]
        if self.debug:
            assert count == self.batch_size or hi == self.val_count
            assert out.shape == (count, self.N, self.M, self.C, self.H, self.W)
            assert out_maps.shape == (count, self.N, self.M, self.N, self.M)
            assert out_rewards.shape == (count, self.N, self.M)
        depleted = (hi == self.val_count)
        self.val_idx = 0 if depleted else hi
        return out, out_maps, out_rewards, depleted
| [
"numpy.array",
"torch.load",
"random.shuffle",
"h5py.File"
] | [((605, 633), 'h5py.File', 'h5py.File', (['opts.h5_path', '"""r"""'], {}), "(opts.h5_path, 'r')\n", (614, 633), False, 'import h5py\n'), ((686, 717), 'numpy.array', 'np.array', (["self.h5_file['train']"], {}), "(self.h5_file['train'])\n", (694, 717), True, 'import numpy as np\n'), ((745, 774), 'numpy.array', 'np.array', (["self.h5_file['val']"], {}), "(self.h5_file['val'])\n", (753, 774), True, 'import numpy as np\n'), ((803, 833), 'numpy.array', 'np.array', (["self.h5_file['test']"], {}), "(self.h5_file['test'])\n", (811, 833), True, 'import numpy as np\n'), ((3356, 3466), 'numpy.array', 'np.array', (["self.data['train'][self.shuffled_idx[self.train_idx:self.train_idx +\n batch_size], :, :, :, :, :]"], {}), "(self.data['train'][self.shuffled_idx[self.train_idx:self.train_idx +\n batch_size], :, :, :, :, :])\n", (3364, 3466), True, 'import numpy as np\n'), ((4261, 4346), 'numpy.array', 'np.array', (["self.data['val'][self.val_idx:self.val_idx + batch_size, :, :, :, :, :]"], {}), "(self.data['val'][self.val_idx:self.val_idx + batch_size, :, :, :,\n :, :])\n", (4269, 4346), True, 'import numpy as np\n'), ((5471, 5559), 'numpy.array', 'np.array', (["self.data['test'][self.test_idx:self.test_idx + batch_size, :, :, :, :, :]"], {}), "(self.data['test'][self.test_idx:self.test_idx + batch_size, :, :,\n :, :, :])\n", (5479, 5559), True, 'import numpy as np\n'), ((6964, 7073), 'numpy.array', 'np.array', (["self.data['test_unseen'][self.test_unseen_idx:self.test_unseen_idx +\n batch_size, :, :, :, :, :]"], {}), "(self.data['test_unseen'][self.test_unseen_idx:self.test_unseen_idx +\n batch_size, :, :, :, :, :])\n", (6972, 7073), True, 'import numpy as np\n'), ((8390, 8421), 'h5py.File', 'h5py.File', (['opts.rewards_h5_path'], {}), '(opts.rewards_h5_path)\n', (8399, 8421), False, 'import h5py\n'), ((8615, 8650), 'numpy.array', 'np.array', (["rewards_file['train/nms']"], {}), "(rewards_file['train/nms'])\n", (8623, 8650), True, 'import numpy as np\n'), ((8681, 8714), 
'numpy.array', 'np.array', (["rewards_file['val/nms']"], {}), "(rewards_file['val/nms'])\n", (8689, 8714), True, 'import numpy as np\n'), ((9019, 9129), 'numpy.array', 'np.array', (["self.data['train'][self.shuffled_idx[self.train_idx:self.train_idx +\n batch_size], :, :, :, :, :]"], {}), "(self.data['train'][self.shuffled_idx[self.train_idx:self.train_idx +\n batch_size], :, :, :, :, :])\n", (9027, 9129), True, 'import numpy as np\n'), ((12454, 12564), 'numpy.array', 'np.array', (["self.data['train'][self.shuffled_idx[self.train_idx:self.train_idx +\n batch_size], :, :, :, :, :]"], {}), "(self.data['train'][self.shuffled_idx[self.train_idx:self.train_idx +\n batch_size], :, :, :, :, :])\n", (12462, 12564), True, 'import numpy as np\n'), ((15576, 15664), 'numpy.array', 'np.array', (["self.data['test'][self.test_idx:self.test_idx + batch_size, :, :, :, :, :]"], {}), "(self.data['test'][self.test_idx:self.test_idx + batch_size, :, :,\n :, :, :])\n", (15584, 15664), True, 'import numpy as np\n'), ((17848, 17957), 'numpy.array', 'np.array', (["self.data['test_unseen'][self.test_unseen_idx:self.test_unseen_idx +\n batch_size, :, :, :, :, :]"], {}), "(self.data['test_unseen'][self.test_unseen_idx:self.test_unseen_idx +\n batch_size, :, :, :, :, :])\n", (17856, 17957), True, 'import numpy as np\n'), ((20198, 20229), 'h5py.File', 'h5py.File', (['opts.utility_h5_path'], {}), '(opts.utility_h5_path)\n', (20207, 20229), False, 'import h5py\n'), ((20253, 20284), 'h5py.File', 'h5py.File', (['opts.rewards_h5_path'], {}), '(opts.rewards_h5_path)\n', (20262, 20284), False, 'import h5py\n'), ((20417, 20461), 'numpy.array', 'np.array', (["utility_file['train/utility_maps']"], {}), "(utility_file['train/utility_maps'])\n", (20425, 20461), True, 'import numpy as np\n'), ((20497, 20539), 'numpy.array', 'np.array', (["utility_file['val/utility_maps']"], {}), "(utility_file['val/utility_maps'])\n", (20505, 20539), True, 'import numpy as np\n'), ((20708, 20743), 'numpy.array', 'np.array', 
(["rewards_file['train/nms']"], {}), "(rewards_file['train/nms'])\n", (20716, 20743), True, 'import numpy as np\n'), ((20774, 20807), 'numpy.array', 'np.array', (["rewards_file['val/nms']"], {}), "(rewards_file['val/nms'])\n", (20782, 20807), True, 'import numpy as np\n'), ((21140, 21250), 'numpy.array', 'np.array', (["self.data['train'][self.shuffled_idx[self.train_idx:self.train_idx +\n batch_size], :, :, :, :, :]"], {}), "(self.data['train'][self.shuffled_idx[self.train_idx:self.train_idx +\n batch_size], :, :, :, :, :])\n", (21148, 21250), True, 'import numpy as np\n'), ((923, 960), 'numpy.array', 'np.array', (["self.h5_file['val_highres']"], {}), "(self.h5_file['val_highres'])\n", (931, 960), True, 'import numpy as np\n'), ((1001, 1039), 'numpy.array', 'np.array', (["self.h5_file['test_highres']"], {}), "(self.h5_file['test_highres'])\n", (1009, 1039), True, 'import numpy as np\n'), ((1152, 1187), 'h5py.File', 'h5py.File', (['opts.h5_path_unseen', '"""r"""'], {}), "(opts.h5_path_unseen, 'r')\n", (1161, 1187), False, 'import h5py\n'), ((1227, 1259), 'numpy.array', 'np.array', (["h5_file_unseen['test']"], {}), "(h5_file_unseen['test'])\n", (1235, 1259), True, 'import numpy as np\n'), ((1719, 1749), 'h5py.File', 'h5py.File', (['opts.mask_path', '"""r"""'], {}), "(opts.mask_path, 'r')\n", (1728, 1749), False, 'import h5py\n'), ((1811, 1843), 'numpy.array', 'np.array', (["mask_file['test_mask']"], {}), "(mask_file['test_mask'])\n", (1819, 1843), True, 'import numpy as np\n'), ((2563, 2596), 'random.shuffle', 'random.shuffle', (['self.shuffled_idx'], {}), '(self.shuffled_idx)\n', (2577, 2596), False, 'import random\n'), ((4389, 4482), 'numpy.array', 'np.array', (["self.data['val_highres'][self.val_idx:self.val_idx + batch_size, :, :, :, :, :]"], {}), "(self.data['val_highres'][self.val_idx:self.val_idx + batch_size, :,\n :, :, :, :])\n", (4397, 4482), True, 'import numpy as np\n'), ((5602, 5698), 'numpy.array', 'np.array', 
(["self.data['test_highres'][self.test_idx:self.test_idx + batch_size, :, :, :,\n :, :]"], {}), "(self.data['test_highres'][self.test_idx:self.test_idx + batch_size,\n :, :, :, :, :])\n", (5610, 5698), True, 'import numpy as np\n'), ((11542, 11573), 'h5py.File', 'h5py.File', (['opts.utility_h5_path'], {}), '(opts.utility_h5_path)\n', (11551, 11573), False, 'import h5py\n'), ((15707, 15803), 'numpy.array', 'np.array', (["self.data['test_highres'][self.test_idx:self.test_idx + batch_size, :, :, :,\n :, :]"], {}), "(self.data['test_highres'][self.test_idx:self.test_idx + batch_size,\n :, :, :, :, :])\n", (15715, 15803), True, 'import numpy as np\n'), ((1930, 1969), 'numpy.array', 'np.array', (["mask_file['test_unseen_mask']"], {}), "(mask_file['test_unseen_mask'])\n", (1938, 1969), True, 'import numpy as np\n'), ((11740, 11785), 'numpy.array', 'np.array', (["utility_file[split]['utility_maps']"], {}), "(utility_file[split]['utility_maps'])\n", (11748, 11785), True, 'import numpy as np\n'), ((12036, 12068), 'torch.load', 'torch.load', (['opts.utility_h5_path'], {}), '(opts.utility_h5_path)\n', (12046, 12068), False, 'import torch\n')] |
import numpy as np
import cv2
import os
import random
from skimage.transform import resize
from skimage.color import rgb2gray
import pickle
import tensorflow as tf
from keras.utils import np_utils
from PIL import Image
import threading
from concurrent.futures import ThreadPoolExecutor
#from flask import session
# image size to provide to TP-GAN
IMG_H, IMG_W = 128, 128
# subjects of MULTI-PIE
NUM_SUBJECTS = 30
# Map a capture angle (degrees) to the MULTI-PIE camera directory label.
# NOTE: the jpeg-converted dataset drops the underscore from the original
# MULTI-PIE camera names (e.g. "05_1" -> "051").
ANGLE_DIR = {
    -90: "110",
    -75: "120",
    -60: "090",
    -45: "080",
    -30: "130",
    -15: "140",
    0: "051",
    15: "050",
    30: "041",
    45: "190",
    60: "200",
    75: "010",
    90: "240",
}
# Inverse map: MULTI-PIE camera directory label -> capture angle (degrees).
DIR_ANGLE = {label: angle for angle, label in ANGLE_DIR.items()}
# size of cropped part image
EYE_H, EYE_W = 40, 40
NOSE_H, NOSE_W = 32, 40
MOUTH_H, MOUTH_W = 32, 48
# average part position of angle 0 deg images
LEYE_Y, LEYE_X = 49, 46
REYE_Y, REYE_X = 49, 86
NOSE_Y, NOSE_X = 78, 66
MOUTH_Y, MOUTH_X = 93, 66
# name template for the cached datalist pickle (currently unused)
#datalist_name ='datalist_{}_{}_front_{}.pkl'
#datalist_name ='datalist.pkl'
class Datagen():
"""
this class provides data generator of MULTI-PIE dataset.
"""
def __init__(self, dataset_dir='', landmarks_dict_file='',
datalist_dir='', mirror_to_one_side=True,
min_angle=-30, max_angle=30, include_frontal=False,
face_trembling_range=0, valid_count=4, workers=16):
"""
Initializer
Args:
dataset_dir (str): jpg converted MULTI-PIE data dir; parent dir of session dirs.
this directory can be created by misc/jpeg_converter.py
landmarks_dict_file (str): pikled dict file. the structure of the dict is same as MULTI-PIE directories
this dict file can be created by misc/landmark_convert.py
datalist_dir (str): output dir for datalist file. datalist stores the list of train and valid image files.
min_angle (str): min pose angle. must be one of [-90, -75, -60, -45, -30, -15, 0, 15, 30, 45, 60, 75, 90]
max angle (str): max pose angle. must be one of [-90, -75, -60, -45, -30, -15, 0, 15, 30, 45, 60, 75, 90]
include_frontal (bool): if False, return data doesn't include frontal (0 deg) image.
face_trembling_range (int): random noise range (-val to + val) for face cropping position.
valid_count (int): data count for validation dataset
workers (int): worker count for multi-threading.
"""
self.dataset_dir = dataset_dir
if not tf.gfile.Exists(landmarks_dict_file):
raise Exception('landmarks dict file doesnt exsit. target file: {}'.format(landmarks_dict_file))
with Open(landmarks_dict_file, 'rb') as f:
self.landmarks_dict = pickle.load(f)
print('landmarks_dict',self.landmarks_dict)
self.datalist_file = datalist_dir
if tf.gfile.Exists(self.datalist_file):
with Open(self.datalist_file, 'rb') as f:
self.datalist = pickle.load(f)
else:
print("datalist is file doesnt exsit")
#print(self.datalist[0:])
self.train_list = self.datalist[valid_count:]
#print("len(self.train_list): ", len(self.train_list))
self.train_cursor = random.randint(0, len(self.train_list)-1)
#print("self.train_cursor: ", self.train_cursor)
self.valid_list = self.datalist[:valid_count]
self.valid_cursor = 0
self.mirror_to_one_side = mirror_to_one_side
self.face_trembling_range = face_trembling_range
self.workers = workers
if self.workers > 1:
self.thread_pool_executor = ThreadPoolExecutor(max_workers=workers)
self.lock = threading.Lock()
def __del__(self):
if self.workers > 1:
try:
self.thread_pool_executor.shutdown()
except:
pass
def batch_data(self, datalist, cursor, batch_size = 16):
"""
create mini-batch from datalist and cursor index
Args:
datalist (list): list of data file path
cursor (int): current index cursor on the datalist
batch_size (int): batch size of return data
Returns:
tuple of list of mini-batch data file path and updated cursor index
"""
ret_list = []
for i in range(batch_size):
ret_list.append(datalist[(cursor + i)%len(datalist)])
ret_cursor = (cursor + batch_size) % len(datalist)
return ret_list, ret_cursor
def create_datalist(self, min_angle, max_angle, include_frontal=False, shuffle=True):#Do not use
"""
create datalist; list of target image file path which saticefies arg params.
this function also save created datalist and load datalist if already exists.
Args:
min_angle (str): min pose angle. must be one of [-90, -75, -60, -45, -30, -15, 0, 15, 30, 45, 60, 75, 90]
max angle (str): max pose angle. must be one of [-90, -75, -60, -45, -30, -15, 0, 15, 30, 45, 60, 75, 90]
include_frontal (bool): if False, return data doesn't include frontal (0 deg) image.
shuffle (bool): if True, shuffle order of return list
Returns:
created or loaded datalist.
"""
datalist = []
cam_labels = []
for angle in range(min_angle, max_angle+1, 15):
if include_frontal or angle != 0:
cam_labels.append(ANGLE_DIR[angle])
curdir = os.getcwd()
try:
sessions = self.landmarks_dict.keys()
for session in sessions:
print(session)
subjects = self.landmarks_dict[session]['multiview'].keys()
for subject in subjects:
print(" " + subject)
rec_nums = self.landmarks_dict[session]['multiview'][subject].keys()
for rec_num in rec_nums:
print(" " + rec_num)
for cam_label in cam_labels:
landmarks = self.landmarks_dict[session]['multiview'][subject][rec_num][cam_label].keys()
for landmark in landmarks:
data_path = os.path.join(session, 'multiview', subject, rec_num, cam_label, landmark)
datalist.append(data_path)
os.chdir(curdir)
except:
os.chdir(curdir)
if shuffle:
random.shuffle(datalist)
return datalist
def load_landmarks(self, data_path):
try:
subject, landmark = data_path.split(os.sep)
except Exception as e:
print(e)
print(data_path)
return self.landmarks_dict[subject][landmark] #將data_path分成session(路徑)以及landmark(檔名)
    def crop(self, image, landmarks, angle, size=128):
        """
        Align a face by its eye/mouth landmarks and crop the face plus four part patches.
        Args:
            image: source image array (H x W x C) — assumes a color image; TODO confirm
            landmarks: facial landmark array (points x 2, x/y order); the indices
                used (36-47 eyes, 30 nose tip, 48-59 mouth) match the common
                68-point annotation scheme — confirm against landmark_convert.py
            angle: capture angle in degrees; used only in the error message here
            size (int): side length of the output face crop
        Returns:
            tuple (crop_img, leye_img, reye_img, nose_img, mouth_img) with
            shapes (size, size, C), (EYE_H, EYE_W, C), (EYE_H, EYE_W, C),
            (NOSE_H, NOSE_W, C), (MOUTH_H, MOUTH_W, C)
        Raises:
            Exception: when any part crop does not have its expected shape
                (e.g. landmarks too close to the image border).
        """
        # Target vertical positions of the eyes and mouth inside the crop,
        # expressed as fractions of the 128-pixel reference crop.
        eye_y = 40/128
        mouth_y = 88/128
        # Eye centers are averaged over four eye-contour points; the mouth
        # center over its two corners.
        reye = np.average(np.array((landmarks[37], landmarks[38], landmarks[40], landmarks[41])), axis=0)
        leye = np.average(np.array((landmarks[43], landmarks[44], landmarks[46], landmarks[47])), axis=0)
        mouth = np.average(np.array((landmarks[48], landmarks[54])), axis=0)
        nose_tip = landmarks[30]
        # phi: angle (deg) at the mouth between the two mouth->eye vectors.
        # It shrinks as the head turns toward profile.
        vec_mouth2reye = reye - mouth
        vec_mouth2leye = leye - mouth
        phi = np.arccos(vec_mouth2reye.dot(vec_mouth2leye) / (np.linalg.norm(vec_mouth2reye) * np.linalg.norm(vec_mouth2leye)))/np.pi * 180
        if phi < 15: # consider the profile image is 90 deg.
            # in case of 90 deg. set invisible eye with copy of visible eye.
            eye_center = (reye + leye) / 2
            if nose_tip[0] > eye_center[0]:
                leye = reye
            else:
                reye = leye
        # calc angle eyes against horizontal as theta
        if np.array_equal(reye, leye) or phi < 38: # in case of 90 deg. avoid rotation
            theta = 0
        else:
            vec_leye2reye = reye - leye
            if vec_leye2reye[0] < 0:
                vec_leye2reye = -vec_leye2reye
            theta = np.arctan(vec_leye2reye[1]/vec_leye2reye[0])/np.pi*180
        # Rotate the whole image by theta around its center so the eyes are
        # horizontal, then apply the same affine transform to the landmarks.
        imgcenter = (image.shape[1]/2, image.shape[0]/2)
        rotmat = cv2.getRotationMatrix2D(imgcenter, theta, 1)
        rot_img = cv2.warpAffine(image, rotmat, (image.shape[1], image.shape[0]))
        rot_landmarks = np.transpose(rotmat[:, :2].dot(np.transpose(landmarks)) + np.repeat(rotmat[:, 2].reshape((2,1)), landmarks.shape[0], axis=1))
        rot_reye = np.average(np.array((rot_landmarks[37], rot_landmarks[38], rot_landmarks[40], rot_landmarks[41])), axis=0)
        rot_leye = np.average(np.array((rot_landmarks[43], rot_landmarks[44], rot_landmarks[46], rot_landmarks[47])), axis=0)
        rot_mouth = np.average(np.array((rot_landmarks[48], rot_landmarks[54])), axis=0)
        # Choose a square crop whose eye and mouth rows land at eye_y / mouth_y.
        crop_size = int((rot_mouth[1] - rot_reye[1])/(mouth_y - eye_y) + 0.5)
        crop_up = int(rot_reye[1] - crop_size * eye_y + 0.5)
        crop_left = int((rot_reye[0] + rot_leye[0]) / 2 - crop_size / 2 + 0.5)
        # If the crop would start above/left of the image, clamp to 0 and grow
        # the crop extent by the clipped amount.
        up_size_fixed=0
        left_size_fixed=0
        if crop_up < 0 :
            up_size_fixed=abs(crop_up)
            crop_up = 0
        if crop_left < 0 :
            left_size_fixed=abs(crop_left)
            crop_left = 0
        crop_img = rot_img[crop_up:crop_up+crop_size+up_size_fixed, crop_left:crop_left+crop_size+left_size_fixed]
        # Express the landmarks in crop coordinates, then rescale to `size`.
        crop_landmarks = rot_landmarks - np.array([crop_left, crop_up])
        crop_img = cv2.resize(crop_img, (size, size))
        crop_landmarks *= size / crop_size
        # Fixed-size part patches centered on each landmark group's bounding box.
        leye_points = crop_landmarks[42:48]
        leye_center = (np.max(leye_points, axis=0) + np.min(leye_points, axis=0)) / 2
        leye_left = int(leye_center[0] - EYE_W / 2 + 0.5)
        leye_up = int(leye_center[1] - EYE_H / 2 + 0.5)
        leye_img = crop_img[leye_up:leye_up + EYE_H, leye_left:leye_left + EYE_W]
        reye_points = crop_landmarks[36:42]
        reye_center = (np.max(reye_points, axis=0) + np.min(reye_points, axis=0)) / 2
        reye_left = int(reye_center[0] - EYE_W / 2 + 0.5)
        reye_up = int(reye_center[1] - EYE_H / 2 + 0.5)
        reye_img = crop_img[reye_up:reye_up + EYE_H, reye_left:reye_left + EYE_W]
        nose_points = crop_landmarks[31:36]
        nose_center = (np.max(nose_points, axis=0) + np.min(nose_points, axis=0)) / 2
        nose_left = int(nose_center[0] - NOSE_W / 2 + 0.5)
        # The nose patch is shifted 10 px upward from the bounding-box center.
        nose_up = int(nose_center[1] - 10 - NOSE_H / 2 + 0.5)
        nose_img = crop_img[nose_up:nose_up + NOSE_H, nose_left:nose_left + NOSE_W]
        mouth_points = crop_landmarks[48:60]
        mouth_center = (np.max(mouth_points, axis=0) + np.min(mouth_points, axis=0)) / 2
        mouth_left = int(mouth_center[0] - MOUTH_W / 2 + 0.5)
        mouth_up = int(mouth_center[1] - MOUTH_H / 2 + 0.5)
        mouth_img = crop_img[mouth_up:mouth_up + MOUTH_H, mouth_left:mouth_left + MOUTH_W]
        # Optional augmentation: re-crop the face with a random pixel offset.
        # NOTE(review): only crop_img is re-cropped; the part patches still come
        # from the untrembled crop — confirm this is intentional.
        if self.face_trembling_range != 0:
            offset_x = random.randint(-self.face_trembling_range, self.face_trembling_range)
            offset_y = random.randint(-self.face_trembling_range, self.face_trembling_range)
            crop_img = rot_img[offset_y+crop_up:offset_y+crop_up+crop_size, offset_x+crop_left:offset_x+crop_left+crop_size]
            crop_img = cv2.resize(crop_img, (size, size))
        if leye_img.shape[:2] != (EYE_H, EYE_W) or reye_img.shape[:2] != (EYE_H, EYE_W) or nose_img.shape[:2] != (NOSE_H, NOSE_W) or mouth_img.shape[:2] != (MOUTH_H, MOUTH_W):
            raise Exception('Error while croping image. angle:{}, phi:{}'.format(angle, phi))
        return crop_img, leye_img, reye_img, nose_img, mouth_img
def imread(self, path, normalize=True):
with open(path, 'rb') as f:
image = Image.open(f)
imarray = np.asarray(image)
if normalize:
return imarray.astype(np.float32) / np.iinfo(imarray.dtype).max
else:
imarray
    def get_generator(self, batch_size = 16, setting = 'train'):
        """
        Data generator for training the generator model.
        Yields ([x_faces, x_leyes, x_reyes, x_noses, x_mouthes, x_z],
                [y_faces x5, y_faces64 x2, y_faces32 x2, y_subject_ids,
                 y_leyes, y_reyes, y_noses, y_mouthes]) batches forever.
        Args:
            batch_size (int): Number of images per batch
            setting (str): str of desired dataset type; 'train'/'valid'
        """
        def get_next():
            # Cursor advance is guarded by the lock so concurrent workers
            # never hand out the same slice of the datalist.
            with self.lock:
                if setting == 'train':
                    datalist, self.train_cursor = self.batch_data(self.train_list, self.train_cursor, batch_size = batch_size)
                else:
                    datalist, self.valid_cursor = self.batch_data(self.valid_list, self.valid_cursor, batch_size = batch_size)
            first_time = True
            for i, x_data_path in enumerate(datalist):
                x_image_path = os.path.join(self.dataset_dir, x_data_path + '.jpg')
                x_image = self.imread(x_image_path, normalize=True)
                x_landmarks = self.load_landmarks(x_data_path)
                #angle = DIR_ANGLE[x_data_path[18:21]] # x_data_path includes the leading directory path
                # Angle is hard-coded to 0, so the mirror branches below never
                # run — NOTE(review): confirm this is intentional.
                angle =0
                try:
                    x_face, x_leye, x_reye, x_nose, x_mouth = self.crop(x_image, x_landmarks, angle=angle)
                except (Exception, cv2.error) as e:
                    # Skip samples whose landmarks lead to an invalid crop.
                    print(e)
                    print(x_data_path)
                    continue
                if self.mirror_to_one_side and angle < 0:
                    # Mirror negative-angle images (and swap the eyes) so the
                    # model only ever sees one side.
                    x_face = x_face[:,::-1,:]
                    buff = x_leye[:,::-1,:]
                    x_leye = x_reye[:,::-1,:]
                    x_reye = buff
                    x_nose = x_nose[:,::-1,:]
                    x_mouth = x_mouth[:,::-1,:]
                #y_data_path = x_data_path[:3]+'t'+ x_data_path[4:]
                # Ground truth: same sample with the last two path characters
                # replaced by '11' — presumably selects the frontal capture of
                # the same subject; TODO confirm against the datalist format.
                y_data_path = x_data_path[:-2]+'11'
                #y_data_path = y_data_path[:-5]
                y_image_path = os.path.join(self.dataset_dir,y_data_path +'.jpg')
                y_image = self.imread(y_image_path, normalize=True)
                y_landmarks = self.load_landmarks(y_data_path)
                try:
                    y_face, y_leye, y_reye, y_nose, y_mouth = self.crop(y_image, y_landmarks, angle=0)
                except (Exception, cv2.error) as e:
                    print(e)
                    print(y_data_path)
                    continue
                '''
                if self.mirror_to_one_side and angle < 0:
                    y_face = y_face[:,::-1,:]
                    buff = y_leye[:,::-1,:]
                    y_leye = y_reye[:,::-1,:]
                    y_reye = buff
                    y_nose = y_nose[:,::-1,:]
                    y_mouth = y_mouth[:,::-1,:]
                '''
                # Multi-resolution ground-truth faces for the TP-GAN pyramid.
                y_face64 = resize(y_face, (64, 64), mode='constant')
                y_face32 = resize(y_face64, (32, 32), mode='constant')
                # to adjust subject id starts from 0. (original multi pie subject id starts from 1)
                #print('x_data_path : ',x_data_path)
                #print('x_data_path[-28:-25] : ', x_data_path[-29:-26])
                #y_subject_id = int(x_data_path[-29:-26])
                y_subject_id = int(x_data_path[-5:-3]) - 1
                y_subject_id = np_utils.to_categorical(y_subject_id, NUM_SUBJECTS)
                y_face_gray = rgb2gray(y_face)[:, :, np.newaxis]
                if first_time:
                    # Seed the batch arrays with the first successful sample.
                    first_time = False
                    x_faces = x_face[np.newaxis,:]
                    x_leyes = x_leye[np.newaxis,:]
                    x_reyes = x_reye[np.newaxis,:]
                    x_noses = x_nose[np.newaxis,:]
                    x_mouthes = x_mouth[np.newaxis,:]
                    y_faces = y_face[np.newaxis,:]
                    y_face_grays = y_face_gray[np.newaxis,:]
                    y_faces64 = y_face64[np.newaxis,:]
                    y_faces32 = y_face32[np.newaxis,:]
                    y_subject_ids = y_subject_id[np.newaxis,:]
                    y_leyes = y_leye[np.newaxis,:]
                    y_reyes = y_reye[np.newaxis,:]
                    y_noses = y_nose[np.newaxis,:]
                    y_mouthes = y_mouth[np.newaxis,:]
                else:
                    # Debug traces for shape mismatches before concatenating.
                    if x_leyes.shape[1:] != x_leye.shape:
                        print(x_leyes.shape)
                        print(x_leye.shape)
                    if x_reyes.shape[1:] != x_reye.shape:
                        print(x_reyes.shape)
                        print(x_reye.shape)
                    if x_noses.shape[1:] != x_nose.shape:
                        print(x_noses.shape)
                        print(x_nose.shape)
                    if x_mouthes.shape[1:] != x_mouth.shape:
                        print(x_mouthes.shape)
                        print(x_mouth.shape)
                    x_faces = np.concatenate((x_faces, x_face[np.newaxis,:]), axis=0)
                    x_leyes = np.concatenate((x_leyes, x_leye[np.newaxis,:]), axis=0)
                    x_reyes = np.concatenate((x_reyes, x_reye[np.newaxis,:]), axis=0)
                    x_noses = np.concatenate((x_noses, x_nose[np.newaxis,:]), axis=0)
                    x_mouthes = np.concatenate((x_mouthes, x_mouth[np.newaxis,:]), axis=0)
                    y_faces = np.concatenate((y_faces, y_face[np.newaxis,:]), axis=0)
                    y_face_grays = np.concatenate((y_face_grays, y_face_gray[np.newaxis,:]), axis=0)
                    y_faces64 = np.concatenate((y_faces64, y_face64[np.newaxis,:]), axis=0)
                    y_faces32 = np.concatenate((y_faces32, y_face32[np.newaxis,:]), axis=0)
                    y_subject_ids = np.concatenate((y_subject_ids, y_subject_id[np.newaxis,:]), axis=0)
                    y_leyes = np.concatenate((y_leyes, y_leye[np.newaxis,:]), axis=0)
                    y_reyes = np.concatenate((y_reyes, y_reye[np.newaxis,:]), axis=0)
                    y_noses = np.concatenate((y_noses, y_nose[np.newaxis,:]), axis=0)
                    y_mouthes = np.concatenate((y_mouthes, y_mouth[np.newaxis,:]), axis=0)
            # Per-sample noise vector for the generator input.
            # NOTE(review): if every sample in the batch failed to crop, the
            # batch arrays are unbound and this raises UnboundLocalError.
            x_z = np.random.normal(scale=0.02, size=(x_faces.shape[0], 100))
            return [x_faces, x_leyes, x_reyes, x_noses, x_mouthes, x_z], [y_faces, y_faces, y_faces, y_faces, y_faces, y_faces64, y_faces64, y_faces32, y_faces32, y_subject_ids, y_leyes, y_reyes, y_noses, y_mouthes]
        if self.workers > 1:
            # use easy thread implementing
            # it is especially effective when getting data from google cloud storage
            data_pool = []
            while True:
                if len(data_pool) > 0:
                    next_data = data_pool.pop(0)
                else:
                    next_data = get_next()
                while self.thread_pool_executor._work_queue.qsize() == 0 and len(data_pool) < self.workers:
                    self.thread_pool_executor.submit(fn=lambda : data_pool.append(get_next()))
                yield next_data
        else:
            # dont use thread
            while True:
                next_data = get_next()
                yield next_data
    def get_class_generator(self, batch_size = 16, setting = 'train'):
        """
        Data generator for fine tuning the lcnn model with MULTI-PIE.
        Yields (x_faces, y_subject_ids) batches forever, where x_faces are
        cropped faces with a trailing singleton axis appended and
        y_subject_ids are one-hot vectors over NUM_SUBJECTS.
        Args:
            batch_size (int): Number of images per batch
            setting (str): str of desired dataset type; 'train'/'valid'
        """
        def get_next():
            # Cursor advance is guarded by the lock so concurrent workers
            # never hand out the same slice of the datalist.
            with self.lock:
                if setting == 'train':
                    datalist, self.train_cursor = self.batch_data(self.train_list, self.train_cursor, batch_size = batch_size)
                else:
                    datalist, self.valid_cursor = self.batch_data(self.valid_list, self.valid_cursor, batch_size = batch_size)
            first_time = True
            for i, x_data_path in enumerate(datalist):
                x_image_path = os.path.join(self.dataset_dir, x_data_path + '.jpg')
                x_image = self.imread(x_image_path, normalize=True)
                x_landmarks = self.load_landmarks(x_data_path)
                #angle = DIR_ANGLE[x_data_path[-21:-17]]
                #angle = DIR_ANGLE[x_data_path[18:21]]
                # Angle is hard-coded to 0, so the mirror branch below never
                # runs — NOTE(review): confirm this is intentional.
                angle = 0
                try:
                    # Only the full face crop is needed for classification.
                    x_face = self.crop(x_image, x_landmarks, angle=angle)[0]
                except (Exception, cv2.error) as e:
                    # Skip samples whose landmarks lead to an invalid crop.
                    print(e)
                    print(x_data_path)
                    continue
                # NOTE(review): this appends a trailing axis; assumes the crop
                # is 2-D (grayscale source images) — confirm, since a color
                # crop would become 4-D here.
                x_face = x_face[:,:,np.newaxis]
                if self.mirror_to_one_side and angle < 0:
                    x_face = x_face[:,::-1,:]
                # to adjust subject id starts from 0. (original multi pie subject id starts from 1)
                #y_subject_id = int(x_data_path[-28:-25]) - 1
                y_subject_id = int(x_data_path[-5:-3]) - 1
                y_subject_id = np_utils.to_categorical(y_subject_id, NUM_SUBJECTS)
                if first_time:
                    # Seed the batch arrays with the first successful sample.
                    first_time = False
                    x_faces = x_face[np.newaxis,:]
                    y_subject_ids = y_subject_id[np.newaxis,:]
                else:
                    x_faces = np.concatenate((x_faces, x_face[np.newaxis,:]), axis=0)
                    y_subject_ids = np.concatenate((y_subject_ids, y_subject_id[np.newaxis,:]), axis=0)
            # NOTE(review): if every sample in the batch failed to crop, the
            # batch arrays are unbound and this raises UnboundLocalError.
            return x_faces, y_subject_ids
        if self.workers > 1:
            # use easy thread implementing
            # it is very effective when getting data from google cloud storage
            data_pool = []
            while True:
                if len(data_pool) > 0:
                    next_data = data_pool.pop(0)
                else:
                    next_data = get_next()
                while self.thread_pool_executor._work_queue.qsize() == 0 and len(data_pool) < self.workers:
                    self.thread_pool_executor.submit(fn=lambda : data_pool.append(get_next()))
                yield next_data
        else:
            # dont use thread
            while True:
                next_data = get_next()
                yield next_data
    def get_discriminator_generator(self, generator, batch_size=16, gt_shape=(4, 4), setting = 'train'):
        """
        Data generator for training the discriminator model.

        Each yielded batch is half fake (generator output fed with profile
        crops + noise) and half real (frontal face crops), with patch labels
        of shape gt_shape: zeros for fake, ones for real.

        Args:
            generator (Model): generator model
            batch_size (int): Number of images per batch
            gt_shape (tuple): shape of return y
            setting (str): str of desired dataset type; 'train'/'valid'
        """
        def get_next():
            # Half of batch_size per side (fake/real); cursor advanced under
            # the lock for thread safety.
            with self.lock:
                if setting == 'train':
                    datalist, self.train_cursor = self.batch_data(self.train_list, self.train_cursor, batch_size = batch_size//2)
                else:
                    datalist, self.valid_cursor = self.batch_data(self.valid_list, self.valid_cursor, batch_size = batch_size//2)
            first_time = True
            # --- collect the generator inputs (profile face + parts) ---
            for data_path_for_fake in datalist:
                profile_image_path = os.path.join(self.dataset_dir, data_path_for_fake + '.jpg')
                profile_image = self.imread(profile_image_path, normalize=True)
                profile_landmarks = self.load_landmarks(data_path_for_fake)
                #angle = DIR_ANGLE[data_path_for_fake[18:21]]
                angle = 0
                #if data_path_for_fake[38:42] == 'flip':
                #    angle = -angle
                try:
                    profile_face, profile_leye, profile_reye, profile_nose, profile_mouth = self.crop(profile_image, profile_landmarks, angle=angle)
                except (Exception, cv2.error) as e:
                    # Skip samples whose landmarks make the crop fail.
                    print(e)
                    print(data_path_for_fake)
                    continue
                if self.mirror_to_one_side and angle < 0:
                    # Mirror horizontally; left/right eyes swap under the flip.
                    profile_face = profile_face[:,::-1,:]
                    buff = profile_leye[:,::-1,:]
                    profile_leye = profile_reye[:,::-1,:]
                    profile_reye = buff
                    profile_nose = profile_nose[:,::-1,:]
                    profile_mouth = profile_mouth[:,::-1,:]
                if first_time:
                    first_time = False
                    profile_faces = profile_face[np.newaxis,:]
                    profile_leyes = profile_leye[np.newaxis,:]
                    profile_reyes = profile_reye[np.newaxis,:]
                    profile_noses = profile_nose[np.newaxis,:]
                    profile_mouthes = profile_mouth[np.newaxis,:]
                else:
                    profile_faces = np.concatenate((profile_faces, profile_face[np.newaxis,:]), axis=0)
                    profile_leyes = np.concatenate((profile_leyes, profile_leye[np.newaxis,:]), axis=0)
                    profile_reyes = np.concatenate((profile_reyes, profile_reye[np.newaxis,:]), axis=0)
                    profile_noses = np.concatenate((profile_noses, profile_nose[np.newaxis,:]), axis=0)
                    profile_mouthes = np.concatenate((profile_mouthes, profile_mouth[np.newaxis,:]), axis=0)
            # Last input is the 100-dim noise vector per sample.
            x_fake_inputs = [profile_faces, profile_leyes, profile_reyes, profile_noses, profile_mouthes, np.random.normal(scale=0.02, size=(profile_faces.shape[0], 100))]
            first_time = True
            # --- collect the real (frontal) faces for the same subjects ---
            for data_path_for_real in datalist:
                #front_data_path = data_path_for_real[:3]+'t'+ data_path_for_real[4:]
                # Swap the camera suffix for the frontal view ('11').
                front_data_path = data_path_for_real[:-2]+'11'
                #front_data_path = front_data_path[:-5]
                front_image_path = os.path.join(self.dataset_dir ,front_data_path +'.jpg')
                front_image = self.imread(front_image_path, normalize=True)
                front_landmarks = self.load_landmarks(front_data_path)
                try:
                    front_face = self.crop(front_image, front_landmarks, angle=0)[0]
                except (Exception, cv2.error) as e:
                    print(e)
                    print(front_data_path)
                    continue
                # NOTE(review): 'angle' here is left over from the fake-image
                # loop above (always 0 there), so this mirror branch never
                # triggers -- confirm intent.
                if self.mirror_to_one_side and angle < 0:
                    front_face = front_face[:,::-1,:]
                if first_time:
                    first_time = False
                    x_real = front_face[np.newaxis,:]
                else:
                    x_real = np.concatenate((x_real, front_face[np.newaxis,:]), axis=0)
            return x_fake_inputs, x_real
        def make_batch(x_fake_inputs, x_real):
            # Run the generator to produce fakes; label fakes 0 and reals 1
            # as (H, W, 1) patch maps of shape gt_shape.
            x_fake = generator.predict(x_fake_inputs)[0]
            y_fake = np.zeros(shape=(x_fake.shape[0], *gt_shape, 1))
            y_real = np.ones(shape=(x_real.shape[0], *gt_shape, 1))
            return np.concatenate([x_fake, x_real]), np.concatenate([y_fake, y_real])
        if self.workers > 1:
            # use easy thread implementing
            # it is especially effective when getting data from google cloud storage
            data_pool = []
            while True:
                if len(data_pool) > 0:
                    next_data = data_pool.pop(0)
                else:
                    next_data = get_next()
                # Refill the pool in the background while the executor is idle.
                while self.thread_pool_executor._work_queue.qsize() == 0 and len(data_pool) < self.workers:
                    self.thread_pool_executor.submit(fn=lambda : data_pool.append(get_next()))
                yield make_batch(*next_data)
        else:
            # dont use thread
            while True:
                next_data = get_next()
                yield make_batch(*next_data)
def Open(name, mode='r'):
    """Open *name* and return a file object (thin wrapper around builtin open).

    In this environment gfile.Open is sometimes very slow for local paths
    (anything that is not gs://), so the plain builtin open is used instead.
    """
    # Original gs:// dispatch kept for reference:
    # if len(name) >= 5 and name[:5] == 'gs://':
    #     return tf.gfile.Open(name, mode)
    return open(name, mode)
| [
"numpy.iinfo",
"numpy.array",
"numpy.linalg.norm",
"tensorflow.gfile.Exists",
"threading.Lock",
"numpy.asarray",
"numpy.max",
"numpy.concatenate",
"numpy.min",
"random.randint",
"numpy.arctan",
"numpy.random.normal",
"skimage.color.rgb2gray",
"cv2.warpAffine",
"random.shuffle",
"numpy.... | [((3485, 3520), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['self.datalist_file'], {}), '(self.datalist_file)\n', (3500, 3520), True, 'import tensorflow as tf\n'), ((4351, 4367), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (4365, 4367), False, 'import threading\n'), ((6302, 6313), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6311, 6313), False, 'import os\n'), ((9269, 9313), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['imgcenter', 'theta', '(1)'], {}), '(imgcenter, theta, 1)\n', (9292, 9313), False, 'import cv2\n'), ((9332, 9395), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'rotmat', '(image.shape[1], image.shape[0])'], {}), '(image, rotmat, (image.shape[1], image.shape[0]))\n', (9346, 9395), False, 'import cv2\n'), ((10615, 10649), 'cv2.resize', 'cv2.resize', (['crop_img', '(size, size)'], {}), '(crop_img, (size, size))\n', (10625, 10649), False, 'import cv2\n'), ((3095, 3131), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['landmarks_dict_file'], {}), '(landmarks_dict_file)\n', (3110, 3131), True, 'import tensorflow as tf\n'), ((3327, 3341), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3338, 3341), False, 'import pickle\n'), ((4291, 4330), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'workers'}), '(max_workers=workers)\n', (4309, 4330), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((7335, 7351), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (7343, 7351), False, 'import os\n'), ((7478, 7502), 'random.shuffle', 'random.shuffle', (['datalist'], {}), '(datalist)\n', (7492, 7502), False, 'import random\n'), ((7965, 8035), 'numpy.array', 'np.array', (['(landmarks[37], landmarks[38], landmarks[40], landmarks[41])'], {}), '((landmarks[37], landmarks[38], landmarks[40], landmarks[41]))\n', (7973, 8035), True, 'import numpy as np\n'), ((8071, 8141), 'numpy.array', 'np.array', (['(landmarks[43], landmarks[44], landmarks[46], landmarks[47])'], {}), 
'((landmarks[43], landmarks[44], landmarks[46], landmarks[47]))\n', (8079, 8141), True, 'import numpy as np\n'), ((8178, 8218), 'numpy.array', 'np.array', (['(landmarks[48], landmarks[54])'], {}), '((landmarks[48], landmarks[54]))\n', (8186, 8218), True, 'import numpy as np\n'), ((8874, 8900), 'numpy.array_equal', 'np.array_equal', (['reye', 'leye'], {}), '(reye, leye)\n', (8888, 8900), True, 'import numpy as np\n'), ((9586, 9676), 'numpy.array', 'np.array', (['(rot_landmarks[37], rot_landmarks[38], rot_landmarks[40], rot_landmarks[41])'], {}), '((rot_landmarks[37], rot_landmarks[38], rot_landmarks[40],\n rot_landmarks[41]))\n', (9594, 9676), True, 'import numpy as np\n'), ((9712, 9802), 'numpy.array', 'np.array', (['(rot_landmarks[43], rot_landmarks[44], rot_landmarks[46], rot_landmarks[47])'], {}), '((rot_landmarks[43], rot_landmarks[44], rot_landmarks[46],\n rot_landmarks[47]))\n', (9720, 9802), True, 'import numpy as np\n'), ((9839, 9887), 'numpy.array', 'np.array', (['(rot_landmarks[48], rot_landmarks[54])'], {}), '((rot_landmarks[48], rot_landmarks[54]))\n', (9847, 9887), True, 'import numpy as np\n'), ((10556, 10586), 'numpy.array', 'np.array', (['[crop_left, crop_up]'], {}), '([crop_left, crop_up])\n', (10564, 10586), True, 'import numpy as np\n'), ((12134, 12203), 'random.randint', 'random.randint', (['(-self.face_trembling_range)', 'self.face_trembling_range'], {}), '(-self.face_trembling_range, self.face_trembling_range)\n', (12148, 12203), False, 'import random\n'), ((12227, 12296), 'random.randint', 'random.randint', (['(-self.face_trembling_range)', 'self.face_trembling_range'], {}), '(-self.face_trembling_range, self.face_trembling_range)\n', (12241, 12296), False, 'import random\n'), ((12445, 12479), 'cv2.resize', 'cv2.resize', (['crop_img', '(size, size)'], {}), '(crop_img, (size, size))\n', (12455, 12479), False, 'import cv2\n'), ((12952, 12965), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (12962, 12965), False, 'from PIL import 
Image\n'), ((12988, 13005), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (12998, 13005), True, 'import numpy as np\n'), ((19485, 19543), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.02)', 'size': '(x_faces.shape[0], 100)'}), '(scale=0.02, size=(x_faces.shape[0], 100))\n', (19501, 19543), True, 'import numpy as np\n'), ((28553, 28600), 'numpy.zeros', 'np.zeros', ([], {'shape': '(x_fake.shape[0], *gt_shape, 1)'}), '(shape=(x_fake.shape[0], *gt_shape, 1))\n', (28561, 28600), True, 'import numpy as np\n'), ((28622, 28668), 'numpy.ones', 'np.ones', ([], {'shape': '(x_real.shape[0], *gt_shape, 1)'}), '(shape=(x_real.shape[0], *gt_shape, 1))\n', (28629, 28668), True, 'import numpy as np\n'), ((3608, 3622), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3619, 3622), False, 'import pickle\n'), ((7400, 7416), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (7408, 7416), False, 'import os\n'), ((10765, 10792), 'numpy.max', 'np.max', (['leye_points'], {'axis': '(0)'}), '(leye_points, axis=0)\n', (10771, 10792), True, 'import numpy as np\n'), ((10795, 10822), 'numpy.min', 'np.min', (['leye_points'], {'axis': '(0)'}), '(leye_points, axis=0)\n', (10801, 10822), True, 'import numpy as np\n'), ((11100, 11127), 'numpy.max', 'np.max', (['reye_points'], {'axis': '(0)'}), '(reye_points, axis=0)\n', (11106, 11127), True, 'import numpy as np\n'), ((11130, 11157), 'numpy.min', 'np.min', (['reye_points'], {'axis': '(0)'}), '(reye_points, axis=0)\n', (11136, 11157), True, 'import numpy as np\n'), ((11435, 11462), 'numpy.max', 'np.max', (['nose_points'], {'axis': '(0)'}), '(nose_points, axis=0)\n', (11441, 11462), True, 'import numpy as np\n'), ((11465, 11492), 'numpy.min', 'np.min', (['nose_points'], {'axis': '(0)'}), '(nose_points, axis=0)\n', (11471, 11492), True, 'import numpy as np\n'), ((11781, 11809), 'numpy.max', 'np.max', (['mouth_points'], {'axis': '(0)'}), '(mouth_points, axis=0)\n', (11787, 11809), True, 'import numpy as np\n'), 
((11812, 11840), 'numpy.min', 'np.min', (['mouth_points'], {'axis': '(0)'}), '(mouth_points, axis=0)\n', (11818, 11840), True, 'import numpy as np\n'), ((13993, 14045), 'os.path.join', 'os.path.join', (['self.dataset_dir', "(x_data_path + '.jpg')"], {}), "(self.dataset_dir, x_data_path + '.jpg')\n", (14005, 14045), False, 'import os\n'), ((15143, 15195), 'os.path.join', 'os.path.join', (['self.dataset_dir', "(y_data_path + '.jpg')"], {}), "(self.dataset_dir, y_data_path + '.jpg')\n", (15155, 15195), False, 'import os\n'), ((16040, 16081), 'skimage.transform.resize', 'resize', (['y_face', '(64, 64)'], {'mode': '"""constant"""'}), "(y_face, (64, 64), mode='constant')\n", (16046, 16081), False, 'from skimage.transform import resize\n'), ((16109, 16152), 'skimage.transform.resize', 'resize', (['y_face64', '(32, 32)'], {'mode': '"""constant"""'}), "(y_face64, (32, 32), mode='constant')\n", (16115, 16152), False, 'from skimage.transform import resize\n'), ((16544, 16595), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_subject_id', 'NUM_SUBJECTS'], {}), '(y_subject_id, NUM_SUBJECTS)\n', (16567, 16595), False, 'from keras.utils import np_utils\n'), ((21441, 21493), 'os.path.join', 'os.path.join', (['self.dataset_dir', "(x_data_path + '.jpg')"], {}), "(self.dataset_dir, x_data_path + '.jpg')\n", (21453, 21493), False, 'import os\n'), ((22482, 22533), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_subject_id', 'NUM_SUBJECTS'], {}), '(y_subject_id, NUM_SUBJECTS)\n', (22505, 22533), False, 'from keras.utils import np_utils\n'), ((24816, 24875), 'os.path.join', 'os.path.join', (['self.dataset_dir', "(data_path_for_fake + '.jpg')"], {}), "(self.dataset_dir, data_path_for_fake + '.jpg')\n", (24828, 24875), False, 'import os\n'), ((27059, 27123), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.02)', 'size': '(profile_faces.shape[0], 100)'}), '(scale=0.02, size=(profile_faces.shape[0], 100))\n', (27075, 27123), True, 
'import numpy as np\n'), ((27487, 27543), 'os.path.join', 'os.path.join', (['self.dataset_dir', "(front_data_path + '.jpg')"], {}), "(self.dataset_dir, front_data_path + '.jpg')\n", (27499, 27543), False, 'import os\n'), ((28701, 28733), 'numpy.concatenate', 'np.concatenate', (['[x_fake, x_real]'], {}), '([x_fake, x_real])\n', (28715, 28733), True, 'import numpy as np\n'), ((28735, 28767), 'numpy.concatenate', 'np.concatenate', (['[y_fake, y_real]'], {}), '([y_fake, y_real])\n', (28749, 28767), True, 'import numpy as np\n'), ((9131, 9177), 'numpy.arctan', 'np.arctan', (['(vec_leye2reye[1] / vec_leye2reye[0])'], {}), '(vec_leye2reye[1] / vec_leye2reye[0])\n', (9140, 9177), True, 'import numpy as np\n'), ((9452, 9475), 'numpy.transpose', 'np.transpose', (['landmarks'], {}), '(landmarks)\n', (9464, 9475), True, 'import numpy as np\n'), ((13085, 13108), 'numpy.iinfo', 'np.iinfo', (['imarray.dtype'], {}), '(imarray.dtype)\n', (13093, 13108), True, 'import numpy as np\n'), ((16643, 16659), 'skimage.color.rgb2gray', 'rgb2gray', (['y_face'], {}), '(y_face)\n', (16651, 16659), False, 'from skimage.color import rgb2gray\n'), ((18211, 18267), 'numpy.concatenate', 'np.concatenate', (['(x_faces, x_face[np.newaxis, :])'], {'axis': '(0)'}), '((x_faces, x_face[np.newaxis, :]), axis=0)\n', (18225, 18267), True, 'import numpy as np\n'), ((18297, 18353), 'numpy.concatenate', 'np.concatenate', (['(x_leyes, x_leye[np.newaxis, :])'], {'axis': '(0)'}), '((x_leyes, x_leye[np.newaxis, :]), axis=0)\n', (18311, 18353), True, 'import numpy as np\n'), ((18383, 18439), 'numpy.concatenate', 'np.concatenate', (['(x_reyes, x_reye[np.newaxis, :])'], {'axis': '(0)'}), '((x_reyes, x_reye[np.newaxis, :]), axis=0)\n', (18397, 18439), True, 'import numpy as np\n'), ((18469, 18525), 'numpy.concatenate', 'np.concatenate', (['(x_noses, x_nose[np.newaxis, :])'], {'axis': '(0)'}), '((x_noses, x_nose[np.newaxis, :]), axis=0)\n', (18483, 18525), True, 'import numpy as np\n'), ((18557, 18616), 
'numpy.concatenate', 'np.concatenate', (['(x_mouthes, x_mouth[np.newaxis, :])'], {'axis': '(0)'}), '((x_mouthes, x_mouth[np.newaxis, :]), axis=0)\n', (18571, 18616), True, 'import numpy as np\n'), ((18646, 18702), 'numpy.concatenate', 'np.concatenate', (['(y_faces, y_face[np.newaxis, :])'], {'axis': '(0)'}), '((y_faces, y_face[np.newaxis, :]), axis=0)\n', (18660, 18702), True, 'import numpy as np\n'), ((18737, 18803), 'numpy.concatenate', 'np.concatenate', (['(y_face_grays, y_face_gray[np.newaxis, :])'], {'axis': '(0)'}), '((y_face_grays, y_face_gray[np.newaxis, :]), axis=0)\n', (18751, 18803), True, 'import numpy as np\n'), ((18835, 18895), 'numpy.concatenate', 'np.concatenate', (['(y_faces64, y_face64[np.newaxis, :])'], {'axis': '(0)'}), '((y_faces64, y_face64[np.newaxis, :]), axis=0)\n', (18849, 18895), True, 'import numpy as np\n'), ((18928, 18988), 'numpy.concatenate', 'np.concatenate', (['(y_faces32, y_face32[np.newaxis, :])'], {'axis': '(0)'}), '((y_faces32, y_face32[np.newaxis, :]), axis=0)\n', (18942, 18988), True, 'import numpy as np\n'), ((19024, 19092), 'numpy.concatenate', 'np.concatenate', (['(y_subject_ids, y_subject_id[np.newaxis, :])'], {'axis': '(0)'}), '((y_subject_ids, y_subject_id[np.newaxis, :]), axis=0)\n', (19038, 19092), True, 'import numpy as np\n'), ((19122, 19178), 'numpy.concatenate', 'np.concatenate', (['(y_leyes, y_leye[np.newaxis, :])'], {'axis': '(0)'}), '((y_leyes, y_leye[np.newaxis, :]), axis=0)\n', (19136, 19178), True, 'import numpy as np\n'), ((19208, 19264), 'numpy.concatenate', 'np.concatenate', (['(y_reyes, y_reye[np.newaxis, :])'], {'axis': '(0)'}), '((y_reyes, y_reye[np.newaxis, :]), axis=0)\n', (19222, 19264), True, 'import numpy as np\n'), ((19294, 19350), 'numpy.concatenate', 'np.concatenate', (['(y_noses, y_nose[np.newaxis, :])'], {'axis': '(0)'}), '((y_noses, y_nose[np.newaxis, :]), axis=0)\n', (19308, 19350), True, 'import numpy as np\n'), ((19382, 19441), 'numpy.concatenate', 'np.concatenate', (['(y_mouthes, 
y_mouth[np.newaxis, :])'], {'axis': '(0)'}), '((y_mouthes, y_mouth[np.newaxis, :]), axis=0)\n', (19396, 19441), True, 'import numpy as np\n'), ((22836, 22892), 'numpy.concatenate', 'np.concatenate', (['(x_faces, x_face[np.newaxis, :])'], {'axis': '(0)'}), '((x_faces, x_face[np.newaxis, :]), axis=0)\n', (22850, 22892), True, 'import numpy as np\n'), ((22928, 22996), 'numpy.concatenate', 'np.concatenate', (['(y_subject_ids, y_subject_id[np.newaxis, :])'], {'axis': '(0)'}), '((y_subject_ids, y_subject_id[np.newaxis, :]), axis=0)\n', (22942, 22996), True, 'import numpy as np\n'), ((26443, 26511), 'numpy.concatenate', 'np.concatenate', (['(profile_faces, profile_face[np.newaxis, :])'], {'axis': '(0)'}), '((profile_faces, profile_face[np.newaxis, :]), axis=0)\n', (26457, 26511), True, 'import numpy as np\n'), ((26547, 26615), 'numpy.concatenate', 'np.concatenate', (['(profile_leyes, profile_leye[np.newaxis, :])'], {'axis': '(0)'}), '((profile_leyes, profile_leye[np.newaxis, :]), axis=0)\n', (26561, 26615), True, 'import numpy as np\n'), ((26651, 26719), 'numpy.concatenate', 'np.concatenate', (['(profile_reyes, profile_reye[np.newaxis, :])'], {'axis': '(0)'}), '((profile_reyes, profile_reye[np.newaxis, :]), axis=0)\n', (26665, 26719), True, 'import numpy as np\n'), ((26755, 26823), 'numpy.concatenate', 'np.concatenate', (['(profile_noses, profile_nose[np.newaxis, :])'], {'axis': '(0)'}), '((profile_noses, profile_nose[np.newaxis, :]), axis=0)\n', (26769, 26823), True, 'import numpy as np\n'), ((26861, 26932), 'numpy.concatenate', 'np.concatenate', (['(profile_mouthes, profile_mouth[np.newaxis, :])'], {'axis': '(0)'}), '((profile_mouthes, profile_mouth[np.newaxis, :]), axis=0)\n', (26875, 26932), True, 'import numpy as np\n'), ((28306, 28365), 'numpy.concatenate', 'np.concatenate', (['(x_real, front_face[np.newaxis, :])'], {'axis': '(0)'}), '((x_real, front_face[np.newaxis, :]), axis=0)\n', (28320, 28365), True, 'import numpy as np\n'), ((8408, 8438), 'numpy.linalg.norm', 
'np.linalg.norm', (['vec_mouth2reye'], {}), '(vec_mouth2reye)\n', (8422, 8438), True, 'import numpy as np\n'), ((8441, 8471), 'numpy.linalg.norm', 'np.linalg.norm', (['vec_mouth2leye'], {}), '(vec_mouth2leye)\n', (8455, 8471), True, 'import numpy as np\n'), ((7190, 7263), 'os.path.join', 'os.path.join', (['session', '"""multiview"""', 'subject', 'rec_num', 'cam_label', 'landmark'], {}), "(session, 'multiview', subject, rec_num, cam_label, landmark)\n", (7202, 7263), False, 'import os\n')] |
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import random
from Model import model
from utils import init_model
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
def project_tsne(params, dataset, pairs_x, pairs_y, dist, P_joint, device):
    """Learn a shared low-dimensional embedding with a t-SNE style loss.

    Trains one projection network (``model``) over all datasets jointly:
    each dataset contributes a KL(P || Q) term between its precomputed joint
    probabilities ``P_joint[i]`` and the Student-t affinities (``Q_tsne``) of
    its embedding, plus an MSE alignment term pulling matched pairs
    (``pairs_x[i]`` of dataset i <-> ``pairs_y[i]`` of the last dataset)
    together.

    Args:
        params: configuration object (uses col, output_dim, lr, epoch_DNN,
            batch_size, row, beta, log_DNN; batch_size may be overwritten).
        dataset (list of np.ndarray): one array per modality; modified in
            place (converted to torch tensors on ``device``).
        pairs_x, pairs_y (list): matched sample indices per dataset.
        dist: unused here; kept for interface compatibility.
        P_joint (list of np.ndarray): joint probability matrices; modified
            in place (converted to torch tensors on ``device``).
        device: torch device for all tensors.

    Returns:
        list of np.ndarray: the embedded coordinates of every dataset.
    """
    print("---------------------------------")
    print("Begin finding the embedded space")
    net = model(params.col, params.output_dim)
    Project_DNN = init_model(net, device, restore=None)
    optimizer = optim.RMSprop(Project_DNN.parameters(), lr=params.lr)
    c_mse = nn.MSELoss()
    Project_DNN.train()
    dataset_num = len(dataset)
    for i in range(dataset_num):
        P_joint[i] = torch.from_numpy(P_joint[i]).float().to(device)
        dataset[i] = torch.from_numpy(dataset[i]).float().to(device)
    for epoch in range(params.epoch_DNN):
        # FIX: np.int was removed in NumPy 1.24; the builtin int truncates
        # the float division exactly the same way.
        len_dataloader = int(np.max(params.row)/params.batch_size)
        if len_dataloader == 0:
            # Fewer samples than batch_size: run one step over everything.
            len_dataloader = 1
            params.batch_size = np.max(params.row)
        for step in range(len_dataloader):
            KL_loss = []
            for i in range(dataset_num):
                random_batch = np.random.randint(0, params.row[i], params.batch_size)
                data = dataset[i][random_batch]
                # Sub-matrix of P restricted to the sampled batch,
                # renormalized to sum to 1.
                P_tmp = torch.zeros([params.batch_size, params.batch_size]).to(device)
                for j in range(params.batch_size):
                    P_tmp[j] = P_joint[i][random_batch[j], random_batch]
                P_tmp = P_tmp / torch.sum(P_tmp)
                low_dim_data = Project_DNN(data, i)
                Q_joint = Q_tsne(low_dim_data)
                KL_loss.append(torch.sum(P_tmp * torch.log(P_tmp / Q_joint)))
            feature_loss = np.array(0)
            feature_loss = torch.from_numpy(feature_loss).to(device).float()
            for i in range(dataset_num-1):
                # Pull each dataset's embedding toward the embedding of its
                # matched samples in the biggest (last) dataset.
                low_dim = Project_DNN(dataset[i][pairs_x[i]], i)
                low_dim_biggest_dataset = Project_DNN(dataset[dataset_num-1][pairs_y[i]], len(dataset)-1)
                feature_loss += c_mse(low_dim, low_dim_biggest_dataset)
                # min_norm = torch.min(torch.norm(low_dim), torch.norm(low_dim_biggest_dataset))
                # feature_loss += torch.abs(torch.norm(low_dim) - torch.norm(low_dim_biggest_dataset))/min_norm
            loss = params.beta * feature_loss
            for i in range(dataset_num):
                loss += KL_loss[i]
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        if (epoch+1) % params.log_DNN == 0:
            print("epoch:[{:d}/{}]: loss:{:4f}, align_loss:{:4f}".format(epoch+1, \
                params.epoch_DNN, loss.data.item(), feature_loss.data.item()))
    integrated_data = []
    for i in range(dataset_num):
        integrated_data.append(Project_DNN(dataset[i], i))
        integrated_data[i] = integrated_data[i].detach().cpu().numpy()
    print("Done")
    return integrated_data
def neg_square_dists(X):
    """Negated pairwise squared Euclidean distances between the rows of X.

    Uses the expansion ||xi - xj||^2 = ||xi||^2 + ||xj||^2 - 2<xi, xj>.
    Returns an (n, n) tensor whose (i, j) entry is -||X[i] - X[j]||^2.
    """
    gram = X.mm(torch.transpose(X, 1, 0))
    sq_norms = torch.sum(X * X, 1)
    # Broadcast the row norms across columns, then across rows.
    partial_sum = torch.add(-2 * gram, sq_norms)
    dists = torch.add(torch.transpose(partial_sum, 1, 0), sq_norms)
    return -dists
def Q_tsne(Y):
    """Joint Q distribution of t-SNE: normalized Student-t (dof=1) affinities.

    neg_square_dists returns -||yi - yj||^2, so 1 - distances = 1 + D and the
    kernel is 1 / (1 + ||yi - yj||^2).
    """
    affinities = torch.pow(1. - neg_square_dists(Y), -1)
    # Remove self-affinities on the diagonal.
    affinities = affinities - torch.diag(affinities.diag(0))
    affinities = affinities + 1e-15  # guard against division by zero / log(0)
    return affinities / torch.sum(affinities)
def project_barycentric(dataset, match):
    """Embed every dataset onto the last one via its barycentric matching matrix.

    Each dataset i < len(dataset)-1 is mapped to match[i] @ dataset[-1]; the
    last dataset is kept as-is.
    """
    print("---------------------------------")
    print("Begin finding the embedded space")
    integrated_data = [np.matmul(match[i], dataset[-1]) for i in range(len(dataset) - 1)]
    integrated_data.append(dataset[-1])
    print("Done")
    return integrated_data
| [
"torch.log",
"utils.init_model",
"torch.pow",
"torch.transpose",
"numpy.max",
"torch.nn.MSELoss",
"numpy.array",
"numpy.random.randint",
"torch.sum",
"numpy.matmul",
"torch.from_numpy",
"Model.model",
"torch.zeros"
] | [((394, 430), 'Model.model', 'model', (['params.col', 'params.output_dim'], {}), '(params.col, params.output_dim)\n', (399, 430), False, 'from Model import model\n'), ((446, 483), 'utils.init_model', 'init_model', (['net', 'device'], {'restore': 'None'}), '(net, device, restore=None)\n', (456, 483), False, 'from utils import init_model\n'), ((561, 573), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (571, 573), True, 'import torch.nn as nn\n'), ((2630, 2649), 'torch.sum', 'torch.sum', (['(X * X)', '(1)'], {}), '(X * X, 1)\n', (2639, 2649), False, 'import torch\n'), ((2833, 2863), 'torch.pow', 'torch.pow', (['(1.0 - distances)', '(-1)'], {}), '(1.0 - distances, -1)\n', (2842, 2863), False, 'import torch\n'), ((2722, 2748), 'torch.transpose', 'torch.transpose', (['tmp', '(1)', '(0)'], {}), '(tmp, 1, 0)\n', (2737, 2748), False, 'import torch\n'), ((2993, 3017), 'torch.sum', 'torch.sum', (['inv_distances'], {}), '(inv_distances)\n', (3002, 3017), False, 'import torch\n'), ((956, 974), 'numpy.max', 'np.max', (['params.row'], {}), '(params.row)\n', (962, 974), True, 'import numpy as np\n'), ((1540, 1551), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (1548, 1551), True, 'import numpy as np\n'), ((3228, 3260), 'numpy.matmul', 'np.matmul', (['match[i]', 'dataset[-1]'], {}), '(match[i], dataset[-1])\n', (3237, 3260), True, 'import numpy as np\n'), ((847, 865), 'numpy.max', 'np.max', (['params.row'], {}), '(params.row)\n', (853, 865), True, 'import numpy as np\n'), ((1079, 1133), 'numpy.random.randint', 'np.random.randint', (['(0)', 'params.row[i]', 'params.batch_size'], {}), '(0, params.row[i], params.batch_size)\n', (1096, 1133), True, 'import numpy as np\n'), ((2675, 2699), 'torch.transpose', 'torch.transpose', (['X', '(1)', '(0)'], {}), '(X, 1, 0)\n', (2690, 2699), False, 'import torch\n'), ((1362, 1378), 'torch.sum', 'torch.sum', (['P_tmp'], {}), '(P_tmp)\n', (1371, 1378), False, 'import torch\n'), ((670, 698), 'torch.from_numpy', 'torch.from_numpy', 
(['P_joint[i]'], {}), '(P_joint[i])\n', (686, 698), False, 'import torch\n'), ((733, 761), 'torch.from_numpy', 'torch.from_numpy', (['dataset[i]'], {}), '(dataset[i])\n', (749, 761), False, 'import torch\n'), ((1182, 1233), 'torch.zeros', 'torch.zeros', (['[params.batch_size, params.batch_size]'], {}), '([params.batch_size, params.batch_size])\n', (1193, 1233), False, 'import torch\n'), ((1492, 1518), 'torch.log', 'torch.log', (['(P_tmp / Q_joint)'], {}), '(P_tmp / Q_joint)\n', (1501, 1518), False, 'import torch\n'), ((1570, 1600), 'torch.from_numpy', 'torch.from_numpy', (['feature_loss'], {}), '(feature_loss)\n', (1586, 1600), False, 'import torch\n')] |
import cv2
import numpy as np
import ImageLoader as il
from pprint import pprint
# Pre-trained cascade classifiers, loaded from XML files resolved relative to
# the current working directory: Haar features for frontal faces, LBP for
# side/profile faces.
FACE_CASCADE = cv2.CascadeClassifier('haar_cascade.xml')
SIDE_CASCADE = cv2.CascadeClassifier('lbpcascade_sideface.xml')
def detect_faces(img):
    """
    Detect all faces in a BGR image using both the frontal and the
    side-profile cascade.

    Returns an array of (x, y, w, h) boxes; when both cascades fire, their
    results are concatenated.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Identical detection parameters for both cascades.
    detect_params = dict(
        scaleFactor = 1.1,
        minNeighbors = 5,
        minSize = (30, 30),
        flags = 0
    )
    faces = FACE_CASCADE.detectMultiScale(gray, **detect_params)
    sides = SIDE_CASCADE.detectMultiScale(gray, **detect_params)
    if len(sides) > 0 and len(faces) > 0:
        return np.concatenate((faces, sides), axis=0)
    if len(sides) > 0:
        return sides
    return faces
def draw_boxes(img, facepositions, color):
    """
    Draw one rectangle per face box, in place on img.

    color is a tuple containing three values for bgr-colors like (0, 255, 0)
    """
    for left, top, width, height in facepositions:
        cv2.rectangle(img, (left, top), (left + width, top + height), color)
| [
"numpy.concatenate",
"cv2.CascadeClassifier",
"cv2.rectangle",
"cv2.cvtColor"
] | [((97, 138), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haar_cascade.xml"""'], {}), "('haar_cascade.xml')\n", (118, 138), False, 'import cv2\n'), ((154, 202), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""lbpcascade_sideface.xml"""'], {}), "('lbpcascade_sideface.xml')\n", (175, 202), False, 'import cv2\n'), ((307, 344), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (319, 344), False, 'import cv2\n'), ((725, 763), 'numpy.concatenate', 'np.concatenate', (['(faces, sides)'], {'axis': '(0)'}), '((faces, sides), axis=0)\n', (739, 763), True, 'import numpy as np\n'), ((1024, 1073), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', 'color'], {}), '(img, (x, y), (x + w, y + h), color)\n', (1037, 1073), False, 'import cv2\n')] |
from __future__ import division
import pywt
import numpy as np
import itertools as itt
from scipy.interpolate import interp1d
from functools import partial
from .common import *
class SimpleWaveletDensityEstimator(object):
    """Multivariate wavelet density estimator.

    Expands the density in a tensor-product wavelet basis: scaling (phi)
    coefficients at level ``j0`` plus detail (psi) coefficients for levels
    ``j0 .. j1``, with optional coefficient thresholding. After ``fit``,
    the estimate is available as the callable ``self.pdf``.
    """
    def __init__(self, wave_name, j0=1, j1=None, thresholding=None):
        self.wave = pywt.Wavelet(wave_name)
        self.j0 = j0
        # Default j1 = j0 - 1 leaves range(j0, j1 + 1) empty, i.e. only the
        # scaling coefficients at j0 contribute.
        self.j1 = j1 if j1 is not None else (j0 - 1)
        #self.multi_supports = wave_support_info(self.wave)
        # NOTE(review): set_wavefuns() reads self.multi_supports, but its
        # assignment above is commented out -- fit() cannot work until it is
        # restored or the attribute is set elsewhere. TODO confirm.
        self.pdf = None
        if thresholding is None:
            # Identity thresholding: keep every coefficient unchanged.
            self.thresholding = lambda n, j, dn, c: c
        else:
            self.thresholding = thresholding

    def fit(self, xs):
        "Fit estimator to data. xs is a numpy array of dimension n x d, n = samples, d = dimensions"
        self.dim = xs.shape[1]
        self.dimpow = 2 ** self.dim
        self.set_wavefuns(self.dim)
        self.minx = np.amin(xs, axis=0)
        self.maxx = np.amax(xs, axis=0)
        self.n = xs.shape[0]
        self.calc_coefficients(xs)
        self.pdf = self.calc_pdf()
        return True

    def set_wavefuns(self, dim):
        # Tensor-product wave functions for the base basis (used for
        # coefficient estimation) and the dual basis (used for evaluation).
        self.wave_funs = self.calc_wavefuns(dim, self.multi_supports['base'], self.wave)
        self.dual_wave_funs = self.calc_wavefuns(dim, self.multi_supports['dual'], self.wave)

    @staticmethod
    def calc_wavefuns(dim, supports, wave):
        """Build {qx: f} where f evaluates the tensor product of phi/psi
        factors selected by qx; f also carries .qx, .support and .suppf."""
        resp = {}
        phi_support, psi_support = supports
        phi, psi, _ = wave.wavefun(level=12)
        # Interpolate the sampled phi/psi onto continuous functions that are
        # zero outside their support.
        phi = interp1d(np.linspace(*phi_support, num=len(phi)), phi, fill_value=0.0, bounds_error=False)
        psi = interp1d(np.linspace(*psi_support, num=len(psi)), psi, fill_value=0.0, bounds_error=False)
        for wave_x, qx in all_qx(dim):
            f = partial(wave_tensor, qx, phi, psi)
            f.qx = qx
            f.support = support_tensor(qx, phi_support, psi_support)
            f.suppf = partial(suppf_tensor, qx, phi_support, psi_support)
            resp[tuple(qx)] = f
        return resp

    def calc_coefficients(self, xs):
        self.coeffs = {}
        self.nums = {}
        qxs = list(all_qx(self.dim))
        # qxs[0] is the all-phi combination (scaling coefficients at j0);
        # the remaining combinations are the detail coefficients per level.
        self.do_calculate_j(self.j0, qxs[0:1], xs)
        for j in range(self.j0, self.j1 + 1):
            self.do_calculate_j(j, qxs[1:], xs)

    def do_calculate_j(self, j, qxs, xs):
        """Compute coefficients and support counts for every translation zs
        of every basis combination qx at resolution level j."""
        jpow2 = 2 ** j
        if j not in self.coeffs:
            self.coeffs[j] = {}
            self.nums[j] = {}
        for ix, qx in qxs:
            wavef = self.wave_funs[qx]
            zs_min, zs_max = zs_range(wavef, self.minx, self.maxx, j)
            self.coeffs[j][qx] = {}
            self.nums[j][qx] = {}
            for zs in itt.product(*all_zs_tensor(zs_min, zs_max)):
                self.coeffs[j][qx][zs] = calc_coeff_simple(wavef, jpow2, zs, xs)
                self.nums[j][qx][zs] = calc_num(wavef.suppf, jpow2, zs, xs)

    def get_betas(self, j):
        """All detail (beta) coefficients at level j, flattened."""
        return [coeff for ix, qx in list(all_qx(self.dim))[1:] for coeff in self.coeffs[j][qx].values()]

    def get_nums(self):
        """Support counts for every detail coefficient across all levels."""
        return [coeff
                for j in self.nums
                for ix, qx in list(all_qx(self.dim))[1:]
                for coeff in self.nums[j][qx].values()]

    def calc_pdf(self):
        """Return a callable evaluating the density estimate on a coordinate
        grid (thresholding applied to detail coefficients only)."""
        def pdffun_j(coords, xs_sum, j, qxs, threshold):
            jpow2 = 2 ** j
            for ix, qx in qxs:
                wavef = self.dual_wave_funs[qx]
                # FIX: dict.iteritems() is Python 2 only and crashes on
                # Python 3; items() works on both (consistent with the
                # __future__ imports at the top of the file).
                for zs, coeff in self.coeffs[j][qx].items():
                    num = self.nums[j][qx][zs]
                    coeff_t = self.thresholding(self.n, j - self.j0, num, coeff) if threshold else coeff
                    vals = coeff_t * wavef(jpow2, zs, coords)
                    xs_sum += vals
        def pdffun(coords):
            xs_sum = np.zeros(coords[0].shape, dtype=np.float64)
            qxs = list(all_qx(self.dim))
            pdffun_j(coords, xs_sum, self.j0, qxs[0:1], False)
            for j in range(self.j0, self.j1 + 1):
                pdffun_j(coords, xs_sum, j, qxs[1:], True)
            return xs_sum
        return pdffun
| [
"numpy.amin",
"pywt.Wavelet",
"numpy.zeros",
"functools.partial",
"numpy.amax"
] | [((313, 336), 'pywt.Wavelet', 'pywt.Wavelet', (['wave_name'], {}), '(wave_name)\n', (325, 336), False, 'import pywt\n'), ((889, 908), 'numpy.amin', 'np.amin', (['xs'], {'axis': '(0)'}), '(xs, axis=0)\n', (896, 908), True, 'import numpy as np\n'), ((929, 948), 'numpy.amax', 'np.amax', (['xs'], {'axis': '(0)'}), '(xs, axis=0)\n', (936, 948), True, 'import numpy as np\n'), ((1720, 1754), 'functools.partial', 'partial', (['wave_tensor', 'qx', 'phi', 'psi'], {}), '(wave_tensor, qx, phi, psi)\n', (1727, 1754), False, 'from functools import partial\n'), ((1868, 1919), 'functools.partial', 'partial', (['suppf_tensor', 'qx', 'phi_support', 'psi_support'], {}), '(suppf_tensor, qx, phi_support, psi_support)\n', (1875, 1919), False, 'from functools import partial\n'), ((3723, 3766), 'numpy.zeros', 'np.zeros', (['coords[0].shape'], {'dtype': 'np.float64'}), '(coords[0].shape, dtype=np.float64)\n', (3731, 3766), True, 'import numpy as np\n')] |
#Coded by <NAME>
#02/09/2018 latest version.
#Copyright (c) <2018> <<NAME>>
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#%%
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
import scipy.stats as stats
import tensorflow as tf
import os
slim=tf.contrib.slim
# Reference distribution objects; none of them is used later in the visible
# script (kept, presumably, for interactive experimentation).
Exponential=tf.contrib.distributions.Exponential(rate=1.0)
Normal1=tf.contrib.distributions.Normal(loc=-2.0, scale=1.0)
Normal2=tf.contrib.distributions.Normal(loc=2.0, scale=1.0)
Normal=tf.contrib.distributions.Normal(loc=0., scale=1.)
# Current working directory; also unused in the visible code below.
directory = os.getcwd()
#%%
def sample_n(mu, sigma):
    """Reparameterized Gaussian draw: mu + sigma * eps with eps ~ N(0, I),
    shaped like mu."""
    noise = tf.random_normal(shape=tf.shape(mu))
    return mu + noise * sigma
def sample_hyper(noise_dim,K,reuse=False):
    """Draw K samples from the implicit hyper-distribution q(psi).

    A fully connected network (20-40-20 hidden units) transforms standard
    Gaussian noise into samples of the scalar mean parameter psi.

    :param noise_dim: dimensionality of the input noise vector
    :param K: number of samples (rows) to draw
    :param reuse: reuse the "hyper_q" variable scope (TF1-style weight sharing)
    :return: tensor of shape [K, 1] with the sampled psi values
    """
    z_dim = 1
    with tf.variable_scope("hyper_q") as scope:
        if reuse:
            # Share weights with a previously constructed copy of this network.
            scope.reuse_variables()
        e2 = tf.random_normal(shape=[K,noise_dim])
        input_ = e2
        h2 = slim.stack(input_,slim.fully_connected,[20,40,20])
        # Linear output layer producing one scalar per sample.
        mu = tf.reshape(slim.fully_connected(h2,z_dim,activation_fn=None,scope='implicit_hyper_mu'),[-1,1])
    return mu
#%%
# Select which target density p(z) to approximate.
data_p = {"1":"gaussian","2":"laplace","3":"gmm"}
data_number = "3"
target = data_p[data_number]
#%%
noise_dim = 10
K = 20
# K mean parameters from the implicit hyper-distribution, and one z per mean.
psi_sample = sample_hyper(noise_dim,K)
sigma = tf.constant(0.2)
z_sample = sample_n(psi_sample,sigma)
# J extra psi samples for the Monte-Carlo estimate of log q(z).
J = tf.placeholder(tf.int32, shape=())
psi_star = tf.transpose(sample_hyper(noise_dim,J,reuse=True))
# When merge > 0 the estimate also includes psi_sample itself
# (merge==1: lower bound; merge==0: upper bound).
merge = tf.placeholder(tf.int32, shape=[])
psi_star = tf.cond(merge>0,lambda:tf.concat([psi_star,tf.transpose(psi_sample)],1),lambda:psi_star)
# Log of the averaged Gaussian kernels N(z; psi_star, sigma^2), up to an
# additive constant (the 1/(sqrt(2*pi)*sigma) normalizer is omitted).
log_H= tf.log(tf.reduce_mean(tf.exp(-0.5*tf.square(z_sample-psi_star)/tf.square(sigma)),axis=1,keep_dims=True))
#log_Q = -tf.log(sigma)-0.5*tf.square(z_sample-psi_sample)/tf.square(sigma)
#regular = log_Q - log_H
# Unnormalized log target density log p(z) for the selected target.
if target == 'gaussian':
    log_P = -tf.log(3.0)-0.5*tf.square(z_sample)/tf.square(3.0) #gaussian
elif target == 'laplace':
    log_P = -0.5*tf.abs(z_sample) #laplace(mu=0,b=2)
elif target == 'gmm':
    log_P =tf.log(0.3*tf.exp(-tf.square(z_sample+2)/2)+0.7*tf.exp(-tf.square(z_sample-2)/2))
else:
    raise ValueError('No pre-defined target distribution, you can write your own log(PDF) ')
# KL-style objective E[log q_est(z) - log p(z)], minimized with Adam over
# the sampler network's weights only.
loss = tf.reduce_mean(log_H - log_P)
nn_var = slim.get_model_variables()
lr=tf.constant(0.01)
train_op1 = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss,var_list=nn_var)
init_op=tf.global_variables_initializer()
#%%
# merge==1 corresponds to lower bound; merge==0 corresponds to upper bound
sess=tf.InteractiveSession()
sess.run(init_op)
record = []
# 5000 Adam steps with an exponentially decayed learning rate
# (0.01 * 0.75**(i/100); true division via the __future__ import above),
# using J=100 psi samples for each log-q estimate.
for i in range(5000):
    _,cost=sess.run([train_op1,loss],{lr:0.01*(0.75**(i/100)),J:100,merge:1})
    record.append(cost)
    if i%500 == 0:
        # Report the mean loss over the last window, then reset the window.
        print("iter:", '%04d' % (i+1), "cost=", np.mean(record))
        record = []
#%% plot Q and target
# Draw 100 batches of z (K samples each) from the trained sampler.
r_hive=[]
for i in range(100):
    r = sess.run(z_sample)
    r_hive.extend(np.squeeze(r))
# Evaluate the analytic target pdf on a grid for visual comparison.
yy=[]
xx = np.arange(-10,10,0.01)
for r in xx:
    if target=='gaussian':
        pdf = stats.norm.pdf(r, loc=0, scale=3) #gaussian
    elif target=='laplace':
        # 1/(2b) * exp(-|r|/b) with b=2; 1/4 is true division here thanks to
        # the __future__ division import at the top of the file.
        pdf = 1/4*np.exp(-0.5*np.abs(r)) #laplace
    elif target=='gmm':
        pdf = 0.3*stats.norm.pdf(r, loc=-2, scale=1)+0.7*stats.norm.pdf(r, loc=2, scale=1) #gmm
    yy.append(pdf)
ax=plt.figure()
ax=sns.distplot(r_hive,label='Q distribution')
ax=plt.plot(xx,yy,'y-',label='P distribution')
plt.legend()
#%% plot latent
# Histogram of the hyper-posterior samples psi (the implicit mixture means).
latent=[]
for i in range(200):
    muu = sess.run(psi_sample)
    latent.extend(np.squeeze(muu))
plt.figure()
sns.distplot(latent)
# Raw string: "\m" is an invalid escape sequence in a normal string literal
# (DeprecationWarning since Python 3.6); r"..." keeps the displayed text
# identical while silencing the warning.
plt.title(r"Gaussian \mu_i")
| [
"tensorflow.shape",
"tensorflow.transpose",
"tensorflow.contrib.distributions.Normal",
"tensorflow.reduce_mean",
"tensorflow.log",
"numpy.arange",
"numpy.mean",
"tensorflow.random_normal",
"seaborn.distplot",
"tensorflow.placeholder",
"matplotlib.pyplot.plot",
"tensorflow.square",
"tensorflo... | [((1405, 1451), 'tensorflow.contrib.distributions.Exponential', 'tf.contrib.distributions.Exponential', ([], {'rate': '(1.0)'}), '(rate=1.0)\n', (1441, 1451), True, 'import tensorflow as tf\n'), ((1460, 1512), 'tensorflow.contrib.distributions.Normal', 'tf.contrib.distributions.Normal', ([], {'loc': '(-2.0)', 'scale': '(1.0)'}), '(loc=-2.0, scale=1.0)\n', (1491, 1512), True, 'import tensorflow as tf\n'), ((1521, 1572), 'tensorflow.contrib.distributions.Normal', 'tf.contrib.distributions.Normal', ([], {'loc': '(2.0)', 'scale': '(1.0)'}), '(loc=2.0, scale=1.0)\n', (1552, 1572), True, 'import tensorflow as tf\n'), ((1580, 1631), 'tensorflow.contrib.distributions.Normal', 'tf.contrib.distributions.Normal', ([], {'loc': '(0.0)', 'scale': '(1.0)'}), '(loc=0.0, scale=1.0)\n', (1611, 1631), True, 'import tensorflow as tf\n'), ((1643, 1654), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1652, 1654), False, 'import os\n'), ((2366, 2382), 'tensorflow.constant', 'tf.constant', (['(0.2)'], {}), '(0.2)\n', (2377, 2382), True, 'import tensorflow as tf\n'), ((2428, 2462), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '()'}), '(tf.int32, shape=())\n', (2442, 2462), True, 'import tensorflow as tf\n'), ((2536, 2570), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[]'}), '(tf.int32, shape=[])\n', (2550, 2570), True, 'import tensorflow as tf\n'), ((3292, 3321), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(log_H - log_P)'], {}), '(log_H - log_P)\n', (3306, 3321), True, 'import tensorflow as tf\n'), ((3363, 3380), 'tensorflow.constant', 'tf.constant', (['(0.01)'], {}), '(0.01)\n', (3374, 3380), True, 'import tensorflow as tf\n'), ((3474, 3507), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3505, 3507), True, 'import tensorflow as tf\n'), ((3594, 3617), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (3615, 3617), True, 'import tensorflow as 
tf\n'), ((4001, 4025), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(0.01)'], {}), '(-10, 10, 0.01)\n', (4010, 4025), True, 'import numpy as np\n'), ((4344, 4356), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4354, 4356), True, 'from matplotlib import pyplot as plt\n'), ((4360, 4404), 'seaborn.distplot', 'sns.distplot', (['r_hive'], {'label': '"""Q distribution"""'}), "(r_hive, label='Q distribution')\n", (4372, 4404), True, 'import seaborn as sns\n'), ((4407, 4453), 'matplotlib.pyplot.plot', 'plt.plot', (['xx', 'yy', '"""y-"""'], {'label': '"""P distribution"""'}), "(xx, yy, 'y-', label='P distribution')\n", (4415, 4453), True, 'from matplotlib import pyplot as plt\n'), ((4451, 4463), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4461, 4463), True, 'from matplotlib import pyplot as plt\n'), ((4586, 4598), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4596, 4598), True, 'from matplotlib import pyplot as plt\n'), ((4599, 4619), 'seaborn.distplot', 'sns.distplot', (['latent'], {}), '(latent)\n', (4611, 4619), True, 'import seaborn as sns\n'), ((4620, 4648), 'matplotlib.pyplot.title', 'plt.title', (['"""Gaussian \\\\mu_i"""'], {}), "('Gaussian \\\\mu_i')\n", (4629, 4648), True, 'from matplotlib import pyplot as plt\n'), ((1832, 1860), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""hyper_q"""'], {}), "('hyper_q')\n", (1849, 1860), True, 'import tensorflow as tf\n'), ((1938, 1976), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '[K, noise_dim]'}), '(shape=[K, noise_dim])\n', (1954, 1976), True, 'import tensorflow as tf\n'), ((3393, 3433), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (3415, 3433), True, 'import tensorflow as tf\n'), ((3974, 3987), 'numpy.squeeze', 'np.squeeze', (['r'], {}), '(r)\n', (3984, 3987), True, 'import numpy as np\n'), ((4078, 4111), 'scipy.stats.norm.pdf', 'stats.norm.pdf', (['r'], {'loc': '(0)', 
'scale': '(3)'}), '(r, loc=0, scale=3)\n', (4092, 4111), True, 'import scipy.stats as stats\n'), ((4569, 4584), 'numpy.squeeze', 'np.squeeze', (['muu'], {}), '(muu)\n', (4579, 4584), True, 'import numpy as np\n'), ((1717, 1729), 'tensorflow.shape', 'tf.shape', (['mu'], {}), '(mu)\n', (1725, 1729), True, 'import tensorflow as tf\n'), ((2926, 2937), 'tensorflow.log', 'tf.log', (['(3.0)'], {}), '(3.0)\n', (2932, 2937), True, 'import tensorflow as tf\n'), ((2962, 2976), 'tensorflow.square', 'tf.square', (['(3.0)'], {}), '(3.0)\n', (2971, 2976), True, 'import tensorflow as tf\n'), ((3030, 3046), 'tensorflow.abs', 'tf.abs', (['z_sample'], {}), '(z_sample)\n', (3036, 3046), True, 'import tensorflow as tf\n'), ((3839, 3854), 'numpy.mean', 'np.mean', (['record'], {}), '(record)\n', (3846, 3854), True, 'import numpy as np\n'), ((2625, 2649), 'tensorflow.transpose', 'tf.transpose', (['psi_sample'], {}), '(psi_sample)\n', (2637, 2649), True, 'import tensorflow as tf\n'), ((2743, 2759), 'tensorflow.square', 'tf.square', (['sigma'], {}), '(sigma)\n', (2752, 2759), True, 'import tensorflow as tf\n'), ((2942, 2961), 'tensorflow.square', 'tf.square', (['z_sample'], {}), '(z_sample)\n', (2951, 2961), True, 'import tensorflow as tf\n'), ((2714, 2744), 'tensorflow.square', 'tf.square', (['(z_sample - psi_star)'], {}), '(z_sample - psi_star)\n', (2723, 2744), True, 'import tensorflow as tf\n'), ((4180, 4189), 'numpy.abs', 'np.abs', (['r'], {}), '(r)\n', (4186, 4189), True, 'import numpy as np\n'), ((4242, 4276), 'scipy.stats.norm.pdf', 'stats.norm.pdf', (['r'], {'loc': '(-2)', 'scale': '(1)'}), '(r, loc=-2, scale=1)\n', (4256, 4276), True, 'import scipy.stats as stats\n'), ((4281, 4314), 'scipy.stats.norm.pdf', 'stats.norm.pdf', (['r'], {'loc': '(2)', 'scale': '(1)'}), '(r, loc=2, scale=1)\n', (4295, 4314), True, 'import scipy.stats as stats\n'), ((3118, 3141), 'tensorflow.square', 'tf.square', (['(z_sample + 2)'], {}), '(z_sample + 2)\n', (3127, 3141), True, 'import tensorflow as 
tf\n'), ((3155, 3178), 'tensorflow.square', 'tf.square', (['(z_sample - 2)'], {}), '(z_sample - 2)\n', (3164, 3178), True, 'import tensorflow as tf\n')] |
import sys
import argparse
import os.path
import math as m
import cv2
import numpy as np
import yaml
import traceback
try:
import quaternion
except:
print('Install numpy-quaternion %s (%s) (which also requires scipy and optionally numba)' %
("pip3 install numpy-quaternion", "https://github.com/moble/quaternion"))
sys.exit(1)
try:
from plyfile import PlyData, PlyElement
except:
print('Install python-plyfile from https://github.com/dranjan/python-plyfile (using pip: pip install plyfile')
sys.exit(1)
from typing import List
from typing import Tuple
Vector = List[float]
SList = List[str]
STuple = Tuple[str]
def main(argv=None):
    """Command-line entry point: parse arguments, read the calibration yaml
    and project the ply point cloud back onto the camera image.

    :param argv: optional argument list (defaults to sys.argv); kept for API
        compatibility even though argparse reads sys.argv itself.
    """
    if argv is None:
        argv = sys.argv
    files_help = """
    The yaml file from TangoCamera. Optionally also specify the ply file and/or
    the jpg file if their basenames differ from the yaml file.
    If the file basenames are the same then only the basename (without the yaml
    extension) needs to be specified without specifying any other specific filenames.
    """
    parser = argparse.ArgumentParser(description='Project 3d points from ply file back onto image.')
    parser.add_argument("-c", '--caxis', dest='caxis',
                        help="Axis to color code points from (x, y or z) . Default is none")
    parser.add_argument('files', metavar='{file.yaml [file.ply] [file.jpg]}', nargs='+',
                        help=files_help)
    try:
        args = parser.parse_args()
    except SystemExit:
        # argparse exits on bad arguments; print the full help before leaving
        # with a non-zero status.
        try:
            parser.print_help()
        except Exception:
            print('project {files} ', file=sys.stderr)
            print('{files} :', files_help, file=sys.stderr)
        sys.exit(1)
    print(args)
    if args.caxis:
        caxis = args.caxis.lower().strip()
        if caxis not in ('x', 'y', 'z'):
            print("Axis to color code (%s) must be one of x, y or z" % (caxis,), file=sys.stderr)
            sys.exit(1)
    else:
        caxis = None
    yamlfile, imgfile, plyfile, basename = process_files(args.files)
    print(yamlfile, imgfile, plyfile)
    y = None
    try:
        with open(yamlfile, "r", encoding="utf-8") as f:
            # safe_load instead of load: the calibration file is external
            # input, and plain yaml.load can instantiate arbitrary objects.
            y = yaml.safe_load(f)
    except Exception as ex:
        print('Error opening yaml file %s (%s)' % (yamlfile, str(ex)), file=sys.stderr)
        sys.exit(1)
    # Camera intrinsics matrix.
    K = np.array([[y['fx'], 0, y['cx']], [0, y['fy'], y['cy']], [0, 0, 1]])
    # Translation offset between the color and depth IMU frames.
    IT = np.array([y['imuTranslation'][0], y['imuTranslation'][1], y['imuTranslation'][2]])
    IT = IT - np.array([y['d_imuTranslation'][0], y['d_imuTranslation'][1], y['d_imuTranslation'][2]])
    distort = np.array([y['distortion'][0], y['distortion'][1], y['distortion'][2], y['distortion'][3], y['distortion'][4]])
    # NOTE(review): distortion coefficients are read but deliberately not
    # applied (None passed instead of distort) -- original behavior preserved.
    img = project(K, IT, None, plyfile, imgfile, caxis)
    if img is not None:
        cv2.imwrite(basename + "-project.jpg", img)
        cv2.imshow(os.path.basename(imgfile), img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
def project(K, IT, distort, plyfile, imgfile, caxis=None):
    """Project every ply vertex onto the image and draw a dot per vertex.

    :param K: 3x3 camera intrinsics matrix
    :param IT: translation applied to every vertex before projection
    :param distort: distortion coefficients, or None to skip undistortion
    :param plyfile: path of the ply point cloud
    :param imgfile: path of the image to draw onto
    :param caxis: optional axis name ('x'/'y'/'z'); when given, points are
        color-coded by the quartile of that coordinate, otherwise drawn black
    :return: annotated image (numpy array) or None on error
    """
    src = cv2.imread(imgfile)
    if src is None:
        print("Error reading image file %s" % (imgfile,))
        return None
    try:
        plydata = PlyData.read(plyfile)
    except Exception as ex:
        print("Error reading ply file %s (%s)." % (plyfile, str(ex)), file=sys.stderr)
        return None
    if caxis is not None:
        import pandas as pd
        vertex_data = pd.DataFrame(plydata['vertex'].data)
        # Quartile breakpoints drive the four-color coding below.
        quarters = vertex_data.quantile(.25)
        halfs = vertex_data.quantile(.5)
        three_quarters = vertex_data.quantile(.75)
        # Column index of the chosen axis, hoisted out of the vertex loop
        # since it never changes.  NOTE(review): quarters[ci] relies on
        # positional integer indexing of a pandas Series, which is deprecated
        # in recent pandas versions -- confirm against the pinned version.
        ci = {'x': 0, 'y': 1, 'z': 2}[caxis]
    if distort is not None:
        h, w = src.shape[:2]
        NK, roi = cv2.getOptimalNewCameraMatrix(K, distort, (w, h), 0)
        mapx, mapy = cv2.initUndistortRectifyMap(K, distort, None, NK, (w, h), 5)
        img = cv2.remap(src, mapx, mapy, cv2.INTER_LINEAR)
    else:
        img = src
        NK = K
    # 4x4 pose with identity rotation and the given translation.
    P = np.eye(4)
    P[0:3, 3] = IT
    for vertex in plydata['vertex']:
        x = vertex[0]
        y = vertex[1]
        z = vertex[2]
        X = np.array([x, y, z, 1])
        if caxis is not None:
            v = (x, y, z)[ci]
            if v <= quarters[ci]:
                color = (255, 0, 0)
            elif v <= halfs[ci]:
                color = (0, 255, 0)
            elif v <= three_quarters[ci]:
                color = (0, 255, 255)
            else:
                color = (0, 0, 255)
        else:
            color = (0, 0, 0)
        XX = P.dot(X)
        xy = NK.dot(XX[0:3])
        xy = xy / xy[2]
        center = (int(xy[0]), int(xy[1]))
        cv2.circle(img, center, 2, color, 2)
    return img
def process_files(files: List[str]) -> Tuple[str, str, str, str]:
    """Resolve the yaml/jpg/ply filenames from the command-line file list.

    Accepts either a single basename (the .yaml/.jpg/.ply siblings are
    inferred) or an explicit list of the individual files.

    :param files: positional file arguments from the command line
    :return: (yamlfile, imgfile, plyfile, basename); four empty strings when
        any required file is missing.
    """
    basename = yamlfile = imgfile = plyfile = ''
    dirname = ''
    if len(files) == 1:
        filename = os.path.basename(files[0])
        dirname = os.path.dirname(os.path.abspath(files[0]))
        if filename.endswith('.'):
            basename, ext = os.path.splitext(os.path.basename(files[0]))
        else:
            basename = filename
        yamlfile = os.path.join(dirname, basename + ".yaml")
        imgfile = os.path.join(dirname, basename + ".jpg")
        plyfile = os.path.join(dirname, basename + ".ply")
    else:
        for filename in files:
            dirname = os.path.dirname(os.path.abspath(filename))
            name, ext = os.path.splitext(os.path.basename(filename))
            if ext == '.yaml':
                yamlfile = os.path.join(dirname, name + ".yaml")
                if not basename:
                    basename = name
            elif ext == '.jpg':
                imgfile = os.path.join(dirname, name + ".jpg")
                if not basename:
                    basename = name
            elif ext == '.ply':
                plyfile = os.path.join(dirname, name + ".ply")
                if not basename:
                    basename = name
        if basename:
            # Fill in files not given explicitly.  BUGFIX: derive them from
            # basename, not from the loop variable left over from the last
            # iteration (which could name a different file).
            if not yamlfile:
                yamlfile = os.path.join(dirname, basename + ".yaml")
            if not imgfile:
                imgfile = os.path.join(dirname, basename + ".jpg")
            if not plyfile:
                plyfile = os.path.join(dirname, basename + ".ply")
    # BUGFIX: the error paths used to return a 3-tuple while the success path
    # returns 4 values, so the caller's 4-way unpacking raised ValueError and
    # hid the "file not found" message.
    if not os.path.exists(yamlfile):
        print("Yaml file %s not found." % (yamlfile,), file=sys.stderr)
        return ("", "", "", "")
    if not os.path.exists(imgfile):
        print("Image file %s not found." % (imgfile,), file=sys.stderr)
        return ("", "", "", "")
    if not os.path.exists(plyfile):
        print("Ply file %s not found." % (plyfile,), file=sys.stderr)
        return ("", "", "", "")
    return (yamlfile, imgfile, plyfile, basename)
def quaternion_to_euler_angle(Q):
    """Convert a quaternion (exposing w/x/y/z attributes) to Euler angles.

    Returns (roll, pitch, yaw) in radians; the asin argument is clamped to
    [-1, 1] to guard against floating-point drift.
    """
    w, x, y, z = Q.w, Q.x, Q.y, Q.z
    y_sq = y * y
    roll = m.atan2(2.0 * (w * x + y * z), 1.0 - 2.0 * (x * x + y_sq))
    pitch_arg = 2.0 * (w * y - z * x)
    pitch_arg = max(-1.0, min(1.0, pitch_arg))
    pitch = m.asin(pitch_arg)
    yaw = m.atan2(2.0 * (w * z + x * y), 1.0 - 2.0 * (y_sq + z * z))
    return roll, pitch, yaw
def rotation2Euler(R):
    """Recover x/y/z Euler angles from a 3x3 rotation matrix.

    Near gimbal lock (cos(y) ~ 0) the z angle is unobservable and reported
    as 0.
    """
    cos_y = m.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
    if cos_y < 1e-6:
        # Degenerate (singular) configuration.
        ang_x = m.atan2(-R[1, 2], R[1, 1])
        ang_y = m.atan2(-R[2, 0], cos_y)
        ang_z = 0
    else:
        ang_x = m.atan2(R[2, 1], R[2, 2])
        ang_y = m.atan2(-R[2, 0], cos_y)
        ang_z = m.atan2(R[1, 0], R[0, 0])
    return ang_x, ang_y, ang_z
def quaternionAxisAngle(q):
    """Return (angle, axis) for quaternion q (must provide .normalized()).

    The axis is a unit numpy vector, except near angle 0 where the raw
    imaginary components are returned as-is (direction is ill-defined there).
    """
    Q = q.normalized()
    angle = 2 * m.acos(Q.w)
    sin_half = m.sqrt(1 - Q.w * Q.w)
    if sin_half < 0.001:
        # Nearly-zero rotation: avoid dividing by ~0.
        axis = np.array([Q.x, Q.y, Q.z])
    else:
        axis = np.array([Q.x / sin_half, Q.y / sin_half, Q.z / sin_half])
    return (angle, axis)
def quaternion_matrix(Q, size=3):
    """Rotation matrix from quaternion Q (w/x/y/z attributes).

    :param size: 3 for a 3x3 rotation matrix, anything else for the 4x4
        homogeneous form.
    :return: numpy matrix; a near-zero quaternion always yields the 4x4
        identity (original behavior, regardless of ``size``).
    """
    vec = np.array([Q.w, Q.x, Q.y, Q.z])
    norm_sq = np.dot(vec, vec)
    if norm_sq < np.finfo(float).eps * 4.0:
        return np.identity(4)
    vec *= m.sqrt(2.0 / norm_sq)
    q = np.outer(vec, vec)
    rot = np.array([
        [1.0 - q[2, 2] - q[3, 3], q[1, 2] - q[3, 0], q[1, 3] + q[2, 0]],
        [q[1, 2] + q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] - q[1, 0]],
        [q[1, 3] - q[2, 0], q[2, 3] + q[1, 0], 1.0 - q[1, 1] - q[2, 2]]])
    if size == 3:
        return rot
    out = np.identity(4)
    out[:3, :3] = rot
    return out
# Axis identifiers mirroring android.hardware.SensorManager's remap constants:
# the low two bits select the axis (1=X, 2=Y, 3=Z), bit 0x80 marks negation.
AXIS_X = 1
AXIS_Y = 2
AXIS_Z = 3
AXIS_MINUS_X = AXIS_X | 0x80
AXIS_MINUS_Y = AXIS_Y | 0x80
AXIS_MINUS_Z = AXIS_Z | 0x80
def androidRemapCoordinateSystem(R, X, Y):
    """Remap rotation matrix R so that its X/Y axes become the given
    SensorManager-style axis codes (AXIS_* constants; bit 0x80 = negated).

    :param R: 3x3 or 4x4 numpy rotation matrix
    :param X: axis code for the new X axis
    :param Y: axis code for the new Y axis
    :return: remapped matrix with the same shape as R, or None on invalid
        input (bad shape, bad axis bits, or X/Y naming the same axis)
    """
    # Translated from SensorManager.remapCoordinateSystem
    length = np.size(R)  # NOTE(review): computed but never used below
    rows = np.size(R, 0)
    cols = np.size(R, 1)
    # Validate matrix dimensions and axis codes.
    if (rows != 3) and (rows != 4) and (cols != 3) and (cols != 4):
        return None
    if (X & 0x7C) != 0 or (Y & 0x7C) != 0:
        return None
    if ((X & 0x3) == 0) or ((Y & 0x3) == 0):
        return None
    if (X & 0x3) == (Y & 0x3):
        return None
    # Z is "the other" axis, its sign is either +/- sign(X)*sign(Y)
    # this can be calculated by exclusive-or'ing X and Y; except for
    # the sign inversion (+/-) which is calculated below.
    Z = X ^Y
    # extract the axis (remove the sign), offset in the range 0 to 2.
    x = (X & 0x3) - 1
    y = (Y & 0x3) - 1
    z = (Z & 0x3) - 1
    # compute the sign of Z (whether it needs to be inverted)
    axis_y = (z + 1) % 3
    axis_z = (z + 2) % 3
    if ((x ^ axis_y) | (y ^ axis_z)) != 0:
        Z ^= 0x80
    # Sign flags: bit 0x80 means the corresponding axis is negated.
    sx = (X >= 0x80)
    sy = (Y >= 0x80)
    sz = (Z >= 0x80)
    outR = np.zeros((rows, cols))
    for j in range(0, 3):
        icol = R[:, j]
        # ocol is a numpy view into outR, so writes below land in outR.
        ocol = outR[:, j]
        for i in range(0, 3):
            if x == i:
                if sx:
                    ocol[i] = -icol[0]
                else:
                    ocol[i] = icol[0]
            if y == i:
                if sy:
                    ocol[i] = -icol[1]
                else:
                    ocol[i] = icol[1]
            if z == i:
                if sz:
                    ocol[i] = -icol[2]
                else:
                    ocol[i] = icol[2]
    if cols == 4:
        # Preserve the homogeneous corner for 4x4 matrices.
        outR[3, 3] = 1
    return outR
def androidRemapCoordinateSystemMatrix(R, deviceRotation):
    """Remap rotation matrix R for a display rotation of 0/90/180/270 degrees
    (any other value falls back to the default AXIS_X/AXIS_Y mapping)."""
    rotation_axes = {
        90: (AXIS_Y, AXIS_MINUS_X),
        180: (AXIS_MINUS_X, AXIS_MINUS_Y),
        270: (AXIS_MINUS_Y, AXIS_X),
    }
    x_axis, y_axis = rotation_axes.get(deviceRotation, (AXIS_X, AXIS_Y))
    return androidRemapCoordinateSystem(R, x_axis, y_axis)
def androidRemapCoordinateSystemVector(v, deviceRotation):
    """Remap vector v for a display rotation of 0/90/180/270 degrees by
    applying the corresponding axis-remapped identity matrix."""
    rotation_axes = {
        90: (AXIS_Y, AXIS_MINUS_X),
        180: (AXIS_MINUS_X, AXIS_MINUS_Y),
        270: (AXIS_MINUS_Y, AXIS_X),
    }
    x_axis, y_axis = rotation_axes.get(deviceRotation, (AXIS_X, AXIS_Y))
    remap = androidRemapCoordinateSystem(np.eye(np.size(v)), x_axis, y_axis)
    return remap.dot(v)
# Script entry point; main() returns None on success, so the exit status is 0.
if __name__ == '__main__':
    sys.exit(main())
| [
"cv2.initUndistortRectifyMap",
"math.acos",
"math.sqrt",
"cv2.remap",
"yaml.load",
"numpy.array",
"cv2.destroyAllWindows",
"sys.exit",
"argparse.ArgumentParser",
"numpy.dot",
"pandas.DataFrame",
"cv2.waitKey",
"numpy.identity",
"numpy.eye",
"numpy.size",
"cv2.getOptimalNewCameraMatrix"... | [((1036, 1128), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Project 3d points from ply file back onto image."""'}), "(description=\n 'Project 3d points from ply file back onto image.')\n", (1059, 1128), False, 'import argparse\n'), ((2250, 2317), 'numpy.array', 'np.array', (["[[y['fx'], 0, y['cx']], [0, y['fy'], y['cy']], [0, 0, 1]]"], {}), "([[y['fx'], 0, y['cx']], [0, y['fy'], y['cy']], [0, 0, 1]])\n", (2258, 2317), True, 'import numpy as np\n'), ((2655, 2742), 'numpy.array', 'np.array', (["[y['imuTranslation'][0], y['imuTranslation'][1], y['imuTranslation'][2]]"], {}), "([y['imuTranslation'][0], y['imuTranslation'][1], y[\n 'imuTranslation'][2]])\n", (2663, 2742), True, 'import numpy as np\n'), ((3133, 3248), 'numpy.array', 'np.array', (["[y['distortion'][0], y['distortion'][1], y['distortion'][2], y['distortion'\n ][3], y['distortion'][4]]"], {}), "([y['distortion'][0], y['distortion'][1], y['distortion'][2], y[\n 'distortion'][3], y['distortion'][4]])\n", (3141, 3248), True, 'import numpy as np\n'), ((3595, 3614), 'cv2.imread', 'cv2.imread', (['imgfile'], {}), '(imgfile)\n', (3605, 3614), False, 'import cv2\n'), ((4646, 4655), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (4652, 4655), True, 'import numpy as np\n'), ((7593, 7608), 'math.atan2', 'm.atan2', (['t0', 't1'], {}), '(t0, t1)\n', (7600, 7608), True, 'import math as m\n'), ((7716, 7726), 'math.asin', 'm.asin', (['t2'], {}), '(t2)\n', (7722, 7726), True, 'import math as m\n'), ((7802, 7817), 'math.atan2', 'm.atan2', (['t3', 't4'], {}), '(t3, t4)\n', (7809, 7817), True, 'import math as m\n'), ((7869, 7914), 'math.sqrt', 'm.sqrt', (['(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])'], {}), '(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])\n', (7875, 7914), True, 'import math as m\n'), ((8248, 8269), 'math.sqrt', 'm.sqrt', (['(1 - Q.w * Q.w)'], {}), '(1 - Q.w * Q.w)\n', (8254, 8269), True, 'import math as m\n'), ((8474, 8504), 'numpy.array', 
'np.array', (['[Q.w, Q.x, Q.y, Q.z]'], {}), '([Q.w, Q.x, Q.y, Q.z])\n', (8482, 8504), True, 'import numpy as np\n'), ((8512, 8524), 'numpy.dot', 'np.dot', (['q', 'q'], {}), '(q, q)\n', (8518, 8524), True, 'import numpy as np\n'), ((8598, 8613), 'math.sqrt', 'm.sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (8604, 8613), True, 'import math as m\n'), ((8621, 8635), 'numpy.outer', 'np.outer', (['q', 'q'], {}), '(q, q)\n', (8629, 8635), True, 'import numpy as np\n'), ((9435, 9445), 'numpy.size', 'np.size', (['R'], {}), '(R)\n', (9442, 9445), True, 'import numpy as np\n'), ((9456, 9469), 'numpy.size', 'np.size', (['R', '(0)'], {}), '(R, 0)\n', (9463, 9469), True, 'import numpy as np\n'), ((9480, 9493), 'numpy.size', 'np.size', (['R', '(1)'], {}), '(R, 1)\n', (9487, 9493), True, 'import numpy as np\n'), ((10327, 10349), 'numpy.zeros', 'np.zeros', (['(rows, cols)'], {}), '((rows, cols))\n', (10335, 10349), True, 'import numpy as np\n'), ((334, 345), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (342, 345), False, 'import sys\n'), ((519, 530), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (527, 530), False, 'import sys\n'), ((2751, 2844), 'numpy.array', 'np.array', (["[y['d_imuTranslation'][0], y['d_imuTranslation'][1], y['d_imuTranslation'][2]]"], {}), "([y['d_imuTranslation'][0], y['d_imuTranslation'][1], y[\n 'd_imuTranslation'][2]])\n", (2759, 2844), True, 'import numpy as np\n'), ((3382, 3425), 'cv2.imwrite', 'cv2.imwrite', (["(basename + '-project.jpg')", 'img'], {}), "(basename + '-project.jpg', img)\n", (3393, 3425), False, 'import cv2\n'), ((3481, 3495), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3492, 3495), False, 'import cv2\n'), ((3502, 3525), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3523, 3525), False, 'import cv2\n'), ((3732, 3753), 'plyfile.PlyData.read', 'PlyData.read', (['plyfile'], {}), '(plyfile)\n', (3744, 3753), False, 'from plyfile import PlyData, PlyElement\n'), ((3955, 3991), 'pandas.DataFrame', 
'pd.DataFrame', (["plydata['vertex'].data"], {}), "(plydata['vertex'].data)\n", (3967, 3991), True, 'import pandas as pd\n'), ((4330, 4382), 'cv2.getOptimalNewCameraMatrix', 'cv2.getOptimalNewCameraMatrix', (['K', 'distort', '(w, h)', '(0)'], {}), '(K, distort, (w, h), 0)\n', (4359, 4382), False, 'import cv2\n'), ((4402, 4462), 'cv2.initUndistortRectifyMap', 'cv2.initUndistortRectifyMap', (['K', 'distort', 'None', 'NK', '(w, h)', '(5)'], {}), '(K, distort, None, NK, (w, h), 5)\n', (4429, 4462), False, 'import cv2\n'), ((4475, 4519), 'cv2.remap', 'cv2.remap', (['src', 'mapx', 'mapy', 'cv2.INTER_LINEAR'], {}), '(src, mapx, mapy, cv2.INTER_LINEAR)\n', (4484, 4519), False, 'import cv2\n'), ((4868, 4890), 'numpy.array', 'np.array', (['[x, y, z, 1]'], {}), '([x, y, z, 1])\n', (4876, 4890), True, 'import numpy as np\n'), ((5527, 5563), 'cv2.circle', 'cv2.circle', (['img', 'center', '(2)', 'color', '(2)'], {}), '(img, center, 2, color, 2)\n', (5537, 5563), False, 'import cv2\n'), ((7967, 7992), 'math.atan2', 'm.atan2', (['R[2, 1]', 'R[2, 2]'], {}), '(R[2, 1], R[2, 2])\n', (7974, 7992), True, 'import math as m\n'), ((8002, 8023), 'math.atan2', 'm.atan2', (['(-R[2, 0])', 'sy'], {}), '(-R[2, 0], sy)\n', (8009, 8023), True, 'import math as m\n'), ((8034, 8059), 'math.atan2', 'm.atan2', (['R[1, 0]', 'R[0, 0]'], {}), '(R[1, 0], R[0, 0])\n', (8041, 8059), True, 'import math as m\n'), ((8077, 8103), 'math.atan2', 'm.atan2', (['(-R[1, 2])', 'R[1, 1]'], {}), '(-R[1, 2], R[1, 1])\n', (8084, 8103), True, 'import math as m\n'), ((8112, 8133), 'math.atan2', 'm.atan2', (['(-R[2, 0])', 'sy'], {}), '(-R[2, 0], sy)\n', (8119, 8133), True, 'import math as m\n'), ((8229, 8240), 'math.acos', 'm.acos', (['Q.w'], {}), '(Q.w)\n', (8235, 8240), True, 'import math as m\n'), ((8410, 8429), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (8418, 8429), True, 'import numpy as np\n'), ((8575, 8589), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (8586, 8589), True, 'import 
numpy as np\n'), ((8666, 8880), 'numpy.array', 'np.array', (['[[1.0 - q[2, 2] - q[3, 3], q[1, 2] - q[3, 0], q[1, 3] + q[2, 0]], [q[1, 2] +\n q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] - q[1, 0]], [q[1, 3] - q[2, 0\n ], q[2, 3] + q[1, 0], 1.0 - q[1, 1] - q[2, 2]]]'], {}), '([[1.0 - q[2, 2] - q[3, 3], q[1, 2] - q[3, 0], q[1, 3] + q[2, 0]],\n [q[1, 2] + q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] - q[1, 0]], [q[1, \n 3] - q[2, 0], q[2, 3] + q[1, 0], 1.0 - q[1, 1] - q[2, 2]]])\n', (8674, 8880), True, 'import numpy as np\n'), ((8922, 9179), 'numpy.array', 'np.array', (['[[1.0 - q[2, 2] - q[3, 3], q[1, 2] - q[3, 0], q[1, 3] + q[2, 0], 0.0], [q[1,\n 2] + q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] - q[1, 0], 0.0], [q[1, 3\n ] - q[2, 0], q[2, 3] + q[1, 0], 1.0 - q[1, 1] - q[2, 2], 0.0], [0.0, \n 0.0, 0.0, 1.0]]'], {}), '([[1.0 - q[2, 2] - q[3, 3], q[1, 2] - q[3, 0], q[1, 3] + q[2, 0], \n 0.0], [q[1, 2] + q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] - q[1, 0], \n 0.0], [q[1, 3] - q[2, 0], q[2, 3] + q[1, 0], 1.0 - q[1, 1] - q[2, 2], \n 0.0], [0.0, 0.0, 0.0, 1.0]])\n', (8930, 9179), True, 'import numpy as np\n'), ((11573, 11583), 'numpy.size', 'np.size', (['v'], {}), '(v)\n', (11580, 11583), True, 'import numpy as np\n'), ((1619, 1630), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1627, 1630), False, 'import sys\n'), ((1866, 1877), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1874, 1877), False, 'import sys\n'), ((2099, 2111), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (2108, 2111), False, 'import yaml\n'), ((2231, 2242), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2239, 2242), False, 'import sys\n'), ((8535, 8550), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (8543, 8550), True, 'import numpy as np\n')] |
import flask
from flask import json , request
import numpy as np
import base64
from io import BytesIO
import re
from PIL import Image
from flask import jsonify
from flask_cors import CORS
from numpy.lib.type_check import imag
import cv2
from tensorflow.keras.models import load_model
rev_class_map = {0: 'apple', 1: 'bee', 2: 'banana', 3: 'bus', 4: 'cake', 5: 'clock', 6: 'cup', 7: 'door', 8: 'elephant', 9: 'eyeglasses', 10: 'fish', 11: 'flower', 12: 'guitar', 13: 'hexagon', 14: 'ice cream', 15: 'mouse', 16: 'spoon', 17: 'strawberry', 18: 'scissors', 19: 'postcard', 20: 'pineapple', 21: 'skull', 22: 'owl', 23: 'radio', 24: 'paintbrush', 25: 'snake'}
emoji_map = {'apple': '🍎', 'banana': '🍌', 'bee': '🐝', 'bus': '🚌','cake': '🎂','clock': '🕑','cup': '🥤','door': '🚪','elephant': '🐘','eyeglasses': '👓','fish': '🐟','flower': '🌸','guitar': '🎸','hexagon': '🔷','ice cream': '🍨','mouse': '🐁','owl': '🦉','paintbrush': '🖌️','pineapple': '🍍','postcard': '🪧','radio': '📻','scissors': '✂️','skull': '💀','snake': '🐍','spoon': '🥄','strawberry': '🍓'}
# Pretrained sketch classifier; compile=False because the model is only used
# for inference, so no loss/optimizer state is needed.
model = load_model('server//v5.h5', compile = False)
app = flask.Flask(__name__)
# Allow cross-origin requests so a front-end on another origin can call the API.
CORS(app)
@app.route('/predict', methods = ['GET','POST'])
def predict():
    """Classify a base64-encoded sketch posted as JSON ``{"b64": "data:...;base64,..."}``.

    :return: JSON ``{"Predictions": "<Label> <emoji>"}`` (or "Not Recognised").
    """
    data = request.json
    b64 = data['b64']
    # Strip the data-URL prefix and decode the raw image bytes.
    im = Image.open(BytesIO(base64.b64decode(re.search(r'base64,(.*)', b64).group(1))))

    def performImageProcessing(PIL_image):
        """Crop the drawing to its first contour's bounding box and resize to
        the 28x28 model input."""
        imgarr = np.array(PIL_image)
        gray = cv2.cvtColor(imgarr, cv2.COLOR_BGR2GRAY)
        # Otsu thresholding (the fixed 100 threshold is superseded by THRESH_OTSU).
        _, binary = cv2.threshold(gray, 100, 255, cv2.THRESH_OTSU)
        inverted_binary = ~binary
        contours, _ = cv2.findContours(inverted_binary,
                                         cv2.RETR_TREE,
                                         cv2.CHAIN_APPROX_SIMPLE)
        if contours:
            # BUGFIX: the old loop recomputed boundingRect(contours[0]) once
            # per contour; compute the first contour's bounding box once.
            x, y, w, h = cv2.boundingRect(contours[0])
            cropped = gray[y:y + h, x:x + w]
        else:
            # Blank canvas: no contours found; use the whole image instead of
            # crashing on undefined bounding-box variables.
            cropped = gray
        return cv2.resize(cropped, (28, 28))

    new = np.array(performImageProcessing(im))
    # 255 - pixel inverts the grayscale before prediction; presumably the
    # model was trained on inverted strokes -- confirm with the training code.
    preds = model.predict((255 - new).reshape(1, 28, 28, 1))
    predict_class = np.argmax(preds, axis=1)
    predictions = {'Predictions': 'Not Recognised'}
    label = rev_class_map[predict_class[0]]
    if label:
        predictions['Predictions'] = label.capitalize() + ' ' + emoji_map[label]
    return jsonify(predictions)
# NOTE(review): port is passed as the string '8888'; an int is the conventional
# argument type -- confirm this Flask version coerces string ports.
app.run(port='8888')
| [
"re.search",
"flask_cors.CORS",
"flask.Flask",
"cv2.threshold",
"cv2.boundingRect",
"numpy.argmax",
"numpy.array",
"tensorflow.keras.models.load_model",
"cv2.cvtColor",
"cv2.findContours",
"cv2.resize",
"flask.jsonify"
] | [((1050, 1092), 'tensorflow.keras.models.load_model', 'load_model', (['"""server//v5.h5"""'], {'compile': '(False)'}), "('server//v5.h5', compile=False)\n", (1060, 1092), False, 'from tensorflow.keras.models import load_model\n'), ((1102, 1123), 'flask.Flask', 'flask.Flask', (['__name__'], {}), '(__name__)\n', (1113, 1123), False, 'import flask\n'), ((1124, 1133), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (1128, 1133), False, 'from flask_cors import CORS\n'), ((2315, 2328), 'numpy.array', 'np.array', (['new'], {}), '(new)\n', (2323, 2328), True, 'import numpy as np\n'), ((2476, 2500), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (2485, 2500), True, 'import numpy as np\n'), ((2758, 2778), 'flask.jsonify', 'jsonify', (['predictions'], {}), '(predictions)\n', (2765, 2778), False, 'from flask import jsonify\n'), ((1442, 1461), 'numpy.array', 'np.array', (['PIL_image'], {}), '(PIL_image)\n', (1450, 1461), True, 'import numpy as np\n'), ((1479, 1519), 'cv2.cvtColor', 'cv2.cvtColor', (['imgarr', 'cv2.COLOR_BGR2GRAY'], {}), '(imgarr, cv2.COLOR_BGR2GRAY)\n', (1491, 1519), False, 'import cv2\n'), ((1569, 1615), 'cv2.threshold', 'cv2.threshold', (['gray', '(100)', '(255)', 'cv2.THRESH_OTSU'], {}), '(gray, 100, 255, cv2.THRESH_OTSU)\n', (1582, 1615), False, 'import cv2\n'), ((1762, 1835), 'cv2.findContours', 'cv2.findContours', (['inverted_binary', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(inverted_binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (1778, 1835), False, 'import cv2\n'), ((2210, 2239), 'cv2.resize', 'cv2.resize', (['imfinal', '(28, 28)'], {}), '(imfinal, (28, 28))\n', (2220, 2239), False, 'import cv2\n'), ((2025, 2054), 'cv2.boundingRect', 'cv2.boundingRect', (['contours[0]'], {}), '(contours[0])\n', (2041, 2054), False, 'import cv2\n'), ((1334, 1363), 're.search', 're.search', (['"""base64,(.*)"""', 'b64'], {}), "('base64,(.*)', b64)\n", (1343, 1363), False, 'import re\n')] |
from __future__ import print_function, division, absolute_import
import unittest
import numpy as np
from numpy.testing import assert_almost_equal
from openmdao.api import Problem, Group, IndepVarComp
from openmdao.utils.assert_utils import assert_check_partials
from dymos.transcriptions.pseudospectral.components import StateInterpComp
from dymos.transcriptions.grid_data import GridData
from dymos.utils.lgr import lgr
# Toggle to render diagnostic plots when running this module by hand;
# matplotlib is only imported when plotting is requested.
SHOW_PLOTS = False
if SHOW_PLOTS:
    import matplotlib.pyplot as plt
# Test 1: Let x = t**2, f = 2*t
def x(t):
    """Analytic test state: x(t) = t**2 (works on scalars and arrays)."""
    return t * t
def f_x(t):
    """Analytic rate of x: dx/dt = 2*t."""
    return t + t
# Test 1: Let v = t**3-10*t**2, f = 3*t**2 - 20*t
def v(t):
    """Analytic test state: v(t) = t**3 - 10*t**2 (scalar- and array-safe)."""
    return t * t * (t - 10)
def f_v(t):
    """Analytic rate of v: dv/dt = 3*t**2 - 20*t."""
    return t * (3 * t - 20)
class TestStateInterpComp(unittest.TestCase):
    """Verify StateInterpComp values and partials for both transcriptions.

    Each test builds a small problem whose state histories are known
    polynomials (x = t**2, v = t**3 - 10*t**2), runs the model, compares
    the interpolated collocation values against the analytic solution,
    and checks the analytic partials via complex step.
    """
    def test_state_interp_comp_lobatto(self):
        """Scalar states x and v on a two-segment Gauss-Lobatto grid."""
        segends = np.array([0.0, 3.0, 10.0])
        gd = GridData(num_segments=2,
                      transcription_order=3,
                      segment_ends=segends,
                      transcription='gauss-lobatto')
        p = Problem(model=Group())
        states = {'x': {'units': 'm', 'shape': (1,)},
                  'v': {'units': 'm/s', 'shape': (1,)}}
        X_ivc = IndepVarComp()
        p.model.add_subsystem('X_ivc', X_ivc, promotes=['state_disc:x', 'state_disc:v'])
        X_ivc.add_output('state_disc:x', val=np.zeros(gd.subset_num_nodes['state_disc']),
                         units='m')
        X_ivc.add_output('state_disc:v', val=np.zeros(gd.subset_num_nodes['state_disc']),
                         units='m/s')
        F_ivc = IndepVarComp()
        p.model.add_subsystem('F_ivc', F_ivc, promotes=['staterate_disc:x', 'staterate_disc:v'])
        F_ivc.add_output('staterate_disc:x',
                         val=np.zeros(gd.subset_num_nodes['state_disc']),
                         units='m/s')
        F_ivc.add_output('staterate_disc:v',
                         val=np.zeros(gd.subset_num_nodes['state_disc']),
                         units='m/s**2')
        dt_dtau_ivc = IndepVarComp()
        p.model.add_subsystem('dt_dstau_ivc', dt_dtau_ivc, promotes=['dt_dstau'])
        dt_dtau_ivc.add_output('dt_dstau', val=0.0*np.zeros(gd.subset_num_nodes['col']), units='s')
        p.model.add_subsystem('state_interp_comp',
                              subsys=StateInterpComp(transcription='gauss-lobatto',
                                                     grid_data=gd,
                                                     state_options=states,
                                                     time_units='s'))
        p.model.connect('state_disc:x', 'state_interp_comp.state_disc:x')
        p.model.connect('state_disc:v', 'state_interp_comp.state_disc:v')
        p.model.connect('staterate_disc:x', 'state_interp_comp.staterate_disc:x')
        p.model.connect('staterate_disc:v', 'state_interp_comp.staterate_disc:v')
        p.model.connect('dt_dstau', 'state_interp_comp.dt_dstau')
        p.setup(force_alloc_complex=True)
        # Discretization nodes: segment boundaries, with the interior
        # boundary shared by both segments (hence repeated).
        segends_disc = segends[np.array((0, 1, 1, 2), dtype=int)]
        p['state_disc:x'] = [x(t) for t in segends_disc]
        p['staterate_disc:x'] = [f_x(t) for t in segends_disc]
        p['state_disc:v'] = [v(t) for t in segends_disc]
        p['staterate_disc:v'] = [f_v(t) for t in segends_disc]
        p['dt_dstau'] = (segends[1:] - segends[:-1]) / 2.0
        p.run_model()
        t_disc = segends_disc
        t_col = (segends[1:] + segends[:-1]) / 2.0
        if SHOW_PLOTS:  # pragma: no cover
            f, ax = plt.subplots(2, 1)
            t = np.linspace(0, 10, 100)
            x1 = x(t)
            xdot1 = f_x(t)
            x2 = v(t)
            xdot2 = f_v(t)
            ax[0].plot(t, x1, 'b-', label='$x$')
            ax[0].plot(t, xdot1, 'b--', label='$\dot{x}$')
            ax[0].plot(t_disc, p['state_disc:x'], 'bo', label='$X_d:x$')
            ax[0].plot(t_col, p['state_interp_comp.state_col:x'], 'bv', label='$X_c:x$')
            ax[0].plot(t_col, p['state_interp_comp.staterate_col:x'], marker='v', color='None',
                       mec='b', label='$Xdot_c:x$')
            ax[1].plot(t, x2, 'r-', label='$v$')
            ax[1].plot(t, xdot2, 'r--', label='$\dot{v}$')
            ax[1].plot(t_disc, p['state_disc:v'], 'ro', label='$X_d:v$')
            ax[1].plot(t_col, p['state_interp_comp.state_col:v'], 'rv', label='$X_c:v$')
            ax[1].plot(t_col, p['state_interp_comp.staterate_col:v'], marker='v', color='None',
                       mec='r', label='$Xdot_c:v$')
            ax[0].legend(loc='upper left', ncol=3)
            ax[1].legend(loc='upper left', ncol=3)
            plt.show()
        # Test 1
        assert_almost_equal(
            p['state_interp_comp.state_col:x'][:, 0], x(t_col))
        assert_almost_equal(
            p['state_interp_comp.staterate_col:x'][:, 0], f_x(t_col))
        # Test 2
        assert_almost_equal(
            p['state_interp_comp.state_col:v'][:, 0], v(t_col))
        assert_almost_equal(
            p['state_interp_comp.staterate_col:v'][:, 0], f_v(t_col))
        cpd = p.check_partials(compact_print=True, method='cs')
        assert_check_partials(cpd, atol=5.0E-5)
    def test_state_interp_comp_lobatto_vectorized(self):
        """Both states packed into one width-2 'pos' state (vectorized)."""
        segends = np.array([0.0, 3.0, 10.0])
        gd = GridData(num_segments=2,
                      transcription_order=3,
                      segment_ends=segends,
                      transcription='gauss-lobatto')
        p = Problem(model=Group())
        states = {'pos': {'units': 'm', 'shape': (2,)}}
        X_ivc = IndepVarComp()
        p.model.add_subsystem('X_ivc', X_ivc, promotes=['state_disc:pos'])
        X_ivc.add_output('state_disc:pos',
                         val=np.zeros((gd.subset_num_nodes['state_disc'], 2)), units='m')
        F_ivc = IndepVarComp()
        p.model.add_subsystem('F_ivc', F_ivc, promotes=['staterate_disc:pos'])
        F_ivc.add_output('staterate_disc:pos',
                         val=np.zeros((gd.subset_num_nodes['state_disc'], 2)),
                         units='m/s')
        dt_dtau_ivc = IndepVarComp()
        p.model.add_subsystem('dt_dstau_ivc', dt_dtau_ivc, promotes=['dt_dstau'])
        dt_dtau_ivc.add_output('dt_dstau', val=0.0*np.zeros(gd.subset_num_nodes['col']), units='s')
        p.model.add_subsystem('state_interp_comp',
                              subsys=StateInterpComp(transcription='gauss-lobatto',
                                                     grid_data=gd,
                                                     state_options=states,
                                                     time_units='s'))
        p.model.connect('state_disc:pos', 'state_interp_comp.state_disc:pos')
        p.model.connect('staterate_disc:pos', 'state_interp_comp.staterate_disc:pos')
        p.model.connect('dt_dstau', 'state_interp_comp.dt_dstau')
        p.setup(force_alloc_complex=True)
        segends_disc = segends[np.array((0, 1, 1, 2), dtype=int)]
        p['state_disc:pos'][:, 0] = [x(t) for t in segends_disc]  # [0.0, 25.0, 25.0, 100.0]
        p['staterate_disc:pos'][:, 0] = [f_x(t) for t in segends_disc]
        p['state_disc:pos'][:, 1] = [v(t) for t in segends_disc]
        p['staterate_disc:pos'][:, 1] = [f_v(t) for t in segends_disc]
        p['dt_dstau'] = (segends[1:] - segends[:-1]) / 2.0
        p.run_model()
        t_disc = segends_disc
        t_col = (segends[1:] + segends[:-1]) / 2.0
        if SHOW_PLOTS:  # pragma: no cover
            f, ax = plt.subplots(2, 1)
            print(t_disc)
            print(t_col)
            print(p['dt_dstau'])
            print(p['state_disc:pos'][:, 0])
            print(p['staterate_disc:pos'][:, 0])
            print(p['state_disc:pos'][:, 0])
            print(p['staterate_disc:pos'][:, 1])
            t = np.linspace(0, 10, 100)
            x1 = x(t)
            xdot1 = f_x(t)
            x2 = v(t)
            xdot2 = f_v(t)
            ax[0].plot(t, x1, 'b-', label='$x$')
            ax[0].plot(t, xdot1, 'b--', label='$\dot{x}$')
            ax[0].plot(t_disc, p['state_disc:pos'][:, 0], 'bo', label='$X_d:pos$')
            ax[0].plot(t_col, p['state_interp_comp.state_col:pos'][:, 0], 'bv', label='$X_c:pos$')
            ax[0].plot(t_col, p['state_interp_comp.staterate_col:pos'][:, 0], marker='v',
                       color='None', mec='b', label='$Xdot_c:pos$')
            ax[1].plot(t, x2, 'r-', label='$v$')
            ax[1].plot(t, xdot2, 'r--', label='$\dot{v}$')
            ax[1].plot(t_disc, p['state_disc:pos'][:, 1], 'ro', label='$X_d:vel$')
            ax[1].plot(t_col, p['state_interp_comp.state_col:pos'][:, 1], 'rv', label='$X_c:vel$')
            ax[1].plot(t_col, p['state_interp_comp.staterate_col:pos'][:, 1], marker='v',
                       color='None', mec='r', label='$Xdot_c:vel$')
            ax[0].legend(loc='upper left', ncol=3)
            ax[1].legend(loc='upper left', ncol=3)
            plt.show()
        # Test 1
        assert_almost_equal(
            p['state_interp_comp.state_col:pos'][:, 0], x(t_col))
        assert_almost_equal(
            p['state_interp_comp.staterate_col:pos'][:, 0], f_x(t_col))
        # Test 2
        assert_almost_equal(
            p['state_interp_comp.state_col:pos'][:, 1], v(t_col))
        assert_almost_equal(
            p['state_interp_comp.staterate_col:pos'][:, 1], f_v(t_col))
        cpd = p.check_partials(compact_print=True, method='cs')
        assert_check_partials(cpd, atol=5.0E-5)
    def test_state_interp_comp_lobatto_vectorized_different_orders(self):
        """Vectorized state with per-segment transcription orders 3 and 5."""
        segends = np.array([0.0, 3.0, 10.0])
        gd = GridData(num_segments=2,
                      transcription_order=[3, 5],
                      segment_ends=segends,
                      transcription='gauss-lobatto')
        p = Problem(model=Group())
        states = {'pos': {'units': 'm', 'shape': (2,)}}
        X_ivc = IndepVarComp()
        p.model.add_subsystem('X_ivc', X_ivc, promotes=['state_disc:pos'])
        X_ivc.add_output('state_disc:pos',
                         val=np.zeros((gd.subset_num_nodes['state_disc'], 2)), units='m')
        F_ivc = IndepVarComp()
        p.model.add_subsystem('F_ivc', F_ivc, promotes=['staterate_disc:pos'])
        F_ivc.add_output('staterate_disc:pos',
                         val=np.zeros((gd.subset_num_nodes['state_disc'], 2)),
                         units='m/s')
        dt_dtau_ivc = IndepVarComp()
        p.model.add_subsystem('dt_dstau_ivc', dt_dtau_ivc, promotes=['dt_dstau'])
        dt_dtau_ivc.add_output('dt_dstau', val=0.0*np.zeros(gd.subset_num_nodes['col']), units='s')
        p.model.add_subsystem('state_interp_comp',
                              subsys=StateInterpComp(transcription='gauss-lobatto',
                                                     grid_data=gd,
                                                     state_options=states,
                                                     time_units='s'))
        p.model.connect('state_disc:pos', 'state_interp_comp.state_disc:pos')
        p.model.connect('staterate_disc:pos', 'state_interp_comp.staterate_disc:pos')
        p.model.connect('dt_dstau', 'state_interp_comp.dt_dstau')
        p.setup(force_alloc_complex=True)
        # Order-3 segment contributes 2 disc nodes, order-5 contributes 3.
        segends_disc = np.array((0, 3, 3, 6.5, 10))
        p['state_disc:pos'][:, 0] = [x(t) for t in segends_disc]  # [0.0, 25.0, 25.0, 100.0]
        p['staterate_disc:pos'][:, 0] = [f_x(t) for t in segends_disc]
        p['state_disc:pos'][:, 1] = [v(t) for t in segends_disc]
        p['staterate_disc:pos'][:, 1] = [f_v(t) for t in segends_disc]
        p['dt_dstau'] = [3.0/2., 7.0/2, 7.0/2]
        p.run_model()
        cpd = p.check_partials(compact_print=True, method='cs')
        assert_check_partials(cpd, atol=5.0E-5)
    def test_state_interp_comp_radau(self):
        """Scalar states on a single-segment Radau pseudospectral grid."""
        gd = GridData(num_segments=1,
                      transcription_order=3,
                      segment_ends=np.array([0, 10]),
                      transcription='radau-ps')
        p = Problem(model=Group())
        states = {'x': {'units': 'm', 'shape': (1,)},
                  'v': {'units': 'm/s', 'shape': (1,)}}
        X_ivc = IndepVarComp()
        p.model.add_subsystem('X_ivc', X_ivc, promotes=['state_disc:x', 'state_disc:v'])
        X_ivc.add_output('state_disc:x', val=np.zeros(gd.subset_num_nodes['state_disc']),
                         units='m')
        X_ivc.add_output('state_disc:v', val=np.zeros(gd.subset_num_nodes['state_disc']),
                         units='m/s')
        dt_dtau_ivc = IndepVarComp()
        dt_dtau_ivc.add_output('dt_dstau', val=0.0*np.zeros(gd.subset_num_nodes['col']), units='s')
        p.model.add_subsystem('dt_dstau_ivc', dt_dtau_ivc, promotes=['dt_dstau'])
        p.model.add_subsystem('state_interp_comp',
                              subsys=StateInterpComp(transcription='radau-ps',
                                                     grid_data=gd,
                                                     state_options=states,
                                                     time_units='s'))
        p.model.connect('state_disc:x', 'state_interp_comp.state_disc:x')
        p.model.connect('state_disc:v', 'state_interp_comp.state_disc:v')
        p.model.connect('dt_dstau', 'state_interp_comp.dt_dstau')
        p.setup(force_alloc_complex=True)
        # LGR nodes on [-1, 1] mapped to the [0, 10] time interval.
        lgr_nodes, lgr_weights = lgr(3, include_endpoint=True)
        t_disc = (lgr_nodes + 1.0) * 5.0
        t_col = t_disc[:-1]
        # Test 1: Let x = t**2, f = 2*t
        p['state_disc:x'] = t_disc**2
        # Test 1: Let v = t**3-10*t**2, f = 3*t**2 - 20*t
        p['state_disc:v'] = t_disc**3-10*t_disc**2
        p['dt_dstau'] = 10/2.0
        p.run_model()
        if SHOW_PLOTS:  # pragma: no cover
            f, ax = plt.subplots(2, 1)
            t_disc = np.array([0, 5, 10])
            t_col = np.array([2.5, 7.5])
            t = np.linspace(0, 10, 100)
            x1 = t**2
            xdot1 = 2*t
            x2 = t**3 - 10*t**2
            xdot2 = 3*t**2 - 20*t
            ax[0].plot(t, x1, 'b-', label='$x$')
            ax[0].plot(t, xdot1, 'b--', label='$\dot{x}$')
            ax[0].plot(t_disc, p['state_disc:x'], 'bo', label='$X_d:x$')
            ax[0].plot(t_col, p['state_interp_comp.state_col:x'], 'bv', label='$X_c:x$')
            ax[0].plot(t_col, p['state_interp_comp.staterate_col:x'], marker='v', color='None',
                       mec='b', label='$Xdot_c:x$')
            ax[1].plot(t, x2, 'r-', label='$v$')
            ax[1].plot(t, xdot2, 'r--', label='$\dot{v}$')
            ax[1].plot(t_disc, p['state_disc:v'], 'ro', label='$X_d:v$')
            ax[1].plot(t_col, p['state_interp_comp.state_col:v'], 'rv', label='$X_c:v$')
            ax[1].plot(t_col, p['state_interp_comp.staterate_col:v'], marker='v', color='None',
                       mec='r', label='$Xdot_c:v$')
            ax[0].legend(loc='upper left', ncol=3)
            ax[1].legend(loc='upper left', ncol=3)
            plt.show()
        # Test 1
        assert_almost_equal(
            p['state_interp_comp.staterate_col:x'][:, 0], 2*t_col)
        # Test 2
        assert_almost_equal(
            p['state_interp_comp.staterate_col:v'][:, 0], 3*t_col**2 - 20*t_col)
        cpd = p.check_partials(compact_print=True, method='cs')
        assert_check_partials(cpd, atol=1.0E-5)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| [
"dymos.transcriptions.grid_data.GridData",
"dymos.transcriptions.pseudospectral.components.StateInterpComp",
"openmdao.utils.assert_utils.assert_check_partials",
"openmdao.api.IndepVarComp",
"openmdao.api.Group",
"numpy.array",
"dymos.utils.lgr.lgr",
"numpy.testing.assert_almost_equal",
"numpy.linsp... | [((15473, 15488), 'unittest.main', 'unittest.main', ([], {}), '()\n', (15486, 15488), False, 'import unittest\n'), ((844, 870), 'numpy.array', 'np.array', (['[0.0, 3.0, 10.0]'], {}), '([0.0, 3.0, 10.0])\n', (852, 870), True, 'import numpy as np\n'), ((885, 989), 'dymos.transcriptions.grid_data.GridData', 'GridData', ([], {'num_segments': '(2)', 'transcription_order': '(3)', 'segment_ends': 'segends', 'transcription': '"""gauss-lobatto"""'}), "(num_segments=2, transcription_order=3, segment_ends=segends,\n transcription='gauss-lobatto')\n", (893, 989), False, 'from dymos.transcriptions.grid_data import GridData\n'), ((1216, 1230), 'openmdao.api.IndepVarComp', 'IndepVarComp', ([], {}), '()\n', (1228, 1230), False, 'from openmdao.api import Problem, Group, IndepVarComp\n'), ((1593, 1607), 'openmdao.api.IndepVarComp', 'IndepVarComp', ([], {}), '()\n', (1605, 1607), False, 'from openmdao.api import Problem, Group, IndepVarComp\n'), ((2047, 2061), 'openmdao.api.IndepVarComp', 'IndepVarComp', ([], {}), '()\n', (2059, 2061), False, 'from openmdao.api import Problem, Group, IndepVarComp\n'), ((5170, 5208), 'openmdao.utils.assert_utils.assert_check_partials', 'assert_check_partials', (['cpd'], {'atol': '(5e-05)'}), '(cpd, atol=5e-05)\n', (5191, 5208), False, 'from openmdao.utils.assert_utils import assert_check_partials\n'), ((5287, 5313), 'numpy.array', 'np.array', (['[0.0, 3.0, 10.0]'], {}), '([0.0, 3.0, 10.0])\n', (5295, 5313), True, 'import numpy as np\n'), ((5328, 5432), 'dymos.transcriptions.grid_data.GridData', 'GridData', ([], {'num_segments': '(2)', 'transcription_order': '(3)', 'segment_ends': 'segends', 'transcription': '"""gauss-lobatto"""'}), "(num_segments=2, transcription_order=3, segment_ends=segends,\n transcription='gauss-lobatto')\n", (5336, 5432), False, 'from dymos.transcriptions.grid_data import GridData\n'), ((5605, 5619), 'openmdao.api.IndepVarComp', 'IndepVarComp', ([], {}), '()\n', (5617, 5619), False, 'from openmdao.api import 
Problem, Group, IndepVarComp\n'), ((5846, 5860), 'openmdao.api.IndepVarComp', 'IndepVarComp', ([], {}), '()\n', (5858, 5860), False, 'from openmdao.api import Problem, Group, IndepVarComp\n'), ((6128, 6142), 'openmdao.api.IndepVarComp', 'IndepVarComp', ([], {}), '()\n', (6140, 6142), False, 'from openmdao.api import Problem, Group, IndepVarComp\n'), ((9504, 9542), 'openmdao.utils.assert_utils.assert_check_partials', 'assert_check_partials', (['cpd'], {'atol': '(5e-05)'}), '(cpd, atol=5e-05)\n', (9525, 9542), False, 'from openmdao.utils.assert_utils import assert_check_partials\n'), ((9638, 9664), 'numpy.array', 'np.array', (['[0.0, 3.0, 10.0]'], {}), '([0.0, 3.0, 10.0])\n', (9646, 9664), True, 'import numpy as np\n'), ((9679, 9788), 'dymos.transcriptions.grid_data.GridData', 'GridData', ([], {'num_segments': '(2)', 'transcription_order': '[3, 5]', 'segment_ends': 'segends', 'transcription': '"""gauss-lobatto"""'}), "(num_segments=2, transcription_order=[3, 5], segment_ends=segends,\n transcription='gauss-lobatto')\n", (9687, 9788), False, 'from dymos.transcriptions.grid_data import GridData\n'), ((9961, 9975), 'openmdao.api.IndepVarComp', 'IndepVarComp', ([], {}), '()\n', (9973, 9975), False, 'from openmdao.api import Problem, Group, IndepVarComp\n'), ((10202, 10216), 'openmdao.api.IndepVarComp', 'IndepVarComp', ([], {}), '()\n', (10214, 10216), False, 'from openmdao.api import Problem, Group, IndepVarComp\n'), ((10484, 10498), 'openmdao.api.IndepVarComp', 'IndepVarComp', ([], {}), '()\n', (10496, 10498), False, 'from openmdao.api import Problem, Group, IndepVarComp\n'), ((11328, 11356), 'numpy.array', 'np.array', (['(0, 3, 3, 6.5, 10)'], {}), '((0, 3, 3, 6.5, 10))\n', (11336, 11356), True, 'import numpy as np\n'), ((11803, 11841), 'openmdao.utils.assert_utils.assert_check_partials', 'assert_check_partials', (['cpd'], {'atol': '(5e-05)'}), '(cpd, atol=5e-05)\n', (11824, 11841), False, 'from openmdao.utils.assert_utils import assert_check_partials\n'), ((12238, 
12252), 'openmdao.api.IndepVarComp', 'IndepVarComp', ([], {}), '()\n', (12250, 12252), False, 'from openmdao.api import Problem, Group, IndepVarComp\n'), ((12621, 12635), 'openmdao.api.IndepVarComp', 'IndepVarComp', ([], {}), '()\n', (12633, 12635), False, 'from openmdao.api import Problem, Group, IndepVarComp\n'), ((13454, 13483), 'dymos.utils.lgr.lgr', 'lgr', (['(3)'], {'include_endpoint': '(True)'}), '(3, include_endpoint=True)\n', (13457, 13483), False, 'from dymos.utils.lgr import lgr\n'), ((15111, 15187), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["p['state_interp_comp.staterate_col:x'][:, 0]", '(2 * t_col)'], {}), "(p['state_interp_comp.staterate_col:x'][:, 0], 2 * t_col)\n", (15130, 15187), False, 'from numpy.testing import assert_almost_equal\n'), ((15225, 15323), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["p['state_interp_comp.staterate_col:v'][:, 0]", '(3 * t_col ** 2 - 20 * t_col)'], {}), "(p['state_interp_comp.staterate_col:v'][:, 0], 3 * t_col **\n 2 - 20 * t_col)\n", (15244, 15323), False, 'from numpy.testing import assert_almost_equal\n'), ((15400, 15438), 'openmdao.utils.assert_utils.assert_check_partials', 'assert_check_partials', (['cpd'], {'atol': '(1e-05)'}), '(cpd, atol=1e-05)\n', (15421, 15438), False, 'from openmdao.utils.assert_utils import assert_check_partials\n'), ((3047, 3080), 'numpy.array', 'np.array', (['(0, 1, 1, 2)'], {'dtype': 'int'}), '((0, 1, 1, 2), dtype=int)\n', (3055, 3080), True, 'import numpy as np\n'), ((3553, 3571), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (3565, 3571), True, 'import matplotlib.pyplot as plt\n'), ((3589, 3612), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(100)'], {}), '(0, 10, 100)\n', (3600, 3612), True, 'import numpy as np\n'), ((4666, 4676), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4674, 4676), True, 'import matplotlib.pyplot as plt\n'), ((6980, 7013), 'numpy.array', 'np.array', (['(0, 1, 1, 2)'], 
{'dtype': 'int'}), '((0, 1, 1, 2), dtype=int)\n', (6988, 7013), True, 'import numpy as np\n'), ((7546, 7564), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (7558, 7564), True, 'import matplotlib.pyplot as plt\n'), ((7855, 7878), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(100)'], {}), '(0, 10, 100)\n', (7866, 7878), True, 'import numpy as np\n'), ((8992, 9002), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9000, 9002), True, 'import matplotlib.pyplot as plt\n'), ((13863, 13881), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (13875, 13881), True, 'import matplotlib.pyplot as plt\n'), ((13904, 13924), 'numpy.array', 'np.array', (['[0, 5, 10]'], {}), '([0, 5, 10])\n', (13912, 13924), True, 'import numpy as np\n'), ((13945, 13965), 'numpy.array', 'np.array', (['[2.5, 7.5]'], {}), '([2.5, 7.5])\n', (13953, 13965), True, 'import numpy as np\n'), ((13983, 14006), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(100)'], {}), '(0, 10, 100)\n', (13994, 14006), True, 'import numpy as np\n'), ((15074, 15084), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15082, 15084), True, 'import matplotlib.pyplot as plt\n'), ((1079, 1086), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (1084, 1086), False, 'from openmdao.api import Problem, Group, IndepVarComp\n'), ((1366, 1409), 'numpy.zeros', 'np.zeros', (["gd.subset_num_nodes['state_disc']"], {}), "(gd.subset_num_nodes['state_disc'])\n", (1374, 1409), True, 'import numpy as np\n'), ((1493, 1536), 'numpy.zeros', 'np.zeros', (["gd.subset_num_nodes['state_disc']"], {}), "(gd.subset_num_nodes['state_disc'])\n", (1501, 1536), True, 'import numpy as np\n'), ((1780, 1823), 'numpy.zeros', 'np.zeros', (["gd.subset_num_nodes['state_disc']"], {}), "(gd.subset_num_nodes['state_disc'])\n", (1788, 1823), True, 'import numpy as np\n'), ((1938, 1981), 'numpy.zeros', 'np.zeros', (["gd.subset_num_nodes['state_disc']"], {}), 
"(gd.subset_num_nodes['state_disc'])\n", (1946, 1981), True, 'import numpy as np\n'), ((2334, 2437), 'dymos.transcriptions.pseudospectral.components.StateInterpComp', 'StateInterpComp', ([], {'transcription': '"""gauss-lobatto"""', 'grid_data': 'gd', 'state_options': 'states', 'time_units': '"""s"""'}), "(transcription='gauss-lobatto', grid_data=gd, state_options=\n states, time_units='s')\n", (2349, 2437), False, 'from dymos.transcriptions.pseudospectral.components import StateInterpComp\n'), ((5522, 5529), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (5527, 5529), False, 'from openmdao.api import Problem, Group, IndepVarComp\n'), ((5768, 5816), 'numpy.zeros', 'np.zeros', (["(gd.subset_num_nodes['state_disc'], 2)"], {}), "((gd.subset_num_nodes['state_disc'], 2))\n", (5776, 5816), True, 'import numpy as np\n'), ((6017, 6065), 'numpy.zeros', 'np.zeros', (["(gd.subset_num_nodes['state_disc'], 2)"], {}), "((gd.subset_num_nodes['state_disc'], 2))\n", (6025, 6065), True, 'import numpy as np\n'), ((6415, 6518), 'dymos.transcriptions.pseudospectral.components.StateInterpComp', 'StateInterpComp', ([], {'transcription': '"""gauss-lobatto"""', 'grid_data': 'gd', 'state_options': 'states', 'time_units': '"""s"""'}), "(transcription='gauss-lobatto', grid_data=gd, state_options=\n states, time_units='s')\n", (6430, 6518), False, 'from dymos.transcriptions.pseudospectral.components import StateInterpComp\n'), ((9878, 9885), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (9883, 9885), False, 'from openmdao.api import Problem, Group, IndepVarComp\n'), ((10124, 10172), 'numpy.zeros', 'np.zeros', (["(gd.subset_num_nodes['state_disc'], 2)"], {}), "((gd.subset_num_nodes['state_disc'], 2))\n", (10132, 10172), True, 'import numpy as np\n'), ((10373, 10421), 'numpy.zeros', 'np.zeros', (["(gd.subset_num_nodes['state_disc'], 2)"], {}), "((gd.subset_num_nodes['state_disc'], 2))\n", (10381, 10421), True, 'import numpy as np\n'), ((10771, 10874), 
'dymos.transcriptions.pseudospectral.components.StateInterpComp', 'StateInterpComp', ([], {'transcription': '"""gauss-lobatto"""', 'grid_data': 'gd', 'state_options': 'states', 'time_units': '"""s"""'}), "(transcription='gauss-lobatto', grid_data=gd, state_options=\n states, time_units='s')\n", (10786, 10874), False, 'from dymos.transcriptions.pseudospectral.components import StateInterpComp\n'), ((12007, 12024), 'numpy.array', 'np.array', (['[0, 10]'], {}), '([0, 10])\n', (12015, 12024), True, 'import numpy as np\n'), ((12101, 12108), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (12106, 12108), False, 'from openmdao.api import Problem, Group, IndepVarComp\n'), ((12388, 12431), 'numpy.zeros', 'np.zeros', (["gd.subset_num_nodes['state_disc']"], {}), "(gd.subset_num_nodes['state_disc'])\n", (12396, 12431), True, 'import numpy as np\n'), ((12515, 12558), 'numpy.zeros', 'np.zeros', (["gd.subset_num_nodes['state_disc']"], {}), "(gd.subset_num_nodes['state_disc'])\n", (12523, 12558), True, 'import numpy as np\n'), ((12908, 13006), 'dymos.transcriptions.pseudospectral.components.StateInterpComp', 'StateInterpComp', ([], {'transcription': '"""radau-ps"""', 'grid_data': 'gd', 'state_options': 'states', 'time_units': '"""s"""'}), "(transcription='radau-ps', grid_data=gd, state_options=\n states, time_units='s')\n", (12923, 13006), False, 'from dymos.transcriptions.pseudospectral.components import StateInterpComp\n'), ((2196, 2232), 'numpy.zeros', 'np.zeros', (["gd.subset_num_nodes['col']"], {}), "(gd.subset_num_nodes['col'])\n", (2204, 2232), True, 'import numpy as np\n'), ((6277, 6313), 'numpy.zeros', 'np.zeros', (["gd.subset_num_nodes['col']"], {}), "(gd.subset_num_nodes['col'])\n", (6285, 6313), True, 'import numpy as np\n'), ((10633, 10669), 'numpy.zeros', 'np.zeros', (["gd.subset_num_nodes['col']"], {}), "(gd.subset_num_nodes['col'])\n", (10641, 10669), True, 'import numpy as np\n'), ((12687, 12723), 'numpy.zeros', 'np.zeros', (["gd.subset_num_nodes['col']"], {}), 
"(gd.subset_num_nodes['col'])\n", (12695, 12723), True, 'import numpy as np\n')] |
import sys
import math
import numpy as np
from datetime import datetime
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
def ortho_weight(ndim):
    """Return a random (ndim x ndim) orthogonal matrix as float32.

    A Gaussian matrix is factored with an SVD and only its left singular
    vectors are kept, so both the rows and the columns of the result are
    orthonormal.
    """
    gaussian = np.random.randn(ndim, ndim)
    left_basis, _, _ = np.linalg.svd(gaussian)
    return left_basis.astype('float32')
def norm_weight(nin, nout=None, scale=0.01, ortho=True):
    """Return a random (nin x nout) float32 weight matrix.

    When the matrix is square and ``ortho`` is true an orthogonal matrix
    is produced; otherwise entries are Gaussian draws scaled by ``scale``.
    ``nout`` defaults to ``nin``.
    """
    if nout is None:
        nout = nin
    if ortho and nout == nin:
        weights = ortho_weight(nin)
    else:
        weights = scale * np.random.randn(nin, nout)
    return weights.astype('float32')
class LSTM_Cell(nn.Module):
    """A single LSTM step with orthogonally-initialized gate weights.

    Hidden-to-hidden maps (``*h``) are bias-free linear layers; the
    input-to-hidden maps (``*x``) carry a bias. All weights are copied
    from random orthogonal matrices at construction.
    """

    def __init__(self, device, in_dim, mem_dim):
        super(LSTM_Cell, self).__init__()
        self.device = device
        self.in_dim = in_dim
        self.mem_dim = mem_dim

        def new_gate():
            # Hidden-to-hidden transform: (mem_dim -> mem_dim), no bias.
            h = nn.Linear(self.mem_dim, self.mem_dim, bias=False)
            h.weight.data.copy_(torch.from_numpy(ortho_weight(self.mem_dim)))
            return h

        def new_W():
            # Input-to-hidden transform with bias.
            # NOTE(review): the copied matrix is (mem_dim, mem_dim) while the
            # layer weight is (mem_dim, in_dim); copy_ only succeeds when
            # in_dim == mem_dim -- confirm against callers.
            w = nn.Linear(self.in_dim, self.mem_dim)
            w.weight.data.copy_(torch.from_numpy(ortho_weight(self.mem_dim)))
            return w

        self.ih = new_gate()
        self.fh = new_gate()
        self.oh = new_gate()
        self.ch = new_gate()
        self.cx = new_W()
        self.ox = new_W()
        self.fx = new_W()
        self.ix = new_W()

    def forward(self, input, h, c):
        """Run one LSTM step.

        :param input: (batch, in_dim) input at this time step
        :param h: (batch, mem_dim) previous hidden state
        :param c: (batch, mem_dim) previous cell state
        :return: tuple (new cell state, new hidden state)
        """
        # torch.tanh / torch.sigmoid replace the deprecated
        # F.tanh / F.sigmoid (deprecated since PyTorch 0.4.1).
        u = torch.tanh(self.cx(input) + self.ch(h))
        i = torch.sigmoid(self.ix(input) + self.ih(h))
        f = torch.sigmoid(self.fx(input) + self.fh(h))
        c = i * u + f * c
        o = torch.sigmoid(self.ox(input) + self.oh(h))
        h = o * torch.tanh(c)
        return c, h
class LSTM(nn.Module):
    """Unidirectional LSTM over a padded, time-major batch.

    Wraps :class:`LSTM_Cell` and applies it step by step, using a 0/1
    padding mask so that padded positions carry their previous state
    forward unchanged.
    """

    def __init__(self, device, in_dim, mem_dim):
        super(LSTM, self).__init__()
        self.device = device
        self.in_dim = in_dim
        self.mem_dim = mem_dim
        self.TreeCell = LSTM_Cell(device, in_dim, mem_dim)
        self.output_module = None

    def forward(self, x, x_mask):
        """
        :param x: #step x #sample x dim_emb
        :param x_mask: #step x #sample (1 for real tokens, 0 for padding)
        :return: #step x #sample x mem_dim stacked hidden states
        """
        # Hidden/cell states must match the cell's memory size, not the
        # embedding size: the cell's hidden maps are Linear(mem_dim, mem_dim).
        # (Identical to the original when in_dim == mem_dim, which is the
        # only configuration the original supported.)
        # The deprecated autograd.Variable wrapper is no longer needed.
        h = torch.zeros(x.size(1), self.mem_dim)
        c = torch.zeros(x.size(1), self.mem_dim)
        if torch.cuda.is_available():
            h = h.to(self.device)
            c = c.to(self.device)
        all_hidden = []
        for step in range(x.size(0)):
            input = x[step]  # #sample x dim_emb
            step_c, step_h = self.TreeCell(input, h, c)
            # Masked update: padded samples keep their previous h/c.
            h = x_mask[step][:, None] * step_h + (1. - x_mask[step])[:, None] * h
            c = x_mask[step][:, None] * step_c + (1. - x_mask[step])[:, None] * c
            all_hidden.append(torch.unsqueeze(h, 0))
        return torch.cat(all_hidden, 0)
class ESIM(nn.Module):
"""
Implementation of the multi feed forward network model described in
the paper "A Decomposable Attention Model for Natural Language
Inference" by <NAME> al., 2016.
It applies feedforward MLPs to combinations of parts of the two sentences,
without any recurrent structure.
"""
    def __init__(self, num_units, num_classes, embedding_size, dropout, device=0,
                 training=True, project_input=True,
                 use_intra_attention=False, distance_biases=10, max_sentence_length=30):
        """
        Create the model based on MLP networks.
        :param num_units: size of the networks
        :param num_classes: number of classes in the problem
        :param embedding_size: size of each word embedding
        :param use_intra_attention: whether to use intra-attention model
        :param training: whether to create training tensors (optimizer)
        :param project_input: whether to project input embeddings to a
            different dimensionality
        :param distance_biases: number of different distances with biases used
            in the intra-attention model
        """
        super(ESIM, self).__init__()
        self.arch = "ESIM"
        self.num_units = num_units
        self.num_classes = num_classes
        self.project_input = project_input
        self.embedding_size=embedding_size
        self.distance_biases=distance_biases
        self.max_sentence_length=max_sentence_length
        self.device = device
        self.dropout = nn.Dropout(p=dropout)
        # BiLSTM-style encoders built from the custom LSTM; both input and
        # compare stages use the same (embedding_size -> num_units) shape.
        self.lstm_intra=LSTM(device, embedding_size, num_units)
        # 4*num_units*2: [a; b; a-b-style] concatenations of forward+backward
        # (2x) contexts produced in forward().
        self.linear_layer_compare = nn.Sequential(nn.Linear(4*num_units*2, num_units), nn.ReLU(), nn.Dropout(p=dropout))
        # nn.Dropout(p=0.2), nn.Linear(num_units, num_units), nn.ReLU())
        self.lstm_compare=LSTM(device, embedding_size, num_units)
        # Final classifier over pooled (mean+max of both sentences) features.
        self.linear_layer_aggregate = nn.Sequential(nn.Dropout(p=dropout), nn.Linear(4*num_units*2, num_units), nn.ReLU(),
                                                    nn.Dropout(p=dropout), nn.Linear(num_units, num_classes))
        self.init_weight()
def ortho_weight(self):
"""
Random orthogonal weights
Used by norm_weights(below), in which case, we
are ensuring that the rows are orthogonal
(i.e W = U \Sigma V, U has the same
# of rows, V has the same # of cols)
"""
ndim=self.num_units
W = np.random.randn(ndim, ndim)
u, s, v = np.linalg.svd(W)
return u.astype('float32')
def initialize_lstm(self):
if torch.cuda.is_available():
init=torch.Tensor(np.concatenate([self.ortho_weight(),self.ortho_weight(),self.ortho_weight(),self.ortho_weight()], 0)).to(self.device)
else:
init = torch.Tensor(
np.concatenate([self.ortho_weight(), self.ortho_weight(), self.ortho_weight(), self.ortho_weight()], 0))
return init
    def init_weight(self):
        # Re-initialize the trainable Linear layers inside the Sequential
        # containers: small Gaussian weights, zero biases. The indices are
        # positional (compare[0] is its Linear; aggregate[1] and [4] are the
        # two Linears sandwiched between Dropout/ReLU).
        #nn.init.normal(self.linear_layer_project,mean=0,std=0.1)
        #print(self.linear_layer_attend[3])
        #self.linear_layer_attend[1].weight.data.normal_(0, 0.01)
        #self.linear_layer_attend[1].bias.data.fill_(0)
        #self.linear_layer_attend[4].weight.data.normal_(0, 0.01)
        #self.linear_layer_attend[4].bias.data.fill_(0)
        self.linear_layer_compare[0].weight.data.normal_(0, 0.01)
        self.linear_layer_compare[0].bias.data.fill_(0)
        #self.linear_layer_compare[4].weight.data.normal_(0, 0.01)
        #self.linear_layer_compare[4].bias.data.fill_(0)
        self.linear_layer_aggregate[1].weight.data.normal_(0, 0.01)
        self.linear_layer_aggregate[1].bias.data.fill_(0)
        self.linear_layer_aggregate[4].weight.data.normal_(0, 0.01)
        self.linear_layer_aggregate[4].bias.data.fill_(0)
def attention_softmax3d(self,raw_attentions):
reshaped_attentions = raw_attentions.view(-1, raw_attentions.size(2))
out=nn.functional.softmax(reshaped_attentions, dim=1)
return out.view(raw_attentions.size(0),raw_attentions.size(1),raw_attentions.size(2))
    def _transformation_input(self,embed_sent, x1_mask):
        # Embed token ids, apply dropout, then encode with the intra LSTM.
        # NOTE(review): self.word_embedding is never assigned in this class's
        # __init__ -- presumably set externally; confirm before calling, and
        # note forward() does NOT use this helper (it embeds inline).
        embed_sent = self.word_embedding(embed_sent)
        embed_sent = self.dropout(embed_sent)
        hidden=self.lstm_intra(embed_sent, x1_mask)
        return hidden
def aggregate(self,v1, v2):
"""
Aggregate the representations induced from both sentences and their
representations
:param v1: tensor with shape (batch, time_steps, num_units)
:param v2: tensor with shape (batch, time_steps, num_units)
:return: logits over classes, shape (batch, num_classes)
"""
v1_mean = torch.mean(v1, 0)
v2_mean = torch.mean(v2, 0)
v1_max, _ = torch.max(v1, 0)
v2_max, _ = torch.max(v2, 0)
out = self.linear_layer_aggregate(torch.cat((v1_mean, v1_max, v2_mean, v2_max), 1))
#v1_sum=torch.sum(v1,1)
#v2_sum=torch.sum(v2,1)
#out=self.linear_layer_aggregate(torch.cat([v1_sum,v2_sum],1))
return out
def cosine_interaction(self, tensor1, tensor2):
"""
:param tensor1: #step1 * dim
:param tensor2: #step2 * dim
:return: #step1 * #step2
"""
simCube_0=tensor1[0].view(1,-1)
simCube_1=tensor2[0].view(1,-1)
for i in range(tensor1.size(0)):
for j in range(tensor2.size(0)):
if not(i==0 and j==0):
simCube_0=torch.cat((simCube_0, tensor1[i].view(1,-1)))
simCube_1=torch.cat((simCube_1, tensor2[j].view(1,-1)))
simCube=F.cosine_similarity(simCube_0, simCube_1)
return simCube.view(tensor1.size(0),tensor2.size(0))
def create_mask(self, sent):
masks = []
sent_lengths = [len(s.split(" ")) for s in sent]
max_len = max(sent_lengths)
for s_length in sent_lengths:
pad_mask = np.zeros(max_len)
pad_mask[:s_length] = 1
masks.append(pad_mask)
masks = np.array(masks)
return torch.from_numpy(masks).float().to(self.device)
    #def forward(self, x1, x1_mask, x2, x2_mask):
    def forward(self, sent1, sent2, ext_feats=None, word_to_doc_count=None, raw_sent1=None, raw_sent2=None, visualize=False):
        """Attend/compare/aggregate forward pass over a sentence pair.

        :param sent1: embedded sentence 1, shape [batch, dim, time]
        :param sent2: embedded sentence 2, shape [batch, dim, time]
        :param ext_feats: unused here; kept for interface compatibility
        :param word_to_doc_count: unused here; kept for interface compatibility
        :param raw_sent1: raw sentence strings, used only to build padding masks
        :param raw_sent2: raw sentence strings, used only to build padding masks
        :param visualize: if True, return the raw attention weight matrix early
        :return: log-softmax scores over classes, shape [batch, num_classes]
            (or the attention weight matrix when ``visualize`` is True)
        """
        # idx = [i for i in range(embed_sent.size(1) - 1, -1, -1)]
        # if torch.cuda.is_available():
        #     idx = torch.cuda.LongTensor(idx)
        # else:
        #     idx = torch.LongTensor(idx)
        sent1 = sent1.permute(2, 0, 1) # from [B * D * T] to [T * B * D]
        sent2 = sent2.permute(2, 0, 1)
        x1_mask = self.create_mask(raw_sent1)
        x2_mask = self.create_mask(raw_sent2)
        # masks come back as [batch, time]; transpose to [time, batch]
        x1_mask = x1_mask.permute(1, 0)
        x2_mask = x2_mask.permute(1, 0)
        #x1 = self.word_embedding(x1)
        x1 = self.dropout(sent1)
        #x2 = self.word_embedding(x2)
        x2 = self.dropout(sent2)
        # idx_1/idx_2 reverse the time axis so a second LSTM pass reads each
        # sentence back-to-front (hand-rolled bidirectional encoding).
        idx_1 = [i for i in range(x1.size(0) - 1, -1, -1)]
        idx_1 = Variable(torch.LongTensor(idx_1))
        if torch.cuda.is_available():
            idx_1 = idx_1.to(self.device)
        x1_r=torch.index_select(x1,0,idx_1)
        x1_mask_r=torch.index_select(x1_mask,0,idx_1)
        idx_2=[i for i in range(x2.size(0) -1, -1, -1)]
        idx_2 = Variable(torch.LongTensor(idx_2))
        if torch.cuda.is_available():
            idx_2 = Variable(torch.LongTensor(idx_2)).to(self.device)
        x2_r=torch.index_select(x2,0,idx_2)
        x2_mask_r=torch.index_select(x2_mask, 0, idx_2)
        # run the intra-sentence LSTM forward and reversed, then concatenate
        # the two directions per time step
        proj1=self.lstm_intra(x1, x1_mask)
        proj1_r=self.lstm_intra(x1_r, x1_mask_r)
        proj2=self.lstm_intra(x2, x2_mask)
        proj2_r=self.lstm_intra(x2_r, x2_mask_r)
        ctx1=torch.cat((proj1, torch.index_select(proj1_r,0,idx_1)),2)
        ctx2=torch.cat((proj2, torch.index_select(proj2_r, 0, idx_2)),2)
        # ctx1: #step1 x #sample x #dimctx
        # ctx2: #step2 x #sample x #dimctx
        ctx1 = ctx1 * x1_mask[:, :, None]
        ctx2 = ctx2 * x2_mask[:, :, None]
        # weight_matrix: #sample x #step1 x #step2
        weight_matrix = torch.matmul(ctx1.permute(1, 0, 2), ctx2.permute(1, 2, 0))
        if visualize:
            return weight_matrix
        # numerically stable softmax over each axis: subtract the max
        # before exponentiating
        weight_matrix_1 = torch.exp(weight_matrix - weight_matrix.max(1, keepdim=True)[0]).permute(1, 2, 0)
        weight_matrix_2 = torch.exp(weight_matrix - weight_matrix.max(2, keepdim=True)[0]).permute(1, 2, 0)
        # weight_matrix_1: #step1 x #step2 x #sample
        weight_matrix_1 = weight_matrix_1 * x1_mask[:, None, :]
        weight_matrix_2 = weight_matrix_2 * x2_mask[None, :, :]
        alpha = weight_matrix_1 / weight_matrix_1.sum(0, keepdim=True)
        beta = weight_matrix_2 / weight_matrix_2.sum(1, keepdim=True)
        # keep the attention maps around for later inspection
        self.alpha=alpha
        self.beta=beta
        # soft-align each sentence against the other
        ctx2_ = (torch.unsqueeze(ctx1,1) * torch.unsqueeze(alpha,3)).sum(0)
        ctx1_ = (torch.unsqueeze(ctx2, 0) * torch.unsqueeze(beta,3)).sum(1)
        # cosine distance and Euclidean distance
        '''
        tmp_result=[]
        for batch_i in range(ctx1.size(1)):
            tmp_result.append(torch.unsqueeze(self.cosine_interaction(ctx1[:,batch_i,:], ctx2[:,batch_i,:]), 0))
        weight_matrix=torch.cat(tmp_result)
        weight_matrix_1 = torch.exp(weight_matrix - weight_matrix.max(1, keepdim=True)[0]).permute(1, 2, 0)
        weight_matrix_2 = torch.exp(weight_matrix - weight_matrix.max(2, keepdim=True)[0]).permute(1, 2, 0)
        # weight_matrix_1: #step1 x #step2 x #sample
        weight_matrix_1 = weight_matrix_1 * x1_mask[:, None, :]
        weight_matrix_2 = weight_matrix_2 * x2_mask[None, :, :]
        alpha = weight_matrix_1 / weight_matrix_1.sum(0, keepdim=True)
        beta = weight_matrix_2 / weight_matrix_2.sum(1, keepdim=True)
        ctx2_cos_ = (torch.unsqueeze(ctx1, 1) * torch.unsqueeze(alpha, 3)).sum(0)
        ctx1_cos_ = (torch.unsqueeze(ctx2, 0) * torch.unsqueeze(beta, 3)).sum(1)
        '''
        # compare step: original, aligned, and their interactions
        inp1 = torch.cat([ctx1, ctx1_, ctx1 * ctx1_, ctx1 - ctx1_], 2)
        inp2 = torch.cat([ctx2, ctx2_, ctx2 * ctx2_, ctx2 - ctx2_], 2)
        #inp1 = torch.cat([ctx1, ctx1_, ctx1_cos_, ctx1 * ctx1_, ctx1 * ctx1_cos_, ctx1 - ctx1_, ctx1 - ctx1_cos_], 2)
        #inp2 = torch.cat([ctx2, ctx2_, ctx2_cos_, ctx2 * ctx2_, ctx2 * ctx2_cos_, ctx2 - ctx2_, ctx2 - ctx2_cos_], 2)
        inp1=self.dropout(self.linear_layer_compare(inp1))
        inp2=self.dropout(self.linear_layer_compare(inp2))
        inp1_r=torch.index_select(inp1, 0, idx_1)
        inp2_r=torch.index_select(inp2, 0, idx_2)
        # comparison LSTM, again run in both time directions
        v1=self.lstm_compare(inp1, x1_mask)
        v2=self.lstm_compare(inp2, x2_mask)
        v1_r = self.lstm_compare(inp1_r, x1_mask)
        v2_r = self.lstm_compare(inp2_r, x2_mask)
        v1=torch.cat((v1, torch.index_select(v1_r, 0, idx_1)),2)
        v2=torch.cat((v2, torch.index_select(v2_r, 0, idx_2)),2)
        out = self.aggregate(v1, v2)
        out = F.log_softmax(out, dim=1)
        return out
| [
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.LongTensor",
"torch.max",
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"torch.nn.functional.softmax",
"torch.mean",
"torch.nn.functional.cosine_similarity",
"torch.unsqueeze",
"torch.nn.functional.tanh",
"torch.nn.functional.log_so... | [((506, 533), 'numpy.random.randn', 'np.random.randn', (['ndim', 'ndim'], {}), '(ndim, ndim)\n', (521, 533), True, 'import numpy as np\n'), ((548, 564), 'numpy.linalg.svd', 'np.linalg.svd', (['W'], {}), '(W)\n', (561, 564), True, 'import numpy as np\n'), ((2682, 2707), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2705, 2707), False, 'import torch\n'), ((3160, 3184), 'torch.cat', 'torch.cat', (['all_hidden', '(0)'], {}), '(all_hidden, 0)\n', (3169, 3184), False, 'import torch\n'), ((4744, 4765), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (4754, 4765), True, 'import torch.nn as nn\n'), ((5717, 5744), 'numpy.random.randn', 'np.random.randn', (['ndim', 'ndim'], {}), '(ndim, ndim)\n', (5732, 5744), True, 'import numpy as np\n'), ((5763, 5779), 'numpy.linalg.svd', 'np.linalg.svd', (['W'], {}), '(W)\n', (5776, 5779), True, 'import numpy as np\n'), ((5858, 5883), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5881, 5883), False, 'import torch\n'), ((7242, 7291), 'torch.nn.functional.softmax', 'nn.functional.softmax', (['reshaped_attentions'], {'dim': '(1)'}), '(reshaped_attentions, dim=1)\n', (7263, 7291), True, 'import torch.nn as nn\n'), ((7994, 8011), 'torch.mean', 'torch.mean', (['v1', '(0)'], {}), '(v1, 0)\n', (8004, 8011), False, 'import torch\n'), ((8030, 8047), 'torch.mean', 'torch.mean', (['v2', '(0)'], {}), '(v2, 0)\n', (8040, 8047), False, 'import torch\n'), ((8068, 8084), 'torch.max', 'torch.max', (['v1', '(0)'], {}), '(v1, 0)\n', (8077, 8084), False, 'import torch\n'), ((8105, 8121), 'torch.max', 'torch.max', (['v2', '(0)'], {}), '(v2, 0)\n', (8114, 8121), False, 'import torch\n'), ((8927, 8968), 'torch.nn.functional.cosine_similarity', 'F.cosine_similarity', (['simCube_0', 'simCube_1'], {}), '(simCube_0, simCube_1)\n', (8946, 8968), True, 'import torch.nn.functional as F\n'), ((9348, 9363), 'numpy.array', 'np.array', (['masks'], {}), 
'(masks)\n', (9356, 9363), True, 'import numpy as np\n'), ((10358, 10383), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10381, 10383), False, 'import torch\n'), ((10440, 10472), 'torch.index_select', 'torch.index_select', (['x1', '(0)', 'idx_1'], {}), '(x1, 0, idx_1)\n', (10458, 10472), False, 'import torch\n'), ((10489, 10526), 'torch.index_select', 'torch.index_select', (['x1_mask', '(0)', 'idx_1'], {}), '(x1_mask, 0, idx_1)\n', (10507, 10526), False, 'import torch\n'), ((10642, 10667), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10665, 10667), False, 'import torch\n'), ((10752, 10784), 'torch.index_select', 'torch.index_select', (['x2', '(0)', 'idx_2'], {}), '(x2, 0, idx_2)\n', (10770, 10784), False, 'import torch\n'), ((10801, 10838), 'torch.index_select', 'torch.index_select', (['x2_mask', '(0)', 'idx_2'], {}), '(x2_mask, 0, idx_2)\n', (10819, 10838), False, 'import torch\n'), ((13285, 13340), 'torch.cat', 'torch.cat', (['[ctx1, ctx1_, ctx1 * ctx1_, ctx1 - ctx1_]', '(2)'], {}), '([ctx1, ctx1_, ctx1 * ctx1_, ctx1 - ctx1_], 2)\n', (13294, 13340), False, 'import torch\n'), ((13356, 13411), 'torch.cat', 'torch.cat', (['[ctx2, ctx2_, ctx2 * ctx2_, ctx2 - ctx2_]', '(2)'], {}), '([ctx2, ctx2_, ctx2 * ctx2_, ctx2 - ctx2_], 2)\n', (13365, 13411), False, 'import torch\n'), ((13783, 13817), 'torch.index_select', 'torch.index_select', (['inp1', '(0)', 'idx_1'], {}), '(inp1, 0, idx_1)\n', (13801, 13817), False, 'import torch\n'), ((13833, 13867), 'torch.index_select', 'torch.index_select', (['inp2', '(0)', 'idx_2'], {}), '(inp2, 0, idx_2)\n', (13851, 13867), False, 'import torch\n'), ((14238, 14263), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['out'], {'dim': '(1)'}), '(out, dim=1)\n', (14251, 14263), True, 'import torch.nn.functional as F\n'), ((841, 867), 'numpy.random.randn', 'np.random.randn', (['nin', 'nout'], {}), '(nin, nout)\n', (856, 867), True, 'import numpy as np\n'), ((1150, 1199), 
'torch.nn.Linear', 'nn.Linear', (['self.mem_dim', 'self.mem_dim'], {'bias': '(False)'}), '(self.mem_dim, self.mem_dim, bias=False)\n', (1159, 1199), True, 'import torch.nn as nn\n'), ((1337, 1373), 'torch.nn.Linear', 'nn.Linear', (['self.in_dim', 'self.mem_dim'], {}), '(self.in_dim, self.mem_dim)\n', (1346, 1373), True, 'import torch.nn as nn\n'), ((1972, 1981), 'torch.nn.functional.tanh', 'F.tanh', (['c'], {}), '(c)\n', (1978, 1981), True, 'import torch.nn.functional as F\n'), ((4882, 4921), 'torch.nn.Linear', 'nn.Linear', (['(4 * num_units * 2)', 'num_units'], {}), '(4 * num_units * 2, num_units)\n', (4891, 4921), True, 'import torch.nn as nn\n'), ((4919, 4928), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4926, 4928), True, 'import torch.nn as nn\n'), ((4930, 4951), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (4940, 4951), True, 'import torch.nn as nn\n'), ((5187, 5208), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (5197, 5208), True, 'import torch.nn as nn\n'), ((5210, 5249), 'torch.nn.Linear', 'nn.Linear', (['(4 * num_units * 2)', 'num_units'], {}), '(4 * num_units * 2, num_units)\n', (5219, 5249), True, 'import torch.nn as nn\n'), ((5247, 5256), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5254, 5256), True, 'import torch.nn as nn\n'), ((5310, 5331), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (5320, 5331), True, 'import torch.nn as nn\n'), ((5333, 5366), 'torch.nn.Linear', 'nn.Linear', (['num_units', 'num_classes'], {}), '(num_units, num_classes)\n', (5342, 5366), True, 'import torch.nn as nn\n'), ((8164, 8212), 'torch.cat', 'torch.cat', (['(v1_mean, v1_max, v2_mean, v2_max)', '(1)'], {}), '((v1_mean, v1_max, v2_mean, v2_max), 1)\n', (8173, 8212), False, 'import torch\n'), ((9242, 9259), 'numpy.zeros', 'np.zeros', (['max_len'], {}), '(max_len)\n', (9250, 9259), True, 'import numpy as np\n'), ((10322, 10345), 'torch.LongTensor', 'torch.LongTensor', (['idx_1'], {}), 
'(idx_1)\n', (10338, 10345), False, 'import torch\n'), ((10606, 10629), 'torch.LongTensor', 'torch.LongTensor', (['idx_2'], {}), '(idx_2)\n', (10622, 10629), False, 'import torch\n'), ((3123, 3144), 'torch.unsqueeze', 'torch.unsqueeze', (['h', '(0)'], {}), '(h, 0)\n', (3138, 3144), False, 'import torch\n'), ((11056, 11093), 'torch.index_select', 'torch.index_select', (['proj1_r', '(0)', 'idx_1'], {}), '(proj1_r, 0, idx_1)\n', (11074, 11093), False, 'import torch\n'), ((11127, 11164), 'torch.index_select', 'torch.index_select', (['proj2_r', '(0)', 'idx_2'], {}), '(proj2_r, 0, idx_2)\n', (11145, 11164), False, 'import torch\n'), ((14083, 14117), 'torch.index_select', 'torch.index_select', (['v1_r', '(0)', 'idx_1'], {}), '(v1_r, 0, idx_1)\n', (14101, 14117), False, 'import torch\n'), ((14148, 14182), 'torch.index_select', 'torch.index_select', (['v2_r', '(0)', 'idx_2'], {}), '(v2_r, 0, idx_2)\n', (14166, 14182), False, 'import torch\n'), ((12136, 12160), 'torch.unsqueeze', 'torch.unsqueeze', (['ctx1', '(1)'], {}), '(ctx1, 1)\n', (12151, 12160), False, 'import torch\n'), ((12162, 12187), 'torch.unsqueeze', 'torch.unsqueeze', (['alpha', '(3)'], {}), '(alpha, 3)\n', (12177, 12187), False, 'import torch\n'), ((12212, 12236), 'torch.unsqueeze', 'torch.unsqueeze', (['ctx2', '(0)'], {}), '(ctx2, 0)\n', (12227, 12236), False, 'import torch\n'), ((12239, 12263), 'torch.unsqueeze', 'torch.unsqueeze', (['beta', '(3)'], {}), '(beta, 3)\n', (12254, 12263), False, 'import torch\n'), ((9379, 9402), 'torch.from_numpy', 'torch.from_numpy', (['masks'], {}), '(masks)\n', (9395, 9402), False, 'import torch\n'), ((10698, 10721), 'torch.LongTensor', 'torch.LongTensor', (['idx_2'], {}), '(idx_2)\n', (10714, 10721), False, 'import torch\n')] |
from abc import ABC, abstractmethod
import numpy as np
class StochasticProcess(ABC):
    """Abstract base for discrete-time stochastic process generators.

    Subclasses implement ``_sample`` and may use ``self.rs`` (the RNG),
    ``self.x`` (current value) and ``self.t`` (current step).
    """

    def __init__(self, t_init, x_init, random_state):
        self.rs = np.random.RandomState(random_state)
        self.x = np.copy(x_init)
        self.t = t_init

    def sample(self):
        """Advance the clock one step and return the next sample."""
        self.t = self.t + 1
        return self._sample()

    @abstractmethod
    def _sample(self):
        """Produce the next sample; provided by concrete subclasses."""
class GaussianWhiteNoiseProcess(StochasticProcess):
    """Generate i.i.d. Gaussian white-noise samples."""

    def __init__(self, mu, sigma, random_state=None):
        """
        Params
        ======
        mu (float or array): process mean
        sigma (float or array): process std_dev
        random_state (None, int, array_like, RandomState): (optional) random
            state
        """
        self.mu, self.sigma = np.array(mu), np.array(sigma)
        super().__init__(t_init=0, x_init=mu, random_state=random_state)

    def _sample(self):
        """Draw the next sample."""
        drawn = self.rs.normal(self.mu, self.sigma)
        self.x = drawn
        return np.copy(drawn)
class OUProcess(StochasticProcess):
    """Generate samples from an Ornstein-Uhlenbeck (mean-reverting) process."""

    def __init__(self, x_inf, time_const, std_dev,
                 x_init=None, random_state=None):
        """
        Params
        ======
        x_inf (float or ndarray): Value to mean revert to. Also determines
                                  dimensions of noise (multi-dimensional
                                  process always uncorrelated)
        time_const (float): Mean reversion time constant, i.e. 1/theta,
                            determines length of auto-correlation of process
        std_dev (float or callable): Long-term process standard deviation,
                                     i.e. std_dev = sigma / sqrt(2*theta);
                                     a callable is evaluated at each step t
        x_init (float or ndarray): (optional) current value of process.
                                   Defaults to x_inf.
        random_state (None, int, array_like, RandomState): (optional)
                                                           random state
        """
        super().__init__(0, x_inf if x_init is None else x_init, random_state)
        self.x_inf = x_inf
        self.time_const = time_const
        if isinstance(std_dev, (int, float)):
            fixed_value = std_dev
            std_dev = lambda t: fixed_value  # promote constant to callable
        self.std_dev = std_dev

    def _sample(self):
        """Advance the process by one Euler-Maruyama step (dt = 1)."""
        theta = 1. / self.time_const
        sigma = self.std_dev(self.t) * np.sqrt(2. * theta)
        noise = self.rs.normal(size=self.x.shape)
        self.x += sigma * noise - theta * (self.x - self.x_inf)
        return np.copy(self.x)
class Scrambler(ABC):
    """Abstract interface for objects that perturb actions with random noise."""

    @abstractmethod
    def __call__(self, actions):
        """Return a noise-perturbed version of ``actions``."""
class AdditiveNoiseScrambler(Scrambler):
    """
    Scrambler that adds samples of a stochastic process to
    (continuous-valued) action vectors, clipping the result to [lb, ub].
    """

    def __init__(self, process, lb=-1., ub=1.):
        self.process = process
        self.lb = lb
        self.ub = ub

    def __call__(self, actions):
        # In-place perturbation, then clip into the valid action range.
        actions += self.process.sample()
        return actions.clip(self.lb, self.ub)

    def _required_shape(self, num_agents, action_size):
        # Noise must match the (agents, actions) layout of the action array.
        return (num_agents, action_size)
class OUScrambler(AdditiveNoiseScrambler):
    """Additive scrambler driven by an Ornstein-Uhlenbeck noise process."""

    def __init__(self, num_agents, action_size, time_const, std_dev, lb=-1.,
                 ub=1., random_state=None):
        shape = self._required_shape(num_agents, action_size)
        noise = OUProcess(np.zeros(shape), time_const, std_dev,
                          random_state=random_state)
        super().__init__(noise, lb, ub)
class GaussianWhiteNoiseScrambler(AdditiveNoiseScrambler):
    """Additive scrambler driven by i.i.d. Gaussian white noise."""

    def __init__(self, num_agents, action_size, std_dev, lb=-1., ub=1.,
                 random_state=None):
        shape = self._required_shape(num_agents, action_size)
        noise = GaussianWhiteNoiseProcess(np.zeros(shape),
                                          std_dev * np.ones(shape),
                                          random_state)
        super().__init__(noise, lb, ub)
| [
"numpy.copy",
"numpy.sqrt",
"numpy.ones",
"numpy.array",
"numpy.zeros",
"numpy.random.RandomState"
] | [((208, 243), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (229, 243), True, 'import numpy as np\n'), ((261, 276), 'numpy.copy', 'np.copy', (['x_init'], {}), '(x_init)\n', (268, 276), True, 'import numpy as np\n'), ((963, 975), 'numpy.array', 'np.array', (['mu'], {}), '(mu)\n', (971, 975), True, 'import numpy as np\n'), ((997, 1012), 'numpy.array', 'np.array', (['sigma'], {}), '(sigma)\n', (1005, 1012), True, 'import numpy as np\n'), ((1209, 1224), 'numpy.copy', 'np.copy', (['self.x'], {}), '(self.x)\n', (1216, 1224), True, 'import numpy as np\n'), ((2883, 2898), 'numpy.copy', 'np.copy', (['self.x'], {}), '(self.x)\n', (2890, 2898), True, 'import numpy as np\n'), ((4289, 4304), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (4297, 4304), True, 'import numpy as np\n'), ((2722, 2742), 'numpy.sqrt', 'np.sqrt', (['(2.0 * theta)'], {}), '(2.0 * theta)\n', (2729, 2742), True, 'import numpy as np\n'), ((4331, 4345), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (4338, 4345), True, 'import numpy as np\n')] |
# <NAME> 2014-2020
# mlxtend Machine Learning Library Extensions
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import numpy as np
from mlxtend.plotting import plot_learning_curves
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
def test_training_size():
    """Learning-curve error rates of a depth-1 tree on iris match references."""
    iris = datasets.load_iris()
    X_train, X_test, y_train, y_test = train_test_split(
        iris.data, iris.target, test_size=0.4, random_state=2
    )
    clf = DecisionTreeClassifier(max_depth=1, random_state=1)
    training_errors, test_errors = plot_learning_curves(
        X_train, y_train, X_test, y_test, clf, suppress_plot=True
    )
    expected_train = [0.22, 0.22, 0.22, 0.31, 0.31, 0.3, 0.33, 0.32, 0.33, 0.32]
    expected_test = [0.45, 0.45, 0.35, 0.35, 0.45, 0.43, 0.35, 0.35, 0.35, 0.35]
    np.testing.assert_almost_equal(training_errors, expected_train, decimal=2)
    np.testing.assert_almost_equal(test_errors, expected_test, decimal=2)
def test_scikit_metrics():
    """With scoring='accuracy' the curves are exactly 1 - error rate."""
    iris = datasets.load_iris()
    X_train, X_test, y_train, y_test = train_test_split(
        iris.data, iris.target, test_size=0.4, random_state=2
    )
    clf = DecisionTreeClassifier(max_depth=1, random_state=1)
    training_acc, test_acc = plot_learning_curves(
        X_train, y_train, X_test, y_test, clf,
        scoring='accuracy', suppress_plot=True
    )
    expected_train_err = np.array(
        [0.22, 0.22, 0.22, 0.31, 0.31, 0.3, 0.33, 0.32, 0.33, 0.32]
    )
    expected_test_err = np.array(
        [0.45, 0.45, 0.35, 0.35, 0.45, 0.43, 0.35, 0.35, 0.35, 0.35]
    )
    np.testing.assert_almost_equal(training_acc, 1 - expected_train_err,
                                   decimal=2)
    np.testing.assert_almost_equal(test_acc, 1 - expected_test_err, decimal=2)
| [
"sklearn.datasets.load_iris",
"mlxtend.plotting.plot_learning_curves",
"sklearn.model_selection.train_test_split",
"sklearn.tree.DecisionTreeClassifier",
"numpy.testing.assert_almost_equal",
"numpy.array"
] | [((358, 378), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (376, 378), False, 'from sklearn import datasets\n'), ((457, 510), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.4)', 'random_state': '(2)'}), '(X, y, test_size=0.4, random_state=2)\n', (473, 510), False, 'from sklearn.model_selection import train_test_split\n'), ((563, 614), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'max_depth': '(1)', 'random_state': '(1)'}), '(max_depth=1, random_state=1)\n', (585, 614), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((651, 730), 'mlxtend.plotting.plot_learning_curves', 'plot_learning_curves', (['X_train', 'y_train', 'X_test', 'y_test', 'clf'], {'suppress_plot': '(True)'}), '(X_train, y_train, X_test, y_test, clf, suppress_plot=True)\n', (671, 730), False, 'from mlxtend.plotting import plot_learning_curves\n'), ((925, 993), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['training_errors', 'desired1'], {'decimal': '(2)'}), '(training_errors, desired1, decimal=2)\n', (955, 993), True, 'import numpy as np\n'), ((998, 1062), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['test_errors', 'desired2'], {'decimal': '(2)'}), '(test_errors, desired2, decimal=2)\n', (1028, 1062), True, 'import numpy as np\n'), ((1103, 1123), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (1121, 1123), False, 'from sklearn import datasets\n'), ((1202, 1255), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.4)', 'random_state': '(2)'}), '(X, y, test_size=0.4, random_state=2)\n', (1218, 1255), False, 'from sklearn.model_selection import train_test_split\n'), ((1308, 1359), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'max_depth': '(1)', 'random_state': '(1)'}), '(max_depth=1, random_state=1)\n', (1330, 1359), False, 'from sklearn.tree 
import DecisionTreeClassifier\n'), ((1390, 1494), 'mlxtend.plotting.plot_learning_curves', 'plot_learning_curves', (['X_train', 'y_train', 'X_test', 'y_test', 'clf'], {'scoring': '"""accuracy"""', 'suppress_plot': '(True)'}), "(X_train, y_train, X_test, y_test, clf, scoring=\n 'accuracy', suppress_plot=True)\n", (1410, 1494), False, 'from mlxtend.plotting import plot_learning_curves\n'), ((1597, 1666), 'numpy.array', 'np.array', (['[0.22, 0.22, 0.22, 0.31, 0.31, 0.3, 0.33, 0.32, 0.33, 0.32]'], {}), '([0.22, 0.22, 0.22, 0.31, 0.31, 0.3, 0.33, 0.32, 0.33, 0.32])\n', (1605, 1666), True, 'import numpy as np\n'), ((1707, 1777), 'numpy.array', 'np.array', (['[0.45, 0.45, 0.35, 0.35, 0.45, 0.43, 0.35, 0.35, 0.35, 0.35]'], {}), '([0.45, 0.45, 0.35, 0.35, 0.45, 0.43, 0.35, 0.35, 0.35, 0.35])\n', (1715, 1777), True, 'import numpy as np\n'), ((1807, 1876), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['training_acc', '(1 - desired1)'], {'decimal': '(2)'}), '(training_acc, 1 - desired1, decimal=2)\n', (1837, 1876), True, 'import numpy as np\n'), ((1881, 1946), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['test_acc', '(1 - desired2)'], {'decimal': '(2)'}), '(test_acc, 1 - desired2, decimal=2)\n', (1911, 1946), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
from sklearn.manifold import MDS
import numpy as np
def accuracy(acc):
    """Plot per-iteration accuracy together with the running best.

    :param acc: sequence of accuracy values, one per iteration
    """
    # Running maximum in a single O(n) pass; the old list comprehension
    # recomputed max(acc[:i+1]) for every i, which is O(n^2).
    max_acc = []
    best = float("-inf")
    for value in acc:
        best = max(best, value)
        max_acc.append(best)
    plt.figure(figsize=(16, 4), dpi=100)
    plt.plot(acc, color="grey", linewidth=2.5, label="Accuracy")
    plt.plot(max_acc, color="g", linewidth=2.5, label="Best accuracy")
    plt.xlabel("Iterations")
    plt.xlim(0, len(acc))
    plt.legend(loc=4)
    plt.show()
def mds_accuracy(X, acc):
    """Scatter solutions in a 2-D MDS embedding, coloured by accuracy.

    :param X: feature matrix of candidate solutions
    :param acc: accuracy of each solution, used as the colour value
    """
    embedding = MDS(n_components=2, random_state=42).fit_transform(X)
    plt.figure(figsize=(16, 4), dpi=100)
    scatter = plt.scatter(embedding[:, 0], embedding[:, 1], c=acc,
                          cmap=plt.cm.get_cmap('jet'),
                          vmin=0.1, vmax=1, s=45)
    plt.colorbar(scatter)
    plt.title("Accuracy in two components MDS view")
    plt.show()
def summary(acc, quantile):
    """Print the best accuracy and the share of solutions above a threshold.

    :param acc: numpy array of accuracy values
    :param quantile: accuracy threshold (fraction, e.g. 0.9)
    """
    best, best_iteration = acc.max(), acc.argmax()
    print("Best accuracy: {} at iteration {}".format(best, best_iteration))
    print("Number of solutions better than {0:g}%: {1:.1f}%".format(
        100 * quantile,
        100 * np.sum(acc >= quantile) / float(acc.shape[0])
    ))
| [
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.colorbar",
"numpy.sum",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.cm.get_cmap",
"matplotlib.pyplot.title",
"sklearn.manifold.MDS",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((165, 201), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 4)', 'dpi': '(100)'}), '(figsize=(16, 4), dpi=100)\n', (175, 201), True, 'import matplotlib.pyplot as plt\n'), ((207, 267), 'matplotlib.pyplot.plot', 'plt.plot', (['acc'], {'color': '"""grey"""', 'linewidth': '(2.5)', 'label': '"""Accuracy"""'}), "(acc, color='grey', linewidth=2.5, label='Accuracy')\n", (215, 267), True, 'import matplotlib.pyplot as plt\n'), ((272, 338), 'matplotlib.pyplot.plot', 'plt.plot', (['max_acc'], {'color': '"""g"""', 'linewidth': '(2.5)', 'label': '"""Best accuracy"""'}), "(max_acc, color='g', linewidth=2.5, label='Best accuracy')\n", (280, 338), True, 'import matplotlib.pyplot as plt\n'), ((344, 368), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (354, 368), True, 'import matplotlib.pyplot as plt\n'), ((400, 417), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(4)'}), '(loc=4)\n', (410, 417), True, 'import matplotlib.pyplot as plt\n'), ((422, 432), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (430, 432), True, 'import matplotlib.pyplot as plt\n'), ((527, 563), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 4)', 'dpi': '(100)'}), '(figsize=(16, 4), dpi=100)\n', (537, 563), True, 'import matplotlib.pyplot as plt\n'), ((709, 725), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['cb'], {}), '(cb)\n', (721, 725), True, 'import matplotlib.pyplot as plt\n'), ((730, 778), 'matplotlib.pyplot.title', 'plt.title', (['"""Accuracy in two components MDS view"""'], {}), "('Accuracy in two components MDS view')\n", (739, 778), True, 'import matplotlib.pyplot as plt\n'), ((783, 793), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (791, 793), True, 'import matplotlib.pyplot as plt\n'), ((469, 505), 'sklearn.manifold.MDS', 'MDS', ([], {'n_components': '(2)', 'random_state': '(42)'}), '(n_components=2, random_state=42)\n', (472, 505), False, 'from sklearn.manifold import MDS\n'), ((636, 
658), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""jet"""'], {}), "('jet')\n", (651, 658), True, 'import matplotlib.pyplot as plt\n'), ((1010, 1033), 'numpy.sum', 'np.sum', (['(acc >= quantile)'], {}), '(acc >= quantile)\n', (1016, 1033), True, 'import numpy as np\n')] |
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Tests for calfits object
"""
import pytest
import os
import numpy as np
from astropy.io import fits
from pyuvdata import UVCal
import pyuvdata.tests as uvtest
from pyuvdata.data import DATA_PATH
import pyuvdata.utils as uvutils
# Module-wide warning filters: silence the "Using known values" warnings
# emitted while telescope metadata is filled in for these test files.
pytestmark = pytest.mark.filterwarnings(
    "ignore:telescope_location is not set. Using known values",
    "ignore:antenna_positions is not set. Using known values",
)
@pytest.mark.filterwarnings("ignore:When converting a delay-style cal to future array")
@pytest.mark.filterwarnings("ignore:Nfreqs will be required to be 1 for wide_band cals")
@pytest.mark.parametrize("future_shapes", [True, False])
@pytest.mark.parametrize("caltype", ["gain", "delay"])
def test_readwriteread(future_shapes, caltype, gain_data, delay_data, tmp_path):
    """
    Omnical/Firstcal fits loopback test.

    Read in calfits file, write out new calfits file, read back in and check
    for object equality.
    """
    cal_in = gain_data if caltype == "gain" else delay_data
    if future_shapes:
        cal_in.use_future_array_shapes()

    write_file = str(tmp_path / "outtest.fits")
    cal_in.write_calfits(write_file, clobber=True)

    cal_out = UVCal()
    cal_out.read_calfits(write_file)
    assert cal_out.filename == [os.path.basename(write_file)]

    if future_shapes:
        cal_out.use_future_array_shapes()
    assert cal_in == cal_out
@pytest.mark.parametrize("future_shapes", [True, False])
def test_write_inttime_equal_timediff(future_shapes, gain_data, delay_data, tmp_path):
    """
    Test writing out an object whose integration time matches the time diffs.
    """
    cal_in = gain_data
    # Set the integration time to the mean time spacing, converted from
    # days to seconds.
    cal_in.integration_time = np.mean(np.diff(cal_in.time_array)) * (24.0 * 60.0**2)
    if future_shapes:
        cal_in.use_future_array_shapes()

    write_file = str(tmp_path / "outtest.fits")
    cal_in.write_calfits(write_file, clobber=True)

    cal_out = UVCal()
    cal_out.read_calfits(write_file)
    if future_shapes:
        cal_out.use_future_array_shapes()
    assert cal_in == cal_out
@pytest.mark.parametrize(
    "filein,caltype",
    [
        ("zen.2457698.40355.xx.gain.calfits", "gain"),
        ("zen.2457698.40355.xx.delay.calfits", "delay"),
    ],
)
def test_read_metadata_only(filein, caltype, gain_data, delay_data, tmp_path):
    """
    Check that metadata-only reads work.
    """
    cal_in = gain_data if caltype == "gain" else delay_data

    # A metadata-only copy should equal a metadata-only read from disk.
    meta_copy = cal_in.copy(metadata_only=True)

    meta_read = UVCal()
    meta_read.read_calfits(os.path.join(DATA_PATH, filein), read_data=False)
    assert meta_copy == meta_read
def test_readwriteread_no_freq_range(gain_data, tmp_path):
    """Loopback works when the optional freq_range parameter is unset."""
    cal_in = gain_data
    cal_in.freq_range = None

    write_file = str(tmp_path / "outtest_omnical.fits")
    cal_in.write_calfits(write_file, clobber=True)

    cal_out = UVCal()
    cal_out.read_calfits(write_file)
    assert cal_in == cal_out
def test_readwriteread_no_time_range(tmp_path):
    """Loopback works when the optional time_range parameter is unset."""
    testfile = os.path.join(DATA_PATH, "zen.2457698.40355.xx.gain.calfits")
    cal_in = UVCal()
    cal_in.read_calfits(testfile)
    cal_in.time_range = None

    write_file = str(tmp_path / "outtest_omnical.fits")
    cal_in.write_calfits(write_file, clobber=True)

    cal_out = UVCal()
    cal_out.read_calfits(write_file)
    assert cal_in == cal_out
def test_error_unknown_cal_type(delay_data, tmp_path):
    """
    Test an error is raised when writing an unknown cal type.
    """
    cal_in = delay_data
    cal_in._set_unknown_cal_type()

    outfile = str(tmp_path / "outtest_firstcal.fits")
    with pytest.raises(ValueError, match="unknown calibration type"):
        cal_in.write_calfits(outfile, run_check=False, clobber=True)
@pytest.mark.parametrize(
    "header_dict,error_msg",
    [
        ({"flag": "CDELT2"}, "Jones values are different in FLAGS"),
        ({"flag": "CDELT3"}, "Time values are different in FLAGS"),
        ({"flag": "CRVAL5"}, "Spectral window values are different in FLAGS"),
        ({"totqual": "CDELT1"}, "Jones values are different in TOTQLTY"),
        ({"totqual": "CDELT2"}, "Time values are different in TOTQLTY"),
        ({"totqual": "CRVAL4"}, "Spectral window values are different in TOTQLTY"),
    ],
)
def test_fits_header_errors_delay(delay_data, tmp_path, header_dict, error_msg):
    """Corrupt one axis keyword in the FLAGS/TOTQLTY HDU of a delay-style
    calfits file and check that reading it back raises the expected error."""
    # change values for various axes in flag and total quality hdus to not
    # match primary hdu
    cal_in = delay_data
    cal_out = UVCal()
    write_file = str(tmp_path / "outtest_firstcal.fits")
    write_file2 = str(tmp_path / "outtest_firstcal2.fits")
    # Create filler jones info
    cal_in.jones_array = np.array([-5, -6, -7, -8])
    cal_in.Njones = 4
    cal_in.flag_array = np.zeros(cal_in._flag_array.expected_shape(cal_in), dtype=bool)
    cal_in.delay_array = np.ones(
        cal_in._delay_array.expected_shape(cal_in), dtype=np.float64
    )
    cal_in.quality_array = np.zeros(cal_in._quality_array.expected_shape(cal_in))
    # add total_quality_array so that can be tested as well
    cal_in.total_quality_array = np.zeros(
        cal_in._total_quality_array.expected_shape(cal_in)
    )
    # write file
    cal_in.write_calfits(write_file, clobber=True)
    # header_dict has exactly one entry: which HDU to corrupt -> which keyword
    unit = list(header_dict.keys())[0]
    keyword = header_dict[unit]
    with fits.open(write_file) as fname:
        data = fname[0].data
        primary_hdr = fname[0].header
        hdunames = uvutils._fits_indexhdus(fname)
        ant_hdu = fname[hdunames["ANTENNAS"]]
        flag_hdu = fname[hdunames["FLAGS"]]
        flag_hdr = flag_hdu.header
        totqualhdu = fname[hdunames["TOTQLTY"]]
        totqualhdr = totqualhdu.header
        # doubling the axis keyword makes it disagree with the primary HDU
        if unit == "flag":
            flag_hdr[keyword] *= 2
        elif unit == "totqual":
            totqualhdr[keyword] *= 2
        # rebuild the file with the corrupted header
        prihdu = fits.PrimaryHDU(data=data, header=primary_hdr)
        hdulist = fits.HDUList([prihdu, ant_hdu])
        flag_hdu = fits.ImageHDU(data=flag_hdu.data, header=flag_hdr)
        hdulist.append(flag_hdu)
        totqualhdu = fits.ImageHDU(data=totqualhdu.data, header=totqualhdr)
        hdulist.append(totqualhdu)
        hdulist.writeto(write_file2, overwrite=True)
        hdulist.close()
    with pytest.raises(ValueError, match=error_msg):
        cal_out.read_calfits(write_file2)
    return
@pytest.mark.parametrize(
    "header_dict,error_msg",
    [
        ({"totqual": "CDELT1"}, "Jones values are different in TOTQLTY"),
        ({"totqual": "CDELT2"}, "Time values are different in TOTQLTY"),
        ({"totqual": "CDELT3"}, "Frequency values are different in TOTQLTY"),
        ({"totqual": "CRVAL4"}, "Spectral window values are different in TOTQLTY"),
    ],
)
def test_fits_header_errors_gain(gain_data, tmp_path, header_dict, error_msg):
    """Corrupt one axis keyword in the TOTQLTY HDU of a gain-style calfits
    file and check that reading it back raises the expected error."""
    # repeat for gain type file
    cal_in = gain_data
    cal_out = UVCal()
    write_file = str(tmp_path / "outtest_omnical.fits")
    write_file2 = str(tmp_path / "outtest_omnical2.fits")
    # Create filler jones info
    cal_in.jones_array = np.array([-5, -6, -7, -8])
    cal_in.Njones = 4
    cal_in.flag_array = np.zeros(cal_in._flag_array.expected_shape(cal_in), dtype=bool)
    cal_in.gain_array = np.ones(
        cal_in._gain_array.expected_shape(cal_in), dtype=np.complex64
    )
    cal_in.quality_array = np.zeros(cal_in._quality_array.expected_shape(cal_in))
    # add total_quality_array so that can be tested as well
    cal_in.total_quality_array = np.zeros(
        cal_in._total_quality_array.expected_shape(cal_in)
    )
    # write file
    cal_in.write_calfits(write_file, clobber=True)
    # header_dict has exactly one entry: which HDU to corrupt -> which keyword
    unit = list(header_dict.keys())[0]
    keyword = header_dict[unit]
    with fits.open(write_file) as fname:
        data = fname[0].data
        primary_hdr = fname[0].header
        hdunames = uvutils._fits_indexhdus(fname)
        ant_hdu = fname[hdunames["ANTENNAS"]]
        totqualhdu = fname[hdunames["TOTQLTY"]]
        totqualhdr = totqualhdu.header
        # doubling the axis keyword makes it disagree with the primary HDU
        if unit == "totqual":
            totqualhdr[keyword] *= 2
        # rebuild the file with the corrupted header
        prihdu = fits.PrimaryHDU(data=data, header=primary_hdr)
        hdulist = fits.HDUList([prihdu, ant_hdu])
        totqualhdu = fits.ImageHDU(data=totqualhdu.data, header=totqualhdr)
        hdulist.append(totqualhdu)
        hdulist.writeto(write_file2, overwrite=True)
        hdulist.close()
    with pytest.raises(ValueError, match=error_msg):
        cal_out.read_calfits(write_file2)
    return
def test_latlonalt_noxyz(gain_data, tmp_path):
    """A calfits file missing the ARRAYX/Y/Z keywords still round-trips."""
    cal_in = gain_data
    cal_out = UVCal()
    full_path = str(tmp_path / "outtest.fits")
    noxyz_path = str(tmp_path / "outtest_noxyz.fits")

    cal_in.write_calfits(full_path)

    # Re-save the file after stripping the ECEF telescope-position keywords.
    with fits.open(full_path) as fname:
        data = fname[0].data
        primary_hdr = fname[0].header
        hdunames = uvutils._fits_indexhdus(fname)
        ant_hdu = fname[hdunames["ANTENNAS"]]

        for key in ("ARRAYX", "ARRAYY", "ARRAYZ"):
            primary_hdr.pop(key)

        prihdu = fits.PrimaryHDU(data=data, header=primary_hdr)
        fits.HDUList([prihdu, ant_hdu]).writeto(noxyz_path, overwrite=True)

    cal_out.read_calfits(noxyz_path)
    assert cal_out == cal_in
@pytest.mark.parametrize(
    "kwd1,kwd2,val1,val2",
    [
        ["keyword1", "keyword2", True, False],
        ["keyword1", "keyword2", np.int64(5), 7],
        ["keyword1", "keyword2", np.int64(5.3), 6.9],
        ["keyword1", "keyword2", np.complex64(5.3 + 1.2j), 6.9 + 4.6j],
        [
            "keyword1",
            "comment",
            "short comment",
            "this is a very long comment that will "
            "be broken into several lines\nif "
            "everything works properly.",
        ],
    ],
)
def test_extra_keywords(gain_data, kwd1, kwd2, val1, val2, tmp_path):
    """Extra keywords of assorted scalar types survive a calfits round trip."""
    cal_in = gain_data
    cal_out = UVCal()
    out_path = str(tmp_path / "outtest_extrakws.fits")

    # Stash both keywords (booleans, ints, floats, complex or long comments
    # depending on the parametrization) before writing.
    cal_in.extra_keywords[kwd1] = val1
    cal_in.extra_keywords[kwd2] = val2

    cal_in.write_calfits(out_path, clobber=True)
    cal_out.read_calfits(out_path)

    assert cal_in == cal_out
@pytest.mark.parametrize(
    "ex_val,error_msg",
    [
        ({"testdict": {"testkey": 23}}, "Extra keyword testdict is of"),
        ({"testlist": [12, 14, 90]}, "Extra keyword testlist is of"),
        ({"testarr": np.array([12, 14, 90])}, "Extra keyword testarr is of"),
    ],
)
def test_extra_keywords_errors(gain_data, tmp_path, ex_val, error_msg):
    """Dict, list, and array extra keywords warn on check and fail on write."""
    cal_in = gain_data
    out_path = str(tmp_path / "outtest_extrakwd_err.fits")

    # Install the unwritable extra keyword (dict, list or ndarray value).
    keyword, val = next(iter(ex_val.items()))
    cal_in.extra_keywords[keyword] = val

    with uvtest.check_warnings(
        UserWarning,
        match=f"{keyword} in extra_keywords is a list, array or dict",
    ):
        cal_in.check()
    with pytest.raises(TypeError, match=error_msg):
        cal_in.write_calfits(out_path, run_check=False)
def test_extra_keywords_warnings(gain_data, tmp_path):
    """An over-long extra-keyword name warns on both check and write."""
    cal_in = gain_data
    out_path = str(tmp_path / "outtest_extrakwd_warn.fits")

    # FITS keywords are limited to 8 characters; this one is longer.
    cal_in.extra_keywords["test_long_key"] = True

    with uvtest.check_warnings(
        UserWarning,
        match="key test_long_key in extra_keywords is longer than 8 characters",
    ):
        cal_in.check()
    with uvtest.check_warnings(
        UserWarning, "key test_long_key in extra_keywords is longer than 8 characters"
    ):
        cal_in.write_calfits(out_path, run_check=False, clobber=True)
@pytest.mark.parametrize("caltype", ["gain", "delay"])
def test_input_flag_array(caltype, gain_data, delay_data, tmp_path):
"""
Test when data file has input flag array.
Currently we do not have a testfile, so we will artifically create one
and check for internal consistency.
"""
if caltype == "gain":
cal_in = gain_data
else:
cal_in = delay_data
cal_out = UVCal()
write_file = str(tmp_path / "outtest_input_flags.fits")
cal_in.input_flag_array = np.zeros(
cal_in._input_flag_array.expected_shape(cal_in), dtype=bool
)
cal_in.write_calfits(write_file, clobber=True)
cal_out.read_calfits(write_file)
assert cal_in == cal_out
@pytest.mark.parametrize("caltype", ["gain", "delay"])
def test_jones(caltype, gain_data, delay_data, tmp_path):
"""
Test when data file has more than one element in Jones matrix.
Currently we do not have a testfile, so we will artifically create one
and check for internal consistency.
"""
if caltype == "gain":
cal_in = gain_data
else:
cal_in = delay_data
cal_out = UVCal()
write_file = str(tmp_path / "outtest_jones.fits")
# Create filler jones info
cal_in.jones_array = np.array([-5, -6, -7, -8])
cal_in.Njones = 4
cal_in.flag_array = np.zeros(cal_in._flag_array.expected_shape(cal_in), dtype=bool)
if caltype == "gain":
cal_in.gain_array = np.ones(
cal_in._gain_array.expected_shape(cal_in), dtype=np.complex64
)
else:
cal_in.delay_array = np.ones(
cal_in._delay_array.expected_shape(cal_in), dtype=np.float64
)
cal_in.quality_array = np.zeros(cal_in._quality_array.expected_shape(cal_in))
cal_in.write_calfits(write_file, clobber=True)
cal_out.read_calfits(write_file)
assert cal_in == cal_out
@pytest.mark.parametrize("caltype", ["gain", "delay"])
def test_readwriteread_total_quality_array(caltype, gain_data, delay_data, tmp_path):
"""
Test when data file has a total quality array.
Currently we have no such file, so we will artificially create one and
check for internal consistency.
"""
if caltype == "gain":
cal_in = gain_data
else:
cal_in = delay_data
cal_out = UVCal()
write_file = str(tmp_path / "outtest_total_quality_array.fits")
# Create filler total quality array
cal_in.total_quality_array = np.zeros(
cal_in._total_quality_array.expected_shape(cal_in)
)
cal_in.write_calfits(write_file, clobber=True)
cal_out.read_calfits(write_file)
assert cal_in == cal_out
del cal_in
del cal_out
@pytest.mark.parametrize("caltype", ["gain", "delay"])
def test_total_quality_array_size(caltype, gain_data, delay_data):
"""
Test that total quality array defaults to the proper size
"""
if caltype == "gain":
cal_in = gain_data
else:
cal_in = delay_data
# Create filler total quality array
cal_in.total_quality_array = np.zeros(
cal_in._total_quality_array.expected_shape(cal_in)
)
if caltype == "gain":
proper_shape = (1, cal_in.Nfreqs, cal_in.Ntimes, cal_in.Njones)
else:
proper_shape = (1, 1, cal_in.Ntimes, cal_in.Njones)
assert cal_in.total_quality_array.shape == proper_shape
del cal_in
def test_write_time_precision(gain_data, tmp_path):
    """Times round-trip at full precision through calfits (regression for issue 311)."""
    cal_in = gain_data
    cal_out = UVCal()
    out_path = str(tmp_path / "outtest_time_prec.fits")

    # Replace the time array with values that would break low-precision writes.
    dt = cal_in.integration_time / (24.0 * 60.0 * 60.0)
    t0 = cal_in.time_array[0] + dt * 3
    cal_in.time_array = dt * np.arange(cal_in.Ntimes) + t0
    if cal_in.lst_array is not None:
        cal_in.set_lsts_from_time_array()

    cal_in.write_calfits(out_path, clobber=True)
    cal_out.read_calfits(out_path)
    assert cal_in == cal_out
def test_read_noversion_history(gain_data, tmp_path):
    """Version info is added back to the history when the file lacks it."""
    cal_in = gain_data
    cal_out = UVCal()
    with_hist = str(tmp_path / "outtest_nover.fits")
    no_hist = str(tmp_path / "outtest_nover2.fits")

    cal_in.write_calfits(with_hist, clobber=True)

    # Re-save the file with the HISTORY card blanked out.
    with fits.open(with_hist) as fname:
        data = fname[0].data
        primary_hdr = fname[0].header
        hdunames = uvutils._fits_indexhdus(fname)
        ant_hdu = fname[hdunames["ANTENNAS"]]

        primary_hdr["HISTORY"] = ""

        prihdu = fits.PrimaryHDU(data=data, header=primary_hdr)
        hdulist = fits.HDUList([prihdu, ant_hdu])
        hdulist.writeto(no_hist, overwrite=True)
        hdulist.close()

    cal_out.read_calfits(no_hist)
    assert cal_in == cal_out
@pytest.mark.filterwarnings("ignore:Selected frequencies are not contiguous")
def test_write_freq_spacing_not_channel_width(gain_data, tmp_path):
cal_in = gain_data
cal_out = UVCal()
write_file = str(tmp_path / "outtest_freqspace.fits")
# select every other frequency -- then evenly spaced but doesn't match channel width
cal_in.select(freq_chans=np.arange(0, 10, 2))
cal_in.write_calfits(write_file, clobber=True)
cal_out.read_calfits(write_file)
assert cal_in == cal_out
| [
"numpy.mean",
"pyuvdata.UVCal",
"numpy.int64",
"pytest.mark.filterwarnings",
"astropy.io.fits.PrimaryHDU",
"astropy.io.fits.HDUList",
"numpy.arange",
"astropy.io.fits.ImageHDU",
"os.path.join",
"numpy.diff",
"pytest.mark.parametrize",
"numpy.array",
"pyuvdata.utils._fits_indexhdus",
"pytes... | [((380, 534), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:telescope_location is not set. Using known values"""', '"""ignore:antenna_positions is not set. Using known values"""'], {}), "(\n 'ignore:telescope_location is not set. Using known values',\n 'ignore:antenna_positions is not set. Using known values')\n", (406, 534), False, 'import pytest\n'), ((540, 631), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:When converting a delay-style cal to future array"""'], {}), "(\n 'ignore:When converting a delay-style cal to future array')\n", (566, 631), False, 'import pytest\n'), ((628, 720), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:Nfreqs will be required to be 1 for wide_band cals"""'], {}), "(\n 'ignore:Nfreqs will be required to be 1 for wide_band cals')\n", (654, 720), False, 'import pytest\n'), ((717, 772), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""future_shapes"""', '[True, False]'], {}), "('future_shapes', [True, False])\n", (740, 772), False, 'import pytest\n'), ((774, 827), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""caltype"""', "['gain', 'delay']"], {}), "('caltype', ['gain', 'delay'])\n", (797, 827), False, 'import pytest\n'), ((1556, 1611), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""future_shapes"""', '[True, False]'], {}), "('future_shapes', [True, False])\n", (1579, 1611), False, 'import pytest\n'), ((2260, 2409), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""filein,caltype"""', "[('zen.2457698.40355.xx.gain.calfits', 'gain'), (\n 'zen.2457698.40355.xx.delay.calfits', 'delay')]"], {}), "('filein,caltype', [(\n 'zen.2457698.40355.xx.gain.calfits', 'gain'), (\n 'zen.2457698.40355.xx.delay.calfits', 'delay')])\n", (2283, 2409), False, 'import pytest\n'), ((4125, 4599), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""header_dict,error_msg"""', "[({'flag': 'CDELT2'}, 'Jones values are 
different in FLAGS'), ({'flag':\n 'CDELT3'}, 'Time values are different in FLAGS'), ({'flag': 'CRVAL5'},\n 'Spectral window values are different in FLAGS'), ({'totqual': 'CDELT1'\n }, 'Jones values are different in TOTQLTY'), ({'totqual': 'CDELT2'},\n 'Time values are different in TOTQLTY'), ({'totqual': 'CRVAL4'},\n 'Spectral window values are different in TOTQLTY')]"], {}), "('header_dict,error_msg', [({'flag': 'CDELT2'},\n 'Jones values are different in FLAGS'), ({'flag': 'CDELT3'},\n 'Time values are different in FLAGS'), ({'flag': 'CRVAL5'},\n 'Spectral window values are different in FLAGS'), ({'totqual': 'CDELT1'\n }, 'Jones values are different in TOTQLTY'), ({'totqual': 'CDELT2'},\n 'Time values are different in TOTQLTY'), ({'totqual': 'CRVAL4'},\n 'Spectral window values are different in TOTQLTY')])\n", (4148, 4599), False, 'import pytest\n'), ((6699, 7042), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""header_dict,error_msg"""', "[({'totqual': 'CDELT1'}, 'Jones values are different in TOTQLTY'), ({\n 'totqual': 'CDELT2'}, 'Time values are different in TOTQLTY'), ({\n 'totqual': 'CDELT3'}, 'Frequency values are different in TOTQLTY'), ({\n 'totqual': 'CRVAL4'}, 'Spectral window values are different in TOTQLTY')]"], {}), "('header_dict,error_msg', [({'totqual': 'CDELT1'},\n 'Jones values are different in TOTQLTY'), ({'totqual': 'CDELT2'},\n 'Time values are different in TOTQLTY'), ({'totqual': 'CDELT3'},\n 'Frequency values are different in TOTQLTY'), ({'totqual': 'CRVAL4'},\n 'Spectral window values are different in TOTQLTY')])\n", (6722, 7042), False, 'import pytest\n'), ((12084, 12137), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""caltype"""', "['gain', 'delay']"], {}), "('caltype', ['gain', 'delay'])\n", (12107, 12137), False, 'import pytest\n'), ((12793, 12846), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""caltype"""', "['gain', 'delay']"], {}), "('caltype', ['gain', 'delay'])\n", (12816, 12846), False, 
'import pytest\n'), ((13947, 14000), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""caltype"""', "['gain', 'delay']"], {}), "('caltype', ['gain', 'delay'])\n", (13970, 14000), False, 'import pytest\n'), ((14749, 14802), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""caltype"""', "['gain', 'delay']"], {}), "('caltype', ['gain', 'delay'])\n", (14772, 14802), False, 'import pytest\n'), ((16936, 17012), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:Selected frequencies are not contiguous"""'], {}), "('ignore:Selected frequencies are not contiguous')\n", (16962, 17012), False, 'import pytest\n'), ((1239, 1246), 'pyuvdata.UVCal', 'UVCal', ([], {}), '()\n', (1244, 1246), False, 'from pyuvdata import UVCal\n'), ((1827, 1853), 'numpy.diff', 'np.diff', (['cal_in.time_array'], {}), '(cal_in.time_array)\n', (1834, 1853), True, 'import numpy as np\n'), ((2006, 2013), 'pyuvdata.UVCal', 'UVCal', ([], {}), '()\n', (2011, 2013), False, 'from pyuvdata import UVCal\n'), ((2757, 2764), 'pyuvdata.UVCal', 'UVCal', ([], {}), '()\n', (2762, 2764), False, 'from pyuvdata import UVCal\n'), ((2780, 2811), 'os.path.join', 'os.path.join', (['DATA_PATH', 'filein'], {}), '(DATA_PATH, filein)\n', (2792, 2811), False, 'import os\n'), ((3035, 3042), 'pyuvdata.UVCal', 'UVCal', ([], {}), '()\n', (3040, 3042), False, 'from pyuvdata import UVCal\n'), ((3361, 3368), 'pyuvdata.UVCal', 'UVCal', ([], {}), '()\n', (3366, 3368), False, 'from pyuvdata import UVCal\n'), ((3383, 3390), 'pyuvdata.UVCal', 'UVCal', ([], {}), '()\n', (3388, 3390), False, 'from pyuvdata import UVCal\n'), ((3406, 3466), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""zen.2457698.40355.xx.gain.calfits"""'], {}), "(DATA_PATH, 'zen.2457698.40355.xx.gain.calfits')\n", (3418, 3466), False, 'import os\n'), ((4859, 4866), 'pyuvdata.UVCal', 'UVCal', ([], {}), '()\n', (4864, 4866), False, 'from pyuvdata import UVCal\n'), ((5040, 5066), 'numpy.array', 'np.array', (['[-5, -6, -7, -8]'], 
{}), '([-5, -6, -7, -8])\n', (5048, 5066), True, 'import numpy as np\n'), ((7225, 7232), 'pyuvdata.UVCal', 'UVCal', ([], {}), '()\n', (7230, 7232), False, 'from pyuvdata import UVCal\n'), ((7404, 7430), 'numpy.array', 'np.array', (['[-5, -6, -7, -8]'], {}), '([-5, -6, -7, -8])\n', (7412, 7430), True, 'import numpy as np\n'), ((8900, 8907), 'pyuvdata.UVCal', 'UVCal', ([], {}), '()\n', (8905, 8907), False, 'from pyuvdata import UVCal\n'), ((10234, 10241), 'pyuvdata.UVCal', 'UVCal', ([], {}), '()\n', (10239, 10241), False, 'from pyuvdata import UVCal\n'), ((12491, 12498), 'pyuvdata.UVCal', 'UVCal', ([], {}), '()\n', (12496, 12498), False, 'from pyuvdata import UVCal\n'), ((13210, 13217), 'pyuvdata.UVCal', 'UVCal', ([], {}), '()\n', (13215, 13217), False, 'from pyuvdata import UVCal\n'), ((13329, 13355), 'numpy.array', 'np.array', (['[-5, -6, -7, -8]'], {}), '([-5, -6, -7, -8])\n', (13337, 13355), True, 'import numpy as np\n'), ((14372, 14379), 'pyuvdata.UVCal', 'UVCal', ([], {}), '()\n', (14377, 14379), False, 'from pyuvdata import UVCal\n'), ((15620, 15627), 'pyuvdata.UVCal', 'UVCal', ([], {}), '()\n', (15625, 15627), False, 'from pyuvdata import UVCal\n'), ((16260, 16267), 'pyuvdata.UVCal', 'UVCal', ([], {}), '()\n', (16265, 16267), False, 'from pyuvdata import UVCal\n'), ((17119, 17126), 'pyuvdata.UVCal', 'UVCal', ([], {}), '()\n', (17124, 17126), False, 'from pyuvdata import UVCal\n'), ((1888, 1907), 'numpy.mean', 'np.mean', (['time_diffs'], {}), '(time_diffs)\n', (1895, 1907), True, 'import numpy as np\n'), ((3977, 4036), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""unknown calibration type"""'}), "(ValueError, match='unknown calibration type')\n", (3990, 4036), False, 'import pytest\n'), ((5688, 5709), 'astropy.io.fits.open', 'fits.open', (['write_file'], {}), '(write_file)\n', (5697, 5709), False, 'from astropy.io import fits\n'), ((5806, 5836), 'pyuvdata.utils._fits_indexhdus', 'uvutils._fits_indexhdus', (['fname'], {}), '(fname)\n', (5829, 
5836), True, 'import pyuvdata.utils as uvutils\n'), ((6199, 6245), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {'data': 'data', 'header': 'primary_hdr'}), '(data=data, header=primary_hdr)\n', (6214, 6245), False, 'from astropy.io import fits\n'), ((6264, 6295), 'astropy.io.fits.HDUList', 'fits.HDUList', (['[prihdu, ant_hdu]'], {}), '([prihdu, ant_hdu])\n', (6276, 6295), False, 'from astropy.io import fits\n'), ((6315, 6365), 'astropy.io.fits.ImageHDU', 'fits.ImageHDU', ([], {'data': 'flag_hdu.data', 'header': 'flag_hdr'}), '(data=flag_hdu.data, header=flag_hdr)\n', (6328, 6365), False, 'from astropy.io import fits\n'), ((6420, 6474), 'astropy.io.fits.ImageHDU', 'fits.ImageHDU', ([], {'data': 'totqualhdu.data', 'header': 'totqualhdr'}), '(data=totqualhdu.data, header=totqualhdr)\n', (6433, 6474), False, 'from astropy.io import fits\n'), ((6598, 6640), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'error_msg'}), '(ValueError, match=error_msg)\n', (6611, 6640), False, 'import pytest\n'), ((8052, 8073), 'astropy.io.fits.open', 'fits.open', (['write_file'], {}), '(write_file)\n', (8061, 8073), False, 'from astropy.io import fits\n'), ((8170, 8200), 'pyuvdata.utils._fits_indexhdus', 'uvutils._fits_indexhdus', (['fname'], {}), '(fname)\n', (8193, 8200), True, 'import pyuvdata.utils as uvutils\n'), ((8420, 8466), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {'data': 'data', 'header': 'primary_hdr'}), '(data=data, header=primary_hdr)\n', (8435, 8466), False, 'from astropy.io import fits\n'), ((8485, 8516), 'astropy.io.fits.HDUList', 'fits.HDUList', (['[prihdu, ant_hdu]'], {}), '([prihdu, ant_hdu])\n', (8497, 8516), False, 'from astropy.io import fits\n'), ((8538, 8592), 'astropy.io.fits.ImageHDU', 'fits.ImageHDU', ([], {'data': 'totqualhdu.data', 'header': 'totqualhdr'}), '(data=totqualhdu.data, header=totqualhdr)\n', (8551, 8592), False, 'from astropy.io import fits\n'), ((8716, 8758), 'pytest.raises', 'pytest.raises', (['ValueError'], 
{'match': 'error_msg'}), '(ValueError, match=error_msg)\n', (8729, 8758), False, 'import pytest\n'), ((9059, 9080), 'astropy.io.fits.open', 'fits.open', (['write_file'], {}), '(write_file)\n', (9068, 9080), False, 'from astropy.io import fits\n'), ((9177, 9207), 'pyuvdata.utils._fits_indexhdus', 'uvutils._fits_indexhdus', (['fname'], {}), '(fname)\n', (9200, 9207), True, 'import pyuvdata.utils as uvutils\n'), ((9375, 9421), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {'data': 'data', 'header': 'primary_hdr'}), '(data=data, header=primary_hdr)\n', (9390, 9421), False, 'from astropy.io import fits\n'), ((9440, 9471), 'astropy.io.fits.HDUList', 'fits.HDUList', (['[prihdu, ant_hdu]'], {}), '([prihdu, ant_hdu])\n', (9452, 9471), False, 'from astropy.io import fits\n'), ((11185, 11287), 'pyuvdata.tests.check_warnings', 'uvtest.check_warnings', (['UserWarning'], {'match': 'f"""{keyword} in extra_keywords is a list, array or dict"""'}), "(UserWarning, match=\n f'{keyword} in extra_keywords is a list, array or dict')\n", (11206, 11287), True, 'import pyuvdata.tests as uvtest\n'), ((11339, 11380), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'error_msg'}), '(TypeError, match=error_msg)\n', (11352, 11380), False, 'import pytest\n'), ((11718, 11830), 'pyuvdata.tests.check_warnings', 'uvtest.check_warnings', (['UserWarning'], {'match': '"""key test_long_key in extra_keywords is longer than 8 characters"""'}), "(UserWarning, match=\n 'key test_long_key in extra_keywords is longer than 8 characters')\n", (11739, 11830), True, 'import pyuvdata.tests as uvtest\n'), ((11882, 11987), 'pyuvdata.tests.check_warnings', 'uvtest.check_warnings', (['UserWarning', '"""key test_long_key in extra_keywords is longer than 8 characters"""'], {}), "(UserWarning,\n 'key test_long_key in extra_keywords is longer than 8 characters')\n", (11903, 11987), True, 'import pyuvdata.tests as uvtest\n'), ((16440, 16461), 'astropy.io.fits.open', 'fits.open', (['write_file'], {}), 
'(write_file)\n', (16449, 16461), False, 'from astropy.io import fits\n'), ((16558, 16588), 'pyuvdata.utils._fits_indexhdus', 'uvutils._fits_indexhdus', (['fname'], {}), '(fname)\n', (16581, 16588), True, 'import pyuvdata.utils as uvutils\n'), ((16690, 16736), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {'data': 'data', 'header': 'primary_hdr'}), '(data=data, header=primary_hdr)\n', (16705, 16736), False, 'from astropy.io import fits\n'), ((16755, 16786), 'astropy.io.fits.HDUList', 'fits.HDUList', (['[prihdu, ant_hdu]'], {}), '([prihdu, ant_hdu])\n', (16767, 16786), False, 'from astropy.io import fits\n'), ((1416, 1444), 'os.path.basename', 'os.path.basename', (['write_file'], {}), '(write_file)\n', (1432, 1444), False, 'import os\n'), ((9735, 9746), 'numpy.int64', 'np.int64', (['(5)'], {}), '(5)\n', (9743, 9746), True, 'import numpy as np\n'), ((9785, 9798), 'numpy.int64', 'np.int64', (['(5.3)'], {}), '(5.3)\n', (9793, 9798), True, 'import numpy as np\n'), ((9839, 9863), 'numpy.complex64', 'np.complex64', (['(5.3 + 1.2j)'], {}), '(5.3 + 1.2j)\n', (9851, 9863), True, 'import numpy as np\n'), ((15855, 15879), 'numpy.arange', 'np.arange', (['cal_in.Ntimes'], {}), '(cal_in.Ntimes)\n', (15864, 15879), True, 'import numpy as np\n'), ((17304, 17323), 'numpy.arange', 'np.arange', (['(0)', '(10)', '(2)'], {}), '(0, 10, 2)\n', (17313, 17323), True, 'import numpy as np\n'), ((10765, 10787), 'numpy.array', 'np.array', (['[12, 14, 90]'], {}), '([12, 14, 90])\n', (10773, 10787), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.