code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
import matplotlib.pyplot as plt
import os
import trimesh
from mpl_toolkits.mplot3d import axes3d
import time, warnings
from skimage import measure
import random
from sympy import sympify
warnings.filterwarnings("ignore")  # silence all warnings globally (library deprecation chatter)
class SingleFormulaBasedMaterial:
    """Voxel/mesh generator for materials defined by an implicit level-set formula.

    A material is described by a trigonometric formula f(x, y, z); a voxel is
    solid where |f(x, y, z)| <= eps.  Built-in formulas are the gyroid and the
    Schwarz-D surface; alternatively a user-supplied formula string or a random
    trigonometric formula can be used.  The voxel grid can be combined with
    other voxel masks via boolean operations and exported as an STL mesh.
    """
    def __gyroid(self):
        # Level-set formula of the gyroid triply-periodic minimal surface.
        return 'sin(x)*cos(y)+sin(y)*cos(z)+sin(z)*cos(x)'
    def __SchD(self):
        # Level-set formula of the Schwarz diamond (Schwarz-D) surface.
        return 'sin(x)*sin(y)*sin(z)+sin(x)*cos(y)*cos(z)+cos(x)*sin(y)*cos(z)+cos(x)*cos(y)*sin(z)'
    def __randomFormulaString(self,n_terms=5):
        # Build a random formula: a random constant plus `n_terms` products of
        # a random coefficient and one of {sin, cos, 1} per axis.
        formula='{:.2f}'.format(random.random())
        for i in range(n_terms):
            ch_digit = '{:.2f}'.format(random.random())
            ch_X = random.choice(['sin(x)', 'cos(x)', '1'])
            ch_Y = random.choice(['sin(y)', 'cos(y)', '1'])
            ch_Z = random.choice(['sin(z)', 'cos(z)', '1'])
            formula+='+'+ch_digit+'*'+ch_X+'*'+ch_Y+'*'+ch_Z
        return formula
    def __formula_string(self):
        # Parse the formula with sympy, compile it to a vectorized numpy
        # function, and evaluate it on the coordinate meshgrid.  Coordinates
        # are scaled by 2*pi/a[i] so that each lattice constant a[i]
        # corresponds to one spatial period along that axis.
        f = sympify(self.__formula)
        from sympy.abc import x, y, z
        from sympy.utilities.lambdify import lambdify
        f = lambdify([x,y,z], f, 'numpy')
        return f(self.__x*np.pi*2/self.__a[0],self.__y*np.pi*2/self.__a[1],self.__z*np.pi*2/self.__a[2])
    def __init__(self, unit=None, formula = None, l=10, r=[1,1,1], a=[1,1,1], eps=0.1, res=0.1):
        # NOTE(review): r and a are mutable default arguments — shared across
        # instances if mutated; `r=None` with a per-call default would be safer.
        #
        # unit    : built-in formula name ('gyroid' or 'schd'); anything else
        #           selects a random formula.  Ignored when `formula` is given.
        # formula : optional user-supplied level-set formula string.
        # l       : edge length of one unit cell.
        # r       : repetitions of the unit cell along each axis.
        # a       : lattice constants (period scale) per axis.
        # eps     : level-set half-thickness; voxels with |f| <= eps are solid.
        # res     : sampling resolution (length per voxel).
        self.__l = l
        self.__r = r
        self.__a = a
        self.__eps = eps
        self.__res = res
        if formula:
            self.__formula = formula
            unit = 'user-defined'
        else:
            if unit.lower() == 'gyroid':
                self.__formula = self.__gyroid()
            elif unit.lower() == 'schd':
                self.__formula = self.__SchD()
            else:
                self.__formula = self.__randomFormulaString()
                unit = 'random'
        print('Using formula: {}'.format(self.__formula))
        rx,ry,rz = self.__r
        # Number of samples per unit cell.
        _res=int(self.__l/self.__res)
        self.__x=np.array([i for i in range(_res*rx)])
        self.__y=np.array([i for i in range(_res*ry)])
        self.__z=np.array([i for i in range(_res*rz)])
        # NOTE(review): lx/ly/lz are computed but never used.
        lx=len(self.__x)
        ly=len(self.__y)
        lz=len(self.__z)
        self._model = '{}_{}x{}x{}_r{:.1f}'.format(unit,rx,ry,rz,self.__res)
        # NOTE(review): isinstance(self.__eps, float) would be the idiomatic
        # check; this branch tags models built with a non-float (e.g. array) eps.
        if type(self.__eps) is not float:
            self._model += '_custom_eps'
        self.__x, self.__y, self.__z = np.meshgrid(self.__x/_res, self.__y/_res, self.__z/_res, indexing='ij')
        self._vox = self._buildvox()
        # Grow eps until the structure is no longer almost entirely void.
        while self.get_porosity() > 0.99:
            self.__eps+=0.001
            self.update_eps(self.__eps)
            print('Finding matched material, but porosity: {} is too high. Update eps with {}'.format(self.get_porosity(), self.__eps))
    def _buildvox(self):
        # Boolean voxel grid: solid where the formula is within eps of zero.
        return np.fabs(self.__formula_string())<=self.__eps
    def update_eps(self, eps):
        """Rebuild the voxel grid with a new level-set thickness `eps`.

        Raises NameError if the resulting structure is completely solid
        (porosity 0), i.e. no matched material exists for this formula.
        """
        self.__eps=eps
        # NOTE(review): this re-derives the meshgrid exactly as in __init__;
        # the duplicated setup could be shared by a private helper.
        rx,ry,rz = self.__r
        _res=int(self.__l/self.__res)
        self.__x=np.array([i for i in range(_res*rx)])
        self.__y=np.array([i for i in range(_res*ry)])
        self.__z=np.array([i for i in range(_res*rz)])
        lx=len(self.__x)
        ly=len(self.__y)
        lz=len(self.__z)
        self.__x, self.__y, self.__z = np.meshgrid(self.__x/_res, self.__y/_res, self.__z/_res, indexing='ij')
        self._vox = self._buildvox()
        if self.get_porosity() == 0:
            raise NameError('Didn\'t find matched material with {}'.format(self.__formula))
        return self
    def update_or(self, mix):
        """Boolean union of this voxel grid with the voxel mask `mix`."""
        print('Initial porosity: {}'.format(self.get_porosity()))
        self._vox = np.logical_or(self._vox, mix)
        print('Final porosity after ''OR'': {}'.format(self.get_porosity()))
        self._model+='_OR'
        return self
    def update_xor(self, mix):
        """Boolean symmetric difference of this voxel grid with `mix`."""
        print('Initial porosity: {}'.format(self.get_porosity()))
        self._vox = np.logical_xor(self._vox, mix)
        print('Final porosity after ''XOR'': {}'.format(self.get_porosity()))
        self._model+='_XOR'
        return self
    def update_sub(self, mix):
        """Boolean subtraction: remove `mix` from this voxel grid.

        Implemented as (vox OR mix) XOR mix, which clears every voxel set in
        `mix` and keeps the remainder of `vox`.
        """
        print('Initial porosity: {}'.format(self.get_porosity()))
        self._vox = np.logical_xor(np.logical_or(self._vox, mix), mix)
        print('Final porosity after ''SUB'': {}'.format(self.get_porosity()))
        self._model+='_SUB'
        return self
    def update_and(self, mix):
        """Boolean intersection of this voxel grid with `mix`."""
        print('Initial porosity: {}'.format(self.get_porosity()))
        self._vox = np.logical_and(self._vox, mix)
        print('Final porosity after ''AND'': {}'.format(self.get_porosity()))
        self._model+='_AND'
        return self
    #======================================================================================================================
    def get_porosity(self):
        # Fraction of void voxels: 1 - (solid voxels / total voxels).
        return 1-(np.sum(self._vox)/self._vox.size)
    def get_vox(self):
        # Raw boolean voxel grid.
        return self._vox
    def get_formula(self):
        # Level-set formula string in use.
        return self.__formula
    def get_eps(self):
        # Current level-set half-thickness.
        return self.__eps
    def formSolid(self, save=True, smooth=True):
        """Build (and optionally save) a solid STL mesh from the voxel grid.

        When `save` is True, writes the mesh, an info.txt with the build
        parameters, and one PNG slice per voxel layer under STL/<model>/,
        displaying each slice as it is written (requires IPython).
        Returns the trimesh mesh object.
        """
        mesh = trimesh.voxel.ops.matrix_to_marching_cubes(self._vox, pitch=self.__res)
        if smooth:
            # Humphrey filter smooths the stair-stepped marching-cubes surface.
            mesh = trimesh.smoothing.filter_humphrey(mesh)
        mesh.rezero()
        if save:
            loc='STL/'+self._model
            os.makedirs(loc, exist_ok=True)
            with open(loc+'/info.txt','w') as f:
                print('Formula: {}'.format(self.__formula), file=f)
                print('Porosity: {}'.format(self.get_porosity()), file=f)
                print('L: {}'.format(self.__l), file=f)
                print('a: {}'.format(self.__a), file=f)
                print('eps: {}'.format(self.__eps), file=f)
            # Save and display every z-slice of the voxel grid as an image.
            for i in range(self._vox.shape[0]):
                temp_img=self._vox[i]
                plt.imsave(loc+'/'+str(i)+'.png', temp_img, cmap='gray')
                from IPython import display
                display.clear_output(wait=True)
                plt.imshow(temp_img, cmap='gray')
                plt.axis('off')
                plt.title(str(i))
                plt.show()
            mesh.export(loc+'/'+self._model+'.stl')
            print('save stl model to {}'.format(loc))
        return mesh
    def formSurface(self, save=True):
        """Build (and optionally save) the zero-level isosurface as an STL mesh.

        Unlike formSolid, this meshes the f(x,y,z)=0 surface directly (no
        eps thickening), plots it, and returns the trimesh mesh object.
        """
        # NOTE(review): measure.marching_cubes_lewiner was removed in
        # scikit-image >= 0.19; newer versions use measure.marching_cubes.
        verts, faces, _, _ = measure.marching_cubes_lewiner(self.__formula_string(), 0, spacing=[self.__res]*3)
        fig = plt.figure(figsize=(8,6))
        ax = fig.add_subplot(111, projection='3d')
        ax.plot_trisurf(verts[:, 0], verts[:, 1], faces, verts[:, 2])
        plt.title(sympify(self.get_formula()))
        plt.tight_layout()
        plt.show()
        mesh = trimesh.base.Trimesh(vertices=verts, faces=faces)
        if save:
            loc='STL/'+self._model
            os.makedirs(loc, exist_ok=True)
            with open(loc+'/info_surface.txt','w') as f:
                print('Formula: {}'.format(self.__formula), file=f)
                print('L: {}'.format(self.__l), file=f)
                print('a: {}'.format(self.__a), file=f)
            mesh.export(loc+'/'+self._model+'_surface.stl')
            print('save surface stl model to {}'.format(loc))
        return mesh
if __name__=='__main__':
    # CLI entry point: build a formula-based material and export its solid
    # and surface meshes.
    test = None  # bound before try so the ValueError handler below is safe
    try:
        import argparse

        def _str2bool(v):
            """Parse a boolean CLI value.

            argparse's type=bool is a known pitfall: bool('False') is True
            because any non-empty string is truthy, so '--save False' used to
            enable saving.  Accept the common textual spellings instead.
            """
            return str(v).strip().lower() in ('1', 'true', 't', 'yes', 'y')

        parser = argparse.ArgumentParser(description='generate stl by formula')
        parser.add_argument('--unit', type=str, default='')
        parser.add_argument('--formula', type=str, default=None)
        parser.add_argument('--l', type=float, default=10)
        parser.add_argument('--r', nargs=3, type=int, default=[1, 1, 1])
        parser.add_argument('--eps', type=float, default=0.1)
        parser.add_argument('--res', type=float, default=0.1)
        parser.add_argument('--save', type=_str2bool, default=False)
        parser.add_argument('--smooth', type=_str2bool, default=True)
        args = parser.parse_args()
        unit = args.unit        # 'gyroid', 'schd' or '' (random); ignored when --formula given
        formula = args.formula  # optional user-defined level-set formula string
        l = args.l              # 1 unit => 10*10*10mm
        r = args.r              # unit-cell repetitions per axis, e.g. [1,1,1]
        eps = args.eps          # level-set half-thickness
        res = args.res          # mm/pixel
        smooth = args.smooth
        save = args.save
        test = SingleFormulaBasedMaterial(unit=unit, formula=formula, l=l, r=r, eps=eps, res=res)
        test.formSolid(save=save, smooth=smooth)
        test.formSurface(save=save)
    # parse_args() reports bad arguments via SystemExit, which we let
    # propagate; ArgumentError is kept for programmatic parser errors.
    except argparse.ArgumentError:
        pass
    except ValueError:
        # Guard against `test` never having been constructed.
        if test is not None:
            print('The formula-based material with {} and eps {} does not exist. Please try again.'.format(test.get_formula(), test.get_eps()))
| [
"trimesh.smoothing.filter_humphrey",
"matplotlib.pyplot.imshow",
"argparse.ArgumentParser",
"sympy.sympify",
"numpy.meshgrid",
"matplotlib.pyplot.axis",
"sympy.utilities.lambdify.lambdify",
"random.choice",
"trimesh.base.Trimesh",
"trimesh.voxel.ops.matrix_to_marching_cubes",
"numpy.logical_xor"... | [((209, 242), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (232, 242), False, 'import time, warnings\n'), ((996, 1019), 'sympy.sympify', 'sympify', (['self.__formula'], {}), '(self.__formula)\n', (1003, 1019), False, 'from sympy import sympify\n'), ((1124, 1155), 'sympy.utilities.lambdify.lambdify', 'lambdify', (['[x, y, z]', 'f', '"""numpy"""'], {}), "([x, y, z], f, 'numpy')\n", (1132, 1155), False, 'from sympy.utilities.lambdify import lambdify\n'), ((2475, 2552), 'numpy.meshgrid', 'np.meshgrid', (['(self.__x / _res)', '(self.__y / _res)', '(self.__z / _res)'], {'indexing': '"""ij"""'}), "(self.__x / _res, self.__y / _res, self.__z / _res, indexing='ij')\n", (2486, 2552), True, 'import numpy as np\n'), ((3322, 3399), 'numpy.meshgrid', 'np.meshgrid', (['(self.__x / _res)', '(self.__y / _res)', '(self.__z / _res)'], {'indexing': '"""ij"""'}), "(self.__x / _res, self.__y / _res, self.__z / _res, indexing='ij')\n", (3333, 3399), True, 'import numpy as np\n'), ((3697, 3726), 'numpy.logical_or', 'np.logical_or', (['self._vox', 'mix'], {}), '(self._vox, mix)\n', (3710, 3726), True, 'import numpy as np\n'), ((3969, 3999), 'numpy.logical_xor', 'np.logical_xor', (['self._vox', 'mix'], {}), '(self._vox, mix)\n', (3983, 3999), True, 'import numpy as np\n'), ((4539, 4569), 'numpy.logical_and', 'np.logical_and', (['self._vox', 'mix'], {}), '(self._vox, mix)\n', (4553, 4569), True, 'import numpy as np\n'), ((5119, 5190), 'trimesh.voxel.ops.matrix_to_marching_cubes', 'trimesh.voxel.ops.matrix_to_marching_cubes', (['self._vox'], {'pitch': 'self.__res'}), '(self._vox, pitch=self.__res)\n', (5161, 5190), False, 'import trimesh\n'), ((6446, 6472), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (6456, 6472), True, 'import matplotlib.pyplot as plt\n'), ((6648, 6666), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6664, 6666), True, 'import 
matplotlib.pyplot as plt\n'), ((6675, 6685), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6683, 6685), True, 'import matplotlib.pyplot as plt\n'), ((6710, 6759), 'trimesh.base.Trimesh', 'trimesh.base.Trimesh', ([], {'vertices': 'verts', 'faces': 'faces'}), '(vertices=verts, faces=faces)\n', (6730, 6759), False, 'import trimesh\n'), ((7332, 7394), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""generate stl by formula"""'}), "(description='generate stl by formula')\n", (7355, 7394), False, 'import argparse\n'), ((579, 594), 'random.random', 'random.random', ([], {}), '()\n', (592, 594), False, 'import random\n'), ((705, 745), 'random.choice', 'random.choice', (["['sin(x)', 'cos(x)', '1']"], {}), "(['sin(x)', 'cos(x)', '1'])\n", (718, 745), False, 'import random\n'), ((765, 805), 'random.choice', 'random.choice', (["['sin(y)', 'cos(y)', '1']"], {}), "(['sin(y)', 'cos(y)', '1'])\n", (778, 805), False, 'import random\n'), ((825, 865), 'random.choice', 'random.choice', (["['sin(z)', 'cos(z)', '1']"], {}), "(['sin(z)', 'cos(z)', '1'])\n", (838, 865), False, 'import random\n'), ((4259, 4288), 'numpy.logical_or', 'np.logical_or', (['self._vox', 'mix'], {}), '(self._vox, mix)\n', (4272, 4288), True, 'import numpy as np\n'), ((5230, 5269), 'trimesh.smoothing.filter_humphrey', 'trimesh.smoothing.filter_humphrey', (['mesh'], {}), '(mesh)\n', (5263, 5269), False, 'import trimesh\n'), ((5357, 5388), 'os.makedirs', 'os.makedirs', (['loc'], {'exist_ok': '(True)'}), '(loc, exist_ok=True)\n', (5368, 5388), False, 'import os\n'), ((6824, 6855), 'os.makedirs', 'os.makedirs', (['loc'], {'exist_ok': '(True)'}), '(loc, exist_ok=True)\n', (6835, 6855), False, 'import os\n'), ((669, 684), 'random.random', 'random.random', ([], {}), '()\n', (682, 684), False, 'import random\n'), ((4867, 4884), 'numpy.sum', 'np.sum', (['self._vox'], {}), '(self._vox)\n', (4873, 4884), True, 'import numpy as np\n'), ((5974, 6005), 'IPython.display.clear_output', 
'display.clear_output', ([], {'wait': '(True)'}), '(wait=True)\n', (5994, 6005), False, 'from IPython import display\n'), ((6022, 6055), 'matplotlib.pyplot.imshow', 'plt.imshow', (['temp_img'], {'cmap': '"""gray"""'}), "(temp_img, cmap='gray')\n", (6032, 6055), True, 'import matplotlib.pyplot as plt\n'), ((6076, 6091), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (6084, 6091), True, 'import matplotlib.pyplot as plt\n'), ((6142, 6152), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6150, 6152), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 15 18:53:22 2021
@author: <NAME>
"""
import argparse
import numpy as np
from zdm import zdm
#import pcosmic
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cm
from scipy import interpolate
import matplotlib
from pkg_resources import resource_filename
import os
import sys
import scipy as sp
import time
from matplotlib.ticker import NullFormatter
from zdm import iteration as it
from zdm import survey
from zdm import cosmology as cos
from zdm import pcosmic
from zdm import beams
from zdm import misc_functions
import pickle
np.seterr(divide='ignore')
####setting up the initial grid and plotting some stuff####
setH0=67.74
cos.set_cosmology(H0=setH0)
# get the grid of p(DM|z)
zDMgrid, zvals,dmvals,H0=misc_functions.get_zdm_grid(H0=setH0,new=True,plot=False,method='analytic')
Wbins=10
Wscale=2
Nbeams=[20,20,20] #Full beam NOT Std
thresh=0
method=2
Wlogmean=1.70267
Wlogsigma=0.899148
sdir = os.path.join(resource_filename('zdm', 'data'), 'Surveys/')
lat50=survey.survey()
lat50.process_survey_file(sdir+'CRAFT_class_I_and_II.dat')
DMhalo=50
lat50.init_DMEG(DMhalo)
lat50.init_beam(nbins=Nbeams[0],method=2,plot=False,thresh=thresh) # tells the survey to use the beam file
pwidths,pprobs=survey.make_widths(lat50,Wlogmean,Wlogsigma,Wbins,scale=Wscale)
efficiencies=lat50.get_efficiency_from_wlist(dmvals,pwidths,pprobs)
weights=lat50.wplist
ics=survey.survey()
ics.process_survey_file(sdir+'CRAFT_ICS.dat')
DMhalo=50
ics.init_DMEG(DMhalo)
ics.init_beam(nbins=Nbeams[0],method=2,plot=False,thresh=thresh) # tells the survey to use the beam file
pwidths,pprobs=survey.make_widths(ics,Wlogmean,Wlogsigma,Wbins,scale=Wscale)
efficiencies=ics.get_efficiency_from_wlist(dmvals,pwidths,pprobs)
weights=ics.wplist
pks=survey.survey()
pks.process_survey_file(sdir+'parkes_mb_class_I_and_II.dat')
DMhalo=50
pks.init_DMEG(DMhalo)
pks.init_beam(nbins=Nbeams[0],method=2,plot=False,thresh=thresh) # tells the survey to use the beam file
pwidths,pprobs=survey.make_widths(pks,Wlogmean,Wlogsigma,Wbins,scale=Wscale)
efficiencies=pks.get_efficiency_from_wlist(dmvals,pwidths,pprobs)
weights=pks.wplist
ICS892=survey.survey()
ICS892.process_survey_file(sdir+'CRAFT_ICS_892.dat')
ICS892.init_DMEG(DMhalo)
ICS892.init_beam(nbins=Nbeams[0],method=2,plot=False,thresh=thresh) # tells the survey to use the beam file
pwidths,pprobs=survey.make_widths(ICS892,Wlogmean,Wlogsigma,Wbins,scale=Wscale)
efficiencies892=ICS892.get_efficiency_from_wlist(dmvals,pwidths,pprobs)
surveys=[lat50,ics,ICS892,pks]
#updated best-fit values
alpha_method=0
logmean=2.11
logsigma=0.53
alpha=1.55
gamma=-1.09
Emax=10**(41.7)
Emin=10**(30)
sfr_n=1.67
C=3.188
#alpha_method=1
#Emin=10**30
#Emax =10**41.40
#alpha =-0.66
#gamma = -1.01
#sfr_n= 0.73
#logmean=2.18
#logsigma=0.48
#C=2.36 ##it.GetFirstConstantEstimate(grids,surveys,pset)
pset=[np.log10(float(Emin)),np.log10(float(Emax)),alpha,gamma,sfr_n,logmean,logsigma,C,setH0]
it.print_pset(pset)
grids=misc_functions.initialise_grids(surveys,zDMgrid, zvals,dmvals,pset,wdist=True,source_evolution=0,alpha_method=0)
plots=False
zmax=[0.6,1,1,3]
DMmax=[1500,2000,2000,3000]
zmax2=[0.75,1,1,3]
DMmax2=[1000,2000,2000,4000]
if plots:
for i in range (len(surveys)):
grid=grids[i]
sv=surveys[i]
pcosmic.plot_mean(zvals,'mean_DM.pdf')
#misc_functions.plot_efficiencies(lat50)
misc_functions.plot_zdm_basic_paper(grid.grid,grid.zvals,grid.dmvals,zmax=3,DMmax=3000,
name='Plots/p_dm_z_grid_image.pdf',norm=1,log=True,
label='$\\log_{10}p(DM_{\\rm EG}|z)$',
conts=[0.16,0.5,0.88],title='Grid at H0 '+str(i),
H0=setH0,showplot=True)
misc_functions.plot_zdm_basic_paper(grid.smear_grid,grid.zvals,grid.dmvals,zmax=3,
DMmax=3000,norm=1,log=True,
ylabel='${\\rm DM_{\\rm EG}}$',
label='$\\log_{10} p({\\rm DM_{cosmic}+DM_{host}}|z)$',
conts=[0.023, 0.159,0.5,0.841,0.977],
title='Smear grid at H0 '+str(i),H0=setH0,
showplot=True)
misc_functions.plot_grid_2(grid.pdv,grid.zvals,grid.dmvals,zmax=zmax[i],DMmax=DMmax[i],
name='Plots/pdv.pdf',norm=2,log=True
,label='$p(DM_{\\rm EG},z)dV$ [Mpc$^3$]',
title="Pdv at H0" + str(i),showplot=True)
muDM=10**pset[5]
Macquart=muDM
misc_functions.plot_grid_2(grid.rates,grid.zvals,grid.dmvals,zmax=zmax[i],DMmax=DMmax[i],
norm=2,log=True,label='$\\log_{10} p({\\rm DM}_{\\rm EG},z)$',
project=False,FRBDM=sv.DMEGs,FRBZ=None,Aconts=[0.01,0.1,0.5],
Macquart=Macquart,title="H0 value "+str(i),H0= setH0,showplot=True)
misc_functions.make_dm_redshift(grid,
DMmax=DMmax2[i],zmax=zmax2[i],loc='upper right',Macquart=Macquart,
H0=setH0,showplot=True)
print ("initial grid setup done")
scanoverH0=False
# just testing....should NOT be used (update_grid routine should not be modified)
if scanoverH0:
    # Experimental per-survey 1D likelihood scan over H0 (disabled by default).
    for k in range (len(surveys)):
        grid=grids[k]
        sv=surveys[k]
        ###### shows how to do a 1D scan of parameter values #######
        # Rebuild the parameter vector from the grid's own attributes.
        pset=[np.log10(float(grid.Emin)),np.log10(float(grid.Emax)),grid.alpha,grid.gamma,grid.sfr_n,grid.smear_mean,grid.smear_sigma,C,grid.H0]
        #lEmaxs=np.linspace(40,44,21)
        #lscan,lllist,expected=it.scan_likelihoods_1D(grid,pset,lat50,1,lEmaxs,norm=True)
        #print (lscan, lllist, expected)
        #misc_functions.plot_1d(lEmaxs,lscan,'$E_{\\rm max}$','Plots/test_lik_fn_emax.pdf')
        #for H0
        # Scan parameter index 8 (H0) over four values and plot the result.
        t0=time.process_time()
        H0iter=np.linspace(50,100,4)
        lscanH0,lllistH0,expectedH0=it.scan_likelihoods_1D(grid,pset,sv,8,H0iter,norm=True)
        misc_functions.plot_1d(H0iter,lscanH0,'$H_{\\rm 0}$','Plots/test_lik_fn_emax.pdf')
        t1=time.process_time()
        print (lscanH0,"done")
        print ("Took ",t1-t0,"seconds")
def scan_H0(H0_start,H0_stop,n_iterations,surveys,plots=False):
    """Routine for scanning over H0 values in 1D.

    For each of `n_iterations` H0 values in [H0_start, H0_stop], rebuilds the
    cosmology and z-DM grids with fixed population parameters, evaluates the
    log-likelihood of every survey, plots the per-survey and summed likelihood
    curves, and reports a best-fit H0 from a spline interpolation of the
    summed curve.
    """
    t0=time.process_time()
    H0values=np.linspace(H0_start,H0_stop,n_iterations)
    H0likes=[]
    for i in H0values:
        setH0=i
        # Rebuild the cosmology and p(DM|z) grid for this trial H0.
        cos.set_cosmology(H0=setH0)
        zDMgrid, zvals,dmvals,H0=misc_functions.get_zdm_grid(H0=setH0,new=True,plot=False,
                                                            method='analytic')
        # Fixed population parameters used at every H0 point.
        mean=10**2.16
        sigma=10**0.51
        logmean=np.log10(mean)
        logsigma=np.log10(sigma)
        alpha=1.54
        gamma=-1.16
        Emax=10**(41.84)
        Emin=10**(30)
        sfr_n=1.77
        C=4.19
        pset=[np.log10(float(Emin)),np.log10(float(Emax)),alpha,gamma,sfr_n,logmean,logsigma,C,setH0]
        it.print_pset(pset)
        grids=misc_functions.initialise_grids(surveys,zDMgrid, zvals,dmvals,pset,wdist=True,source_evolution=0,alpha_method=0)
        grid=grids[0]
        if plots:
            pcosmic.plot_mean(zvals,'mean_DM.pdf', title="Mean DM at" + str(i))
            misc_functions.plot_zdm_basic_paper(grid.grid,grid.zvals,grid.dmvals,zmax=3,DMmax=3000,
                                            name='Plots/p_dm_z_grid_image.pdf',norm=1,log=True,
                                            label='$\\log_{10}p(DM_{\\rm cosmic}|z)$', ylabel='${\\rm DM}_{\\rm cosmic}$',
                                            conts=[0.16,0.5,0.88],title='Cosmological p(z,DM) at $H_{0}$',
                                            H0=setH0,showplot=True)
            misc_functions.plot_zdm_basic_paper(grid.smear_grid,grid.zvals,grid.dmvals,
                                            zmax=3,DMmax=3000,norm=1,log=True,
                                            ylabel='${\\rm DM_{\\rm EG}}$',
                                            label='$\\log_{10} p({\\rm DM_{cosmic}+DM_{host}}|z)$',
                                            conts=[0.023, 0.159,0.5,0.841,0.977],
                                            title='Cosmological + Host p(z,DM) at $H_{0}$ ',H0=setH0,
                                            showplot=True)
        likessurvey=[]
        for j in range (len(surveys)):
            grid=grids[j]
            sv=surveys[j]
            if plots:
                misc_functions.plot_grid_2(grid.pdv,grid.zvals,grid.dmvals,zmax=zmax[j],DMmax=DMmax[j],
                                           name='Plots/pdv.pdf',norm=2,log=True,
                                           label='$p(DM_{\\rm EG},z)dV$ [Mpc$^3$]',showplot=True,
                                           title='Pdv of '+ str(j)+ 'at $H_{0}$ ')
                misc_functions.plot_grid_2(grid.rates,grid.zvals,grid.dmvals,zmax=zmax[j],DMmax=DMmax[j],
                                           name='Plots/project_rate_dm_z_grid_image.pdf',norm=2,
                                           log=True,label='$f(DM_{\\rm EG},z)p(DM_{\\rm EG},z)dV$ [Mpc$^3$]',
                                           project=False, title='Rates of ' + str(j) + ' at $H_{0}$ ',
                                           H0=setH0,showplot=True)
            #from test.py
            muDM=10**pset[5]
            Macquart=muDM
            # NOTE(review): the two calls below run regardless of the `plots`
            # flag (showplot=True) — confirm this is intended.
            misc_functions.plot_grid_2(grid.rates,grid.zvals,grid.dmvals,zmax=zmax[j],DMmax=DMmax[j],
                                       norm=2,log=True,label='$\\log_{10} p({\\rm DM}_{\\rm EG},z)$',
                                       project=False,FRBDM=sv.DMEGs,FRBZ=None,Aconts=[0.01,0.1,0.5],
                                       Macquart=Macquart,title='p(z,DM) of '+ str(j) + ' at $H_{0}$',H0= setH0, showplot=True)
            misc_functions.make_dm_redshift(grid,
                                            DMmax=DMmax2[j],zmax=zmax2[j],loc='upper right',Macquart=Macquart,
                                            H0=setH0, showplot=True)
            # Choose the 1D or 2D likelihood depending on survey dimensionality.
            if sv.nD==1:
                llsum= it.calc_likelihoods_1D(grid, sv, pset, psnr=True)
            else:
                llsum= it.calc_likelihoods_2D(grid, sv, pset, psnr=True)
            likessurvey.append(llsum)
            #print (grid.Emin, grid.Emax,np.log10(float(grid.Emin)),np.log10(float(grid.Emax)),setH0)
        print ("Calculationg done for $H_{0}$ ",setH0)
        likessurvey=np.array(likessurvey)
        H0likes.append(likessurvey)
    t1=time.process_time()
    print ("Done. ",n_iterations," iterations took ",t1-t0," seconds")
    H0likes=np.array(H0likes)
    print (H0likes)
    # Transpose to shape (n_surveys, n_H0_values) for per-survey plotting.
    H0likes=np.transpose(H0likes)
    for a in range(len(surveys)):
        sv=surveys[a]
        H0likesa=H0likes[a]
        plt.plot(H0values,H0likesa)
        plt.title("Likelihood scan while varying H0 for " + str(sv.name))
        plt.xlabel("H0 value")
        plt.ylabel("Log Likelihood")
        plt.show()
        plt.close()
    # Sum the log-likelihoods over surveys and interpolate with a cubic spline.
    H0likessum=np.sum(H0likes,axis=0)
    plt.plot(H0values,H0likessum)
    tckj = interpolate.splrep(H0values,H0likessum, s=0)
    H0new=np.arange(50,100,0.01)
    ynewj = interpolate.splev(H0new, tckj)
    plt.plot(H0new,ynewj)
    #plt.title("Likelihood scan while varying H0 for " + str(sv.name))
    plt.xlabel("Value of ${\\rm H_{\\rm 0}}$ in km s${^{-1}}$ Mpc{$^-1$}")
    plt.ylabel("Log Likelihood")
    plt.show()
    # NOTE(review): argmin picks the minimum of the interpolated curve —
    # confirm the sign convention of the returned log-likelihoods (argmax
    # would be expected if larger values mean more likely).
    H0best=H0new[np.argmin(ynewj)]
    print ('Best fit for alpha method= ' +str(grid.alpha_method) +'$H_{0}$ is', H0best)
    plt.close()
scan_H0(50,100,5,surveys,plots=True)
| [
"numpy.log10",
"matplotlib.pyplot.ylabel",
"zdm.misc_functions.get_zdm_grid",
"numpy.array",
"time.process_time",
"zdm.iteration.calc_likelihoods_2D",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.linspace",
"scipy.interpolate.splev",
... | [((621, 647), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (630, 647), True, 'import numpy as np\n'), ((723, 750), 'zdm.cosmology.set_cosmology', 'cos.set_cosmology', ([], {'H0': 'setH0'}), '(H0=setH0)\n', (740, 750), True, 'from zdm import cosmology as cos\n'), ((810, 888), 'zdm.misc_functions.get_zdm_grid', 'misc_functions.get_zdm_grid', ([], {'H0': 'setH0', 'new': '(True)', 'plot': '(False)', 'method': '"""analytic"""'}), "(H0=setH0, new=True, plot=False, method='analytic')\n", (837, 888), False, 'from zdm import misc_functions\n'), ((1077, 1092), 'zdm.survey.survey', 'survey.survey', ([], {}), '()\n', (1090, 1092), False, 'from zdm import survey\n'), ((1308, 1375), 'zdm.survey.make_widths', 'survey.make_widths', (['lat50', 'Wlogmean', 'Wlogsigma', 'Wbins'], {'scale': 'Wscale'}), '(lat50, Wlogmean, Wlogsigma, Wbins, scale=Wscale)\n', (1326, 1375), False, 'from zdm import survey\n'), ((1466, 1481), 'zdm.survey.survey', 'survey.survey', ([], {}), '()\n', (1479, 1481), False, 'from zdm import survey\n'), ((1680, 1745), 'zdm.survey.make_widths', 'survey.make_widths', (['ics', 'Wlogmean', 'Wlogsigma', 'Wbins'], {'scale': 'Wscale'}), '(ics, Wlogmean, Wlogsigma, Wbins, scale=Wscale)\n', (1698, 1745), False, 'from zdm import survey\n'), ((1832, 1847), 'zdm.survey.survey', 'survey.survey', ([], {}), '()\n', (1845, 1847), False, 'from zdm import survey\n'), ((2061, 2126), 'zdm.survey.make_widths', 'survey.make_widths', (['pks', 'Wlogmean', 'Wlogsigma', 'Wbins'], {'scale': 'Wscale'}), '(pks, Wlogmean, Wlogsigma, Wbins, scale=Wscale)\n', (2079, 2126), False, 'from zdm import survey\n'), ((2216, 2231), 'zdm.survey.survey', 'survey.survey', ([], {}), '()\n', (2229, 2231), False, 'from zdm import survey\n'), ((2433, 2501), 'zdm.survey.make_widths', 'survey.make_widths', (['ICS892', 'Wlogmean', 'Wlogsigma', 'Wbins'], {'scale': 'Wscale'}), '(ICS892, Wlogmean, Wlogsigma, Wbins, scale=Wscale)\n', (2451, 2501), False, 'from zdm import 
survey\n'), ((3017, 3036), 'zdm.iteration.print_pset', 'it.print_pset', (['pset'], {}), '(pset)\n', (3030, 3036), True, 'from zdm import iteration as it\n'), ((3044, 3166), 'zdm.misc_functions.initialise_grids', 'misc_functions.initialise_grids', (['surveys', 'zDMgrid', 'zvals', 'dmvals', 'pset'], {'wdist': '(True)', 'source_evolution': '(0)', 'alpha_method': '(0)'}), '(surveys, zDMgrid, zvals, dmvals, pset,\n wdist=True, source_evolution=0, alpha_method=0)\n', (3075, 3166), False, 'from zdm import misc_functions\n'), ((12210, 12221), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12219, 12221), True, 'import matplotlib.pyplot as plt\n'), ((1025, 1057), 'pkg_resources.resource_filename', 'resource_filename', (['"""zdm"""', '"""data"""'], {}), "('zdm', 'data')\n", (1042, 1057), False, 'from pkg_resources import resource_filename\n'), ((6764, 6783), 'time.process_time', 'time.process_time', ([], {}), '()\n', (6781, 6783), False, 'import time\n'), ((6797, 6841), 'numpy.linspace', 'np.linspace', (['H0_start', 'H0_stop', 'n_iterations'], {}), '(H0_start, H0_stop, n_iterations)\n', (6808, 6841), True, 'import numpy as np\n'), ((11162, 11181), 'time.process_time', 'time.process_time', ([], {}), '()\n', (11179, 11181), False, 'import time\n'), ((11275, 11292), 'numpy.array', 'np.array', (['H0likes'], {}), '(H0likes)\n', (11283, 11292), True, 'import numpy as np\n'), ((11325, 11346), 'numpy.transpose', 'np.transpose', (['H0likes'], {}), '(H0likes)\n', (11337, 11346), True, 'import numpy as np\n'), ((11668, 11691), 'numpy.sum', 'np.sum', (['H0likes'], {'axis': '(0)'}), '(H0likes, axis=0)\n', (11674, 11691), True, 'import numpy as np\n'), ((11695, 11725), 'matplotlib.pyplot.plot', 'plt.plot', (['H0values', 'H0likessum'], {}), '(H0values, H0likessum)\n', (11703, 11725), True, 'import matplotlib.pyplot as plt\n'), ((11736, 11781), 'scipy.interpolate.splrep', 'interpolate.splrep', (['H0values', 'H0likessum'], {'s': '(0)'}), '(H0values, H0likessum, s=0)\n', (11754, 
11781), False, 'from scipy import interpolate\n'), ((11791, 11815), 'numpy.arange', 'np.arange', (['(50)', '(100)', '(0.01)'], {}), '(50, 100, 0.01)\n', (11800, 11815), True, 'import numpy as np\n'), ((11826, 11856), 'scipy.interpolate.splev', 'interpolate.splev', (['H0new', 'tckj'], {}), '(H0new, tckj)\n', (11843, 11856), False, 'from scipy import interpolate\n'), ((11861, 11883), 'matplotlib.pyplot.plot', 'plt.plot', (['H0new', 'ynewj'], {}), '(H0new, ynewj)\n', (11869, 11883), True, 'import matplotlib.pyplot as plt\n'), ((11958, 12028), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Value of ${\\\\rm H_{\\\\rm 0}}$ in km s${^{-1}}$ Mpc{$^-1$}"""'], {}), "('Value of ${\\\\rm H_{\\\\rm 0}}$ in km s${^{-1}}$ Mpc{$^-1$}')\n", (11968, 12028), True, 'import matplotlib.pyplot as plt\n'), ((12033, 12061), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Log Likelihood"""'], {}), "('Log Likelihood')\n", (12043, 12061), True, 'import matplotlib.pyplot as plt\n'), ((12066, 12076), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12074, 12076), True, 'import matplotlib.pyplot as plt\n'), ((3372, 3411), 'zdm.pcosmic.plot_mean', 'pcosmic.plot_mean', (['zvals', '"""mean_DM.pdf"""'], {}), "(zvals, 'mean_DM.pdf')\n", (3389, 3411), False, 'from zdm import pcosmic\n'), ((5293, 5430), 'zdm.misc_functions.make_dm_redshift', 'misc_functions.make_dm_redshift', (['grid'], {'DMmax': 'DMmax2[i]', 'zmax': 'zmax2[i]', 'loc': '"""upper right"""', 'Macquart': 'Macquart', 'H0': 'setH0', 'showplot': '(True)'}), "(grid, DMmax=DMmax2[i], zmax=zmax2[i], loc=\n 'upper right', Macquart=Macquart, H0=setH0, showplot=True)\n", (5324, 5430), False, 'from zdm import misc_functions\n'), ((6266, 6285), 'time.process_time', 'time.process_time', ([], {}), '()\n', (6283, 6285), False, 'import time\n'), ((6310, 6333), 'numpy.linspace', 'np.linspace', (['(50)', '(100)', '(4)'], {}), '(50, 100, 4)\n', (6321, 6333), True, 'import numpy as np\n'), ((6368, 6428), 'zdm.iteration.scan_likelihoods_1D', 
'it.scan_likelihoods_1D', (['grid', 'pset', 'sv', '(8)', 'H0iter'], {'norm': '(True)'}), '(grid, pset, sv, 8, H0iter, norm=True)\n', (6390, 6428), True, 'from zdm import iteration as it\n'), ((6432, 6521), 'zdm.misc_functions.plot_1d', 'misc_functions.plot_1d', (['H0iter', 'lscanH0', '"""$H_{\\\\rm 0}$"""', '"""Plots/test_lik_fn_emax.pdf"""'], {}), "(H0iter, lscanH0, '$H_{\\\\rm 0}$',\n 'Plots/test_lik_fn_emax.pdf')\n", (6454, 6521), False, 'from zdm import misc_functions\n'), ((6526, 6545), 'time.process_time', 'time.process_time', ([], {}), '()\n', (6543, 6545), False, 'import time\n'), ((6916, 6943), 'zdm.cosmology.set_cosmology', 'cos.set_cosmology', ([], {'H0': 'setH0'}), '(H0=setH0)\n', (6933, 6943), True, 'from zdm import cosmology as cos\n'), ((6977, 7055), 'zdm.misc_functions.get_zdm_grid', 'misc_functions.get_zdm_grid', ([], {'H0': 'setH0', 'new': '(True)', 'plot': '(False)', 'method': '"""analytic"""'}), "(H0=setH0, new=True, plot=False, method='analytic')\n", (7004, 7055), False, 'from zdm import misc_functions\n'), ((7185, 7199), 'numpy.log10', 'np.log10', (['mean'], {}), '(mean)\n', (7193, 7199), True, 'import numpy as np\n'), ((7217, 7232), 'numpy.log10', 'np.log10', (['sigma'], {}), '(sigma)\n', (7225, 7232), True, 'import numpy as np\n'), ((7463, 7482), 'zdm.iteration.print_pset', 'it.print_pset', (['pset'], {}), '(pset)\n', (7476, 7482), True, 'from zdm import iteration as it\n'), ((7506, 7628), 'zdm.misc_functions.initialise_grids', 'misc_functions.initialise_grids', (['surveys', 'zDMgrid', 'zvals', 'dmvals', 'pset'], {'wdist': '(True)', 'source_evolution': '(0)', 'alpha_method': '(0)'}), '(surveys, zDMgrid, zvals, dmvals, pset,\n wdist=True, source_evolution=0, alpha_method=0)\n', (7537, 7628), False, 'from zdm import misc_functions\n'), ((11088, 11109), 'numpy.array', 'np.array', (['likessurvey'], {}), '(likessurvey)\n', (11096, 11109), True, 'import numpy as np\n'), ((11439, 11467), 'matplotlib.pyplot.plot', 'plt.plot', (['H0values', 
'H0likesa'], {}), '(H0values, H0likesa)\n', (11447, 11467), True, 'import matplotlib.pyplot as plt\n'), ((11549, 11571), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""H0 value"""'], {}), "('H0 value')\n", (11559, 11571), True, 'import matplotlib.pyplot as plt\n'), ((11580, 11608), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Log Likelihood"""'], {}), "('Log Likelihood')\n", (11590, 11608), True, 'import matplotlib.pyplot as plt\n'), ((11617, 11627), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11625, 11627), True, 'import matplotlib.pyplot as plt\n'), ((11636, 11647), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11645, 11647), True, 'import matplotlib.pyplot as plt\n'), ((12099, 12115), 'numpy.argmin', 'np.argmin', (['ynewj'], {}), '(ynewj)\n', (12108, 12115), True, 'import numpy as np\n'), ((7786, 8120), 'zdm.misc_functions.plot_zdm_basic_paper', 'misc_functions.plot_zdm_basic_paper', (['grid.grid', 'grid.zvals', 'grid.dmvals'], {'zmax': '(3)', 'DMmax': '(3000)', 'name': '"""Plots/p_dm_z_grid_image.pdf"""', 'norm': '(1)', 'log': '(True)', 'label': '"""$\\\\log_{10}p(DM_{\\\\rm cosmic}|z)$"""', 'ylabel': '"""${\\\\rm DM}_{\\\\rm cosmic}$"""', 'conts': '[0.16, 0.5, 0.88]', 'title': '"""Cosmological p(z,DM) at $H_{0}$"""', 'H0': 'setH0', 'showplot': '(True)'}), "(grid.grid, grid.zvals, grid.dmvals,\n zmax=3, DMmax=3000, name='Plots/p_dm_z_grid_image.pdf', norm=1, log=\n True, label='$\\\\log_{10}p(DM_{\\\\rm cosmic}|z)$', ylabel=\n '${\\\\rm DM}_{\\\\rm cosmic}$', conts=[0.16, 0.5, 0.88], title=\n 'Cosmological p(z,DM) at $H_{0}$', H0=setH0, showplot=True)\n", (7821, 8120), False, 'from zdm import misc_functions\n'), ((8292, 8635), 'zdm.misc_functions.plot_zdm_basic_paper', 'misc_functions.plot_zdm_basic_paper', (['grid.smear_grid', 'grid.zvals', 'grid.dmvals'], {'zmax': '(3)', 'DMmax': '(3000)', 'norm': '(1)', 'log': '(True)', 'ylabel': '"""${\\\\rm DM_{\\\\rm EG}}$"""', 'label': '"""$\\\\log_{10} p({\\\\rm 
DM_{cosmic}+DM_{host}}|z)$"""', 'conts': '[0.023, 0.159, 0.5, 0.841, 0.977]', 'title': '"""Cosmological + Host p(z,DM) at $H_{0}$ """', 'H0': 'setH0', 'showplot': '(True)'}), "(grid.smear_grid, grid.zvals, grid.\n dmvals, zmax=3, DMmax=3000, norm=1, log=True, ylabel=\n '${\\\\rm DM_{\\\\rm EG}}$', label=\n '$\\\\log_{10} p({\\\\rm DM_{cosmic}+DM_{host}}|z)$', conts=[0.023, 0.159, \n 0.5, 0.841, 0.977], title='Cosmological + Host p(z,DM) at $H_{0}$ ', H0\n =setH0, showplot=True)\n", (8327, 8635), False, 'from zdm import misc_functions\n'), ((10431, 10568), 'zdm.misc_functions.make_dm_redshift', 'misc_functions.make_dm_redshift', (['grid'], {'DMmax': 'DMmax2[j]', 'zmax': 'zmax2[j]', 'loc': '"""upper right"""', 'Macquart': 'Macquart', 'H0': 'setH0', 'showplot': '(True)'}), "(grid, DMmax=DMmax2[j], zmax=zmax2[j], loc=\n 'upper right', Macquart=Macquart, H0=setH0, showplot=True)\n", (10462, 10568), False, 'from zdm import misc_functions\n'), ((10685, 10734), 'zdm.iteration.calc_likelihoods_1D', 'it.calc_likelihoods_1D', (['grid', 'sv', 'pset'], {'psnr': '(True)'}), '(grid, sv, pset, psnr=True)\n', (10707, 10734), True, 'from zdm import iteration as it\n'), ((10776, 10825), 'zdm.iteration.calc_likelihoods_2D', 'it.calc_likelihoods_2D', (['grid', 'sv', 'pset'], {'psnr': '(True)'}), '(grid, sv, pset, psnr=True)\n', (10798, 10825), True, 'from zdm import iteration as it\n')] |
import numpy as np
def point_dist(a, b):
return np.sqrt(np.sum(np.square(a-b)))
def point_center(a, b):
return (a+b)/2
def face_sz(l_eye, r_eye, mouse):
return point_dist(mouse, point_center(l_eye, r_eye))
def face_bbox(l_eye, r_eye, mouse):
sz = face_sz(l_eye, r_eye, mouse)
center = point_center(mouse, point_center(l_eye, r_eye))
left = center[0] - sz
right = center[0] + sz
top = center[1] - sz
bottom = center[1] + sz
return [int(x+0.5) for x in [left, top, right, bottom]]
| [
"numpy.square"
] | [((68, 84), 'numpy.square', 'np.square', (['(a - b)'], {}), '(a - b)\n', (77, 84), True, 'import numpy as np\n')] |
import numpy as np
import tflearn
import sys
# Load CSV file
# For some reason, the CSV must have a single label column. So the dataset has a last dummy column.
from tflearn.data_utils import load_csv
input_data, dummy = load_csv("data.csv", columns_to_ignore=[5, 6, 7, 8])
input_labels, dummy = load_csv("data.csv", columns_to_ignore=[1, 2, 3, 4])
# Put data and labels into a numpy array (matrix)
data = np.array(input_data, dtype=np.float32)
labels = np.array(input_labels, dtype=np.float32)
# Build neural network
net = tflearn.input_data(shape=[None, 4]) # 4 inputs
net = tflearn.fully_connected(net, 16, activation='relu') # hidden layer of 16 nodes
net = tflearn.fully_connected(net, 16, activation='relu') # hidden layer of 16 nodes
net = tflearn.fully_connected(net, 4, activation='relu') # 4 outputs
net = tflearn.regression(net, loss='mean_square')
# Define model
model = tflearn.DNN(net)
# Start training (apply gradient descent algorithm)
model.fit(data, labels, n_epoch=10, show_metric=True)
# User testing loop
while True:
# Ask the user for values (0.0 - 1.0) for each sensor
print("front proximity?")
f = sys.stdin.readline()
print("rear proximity?")
b = sys.stdin.readline()
print("left proximity?")
l = sys.stdin.readline()
print("right proximity?")
r = sys.stdin.readline()
# Make prediction
test = [[f, b, l, r]] # test input
pred = model.predict(test) # run test input through neural net
# Report
print("Prediction: ")
print("brakes: "+str(pred[0][0]))
print("accelerator: "+str(pred[0][1]))
print("steer left: "+str(pred[0][2]))
print("steer right: "+str(pred[0][3]))
print("--")
| [
"tflearn.DNN",
"sys.stdin.readline",
"numpy.array",
"tflearn.data_utils.load_csv",
"tflearn.regression",
"tflearn.fully_connected",
"tflearn.input_data"
] | [((222, 274), 'tflearn.data_utils.load_csv', 'load_csv', (['"""data.csv"""'], {'columns_to_ignore': '[5, 6, 7, 8]'}), "('data.csv', columns_to_ignore=[5, 6, 7, 8])\n", (230, 274), False, 'from tflearn.data_utils import load_csv\n'), ((297, 349), 'tflearn.data_utils.load_csv', 'load_csv', (['"""data.csv"""'], {'columns_to_ignore': '[1, 2, 3, 4]'}), "('data.csv', columns_to_ignore=[1, 2, 3, 4])\n", (305, 349), False, 'from tflearn.data_utils import load_csv\n'), ((408, 446), 'numpy.array', 'np.array', (['input_data'], {'dtype': 'np.float32'}), '(input_data, dtype=np.float32)\n', (416, 446), True, 'import numpy as np\n'), ((456, 496), 'numpy.array', 'np.array', (['input_labels'], {'dtype': 'np.float32'}), '(input_labels, dtype=np.float32)\n', (464, 496), True, 'import numpy as np\n'), ((527, 562), 'tflearn.input_data', 'tflearn.input_data', ([], {'shape': '[None, 4]'}), '(shape=[None, 4])\n', (545, 562), False, 'import tflearn\n'), ((580, 631), 'tflearn.fully_connected', 'tflearn.fully_connected', (['net', '(16)'], {'activation': '"""relu"""'}), "(net, 16, activation='relu')\n", (603, 631), False, 'import tflearn\n'), ((665, 716), 'tflearn.fully_connected', 'tflearn.fully_connected', (['net', '(16)'], {'activation': '"""relu"""'}), "(net, 16, activation='relu')\n", (688, 716), False, 'import tflearn\n'), ((750, 800), 'tflearn.fully_connected', 'tflearn.fully_connected', (['net', '(4)'], {'activation': '"""relu"""'}), "(net, 4, activation='relu')\n", (773, 800), False, 'import tflearn\n'), ((819, 862), 'tflearn.regression', 'tflearn.regression', (['net'], {'loss': '"""mean_square"""'}), "(net, loss='mean_square')\n", (837, 862), False, 'import tflearn\n'), ((887, 903), 'tflearn.DNN', 'tflearn.DNN', (['net'], {}), '(net)\n', (898, 903), False, 'import tflearn\n'), ((1131, 1151), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (1149, 1151), False, 'import sys\n'), ((1183, 1203), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (1201, 1203), 
False, 'import sys\n'), ((1235, 1255), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (1253, 1255), False, 'import sys\n'), ((1288, 1308), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (1306, 1308), False, 'import sys\n')] |
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import dash_katex
import numpy as np
import plotly.express as px
from scipy import stats
from app import app
layout = html.Div([
dash_katex.DashKatex(
expression=r'f_X(x) = \frac{1}{b - a}',
displayMode=True
),
dcc.Graph(id='uniform_graph'),
dash_katex.DashKatex(expression=r'a, b'),
dcc.RangeSlider(
id='uniform_interval',
min=-10,
max=10,
marks={
x: str(x) for x in range(-10, 11)
},
step=0.01,
value=[2, 4],
allowCross=False,
tooltip={'placement': 'top'}
)
])
@app.callback(
Output('uniform_graph', 'figure'),
[Input('uniform_interval', 'value')]
)
def plot(interval):
a, b = interval
x = np.linspace(a, b, 1000)
y = stats.uniform.pdf(x, a, b - a)
range_x=[-11, 11]
range_y=[-0.2, max(1.2, max(y) + 0.2)]
figure = px.line(x=x, y=y, range_x=range_x, range_y=range_y)
return figure
| [
"dash.dependencies.Output",
"dash.dependencies.Input",
"plotly.express.line",
"numpy.linspace",
"scipy.stats.uniform.pdf",
"dash_katex.DashKatex",
"dash_core_components.Graph"
] | [((853, 876), 'numpy.linspace', 'np.linspace', (['a', 'b', '(1000)'], {}), '(a, b, 1000)\n', (864, 876), True, 'import numpy as np\n'), ((885, 915), 'scipy.stats.uniform.pdf', 'stats.uniform.pdf', (['x', 'a', '(b - a)'], {}), '(x, a, b - a)\n', (902, 915), False, 'from scipy import stats\n'), ((994, 1045), 'plotly.express.line', 'px.line', ([], {'x': 'x', 'y': 'y', 'range_x': 'range_x', 'range_y': 'range_y'}), '(x=x, y=y, range_x=range_x, range_y=range_y)\n', (1001, 1045), True, 'import plotly.express as px\n'), ((727, 760), 'dash.dependencies.Output', 'Output', (['"""uniform_graph"""', '"""figure"""'], {}), "('uniform_graph', 'figure')\n", (733, 760), False, 'from dash.dependencies import Input, Output\n'), ((252, 330), 'dash_katex.DashKatex', 'dash_katex.DashKatex', ([], {'expression': '"""f_X(x) = \\\\frac{1}{b - a}"""', 'displayMode': '(True)'}), "(expression='f_X(x) = \\\\frac{1}{b - a}', displayMode=True)\n", (272, 330), False, 'import dash_katex\n'), ((358, 387), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""uniform_graph"""'}), "(id='uniform_graph')\n", (367, 387), True, 'import dash_core_components as dcc\n'), ((393, 432), 'dash_katex.DashKatex', 'dash_katex.DashKatex', ([], {'expression': '"""a, b"""'}), "(expression='a, b')\n", (413, 432), False, 'import dash_katex\n'), ((767, 801), 'dash.dependencies.Input', 'Input', (['"""uniform_interval"""', '"""value"""'], {}), "('uniform_interval', 'value')\n", (772, 801), False, 'from dash.dependencies import Input, Output\n')] |
import torch
import torch.utils.data
from rlkit.torch.pytorch_util import from_numpy
from torch import nn
from torch.autograd import Variable
from torch.nn import functional as F
from rlkit.pythonplusplus import identity
from rlkit.torch import pytorch_util as ptu
import numpy as np
class RefinementNetwork(nn.Module):
def __init__(
self,
input_width,
input_height,
input_channels,
output_size,
kernel_sizes,
n_channels,
strides,
paddings,
hidden_sizes,
lstm_size,
lstm_input_size,
added_fc_input_size=0,
batch_norm_conv=False,
batch_norm_fc=False,
init_w=1e-4,
hidden_init=nn.init.xavier_uniform_,
hidden_activation=nn.ReLU(),
output_activation=identity,
):
if hidden_sizes is None:
hidden_sizes = []
assert len(kernel_sizes) == \
len(n_channels) == \
len(strides) == \
len(paddings)
super().__init__()
self.hidden_sizes = hidden_sizes
self.input_width = input_width
self.input_height = input_height
self.input_channels = input_channels
self.lstm_size = lstm_size
self.output_size = output_size
self.output_activation = output_activation
self.hidden_activation = hidden_activation
self.batch_norm_conv = batch_norm_conv
self.batch_norm_fc = batch_norm_fc
self.added_fc_input_size = added_fc_input_size
self.conv_input_length = self.input_width * self.input_height * self.input_channels
self.conv_layers = nn.ModuleList()
self.conv_norm_layers = nn.ModuleList()
self.fc_layers = nn.ModuleList()
self.fc_norm_layers = nn.ModuleList()
self.lstm = nn.LSTM(lstm_input_size, lstm_size, num_layers=1, batch_first=True)
for out_channels, kernel_size, stride, padding in \
zip(n_channels, kernel_sizes, strides, paddings):
conv = nn.Conv2d(input_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding)
hidden_init(conv.weight)
conv.bias.data.fill_(0)
conv_layer = conv
self.conv_layers.append(conv_layer)
input_channels = out_channels
# find output dim of conv_layers by trial and add normalization conv layers
test_mat = torch.zeros(1, self.input_channels, self.input_width,
self.input_height) # initially the model is on CPU (caller should then move it to GPU if
for conv_layer in self.conv_layers:
test_mat = conv_layer(test_mat)
#self.conv_norm_layers.append(nn.BatchNorm2d(test_mat.shape[1]))
fc_input_size = int(np.prod(test_mat.shape))
# used only for injecting input directly into fc layers
fc_input_size += added_fc_input_size
for idx, hidden_size in enumerate(hidden_sizes):
fc_layer = nn.Linear(fc_input_size, hidden_size)
#norm_layer = nn.BatchNorm1d(hidden_size)
fc_layer.weight.data.uniform_(-init_w, init_w)
fc_layer.bias.data.uniform_(-init_w, init_w)
self.fc_layers.append(fc_layer)
#self.fc_norm_layers.append(norm_layer)
fc_input_size = hidden_size
self.last_fc = nn.Linear(lstm_size, output_size)
#self.last_fc.weight.data.uniform_(-init_w, init_w)
#self.last_fc.bias.data.uniform_(-init_w, init_w)
self.last_fc2 = nn.Linear(lstm_size, output_size)
xcoords = np.expand_dims(np.linspace(-1, 1, self.input_width), 0).repeat(self.input_height, 0)
ycoords = np.repeat(np.linspace(-1, 1, self.input_height), self.input_width).reshape((self.input_height, self.input_width))
self.coords = from_numpy(np.expand_dims(np.stack([xcoords, ycoords], 0), 0))
def forward(self, input, hidden1, hidden2, extra_input=None):
# need to reshape from batch of flattened images into (channsls, w, h)
# import pdb; pdb.set_trace()
# h = input.view(input.shape[0],
# self.input_channels-2,
# self.input_height,
# self.input_width)
h = input
coords = self.coords.repeat(input.shape[0], 1, 1, 1)
h = torch.cat([h, coords], 1)
h = self.apply_forward(h, self.conv_layers, self.conv_norm_layers,
use_batch_norm=self.batch_norm_conv)
# flatten channels for fc layers
h = h.view(h.size(0), -1)
# if extra_input is not None:
# h = torch.cat((h, extra_input), dim=1)
output = self.apply_forward(h, self.fc_layers, self.fc_norm_layers,
use_batch_norm=self.batch_norm_fc)
if extra_input is not None:
output = torch.cat([output, extra_input], dim=1)
if len(hidden1.shape) == 2:
hidden1, hidden2 = hidden1.unsqueeze(0), hidden2.unsqueeze(0)
self.lstm.flatten_parameters()
output, hidden = self.lstm(output.unsqueeze(1), (hidden1, hidden2))
#output1 = self.output_activation(self.last_fc(output.squeeze()))
output1 = self.output_activation(self.last_fc(output.squeeze()))
output2 = self.output_activation(self.last_fc2(output.squeeze()))
return output1, output2, hidden[0], hidden[1]
def initialize_hidden(self, bs):
return (Variable(ptu.from_numpy(np.zeros((1, bs, self.lstm_size)))),
Variable(ptu.from_numpy(np.zeros((1, bs, self.lstm_size)))))
def apply_forward(self, input, hidden_layers, norm_layers,
use_batch_norm=False):
h = input
for layer in hidden_layers:
h = layer(h)
#if use_batch_norm:
# h = norm_layer(h)
h = self.hidden_activation(h)
return h
| [
"numpy.prod",
"torch.nn.ReLU",
"torch.nn.ModuleList",
"torch.nn.LSTM",
"torch.nn.Conv2d",
"numpy.stack",
"numpy.linspace",
"numpy.zeros",
"torch.nn.Linear",
"torch.zeros",
"torch.cat"
] | [((839, 848), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (846, 848), False, 'from torch import nn\n'), ((1731, 1746), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1744, 1746), False, 'from torch import nn\n'), ((1779, 1794), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1792, 1794), False, 'from torch import nn\n'), ((1820, 1835), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1833, 1835), False, 'from torch import nn\n'), ((1866, 1881), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1879, 1881), False, 'from torch import nn\n'), ((1903, 1970), 'torch.nn.LSTM', 'nn.LSTM', (['lstm_input_size', 'lstm_size'], {'num_layers': '(1)', 'batch_first': '(True)'}), '(lstm_input_size, lstm_size, num_layers=1, batch_first=True)\n', (1910, 1970), False, 'from torch import nn\n'), ((2616, 2688), 'torch.zeros', 'torch.zeros', (['(1)', 'self.input_channels', 'self.input_width', 'self.input_height'], {}), '(1, self.input_channels, self.input_width, self.input_height)\n', (2627, 2688), False, 'import torch\n'), ((3570, 3603), 'torch.nn.Linear', 'nn.Linear', (['lstm_size', 'output_size'], {}), '(lstm_size, output_size)\n', (3579, 3603), False, 'from torch import nn\n'), ((3746, 3779), 'torch.nn.Linear', 'nn.Linear', (['lstm_size', 'output_size'], {}), '(lstm_size, output_size)\n', (3755, 3779), False, 'from torch import nn\n'), ((4560, 4585), 'torch.cat', 'torch.cat', (['[h, coords]', '(1)'], {}), '([h, coords], 1)\n', (4569, 4585), False, 'import torch\n'), ((2117, 2206), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_channels', 'out_channels', 'kernel_size'], {'stride': 'stride', 'padding': 'padding'}), '(input_channels, out_channels, kernel_size, stride=stride, padding\n =padding)\n', (2126, 2206), False, 'from torch import nn\n'), ((2985, 3008), 'numpy.prod', 'np.prod', (['test_mat.shape'], {}), '(test_mat.shape)\n', (2992, 3008), True, 'import numpy as np\n'), ((3200, 3237), 'torch.nn.Linear', 'nn.Linear', (['fc_input_size', 
'hidden_size'], {}), '(fc_input_size, hidden_size)\n', (3209, 3237), False, 'from torch import nn\n'), ((5098, 5137), 'torch.cat', 'torch.cat', (['[output, extra_input]'], {'dim': '(1)'}), '([output, extra_input], dim=1)\n', (5107, 5137), False, 'import torch\n'), ((4065, 4096), 'numpy.stack', 'np.stack', (['[xcoords, ycoords]', '(0)'], {}), '([xcoords, ycoords], 0)\n', (4073, 4096), True, 'import numpy as np\n'), ((3814, 3850), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'self.input_width'], {}), '(-1, 1, self.input_width)\n', (3825, 3850), True, 'import numpy as np\n'), ((3912, 3949), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'self.input_height'], {}), '(-1, 1, self.input_height)\n', (3923, 3949), True, 'import numpy as np\n'), ((5718, 5751), 'numpy.zeros', 'np.zeros', (['(1, bs, self.lstm_size)'], {}), '((1, bs, self.lstm_size))\n', (5726, 5751), True, 'import numpy as np\n'), ((5795, 5828), 'numpy.zeros', 'np.zeros', (['(1, bs, self.lstm_size)'], {}), '((1, bs, self.lstm_size))\n', (5803, 5828), True, 'import numpy as np\n')] |
import logging
from ledfxcontroller.devices import Device
import voluptuous as vol
import numpy as np
import sacn
import time
_LOGGER = logging.getLogger(__name__)
class E131Device(Device):
"""E1.31 device support"""
CONFIG_SCHEMA = vol.Schema({
vol.Required('host'): str,
vol.Required('universe', default=1): int,
vol.Required('universe_size', default=512): int,
vol.Required('channel_offset', default=1): int,
vol.Required(vol.Any('pixel_count', 'channel_count')): vol.Coerce(int)
})
def __init__(self, config):
self._config = config
# Allow for configuring in terms of "pixels" or "channels"
if 'pixel_count' in self._config:
self._config['channel_count'] = self._config['pixel_count'] * 3
else:
self._config['pixel_count'] = self._config['channel_count'] // 3
span = self._config['channel_offset'] + self._config['channel_count'] - 1
self._config['universe_end'] = self._config['universe'] + int(span / self._config['universe_size'])
if span % self._config['universe_size'] == 0:
self._config['universe_end'] -= 1
self._sacn = None
@property
def pixel_count(self):
return int(self._config['pixel_count'])
def activate(self):
if self._sacn:
raise Exception('sACN sender already started.')
# Configure sACN and start the dedicated thread to flush the buffer
self._sacn = sacn.sACNsender()
for universe in range(self._config['universe'], self._config['universe_end'] + 1):
_LOGGER.info("sACN activating universe {}".format(universe))
self._sacn.activate_output(universe)
if (self._config['host'] == None):
self._sacn[universe].multicast = True
else:
self._sacn[universe].destination = self._config['host']
self._sacn[universe].multicast = False
#self._sacn.fps = 60
self._sacn.start()
_LOGGER.info("sACN sender started.")
super().activate()
def deactivate(self):
super().deactivate()
if not self._sacn:
raise Exception('sACN sender not started.')
# Turn off all the LEDs when deactivating. With how the sender
# works currently we need to sleep to ensure the pixels actually
# get updated. Need to replace the sACN sender such that flush
# directly writes the pixels.
self.flush(np.zeros(self._config['channel_count']))
time.sleep(1.5)
self._sacn.stop()
self._sacn = None
_LOGGER.info("sACN sender stopped.")
def flush(self, data):
"""Flush the data to all the E1.31 channels account for spanning universes"""
if not self._sacn:
raise Exception('sACN sender not started.')
if data.size != self._config['channel_count']:
raise Exception('Invalid buffer size.')
data = data.flatten()
current_index = 0
for universe in range(self._config['universe'], self._config['universe_end'] + 1):
# Calculate offset into the provide input buffer for the channel. There are some
# cleaner ways this can be done... This is just the quick and dirty
universe_start = (universe - self._config['universe']) * self._config['universe_size']
universe_end = (universe - self._config['universe'] + 1) * self._config['universe_size']
dmx_start = max(universe_start, self._config['channel_offset']) % self._config['universe_size']
dmx_end = min(universe_end, self._config['channel_offset'] + self._config['channel_count']) % self._config['universe_size']
if dmx_end == 0:
dmx_end = self._config['universe_size']
input_start = current_index
input_end = current_index + dmx_end - dmx_start
current_index = input_end
dmx_data = np.array(self._sacn[universe].dmx_data)
dmx_data[dmx_start:dmx_end] = data[input_start:input_end]
self._sacn[universe].dmx_data = dmx_data | [
"logging.getLogger",
"voluptuous.Required",
"sacn.sACNsender",
"voluptuous.Any",
"time.sleep",
"numpy.array",
"numpy.zeros",
"voluptuous.Coerce"
] | [((137, 164), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (154, 164), False, 'import logging\n'), ((1495, 1512), 'sacn.sACNsender', 'sacn.sACNsender', ([], {}), '()\n', (1510, 1512), False, 'import sacn\n'), ((2564, 2579), 'time.sleep', 'time.sleep', (['(1.5)'], {}), '(1.5)\n', (2574, 2579), False, 'import time\n'), ((265, 285), 'voluptuous.Required', 'vol.Required', (['"""host"""'], {}), "('host')\n", (277, 285), True, 'import voluptuous as vol\n'), ((300, 335), 'voluptuous.Required', 'vol.Required', (['"""universe"""'], {'default': '(1)'}), "('universe', default=1)\n", (312, 335), True, 'import voluptuous as vol\n'), ((350, 392), 'voluptuous.Required', 'vol.Required', (['"""universe_size"""'], {'default': '(512)'}), "('universe_size', default=512)\n", (362, 392), True, 'import voluptuous as vol\n'), ((407, 448), 'voluptuous.Required', 'vol.Required', (['"""channel_offset"""'], {'default': '(1)'}), "('channel_offset', default=1)\n", (419, 448), True, 'import voluptuous as vol\n'), ((518, 533), 'voluptuous.Coerce', 'vol.Coerce', (['int'], {}), '(int)\n', (528, 533), True, 'import voluptuous as vol\n'), ((2515, 2554), 'numpy.zeros', 'np.zeros', (["self._config['channel_count']"], {}), "(self._config['channel_count'])\n", (2523, 2554), True, 'import numpy as np\n'), ((4010, 4049), 'numpy.array', 'np.array', (['self._sacn[universe].dmx_data'], {}), '(self._sacn[universe].dmx_data)\n', (4018, 4049), True, 'import numpy as np\n'), ((476, 515), 'voluptuous.Any', 'vol.Any', (['"""pixel_count"""', '"""channel_count"""'], {}), "('pixel_count', 'channel_count')\n", (483, 515), True, 'import voluptuous as vol\n')] |
import numpy as np
from scipy.stats import skew, kurtosis
__all__ = ['sky_noise_error', 'propagate_noise_error', 'mcnoise']
def sky_noise_error(nu_obs, nu_emit, nu_ch_bw, tint, a_eff, n_station, bmax):
"""Calculate instrument noise error of an interferometer.
This assume that Tsys is dominated by Tsky.
(see Furlanetto et al. (2006) section 9)
Parameters
----------
nu_obs : float or array-like
Observing frequency in [MHz].
Can be array-like to compute noise at multiple frequencies.
nu_emit : float
Emitted frequency of the observed spectral line in [MHz].
nu_ch_bw : float
Observed frequency channel bandwidth in [MHz].
tint : float, optional
Integration time. Default is 1000. [hours]
a_eff : float
Effective area of a station in [m**2].
n_station : integer
Number of antennas (or stations in case of a phase-array).
bmax : float
Maximum baseline length of the array in [wavelength].
Returns
-------
Noise error (standard deviation) in [mK] in the same format as nu_obs.
"""
nu_obs = np.asarray(nu_obs)
a_tot = a_eff * n_station
z = (nu_emit / nu_obs) - 1.
theta = 1.22 / bmax * 60 * 180 / np.pi
err = 2.9 * (1.e5 / a_tot) * (10. / theta) ** 2 * \
((1 + z) / 10.0) ** 4.6 * np.sqrt(100. / (nu_ch_bw * tint))
return err
def propagate_noise_error(noise_err, m2, m3, m4, m6, npix):
"""Analytically propagate error to variance and skewness.
Based on error propagation described in the appendix of
Watkinson & Pritchard (2014)
Parameters
----------
noise_err : float or array-like
Noise error.
m2 : float
2nd moment of the data
m3 : float
3rd moment of the data
m4 : float
4th moment of the data
m6 : float
6th moment of the data
npix : int
Number of pixels in the data
Returns
-------
Error of 2nd moment, 3rd moment and skewness.
"""
noise_var = np.asarray(noise_err) ** 2
m2_var = (2. / npix) * (2 * m2 * noise_var + noise_var ** 2)
m3_var = (3. / npix) * (3 * noise_var * m4 + 12 * m2 * noise_var ** 2 +
5 * noise_var ** 3)
m4_var = (8. / npix) * (2 * m6 * noise_var + 21 * m4 * noise_var ** 2 +
48 * m2 * noise_var ** 3 + 12 * noise_var ** 4)
m2m3_cov = (6 / npix) * m3 * noise_var
m2m4_cov = (4. / npix) * (2 * m4 * noise_var + 9 * m2 * noise_var ** 2 +
3 * noise_var ** 3)
skew_var = (m3_var / (m2 ** 3)) + \
((9 * m3 ** 2 * m2_var) / (4 * m2 ** 5)) - \
(3 * m3 * m2m3_cov / (m2 ** 4))
kurt_var = (1. / m2 ** 4) * m4_var + 4 * (m4 ** 2 / m2 ** 6) * m2_var - \
4 * (m4 / m2 ** 5) * m2m4_cov
return np.sqrt(m2_var), np.sqrt(skew_var), np.sqrt(kurt_var)
def mcnoise(data, noise_std, n, noise_scaling=1.):
"""
Parameters
----------
data : ndarray
Array of data.
noise_std : float
Standard deviation of the noise
n : int
Number of repetition
noise_scaling: float
Scaling factor for noise
Returns
-------
variance, variance error, skewness, skewness error, kurtosis, kurtosis error
"""
noise_arr = np.random.normal(0, noise_std, (n, data.size)) * noise_scaling
var_sample = np.var(data + noise_arr, axis=1)
skew_sample = skew(data + noise_arr, axis=1)
kurt_sample = kurtosis(data + noise_arr, axis=1)
var_val = np.mean(var_sample)
skew_val = np.mean(skew_sample)
kurt_val = np.mean(kurt_sample)
var_err = np.std(var_sample)
skew_err = np.std(skew_sample)
kurt_err = np.std(kurt_sample)
return var_val, var_err, skew_val, skew_err, kurt_val, kurt_err
| [
"numpy.random.normal",
"numpy.mean",
"numpy.sqrt",
"scipy.stats.kurtosis",
"numpy.asarray",
"scipy.stats.skew",
"numpy.std",
"numpy.var"
] | [((1127, 1145), 'numpy.asarray', 'np.asarray', (['nu_obs'], {}), '(nu_obs)\n', (1137, 1145), True, 'import numpy as np\n'), ((3402, 3434), 'numpy.var', 'np.var', (['(data + noise_arr)'], {'axis': '(1)'}), '(data + noise_arr, axis=1)\n', (3408, 3434), True, 'import numpy as np\n'), ((3453, 3483), 'scipy.stats.skew', 'skew', (['(data + noise_arr)'], {'axis': '(1)'}), '(data + noise_arr, axis=1)\n', (3457, 3483), False, 'from scipy.stats import skew, kurtosis\n'), ((3502, 3536), 'scipy.stats.kurtosis', 'kurtosis', (['(data + noise_arr)'], {'axis': '(1)'}), '(data + noise_arr, axis=1)\n', (3510, 3536), False, 'from scipy.stats import skew, kurtosis\n'), ((3551, 3570), 'numpy.mean', 'np.mean', (['var_sample'], {}), '(var_sample)\n', (3558, 3570), True, 'import numpy as np\n'), ((3586, 3606), 'numpy.mean', 'np.mean', (['skew_sample'], {}), '(skew_sample)\n', (3593, 3606), True, 'import numpy as np\n'), ((3622, 3642), 'numpy.mean', 'np.mean', (['kurt_sample'], {}), '(kurt_sample)\n', (3629, 3642), True, 'import numpy as np\n'), ((3657, 3675), 'numpy.std', 'np.std', (['var_sample'], {}), '(var_sample)\n', (3663, 3675), True, 'import numpy as np\n'), ((3691, 3710), 'numpy.std', 'np.std', (['skew_sample'], {}), '(skew_sample)\n', (3697, 3710), True, 'import numpy as np\n'), ((3726, 3745), 'numpy.std', 'np.std', (['kurt_sample'], {}), '(kurt_sample)\n', (3732, 3745), True, 'import numpy as np\n'), ((1341, 1375), 'numpy.sqrt', 'np.sqrt', (['(100.0 / (nu_ch_bw * tint))'], {}), '(100.0 / (nu_ch_bw * tint))\n', (1348, 1375), True, 'import numpy as np\n'), ((2031, 2052), 'numpy.asarray', 'np.asarray', (['noise_err'], {}), '(noise_err)\n', (2041, 2052), True, 'import numpy as np\n'), ((2843, 2858), 'numpy.sqrt', 'np.sqrt', (['m2_var'], {}), '(m2_var)\n', (2850, 2858), True, 'import numpy as np\n'), ((2860, 2877), 'numpy.sqrt', 'np.sqrt', (['skew_var'], {}), '(skew_var)\n', (2867, 2877), True, 'import numpy as np\n'), ((2879, 2896), 'numpy.sqrt', 'np.sqrt', (['kurt_var'], {}), 
'(kurt_var)\n', (2886, 2896), True, 'import numpy as np\n'), ((3322, 3368), 'numpy.random.normal', 'np.random.normal', (['(0)', 'noise_std', '(n, data.size)'], {}), '(0, noise_std, (n, data.size))\n', (3338, 3368), True, 'import numpy as np\n')] |
import pandas as pd
import matplotlib.pyplot as plt
from PPImage import PPImage
import numpy as np
from PIL import Image
import os
import config
def plot_df_count(df, column='diagnosis'):
df_plot = df[column].value_counts().sort_index()
print(df_plot)
df_plot.plot.bar(df_plot)
plt.show()
def preprocess_image(row):
image = PPImage()
imgName = f'{row.id_code}.{config.IMAGE_EXTENSION}'
image.from_zip(config.ZIP_FILE_PATH, imgName, config.FOLDER_IN_ZIP) #perfectExample
if np.sum(image.data, axis=2).mean() < 15:
return False
target.data = target.resize(dim=(image.data.shape[1], image.data.shape[0]))
image.hist_match_rgb(target=target.data)
image.data = image.crop_image_only_outside(image.data, tol=15)
image.data = image.hist_equalize(image.data)
image.export(f'{config.HIST_MATCH_PATH}{imgName}', quality='good')
orig = image.crop_image_only_outside(np.array(image.image), tol=15)
orig = image.hist_equalize(orig)
orig = Image.fromarray(orig)
orig.save(f'{config.HIST_EQL_PATH}{imgName}', subsampling=0, quality=100)
return True
# preprocess_image()
'''
idrid = pd.read_csv(root_path+'idrid.csv')
idrid['zip'] = 'idrid.zip'
for i in range(len(idrid)):
row = idrid.iloc[i]
print(i, "Processing: ", row.id_code)
savePath = root_path + 'idrid/'
preprocess_image(root_path, 'idrid/', row, '.jpg', savePath)
aptos = pd.read_csv(root_path+'aptos.csv')
aptos['zip'] = 'aptos.zip'
for i in range(len(aptos)):
row = aptos.iloc[i]
print(i, "Processing: ", row.id_code)
savePath = root_path + 'aptos/'
preprocess_image(root_path, '', row, '.png', savePath)
'''
target = PPImage(config.TARGET_IMAGE)
google = pd.read_csv(config.CSV_PATH)
os.makedirs(config.HIST_MATCH_PATH, exist_ok=True)
os.makedirs(config.HIST_EQL_PATH, exist_ok=True)
existing = os.listdir(config.HIST_MATCH_PATH)
existing2 = os.listdir(config.HIST_EQL_PATH)
errors = []
for i in range(len(google)):
row = google.iloc[i]
img = f'{row.id_code}.{config.IMAGE_EXTENSION}'
if img in existing and img in existing2:
continue
print(i, "Processing: ", row.id_code)
done = preprocess_image(row)
if not done:
errors.append(img)
pd.DataFrame(errors, columns=['id_code']).to_csv("errors.csv", index=False)
# exit()
# all = pd.concat([idrid, google, aptos])
# print("==================== ALL ==================")
# plot_df_count(all)
# print("==================== APTOS ==================")
# plot_df_count(aptos)
# print("==================== GOOGLE ==================")
# plot_df_count(google)
# print("==================== IDRID ==================")
# plot_df_count(idrid)
#
# random_all = all.sample(frac=1).reset_index(drop=True)
# print("Done")
| [
"PIL.Image.fromarray",
"os.listdir",
"os.makedirs",
"pandas.read_csv",
"PPImage.PPImage",
"numpy.array",
"numpy.sum",
"pandas.DataFrame",
"matplotlib.pyplot.show"
] | [((1692, 1720), 'PPImage.PPImage', 'PPImage', (['config.TARGET_IMAGE'], {}), '(config.TARGET_IMAGE)\n', (1699, 1720), False, 'from PPImage import PPImage\n'), ((1731, 1759), 'pandas.read_csv', 'pd.read_csv', (['config.CSV_PATH'], {}), '(config.CSV_PATH)\n', (1742, 1759), True, 'import pandas as pd\n'), ((1760, 1810), 'os.makedirs', 'os.makedirs', (['config.HIST_MATCH_PATH'], {'exist_ok': '(True)'}), '(config.HIST_MATCH_PATH, exist_ok=True)\n', (1771, 1810), False, 'import os\n'), ((1811, 1859), 'os.makedirs', 'os.makedirs', (['config.HIST_EQL_PATH'], {'exist_ok': '(True)'}), '(config.HIST_EQL_PATH, exist_ok=True)\n', (1822, 1859), False, 'import os\n'), ((1872, 1906), 'os.listdir', 'os.listdir', (['config.HIST_MATCH_PATH'], {}), '(config.HIST_MATCH_PATH)\n', (1882, 1906), False, 'import os\n'), ((1919, 1951), 'os.listdir', 'os.listdir', (['config.HIST_EQL_PATH'], {}), '(config.HIST_EQL_PATH)\n', (1929, 1951), False, 'import os\n'), ((295, 305), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (303, 305), True, 'import matplotlib.pyplot as plt\n'), ((346, 355), 'PPImage.PPImage', 'PPImage', ([], {}), '()\n', (353, 355), False, 'from PPImage import PPImage\n'), ((1004, 1025), 'PIL.Image.fromarray', 'Image.fromarray', (['orig'], {}), '(orig)\n', (1019, 1025), False, 'from PIL import Image\n'), ((925, 946), 'numpy.array', 'np.array', (['image.image'], {}), '(image.image)\n', (933, 946), True, 'import numpy as np\n'), ((508, 534), 'numpy.sum', 'np.sum', (['image.data'], {'axis': '(2)'}), '(image.data, axis=2)\n', (514, 534), True, 'import numpy as np\n'), ((2260, 2301), 'pandas.DataFrame', 'pd.DataFrame', (['errors'], {'columns': "['id_code']"}), "(errors, columns=['id_code'])\n", (2272, 2301), True, 'import pandas as pd\n')] |
import sys
import numpy as np
import argparse
from PIL import Image
def find_message(img_path):
input_img = Image.open(img_path)
pixels = np.array(input_img)
colors = pixels.flatten()
message = ""
character_byte = 0x00
for i, color in enumerate(colors):
if i % 8 == 0 and i != 0:
character_byte = character_byte >> 1
if character_byte == ord("\0"):
break
message += chr(character_byte)
character_byte = 0x00
# Uzima zadnji bit iz boje te ga invertira.
last_byte = color & 0b00000001
last_byte = ~last_byte
last_byte = last_byte & 0b00000001
# Upisuje zadnji bit iz boje na zadnje mjesto charactera.
character_byte = character_byte | last_byte
# Sve bitove pomice za jedno mjesto u lijevo
# time dodaje nulu na desnoj strani
# to radi kako bi u sljedecem loopu
# na mjesto nule upisao last_byte.
character_byte = character_byte << 1
return message
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser(description="Finds a hidden message from an image file.")
arg_parser.add_argument("img_path", help="image where the message is hidden")
arg_parser.add_argument("-o", "--output", help="file to write message in")
arguments = arg_parser.parse_args()
img_path = arguments.img_path
out_path = arguments.output
try:
message = find_message(img_path)
if arguments.output:
output_file = open(arguments.output, "w")
output_file.write(message)
output_file.close()
else:
print(message)
except FileNotFoundError:
print("File", img_path, "does not exist")
| [
"numpy.array",
"PIL.Image.open",
"argparse.ArgumentParser"
] | [((114, 134), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (124, 134), False, 'from PIL import Image\n'), ((148, 167), 'numpy.array', 'np.array', (['input_img'], {}), '(input_img)\n', (156, 167), True, 'import numpy as np\n'), ((1091, 1177), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Finds a hidden message from an image file."""'}), "(description=\n 'Finds a hidden message from an image file.')\n", (1114, 1177), False, 'import argparse\n')] |
import h5py
import numpy as np
import include.diag as diag
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('TkAgg')
def angular_derivative(array, wvn):
return np.fft.ifft(1j * wvn * np.fft.fft(array))
quench_rates = [100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850]
spin_winding_list = []
for quench in quench_rates:
filename = '../../data/1d_kibble-zurek/single_runs/1d_polar-BA-FM_{}.hdf5'.format(quench)
with h5py.File(filename, 'r') as data_file:
# Load in data:
x = data_file['grid/x']
Nx = len(x)
dx = x[1] - x[0]
dkx = 2 * np.pi / (Nx * dx)
Kx = np.fft.fftshift(np.arange(-Nx // 2, Nx // 2) * dkx)
dt = data_file['time/dt'][...]
Nframe = data_file['time/Nframe'][...]
frame = int(quench / (Nframe * dt))
psi_plus = data_file['wavefunction/psi_plus'][:, frame]
psi_0 = data_file['wavefunction/psi_0'][:, frame]
psi_minus = data_file['wavefunction/psi_minus'][:, frame]
n = abs(psi_plus) ** 2 + abs(psi_0) ** 2 + abs(psi_minus) ** 2
# Calculate spin vectors:
fx, fy, fz, F = diag.calculate_spin(psi_plus, psi_0, psi_minus, n)
F_plus = fx + 1j * fy
F_minus = fx - 1j * fy
R = Nx * dx / (2 * np.pi) # Radius of ring
dF_plus = angular_derivative(F_plus, Kx)
dF_minus = angular_derivative(F_minus, Kx)
integral = (R / (2j * abs(F_plus) ** 2)) * (F_minus * dF_plus - F_plus * dF_minus)
spin_winding = int(dx * sum(np.real(integral)) / (2 * np.pi * 2 * np.sqrt(Nx)))
spin_winding_list.append(spin_winding)
print('Spin winding for t_q={} is: {}'.format(quench, spin_winding))
plt.plot(quench_rates, spin_winding_list, 'ko')
plt.xlabel(r'$\tau_Q$')
plt.ylabel(r'$w$')
plt.show()
| [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.fft.fft",
"h5py.File",
"numpy.real",
"include.diag.calculate_spin",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((109, 132), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (123, 132), False, 'import matplotlib\n'), ((1740, 1787), 'matplotlib.pyplot.plot', 'plt.plot', (['quench_rates', 'spin_winding_list', '"""ko"""'], {}), "(quench_rates, spin_winding_list, 'ko')\n", (1748, 1787), True, 'import matplotlib.pyplot as plt\n'), ((1788, 1811), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\tau_Q$"""'], {}), "('$\\\\tau_Q$')\n", (1798, 1811), True, 'import matplotlib.pyplot as plt\n'), ((1812, 1829), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$w$"""'], {}), "('$w$')\n", (1822, 1829), True, 'import matplotlib.pyplot as plt\n'), ((1831, 1841), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1839, 1841), True, 'import matplotlib.pyplot as plt\n'), ((477, 501), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (486, 501), False, 'import h5py\n'), ((1168, 1218), 'include.diag.calculate_spin', 'diag.calculate_spin', (['psi_plus', 'psi_0', 'psi_minus', 'n'], {}), '(psi_plus, psi_0, psi_minus, n)\n', (1187, 1218), True, 'import include.diag as diag\n'), ((205, 222), 'numpy.fft.fft', 'np.fft.fft', (['array'], {}), '(array)\n', (215, 222), True, 'import numpy as np\n'), ((682, 710), 'numpy.arange', 'np.arange', (['(-Nx // 2)', '(Nx // 2)'], {}), '(-Nx // 2, Nx // 2)\n', (691, 710), True, 'import numpy as np\n'), ((1600, 1611), 'numpy.sqrt', 'np.sqrt', (['Nx'], {}), '(Nx)\n', (1607, 1611), True, 'import numpy as np\n'), ((1562, 1579), 'numpy.real', 'np.real', (['integral'], {}), '(integral)\n', (1569, 1579), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 3 15:55:18 2014
@author: leo
"""
import numpy as np
import matplotlib.pyplot as plt
# Macros
pi = np.pi; exp = np.exp; arange = np.arange; zeros = np.zeros
indexed = lambda l, offset=0: zip(np.arange(len(l))+offset,l)
# Constantes
w = 2.0*pi*0.25
a0 = 6.0/4.0
# Funções
ak = lambda k: a0 if k == 0 else (1.0/(4*pow(1j*k*w,2))) * (2.0*exp(-3j*k*w)*(3j*k*w + 1) - 2.0*exp(-2j*k*w)*(2j*k*w + 1) - 2.0*exp(-1j*k*w)*(1j*k*w + 1) + 2) \
+ (1.0/(4j*k*w))* (-6.0*exp(-3j*k*w) + 2.0*exp(-2j*k*w) + 4.0*exp(-1j*k*w))
harms = lambda k: arange(-k,k+1)
# Domínio do Tempo
td = arange(0,6,.05)
# X(t) com n harmônicas.
def xt(k):
xt = zeros(td.shape)
for n, t in indexed(td):
xt[n] = np.matrix([ak(h)*exp(1j*h*w*t) for h in harms(k)]).sum()
return np.abs(xt)
# Plota
def ploth(harm):
plt.figure()
plt.plot(td, xt(harm))
plt.grid(True)
plt.title('$x(t)$')
plt.xlabel('$t$')
ploth(5)
ploth(10)
ploth(20)
plt.figure()
plt.vlines(harms(10), [0], [abs(ak(k)) for k in harms(10)], 'k', lw=2)
plt.plot(harms(10), [abs(ak(k)) for k in harms(10)], 'ko')
plt.xlim(-11,11)
plt.grid(True)
plt.title('$a_k$')
plt.legend() | [
"numpy.abs",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.legend"
] | [((998, 1010), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1008, 1010), True, 'import matplotlib.pyplot as plt\n'), ((1141, 1158), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-11)', '(11)'], {}), '(-11, 11)\n', (1149, 1158), True, 'import matplotlib.pyplot as plt\n'), ((1159, 1173), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1167, 1173), True, 'import matplotlib.pyplot as plt\n'), ((1174, 1192), 'matplotlib.pyplot.title', 'plt.title', (['"""$a_k$"""'], {}), "('$a_k$')\n", (1183, 1192), True, 'import matplotlib.pyplot as plt\n'), ((1193, 1205), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1203, 1205), True, 'import matplotlib.pyplot as plt\n'), ((817, 827), 'numpy.abs', 'np.abs', (['xt'], {}), '(xt)\n', (823, 827), True, 'import numpy as np\n'), ((862, 874), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (872, 874), True, 'import matplotlib.pyplot as plt\n'), ((906, 920), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (914, 920), True, 'import matplotlib.pyplot as plt\n'), ((925, 944), 'matplotlib.pyplot.title', 'plt.title', (['"""$x(t)$"""'], {}), "('$x(t)$')\n", (934, 944), True, 'import matplotlib.pyplot as plt\n'), ((949, 966), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$t$"""'], {}), "('$t$')\n", (959, 966), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding:utf-8 -*-
#
# cluster.py
"""Cluster module."""
import networkx as nx
import numpy as np
import pandas as pd
from .utils import flatten_dict
from .utils import get_within_cutoff_matrix
from .utils import pairwise_distances
class Cluster:
"""Object to store and compute data about an individual particle cluster
Args:
graph (networkx Graph): Contains nodes and edges corresponding to
particles and bonds, respectively, where a bond implies the particles
are within a distance of `cutoff_distance` from each other.
particle_df (dataframe): Dataframe where index is `particle_id`, and
there are `n_dimensions` columns labelled `x0`, x1`, ... `xN`
box_lengths (ndarray): Must contain `n_dimensions` values representing
the lengths of each dimension of a rectangular box.
cutoff_distance (float): Maximum distance two particles can be from
each other to be considered part of the same cluster
Attributes:
graph (networkx Graph): Contains nodes and edges corresponding to
particles and bonds, respectively, where a bond implies the particles
are within a distance of `cutoff_distance` from each other.
particle_df (dataframe): Dataframe where index is `particle_id`, and
there are `n_dimensions` columns labelled `x0`, x1`, ... `xN`
box_lengths (ndarray): Must contain `n_dimensions` values representing
the lengths of each dimension of a rectangular box.
cutoff_distance (float): Maximum distance two particles can be from
each other to be considered part of the same cluster
n_dimensions (int): Number of dimensions in the system
n_particles (int): Number of particles in the cluster
"""
def __init__(self, graph, particle_df, box_lengths, cutoff_distance):
self._cluster_property_map = dict(
n_particles=self.compute_n_particles,
minimum_node_cuts=self.compute_minimum_node_cuts,
center_of_mass=self.compute_center_of_mass,
unwrapped_center_of_mass=self.compute_unwrapped_center_of_mass,
rg=self.compute_rg,
asphericity=self.compute_asphericity,
)
self._particle_property_map = dict(
coordination_number=self.compute_coordination_number,
distance_from_com=self.compute_distance_from_com,
)
self.graph = graph
self.particle_df = particle_df.copy()
self.box_lengths = box_lengths
self.cutoff_distance = cutoff_distance
self.n_dimensions = len(box_lengths)
self.n_particles = len(particle_df)
self._minimum_node_cuts_dict = None
self._unwrapped_x_df = None
self._unwrapped_center_of_mass_dict = None
self._center_of_mass_dict = None
self._gyration_tensor = None
self._gyration_eigenvals = None
self._rg = None
def _split_edges_with_faces_1_dim(self, graph, dim):
"""Breaks all edges that cross the `dim`-dimension's periodic boundary
condition (PBC). Replaces those edges with edges connecting the lower
particle to the lower wall and the higher particle to the higher wall
Args:
graph (networkx Graph): [description]
dim (int): Dimension in which to break the PBC.
(0, 1, 2, ... etc.)
Returns:
networkx Graph: Copy of the input graph, modified to replace
PBC-crossing edges with conections to "face" nodes
"""
graph = graph.copy()
dim_str = f"x{dim}"
low_face_node_position = {
f"x{d}": None for d in range(self.n_dimensions)
}
low_face_node_position[dim_str] = 0
high_face_node_position = {
f"x{d}": None for d in range(self.n_dimensions)
}
high_face_node_position[dim_str] = self.box_lengths[dim]
low_face_node_str = f"{dim_str}_low"
high_face_node_str = f"{dim_str}_high"
graph.add_node(low_face_node_str, **low_face_node_position)
graph.add_node(high_face_node_str, **high_face_node_position)
edges_to_add = []
edges_to_remove = []
for u, v in graph.edges:
if isinstance(u, str) or isinstance(v, str):
# Only the face nodes are strings
continue
u_x = self.particle_df.loc[u, dim_str]
v_x = self.particle_df.loc[v, dim_str]
if np.abs(u_x - v_x) > self.cutoff_distance:
edges_to_remove.append((u, v))
if u_x < v_x:
low_node = u
high_node = v
else:
low_node = v
high_node = u
edges_to_add.append((low_face_node_str, low_node))
edges_to_add.append((high_face_node_str, high_node))
for u, v in edges_to_add:
graph.add_edge(u, v)
for u, v in edges_to_remove:
graph.remove_edge(u, v)
return graph
def compute_cluster_properties(self, properties=["n_particles"]):
"""Compute cluster properties passed in `properties` variable
Args:
properties (list or str, optional): List of cluster properties to
compute, or "all" to compute all available properties.
Defaults to ["n_particles"].
Returns:
dict: `property_name → property_value` key-value pairs
"""
if properties == "all":
properties = self._cluster_property_map.keys()
cluster_properties_dict = dict()
for prop in properties:
if prop not in self._cluster_property_map:
raise ValueError(f"Property '{prop}' is not valid!")
prop_function = self._cluster_property_map[prop]
cluster_properties_dict[prop] = prop_function()
cluster_properties_dict = flatten_dict(cluster_properties_dict)
return cluster_properties_dict
def compute_particle_properties(self, properties=["coordination_number"]):
"""Compute particle properties passed in `properties` variable
Args:
properties (list or str, optional): List of particle properties to
compute, or "all" to compute all available properties.
Defaults to ["coordination_number"].
Returns:
dataframe: Shape (`n_particles`, `n_dimensions` + `n_properties`)
`particle_id` as index, `x*` and particle property columns.
"""
if properties == "all":
properties = self._particle_property_map.keys()
particle_df = self.particle_df.copy()
for prop in properties:
if prop not in self._particle_property_map:
raise ValueError(f"Property '{prop}' is not valid!")
prop_function = self._particle_property_map[prop]
property_df = prop_function()
assert np.all(property_df.index == self.particle_df.index)
particle_df = particle_df.join(property_df, how="left")
return particle_df
def compute_bonds(self):
"""Returns a dataframe with 2 columns, where each row has a pair of `particle_id`s associated with bonded particles
Returns:
dataframe: Shape `(n_bonds, 2)`. Column names `particle_id_1` and
`particle_id_2`.
"""
bonds_df = pd.DataFrame(
self.graph.edges(), columns=["particle_id_1", "particle_id_2"]
).sort_values(["particle_id_1", "particle_id_2"])
return bonds_df
######################
# Cluster Properties #
######################
def compute_n_particles(self):
"""Returns the number of particles in the cluster
Returns:
int: number of particles in the cluster
"""
return self.n_particles
def compute_minimum_node_cuts(self):
"""Returns dictionary of minimum node cuts required to break the
connection between faces normal to a given direction.
Returns:
dict: `dimension_str → minimum_node_cuts` key-value pairs
"""
# If this was already computed, return the stored dictionary
if self._minimum_node_cuts_dict is not None:
return self._minimum_node_cuts_dict
minimum_node_cuts_dict = dict()
for dim in range(self.n_dimensions):
split_graph = self._split_edges_with_faces_1_dim(self.graph, dim)
node_cut = nx.minimum_node_cut(
split_graph, f"x{dim}_low", f"x{dim}_high"
)
minimum_node_cuts_dict[f"x{dim}"] = len(node_cut)
# Store this because other computations like center of mass rely on it
self._minimum_node_cuts_dict = minimum_node_cuts_dict
return minimum_node_cuts_dict
def compute_center_of_mass(self, wrapped=True):
"""Returns cluster center of mass dictionary
Args:
wrapped (boolean, optional): If True, a center of mass that
falls outside the box bounds is forced to be in range
[0, `box_lengths[d]`) for each dimension `d`. If using this to
compare to unwrapped particle coordinates, leave as False.
Defaults to False.
Returns:
dict: `{"x0": x0, "x1": x1, ...}`
"""
if wrapped is True and self._center_of_mass_dict is not None:
return self._center_of_mass_dict
if (
wrapped is False
and self._unwrapped_center_of_mass_dict is not None
):
return self._unwrapped_center_of_mass_dict
unwrapped_x_df = self._compute_unwrapped_x()
if unwrapped_x_df is None:
# _compute_unwrapped_x returns None if the particles bridge the
# faces of at least 1 dimension. If that's the case, center of mass
# can't necessarily be computed either
center_of_mass = {
f"x{d}": np.nan for d in range(self.n_dimensions)
}
self._unwrapped_center_of_mass_dict = center_of_mass
self._center_of_mass_dict = center_of_mass
return center_of_mass
unwrapped_x_df.columns = [f"x{d}" for d in range(self.n_dimensions)]
center_of_mass = unwrapped_x_df.mean(axis=0)
self._unwrapped_center_of_mass_dict = center_of_mass.to_dict()
if wrapped is True:
while np.any(center_of_mass < 0) or np.any(
center_of_mass > self.box_lengths
):
center_of_mass = np.where(
center_of_mass < 0,
center_of_mass + self.box_lengths,
center_of_mass,
)
center_of_mass = np.where(
center_of_mass >= self.box_lengths,
center_of_mass - self.box_lengths,
center_of_mass,
)
self._center_of_mass_dict = {
f"x{i}": v for i, v in enumerate(center_of_mass)
}
return self._center_of_mass_dict
else:
return self._unwrapped_center_of_mass_dict
def compute_unwrapped_center_of_mass(self):
"""Returns unwrapped center of mass, meaning it's the center of mass of
the unwrapped particle coordinates, and isn't necessarily inside the box
coordinates.
Returns:
dict: Unwrapped center of mass coordinates, `"x*" → number`
key-value pairs. Technically, no max or min restriction, but
probably within 1 period of the box bounds.
"""
return self.compute_center_of_mass(wrapped=False)
def _compute_gyration_tensor(self):
"""Returns cluster gyration tensor
Returns:
ndarray: Shape (`n_dimensions`, `n_dimensions`) gyration tensor
"""
if self._gyration_tensor is not None:
return self._gyration_tensor
dx_from_com = self.compute_distance_from_com(
include_distance=False
).values
if np.isnan(dx_from_com).sum() > 0:
# If there are NaN values, that means it's a percolated cluster
gyration_tensor = np.nan * np.ones(
(self.n_dimensions, self.n_dimensions)
)
else:
# This implements the first equation in
# https://en.wikipedia.org/wiki/Gyration_tensor
gyration_tensor = (
np.sum(
dx_from_com[:, :, None] * dx_from_com[:, None, :], axis=0
)
/ self.n_particles
)
# Make sure gyration_tensor is symmetric
assert np.allclose(gyration_tensor, gyration_tensor.T)
self._gyration_tensor = gyration_tensor
return gyration_tensor
def _compute_gyration_eigenvals(self):
"""Returns numpy array of eigenvalues of the gyration tensor. Values are
not sorted.
Returns:
ndarry: Shape (`n_dimensions`,) eigenvalues array
"""
if self._gyration_eigenvals is not None:
return self._gyration_eigenvals
gyration_tensor = self._compute_gyration_tensor()
if np.isnan(gyration_tensor).sum() > 0:
# NaNs exist if the cluster is percolated
eigenvals = np.nan * np.ones(self.n_dimensions)
else:
eigenvals, eigenvecs = np.linalg.eig(gyration_tensor)
# Make sure all the numbers are real
assert np.isclose(np.sum(np.abs(np.imag(eigenvals))), 0.0)
# Drop the 0 imaginary part if it's there
eigenvals = eigenvals.real
self._gyration_eigenvals = eigenvals
return eigenvals
def compute_rg(self):
"""Returns cluster radius of gyration.
Returns:
float: Cluster radius of gyration
"""
if self._rg is not None:
return self._rg
eigenvals = self._compute_gyration_eigenvals()
if np.any(np.isnan(eigenvals)):
rg = np.nan
else:
rg = np.sqrt(np.sum(eigenvals ** 2))
self._rg = rg
return rg
def compute_asphericity(self):
"""Returns cluster asphericity
(see https://en.wikipedia.org/wiki/Gyration_tensor#Shape_descriptors)
Returns:
float: Asphericity, normalized by radius of gyration squared
"""
rg = self.compute_rg()
if rg == 0.0:
asphericity = 0
elif np.isnan(rg):
asphericity = np.nan
else:
scaled_eigenvals = self._compute_gyration_eigenvals() / rg
evals_squared = np.sort(scaled_eigenvals ** 2)
asphericity = evals_squared[-1] - np.mean(evals_squared[:-1])
return asphericity
#######################
# Particle Properties #
#######################
def compute_coordination_number(self):
"""Returns a dataframe of coordination numbers corresponding to each
particle in the cluster
Returns:
dataframe: Coordination numbers for particles in the cluster. Index
is `particle_id`s and matches `particle_df.index`
"""
distances = pairwise_distances(self.particle_df, self.box_lengths)
within_cutoff_matrix = get_within_cutoff_matrix(
distances, self.cutoff_distance
)
coordination_numbers = within_cutoff_matrix.sum(axis=1).astype(int)
coordination_numbers_df = pd.DataFrame(
dict(coordination_number=coordination_numbers),
index=self.particle_df.index,
)
return coordination_numbers_df
def _compute_unwrapped_x(self):
"""Returns unwrapped particle coordinates dataframe
Returns:
dataframe: Index is `particle_id`, matching index of `particle_df`.
Columns are `unwrapped_x*` where `*` represents 0, 1, ...
`n_particles`
"""
if self._unwrapped_x_df is not None:
return self._unwrapped_x_df
minimum_node_cuts_dict = self.compute_minimum_node_cuts()
n_node_cuts = sum(value for value in minimum_node_cuts_dict.values())
# If n_node_cuts is greater than 0, that means the cluster spans the
# length of at least 1 dimension, and a center of mass can't
# necessarily be computed.
if n_node_cuts > 0:
return None
column_names_1 = [f"x{d}" for d in range(self.n_dimensions)]
column_names_2 = [f"unwrapped_x{d}" for d in range(self.n_dimensions)]
if len(self.graph) == 1:
unwrapped_x_df = self.particle_df.filter(column_names_1).copy()
unwrapped_x_df.columns = column_names_2
return unwrapped_x_df
x_array_dict = dict()
first = True
x_df = self.particle_df.filter(column_names_1)
for node_1, node_2 in nx.dfs_edges(self.graph):
if first:
x_array_1 = x_df.loc[node_1, :].values
x_array_dict[node_1] = x_array_1
first = False
else:
x_array_1 = x_array_dict[node_1]
x_array_2 = x_df.loc[node_2, :].values
dx_array = x_array_2 - x_array_1
dx_array = np.where(
dx_array < -self.box_lengths / 2,
dx_array + self.box_lengths,
dx_array,
)
dx_array = np.where(
dx_array >= self.box_lengths / 2,
dx_array - self.box_lengths,
dx_array,
)
x_array_2 = x_array_1 + dx_array
x_array_dict[node_2] = x_array_2
unwrapped_x_df = pd.DataFrame(x_array_dict).transpose().sort_index()
assert np.all(unwrapped_x_df.index == self.particle_df.index)
assert unwrapped_x_df.shape == (self.n_particles, self.n_dimensions)
unwrapped_x_df.columns = column_names_2
self._unwrapped_x_df = unwrapped_x_df
return unwrapped_x_df
def compute_distance_from_com(
self, include_dx=True, include_distance=True
):
"""Returns dataframe of distances from the center of mass for each
particle
Args:
include_dx (bool, optional): If True, includes `dx_from_com_x*`
columns. Defaults to True.
include_distance (bool, optional): If True, includes
`distance_from_com` column. Defaults to True
Raises:
ValueError: both include_dx and include_distance are False
Returns:
dataframe: Index is `particle_id` (matching index of `particle_df`),
columns are `distance_from_com` (Euclidean distance from center of
mass), and `dx_from_com_x*` (Vector difference) where `*` represents
0, 1, ... `n_particles`.
"""
if include_dx is False and include_distance is False:
raise ValueError(
"one of include_dx or include_distance must be True"
)
x_columns = [f"x{d}" for d in range(self.n_dimensions)]
unwrapped_x_df = self._compute_unwrapped_x()
if unwrapped_x_df is None:
center_of_mass_dict = {xc: None for xc in x_columns}
distance_from_com_df = pd.DataFrame(index=self.particle_df.index)
if include_dx is True:
for d in range(self.n_dimensions):
distance_from_com_df[f"dx_from_com_x{d}"] = np.nan
if include_distance is True:
distance_from_com_df["distance_from_com"] = np.nan
else:
unwrapped_x = unwrapped_x_df.values
center_of_mass_dict = self.compute_center_of_mass(wrapped=False)
center_of_mass = (
pd.DataFrame([center_of_mass_dict]).filter(x_columns).values
)
dx = unwrapped_x - center_of_mass
if include_dx is True:
arrays_dict = {
f"dx_from_com_x{d}": dx[:, d]
for d in range(self.n_dimensions)
}
else:
arrays_dict = {}
if include_distance is True:
distances = np.linalg.norm(dx, axis=1)
arrays_dict["distance_from_com"] = distances
distance_from_com_df = pd.DataFrame(
dict(arrays_dict), index=self.particle_df.index
)
return distance_from_com_df
| [
"numpy.abs",
"numpy.mean",
"numpy.allclose",
"numpy.linalg.eig",
"numpy.ones",
"numpy.where",
"numpy.sort",
"numpy.linalg.norm",
"numpy.any",
"numpy.sum",
"numpy.isnan",
"networkx.minimum_node_cut",
"pandas.DataFrame",
"numpy.all",
"numpy.imag",
"networkx.dfs_edges"
] | [((17203, 17227), 'networkx.dfs_edges', 'nx.dfs_edges', (['self.graph'], {}), '(self.graph)\n', (17215, 17227), True, 'import networkx as nx\n'), ((18066, 18120), 'numpy.all', 'np.all', (['(unwrapped_x_df.index == self.particle_df.index)'], {}), '(unwrapped_x_df.index == self.particle_df.index)\n', (18072, 18120), True, 'import numpy as np\n'), ((7099, 7150), 'numpy.all', 'np.all', (['(property_df.index == self.particle_df.index)'], {}), '(property_df.index == self.particle_df.index)\n', (7105, 7150), True, 'import numpy as np\n'), ((8675, 8738), 'networkx.minimum_node_cut', 'nx.minimum_node_cut', (['split_graph', 'f"""x{dim}_low"""', 'f"""x{dim}_high"""'], {}), "(split_graph, f'x{dim}_low', f'x{dim}_high')\n", (8694, 8738), True, 'import networkx as nx\n'), ((12933, 12980), 'numpy.allclose', 'np.allclose', (['gyration_tensor', 'gyration_tensor.T'], {}), '(gyration_tensor, gyration_tensor.T)\n', (12944, 12980), True, 'import numpy as np\n'), ((13667, 13697), 'numpy.linalg.eig', 'np.linalg.eig', (['gyration_tensor'], {}), '(gyration_tensor)\n', (13680, 13697), True, 'import numpy as np\n'), ((14273, 14292), 'numpy.isnan', 'np.isnan', (['eigenvals'], {}), '(eigenvals)\n', (14281, 14292), True, 'import numpy as np\n'), ((14780, 14792), 'numpy.isnan', 'np.isnan', (['rg'], {}), '(rg)\n', (14788, 14792), True, 'import numpy as np\n'), ((17571, 17656), 'numpy.where', 'np.where', (['(dx_array < -self.box_lengths / 2)', '(dx_array + self.box_lengths)', 'dx_array'], {}), '(dx_array < -self.box_lengths / 2, dx_array + self.box_lengths,\n dx_array)\n', (17579, 17656), True, 'import numpy as np\n'), ((17739, 17824), 'numpy.where', 'np.where', (['(dx_array >= self.box_lengths / 2)', '(dx_array - self.box_lengths)', 'dx_array'], {}), '(dx_array >= self.box_lengths / 2, dx_array - self.box_lengths,\n dx_array)\n', (17747, 17824), True, 'import numpy as np\n'), ((19617, 19659), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'self.particle_df.index'}), 
'(index=self.particle_df.index)\n', (19629, 19659), True, 'import pandas as pd\n'), ((4560, 4577), 'numpy.abs', 'np.abs', (['(u_x - v_x)'], {}), '(u_x - v_x)\n', (4566, 4577), True, 'import numpy as np\n'), ((10640, 10666), 'numpy.any', 'np.any', (['(center_of_mass < 0)'], {}), '(center_of_mass < 0)\n', (10646, 10666), True, 'import numpy as np\n'), ((10670, 10711), 'numpy.any', 'np.any', (['(center_of_mass > self.box_lengths)'], {}), '(center_of_mass > self.box_lengths)\n', (10676, 10711), True, 'import numpy as np\n'), ((10776, 10855), 'numpy.where', 'np.where', (['(center_of_mass < 0)', '(center_of_mass + self.box_lengths)', 'center_of_mass'], {}), '(center_of_mass < 0, center_of_mass + self.box_lengths, center_of_mass)\n', (10784, 10855), True, 'import numpy as np\n'), ((10968, 11068), 'numpy.where', 'np.where', (['(center_of_mass >= self.box_lengths)', '(center_of_mass - self.box_lengths)', 'center_of_mass'], {}), '(center_of_mass >= self.box_lengths, center_of_mass - self.\n box_lengths, center_of_mass)\n', (10976, 11068), True, 'import numpy as np\n'), ((12456, 12503), 'numpy.ones', 'np.ones', (['(self.n_dimensions, self.n_dimensions)'], {}), '((self.n_dimensions, self.n_dimensions))\n', (12463, 12503), True, 'import numpy as np\n'), ((12708, 12773), 'numpy.sum', 'np.sum', (['(dx_from_com[:, :, None] * dx_from_com[:, None, :])'], {'axis': '(0)'}), '(dx_from_com[:, :, None] * dx_from_com[:, None, :], axis=0)\n', (12714, 12773), True, 'import numpy as np\n'), ((13591, 13617), 'numpy.ones', 'np.ones', (['self.n_dimensions'], {}), '(self.n_dimensions)\n', (13598, 13617), True, 'import numpy as np\n'), ((14358, 14380), 'numpy.sum', 'np.sum', (['(eigenvals ** 2)'], {}), '(eigenvals ** 2)\n', (14364, 14380), True, 'import numpy as np\n'), ((14940, 14970), 'numpy.sort', 'np.sort', (['(scaled_eigenvals ** 2)'], {}), '(scaled_eigenvals ** 2)\n', (14947, 14970), True, 'import numpy as np\n'), ((20541, 20567), 'numpy.linalg.norm', 'np.linalg.norm', (['dx'], {'axis': 
'(1)'}), '(dx, axis=1)\n', (20555, 20567), True, 'import numpy as np\n'), ((12308, 12329), 'numpy.isnan', 'np.isnan', (['dx_from_com'], {}), '(dx_from_com)\n', (12316, 12329), True, 'import numpy as np\n'), ((13467, 13492), 'numpy.isnan', 'np.isnan', (['gyration_tensor'], {}), '(gyration_tensor)\n', (13475, 13492), True, 'import numpy as np\n'), ((15017, 15044), 'numpy.mean', 'np.mean', (['evals_squared[:-1]'], {}), '(evals_squared[:-1])\n', (15024, 15044), True, 'import numpy as np\n'), ((13791, 13809), 'numpy.imag', 'np.imag', (['eigenvals'], {}), '(eigenvals)\n', (13798, 13809), True, 'import numpy as np\n'), ((17999, 18025), 'pandas.DataFrame', 'pd.DataFrame', (['x_array_dict'], {}), '(x_array_dict)\n', (18011, 18025), True, 'import pandas as pd\n'), ((20111, 20146), 'pandas.DataFrame', 'pd.DataFrame', (['[center_of_mass_dict]'], {}), '([center_of_mass_dict])\n', (20123, 20146), True, 'import pandas as pd\n')] |
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import gizmo_analysis as ga
import utilities as ga_ut
import sys
FIRE_elements = ['h','he','c','n','o','ne','mg','si','s','ca','fe']
FIRE_metals = ['c','n','o','ne','mg','si','s','ca','fe']
#
# wrapper to load data set and set FIRE
# abundance tables
#
def load_with_FIRE(sim_index, wdir, return_model = False, model_Z = 1.0):
"""
Convenience wrapper to load a dataset and set the appropriate initial
abundances and FIRE yield model tables to do age tracer postprocessing
with default FIRE2 yields
"""
initial_part = ga.io.Read.read_snapshots(['gas'],'index',0,simulation_directory=wdir)
initial_abundances = {}
# -- note: np.average isn't necessary since they all should have the same value...
for e in FIRE_elements:
initial_abundances[e] = np.average(initial_part['gas'].prop('massfraction.' + e))
initial_abundances['metals'] = np.average(initial_part['gas'].prop('massfraction.metals'))
part = ga.io.Read.read_snapshots(['gas','star'],
'index',
sim_index,
simulation_directory = wdir)
FIRE_yield_model = ga.agetracers.FIRE2_yields(model_Z = model_Z # yield table metallicity in solar units,
)
FIRE_yield_table = ga.agetracers.construct_yield_table(FIRE_yield_model, # object with a `yields` function
part.ageprop.age_bins/1000.0) # in Gyr
part.set_yield_table(FIRE_yield_table, # the 2D table
FIRE_yield_model.elements # list of elements we want to be able to post process
# these can be anything included in the yield model
)
# finally, set the initial abundances:
# As generated above, this is a dictionary corresponding to the initial
# mass fractions of each element (and all metals). If an element is missing,
# it is assumed to have an initial mass fraction of 0
part.set_initial_abundances(initial_abundances)
if return_model:
return part, FIRE_yield_model, FIRE_yield_table
else:
return part
def compute_error(part, element, particle_type = 'star', filter=True):
"""
For a single element, compute and bin the error
"""
if filter:
select = part[particle_type].prop('metallicity.o') > -3.8
else:
select = part[particle_type].prop('metallicity.o') == part[particle_type].prop('metallicity.o')
error = np.abs(part[particle_type].prop('metallicity.agetracer.'+element)[select] - #+np.log10(0.68) -
part[particle_type].prop('metallicity.' + element)[select])
return error
def bin_error(part, element, particle_type='star',
amin = 0.00, amax = 3.0, da = 0.001, logbins=False):
"""
Return a distribution of the error
"""
error = compute_error(part,element,particle_type)
print(element, np.min(error), np.max(error))
if logbins:
bins = np.arange(np.log10(amin), np.log10(amax), da)
_error = np.log10(error)
else:
bins = np.arange(amin, amax+0.5*da, da)
_error = error
hist, bins = np.histogram(_error, bins=bins)
stats = {'bins' : bins,
'hist' : hist,
'cbins': 0.5*(bins[1:]+bins[:-1]),
'median' : np.percentile(error,50.0),
'onesigma' : np.percentile(error,68.0),
'twosigma' : np.percentile(error,95.0),
'threesigma' : np.percentile(error,99.7)}
return stats
def compute_all_errors(part,particle_type='star',logbins=False,amin=0.0,amax=3,da=0.001):
"""
"""
all_hist = {}
all_stats = {}
for e in FIRE_metals:
all_stats[e] = bin_error(part,e,particle_type,amin=amin,amax=amax,da=da,logbins=logbins)
return all_stats
def generate_analysis(runs):
all_part = {}
all_data = {}
for runname in runs.keys():
all_part[runname] = load_with_FIRE(600,"./" + runname, model_Z = 0.1)
all_data[runname] = {}
all_data[runname] = compute_all_errors(all_part[runname],'star')
fs = 5
fig,ax = plt.subplots(3,3, sharex=True,sharey=True)
fig.set_size_inches(3*fs,3*fs)
fig.subplots_adjust(hspace=0,wspace=0)
axi, axj = 0,0
for e in FIRE_metals:
axindex = (axi,axj)
for runname in runs.keys():
ploty = np.cumsum(all_data[runname][e]['hist']*1.0)
ploty = ploty/ploty[-1]
ax[axindex].plot(all_data[runname][e]['cbins'], ploty,
lw = 3, label = runs[runname])
ax[axindex].set_ylim(0.0,1.0)
ax[axindex].set_xlim(0.001, 1.0)
ax[axindex].semilogx()
axj = axj + 1
if axj >= 3:
axj = 0
axi = axi + 1
for i in np.arange(3):
ax[(i,0)].set_ylabel("Cumulative Histogram")
ax[(2,0)].set_xlabel("Error [dex]")
fig.savefig("rate_error_panel.png")
#
# now compute the metallicities for each and the difference
#
def average_stats(runname, elements):
one = two = three = 0.0
for e in elements:
one = one + all_data[runname][e]['onesigma']
two = two + all_data[runname][e]['twosigma']
three = three + all_data[runname][e]['threesigma']
n = 1.0 * len(elements)
return one / n, two / n, three / n
f = open('rates_results.dat','w')
f.write("name alpha_one alpha_two alpha_three wind_one wind_two wind_three ia_one ia_two ia_three\n")
for run in runs.keys():
f.write(run)
for elist in [ ['o','mg','si','ca'], ['c','n'], ['fe']]:
one, two, three = average_stats(run, elist)
f.write(" %.4f %.4f %.4f"%(one,two,three))
f.write("\n")
f.close()
def compare_distributions(runs):
    """Plot the averaged error distributions (dN/dlog(Error)) per element
    group for every run, saved to 'distributions.png'.

    Args:
        runs (dict): mapping run directory name -> plot legend label.
    """
    all_part = {}
    all_data = {}
    # Log-spaced error bins over [0.01, 2.0] dex.
    amin = 0.01
    amax = 2.0
    da = 0.01
    for runname in runs.keys():
        all_part[runname] = load_with_FIRE(600,"./" + runname, model_Z = 0.1)
        all_data[runname] = {}
        all_data[runname] = compute_all_errors(all_part[runname],'star',
                                               amin = amin, amax = amax, da = da,
                                               logbins=True)
    fs = 6
    fig,ax = plt.subplots(1,3, sharex=True,sharey=True)
    fig.set_size_inches(3*fs,1*fs)
    fig.subplots_adjust(wspace=0)
    # One panel per element group: alpha elements, C+N, Fe.
    for axi, element_list in enumerate([ ['o','mg','si','ca'], ['c','n'], ['fe']]):
        for runname in runs.keys():
            # compute average
            avg_hist = np.zeros(np.size(all_data[runname]['c']['hist']))
            count = 0
            for e in element_list:
                avg_hist += all_data[runname][e]['hist']
                count = count + 1
            avg_hist = avg_hist / (1.0*count)
            # NOTE(review): ``e`` here is the loop variable leaked from the
            # loop above (the group's last element); this is fine only if
            # every element shares identical 'bins'/'cbins' — presumably
            # true since compute_all_errors uses one bin spec. Confirm.
            dz = all_data[runname][e]['bins'][1:] - all_data[runname][e]['bins'][:-1]
            ax[axi].plot(all_data[runname][e]['cbins'],
                         avg_hist / (1.0*np.sum(avg_hist)) / dz,
                         lw = 3, label = runs[runname])
        ax[axi].set_ylim(0,7)
        ax[axi].set_xlim(np.log10(amin),np.log10(amax))
        # ax[axi].semilogx()
        ax[axi].set_xlabel(r"log$_{10}$(Error [dex])")
    ax[0].set_ylabel("dN/d(log(Error))")
    # Panel annotations placed in axes-fraction coordinates.
    xy = (0.8,0.1)
    ax[0].annotate(r'$\alpha$', xy, xy, xycoords='axes fraction')
    ax[1].annotate("C+N", xy, xy, xycoords='axes fraction')
    ax[2].annotate("Fe", xy, xy, xycoords='axes fraction')
    ax[0].legend(loc='best')
    fig.savefig('distributions.png', bbox_inches='tight', pad_inches=0.0)
    return
def compare_runtime(runs):
    # Placeholder: runtime comparison between runs is not implemented yet.
    return
def plot_sigma(runs):
    """Plot the averaged 1/2/3-sigma errors per run read back from
    'rates_results.dat' (written by generate_analysis), saved to
    'sigma_comparison.png'.

    Args:
        runs (dict): mapping run directory name -> x-axis tick label.
    """
    data = np.genfromtxt("rates_results.dat", names=True)
    # Generalized: one tick per run instead of the hard-coded 6.
    xvals = np.arange(len(runs))
    fig, ax = plt.subplots(1, 3, sharey=True)
    fig.set_size_inches(18, 7)
    fig.subplots_adjust(wspace=0)
    colors = {'one': 'C0', 'two': 'C1', 'three': 'C2'}
    labels = {'one': r'1 $\sigma$', 'two': r'2 $\sigma$', 'three': r'3 $\sigma$'}
    annotation = {'alpha': r"$\alpha$ (CCSNe)", 'wind': "C + N (Winds)", 'ia': "Fe (Ia's)"}
    for i, metal in enumerate(['alpha', 'wind', 'ia']):
        for sigma in ['one', 'two', 'three']:
            # linewidth is numeric (was the string '2'), consistent with
            # lw=3 used elsewhere in this file.
            ax[i].plot(xvals, data[metal + '_' + sigma], color=colors[sigma], lw=2, ls=':')
            ax[i].scatter(xvals, data[metal + '_' + sigma], s=30, color=colors[sigma], label=labels[sigma])
        ax[i].set_ylim(0.0, 1.0)
        ax[i].set_xlim(-0.5, len(runs) - 0.5)
        ax[i].set_xticks(xvals)
        ax[i].set_xticklabels(list(runs.values()), rotation='vertical', fontsize=12)
        xy = (0.8, 0.05)
        ax[i].annotate(annotation[metal], xy, xy, xycoords='axes fraction')
    ax[0].set_ylabel("Simulation - AgeTracer [dex]")
    ax[0].legend(loc='best')
    fig.savefig("sigma_comparison.png", bbox_inches='tight', pad_inches=0.05)
    return
if __name__ == "__main__":
    # run directory -> legend / tick label
    runs = {"test0": "R = 1.0", 'test1': "R = 0.5",
            "test2": "R = 0.1", "test3": "R = -1",
            'test4': "R = -10", "test5": "R = -100"}

    # BUGFIX: the old flags reused the names of the functions they guarded
    # (``compare_runtime = False`` rebound the compare_runtime function, so
    # enabling it via argv tried to call a bool and crashed). Flags now use
    # distinct ``do_*`` names.
    do_generate = False
    do_runtime = False
    do_sigma = False
    do_distributions = False
    if len(sys.argv) > 1:  # NOTE(review): assumes ``sys`` is imported above
        if 'generate' in sys.argv:
            do_generate = True
        if 'runtime' in sys.argv:
            do_runtime = True
        if 'compare_sigma' in sys.argv:
            do_sigma = True
        if 'plot_distributions' in sys.argv:
            do_distributions = True
    else:
        # No arguments: default to regenerating the analysis.
        do_generate = True

    if do_generate:
        generate_analysis(runs)
    if do_runtime:
        compare_runtime(runs)
    if do_sigma:
        plot_sigma(runs)
    if do_distributions:
        compare_distributions(runs)
| [
"numpy.histogram",
"numpy.log10",
"matplotlib.use",
"numpy.size",
"gizmo_analysis.agetracers.construct_yield_table",
"gizmo_analysis.io.Read.read_snapshots",
"numpy.max",
"numpy.sum",
"numpy.cumsum",
"numpy.min",
"numpy.percentile",
"gizmo_analysis.agetracers.FIRE2_yields",
"numpy.genfromtxt... | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (32, 39), False, 'import matplotlib\n'), ((642, 715), 'gizmo_analysis.io.Read.read_snapshots', 'ga.io.Read.read_snapshots', (["['gas']", '"""index"""', '(0)'], {'simulation_directory': 'wdir'}), "(['gas'], 'index', 0, simulation_directory=wdir)\n", (667, 715), True, 'import gizmo_analysis as ga\n'), ((1058, 1151), 'gizmo_analysis.io.Read.read_snapshots', 'ga.io.Read.read_snapshots', (["['gas', 'star']", '"""index"""', 'sim_index'], {'simulation_directory': 'wdir'}), "(['gas', 'star'], 'index', sim_index,\n simulation_directory=wdir)\n", (1083, 1151), True, 'import gizmo_analysis as ga\n'), ((1264, 1307), 'gizmo_analysis.agetracers.FIRE2_yields', 'ga.agetracers.FIRE2_yields', ([], {'model_Z': 'model_Z'}), '(model_Z=model_Z)\n', (1290, 1307), True, 'import gizmo_analysis as ga\n'), ((1420, 1509), 'gizmo_analysis.agetracers.construct_yield_table', 'ga.agetracers.construct_yield_table', (['FIRE_yield_model', '(part.ageprop.age_bins / 1000.0)'], {}), '(FIRE_yield_model, part.ageprop.age_bins /\n 1000.0)\n', (1455, 1509), True, 'import gizmo_analysis as ga\n'), ((3373, 3404), 'numpy.histogram', 'np.histogram', (['_error'], {'bins': 'bins'}), '(_error, bins=bins)\n', (3385, 3404), True, 'import numpy as np\n'), ((4342, 4386), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {'sharex': '(True)', 'sharey': '(True)'}), '(3, 3, sharex=True, sharey=True)\n', (4354, 4386), True, 'import matplotlib.pyplot as plt\n'), ((5019, 5031), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (5028, 5031), True, 'import numpy as np\n'), ((6526, 6570), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'sharex': '(True)', 'sharey': '(True)'}), '(1, 3, sharex=True, sharey=True)\n', (6538, 6570), True, 'import matplotlib.pyplot as plt\n'), ((7950, 7996), 'numpy.genfromtxt', 'np.genfromtxt', (['"""rates_results.dat"""'], {'names': '(True)'}), 
"('rates_results.dat', names=True)\n", (7963, 7996), True, 'import numpy as np\n'), ((8009, 8021), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (8018, 8021), True, 'import numpy as np\n'), ((8037, 8068), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'sharey': '(True)'}), '(1, 3, sharey=True)\n', (8049, 8068), True, 'import matplotlib.pyplot as plt\n'), ((3133, 3146), 'numpy.min', 'np.min', (['error'], {}), '(error)\n', (3139, 3146), True, 'import numpy as np\n'), ((3148, 3161), 'numpy.max', 'np.max', (['error'], {}), '(error)\n', (3154, 3161), True, 'import numpy as np\n'), ((3258, 3273), 'numpy.log10', 'np.log10', (['error'], {}), '(error)\n', (3266, 3273), True, 'import numpy as np\n'), ((3299, 3335), 'numpy.arange', 'np.arange', (['amin', '(amax + 0.5 * da)', 'da'], {}), '(amin, amax + 0.5 * da, da)\n', (3308, 3335), True, 'import numpy as np\n'), ((3534, 3560), 'numpy.percentile', 'np.percentile', (['error', '(50.0)'], {}), '(error, 50.0)\n', (3547, 3560), True, 'import numpy as np\n'), ((3587, 3613), 'numpy.percentile', 'np.percentile', (['error', '(68.0)'], {}), '(error, 68.0)\n', (3600, 3613), True, 'import numpy as np\n'), ((3640, 3666), 'numpy.percentile', 'np.percentile', (['error', '(95.0)'], {}), '(error, 95.0)\n', (3653, 3666), True, 'import numpy as np\n'), ((3695, 3721), 'numpy.percentile', 'np.percentile', (['error', '(99.7)'], {}), '(error, 99.7)\n', (3708, 3721), True, 'import numpy as np\n'), ((3205, 3219), 'numpy.log10', 'np.log10', (['amin'], {}), '(amin)\n', (3213, 3219), True, 'import numpy as np\n'), ((3221, 3235), 'numpy.log10', 'np.log10', (['amax'], {}), '(amax)\n', (3229, 3235), True, 'import numpy as np\n'), ((4604, 4649), 'numpy.cumsum', 'np.cumsum', (["(all_data[runname][e]['hist'] * 1.0)"], {}), "(all_data[runname][e]['hist'] * 1.0)\n", (4613, 4649), True, 'import numpy as np\n'), ((7389, 7403), 'numpy.log10', 'np.log10', (['amin'], {}), '(amin)\n', (7397, 7403), True, 'import numpy as np\n'), ((7404, 7418), 
'numpy.log10', 'np.log10', (['amax'], {}), '(amax)\n', (7412, 7418), True, 'import numpy as np\n'), ((6823, 6862), 'numpy.size', 'np.size', (["all_data[runname]['c']['hist']"], {}), "(all_data[runname]['c']['hist'])\n", (6830, 6862), True, 'import numpy as np\n'), ((7252, 7268), 'numpy.sum', 'np.sum', (['avg_hist'], {}), '(avg_hist)\n', (7258, 7268), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from src.classical_processing.pre_processing import compute_sigma
from src.tests.test_data_sets import ExampleDataSetRef19, ExampleDataSetMain
class ComputeSigmaTestCase(unittest.TestCase):
    """Checks compute_sigma against the stored reference data sets."""

    def _check_sigma(self, test_data):
        # Shared body: compare the computed sigma with the data set's
        # stored reference value.
        feature_x, feature_y = test_data.get_features()
        expected = test_data.get_sigma()
        actual = compute_sigma(feature_x, feature_y)
        self.assertEqual(expected.all(), actual.all())

    def test_with_data_set_main(self):
        self.skipTest("error unitary operation computation")
        self._check_sigma(ExampleDataSetMain())

    def test_with_data_set_ref19(self):
        self.skipTest("error unitary operation computation")
        self._check_sigma(ExampleDataSetRef19())
class ComputePTestCase(unittest.TestCase):
    # Test case for the density-matrix normalization (p = sigma / tr(sigma)).
    def runTest(self):
        pass

    def test_with_data_set_main(self):
        # NOTE(review): this test computes ``expected`` (the trace-normalized
        # sigma) but never asserts anything — it looks unfinished. Confirm
        # the intended assertion before relying on this test.
        test_data = ExampleDataSetMain()
        sigma = test_data.get_sigma()
        expected = 1/np.trace(sigma) * sigma
if __name__ == '__main__':
    # Allow running this test module directly; discovers and runs the cases.
    unittest.main()
| [
"numpy.trace",
"src.tests.test_data_sets.ExampleDataSetRef19",
"src.tests.test_data_sets.ExampleDataSetMain",
"unittest.main",
"src.classical_processing.pre_processing.compute_sigma"
] | [((1202, 1217), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1215, 1217), False, 'import unittest\n'), ((349, 369), 'src.tests.test_data_sets.ExampleDataSetMain', 'ExampleDataSetMain', ([], {}), '()\n', (367, 369), False, 'from src.tests.test_data_sets import ExampleDataSetRef19, ExampleDataSetMain\n'), ((484, 519), 'src.classical_processing.pre_processing.compute_sigma', 'compute_sigma', (['feature_x', 'feature_y'], {}), '(feature_x, feature_y)\n', (497, 519), False, 'from src.classical_processing.pre_processing import compute_sigma\n'), ((697, 718), 'src.tests.test_data_sets.ExampleDataSetRef19', 'ExampleDataSetRef19', ([], {}), '()\n', (716, 718), False, 'from src.tests.test_data_sets import ExampleDataSetRef19, ExampleDataSetMain\n'), ((833, 868), 'src.classical_processing.pre_processing.compute_sigma', 'compute_sigma', (['feature_x', 'feature_y'], {}), '(feature_x, feature_y)\n', (846, 868), False, 'from src.classical_processing.pre_processing import compute_sigma\n'), ((1065, 1085), 'src.tests.test_data_sets.ExampleDataSetMain', 'ExampleDataSetMain', ([], {}), '()\n', (1083, 1085), False, 'from src.tests.test_data_sets import ExampleDataSetRef19, ExampleDataSetMain\n'), ((1145, 1160), 'numpy.trace', 'np.trace', (['sigma'], {}), '(sigma)\n', (1153, 1160), True, 'import numpy as np\n')] |
import numpy as np
import numpy.testing
import pytest
from gl0learn import Bounds
from gl0learn.utils import ClosedInterval
@pytest.mark.parametrize(
    "bounds", [(0, 0), (-1, -1), (1, 1), (np.nan, np.nan), (np.nan, 1), (-1, np.nan)]
)
def test_scalar_bad_bounds(bounds):
    """Degenerate (low == high) or NaN scalar bound pairs must be rejected."""
    # np.NAN was a deprecated alias removed in NumPy 2.0; use np.nan
    # (identical value, forward-compatible).
    with pytest.raises(ValueError):
        _ = Bounds(*bounds)
@pytest.mark.parametrize(
    "bounds",
    [
        (np.zeros([2, 2]), np.zeros([2, 2])),
        (-np.ones([2, 2]), -np.ones([2, 2])),
        (np.ones([2, 2]), np.ones([2, 2])),
        (np.nan * np.ones([2, 2]), np.nan * np.ones([2, 2])),
        (np.nan * np.ones([2, 2]), np.ones([2, 2])),
        (-np.ones([2, 2]), np.nan * np.ones([2, 2])),
        (-np.ones([3, 1]), np.ones([2, 2])),
        (-np.ones([2, 2]), np.ones([3, 1])),
        (-np.ones([3, 3]), np.ones([2, 2])),
        (-np.ones([3, 3]), np.arange(0, 9).reshape(3, 3)),
        (-np.arange(0, 9).reshape(3, 3), np.ones([3, 3])),
    ],
)
def test_matrix_bad_bounds(bounds):
    """Matrix bound pairs that are degenerate, NaN, shape-mismatched, or
    with lows not strictly below highs must be rejected."""
    # np.NAN was a deprecated alias removed in NumPy 2.0; use np.nan
    # (identical value, forward-compatible).
    with pytest.raises(ValueError):
        _ = Bounds(*bounds)
@pytest.mark.parametrize(
    "bounds",
    [
        (np.zeros([2, 2]), 0),
        (0, np.zeros([2, 2])),
        (-1, -np.ones([2, 2])),
        (-np.ones([2, 2]), -1),
        (np.ones([2, 2]), 1),
        (1, np.ones([2, 2])),
        (np.nan, np.nan * np.ones([2, 2])),
        (np.nan * np.ones([2, 2]), np.nan),
        (np.nan, np.ones([2, 2])),
        (np.nan * np.ones([2, 2]), 1),
        (-np.ones([2, 2]), np.nan),
        (-1, np.nan * np.ones([2, 2])),
    ],
)
def test_mixed_bad_bounds(bounds):
    """Mixed scalar/matrix pairs that are degenerate or contain NaN must be
    rejected."""
    # np.NAN was a deprecated alias removed in NumPy 2.0; use np.nan
    # (identical value, forward-compatible).
    with pytest.raises(ValueError):
        _ = Bounds(*bounds)
def test_good_bounds_ex1():
    """Plain scalar bounds (-1, 1) are accepted and exposed unchanged."""
    low, high = -1, 1
    b = Bounds(low, high)
    numpy.testing.assert_equal(low, b.lows)
    numpy.testing.assert_equal(high, b.highs)
    # Scalar bounds leave the feature count unconstrained.
    assert b.num_features == ClosedInterval(1, np.inf)
def test_good_bounds_ex2(n=2):
    """A scalar low with a matrix high is accepted; the scalar side is
    broadcast to a matching matrix."""
    highs = np.ones([n, n])
    b = Bounds(-1, highs)
    numpy.testing.assert_equal(-np.ones([n, n]), b.lows)
    numpy.testing.assert_equal(highs, b.highs)
    # NOTE: whether b.highs should share memory with the input is an open
    # question; numpy.shares_memory would be the check once that contract
    # is pinned down.
    assert b.num_features == n
def test_good_bounds_ex3(n=2):
    """Fortran-ordered matrix bounds are accepted just like C-ordered ones."""
    highs = np.ones([n, n], order="F")
    b = Bounds(-1, highs)
    numpy.testing.assert_equal(-np.ones([n, n]), b.lows)
    numpy.testing.assert_equal(highs, b.highs)
    # NOTE: memory-sharing behavior between input and b.highs is untested;
    # numpy.shares_memory would be the check once the contract is defined.
| [
"numpy.ones",
"gl0learn.Bounds",
"gl0learn.utils.ClosedInterval",
"pytest.mark.parametrize",
"numpy.zeros",
"pytest.raises",
"numpy.arange"
] | [((128, 239), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bounds"""', '[(0, 0), (-1, -1), (1, 1), (np.NAN, np.NAN), (np.NAN, 1), (-1, np.NAN)]'], {}), "('bounds', [(0, 0), (-1, -1), (1, 1), (np.NAN, np.\n NAN), (np.NAN, 1), (-1, np.NAN)])\n", (151, 239), False, 'import pytest\n'), ((1695, 1710), 'gl0learn.Bounds', 'Bounds', (['*bounds'], {}), '(*bounds)\n', (1701, 1710), False, 'from gl0learn import Bounds\n'), ((1943, 1958), 'gl0learn.Bounds', 'Bounds', (['*bounds'], {}), '(*bounds)\n', (1949, 1958), False, 'from gl0learn import Bounds\n'), ((2314, 2329), 'gl0learn.Bounds', 'Bounds', (['*bounds'], {}), '(*bounds)\n', (2320, 2329), False, 'from gl0learn import Bounds\n'), ((286, 311), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (299, 311), False, 'import pytest\n'), ((325, 340), 'gl0learn.Bounds', 'Bounds', (['*bounds'], {}), '(*bounds)\n', (331, 340), False, 'from gl0learn import Bounds\n'), ((1001, 1026), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1014, 1026), False, 'import pytest\n'), ((1040, 1055), 'gl0learn.Bounds', 'Bounds', (['*bounds'], {}), '(*bounds)\n', (1046, 1055), False, 'from gl0learn import Bounds\n'), ((1581, 1606), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1594, 1606), False, 'import pytest\n'), ((1620, 1635), 'gl0learn.Bounds', 'Bounds', (['*bounds'], {}), '(*bounds)\n', (1626, 1635), False, 'from gl0learn import Bounds\n'), ((1841, 1866), 'gl0learn.utils.ClosedInterval', 'ClosedInterval', (['(1)', 'np.inf'], {}), '(1, np.inf)\n', (1855, 1866), False, 'from gl0learn.utils import ClosedInterval\n'), ((1918, 1933), 'numpy.ones', 'np.ones', (['[n, n]'], {}), '([n, n])\n', (1925, 1933), True, 'import numpy as np\n'), ((2278, 2304), 'numpy.ones', 'np.ones', (['[n, n]'], {'order': '"""F"""'}), "([n, n], order='F')\n", (2285, 2304), True, 'import numpy as np\n'), ((398, 414), 'numpy.zeros', 'np.zeros', (['[2, 2]'], {}), '([2, 2])\n', (406, 
414), True, 'import numpy as np\n'), ((416, 432), 'numpy.zeros', 'np.zeros', (['[2, 2]'], {}), '([2, 2])\n', (424, 432), True, 'import numpy as np\n'), ((490, 505), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (497, 505), True, 'import numpy as np\n'), ((507, 522), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (514, 522), True, 'import numpy as np\n'), ((622, 637), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (629, 637), True, 'import numpy as np\n'), ((721, 736), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (728, 736), True, 'import numpy as np\n'), ((766, 781), 'numpy.ones', 'np.ones', (['[3, 1]'], {}), '([3, 1])\n', (773, 781), True, 'import numpy as np\n'), ((811, 826), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (818, 826), True, 'import numpy as np\n'), ((929, 944), 'numpy.ones', 'np.ones', (['[3, 3]'], {}), '([3, 3])\n', (936, 944), True, 'import numpy as np\n'), ((1113, 1129), 'numpy.zeros', 'np.zeros', (['[2, 2]'], {}), '([2, 2])\n', (1121, 1129), True, 'import numpy as np\n'), ((1147, 1163), 'numpy.zeros', 'np.zeros', (['[2, 2]'], {}), '([2, 2])\n', (1155, 1163), True, 'import numpy as np\n'), ((1239, 1254), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (1246, 1254), True, 'import numpy as np\n'), ((1272, 1287), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (1279, 1287), True, 'import numpy as np\n'), ((1395, 1410), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (1402, 1410), True, 'import numpy as np\n'), ((1991, 2006), 'numpy.ones', 'np.ones', (['[n, n]'], {}), '([n, n])\n', (1998, 2006), True, 'import numpy as np\n'), ((2362, 2377), 'numpy.ones', 'np.ones', (['[n, n]'], {}), '([n, n])\n', (2369, 2377), True, 'import numpy as np\n'), ((445, 460), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (452, 460), True, 'import numpy as np\n'), ((463, 478), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (470, 478), True, 'import numpy as np\n'), ((543, 
558), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (550, 558), True, 'import numpy as np\n'), ((569, 584), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (576, 584), True, 'import numpy as np\n'), ((605, 620), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (612, 620), True, 'import numpy as np\n'), ((650, 665), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (657, 665), True, 'import numpy as np\n'), ((676, 691), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (683, 691), True, 'import numpy as np\n'), ((704, 719), 'numpy.ones', 'np.ones', (['[3, 1]'], {}), '([3, 1])\n', (711, 719), True, 'import numpy as np\n'), ((749, 764), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (756, 764), True, 'import numpy as np\n'), ((794, 809), 'numpy.ones', 'np.ones', (['[3, 3]'], {}), '([3, 3])\n', (801, 809), True, 'import numpy as np\n'), ((839, 854), 'numpy.ones', 'np.ones', (['[3, 3]'], {}), '([3, 3])\n', (846, 854), True, 'import numpy as np\n'), ((1180, 1195), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (1187, 1195), True, 'import numpy as np\n'), ((1208, 1223), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (1215, 1223), True, 'import numpy as np\n'), ((1316, 1331), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (1323, 1331), True, 'import numpy as np\n'), ((1352, 1367), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (1359, 1367), True, 'import numpy as np\n'), ((1431, 1446), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (1438, 1446), True, 'import numpy as np\n'), ((1462, 1477), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (1469, 1477), True, 'import numpy as np\n'), ((1510, 1525), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (1517, 1525), True, 'import numpy as np\n'), ((856, 871), 'numpy.arange', 'np.arange', (['(0)', '(9)'], {}), '(0, 9)\n', (865, 871), True, 'import numpy as np\n'), ((898, 913), 'numpy.arange', 'np.arange', (['(0)', 
'(9)'], {}), '(0, 9)\n', (907, 913), True, 'import numpy as np\n')] |
from scipy import stats
import numpy as np
def simbolizar(X, m = 3):
    """Convert a numeric series into its symbolic version using sliding
    windows of ``m`` consecutive values (ordinal-pattern symbolization).

    Parameters
    ----------
    X : array-like
        Series to symbolize.
    m : int
        Window length (m >= 1).

    Returns
    -------
    np.ndarray
        One rank-pattern string per window, e.g. (0.3, 0.1, 0.2) -> "312".

    Raises
    ------
    ValueError
        If the series is not longer than the window.
    """
    if not isinstance(X, np.ndarray):
        X = np.array(X)
    if m >= X.size:
        raise ValueError("La serie debe ser más grande que la ventana")
    # Stack m shifted copies of X so each column is one window of length m.
    # BUGFIX: the old slice ``l[:-(m - 1)]`` produced an empty array when
    # m == 1; ``X.size - m + 1`` is the correct window count for any m >= 1.
    n_ventanas = X.size - m + 1
    dummy = []
    for i in range(m):
        l = np.roll(X, -i)
        dummy.append(l[:n_ventanas])
    dummy = np.array(dummy)
    simX = []
    for ventana in dummy.T:
        # Dense ranks of the window values, rendered as a compact string
        # symbol with the surrounding brackets stripped.
        ranking = stats.rankdata(ventana, method="dense")
        simbolo = np.array2string(ranking, separator="")
        simX.append(simbolo[1:-1])
    return np.array(simX)
def informacion_mutua(simX, simY):
    """Compute the symbolic mutual information IM(X, Y) between the
    symbolic series X and Y, normalized by log(alphabet size).

    Parameters
    ----------
    simX : Symbolic series X
    simY : Symbolic series Y

    Returns
    -------
    float : symbolic mutual information value

    Raises
    ------
    ValueError : if the series differ in length
    """
    if simX.size != simY.size:
        raise ValueError('Los arreglos deben tener la misma longitud')
    # Alphabet size over both series; used as the logarithm base so the
    # result is normalized to [0, 1].
    n_simbolos = len(np.unique(np.concatenate((simX, simY))).tolist())
    jp = probabilidades_conjuntas(simX, simY)
    pX = probabilidades(simX)
    pY = probabilidades(simY)
    IM = 0
    for yi, b in pY.items():
        for xi, a in pX.items():
            # Pairs never observed jointly contribute nothing. Using .get
            # replaces the old try/except KeyError; the bare-except branch
            # there was dead code, since only KeyError could be raised.
            c = jp.get(yi, {}).get(xi)
            if c is None:
                continue
            IM += c * np.log(c / (a * b)) / np.log(n_simbolos)
    return IM
def entropia_transferencia(simX, simY):
    """Compute the symbolic transfer entropy T(Y->X) from series Y to X.

    Parameters
    ----------
    simX : Symbolic series X (target)
    simY : Symbolic series Y (source)

    Returns
    -------
    float : symbolic transfer entropy value

    Raises
    ------
    ValueError : if the series differ in length
    """
    if simX.size != simY.size:
        raise ValueError('Los arreglos deben tener la misma longitud')
    cp = probabilidades_transicion(simX)
    cp2 = probabilidades_transicion_dobles(simX, simY)
    jp = probabilidades_counjuntas_consecutivas(simX, simY)
    ETS = 0
    for yi in jp:
        for xi in jp[yi]:
            for xii, c in jp[yi][xi].items():
                # Skip transitions whose conditional probabilities were
                # never observed. The .get chains replace the old
                # try/except KeyError; the bare-except branch there was
                # unreachable (only KeyError could be raised).
                a = cp.get(xi, {}).get(xii)
                b = cp2.get(yi, {}).get(xi, {}).get(xii)
                if a is None or b is None:
                    continue
                ETS += c * np.log2(b / a)
    return ETS
def curva_ets(simX, simY, pasos = 101):
    """Generate both symbolic-transfer-entropy curves, T(X->Y) and
    T(Y->X), evaluated over a number of lag steps.

    Parameters
    ----------
    simX : Symbolic series X
    simY : Symbolic series Y
    pasos : number of lag steps at which the transfer entropy is computed

    Returns
    -------
    Two np.ndarray curves: (T(X->Y), T(Y->X))
    """
    curva_xy = np.empty(pasos + 1)
    curva_yx = np.empty(pasos + 1)
    # Lags run from -1 to pasos-1; results are stored at index lag + 1.
    for lag in range(-1, pasos):
        curva_xy[lag + 1] = entropia_transferencia(simX, np.roll(simY, -lag))
        curva_yx[lag + 1] = entropia_transferencia(simY, np.roll(simX, -lag))
    return curva_xy, curva_yx
def curva_ets_simple(simX, pasos = 101):
    """Generate the self symbolic-transfer-entropy curve T(X->X)
    evaluated over a number of lag steps.

    Parameters
    ----------
    simX : Symbolic series X
    pasos : number of lag steps at which the transfer entropy is computed

    Returns
    -------
    np.ndarray curve of transfer-entropy values
    """
    curva = np.empty(pasos + 1)
    # Lags run from -1 to pasos-1; results are stored at index lag + 1.
    for lag in range(-1, pasos):
        curva[lag + 1] = entropia_transferencia(simX, np.roll(simX, -lag))
    return curva
def probabilidades(simX):
    """Compute the probability of each symbol in the series' alphabet.

    Parameters
    ----------
    simX : Symbolic series X

    Returns
    -------
    dict : symbol -> relative frequency
    """
    # Count occurrences first and divide once at the end: a single
    # division per symbol is numerically cleaner than repeatedly
    # accumulating 1/n in floating point.
    conteos = {}
    for xi in simX:
        conteos[xi] = conteos.get(xi, 0) + 1
    n = simX.size
    return {xi: c / n for xi, c in conteos.items()}
def probabilidades_conjuntas(simX, simY):
    """Compute the joint probabilities P(yi, xi).

    Parameters
    ----------
    simX : Symbolic series X
    simY : Symbolic series Y

    Returns
    -------
    dict of dict : jp[yi][xi] = joint relative frequency

    Raises
    ------
    ValueError : if the series differ in length
    """
    if simX.size != simY.size:
        raise ValueError('Los arreglos deben tener la misma longitud')
    # Count the (yi, xi) pairs first, then divide once at the end: better
    # float accuracy than repeatedly accumulating 1/n.
    conteos = {}
    for yi, xi in zip(simY, simX):
        fila = conteos.setdefault(yi, {})
        fila[xi] = fila.get(xi, 0) + 1
    n = simX.size
    return {yi: {xi: c / n for xi, c in fila.items()}
            for yi, fila in conteos.items()}
def probabilidades_condicionales(simX, simY):
    """Compute the conditional probabilities P(xi | yi).

    Parameters
    ----------
    simX : Symbolic series X
    simY : Symbolic series Y (conditioning series)

    Returns
    -------
    dict of dict : cp[yi][xi] = P(xi | yi)

    Raises
    ------
    ValueError : if the series differ in length
    """
    if simX.size != simY.size:
        raise ValueError('Los arreglos deben tener la misma longitud')
    cp = {}
    n = {}
    # First pass: count (yi, xi) pairs and the marginal count of each yi.
    for xi, yi in zip(simX, simY):
        fila = cp.setdefault(yi, {})
        fila[xi] = fila.get(xi, 0.0) + 1.0
        n[yi] = n.get(yi, 0) + 1
    # Second pass: normalize every row by its conditioning-symbol count.
    for yi, fila in cp.items():
        for xi in fila:
            fila[xi] /= n[yi]
    return cp
def probabilidades_transicion(simX):
    """Compute the one-step transition probabilities P(x_{i+1} | x_i).

    Parameters
    ----------
    simX : Symbolic series X

    Returns
    -------
    dict of dict : transition probability matrix
    """
    # The next symbol is conditioned on the previous one.
    return probabilidades_condicionales(simX[1:], simX[:-1])
def probabilidades_condicionales_dobles(simX, simY, simZ):
    """Compute the double conditional probabilities P(xi | yi, zi).

    Parameters
    ----------
    simX : Symbolic series X
    simY : Symbolic series Y
    simZ : Symbolic series Z

    Returns
    -------
    nested dict : cp[yi][zi][xi] = P(xi | yi, zi)

    Raises
    ------
    ValueError : if the series differ in length
    """
    if (simX.size != simY.size) or (simY.size != simZ.size):
        raise ValueError('Los arreglos deben tener la misma longitud')
    cp = {}
    n = {}
    # First pass: count (y, z, x) triples and the (y, z) marginals.
    for x, y, z in zip(simX, simY, simZ):
        celda = cp.setdefault(y, {}).setdefault(z, {})
        celda[x] = celda.get(x, 0.0) + 1.0
        fila_n = n.setdefault(y, {})
        fila_n[z] = fila_n.get(z, 0.0) + 1.0
    # Second pass: normalize by the (y, z) marginal counts.
    for y in cp:
        for z in cp[y]:
            for x in cp[y][z]:
                cp[y][z][x] /= n[y][z]
    return cp
def probabilidades_transicion_dobles(simX, simY):
    """Compute the double transition probabilities P(x_{i+1} | x_i, y_i).

    Parameters
    ----------
    simX : Symbolic series X
    simY : Symbolic series Y

    Returns
    -------
    nested dict : double transition probability matrix

    Raises
    ------
    ValueError : if the series differ in length
    """
    if simX.size != simY.size:
        raise ValueError('Los arreglos deben tener la misma longitud')
    # Next X symbol conditioned jointly on the previous Y and X symbols.
    return probabilidades_condicionales_dobles(simX[1:], simY[:-1], simX[:-1])
def probabilidades_conjuntas_triples(simX, simY, simZ):
    """Compute the triple joint probabilities P(xi, yi, zi).

    Parameters
    ----------
    simX : Symbolic series X
    simY : Symbolic series Y
    simZ : Symbolic series Z

    Returns
    -------
    nested dict : jp[yi][zi][xi] = joint relative frequency

    Raises
    ------
    ValueError : if the series differ in length
    """
    if (simX.size != simY.size) or (simY.size != simZ.size):
        raise ValueError('Los arreglos deben tener la misma longitud')
    jp = {}
    n = len(simX)
    # Accumulate 1/n per observed (y, z, x) triple.
    for x, y, z in zip(simX, simY, simZ):
        celda = jp.setdefault(y, {}).setdefault(z, {})
        celda[x] = celda.get(x, 0.0) + 1.0 / n
    return jp
def probabilidades_counjuntas_consecutivas(simX, simY):
    """Compute the joint probabilities P(x_{i+1}, x_i, y_i).

    Parameters
    ----------
    simX : Symbolic series X
    simY : Symbolic series Y

    Returns
    -------
    nested dict : joint probabilities of consecutive symbols

    Raises
    ------
    ValueError : if the series differ in length
    """
    if len(simX) != len(simY):
        raise ValueError('All arrays must have same length')
    # Joint distribution of (next X, current X, current Y).
    return probabilidades_conjuntas_triples(simX[1:], simY[:-1], simX[:-1])
"numpy.roll",
"scipy.stats.rankdata",
"numpy.array2string",
"numpy.log",
"numpy.array",
"numpy.empty",
"numpy.concatenate",
"numpy.log2"
] | [((633, 648), 'numpy.array', 'np.array', (['dummy'], {}), '(dummy)\n', (641, 648), True, 'import numpy as np\n'), ((903, 917), 'numpy.array', 'np.array', (['simX'], {}), '(simX)\n', (911, 917), True, 'import numpy as np\n'), ((3590, 3609), 'numpy.empty', 'np.empty', (['(pasos + 1)'], {}), '(pasos + 1)\n', (3598, 3609), True, 'import numpy as np\n'), ((3623, 3642), 'numpy.empty', 'np.empty', (['(pasos + 1)'], {}), '(pasos + 1)\n', (3631, 3642), True, 'import numpy as np\n'), ((4337, 4356), 'numpy.empty', 'np.empty', (['(pasos + 1)'], {}), '(pasos + 1)\n', (4345, 4356), True, 'import numpy as np\n'), ((405, 416), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (413, 416), True, 'import numpy as np\n'), ((565, 579), 'numpy.roll', 'np.roll', (['X', '(-i)'], {}), '(X, -i)\n', (572, 579), True, 'import numpy as np\n'), ((719, 758), 'scipy.stats.rankdata', 'stats.rankdata', (['ventana'], {'method': '"""dense"""'}), "(ventana, method='dense')\n", (733, 758), False, 'from scipy import stats\n'), ((779, 817), 'numpy.array2string', 'np.array2string', (['ranking'], {'separator': '""""""'}), "(ranking, separator='')\n", (794, 817), True, 'import numpy as np\n'), ((3752, 3769), 'numpy.roll', 'np.roll', (['simY', '(-i)'], {}), '(simY, -i)\n', (3759, 3769), True, 'import numpy as np\n'), ((3824, 3841), 'numpy.roll', 'np.roll', (['simX', '(-i)'], {}), '(simX, -i)\n', (3831, 3841), True, 'import numpy as np\n'), ((4463, 4480), 'numpy.roll', 'np.roll', (['simX', '(-i)'], {}), '(simX, -i)\n', (4470, 4480), True, 'import numpy as np\n'), ((1342, 1370), 'numpy.concatenate', 'np.concatenate', (['(simX, simY)'], {}), '((simX, simY))\n', (1356, 1370), True, 'import numpy as np\n'), ((1734, 1752), 'numpy.log', 'np.log', (['n_simbolos'], {}), '(n_simbolos)\n', (1740, 1752), True, 'import numpy as np\n'), ((1713, 1732), 'numpy.log', 'np.log', (['(c / (a * b))'], {}), '(c / (a * b))\n', (1719, 1732), True, 'import numpy as np\n'), ((2815, 2829), 'numpy.log2', 'np.log2', (['(b / a)'], {}), 
'(b / a)\n', (2822, 2829), True, 'import numpy as np\n')] |
from nltk.tokenize import WordPunctTokenizer
import nltk.data
import numpy as np
import re
import os
root = os.path.dirname(os.path.abspath(__file__))
##################
# TEXTS INVOLVED #
##################
##<NAME>
# 0:The Three Musketeers
# 1:Twenty Years After (D'Artagnan Series: Part Two)
# 2:The Count of Monte Cristo
##<NAME>
# 3:Adventures of Huckleberry Finn
# 4:The American Claimant
##<NAME>
# 5:Around the World in 80 Days
# 6:Twenty Thousand Leagues Under the Sea
##################
# These pull out the core text of their respective stories.
# Index i here pairs with index i in rulesMeta below; getText(idx)
# applies rulesMeta[idx] first, then captures group 1 of rulesStory[idx].
rulesStory = [
    r'our history\.\n{5}(.*)\s+----',
    r'Conclusion\.\n{5}(.*)\s+----',
    r', Pere\n{5}(.*)\n{6}End of',
    r'years ago\n{5}(.*)THE END\. YOURS TRULY, HUCK FINN\.',
    r'goes along.\n{6}(.*)\n{6}APPENDIX',
    r'\n{5}(.*)\n{10}',
    r'\n{6}(.*)\n{10}'
]
# These represent meta elements of the text that must be stripped out, e.g. chapter headings.
rulesMeta = [
    r'\n(\d+.*)\n',
    r'\n(\d+\..*)\n',
    r'\n(Chapter \d+\..*)\n',
    r'\n(Chapter [XVIL]+\.)\n',
    r'\n(Chapter [XVIL]+\.)\n',
    r'\n{2}(Chapter [XVIL]+)\n',
    r'\n{2}(Chapter [XVIL]+)\n'
]
def getText(idx):
    """Load book ``idx``, strip chapter headings, and return the core story
    tokenized into sentences of word tokens.

    Args:
        idx (int): index into rulesStory/rulesMeta (0-6, see header comment).

    Returns:
        list[list[str]]: one list of word tokens per sentence.

    Raises:
        Exception: if the story-extraction regex does not match.
    """
    # BUGFIX: the file handle was previously opened and never closed; a
    # context manager guarantees it is released.
    with open(root+'/'+str(idx)+'.book', encoding='utf8') as fh:
        file = fh.read()
    m = re.search(rulesStory[idx], re.sub(rulesMeta[idx], '', file), re.DOTALL)
    if m:
        tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
        text = [WordPunctTokenizer().tokenize(s) for s in tokenizer.tokenize(m.group(1).rstrip().replace('\n', ' '))]
        t = []
        for sentence in text:
            s = []
            for word in sentence:
                # NOTE(review): this keeps only the two characters around
                # the first '-'/'"' match and drops the rest of the token —
                # presumably intended for '--' style punctuation; verify.
                r = re.search(r'(-|.)(-|")', word)
                s += [r.group(1), r.group(2)] if r else [word]
            t += [s]
        return t
        # return([w for s in t for w in s if w not in '.,:;()!?"\'_-'])
    else:
        raise Exception('Story regex failure in '+str(idx)+'.')
def getFuzzyList(word):
    """Return candidate dictionary forms for ``word``: the word itself, its
    lowercase form, and crudely de-suffixed stems (-s, -ed, -er, -ing, -ied).

    Args:
        word (str): token to expand.

    Returns:
        list[str]: candidates in priority order (most specific first).
    """
    candidates = [word, word.lower()]
    # endswith() also makes the empty string safe: the previous
    # ``word[-1] == 's'`` check raised IndexError on ''.
    if word.endswith('s'):
        candidates += [word[:-1], word[:-1].lower()]
    if word.endswith('ed'):
        candidates += [word[:-2], word[:-2].lower()]
    if word.endswith('er'):
        candidates += [word[:-2], word[:-2].lower()]
    if word.endswith('ing'):
        candidates += [word[:-3], word[:-3].lower()]
    if word.endswith('ied'):
        # 'tried' -> 'try'
        candidates += [word[:-3] + 'y', word[:-3].lower() + 'y']
    return candidates
def getFuzzyMatch(word, dict):
    """Return the first fuzzy variant of ``word`` present in ``dict``,
    or None when no variant matches."""
    return next((variant for variant in getFuzzyList(word) if variant in dict), None)
def isAdmissible(sentence, dict):
    """True iff every word in the sentence has a fuzzy match in ``dict``."""
    # getFuzzyMatch returns None (falsy) for unknown words, so all() gives
    # the same answer as the old early-return loop.
    return all(getFuzzyMatch(word, dict) for word in sentence)
def rate(pos, neg):
    """Fraction of positive outcomes among all outcomes."""
    total = pos + neg
    return pos / total
# Sampling helper adapted from lstm_example.py in the Keras examples subfolder.
def sample(a, temperature=1.0):
    """Draw an index from probability vector ``a`` after temperature
    rescaling (low temperature -> greedier, high -> more uniform)."""
    rescaled = np.log(a) / temperature
    weights = np.exp(rescaled)
    weights = weights / np.sum(weights)
    return np.argmax(np.random.multinomial(1, weights, 1))
| [
"nltk.tokenize.WordPunctTokenizer",
"numpy.log",
"numpy.random.multinomial",
"numpy.exp",
"os.path.abspath",
"re.sub",
"re.search"
] | [((125, 150), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (140, 150), False, 'import os\n'), ((1224, 1256), 're.sub', 're.sub', (['rulesMeta[idx]', '""""""', 'file'], {}), "(rulesMeta[idx], '', file)\n", (1230, 1256), False, 'import re\n'), ((2541, 2550), 'numpy.log', 'np.log', (['a'], {}), '(a)\n', (2547, 2550), True, 'import numpy as np\n'), ((2573, 2582), 'numpy.exp', 'np.exp', (['a'], {}), '(a)\n', (2579, 2582), True, 'import numpy as np\n'), ((2624, 2654), 'numpy.random.multinomial', 'np.random.multinomial', (['(1)', 'a', '(1)'], {}), '(1, a, 1)\n', (2645, 2654), True, 'import numpy as np\n'), ((2592, 2601), 'numpy.exp', 'np.exp', (['a'], {}), '(a)\n', (2598, 2601), True, 'import numpy as np\n'), ((1527, 1556), 're.search', 're.search', (['"""(-|.)(-|")"""', 'word'], {}), '(\'(-|.)(-|")\', word)\n', (1536, 1556), False, 'import re\n'), ((1349, 1369), 'nltk.tokenize.WordPunctTokenizer', 'WordPunctTokenizer', ([], {}), '()\n', (1367, 1369), False, 'from nltk.tokenize import WordPunctTokenizer\n')] |
from pathlib import Path
import hydra
import numpy as np
import torch
from hydra.utils import to_absolute_path
from nnsvs.base import PredictionType
from nnsvs.mdn import mdn_loss
from nnsvs.pitch import nonzero_segments
from nnsvs.train_util import save_checkpoint, setup
from nnsvs.util import make_non_pad_mask
from omegaconf import DictConfig, OmegaConf
from torch import nn
from tqdm import tqdm
def note_segments(lf0_score_denorm):
    """Compute note segments (start and end indices) from log-F0

    Note that unvoiced frames must be set to 0 in advance.

    Args:
        lf0_score_denorm (Tensor): (B, T)

    Returns:
        list: list of note (start, end) indices
    """
    segments = []
    for s, e in nonzero_segments(lf0_score_denorm):
        # 1 wherever two consecutive frames have a different pitch, 0 on
        # flat (same-note) regions
        out = torch.sign(torch.abs(torch.diff(lf0_score_denorm[s : e + 1])))
        transitions = torch.where(out > 0)[0]
        note_start, note_end = s, -1
        for pos in transitions:
            # close the current note at every pitch change and start the next
            note_end = int(s + pos)
            segments.append((note_start, note_end))
            note_start = note_end
        # NOTE(review): the tail span (last transition .. e) is never
        # appended — confirm this is intentional.
    return segments
def compute_pitch_regularization_weight(segments, N, decay_size=25, max_w=0.5):
    """Compute pitch regularization weight given note segments

    Args:
        segments (list): list of note (start, end) indices
        N (int): number of frames
        decay_size (int): size of the decay window
        max_w (float): maximum weight

    Returns:
        Tensor: weights of shape (N,)
    """
    weights = torch.zeros(N)
    # Linear ramps used to taper the edges of sufficiently long notes.
    ramp_up = torch.arange(decay_size) / decay_size
    ramp_down = torch.arange(decay_size - 1, -1, -1) / decay_size
    for start, end in segments:
        weights[start:end] = max_w
        # Taper both edges only when the note can hold both full ramps.
        if end - start > 2 * decay_size:
            weights[start : start + decay_size] *= ramp_up
            weights[end - decay_size : end] *= ramp_down
    return weights
def compute_batch_pitch_regularization_weight(lf0_score_denorm):
    """Batch version of computing pitch regularization weight

    Args:
        lf0_score_denorm (Tensor): (B, T)

    Returns:
        Tensor: weights of shape (B, N, 1)
    """
    w = torch.zeros_like(lf0_score_denorm)
    num_frames = lf0_score_denorm.shape[1]
    # One weight vector per utterance in the batch.
    for idx, lf0 in enumerate(lf0_score_denorm):
        per_frame = compute_pitch_regularization_weight(note_segments(lf0), num_frames)
        w[idx, :] = per_frame.to(w.device)
    return w.unsqueeze(-1)
def train_step(
    model,
    optimizer,
    train,
    in_feats,
    out_feats,
    lengths,
    pitch_reg_dyn_ws,
    pitch_reg_weight=1.0,
):
    """Run one forward (and, when training, backward) pass with pitch regularization.

    Args:
        model: acoustic model; must provide ``preprocess_target`` and
            ``prediction_type`` and return ``(pred_out_feats, lf0_residual)``.
        optimizer: optimizer; stepped only when ``train`` is True.
        train (bool): True for a training step, False for evaluation.
        in_feats (Tensor): padded input features.
        out_feats (Tensor): padded target features.
        lengths (Tensor): valid frame counts per utterance.
        pitch_reg_dyn_ws (Tensor): time-variant pitch regularization weights.
        pitch_reg_weight (float): global scale of the regularization term.

    Returns:
        Tensor: scalar total loss (data term + pitch regularization).
    """
    optimizer.zero_grad()
    criterion = nn.MSELoss(reduction="none")
    # Apply preprocess if required (e.g., FIR filter for shallow AR)
    # defaults to no-op
    out_feats = model.preprocess_target(out_feats)
    # Run forward
    pred_out_feats, lf0_residual = model(in_feats, lengths)
    # Mask (B, T, 1)
    mask = make_non_pad_mask(lengths).unsqueeze(-1).to(in_feats.device)
    # Compute loss
    if model.prediction_type() == PredictionType.PROBABILISTIC:
        # MDN head: negative log-likelihood over the masked frames
        pi, sigma, mu = pred_out_feats
        # (B, max(T)) or (B, max(T), D_out)
        mask_ = mask if len(pi.shape) == 4 else mask.squeeze(-1)
        # Compute loss and apply mask
        loss = mdn_loss(pi, sigma, mu, out_feats, reduce=False)
        loss = loss.masked_select(mask_).mean()
    else:
        # Deterministic head: masked MSE
        loss = criterion(
            pred_out_feats.masked_select(mask), out_feats.masked_select(mask)
        ).mean()
    # Pitch regularization
    # NOTE: l1 loss seems to be better than mse loss in my experiments
    # we could use l2 loss as suggested in the sinsy's paper
    loss += (
        pitch_reg_weight
        * (pitch_reg_dyn_ws * lf0_residual.abs()).masked_select(mask).mean()
    )
    if train:
        loss.backward()
        optimizer.step()
    return loss
def train_loop(
    config,
    logger,
    device,
    model,
    optimizer,
    lr_scheduler,
    data_loaders,
    writer,
    in_scaler,
):
    """Run the full training/validation loop for residual-F0 prediction.

    For every batch, the denormalized score log-F0 is recovered from the
    min-max-scaled input features so that a time-variant pitch
    regularization weight can be computed, then a train/eval step is taken.
    Checkpoints are written periodically and whenever the eval loss improves.

    Args:
        config: training configuration (DictConfig).
        logger: logger instance.
        device: device batches are moved to.
        model: acoustic model.
        optimizer: optimizer.
        lr_scheduler: LR scheduler, stepped once per epoch.
        data_loaders (dict): phase name -> DataLoader; phases whose name
            starts with "train" run with gradient updates.
        writer: tensorboard summary writer.
        in_scaler: input min-max scaler (used to denormalize log-F0).

    Raises:
        ValueError: if in_lf0_idx or in_rest_idx is unset in the data config.
    """
    out_dir = Path(to_absolute_path(config.train.out_dir))
    best_loss = torch.finfo(torch.float32).max
    in_lf0_idx = config.data.in_lf0_idx
    in_rest_idx = config.data.in_rest_idx
    if in_lf0_idx is None or in_rest_idx is None:
        raise ValueError("in_lf0_idx and in_rest_idx must be specified")
    pitch_reg_weight = config.train.pitch_reg_weight
    for epoch in tqdm(range(1, config.train.nepochs + 1)):
        for phase in data_loaders.keys():
            train = phase.startswith("train")
            if train:
                model.train()
            else:
                model.eval()
            running_loss = 0
            for in_feats, out_feats, lengths in data_loaders[phase]:
                # NOTE: This is needed for pytorch's PackedSequence
                lengths, indices = torch.sort(lengths, dim=0, descending=True)
                in_feats, out_feats = (
                    in_feats[indices].to(device),
                    out_feats[indices].to(device),
                )
                # Compute denormalized log-F0 in the musical scores
                lf0_score_denorm = (
                    in_feats[:, :, in_lf0_idx]
                    * float(
                        in_scaler.data_max_[in_lf0_idx]
                        - in_scaler.data_min_[in_lf0_idx]
                    )
                    + in_scaler.data_min_[in_lf0_idx]
                )
                # Fill zeros for rest and padded frames
                lf0_score_denorm *= (in_feats[:, :, in_rest_idx] <= 0).float()
                for idx, length in enumerate(lengths):
                    lf0_score_denorm[idx, length:] = 0
                # Compute time-variant pitch regularization weight vector
                pitch_reg_dyn_ws = compute_batch_pitch_regularization_weight(
                    lf0_score_denorm
                )
                loss = train_step(
                    model,
                    optimizer,
                    train,
                    in_feats,
                    out_feats,
                    lengths,
                    pitch_reg_dyn_ws,
                    pitch_reg_weight,
                )
                running_loss += loss.item()
            # BUGFIX: ave_loss was previously recomputed twice in a row;
            # compute it once and reuse it.
            ave_loss = running_loss / len(data_loaders[phase])
            writer.add_scalar(f"Loss/{phase}", ave_loss, epoch)
            logger.info("[%s] [Epoch %s]: loss %s", phase, epoch, ave_loss)
            if not train and ave_loss < best_loss:
                best_loss = ave_loss
                save_checkpoint(
                    logger, out_dir, model, optimizer, lr_scheduler, epoch, is_best=True
                )
        lr_scheduler.step()
        if epoch % config.train.checkpoint_epoch_interval == 0:
            save_checkpoint(
                logger, out_dir, model, optimizer, lr_scheduler, epoch, is_best=False
            )
    save_checkpoint(
        logger, out_dir, model, optimizer, lr_scheduler, config.train.nepochs
    )
    logger.info("The best loss was %s", best_loss)
def _check_resf0_config(logger, model, config, in_scaler, out_scaler):
    """Validate (and fill in) residual-F0 related parameters on the model.

    Cross-checks the lf0 feature indices between the model and the data
    config, injects lf0 min/max (input) and mean/scale (output) statistics
    from the scalers when they are unset, and finally writes the resolved
    values back into ``config.model.netG``.

    Raises:
        ValueError: if scalers or any required index is missing, or if the
            model's lf0 settings disagree with the scalers/config.
    """
    logger.info("Checking model configs for residual F0 prediction")
    if in_scaler is None or out_scaler is None:
        raise ValueError("in_scaler and out_scaler must be specified")
    in_lf0_idx = config.data.in_lf0_idx
    in_rest_idx = config.data.in_rest_idx
    out_lf0_idx = config.data.out_lf0_idx
    if in_lf0_idx is None or in_rest_idx is None or out_lf0_idx is None:
        raise ValueError("in_lf0_idx, in_rest_idx and out_lf0_idx must be specified")
    logger.info("in_lf0_idx: %s", in_lf0_idx)
    logger.info("in_rest_idx: %s", in_rest_idx)
    logger.info("out_lf0_idx: %s", out_lf0_idx)
    ok = True
    # NOTE(review): logger.warn is deprecated (use logger.warning), and the
    # extra positional args below have no matching %s placeholders in the
    # message — they are silently dropped / may trigger a format error.
    if hasattr(model, "in_lf0_idx"):
        if model.in_lf0_idx != in_lf0_idx:
            logger.warn(
                "in_lf0_idx in model and data config must be same",
                model.in_lf0_idx,
                in_lf0_idx,
            )
            ok = False
    if hasattr(model, "out_lf0_idx"):
        if model.out_lf0_idx != out_lf0_idx:
            logger.warn(
                "out_lf0_idx in model and data config must be same",
                model.out_lf0_idx,
                out_lf0_idx,
            )
            ok = False
    if hasattr(model, "in_lf0_min") and hasattr(model, "in_lf0_max"):
        # Inject values from the input scaler
        if model.in_lf0_min is None or model.in_lf0_max is None:
            model.in_lf0_min = in_scaler.data_min_[in_lf0_idx]
            model.in_lf0_max = in_scaler.data_max_[in_lf0_idx]
        logger.info("in_lf0_min: %s", model.in_lf0_min)
        logger.info("in_lf0_max: %s", model.in_lf0_max)
        if not np.allclose(model.in_lf0_min, in_scaler.data_min_[model.in_lf0_idx]):
            logger.warn(
                f"in_lf0_min is set to {model.in_lf0_min}, "
                f"but should be {in_scaler.data_min_[model.in_lf0_idx]}"
            )
            ok = False
        if not np.allclose(model.in_lf0_max, in_scaler.data_max_[model.in_lf0_idx]):
            logger.warn(
                f"in_lf0_max is set to {model.in_lf0_max}, "
                f"but should be {in_scaler.data_max_[model.in_lf0_idx]}"
            )
            ok = False
    if hasattr(model, "out_lf0_mean") and hasattr(model, "out_lf0_scale"):
        # Inject values from the output scaler
        if model.out_lf0_mean is None or model.out_lf0_scale is None:
            model.out_lf0_mean = out_scaler.mean_[out_lf0_idx]
            model.out_lf0_scale = out_scaler.scale_[out_lf0_idx]
        logger.info("model.out_lf0_mean: %s", model.out_lf0_mean)
        logger.info("model.out_lf0_scale: %s", model.out_lf0_scale)
        if not np.allclose(model.out_lf0_mean, out_scaler.mean_[model.out_lf0_idx]):
            logger.warn(
                f"out_lf0_mean is set to {model.out_lf0_mean}, "
                f"but should be {out_scaler.mean_[model.out_lf0_idx]}"
            )
            ok = False
        if not np.allclose(model.out_lf0_scale, out_scaler.scale_[model.out_lf0_idx]):
            logger.warn(
                f"out_lf0_scale is set to {model.out_lf0_scale}, "
                f"but should be {out_scaler.scale_[model.out_lf0_idx]}"
            )
            ok = False
    if not ok:
        # Give the user a copy-pastable config snippet before failing.
        if (
            model.in_lf0_idx == in_lf0_idx
            and hasattr(model, "in_lf0_min")
            and hasattr(model, "out_lf0_mean")
        ):
            logger.info(
                f"""
If you are 100% sure that you set model.in_lf0_idx and model.out_lf0_idx correctly,
Please consider the following parameters in your model config:
  in_lf0_idx: {model.in_lf0_idx}
  out_lf0_idx: {model.out_lf0_idx}
  in_lf0_min: {in_scaler.data_min_[model.in_lf0_idx]}
  in_lf0_max: {in_scaler.data_max_[model.in_lf0_idx]}
  out_lf0_mean: {out_scaler.mean_[model.out_lf0_idx]}
  out_lf0_scale: {out_scaler.scale_[model.out_lf0_idx]}
"""
            )
        raise ValueError("The model config has wrong configurations.")
    # Overwrite the parameters to the config
    for key in ["in_lf0_min", "in_lf0_max", "out_lf0_mean", "out_lf0_scale"]:
        config.model.netG[key] = float(getattr(model, key))
@hydra.main(config_path="conf/train_resf0", config_name="config")
def my_app(config: DictConfig) -> None:
    """Hydra entry point: set up everything and run the training loop.

    Args:
        config: merged hydra configuration (see conf/train_resf0/config.yaml).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    (
        model,
        optimizer,
        lr_scheduler,
        data_loaders,
        writer,
        logger,
        in_scaler,
        out_scaler,
    ) = setup(config, device)
    # Validate (and possibly fill in) residual-F0 specific model parameters.
    _check_resf0_config(logger, model, config, in_scaler, out_scaler)
    # Save configs again in case the model config has been changed
    out_dir = Path(to_absolute_path(config.train.out_dir))
    with open(out_dir / "config.yaml", "w") as f:
        OmegaConf.save(config, f)
    with open(out_dir / "model.yaml", "w") as f:
        OmegaConf.save(config.model, f)
    train_loop(
        config,
        logger,
        device,
        model,
        optimizer,
        lr_scheduler,
        data_loaders,
        writer,
        in_scaler,
    )
def entry():
    """Console-script entry point; defers to the hydra-decorated my_app."""
    my_app()
# Allow running the module directly as well as via the console script.
if __name__ == "__main__":
    my_app()
| [
"torch.sort",
"numpy.allclose",
"hydra.main",
"nnsvs.mdn.mdn_loss",
"nnsvs.pitch.nonzero_segments",
"nnsvs.train_util.save_checkpoint",
"torch.nn.MSELoss",
"nnsvs.train_util.setup",
"hydra.utils.to_absolute_path",
"omegaconf.OmegaConf.save",
"torch.finfo",
"torch.cuda.is_available",
"nnsvs.u... | [((11126, 11190), 'hydra.main', 'hydra.main', ([], {'config_path': '"""conf/train_resf0"""', 'config_name': '"""config"""'}), "(config_path='conf/train_resf0', config_name='config')\n", (11136, 11190), False, 'import hydra\n'), ((723, 757), 'nnsvs.pitch.nonzero_segments', 'nonzero_segments', (['lf0_score_denorm'], {}), '(lf0_score_denorm)\n', (739, 757), False, 'from nnsvs.pitch import nonzero_segments\n'), ((1500, 1514), 'torch.zeros', 'torch.zeros', (['N'], {}), '(N)\n', (1511, 1514), False, 'import torch\n'), ((2079, 2113), 'torch.zeros_like', 'torch.zeros_like', (['lf0_score_denorm'], {}), '(lf0_score_denorm)\n', (2095, 2113), False, 'import torch\n'), ((2516, 2544), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (2526, 2544), False, 'from torch import nn\n'), ((6768, 6859), 'nnsvs.train_util.save_checkpoint', 'save_checkpoint', (['logger', 'out_dir', 'model', 'optimizer', 'lr_scheduler', 'config.train.nepochs'], {}), '(logger, out_dir, model, optimizer, lr_scheduler, config.\n train.nepochs)\n', (6783, 6859), False, 'from nnsvs.train_util import save_checkpoint, setup\n'), ((11468, 11489), 'nnsvs.train_util.setup', 'setup', (['config', 'device'], {}), '(config, device)\n', (11473, 11489), False, 'from nnsvs.train_util import save_checkpoint, setup\n'), ((3149, 3197), 'nnsvs.mdn.mdn_loss', 'mdn_loss', (['pi', 'sigma', 'mu', 'out_feats'], {'reduce': '(False)'}), '(pi, sigma, mu, out_feats, reduce=False)\n', (3157, 3197), False, 'from nnsvs.mdn import mdn_loss\n'), ((3905, 3943), 'hydra.utils.to_absolute_path', 'to_absolute_path', (['config.train.out_dir'], {}), '(config.train.out_dir)\n', (3921, 3943), False, 'from hydra.utils import to_absolute_path\n'), ((3961, 3987), 'torch.finfo', 'torch.finfo', (['torch.float32'], {}), '(torch.float32)\n', (3972, 3987), False, 'import torch\n'), ((11648, 11686), 'hydra.utils.to_absolute_path', 'to_absolute_path', (['config.train.out_dir'], {}), 
'(config.train.out_dir)\n', (11664, 11686), False, 'from hydra.utils import to_absolute_path\n'), ((11746, 11771), 'omegaconf.OmegaConf.save', 'OmegaConf.save', (['config', 'f'], {}), '(config, f)\n', (11760, 11771), False, 'from omegaconf import DictConfig, OmegaConf\n'), ((11829, 11860), 'omegaconf.OmegaConf.save', 'OmegaConf.save', (['config.model', 'f'], {}), '(config.model, f)\n', (11843, 11860), False, 'from omegaconf import DictConfig, OmegaConf\n'), ((858, 878), 'torch.where', 'torch.where', (['(out > 0)'], {}), '(out > 0)\n', (869, 878), False, 'import torch\n'), ((6646, 6736), 'nnsvs.train_util.save_checkpoint', 'save_checkpoint', (['logger', 'out_dir', 'model', 'optimizer', 'lr_scheduler', 'epoch'], {'is_best': '(False)'}), '(logger, out_dir, model, optimizer, lr_scheduler, epoch,\n is_best=False)\n', (6661, 6736), False, 'from nnsvs.train_util import save_checkpoint, setup\n'), ((8609, 8677), 'numpy.allclose', 'np.allclose', (['model.in_lf0_min', 'in_scaler.data_min_[model.in_lf0_idx]'], {}), '(model.in_lf0_min, in_scaler.data_min_[model.in_lf0_idx])\n', (8620, 8677), True, 'import numpy as np\n'), ((8890, 8958), 'numpy.allclose', 'np.allclose', (['model.in_lf0_max', 'in_scaler.data_max_[model.in_lf0_idx]'], {}), '(model.in_lf0_max, in_scaler.data_max_[model.in_lf0_idx])\n', (8901, 8958), True, 'import numpy as np\n'), ((9627, 9695), 'numpy.allclose', 'np.allclose', (['model.out_lf0_mean', 'out_scaler.mean_[model.out_lf0_idx]'], {}), '(model.out_lf0_mean, out_scaler.mean_[model.out_lf0_idx])\n', (9638, 9695), True, 'import numpy as np\n'), ((9910, 9980), 'numpy.allclose', 'np.allclose', (['model.out_lf0_scale', 'out_scaler.scale_[model.out_lf0_idx]'], {}), '(model.out_lf0_scale, out_scaler.scale_[model.out_lf0_idx])\n', (9921, 9980), True, 'import numpy as np\n'), ((11267, 11292), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11290, 11292), False, 'import torch\n'), ((794, 831), 'torch.diff', 'torch.diff', 
(['lf0_score_denorm[s:e + 1]'], {}), '(lf0_score_denorm[s:e + 1])\n', (804, 831), False, 'import torch\n'), ((1651, 1675), 'torch.arange', 'torch.arange', (['decay_size'], {}), '(decay_size)\n', (1663, 1675), False, 'import torch\n'), ((1726, 1762), 'torch.arange', 'torch.arange', (['(decay_size - 1)', '(-1)', '(-1)'], {}), '(decay_size - 1, -1, -1)\n', (1738, 1762), False, 'import torch\n'), ((4653, 4696), 'torch.sort', 'torch.sort', (['lengths'], {'dim': '(0)', 'descending': '(True)'}), '(lengths, dim=0, descending=True)\n', (4663, 4696), False, 'import torch\n'), ((6417, 6506), 'nnsvs.train_util.save_checkpoint', 'save_checkpoint', (['logger', 'out_dir', 'model', 'optimizer', 'lr_scheduler', 'epoch'], {'is_best': '(True)'}), '(logger, out_dir, model, optimizer, lr_scheduler, epoch,\n is_best=True)\n', (6432, 6506), False, 'from nnsvs.train_util import save_checkpoint, setup\n'), ((2802, 2828), 'nnsvs.util.make_non_pad_mask', 'make_non_pad_mask', (['lengths'], {}), '(lengths)\n', (2819, 2828), False, 'from nnsvs.util import make_non_pad_mask\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import Perceptron
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import ComplementNB
class ModelAlteration():
    """Hyper-parameter search helpers for several sklearn classifiers.

    Each ``optimize_*`` method grid-searches a small parameter space, scores
    every combination with stratified k-fold cross validation
    (``strat_kfold_evaluation``), optionally plots the accuracies, and
    returns the best fold together with the winning parameter dict.
    """
    def strat_kfold_evaluation(
            self,
            df,
            model,
            target:int,
            folds:int,
            shuffle:bool=True,
            random_state:int=None) -> [float, ([],[])]:
        '''
        Evaluates a classifier with stratified k-fold cross validation.

        Parameters
        ------------
        df          : Your dataframe
        model       : A scikitlearn model used to classify labels
        target      : The index of your target column
        folds       : How often your dataframe should be split
        shuffle     : Specifies if the samples should be shuffled
        random_state: If shuffle=True, random_state specifies the used seed.
                      if None, shuffle will always be random.

        Returns
        ------------
        accuracy  : A list which contains the accuracy of the model over each folds
        best_fold : The fold with the highest accuracy with the used model
        '''
        data, target = df.loc[:, df.columns!=target].values, df[target].values
        skf = StratifiedKFold(n_splits=folds, shuffle=shuffle, random_state=random_state)
        accuracy = [0 for _ in range(folds)]
        best_fold = []
        for i, index in enumerate(skf.split(data, target)):
            x_train, x_test = data[index[0]], data[index[1]]
            y_train, y_test = target[index[0]], target[index[1]]
            model.fit(x_train, y_train)
            accuracy[i] = (model.score(x_test, y_test))*100
            # NOTE(review): max(accuracy[:-1]) raises ValueError when
            # folds == 1 (empty slice) and only ever excludes the *last*
            # slot; max(accuracy[:i], default=0) was probably intended.
            if accuracy[i] >= max(accuracy[:-1]): best_fold = index
        return(accuracy, best_fold)

    def plot_accuracy(self, acc:[[float]], xlab:str, legend:[str], xaxis:[]=[]):
        '''
        Plots all permutation of the parameters.
        ------------
        acc    :[[float]]
            Contains the accuracy of all folds.
        xlab   :String
            Contains the name for the x-axis.
        legend :[String]
            Contains the values for the plot legend.
        xaxis  :[int] or [float]
            Contains values for the x-axis.
        '''
        # NOTE(review): mutable default argument (xaxis=[]) is shared across
        # calls; it is only read here, but xaxis=None would be safer.
        plt.xlabel(xlab)
        plt.ylabel('Accuracy [%]')
        # NOTE(review): len(acc)>0 does not actually detect a flat list of
        # floats; a non-empty flat list is passed through unwrapped.
        acc = acc if len(acc)>0 else [acc]
        if not xaxis:
            for i, accuracy in enumerate(acc):
                plt.plot(range(len(accuracy)), accuracy, label = legend[i])
        else:
            for i, accuracy in enumerate(acc):
                plt.plot(xaxis, accuracy, label = legend[i])
        plt.legend(loc="upper left")
        plt.show()

    def optimize_knn(self,
            df,
            target:int,
            neighbours:[int] = list(range(1,11)),
            metric:[int]=[1,2,3],
            folds:int = 10,
            plot:bool=True):
        '''
        Attempts to find the most optimal model parameters for the k-nearest-
        neighbours (kNN) classifier by finding the best fold for each permutation
        of the parameters. The best fold is determined by strat_kfold_evaluation().
        The accuracy of all best folds is then compared and the parameters of
        the best fold are returned (in addition to the fold itself)

        Parameters
        ------------
        df : dataframe
            Your datatable
        target : int
            The index of your target column
        neighbours : [int]
            A list which contains the number of neighbors which should be used in kNN.
        metric : [int]
            Which metric should be used for kNN
            1   - Manhattan
            2   - Euclidean
            3>= - Minkowski
        folds : int
            How often your dataframe should be split in strat_kfold_evaluation
        plot : bool
            Plots the accuracies over each fold

        Returns
        ------------
        best_fold: (np.array(int), {model_parameters})
            l   : An indexlist of the fold which has performed best overall
            dic : And a dict with the model parameters for the best fold
        '''
        best_acc, best_model, fold_acc = 0, 0, [[None for _ in neighbours] for _ in metric]
        epoch, end = 1, len(neighbours)*len(metric)
        for i,m in enumerate(metric):
            for j,k in enumerate(neighbours):
                model = KNeighborsClassifier(n_neighbors=k, p = m)
                # Lambda collapses (accuracies, fold) -> (best accuracy, fold)
                fold_acc[i][j], tmp_fold = (lambda x: [max(x[0]), x[1]])(self.strat_kfold_evaluation(df, model, target, folds))
                if fold_acc[i][j] > best_acc:
                    best_acc = fold_acc[i][j]
                    best_model = (tmp_fold, {"n_neighbors" : k, "p" : m})
                print("Epoch %s/%s | neighbours=%s, metric=%s, Accuracy=%s" % (epoch, end, k, m, fold_acc[i][j]))
                epoch += 1
        if plot: self.plot_accuracy(fold_acc, "Number of neighbours", list(map(lambda x: "Metric " + x, list(map(str, metric)))), neighbours)
        return(best_model)

    def optimize_perceptron(self,
            df,
            target:int,
            learning_rate:[float] = np.linspace(1, 20, num=20),
            penalty:[int]=[0,1,2,3],
            folds:int = 10,
            plot:bool=True):
        '''
        Attempts to find the most optimal model parameters for the perceptron
        classifier by finding the best fold for each permutation of the
        parameters. The best fold is determined by strat_kfold_evaluation().
        The accuracy of all best folds is then compared and the parameters of
        the best fold are returned (in addition to the fold itself)

        Parameters
        ------------
        df : dataframe
            Your datatable
        target : int
            The index of your target column
        learning_rate : [float]
            A list containing the number of learning_rates the algorithm should
            try out
        penalty : [int]
            Which penalty should be used
            0 - None
            1 - l1
            2 - l2
            3 - elasticnet
        folds : int
            How often your dataframe should be split in strat_kfold_evaluation
        plot : bool
            Plots the accuracies over each fold

        Returns
        ------------
        best_fold: (np.array(int), {model_parameters})
            l   : An indexlist of the fold which has performed best overall
            dic : And a dict with the model parameters for the best fold
        '''
        best_acc, best_model, fold_acc = 0, 0, [[None for _ in learning_rate] for _ in penalty]
        epoch, end = 1, len(learning_rate)*len(penalty)
        # Map the integer codes onto sklearn's penalty names.
        penalty = list(map((lambda x, d={0:None, 1:"l1", 2:"l2", 3:"elasticnet"}: d[x]), penalty))
        for i, m in enumerate(penalty):
            for j, k in enumerate(learning_rate):
                model = Perceptron(eta0=k, penalty=m)
                fold_acc[i][j], tmp_fold = (lambda x: [max(x[0]), x[1]])(self.strat_kfold_evaluation(df, model, target, folds))
                if fold_acc[i][j] > best_acc:
                    best_acc = fold_acc[i][j]
                    best_model = (tmp_fold, { "eta0" : k, "penalty" : m})
                print("Epoch %s/%s | learning_rate=%s, penalty=%s, Accuracy=%s" % (epoch, end, k, m, fold_acc[i][j]))
                epoch += 1
        if plot: self.plot_accuracy(fold_acc, "Used learning_rate", list(map(lambda x: "penalty: " + str(x), penalty)), list(learning_rate))
        return(best_model)

    def optimize_SVM(self,
            df,
            target:int,
            regularization:[float] = np.linspace(1, 10, num=10),
            kernel:[int]=[1,2,3],
            folds:int = 10,
            plot:bool=True):
        '''
        Attempts to find the most optimal model parameters for the SVM
        classifier by finding the best fold for each permutation of the
        parameters. The best fold is determined by strat_kfold_evaluation().
        The accuracy of all best folds is then compared and the parameters of
        the best fold are returned (in addition to the fold itself)

        Parameters
        ------------
        df : dataframe
            Your datatable
        target : int
            The index of your target column
        regularization: [float]
            A list containing all penalties which should be tried out on the
            respective kernel function
        kernel : [int]
            Which kernel functions should be used (refers to sklearn.svm.SVC)
            0 - Linear (Takes a long time without dimension reduction)
            1 - Poly
            2 - rbf
            3 - sigmoid
            4 - precomputed (Look at Sklearns documentary first if you want to use it)
        folds : int
            How often your dataframe should be split in strat_kfold_evaluation
        plot : bool
            Plots the accuracies over each fold if True

        Returns
        ------------
        best_fold: (np.array(int), {model_parameters})
            l   : An indexlist of the fold which has performed best overall
            dic : And a dict with the model parameters for the best fold
        '''
        best_acc, best_model, fold_acc = 0, 0, [[None for _ in regularization] for _ in kernel]
        epoch, end = 1, len(regularization)*len(kernel)
        # Map the integer codes onto sklearn's kernel names.
        kernel = list(map((lambda x, d={0:"linear", 1:"poly", 2:"rbf", 3:"sigmoid"}: d[x]), kernel))
        for i, kern in enumerate(kernel):
            for j, reg in enumerate(regularization):
                model = SVC(C=reg, kernel=kern)
                fold_acc[i][j], tmp_fold = (lambda x: [max(x[0]), x[1]])(self.strat_kfold_evaluation(df, model, target, folds))
                if fold_acc[i][j] > best_acc:
                    best_acc = fold_acc[i][j]
                    best_model = (tmp_fold, {"C" :reg, "kernel" :kern})
                print("Epoch %s/%s | regularization = %s, kernel = %s, Accuracy = %s" % (epoch, end, reg, kern, fold_acc[i][j]))
                epoch += 1
        if plot: self.plot_accuracy(fold_acc, "Used regularization", list(map(lambda x: "kernel: " + str(x), kernel)), list(regularization))
        return(best_model)

    def optimize_decision_tree(self,
            df,
            target:int,
            criterion = ["gini", "entropy"],
            max_depth:[int]= np.linspace(1, 10, num=10),
            splitter = ["best", "random"],
            folds:int = 10,
            plot:bool=True):
        '''
        Attempts to find the most optimal model parameters for the decision tree
        classifier by finding the best fold for each permutation of the
        parameters. The best fold is determined by strat_kfold_evaluation().
        The accuracy of all best folds is then compared and the parameters of
        the best fold are returned (in addition to the fold itself)

        Parameters
        ------------
        df : dataframe
            Your datatable
        target : int
            The index of your target column
        criterion : [String]
            A list containing "gini" and "entropy"
        max_depth : [int]
            A list containing the number of max_depth the algorithm should
            try out
        splitter : [String]
            A list containing "best" and "random"
        folds : int
            How often your dataframe should be split in strat_kfold_evaluation
        plot : bool
            Plots the accuracies over each fold

        Returns
        ------------
        best_fold: (np.array(int), {model_parameters})
            l   : An indexlist of the fold which has performed best overall
            dic : And a dict with the model parameters for the best fold
        '''
        best_acc, best_model, fold_acc = 0, 0, [[[None for _ in max_depth] for _ in splitter] for _ in criterion]
        epoch, end = 1, len(criterion)*len(splitter)*len(max_depth)
        for i, cri in enumerate(criterion):
            for j, split in enumerate(splitter):
                for k, max_d in enumerate(max_depth):
                    model = DecisionTreeClassifier(criterion = cri, splitter = split, max_depth = max_d)
                    fold_acc[i][j][k], tmp_fold = (lambda x: [max(x[0]), x[1]])(self.strat_kfold_evaluation(df, model, target, folds))
                    if fold_acc[i][j][k] > best_acc:
                        best_acc = fold_acc[i][j][k]
                        best_model = (tmp_fold, {"criterion": cri, "splitter": split, "max_depth": max_d})
                    print("Epoch %s/%s | criterion = %s, splitter = %s, max_depth = %s, Accuracy = %s" % (epoch, end, cri, split, max_d, fold_acc[i][j][k]))
                    epoch += 1
        if plot:
            # Flatten the 3-D accuracy grid to 2-D for plotting and build a
            # "crit/split" label for every flattened row.
            tmp, fold_acc = [], [x for y in fold_acc for x in y]
            for i, _ in enumerate(criterion):
                tmp += list(map(lambda x, y : "crit: " + str(x) + " split: " + str(y), [criterion[i]]*len(criterion), splitter))
            self.plot_accuracy(fold_acc, "Used max depth", tmp, list(max_depth))
        return(best_model)

    def optimize_NB(self,
            df,
            target:int,
            alpha:[float]= np.linspace(1, 10, num=10),
            fit_prior:[bool] = [True, False],
            folds:int = 10,
            plot:bool=True):
        '''
        Attempts to find the most optimal model parameters for the NB
        classifier by finding the best fold for each permutation of the
        parameters. The best fold is determined by strat_kfold_evaluation().
        The accuracy of all best folds is then compared and the parameters of
        the best fold are returned (in addition to the fold itself)

        Parameters
        ------------
        df : dataframe
            Your datatable
        target : int
            The index of your target column
        fit_prior : [bool]
            A list of True and False
        alpha : [int]
            A list containing the number of alpha, the algorithm should
            try out
        folds : int
            How often your dataframe should be split in strat_kfold_evaluation
        plot : bool
            Plots the accuracies over each fold

        Returns
        ------------
        best_fold: (np.array(int), {model_parameters})
            l   : An indexlist of the fold which has performed best overall
            dic : And a dict with the model parameters for the best fold
        '''
        best_acc, best_model, fold_acc = 0, 0, [[None for _ in alpha ] for _ in fit_prior]
        epoch, end = 1, len(alpha)*len(fit_prior)
        for i, fit_p in enumerate(fit_prior):
            for j, alp in enumerate(alpha):
                model = ComplementNB(alpha = alp, fit_prior = fit_p)
                fold_acc[i][j], tmp_fold = (lambda x: [max(x[0]), x[1]])(self.strat_kfold_evaluation(df, model, target, folds))
                if fold_acc[i][j] > best_acc:
                    best_acc = fold_acc[i][j]
                    best_model = (tmp_fold, {"alpha" : alp, "fit_prior" : fit_p})
                print("Epoch %s/%s | fit_prior = %s, alpha = %s, Accuracy = %s" % (epoch, end, fit_p, alp, fold_acc[i][j]))
                epoch += 1
        if plot: self.plot_accuracy(fold_acc, "Used alpha", list(map(lambda x: "fit_prior: " + str(x), fit_prior)), list(alpha))
        return(best_model)
| [
"sklearn.naive_bayes.ComplementNB",
"sklearn.svm.SVC",
"sklearn.linear_model.Perceptron",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.model_selection.StratifiedKFold",
"n... | [((1312, 1387), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'folds', 'shuffle': 'shuffle', 'random_state': 'random_state'}), '(n_splits=folds, shuffle=shuffle, random_state=random_state)\n', (1327, 1387), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((2412, 2428), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlab'], {}), '(xlab)\n', (2422, 2428), True, 'import matplotlib.pyplot as plt\n'), ((2438, 2464), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy [%]"""'], {}), "('Accuracy [%]')\n", (2448, 2464), True, 'import matplotlib.pyplot as plt\n'), ((2793, 2821), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (2803, 2821), True, 'import matplotlib.pyplot as plt\n'), ((2831, 2841), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2839, 2841), True, 'import matplotlib.pyplot as plt\n'), ((5322, 5348), 'numpy.linspace', 'np.linspace', (['(1)', '(20)'], {'num': '(20)'}), '(1, 20, num=20)\n', (5333, 5348), True, 'import numpy as np\n'), ((7824, 7850), 'numpy.linspace', 'np.linspace', (['(1)', '(10)'], {'num': '(10)'}), '(1, 10, num=10)\n', (7835, 7850), True, 'import numpy as np\n'), ((10584, 10610), 'numpy.linspace', 'np.linspace', (['(1)', '(10)'], {'num': '(10)'}), '(1, 10, num=10)\n', (10595, 10610), True, 'import numpy as np\n'), ((13418, 13444), 'numpy.linspace', 'np.linspace', (['(1)', '(10)'], {'num': '(10)'}), '(1, 10, num=10)\n', (13429, 13444), True, 'import numpy as np\n'), ((2737, 2779), 'matplotlib.pyplot.plot', 'plt.plot', (['xaxis', 'accuracy'], {'label': 'legend[i]'}), '(xaxis, accuracy, label=legend[i])\n', (2745, 2779), True, 'import matplotlib.pyplot as plt\n'), ((4560, 4600), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'k', 'p': 'm'}), '(n_neighbors=k, p=m)\n', (4580, 4600), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((7078, 7107), 
'sklearn.linear_model.Perceptron', 'Perceptron', ([], {'eta0': 'k', 'penalty': 'm'}), '(eta0=k, penalty=m)\n', (7088, 7107), False, 'from sklearn.linear_model import Perceptron\n'), ((9788, 9811), 'sklearn.svm.SVC', 'SVC', ([], {'C': 'reg', 'kernel': 'kern'}), '(C=reg, kernel=kern)\n', (9791, 9811), False, 'from sklearn.svm import SVC\n'), ((14956, 14996), 'sklearn.naive_bayes.ComplementNB', 'ComplementNB', ([], {'alpha': 'alp', 'fit_prior': 'fit_p'}), '(alpha=alp, fit_prior=fit_p)\n', (14968, 14996), False, 'from sklearn.naive_bayes import ComplementNB\n'), ((12334, 12404), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'criterion': 'cri', 'splitter': 'split', 'max_depth': 'max_d'}), '(criterion=cri, splitter=split, max_depth=max_d)\n', (12356, 12404), False, 'from sklearn.tree import DecisionTreeClassifier\n')] |
import paddle
import numpy as np
from ppgan.models.generators.generator_styleganv2ada import StyleGANv2ADA_AugmentPipe
# Gradient-parity check: runs the paddle StyleGANv2-ADA augment pipe on
# inputs saved from a PyTorch reference run ('55.npz') and compares both the
# forward outputs and the input gradients between the two frameworks.

# Default configuration
xflip = 0
rotate90 = 0
xint = 0
xint_max = 0.125
scale = 0
rotate = 0
aniso = 0
xfrac = 0
scale_std = 0.2
rotate_max = 1
aniso_std = 0.2
xfrac_std = 0.125
brightness = 0
contrast = 0
lumaflip = 0
hue = 0
saturation = 0
brightness_std = 0.2
contrast_std = 0.5
hue_max = 1
saturation_std = 1
imgfilter = 0
imgfilter_bands = [1, 1, 1, 1]
imgfilter_std = 1
noise = 0
cutout = 0
noise_std = 0.1
cutout_size = 0.5

# afhqcat configuration
# xflip = 1
# rotate90 = 1
# xint = 1
# xint_max = 0.125
# scale = 1
# rotate = 1
# aniso = 1
# xfrac = 1
# scale_std = 0.2
# rotate_max = 1
# aniso_std = 0.2
# xfrac_std = 0.125
# brightness = 1
# contrast = 1
# lumaflip = 1
# hue = 1
# saturation = 1
# brightness_std = 0.2
# contrast_std = 0.5
# hue_max = 1
# saturation_std = 1
# imgfilter = 0
# imgfilter_bands = [1, 1, 1, 1]
# imgfilter_std = 1
# noise = 0
# cutout = 0
# noise_std = 0.1
# cutout_size = 0.5

# Everything enabled, except noise = 0
xflip = 1
rotate90 = 1
xint = 1
xint_max = 0.125
scale = 1
rotate = 1
aniso = 1
xfrac = 1
scale_std = 0.2
rotate_max = 1
aniso_std = 0.2
xfrac_std = 0.125
brightness = 1
contrast = 1
lumaflip = 1
hue = 1
saturation = 1
brightness_std = 0.2
contrast_std = 0.5
hue_max = 1
saturation_std = 1
imgfilter = 1
imgfilter_bands = [1, 1, 1, 1]
imgfilter_std = 1
noise = 0
cutout = 1
noise_std = 0.1
cutout_size = 0.5

lr = 0.0001

model = StyleGANv2ADA_AugmentPipe(xflip, rotate90, xint, xint_max,
    scale, rotate, aniso, xfrac, scale_std, rotate_max, aniso_std, xfrac_std,
    brightness, contrast, lumaflip, hue, saturation, brightness_std, contrast_std, hue_max, saturation_std,
    imgfilter, imgfilter_bands, imgfilter_std,
    noise, cutout, noise_std, cutout_size)
model.train()
# optimizer = paddle.optimizer.Momentum(parameters=model.parameters(), learning_rate=lr, momentum=0.9)
# model.set_state_dict(paddle.load("55.pdparams"))

# Fixed percentile makes the (normally random) augmentations deterministic,
# so outputs are comparable to the PyTorch reference dump.
debug_percentile = 0.7
dic2 = np.load('55.npz')
for batch_idx in range(8):
    print('======================== batch_%.3d ========================'%batch_idx)
    # optimizer.clear_gradients()
    x = dic2['batch_%.3d.input0'%batch_idx]
    y_pytorch = dic2['batch_%.3d.output'%batch_idx]
    dy_dx_pytorch = dic2['batch_%.3d.dy_dx'%batch_idx]
    x = paddle.to_tensor(x)
    x.stop_gradient = False
    y = model(x, debug_percentile)
    # dy_dx = paddle.grad(outputs=[y.sum()], inputs=[x], create_graph=True)[0]
    # dy_dx = paddle.grad(outputs=[y.sum()], inputs=[x], create_graph=False)[0]
    # Backprop through the model's custom grad_layer with d(sum(y))/dy = 1.
    dysum_dy = paddle.ones(y.shape, dtype=paddle.float32)
    dy_dx = model.grad_layer(dysum_dy)
    y_paddle = y.numpy()
    # Sum / mean of squared differences against the PyTorch reference.
    ddd = np.sum((y_pytorch - y_paddle) ** 2)
    print('ddd=%.6f' % ddd)
    dy_dx_paddle = dy_dx.numpy()
    ddd = np.sum((dy_dx_pytorch - dy_dx_paddle) ** 2)
    print('ddd=%.6f' % ddd)
    ddd = np.mean((y_pytorch - y_paddle) ** 2)
    print('ddd=%.6f' % ddd)
    ddd = np.mean((dy_dx_pytorch - dy_dx_paddle) ** 2)
    print('ddd=%.6f' % ddd)
    # loss = dy_dx.sum() + y.sum()
    # loss = y.sum()
    # loss.backward()
    # optimizer.step()
print('================= last dy_dx =================')
print('dy_dx_pytorch[:, :2, :2, :2]=\n', dy_dx_pytorch[:, :2, :2, :2])
print()
print('dy_dx_paddle[:, :2, :2, :2]=\n', dy_dx_paddle[:, :2, :2, :2])
print()
| [
"numpy.mean",
"paddle.ones",
"ppgan.models.generators.generator_styleganv2ada.StyleGANv2ADA_AugmentPipe",
"numpy.sum",
"paddle.to_tensor",
"numpy.load"
] | [((1463, 1797), 'ppgan.models.generators.generator_styleganv2ada.StyleGANv2ADA_AugmentPipe', 'StyleGANv2ADA_AugmentPipe', (['xflip', 'rotate90', 'xint', 'xint_max', 'scale', 'rotate', 'aniso', 'xfrac', 'scale_std', 'rotate_max', 'aniso_std', 'xfrac_std', 'brightness', 'contrast', 'lumaflip', 'hue', 'saturation', 'brightness_std', 'contrast_std', 'hue_max', 'saturation_std', 'imgfilter', 'imgfilter_bands', 'imgfilter_std', 'noise', 'cutout', 'noise_std', 'cutout_size'], {}), '(xflip, rotate90, xint, xint_max, scale, rotate,\n aniso, xfrac, scale_std, rotate_max, aniso_std, xfrac_std, brightness,\n contrast, lumaflip, hue, saturation, brightness_std, contrast_std,\n hue_max, saturation_std, imgfilter, imgfilter_bands, imgfilter_std,\n noise, cutout, noise_std, cutout_size)\n', (1488, 1797), False, 'from ppgan.models.generators.generator_styleganv2ada import StyleGANv2ADA_AugmentPipe\n'), ((2014, 2031), 'numpy.load', 'np.load', (['"""55.npz"""'], {}), "('55.npz')\n", (2021, 2031), True, 'import numpy as np\n'), ((2337, 2356), 'paddle.to_tensor', 'paddle.to_tensor', (['x'], {}), '(x)\n', (2353, 2356), False, 'import paddle\n'), ((2595, 2637), 'paddle.ones', 'paddle.ones', (['y.shape'], {'dtype': 'paddle.float32'}), '(y.shape, dtype=paddle.float32)\n', (2606, 2637), False, 'import paddle\n'), ((2714, 2749), 'numpy.sum', 'np.sum', (['((y_pytorch - y_paddle) ** 2)'], {}), '((y_pytorch - y_paddle) ** 2)\n', (2720, 2749), True, 'import numpy as np\n'), ((2822, 2865), 'numpy.sum', 'np.sum', (['((dy_dx_pytorch - dy_dx_paddle) ** 2)'], {}), '((dy_dx_pytorch - dy_dx_paddle) ** 2)\n', (2828, 2865), True, 'import numpy as np\n'), ((2905, 2941), 'numpy.mean', 'np.mean', (['((y_pytorch - y_paddle) ** 2)'], {}), '((y_pytorch - y_paddle) ** 2)\n', (2912, 2941), True, 'import numpy as np\n'), ((2980, 3024), 'numpy.mean', 'np.mean', (['((dy_dx_pytorch - dy_dx_paddle) ** 2)'], {}), '((dy_dx_pytorch - dy_dx_paddle) ** 2)\n', (2987, 3024), True, 'import numpy as np\n')] |
import Examples.metadata_manager_results as results_manager
import Source.io_util as io
import numpy as np
import os
def improvements_err_speedup_size(obj: np.ndarray, ref: np.ndarray, i_obj=0) -> np.ndarray:
    """Best per-objective improvement over a reference point.

    Rows of ``obj`` are candidate solutions (columns: error, time, size).
    Only rows beating ``ref`` on objective ``i_obj`` are eligible; for each
    objective column the row minimising that column is recorded (falling back
    to ``ref`` itself when even the best eligible row does not beat it).
    The result is expressed relative to ``ref``: column 0 as absolute error
    reduction, column 1 as a speedup ratio, column 2 as a relative size.
    """
    assert obj.shape[1] > i_obj and ref.shape[0] > i_obj
    n_objectives = obj.shape[1]
    candidates = obj[obj[:, i_obj] < ref[i_obj]]
    result = np.ones((n_objectives, n_objectives))
    result[:, 0] = 0.0
    if candidates.shape[0]:
        for col in range(n_objectives):
            best_row = candidates[np.argmin(candidates[:, col])]
            # Fall back to the reference when no candidate beats it on `col`.
            result[col, :] = best_row if best_row[col] < ref[col] else ref.copy()
    result[:, 0] = ref[0] - result[:, 0]
    result[:, 1] = ref[1] / result[:, 1]
    result[:, 2] /= ref[2]
    return result
def improvements_all(obj: np.ndarray, ref: np.ndarray) -> np.ndarray:
    """Improvements over ``ref`` restricted to fully-dominating solutions.

    Unlike :func:`improvements_err_speedup_size`, a row is eligible only when
    it beats ``ref`` on *every* objective, so no per-column reference
    fallback is needed.  Output columns mirror the other variant: absolute
    error reduction, speedup ratio, relative size.
    """
    n_objectives = obj.shape[1]
    dominant = obj[np.all(np.less(obj, ref), axis=1)]
    result = np.ones((n_objectives, n_objectives))
    result[:, 0] = 0.0
    if dominant.shape[0]:
        for col in range(n_objectives):
            result[col, :] = dominant[np.argmin(dominant[:, col])]
    result[:, 0] = ref[0] - result[:, 0]
    result[:, 1] = ref[1] / result[:, 1]
    result[:, 2] /= ref[2]
    return result
def get_most_accurate_nn_result(R: dict, phase="val"):
    """Return the result dict of the most accurate single NN in ``R``.

    Entries with three or more stored results are treated as ensembles and
    skipped; only single networks (fewer than 3 entries) are considered.

    Args:
        R: mapping of result-id -> result object exposing ``.val``/``.test``
            dicts, each with at least a ``"system"`` entry that has an
            ``accuracy`` attribute.
        phase: which split to inspect, ``"val"`` or ``"test"``.

    Returns:
        The winning ``.val``/``.test`` dict, or ``None`` if no entry is
        eligible (or ``R`` is empty).
    """
    # BUG FIX: `assert phase == "val" or "test"` was a tautology (the string
    # "test" is always truthy); validate the argument properly.
    assert phase in ("val", "test")
    best_accuracy = 0  # renamed from `max`, which shadowed the builtin
    best_result = None
    for v in R.values():
        results = v.val if phase == "val" else v.test
        # Fewer than 3 stored entries => a single network, not an ensemble.
        if len(results) < 3 and results["system"].accuracy > best_accuracy:
            best_accuracy = results["system"].accuracy
            best_result = results
    return best_result
def results_to_numpy(R: dict, phase="val") -> np.ndarray:
    """Collect (error, time, params) rows for every result in ``R``.

    Args:
        R: mapping of result-id -> result object exposing ``.val``/``.test``
            dicts with a ``"system"`` entry (accuracy/time/params attrs).
        phase: which split to read, ``"val"`` or ``"test"``.

    Returns:
        Array of shape ``(len(R), 3)``: ``[1 - accuracy, time, params]``.
    """
    # BUG FIX: `assert phase == "val" or "test"` was a tautology; check the
    # argument for real.
    assert phase in ("val", "test")
    rows = []
    for result in R.values():
        r = result.val if phase == "val" else result.test
        rows.append([1 - r["system"].accuracy,
                     r["system"].time,
                     r["system"].params])
    return np.array(rows)
if __name__ == "__main__":
    # Aggregate ensemble improvements over all GA runs whose hyper-parameters
    # match `params_match`, relative to the most accurate single network.
    metadata_file = os.path.join("../../../compute/bagging_boosting_of_chains_GA/results/metadata.json")
    phase = "test"
    # GA hyper-parameters identifying the runs to aggregate.
    params_match = {
        "dataset": "sota_models_svhn-32-dev_validation",
        "population": 500,
        "offspring": 200,
        "iterations": 50,
        "step_th": 0.1,
        "pm": 0.2,
        "rm": 0.8,
        "k": 1,
        "a": [
            1,
            1,
            1
        ]
    }
    ids = results_manager.get_ids_by_fieldval(metadata_file, "params", params_match)
    # 3x3 matrix accumulated across runs: one row per objective
    # (error reduction / speedup / relative size).
    improvements = np.zeros((3, 3))
    ref = None
    for id in ids:
        R_path = results_manager.get_results_by_id(metadata_file, id)
        individuals_fitness_generation = io.read_pickle(os.path.join(R_path, 'individuals_fitness_per_generation.pkl'))
        R_dict = io.read_pickle(os.path.join(R_path, 'results_ensembles.pkl'))
        # Last generation of ensembles
        last_generation = individuals_fitness_generation[-1][0]
        R_dict_last = {ensemble_id: R_dict[ensemble_id] for ensemble_id in last_generation}
        # Most accurate NN as reference point
        # (computed once, from the first run's full result set)
        if ref is None:
            r_ref = get_most_accurate_nn_result(R_dict, phase)
            ref = np.array([1-r_ref["system"].accuracy, r_ref["system"].time, r_ref["system"].params])
        # Evaluation results to numpy array
        obj = results_to_numpy(R_dict_last, phase)
        # Get improvements from reference point
        improvements += improvements_err_speedup_size(obj, ref)
    print()
    # [12:] strips the "sota_models_" prefix from the dataset name.
    print("Average ensemble improvements on %s over %d runs" % (params_match["dataset"][12:], len(ids)))
    print(improvements/len(ids))
| [
"numpy.less",
"numpy.ones",
"os.path.join",
"numpy.array",
"numpy.zeros",
"numpy.argmin",
"Examples.metadata_manager_results.get_ids_by_fieldval",
"Examples.metadata_manager_results.get_results_by_id"
] | [((349, 386), 'numpy.ones', 'np.ones', (['(obj.shape[1], obj.shape[1])'], {}), '((obj.shape[1], obj.shape[1]))\n', (356, 386), True, 'import numpy as np\n'), ((1007, 1044), 'numpy.ones', 'np.ones', (['(obj.shape[1], obj.shape[1])'], {}), '((obj.shape[1], obj.shape[1]))\n', (1014, 1044), True, 'import numpy as np\n'), ((2497, 2586), 'os.path.join', 'os.path.join', (['"""../../../compute/bagging_boosting_of_chains_GA/results/metadata.json"""'], {}), "(\n '../../../compute/bagging_boosting_of_chains_GA/results/metadata.json')\n", (2509, 2586), False, 'import os\n'), ((2974, 3048), 'Examples.metadata_manager_results.get_ids_by_fieldval', 'results_manager.get_ids_by_fieldval', (['metadata_file', '"""params"""', 'params_match'], {}), "(metadata_file, 'params', params_match)\n", (3009, 3048), True, 'import Examples.metadata_manager_results as results_manager\n'), ((3068, 3084), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (3076, 3084), True, 'import numpy as np\n'), ((940, 957), 'numpy.less', 'np.less', (['obj', 'ref'], {}), '(obj, ref)\n', (947, 957), True, 'import numpy as np\n'), ((3137, 3189), 'Examples.metadata_manager_results.get_results_by_id', 'results_manager.get_results_by_id', (['metadata_file', 'id'], {}), '(metadata_file, id)\n', (3170, 3189), True, 'import Examples.metadata_manager_results as results_manager\n'), ((497, 517), 'numpy.argmin', 'np.argmin', (['obj[:, i]'], {}), '(obj[:, i])\n', (506, 517), True, 'import numpy as np\n'), ((1155, 1175), 'numpy.argmin', 'np.argmin', (['obj[:, i]'], {}), '(obj[:, i])\n', (1164, 1175), True, 'import numpy as np\n'), ((3246, 3308), 'os.path.join', 'os.path.join', (['R_path', '"""individuals_fitness_per_generation.pkl"""'], {}), "(R_path, 'individuals_fitness_per_generation.pkl')\n", (3258, 3308), False, 'import os\n'), ((3342, 3387), 'os.path.join', 'os.path.join', (['R_path', '"""results_ensembles.pkl"""'], {}), "(R_path, 'results_ensembles.pkl')\n", (3354, 3387), False, 'import os\n'), ((3737, 
3828), 'numpy.array', 'np.array', (["[1 - r_ref['system'].accuracy, r_ref['system'].time, r_ref['system'].params]"], {}), "([1 - r_ref['system'].accuracy, r_ref['system'].time, r_ref[\n 'system'].params])\n", (3745, 3828), True, 'import numpy as np\n')] |
""" Classes to implement the artificial bee colony algorithm. """
from numpy.random import uniform
class Colony:
    """ Implements the artificial bee colony algorithm.
    Args:
        objective: objective function called by each bee at each food source.
            Must return a "honey" value that will be maximized by the colony.
        params: dictionary of optimization parameters and their ranges::
                params = {'param_1': (min, max),
                          'param_2': (min, max),
                          ...,
                          'param_n': (min, max)}
    Keyword Args:
        num_bees: number of employed bees in the colony, i.e. number of
            solutions searched at each iteration of the algorithm.
        limit: number of trials without improvement at a food source before it
            is "depleted" and the colony moves on to new food sources.
        max_iter: maximum number of loops through the colonies fit function.
            Note that the objective function will be evaluated a number of
            times equal to::
                max_iter * (num_bees + num_scouts)
        num_scouts: number of additional bees in the colony that are solely
            responsible for exploring, i.e. the number of additional random
            guesses made at every iteration of the fit function.
    Properties:
        is_initialized: indicates whether the colony has been initialized and
            is ready to be fit.
        colony: list of Bee objects that make up the colony.
        food: list of food values for each bee in the colony.
    Raises:
        TypeError: if objective is not callable.
        TypeError: if params is not a dictionary.
        TypeError: if any entry in params is not a range or callable.
    """

    def __init__(self, objective, params: dict, num_bees: int = 10,
                 limit: int = 5, max_iter: int = 1000, num_scouts: int = 1):
        """ Initialize the colony. """
        if not callable(objective):
            raise TypeError('objective must be callable!')
        if not isinstance(params, dict):
            msg = 'params argument must be a dictionary of parameters and '\
                  + 'ranges or callable distributions!'
            raise TypeError(msg)
        for key in params.keys():
            if not isinstance(params[key], tuple):
                if not callable(params[key]):
                    msg = f'params[{key}] must be a range or callable!'
                    raise TypeError(msg)
        self.objective = objective
        self.num_bees = num_bees
        self.params = params
        self.limit = limit
        self.max_iter = max_iter
        self.num_scouts = num_scouts
        self.is_initialized = False
        self.colony = [_Bee(self.objective) for _ in range(self.num_bees + self.num_scouts)]
        self.food = [0.] * len(self.colony)
        # BUG FIX: `False * len(...)` evaluated to the integer 0; a per-bee
        # flag list was clearly intended.
        self.chosen = [False] * len(self.colony)

    def fit(self, verbose: bool = False):
        """ Maximizes the objective function using the bee colony.
        Keyword Args:
            verbose: if true, output periodic updates about the fitting process.
                Default is False. (Currently unused; kept for API stability.)
        """
        if not self.is_initialized:
            self.initialize()
        for _ in range(self.max_iter):
            for bee in self.colony:
                bee.evaluate()
            self.food = [bee.food for bee in self.colony]
            # TODO (ENG): pick where the bees go next based on food values.
            # BUG FIX: these were bare attribute accesses (no-ops); call them.
            self._choose_food()
            self._perturb_params()
            # Send scouts to random locations always.
            for bee in self.colony[-self.num_scouts:]:
                # BUG FIX: pass a freshly drawn parameter dict, not the
                # bound `_draw_params` method object itself.
                bee.goto(self._draw_params())

    def initialize(self):
        """ Initializes the colony with first guesses for all bees. """
        for bee in self.colony:
            # BUG FIX: call _draw_params() rather than handing each bee the
            # bound method.
            bee.goto(self._draw_params())
        self.is_initialized = True

    def _draw_params(self):
        """ Makes a random draw of parameters. """
        draw = {}
        # BUG FIX: `params` was an undefined global here; use self.params.
        for k, v in self.params.items():
            if isinstance(v, tuple):
                draw[k] = uniform(*v)  # uniform for ranges
            elif callable(v):
                draw[k] = v()  # call function for provided distributions
        return draw

    def _choose_food(self):
        """ Chooses which food sources to keep or abandon. (Not yet implemented.) """
        pass

    def _perturb_params(self):
        """ Perturbs param values for selected food sources. (Not yet implemented.) """
        pass
# Maybe this should be a sub-class of Colony?
class _Bee:
""" Defines a single bee in the colony.
Args:
objective: objective function called by each bee at each food source.
Must return a "honey" value that will be maximized by the colony.
position: dicationary of parameter/value pairs defining the initial
food source location to test, i.e. initial guess for each bee in
the colony.
Properties:
food: value of the objective function at position.
"""
def __init__(self, objective, position: dict = None):
self.objective = objective
self.position = position
self.food = None # objective function value at position
def evaluate(self):
""" Evaluate the objective function at the given position.
TODO:
- Spawn a new process for each bee.
- Spawn processes intelligently for bees.
"""
self.food = self.objective(self.position)
def goto(self, params: dict):
""" Tells the bee where to go next.
Args:
params: dictionary of key, value pairs, where each key is a
parameter of the objective function, and each value is a
concrete value that the parameter takes.
"""
self.position = params
| [
"numpy.random.uniform"
] | [((4169, 4180), 'numpy.random.uniform', 'uniform', (['*v'], {}), '(*v)\n', (4176, 4180), False, 'from numpy.random import uniform\n')] |
import numpy as np
import scipy as sp
from ipdb import set_trace as st
import skimage as ski
import utils
from matplotlib import pyplot as plt
import matplotlib as mpl
from common import *
from munch import Munch as M
from scipy import sparse
from scipy.interpolate import Rbf
import os
class Register:
    """Registers (aligns) star fields via triangle (asterism) matching.

    Pipeline: build scale/rotation-invariant triangle descriptors for each
    point set (``gen_triangles``), vote on star correspondences from matching
    triangles (``match_triangles``), then RANSAC-fit an affine transform and
    discard outliers (``ransac_linear``).  Matched coordinate pairs are saved
    as ``.npy`` files under ``<work_dir>/registration/``.
    """
    def __init__(
        self,
        work_dir,
        params = None,
    ):
        """
        Args:
            work_dir: output directory for registration results.
            params: Munch/dict of algorithm settings; every entry is copied
                onto ``self``.  BUG FIX: the defaults are now built inside
                the body instead of being a shared mutable default argument.
        """
        if params is None:
            params = M(
                max_stars = 500, #take this many stars at most
                nneighbors = 500, #must be even (1k before)
                #more stars
                # max_stars = 5000, #take this many stars at most
                # nneighbors = 1000, #must be even (1k before)
                ba_max_ratio = 0.99,
                cb_max_ratio = 0.99,
                epsilon = 1E-3, #match tol
                min_abs_diff = 1, #abs and rel diff for match success
                min_rel_diff = 1.4,
                ransac_iters = 50,
                ransac_keep_percentile = 99,
                linear_fit_tol = 2.0, #pixels tol on linear fit
            )
        self.work_dir = work_dir
        for k in params:
            setattr(self, k, params[k])

    def gen_triangles(self, pts):
        """Build side-ratio descriptors for triangles of nearest neighbours.

        Every point is paired with couples of its nearest neighbours to form
        triangles; each triangle is described by the two side-length ratios
        (middle/longest, shortest/middle), which are invariant to
        translation, rotation and scale.

        Returns:
            Munch with ``ratio`` (T x 2 descriptors) and ``index`` (T x 3
            vertex indices, reordered to a canonical long/medium/short
            convention).
        """
        NN = min(self.nneighbors, 2*((pts.shape[0]-1)//2))
        #1st nn is the point itself, so we need to trim it out...
        indices = utils.get_nearest_neighbors(pts, NN+1)[1][:,1:] #N x nbs
        indices = indices.reshape(pts.shape[0], NN//2, 2)
        indices = cat(
            (indices,
             np.tile(np.arange(pts.shape[0])[:,None,None], (1,NN//2,1))),
            axis = 2
        )
        indices = indices.reshape(-1,3) #triangle indices..
        distances = np.stack((
            np.linalg.norm(pts[indices[:,0]] - pts[indices[:,1]], axis = 1),
            np.linalg.norm(pts[indices[:,0]] - pts[indices[:,2]], axis = 1),
            np.linalg.norm(pts[indices[:,1]] - pts[indices[:,2]], axis = 1),
        ), axis = 1) #Tx3 distances
        #a triangle has 5 components...
        #1. ratio of sides b/a, c/b and index of the elements i,j,k
        # long and medium being i, long and short being j, medium and short being k
        dorder = distances.argsort(axis=1)
        dsorted = np.sort(distances, axis = -1)
        # there's a kind of clever trick here...
        # if dorder[:,0] == 0, then shortest edge is 01, which means lm must be 2
        # if dorder[:,2] == 2, then longest edge is 12, which means lm must be 0
        lm_i = 2*(dorder[:,0] == 0) + 1*(dorder[:,0] == 1) + 0*(dorder[:,0] == 2)
        ms_i = 2*(dorder[:,2] == 0) + 1*(dorder[:,2] == 1) + 0*(dorder[:,2] == 2)
        ls_i = 3 - lm_i - ms_i
        ba_r = dsorted[:,1] / dsorted[:,2]
        cb_r = dsorted[:,0] / dsorted[:,1]
        tri_ratio = np.stack((ba_r, cb_r), axis = 1)
        tri_index_local = np.stack((lm_i, ms_i, ls_i), axis = 1)
        tri_index = indices[
            np.arange(indices.shape[0]).repeat(3),
            tri_index_local.reshape(-1)
        ].reshape(-1,3)
        #filter ba as described in paper to reduce false matches
        valid = (ba_r < self.ba_max_ratio) & (cb_r < self.cb_max_ratio)
        tri_ratio = tri_ratio[valid]
        tri_index = tri_index[valid]
        print(f'{tri_ratio.shape[0]} triangles generated')
        return M(ratio = tri_ratio, index = tri_index)

    def match_triangles(self, source, target, source_tris, target_tris):
        ''' Match stars between the two point sets using the voting algorithm:
        every matching triangle pair votes for its three vertex
        correspondences; mutually-maximal votes that clearly beat the
        runner-up are accepted. Returns an array of (source, target)
        index pairs. '''
        # Renamed from N, M: the local `M` shadowed the module-level Munch alias.
        n_source, n_target = source.shape[0], target.shape[0]
        matches = utils.nearby_pairs(source_tris.ratio, target_tris.ratio, self.epsilon)
        print(f'{matches.shape[0]} triangle correspondences found')
        source_pts = source_tris.index[matches[:,0]].reshape(-1)
        target_pts = target_tris.index[matches[:,1]].reshape(-1)
        coords = np.stack([source_pts, target_pts], axis = 1)
        ucoords, counts = np.unique(coords, axis = 0, return_counts = True)
        votes = np.zeros((n_source, n_target), dtype = int)
        votes[ucoords[...,0], ucoords[...,1]] = counts
        #"best" matches: mutual row/column maxima of the vote matrix
        cy, cx = ((votes >= votes.max(axis=0)[None]) & (votes >= votes.max(axis=1)[...,None])).nonzero()
        cvotes = votes[cy,cx]
        #now we need the runner up votes
        # NOTE(review): axis-1 uses [:,2] (3rd-smallest per row) while axis-0
        # uses [-2] (2nd-largest per column) — confirm the asymmetry is intended.
        runner_up = np.maximum(
            np.sort(votes, axis = 1)[:,2][:,None],
            np.sort(votes, axis = 0)[-2][None],
        )[cy,cx]
        good_match = (cvotes-runner_up >= self.min_abs_diff) & (cvotes/(runner_up+1E-3) > self.min_rel_diff)
        matches = np.stack((cy,cx),axis=1)[good_match]
        print(f'matched {len(matches)} stars')
        return matches

    def ransac_linear(self, source, target):
        '''
        Fit an affine transform from source to target, iteratively
        discarding outlier correspondences (RANSAC-style trimming).
        Returns (inlier source points, inlier target points, transform).
        '''
        # BUG FIX: np.bool was deprecated and removed in NumPy 1.24; use the
        # builtin bool dtype instead.
        valid = np.ones(source.shape[0], dtype = bool)
        for i in range(self.ransac_iters):
            T, residuals = utils.fit_affine(source[valid], target[valid])
            residuals = np.linalg.norm(residuals, axis = -1)
            if i == self.ransac_iters -1:
                # Final pass: keep only points within the absolute tolerance.
                valid_criteria = residuals < self.linear_fit_tol
            else:
                # Intermediate passes: trim the worst percentile of residuals.
                valid_criteria = residuals < np.percentile(residuals, self.ransac_keep_percentile)
            valid[valid] &= valid_criteria
            # Early exit once the fit is comfortably within tolerance.
            if residuals[valid_criteria].mean() < self.linear_fit_tol/2:
                break
        source = source[valid]
        target = target[valid]
        print(f'{source.shape[0]} inlier stars, mean error {residuals.mean():.3f} px')
        print(T)
        return source, target, T

    def register(self, source, target):
        """Match ``source`` stars against ``target`` stars.

        Returns:
            Nx4 array of matched inlier coordinates
            (source y, source x, target y, target x).
        """
        source, target = source[...,:2], target[...,:2] #don't make use of std
        source_tris = self.gen_triangles(source)
        target_tris = self.gen_triangles(target)
        matches = self.match_triangles(source, target, source_tris, target_tris)
        source_matched = source[matches[...,0]]
        target_matched = target[matches[...,1]]
        #this step fits a linear transform and discards outliers
        source_lin, target_lin, T = self.ransac_linear(source_matched, target_matched)
        return cat((source_lin, target_lin), axis=1) #Nx4

    def __call__(self, paths, other=None):
        """Register every star list in ``paths``.

        Without ``other``, the list with the most stars acts as the
        reference and all remaining lists are registered against it.  With
        ``other``, the most populous list from ``other`` is the reference
        (named 'rel') and every entry of ``paths`` is registered against it.
        Each match array is saved to
        ``<work_dir>/registration/<ref>-<name>.npy``.
        """
        stars = mmap(np.load, paths)
        #sort by #stars, descending
        paths, stars = zip(*sorted(
            zip(paths, stars),
            key = lambda x: -x[1].shape[0]
        ))
        if other is None:
            # Keep only the brightest/last max_stars entries of each list.
            stars = [star[-self.max_stars:] for star in stars]
            for i, star in enumerate(stars[1:]):
                matches = self.register(stars[0], star)
                name0 = os.path.basename(paths[0]).replace('.npy', '')
                namei = os.path.basename(paths[i+1]).replace('.npy', '')
                out_path = f'{self.work_dir}/registration/{name0}-{namei}'
                np.save(out_path, matches)
        else:
            other_stars = mmap(np.load, other)
            other_paths, other_stars = zip(*sorted(
                zip(other, other_stars),
                key = lambda x: -x[1].shape[0]
            ))
            refstars = other_stars[0][-self.max_stars:]
            stars = [star[-self.max_stars:] for star in stars]
            for i, star in enumerate(stars):
                matches = self.register(refstars, star)
                name0 = 'rel'
                namei = os.path.basename(paths[i]).replace('.npy', '')
                out_path = f'{self.work_dir}/registration/{name0}-{namei}'
                np.save(out_path, matches)
if __name__ == '__main__':
    # Module is import-only; no command-line entry point is provided.
    pass
| [
"utils.get_nearest_neighbors",
"numpy.unique",
"numpy.ones",
"utils.nearby_pairs",
"numpy.arange",
"numpy.sort",
"numpy.stack",
"numpy.zeros",
"os.path.basename",
"numpy.linalg.norm",
"numpy.percentile",
"munch.Munch",
"numpy.save",
"utils.fit_affine"
] | [((383, 574), 'munch.Munch', 'M', ([], {'max_stars': '(500)', 'nneighbors': '(500)', 'ba_max_ratio': '(0.99)', 'cb_max_ratio': '(0.99)', 'epsilon': '(0.001)', 'min_abs_diff': '(1)', 'min_rel_diff': '(1.4)', 'ransac_iters': '(50)', 'ransac_keep_percentile': '(99)', 'linear_fit_tol': '(2.0)'}), '(max_stars=500, nneighbors=500, ba_max_ratio=0.99, cb_max_ratio=0.99,\n epsilon=0.001, min_abs_diff=1, min_rel_diff=1.4, ransac_iters=50,\n ransac_keep_percentile=99, linear_fit_tol=2.0)\n', (384, 574), True, 'from munch import Munch as M\n'), ((2260, 2287), 'numpy.sort', 'np.sort', (['distances'], {'axis': '(-1)'}), '(distances, axis=-1)\n', (2267, 2287), True, 'import numpy as np\n'), ((2806, 2836), 'numpy.stack', 'np.stack', (['(ba_r, cb_r)'], {'axis': '(1)'}), '((ba_r, cb_r), axis=1)\n', (2814, 2836), True, 'import numpy as np\n'), ((2865, 2901), 'numpy.stack', 'np.stack', (['(lm_i, ms_i, ls_i)'], {'axis': '(1)'}), '((lm_i, ms_i, ls_i), axis=1)\n', (2873, 2901), True, 'import numpy as np\n'), ((3681, 3716), 'munch.Munch', 'M', ([], {'ratio': 'tri_ratio', 'index': 'tri_index'}), '(ratio=tri_ratio, index=tri_index)\n', (3682, 3716), True, 'from munch import Munch as M\n'), ((4022, 4092), 'utils.nearby_pairs', 'utils.nearby_pairs', (['source_tris.ratio', 'target_tris.ratio', 'self.epsilon'], {}), '(source_tris.ratio, target_tris.ratio, self.epsilon)\n', (4040, 4092), False, 'import utils\n'), ((4309, 4351), 'numpy.stack', 'np.stack', (['[source_pts, target_pts]'], {'axis': '(1)'}), '([source_pts, target_pts], axis=1)\n', (4317, 4351), True, 'import numpy as np\n'), ((4380, 4425), 'numpy.unique', 'np.unique', (['coords'], {'axis': '(0)', 'return_counts': '(True)'}), '(coords, axis=0, return_counts=True)\n', (4389, 4425), True, 'import numpy as np\n'), ((4447, 4474), 'numpy.zeros', 'np.zeros', (['(N, M)'], {'dtype': 'int'}), '((N, M), dtype=int)\n', (4455, 4474), True, 'import numpy as np\n'), ((5296, 5335), 'numpy.ones', 'np.ones', (['source.shape[0]'], {'dtype': 
'np.bool'}), '(source.shape[0], dtype=np.bool)\n', (5303, 5335), True, 'import numpy as np\n'), ((5018, 5044), 'numpy.stack', 'np.stack', (['(cy, cx)'], {'axis': '(1)'}), '((cy, cx), axis=1)\n', (5026, 5044), True, 'import numpy as np\n'), ((5409, 5455), 'utils.fit_affine', 'utils.fit_affine', (['source[valid]', 'target[valid]'], {}), '(source[valid], target[valid])\n', (5425, 5455), False, 'import utils\n'), ((5480, 5514), 'numpy.linalg.norm', 'np.linalg.norm', (['residuals'], {'axis': '(-1)'}), '(residuals, axis=-1)\n', (5494, 5514), True, 'import numpy as np\n'), ((1371, 1411), 'utils.get_nearest_neighbors', 'utils.get_nearest_neighbors', (['pts', '(NN + 1)'], {}), '(pts, NN + 1)\n', (1398, 1411), False, 'import utils\n'), ((1750, 1813), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts[indices[:, 0]] - pts[indices[:, 1]])'], {'axis': '(1)'}), '(pts[indices[:, 0]] - pts[indices[:, 1]], axis=1)\n', (1764, 1813), True, 'import numpy as np\n'), ((1827, 1890), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts[indices[:, 0]] - pts[indices[:, 2]])'], {'axis': '(1)'}), '(pts[indices[:, 0]] - pts[indices[:, 2]], axis=1)\n', (1841, 1890), True, 'import numpy as np\n'), ((1904, 1967), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts[indices[:, 1]] - pts[indices[:, 2]])'], {'axis': '(1)'}), '(pts[indices[:, 1]] - pts[indices[:, 2]], axis=1)\n', (1918, 1967), True, 'import numpy as np\n'), ((8297, 8323), 'numpy.save', 'np.save', (['out_path', 'matches'], {}), '(out_path, matches)\n', (8304, 8323), True, 'import numpy as np\n'), ((8953, 8979), 'numpy.save', 'np.save', (['out_path', 'matches'], {}), '(out_path, matches)\n', (8960, 8979), True, 'import numpy as np\n'), ((5688, 5741), 'numpy.percentile', 'np.percentile', (['residuals', 'self.ransac_keep_percentile'], {}), '(residuals, self.ransac_keep_percentile)\n', (5701, 5741), True, 'import numpy as np\n'), ((1562, 1585), 'numpy.arange', 'np.arange', (['pts.shape[0]'], {}), '(pts.shape[0])\n', (1571, 1585), True, 'import numpy as 
np\n'), ((4785, 4807), 'numpy.sort', 'np.sort', (['votes'], {'axis': '(1)'}), '(votes, axis=1)\n', (4792, 4807), True, 'import numpy as np\n'), ((4836, 4858), 'numpy.sort', 'np.sort', (['votes'], {'axis': '(0)'}), '(votes, axis=0)\n', (4843, 4858), True, 'import numpy as np\n'), ((8086, 8112), 'os.path.basename', 'os.path.basename', (['paths[0]'], {}), '(paths[0])\n', (8102, 8112), False, 'import os\n'), ((8157, 8187), 'os.path.basename', 'os.path.basename', (['paths[i + 1]'], {}), '(paths[i + 1])\n', (8173, 8187), False, 'import os\n'), ((8815, 8841), 'os.path.basename', 'os.path.basename', (['paths[i]'], {}), '(paths[i])\n', (8831, 8841), False, 'import os\n'), ((2945, 2972), 'numpy.arange', 'np.arange', (['indices.shape[0]'], {}), '(indices.shape[0])\n', (2954, 2972), True, 'import numpy as np\n')] |
"""
@author : <NAME>
@date : 1-10-2021
Ensemble Learning is an often overshadowed and underestimated field of machine learning. Here we provide 2 algorithms
central to the game - random forests and ensemble/voting classifier. Random Forests are very especially fast
with parallel processing to fit multiple decision trees at the same time.
"""
import pandas as pd
import numpy as np
from multiprocessing import cpu_count
from joblib import parallel_backend, delayed, Parallel
import random
import math
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
class RandomForest:
    """
    Random Forests may seem intimidating but they are super simple. They are just a bunch of Decision Trees that are
    trained on different sets of the data. You give us the data, and we will create those different sets. You may choose
    for us to sample data with replacement or without, either way that's up to you. Keep in mind that because this is
    a bunch of Decision Trees, classification is only supported (avoid using decision trees for regression - it's
    range of predictions is limited.) The random forest will have each of its decision trees predict on data and just
    choose the most common prediction (not the average.)
    Enjoy this module - it's one of our best.
    """

    def __init__(self, num_classifiers=20, max_branches=math.inf, min_samples=1, replacement=True, min_data=None):
        """
        :param num_classifiers: Number of decision trees you want created.
        :param max_branches: Maximum number of branches each Decision Tree can have.
        :param min_samples: Minimum number of samples for a branch in any decision tree (in the forest) to split.
        :param replacement: Whether or not any of the data points in different chunks/sets of data can overlap.
        :param min_data: Minimum number of data there can be in any given data chunk. Each classifier is trained on a
        chunk of data, and if you want to make sure each chunk has 3 points for example you can set min_data = 3. It's
        default is 50% of the amount of data, the None is just a placeholder.
        """
        from .decision_trees import DecisionTree

        self.DecisionTree = DecisionTree
        self.trees = []
        self.num_classifiers = num_classifiers
        self.max_branches = max_branches
        self.min_samples = min_samples
        self.replacement = replacement
        self.min_data = min_data

    def fit(self, x_train, y_train):
        """
        :param x_train: 2D training data
        :param y_train: 1D training labels
        :return:
        """
        data, labels = np.array(x_train).tolist(), np.array(y_train).tolist()
        num_classifiers = self.num_classifiers
        max_branches = self.max_branches
        min_samples = self.min_samples
        replacement = self.replacement
        min_data = self.min_data
        # on default set min_data = 50% of your dataset
        if not min_data:
            min_data = round(0.5 * len(data))
        # merge data and labels together [(d1, l1) .. (dN, lN)]
        data_and_labels = [
            (data_point, label) for data_point, label in zip(data, labels)
        ]
        self.chunk_data, self.chunk_labels = [], []
        if replacement:
            # Each tree gets a random subset (of random size >= min_data);
            # points may appear in several chunks.
            for classifier in range(num_classifiers):
                num_samples = min_data + random.randint(0, len(data) - min_data)
                data_and_labels_set = random.sample(data_and_labels, num_samples)
                self.chunk_data.append(
                    [data_point for data_point, _ in data_and_labels_set]
                )
                self.chunk_labels.append([label for _, label in data_and_labels_set])
        else:
            """no replacement just use up all of the data here"""
            data_and_labels_df = pd.DataFrame({"data": data, "labels": labels})
            data_and_labels_full_set = np.array_split(
                data_and_labels_df, num_classifiers
            )  # splits into num_classifiers dataframes
            for df in data_and_labels_full_set:
                self.chunk_data.append(np.array(df)[:, 0].flatten())
                self.chunk_labels.append(np.array(df)[:, 1].flatten())
        self.trees = []
        # NOTE: parallel_backend is already imported at module level; the
        # redundant in-function re-import was removed.
        with parallel_backend("threading", n_jobs=-1):
            Parallel()(
                delayed(RandomForest._train_new_tree)(self, data_chunk, label_chunk)
                for data_chunk, label_chunk in zip(self.chunk_data, self.chunk_labels)
            )
        self.decision_trees = []  # stores each tree in a decision tree class
        for tree in self.trees:
            dt = self.DecisionTree()
            dt.give_tree(tree)
            assert dt.tree == tree
            self.decision_trees.append(dt)

    def _train_new_tree(self, data, labels):
        # Fit one decision tree on one chunk and collect its tree structure.
        dt = self.DecisionTree(
            max_branches=self.max_branches, min_samples=self.min_samples
        )
        dt.fit(data, labels)
        self.trees.append(dt.tree)

    def predict(self, x_test):
        """
        :param x_test: testing data (2D)
        :return: Predictions in a 1D numpy vector (majority vote over trees).
        """
        # One row of predictions per tree -> shape (num_trees, num_samples).
        # Labels are assumed to be non-negative integer class labels
        # (classification only, per the class docstring).
        tree_preds = np.array(
            [tree.predict(x_test) for tree in self.decision_trees]
        ).astype(int)
        # BUG FIX: the previous implementation rounded the *average* of the
        # tree predictions (np.round_, removed in NumPy 2.0).  That matches a
        # majority vote only for binary labels and can emit classes nobody
        # predicted with 3+ classes (e.g. votes 0,2,2 -> 1).  Take the true
        # most common prediction per sample instead.
        return np.array([
            np.bincount(tree_preds[:, sample]).argmax()
            for sample in range(tree_preds.shape[1])
        ])

    def evaluate(self, x_test, y_test):
        """
        :param x_test: testing data (2D)
        :param y_test: testing labels (1D)
        :return: accuracy score
        """
        y_pred = RandomForest.predict(self, x_test)
        amount_correct = sum(
            [1 if pred == label else 0 for pred, label in zip(y_pred, y_test)]
        )
        return amount_correct / len(x_test)

    def give_best_tree(self, x_test, y_test):
        """
        You give it the data and the labels, and it will find the tree in the forest that does the best. Then it will
        return that tree. You can then take that tree and put it into the DecisionTree class using the give_tree method.
        :param x_test: testing data (2D)
        :param y_test: testing labels (1D)
        :return: tree that performs the best (dictionary data type)
        """
        # NOTE(review): keying by score means trees with identical accuracy
        # collapse to one entry; an arbitrary one of the tied best survives.
        evaluations = {
            decision_tree.evaluate(x_test, y_test): decision_tree
            for decision_tree in self.decision_trees
        }
        return evaluations[max(evaluations)].tree  # tree with best score

    def visualize_evaluation(self, y_pred, y_test):
        """
        :param y_pred: predictions from the predict() method
        :param y_test: labels for the data
        :return: a matplotlib image of the predictions and the labels ("correct answers") for you to see how well the model did.
        """
        import matplotlib.pyplot as plt

        plt.cla()
        y_pred, y_test = y_pred.flatten(), y_test.flatten()
        plt.scatter(
            [_ for _ in range(len(y_pred))],
            y_pred,
            color="blue",
            label="predictions/y_pred",
        )
        plt.scatter(
            [_ for _ in range(len(y_test))],
            y_test,
            color="green",
            label="labels/y_test",
        )
        plt.title("Predictions & Labels Plot")
        plt.xlabel("Data number")
        plt.ylabel("Prediction")
        plt.legend()
        plt.show()
def _train_new_predictor(prediction_dataset):
predictor, name, x_train, y_train = prediction_dataset
predictor.fit(x_train, y_train)
return [name, predictor]
class EnsembleClassifier:
"""
Aside from random forests, voting/ensemble classifiers are also another popular way of ensemble learning. How it works
is by training multiple different classifiers (you choose!) and predicting the most common class (or the average for
regression - more on that later.) Pretty simple actually, and works quite effectively. This module also can
tell you the best classifier in a group with its get_best_predictor(), so that could be useful. Similar to
``give_best_tree()`` in the random forest module, what it does is give the class of the algorithm that did the best on the
data you gave it. This can also be used for rapid hypertuning on the exact same module (giving the same class but
with different parameters in the init.)
Example:
>>> from sealion.regression import SoftmaxRegression
>>> from sealion.naive_bayes import GaussianNaiveBayes
>>> from sealion.nearest_neighbors import KNearestNeighbors
>>> ec = EnsembleClassifier({'algo1': SoftmaxRegression(num_classes=3), 'algo2': GaussianNaiveBayes(), 'algo3': KNearestNeighbors()},
... classification=True)
>>> ec.fit(X_train, y_train)
>>> y_pred = ec.predict(X_test) # predict
>>> ec.evaluate_all_predictors(X_test, y_test)
algo1 : 95%
algo2 : 90%
algo3 : 75%
>>> best_predictor = ec.get_best_predictor(X_test, y_test) # get the best predictor
>>> print(best_predictor) # is it Softmax Regression, Gaussian Naive Bayes, or KNearestNeighbors that did the best?
<regression.SoftmaxRegression object at 0xsomethingsomething>
>>> y_pred = best_predictor.predict(X_test) # looks like softmax regression, let's use it
Here we first important all the algorithms we are going to be using from their respective modules. Then
we create an ensemble classifier by passing in a dictionary where each key stores the name, and each value stores
the algorithm. ``Classification = True`` by default, so we didn't need to put that (if you want regression put it to
False. A good way to remember ``classification = True`` is the default is that this is an EnsembleCLASSIFIER.)
We then fitted that and got it's predictions. We saw how well each predictor did (that's where the names come in)
through the ``evaluate_all_predictors()`` method. We could then get the best predictor and use that class. Note that
this class will ONLY use algorithms other than neural networks, which should be plenty. This is because neural networks
have a different ``evaluate()`` method and typically will be more random in performance than other algorithms.
I hope that example cleared anything up. The ``fit()`` method trains in parallel (thanks joblib!) so it's pretty
fast. As usual, enjoy this algorithm!
"""
    def __init__(self, predictors, classification=True):
        """
        Set up the ensemble around its Cython backend.

        :param predictors: dict of ``{name (string): algorithm (class)}``. See example above.
        :param classification: is it a classification or regression task? default classification - if regression set this
            to False.
        """
        # Deferred import -- presumably so the compiled extension is only
        # loaded when an ensemble is actually constructed (TODO confirm).
        from .cython_ensemble_learning import CythonEnsembleClassifier
        self.classification = classification
        # The Cython helper stores the predictors and implements the
        # prediction aggregation and scoring used by the methods below.
        self.cython_ensemble_classifier = CythonEnsembleClassifier(
            predictors, self.classification
        )
def fit(self, x_train, y_train):
"""
:param x_train: 2D training data
:param y_train: 1D training labels
:return:
"""
x_train, y_train = np.array(x_train), np.array(y_train)
if len(x_train.shape) != 2:
raise ValueError("x_train must be 2D (even if only one sample.)")
if len(y_train.shape) != 1:
raise ValueError("y_train must be 1D.")
self.predictors = self.cython_ensemble_classifier.get_predictors()
with parallel_backend("threading", n_jobs=cpu_count()):
for name, predictor in self.predictors.items():
self.predictors[name].fit(x_train, y_train)
self.trained_predictors = self.predictors
self.cython_ensemble_classifier.give_trained_predictors(self.trained_predictors)
def predict(self, x_test):
"""
:param x_test: testing data (2D)
:return: Predictions in 1D vector/list.
"""
x_test = np.array(x_test)
if len(x_test.shape) != 2:
raise ValueError("x_test must be 2D (even if only one sample.)")
return self.cython_ensemble_classifier.predict(x_test)
def evaluate(self, x_test, y_test):
"""
:param x_test: testing data (2D)
:param y_test: testing labels (1D)
:return: accuracy score
"""
x_test, y_test = np.array(x_test), np.array(y_test)
if len(x_test.shape) != 2:
raise ValueError("x_test must be 2D (even if only one sample.)")
if len(y_test.shape) != 1:
raise ValueError("y_test must be 1D.")
return self.cython_ensemble_classifier.evaluate(x_test, y_test)
def evaluate_all_predictors(self, x_test, y_test):
"""
:param x_test: testing data (2D)
:param y_test: testing labels (1D)
:return: None, just prints out the name of each algorithm in the predictors dict fed to the __init__ and its
score on the data given.
"""
x_test, y_test = np.array(x_test), np.array(y_test)
if len(x_test.shape) != 2:
raise ValueError("x_test must be 2D (even if only one sample.)")
if len(y_test.shape) != 1:
raise ValueError("y_test must be 1D.")
return self.cython_ensemble_classifier.evaluate_all_predictors(x_test, y_test)
def get_best_predictor(self, x_test, y_test):
"""
:param x_test: testing data (2D)
:param y_test: testing labels (1D)
:return: the class of the algorithm that did best on the given data. look at the above example if this doesn't
make sense.
"""
x_test, y_test = np.array(x_test), np.array(y_test)
if len(x_test.shape) != 2:
raise ValueError("x_test must be 2D (even if only one sample.)")
if len(y_test.shape) != 1:
raise ValueError("y_test must be 1D.")
return self.cython_ensemble_classifier.get_best_predictor(x_test, y_test)
def visualize_evaluation(self, y_pred, y_test):
"""
:param y_pred: predictions from the predict() method
:param y_test: labels for the data
:return: a matplotlib image of the predictions and the labels ("correct answers") for you to see how well the model did.
"""
import matplotlib.pyplot as plt
plt.cla()
y_pred, y_test = y_pred.flatten(), y_test.flatten()
if self.classification:
plt.scatter(
[_ for _ in range(len(y_pred))],
y_pred,
color="blue",
label="predictions/y_pred",
)
plt.scatter(
[_ for _ in range(len(y_test))],
y_test,
color="green",
label="labels/y_test",
)
else:
plt.plot(
[_ for _ in range(len(y_pred))],
y_pred,
color="blue",
label="predictions/y_pred",
)
plt.plot(
[_ for _ in range(len(y_test))],
y_test,
color="green",
label="labels/y_test",
)
plt.title("Predictions & Labels Plot")
plt.xlabel("Data number")
plt.ylabel("Prediction")
plt.legend()
plt.show()
| [
"random.sample",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"multiprocessing.cpu_count",
"joblib.delayed",
"numpy.array_split",
"numpy.array",
"joblib.Parallel",
"joblib.parallel_backend",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.cla",
"warnings.filterwa... | [((521, 576), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (544, 576), False, 'import warnings\n'), ((6863, 6872), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (6870, 6872), True, 'import matplotlib.pyplot as plt\n'), ((7261, 7299), 'matplotlib.pyplot.title', 'plt.title', (['"""Predictions & Labels Plot"""'], {}), "('Predictions & Labels Plot')\n", (7270, 7299), True, 'import matplotlib.pyplot as plt\n'), ((7308, 7333), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Data number"""'], {}), "('Data number')\n", (7318, 7333), True, 'import matplotlib.pyplot as plt\n'), ((7342, 7366), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Prediction"""'], {}), "('Prediction')\n", (7352, 7366), True, 'import matplotlib.pyplot as plt\n'), ((7375, 7387), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7385, 7387), True, 'import matplotlib.pyplot as plt\n'), ((7396, 7406), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7404, 7406), True, 'import matplotlib.pyplot as plt\n'), ((11944, 11960), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (11952, 11960), True, 'import numpy as np\n'), ((14311, 14320), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (14318, 14320), True, 'import matplotlib.pyplot as plt\n'), ((15165, 15203), 'matplotlib.pyplot.title', 'plt.title', (['"""Predictions & Labels Plot"""'], {}), "('Predictions & Labels Plot')\n", (15174, 15203), True, 'import matplotlib.pyplot as plt\n'), ((15212, 15237), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Data number"""'], {}), "('Data number')\n", (15222, 15237), True, 'import matplotlib.pyplot as plt\n'), ((15246, 15270), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Prediction"""'], {}), "('Prediction')\n", (15256, 15270), True, 'import matplotlib.pyplot as plt\n'), ((15279, 15291), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (15289, 
15291), True, 'import matplotlib.pyplot as plt\n'), ((15300, 15310), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15308, 15310), True, 'import matplotlib.pyplot as plt\n'), ((3841, 3887), 'pandas.DataFrame', 'pd.DataFrame', (["{'data': data, 'labels': labels}"], {}), "({'data': data, 'labels': labels})\n", (3853, 3887), True, 'import pandas as pd\n'), ((3927, 3978), 'numpy.array_split', 'np.array_split', (['data_and_labels_df', 'num_classifiers'], {}), '(data_and_labels_df, num_classifiers)\n', (3941, 3978), True, 'import numpy as np\n'), ((4322, 4362), 'joblib.parallel_backend', 'parallel_backend', (['"""threading"""'], {'n_jobs': '(-1)'}), "('threading', n_jobs=-1)\n", (4338, 4362), False, 'from joblib import parallel_backend\n'), ((11143, 11160), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (11151, 11160), True, 'import numpy as np\n'), ((11162, 11179), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (11170, 11179), True, 'import numpy as np\n'), ((12342, 12358), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (12350, 12358), True, 'import numpy as np\n'), ((12360, 12376), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (12368, 12376), True, 'import numpy as np\n'), ((12990, 13006), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (12998, 13006), True, 'import numpy as np\n'), ((13008, 13024), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (13016, 13024), True, 'import numpy as np\n'), ((13637, 13653), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (13645, 13653), True, 'import numpy as np\n'), ((13655, 13671), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (13663, 13671), True, 'import numpy as np\n'), ((3466, 3509), 'random.sample', 'random.sample', (['data_and_labels', 'num_samples'], {}), '(data_and_labels, num_samples)\n', (3479, 3509), False, 'import random\n'), ((4376, 4386), 'joblib.Parallel', 'Parallel', ([], {}), '()\n', (4384, 4386), False, 
'from joblib import parallel_backend, delayed, Parallel\n'), ((2656, 2673), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (2664, 2673), True, 'import numpy as np\n'), ((2684, 2701), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (2692, 2701), True, 'import numpy as np\n'), ((11508, 11519), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (11517, 11519), False, 'from multiprocessing import cpu_count\n'), ((4404, 4441), 'joblib.delayed', 'delayed', (['RandomForest._train_new_tree'], {}), '(RandomForest._train_new_tree)\n', (4411, 4441), False, 'from joblib import parallel_backend, delayed, Parallel\n'), ((4138, 4150), 'numpy.array', 'np.array', (['df'], {}), '(df)\n', (4146, 4150), True, 'import numpy as np\n'), ((4209, 4221), 'numpy.array', 'np.array', (['df'], {}), '(df)\n', (4217, 4221), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# Registry of activation modules, selectable by name in the factory
# functions below.
activations = nn.ModuleDict({
    'sigmoid': nn.Sigmoid(),
    'tanh': nn.Tanh(),
    'lrelu': nn.LeakyReLU(),
    'relu': nn.ReLU(),
    'selu': nn.SELU(),
    'elu': nn.ELU(),
})
def compute_flattened_maps(cnn_layers, input_shape):
    """
    Return the number of scalar features produced by ``cnn_layers`` for a
    single-channel input of spatial shape ``input_shape`` -- i.e. the size
    of the flattened feature maps feeding the first fully-connected layer.
    """
    dummy = torch.randn(1, 1, *input_shape)
    with torch.no_grad():
        feature_maps = cnn_layers(dummy)
    # drop the batch axis and multiply the remaining dimensions together
    return np.prod(feature_maps.shape[1:])
def cnn_weights_init(m):
    """
    Reinitialise the parameters of a network with custom init functions:
    Hu Kaiming init for conv layers (for stability), Xavier init for linear
    layers. The strategy is "reset and refine": every layer with a
    ``reset_parameters`` method is reset first, so all layers are reinit even
    if they get no custom treatment afterwards. Only conv and linear layers
    are refined for the moment.
    """
    if hasattr(m, "reset_parameters"):
        m.reset_parameters()  # default reinit for every resettable layer
    if isinstance(m, (nn.Conv1d, nn.Conv2d)):
        nn.init.kaiming_uniform_(m.weight)
        return
    if isinstance(m, nn.Linear):
        nn.init.xavier_uniform_(m.weight)
def torch_weights_init(m):
    """
    Reinitialise the parameters of a layer with torch's own defaults.
    Layers without a ``reset_parameters`` method are left untouched.
    This method is not very useful as it is right now.
    """
    reset = getattr(m, "reset_parameters", None)
    if reset is not None:
        reset()  # let torch apply its default initialisation
    # TODO: do something more from the specs
def create_dense_block(
        in_feats, out_feats, architecture=['fc', 'act', 'drop'],
        activation='relu', dropout_prob=0, wrapping=True):
    """
    Factory for a fully connected block: a Linear layer optionally followed
    by an activation and/or dropout, in the order given by ``architecture``
    (allowed item names: 'fc', 'act', 'drop'). Returns an ``nn.Sequential``
    when ``wrapping`` is True, otherwise the plain list of modules.
    TODO:
        - add the support for batch normalisation;
    """
    assert all(name in ('fc', 'act', 'drop') for name in architecture)
    catalogue = {
        'fc': nn.Linear(in_feats, out_feats),
        'act': activations[activation],
        'drop': nn.Dropout(p=dropout_prob),
    }
    ordered = [catalogue[name] for name in architecture]
    if wrapping:
        return nn.Sequential(*ordered)
    return ordered
def create_2d_convolutional_block(
        in_feats, num_filters, filter_size, architecture=['bn', 'act', 'pool', 'drop'],
        pool_size=(2,2), padding=0, stride=1, activation='relu', dropout_prob=0):
    """
    Factory for a 2D convolutional block: a Conv2d layer followed by the
    items named in ``architecture``, in that order.
    Args:
        in_feats (int): number of input features;
        num_filters (int): number of kernels;
        filter_size (tuple): size of the 2D filters/kernels;
        architecture (list): ordered item names among 'bn', 'act', 'pool', 'drop';
        pool_size (tuple): size of the pooling operation (same of stride);
        padding (int or tuple): the amount of padding for each dimension;
        stride (int or tuple): stride of the convolutional kernel;
        activation (str): name of the activation function;
        dropout_prob (float): probability of dropping out;
    """
    assert all(name in ('bn', 'act', 'pool', 'drop') for name in architecture)
    catalogue = {
        'bn': nn.BatchNorm2d(num_filters),
        'act': activations[activation],
        'pool': nn.MaxPool2d(pool_size),
        'drop': nn.Dropout(p=dropout_prob),
    }
    conv = nn.Conv2d(in_feats, num_filters, filter_size,
                     padding=padding, stride=stride)
    return nn.Sequential(conv, *(catalogue[name] for name in architecture))
class DeezerConv1d(nn.Module):
    """
    Simple implementation of the AudioCNN presented in
    "Music Mood Detection Based On Audio And Lyrics With Deep Neural Net".
    Code adapted from https://github.com/Dohppak/Music_Emotion_Recognition
    """
    def __init__(self, input_shape, n_kernels=[32, 16], kernel_sizes=[8, 8],
                 mpool_stride=[4, 4], fc_units=[64, 2]):
        """
        Class constructor for the creation of a static 1DCNN.
        Args:
            input_shape (2-tuple): (number of mel bands, frames).
            n_kernels (2-tuple): number of 1D filters per conv layer;
            kernel_sizes (2-tuple): size of kernels as number of frames;
            mpool_stride (2-tuple): strides of 1D max pooling (same as size);
            fc_units (2-tuple): number of units in the last fully-connected layers.
        TODO:
            - Class constructor from sample input instead of specifying nmel;
            - The parameterisation of the net can be more beautiful;
        """
        super(DeezerConv1d, self).__init__()
        # Track how many frames survive each conv (valid padding) + pool
        # stage, so we know the flattened size feeding the dense head.
        frames = np.floor((input_shape[1] - kernel_sizes[0] + 1) / mpool_stride[0])
        frames = np.floor((frames - kernel_sizes[1] + 1) / mpool_stride[1])
        self.flattened_size = int(frames * n_kernels[-1])
        stages = []
        in_channels = input_shape[0]
        for out_channels, k_size, p_stride in zip(n_kernels, kernel_sizes, mpool_stride):
            stages.append(nn.Sequential(
                nn.Conv1d(in_channels, out_channels, kernel_size=k_size),
                nn.MaxPool1d(p_stride, stride=p_stride),
                nn.BatchNorm1d(out_channels)))
            in_channels = out_channels
        self.conv_blocks = nn.Sequential(*stages)
        self._fcl = nn.Sequential(
            nn.Dropout(),
            nn.Linear(in_features=self.flattened_size, out_features=fc_units[0]),
            nn.ReLU(),
            nn.Dropout(),
            nn.Linear(in_features=fc_units[0], out_features=fc_units[1]),
        )
        self.apply(self._init_weights)
    def convolutional_features(self, x):
        """Run the conv stages and flatten each sample to a 1D vector."""
        features = self.conv_blocks(x)
        return features.view(features.size(0), -1)
    def forward(self, x):
        """Full forward pass: conv features followed by the dense head."""
        return self._fcl(self.convolutional_features(x))
    def _init_weights(self, layer) -> None:
        # Kaiming init for conv layers, Xavier init for linear layers.
        if isinstance(layer, nn.Conv1d):
            nn.init.kaiming_uniform_(layer.weight)
        elif isinstance(layer, nn.Linear):
            nn.init.xavier_uniform_(layer.weight)
class VGGishEmoNet(nn.Module):
    """
    A VGG-based 2dCNN typically used for music tagging and transfer learning as in:
    "Transfer learning for music classification and regression tasks"
    Architecture inspired from https://github.com/keunwoochoi/transfer_learning_music/
    """
    def __init__(
            self, input_shape, n_kernels=[32]*5, kernel_sizes=[(3,3)]*5,
            pooling_sizes=None, dropout=0., cnn_activation='elu', fc_units=2):
        """
        Class constructor for the creation of a static 2DCNN.
        Args:
            input_shape (2-tuple): (number of mel bands, frames).
            n_kernels (list): number of 2D filters per conv layer;
            kernel_sizes (list): size of kernels for each conv layer;
            pooling_sizes (list): size of each 2D maxpooling operation; when
                None they are derived from input_shape;
            dropout (float): probability of dropping out conv activations;
            cnn_activation (str): name of the activation function for conv layers;
            fc_units (int): number of units in the last fully-connected layer.
        """
        super(VGGishEmoNet, self).__init__()
        if pooling_sizes is None:
            # derive sensible poolings from the mel/frame resolution
            pooling_sizes = get_vggish_poolings_from_features(*input_shape)
        assert len(n_kernels) == len(kernel_sizes) == len(pooling_sizes)
        in_channels = [1] + n_kernels[:-1]
        layout = ['bn', 'act', 'pool', 'drop']
        stages = [
            create_2d_convolutional_block(
                ch_in, ch_out, k_size, layout,
                p_size, 1, 1, cnn_activation, dropout)
            for ch_in, ch_out, k_size, p_size
            in zip(in_channels, n_kernels, kernel_sizes, pooling_sizes)
        ]
        self.conv_blocks = nn.Sequential(*stages, nn.AdaptiveAvgPool2d((1, 1)))
        # the adaptive pooling collapses every map to 1x1, so the flattened
        # size is just the number of kernels of the last conv layer (no need
        # for compute_flattened_maps here)
        self.flattened_size = n_kernels[-1]
        self._fcl = nn.Sequential(
            nn.Linear(in_features=self.flattened_size, out_features=fc_units),
        )
    def convolutional_features(self, x):
        """Add a channel axis, run the conv stages, flatten per sample."""
        maps = self.conv_blocks(x.unsqueeze(1))
        return maps.view(maps.size(0), -1)
    def forward(self, x):
        """Full forward pass: conv features followed by the linear head."""
        return self._fcl(self.convolutional_features(x))
class VGGishExplainable(nn.Module):
    """
    A VGG-based 2dCNN designed for explainable MER, presented in:
    "Towards explainable MER, by using mid-level features".
    This is the model that is denoted as A2E in the paper.
    """
    def __init__(
        self, input_shape, n_kernels=[64, 64, 128, 128, 256, 256, 384, 512, 256],
        kernel_sizes=[(5,5)]+[(3,3)]*8, pooling_sizes=[(2, 2), (2, 2)],
        strides=[2]+[1]*8, paddings=[2]+[1]*7+[0], dropout=[.3, .3],
        cnn_activation='relu', fc_units=2):
        """
        Class constructor for the creation of a static 2DCNN.
        Args:
            input_shape (2-tuple): (number of mel bands, frames).
            n_kernels (list): number of 2D filters per conv layer;
            kernel_sizes (list): size of kernels for each conv layer;
            pooling_sizes (list): sizes of the two maxpooling operations
                inserted after the 2nd and 4th conv blocks;
            strides (list): stride of each conv layer;
            paddings (list): padding of each conv layer;
            dropout (list): dropout probabilities applied after each of the
                two maxpooling operations;
            cnn_activation (str): name of the activation function for conv layers;
            fc_units (int): number of units in the last fully-connected layer.
        """
        super(VGGishExplainable, self).__init__()
        # every conv layer needs a matching kernel size, stride and padding
        assert len(n_kernels) == len(kernel_sizes) == len(strides) == len(paddings)
        conv_input_shapes = [1] + n_kernels[:-1]
        # each block is conv + batch-norm + activation only; pooling and
        # dropout are interleaved explicitly below
        conv_blocks = [create_2d_convolutional_block(
            conv_input_shape, n_kernel, kernel_size, ['bn', 'act'],
            None, padding, stride, cnn_activation) \
            for conv_input_shape, n_kernel, kernel_size, padding, stride \
            in zip(conv_input_shapes, n_kernels, kernel_sizes, paddings, strides)]
        # layout: 2 conv blocks, pool+drop, 2 conv blocks, pool+drop,
        # remaining 5 conv blocks, global average pooling
        self.conv_blocks = nn.Sequential(
            *conv_blocks[:2],
            nn.MaxPool2d(pooling_sizes[0]),
            nn.Dropout(p=dropout[0]),
            *conv_blocks[2:4],
            nn.MaxPool2d(pooling_sizes[1]),
            nn.Dropout(p=dropout[1]),
            *conv_blocks[4:],
            nn.AdaptiveAvgPool2d((1, 1)),
        )
        # the following operation is not needed as we already have the adaptive pooling
        # flattened_size = compute_flattened_maps(self.conv_blocks, input_shape)
        self.flattened_size = n_kernels[-1]
        self._fcl = nn.Sequential(
            nn.Linear(in_features=self.flattened_size, out_features=fc_units),
        )
    def convolutional_features(self, x):
        """Add a channel axis, run the conv stack, and flatten per sample."""
        x = x.unsqueeze(1) # to ensure n_channels is 1
        x = self.conv_blocks(x)
        return x.view(x.size(0), -1)
    def forward(self, x):
        """Full forward pass: conv features followed by the linear head."""
        x_cnn_flat = self.convolutional_features(x)
        pred = self._fcl(x_cnn_flat)
        return pred
def get_vggish_poolings_from_features(n_mels=96, n_frames=1360):
    """
    Get the pooling sizes for the standard VGG-based model for audio tagging.
    Code from: https://github.com/keunwoochoi/transfer_learning_music/blob/master/models_transfer.py

    The frequency-axis poolings depend on how many mel bands are available;
    the time-axis poolings are tabulated for 1360 frames (30 s) and rescaled
    proportionally for other frame counts.

    Args:
        n_mels (int): number of mel bands of the input spectrogram;
        n_frames (int): number of time frames of the input spectrogram.
    Returns:
        list of five (freq, time) pooling sizes, one per conv block.
    """
    # (minimum n_mels, poolings) pairs, checked from largest to smallest.
    # NOTE: the original if/elif ladder contained a duplicated, unreachable
    # `n_mels >= 18` branch; the table collapses it to a single entry.
    pooling_table = [
        (256, [(2, 4), (4, 4), (4, 5), (2, 4), (4, 4)]),
        (128, [(2, 4), (4, 4), (2, 5), (2, 4), (4, 4)]),
        (96,  [(2, 4), (3, 4), (2, 5), (2, 4), (4, 4)]),
        (72,  [(2, 4), (3, 4), (2, 5), (2, 4), (3, 4)]),
        (64,  [(2, 4), (2, 4), (2, 5), (2, 4), (4, 4)]),
        (48,  [(2, 4), (2, 4), (2, 5), (2, 4), (3, 4)]),
        (32,  [(2, 4), (2, 4), (2, 5), (2, 4), (2, 4)]),
        (24,  [(2, 4), (2, 4), (2, 5), (3, 4), (1, 4)]),
        (18,  [(2, 4), (1, 4), (3, 5), (1, 4), (3, 4)]),
        (16,  [(2, 4), (2, 4), (2, 5), (2, 4), (1, 4)]),
        (12,  [(2, 4), (1, 4), (2, 5), (3, 4), (1, 4)]),
        (8,   [(2, 4), (1, 4), (2, 5), (2, 4), (1, 4)]),
        (6,   [(2, 4), (1, 4), (3, 5), (1, 4), (1, 4)]),
        (4,   [(2, 4), (1, 4), (2, 5), (1, 4), (1, 4)]),
        (2,   [(2, 4), (1, 4), (1, 5), (1, 4), (1, 4)]),
    ]
    poolings = [(1, 4), (1, 4), (1, 5), (1, 4), (1, 4)]  # n_mels == 1 fallback
    for min_mels, candidate in pooling_table:
        if n_mels >= min_mels:
            poolings = candidate
            break
    ratio = n_frames / 1360  # tabulated time poolings assume 1360 frames (30s)
    return [(pool_w, round(pool_l * ratio)) for pool_w, pool_l in poolings]
def simple_param_count(model):
    """Count the trainable parameters of ``model`` (requires_grad only)."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def tensor_shape_flows_through(conv_blocks, feat_shape):
    """
    Print how the tensor shape evolves through a stack of conv blocks.

    A random batch of two samples of shape ``feat_shape`` is pushed through
    ``conv_blocks`` (CPU, eval mode) and the shape is printed after every
    layer that is expected to change it (conv / pooling layers).

    Currently works just for a CNN...
    TODO:
        - Make it general for any network
    """
    print('Generating random batch of 2 x {} data'.format(feat_shape))
    x = torch.rand((2, *feat_shape))
    conv_blocks.eval()
    conv_blocks.to(device=torch.device('cpu'), dtype=torch.float)
    # BUGFIX: Tensor.to() is NOT in-place (unlike Module.to()); the result
    # must be reassigned, otherwise the call is a silent no-op.
    x = x.to(device=torch.device('cpu'), dtype=torch.float)
    print("Initial shape: {}".format(x.shape))
    for i, layer in enumerate(conv_blocks):
        if isinstance(layer, nn.Sequential):
            for j, sub_layer in enumerate(layer):
                x = sub_layer(x)
                if isinstance(sub_layer, (nn.Conv2d, nn.MaxPool2d)):
                    print("Layer {} ({}) | Shape after {}: {} "
                          .format(i, j, sub_layer.__class__.__name__, x.shape))
        else:
            x = layer(x)
            # only print if the layer is expected to affect the shape
            if isinstance(layer, (nn.Conv2d, nn.MaxPool2d, nn.AdaptiveAvgPool2d)):
                print("Layer {} | Shape after {}: {} "
                      .format(i, layer.__class__.__name__, x.shape))
"numpy.prod",
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.Tanh",
"torch.nn.Sequential",
"torch.nn.BatchNorm1d",
"torch.nn.MaxPool1d",
"torch.nn.BatchNorm2d",
"torch.nn.Sigmoid",
"torch.nn.init.xavier_uniform_",
"torch.nn.AdaptiveAvgPool2d",
"torch.randn",
"torch.nn.LeakyReLU",
"numpy.fl... | [((626, 657), 'torch.randn', 'torch.randn', (['(1)', '(1)', '*input_shape'], {}), '(1, 1, *input_shape)\n', (637, 657), False, 'import torch\n'), ((726, 746), 'numpy.prod', 'np.prod', (['x.shape[1:]'], {}), '(x.shape[1:])\n', (733, 746), True, 'import numpy as np\n'), ((14486, 14514), 'torch.rand', 'torch.rand', (['(2, *feat_shape)'], {}), '((2, *feat_shape))\n', (14496, 14514), False, 'import torch\n'), ((667, 682), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (680, 682), False, 'import torch\n'), ((1347, 1381), 'torch.nn.init.kaiming_uniform_', 'nn.init.kaiming_uniform_', (['m.weight'], {}), '(m.weight)\n', (1371, 1381), True, 'import torch.nn as nn\n'), ((2255, 2285), 'torch.nn.Linear', 'nn.Linear', (['in_feats', 'out_feats'], {}), '(in_feats, out_feats)\n', (2264, 2285), True, 'import torch.nn as nn\n'), ((2344, 2370), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout_prob'}), '(p=dropout_prob)\n', (2354, 2370), True, 'import torch.nn as nn\n'), ((2457, 2484), 'torch.nn.Sequential', 'nn.Sequential', (['*dense_block'], {}), '(*dense_block)\n', (2470, 2484), True, 'import torch.nn as nn\n'), ((3505, 3532), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['num_filters'], {}), '(num_filters)\n', (3519, 3532), True, 'import torch.nn as nn\n'), ((3591, 3614), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['pool_size'], {}), '(pool_size)\n', (3603, 3614), True, 'import torch.nn as nn\n'), ((3632, 3658), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout_prob'}), '(p=dropout_prob)\n', (3642, 3658), True, 'import torch.nn as nn\n'), ((3705, 3782), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_feats', 'num_filters', 'filter_size'], {'padding': 'padding', 'stride': 'stride'}), '(in_feats, num_filters, filter_size, padding=padding, stride=stride)\n', (3714, 3782), True, 'import torch.nn as nn\n'), ((147, 159), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (157, 159), True, 'import torch.nn as nn\n'), ((187, 196), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', 
(194, 196), True, 'import torch.nn as nn\n'), ((225, 239), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (237, 239), True, 'import torch.nn as nn\n'), ((267, 276), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (274, 276), True, 'import torch.nn as nn\n'), ((304, 313), 'torch.nn.SELU', 'nn.SELU', ([], {}), '()\n', (311, 313), True, 'import torch.nn as nn\n'), ((340, 348), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (346, 348), True, 'import torch.nn as nn\n'), ((1426, 1459), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['m.weight'], {}), '(m.weight)\n', (1449, 1459), True, 'import torch.nn as nn\n'), ((5812, 5824), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (5822, 5824), True, 'import torch.nn as nn\n'), ((5838, 5906), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'self.flattened_size', 'out_features': 'fc_units[0]'}), '(in_features=self.flattened_size, out_features=fc_units[0])\n', (5847, 5906), True, 'import torch.nn as nn\n'), ((5969, 5978), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5976, 5978), True, 'import torch.nn as nn\n'), ((5992, 6004), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (6002, 6004), True, 'import torch.nn as nn\n'), ((6018, 6078), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'fc_units[0]', 'out_features': 'fc_units[1]'}), '(in_features=fc_units[0], out_features=fc_units[1])\n', (6027, 6078), True, 'import torch.nn as nn\n'), ((6498, 6536), 'torch.nn.init.kaiming_uniform_', 'nn.init.kaiming_uniform_', (['layer.weight'], {}), '(layer.weight)\n', (6522, 6536), True, 'import torch.nn as nn\n'), ((8562, 8590), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1, 1)'], {}), '((1, 1))\n', (8582, 8590), True, 'import torch.nn as nn\n'), ((8866, 8931), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'self.flattened_size', 'out_features': 'fc_units'}), '(in_features=self.flattened_size, out_features=fc_units)\n', (8875, 8931), True, 'import torch.nn as nn\n'), ((11132, 11162), 
'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['pooling_sizes[0]'], {}), '(pooling_sizes[0])\n', (11144, 11162), True, 'import torch.nn as nn\n'), ((11176, 11200), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout[0]'}), '(p=dropout[0])\n', (11186, 11200), True, 'import torch.nn as nn\n'), ((11245, 11275), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['pooling_sizes[1]'], {}), '(pooling_sizes[1])\n', (11257, 11275), True, 'import torch.nn as nn\n'), ((11289, 11313), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout[1]'}), '(p=dropout[1])\n', (11299, 11313), True, 'import torch.nn as nn\n'), ((11357, 11385), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1, 1)'], {}), '((1, 1))\n', (11377, 11385), True, 'import torch.nn as nn\n'), ((11675, 11740), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'self.flattened_size', 'out_features': 'fc_units'}), '(in_features=self.flattened_size, out_features=fc_units)\n', (11684, 11740), True, 'import torch.nn as nn\n'), ((14565, 14584), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (14577, 14584), False, 'import torch\n'), ((14621, 14640), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (14633, 14640), False, 'import torch\n'), ((5338, 5406), 'torch.nn.Conv1d', 'nn.Conv1d', (['input_shape[0]', 'n_kernels[0]'], {'kernel_size': 'kernel_sizes[0]'}), '(input_shape[0], n_kernels[0], kernel_size=kernel_sizes[0])\n', (5347, 5406), True, 'import torch.nn as nn\n'), ((5424, 5477), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (['mpool_stride[0]'], {'stride': 'mpool_stride[0]'}), '(mpool_stride[0], stride=mpool_stride[0])\n', (5436, 5477), True, 'import torch.nn as nn\n'), ((5495, 5523), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_kernels[0]'], {}), '(n_kernels[0])\n', (5509, 5523), True, 'import torch.nn as nn\n'), ((5569, 5635), 'torch.nn.Conv1d', 'nn.Conv1d', (['n_kernels[0]', 'n_kernels[1]'], {'kernel_size': 'kernel_sizes[1]'}), '(n_kernels[0], n_kernels[1], kernel_size=kernel_sizes[1])\n', 
(5578, 5635), True, 'import torch.nn as nn\n'), ((5653, 5706), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (['mpool_stride[1]'], {'stride': 'mpool_stride[1]'}), '(mpool_stride[1], stride=mpool_stride[1])\n', (5665, 5706), True, 'import torch.nn as nn\n'), ((5724, 5752), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_kernels[1]'], {}), '(n_kernels[1])\n', (5738, 5752), True, 'import torch.nn as nn\n'), ((6593, 6630), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['layer.weight'], {}), '(layer.weight)\n', (6616, 6630), True, 'import torch.nn as nn\n'), ((5103, 5169), 'numpy.floor', 'np.floor', (['((input_shape[1] - kernel_sizes[0] + 1) / mpool_stride[0])'], {}), '((input_shape[1] - kernel_sizes[0] + 1) / mpool_stride[0])\n', (5111, 5169), True, 'import numpy as np\n')] |
# coding=utf-8
import math
import types
import numpy as np
import pandas as pd
from ....data.materials.CompositionEntry import CompositionEntry
from ....data.materials.util.LookUpData import LookUpData
class YangOmegaAttributeGenerator:
"""Class to compute the attributes :math:`\Omega` and :math:`\delta`
developed by Yang and Zhang [1].
These parameters are based on the liquid formation enthalpy and atomic
sizes of elements respectively and were originally developed to predict
whether a metal alloy will form a solid solution of bulk metallic glass.
Notes
-----
:math: `\Omega` is derived from the melting temperature, ideal mixing
entropy, and regular solution solution interaction parameter (
:math: `\Omega_{i,j}`) predicted by the Miedema model for binary liquids.
Specifically, it is computed using the relationship:
.. math:: \Omega = \displaystyle\frac{T_m \Delta S_{mix}} {|\Delta H_{mix}|}
where :math: `T_m` is the composition-weighted average of the melting
temperature, :math: `\Delta S_{mix}` is the ideal solution entropy,
and :math: `\Delta H_{mix}` is the mixing enthalpy. The mixing enthalpy
is computed using the Miedema mixing enthalpies tabulated by Takeuchi and
Inoue [2] where:
.. math:: \Delta H_{mix} = \displaystyle\sum \Omega_{i,j} c_i c_j
and :math: `\Omega_{i,j} = 4 * \Delta H_{mix}`.
:math: `\delta` is related to the polydispersity of atomic sizes, and is
computed using the relationship:
.. math:: \delta = [\displaystyle\sum c_i (1 - \frac{r_i}{r_{
average})^2]^0.5
where :math: `r_i` is the atomic size. Here, we use the atomic radii
compiled by Miracle et al. [3] rather than those compiled by Kittel,
as in the original work.
References
----------
.. [1] <NAME> and <NAME>, "Prediction of high-entropy stabilized
solid-solution in multi-component alloys," Materials Chemistry and
Physics, vol. 132, no. 2--3, pp. 233--238, Feb. 2012.
.. [2] <NAME> and <NAME>, "Classification of Bulk Metallic Glasses
by Atomic Size Difference, Heat of Mixing and Period of Constituent
Elements and Its Application to Characterization of the Main Alloying
Element," MATERIALS TRANSACTIONS, vol. 46, no. 12, pp. 2817--2829, 2005.
.. [3] <NAME>, <NAME>, <NAME>,
and <NAME>, "An assessment of binary metallic glasses: correlations
between structure, glass forming ability and stability," International
Materials Reviews, vol. 55, no. 4, pp. 218--256, Jul. 2010.
"""
def generate_features(self, entries):
"""Function to generate features as mentioned in the class description.
Parameters
----------
entries : array-like
Compositions for which features are to be generated. A list of
CompositionEntry's.
Returns
----------
features : DataFrame
Features for the given entries. Pandas data frame containing the
names and values of the descriptors.
Raises
------
ValueError
If input is not of type list.
If items in the list are not CompositionEntry instances.
"""
# Initialize lists of feature values and headers for pandas data frame.
feat_values = []
feat_headers = []
# Raise exception if input argument is not of type list of
# CompositionEntry's.
if not isinstance(entries, list):
raise ValueError("Argument should be of type list of "
"CompositionEntry's")
elif (entries and not isinstance(entries[0], CompositionEntry)):
raise ValueError("Argument should be of type list of "
"CompositionEntry's")
# Insert header names here.
feat_headers.append("Yang_Omega")
feat_headers.append("Yang_delta")
# Load property values here.
radii = LookUpData.load_property("MiracleRadius")
meltingT = LookUpData.load_property("MeltingT")
miedema = LookUpData.load_pair_property("MiedemaLiquidDeltaHf")
for entry in entries:
tmp_list = []
tmp_radii = []
tmp_meltingT = []
elem_fracs = entry.get_element_fractions()
elem_ids = entry.get_element_ids()
for elem_id in elem_ids:
tmp_radii.append(radii[elem_id])
tmp_meltingT.append(meltingT[elem_id])
# Compute the average melting point.
averageTm = np.average(tmp_meltingT, weights=elem_fracs)
# Compute the ideal entropy.
entropy = 0.0
for f in elem_fracs:
entropy += f*math.log(f) if f > 0 else 0.0
entropy *= 8.314/1000
# Compute the enthalpy
enthalpy = 0.0
for i in range(len(elem_ids)):
for j in range(i + 1, len(elem_ids)):
enthalpy += miedema[max(elem_ids[i], elem_ids[j])][min(
elem_ids[i], elem_ids[j])] * elem_fracs[i] * \
elem_fracs[j]
enthalpy *= 4
# Compute omega
tmp_list.append(abs(averageTm * entropy / enthalpy))
# Compute delta
delta_squared = 0.0
average_r = np.average(tmp_radii, weights=elem_fracs)
for i in range(len(elem_ids)):
delta_squared += elem_fracs[i] * (1 - tmp_radii[i] /
average_r)**2
tmp_list.append(math.sqrt(delta_squared))
feat_values.append(tmp_list)
features = pd.DataFrame(feat_values, columns=feat_headers)
return features
| [
"pandas.DataFrame",
"math.sqrt",
"math.log",
"numpy.average"
] | [((5705, 5752), 'pandas.DataFrame', 'pd.DataFrame', (['feat_values'], {'columns': 'feat_headers'}), '(feat_values, columns=feat_headers)\n', (5717, 5752), True, 'import pandas as pd\n'), ((4572, 4616), 'numpy.average', 'np.average', (['tmp_meltingT'], {'weights': 'elem_fracs'}), '(tmp_meltingT, weights=elem_fracs)\n', (4582, 4616), True, 'import numpy as np\n'), ((5370, 5411), 'numpy.average', 'np.average', (['tmp_radii'], {'weights': 'elem_fracs'}), '(tmp_radii, weights=elem_fracs)\n', (5380, 5411), True, 'import numpy as np\n'), ((5617, 5641), 'math.sqrt', 'math.sqrt', (['delta_squared'], {}), '(delta_squared)\n', (5626, 5641), False, 'import math\n'), ((4747, 4758), 'math.log', 'math.log', (['f'], {}), '(f)\n', (4755, 4758), False, 'import math\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 27 15:54:52 2019
@author: <NAME>.
"""
#importing the libraries.
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.autograd as variable
from sklearn.model_selection import train_test_split
from RBM import * #self made Restricted Boltzmann Machines class.
from hyperopt import *
# Importing the dataset and specifying some required variables.
# MovieLens 100k split u1: rows are [user_id, movie_id, rating, timestamp],
# tab separated, ids 1-based.
training_set = pd.read_csv('ml-100k/u1.base', delimiter = '\t').values
test_set = pd.read_csv('ml-100k/u1.test',delimiter = '\t').values
# Hold out half of the official test fold as a cross-validation set.
test_set, val_set = train_test_split(test_set, test_size = 0.5, random_state = 42)
# Total number of users/movies seen in the two splits.
# NOTE(review): test_set was already halved above, so ids that went to
# val_set are not considered here -- confirm the maxima are still covered.
nb_users = int(max(max(training_set[:,0]), max(test_set[:,0])))
nb_movies = int(max(max(training_set[:,1]), max(test_set[:,1])))
#converting the dataset to plausible format. (each row = user, each column movie = -1 if not rated, 0 if disliked, 1 if liked.)
def convert(data):
    """Convert raw [user, movie, rating] rows into a users x movies tensor.

    Parameters
    ----------
    data : numpy.ndarray
        Raw rating rows with columns [user_id, movie_id, rating, ...];
        ids are 1-based as in ml-100k.

    Returns
    -------
    torch.FloatTensor
        Shape (nb_users, nb_movies); each cell is -1 (not rated),
        0 (rating 1-2, disliked) or 1 (rating 3-5, liked).
    """
    l0 = []
    # BUG FIX: user ids are 1-based, so iterate 1..nb_users; the old
    # range(nb_users) looked up a nonexistent user 0 and dropped the
    # last user entirely.
    for user in range(1, nb_users + 1):
        l1 = np.zeros(nb_movies)
        usrs_movies = data[:,1][data[:,0] == user]
        l1[usrs_movies-1] = data[:,2][data[:,0] == user]
        l0.append(list(l1))
    l0 = torch.FloatTensor(l0)
    # BUG FIX: the rating remap must be applied to the returned tensor l0.
    # Previously it mutated the raw `data` array *after* l0 was built, so
    # the function returned raw 0-5 ratings instead of -1/0/1.
    l0[l0 == 0] = -1   # unrated cells (zeros from np.zeros) -> -1
    l0[l0 == 1] = 0    # ratings 1-2 -> disliked
    l0[l0 == 2] = 0
    l0[l0 == 3] = 1    # ratings 3-5 -> liked
    l0[l0 >= 4] = 1
    return l0
# Converting our sets according to given function: each becomes a
# (nb_users x nb_movies) FloatTensor (intended encoding: -1 = unrated,
# 0 = disliked, 1 = liked).
training_set = convert(training_set)
test_set = convert(test_set)
val_set = convert(val_set)
#Making our RBM and tuning hyperparameters.
#Description of Hparams taken by the train_rbm:
#nv = nb_movies
#nh = 200 (200 features to learn).
#batch_size = 128 (no. of training examples to take for batch learning).
#epochs = 20 (no. of iterations through the training set).
#rbm = RBM(nv, nh) (Object declaration of the RBM class).
# Function to score a trained RBM: mean absolute error on the held-out set.
def validate_rbm(rbm):
    """Return the MAE of ``rbm`` reconstructions against ``test_set``.

    For every user, the visible units are driven by the user's training
    row, passed once through the hidden layer and back, and the
    reconstruction is compared to the test row on rated entries only
    (entries >= 0). Users with no rated test entries are skipped.
    """
    total_error = 0.0
    counted = 0.0
    for user in range(nb_users):
        visible = training_set[user:user+1]
        target = test_set[user:user+1]
        rated = target >= 0
        # Skip users that rated nothing in the test split.
        if len(target[rated]) == 0:
            continue
        _, hidden = rbm.sample_h(visible)
        _, visible = rbm.sample_v(hidden)
        total_error += torch.mean(torch.abs(target[rated] - visible[rated]))
        counted += 1.0
    return float(total_error / counted)
# Function to train the RBM based on given Hparams (contrastive divergence).
def train_rbm(nv, nh, batch_size, epochs, val_set, gibbs_sampling_nb, rbm = None):
    """Train an RBM with CD-k on ``val_set``; return (last epoch loss, rbm).

    ``nv``/``nh`` are the visible/hidden layer sizes and
    ``gibbs_sampling_nb`` is the number of Gibbs steps per batch (the k
    of CD-k). An existing ``rbm`` may be passed in to continue training;
    otherwise a fresh one is constructed.
    """
    if rbm == None:
        rbm = RBM(nv, nh)
    for epoch in range(1, epochs + 1):
        epoch_loss = 0
        n_batches = 0.0
        for user in range(0, nb_users - batch_size, batch_size):
            v0 = val_set[user:user + batch_size]
            vk = val_set[user:user + batch_size]
            # CD-k: alternate hidden/visible sampling while clamping the
            # unrated (negative) cells back to their original values.
            for step in range(gibbs_sampling_nb):
                _, hk = rbm.sample_h(vk)
                _, vk = rbm.sample_v(hk)
                vk[v0 < 0] = v0[v0 < 0]
            phk, _ = rbm.sample_h(vk)
            ph0, _ = rbm.sample_h(v0)
            rbm.train(v0, vk, ph0, phk)
            # MAE computed over rated entries only.
            epoch_loss += torch.mean(torch.abs(v0[v0 >= 0] - vk[v0 >= 0]))
            n_batches += 1
    return float(epoch_loss / n_batches), rbm
# Defining the Hparam space for hyperopt (all discrete hp.choice grids).
space = {
    # number of hidden units
    'nh' : hp.choice('nh', [int (x) for x in range(100,500,50)]),
    # mini-batch size
    'batch_size' : hp.choice('batch_size',[int (x) for x in range(32, 256, 16)]),
    # number of passes through the training set
    'epochs' : hp.choice('epochs', [int (x) for x in range(10,50,10)]),
    # Gibbs sampling steps per batch (the k of CD-k)
    'gibbs_sampling_nb' : hp.choice( 'gibbs_sampling_nb', [int (x) for x in range(5,30,5)])
}
# Hyperopt objective: train an RBM with the sampled hparams and report the
# validation MAE (from validate_rbm) as the loss to minimize.
def RBM_opt_fn(space):
    """Train on ``training_set`` with one hparam sample and return the
    hyperopt result dict ``{'loss': ..., 'status': STATUS_OK}``."""
    nh = space['nh']
    batch_size = space['batch_size']
    epochs = space['epochs']
    gibbs_sampling_nb = space['gibbs_sampling_nb']
    # The number of visible units is fixed by the movie count.
    _, rbm = train_rbm(nb_movies, nh, batch_size, epochs, training_set,
                       gibbs_sampling_nb)
    val = validate_rbm(rbm)
    print('Hyperopt Loss:', val, 'nh:', nh, 'batsz:', batch_size,
          'ep:', epochs, 'gsnb:', gibbs_sampling_nb)
    return {'loss': val, 'status': STATUS_OK}
# Getting the best params using hyperopt's TPE search over 100 trials.
trials = Trials()
best = fmin(
        fn = RBM_opt_fn,
        space = space,
        algo = tpe.suggest,
        max_evals = 100,
        trials = trials
        )
# ``best`` holds index positions into the hp.choice grids defined above.
print(best)
#best params : nh = 100, bs = 80, epochs = 40, gsnb = 5.
# training_loss = 0
# s = 0.0
# for user in range(0, nb_users-batch_size, batch_size):
# vk = training_set[user:user+batch_size]
# v0 = training_set[user:user+batch_size]
# for sample in range(gibbs_sampling_nb):
# _,hk = rbm.sample_h(vk)
# _,vk = rbm.sample_v(hk)
# vk[v0 <0] = v0[v0 <0]
# phk,_ = rbm.sample_h(vk)
# ph0,_ = rbm.sample_h(v0)
# rbm.train(v0,vk,ph0,phk)
# training_loss += torch.mean(torch.abs(v0[v0 >=0 ] - vk[v0 >=0]))
# s+=1
# print("Epoch:", epoch, "Training Loss:", training_loss/s)
# Testing the RBM against the test set (MAE over rated entries only).
# NOTE(review): ``rbm`` is never assigned at module scope -- it was local to
# train_rbm/RBM_opt_fn -- so as written this block raises NameError; a model
# must be trained and bound to ``rbm`` first. Confirm intended usage.
test_loss_mae = 0.0
s = 0.0
for user in range(nb_users):
    # Drive the visible layer with the training row, reconstruct once, and
    # compare against the held-out test row.
    v = training_set[user:user+1]
    v0 = test_set[user:user+1]
    if(len(v0[v0>=0]) > 0):
        _,h = rbm.sample_h(v)
        _,v = rbm.sample_v(h)
        test_loss_mae += torch.mean(torch.abs(v0[v0 >=0 ] - v[v0 >=0]))
        s+=1.0
print("Test Loss Mean Absolute Error:", test_loss_mae/s) | [
"torch.abs",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.zeros",
"torch.FloatTensor"
] | [((637, 695), 'sklearn.model_selection.train_test_split', 'train_test_split', (['test_set'], {'test_size': '(0.5)', 'random_state': '(42)'}), '(test_set, test_size=0.5, random_state=42)\n', (653, 695), False, 'from sklearn.model_selection import train_test_split\n'), ((495, 541), 'pandas.read_csv', 'pd.read_csv', (['"""ml-100k/u1.base"""'], {'delimiter': '"""\t"""'}), "('ml-100k/u1.base', delimiter='\\t')\n", (506, 541), True, 'import pandas as pd\n'), ((562, 608), 'pandas.read_csv', 'pd.read_csv', (['"""ml-100k/u1.test"""'], {'delimiter': '"""\t"""'}), "('ml-100k/u1.test', delimiter='\\t')\n", (573, 608), True, 'import pandas as pd\n'), ((1200, 1221), 'torch.FloatTensor', 'torch.FloatTensor', (['l0'], {}), '(l0)\n', (1217, 1221), False, 'import torch\n'), ((1035, 1054), 'numpy.zeros', 'np.zeros', (['nb_movies'], {}), '(nb_movies)\n', (1043, 1054), True, 'import numpy as np\n'), ((5448, 5483), 'torch.abs', 'torch.abs', (['(v0[v0 >= 0] - v[v0 >= 0])'], {}), '(v0[v0 >= 0] - v[v0 >= 0])\n', (5457, 5483), False, 'import torch\n'), ((2225, 2260), 'torch.abs', 'torch.abs', (['(v0[v0 >= 0] - v[v0 >= 0])'], {}), '(v0[v0 >= 0] - v[v0 >= 0])\n', (2234, 2260), False, 'import torch\n'), ((3064, 3100), 'torch.abs', 'torch.abs', (['(v0[v0 >= 0] - vk[v0 >= 0])'], {}), '(v0[v0 >= 0] - vk[v0 >= 0])\n', (3073, 3100), False, 'import torch\n')] |
'''
SpeakDiar.py
21 audio recordings of academic conferences making up the NIST speaker
diarization dataset, created to asses the ability of different models to
segment speech data into unique speakers.
The 21 recordings are meant to be trained on independently.
Thus, get_data() takes a meetingNum parameter (default 1)
which determines which sequence will be loaded.
The meeting number can be changed with the argument
--meetingNum 3
Notes
-----
rttm format specification:
http://www.itl.nist.gov/iad/mig/tests/rt/2003-fall/docs/RTTM-format-v13.pdf
'''
import numpy as np
from bnpy.data import GroupXData
import scipy.io
import os
import sys
suffix = '_Nmeans25features_SpNsp'
fileNames = [
'AMI_20041210-1052_Nmeans25features_SpNsp.mat',
'AMI_20050204-1206_Nmeans25features_SpNsp.mat',
'CMU_20050228-1615_Nmeans25features_SpNsp.mat',
'CMU_20050301-1415_Nmeans25features_SpNsp.mat',
'CMU_20050912-0900_Nmeans25features_SpNsp.mat',
'CMU_20050914-0900_Nmeans25features_SpNsp.mat',
'EDI_20050216-1051_Nmeans25features_SpNsp.mat',
'EDI_20050218-0900_Nmeans25features_SpNsp.mat',
'ICSI_20000807-1000_Nmeans25features_SpNsp.mat',
'ICSI_20010208-1430_Nmeans25features_SpNsp.mat',
'LDC_20011116-1400_Nmeans25features_SpNsp.mat',
'LDC_20011116-1500_Nmeans25features_SpNsp.mat',
'NIST_20030623-1409_Nmeans25features_SpNsp.mat',
'NIST_20030925-1517_Nmeans25features_SpNsp.mat',
'NIST_20051024-0930_Nmeans25features_SpNsp.mat',
'NIST_20051102-1323_Nmeans25features_SpNsp.mat',
'TNO_20041103-1130_Nmeans25features_SpNsp.mat',
'VT_20050304-1300_Nmeans25features_SpNsp.mat',
'VT_20050318-1430_Nmeans25features_SpNsp.mat',
'VT_20050623-1400_Nmeans25features_SpNsp.mat',
'VT_20051027-1400_Nmeans25features_SpNsp.mat']
# Directory containing this module; the raw .mat files are expected under
# <datasetdir>/rawData/speakerDiarizationData.
datasetdir = os.path.sep.join(
    os.path.abspath(__file__).split(
        os.path.sep)[
        :-
        1])
if not os.path.isdir(datasetdir):
    raise ValueError('CANNOT FIND DATASET DIRECTORY:\n' + datasetdir)
def get_data(meetingNum=1, **kwargs):
    ''' Load the pre-processed audio data for one of the 21 meetings.

    Args
    ----
    meetingNum : int
        One-based index selecting which of the 21 recordings to load.
        Must be a valid number in range [1, 2, 3, ... 21].

    Returns
    -------
    Data : GroupXData
        Dataset object holding only the selected sequence.
    '''
    # Validate the one-based meeting index.
    if meetingNum <= 0 or meetingNum > len(fileNames):
        raise ValueError('Bad value for meetingNum: %s' % (meetingNum))
    # The stored .mat files use the bare name (preprocessing suffix removed).
    fName = fileNames[meetingNum - 1].replace(suffix, '')
    matfilepath = os.path.join(
        datasetdir, 'rawData', 'speakerDiarizationData', fName)
    if not os.path.isfile(matfilepath):
        raise ValueError(
            'CANNOT FIND SPEAKDIAR DATASET MAT FILE:\n' + matfilepath)
    Data = GroupXData.read_from_mat(matfilepath)
    Data.summary = \
        'Pre-processed audio data from NIST file %s (meeting %d / 21)' \
        % (fName.replace(suffix, ''), meetingNum)
    Data.name = 'SpeakerDiar' + str(meetingNum)
    Data.fileNames = [fName]
    return Data
def createBetterBNPYDatasetFromMATFiles():
    ''' Rewrite each raw MAT file with a relabeled state sequence.

    Post Condition
    --------------
    rawData directory contains files of the form:
        EDI_20050216-1051.mat
    '''
    # Hoisted out of the loop: the raw-data directory never changes.
    rawdir = os.path.expandvars(
        '$BNPYDATADIR/rawData/speakerDiarizationData')
    for file in fileNames:
        matfilepath = os.path.join(rawdir, file)
        SavedVars = scipy.io.loadmat(matfilepath)
        outmatpath = matfilepath.replace(suffix, '')
        print(file.replace(suffix, ''))
        # Speaker ids become rank order; silence/overlap become -1/-2.
        SavedVars['TrueZ'] = \
            relabelStateSeqWithNegativeIDsForNonspeakerIntervals(
                SavedVars['TrueZ'])
        scipy.io.savemat(outmatpath, SavedVars)
def relabelStateSeqWithNegativeIDsForNonspeakerIntervals(Z):
    ''' Relabel state sequence so nonspeaker intervals get negative ids.

    Returns
    -------
    Znew : 1D array, size of Z
        Speaker states are renumbered 0, 1, ..., K-1 from most to least
        common; silence (raw id 0) becomes -1 and overlap (raw id 10)
        becomes -2.
    '''
    # Speaker labels are the raw ids strictly between 0 and 10.
    speakerIDs = np.asarray([u for u in np.unique(Z) if u > 0 and u < 10])
    counts = np.asarray([np.sum(Z == u) for u in speakerIDs])
    rankOrder = np.argsort(-1 * counts)
    Znew = np.zeros_like(Z, dtype=np.int32)
    coveredFrac = 0
    for newID, oldID in enumerate(speakerIDs[rankOrder]):
        Znew[Z == oldID] = newID
        nTstep = np.sum(Z == oldID)
        frac = nTstep / float(Z.size)
        coveredFrac += frac
        print('state %3d: %5d tsteps (%.3f, %.3f)' % (
            newID, nTstep, frac, coveredFrac))
    Znew[Z == 0] = -1
    Znew[Z == 10] = -2
    for oldID in [-1, -2]:
        nTstep = np.sum(Znew == oldID)
        frac = nTstep / float(Z.size)
        coveredFrac += frac
        print('state %3d: %5d tsteps (%.3f, %.3f)' % (
            oldID, nTstep, frac, coveredFrac))
    # Every timestep must be accounted for exactly once.
    assert np.allclose(1.0, coveredFrac)
    return Znew
def createBNPYDatasetFromOriginalMATFiles(dataPath):
    ''' Convert the original NIST MAT files into the bnpy data layout.

    Reads each original file from ``dataPath`` and writes X / TrueZ /
    doc_range fields under $BNPYDATADIR/rawData/speakerDiarizationData.
    '''
    outdir = os.path.expandvars(
        '$BNPYDATADIR/rawData/speakerDiarizationData')
    for file in fileNames:
        data = scipy.io.loadmat(os.path.join(dataPath, file))
        TrueZ = data['zsub']
        SaveDict = {
            'X': np.transpose(data['u']),
            'TrueZ': TrueZ,
            # A single sequence spanning all timesteps.
            'doc_range': [0, np.size(TrueZ)],
        }
        scipy.io.savemat(os.path.join(outdir, file), SaveDict)
def plotXPairHistogram(meetingNum=1, dimIDs=[0, 1, 2, 3], numStatesToShow=3):
    ''' Scatter-plot pairs of feature dimensions, colored by true state.

    Draws a D x D grid of subplots (D = len(dimIDs)); each off-diagonal
    panel shows the data of the numStatesToShow most frequent true states
    projected onto a pair of feature dimensions.

    NOTE(review): dimIDs is a mutable default argument; harmless here
    because it is only read, never mutated.
    '''
    from matplotlib import pylab
    Data = get_data(meetingNum=meetingNum)
    TrueZ = Data.TrueParams['Z']
    # Rank true labels by how many timesteps each occupies.
    uniqueLabels = np.unique(TrueZ)
    sizeOfLabels = np.asarray(
        [np.sum(TrueZ == labelID) for labelID in uniqueLabels])
    sortIDs = np.argsort(-1 * sizeOfLabels)
    topLabelIDs = uniqueLabels[sortIDs[:numStatesToShow]]
    Colors = ['k', 'r', 'b', 'm', 'c']
    D = len(dimIDs)
    pylab.subplots(nrows=len(dimIDs), ncols=len(dimIDs))
    for id1, d1 in enumerate(dimIDs):
        for id2, d2 in enumerate(dimIDs):
            pylab.subplot(D, D, id2 + D * id1 + 1)
            # Diagonal panels are left blank.
            if id1 == id2:
                pylab.xticks([])
                pylab.yticks([])
                continue
            # NOTE(review): pylab.hold() was removed in matplotlib >= 3.0;
            # this call only works with an old matplotlib. Confirm pinning.
            pylab.hold('on')
            # Above the diagonal, draw states in reversed order so the
            # overplotting order differs from the mirrored lower panel.
            if id1 < id2:
                order = reversed([x for x in enumerate(topLabelIDs)])
            else:
                order = enumerate(topLabelIDs)
            # Both triangles plot the same (min, max) dimension pair.
            cur_d1 = np.minimum(d1, d2)
            cur_d2 = np.maximum(d1, d2)
            for kID, labelID in order:
                dataIDs = TrueZ == labelID
                pylab.plot(Data.X[dataIDs, cur_d1],
                           Data.X[dataIDs, cur_d2], '.',
                           color=Colors[kID], markeredgecolor=Colors[kID])
            pylab.ylim([-25, 25])
            pylab.xlim([-25, 25])
            # Only the outer edge of the grid keeps its tick labels.
            if (id2 > 0):
                pylab.yticks([])
            if (id1 < D - 1):
                pylab.xticks([])
    '''
    # make a color map of fixed colors
    from matplotlib import colors
    cmap = colors.ListedColormap(['white'] + Colors[:3])
    bounds = [0, 1, 2, 3, 4]
    norm = colors.BoundaryNorm(bounds, cmap.N)
    Z = np.zeros(Data.X.shape)
    for kID, labelID in enumerate(topLabelIDs):
        curIDs = TrueZ == labelID
        Z[curIDs, :] = bounds[kID + 1]
    pylab.subplots(nrows=1, ncols=2)
    ax = pylab.subplot(1, 2, 1)
    pylab.imshow(Z.T, interpolation='nearest',
                 cmap=cmap,
                 aspect=Z.shape[0] / float(Z.shape[1]),
                 vmin=bounds[0],
                 vmax=bounds[-1],
                 )
    pylab.yticks([])
    pylab.subplot(1, 2, 2, sharex=ax)
    for d in dimIDs:
        pylab.plot(np.arange(Z.shape[0]), 10 * d + Data.X[:, d] / 25, 'k.-')
    '''
    pylab.show()
def plotBlackWhiteStateSeqForMeeting(meetingNum=1, badUIDs=[-1, -2],
                                     **kwargs):
    ''' Plot the true state sequence of one meeting as a black/white image.

    Make plot like in Fig. 3 of AOAS paper: one row per speaker state
    (ranked by how often it occurs), with black marks where that state is
    active.

    Parameters
    ----------
    meetingNum : int
        One-based index of the meeting to plot.
    badUIDs : list of int
        State ids treated as non-speaker (silence / overlap); counted and
        printed but not drawn as rows. (Mutable default is safe: only read.)
    '''
    from matplotlib import pylab
    # BUG FIX: use the meetingNum parameter, not the module-level ``args``
    # (which only exists when this file is run as a script).
    Data = get_data(meetingNum=meetingNum)
    Z = np.asarray(Data.TrueParams['Z'], dtype=np.int32)
    uLabels = np.unique(Z)
    uLabels = np.asarray([u for u in uLabels if u not in badUIDs])
    sizes = np.asarray([np.sum(Z == u) for u in uLabels])
    sortIDs = np.argsort(-1 * sizes)
    # One image row per ranked speaker state; row 0 stays blank as margin.
    # NOTE(review): assumes at most 9 speaker states -- confirm.
    Zim = np.zeros((10, Z.size))
    for rankID, uID in enumerate(uLabels[sortIDs]):
        Zim[1 + rankID, Z == uID] = 1
        size = sizes[sortIDs[rankID]]
        frac = size / float(Z.size)
        print('state %3d: %5d tsteps (%.3f)' % (rankID + 1, size, frac))
    # Report (but do not draw) the non-speaker states.
    for uID in badUIDs:
        size = np.sum(Z == uID)
        frac = size / float(Z.size)
        print('state %3d: %5d tsteps (%.3f)' % (uID, size, frac))
    pylab.imshow(1 - Zim,
                 interpolation='nearest',
                 aspect=Zim.shape[1] / float(Zim.shape[0]) / 3,
                 cmap='bone',
                 vmin=0,
                 vmax=1,
                 origin='lower')
    pylab.show()
if __name__ == '__main__':
    import argparse
    # Command-line entry point: choose a meeting, which feature dimensions
    # to visualize, and how many states to color.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dimIDs', default='0,1,2,3')
    parser.add_argument('--meetingNum', type=int, default=1)
    parser.add_argument('--numStatesToShow', type=int, default=4)
    args = parser.parse_args()
    # Parse the comma-separated dim list into ints.
    args.dimIDs = [int(x) for x in args.dimIDs.split(',')]
    # plotBlackWhiteStateSeqForMeeting(**args.__dict__)
    plotXPairHistogram(**args.__dict__)
| [
"matplotlib.pylab.xlim",
"matplotlib.pylab.xticks",
"numpy.argsort",
"matplotlib.pylab.hold",
"bnpy.data.GroupXData.read_from_mat",
"matplotlib.pylab.show",
"argparse.ArgumentParser",
"numpy.asarray",
"os.path.isdir",
"numpy.maximum",
"matplotlib.pylab.plot",
"numpy.allclose",
"numpy.size",
... | [((1925, 1950), 'os.path.isdir', 'os.path.isdir', (['datasetdir'], {}), '(datasetdir)\n', (1938, 1950), False, 'import os\n'), ((2583, 2651), 'os.path.join', 'os.path.join', (['datasetdir', '"""rawData"""', '"""speakerDiarizationData"""', 'fName'], {}), "(datasetdir, 'rawData', 'speakerDiarizationData', fName)\n", (2595, 2651), False, 'import os\n'), ((2833, 2870), 'bnpy.data.GroupXData.read_from_mat', 'GroupXData.read_from_mat', (['matfilepath'], {}), '(matfilepath)\n', (2857, 2870), False, 'from bnpy.data import GroupXData\n'), ((4224, 4236), 'numpy.unique', 'np.unique', (['Z'], {}), '(Z)\n', (4233, 4236), True, 'import numpy as np\n'), ((4251, 4303), 'numpy.asarray', 'np.asarray', (['[u for u in uLabels if u > 0 and u < 10]'], {}), '([u for u in uLabels if u > 0 and u < 10])\n', (4261, 4303), True, 'import numpy as np\n'), ((4376, 4398), 'numpy.argsort', 'np.argsort', (['(-1 * sizes)'], {}), '(-1 * sizes)\n', (4386, 4398), True, 'import numpy as np\n'), ((4410, 4442), 'numpy.zeros_like', 'np.zeros_like', (['Z'], {'dtype': 'np.int32'}), '(Z, dtype=np.int32)\n', (4423, 4442), True, 'import numpy as np\n'), ((5002, 5027), 'numpy.allclose', 'np.allclose', (['(1.0)', 'aggFrac'], {}), '(1.0, aggFrac)\n', (5013, 5027), True, 'import numpy as np\n'), ((5760, 5776), 'numpy.unique', 'np.unique', (['TrueZ'], {}), '(TrueZ)\n', (5769, 5776), True, 'import numpy as np\n'), ((5886, 5915), 'numpy.argsort', 'np.argsort', (['(-1 * sizeOfLabels)'], {}), '(-1 * sizeOfLabels)\n', (5896, 5915), True, 'import numpy as np\n'), ((7888, 7900), 'matplotlib.pylab.show', 'pylab.show', ([], {}), '()\n', (7898, 7900), False, 'from matplotlib import pylab\n'), ((8165, 8213), 'numpy.asarray', 'np.asarray', (["Data.TrueParams['Z']"], {'dtype': 'np.int32'}), "(Data.TrueParams['Z'], dtype=np.int32)\n", (8175, 8213), True, 'import numpy as np\n'), ((8229, 8241), 'numpy.unique', 'np.unique', (['Z'], {}), '(Z)\n', (8238, 8241), True, 'import numpy as np\n'), ((8256, 8308), 'numpy.asarray', 
'np.asarray', (['[u for u in uLabels if u not in badUIDs]'], {}), '([u for u in uLabels if u not in badUIDs])\n', (8266, 8308), True, 'import numpy as np\n'), ((8381, 8403), 'numpy.argsort', 'np.argsort', (['(-1 * sizes)'], {}), '(-1 * sizes)\n', (8391, 8403), True, 'import numpy as np\n'), ((8414, 8436), 'numpy.zeros', 'np.zeros', (['(10, Z.size)'], {}), '((10, Z.size))\n', (8422, 8436), True, 'import numpy as np\n'), ((9083, 9095), 'matplotlib.pylab.show', 'pylab.show', ([], {}), '()\n', (9093, 9095), False, 'from matplotlib import pylab\n'), ((9158, 9183), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9181, 9183), False, 'import argparse\n'), ((2695, 2722), 'os.path.isfile', 'os.path.isfile', (['matfilepath'], {}), '(matfilepath)\n', (2709, 2722), False, 'import os\n'), ((4558, 4574), 'numpy.sum', 'np.sum', (['(Z == uID)'], {}), '(Z == uID)\n', (4564, 4574), True, 'import numpy as np\n'), ((4817, 4836), 'numpy.sum', 'np.sum', (['(Znew == uID)'], {}), '(Znew == uID)\n', (4823, 4836), True, 'import numpy as np\n'), ((5142, 5170), 'os.path.join', 'os.path.join', (['dataPath', 'file'], {}), '(dataPath, file)\n', (5154, 5170), False, 'import os\n'), ((5222, 5245), 'numpy.transpose', 'np.transpose', (["data['u']"], {}), "(data['u'])\n", (5234, 5245), True, 'import numpy as np\n'), ((8714, 8730), 'numpy.sum', 'np.sum', (['(Z == uID)'], {}), '(Z == uID)\n', (8720, 8730), True, 'import numpy as np\n'), ((3393, 3458), 'os.path.expandvars', 'os.path.expandvars', (['"""$BNPYDATADIR/rawData/speakerDiarizationData"""'], {}), "('$BNPYDATADIR/rawData/speakerDiarizationData')\n", (3411, 3458), False, 'import os\n'), ((4328, 4342), 'numpy.sum', 'np.sum', (['(Z == u)'], {}), '(Z == u)\n', (4334, 4342), True, 'import numpy as np\n'), ((5299, 5313), 'numpy.size', 'np.size', (['TrueZ'], {}), '(TrueZ)\n', (5306, 5313), True, 'import numpy as np\n'), ((5350, 5415), 'os.path.expandvars', 'os.path.expandvars', 
(['"""$BNPYDATADIR/rawData/speakerDiarizationData"""'], {}), "('$BNPYDATADIR/rawData/speakerDiarizationData')\n", (5368, 5415), False, 'import os\n'), ((5817, 5841), 'numpy.sum', 'np.sum', (['(TrueZ == labelID)'], {}), '(TrueZ == labelID)\n', (5823, 5841), True, 'import numpy as np\n'), ((6182, 6220), 'matplotlib.pylab.subplot', 'pylab.subplot', (['D', 'D', '(id2 + D * id1 + 1)'], {}), '(D, D, id2 + D * id1 + 1)\n', (6195, 6220), False, 'from matplotlib import pylab\n'), ((6351, 6367), 'matplotlib.pylab.hold', 'pylab.hold', (['"""on"""'], {}), "('on')\n", (6361, 6367), False, 'from matplotlib import pylab\n'), ((6550, 6568), 'numpy.minimum', 'np.minimum', (['d1', 'd2'], {}), '(d1, d2)\n', (6560, 6568), True, 'import numpy as np\n'), ((6590, 6608), 'numpy.maximum', 'np.maximum', (['d1', 'd2'], {}), '(d1, d2)\n', (6600, 6608), True, 'import numpy as np\n'), ((6887, 6908), 'matplotlib.pylab.ylim', 'pylab.ylim', (['[-25, 25]'], {}), '([-25, 25])\n', (6897, 6908), False, 'from matplotlib import pylab\n'), ((6921, 6942), 'matplotlib.pylab.xlim', 'pylab.xlim', (['[-25, 25]'], {}), '([-25, 25])\n', (6931, 6942), False, 'from matplotlib import pylab\n'), ((8333, 8347), 'numpy.sum', 'np.sum', (['(Z == u)'], {}), '(Z == u)\n', (8339, 8347), True, 'import numpy as np\n'), ((1832, 1857), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1847, 1857), False, 'import os\n'), ((6264, 6280), 'matplotlib.pylab.xticks', 'pylab.xticks', (['[]'], {}), '([])\n', (6276, 6280), False, 'from matplotlib import pylab\n'), ((6297, 6313), 'matplotlib.pylab.yticks', 'pylab.yticks', (['[]'], {}), '([])\n', (6309, 6313), False, 'from matplotlib import pylab\n'), ((6707, 6825), 'matplotlib.pylab.plot', 'pylab.plot', (['Data.X[dataIDs, cur_d1]', 'Data.X[dataIDs, cur_d2]', '"""."""'], {'color': 'Colors[kID]', 'markeredgecolor': 'Colors[kID]'}), "(Data.X[dataIDs, cur_d1], Data.X[dataIDs, cur_d2], '.', color=\n Colors[kID], markeredgecolor=Colors[kID])\n", (6717, 6825), False, 
'from matplotlib import pylab\n'), ((6985, 7001), 'matplotlib.pylab.yticks', 'pylab.yticks', (['[]'], {}), '([])\n', (6997, 7001), False, 'from matplotlib import pylab\n'), ((7048, 7064), 'matplotlib.pylab.xticks', 'pylab.xticks', (['[]'], {}), '([])\n', (7060, 7064), False, 'from matplotlib import pylab\n')] |
# -*- coding: utf-8 -*-
"""
Clase perteneciente al módulo de procesamiento de datos e inferencias Ama.
.. module:: dbscan_processor
:platform: Unix
:synopsis: Detección de clusters de tormenta utilizando el algoritmo DBSCAN.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import ama.utils as utils
import ama.processor as processor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import time
import wradlib as wrl
from geopy.distance import great_circle
from shapely.geometry import MultiPoint
from sklearn.cluster import DBSCAN
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Proyecto de Tesis / Universidad Católica de Asunción."
__credits__ = "<NAME>"
__license__ = "BSD"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Prototype"
class DBSCANProcessor:
    """
    Storm-cluster detection using the DBSCAN algorithm.
    Optimal values: Epsilon = 10.0, Density = 300.
    """
    def __init__(self):
        # Stateless processor; nothing to initialize.
        pass
    ###### PROCESSING OPTIONS ######
    KMS_PER_RADIAN = 6371.0088
    """
    float: The number of kilometers in one radian (Earth's mean radius).
    """
    EPSILON = 10. / KMS_PER_RADIAN
    """
    float: The radial neighborhood distance between points, in radians.
    """
    MIN_SAMPLES = 300
    """
    int: The minimum number of points required to form a cluster.
    """
    TESTING_POINTS = 20
    """
    int: The maximum number of points to use for tests and verification runs.
    """
def get_centermost_point(self, clusters):
"""
Función que detecta el centroide para cada cluster de tormenta.
:param clusters: Un vector con los clusters detectados por DBSCAN.
:return: Un vector con tuplas de Latitud, Longitud correspondientes a cada centroide.
"""
result = []
for cluster in clusters:
if len(cluster):
centroid = (MultiPoint(cluster).centroid.x, MultiPoint(cluster).centroid.y)
centermost_point = min(cluster, key=lambda point: great_circle(point, centroid).m)
result.append(tuple(centermost_point))
return result
    def detect_dbz_clusters(self, filename, layer, test=False):
        """
        Detect storm clusters in one radar scan.

        :param filename: The radar data file to process.
        :param layer: The data layer to process; each layer corresponds to
            one radar elevation angle.
        :param test: Enable verification mode, which keeps only a handful of
            points (TESTING_POINTS) so each one can be checked by hand.
        :return: matrix = an Nx3 matrix with the original (lat, lon, dBZ) values. \
                 no_noise = a list of all points labelled as non-noise. \
                 centermost_points = a list of (lat, lon) tuples for the detected centroids. \
                 time = date/time of the data. \
                 radar = tuple with the radar's coordinates.
        """
        data, metadata = processor.Processor.process(filename)
        lat_vector = []
        lon_vector = []
        dBZ_vector = []
        layer_key = u"SCAN{0}".format(layer)
        radar_latitude = float(metadata["VOL"]["Latitude"])
        radar_longitude = float(metadata["VOL"]["Longitude"])
        # Convert every polar (range, azimuth) gate above the noise floor
        # into geographic coordinates.
        for (row, column), value in np.ndenumerate(data[layer_key][u"Z"]["data"]):
            if value > -64.:
                rng = metadata[layer_key]["r"][column]
                azi = metadata[layer_key]["az"][row]
                dBZ = value
                lon, lat = wrl.georef.polar2lonlat(rng, azi, (radar_longitude, radar_latitude))
                # Round the values.
                dBZ_value = float("{0:.1f}".format(dBZ))
                latitude_value = float("{0:.5f}".format(lat))
                longitude_value = float("{0:.5f}".format(lon))
                # Filter by the configured reflectivity window.
                if dBZ_value >= processor.Processor.MINIMUM_REFLECTIVITY and dBZ_value <= processor.Processor.MAXIMUM_REFLECTIVITY:
                    lat_vector.append(latitude_value)
                    lon_vector.append(longitude_value)
                    dBZ_vector.append(dBZ_value)
                    # Verification mode: stop after a few points.
                    if test == 1:
                        if len(lat_vector) > self.TESTING_POINTS:
                            break
        ###### DBSCAN ######
        #
        # Stack the latitude, longitude and dBZ vectors into an Nx3 matrix.
        #
        # Example:
        #     -25.29036 -57.52304 20.0
        #     -25.28811 -57.52302 30.0
        #
        matrix = np.column_stack((lat_vector, lon_vector, dBZ_vector))
        print("")
        print(utils.Colors.BOLD + "### Matriz Latitud-Longitud-dBZ ###" + utils.Colors.ENDC)
        print(utils.Colors.BOLD + "Tamaño: {0}".format(matrix.shape) + utils.Colors.ENDC)
        print(utils.Colors.BOLD + "{0}".format(np.matrix(matrix)) + utils.Colors.ENDC)
        print("")
        #
        # Run DBSCAN over the matrix just built; the values must be
        # converted to radians so the haversine metric can be applied to
        # each point.
        #
        start_time = time.time()
        db = DBSCAN(eps=self.EPSILON, min_samples=self.MIN_SAMPLES, algorithm='ball_tree', metric='haversine').fit(
            np.radians(np.column_stack((lat_vector, lon_vector))))
        cluster_labels = db.labels_
        # NOTE(review): len(set(labels)) also counts the noise label (-1)
        # when noise is present, so the last group built below may be empty.
        # Confirm intended.
        num_clusters = len(set(cluster_labels))
        end_time = time.time()
        print("")
        print(utils.Colors.BOLD + "### DBSCAN sobre matriz Latitud-Longitud ###" + utils.Colors.ENDC)
        print(utils.Colors.BOLD + "Nro. Puntos: {0}".format(len(matrix)) + utils.Colors.ENDC)
        print(utils.Colors.BOLD + "Nro. Clusteres: {0}".format(num_clusters) + utils.Colors.ENDC)
        print(utils.Colors.BOLD + "Compresión: {0}".format(100 * (1 - float(num_clusters) / len(matrix))) + utils.Colors.ENDC)
        print(utils.Colors.BOLD + "Tiempo: {0} segundos".format(end_time - start_time) + utils.Colors.ENDC)
        print(utils.Colors.BOLD + "Tamaño: {0}".format(cluster_labels.shape) + utils.Colors.ENDC)
        print(utils.Colors.BOLD + "{0}".format(np.array(cluster_labels)) + utils.Colors.ENDC)
        print("")
        # Group the original rows by their cluster label.
        clusters = pd.Series([matrix[cluster_labels == n] for n in range(num_clusters)])
        print("")
        print(utils.Colors.BOLD + "### Lista de Clusteres ###" + utils.Colors.ENDC)
        print(utils.Colors.BOLD + "Tamaño: {0}".format(clusters.shape) + utils.Colors.ENDC)
        print(utils.Colors.BOLD + "{0}".format(clusters.to_string()) + utils.Colors.ENDC)
        print("###")
        print(utils.Colors.BOLD + "Clusteres:" + utils.Colors.ENDC)
        for cluster in clusters:
            print(utils.Colors.BOLD + "Tamaño: {0}".format(cluster.shape) + utils.Colors.ENDC)
            print(utils.Colors.BOLD + "{0}".format(cluster) + utils.Colors.ENDC)
        print("")
        #
        # List of tuples with all the centroids.
        #
        # Example:
        #     [(-25.29036,-57.52304),...]
        #
        centermost_points = self.get_centermost_point(clusters)
        print("")
        print(utils.Colors.BOLD + "### Centroides ###" + utils.Colors.ENDC)
        print(utils.Colors.BOLD + "Tamaño: {0}".format(len(centermost_points)) + utils.Colors.ENDC)
        print(utils.Colors.BOLD + "{0}".format(centermost_points) + utils.Colors.ENDC)
        print("")
        #
        # Collect all the points labelled as non-noise.
        #
        no_noise = []
        for cluster in clusters:
            for row in cluster:
                no_noise.append(row)
        return matrix, no_noise, centermost_points, metadata[layer_key]["Time"], (radar_latitude, radar_longitude)
    def plot_all_points(self, filename, layer, test=False):
        """
        Generate a plot of the data detected by the DBSCAN routine.
        :param filename: Radar data file to process.
        :param layer: Data layer to process. Each layer corresponds to one radar elevation angle.
        :param test: Enable data-verification mode. Verification mode uses only a small amount of data \
        so that each value can be checked individually.
        :return: void
        """
        #
        # Data returned by the cluster-detection step.
        # NOTE: the local name ``time`` (the layer timestamp used in the plot
        # title) shadows any imported ``time`` module inside this method.
        #
        original, clustered, centroids, time, radar_coordinates = self.detect_dbz_clusters(filename, layer, test)
        if len(clustered) > 0:
            # Unzip each list of (lat, lon, dBZ) triples into parallel tuples.
            original_lats, original_lons, original_dBZ = zip(*original)
            clustered_lats, clustered_lons, clustered_dBZ = zip(*clustered)
            centroid_lats, centroid_lons, centroid_dBZ = zip(*centroids)
            ###### PLOT ######
            #
            # Plot the complete data set with an extra layer on top showing
            # the detected clusters and their corresponding centroids.
            #
            plt.style.use(u'ggplot')
            fig1, ax1 = plt.subplots(figsize=[10, 6])
            # Layering order matters: raw data first, then cleaned points,
            # then centroids, then the radar location marker on top.
            original_scatter = ax1.scatter(original_lons, original_lats, c=original_dBZ, alpha=1.0, s=6)
            clustered_scatter = ax1.scatter(clustered_lons, clustered_lats, c='black', alpha=1.0, s=12)
            centroids_scatter = ax1.scatter(centroid_lons, centroid_lats, c='red', edgecolor='None', alpha=0.7, s=120)
            radar_scatter = ax1.scatter(radar_coordinates[1], radar_coordinates[0], c='green', edgecolor='None', alpha=1.0, s=80)
            ax1.set_title(
                u"Reflectividades (dBZ) entre los valores {0} a {1}. Elevación Radar = Capa {2} / {3}".format(
                    processor.Processor.MINIMUM_REFLECTIVITY,
                    processor.Processor.MAXIMUM_REFLECTIVITY,
                    layer + 1,
                    time),
                fontsize=11,
                fontweight="bold",
                y=1.05)
            ax1.set_xlabel('Longitud')
            ax1.set_ylabel('Latitud')
            ax1.legend(
                [original_scatter, clustered_scatter, centroids_scatter, radar_scatter],
                [
                    "dBZ {0} - {1}".format(processor.Processor.MINIMUM_REFLECTIVITY, processor.Processor.MAXIMUM_REFLECTIVITY),
                    "Datos Limpios",
                    "Centroides Clusters",
                    u"Ubicación Radar"
                ],
                loc='upper right')
            plt.show()
        else:
            print(utils.Colors.BOLD + "---" + utils.Colors.ENDC)
            print(utils.Colors.BOLD + "No se detectaron clusters." + utils.Colors.ENDC)
| [
"ama.processor.Processor.process",
"shapely.geometry.MultiPoint",
"numpy.column_stack",
"numpy.ndenumerate",
"matplotlib.pyplot.style.use",
"numpy.array",
"wradlib.georef.polar2lonlat",
"geopy.distance.great_circle",
"numpy.matrix",
"time.time",
"matplotlib.pyplot.subplots",
"sklearn.cluster.D... | [((3043, 3080), 'ama.processor.Processor.process', 'processor.Processor.process', (['filename'], {}), '(filename)\n', (3070, 3080), True, 'import ama.processor as processor\n'), ((3358, 3403), 'numpy.ndenumerate', 'np.ndenumerate', (["data[layer_key][u'Z']['data']"], {}), "(data[layer_key][u'Z']['data'])\n", (3372, 3403), True, 'import numpy as np\n'), ((4604, 4657), 'numpy.column_stack', 'np.column_stack', (['(lat_vector, lon_vector, dBZ_vector)'], {}), '((lat_vector, lon_vector, dBZ_vector))\n', (4619, 4657), True, 'import numpy as np\n'), ((5206, 5217), 'time.time', 'time.time', ([], {}), '()\n', (5215, 5217), False, 'import time\n'), ((5504, 5515), 'time.time', 'time.time', ([], {}), '()\n', (5513, 5515), False, 'import time\n'), ((9017, 9041), 'matplotlib.pyplot.style.use', 'plt.style.use', (['u"""ggplot"""'], {}), "(u'ggplot')\n", (9030, 9041), True, 'import matplotlib.pyplot as plt\n'), ((9066, 9095), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '[10, 6]'}), '(figsize=[10, 6])\n', (9078, 9095), True, 'import matplotlib.pyplot as plt\n'), ((10483, 10493), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10491, 10493), True, 'import matplotlib.pyplot as plt\n'), ((3597, 3665), 'wradlib.georef.polar2lonlat', 'wrl.georef.polar2lonlat', (['rng', 'azi', '(radar_longitude, radar_latitude)'], {}), '(rng, azi, (radar_longitude, radar_latitude))\n', (3620, 3665), True, 'import wradlib as wrl\n'), ((5231, 5333), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'eps': 'self.EPSILON', 'min_samples': 'self.MIN_SAMPLES', 'algorithm': '"""ball_tree"""', 'metric': '"""haversine"""'}), "(eps=self.EPSILON, min_samples=self.MIN_SAMPLES, algorithm=\n 'ball_tree', metric='haversine')\n", (5237, 5333), False, 'from sklearn.cluster import DBSCAN\n'), ((5357, 5398), 'numpy.column_stack', 'np.column_stack', (['(lat_vector, lon_vector)'], {}), '((lat_vector, lon_vector))\n', (5372, 5398), True, 'import numpy as np\n'), ((4906, 4923), 
'numpy.matrix', 'np.matrix', (['matrix'], {}), '(matrix)\n', (4915, 4923), True, 'import numpy as np\n'), ((6208, 6232), 'numpy.array', 'np.array', (['cluster_labels'], {}), '(cluster_labels)\n', (6216, 6232), True, 'import numpy as np\n'), ((1918, 1937), 'shapely.geometry.MultiPoint', 'MultiPoint', (['cluster'], {}), '(cluster)\n', (1928, 1937), False, 'from shapely.geometry import MultiPoint\n'), ((1950, 1969), 'shapely.geometry.MultiPoint', 'MultiPoint', (['cluster'], {}), '(cluster)\n', (1960, 1969), False, 'from shapely.geometry import MultiPoint\n'), ((2048, 2077), 'geopy.distance.great_circle', 'great_circle', (['point', 'centroid'], {}), '(point, centroid)\n', (2060, 2077), False, 'from geopy.distance import great_circle\n')] |
import cv2
import numpy as np
from PIL import Image
from PIL import ImageDraw
from subprocess import Popen, PIPE
import pycocotools.mask as coco_mask_util
def draw_bboxes(image, bboxes, labels=None, output_file=None, fill='red'):
    """Draw bounding boxes (and optional labels) onto an image.

    Args:
        image (string | ndarray): input image path or image BGR ndarray.
        bboxes (np.array): boxes as (xmin, ymin, xmax, ymax) rows.
        labels (list of string): one label per box, drawn at the top-left
            corner (labels are drawn for RGB images only).
        output_file (string): if given, the annotated image is saved here.
        fill: outline color for the box edges.

    Returns:
        The image with drawings, as a BGR ndarray.
    """
    if labels:
        assert len(bboxes) == len(labels)
    if isinstance(image, str):
        canvas = Image.open(image)
    elif isinstance(image, np.ndarray):
        # Incoming arrays are BGR; PIL expects RGB, hence the channel flip.
        canvas = Image.fromarray(image[:, :, ::-1], mode='RGB')
    else:
        raise ValueError('`image` should be image path in string or '
                         'image ndarray.')
    pen = ImageDraw.Draw(canvas)
    for idx, box in enumerate(bboxes):
        xmin, ymin, xmax, ymax = box
        # Closed polyline around the box, ending back at the start corner.
        outline = [(xmin, ymin), (xmin, ymax), (xmax, ymax),
                   (xmax, ymin), (xmin, ymin)]
        pen.line(outline, width=4, fill=fill)
        if labels and canvas.mode == 'RGB':
            pen.text((xmin, ymin), labels[idx], (255, 255, 0))
    if output_file:
        print('The image with bbox is saved as {}'.format(output_file))
        canvas.save(output_file)
    # Convert back to a BGR ndarray for the caller.
    return np.array(canvas)[:, :, ::-1]
def save_as_gif(images, gif_file, fps=5):
    """
    Save numpy images as gif file using ffmpeg.

    Args:
        images (list|ndarray): a list of uint8 images or uint8 ndarray
            with shape [time, height, width, channels]. `channels` can
            be 1 or 3.
        gif_file (str): path to saved gif file.
        fps (int): frames per second of the animation.

    Raises:
        IOError: if ffmpeg exits with a non-zero status (its stderr is
            included in the message).
    """
    h, w, c = images[0].shape
    # Feed raw frames to ffmpeg over stdin; the palettegen/paletteuse filter
    # chain produces a proper GIF palette.
    cmd = [
        'ffmpeg', '-y',
        '-f', 'rawvideo',
        '-vcodec', 'rawvideo',
        '-r', '%.02f' % fps,
        '-s', '%dx%d' % (w, h),
        '-pix_fmt', {1: 'gray', 3: 'rgb24'}[c],
        '-i', '-',
        '-filter_complex', '[0:v]split[x][z];[z]palettegen[y];[x][y]paletteuse',
        '-r', '%.02f' % fps,
        '-f', 'gif',
        '-']
    proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    for image in images:
        # BUG FIX: ndarray.tostring() was deprecated and removed in NumPy
        # 2.0; tobytes() is the exact byte-for-byte replacement.
        proc.stdin.write(image.tobytes())
    out, err = proc.communicate()
    if proc.returncode:
        err = '\n'.join([' '.join(cmd), err.decode('utf8')])
        raise IOError(err)
    del proc
    with open(gif_file, 'wb') as f:
        f.write(out)
def colormap(rgb=False):
    """
    Get colormap

    Returns an (N, 3) float32 array of colors scaled to the 0-255 range.
    Channels are returned in BGR order by default; pass ``rgb=True`` to
    keep RGB order.
    """
    # Flat list of RGB triples in the 0-1 range (fixed palette).
    color_list = np.array([
        0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494,
        0.184, 0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078,
        0.184, 0.300, 0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000,
        1.000, 0.500, 0.000, 0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000,
        0.000, 1.000, 0.667, 0.000, 1.000, 0.333, 0.333, 0.000, 0.333, 0.667,
        0.000, 0.333, 1.000, 0.000, 0.667, 0.333, 0.000, 0.667, 0.667, 0.000,
        0.667, 1.000, 0.000, 1.000, 0.333, 0.000, 1.000, 0.667, 0.000, 1.000,
        1.000, 0.000, 0.000, 0.333, 0.500, 0.000, 0.667, 0.500, 0.000, 1.000,
        0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333, 0.667, 0.500,
        0.333, 1.000, 0.500, 0.667, 0.000, 0.500, 0.667, 0.333, 0.500, 0.667,
        0.667, 0.500, 0.667, 1.000, 0.500, 1.000, 0.000, 0.500, 1.000, 0.333,
        0.500, 1.000, 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000,
        0.000, 0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000, 1.000, 0.333,
        0.333, 1.000, 0.333, 0.667, 1.000, 0.333, 1.000, 1.000, 0.667, 0.000,
        1.000, 0.667, 0.333, 1.000, 0.667, 0.667, 1.000, 0.667, 1.000, 1.000,
        1.000, 0.000, 1.000, 1.000, 0.333, 1.000, 1.000, 0.667, 1.000, 0.167,
        0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000,
        0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000,
        0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000,
        0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000,
        0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833,
        0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.286,
        0.286, 0.286, 0.429, 0.429, 0.429, 0.571, 0.571, 0.571, 0.714, 0.714,
        0.714, 0.857, 0.857, 0.857, 1.000, 1.000, 1.000
    ]).astype(np.float32)
    # Group into rows of 3 (one color each) and scale to 0-255.
    color_list = color_list.reshape((-1, 3)) * 255
    if not rgb:
        # Reverse the channel order: RGB -> BGR.
        color_list = color_list[:, ::-1]
    return color_list
| [
"PIL.Image.fromarray",
"PIL.Image.open",
"subprocess.Popen",
"numpy.array",
"PIL.ImageDraw.Draw"
] | [((926, 947), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (940, 947), False, 'from PIL import ImageDraw\n'), ((2274, 2322), 'subprocess.Popen', 'Popen', (['cmd'], {'stdin': 'PIPE', 'stdout': 'PIPE', 'stderr': 'PIPE'}), '(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n', (2279, 2322), False, 'from subprocess import Popen, PIPE\n'), ((670, 687), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (680, 687), False, 'from PIL import Image\n'), ((1475, 1490), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1483, 1490), True, 'import numpy as np\n'), ((744, 790), 'PIL.Image.fromarray', 'Image.fromarray', (['image[:, :, ::-1]'], {'mode': '"""RGB"""'}), "(image[:, :, ::-1], mode='RGB')\n", (759, 790), False, 'from PIL import Image\n'), ((2685, 4160), 'numpy.array', 'np.array', (['[0.0, 0.447, 0.741, 0.85, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494, 0.184, \n 0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078, 0.184, \n 0.3, 0.3, 0.3, 0.6, 0.6, 0.6, 1.0, 0.0, 0.0, 1.0, 0.5, 0.0, 0.749, \n 0.749, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.667, 0.0, 1.0, 0.333, 0.333,\n 0.0, 0.333, 0.667, 0.0, 0.333, 1.0, 0.0, 0.667, 0.333, 0.0, 0.667, \n 0.667, 0.0, 0.667, 1.0, 0.0, 1.0, 0.333, 0.0, 1.0, 0.667, 0.0, 1.0, 1.0,\n 0.0, 0.0, 0.333, 0.5, 0.0, 0.667, 0.5, 0.0, 1.0, 0.5, 0.333, 0.0, 0.5, \n 0.333, 0.333, 0.5, 0.333, 0.667, 0.5, 0.333, 1.0, 0.5, 0.667, 0.0, 0.5,\n 0.667, 0.333, 0.5, 0.667, 0.667, 0.5, 0.667, 1.0, 0.5, 1.0, 0.0, 0.5, \n 1.0, 0.333, 0.5, 1.0, 0.667, 0.5, 1.0, 1.0, 0.5, 0.0, 0.333, 1.0, 0.0, \n 0.667, 1.0, 0.0, 1.0, 1.0, 0.333, 0.0, 1.0, 0.333, 0.333, 1.0, 0.333, \n 0.667, 1.0, 0.333, 1.0, 1.0, 0.667, 0.0, 1.0, 0.667, 0.333, 1.0, 0.667,\n 0.667, 1.0, 0.667, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.333, 1.0, 1.0, 0.667,\n 1.0, 0.167, 0.0, 0.0, 0.333, 0.0, 0.0, 0.5, 0.0, 0.0, 0.667, 0.0, 0.0, \n 0.833, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.167, 0.0, 0.0, 0.333, 0.0, 0.0, \n 0.5, 0.0, 0.0, 0.667, 0.0, 0.0, 0.833, 0.0, 0.0, 1.0, 0.0, 
0.0, 0.0, \n 0.167, 0.0, 0.0, 0.333, 0.0, 0.0, 0.5, 0.0, 0.0, 0.667, 0.0, 0.0, 0.833,\n 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.143, 0.143, 0.143, 0.286, 0.286, 0.286,\n 0.429, 0.429, 0.429, 0.571, 0.571, 0.571, 0.714, 0.714, 0.714, 0.857, \n 0.857, 0.857, 1.0, 1.0, 1.0]'], {}), '([0.0, 0.447, 0.741, 0.85, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494,\n 0.184, 0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078, \n 0.184, 0.3, 0.3, 0.3, 0.6, 0.6, 0.6, 1.0, 0.0, 0.0, 1.0, 0.5, 0.0, \n 0.749, 0.749, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.667, 0.0, 1.0, 0.333,\n 0.333, 0.0, 0.333, 0.667, 0.0, 0.333, 1.0, 0.0, 0.667, 0.333, 0.0, \n 0.667, 0.667, 0.0, 0.667, 1.0, 0.0, 1.0, 0.333, 0.0, 1.0, 0.667, 0.0, \n 1.0, 1.0, 0.0, 0.0, 0.333, 0.5, 0.0, 0.667, 0.5, 0.0, 1.0, 0.5, 0.333, \n 0.0, 0.5, 0.333, 0.333, 0.5, 0.333, 0.667, 0.5, 0.333, 1.0, 0.5, 0.667,\n 0.0, 0.5, 0.667, 0.333, 0.5, 0.667, 0.667, 0.5, 0.667, 1.0, 0.5, 1.0, \n 0.0, 0.5, 1.0, 0.333, 0.5, 1.0, 0.667, 0.5, 1.0, 1.0, 0.5, 0.0, 0.333, \n 1.0, 0.0, 0.667, 1.0, 0.0, 1.0, 1.0, 0.333, 0.0, 1.0, 0.333, 0.333, 1.0,\n 0.333, 0.667, 1.0, 0.333, 1.0, 1.0, 0.667, 0.0, 1.0, 0.667, 0.333, 1.0,\n 0.667, 0.667, 1.0, 0.667, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.333, 1.0, 1.0,\n 0.667, 1.0, 0.167, 0.0, 0.0, 0.333, 0.0, 0.0, 0.5, 0.0, 0.0, 0.667, 0.0,\n 0.0, 0.833, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.167, 0.0, 0.0, 0.333, 0.0, \n 0.0, 0.5, 0.0, 0.0, 0.667, 0.0, 0.0, 0.833, 0.0, 0.0, 1.0, 0.0, 0.0, \n 0.0, 0.167, 0.0, 0.0, 0.333, 0.0, 0.0, 0.5, 0.0, 0.0, 0.667, 0.0, 0.0, \n 0.833, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.143, 0.143, 0.143, 0.286, 0.286,\n 0.286, 0.429, 0.429, 0.429, 0.571, 0.571, 0.571, 0.714, 0.714, 0.714, \n 0.857, 0.857, 0.857, 1.0, 1.0, 1.0])\n', (2693, 4160), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
@title :utils/module_wrappers.py
@author :ch
@contact :<EMAIL>
@created :06/13/2019
@version :1.0
@python_version :3.6.8
An interface for a CL hypernetwork and main network. These interfaces ensure
that we can consistently use these networks without knowing their specific
implementation.
"""
from abc import ABC, abstractmethod
from warnings import warn
import numpy as np
class CLHyperNetInterface(ABC):
    """Abstract base class for task-conditioned hypernetworks used in
    continual learning.

    .. deprecated:: 1.0
        Please use module :class:`hnets.hnet_interface.CLHyperNetInterface`
        instead.

    Subclasses must populate the protected attributes created in
    :meth:`__init__`; the read-only properties below merely expose them.

    Attributes:
        theta: Hypernetwork parameters, excluding task embeddings.
        num_weights: Total number of parameters, task embeddings included.
        num_outputs: Total number of output neurons (number of weights
            generated for the target network).
        has_theta: Whether internal theta weights exist. Otherwise they are
            assumed to be produced by another hypernetwork.
        theta_shapes: Shape of every "theta" weight tensor, independent of
            ``has_theta``.
        has_task_embs: Whether task embeddings are stored internally.
        num_task_embs: Number of internally available task embeddings.
        requires_ext_input: Whether an external input is expected in
            addition to the task identity.
        target_shapes: Shapes of the weight tensors generated for the main
            network (i.e., of the hypernet output).
    """

    def __init__(self):
        """Create the attribute slots that implementing classes must fill."""
        super(CLHyperNetInterface, self).__init__()
        warn('Please use class "hnets.hnet_interface.CLHyperNetInterface" '
             'instead.', DeprecationWarning)

        # Every class implementing this interface is responsible for
        # assigning meaningful values to all of the following.
        self._theta = None
        self._task_embs = None
        self._theta_shapes = None
        # Task embedding weights + theta weights.
        self._num_weights = None
        self._num_outputs = None
        # Must be set whenever an external input is required.
        self._size_ext_input = None
        self._target_shapes = None

    def _is_properly_setup(self):
        """Assert that a subclass assigned all mandatory attributes."""
        assert self._theta_shapes is not None
        assert self._num_weights is not None
        assert self._num_outputs is not None
        assert self._target_shapes is not None

    @property
    def theta(self):
        """All learnable hypernet parameters except the task embeddings.

        Theta comprises exactly the parameters that should be regularized
        to avoid catastrophic forgetting when training the hypernetwork in
        a Continual Learning setting.

        Returns:
            A :class:`torch.nn.ParameterList`, or ``None`` if this network
            has no weights.
        """
        return self._theta

    @property
    def num_outputs(self):
        """Total number of output neurons of the hypernetwork."""
        return self._num_outputs

    @property
    def num_weights(self):
        """Total parameter count, including task embeddings."""
        return self._num_weights

    @property
    def has_theta(self):
        """``True`` iff internal theta weights are present."""
        return self._theta is not None

    @property
    def theta_shapes(self):
        """Shapes of all theta tensors.

        Returns:
            A list of lists of integers.
        """
        return self._theta_shapes

    @property
    def has_task_embs(self):
        """``True`` iff task embeddings are maintained internally."""
        return self._task_embs is not None

    @property
    def num_task_embs(self):
        """Number of internally maintained task embeddings."""
        assert self.has_task_embs
        return len(self._task_embs)

    @property
    def requires_ext_input(self):
        """``True`` iff the hypernet expects an additional external input."""
        return self._size_ext_input is not None

    @property
    def target_shapes(self):
        """Shapes of the generated main-network weight tensors.

        Returns:
            A list of lists of integers.
        """
        return self._target_shapes

    def get_task_embs(self):
        """Return every task embedding.

        Returns:
            A list of Parameter tensors.
        """
        assert self.has_task_embs
        return self._task_embs

    def get_task_emb(self, task_id):
        """Return the embedding belonging to task ``task_id``.

        Args:
            task_id: Index of the task whose embedding is requested.

        Returns:
            The corresponding Parameter tensor.
        """
        assert self.has_task_embs
        return self._task_embs[task_id]

    @abstractmethod
    def forward(self, task_id=None, theta=None, dTheta=None, task_emb=None,
                ext_inputs=None, squeeze=True):
        """Compute all HyperWeights.

        Args:
            task_id: Index of the task the network should produce weights
                for; selects the corresponding internal task embedding.
                Only one integer can be given.
            theta: Weight tensors used as network parameters. Mandatory
                when ``has_theta`` is ``False``; when provided, internal
                theta is ignored.
            dTheta: Weight tensors added to "theta" (internal or provided)
                when computing the output of this network.
            task_emb: Externally supplied task embedding; required when
                ``has_task_embs`` is ``False``.
            ext_inputs: Additional input embeddings; required when
                ``requires_ext_input`` is ``True``. May be a batch (see
                ``squeeze``).
            squeeze: When a batch of size one is given, automatically
                squeeze away the leading batch dimension of the produced
                weight tensors, as expected by the main network.

        Returns:
            A list of weights. Two consecutive entries always correspond
            to a weight matrix followed by a bias vector.
        """
        pass
class MainNetInterface(ABC):
    """Abstract base class for main networks.

    A main network may own all of its weights, none, or only some; weights
    it does not own have to be passed to its forward function (e.g., they
    may be generated by a hypernetwork).

    .. deprecated:: 1.0
        Please use module :class:`mnets.mnet_interface.MainNetInterface`
        instead.

    Attributes:
        weights: All internally maintained weights of the main network, or
            ``None`` when every weight is generated externally.
        param_shapes: Shapes of all parameter tensors as if the network
            were stand-alone (no weights passed to forward), independent
            of the ``weights`` attribute.
        hyper_shapes: Shapes of the weight tensors that must be passed to
            the forward function, or ``None`` when all weights are kept
            internally.
        has_bias: Whether layers in this network have bias terms.
        has_fc_out: Whether the output layer is fully connected. If
            ``True`` and ``hyper_shapes`` is not ``None``, the last two
            entries of ``hyper_shapes`` are implicitly assumed to be that
            layer's weights and biases.
        num_params: Total number of weights in the tensors described by
            ``param_shapes``.
    """

    def __init__(self):
        """Create the attribute slots that implementing classes must fill."""
        super(MainNetInterface, self).__init__()
        warn('Please use class "mnets.mnet_interface.MainNetInterface" '
             'instead.', DeprecationWarning)

        # Every class implementing this interface is responsible for
        # assigning meaningful values to all of the following.
        self._weights = None
        self._all_shapes = None
        self._hyper_shapes = None
        self._num_params = None
        self._has_bias = None
        self._has_fc_out = None

    def _is_properly_setup(self):
        """Assert that a subclass assigned all mandatory attributes."""
        assert self._weights is not None or self._hyper_shapes is not None
        if self._weights is not None and self._hyper_shapes is not None:
            # Internal + external weight tensors must cover all shapes.
            combined = len(self._weights) + len(self._hyper_shapes)
            assert combined == len(self._all_shapes)
        elif self._weights is not None:
            assert len(self._weights) == len(self._all_shapes)
        else:
            assert len(self._hyper_shapes) == len(self._all_shapes)
        assert self._all_shapes is not None
        assert isinstance(self._has_bias, bool)
        assert isinstance(self._has_fc_out, bool)

    @property
    def weights(self):
        """Internally maintained weights.

        Returns:
            A :class:`torch.nn.ParameterList`, or ``None`` if no
            parameters are internally maintained.
        """
        return self._weights

    @property
    def param_shapes(self):
        """Shapes of all parameter tensors.

        Returns:
            A list of lists of integers.
        """
        return self._all_shapes

    @property
    def hyper_shapes(self):
        """Shapes of externally supplied weight tensors.

        Returns:
            A list of lists of integers.
        """
        return self._hyper_shapes

    @property
    def has_bias(self):
        """Whether layers in this network carry bias terms."""
        return self._has_bias

    @property
    def has_fc_out(self):
        """Whether the output layer is fully connected."""
        return self._has_fc_out

    @property
    def num_params(self):
        """Total number of parameters in the network.

        The count is derived lazily from ``param_shapes`` and cached.

        Returns:
            Total number of parameters in the network.
        """
        if self._num_params is None:
            self._num_params = int(sum(np.prod(shape)
                                       for shape in self.param_shapes))
        return self._num_params
if __name__ == '__main__':
    # This module only defines interfaces; there is nothing to run.
    pass
| [
"warnings.warn",
"numpy.prod"
] | [((2584, 2689), 'warnings.warn', 'warn', (['(\'Please use class "hnets.hnet_interface.CLHyperNetInterface" \' + \'instead.\')', 'DeprecationWarning'], {}), '(\'Please use class "hnets.hnet_interface.CLHyperNetInterface" \' +\n \'instead.\', DeprecationWarning)\n', (2588, 2689), False, 'from warnings import warn\n'), ((9813, 9915), 'warnings.warn', 'warn', (['(\'Please use class "mnets.mnet_interface.MainNetInterface" \' + \'instead.\')', 'DeprecationWarning'], {}), '(\'Please use class "mnets.mnet_interface.MainNetInterface" \' +\n \'instead.\', DeprecationWarning)\n', (9817, 9915), False, 'from warnings import warn\n'), ((12186, 12196), 'numpy.prod', 'np.prod', (['l'], {}), '(l)\n', (12193, 12196), True, 'import numpy as np\n')] |
import io
import time
from functools import lru_cache
from urllib.error import HTTPError
import numpy as np
import pandas as pd
import requests
import sidekick as sk
import mundi
from mundi import transforms
from ..cache import ttl_cache
from ..logging import log
from ..utils import today
HOURS = 3600
TIMEOUT = 6 * HOURS

# Registries mapping an API key to its loader function.
EPIDEMIC_CURVES_APIS = {}
MOBILITY_DATA_APIS = {}


def epidemic_curve_api(key):
    """Decorator factory registering an epidemic-curve loader under ``key``.

    Registration uses ``setdefault``, so the first loader registered for a
    given key wins and is what the decorator returns.
    """
    def register(fn):
        return EPIDEMIC_CURVES_APIS.setdefault(key, fn)
    return register


def mobility_data_api(key):
    """Decorator factory registering a mobility-data loader under ``key``.

    Same first-registration-wins semantics as :func:`epidemic_curve_api`.
    """
    def register(fn):
        return MOBILITY_DATA_APIS.setdefault(key, fn)
    return register
#
# Epidemic curves
#
def epidemic_curve(region, api="auto", extra=False, **kwargs):
    """
    Universal interface to all epidemic curve loaders.

    Always return a dataframe with ["cases", "deaths"] columns for the
    given region. Some API's may offer additional columns such as
    "recovered", "test" etc.; those are only kept when ``extra=True``.
    """
    loader = EPIDEMIC_CURVES_APIS[api]
    curves = loader(mundi.code(region), **kwargs)
    if extra:
        return curves
    return curves[["cases", "deaths"]]
@epidemic_curve_api("auto")
def auto_api(code, **kwargs):
"""
Select best API to load according to region code.
"""
if code == "BR" or code.startswith("BR-"):
return brasil_io(code)
elif len(code) == 2:
return corona_api(code, **kwargs)
raise ValueError(f"no API can load region with code: {code}")
@epidemic_curve_api("corona-api.com")
@ttl_cache("covid-19", timeout=TIMEOUT)
def corona_api(code) -> pd.DataFrame:
"""
Load country's cases, deaths and recovered timeline from corona-api.com.
"""
data = download_corona_api(code)
data = data["data"]["timeline"]
df = pd.DataFrame(data).rename({"confirmed": "cases"}, axis=1)
df = df[["date", "cases", "deaths", "recovered"]]
df["date"] = pd.to_datetime(df["date"])
df = df.drop_duplicates("date", keep="first").set_index("date")
df = df[df.fillna(0).sum(1) > 0].sort_index()
# Fill missing data with previous measurement
start, end = df.index[[0, -1]]
full_index = pd.to_datetime(np.arange((end - start).days), unit="D", origin=start)
df = df.reindex(full_index).fillna(method="ffill")
return df.astype(int)
@sk.retry(10, sleep=0.5)
def download_corona_api(code) -> dict:
    """Fetch the raw JSON payload for country ``code`` from corona-api.com."""
    log.info(f"[api/corona-api] Downloading data from corona API ({code})")
    endpoint = "http://corona-api.com/countries/{code}?include=timeline".format(code=code)
    response = requests.get(endpoint)
    kb = len(response.content) // 1024
    log.info(f"[api/corona-api] Download ended with {kb} kb")
    return response.json()
@epidemic_curve_api("brasil.io")
def brasil_io(code):
cases = brasil_io_cases()
cases = cases[cases["id"] == code].drop(columns="id")
cases = cases.drop_duplicates("date")
return cases.set_index("date").sort_index()
@ttl_cache("covid-19", timeout=TIMEOUT)
def brasil_io_cases() -> pd.DataFrame:
"""
Return the complete dataframe of cases and deaths from Brasil.io.
"""
df = download_brasil_io_cases()
cols = {
"last_available_confirmed": "cases",
"confirmed": "cases",
"last_available_deaths": "deaths",
"city_ibge_code": "code",
}
cases = df.rename(cols, axis=1)
cases = cases[cases["code"].notna()]
cases["code"] = cases["code"].apply(lambda x: str(int(x))).astype("string")
cases["code"] = "BR-" + cases["code"]
cases["date"] = pd.to_datetime(cases["date"])
cases = cases[cases["place_type"] == "city"]
cases = cases[["date", "code", "cases", "deaths"]]
cases = cases.dropna().reset_index(drop=True)
cases = cases.rename({"code": "id"}, axis=1)
log.info(f"[api/brasil.io] Merging {len(df)} entries")
result = {}
for col in ["cases", "deaths"]:
data = cases.pivot_table(index="id", columns="date", values=col).fillna(-1).sort_index()
data = transforms.sum_children(data).reset_index()
data = data.melt(id_vars=["id"], var_name="date", value_name=col)
data = data[data[col] >= 0]
result[col] = data
out = (
pd.merge(*result.values(), on=["id", "date"], how="outer")
.fillna(0)
.astype({"cases": int, "deaths": int})
)
log.info("[api/brasil.io] Merge complete")
return out
@sk.retry(10, sleep=0.5)
def download_brasil_io_cases():
    """Download the full Brasil.io COVID-19 case table as a DataFrame.

    Falls back to a Github mirror when the primary download fails.
    """
    log.info("[api/brasil.io] Downloading data from Brasil.io")
    url = "https://data.brasil.io/dataset/covid19/caso_full.csv.gz"
    try:
        # Brasil.io is now under a Cloudflare CDN and it requires proper
        # User-Agent headers. This means we cannot download data using pandas
        # builtin support for URLs in read_csv, since it does not set those
        # headers accordingly.
        response = requests.get(url, headers={"User-Agent": "python-requests"})
        # BUG FIX: requests does not raise on HTTP error statuses by itself
        # (and never raises urllib's HTTPError), so without this call an
        # error page would be parsed as CSV and the fallback below was
        # effectively dead code.
        response.raise_for_status()
    except (HTTPError, requests.RequestException) as e:
        log.warn(f"[api/brasil.io] error downloading: {e}, using Github fallback")
        url = "https://github.com/pydemic/databases/raw/master/caso_full.csv.gz"
        return pd.read_csv(url)
    else:
        return pd.read_csv(io.BytesIO(response.content), compression="gzip")
#
# Mobility data
#
@ttl_cache("covid-19", timeout=TIMEOUT)
@sk.retry(10, sleep=0.5)
def google_mobility_data(cli=False):
"""
Download google mobility data
"""
url = "https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv"
log.info(f"Downloading google mobility data {today()}")
t0 = time.time()
data = requests.get(url)
log.info(f"Download finished after {time.time() - t0:0.2} seconds")
data_cols = ["retail", "grocery", "parks", "transit", "work", "residential"]
df = pd.read_csv(data.content.decode("utf8")).rename(
{
"retail_and_recreation_percent_change_from_baseline": "retail",
"grocery_and_pharmacy_percent_change_from_baseline": "grocery",
"parks_percent_change_from_baseline": "parks",
"transit_stations_percent_change_from_baseline": "transit",
"workplaces_percent_change_from_baseline": "work",
"residential_percent_change_from_baseline": "residential",
},
axis=1,
)
df["date"] = pd.to_datetime(df["date"])
df[data_cols] = df[data_cols] / 100.0
return df
def fix_google_mobility_data_region_codes(df):
    # NOTE(review): this function looks unfinished. ``data.apply(subregion_code)``
    # applies column-wise and passes a single Series, while ``subregion_code``
    # takes three scalar arguments — calling this would raise a TypeError.
    # A row-wise call would be ``data.apply(lambda r: subregion_code(*r), axis=1)``.
    # Also, ``codes`` is computed but never used and ``df`` is returned
    # unchanged; confirm the intended behavior before fixing.
    data = df[["country_region_code", "sub_region_1", "sub_region_2"]]
    codes = data.apply(subregion_code)
    return df
@lru_cache(1024)
def subregion_code(country, region, subregion):
    """Map a Google-mobility (country, region, subregion) triple to a region code."""
    # Normalize empty strings/NaN-ish falsy values to None.
    region = region or None
    subregion = subregion or None

    # Check arbitrary mapping
    mapping = google_mobility_map_codes()
    try:
        return mapping[country, region, subregion]
    except KeyError:
        pass

    # Fasttrack pure-country codes
    if not region:
        return country

    # NOTE(review): the success/failure branches below look swapped. On a
    # successful mundi lookup the loop simply continues (rebinding ``region``
    # to a mundi Region object), while on LookupError it returns
    # ``region.id`` — an attribute the original string argument does not
    # have. Presumably the intent was to return ``region.id`` on success and
    # try the next name on LookupError; confirm against callers before fixing.
    for name in (subregion, region):
        try:
            region = mundi.region(country_id=country, name=name)
        except LookupError:
            return region.id
    return country + "-" + region
@lru_cache(1)
def google_mobility_map_codes() -> dict:
    """Hard-coded (country, region, subregion) -> region-id overrides."""
    mapping = {
        ("BR", f"State of {state}", None): state.id
        for state in mundi.regions("BR", type="state")
    }
    # Google labels Brasília's region "Federal District", not "State of ...".
    mapping["BR", "Federal District", None] = "BR-DF"
    return mapping
if __name__ == "__main__":
sk.import_later("..cli.api:covid19_api_downloader", package=__package__)()
| [
"mundi.transforms.sum_children",
"pandas.read_csv",
"numpy.arange",
"io.BytesIO",
"sidekick.retry",
"requests.get",
"sidekick.import_later",
"mundi.region",
"mundi.code",
"pandas.DataFrame",
"functools.lru_cache",
"time.time",
"pandas.to_datetime",
"mundi.regions"
] | [((2205, 2228), 'sidekick.retry', 'sk.retry', (['(10)'], {'sleep': '(0.5)'}), '(10, sleep=0.5)\n', (2213, 2228), True, 'import sidekick as sk\n'), ((4286, 4309), 'sidekick.retry', 'sk.retry', (['(10)'], {'sleep': '(0.5)'}), '(10, sleep=0.5)\n', (4294, 4309), True, 'import sidekick as sk\n'), ((5195, 5218), 'sidekick.retry', 'sk.retry', (['(10)'], {'sleep': '(0.5)'}), '(10, sleep=0.5)\n', (5203, 5218), True, 'import sidekick as sk\n'), ((6446, 6461), 'functools.lru_cache', 'lru_cache', (['(1024)'], {}), '(1024)\n', (6455, 6461), False, 'from functools import lru_cache\n'), ((7028, 7040), 'functools.lru_cache', 'lru_cache', (['(1)'], {}), '(1)\n', (7037, 7040), False, 'from functools import lru_cache\n'), ((897, 915), 'mundi.code', 'mundi.code', (['region'], {}), '(region)\n', (907, 915), False, 'import mundi\n'), ((1802, 1828), 'pandas.to_datetime', 'pd.to_datetime', (["df['date']"], {}), "(df['date'])\n", (1816, 1828), True, 'import pandas as pd\n'), ((3428, 3457), 'pandas.to_datetime', 'pd.to_datetime', (["cases['date']"], {}), "(cases['date'])\n", (3442, 3457), True, 'import pandas as pd\n'), ((5456, 5467), 'time.time', 'time.time', ([], {}), '()\n', (5465, 5467), False, 'import time\n'), ((5479, 5496), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (5491, 5496), False, 'import requests\n'), ((6187, 6213), 'pandas.to_datetime', 'pd.to_datetime', (["df['date']"], {}), "(df['date'])\n", (6201, 6213), True, 'import pandas as pd\n'), ((7137, 7170), 'mundi.regions', 'mundi.regions', (['"""BR"""'], {'type': '"""state"""'}), "('BR', type='state')\n", (7150, 7170), False, 'import mundi\n'), ((2065, 2094), 'numpy.arange', 'np.arange', (['(end - start).days'], {}), '((end - start).days)\n', (2074, 2094), True, 'import numpy as np\n'), ((4761, 4821), 'requests.get', 'requests.get', (['url'], {'headers': "{'User-Agent': 'python-requests'}"}), "(url, headers={'User-Agent': 'python-requests'})\n", (4773, 4821), False, 'import requests\n'), ((7330, 7402), 
'sidekick.import_later', 'sk.import_later', (['"""..cli.api:covid19_api_downloader"""'], {'package': '__package__'}), "('..cli.api:covid19_api_downloader', package=__package__)\n", (7345, 7402), True, 'import sidekick as sk\n'), ((1672, 1690), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (1684, 1690), True, 'import pandas as pd\n'), ((5028, 5044), 'pandas.read_csv', 'pd.read_csv', (['url'], {}), '(url)\n', (5039, 5044), True, 'import pandas as pd\n'), ((5082, 5110), 'io.BytesIO', 'io.BytesIO', (['response.content'], {}), '(response.content)\n', (5092, 5110), False, 'import io\n'), ((6889, 6932), 'mundi.region', 'mundi.region', ([], {'country_id': 'country', 'name': 'name'}), '(country_id=country, name=name)\n', (6901, 6932), False, 'import mundi\n'), ((3887, 3916), 'mundi.transforms.sum_children', 'transforms.sum_children', (['data'], {}), '(data)\n', (3910, 3916), False, 'from mundi import transforms\n'), ((5537, 5548), 'time.time', 'time.time', ([], {}), '()\n', (5546, 5548), False, 'import time\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from packaging import version
import dask
import dask.array as da
import numpy as np
import pytest
import scipy
import scipy.ndimage
import dask_image.ndinterp
# mode lists for the case with prefilter = False
_supported_modes = ['constant', 'nearest', 'reflect', 'mirror']
_unsupported_modes = ['wrap']
# additional modes are present in SciPy >= 1.6.0
if version.parse(scipy.__version__) >= version.parse('1.6.0'):
_supported_modes += ['grid-constant', 'grid-mirror', 'grid-wrap']
def validate_spline_filter(n=2,
                           axis_size=64,
                           interp_order=3,
                           interp_mode='constant',
                           chunksize=32,
                           output=np.float64,
                           random_seed=0,
                           use_cupy=False,
                           axis=None,
                           input_as_non_dask_array=False,
                           depth=None):
    """Check dask_image's spline filter against the SciPy reference.

    Runs ``scipy.ndimage.spline_filter`` and
    ``dask_image.ndinterp.spline_filter`` on the same random ``n``-d image
    and asserts that values and dtypes agree.  When ``axis`` is not None,
    the one-dimensional variants (``spline_filter1d``) are exercised
    instead.
    """
    old_scipy = version.parse(scipy.__version__) < version.parse('1.4.0')
    if old_scipy and np.dtype(output) != np.float64:
        pytest.skip("bug in output dtype handling in SciPy < 1.4")

    # Reproducible random test image.
    np.random.seed(random_seed)
    image = np.random.random([axis_size] * n)

    if version.parse(dask.__version__) < version.parse("2020.1.0"):
        # Old dask versions cannot rechunk automatically, so skip any
        # configuration where a chunk would end up smaller than the depth.
        min_depth = dask_image.ndinterp._get_default_depth(interp_order)
        remainder = axis_size % chunksize
        if chunksize < min_depth or (0 < remainder < min_depth):
            pytest.skip("older dask doesn't automatically rechunk")

    if input_as_non_dask_array:
        if use_cupy:
            import cupy as cp
            filter_input = cp.asarray(image)
        else:
            filter_input = image
    else:
        # Wrap the image in a chunked dask array (optionally CuPy-backed).
        filter_input = da.from_array(image, chunks=[chunksize] * n)
        if use_cupy:
            import cupy as cp
            filter_input = filter_input.map_blocks(cp.asarray)

    if axis is None:
        scipy_func = scipy.ndimage.spline_filter
        dask_image_func = dask_image.ndinterp.spline_filter
        kwargs = {}
    else:
        scipy_func = scipy.ndimage.spline_filter1d
        dask_image_func = dask_image.ndinterp.spline_filter1d
        kwargs = {'axis': axis}

    # Reference result from SciPy.
    expected = scipy_func(
        image,
        output=output,
        order=interp_order,
        mode=interp_mode,
        **kwargs)
    # Result from dask-image, materialized for comparison.
    result = dask_image_func(
        filter_input,
        output=output,
        order=interp_order,
        mode=interp_mode,
        depth=depth,
        **kwargs).compute()

    tol = 1e-6
    expected_dtype = np.dtype(output)
    assert expected.dtype == result.dtype == expected_dtype
    assert np.allclose(expected, result, rtol=tol, atol=tol)
@pytest.mark.parametrize("n", [1, 2, 3])
@pytest.mark.parametrize("axis_size", [64])
@pytest.mark.parametrize("interp_order", range(2, 6))
@pytest.mark.parametrize("interp_mode", _supported_modes)
@pytest.mark.parametrize("chunksize", [32, 15])
def test_spline_filter_general(
n,
axis_size,
interp_order,
interp_mode,
chunksize,
):
validate_spline_filter(
n=n,
axis_size=axis_size,
interp_order=interp_order,
interp_mode=interp_mode,
chunksize=chunksize,
axis=None,
)
@pytest.mark.cupy
@pytest.mark.parametrize("n", [2])
@pytest.mark.parametrize("axis_size", [32])
@pytest.mark.parametrize("interp_order", range(2, 6))
@pytest.mark.parametrize("interp_mode", _supported_modes[::2])
@pytest.mark.parametrize("chunksize", [16])
@pytest.mark.parametrize("axis", [None, -1])
@pytest.mark.parametrize("input_as_non_dask_array", [False, True])
def test_spline_filter_cupy(
        n,
        axis_size,
        interp_order,
        interp_mode,
        chunksize,
        axis,
        input_as_non_dask_array,
):
    """Run the spline-filter comparison on CuPy-backed inputs."""
    pytest.importorskip("cupy", minversion="6.0.0")
    settings = dict(n=n,
                    axis_size=axis_size,
                    interp_order=interp_order,
                    interp_mode=interp_mode,
                    chunksize=chunksize,
                    axis=axis,
                    input_as_non_dask_array=input_as_non_dask_array,
                    use_cupy=True)
    validate_spline_filter(**settings)
@pytest.mark.parametrize("n", [1, 2, 3])
@pytest.mark.parametrize("axis_size", [48, 27])
@pytest.mark.parametrize("interp_order", range(2, 6))
@pytest.mark.parametrize("interp_mode", _supported_modes)
@pytest.mark.parametrize("chunksize", [33])
@pytest.mark.parametrize("axis", [0, 1, -1])
def test_spline_filter1d_general(
n,
axis_size,
interp_order,
interp_mode,
chunksize,
axis,
):
if axis == 1 and n < 2:
pytest.skip(msg="skip axis=1 for 1d signals")
validate_spline_filter(
n=n,
axis_size=axis_size,
interp_order=interp_order,
interp_mode=interp_mode,
chunksize=chunksize,
axis=axis,
)
@pytest.mark.parametrize("axis", [None, -1])
def test_spline_filter_non_dask_array_input(axis):
validate_spline_filter(
axis=axis,
input_as_non_dask_array=True,
)
@pytest.mark.parametrize("depth", [None, 24])
@pytest.mark.parametrize("axis", [None, -1])
def test_spline_filter_non_default_depth(depth, axis):
validate_spline_filter(
axis=axis,
depth=depth,
)
@pytest.mark.parametrize("depth", [(16, 32), [18]])
def test_spline_filter1d_invalid_depth(depth):
with pytest.raises(ValueError):
validate_spline_filter(
axis=-1,
depth=depth,
)
@pytest.mark.parametrize("axis_size", [32])
@pytest.mark.parametrize("interp_order", range(2, 6))
@pytest.mark.parametrize("interp_mode", _unsupported_modes)
@pytest.mark.parametrize("axis", [None, -1])
def test_spline_filter_unsupported_modes(
axis_size,
interp_order,
interp_mode,
axis,
):
with pytest.raises(NotImplementedError):
validate_spline_filter(
axis_size=axis_size,
interp_order=interp_order,
interp_mode=interp_mode,
axis=axis,
)
@pytest.mark.parametrize(
    "output", [np.float64, np.float32, "float32", np.dtype(np.float32)]
)
@pytest.mark.parametrize("axis", [None, -1])
def test_spline_filter_output_dtype(output, axis):
    """Any dtype-like `output` specification should be honored."""
    validate_spline_filter(axis_size=32,
                           interp_order=3,
                           output=output,
                           axis=axis)
@pytest.mark.parametrize("axis", [None, -1])
def test_spline_filter_array_output_unsupported(axis):
n = 2
axis_size = 32
shape = (axis_size,) * n
with pytest.raises(TypeError):
validate_spline_filter(
n=n,
axis_size=axis_size,
interp_order=3,
output=np.empty(shape),
axis=axis,
)
| [
"numpy.dtype",
"dask.array.from_array",
"numpy.allclose",
"pytest.skip",
"numpy.random.random",
"pytest.mark.parametrize",
"pytest.importorskip",
"pytest.raises",
"numpy.random.seed",
"numpy.empty",
"packaging.version.parse",
"cupy.asarray"
] | [((3241, 3280), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n"""', '[1, 2, 3]'], {}), "('n', [1, 2, 3])\n", (3264, 3280), False, 'import pytest\n'), ((3282, 3324), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""axis_size"""', '[64]'], {}), "('axis_size', [64])\n", (3305, 3324), False, 'import pytest\n'), ((3380, 3436), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""interp_mode"""', '_supported_modes'], {}), "('interp_mode', _supported_modes)\n", (3403, 3436), False, 'import pytest\n'), ((3438, 3484), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""chunksize"""', '[32, 15]'], {}), "('chunksize', [32, 15])\n", (3461, 3484), False, 'import pytest\n'), ((3806, 3839), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n"""', '[2]'], {}), "('n', [2])\n", (3829, 3839), False, 'import pytest\n'), ((3841, 3883), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""axis_size"""', '[32]'], {}), "('axis_size', [32])\n", (3864, 3883), False, 'import pytest\n'), ((3939, 4000), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""interp_mode"""', '_supported_modes[::2]'], {}), "('interp_mode', _supported_modes[::2])\n", (3962, 4000), False, 'import pytest\n'), ((4002, 4044), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""chunksize"""', '[16]'], {}), "('chunksize', [16])\n", (4025, 4044), False, 'import pytest\n'), ((4046, 4089), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""axis"""', '[None, -1]'], {}), "('axis', [None, -1])\n", (4069, 4089), False, 'import pytest\n'), ((4091, 4156), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_as_non_dask_array"""', '[False, True]'], {}), "('input_as_non_dask_array', [False, True])\n", (4114, 4156), False, 'import pytest\n'), ((4629, 4668), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n"""', '[1, 2, 3]'], {}), "('n', [1, 2, 3])\n", (4652, 4668), False, 'import pytest\n'), ((4670, 4716), 
'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""axis_size"""', '[48, 27]'], {}), "('axis_size', [48, 27])\n", (4693, 4716), False, 'import pytest\n'), ((4772, 4828), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""interp_mode"""', '_supported_modes'], {}), "('interp_mode', _supported_modes)\n", (4795, 4828), False, 'import pytest\n'), ((4830, 4872), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""chunksize"""', '[33]'], {}), "('chunksize', [33])\n", (4853, 4872), False, 'import pytest\n'), ((4874, 4917), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""axis"""', '[0, 1, -1]'], {}), "('axis', [0, 1, -1])\n", (4897, 4917), False, 'import pytest\n'), ((5315, 5358), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""axis"""', '[None, -1]'], {}), "('axis', [None, -1])\n", (5338, 5358), False, 'import pytest\n'), ((5505, 5549), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""depth"""', '[None, 24]'], {}), "('depth', [None, 24])\n", (5528, 5549), False, 'import pytest\n'), ((5551, 5594), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""axis"""', '[None, -1]'], {}), "('axis', [None, -1])\n", (5574, 5594), False, 'import pytest\n'), ((5728, 5778), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""depth"""', '[(16, 32), [18]]'], {}), "('depth', [(16, 32), [18]])\n", (5751, 5778), False, 'import pytest\n'), ((5954, 5996), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""axis_size"""', '[32]'], {}), "('axis_size', [32])\n", (5977, 5996), False, 'import pytest\n'), ((6052, 6110), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""interp_mode"""', '_unsupported_modes'], {}), "('interp_mode', _unsupported_modes)\n", (6075, 6110), False, 'import pytest\n'), ((6112, 6155), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""axis"""', '[None, -1]'], {}), "('axis', [None, -1])\n", (6135, 6155), False, 'import pytest\n'), ((6584, 6627), 'pytest.mark.parametrize', 
'pytest.mark.parametrize', (['"""axis"""', '[None, -1]'], {}), "('axis', [None, -1])\n", (6607, 6627), False, 'import pytest\n'), ((6805, 6848), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""axis"""', '[None, -1]'], {}), "('axis', [None, -1])\n", (6828, 6848), False, 'import pytest\n'), ((406, 438), 'packaging.version.parse', 'version.parse', (['scipy.__version__'], {}), '(scipy.__version__)\n', (419, 438), False, 'from packaging import version\n'), ((442, 464), 'packaging.version.parse', 'version.parse', (['"""1.6.0"""'], {}), "('1.6.0')\n", (455, 464), False, 'from packaging import version\n'), ((1429, 1456), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (1443, 1456), True, 'import numpy as np\n'), ((1469, 1502), 'numpy.random.random', 'np.random.random', (['([axis_size] * n)'], {}), '([axis_size] * n)\n', (1485, 1502), True, 'import numpy as np\n'), ((3040, 3056), 'numpy.dtype', 'np.dtype', (['output'], {}), '(output)\n', (3048, 3056), True, 'import numpy as np\n'), ((3143, 3214), 'numpy.allclose', 'np.allclose', (['image_t_scipy', 'image_t_dask_computed'], {'rtol': 'rtol', 'atol': 'atol'}), '(image_t_scipy, image_t_dask_computed, rtol=rtol, atol=atol)\n', (3154, 3214), True, 'import numpy as np\n'), ((4305, 4352), 'pytest.importorskip', 'pytest.importorskip', (['"""cupy"""'], {'minversion': '"""6.0.0"""'}), "('cupy', minversion='6.0.0')\n", (4324, 4352), False, 'import pytest\n'), ((1341, 1399), 'pytest.skip', 'pytest.skip', (['"""bug in output dtype handling in SciPy < 1.4"""'], {}), "('bug in output dtype handling in SciPy < 1.4')\n", (1352, 1399), False, 'import pytest\n'), ((1511, 1542), 'packaging.version.parse', 'version.parse', (['dask.__version__'], {}), '(dask.__version__)\n', (1524, 1542), False, 'from packaging import version\n'), ((1545, 1570), 'packaging.version.parse', 'version.parse', (['"""2020.1.0"""'], {}), "('2020.1.0')\n", (1558, 1570), False, 'from packaging import version\n'), ((2115, 2159), 
'dask.array.from_array', 'da.from_array', (['image'], {'chunks': '([chunksize] * n)'}), '(image, chunks=[chunksize] * n)\n', (2128, 2159), True, 'import dask.array as da\n'), ((5073, 5118), 'pytest.skip', 'pytest.skip', ([], {'msg': '"""skip axis=1 for 1d signals"""'}), "(msg='skip axis=1 for 1d signals')\n", (5084, 5118), False, 'import pytest\n'), ((5836, 5861), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5849, 5861), False, 'import pytest\n'), ((6271, 6305), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (6284, 6305), False, 'import pytest\n'), ((6559, 6579), 'numpy.dtype', 'np.dtype', (['np.float32'], {}), '(np.float32)\n', (6567, 6579), True, 'import numpy as np\n'), ((6973, 6997), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (6986, 6997), False, 'import pytest\n'), ((1225, 1241), 'numpy.dtype', 'np.dtype', (['output'], {}), '(output)\n', (1233, 1241), True, 'import numpy as np\n'), ((1268, 1300), 'packaging.version.parse', 'version.parse', (['scipy.__version__'], {}), '(scipy.__version__)\n', (1281, 1300), False, 'from packaging import version\n'), ((1303, 1325), 'packaging.version.parse', 'version.parse', (['"""1.4.0"""'], {}), "('1.4.0')\n", (1316, 1325), False, 'from packaging import version\n'), ((1826, 1881), 'pytest.skip', 'pytest.skip', (['"""older dask doesn\'t automatically rechunk"""'], {}), '("older dask doesn\'t automatically rechunk")\n', (1837, 1881), False, 'import pytest\n'), ((1989, 2006), 'cupy.asarray', 'cp.asarray', (['image'], {}), '(image)\n', (1999, 2006), True, 'import cupy as cp\n'), ((7128, 7143), 'numpy.empty', 'np.empty', (['shape'], {}), '(shape)\n', (7136, 7143), True, 'import numpy as np\n')] |
"""Stereographic projection module."""
import numpy as np
from .__main__ import Projection
from ..angles import DEC, RA
class Sky(Projection):
    """Stereographic sky projection object.
    Maps right-ascension/declination coordinates to/from plane
    coordinates by rotating the celestial sphere to the requested
    pointing (``ra``, ``dec``) with a field-of-view ``twist``.
    Parameters
    ----------
    ra: float, optional
        Pointing right-ascension (degree).
    dec: float, optional
        Pointing declination (degree).
    twist: float, optional
        Field-of-view twist clockwise angle (degree).
    Source
    ------
    https://proj.org/operations/projections/stere.html
    https://github.com/proj4js/proj4js/blob/master/lib/projections/stere.js
    """
    def __init__(self, ra=0, dec=0, twist=0):
        self.ra = ra
        self.dec = dec
        self.twist = twist
    def __repr__(self):
        return (f'<{self}> '
                f'RA: {self.ra}° | '
                f'Dec: {self.dec}° | '
                f'Twist: {self.twist}°')
    @property
    def ra(self):
        """Pointing right-ascension (degree)."""
        return self.__ra
    @ra.setter
    def ra(self, ra):
        """Set pointing right-ascension (degree)."""
        self.__ra = RA(ra)
        # _cs comes from Projection; presumably returns the (cos, sin)
        # of the angle in degrees — TODO confirm against the base class.
        self.__cra, self.__sra = self._cs(self.__ra)
        # Invalidate the cached rotation matrix.
        self.__m = None
    @property
    def dec(self):
        """Pointing declination."""
        return self.__dec
    @dec.setter
    def dec(self, dec):
        """Set pointing declination (degree)."""
        self.__dec = DEC(dec)
        self.__cdec, self.__sdec = self._cs(self.__dec)
        # Invalidate the cached rotation matrix.
        self.__m = None
    @property
    def twist(self):
        """Pointing fov twist clockwise angle."""
        return self.__twist
    @twist.setter
    def twist(self, twist):
        """Set pointing FOV twist clockwise-angle (degree)."""
        self.__twist = twist
        # Half-angle cos/sin: the twist enters _rot_sky through a
        # quaternion, which uses half-angle components.
        self.__ctwist, self.__stwist = self._cs(self.__twist / 2)
        # Invalidate the cached rotation matrix.
        self.__m = None
    @property
    def pointing(self):
        """FOV pointing angles."""
        return self.ra, self.dec, self.twist
    @property
    def m(self):
        """Sky rotation matrix (computed lazily and cached)."""
        if self.__m is None:
            self.__m = self._rot_sky()
        return self.__m
    def _rot_sky(self):
        """Calculate the sky rotation matrix."""
        # Rotation about the y-axis by the declination.
        m1 = np.array([
            [self.__cdec, 0, self.__sdec],
            [0, 1, 0],
            [-self.__sdec, 0, self.__cdec],
        ])
        # Rotation about the z-axis by the right-ascension.
        m2 = np.array([
            [self.__cra, self.__sra, 0],
            [-self.__sra, self.__cra, 0],
            [0, 0, 1],
        ])
        # Twist expressed as a unit quaternion (q0, q1, q2, q3):
        # __ctwist/__stwist are the half-angle cosine/sine, and the
        # rotation axis is the pointing direction.
        q0 = self.__ctwist
        q1 = self.__stwist * self.__cdec * self.__cra
        q2 = self.__stwist * self.__cdec * self.__sra
        q3 = self.__stwist * self.__sdec
        # Standard quaternion-to-rotation-matrix conversion.
        m3 = np.array([[
            1 - 2 * (q2 * q2 + q3 * q3),
            2 * (q1 * q2 + q0 * q3),
            2 * (q1 * q3 - q0 * q2),
        ], [
            2 * (q1 * q2 - q0 * q3),
            1 - 2 * (q1 * q1 + q3 * q3),
            2 * (q2 * q3 + q0 * q1),
        ], [
            2 * (q1 * q3 + q0 * q2),
            2 * (q2 * q3 - q0 * q1),
            1 - 2 * (q1 * q1 + q2 * q2),
        ]])
        return np.dot(m1, np.dot(m2, m3))
    def xy(self, ra, dec):
        """Convert ra/dec coordinates in map coordinates.
        Parameters
        ----------
        ra: float or array
            Right-ascension (degree).
        dec: float or array
            Declination (degree).
        Returns
        -------
        float or array, float or array
            X-Y map coordinates.
        Raises
        ------
        ValueError
            If ``ra`` and ``dec`` are arrays of different shapes.
        """
        (cra, sra), (cdec, sdec) = self._cs(ra), self._cs(dec)
        if np.ndim(ra) == 0 and np.ndim(dec) == 0:
            shape = None
            # Unit vector of the sky direction, rotated into the
            # pointing frame.
            xyz = np.dot(self.m, [cra * cdec, sra * cdec, sdec])
        else:
            # Resolve the output shape for scalar/array combinations.
            if np.ndim(ra) > 0 and np.ndim(dec) == 0:
                shape = np.shape(ra)
            elif np.ndim(ra) == 0 and np.ndim(dec) > 0:
                shape = np.shape(dec)
            elif np.shape(ra) == np.shape(dec):
                shape = np.shape(ra)
            else:
                raise ValueError('RA and DEC arrays must have the same size.')
            xyz = np.zeros((3, np.prod(shape)))
            xyz[0] = (cra * cdec).ravel()
            xyz[1] = (sra * cdec).ravel()
            xyz[2] = sdec.ravel()
            # NOTE(review): `out` aliases an operand here; numpy.dot does
            # not guarantee correct results when out overlaps an input —
            # verify.
            np.dot(self.m, xyz, out=xyz)
        # Perspective divide by the pointing-axis component.
        x, y = xyz[1] / xyz[0], xyz[2] / xyz[0]
        if shape is not None:
            x = np.reshape(x, shape)
            y = np.reshape(y, shape)
        return x, y
    def lonlat(self, x, y):
        """Alias for map coordinates in ra/dec coordinates.
        Parameters
        ----------
        x: float or array
            X-coordinate on the map [m].
        y: float or array
            Y-coordinate on the map [m].
        Returns
        -------
        float or array, float or array
            Right ascension and declination [degree].
        See also
        --------
        pyvims.projections.sky.Sky.radec
        """
        return self.radec(x, y)
    def radec(self, x, y):
        """Convert map coordinates in ra/dec coordinates.
        Parameters
        ----------
        x: float or array
            X-coordinate on the map [m].
        y: float or array
            Y-coordinate on the map [m].
        Returns
        -------
        float or array, float or array
            Right ascension and declination [degree].
        Raises
        ------
        ValueError
            If ``x`` and ``y`` are arrays of different shapes.
        """
        if np.ndim(x) == 0 and np.ndim(y) == 0:
            shape = None
            u = [1, x, y]
        else:
            # Resolve the output shape for scalar/array combinations.
            if np.ndim(x) > 0 and np.ndim(y) == 0:
                shape = np.shape(x)
            elif np.ndim(x) == 0 and np.ndim(y) > 0:
                shape = np.shape(y)
            elif np.shape(x) == np.shape(y):
                shape = np.shape(x)
            else:
                raise ValueError('X and Y arrays must have the same size.')
            u = np.ones((3, np.prod(shape)))
            u[1] = np.reshape(x, (-1))
            u[2] = np.reshape(y, (-1))
        # Normalize to a unit vector, then rotate back to the sky frame
        # with the transposed (inverse) rotation matrix.
        norm = np.sqrt(np.sum(np.power(u, 2), axis=0))
        u = np.divide(u, norm)
        v = np.dot(self.m.T, u)
        ra = np.degrees(np.arctan2(v[1], v[0])) % 360
        dec = np.degrees(np.arcsin(v[2]))
        if shape is not None:
            ra = np.reshape(ra, shape)
            dec = np.reshape(dec, shape)
        return ra, dec
| [
"numpy.prod",
"numpy.reshape",
"numpy.power",
"numpy.arcsin",
"numpy.ndim",
"numpy.array",
"numpy.dot",
"numpy.arctan2",
"numpy.shape",
"numpy.divide"
] | [((2153, 2242), 'numpy.array', 'np.array', (['[[self.__cdec, 0, self.__sdec], [0, 1, 0], [-self.__sdec, 0, self.__cdec]]'], {}), '([[self.__cdec, 0, self.__sdec], [0, 1, 0], [-self.__sdec, 0, self.\n __cdec]])\n', (2161, 2242), True, 'import numpy as np\n'), ((2299, 2384), 'numpy.array', 'np.array', (['[[self.__cra, self.__sra, 0], [-self.__sra, self.__cra, 0], [0, 0, 1]]'], {}), '([[self.__cra, self.__sra, 0], [-self.__sra, self.__cra, 0], [0, 0, 1]]\n )\n', (2307, 2384), True, 'import numpy as np\n'), ((2618, 2884), 'numpy.array', 'np.array', (['[[1 - 2 * (q2 * q2 + q3 * q3), 2 * (q1 * q2 + q0 * q3), 2 * (q1 * q3 - q0 *\n q2)], [2 * (q1 * q2 - q0 * q3), 1 - 2 * (q1 * q1 + q3 * q3), 2 * (q2 *\n q3 + q0 * q1)], [2 * (q1 * q3 + q0 * q2), 2 * (q2 * q3 - q0 * q1), 1 - \n 2 * (q1 * q1 + q2 * q2)]]'], {}), '([[1 - 2 * (q2 * q2 + q3 * q3), 2 * (q1 * q2 + q0 * q3), 2 * (q1 *\n q3 - q0 * q2)], [2 * (q1 * q2 - q0 * q3), 1 - 2 * (q1 * q1 + q3 * q3), \n 2 * (q2 * q3 + q0 * q1)], [2 * (q1 * q3 + q0 * q2), 2 * (q2 * q3 - q0 *\n q1), 1 - 2 * (q1 * q1 + q2 * q2)]])\n', (2626, 2884), True, 'import numpy as np\n'), ((5959, 5977), 'numpy.divide', 'np.divide', (['u', 'norm'], {}), '(u, norm)\n', (5968, 5977), True, 'import numpy as np\n'), ((5991, 6010), 'numpy.dot', 'np.dot', (['self.m.T', 'u'], {}), '(self.m.T, u)\n', (5997, 6010), True, 'import numpy as np\n'), ((3040, 3054), 'numpy.dot', 'np.dot', (['m2', 'm3'], {}), '(m2, m3)\n', (3046, 3054), True, 'import numpy as np\n'), ((3584, 3630), 'numpy.dot', 'np.dot', (['self.m', '[cra * cdec, sra * cdec, sdec]'], {}), '(self.m, [cra * cdec, sra * cdec, sdec])\n', (3590, 3630), True, 'import numpy as np\n'), ((4192, 4220), 'numpy.dot', 'np.dot', (['self.m', 'xyz'], {'out': 'xyz'}), '(self.m, xyz, out=xyz)\n', (4198, 4220), True, 'import numpy as np\n'), ((4317, 4337), 'numpy.reshape', 'np.reshape', (['x', 'shape'], {}), '(x, shape)\n', (4327, 4337), True, 'import numpy as np\n'), ((4354, 4374), 'numpy.reshape', 'np.reshape', (['y', 
'shape'], {}), '(y, shape)\n', (4364, 4374), True, 'import numpy as np\n'), ((5832, 5849), 'numpy.reshape', 'np.reshape', (['x', '(-1)'], {}), '(x, -1)\n', (5842, 5849), True, 'import numpy as np\n'), ((5871, 5888), 'numpy.reshape', 'np.reshape', (['y', '(-1)'], {}), '(y, -1)\n', (5881, 5888), True, 'import numpy as np\n'), ((6090, 6105), 'numpy.arcsin', 'np.arcsin', (['v[2]'], {}), '(v[2])\n', (6099, 6105), True, 'import numpy as np\n'), ((6155, 6176), 'numpy.reshape', 'np.reshape', (['ra', 'shape'], {}), '(ra, shape)\n', (6165, 6176), True, 'import numpy as np\n'), ((6195, 6217), 'numpy.reshape', 'np.reshape', (['dec', 'shape'], {}), '(dec, shape)\n', (6205, 6217), True, 'import numpy as np\n'), ((3501, 3512), 'numpy.ndim', 'np.ndim', (['ra'], {}), '(ra)\n', (3508, 3512), True, 'import numpy as np\n'), ((3522, 3534), 'numpy.ndim', 'np.ndim', (['dec'], {}), '(dec)\n', (3529, 3534), True, 'import numpy as np\n'), ((3723, 3735), 'numpy.shape', 'np.shape', (['ra'], {}), '(ra)\n', (3731, 3735), True, 'import numpy as np\n'), ((5314, 5324), 'numpy.ndim', 'np.ndim', (['x'], {}), '(x)\n', (5321, 5324), True, 'import numpy as np\n'), ((5334, 5344), 'numpy.ndim', 'np.ndim', (['y'], {}), '(y)\n', (5341, 5344), True, 'import numpy as np\n'), ((5491, 5502), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (5499, 5502), True, 'import numpy as np\n'), ((5922, 5936), 'numpy.power', 'np.power', (['u', '(2)'], {}), '(u, 2)\n', (5930, 5936), True, 'import numpy as np\n'), ((6035, 6057), 'numpy.arctan2', 'np.arctan2', (['v[1]', 'v[0]'], {}), '(v[1], v[0])\n', (6045, 6057), True, 'import numpy as np\n'), ((3660, 3671), 'numpy.ndim', 'np.ndim', (['ra'], {}), '(ra)\n', (3667, 3671), True, 'import numpy as np\n'), ((3680, 3692), 'numpy.ndim', 'np.ndim', (['dec'], {}), '(dec)\n', (3687, 3692), True, 'import numpy as np\n'), ((3816, 3829), 'numpy.shape', 'np.shape', (['dec'], {}), '(dec)\n', (3824, 3829), True, 'import numpy as np\n'), ((4044, 4058), 'numpy.prod', 'np.prod', (['shape'], 
{}), '(shape)\n', (4051, 4058), True, 'import numpy as np\n'), ((5431, 5441), 'numpy.ndim', 'np.ndim', (['x'], {}), '(x)\n', (5438, 5441), True, 'import numpy as np\n'), ((5450, 5460), 'numpy.ndim', 'np.ndim', (['y'], {}), '(y)\n', (5457, 5460), True, 'import numpy as np\n'), ((5580, 5591), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (5588, 5591), True, 'import numpy as np\n'), ((5796, 5810), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (5803, 5810), True, 'import numpy as np\n'), ((3753, 3764), 'numpy.ndim', 'np.ndim', (['ra'], {}), '(ra)\n', (3760, 3764), True, 'import numpy as np\n'), ((3774, 3786), 'numpy.ndim', 'np.ndim', (['dec'], {}), '(dec)\n', (3781, 3786), True, 'import numpy as np\n'), ((3847, 3859), 'numpy.shape', 'np.shape', (['ra'], {}), '(ra)\n', (3855, 3859), True, 'import numpy as np\n'), ((3863, 3876), 'numpy.shape', 'np.shape', (['dec'], {}), '(dec)\n', (3871, 3876), True, 'import numpy as np\n'), ((3902, 3914), 'numpy.shape', 'np.shape', (['ra'], {}), '(ra)\n', (3910, 3914), True, 'import numpy as np\n'), ((5520, 5530), 'numpy.ndim', 'np.ndim', (['x'], {}), '(x)\n', (5527, 5530), True, 'import numpy as np\n'), ((5540, 5550), 'numpy.ndim', 'np.ndim', (['y'], {}), '(y)\n', (5547, 5550), True, 'import numpy as np\n'), ((5609, 5620), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (5617, 5620), True, 'import numpy as np\n'), ((5624, 5635), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (5632, 5635), True, 'import numpy as np\n'), ((5661, 5672), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (5669, 5672), True, 'import numpy as np\n')] |
"""Generate a single discrete time SIR model.
"""
from . import data_model
import numpy as np
from scipy import stats
import xarray as xr
# Generate Betas
# Beta, or the growth rate of the infection, depends on the covariates.
# Here we implement three different functional forms for the dependency.
SPLIT_TIME = 100
def generate_betas_from_single_random_covariate(num_locations):
  """Draw betas that depend on one uniformly-sampled covariate.

  Args:
    num_locations: an int representing the number of locations to simulate

  Returns:
    beta: an xr.DataArray consisting of the growth rate for each epidemic
    v: an xr.DataArray consisting of the randomly generated covariate for each
      location
    alpha: an xr.DataArray consisting of the weights for each covariate
  """
  covariates = np.random.uniform(0.0, 1.0, (num_locations, 1))
  v = xr.DataArray(covariates, dims=['location', 'static_covariate'])
  # A single covariate with unit weight.
  alpha = xr.DataArray(np.ones(1), dims=['static_covariate'])
  beta = 0.4 * np.exp(alpha @ v)
  return beta, v, alpha
def generate_betas_effect_mod(num_locations):
  """Draw betas driven by two randomly generated binary effects.

  Args:
    num_locations: an int representing the number of locations to simulate

  Returns:
    beta: an xr.DataArray consisting of the growth rate for each epidemic
    v: an xr.DataArray consisting of the randomly generated covariate for each
      location
    alpha: an xr.DataArray consisting of the weights for each covariate
  """
  draws = np.random.binomial(1, 0.5, size=(num_locations, 2))
  v = xr.DataArray(draws,
                   dims={'location': num_locations, 'static_covariate': 2})
  first_effect = v.values[:, 0]
  second_effect = v.values[:, 1]
  # Baseline growth of 1.5, doubled when effect 1 is on and effect 2 is off.
  log_beta = np.log(1.5) + np.log(2.0) * (first_effect == 1) * (second_effect == 0)
  beta = xr.DataArray(np.exp(log_beta), dims={'location': num_locations})
  alpha = xr.DataArray(np.array([1, 1]), dims={'static_covariate': 2})
  return beta, v, alpha
def generate_betas_many_cov2(num_locations, num_pred=1, num_not_pred=2):
  """Draw betas from a mix of predictive and non-predictive covariates.

  Args:
    num_locations: an int representing the number of locations to simulate.
    num_pred: an int representing the number of covariates that affect beta.
    num_not_pred: an int representing the number of covariates that do not
      affect beta.

  Returns:
    beta: an xr.DataArray consisting of the growth rate for each epidemic
    v: an xr.DataArray consisting of the randomly generated covariate for each
      location
    alpha: an xr.DataArray consisting of the weights for each covariate
  """
  num_covariates = num_pred + num_not_pred
  # Covariates are sampled uniformly from [-1, 1).
  v = xr.DataArray(
      np.random.uniform(low=-1.0, high=1.0,
                        size=(num_locations, num_covariates)),
      dims={'location': num_locations,
            'static_covariate': num_covariates})
  # Only the first num_pred covariates carry weight; the rest are zeroed.
  weights = np.concatenate((np.ones(num_pred), np.zeros(num_not_pred)),
                           axis=0)
  alpha = xr.DataArray(weights, dims={'static_covariate': num_covariates})
  # Note: a different functional form than the other beta generators.
  beta_np = 1 + np.exp(np.matmul(alpha.values, v.values.T))
  beta = xr.DataArray(beta_np, dims={'location': num_locations})
  return beta, v, alpha
def gen_dynamic_beta_random_time(num_locations, num_time_steps):
  """Betas change at a random time between 1 and num_time_steps-1.

  Args:
    num_locations: an int representing the number of locations to simulate
    num_time_steps: an int representing the number of time steps to simulate

  Returns:
    beta: an xr.DataArray consisting of the growth rate
      for each epidemic with dimensions (location, time)
    v: an xr.DataArray consisting of the randomly generated covariate for each
      location with dimensions (location, time, 1)
    alpha: an xr.DataArray consisting of the weights for each covariate with
      dimension 1.
  """
  # Each location gets one random switch time; the covariate is a step
  # function that turns on at that time.
  switch_times = np.random.randint(1, num_time_steps - 1, num_locations)
  step = np.zeros((num_locations, num_time_steps, 1))
  for loc, t in enumerate(switch_times):
    step[loc, t:] = 1
  v = xr.DataArray(step, dims=['location', 'time', 'dynamic_covariate'])
  weight = np.random.uniform(-1., 0.)
  alpha = weight * xr.DataArray(np.ones(1), dims=['dynamic_covariate'])
  beta = 0.4 * np.exp(alpha @ v)
  return beta, v, alpha
def gen_social_distancing_weight(num_locations):
  """Return a single random social-distancing weight in [-1, 0).

  Args:
    num_locations: unused; kept for interface compatibility with the other
      weight generators.

  Returns:
    A length-1 np.ndarray sampled uniformly from [-1, 0).
  """
  return np.random.uniform(-1., 0., 1)
def new_sir_simulation_model(num_locations, num_time_steps,
                             num_static_covariates, num_dynamic_covariates=0):
  """Return a zero data_model.new_model with extra simulation parameters.

  Args:
    num_locations: int representing the number of locations to model epidemics
      for
    num_time_steps: int representing the maximum number of time steps the
      epidemic can have
    num_static_covariates: int representing the number of static covariates for
      each location
    num_dynamic_covariates: int representing the number of dynamic covariates
      for each location.

  Returns:
    ds: an xr.Dataset representing the new infections and
      covariates for each location and representing the simulation parameters.
      All datavalues are initialized to 0.

  Raises:
    ValueError: if num_time_steps is smaller than SPLIT_TIME.
  """
  # The canonical train/test split must fall inside the simulated window.
  if num_time_steps < SPLIT_TIME:
    raise ValueError('num_time_steps must be at least %d' % (SPLIT_TIME,))
  ds = data_model.new_model(num_locations, num_time_steps,
                            num_static_covariates, num_dynamic_covariates)
  ds['canonical_split_time'] = SPLIT_TIME
  ds['canonical_split_time'].attrs['description'] = (
      'Int representing the canonical time at which to split the data.')
  ds['static_weights'] = data_model.new_dataarray(
      {'static_covariate': num_static_covariates})
  # TODO(edklein) should population_size be a covariate?
  ds['population_size'] = data_model.new_dataarray({'location': num_locations})
  ds['population_size'].attrs[
      'description'] = 'Int representing the population size in each location.'
  ds['fraction_infected'] = data_model.new_dataarray({
      'location': num_locations
  })
  ds['fraction_infected'].attrs['description'] = (
      'Float representing the fraction of the population '
      'infected at the day %d.' % (SPLIT_TIME,))
  ds['start_time'] = data_model.new_dataarray({
      'location': num_locations
  })
  ds['start_time'].attrs['description'] = (
      'Int representing the infection start time at each location')
  ds['recovery_rate'] = data_model.new_dataarray({'location': num_locations})
  ds['recovery_rate'].attrs[
      'description'] = ('Float representing the recovery rate in each location.'
                        ' This is used in the SIR simulation of the epidemic.')
  # With dynamic covariates, the growth rate can vary over time, so it also
  # carries a time dimension; otherwise it is constant per location.
  if num_dynamic_covariates > 0:
    ds['dynamic_weights'] = data_model.new_dataarray(
        {'time': num_time_steps,
         'dynamic_covariate': num_dynamic_covariates})
    ds['growth_rate'] = data_model.new_dataarray({'location': num_locations,
                                                  'time': num_time_steps})
    ds['growth_rate'].attrs[
        'description'] = ('Float representing the growth rate in each location'
                          ' at each point in time.'
                          'This is used in the SIR simulation of the epidemic.')
  else:
    ds['growth_rate'] = data_model.new_dataarray({'location': num_locations})
    ds['growth_rate'].attrs[
        'description'] = ('Float representing the growth rate in each location.'
                          'This is used in the SIR simulation of the epidemic.')
  return ds
def _helper_ground_truth_setup(population_size, num_time_steps):
  """Initialize the day-0 state (S, I, R) of an SIR simulation.

  Args:
    population_size: a xr.DataArray representing the population size in each
      location
    num_time_steps: an int representing the number of simulation 'days' to run
      at each location.

  Returns:
    new_infections: an int DataArray with shape (location, time); time 0 is
      seeded with exactly one infection in every location.
    num_susceptible: a DataArray with shape (location,) containing the
      number of susceptible individuals in each location at time 0.
    num_infected: a DataArray with shape (location,) containing the
      number of infected individuals (1) in each location at time 0.
    num_recovered: a DataArray with shape (location,) containing the
      number of recovered individuals (0) in each location at time 0.
  """
  num_locations = population_size.sizes['location']
  # Nobody has recovered yet at time 0.
  num_recovered = data_model.new_dataarray(
      {'location': num_locations}).astype(int)
  new_infections = data_model.new_dataarray(
      {'location': num_locations, 'time': num_time_steps}).astype(int)
  # Seed every location with a single infection on day 0.
  new_infections[dict(time=0)] = 1
  num_infected = new_infections.sel(time=0).copy()
  # Everyone who is not infected starts out susceptible.
  num_susceptible = population_size.copy()
  num_susceptible -= num_infected
  return new_infections, num_susceptible, num_infected, num_recovered
def _helper_ground_truth_loop(num_susceptible, num_recovered, num_infected,
                              beta_time_t, gamma, population_size,
                              prob_infection_constant):
  """Advance a discrete stochastic SIR simulation by one time step.

  Draws the number of new infections and new recoveries from binomial
  distributions and moves individuals between the S/I/R compartments.
  The S/I/R arrays are updated in place.

  Args:
    num_susceptible: DataArray (location,) of susceptible counts at time t.
    num_recovered: DataArray (location,) of recovered counts at time t.
    num_infected: DataArray (location,) of infected counts at time t.
    beta_time_t: growth rate of the disease in each location at time t.
    gamma: recovery rate of the disease in each location.
    population_size: population size in each location.
    prob_infection_constant: float multiplier on the per-person infection
      probability; values below 1. spread the epidemic curves out in time,
      which makes the downstream models fit better.

  Returns:
    num_new_infections: number of *new* infections drawn for time t+1.
    num_susceptible: updated susceptible counts at time t+1.
    num_recovered: updated recovered counts at time t+1.
    num_infected: updated infected counts at time t+1.
  """
  # Per-person infection probability, driven by the currently infected
  # fraction of the population. Cast to float explicitly so the division
  # cannot silently truncate.
  infected_fraction = num_infected.astype(float) / population_size
  p_infect = prob_infection_constant * (
      1 - np.exp(-infected_fraction * beta_time_t))
  # Clamp to the valid probability range [0, 1].
  p_infect = p_infect.where(p_infect > 0, 0)
  p_infect = p_infect.where(p_infect < 1, 1)
  # How many susceptible people become infected this step.
  num_new_infections = stats.binom.rvs(
      num_susceptible.astype(int), p_infect)
  # Per-person recovery probability, then how many infected people recover.
  p_recover = 1 - np.exp(-gamma)
  num_new_recoveries = stats.binom.rvs(num_infected, p_recover)
  # Book-keeping: move people between the S, I and R compartments in place.
  num_susceptible -= num_new_infections
  num_recovered += num_new_recoveries
  num_infected += num_new_infections - num_new_recoveries
  return num_new_infections, num_susceptible, num_recovered, num_infected
def generate_ground_truth(population_size,
                          beta,
                          gamma,
                          num_time_steps,
                          prob_infection_constant=0.2):
  """Generate infections over time using a discrete stochastic SIR model.

  The epidemic starts with a single case at time 0 in every location and
  is stepped forward for num_time_steps, yielding one epidemic curve per
  location.

  Args:
    population_size: a xr.DataArray with the population size per location.
    beta: a xr.DataArray with the growth rate per location (optionally
      per time step as well).
    gamma: a xr.DataArray with the recovery rate per location.
    num_time_steps: an int number of simulation 'days' to run.
    prob_infection_constant: float multiplier on the per-person infection
      probability; values below 1. spread the curves out in time.

  Returns:
    new_infections: a xr.DataArray of new infections at each
      (location, time).
  """
  (new_infections, susceptible,
   infected, recovered) = _helper_ground_truth_setup(population_size,
                                                     num_time_steps)
  # Broadcast a static growth rate across the time axis so the loop below
  # can index beta by time unconditionally.
  if 'time' not in beta.dims:
    beta = beta.expand_dims({'time': new_infections.sizes['time']})
  # Step the simulation forward; t=0 was already seeded in the setup.
  for t in new_infections.time[1:]:
    step_infections, susceptible, recovered, infected = _helper_ground_truth_loop(
        susceptible, recovered, infected, beta[dict(time=t)], gamma,
        population_size, prob_infection_constant)
    new_infections[dict(time=t)] = step_infections
  return new_infections
def generate_social_distancing_ground_truth(population_size,
                                            beta,
                                            gamma,
                                            num_time_steps,
                                            social_distancing_threshold,
                                            gen_social_distancing_weight_fn,
                                            prob_infection_constant=0.2
                                            ):
  """Generate infections over time using SIR with a variable growth rate.
  We assume that the epidemic starts with a single case at time 0.
  We then simulate the number of infected individuals as a function of time.
  When the number of infected individuals reaches num_infected_threshold,
  we decrease the growth rate by an amount determined by social_distance_fn.
  We continue simulating the number of infected individuals until we reach
  num_time_steps. This is the epidemic curve. Returns the epidemic curves as a
  function of time.
  Args:
    population_size: a xr.DataArray representing the population size in each
      location
    beta: a xr.DataArray representing the static growth rate of the disease in
      each location
    gamma: a xr.DataArray representing the recovery rate of the disease in each
      location
    num_time_steps: an int representing the number of simulation 'days' to run
      at each location.
    social_distancing_threshold: an array of ints of shape (num_locations)
      indicating the number of infections at each location when we change the
      growth rate.
    gen_social_distancing_weight_fn: A (partial) function that generates the
      weights of the social distancing covariate. Function is called with the
      argument num_locations.
    prob_infection_constant: a float representing a constant that we multiply
      the probability of becoming infected by. We noticed that a value of 1. led
      to curves that were short in time and clustered in time. By changing this
      to less than 1., our models fit better.
  Returns:
    beta_td: a xr.DataArray representing the time-dependent growth rate at each
      (location, time).
    dynamic_covariate: a xr.DataArray representing the time-dependent covariate
      at each (location, time). Currently fixed to be one covariate with
      a value of either 0 or 1.
    dynamic_weights: a xr.DataArray representing the weight of dynamic_covariate
      currently a 1d array with dimension ['dynamic_covariate'].
    new_infections: a xr.DataArray representing the new_infections at each
      (location, time).
  """
  new_infections, num_susceptible, num_infected, num_recovered = _helper_ground_truth_setup(
      population_size, num_time_steps)
  num_locations = population_size.sizes['location']
  # need to compute the change in growth rate at a given
  # infection load. This will be represented by a time-dependent covariate
  # that will be 0 or 1 in all locations. (It's possible we'll
  # want to allow the value of it to change eventually.) The weight will be
  # constant in time, although we might store it as time-dependent for
  # consistency/scalability
  dynamic_alpha = gen_social_distancing_weight_fn(num_locations)
  dynamic_weights = xr.DataArray(dynamic_alpha, dims=['dynamic_covariate'])
  # Start from the static beta; materialize a per-time copy so the loop
  # below can write a (possibly different) beta for each time step.
  beta_td = beta.copy()
  if 'time' not in beta_td.dims:
    beta_td = beta_td.expand_dims({'time': new_infections.sizes['time']}).copy()
  dynamic_covariate = xr.zeros_like(new_infections)
  dynamic_covariate = dynamic_covariate.expand_dims({'dynamic_covariate':1}).copy()
  # No locations start off above their threshold
  # at t=0
  infection_threshold = xr.zeros_like(beta).expand_dims({'dynamic_covariate':1})
  for t in new_infections.time[1:]:
    # Update growth rate if needed
    # Record the 0/1 covariate for this step, then adjust beta by the
    # covariate contracted against its weight over 'dynamic_covariate'.
    dynamic_covariate[dict(time=t)] = infection_threshold.astype(int)
    beta_td[dict(time=t)] = beta + (dynamic_weights @ infection_threshold.astype(int))
    beta_time_t = beta_td[dict(time=t)]
    num_new_infections, num_susceptible, num_recovered, num_infected = _helper_ground_truth_loop(
        num_susceptible, num_recovered, num_infected, beta_time_t, gamma,
        population_size, prob_infection_constant)
    new_infections[dict(time=t)] = num_new_infections
    # Check if we need to update growth rate
    # Note: the threshold computed here only takes effect at the *next*
    # iteration, i.e. social distancing kicks in with a one-step lag.
    total_infected = population_size - num_susceptible
    infection_threshold = (total_infected > social_distancing_threshold).T.expand_dims({'dynamic_covariate':1})
  return beta_td, dynamic_covariate, dynamic_weights, new_infections
def _helper_setup_sir_sim(gen_constant_beta_fn,
                          num_locations,
                          num_time_steps=500,
                          constant_gamma=0.33,
                          population_size=10000,
                          gen_dynamic_beta_fn=None):
  """Helper function to set up and store simulation variables in a xr.Dataset.

  Returns a xr.Dataset containing the growth rate, covariates, weights,
  population size, and recovery rate.

  Args:
    gen_constant_beta_fn: a partial function to generate the constant beta
      values for each epidemic when passed num_locations.
    num_locations: an int representing the number of locations to run.
    num_time_steps: an int representing the number of simulation 'days'
      (default 500).
    constant_gamma: a float representing the constant recovery rate (default
      0.33).
    population_size: a xr.DataArray representing the population size in each
      location, or an int to use the same size everywhere (default 10000).
    gen_dynamic_beta_fn: a function to generate the dynamic beta values for
      each epidemic when passed num_locations and num_time_steps. None if
      the betas are all static.

  Returns:
    trajectories: a xr.Dataset containing the growth rate, covariates,
      weights, population size, and recovery rate.
  """
  # generate growth rate for all locations
  beta, v, alpha = gen_constant_beta_fn(num_locations)
  if isinstance(population_size, int):
    # Broadcast a scalar population size to one value per location.
    population_size = xr.DataArray(
        population_size * np.ones(num_locations), dims=['location'])
  static_covariates = xr.concat((v, population_size), 'static_covariate')
  num_static_covariates = static_covariates.sizes['static_covariate']
  # give population_size a weight of 0
  static_weights = xr.concat(
      (alpha, xr.DataArray(np.array([0]), dims=['static_covariate'])),
      'static_covariate')
  if gen_dynamic_beta_fn:
    beta_td, v_td, alpha_td = gen_dynamic_beta_fn(num_locations, num_time_steps)
    # NOTE(review): this is the 'dynamic_covariate' coordinate of v_td, not a
    # plain int as the name suggests -- confirm new_sir_simulation_model
    # accepts it.
    num_dynamic_covariates = v_td.dynamic_covariate
    # Total growth rate is the static part broadcast over time plus the
    # time-dependent contribution.
    beta = beta_td + beta.expand_dims({'time': num_time_steps})
  else:
    num_dynamic_covariates = 0
  trajectories = new_sir_simulation_model(num_locations,
                                          num_time_steps, num_static_covariates,
                                          num_dynamic_covariates)
  trajectories['growth_rate'] = beta
  trajectories['static_covariates'] = static_covariates
  trajectories['static_weights'] = static_weights
  if gen_dynamic_beta_fn:
    trajectories['dynamic_weights'] = alpha_td
    trajectories['dynamic_covariates'] = v_td
  trajectories['population_size'] = population_size
  # Every location shares the same constant recovery rate.
  trajectories['recovery_rate'].data = constant_gamma * np.ones(num_locations)
  return trajectories
def generate_simulations(gen_constant_beta_fn,
                         num_locations,
                         num_time_steps=500,
                         constant_gamma=0.33,
                         population_size=10000,
                         gen_dynamic_beta_fn=None,
                         fraction_infected_limits=(.05, 1.),
                         prob_infection_constant=0.2):
  """Generate many stochastic SIR infection curves.

  Builds the simulation dataset (growth rates, covariates, weights,
  population and recovery rate), runs the SIR ground-truth generator in
  every location, and shifts each trajectory in time.

  Args:
    gen_constant_beta_fn: a partial function that generates the constant
      beta values for each epidemic when passed num_locations.
    num_locations: an int number of locations to simulate.
    num_time_steps: an int number of simulation 'days' (default 500).
    constant_gamma: a float constant recovery rate (default 0.33).
    population_size: a xr.DataArray of population sizes per location, or an
      int to use the same size everywhere (default 10000).
    gen_dynamic_beta_fn: a function generating dynamic beta values when
      passed num_locations and num_time_steps, or None for static betas.
    fraction_infected_limits: pair of floats in [0, 1] bounding the
      fraction of the population infected at SPLIT_TIME.
    prob_infection_constant: float multiplier on the per-person infection
      probability; values below 1. spread the curves out in time.

  Returns:
    trajectories: a xr.Dataset of the simulated infections over time.
  """
  trajectories = _helper_setup_sir_sim(gen_constant_beta_fn, num_locations,
                                       num_time_steps, constant_gamma,
                                       population_size, gen_dynamic_beta_fn)
  # Every curve initially starts with one infection at t=0; the
  # shift_timeseries call below staggers the start times so that the
  # fraction infected at SPLIT_TIME lands within the requested limits.
  trajectories['new_infections'] = generate_ground_truth(
      trajectories.population_size, trajectories.growth_rate,
      trajectories.recovery_rate, trajectories.sizes['time'],
      prob_infection_constant)
  return data_model.shift_timeseries(trajectories, fraction_infected_limits,
                                     SPLIT_TIME)
def generate_social_distancing_simulations(gen_constant_beta_fn,
                                           gen_social_distancing_weight_fn,
                                           num_locations,
                                           num_time_steps=500,
                                           constant_gamma=0.33,
                                           population_size=10000,
                                           social_distancing_threshold=10000/4,
                                           fraction_infected_limits=(.05, 1.),
                                           prob_infection_constant=0.2,
                                           shift_timeseries=True):
  """Generate many SIR curves with social distancing.

  Social distancing is triggered in a location once its cumulative
  infections exceed social_distancing_threshold; from then on the growth
  rate is adjusted by a weighted dynamic covariate.

  Args:
    gen_constant_beta_fn: a partial function that generates the constant
      beta values for each epidemic when passed num_locations.
    gen_social_distancing_weight_fn: a (partial) function generating the
      weights of the social distancing covariate; called with
      num_locations.
    num_locations: an int number of locations to simulate.
    num_time_steps: an int number of simulation 'days' (default 500).
    constant_gamma: a float constant recovery rate (default 0.33).
    population_size: a xr.DataArray of population sizes per location, or an
      int to use the same size everywhere (default 10000).
    social_distancing_threshold: number of infected individuals per
      location at which social distancing is switched on.
    fraction_infected_limits: pair of floats in [0, 1] bounding the
      fraction of the population infected at SPLIT_TIME.
    prob_infection_constant: float multiplier on the per-person infection
      probability; values below 1. spread the curves out in time.
    shift_timeseries: bool; when True the trajectories are shifted based on
      fraction_infected_limits, otherwise every trajectory starts with one
      infection at t=0.

  Returns:
    trajectories: a xr.Dataset of the simulated infections over time.
  """
  # Build the base dataset with static covariates only; the dynamic
  # (social-distancing) pieces are filled in from the ground truth below.
  trajectories = _helper_setup_sir_sim(gen_constant_beta_fn, num_locations,
                                       num_time_steps, constant_gamma,
                                       population_size,
                                       gen_dynamic_beta_fn=None)
  (beta_td, dynamic_covariates, dynamic_weights,
   new_infections) = generate_social_distancing_ground_truth(
       trajectories.population_size, trajectories.growth_rate,
       trajectories.recovery_rate, trajectories.sizes['time'],
       social_distancing_threshold, gen_social_distancing_weight_fn,
       prob_infection_constant)
  trajectories['growth_rate'] = beta_td
  trajectories['dynamic_covariates'] = dynamic_covariates
  trajectories['dynamic_weights'] = dynamic_weights
  trajectories['new_infections'] = new_infections
  if shift_timeseries:
    return data_model.shift_timeseries(trajectories, fraction_infected_limits,
                                       SPLIT_TIME)
  return trajectories
| [
"numpy.ones",
"scipy.stats.binom.rvs",
"numpy.log",
"xarray.concat",
"xarray.zeros_like",
"numpy.random.randint",
"numpy.zeros",
"numpy.exp",
"numpy.array",
"xarray.DataArray",
"numpy.random.uniform",
"numpy.concatenate",
"numpy.matmul",
"numpy.random.binomial"
] | [((1768, 1823), 'xarray.DataArray', 'xr.DataArray', (['beta_np'], {'dims': "{'location': num_locations}"}), "(beta_np, dims={'location': num_locations})\n", (1780, 1823), True, 'import xarray as xr\n'), ((2910, 2927), 'numpy.ones', 'np.ones', (['num_pred'], {}), '(num_pred)\n', (2917, 2927), True, 'import numpy as np\n'), ((2940, 2962), 'numpy.zeros', 'np.zeros', (['num_not_pred'], {}), '(num_not_pred)\n', (2948, 2962), True, 'import numpy as np\n'), ((3237, 3292), 'xarray.DataArray', 'xr.DataArray', (['beta_np'], {'dims': "{'location': num_locations}"}), "(beta_np, dims={'location': num_locations})\n", (3249, 3292), True, 'import xarray as xr\n'), ((3979, 4034), 'numpy.random.randint', 'np.random.randint', (['(1)', '(num_time_steps - 1)', 'num_locations'], {}), '(1, num_time_steps - 1, num_locations)\n', (3996, 4034), True, 'import numpy as np\n'), ((4041, 4085), 'numpy.zeros', 'np.zeros', (['(num_locations, num_time_steps, 1)'], {}), '((num_locations, num_time_steps, 1))\n', (4049, 4085), True, 'import numpy as np\n'), ((4150, 4215), 'xarray.DataArray', 'xr.DataArray', (['cov'], {'dims': "['location', 'time', 'dynamic_covariate']"}), "(cov, dims=['location', 'time', 'dynamic_covariate'])\n", (4162, 4215), True, 'import xarray as xr\n'), ((4424, 4455), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(0.0)', '(1)'], {}), '(-1.0, 0.0, 1)\n', (4441, 4455), True, 'import numpy as np\n'), ((12270, 12313), 'scipy.stats.binom.rvs', 'stats.binom.rvs', (['num_infected', 'prob_recover'], {}), '(num_infected, prob_recover)\n', (12285, 12313), False, 'from scipy import stats\n'), ((17758, 17813), 'xarray.DataArray', 'xr.DataArray', (['dynamic_alpha'], {'dims': "['dynamic_covariate']"}), "(dynamic_alpha, dims=['dynamic_covariate'])\n", (17770, 17813), True, 'import xarray as xr\n'), ((17977, 18006), 'xarray.zeros_like', 'xr.zeros_like', (['new_infections'], {}), '(new_infections)\n', (17990, 18006), True, 'import xarray as xr\n'), ((20644, 20695), 
'xarray.concat', 'xr.concat', (['(v, population_size)', '"""static_covariate"""'], {}), "((v, population_size), 'static_covariate')\n", (20653, 20695), True, 'import xarray as xr\n'), ((826, 873), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)', '(num_locations, 1)'], {}), '(0.0, 1.0, (num_locations, 1))\n', (843, 873), True, 'import numpy as np\n'), ((943, 953), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (950, 953), True, 'import numpy as np\n'), ((997, 1014), 'numpy.exp', 'np.exp', (['(alpha @ v)'], {}), '(alpha @ v)\n', (1003, 1014), True, 'import numpy as np\n'), ((1516, 1567), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.5)'], {'size': '(num_locations, 2)'}), '(1, 0.5, size=(num_locations, 2))\n', (1534, 1567), True, 'import numpy as np\n'), ((2642, 2730), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1.0)', 'high': '(1.0)', 'size': '(num_locations, num_pred + num_not_pred)'}), '(low=-1.0, high=1.0, size=(num_locations, num_pred +\n num_not_pred))\n', (2659, 2730), True, 'import numpy as np\n'), ((2986, 3028), 'numpy.concatenate', 'np.concatenate', (['(alpha_1, alpha_0)'], {'axis': '(0)'}), '((alpha_1, alpha_0), axis=0)\n', (3000, 3028), True, 'import numpy as np\n'), ((4226, 4254), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(0.0)'], {}), '(-1.0, 0.0)\n', (4243, 4254), True, 'import numpy as np\n'), ((4321, 4338), 'numpy.exp', 'np.exp', (['(alpha @ v)'], {}), '(alpha @ v)\n', (4327, 4338), True, 'import numpy as np\n'), ((12148, 12162), 'numpy.exp', 'np.exp', (['(-gamma)'], {}), '(-gamma)\n', (12154, 12162), True, 'import numpy as np\n'), ((21762, 21784), 'numpy.ones', 'np.ones', (['num_locations'], {}), '(num_locations)\n', (21769, 21784), True, 'import numpy as np\n'), ((1708, 1719), 'numpy.log', 'np.log', (['(1.5)'], {}), '(1.5)\n', (1714, 1719), True, 'import numpy as np\n'), ((1856, 1872), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (1864, 1872), True, 'import numpy as 
np\n'), ((3191, 3226), 'numpy.matmul', 'np.matmul', (['alpha.values', 'v.values.T'], {}), '(alpha.values, v.values.T)\n', (3200, 3226), True, 'import numpy as np\n'), ((4266, 4276), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (4273, 4276), True, 'import numpy as np\n'), ((11618, 11658), 'numpy.exp', 'np.exp', (['(-frac_pop_infected * beta_time_t)'], {}), '(-frac_pop_infected * beta_time_t)\n', (11624, 11658), True, 'import numpy as np\n'), ((18176, 18195), 'xarray.zeros_like', 'xr.zeros_like', (['beta'], {}), '(beta)\n', (18189, 18195), True, 'import xarray as xr\n'), ((20578, 20600), 'numpy.ones', 'np.ones', (['num_locations'], {}), '(num_locations)\n', (20585, 20600), True, 'import numpy as np\n'), ((20856, 20869), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (20864, 20869), True, 'import numpy as np\n'), ((1722, 1733), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (1728, 1733), True, 'import numpy as np\n')] |
'''
@Author: JosieHong
@Date: 2020-04-26 12:40:11
@LastEditAuthor: JosieHong
@LastEditTime: 2021-07-11 12:52:18
'''
import os.path as osp
import warnings
import math
import cv2
import mmcv
import numpy as np
from imagecorruptions import corrupt
from mmcv.parallel import DataContainer as DC
import torch
from .utils import random_scale, to_tensor
from .registry import DATASETS
from .coco_seg import Coco_Seg_Dataset, INF
@DATASETS.register_module
class DAVIS_Seg_Dataset(Coco_Seg_Dataset):
# davis 2016
# CLASSES = ('aerobatics', 'bear', 'bike-packing', 'blackswan', 'bmx-bumps',
# 'bmx-trees', 'boat', 'boxing-fisheye', 'breakdance', 'breakdance-flare',
# 'bus', 'camel', 'car-race', 'car-roundabout', 'car-shadow',
# 'car-turn', 'carousel', 'cat-girl', 'cats-car', 'chamaleon',
# 'classic-car', 'color-run', 'cows', 'crossing', 'dance-jump',
# 'dance-twirl', 'dancing', 'deer', 'disc-jockey', 'dog',
# 'dog-agility', 'dog-gooses', 'dogs-jump', 'dogs-scale', 'drift-chicane',
# 'drift-straight', 'drift-turn', 'drone', 'elephant', 'flamingo',
# 'giant-slalom', 'girl-dog', 'goat', 'gold-fish', 'golf',
# 'guitar-violin', 'gym', 'helicopter', 'hike', 'hockey',
# 'horsejump-high', 'horsejump-low', 'horsejump-stick', 'hoverboard', 'india',
# 'judo', 'kid-football', 'kite-surf', 'kite-walk', 'koala',
# 'lab-coat', 'lady-running', 'libby', 'lindy-hop', 'loading',
# 'lock', 'longboard', 'lucia', 'mallard-fly', 'mallard-water',
# 'man-bike', 'mbike-trick', 'miami-surf', 'monkeys-trees', 'motocross-bumps',
# 'motocross-jump', 'motorbike', 'mtb-race', 'night-race', 'orchid',
# 'paragliding', 'paragliding-launch', 'parkour', 'people-sunset', 'pigs',
# 'planes-crossing', 'planes-water', 'rallye', 'rhino', 'rollerblade',
# 'rollercoaster', 'salsa', 'schoolgirls', 'scooter-black', 'scooter-board',
# 'scooter-gray', 'seasnake', 'sheep', 'shooting', 'skate-jump',
# 'skate-park', 'slackline', 'snowboard', 'soapbox', 'soccerball',
# 'stroller', 'stunt', 'subway', 'surf', 'swing',
# 'tandem', 'tennis', 'tennis-vest', 'tractor', 'tractor-sand',
# 'train', 'tuk-tuk', 'upside-down', 'varanus-cage', 'walking')
# davis 2017
CLASSES = ('bear', 'bike-packing', 'blackswan', 'bmx-bumps',
'bmx-trees', 'boat', 'boxing-fisheye', 'breakdance', 'breakdance-flare',
'bus', 'camel', 'car-roundabout', 'car-shadow',
'car-turn', 'cat-girl', 'classic-car', 'color-run', 'cows', 'crossing', 'dance-jump',
'dance-twirl', 'dancing', 'deer', 'disc-jockey', 'dog',
'dog-agility', 'dog-gooses', 'dogs-jump', 'dogs-scale', 'drift-chicane',
'drift-straight', 'drift-turn', 'drone', 'elephant', 'flamingo',
'goat', 'gold-fish',
'hike', 'hockey',
'horsejump-high', 'horsejump-low', 'india',
'judo', 'kid-football', 'kite-surf', 'kite-walk', 'koala',
'lab-coat', 'lady-running', 'libby', 'lindy-hop', 'loading',
'longboard', 'lucia', 'mallard-fly', 'mallard-water',
'mbike-trick', 'miami-surf', 'motocross-bumps',
'motocross-jump', 'motorbike', 'night-race',
'paragliding', 'paragliding-launch', 'parkour', 'pigs',
'planes-water', 'rallye', 'rhino', 'rollerblade',
'schoolgirls', 'scooter-black', 'scooter-board',
'scooter-gray', 'sheep', 'shooting',
'skate-park', 'snowboard', 'soapbox', 'soccerball',
'stroller', 'stunt', 'surf', 'swing',
'tennis', 'tractor-sand',
'train', 'tuk-tuk', 'upside-down', 'varanus-cage', 'walking')
def __init__(self,
ann_file,
img_prefix,
img_scale,
img_norm_cfg,
refer_scale=(127,127),
num_polar=36,
multiscale_mode='value',
size_divisor=None,
proposal_file=None,
num_max_proposals=1000,
flip_ratio=0,
with_mask=True,
with_crowd=True,
with_label=True,
with_semantic_seg=False,
seg_prefix=None,
seg_scale_factor=1,
extra_aug=None,
resize_keep_ratio=True,
corruption=None,
corruption_severity=1,
skip_img_without_anno=True,
test_mode=False,
strides=[8, 16, 32, 64, 128],
regress_ranges=[(-1, 64), (64, 128),
(128, 256), (256, 512), (512, 1e8)]):
super(DAVIS_Seg_Dataset, self).__init__(ann_file,
img_prefix,
img_scale,
img_norm_cfg,
multiscale_mode,
size_divisor,
proposal_file,
num_max_proposals,
flip_ratio,
with_mask,
with_crowd,
with_label,
with_semantic_seg,
seg_prefix,
seg_scale_factor,
extra_aug,
resize_keep_ratio,
corruption,
corruption_severity,
skip_img_without_anno,
test_mode)
self.refer_scale = refer_scale
self.strides = strides
self.regress_ranges = regress_ranges
assert num_polar in [36, 72, 180]
self.num_polar = num_polar
    def prepare_train_img(self, idx):
        """Load and transform one training sample.

        Returns a dict with the augmented image, a reference image cropped
        from the sequence's first frame, gt boxes/labels/masks, and the
        offline-generated polar (ray) targets; returns None when the sample
        has no usable annotations.
        """
        img_info = self.img_infos[idx]
        img = mmcv.imread(osp.join(self.img_prefix, img_info['filename']))
        # corruption
        if self.corruption is not None:
            img = corrupt(
                img,
                severity=self.corruption_severity,
                corruption_name=self.corruption)
        # load proposals if necessary
        if self.proposals is not None:
            proposals = self.proposals[idx][:self.num_max_proposals]
            # TODO: Handle empty proposals properly. Currently images with
            # no proposals are just ignored, but they can be used for
            # training in concept.
            if len(proposals) == 0:
                return None
            if not (proposals.shape[1] == 4 or proposals.shape[1] == 5):
                raise AssertionError(
                    'proposals should have shapes (n, 4) or (n, 5), '
                    'but found {}'.format(proposals.shape))
            if proposals.shape[1] == 5:
                # 5th column is a score; split it off before the transform.
                scores = proposals[:, 4, None]
                proposals = proposals[:, :4]
            else:
                scores = None
        ann = self.get_ann_info(idx)
        gt_bboxes = ann['bboxes']
        gt_labels = ann['labels']
        if self.with_crowd:
            gt_bboxes_ignore = ann['bboxes_ignore']
        # skip the image if there is no valid gt bbox
        if len(gt_bboxes) == 0 and self.skip_img_without_anno:
            warnings.warn('Skip the image "%s" that has no valid gt bbox' %
                          osp.join(self.img_prefix, img_info['filename']))
            return None
        # apply transforms
        flip = True if np.random.rand() < self.flip_ratio else False
        # randomly sample a scale
        img_scale = random_scale(self.img_scales, self.multiscale_mode)
        img, img_shape, pad_shape, scale_factor = self.img_transform(img, img_scale, flip, keep_ratio=self.resize_keep_ratio)
        img = img.copy()
        # get img_refer from first frame
        first_frame_idx = img_info["first_frame"]
        refer_info = self.img_infos[first_frame_idx]
        refer_ann = self.get_ann_info(first_frame_idx)
        img_refer = mmcv.imread(osp.join(self.img_prefix, refer_info['filename']))
        # crop the bbox
        # NOTE(review): mmcv.imcrop returns a list when given several boxes;
        # the squeeze suggests one reference bbox per first frame — confirm.
        img_refer = torch.squeeze(torch.Tensor(mmcv.imcrop(img_refer, refer_ann["bboxes"])))
        # resize to refer_scale
        img_refer = torch.Tensor(mmcv.imresize(np.float32(img_refer), self.refer_scale, return_scale=False)).permute(2, 0, 1)
        if self.with_seg:
            gt_seg = mmcv.imread(
                osp.join(self.seg_prefix,
                         img_info['filename'].replace('jpg', 'png')),
                flag='unchanged')
            gt_seg = self.seg_transform(gt_seg.squeeze(), img_scale, flip)
            gt_seg = mmcv.imrescale(
                gt_seg, self.seg_scale_factor, interpolation='nearest')
            gt_seg = gt_seg[None, ...]
        if self.proposals is not None:
            proposals = self.bbox_transform(proposals, img_shape, scale_factor,
                                            flip)
            proposals = np.hstack([proposals, scores
                                   ]) if scores is not None else proposals
        gt_bboxes = self.bbox_transform(gt_bboxes, img_shape, scale_factor,
                                        flip)
        if self.with_crowd:
            gt_bboxes_ignore = self.bbox_transform(gt_bboxes_ignore, img_shape,
                                                   scale_factor, flip)
        if self.with_mask:
            gt_masks = self.mask_transform(ann['masks'], pad_shape,
                                           scale_factor, flip)
        ori_shape = (img_info['height'], img_info['width'], 3)
        img_meta = dict(
            ori_shape=ori_shape,
            img_shape=img_shape,
            pad_shape=pad_shape,
            scale_factor=scale_factor,
            flip=flip)
        data = dict(
            img=DC(to_tensor(img), stack=True),
            img_meta=DC(img_meta, cpu_only=True),
            gt_bboxes=DC(to_tensor(gt_bboxes)),
            img_refer=DC(to_tensor(img_refer), stack=True))
        if self.with_label:
            data['gt_labels'] = DC(to_tensor(gt_labels))
        if self.with_crowd:
            data['gt_bboxes_ignore'] = DC(to_tensor(gt_bboxes_ignore))
        if self.with_mask:
            data['gt_masks'] = DC(gt_masks, cpu_only=True)
        #--------------------offline ray label generation-----------------------------
        self.center_sample = True
        self.use_mask_center = True
        self.radius = 1.5
        featmap_sizes = self.get_featmap_size(pad_shape)
        # featmap_sizes: [[32, 32], [16, 16], [8, 8]]
        num_levels = len(self.strides)
        all_level_points = self.get_points(featmap_sizes)
        # level 0 points: torch.Size([1024, 2])
        # level 1 points: torch.Size([256, 2])
        # level 2 points: torch.Size([64, 2])
        self.num_points_per_level = [i.size()[0] for i in all_level_points]
        # Broadcast each level's regression range to every point of that
        # level, then flatten all levels into a single tensor.
        expanded_regress_ranges = [
            all_level_points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
                all_level_points[i]) for i in range(num_levels)
        ]
        concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
        concat_points = torch.cat(all_level_points, 0)
        # Keep one mask per gt box before computing the polar targets.
        gt_masks = gt_masks[:len(gt_bboxes)]
        gt_bboxes = torch.Tensor(gt_bboxes)
        gt_labels = torch.Tensor(gt_labels)
        _labels, _bbox_targets, _mask_targets = self.polar_target_single(
            gt_bboxes,gt_masks,gt_labels,concat_points, concat_regress_ranges, self.num_polar)
        data['_gt_labels'] = DC(_labels)
        data['_gt_bboxes'] = DC(_bbox_targets)
        data['_gt_masks'] = DC(_mask_targets)
        #--------------------offline ray label generation-----------------------------
        return data
def get_featmap_size(self, shape):
h,w = shape[:2]
featmap_sizes = []
for i in self.strides:
featmap_sizes.append([int(h / i)+1, int(w / i)+1])
return featmap_sizes
    def prepare_test_img(self, idx):
        """Prepare an image for testing (multi-scale and flipping).

        Returns a dict with one augmented image, meta dict, first-frame
        reference image and (optional) proposal set per test-time scale
        (and per flip when flip_ratio > 0).
        """
        img_info = self.img_infos[idx]
        img = mmcv.imread(osp.join(self.img_prefix, img_info['filename']))
        # corruption
        if self.corruption is not None:
            img = corrupt(
                img,
                severity=self.corruption_severity,
                corruption_name=self.corruption)
        # load proposals if necessary
        if self.proposals is not None:
            proposal = self.proposals[idx][:self.num_max_proposals]
            if not (proposal.shape[1] == 4 or proposal.shape[1] == 5):
                raise AssertionError(
                    'proposals should have shapes (n, 4) or (n, 5), '
                    'but found {}'.format(proposal.shape))
        else:
            proposal = None
        # get img_refer from first frame
        first_frame_idx = img_info["first_frame"]
        refer_info = self.img_infos[first_frame_idx]
        refer_ann = self.get_ann_info(first_frame_idx)
        img_refer = mmcv.imread(osp.join(self.img_prefix, refer_info['filename']))
        # crop the bbox
        # NOTE(review): mmcv.imcrop returns a list when given several boxes;
        # the squeeze suggests one reference bbox per first frame — confirm.
        img_refer = torch.squeeze(torch.Tensor(mmcv.imcrop(img_refer, refer_ann["bboxes"])))
        # resize to refer_scale
        img_refer = torch.Tensor(mmcv.imresize(np.float32(img_refer), self.refer_scale, return_scale=False)).permute(2, 0, 1)
        def prepare_single(img, scale, flip, proposal=None):
            # Transform one (scale, flip) variant of the image and its
            # proposals; closes over img_info for the meta dict.
            _img, img_shape, pad_shape, scale_factor = self.img_transform(
                img, scale, flip, keep_ratio=self.resize_keep_ratio)
            _img = to_tensor(_img)
            _img_meta = dict(
                ori_shape=(img_info['height'], img_info['width'], 3),
                img_shape=img_shape,
                pad_shape=pad_shape,
                scale_factor=scale_factor,
                flip=flip)
            if proposal is not None:
                if proposal.shape[1] == 5:
                    # 5th column is a score; re-attached after the transform.
                    score = proposal[:, 4, None]
                    proposal = proposal[:, :4]
                else:
                    score = None
                _proposal = self.bbox_transform(proposal, img_shape,
                                                scale_factor, flip)
                _proposal = np.hstack([_proposal, score
                                       ]) if score is not None else _proposal
                _proposal = to_tensor(_proposal)
            else:
                _proposal = None
            return _img, _img_meta, _proposal
        imgs = []
        img_metas = []
        img_refers = []
        proposals = []
        for scale in self.img_scales:
            _img, _img_meta, _proposal = prepare_single(
                img, scale, False, proposal)
            imgs.append(_img)
            img_metas.append(DC(_img_meta, cpu_only=True))
            img_refers.append(DC(to_tensor(img_refer), stack=True))
            proposals.append(_proposal)
            if self.flip_ratio > 0:
                # Also emit the horizontally flipped variant at this scale.
                _img, _img_meta, _proposal = prepare_single(
                    img, scale, True, proposal)
                imgs.append(_img)
                img_metas.append(DC(_img_meta, cpu_only=True))
                img_refers.append(DC(to_tensor(img_refer), stack=True))
                proposals.append(_proposal)
        data = dict(img=imgs,
                    img_meta=img_metas,
                    img_refer=img_refers)
        if self.proposals is not None:
            data['proposals'] = proposals
        return data
    # fit different polar numbers
def polar_target_single(self, gt_bboxes, gt_masks, gt_labels, points, regress_ranges, num_polar):
num_points = points.size(0)
num_gts = gt_labels.size(0)
if num_gts == 0:
return gt_labels.new_zeros(num_points), \
gt_bboxes.new_zeros((num_points, 4))
areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * (
gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1)
# TODO: figure out why these two are different
# areas = areas[None].expand(num_points, num_gts)
areas = areas[None].repeat(num_points, 1)
regress_ranges = regress_ranges[:, None, :].expand(
num_points, num_gts, 2)
gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)
#xs ys 分别是points的x y坐标
xs, ys = points[:, 0], points[:, 1]
xs = xs[:, None].expand(num_points, num_gts)
ys = ys[:, None].expand(num_points, num_gts)
left = xs - gt_bboxes[..., 0]
right = gt_bboxes[..., 2] - xs
top = ys - gt_bboxes[..., 1]
bottom = gt_bboxes[..., 3] - ys
bbox_targets = torch.stack((left, top, right, bottom), -1) #feature map上所有点对于gtbox的上下左右距离 [num_pix, num_gt, 4]
#mask targets 也按照这种写 同时labels 得从bbox中心修改成mask 重心
mask_centers = []
mask_contours = []
#第一步 先算重心 return [num_gt, 2]
for mask in gt_masks:
cnt, contour = self.get_single_centerpoint(mask)
contour = contour[0]
contour = torch.Tensor(contour).float()
y, x = cnt
mask_centers.append([x,y])
mask_contours.append(contour)
mask_centers = torch.Tensor(mask_centers).float()
# 把mask_centers assign到不同的层上,根据regress_range和重心的位置
mask_centers = mask_centers[None].expand(num_points, num_gts, 2)
#---------------------------------------------------------------------------
# condition1: inside a gt bbox
# add center sample
if self.center_sample:
if self.use_mask_center:
inside_gt_bbox_mask = self.get_mask_sample_region(gt_bboxes,
mask_centers,
self.strides,
self.num_points_per_level,
xs,
ys,
radius=self.radius)
else:
inside_gt_bbox_mask = self.get_sample_region(gt_bboxes,
self.strides,
self.num_points_per_level,
xs,
ys,
radius=self.radius)
else:
inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0
# condition2: limit the regression range for each location
max_regress_distance = bbox_targets.max(-1)[0]
inside_regress_range = (
max_regress_distance >= regress_ranges[..., 0]) & (
max_regress_distance <= regress_ranges[..., 1])
areas[inside_gt_bbox_mask == 0] = INF
areas[inside_regress_range == 0] = INF
min_area, min_area_inds = areas.min(dim=1)
labels = gt_labels[min_area_inds]
labels[min_area == INF] = 0 #[num_gt] 介于0-80
bbox_targets = bbox_targets[range(num_points), min_area_inds]
pos_inds = labels.nonzero().reshape(-1)
mask_targets = torch.zeros(num_points, num_polar).float()
pos_mask_ids = min_area_inds[pos_inds]
for p,id in zip(pos_inds, pos_mask_ids):
x, y = points[p]
pos_mask_contour = mask_contours[id]
# SiamPolar: interpolate
new_contour = []
contour_length = len(pos_mask_contour)
for i in range(contour_length):
new_contour.append(pos_mask_contour[i])
# new_contour.append((3*pos_mask_contour[i]+pos_mask_contour[(i+1)%contour_length])/4)
new_contour.append((pos_mask_contour[i]+pos_mask_contour[(i+1)%contour_length])/2)
# new_contour.append((pos_mask_contour[i]+3*pos_mask_contour[(i+1)%contour_length])/4)
new_pos_mask_contour = torch.cat(new_contour, dim=0).unsqueeze(1)
# print(pos_mask_contour.size())
# print(new_pos_mask_contour.size())
# print(new_pos_mask_contour)
# exit()
dists, coords = self.get_coordinates(x, y, new_pos_mask_contour, num_polar)
mask_targets[p] = dists
return labels, bbox_targets, mask_targets
def get_coordinates(self, c_x, c_y, pos_mask_contour, num_polar):
ct = pos_mask_contour[:, 0, :]
x = ct[:, 0] - c_x
y = ct[:, 1] - c_y
# angle = np.arctan2(x, y)*180/np.pi
angle = torch.atan2(x, y) * 180 / np.pi
angle[angle < 0] += 360
angle = angle.int()
# dist = np.sqrt(x ** 2 + y ** 2)
dist = torch.sqrt(x ** 2 + y ** 2)
angle, idx = torch.sort(angle)
dist = dist[idx]
# generate num_polar angles
new_coordinate = {}
step_size = int(360/num_polar)
for i in range(0, 360, step_size):
if i in angle:
d = dist[angle==i].max()
new_coordinate[i] = d
elif i + 1 in angle:
d = dist[angle == i+1].max()
new_coordinate[i] = d
elif i - 1 in angle:
d = dist[angle == i-1].max()
new_coordinate[i] = d
elif i + 2 in angle:
d = dist[angle == i+2].max()
new_coordinate[i] = d
elif i - 2 in angle:
d = dist[angle == i-2].max()
new_coordinate[i] = d
elif i + 3 in angle:
d = dist[angle == i+3].max()
new_coordinate[i] = d
elif i - 3 in angle:
d = dist[angle == i-3].max()
new_coordinate[i] = d
# josie.add
elif i + 4 in angle:
d = dist[angle == i+4].max()
new_coordinate[i] = d
elif i - 4 in angle:
d = dist[angle == i-4].max()
new_coordinate[i] = d
elif i + 5 in angle:
d = dist[angle == i+5].max()
new_coordinate[i] = d
elif i - 5 in angle:
d = dist[angle == i-5].max()
new_coordinate[i] = d
distances = torch.zeros(num_polar)
for a in range(0, 360, step_size):
if not a in new_coordinate.keys():
new_coordinate[a] = torch.tensor(1e-6)
distances[a//step_size] = 1e-6
else:
distances[a//step_size] = new_coordinate[a]
return distances, new_coordinate
def __getitem__(self, idx):
if self.test_mode:
return self.prepare_test_img(idx)
while True:
data = self.prepare_train_img(idx)
if data is None:
idx = self._rand_another(idx)
continue
return data
def merge_contours(self, contours):
alpha = 0.25
# init
b = contours[0][:, 0, :]
cx, cy = b.mean(axis=0)
# guarantee that the threshold is at the same level as the object size
# thrx = contours[0][:, 0, :][:, 0].max() - contours[0][:, 0, :][:, 0].min()
# thry = contours[0][:, 0, :][:, 1].max() - contours[0][:, 0, :][:, 1].min()
records = [0 for i in range(len(contours))]
new_contours = [contours[0]]
records[0] = 1
flag = True
while (flag == True):
flag = False
for i in range(1, len(contours)-1):
tmp = contours[i][:, 0, :]
tx, ty = tmp.mean(axis=0)
if records[i] == 0:
d = math.sqrt((cx - tx) ** 2 + (cy - ty) ** 2)
lx = b[:, 0].max() - b[:, 0].min() + tmp[:, 0].max() - tmp[:, 0].min()
ly = b[:, 1].max() - b[:, 1].min() + tmp[:, 1].max() - tmp[:, 1].min()
l = math.sqrt(lx ** 2 + ly ** 2)
# print("d: {}, l: {}".format(d, l))
if d <= alpha * l:
# print("Add a new contour!")
new_contours.append(contours[i])
records[i] = 1
flag = True
cx = (cx + tx) / 2
cy = (cy + ty) / 2
return new_contours
def get_single_centerpoint(self, mask):
contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
contours.sort(key=lambda x: cv2.contourArea(x), reverse=True) # only save the biggest one
'''debug IndexError: list index out of range'''
if len(contours) == 0:
return None, None
count = contours[0][:, 0, :]
try:
center = self.get_centerpoint(count)
except:
x,y = count.mean(axis=0)
center = [int(x), int(y)]
if len(contours) > 1:
# keep the contours near the biggest contour
new_contours = self.merge_contours(contours)
else:
new_contours = [contours[0]] # the biggest contour
return center, new_contours | [
"numpy.random.rand",
"numpy.hstack",
"torch.sqrt",
"math.sqrt",
"mmcv.imrescale",
"cv2.contourArea",
"torch.sort",
"torch.Tensor",
"torch.cat",
"imagecorruptions.corrupt",
"torch.stack",
"os.path.join",
"torch.atan2",
"mmcv.parallel.DataContainer",
"torch.tensor",
"mmcv.imcrop",
"cv2... | [((11949, 11990), 'torch.cat', 'torch.cat', (['expanded_regress_ranges'], {'dim': '(0)'}), '(expanded_regress_ranges, dim=0)\n', (11958, 11990), False, 'import torch\n'), ((12015, 12045), 'torch.cat', 'torch.cat', (['all_level_points', '(0)'], {}), '(all_level_points, 0)\n', (12024, 12045), False, 'import torch\n'), ((12112, 12135), 'torch.Tensor', 'torch.Tensor', (['gt_bboxes'], {}), '(gt_bboxes)\n', (12124, 12135), False, 'import torch\n'), ((12156, 12179), 'torch.Tensor', 'torch.Tensor', (['gt_labels'], {}), '(gt_labels)\n', (12168, 12179), False, 'import torch\n'), ((12388, 12399), 'mmcv.parallel.DataContainer', 'DC', (['_labels'], {}), '(_labels)\n', (12390, 12399), True, 'from mmcv.parallel import DataContainer as DC\n'), ((12429, 12446), 'mmcv.parallel.DataContainer', 'DC', (['_bbox_targets'], {}), '(_bbox_targets)\n', (12431, 12446), True, 'from mmcv.parallel import DataContainer as DC\n'), ((12475, 12492), 'mmcv.parallel.DataContainer', 'DC', (['_mask_targets'], {}), '(_mask_targets)\n', (12477, 12492), True, 'from mmcv.parallel import DataContainer as DC\n'), ((17521, 17564), 'torch.stack', 'torch.stack', (['(left, top, right, bottom)', '(-1)'], {}), '((left, top, right, bottom), -1)\n', (17532, 17564), False, 'import torch\n'), ((21740, 21767), 'torch.sqrt', 'torch.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (21750, 21767), False, 'import torch\n'), ((21789, 21806), 'torch.sort', 'torch.sort', (['angle'], {}), '(angle)\n', (21799, 21806), False, 'import torch\n'), ((23290, 23312), 'torch.zeros', 'torch.zeros', (['num_polar'], {}), '(num_polar)\n', (23301, 23312), False, 'import torch\n'), ((25470, 25530), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_NONE'], {}), '(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n', (25486, 25530), False, 'import cv2\n'), ((6704, 6751), 'os.path.join', 'osp.join', (['self.img_prefix', "img_info['filename']"], {}), "(self.img_prefix, img_info['filename'])\n", 
(6712, 6751), True, 'import os.path as osp\n'), ((6832, 6917), 'imagecorruptions.corrupt', 'corrupt', (['img'], {'severity': 'self.corruption_severity', 'corruption_name': 'self.corruption'}), '(img, severity=self.corruption_severity, corruption_name=self.corruption\n )\n', (6839, 6917), False, 'from imagecorruptions import corrupt\n'), ((8840, 8889), 'os.path.join', 'osp.join', (['self.img_prefix', "refer_info['filename']"], {}), "(self.img_prefix, refer_info['filename'])\n", (8848, 8889), True, 'import os.path as osp\n'), ((9469, 9539), 'mmcv.imrescale', 'mmcv.imrescale', (['gt_seg', 'self.seg_scale_factor'], {'interpolation': '"""nearest"""'}), "(gt_seg, self.seg_scale_factor, interpolation='nearest')\n", (9483, 9539), False, 'import mmcv\n'), ((11073, 11100), 'mmcv.parallel.DataContainer', 'DC', (['gt_masks'], {'cpu_only': '(True)'}), '(gt_masks, cpu_only=True)\n', (11075, 11100), True, 'from mmcv.parallel import DataContainer as DC\n'), ((12996, 13043), 'os.path.join', 'osp.join', (['self.img_prefix', "img_info['filename']"], {}), "(self.img_prefix, img_info['filename'])\n", (13004, 13043), True, 'import os.path as osp\n'), ((13124, 13209), 'imagecorruptions.corrupt', 'corrupt', (['img'], {'severity': 'self.corruption_severity', 'corruption_name': 'self.corruption'}), '(img, severity=self.corruption_severity, corruption_name=self.corruption\n )\n', (13131, 13209), False, 'from imagecorruptions import corrupt\n'), ((13919, 13968), 'os.path.join', 'osp.join', (['self.img_prefix', "refer_info['filename']"], {}), "(self.img_prefix, refer_info['filename'])\n", (13927, 13968), True, 'import os.path as osp\n'), ((8304, 8320), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (8318, 8320), True, 'import numpy as np\n'), ((8962, 9005), 'mmcv.imcrop', 'mmcv.imcrop', (['img_refer', "refer_ann['bboxes']"], {}), "(img_refer, refer_ann['bboxes'])\n", (8973, 9005), False, 'import mmcv\n'), ((9789, 9819), 'numpy.hstack', 'np.hstack', (['[proposals, scores]'], {}), 
'([proposals, scores])\n', (9798, 9819), True, 'import numpy as np\n'), ((10693, 10720), 'mmcv.parallel.DataContainer', 'DC', (['img_meta'], {'cpu_only': '(True)'}), '(img_meta, cpu_only=True)\n', (10695, 10720), True, 'from mmcv.parallel import DataContainer as DC\n'), ((14041, 14084), 'mmcv.imcrop', 'mmcv.imcrop', (['img_refer', "refer_ann['bboxes']"], {}), "(img_refer, refer_ann['bboxes'])\n", (14052, 14084), False, 'import mmcv\n'), ((15666, 15694), 'mmcv.parallel.DataContainer', 'DC', (['_img_meta'], {'cpu_only': '(True)'}), '(_img_meta, cpu_only=True)\n', (15668, 15694), True, 'from mmcv.parallel import DataContainer as DC\n'), ((18072, 18098), 'torch.Tensor', 'torch.Tensor', (['mask_centers'], {}), '(mask_centers)\n', (18084, 18098), False, 'import torch\n'), ((20199, 20233), 'torch.zeros', 'torch.zeros', (['num_points', 'num_polar'], {}), '(num_points, num_polar)\n', (20210, 20233), False, 'import torch\n'), ((21591, 21608), 'torch.atan2', 'torch.atan2', (['x', 'y'], {}), '(x, y)\n', (21602, 21608), False, 'import torch\n'), ((23440, 23459), 'torch.tensor', 'torch.tensor', (['(1e-06)'], {}), '(1e-06)\n', (23452, 23459), False, 'import torch\n'), ((8180, 8227), 'os.path.join', 'osp.join', (['self.img_prefix', "img_info['filename']"], {}), "(self.img_prefix, img_info['filename'])\n", (8188, 8227), True, 'import os.path as osp\n'), ((15126, 15155), 'numpy.hstack', 'np.hstack', (['[_proposal, score]'], {}), '([_proposal, score])\n', (15135, 15155), True, 'import numpy as np\n'), ((16016, 16044), 'mmcv.parallel.DataContainer', 'DC', (['_img_meta'], {'cpu_only': '(True)'}), '(_img_meta, cpu_only=True)\n', (16018, 16044), True, 'from mmcv.parallel import DataContainer as DC\n'), ((17915, 17936), 'torch.Tensor', 'torch.Tensor', (['contour'], {}), '(contour)\n', (17927, 17936), False, 'import torch\n'), ((20982, 21011), 'torch.cat', 'torch.cat', (['new_contour'], {'dim': '(0)'}), '(new_contour, dim=0)\n', (20991, 21011), False, 'import torch\n'), ((24707, 24749), 
'math.sqrt', 'math.sqrt', (['((cx - tx) ** 2 + (cy - ty) ** 2)'], {}), '((cx - tx) ** 2 + (cy - ty) ** 2)\n', (24716, 24749), False, 'import math\n'), ((24956, 24984), 'math.sqrt', 'math.sqrt', (['(lx ** 2 + ly ** 2)'], {}), '(lx ** 2 + ly ** 2)\n', (24965, 24984), False, 'import math\n'), ((25567, 25585), 'cv2.contourArea', 'cv2.contourArea', (['x'], {}), '(x)\n', (25582, 25585), False, 'import cv2\n'), ((9087, 9108), 'numpy.float32', 'np.float32', (['img_refer'], {}), '(img_refer)\n', (9097, 9108), True, 'import numpy as np\n'), ((14166, 14187), 'numpy.float32', 'np.float32', (['img_refer'], {}), '(img_refer)\n', (14176, 14187), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from nilearn import image, input_data
from nilearn.datasets import load_mni152_brain_mask
def get_masker(mask_img=None, target_affine=None):
    """Return a fitted NiftiMasker for ``mask_img`` (MNI152 brain mask by
    default), optionally resampled to ``target_affine``.

    ``target_affine`` may be a scalar (isotropic voxel size), a length-3
    vector (per-axis voxel sizes), or a full affine matrix.
    """
    if isinstance(mask_img, input_data.NiftiMasker):
        # already a masker: use as-is
        return mask_img
    img = load_mni152_brain_mask() if mask_img is None else mask_img
    if target_affine is not None:
        ndim = np.ndim(target_affine)
        if ndim == 0:
            # scalar: isotropic voxel size
            affine = np.eye(3) * target_affine
        elif ndim == 1:
            # vector: per-axis voxel sizes on the diagonal
            affine = np.diag(target_affine)
        else:
            affine = target_affine
        img = image.resample_img(
            img, target_affine=affine, interpolation="nearest"
        )
    return input_data.NiftiMasker(mask_img=img).fit()
def coords_to_voxels(coords, ref_img=None):
    """Map world-space (x, y, z) coordinates to integer voxel indices of
    ``ref_img`` (MNI152 brain mask by default), dropping points that fall
    outside the image volume."""
    if ref_img is None:
        ref_img = load_mni152_brain_mask()
    coords = np.atleast_2d(coords)
    # homogeneous coordinates -> voxel space via the (pseudo-)inverse affine
    homogeneous = np.hstack([coords, np.ones((len(coords), 1))])
    voxels = np.linalg.pinv(ref_img.affine).dot(homogeneous.T)[:-1].T
    # keep only voxels inside the volume bounds
    voxels = voxels[(voxels >= 0).all(axis=1)]
    voxels = voxels[(voxels < ref_img.shape[:3]).all(axis=1)]
    return np.floor(voxels).astype(int)
def coords_to_peaks_img(coords, mask_img):
    """Build an image in ``mask_img``'s space with a unit peak added at each
    coordinate's voxel (coordinates falling on the same voxel accumulate)."""
    ref = image.load_img(mask_img)
    vox = coords_to_voxels(coords, ref)
    counts = np.zeros(ref.shape)
    np.add.at(counts, tuple(vox.T), 1.0)
    return image.new_img_like(ref, counts)
def gaussian_coord_smoothing(
    coords, mask_img=None, target_affine=None, fwhm=9.0
):
    """Place unit peaks at ``coords`` and smooth them with a Gaussian kernel
    of ``fwhm`` mm; return the masked result as an image."""
    masker = get_masker(mask_img, target_affine)
    peaks = coords_to_peaks_img(coords, mask_img=masker.mask_img_)
    smoothed = image.smooth_img(peaks, fwhm=fwhm)
    return masker.inverse_transform(masker.transform(smoothed).squeeze())
def coordinates_to_maps(
    coordinates, mask_img=None, target_affine=(4, 4, 4), fwhm=9.0
):
    """Turn a coordinate table (columns 'pmid', 'x', 'y', 'z') into one masked
    brain map per article.

    :returns: (DataFrame of flattened maps indexed by pmid, fitted masker).
    """
    n_articles = len(set(coordinates["pmid"]))
    print(
        "Transforming {} coordinates for {} articles".format(
            coordinates.shape[0], n_articles
        )
    )
    masker = get_masker(mask_img=mask_img, target_affine=target_affine)
    rows, pmids = [], []
    for pmid, img in iter_coordinates_to_maps(
        coordinates, mask_img=masker, fwhm=fwhm
    ):
        rows.append(masker.transform(img).ravel())
        pmids.append(pmid)
    return pd.DataFrame(rows, index=pmids), masker
def iter_coordinates_to_maps(
    coordinates, mask_img=None, target_affine=(4, 4, 4), fwhm=9.0
):
    """Yield (pmid, smoothed peak image) for each article in the table."""
    masker = get_masker(mask_img=mask_img, target_affine=target_affine)
    articles = coordinates.groupby("pmid")
    n_articles = len(articles)
    for i, (pmid, coord) in enumerate(articles):
        # single-line progress indicator
        print(
            "{:.1%} pmid: {:< 20}".format(i / n_articles, pmid),
            end="\r",
            flush=True,
        )
        xyz = coord.loc[:, ["x", "y", "z"]].values
        yield pmid, gaussian_coord_smoothing(xyz, fwhm=fwhm, mask_img=masker)
| [
"nilearn.image.new_img_like",
"numpy.atleast_2d",
"numpy.eye",
"numpy.linalg.pinv",
"nilearn.image.load_img",
"numpy.floor",
"numpy.ndim",
"nilearn.image.smooth_img",
"numpy.diag",
"numpy.zeros",
"nilearn.datasets.load_mni152_brain_mask",
"pandas.DataFrame",
"nilearn.image.resample_img",
"... | [((907, 928), 'numpy.atleast_2d', 'np.atleast_2d', (['coords'], {}), '(coords)\n', (920, 928), True, 'import numpy as np\n'), ((1275, 1299), 'nilearn.image.load_img', 'image.load_img', (['mask_img'], {}), '(mask_img)\n', (1289, 1299), False, 'from nilearn import image, input_data\n'), ((1360, 1384), 'numpy.zeros', 'np.zeros', (['mask_img.shape'], {}), '(mask_img.shape)\n', (1368, 1384), True, 'import numpy as np\n'), ((1444, 1479), 'nilearn.image.new_img_like', 'image.new_img_like', (['mask_img', 'peaks'], {}), '(mask_img, peaks)\n', (1462, 1479), False, 'from nilearn import image, input_data\n'), ((1722, 1760), 'nilearn.image.smooth_img', 'image.smooth_img', (['peaks_img'], {'fwhm': 'fwhm'}), '(peaks_img, fwhm=fwhm)\n', (1738, 1760), False, 'from nilearn import image, input_data\n'), ((304, 328), 'nilearn.datasets.load_mni152_brain_mask', 'load_mni152_brain_mask', ([], {}), '()\n', (326, 328), False, 'from nilearn.datasets import load_mni152_brain_mask\n'), ((569, 656), 'nilearn.image.resample_img', 'image.resample_img', (['mask_img'], {'target_affine': 'target_affine', 'interpolation': '"""nearest"""'}), "(mask_img, target_affine=target_affine, interpolation=\n 'nearest')\n", (587, 656), False, 'from nilearn import image, input_data\n'), ((841, 865), 'nilearn.datasets.load_mni152_brain_mask', 'load_mni152_brain_mask', ([], {}), '()\n', (863, 865), False, 'from nilearn.datasets import load_mni152_brain_mask\n'), ((2379, 2416), 'pandas.DataFrame', 'pd.DataFrame', (['images'], {'index': 'img_pmids'}), '(images, index=img_pmids)\n', (2391, 2416), True, 'import pandas as pd\n'), ((374, 396), 'numpy.ndim', 'np.ndim', (['target_affine'], {}), '(target_affine)\n', (381, 396), True, 'import numpy as np\n'), ((687, 728), 'nilearn.input_data.NiftiMasker', 'input_data.NiftiMasker', ([], {'mask_img': 'mask_img'}), '(mask_img=mask_img)\n', (709, 728), False, 'from nilearn import image, input_data\n'), ((1168, 1184), 'numpy.floor', 'np.floor', (['voxels'], {}), 
'(voxels)\n', (1176, 1184), True, 'import numpy as np\n'), ((431, 440), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (437, 440), True, 'import numpy as np\n'), ((470, 492), 'numpy.ndim', 'np.ndim', (['target_affine'], {}), '(target_affine)\n', (477, 492), True, 'import numpy as np\n'), ((527, 549), 'numpy.diag', 'np.diag', (['target_affine'], {}), '(target_affine)\n', (534, 549), True, 'import numpy as np\n'), ((1002, 1024), 'numpy.linalg.pinv', 'np.linalg.pinv', (['affine'], {}), '(affine)\n', (1016, 1024), True, 'import numpy as np\n')] |
# authors: <NAME>, Manish
# date: 2020-01-23
"""Calculates MSE error for test set
Usage: src/vegas_test_results.py --test=<test> --out_dir=<out_dir>
Options:
--test=<test> Path (including filename) to training data
--out_dir=<out_dir> Path to directory where model results on test set need to be saved
"""
# importing required libraries
from docopt import docopt
import os
import matplotlib.pyplot as plt
from pandas.plotting import table
import numpy as np
import selenium
import pickle
import pandas as pd
# regressors / models
from sklearn.linear_model import LinearRegression, LogisticRegression, Lasso, Ridge
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
# Feature selection
from sklearn.feature_selection import RFE
# other
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.feature_extraction.text import CountVectorizer
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import altair as alt
opt = docopt(__doc__)
def main(test, out_dir):
    """Evaluate the saved model on the test set and print the MSE.

    :param test: path to the test CSV; must contain a 'score' column plus the
        feature columns listed in results/features_to_use.npy.
    :param out_dir: path for saving results.
        NOTE(review): currently unused in the body -- results are only
        printed; confirm whether saving to out_dir is still intended.
    """
    test_data = pd.read_csv(test)
    X = test_data.drop('score', axis =1)
    y = test_data['score']
    # loading required features based on training
    cols_to_consider = np.load("results/features_to_use.npy", allow_pickle = True)
    X = X[cols_to_consider]
    # fetching trained model and predicting results
    model = pickle.load(open("results/finalized_model.sav", 'rb'))
    y_pred = model.predict(X)
    print("Model evaluated successfully on test data, MSE error - " , round(mean_squared_error( y, y_pred),3))
#
if __name__ == "__main__":
    # script entry point: run evaluation with CLI options parsed by docopt
    main(opt["--test"], opt["--out_dir"])
| [
"pandas.read_csv",
"sklearn.metrics.mean_squared_error",
"warnings.simplefilter",
"numpy.load",
"docopt.docopt"
] | [((995, 1057), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (1016, 1057), False, 'import warnings\n'), ((1087, 1102), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (1093, 1102), False, 'from docopt import docopt\n'), ((1146, 1163), 'pandas.read_csv', 'pd.read_csv', (['test'], {}), '(test)\n', (1157, 1163), True, 'import pandas as pd\n'), ((1310, 1367), 'numpy.load', 'np.load', (['"""results/features_to_use.npy"""'], {'allow_pickle': '(True)'}), "('results/features_to_use.npy', allow_pickle=True)\n", (1317, 1367), True, 'import numpy as np\n'), ((1628, 1657), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y', 'y_pred'], {}), '(y, y_pred)\n', (1646, 1657), False, 'from sklearn.metrics import mean_squared_error\n')] |
""" Matrix profile anomaly detection.
Reference:
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. (2016, December).
Matrix profile I: all pairs similarity joins for time series: a unifying view that includes motifs, discords and shapelets.
In Data Mining (ICDM), 2016 IEEE 16th International Conference on (pp. 1317-1322). IEEE.
"""
# Authors: <NAME>, 2018.
import math
import numpy as np
import pandas as pd
import scipy.signal as sps
from tqdm import tqdm
from .BaseDetector import BaseDetector
# -------------
# CLASSES
# -------------
class MatrixProfileAD(BaseDetector):
""" Anomaly detection in time series using the matrix profile
Parameters
----------
m : int (default=10)
Window size.
contamination : float (default=0.1)
Estimate of the expected percentage of anomalies in the data.
Comments
--------
- This only works on time series data.
"""
def __init__(self, m=10, contamination=0.1,
tol=1e-8, verbose=False):
super(MatrixProfileAD, self).__init__()
self.m = int(m)
self.contamination = float(contamination)
self.tol = float(tol)
self.verbose = bool(verbose)
    def ab_join(self, T, split):
        """ Compute the ABjoin and BAjoin side-by-side,
            where `split` determines the splitting point.

        :param T : np.array(), shape (n_samples)
            Time series containing segments A and B back-to-back.
        :param split : int
            Index separating segment A (positions <= split) from B (> split).
        :returns matrixProfile : np.array(), shape (n_samples - m + 1)
            Minimum cross-segment distance per position.

        NOTE(review): relies on self._diagonal_dist, defined elsewhere in
        this class -- confirm its exact contract there.
        """
        # algorithm options
        excZoneLen = int(np.round(self.m * 0.5))
        radius = 1.1  # NOTE(review): unused in this method -- confirm removable
        dataLen = len(T)
        proLen = dataLen - self.m + 1
        # change Nan and Inf to zero
        T = np.nan_to_num(T)
        # precompute the rolling mean and standard deviation (window m)
        s = pd.Series(T)
        dataMu = s.rolling(self.m).mean().values[self.m-1:dataLen]
        dataSig = s.rolling(self.m).std().values[self.m-1:dataLen]
        # profile starts at +inf; minima are kept as diagonals are evaluated
        matrixProfile = np.ones(proLen) * np.inf
        # evaluate the diagonals beyond the exclusion zone, in random order
        idxOrder = excZoneLen + np.arange(0, proLen, 1)
        idxOrder = idxOrder[np.random.permutation(len(idxOrder))]
        # construct the matrixprofile
        for i, idx in enumerate(idxOrder):
            # query
            # NOTE(review): slice is length m-1, not m; `query` is unused
            # below, so this is likely dead code -- verify.
            query = T[idx:idx+self.m-1]
            # squared distances along one diagonal of the join matrix
            distProfile = self._diagonal_dist(T, idx, dataLen, self.m, proLen, dataMu, dataSig)
            distProfile = abs(distProfile)
            distProfile = np.sqrt(distProfile)
            # pos1/pos2 are the two subsequence positions paired on this diagonal
            pos1 = np.arange(idx, proLen, 1)
            pos2 = np.arange(0, proLen-idx+1, 1)
            # keep only the pairs that cross the A/B boundary at `split`
            distProfile = distProfile[np.where((pos2 <= split) & (pos1 > split))[0]]
            pos1Split = pos1[np.where((pos2 <= split) & (pos1 > split))[0]]
            pos2Split = pos2[np.where((pos2 <= split) & (pos1 > split))[0]]
            pos1 = pos1Split
            pos2 = pos2Split
            # keep the minimum distance seen at both paired positions
            updatePos = np.where(matrixProfile[pos1] > distProfile)[0]
            matrixProfile[pos1[updatePos]] = distProfile[updatePos]
            updatePos = np.where(matrixProfile[pos2] > distProfile)[0]
            matrixProfile[pos2[updatePos]] = distProfile[updatePos]
        return matrixProfile
def fit_predict(self, T):
""" Fit the model to the time series T.
:param T : np.array(), shape (n_samples)
The time series data for which to compute the matrix profile.
:returns y_score : np.array(), shape (n_samples)
Anomaly score for the samples in T.
:returns y_pred : np.array(), shape (n_samples)
Returns -1 for inliers and +1 for anomalies/outliers.
"""
return self.fit(np.array([])).predict(T)
def fit(self, T=np.array([])):
""" Fit the model to the time series T.
:param T : np.array(), shape (n_samples)
The time series data for which to compute the matrix profile.
:returns self : object
"""
self.T_train = T
return self
    def predict(self, T=np.array([])):
        """ Compute the anomaly score + predict the label of each sample in T.

        The training series stored by ``fit`` is concatenated in front of T,
        the matrix profile of the fused series is computed, and the rescaled
        score of the last ``len(T)`` positions is returned.

        NOTE(review): ``T=np.array([])`` is a shared mutable default argument
        -- harmless only as long as callers never mutate it.

        :returns y_score : np.array(), shape (n_samples)
            Anomaly score for the samples in T.
        :returns y_pred : np.array(), shape (n_samples)
            Returns -1 for inliers and +1 for anomalies/outliers.
        """
        # fuse T_train and T
        nt = len(T)
        nT = np.concatenate((self.T_train, T))
        n = len(nT)
        # compute the matrix profile
        matrix_profile = self._compute_matrix_profile_stomp(nT, self.m)
        # transform to an anomaly score (1NN distance)
        # the largest distance = the largest anomaly
        # rescale between 0 and 1, this yields the anomaly score
        y_score = (matrix_profile - min(matrix_profile)) / (max(matrix_profile) - min(matrix_profile))
        # the profile is m-1 entries shorter than nT: pad the tail with zeros
        y_score = np.append(y_score, np.zeros(n-len(matrix_profile), dtype=float))
        # prediction threshold + absolute predictions
        # NOTE(review): with contamination == 0 the sort index equals n and
        # would raise IndexError -- confirm contamination is always > 0.
        self.threshold = np.sort(y_score)[int(n * (1.0 - self.contamination))]
        y_pred = np.ones(n, dtype=float)
        y_pred[y_score < self.threshold] = -1
        # cut y_pred and y_score to match length of T
        return y_score[-nt:], y_pred[-nt:]
    def _compute_matrix_profile_stomp(self, T, m):
        """ Compute the matrix profile for time series T using correct STOMP.
        :param T : np.array(), shape (n_samples)
            The time series data for which to compute the matrix profile.
        :param m : int
            Length of the query.
        :returns matrix_profile : np.array(), shape (n_samples - m + 1)
            The matrix profile (distance) for the time series T.
        comments
        --------
        - Includes a fix for straight line time series segments.
        """
        n = len(T)
        # precompute the rolling mean and (sample) standard deviation
        s = pd.Series(T)
        data_m = s.rolling(m).mean().values[m-1:n]
        data_s = s.rolling(m).std().values[m-1:n]
        # indices of (near-)constant windows; their std is clamped to exactly 0
        idxz = np.where(data_s < 1e-8)[0]
        data_s[idxz] = 0.0
        idxn = np.where(data_s > 0.0)[0]
        zero_s = False
        if len(idxz) > 0:
            zero_s = True
        # precompute the distance of every window to a straight-line segment,
        # used whenever a query window is constant
        slD = np.zeros(n-m+1, dtype=float)
        if zero_s:
            for i in range(n-m+1):
                Tsegm = T[i:i+m]
                Tm = data_m[i]
                Ts = data_s[i]
                if Ts == 0.0: # data_s is effectively 0
                    slD[i] = 0.0
                else:
                    Tn = (Tsegm - Tm) / Ts
                    slD[i] = np.sqrt(np.sum(Tn ** 2))
        # first sliding dot product, computed by direct convolution
        q = T[:m]
        QT = sps.convolve(T.copy(), q[::-1], 'valid', 'direct')
        QT_first = QT.copy()
        # distance profile of the first window initializes the matrix profile
        D = self._compute_fixed_distance_profile(T[:m], QT, n, m, data_m, data_s, data_m[0], data_s[0], slD.copy(), idxz, idxn, zero_s)
        # NOTE(review): matrix_profile aliases D here; the in-place writes to
        # D in the zero_s branch below therefore also modify matrix_profile
        # until D is rebound -- confirm this is intended.
        matrix_profile = D
        # in-order evaluation of the rest of the profile
        for i in tqdm(range(1, n-m+1, 1), disable=not(self.verbose)):
            # O(1) incremental update of the sliding dot product (STOMP core)
            QT[1:] = QT[:-1] - (T[:n-m] * T[i-1]) + (T[m:n] * T[i+m-1])
            QT[0] = QT_first[i]
            # compute the distance profile: without function calls!
            if data_s[i] == 0.0: # query_s is effectively 0
                D = slD.copy()
            elif zero_s:
                D[idxn] = np.sqrt(2 * (m - (QT[idxn] - m * data_m[idxn] * data_m[i]) / (data_s[idxn] * data_s[i])))
                # NOTE(review): nq normalizes q = T[:m] (the *first* window)
                # with the statistics of window i -- verify this should not be
                # T[i:i+m] instead.
                nq = (q - data_m[i]) / data_s[i]
                d = np.sqrt(np.sum(nq ** 2))
                D[idxz] = d
            else:
                D = np.sqrt(2 * (m - (QT - m * data_m * data_m[i]) / (data_s * data_s[i])))
            # exclude the trivial self-match zone around position i
            exclusion_range = (int(max(0, round(i-m/2))), int(min(round(i+m/2+1), n-m+1)))
            D[exclusion_range[0]:exclusion_range[1]] = np.inf
            # keep the element-wise minimum distance seen so far
            ix = np.where(D < matrix_profile)[0]
            matrix_profile[ix] = D[ix]
            # matrix_profile = np.minimum(matrix_profile, D)
        return matrix_profile
def _compute_fixed_distance_profile(self, q, QT, n, m, data_m, data_s, query_m, query_s, slD, idxz, idxn, zero_s):
""" Compute the fixed distance profile """
D = np.zeros(n-m+1, dtype=float)
if query_s == 0.0: # query_s is effectively 0
return slD
if zero_s:
D[idxn] = np.sqrt(2 * (m - (QT[idxn] - m * data_m[idxn] * query_m) / (data_s[idxn] * query_s)))
nq = (q - query_m) / query_s
d = np.sqrt(np.sum(nq ** 2))
D[idxz] = d
else:
D = np.sqrt(2 * (m - (QT - m * data_m * query_m) / (data_s * query_s)))
return D
    def _compute_matrix_profile_stamp(self, T, m):
        """ Compute the matrix profile for time series T using STAMP.
        :param T : np.array(), shape (n_samples)
            The time series data for which to compute the matrix profile.
        :param m : int
            Length of the query.
        :returns matrix_profile : np.array(), shape (n_samples - m + 1)
            The matrix profile (distance) for the time series T.
        comments
        --------
        - Uses the STAMP algorithm to compute the matrix profile.
        - Includes a fix for straight line time series segments.
        """
        n = len(T)
        # initialize the empty profile (minima are kept as queries are evaluated)
        matrix_profile = np.ones(n-m+1) * np.inf
        # precompute the rolling mean and (sample) standard deviation
        s = pd.Series(T)
        data_m = s.rolling(m).mean().values[m-1:n]
        data_s = s.rolling(m).std().values[m-1:n]
        # indices of (near-)constant windows; their std is clamped to exactly 0
        idxz = np.where(data_s < 1e-8)[0]
        data_s[idxz] = 0.0
        idxn = np.where(data_s > 0.0)[0]
        zero_s = False
        if len(idxz) > 0:
            zero_s = True
        # precompute distance to straight line segment of 0s
        # brute force distance computation (because the dot_product is zero!)
        # --> this is a structural issue with the MASS algorithm for fast distance computation
        slD = np.zeros(n-m+1, dtype=float)
        if zero_s:
            for i in range(n-m+1):
                Tsegm = T[i:i+m]
                Tm = data_m[i]
                Ts = data_s[i]
                if Ts == 0.0: # data_s is effectively 0
                    slD[i] = 0.0
                else:
                    Tn = (Tsegm - Tm) / Ts
                    slD[i] = np.sqrt(np.sum(Tn ** 2))
        # random search order for the outer loop
        indices = np.arange(0, n-m+1, 1)
        np.random.shuffle(indices)
        # compute the matrix profile
        if self.verbose: print('Iterations:', len(indices))
        for i, idx in tqdm(enumerate(indices), disable=not(self.verbose)):
            # query for which to compute the distance profile
            query = T[idx:idx+m]
            # normalized distance profile (using MASS)
            D = self._compute_MASS(query, T, n, m, data_m, data_s, data_m[idx], data_s[idx], slD.copy())
            # update the matrix profile (keeping minimum distances)
            # self-join is True! (we only look at constructing the matrix profile for a single time series)
            exclusion_range = (int(max(0, round(idx-m/2))), int(min(round(idx+m/2+1), n-m+1)))
            D[exclusion_range[0]:exclusion_range[1]] = np.inf
            ix = np.where(D < matrix_profile)[0]
            matrix_profile[ix] = D[ix]
        return matrix_profile
def _compute_MASS(self, query, T, n, m, data_m, data_s, query_m, query_s, slD):
""" Compute the distance profile using the MASS algorithm.
:param query : np.array(), shape (self.m)
Query segment for which to compute the distance profile.
:param T : np.array(), shape (n_samples)
The time series data for which to compute the matrix profile.
:param n : int
Length of time series T.
:param m : int
Length of the query.
:param data_f : np.array, shape (n + m)
FFT transform of T.
:param data_m : np.array, shape (n - m + 1)
Mean of every segment of length m of T.
:param data_s : np.array, shape (n - m + 1)
STD of every segment of length m of T.
:param query_m : float
Mean of the query segment.
:param query_s : float
Standard deviation of the query segment.
:returns dist_profile : np.array(), shape (n_samples)
Distance profile of the query to time series T.
"""
# CASE 1: query is a straight line segment of 0s
if query_s < 1e-8:
return slD
# CASE 2: query is every other possible subsequence
# compute the sliding dot product
reverse_query = query[::-1]
dot_product = sps.fftconvolve(T, reverse_query, 'valid')
# compute the distance profile without correcting for standard deviation of the main signal being 0
# since this is numpy, it will result in np.inf if the data_sig is 0
dist_profile = np.sqrt(2 * (m - (dot_product - m * query_m * data_m) / (query_s * data_s)))
# correct for data_s being 0
zero_idxs = np.where(data_s < 1e-8)[0]
if len(zero_idxs) > 0:
n_query = (query - query_m) / query_s
d = np.linalg.norm(n_query - np.zeros(m, dtype=float))
dist_profile[zero_idxs] = d
return dist_profile
def _compute_brute_force_distance_profile(self, query, T, n, m, data_f, data_m, data_s, query_m, query_s):
""" Compute the brute force distance profile. """
dist_profile = np.zeros(n-m+1, dtype=float)
# normalize query
if query_m < 1e-8:
n_query = np.zeros(m, dtype=float)
else:
n_query = (query - query_m) / query_s
# compute the distance profile
for i in range(n-m+1):
T_segm = T[i:i+m]
Tm = data_m[i]
Ts = data_s[i]
# normalize time series segment
if Ts < 1e-8:
T_norm = np.zeros(m, dtype=float)
else:
T_norm = (T_segm - Tm) / Ts
# compute distance
dist_profile[i] = np.linalg.norm(T_norm - n_query)
return dist_profile
def _diagonal_dist(self, data, idx, dataLen, subLen, proLen, dataMu, dataSig):
""" Compute the diagonal distance (as in the original matrix profile code) """
xTerm = np.dot(np.ones(proLen-idx+1), np.dot(data[idx-1:idx+subLen-1], data[:subLen]))
mTerm = data[idx-1:proLen-1] * data[:proLen-idx]
aTerm = data[idx+subLen-1:] * data[subLen:dataLen-idx+1]
if proLen != idx:
xTerm[1:] = xTerm[1:] - np.cumsum(mTerm) + np.cumsum(aTerm)
distProfile = np.divide(xTerm - subLen * dataMu[idx-1:] * dataMu[:proLen-idx+1],
subLen * dataSig[idx-1:] * dataSig[:proLen-idx+1])
distProfile = 2 * subLen * (1 - distProfile)
| [
"pandas.Series",
"numpy.sqrt",
"numpy.ones",
"numpy.arange",
"numpy.divide",
"numpy.round",
"numpy.sort",
"numpy.where",
"scipy.signal.fftconvolve",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"numpy.sum",
"numpy.concatenate",
"numpy.linalg.norm",
"numpy.cumsum",
"numpy.nan_to_num",
... | [((1605, 1621), 'numpy.nan_to_num', 'np.nan_to_num', (['T'], {}), '(T)\n', (1618, 1621), True, 'import numpy as np\n'), ((1685, 1697), 'pandas.Series', 'pd.Series', (['T'], {}), '(T)\n', (1694, 1697), True, 'import pandas as pd\n'), ((3673, 3685), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3681, 3685), True, 'import numpy as np\n'), ((3976, 3988), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3984, 3988), True, 'import numpy as np\n'), ((4373, 4406), 'numpy.concatenate', 'np.concatenate', (['(self.T_train, T)'], {}), '((self.T_train, T))\n', (4387, 4406), True, 'import numpy as np\n'), ((5048, 5071), 'numpy.ones', 'np.ones', (['n'], {'dtype': 'float'}), '(n, dtype=float)\n', (5055, 5071), True, 'import numpy as np\n'), ((5869, 5881), 'pandas.Series', 'pd.Series', (['T'], {}), '(T)\n', (5878, 5881), True, 'import pandas as pd\n'), ((6279, 6311), 'numpy.zeros', 'np.zeros', (['(n - m + 1)'], {'dtype': 'float'}), '(n - m + 1, dtype=float)\n', (6287, 6311), True, 'import numpy as np\n'), ((8419, 8451), 'numpy.zeros', 'np.zeros', (['(n - m + 1)'], {'dtype': 'float'}), '(n - m + 1, dtype=float)\n', (8427, 8451), True, 'import numpy as np\n'), ((9820, 9832), 'pandas.Series', 'pd.Series', (['T'], {}), '(T)\n', (9829, 9832), True, 'import pandas as pd\n'), ((10403, 10435), 'numpy.zeros', 'np.zeros', (['(n - m + 1)'], {'dtype': 'float'}), '(n - m + 1, dtype=float)\n', (10411, 10435), True, 'import numpy as np\n'), ((10858, 10884), 'numpy.arange', 'np.arange', (['(0)', '(n - m + 1)', '(1)'], {}), '(0, n - m + 1, 1)\n', (10867, 10884), True, 'import numpy as np\n'), ((10889, 10915), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (10906, 10915), True, 'import numpy as np\n'), ((13155, 13197), 'scipy.signal.fftconvolve', 'sps.fftconvolve', (['T', 'reverse_query', '"""valid"""'], {}), "(T, reverse_query, 'valid')\n", (13170, 13197), True, 'import scipy.signal as sps\n'), ((13407, 13483), 'numpy.sqrt', 'np.sqrt', (['(2 * (m - 
(dot_product - m * query_m * data_m) / (query_s * data_s)))'], {}), '(2 * (m - (dot_product - m * query_m * data_m) / (query_s * data_s)))\n', (13414, 13483), True, 'import numpy as np\n'), ((13980, 14012), 'numpy.zeros', 'np.zeros', (['(n - m + 1)'], {'dtype': 'float'}), '(n - m + 1, dtype=float)\n', (13988, 14012), True, 'import numpy as np\n'), ((15152, 15286), 'numpy.divide', 'np.divide', (['(xTerm - subLen * dataMu[idx - 1:] * dataMu[:proLen - idx + 1])', '(subLen * dataSig[idx - 1:] * dataSig[:proLen - idx + 1])'], {}), '(xTerm - subLen * dataMu[idx - 1:] * dataMu[:proLen - idx + 1], \n subLen * dataSig[idx - 1:] * dataSig[:proLen - idx + 1])\n', (15161, 15286), True, 'import numpy as np\n'), ((1447, 1469), 'numpy.round', 'np.round', (['(self.m * 0.5)'], {}), '(self.m * 0.5)\n', (1455, 1469), True, 'import numpy as np\n'), ((1857, 1872), 'numpy.ones', 'np.ones', (['proLen'], {}), '(proLen)\n', (1864, 1872), True, 'import numpy as np\n'), ((1914, 1937), 'numpy.arange', 'np.arange', (['(0)', 'proLen', '(1)'], {}), '(0, proLen, 1)\n', (1923, 1937), True, 'import numpy as np\n'), ((2355, 2375), 'numpy.sqrt', 'np.sqrt', (['distProfile'], {}), '(distProfile)\n', (2362, 2375), True, 'import numpy as np\n'), ((2425, 2450), 'numpy.arange', 'np.arange', (['idx', 'proLen', '(1)'], {}), '(idx, proLen, 1)\n', (2434, 2450), True, 'import numpy as np\n'), ((2470, 2503), 'numpy.arange', 'np.arange', (['(0)', '(proLen - idx + 1)', '(1)'], {}), '(0, proLen - idx + 1, 1)\n', (2479, 2503), True, 'import numpy as np\n'), ((4977, 4993), 'numpy.sort', 'np.sort', (['y_score'], {}), '(y_score)\n', (4984, 4993), True, 'import numpy as np\n'), ((6032, 6056), 'numpy.where', 'np.where', (['(data_s < 1e-08)'], {}), '(data_s < 1e-08)\n', (6040, 6056), True, 'import numpy as np\n'), ((6101, 6123), 'numpy.where', 'np.where', (['(data_s > 0.0)'], {}), '(data_s > 0.0)\n', (6109, 6123), True, 'import numpy as np\n'), ((8569, 8658), 'numpy.sqrt', 'np.sqrt', (['(2 * (m - (QT[idxn] - m * 
data_m[idxn] * query_m) / (data_s[idxn] * query_s)))'], {}), '(2 * (m - (QT[idxn] - m * data_m[idxn] * query_m) / (data_s[idxn] *\n query_s)))\n', (8576, 8658), True, 'import numpy as np\n'), ((8791, 8858), 'numpy.sqrt', 'np.sqrt', (['(2 * (m - (QT - m * data_m * query_m) / (data_s * query_s)))'], {}), '(2 * (m - (QT - m * data_m * query_m) / (data_s * query_s)))\n', (8798, 8858), True, 'import numpy as np\n'), ((9733, 9751), 'numpy.ones', 'np.ones', (['(n - m + 1)'], {}), '(n - m + 1)\n', (9740, 9751), True, 'import numpy as np\n'), ((9983, 10007), 'numpy.where', 'np.where', (['(data_s < 1e-08)'], {}), '(data_s < 1e-08)\n', (9991, 10007), True, 'import numpy as np\n'), ((10052, 10074), 'numpy.where', 'np.where', (['(data_s > 0.0)'], {}), '(data_s > 0.0)\n', (10060, 10074), True, 'import numpy as np\n'), ((13542, 13566), 'numpy.where', 'np.where', (['(data_s < 1e-08)'], {}), '(data_s < 1e-08)\n', (13550, 13566), True, 'import numpy as np\n'), ((14085, 14109), 'numpy.zeros', 'np.zeros', (['m'], {'dtype': 'float'}), '(m, dtype=float)\n', (14093, 14109), True, 'import numpy as np\n'), ((14572, 14604), 'numpy.linalg.norm', 'np.linalg.norm', (['(T_norm - n_query)'], {}), '(T_norm - n_query)\n', (14586, 14604), True, 'import numpy as np\n'), ((14837, 14862), 'numpy.ones', 'np.ones', (['(proLen - idx + 1)'], {}), '(proLen - idx + 1)\n', (14844, 14862), True, 'import numpy as np\n'), ((14860, 14913), 'numpy.dot', 'np.dot', (['data[idx - 1:idx + subLen - 1]', 'data[:subLen]'], {}), '(data[idx - 1:idx + subLen - 1], data[:subLen])\n', (14866, 14913), True, 'import numpy as np\n'), ((2874, 2917), 'numpy.where', 'np.where', (['(matrixProfile[pos1] > distProfile)'], {}), '(matrixProfile[pos1] > distProfile)\n', (2882, 2917), True, 'import numpy as np\n'), ((3013, 3056), 'numpy.where', 'np.where', (['(matrixProfile[pos2] > distProfile)'], {}), '(matrixProfile[pos2] > distProfile)\n', (3021, 3056), True, 'import numpy as np\n'), ((8073, 8101), 'numpy.where', 'np.where', (['(D < 
matrix_profile)'], {}), '(D < matrix_profile)\n', (8081, 8101), True, 'import numpy as np\n'), ((8720, 8735), 'numpy.sum', 'np.sum', (['(nq ** 2)'], {}), '(nq ** 2)\n', (8726, 8735), True, 'import numpy as np\n'), ((11697, 11725), 'numpy.where', 'np.where', (['(D < matrix_profile)'], {}), '(D < matrix_profile)\n', (11705, 11725), True, 'import numpy as np\n'), ((14424, 14448), 'numpy.zeros', 'np.zeros', (['m'], {'dtype': 'float'}), '(m, dtype=float)\n', (14432, 14448), True, 'import numpy as np\n'), ((15112, 15128), 'numpy.cumsum', 'np.cumsum', (['aTerm'], {}), '(aTerm)\n', (15121, 15128), True, 'import numpy as np\n'), ((2565, 2607), 'numpy.where', 'np.where', (['((pos2 <= split) & (pos1 > split))'], {}), '((pos2 <= split) & (pos1 > split))\n', (2573, 2607), True, 'import numpy as np\n'), ((2641, 2683), 'numpy.where', 'np.where', (['((pos2 <= split) & (pos1 > split))'], {}), '((pos2 <= split) & (pos1 > split))\n', (2649, 2683), True, 'import numpy as np\n'), ((2717, 2759), 'numpy.where', 'np.where', (['((pos2 <= split) & (pos1 > split))'], {}), '((pos2 <= split) & (pos1 > split))\n', (2725, 2759), True, 'import numpy as np\n'), ((3627, 3639), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3635, 3639), True, 'import numpy as np\n'), ((7539, 7632), 'numpy.sqrt', 'np.sqrt', (['(2 * (m - (QT[idxn] - m * data_m[idxn] * data_m[i]) / (data_s[idxn] *\n data_s[i])))'], {}), '(2 * (m - (QT[idxn] - m * data_m[idxn] * data_m[i]) / (data_s[idxn] *\n data_s[i])))\n', (7546, 7632), True, 'import numpy as np\n'), ((7789, 7860), 'numpy.sqrt', 'np.sqrt', (['(2 * (m - (QT - m * data_m * data_m[i]) / (data_s * data_s[i])))'], {}), '(2 * (m - (QT - m * data_m * data_m[i]) / (data_s * data_s[i])))\n', (7796, 7860), True, 'import numpy as np\n'), ((13691, 13715), 'numpy.zeros', 'np.zeros', (['m'], {'dtype': 'float'}), '(m, dtype=float)\n', (13699, 13715), True, 'import numpy as np\n'), ((15093, 15109), 'numpy.cumsum', 'np.cumsum', (['mTerm'], {}), '(mTerm)\n', (15102, 15109), True, 
'import numpy as np\n'), ((6649, 6664), 'numpy.sum', 'np.sum', (['(Tn ** 2)'], {}), '(Tn ** 2)\n', (6655, 6664), True, 'import numpy as np\n'), ((7706, 7721), 'numpy.sum', 'np.sum', (['(nq ** 2)'], {}), '(nq ** 2)\n', (7712, 7721), True, 'import numpy as np\n'), ((10773, 10788), 'numpy.sum', 'np.sum', (['(Tn ** 2)'], {}), '(Tn ** 2)\n', (10779, 10788), True, 'import numpy as np\n')] |
import numpy as np
import random
def is_valid(pos, board):
    """Return True iff pos = (row, col) addresses an existing cell of board."""
    row, col = pos[0], pos[1]
    try:
        board[row][col]
    except IndexError:
        return False
    # Negative coordinates would silently wrap around in Python indexing,
    # so they must be rejected explicitly.
    if min(pos) < 0:
        return False
    return True
def _next_move(pos, board):
moves = {
"RIGHT": np.array((0, 1)),
"UP": np.array((-1, 0)),
"LEFT": np.array((0, -1)),
"DOWN": np.array((1, 0)),
}
dirs = ["UP", "LEFT", "DOWN", "RIGHT"]
# if "b" not in board:
if board[pos[0]][pos[1]] == "d":
return "CLEAN"
new_pos = (-1, -1)
while not is_valid(new_pos, board):
dir = dirs[random.randint(0, 3)]
new_pos = tuple(pos + moves[dir])
return dir
| [
"numpy.array",
"random.randint"
] | [((263, 279), 'numpy.array', 'np.array', (['(0, 1)'], {}), '((0, 1))\n', (271, 279), True, 'import numpy as np\n'), ((295, 312), 'numpy.array', 'np.array', (['(-1, 0)'], {}), '((-1, 0))\n', (303, 312), True, 'import numpy as np\n'), ((330, 347), 'numpy.array', 'np.array', (['(0, -1)'], {}), '((0, -1))\n', (338, 347), True, 'import numpy as np\n'), ((365, 381), 'numpy.array', 'np.array', (['(1, 0)'], {}), '((1, 0))\n', (373, 381), True, 'import numpy as np\n'), ((601, 621), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (615, 621), False, 'import random\n')] |
import collections
import collections.abc
import copy
import math
import numbers
import random

import cv2
import numpy as np

from up.data.datasets.transforms import Augmentation
from up.utils.general.registry_factory import AUGMENTATION_REGISTRY
@AUGMENTATION_REGISTRY.register('color_jitter_mmseg')
class RandomColorJitterMMSeg(Augmentation):
    """Photometric distortion in the mmsegmentation style: random brightness,
    contrast, saturation and hue jitter, each step applied with probability
    2/3 (see the randint(0, 2) gates below).

    Args:
        brightness_delta (int): max absolute additive brightness shift.
        contrast_range (tuple): (lower, upper) multiplicative contrast range.
        saturation_range (tuple): (lower, upper) multiplicative saturation range.
        hue_delta (int): max absolute hue shift, applied modulo 180 in HSV space.
        color_type (str): input color space name used to build the cv2
            conversion code names, e.g. 'BGR' -> COLOR_BGR2HSV / COLOR_HSV2BGR.
    """
    def __init__(self,
                 brightness_delta=32,
                 contrast_range=(0.5, 1.5),
                 saturation_range=(0.5, 1.5),
                 hue_delta=18,
                 color_type='BGR'):
        super(RandomColorJitterMMSeg, self).__init__()
        self.brightness_delta = brightness_delta
        self.contrast_lower, self.contrast_upper = contrast_range
        self.saturation_lower, self.saturation_upper = saturation_range
        self.hue_delta = hue_delta
        # Resolve the cv2 color-conversion constants for <color_type> <-> HSV.
        self.color2hsv = getattr(cv2, 'COLOR_{}2HSV'.format(color_type))
        self.hsv2color = getattr(cv2, 'COLOR_HSV2{}'.format(color_type))
    def convert(self, img, alpha=1, beta=0):
        """Multiply with alpha, add beta, clip to [0, 255] and cast to uint8."""
        img = img.astype(np.float32) * alpha + beta
        img = np.clip(img, 0, 255)
        return img.astype(np.uint8)
    def brightness(self, img):
        """Brightness distortion."""
        # randint(0, 2) is truthy for 1 and 2 -> apply with probability 2/3.
        if random.randint(0, 2):
            return self.convert(
                img,
                beta=random.uniform(-self.brightness_delta,
                                    self.brightness_delta))
        return img
    def contrast(self, img):
        """Contrast distortion."""
        if random.randint(0, 2):
            return self.convert(
                img,
                alpha=random.uniform(self.contrast_lower, self.contrast_upper))
        return img
    def saturation(self, img):
        """Saturation distortion (scales the S channel in HSV space)."""
        if random.randint(0, 2):
            img = cv2.cvtColor(img, self.color2hsv)
            img[:, :, 1] = self.convert(
                img[:, :, 1],
                alpha=random.uniform(self.saturation_lower,
                                     self.saturation_upper))
            img = cv2.cvtColor(img, self.hsv2color)
        return img
    def hue(self, img):
        """Hue distortion (shifts the H channel in HSV space, modulo 180)."""
        if random.randint(0, 2):
            img = cv2.cvtColor(img, self.color2hsv)
            img[:, :,
                0] = (img[:, :, 0].astype(int)
                      + random.randint(-self.hue_delta, self.hue_delta)) % 180
            img = cv2.cvtColor(img, self.hsv2color)
        return img
    def augment(self, data):
        """Apply the full jitter pipeline to ``data.image``.
        Arguments:
            data: sample whose ``.image`` attribute is an np.ndarray image.
        Returns:
            A shallow copy of ``data`` with the jittered image.
        """
        output = copy.copy(data)
        img = data.image
        assert isinstance(img, np.ndarray)
        img = np.uint8(img)
        # random brightness
        img = self.brightness(img)
        # mode == 0 --> do random contrast first
        # mode == 1 --> do random contrast last
        # NOTE(review): randint(0, 2) also yields 2, in which case contrast is
        # skipped entirely -- presumably intentional, but worth confirming
        # against the upstream mmseg implementation.
        mode = random.randint(0, 2)
        if mode == 1:
            img = self.contrast(img)
        # random saturation
        img = self.saturation(img)
        # random hue
        img = self.hue(img)
        # random contrast
        if mode == 0:
            img = self.contrast(img)
        img = np.asanyarray(img)
        output.image = img
        return output
    def __repr__(self):
        format_string = self.__class__.__name__ + '('
        format_string += 'brightness={0}'.format(self.brightness_delta)
        format_string += ', contrast=({0},{1})'.format(self.contrast_lower, self.contrast_upper)
        format_string += ', saturation=({0},{1})'.format(self.saturation_lower, self.saturation_upper)
        format_string += ', hue={0})'.format(self.hue_delta)
        return format_string
@AUGMENTATION_REGISTRY.register('seg_resize')
class SegResize(Augmentation):
    """Resize image and segmentation label to a fixed size.

    Args:
        size (iterable of 2 ints): target size passed to cv2.resize as
            ``dsize``, i.e. (width, height) in OpenCV convention.
    """
    def __init__(self, size, **kwargs):
        # Fix: name our own class in super(); the original wrote
        # `super(Augmentation, self)`, which skips Augmentation.__init__
        # (siblings such as SegRandResize call it correctly).
        super(SegResize, self).__init__()
        # collections.abc.Iterable: the collections.Iterable alias was
        # removed in Python 3.10.
        assert (isinstance(size, collections.abc.Iterable) and len(size) == 2)
        self.size = tuple(size)

    def augment(self, data):
        """Resize data['image'] (bilinear) and data['gt_semantic_seg'] (nearest)."""
        data['image'] = cv2.resize(data['image'], dsize=self.size, interpolation=cv2.INTER_LINEAR)
        data['gt_semantic_seg'] = cv2.resize(data['gt_semantic_seg'], dsize=self.size, interpolation=cv2.INTER_NEAREST)
        return data
@AUGMENTATION_REGISTRY.register('seg_rand_resize')
class SegRandResize(Augmentation):
    """
    Randomly resize image & label with scale factor in [scale_min, scale_max]

    Args:
        scale (iterable of 2 numbers): (scale_min, scale_max) range for the
            random area scale.
        aspect_ratio (iterable of 2 numbers, optional): (ratio_min, ratio_max)
            with 0 < ratio_min < ratio_max; None disables aspect jitter.
    """
    def __init__(self, scale, aspect_ratio=None):
        super(SegRandResize, self).__init__()
        # collections.abc.Iterable: the collections.Iterable alias was
        # removed in Python 3.10.
        assert (isinstance(scale, collections.abc.Iterable) and len(scale) == 2)
        if isinstance(scale, collections.abc.Iterable) and len(scale) == 2 \
                and isinstance(scale[0], numbers.Number) and isinstance(scale[1], numbers.Number):
            self.scale = scale
        else:
            raise (RuntimeError("segtransforms.RandScale() scale param error.\n"))
        if aspect_ratio is None:
            self.aspect_ratio = aspect_ratio
        elif isinstance(aspect_ratio, collections.abc.Iterable) and len(aspect_ratio) == 2 \
                and isinstance(aspect_ratio[0], numbers.Number) and isinstance(aspect_ratio[1], numbers.Number) \
                and 0 < aspect_ratio[0] < aspect_ratio[1]:
            self.aspect_ratio = aspect_ratio
        else:
            raise (RuntimeError("segtransforms.RandScale() aspect_ratio param error.\n"))

    def augment(self, data):
        """Resize image (bilinear) and label (nearest) by a random scale and,
        optionally, a random aspect ratio."""
        image = data['image']
        label = data['gt_semantic_seg']
        # 50% chance of a down-scale in [scale[0], 1), 50% of an up-scale in [1, scale[1]).
        if random.random() < 0.5:
            temp_scale = self.scale[0] + (1. - self.scale[0]) * random.random()
        else:
            temp_scale = 1. + (self.scale[1] - 1.) * random.random()
        temp_aspect_ratio = 1.0
        if self.aspect_ratio is not None:
            temp_aspect_ratio = self.aspect_ratio[0] + (self.aspect_ratio[1] - self.aspect_ratio[0]) * random.random()
            # sqrt so the sampled ratio is split evenly between the two axes
            temp_aspect_ratio = math.sqrt(temp_aspect_ratio)
        scale_factor_w = temp_scale * temp_aspect_ratio
        scale_factor_h = temp_scale / temp_aspect_ratio
        h, w, _ = image.shape
        new_w = int(w * scale_factor_w)
        new_h = int(h * scale_factor_h)
        data['image'] = cv2.resize(image, dsize=(new_w, new_h), interpolation=cv2.INTER_LINEAR)
        data['gt_semantic_seg'] = cv2.resize(label, dsize=(new_w, new_h), interpolation=cv2.INTER_NEAREST)
        return data
@AUGMENTATION_REGISTRY.register('seg_crop')
class SegCrop(Augmentation):
    """Crops the given tensor.
    Args:
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is made.
        crop_type (str): 'rand' for a random crop position, 'center' for a
            centered crop.
        ignore_label (int): padding value for the label map when the input is
            smaller than the crop size.
    """
    def __init__(self, size, crop_type='center', ignore_label=255):
        super(SegCrop, self).__init__()
        if isinstance(size, int):
            self.crop_h = size
            self.crop_w = size
        # collections.abc.Iterable: the collections.Iterable alias was
        # removed in Python 3.10.
        elif isinstance(size, collections.abc.Iterable) and len(size) == 2 \
                and isinstance(size[0], int) and isinstance(size[1], int) \
                and size[0] > 0 and size[1] > 0:
            self.crop_h = size[0]
            self.crop_w = size[1]
        else:
            raise (RuntimeError("crop size error.\n"))
        if crop_type == 'center' or crop_type == 'rand':
            self.crop_type = crop_type
        else:
            raise (RuntimeError("crop type error: rand | center\n"))
        if isinstance(ignore_label, int):
            self.ignore_label = ignore_label
        else:
            raise (RuntimeError("ignore_label should be an integer number\n"))

    def augment(self, data):
        """Pad (bottom/right, if needed) then crop image and label to (crop_h, crop_w)."""
        image = data['image']
        label = data['gt_semantic_seg']
        h, w, _ = image.shape
        pad_h = max(self.crop_h - h, 0)
        pad_w = max(self.crop_w - w, 0)
        if pad_h > 0 or pad_w > 0:
            # Image is padded with black, the label with ignore_label so the
            # padded area does not contribute to the loss downstream.
            image = cv2.copyMakeBorder(image, 0, pad_h, 0,
                                       pad_w, cv2.BORDER_CONSTANT,
                                       value=(0.0, 0.0, 0.0))
            label = cv2.copyMakeBorder(label, 0, pad_h, 0,
                                       pad_w, cv2.BORDER_CONSTANT,
                                       value=self.ignore_label)
            h, w, _ = image.shape
        if self.crop_type == 'rand':
            h_off = random.randint(0, h - self.crop_h)
            w_off = random.randint(0, w - self.crop_w)
        else:
            h_off = (h - self.crop_h) // 2
            w_off = (w - self.crop_w) // 2
        data['image'] = np.asarray(image[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        data['gt_semantic_seg'] = np.asarray(label[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        return data
@AUGMENTATION_REGISTRY.register('seg_random_flip')
class SegRandomHorizontalFlip(Augmentation):
    """Horizontally flip image and label together with probability 0.5."""

    def augment(self, data):
        # np.random.choice(2) yields 0 or 1 -> step is -1 (flip) or +1 (no-op)
        step = np.random.choice(2) * 2 - 1
        data['image'] = data['image'][:, ::step, :]
        data['gt_semantic_seg'] = data['gt_semantic_seg'][:, ::step]
        return data
@AUGMENTATION_REGISTRY.register('seg_rand_rotate')
class RandRotate(Augmentation):
    """Rotate image and label around the center by a random angle in [-10, 10] degrees."""

    def augment(self, data):
        img = data['image']
        seg = data['gt_semantic_seg']
        theta = random.random() * 20 - 10
        rows, cols = img.shape[:2]
        rot = cv2.getRotationMatrix2D((cols / 2, rows / 2), theta, 1)
        # Bilinear for the image, nearest for the label to keep class ids intact.
        data['image'] = cv2.warpAffine(img, rot, (cols, rows), flags=cv2.INTER_LINEAR)
        data['gt_semantic_seg'] = cv2.warpAffine(seg, rot, (cols, rows), flags=cv2.INTER_NEAREST)
        return data
@AUGMENTATION_REGISTRY.register('seg_rand_blur')
class RandomGaussianBlur(Augmentation):
    """Blur the image with a Gaussian kernel of random size 1/3/5/7 (1 = no-op)."""

    def augment(self, data):
        ksize = random.choice([1, 3, 5, 7])
        if ksize > 1:
            # apply the gaussian blur; sigma=0 lets OpenCV derive it from ksize
            data['image'] = cv2.GaussianBlur(data['image'], (ksize, ksize), 0)
        return data
@AUGMENTATION_REGISTRY.register('seg_rand_brightness')
class Random_Brightness(Augmentation):
    """Shift image brightness by a random integer in [-shift_value, shift_value],
    applied with probability 0.5."""

    def __init__(self, shift_value=10):
        super().__init__()
        self.shift_value = shift_value

    def augment(self, data):
        # skip the augmentation half of the time
        if random.random() < 0.5:
            return data
        shifted = data['image'].astype(np.float32)
        offset = random.randint(-self.shift_value, self.shift_value)
        shifted[:, :, :] += offset
        shifted = np.around(shifted)
        data['image'] = np.clip(shifted, 0, 255).astype(np.uint8)
        return data
| [
"numpy.clip",
"numpy.uint8",
"up.utils.general.registry_factory.AUGMENTATION_REGISTRY.register",
"math.sqrt",
"numpy.asanyarray",
"copy.copy",
"numpy.asarray",
"random.randint",
"random.uniform",
"cv2.warpAffine",
"random.choice",
"numpy.random.choice",
"numpy.around",
"cv2.cvtColor",
"c... | [((227, 279), 'up.utils.general.registry_factory.AUGMENTATION_REGISTRY.register', 'AUGMENTATION_REGISTRY.register', (['"""color_jitter_mmseg"""'], {}), "('color_jitter_mmseg')\n", (257, 279), False, 'from up.utils.general.registry_factory import AUGMENTATION_REGISTRY\n'), ((3806, 3850), 'up.utils.general.registry_factory.AUGMENTATION_REGISTRY.register', 'AUGMENTATION_REGISTRY.register', (['"""seg_resize"""'], {}), "('seg_resize')\n", (3836, 3850), False, 'from up.utils.general.registry_factory import AUGMENTATION_REGISTRY\n'), ((4346, 4395), 'up.utils.general.registry_factory.AUGMENTATION_REGISTRY.register', 'AUGMENTATION_REGISTRY.register', (['"""seg_rand_resize"""'], {}), "('seg_rand_resize')\n", (4376, 4395), False, 'from up.utils.general.registry_factory import AUGMENTATION_REGISTRY\n'), ((6486, 6528), 'up.utils.general.registry_factory.AUGMENTATION_REGISTRY.register', 'AUGMENTATION_REGISTRY.register', (['"""seg_crop"""'], {}), "('seg_crop')\n", (6516, 6528), False, 'from up.utils.general.registry_factory import AUGMENTATION_REGISTRY\n'), ((8821, 8870), 'up.utils.general.registry_factory.AUGMENTATION_REGISTRY.register', 'AUGMENTATION_REGISTRY.register', (['"""seg_random_flip"""'], {}), "('seg_random_flip')\n", (8851, 8870), False, 'from up.utils.general.registry_factory import AUGMENTATION_REGISTRY\n'), ((9176, 9225), 'up.utils.general.registry_factory.AUGMENTATION_REGISTRY.register', 'AUGMENTATION_REGISTRY.register', (['"""seg_rand_rotate"""'], {}), "('seg_rand_rotate')\n", (9206, 9225), False, 'from up.utils.general.registry_factory import AUGMENTATION_REGISTRY\n'), ((9730, 9777), 'up.utils.general.registry_factory.AUGMENTATION_REGISTRY.register', 'AUGMENTATION_REGISTRY.register', (['"""seg_rand_blur"""'], {}), "('seg_rand_blur')\n", (9760, 9777), False, 'from up.utils.general.registry_factory import AUGMENTATION_REGISTRY\n'), ((10071, 10124), 'up.utils.general.registry_factory.AUGMENTATION_REGISTRY.register', 'AUGMENTATION_REGISTRY.register', 
(['"""seg_rand_brightness"""'], {}), "('seg_rand_brightness')\n", (10101, 10124), False, 'from up.utils.general.registry_factory import AUGMENTATION_REGISTRY\n'), ((1136, 1156), 'numpy.clip', 'np.clip', (['img', '(0)', '(255)'], {}), '(img, 0, 255)\n', (1143, 1156), True, 'import numpy as np\n'), ((1273, 1293), 'random.randint', 'random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (1287, 1293), False, 'import random\n'), ((1564, 1584), 'random.randint', 'random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (1578, 1584), False, 'import random\n'), ((1819, 1839), 'random.randint', 'random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (1833, 1839), False, 'import random\n'), ((2222, 2242), 'random.randint', 'random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (2236, 2242), False, 'import random\n'), ((2713, 2728), 'copy.copy', 'copy.copy', (['data'], {}), '(data)\n', (2722, 2728), False, 'import copy\n'), ((2811, 2824), 'numpy.uint8', 'np.uint8', (['img'], {}), '(img)\n', (2819, 2824), True, 'import numpy as np\n'), ((3001, 3021), 'random.randint', 'random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (3015, 3021), False, 'import random\n'), ((3294, 3312), 'numpy.asanyarray', 'np.asanyarray', (['img'], {}), '(img)\n', (3307, 3312), True, 'import numpy as np\n'), ((4128, 4202), 'cv2.resize', 'cv2.resize', (["data['image']"], {'dsize': 'self.size', 'interpolation': 'cv2.INTER_LINEAR'}), "(data['image'], dsize=self.size, interpolation=cv2.INTER_LINEAR)\n", (4138, 4202), False, 'import cv2\n'), ((4237, 4327), 'cv2.resize', 'cv2.resize', (["data['gt_semantic_seg']"], {'dsize': 'self.size', 'interpolation': 'cv2.INTER_NEAREST'}), "(data['gt_semantic_seg'], dsize=self.size, interpolation=cv2.\n INTER_NEAREST)\n", (4247, 4327), False, 'import cv2\n'), ((6284, 6355), 'cv2.resize', 'cv2.resize', (['image'], {'dsize': '(new_w, new_h)', 'interpolation': 'cv2.INTER_LINEAR'}), '(image, dsize=(new_w, new_h), interpolation=cv2.INTER_LINEAR)\n', (6294, 6355), False, 'import cv2\n'), ((6390, 
6462), 'cv2.resize', 'cv2.resize', (['label'], {'dsize': '(new_w, new_h)', 'interpolation': 'cv2.INTER_NEAREST'}), '(label, dsize=(new_w, new_h), interpolation=cv2.INTER_NEAREST)\n', (6400, 6462), False, 'import cv2\n'), ((8592, 8680), 'numpy.asarray', 'np.asarray', (['image[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w]', 'np.float32'], {}), '(image[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w], np.\n float32)\n', (8602, 8680), True, 'import numpy as np\n'), ((8712, 8800), 'numpy.asarray', 'np.asarray', (['label[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w]', 'np.float32'], {}), '(label[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w], np.\n float32)\n', (8722, 8800), True, 'import numpy as np\n'), ((9456, 9505), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(w / 2, h / 2)', 'angle', '(1)'], {}), '((w / 2, h / 2), angle, 1)\n', (9479, 9505), False, 'import cv2\n'), ((9530, 9600), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'rotation_matrix', '(w, h)'], {'flags': 'cv2.INTER_LINEAR'}), '(image, rotation_matrix, (w, h), flags=cv2.INTER_LINEAR)\n', (9544, 9600), False, 'import cv2\n'), ((9635, 9706), 'cv2.warpAffine', 'cv2.warpAffine', (['label', 'rotation_matrix', '(w, h)'], {'flags': 'cv2.INTER_NEAREST'}), '(label, rotation_matrix, (w, h), flags=cv2.INTER_NEAREST)\n', (9649, 9706), False, 'import cv2\n'), ((9868, 9895), 'random.choice', 'random.choice', (['[1, 3, 5, 7]'], {}), '([1, 3, 5, 7])\n', (9881, 9895), False, 'import random\n'), ((10445, 10496), 'random.randint', 'random.randint', (['(-self.shift_value)', 'self.shift_value'], {}), '(-self.shift_value, self.shift_value)\n', (10459, 10496), False, 'import random\n'), ((10545, 10561), 'numpy.around', 'np.around', (['image'], {}), '(image)\n', (10554, 10561), True, 'import numpy as np\n'), ((1859, 1892), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'self.color2hsv'], {}), '(img, self.color2hsv)\n', (1871, 1892), False, 'import cv2\n'), ((2103, 2136), 'cv2.cvtColor', 
'cv2.cvtColor', (['img', 'self.hsv2color'], {}), '(img, self.hsv2color)\n', (2115, 2136), False, 'import cv2\n'), ((2262, 2295), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'self.color2hsv'], {}), '(img, self.color2hsv)\n', (2274, 2295), False, 'import cv2\n'), ((2462, 2495), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'self.hsv2color'], {}), '(img, self.hsv2color)\n', (2474, 2495), False, 'import cv2\n'), ((5598, 5613), 'random.random', 'random.random', ([], {}), '()\n', (5611, 5613), False, 'import random\n'), ((6009, 6037), 'math.sqrt', 'math.sqrt', (['temp_aspect_ratio'], {}), '(temp_aspect_ratio)\n', (6018, 6037), False, 'import math\n'), ((7930, 8024), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['image', '(0)', 'pad_h', '(0)', 'pad_w', 'cv2.BORDER_CONSTANT'], {'value': '(0.0, 0.0, 0.0)'}), '(image, 0, pad_h, 0, pad_w, cv2.BORDER_CONSTANT, value=(\n 0.0, 0.0, 0.0))\n', (7948, 8024), False, 'import cv2\n'), ((8118, 8214), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['label', '(0)', 'pad_h', '(0)', 'pad_w', 'cv2.BORDER_CONSTANT'], {'value': 'self.ignore_label'}), '(label, 0, pad_h, 0, pad_w, cv2.BORDER_CONSTANT, value=\n self.ignore_label)\n', (8136, 8214), False, 'import cv2\n'), ((8377, 8411), 'random.randint', 'random.randint', (['(0)', '(h - self.crop_h)'], {}), '(0, h - self.crop_h)\n', (8391, 8411), False, 'import random\n'), ((8432, 8466), 'random.randint', 'random.randint', (['(0)', '(w - self.crop_w)'], {}), '(0, w - self.crop_w)\n', (8446, 8466), False, 'import random\n'), ((9986, 10046), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (["data['image']", '(gauss_size, gauss_size)', '(0)'], {}), "(data['image'], (gauss_size, gauss_size), 0)\n", (10002, 10046), False, 'import cv2\n'), ((10311, 10326), 'random.random', 'random.random', ([], {}), '()\n', (10324, 10326), False, 'import random\n'), ((9030, 9049), 'numpy.random.choice', 'np.random.choice', (['(2)'], {}), '(2)\n', (9046, 9049), True, 'import numpy as np\n'), ((9373, 9388), 'random.random', 
'random.random', ([], {}), '()\n', (9386, 9388), False, 'import random\n'), ((10578, 10600), 'numpy.clip', 'np.clip', (['image', '(0)', '(255)'], {}), '(image, 0, 255)\n', (10585, 10600), True, 'import numpy as np\n'), ((1370, 1431), 'random.uniform', 'random.uniform', (['(-self.brightness_delta)', 'self.brightness_delta'], {}), '(-self.brightness_delta, self.brightness_delta)\n', (1384, 1431), False, 'import random\n'), ((1662, 1718), 'random.uniform', 'random.uniform', (['self.contrast_lower', 'self.contrast_upper'], {}), '(self.contrast_lower, self.contrast_upper)\n', (1676, 1718), False, 'import random\n'), ((1986, 2046), 'random.uniform', 'random.uniform', (['self.saturation_lower', 'self.saturation_upper'], {}), '(self.saturation_lower, self.saturation_upper)\n', (2000, 2046), False, 'import random\n'), ((2389, 2436), 'random.randint', 'random.randint', (['(-self.hue_delta)', 'self.hue_delta'], {}), '(-self.hue_delta, self.hue_delta)\n', (2403, 2436), False, 'import random\n'), ((5685, 5700), 'random.random', 'random.random', ([], {}), '()\n', (5698, 5700), False, 'import random\n'), ((5768, 5783), 'random.random', 'random.random', ([], {}), '()\n', (5781, 5783), False, 'import random\n'), ((5961, 5976), 'random.random', 'random.random', ([], {}), '()\n', (5974, 5976), False, 'import random\n')] |
from numpy.core.numeric import count_nonzero
import pandas as pd
import numpy as np
import re
data = pd.read_csv("data/day13.csv", header = None, dtype=str, delimiter= '\n')[0]
codes = [re.split("\s\S\S\s", word) for word in data.values][1:]
# Challenge 1
word = np.array(data.values)[0]
c_dic = {c[0]:c[1] for c in codes}
c_dic['0'+word[0]] = ''
c_dic[word[-1]+'0'] = ''
word = '0'+word+'0'
steps = 10
dic = {key:0 for key in c_dic}
for i in range(len(word)-1): dic[word[i]+word[i+1]] += 1
for step in range(steps):
dic2 = {k:val for k, val in dic.items()}
for key in dic:
if key[0] != '0' and key[-1] != '0':
res = c_dic[key]
dic2[key[0]+res] += dic[key]
dic2[res+key[1]] += dic[key]
dic2[key] -= dic[key]
dic = {k:val for k, val in dic2.items()}
occ = {val:0 for k,val in c_dic.items()}
for char in word: occ[char] = 0
for key in dic:
occ[key[0]] += dic[key]
occ[key[1]] += dic[key]
out = np.sort([val for k,val in occ.items()])
print((out[-1]-out[2])//2)
# Challenge 2
word = np.array(data.values)[0]
c_dic = {c[0]:c[1] for c in codes}
c_dic['0'+word[0]] = ''
c_dic[word[-1]+'0'] = ''
word = '0'+word+'0'
steps = 40
dic = {key:0 for key in c_dic}
for i in range(len(word)-1): dic[word[i]+word[i+1]] += 1
for step in range(steps):
dic2 = {k:val for k, val in dic.items()}
for key in dic:
if key[0] != '0' and key[-1] != '0':
res = c_dic[key]
dic2[key[0]+res] += dic[key]
dic2[res+key[1]] += dic[key]
dic2[key] -= dic[key]
dic = {k:val for k, val in dic2.items()}
occ = {val:0 for k,val in c_dic.items()}
for char in word: occ[char] = 0
for key in dic:
occ[key[0]] += dic[key]
occ[key[1]] += dic[key]
out = np.sort([val for k,val in occ.items()])
print((out[-1]-out[2])//2) | [
"numpy.array",
"pandas.read_csv",
"re.split"
] | [((102, 171), 'pandas.read_csv', 'pd.read_csv', (['"""data/day13.csv"""'], {'header': 'None', 'dtype': 'str', 'delimiter': '"""\n"""'}), "('data/day13.csv', header=None, dtype=str, delimiter='\\n')\n", (113, 171), True, 'import pandas as pd\n'), ((265, 286), 'numpy.array', 'np.array', (['data.values'], {}), '(data.values)\n', (273, 286), True, 'import numpy as np\n'), ((1066, 1087), 'numpy.array', 'np.array', (['data.values'], {}), '(data.values)\n', (1074, 1087), True, 'import numpy as np\n'), ((187, 217), 're.split', 're.split', (['"""\\\\s\\\\S\\\\S\\\\s"""', 'word'], {}), "('\\\\s\\\\S\\\\S\\\\s', word)\n", (195, 217), False, 'import re\n')] |
import argparse
import pickle
import numpy as np
from numba import njit
@njit
def count_trees(tau, phi, order, traversal):
  """Count the complete parent assignments (spanning trees) over K nodes
  that are consistent with the ancestry matrix `tau` and the per-sample
  frequency ("sum") constraint imposed by `phi`.

  `traversal` selects stack ('dfs') or queue ('bfs') processing of the
  partial trees. `order` gives the sequence in which nodes are attached.
  """
  assert traversal == 'dfs' or traversal == 'bfs'
  K = len(tau)
  # Every node except the root (index 0) must end up with exactly one parent.
  expected_colsum = np.ones(K)
  expected_colsum[0] = 0
  # Start from all tau-permitted edges, with self-loops removed.
  first_partial = np.copy(tau)
  np.fill_diagonal(first_partial, 0)
  # delta tracks each node's remaining per-sample frequency budget.
  first_delta = np.copy(phi)
  partial_trees = [(1, first_partial, first_delta)]
  completed_trees = 0
  while len(partial_trees) > 0:
    if traversal == 'dfs':
      to_resolve, P, delta = partial_trees.pop()
    else:
      # NOTE: list.pop(0) is O(len(list)); a deque would be preferable, but
      # is presumably unavailable inside numba's nopython mode — confirm.
      to_resolve, P, delta = partial_trees.pop(0)
    #to_resolve, P, delta = partial_trees[0]
    #partial_trees = partial_trees[1:]
    if to_resolve == K:
      # All nodes placed: sanity-check parent counts and leftover budgets.
      assert np.all(expected_colsum == np.sum(P, axis=0))
      assert np.all(0 <= delta) and np.all(delta <= 1)
      np.fill_diagonal(P, 1)
      completed_trees += 1
      continue
    R = order[to_resolve]
    # Candidate parents are the rows still permitted to point at column R.
    parents = np.nonzero(P[:,R])[0]
    for parent in parents:
      P_prime = np.copy(P)
      P_prime[:,R] = 0
      P_prime[parent,R] = 1
      # Reject this parent if attaching R would overspend its budget in
      # any sample.
      if np.any(delta[parent] - phi[R] < 0):
        continue
      delta_prime = np.copy(delta)
      delta_prime[parent] -= phi[R]
      partial_trees.append((to_resolve + 1, P_prime, delta_prime))
  return completed_trees
@njit
def make_order(phi):
  """Return node indices sorted by decreasing total frequency.

  The root is expected to be node 0, which must therefore have the largest
  total; this is asserted before returning.
  """
  totals = np.sum(phi, axis=1)
  ordering = np.argsort(-totals)
  assert ordering[0] == 0
  return ordering
@njit
def make_tau(phi, order):
  """Build the K x K potential-ancestry matrix: tau[I',J'] = 1 when node I'
  may be an ancestor of node J', i.e. when phi[I'] dominates phi[J'] in
  every sample. The diagonal starts at 1 via np.eye.
  """
  K, S = phi.shape
  tau = np.eye(K)
  for I in range(K):
    for J in range(I + 1, K):
      # Only earlier nodes in `order` (larger totals) may dominate later ones.
      I_prime = order[I]
      J_prime = order[J]
      # Identical rows would make the ancestry direction ambiguous.
      assert not np.all(phi[I_prime] == phi[J_prime])
      if np.all(phi[I_prime] >= phi[J_prime]):
        tau[I_prime,J_prime] = 1
  return tau
def main():
  """Load a pickled simulation (phi matrix + true adjacency), count the
  spanning trees consistent with phi, and print the count."""
  parser = argparse.ArgumentParser(
    description='LOL HI THERE',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter
  )
  parser.add_argument('sim_data_fn')
  args = parser.parse_args()

  with open(args.sim_data_fn, 'rb') as dataf:
    # NOTE: pickle.load is only safe on trusted input files.
    simdata = pickle.load(dataf)
  phi, true_tree = simdata['phi'], simdata['adjm']

  order = make_order(phi)
  tau = make_tau(phi, order)
  num_trees = count_trees(tau, phi, order, 'dfs')
  print(args.sim_data_fn, num_trees)

# Fix: guard the entry point so importing this module no longer runs the
# script (and parses sys.argv) as a side effect.
if __name__ == '__main__':
  main()
| [
"numpy.copy",
"numpy.eye",
"numpy.ones",
"argparse.ArgumentParser",
"pickle.load",
"numpy.fill_diagonal",
"numpy.any",
"numpy.argsort",
"numpy.sum",
"numpy.nonzero",
"numpy.all"
] | [((209, 219), 'numpy.ones', 'np.ones', (['K'], {}), '(K)\n', (216, 219), True, 'import numpy as np\n'), ((264, 276), 'numpy.copy', 'np.copy', (['tau'], {}), '(tau)\n', (271, 276), True, 'import numpy as np\n'), ((279, 313), 'numpy.fill_diagonal', 'np.fill_diagonal', (['first_partial', '(0)'], {}), '(first_partial, 0)\n', (295, 313), True, 'import numpy as np\n'), ((330, 342), 'numpy.copy', 'np.copy', (['phi'], {}), '(phi)\n', (337, 342), True, 'import numpy as np\n'), ((1314, 1333), 'numpy.sum', 'np.sum', (['phi'], {'axis': '(1)'}), '(phi, axis=1)\n', (1320, 1333), True, 'import numpy as np\n'), ((1344, 1363), 'numpy.argsort', 'np.argsort', (['(-phisum)'], {}), '(-phisum)\n', (1354, 1363), True, 'import numpy as np\n'), ((1462, 1471), 'numpy.eye', 'np.eye', (['K'], {}), '(K)\n', (1468, 1471), True, 'import numpy as np\n'), ((1746, 1858), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""LOL HI THERE"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='LOL HI THERE', formatter_class=\n argparse.ArgumentDefaultsHelpFormatter)\n", (1769, 1858), False, 'import argparse\n'), ((1993, 2011), 'pickle.load', 'pickle.load', (['dataf'], {}), '(dataf)\n', (2004, 2011), False, 'import pickle\n'), ((817, 839), 'numpy.fill_diagonal', 'np.fill_diagonal', (['P', '(1)'], {}), '(P, 1)\n', (833, 839), True, 'import numpy as np\n'), ((923, 942), 'numpy.nonzero', 'np.nonzero', (['P[:, R]'], {}), '(P[:, R])\n', (933, 942), True, 'import numpy as np\n'), ((988, 998), 'numpy.copy', 'np.copy', (['P'], {}), '(P)\n', (995, 998), True, 'import numpy as np\n'), ((1059, 1093), 'numpy.any', 'np.any', (['(delta[parent] - phi[R] < 0)'], {}), '(delta[parent] - phi[R] < 0)\n', (1065, 1093), True, 'import numpy as np\n'), ((1132, 1146), 'numpy.copy', 'np.copy', (['delta'], {}), '(delta)\n', (1139, 1146), True, 'import numpy as np\n'), ((1637, 1673), 'numpy.all', 'np.all', (['(phi[I_prime] >= phi[J_prime])'], {}), '(phi[I_prime] >= 
phi[J_prime])\n', (1643, 1673), True, 'import numpy as np\n'), ((769, 787), 'numpy.all', 'np.all', (['(0 <= delta)'], {}), '(0 <= delta)\n', (775, 787), True, 'import numpy as np\n'), ((792, 810), 'numpy.all', 'np.all', (['(delta <= 1)'], {}), '(delta <= 1)\n', (798, 810), True, 'import numpy as np\n'), ((1591, 1627), 'numpy.all', 'np.all', (['(phi[I_prime] == phi[J_prime])'], {}), '(phi[I_prime] == phi[J_prime])\n', (1597, 1627), True, 'import numpy as np\n'), ((737, 754), 'numpy.sum', 'np.sum', (['P'], {'axis': '(0)'}), '(P, axis=0)\n', (743, 754), True, 'import numpy as np\n')] |
import numpy as np
class OUNoiseGenerator(object):
def __init__(self, action_dim, action_low, action_high,
mu=0.0, theta=0.15, max_sigma=0.3, min_sigma=0.3, decay_period=100000):
self.mu_ = mu
self.theta_ = theta
self.sigma_ = max_sigma
self.max_sigma_ = max_sigma
self.min_sigma_ = min_sigma
self.decay_period_ = decay_period
self.action_dim_ = action_dim
self.low_ = action_low
self.high_ = action_high
self.state_ = None
self.reset()
def reset(self):
self.state_ = np.ones(self.action_dim_) * self.mu_
def evolve_state(self):
x = self.state_
dx = self.theta_ * (self.mu_ - x) + self.sigma_ * np.random.randn(self.action_dim_)
self.state_ = x + dx
return self.state_
def get_action(self, action, t=0):
ou_state = self.evolve_state()
self.sigma_ = self.max_sigma_ - (self.max_sigma_ - self.min_sigma_) * min(1.0, t / self.decay_period_)
return np.clip(action + ou_state, self.low_, self.high_)
| [
"numpy.clip",
"numpy.random.randn",
"numpy.ones"
] | [((1035, 1084), 'numpy.clip', 'np.clip', (['(action + ou_state)', 'self.low_', 'self.high_'], {}), '(action + ou_state, self.low_, self.high_)\n', (1042, 1084), True, 'import numpy as np\n'), ((592, 617), 'numpy.ones', 'np.ones', (['self.action_dim_'], {}), '(self.action_dim_)\n', (599, 617), True, 'import numpy as np\n'), ((740, 773), 'numpy.random.randn', 'np.random.randn', (['self.action_dim_'], {}), '(self.action_dim_)\n', (755, 773), True, 'import numpy as np\n')] |
import numpy as np
def measure_curvature_pixels(y_eval, left_fit, right_fit):
    '''
    Calculates the curvature of polynomial functions in pixels.
    PARAMETERS
    * y_eval : y-position (in pixels) at which the radius of curvature is evaluated
    * left_fit / right_fit : second-order polynomial coefficients [A, B, C] for each lane line
    RETURNS
    * (left_radius, right_radius) : radii of curvature in pixels
    '''
    def _radius_of_curvature(fit):
        # R = (1 + (2*A*y + B)^2)^(3/2) / |2*A| for x = A*y^2 + B*y + C
        first_deriv = 2*fit[0]*y_eval + fit[1]
        return (1 + first_deriv**2)**1.5 / np.absolute(2*fit[0])
    return _radius_of_curvature(left_fit), _radius_of_curvature(right_fit)
def measure_curvature_meters(y_eval, fit_pts, ym_per_pix= 30/720, xm_per_pix=3.7/900):
    '''
    Calculates the curvature of polynomial functions in meters.
    NOTE: Chose a straight line birdeye view image to calculate pixel to meter parameters.
    PARAMETERS
    * ym_per_pix : meters per pixel in y dimension (meters/length of lane in pixel)
    * xm_per_pix : meters per pixel in x dimension (meters/width between lanes in pixel)
    * y_eval : where we want radius of curvature to be evaluated (We'll choose the maximum y-value, bottom of image)
    * fit_pts : tuple (left_fitx, right_fitx, ploty) of pixel-space lane points
    RETURNS
    * (left_radius_m, right_radius_m) : radii of curvature in meters
    '''
    # Unpack and Define variables
    (left_fitx, right_fitx, ploty) = fit_pts
    # Fit new polynomials to x,y in world space
    left_fit_cr = np.polyfit(ploty*ym_per_pix, left_fitx*xm_per_pix, 2)
    right_fit_cr = np.polyfit(ploty*ym_per_pix, right_fitx*xm_per_pix, 2)
    # Calculation of R_curve (radius of curvature); y_eval is given in pixels,
    # hence the extra ym_per_pix factor inside the derivative term.
    left_radius_curve = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
    right_radius_curve = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
    return left_radius_curve, right_radius_curve | [
return left_radius_curve, right_radius_curve | [
"numpy.absolute",
"numpy.polyfit"
] | [((1317, 1374), 'numpy.polyfit', 'np.polyfit', (['(ploty * ym_per_pix)', '(left_fitx * xm_per_pix)', '(2)'], {}), '(ploty * ym_per_pix, left_fitx * xm_per_pix, 2)\n', (1327, 1374), True, 'import numpy as np\n'), ((1390, 1448), 'numpy.polyfit', 'np.polyfit', (['(ploty * ym_per_pix)', '(right_fitx * xm_per_pix)', '(2)'], {}), '(ploty * ym_per_pix, right_fitx * xm_per_pix, 2)\n', (1400, 1448), True, 'import numpy as np\n'), ((417, 445), 'numpy.absolute', 'np.absolute', (['(2 * left_fit[0])'], {}), '(2 * left_fit[0])\n', (428, 445), True, 'import numpy as np\n'), ((522, 551), 'numpy.absolute', 'np.absolute', (['(2 * right_fit[0])'], {}), '(2 * right_fit[0])\n', (533, 551), True, 'import numpy as np\n'), ((1601, 1632), 'numpy.absolute', 'np.absolute', (['(2 * left_fit_cr[0])'], {}), '(2 * left_fit_cr[0])\n', (1612, 1632), True, 'import numpy as np\n'), ((1730, 1762), 'numpy.absolute', 'np.absolute', (['(2 * right_fit_cr[0])'], {}), '(2 * right_fit_cr[0])\n', (1741, 1762), True, 'import numpy as np\n')] |
from collections import namedtuple
import re
import glob
import os.path
import numpy as np
import scipy.io.wavfile as wavfile
import scipy.signal as signal
import math
import paths
from minimum_phase import minimum_phase
# Collect all measured HRIR wavs, named L<elev>e<az>a.wav inside per-elevation
# elev* directories under the dataset root.
files = glob.glob(os.path.join(paths.data_path, "elev*", "L*.wav"), recursive=True)
def to_coords(f):
    """Parse one HRIR wav file into (azimuth, elevation, samples).

    The filename encodes the direction as L<elev>e<az>a.wav. The file must be
    sampled at 44100 Hz; the 16-bit PCM samples are normalised to floats.
    On failure the offending filename is printed before re-raising.

    Returns:
        (azimuth_deg, elevation_deg, normalised_samples)
    """
    try:
        bn = os.path.basename(f)
        x = re.match('L(.*)e(.*)a.wav', bn)
        az, elev = int(x[2]), int(x[1])
        wav = wavfile.read(f)
        assert wav[0] == 44100
        data = wav[1]
        data = data / 32768.0
        # Bug fix: previously the raw int16 samples (wav[1]) were returned and
        # the normalisation above was silently discarded.
        return az, elev, data
    except:
        print(f)
        raise
# Group impulse responses by elevation, then by azimuth: d[elev][az] -> samples.
d = {}
for f in files:
    az, elev, data = to_coords(f)
    x = d.get(elev, {})
    x[az] = data
    d[elev] = x
# NOTE(review): the message says "at least 2" but the check requires more
# than 2 elevations — confirm which is intended.
assert len(d) > 2, "Must have at least 2 elevations"
print(f"Have {len(d)} elevations")
elev_angles = sorted(d.keys())
degs_per_elev = elev_angles[1]-elev_angles[0]
print("Checking that elevations are equidistant")
for i in range(len(elev_angles)-1):
    assert elev_angles[i+1]-elev_angles[i] == degs_per_elev, f"Elevation must be equal to {degs_per_elev}"
elev_min = elev_angles[0]
elev_max = elev_angles[-1]
print("Unfolding azimuths")
# Flatten into a list of lists: azimuths[elev_index] = responses sorted by azimuth.
azimuths = []
for e in elev_angles:
    azs = sorted(d[e].keys())
    azs = [d[e][i] for i in azs]
    azimuths.append(azs)
# All (elevation, azimuth) index pairs, used to iterate every response.
indices = [(i, j) for i in range(len(azimuths)) for j in range(len(azimuths[i]))]
print("Building magnitude responses")
for i, j in indices:
    azimuths[i][j] = np.abs(np.fft.fft(azimuths[i][j]))
print("Equalizing power response")
# Average the power spectrum over all directions and divide it out, so the
# responses are flat "on average" (roughly a diffuse-field equalization).
presp = np.zeros(len(azimuths[0][0]), dtype=np.float64)
c = 0
for i, j in indices:
    c += 1
    presp += azimuths[i][j]**2
average_power = presp/c
for i, j in indices:
    azimuths[i][j] = np.sqrt(azimuths[i][j]**2 / average_power)
    # Force unity gain at DC.
    azimuths[i][j][0] = 1.0
print("Clamping responses to be between -60 db and 3 db")
min_gain = 10**(-60/20)
max_gain = 10**(3/20)
print(f"Min {min_gain} max {max_gain}")
for i, j in indices:
    azimuths[i][j] = np.maximum(np.minimum(azimuths[i][j], max_gain), min_gain)
print("Converting to minimum phase")
for i, j in indices:
    azimuths[i][j] = minimum_phase(azimuths[i][j])
hrir_length_final = 32
print(f"Windowing to {hrir_length_final} points")
# We use blackman-harris because the WDL likes it for its resampler, so proceeding under the assumption that it's good enough for us too.
# Take the right half of an odd-length window, so blackman[0] is the peak (1.0).
blackman = signal.blackmanharris(hrir_length_final*2-1)[-hrir_length_final:]
assert len(blackman) == hrir_length_final
assert blackman[0] == 1.0
for i, j in indices:
    azimuths[i][j] = azimuths[i][j][:hrir_length_final]*blackman
# this is the data that we need to write out.
HrtfData = namedtuple("HrtfData", [
    # Number of elevations in the dataset.
    "num_elevs",
    # Increment of the elevation in degrees.
    "elev_increment",
    # Min elevation angle in degrees.
    "elev_min",
    # num_elevs-length list.
    # Holds the azimuth count for each elevation.
    # For now, we assume azimuths are equally distributed.
    "num_azimuths",
    # The azimuths themselves as an array of arrays of arrays.
    "azimuths",
    # Number of data points in the set.
    "impulse_length",
])
# Assemble the final record from the processed responses above.
num_elevs = len(azimuths)
elev_min = min(d.keys())
tmp = sorted(d.keys())
elev_increment = tmp[1]-tmp[0]
num_azs = [len(i) for i in azimuths]
impulse_length = len(azimuths[0][0])
assert impulse_length == hrir_length_final
hrtf_data = HrtfData(
    num_elevs = num_elevs,
    elev_min = elev_min,
    elev_increment = elev_increment,
    num_azimuths = num_azs,
    azimuths = azimuths,
    impulse_length = impulse_length
)
| [
"collections.namedtuple",
"numpy.sqrt",
"numpy.minimum",
"numpy.fft.fft",
"re.match",
"scipy.signal.blackmanharris",
"scipy.io.wavfile.read",
"minimum_phase.minimum_phase"
] | [((2678, 2795), 'collections.namedtuple', 'namedtuple', (['"""HrtfData"""', "['num_elevs', 'elev_increment', 'elev_min', 'num_azimuths', 'azimuths',\n 'impulse_length']"], {}), "('HrtfData', ['num_elevs', 'elev_increment', 'elev_min',\n 'num_azimuths', 'azimuths', 'impulse_length'])\n", (2688, 2795), False, 'from collections import namedtuple\n'), ((1749, 1793), 'numpy.sqrt', 'np.sqrt', (['(azimuths[i][j] ** 2 / average_power)'], {}), '(azimuths[i][j] ** 2 / average_power)\n', (1756, 1793), True, 'import numpy as np\n'), ((2146, 2175), 'minimum_phase.minimum_phase', 'minimum_phase', (['azimuths[i][j]'], {}), '(azimuths[i][j])\n', (2159, 2175), False, 'from minimum_phase import minimum_phase\n'), ((2399, 2447), 'scipy.signal.blackmanharris', 'signal.blackmanharris', (['(hrir_length_final * 2 - 1)'], {}), '(hrir_length_final * 2 - 1)\n', (2420, 2447), True, 'import scipy.signal as signal\n'), ((379, 410), 're.match', 're.match', (['"""L(.*)e(.*)a.wav"""', 'bn'], {}), "('L(.*)e(.*)a.wav', bn)\n", (387, 410), False, 'import re\n'), ((465, 480), 'scipy.io.wavfile.read', 'wavfile.read', (['f'], {}), '(f)\n', (477, 480), True, 'import scipy.io.wavfile as wavfile\n'), ((1494, 1520), 'numpy.fft.fft', 'np.fft.fft', (['azimuths[i][j]'], {}), '(azimuths[i][j])\n', (1504, 1520), True, 'import numpy as np\n'), ((2018, 2054), 'numpy.minimum', 'np.minimum', (['azimuths[i][j]', 'max_gain'], {}), '(azimuths[i][j], max_gain)\n', (2028, 2054), True, 'import numpy as np\n')] |
import itertools as it
import math
from . import base_objs
import gen_basis_helpers.shared.misc_utils as misc
import numpy as np
class BroadenFunctCompositeStandard(base_objs.BroadenFunctionStandard):
	"""Composite broadening function: calling it returns the element-wise sum
	of every child broadening function evaluated at the same x-values."""

	leafObjs = misc.StandardComponentDescriptor("leafObjs")

	def __init__(self, objs:iter):
		""" Initializer for a composite broadening function

		Args:
			objs: (iter of BroadenFunctionBase) Child broadening functions; must be non-empty

		"""
		self.objs = list(objs)
		assert len(self.objs)>0, "Len of iter needs to be greater than zero"

	def __call__(self, xVals):
		"""Evaluate every child on xVals and return the element-wise sum."""
		total = None
		for funct in self.objs:
			yVals = funct(xVals)
			if total is None:
				total = [0 for _ in yVals]
			for idx, yVal in enumerate(yVals):
				total[idx] += yVal
		return total

	@property
	def areas(self):
		"""Areas of every leaf function, concatenated in child order."""
		allAreas = list()
		for funct in self.objs:
			allAreas.extend( funct.areas )
		return allAreas

	@areas.setter
	def areas(self,vals):
		allObjs = self.leafObjs
		assert len(allObjs)==len(vals), "Exacltly one area must be given for each leaf; you gave {} areas for {} leafs".format( len(allObjs),len(vals) )
		for area,obj in it.zip_longest(vals,allObjs):
			obj.areas = [area]
class GauBroadenFunct(base_objs.BroadenFunctionStandard):
	"""Single Gaussian broadening function f(x) = c*exp( -a*(x-x')**2 )."""

	def __init__(self,exp, coeff, centre):
		""" Initializer for a Gaussian function (f(x) = c exp(-a (x-x')^{2} )

		Args:
			exp: (float) Exponent prefactor "a"; must be positive
			coeff: (float) Gaussian prefactor "c"
			centre: (float) Position x' of the Gaussian centre

		"""
		self.exp = exp
		assert self.exp > 0, "Positive exponent required for Gaussian broadening function"
		self.coeff = coeff
		self.centre = centre

	def __call__(self, xVals):
		"""Evaluate the Gaussian at each value in xVals; returns a list."""
		shifted = np.array(xVals, dtype="float64") - float(self.centre)
		yVals = self.coeff * np.exp( -self.exp * (shifted ** 2) )
		return yVals.tolist() #Returning a list keeps callers fast/consistent

	@property
	def areas(self):
		"""Analytic integral: int c*exp(-a t**2) dt = c*sqrt(pi/a)."""
		return [ self.coeff * math.sqrt( math.pi / self.exp ) ]

	@areas.setter
	def areas(self,val):
		assert len(val)==1, "Intensities needs an iter with ONE value, not {}".format(len(val))
		self.coeff = val[0] / math.sqrt( math.pi / self.exp )

	@property
	def positions(self):
		"""Centre of the Gaussian, as a one-element list."""
		return [self.centre]

	@positions.setter
	def positions(self,val):
		assert len(val)==1, "Positions needs an iter with ONE value, not {}".format(len(val))
		self.centre = val[0]

	@property
	def leafObjs(self):
		""" Property used on composite classes to find all leaf-objects. Just returns [self] for a leaf (this class) """
		return [self]
class BoxBroadenFunct(base_objs.BroadenFunctionStandard):
	"""Rectangular (box) broadening function: constant height inside
	[pos-width, pos+width], zero everywhere else."""

	def __init__(self, pos, width, height):
		""" Initializer for box function f(x) = height if pos-width<=x<=pos+width, else 0.0

		Args:
			pos: (float) x-value at which the function is centred
			width: (float) Half-width of the box either side of pos
			height: (float) Value of the function inside the box

		"""
		self._pos = pos
		self._width = width
		self._height = height

	def __call__(self, xVals):
		"""Return the box function evaluated at each x in xVals."""
		lowerEdge = self._pos - self._width
		upperEdge = self._pos + self._width
		return [ self._height if lowerEdge <= x <= upperEdge else 0.0 for x in xVals ]

	@property
	def areas(self):
		""" For box broadening function this actually returns height rather than area """
		return [self._height]

	@areas.setter
	def areas(self, vals):
		assert len(vals) == 1, "areas needs an iter with ONE value, not {}".format(len(vals))
		self._height = vals[0]

	@property
	def positions(self):
		"""Centre of the box, as a one-element list."""
		return [self._pos]

	@positions.setter
	def positions(self, vals):
		assert len(vals) == 1, "positions needs an iter with ONE value, not {}".format(len(vals))
		self._pos = vals[0]

	@property
	def leafObjs(self):
		""" Property used on composite classes to find all leaf-objects. Just returns [self] for a leaf (this class) """
		return [self]
def createNormalisedGauFunctFromCentreAndFWHM(centre, fwhm, area=1.0):
	""" Create a Gaussian broadening function with a given centre, full-width
	at half-maximum and (optionally) total area

	Args:
		centre: (Float) Position of the Gaussian maximum
		fwhm: (Float) Full-width at half-maximum
		area: (Optional, float) Total area under the curve; default is 1.0

	Returns
		gauFunct: (BroadenFunctionBase obj) Callable class; maps an iter of x values to an iter of y values

	"""
	# FWHM = 2*sqrt(2*ln 2) * sigma for a Gaussian
	stdDev = fwhm / ( 2* math.sqrt(math.log(2)*2) )
	normFactor = 1 / (stdDev * math.sqrt(math.pi*2) )
	gauExp = 1 / (2*stdDev*stdDev)
	return GauBroadenFunct(gauExp, normFactor*area, centre)
| [
"itertools.zip_longest",
"gen_basis_helpers.shared.misc_utils.StandardComponentDescriptor",
"math.sqrt",
"math.log",
"numpy.exp",
"numpy.array"
] | [((217, 261), 'gen_basis_helpers.shared.misc_utils.StandardComponentDescriptor', 'misc.StandardComponentDescriptor', (['"""leafObjs"""'], {}), "('leafObjs')\n", (249, 261), True, 'import gen_basis_helpers.shared.misc_utils as misc\n'), ((1318, 1347), 'itertools.zip_longest', 'it.zip_longest', (['vals', 'allObjs'], {}), '(vals, allObjs)\n', (1332, 1347), True, 'import itertools as it\n'), ((1976, 2008), 'numpy.array', 'np.array', (['xVals'], {'dtype': '"""float64"""'}), "(xVals, dtype='float64')\n", (1984, 2008), True, 'import numpy as np\n'), ((2103, 2118), 'numpy.exp', 'np.exp', (['outVals'], {}), '(outVals)\n', (2109, 2118), True, 'import numpy as np\n'), ((2284, 2313), 'math.sqrt', 'math.sqrt', (['(math.pi / self.exp)'], {}), '(math.pi / self.exp)\n', (2293, 2313), False, 'import math\n'), ((2495, 2524), 'math.sqrt', 'math.sqrt', (['(math.pi / self.exp)'], {}), '(math.pi / self.exp)\n', (2504, 2524), False, 'import math\n'), ((4872, 4894), 'math.sqrt', 'math.sqrt', (['(math.pi * 2)'], {}), '(math.pi * 2)\n', (4881, 4894), False, 'import math\n'), ((4830, 4841), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (4838, 4841), False, 'import math\n')] |
from abc import ABC
from dataclasses import asdict, dataclass, field
from typing import Any, Dict, List, Optional, Sequence, Union

import numpy as np
import torch

from lhotse.features.base import FeatureExtractor, register_extractor
from lhotse.utils import EPSILON, Seconds, is_module_available
@dataclass
class KaldifeatFrameOptions:
    """Frame-extraction options mirroring kaldifeat's FrameExtractionOptions.

    Defaults that differ from kaldifeat are marked inline. Serialization
    converts between this class's units (Hz / seconds) and kaldifeat's
    (samp_freq / milliseconds).
    """
    sampling_rate: int = 16000
    frame_shift: Seconds = 0.01
    frame_length: Seconds = 0.025
    dither: float = 0.0  # default was 1.0
    preemph_coeff: float = 0.97
    remove_dc_offset: bool = True
    window_type: str = "povey"
    round_to_power_of_two: bool = True
    blackman_coeff: float = 0.42
    snip_edges: bool = False  # default was True (won't work with Lhotse)
    def to_dict(self) -> Dict[str, Any]:
        """Serialize using kaldifeat's field names/units (samp_freq as float,
        frame shift/length in milliseconds)."""
        d = asdict(self)
        d["samp_freq"] = float(d.pop("sampling_rate"))
        d["frame_shift_ms"] = d.pop("frame_shift") * 1000.0
        d["frame_length_ms"] = d.pop("frame_length") * 1000.0
        return d
    @staticmethod
    def from_dict(data: Dict[str, Any]) -> "KaldifeatFrameOptions":
        """Inverse of to_dict; also accepts already-converted keys.
        The caller's dict is not modified (a copy is taken)."""
        data = data.copy()
        if "samp_freq" in data:
            data["sampling_rate"] = int(data.pop("samp_freq"))
        for key in ["frame_shift_ms", "frame_length_ms"]:
            if key in data:
                data[key.replace("_ms", "")] = data.pop(key) / 1000
        return KaldifeatFrameOptions(**data)
@dataclass
class KaldifeatMelOptions:
    """Mel filterbank options mirroring kaldifeat's MelBanksOptions.

    Defaults that differ from kaldifeat are marked inline; negative
    high_freq / vtln_high are offsets from the Nyquist frequency
    (kaldifeat convention).
    """
    num_bins: int = 80  # default was 23
    low_freq: float = 20.0
    high_freq: float = -400.0  # default was 0.0
    vtln_low: float = 100.0
    vtln_high: float = -500.0
    debug_mel: bool = False
    htk_mode: bool = False
    def to_dict(self) -> Dict[str, Any]:
        """Serialize all fields to a plain dict (no unit conversion needed)."""
        return asdict(self)
    @staticmethod
    def from_dict(data: Dict[str, Any]) -> "KaldifeatMelOptions":
        """Rebuild the options from a dict produced by to_dict."""
        return KaldifeatMelOptions(**data)
class KaldifeatExtractor(FeatureExtractor, ABC):
    """
    Base class with shared implementation for kaldifeat feature extractors.
    Derived classes are expected to set ``self.extractor`` inside ``__init__``.
    """
    def __init__(self, config: Optional[Any] = None) -> None:
        super().__init__(config=config)
        # Fail fast if the optional kaldifeat dependency is missing.
        assert is_module_available(
            "kaldifeat"
        ), 'To use kaldifeat extractors, please "pip install kaldifeat" first.'
    @property
    def device(self) -> Union[str, torch.device]:
        # Device on which kaldifeat computes the features (from the config).
        return self.config.device
    def extract_batch(
        self,
        samples: Union[
            np.ndarray, torch.Tensor, Sequence[np.ndarray], Sequence[torch.Tensor]
        ],
        sampling_rate: int,
    ) -> Union[np.ndarray, torch.Tensor, List[np.ndarray], List[torch.Tensor]]:
        # Batch extraction is identical to single extraction here, since
        # ``extract`` already accepts lists/batches of samples.
        return self.extract(samples=samples, sampling_rate=sampling_rate)
    def extract(
        self,
        samples: Union[
            np.ndarray, torch.Tensor, Sequence[np.ndarray], Sequence[torch.Tensor]
        ],
        sampling_rate: int,
    ) -> Union[np.ndarray, torch.Tensor, List[np.ndarray], List[torch.Tensor]]:
        # Check for sampling rate compatibility.
        expected_sr = self.config.frame_opts.sampling_rate
        assert sampling_rate == expected_sr, (
            f"Mismatched sampling rate: extractor expects {expected_sr}, "
            f"got {sampling_rate}"
        )
        # kaldifeat expects a list of 1D torch tensors.
        # If we got a torch tensor / list of torch tensors in the input,
        # we'll also return torch tensors. If we got numpy arrays, we
        # will convert back to numpy.
        maybe_as_numpy = lambda x: x
        input_is_list = False
        if isinstance(samples, list):
            # NOTE(review): numpy entries below are converted to torch
            # *in place* inside the caller's list — confirm this side
            # effect is acceptable.
            input_is_list = True
            pass  # nothing to do with `samples`
        elif samples.ndim > 1:
            samples = list(samples)
        else:
            # The user passed an array/tensor of shape (num_samples,)
            samples = [samples]
        for idx in range(len(samples)):
            if isinstance(samples[idx], np.ndarray):
                samples[idx] = torch.from_numpy(samples[idx])
                # If any input was numpy, ALL outputs are converted back to
                # numpy (the lambda is rebound for the whole result).
                maybe_as_numpy = lambda x: x.numpy()
            if samples[idx].ndim == 2:
                # ndim could be > 1 if the input is a list of arrays/tensors.
                samples[idx] = samples[idx].squeeze()
        # Actual feature extraction.
        result = self.extractor(samples, chunk_size=self.config.chunk_size)
        # If all items are of the same shape, concatenate
        if len(result) == 1:
            # Preserve list-in/list-out symmetry for single-item inputs.
            if input_is_list:
                return [maybe_as_numpy(result[0])]
            else:
                return maybe_as_numpy(result[0])
        elif all(item.shape == result[0].shape for item in result[1:]):
            return maybe_as_numpy(torch.stack(result, dim=0))
        else:
            # Ragged outputs: return a list, one feature matrix per input.
            return [maybe_as_numpy(r) for r in result]
    @property
    def frame_shift(self) -> Seconds:
        # Hop size in seconds, taken from the frame options.
        return self.config.frame_opts.frame_shift
@dataclass
class KaldifeatFbankConfig:
    """Configuration for :class:`KaldifeatFbank` (log Mel filter banks)."""
    # Use default_factory so every config instance gets its OWN options
    # objects; a plain `= KaldifeatFrameOptions()` default is a single
    # instance shared (and mutable) across all configs.
    frame_opts: KaldifeatFrameOptions = field(default_factory=KaldifeatFrameOptions)
    mel_opts: KaldifeatMelOptions = field(default_factory=KaldifeatMelOptions)
    use_energy: bool = False
    energy_floor: float = EPSILON  # default was 0.0
    raw_energy: bool = True
    htk_compat: bool = False
    use_log_fbank: bool = True
    use_power: bool = True
    device: Union[str, torch.device] = "cpu"
    # This is an extra setting compared to kaldifeat FbankOptions:
    # by default, we'll ask kaldifeat to compute the feats in chunks
    # to avoid excessive memory usage.
    chunk_size: Optional[int] = 1000

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, with nested options flattened to dicts."""
        d = asdict(self)
        d["frame_opts"] = self.frame_opts.to_dict()
        d["mel_opts"] = self.mel_opts.to_dict()
        return d

    @staticmethod
    def from_dict(data: Dict[str, Any]) -> "KaldifeatFbankConfig":
        """Inverse of :meth:`to_dict`. Does not mutate the caller's dict."""
        data = data.copy()  # consistent with KaldifeatFrameOptions.from_dict
        frame_opts = KaldifeatFrameOptions.from_dict(data.pop("frame_opts"))
        mel_opts = KaldifeatMelOptions.from_dict(data.pop("mel_opts"))
        return KaldifeatFbankConfig(frame_opts=frame_opts, mel_opts=mel_opts, **data)
@register_extractor
class KaldifeatFbank(KaldifeatExtractor):
    """Log Mel energy filter bank feature extractor based on ``kaldifeat`` package."""
    name = "kaldifeat-fbank"
    config_type = KaldifeatFbankConfig
    def __init__(self, config: Optional[KaldifeatFbankConfig] = None) -> None:
        super().__init__(config)
        # Imported lazily so kaldifeat is only required when actually used.
        import kaldifeat
        self.extractor = kaldifeat.Fbank(
            kaldifeat.FbankOptions.from_dict(self.config.to_dict())
        )
    def feature_dim(self, sampling_rate: int) -> int:
        # One output dimension per Mel bin; independent of the sampling rate.
        return self.config.mel_opts.num_bins
    @staticmethod
    def mix(
        features_a: np.ndarray, features_b: np.ndarray, energy_scaling_factor_b: float
    ) -> np.ndarray:
        # Mix in the linear-energy domain: exponentiate the log-fbank values,
        # add (with b scaled), then return to the log domain.
        return np.log(
            np.maximum(
                # protection against log(0); max with EPSILON is adequate since these are energies (always >= 0)
                EPSILON,
                np.exp(features_a) + energy_scaling_factor_b * np.exp(features_b),
            )
        )
    @staticmethod
    def compute_energy(features: np.ndarray) -> float:
        # Total energy is the sum of per-bin linear energies.
        return float(np.sum(np.exp(features)))
@dataclass
class KaldifeatMfccConfig:
    """Configuration for :class:`KaldifeatMfcc` (MFCC features)."""
    # default_factory gives each config its own options instances; a shared
    # class-level default instance would leak mutations across configs.
    frame_opts: KaldifeatFrameOptions = field(default_factory=KaldifeatFrameOptions)
    mel_opts: KaldifeatMelOptions = field(
        default_factory=lambda: KaldifeatMelOptions(num_bins=23)
    )
    num_ceps: int = 13
    use_energy: bool = False
    energy_floor: float = EPSILON  # default was 0.0
    raw_energy: bool = True
    cepstral_lifter: float = 22.0
    htk_compat: bool = False
    device: Union[str, torch.device] = "cpu"
    # This is an extra setting compared to kaldifeat FbankOptions:
    # by default, we'll ask kaldifeat to compute the feats in chunks
    # to avoid excessive memory usage.
    chunk_size: Optional[int] = 1000

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, with nested options flattened to dicts."""
        d = asdict(self)
        d["frame_opts"] = self.frame_opts.to_dict()
        d["mel_opts"] = self.mel_opts.to_dict()
        return d

    @staticmethod
    def from_dict(data: Dict[str, Any]) -> "KaldifeatMfccConfig":
        """Inverse of :meth:`to_dict`. Does not mutate the caller's dict."""
        data = data.copy()  # consistent with KaldifeatFrameOptions.from_dict
        frame_opts = KaldifeatFrameOptions.from_dict(data.pop("frame_opts"))
        mel_opts = KaldifeatMelOptions.from_dict(data.pop("mel_opts"))
        return KaldifeatMfccConfig(frame_opts=frame_opts, mel_opts=mel_opts, **data)
@register_extractor
class KaldifeatMfcc(KaldifeatExtractor):
    """MFCC feature extractor based on ``kaldifeat`` package."""
    name = "kaldifeat-mfcc"
    config_type = KaldifeatMfccConfig
    def __init__(self, config: Optional[KaldifeatMfccConfig] = None) -> None:
        super().__init__(config)
        # Imported lazily so kaldifeat is only required when actually used.
        import kaldifeat
        self.extractor = kaldifeat.Mfcc(
            kaldifeat.MfccOptions.from_dict(self.config.to_dict())
        )
    def feature_dim(self, sampling_rate: int) -> int:
        # Number of cepstral coefficients; independent of the sampling rate.
        return self.config.num_ceps
| [
"dataclasses.asdict",
"torch.stack",
"torch.from_numpy",
"numpy.exp",
"lhotse.utils.is_module_available"
] | [((770, 782), 'dataclasses.asdict', 'asdict', (['self'], {}), '(self)\n', (776, 782), False, 'from dataclasses import asdict, dataclass\n'), ((1712, 1724), 'dataclasses.asdict', 'asdict', (['self'], {}), '(self)\n', (1718, 1724), False, 'from dataclasses import asdict, dataclass\n'), ((2194, 2226), 'lhotse.utils.is_module_available', 'is_module_available', (['"""kaldifeat"""'], {}), "('kaldifeat')\n", (2213, 2226), False, 'from lhotse.utils import EPSILON, Seconds, is_module_available\n'), ((5606, 5618), 'dataclasses.asdict', 'asdict', (['self'], {}), '(self)\n', (5612, 5618), False, 'from dataclasses import asdict, dataclass\n'), ((7869, 7881), 'dataclasses.asdict', 'asdict', (['self'], {}), '(self)\n', (7875, 7881), False, 'from dataclasses import asdict, dataclass\n'), ((4023, 4053), 'torch.from_numpy', 'torch.from_numpy', (['samples[idx]'], {}), '(samples[idx])\n', (4039, 4053), False, 'import torch\n'), ((7169, 7185), 'numpy.exp', 'np.exp', (['features'], {}), '(features)\n', (7175, 7185), True, 'import numpy as np\n'), ((4734, 4760), 'torch.stack', 'torch.stack', (['result'], {'dim': '(0)'}), '(result, dim=0)\n', (4745, 4760), False, 'import torch\n'), ((6976, 6994), 'numpy.exp', 'np.exp', (['features_a'], {}), '(features_a)\n', (6982, 6994), True, 'import numpy as np\n'), ((7023, 7041), 'numpy.exp', 'np.exp', (['features_b'], {}), '(features_b)\n', (7029, 7041), True, 'import numpy as np\n')] |
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any, Callable, Dict, Tuple
import numpy as np
from dppy.finite_dpps import FiniteDPP
from scipydirect import minimize
from .acquisition import (
AcquisitionFunction,
OneShotBatchAcquisitionFunction,
SequentialBatchAcquisitionFunction,
)
from .bounds import Bounds
@dataclass
class OptimizationResult:
    """Optimization result.

    Plain data container produced by :meth:`Optimizer.optimize`.

    Parameters
    ----------
    x_min : np.ndarray of shape (batch_size, n_dimensions)
        The argmin.
    f_min : np.ndarray of shape (batch_size,)
        The min.
    """
    x_min: np.ndarray
    f_min: np.ndarray
class Optimizer(ABC):
    """An acquisition function Optimizer.
    Optimizers find the minimum of a given acquisition function. This minimum
    is then used as the next query location of the objective function.
    Parameters
    ----------
    acquisition_function : AcquisitionFunction
        The acquisition function.
    bounds : Bounds
        The parameter bounds.
    """
    def __init__(self, acquisition_function: AcquisitionFunction, bounds: Bounds):
        self.acquisition_function = acquisition_function
        self.bounds = bounds
    def optimize(self) -> OptimizationResult:
        """Optimize an acquisition function.
        Optimizes the `acquisition_function` over the `surrogate` model,
        within the `bounds`.
        Returns
        -------
        optimization_result: OptimizationResult
            The result of optimization.
        """
        # Template method: subclasses implement _optimize; this wrapper
        # packages the raw arrays into an OptimizationResult.
        x_min, f_min = self._optimize()
        return OptimizationResult(x_min=x_min, f_min=f_min)
    @abstractmethod
    def _optimize(self) -> Tuple[np.ndarray, np.ndarray]:
        """Optimize an acquisition function.
        Returns
        -------
        (x_min, f_min) : Tuple[np.ndarray, np.ndarray]
            The argmin and min arrays (see OptimizationResult).
        """
class DirectOptimizer(Optimizer):
    """Direct acquisition function Optimizer.
    This is a wrapper around the DIRECT global optimizer. Specifically,
    we use the scipydirect implementation.
    Parameters
    ----------
    acquisition_function : AcquisitionFunction
        The acquisition function.
    bounds : Bounds
        The parameter bounds.
    direct_kwargs : Dict[str, Any]
        Kwargs passed to scipydirect.minimize.
    """
    def __init__(
        self,
        acquisition_function: AcquisitionFunction,
        bounds: Bounds,
        **direct_kwargs: Dict[str, Any]
    ):
        super().__init__(acquisition_function, bounds)
        self.direct_kwargs = direct_kwargs
    def _optimize(self) -> Tuple[np.ndarray, np.ndarray]:
        def objective(x):
            # scipydirect passes a flat parameter vector; the acquisition
            # function expects a 2-D batch of shape (n_points, n_dims).
            return self.acquisition_function(x.reshape(1, -1))
        res = minimize(
            objective,
            bounds=list(zip(self.bounds.lowers, self.bounds.uppers)),
            **self.direct_kwargs
        )
        x_min = res.x
        f_min = res.fun
        # Wrap as batch-of-one arrays to match the Optimizer contract.
        return np.array([x_min]), np.array([f_min])
class SequentialBatchOptimizer(Optimizer):
    """Sequential Batch Optimizer.
    This is a batch optimizer that selects a batch by sequentially selecting
    points from a SequentialBatchAcquisitionFunction. This proceeds by
    repeatedly optimizing then updating said acquisition function.
    Parameters
    ----------
    acquisition_function : SequentialBatchAcquisitionFunction
        The sequential batch acquisition function to be optimized.
    bounds : Bounds
        The parameter bounds.
    base_optimizer : Optimizer
        The underlying optimizer used to optimize the acquisition
        function.
    batch_size : int
        The size of the batch.
    """
    def __init__(
        self,
        acquisition_function: SequentialBatchAcquisitionFunction,
        bounds: Bounds,
        base_optimizer: Optimizer,
        batch_size: int,
    ):
        super().__init__(acquisition_function, bounds)
        self.base_optimizer = base_optimizer
        self.batch_size = batch_size
        self.x_mins = []
        self.f_mins = []
    def _optimize(self) -> Tuple[np.ndarray, np.ndarray]:
        # Greedily pick one point at a time, letting the acquisition function
        # condition on the points already chosen.
        self.start_batch()
        self.acquisition_function.start_batch()
        for _ in range(self.batch_size):
            res = self.base_optimizer.optimize()
            self.add_to_batch(res)
            self.acquisition_function.add_to_batch(res)
        self.acquisition_function.finish_batch()
        return self.get_batch()
    def start_batch(self) -> None:
        """Prepare to start creating a batch."""
        self.x_mins = []
        self.f_mins = []
    def add_to_batch(self, optimization_result: OptimizationResult) -> None:
        """Add the newly selected point to the batch."""
        self.x_mins.append(optimization_result.x_min)
        self.f_mins.append(optimization_result.f_min)
    def get_batch(self) -> Tuple[np.ndarray, np.ndarray]:
        """Get the resulting batch.
        Returns
        -------
        (x_mins, f_mins) : Tuple[np.ndarray, np.ndarray]
            The concatenated argmins and mins of all selected points.
            (Fix: this was previously annotated ``-> None`` although the
            method returns a tuple of arrays.)
        """
        return np.concatenate(self.x_mins), np.concatenate(self.f_mins)
class OneShotBatchOptimizerStrategy(ABC):
    """One-shot Batch Optimizer Strategy.

    Strategies implement a `select` method for selecting a batch of
    trial locations given all of the evaluations of an aquisition
    function during a single pass of global optimization.
    """
    @abstractmethod
    def select(
        self, x: np.ndarray, a_x: np.ndarray, batch_size: int
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Select a batch of `batch_size` points from evaluations (x, a_x)."""
        # Abstract: concrete strategies (random / k-DPP sampling) override this.
        raise NotImplementedError
class OneShotBatchOptimizerRandomSamplingStrategy(OneShotBatchOptimizerStrategy):
    """One-shot Batch Optimizer Random Sampling Strategy.

    The random sampling strategy simply randomly samples a subset of the
    acquistion function evaluations.
    """
    def select(
        self, x: np.ndarray, a_x: np.ndarray, batch_size: int
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Select a batch of points by random sampling.

        Samples *without* replacement whenever enough evaluations are
        available, so the returned batch consists of distinct points
        (previously the default with-replacement sampling could return
        duplicates). Falls back to sampling with replacement only when
        `batch_size` exceeds the number of evaluations.
        """
        replace = batch_size > len(x)
        indices = np.random.choice(len(x), size=batch_size, replace=replace)
        return x[indices], a_x[indices]
class OneShotBatchOptimizerKDPPSamplingStrategy(OneShotBatchOptimizerStrategy):
    """One-shot Batch Optimizer k-DPP Sampling Strategy.

    Samples a subset of the acquisition function evaluations from a k-DPP,
    which encourages diversity among the selected points.

    Parameters
    ----------
    kernel : Callable[[np.ndarray], np.ndarray]
        The kernel to compute the likelihood matrix for the dpp.
    alpha : float
        Small constant added to the diagonal of the likelihood matrix,
        by defaul 1e-5.
    """
    def __init__(self, kernel: Callable[[np.ndarray], np.ndarray], alpha: float = 1e-5):
        super().__init__()
        self.kernel = kernel
        self.alpha = alpha
    def select(
        self, x: np.ndarray, a_x: np.ndarray, batch_size: int
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Select a batch of points by sampling from a k-dpp."""
        # Diagonal jitter keeps the likelihood matrix well-conditioned.
        jitter = self.alpha * np.eye(len(x))
        likelihood_matrix = self.kernel(x) + jitter
        dpp = FiniteDPP("likelihood", L=likelihood_matrix)
        dpp.sample_exact_k_dpp(size=batch_size)
        chosen = dpp.list_of_samples[0]
        return x[chosen], a_x[chosen]
class OneShotBatchOptimizer(Optimizer):
    """One-shot Batch Optimizer.

    Selects a whole batch from a single global optimization pass: the
    `base_optimizer` optimizes the `acquisition_function` (which records
    every evaluation along the way), then a `strategy` picks the batch
    from those recorded evaluations.

    Parameters
    ----------
    acquisition_function : OneShotBatchAcquisitionFunction
        A one-shot batch acquisition function.
    bounds : Bounds
        The parameter bounds.
    base_optimizer : Optimizer
        The base optimizer that runs global optimization of the acquisition_function.
    batch_size : int
        The size of the batch.
    strategy : OneShotBatchOptimizerStrategy
        The strategy used to select a batch of points given all the evaluations
        during a global optimization of the acquisition function.
    """
    def __init__(
        self,
        acquisition_function: OneShotBatchAcquisitionFunction,
        bounds: Bounds,
        base_optimizer: Optimizer,
        batch_size: int,
        strategy: OneShotBatchOptimizerStrategy,
    ):
        super().__init__(acquisition_function, bounds)
        self.base_optimizer = base_optimizer
        self.batch_size = batch_size
        self.strategy = strategy
    def _optimize(self) -> Tuple[np.ndarray, np.ndarray]:
        # One global pass records all evaluations; the strategy then picks
        # the batch from them.
        self.acquisition_function.start_optimization()
        self.base_optimizer.optimize()
        all_x, all_ax = self.acquisition_function.get_evaluations()
        return self.strategy.select(all_x, all_ax, self.batch_size)
| [
"numpy.array",
"numpy.concatenate",
"dppy.finite_dpps.FiniteDPP"
] | [((6859, 6896), 'dppy.finite_dpps.FiniteDPP', 'FiniteDPP', (['"""likelihood"""'], {'L': 'likelihood'}), "('likelihood', L=likelihood)\n", (6868, 6896), False, 'from dppy.finite_dpps import FiniteDPP\n'), ((2845, 2862), 'numpy.array', 'np.array', (['[x_min]'], {}), '([x_min])\n', (2853, 2862), True, 'import numpy as np\n'), ((2864, 2881), 'numpy.array', 'np.array', (['[f_min]'], {}), '([f_min])\n', (2872, 2881), True, 'import numpy as np\n'), ((4804, 4831), 'numpy.concatenate', 'np.concatenate', (['self.x_mins'], {}), '(self.x_mins)\n', (4818, 4831), True, 'import numpy as np\n'), ((4833, 4860), 'numpy.concatenate', 'np.concatenate', (['self.f_mins'], {}), '(self.f_mins)\n', (4847, 4860), True, 'import numpy as np\n')] |
"""main server script
will sit onboard host and operate as Nebula --- its dynamic soul"""
# --------------------------------------------------
#
# Embodied AI Engine Prototype v0.10
# 2021/01/25
#
# © <NAME> 2020
# <EMAIL>
#
# Dedicated to <NAME>
#
# --------------------------------------------------
from random import randrange
from time import time
from tensorflow.keras.models import load_model
import pyaudio
import numpy as np
import concurrent.futures
from random import random
from time import sleep
from pydub import AudioSegment
from pydub.playback import play
# --------------------------------------------------
#
# instantiate an object for each neural net
#
# --------------------------------------------------
# v4 models were trained with 1st batch of Blue Haze datasets
class MoveRNN:
    """Wrapper around the pre-trained skeleton-movement RNN model."""
    def __init__(self):
        print('MoveRNN initialization')
        self.move_rnn = load_model('models/EMR-v4_RNN_skeleton_data.nose.x.h5')
    def predict(self, in_val):
        """Predict from ``in_val``; cache the result on ``self.pred`` and return it."""
        prediction = self.move_rnn.predict(in_val)
        self.pred = prediction
        return prediction
class AffectRNN:
    """Wrapper around the pre-trained bitalino affect RNN model."""
    def __init__(self):
        print('AffectRNN initialization')
        self.affect_rnn = load_model('models/EMR-v4_RNN_bitalino.h5')
    def predict(self, in_val):
        """Predict from ``in_val``; cache the result on ``self.pred`` and return it."""
        prediction = self.affect_rnn.predict(in_val)
        self.pred = prediction
        return prediction
class MoveAffectCONV2:
    """Wrapper around the pre-trained move-to-affect Conv2D model."""
    def __init__(self):
        print('MoveAffectCONV2 initialization')
        self.move_affect_conv2 = load_model('models/EMR-v4_conv2D_move-affect.h5')
    def predict(self, in_val):
        """Predict from ``in_val``; cache the result on ``self.pred`` and return it."""
        prediction = self.move_affect_conv2.predict(in_val)
        self.pred = prediction
        return prediction
class AffectMoveCONV2:
    """Wrapper around the pre-trained affect-to-move Conv2D model."""
    def __init__(self):
        print('AffectMoveCONV2 initialization')
        self.affect_move_conv2 = load_model('models/EMR-v4_conv2D_affect-move.h5')
    def predict(self, in_val):
        """Predict from ``in_val``; cache the result on ``self.pred`` and return it."""
        prediction = self.affect_move_conv2.predict(in_val)
        self.pred = prediction
        return prediction
# --------------------------------------------------
#
# controls all thought-trains and affect responses
#
# --------------------------------------------------
class AiDataEngine():
    """Controls all thought-trains and affect responses.

    Holds a shared ``datadict`` that the neural nets read from and write to,
    plus the affect loop that decides which stream drives ``master_output``.
    ``make_data`` and ``affect`` are infinite loops intended to run on
    their own threads (see Client.main).
    """
    def __init__(self, speed=1):
        print('building engine server')
        self.interrupt_bang = False
        # self.running = False
        # self.PORT = 8000
        # self.IP_ADDR = "127.0.0.1"
        self.global_speed = speed
        self.rnd_stream = 0
        # make a default dict for the engine
        self.datadict = {'move_rnn': 0,
                         'affect_rnn': 0,
                         'move_affect_conv2': 0,
                         'affect_move_conv2': 0,
                         'master_output': 0,
                         'user_in': 0,
                         'rnd_poetry': 0,
                         'rhythm_rnn': 0,
                         'affect_net': 0,
                         'self_awareness': 0,
                         'affect_decision': 0,
                         'rhythm_rate': 0.1}
        # name list for nets; index into this list selects a datadict key
        self.netnames = ['move_rnn',
                         'affect_rnn',
                         'move_affect_conv2',
                         'affect_move_conv2',
                         'self_awareness',  # Net name for self-awareness
                         'master_output']  # input for self-awareness
        # names for affect listening
        self.affectnames = ['user_in',
                            'rnd_poetry',
                            'affect_net',
                            'self_awareness']
        self.rhythm_rate = 0.1
        self.affect_listen = 0
        # fill with random values
        self.dict_fill()
        print(self.datadict)
        # instantiate nets as objects and make models
        self.move_net = MoveRNN()
        self.affect_net = AffectRNN()
        self.move_affect_net = MoveAffectCONV2()
        self.affect_move_net = AffectMoveCONV2()
        self.affect_perception = MoveAffectCONV2()
        # logging on/off switches
        self.net_logging = False
        self.master_logging = False
        self.streaming_logging = False
        self.affect_logging = False
    # --------------------------------------------------
    #
    # prediction and rnd num gen zone
    #
    # --------------------------------------------------
    # makes a prediction for a given net and defined input var
    def make_data(self):
        """Endless loop: feed each net, write predictions back into datadict."""
        while True:
            # calc rhythmic intensity based on self-awareness factor & global speed
            intensity = self.datadict.get('self_awareness')
            self.rhythm_rate = (self.rhythm_rate * intensity) * self.global_speed
            self.datadict['rhythm_rate'] = self.rhythm_rate
            # get input vars from dict (NB not always self)
            in_val1 = self.get_in_val(0)  # move RNN as input
            in_val2 = self.get_in_val(1)  # affect RNN as input
            in_val3 = self.get_in_val(2)  # move - affect as input
            in_val4 = self.get_in_val(1)  # affect RNN as input
            # send in vals to net object for prediction
            pred1 = self.move_net.predict(in_val1)
            pred2 = self.affect_net.predict(in_val2)
            pred3 = self.move_affect_net.predict(in_val3)
            pred4 = self.affect_move_net.predict(in_val4)
            # special case for self awareness stream
            self_aware_input = self.get_in_val(5)  # main movement as input
            self_aware_pred = self.affect_perception.predict(self_aware_input)
            if self.net_logging:
                print(f" 'move_rnn' in: {in_val1} predicted {pred1}")
                print(f" 'affect_rnn' in: {in_val2} predicted {pred2}")
                print(f" move_affect_conv2' in: {in_val3} predicted {pred3}")
                print(f" 'affect_move_conv2' in: {in_val4} predicted {pred4}")
                print(f" 'self_awareness' in: {self_aware_input} predicted {self_aware_pred}")
            # put predictions back into the dicts and master
            self.put_pred(0, pred1)
            self.put_pred(1, pred2)
            self.put_pred(2, pred3)
            self.put_pred(3, pred4)
            self.put_pred(4, self_aware_pred)
            # outputs a stream of random poetry
            # NOTE(review): the logged value and the stored 'rnd_poetry' are two
            # *different* random draws — presumably they were meant to be the
            # same value; confirm intent before relying on the log.
            rnd_poetry = random()
            self.datadict['rnd_poetry'] = random()
            if self.streaming_logging:
                print(f'random poetry = {rnd_poetry}')
            sleep(self.rhythm_rate)
    # function to get input value for net prediction from dictionary
    def get_in_val(self, which_dict):
        """Fetch datadict[netnames[which_dict]] reshaped to (1, 1, 1) for the nets."""
        # get the current value and reshape ready for input for prediction
        input_val = self.datadict.get(self.netnames[which_dict])
        input_val = np.reshape(input_val, (1, 1, 1))
        return input_val
    # function to put prediction value from net into dictionary
    def put_pred(self, which_dict, pred):
        """Store one randomly-chosen prediction value into datadict and master_output."""
        # randomly chooses one of the 4 predicted outputs
        out_pred_val = pred[0][randrange(4)]
        if self.master_logging:
            print(f"out pred val == {out_pred_val}, master move output == {self.datadict['master_output']}")
        # save to data dict and master move out ONLY 1st data
        self.datadict[self.netnames[which_dict]] = out_pred_val
        self.datadict['master_output'] = out_pred_val
    # fills the dictionary with rnd values for each key of data dictionary
    def dict_fill(self):
        """Overwrite every datadict entry with a fresh random value in [0, 1)."""
        for key in self.datadict.keys():
            rnd = random()
            self.datadict[key] = rnd
    # --------------------------------------------------
    #
    # affect and streaming methods
    #
    # --------------------------------------------------
    # define which feed to listen to, and duration
    # and a course of affect response
    def affect(self):
        """Endless loop: pick an input stream, copy it to master_output,
        and react when its level crosses the affect thresholds."""
        # daddy cycle = is the master running on?
        while True:
            if self.affect_logging:
                print('\t\t\t\t\t\t\t\t=========HIYA - DADDY cycle===========')
            # flag for breaking on big affect signal
            self.interrupt_bang = True
            # calc master cycle before a change
            master_cycle = randrange(6, 26) * self.global_speed
            loop_dur = time() + master_cycle
            if self.affect_logging:
                print(f" interrupt_listener: started! sleeping now for {loop_dur}...")
            # refill the dicts?????
            self.dict_fill()
            # child cycle - waiting for interrupt from master clock
            while time() < loop_dur:
                if self.affect_logging:
                    print('\t\t\t\t\t\t\t\t=========Hello - child cycle 1 ===========')
                # if a major break out then go to Daddy cycle
                if not self.interrupt_bang:
                    break
                # randomly pick an input stream for this cycle
                rnd = randrange(4)
                self.rnd_stream = self.affectnames[rnd]
                self.datadict['affect_decision'] = rnd
                print(self.rnd_stream)
                if self.affect_logging:
                    print(self.rnd_stream)
                # hold this stream for 1-4 secs, unless interrupt bang
                end_time = time() + (randrange(1000, 4000) / 1000)
                if self.affect_logging:
                    print('end time = ', end_time)
                # baby cycle 2 - own time loops
                while time() < end_time:
                    if self.affect_logging:
                        print('\t\t\t\t\t\t\t\t=========Hello - baby cycle 2 ===========')
                    # go get the current value from dict
                    affect_listen = self.datadict[self.rnd_stream]
                    if self.affect_logging:
                        print('current value =', affect_listen)
                    # make the master output the current value of the stream
                    self.datadict['master_output'] = affect_listen
                    if self.master_logging:
                        print(f'\t\t ============== master move output = {affect_listen}')
                    # calc affect on behaviour
                    # if input stream is LOUD then smash a random fill and break out to Daddy cycle...
                    if affect_listen > 0.50:
                        if self.affect_logging:
                            print('interrupt > HIGH !!!!!!!!!')
                        # A - refill dict with random
                        self.dict_fill()
                        # B - jumps out of this loop into daddy
                        self.interrupt_bang = False
                        if self.affect_logging:
                            print('interrupt bang = ', self.interrupt_bang)
                        # C break out of this loop, and next (cos of flag)
                        break
                    # if middle loud fill dict with random, all processes norm
                    # NOTE(review): values in (0.49, 0.50] fall through all three
                    # branches untouched — confirm the 0.49 bound is intentional.
                    elif 0.20 < affect_listen < 0.49:
                        if self.affect_logging:
                            print('interrupt MIDDLE -----------')
                            print('interrupt bang = ', self.interrupt_bang)
                        # refill dict with random
                        self.dict_fill()
                    elif affect_listen <= 0.20:
                        if self.affect_logging:
                            print('interrupt LOW_______________')
                            print('interrupt bang = ', self.interrupt_bang)
                    # and wait for a cycle
                    sleep(self.rhythm_rate)
    def parse_got_dict(self, got_dict):
        """Absorb the client-side dict: mic level, global speed and tempo."""
        self.datadict['user_in'] = got_dict['mic_level']
        # user change the overall speed of the engine
        self.global_speed = got_dict['speed']
        # user change tempo of outputs and parsing
        self.rhythm_rate = got_dict['tempo']
    # # stop start methods
    # def go(self):
    # # self.running = True
    # trio.run(self.flywheel)
    # print('I got here daddy')
    def quit(self):
        """Signal shutdown.

        NOTE(review): sets ``self.running`` but __init__ never initializes it
        (the assignment there is commented out) and no loop checks it — verify
        this actually stops anything.
        """
        self.running = False
"""main client script
controls microphone stream and organise all audio responses"""
class Client:
    """Client side of the system.

    Owns the microphone stream, the audio libraries, the (optional) robot
    hardware, and an embedded AiDataEngine. ``main`` fans all the endless
    loops out onto a thread pool.
    """
    def __init__(self, library):
        # library: 'jazz' or 'pop' — selects which audio files are loaded
        self.running = True
        self.connected = False
        self.logging = False
        # is the robot connected
        self.robot_connected = True
        self.direction = 1
        if self.robot_connected:
            # import robot scripts (deferred so a robot-less run never needs them)
            from arm.arm import Arm
            from robot.rerobot import Robot
            # instantiate arm comms
            self.arm_arm = Arm()
            # self.robot_robot.reset_arm()
            # prepare for movement
            # LED's ready for drawing
            self.arm_arm.led_blue()
            # get arm into draw mode
            self.arm_arm.draw_mode_status = True
            self.arm_arm.first_draw_move = True
            self.arm_arm.pen_drawing_status = False
            # goto position
            self.arm_arm.arm_reach_out()
            # instantiate robot comms
            self.robot_robot = Robot()
            # move gripper arm up
            for n in range(12):
                self.robot_robot.gripper_up()
        # NOTE(review): any library value other than 'jazz'/'pop' leaves the
        # audio_file_* attributes unset and the lines below will raise.
        if library == 'jazz':
            self.audio_file_sax = AudioSegment.from_mp3('assets/alfie.mp3')
            self.audio_file_bass = AudioSegment.from_mp3('assets/bass.mp3') + 4
        elif library == 'pop':
            self.audio_file_sax = AudioSegment.from_wav('assets/vocals.wav')
            self.audio_file_bass = AudioSegment.from_wav('assets/accompaniment.wav')
        # robot instrument vars
        # globs for sax
        self.pan_law_sax = -0.5
        self.audio_file_len_ms_sax = self.audio_file_sax.duration_seconds * 1000
        # globs for bass
        self.pan_law_bass = 0
        self.audio_file_len_ms_bass = self.audio_file_bass.duration_seconds * 1000
        # self.HOST = '127.0.0.1'  # Client IP (this)
        # self.PORT = 8000
        # Port to listen on (non-privileged ports are > 1023)
        self.CHUNK = 2 ** 11
        self.RATE = 44100
        self.p = pyaudio.PyAudio()
        self.stream = self.p.open(format=pyaudio.paInt16,
                                  channels=1,
                                  rate=self.RATE,
                                  input=True,
                                  frames_per_buffer=self.CHUNK)
        # build send data dict
        self.send_data_dict = {'mic_level': 0,
                               'speed': 1,
                               'tempo': 0.1
                               }
        # init got dict
        self.got_dict = {}
        # instantiate the server
        # NOTE(review): this attribute shadows the ``engine`` method below.
        self.engine = AiDataEngine()
        # # set the ball rolling
        # self.main()
    def snd_listen(self):
        """Endless loop: read mic chunks and publish the level into send_data_dict."""
        print("mic listener: started!")
        while True:
            data = np.frombuffer(self.stream.read(self.CHUNK,exception_on_overflow = False),
                                  dtype=np.int16)
            peak = np.average(np.abs(data)) * 2
            if peak > 2000:
                bars = "#" * int(50 * peak / 2 ** 16)
                print("%05d %s" % (peak, bars))
            self.send_data_dict['mic_level'] = peak / 30000
    def terminate(self):
        """Close the audio stream and release PyAudio."""
        self.stream.stop_stream()
        self.stream.close()
        self.p.terminate()
    def data_exchange(self):
        """Endless loop: push send_data_dict to the engine, pull its datadict back."""
        print("data exchange: started!")
        while True:
            # send self.send_data_dict
            self.engine.parse_got_dict(self.send_data_dict)
            # get self.datadict from engine
            self.got_dict = self.engine.datadict
            # sync with engine & stop freewheeling
            sleep_dur = self.got_dict['rhythm_rate']
            # print('data exchange')
            sleep(sleep_dur)
    def engine(self):
        # NOTE(review): unreachable — __init__ assigns self.engine = AiDataEngine(),
        # which shadows this method; also AiDataEngine has no go() (it is
        # commented out there). Looks like dead code — confirm before removing.
        # set the engine off
        self.engine.go()
    def main(self):
        """Fan every engine/client loop out onto the thread pool."""
        # snd_listen and client need dependent threads.
        # All other IO is ok as a single Trio thread inside self.client
        tasks = [self.engine.make_data,
                 self.engine.affect,
                 self.snd_listen,
                 self.data_exchange,
                 self.robot_sax,
                 self.robot_bass]
        with concurrent.futures.ThreadPoolExecutor() as executor:
            futures = {executor.submit(task): task for task in tasks}
    def robot_sax(self):
        """Endless loop: improvise the sax voice from the engine's master output."""
        # make a serial port connection here
        print('im here SAX - sleeping for 3')
        sleep(3)
        # loop here
        # while self.running:
        # print('im here2')
        # while not self.improv_go:
        # print('im here3')
        # sleep(1)
        # print('sleeping robot')
        # then start improvisers
        while True:
            print('im here4')
            # grab raw data from engine stream
            raw_data_from_dict = self.got_dict['master_output']
            rhythm_rate = self.got_dict['rhythm_rate']
            print('sax', raw_data_from_dict, rhythm_rate)
            # add variability to the individual instrument
            rnd_dur_delta = random()
            rhythm_rate *= rnd_dur_delta * 8
            print('sax', raw_data_from_dict, rhythm_rate)
            # make a sound & move bot
            self.make_sound('sax', raw_data_from_dict, rhythm_rate)
            print('making a new one')
    def robot_bass(self):
        """Endless loop: improvise the bass voice from the engine's move stream."""
        # make a serial port connection here
        print('im here Bass - sleeping for 3')
        sleep(3)
        # loop here
        # while self.running:
        # print('im here2')
        # while not self.improv_go:
        # print('im here3')
        # sleep(1)
        # print('sleeping robot')
        # then start improvisers
        while True:
            print('im here4')
            # grab raw data from engine stream
            raw_data_from_dict = self.got_dict['master_output']
            # trying different part of the dict
            raw_data_from_dict = self.got_dict['move_rnn']
            rhythm_rate = self.got_dict['rhythm_rate']
            print('bass', raw_data_from_dict, rhythm_rate)
            # add variability to the individual instrument
            rnd_dur_delta = random() * 4
            rhythm_rate *= rnd_dur_delta
            print('bass', raw_data_from_dict, rhythm_rate)
            # make a sound & move bot
            self.make_sound('bass', raw_data_from_dict, rhythm_rate)
            print('making a new one')
    def make_sound(self, instrument, incoming_raw_data, rhythm_rate):
        """Play a snippet of the instrument's audio file at a position scaled
        from incoming_raw_data (assumed in [0, 1] — confirm), optionally
        moving the robot first for 'sax'."""
        # # temp random num gen
        # rnd = randrange(self.audio_dir_len)
        # print(self.audio_dir[rnd])
        print('making sound')
        if instrument == 'sax':
            audio_file = self.audio_file_sax
            audio_file_len_ms = self.audio_file_len_ms_sax
            pan_law = self.pan_law_sax
            len_delta = random() * 1000
        elif instrument == 'bass':
            audio_file = self.audio_file_bass
            audio_file_len_ms = self.audio_file_len_ms_bass
            pan_law = self.pan_law_bass
            len_delta = random() * 1000
        # rescale incoming raw data (linear map of [0, 1] onto the file length)
        audio_play_position = int(((incoming_raw_data - 0) / (1 - 0)) * (audio_file_len_ms - 0) + 0)
        duration = rhythm_rate * len_delta
        if duration < 0.1:
            duration = 0.1
        end_point = audio_play_position + duration
        print(audio_play_position, end_point, duration)
        # make a sound from incoming data
        snippet = audio_file[audio_play_position: end_point]
        print('snippet')
        # pan snippet
        pan_snippet = snippet.pan(pan_law)
        print('pan')
        # move bot before making sound
        if self.robot_connected:
            if instrument == 'sax':
                self.move_robot(incoming_raw_data, duration)
        # get the robot to move with
        play(pan_snippet)
        print('play')
        # sleep(duration/ 1000)
        print('fininshed a play')
    def move_robot(self, incoming_data, duration):
        """Pick a random joint/gripper/paddle/wheel movement when duration > 0.2."""
        # top previous movements
        # self.robot_robot.gripper_stop()
        # self.robot_robot.paddle_stop()
        # self.robot_robot.stop()
        # which movement
        if duration > 0.2:
            # select a joint (1-16 % 4)
            # or move bot left or right (17)
            # or move gripper up or down (18)
            rnd_joint = randrange(22)
            rnd_direction = randrange(2)
            if rnd_direction == 1:
                direction = -20
            else:
                direction = 20
            rnd_speed = randrange(3, 15)
            rnd_speed *= 10
            # move an arm joint
            if rnd_joint <= 15:
                joint = (rnd_joint % 4) + 1
                self.arm_arm.move_joint_relative_speed(joint, direction, rnd_speed)
            # move the gripper
            elif rnd_joint == 16:
                if rnd_direction == 1:
                    self.robot_robot.gripper_up()
                else:
                    self.robot_robot.gripper_down()
            # or move the wheels
            elif rnd_joint == 17:
                if rnd_direction == 1:
                    self.robot_robot.paddle_open()
                else:
                    self.robot_robot.paddle_close()
            # or move the wheels
            elif rnd_joint >= 18:
                if rnd_direction == 1:
                    self.robot_robot.step_forward()
                else:
                    self.robot_robot.step_backward()
if __name__ == '__main__':
    # Choose which sample library the client improvises with
    # (alternative: 'pop').
    library = 'jazz'
    cl = Client(library)
    # set the ball rolling
    cl.main()
| [
"numpy.abs",
"numpy.reshape",
"random.randrange",
"pydub.playback.play",
"pydub.AudioSegment.from_mp3",
"robot.rerobot.Robot",
"time.sleep",
"tensorflow.keras.models.load_model",
"time.time",
"random.random",
"pyaudio.PyAudio",
"arm.arm.Arm",
"pydub.AudioSegment.from_wav"
] | [((899, 954), 'tensorflow.keras.models.load_model', 'load_model', (['"""models/EMR-v4_RNN_skeleton_data.nose.x.h5"""'], {}), "('models/EMR-v4_RNN_skeleton_data.nose.x.h5')\n", (909, 954), False, 'from tensorflow.keras.models import load_model\n'), ((1218, 1261), 'tensorflow.keras.models.load_model', 'load_model', (['"""models/EMR-v4_RNN_bitalino.h5"""'], {}), "('models/EMR-v4_RNN_bitalino.h5')\n", (1228, 1261), False, 'from tensorflow.keras.models import load_model\n'), ((1546, 1595), 'tensorflow.keras.models.load_model', 'load_model', (['"""models/EMR-v4_conv2D_move-affect.h5"""'], {}), "('models/EMR-v4_conv2D_move-affect.h5')\n", (1556, 1595), False, 'from tensorflow.keras.models import load_model\n'), ((1887, 1936), 'tensorflow.keras.models.load_model', 'load_model', (['"""models/EMR-v4_conv2D_affect-move.h5"""'], {}), "('models/EMR-v4_conv2D_affect-move.h5')\n", (1897, 1936), False, 'from tensorflow.keras.models import load_model\n'), ((6859, 6891), 'numpy.reshape', 'np.reshape', (['input_val', '(1, 1, 1)'], {}), '(input_val, (1, 1, 1))\n', (6869, 6891), True, 'import numpy as np\n'), ((14285, 14302), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (14300, 14302), False, 'import pyaudio\n'), ((16686, 16694), 'time.sleep', 'sleep', (['(3)'], {}), '(3)\n', (16691, 16694), False, 'from time import sleep\n'), ((17683, 17691), 'time.sleep', 'sleep', (['(3)'], {}), '(3)\n', (17688, 17691), False, 'from time import sleep\n'), ((20093, 20110), 'pydub.playback.play', 'play', (['pan_snippet'], {}), '(pan_snippet)\n', (20097, 20110), False, 'from pydub.playback import play\n'), ((6400, 6408), 'random.random', 'random', ([], {}), '()\n', (6406, 6408), False, 'from random import random\n'), ((6451, 6459), 'random.random', 'random', ([], {}), '()\n', (6457, 6459), False, 'from random import random\n'), ((6567, 6590), 'time.sleep', 'sleep', (['self.rhythm_rate'], {}), '(self.rhythm_rate)\n', (6572, 6590), False, 'from time import sleep\n'), ((7112, 7124), 
'random.randrange', 'randrange', (['(4)'], {}), '(4)\n', (7121, 7124), False, 'from random import randrange\n'), ((7609, 7617), 'random.random', 'random', ([], {}), '()\n', (7615, 7617), False, 'from random import random\n'), ((12771, 12776), 'arm.arm.Arm', 'Arm', ([], {}), '()\n', (12774, 12776), False, 'from arm.arm import Arm\n'), ((13257, 13264), 'robot.rerobot.Robot', 'Robot', ([], {}), '()\n', (13262, 13264), False, 'from robot.rerobot import Robot\n'), ((13443, 13484), 'pydub.AudioSegment.from_mp3', 'AudioSegment.from_mp3', (['"""assets/alfie.mp3"""'], {}), "('assets/alfie.mp3')\n", (13464, 13484), False, 'from pydub import AudioSegment\n'), ((15966, 15982), 'time.sleep', 'sleep', (['sleep_dur'], {}), '(sleep_dur)\n', (15971, 15982), False, 'from time import sleep\n'), ((17299, 17307), 'random.random', 'random', ([], {}), '()\n', (17305, 17307), False, 'from random import random\n'), ((20611, 20624), 'random.randrange', 'randrange', (['(22)'], {}), '(22)\n', (20620, 20624), False, 'from random import randrange\n'), ((20654, 20666), 'random.randrange', 'randrange', (['(2)'], {}), '(2)\n', (20663, 20666), False, 'from random import randrange\n'), ((20808, 20824), 'random.randrange', 'randrange', (['(3)', '(15)'], {}), '(3, 15)\n', (20817, 20824), False, 'from random import randrange\n'), ((8284, 8300), 'random.randrange', 'randrange', (['(6)', '(26)'], {}), '(6, 26)\n', (8293, 8300), False, 'from random import randrange\n'), ((8344, 8350), 'time.time', 'time', ([], {}), '()\n', (8348, 8350), False, 'from time import time\n'), ((8659, 8665), 'time.time', 'time', ([], {}), '()\n', (8663, 8665), False, 'from time import time\n'), ((9029, 9041), 'random.randrange', 'randrange', (['(4)'], {}), '(4)\n', (9038, 9041), False, 'from random import randrange\n'), ((13520, 13560), 'pydub.AudioSegment.from_mp3', 'AudioSegment.from_mp3', (['"""assets/bass.mp3"""'], {}), "('assets/bass.mp3')\n", (13541, 13560), False, 'from pydub import AudioSegment\n'), ((13631, 13673), 
'pydub.AudioSegment.from_wav', 'AudioSegment.from_wav', (['"""assets/vocals.wav"""'], {}), "('assets/vocals.wav')\n", (13652, 13673), False, 'from pydub import AudioSegment\n'), ((13709, 13758), 'pydub.AudioSegment.from_wav', 'AudioSegment.from_wav', (['"""assets/accompaniment.wav"""'], {}), "('assets/accompaniment.wav')\n", (13730, 13758), False, 'from pydub import AudioSegment\n'), ((18406, 18414), 'random.random', 'random', ([], {}), '()\n', (18412, 18414), False, 'from random import random\n'), ((19081, 19089), 'random.random', 'random', ([], {}), '()\n', (19087, 19089), False, 'from random import random\n'), ((9374, 9380), 'time.time', 'time', ([], {}), '()\n', (9378, 9380), False, 'from time import time\n'), ((9576, 9582), 'time.time', 'time', ([], {}), '()\n', (9580, 9582), False, 'from time import time\n'), ((11714, 11737), 'time.sleep', 'sleep', (['self.rhythm_rate'], {}), '(self.rhythm_rate)\n', (11719, 11737), False, 'from time import sleep\n'), ((15205, 15217), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (15211, 15217), True, 'import numpy as np\n'), ((19303, 19311), 'random.random', 'random', ([], {}), '()\n', (19309, 19311), False, 'from random import random\n'), ((9384, 9405), 'random.randrange', 'randrange', (['(1000)', '(4000)'], {}), '(1000, 4000)\n', (9393, 9405), False, 'from random import randrange\n')] |
import numpy as np
import matplotlib.pyplot as plt
from astropy.wcs import WCS
from kidsdata import KissData
from kidsdata.db import list_scan, get_scan
plt.ion()
# Open the scan 431
kd = KissData(get_scan(431))
# Read All the valid data from array B
list_data = kd.names.DataSc + kd.names.DataUc + ["I", "Q"]
kd.read_data(list_data=list_data, list_detector=kd.get_list_detector("B", flag=0, typedet=1), silent=True)
# Compute and plot the beam map
beammap, (datas, wcs, popts) = kd.plot_beammap(
    coord="pdiff", flatfield=None, cm_func="kidsdata.common_mode.pca_filtering", ncomp=2
)
# Update the kidpar: recenter the fitted detector positions on their median
for key in ["x0", "y0"]:
    popts[key] -= np.nanmedian(popts[key])
# NOTE(review): writes a private attribute of KissData — confirm there is
# no public setter for the extended kidpar.
kd._extended_kidpar = popts
# Plot geometry
geometry, fwhm = kd.plot_kidpar()
# select good detector, ie within 60 arcmin of the center and fwhm 25 +- 10
kidpar = kd.kidpar.loc[kd.list_detector]
pos = np.array([kidpar["x0"], kidpar["y0"]]) * 60  # arcmin
fwhm = np.array(np.abs(kidpar["fwhm_x"]) + np.abs(kidpar["fwhm_y"])) / 2 * 60
ikid = np.where((np.sqrt(pos[0] ** 2 + pos[1] ** 2) < 60) & (np.abs(fwhm - 25) < 10))[0]
# Project the selected detectors onto a continuum map and display it
data, weight, hits = kd.continuum_map(coord="pdiff", ikid=ikid, cdelt=0.05)
plt.subplot(projection=WCS(data.header))
plt.imshow(data.data, origin="lower")
| [
"matplotlib.pyplot.imshow",
"numpy.abs",
"numpy.sqrt",
"numpy.nanmedian",
"numpy.array",
"kidsdata.db.get_scan",
"matplotlib.pyplot.ion",
"astropy.wcs.WCS"
] | [((156, 165), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (163, 165), True, 'import matplotlib.pyplot as plt\n'), ((1226, 1263), 'matplotlib.pyplot.imshow', 'plt.imshow', (['data.data'], {'origin': '"""lower"""'}), "(data.data, origin='lower')\n", (1236, 1263), True, 'import matplotlib.pyplot as plt\n'), ((201, 214), 'kidsdata.db.get_scan', 'get_scan', (['(431)'], {}), '(431)\n', (209, 214), False, 'from kidsdata.db import list_scan, get_scan\n'), ((658, 682), 'numpy.nanmedian', 'np.nanmedian', (['popts[key]'], {}), '(popts[key])\n', (670, 682), True, 'import numpy as np\n'), ((886, 924), 'numpy.array', 'np.array', (["[kidpar['x0'], kidpar['y0']]"], {}), "([kidpar['x0'], kidpar['y0']])\n", (894, 924), True, 'import numpy as np\n'), ((1208, 1224), 'astropy.wcs.WCS', 'WCS', (['data.header'], {}), '(data.header)\n', (1211, 1224), False, 'from astropy.wcs import WCS\n'), ((956, 980), 'numpy.abs', 'np.abs', (["kidpar['fwhm_x']"], {}), "(kidpar['fwhm_x'])\n", (962, 980), True, 'import numpy as np\n'), ((983, 1007), 'numpy.abs', 'np.abs', (["kidpar['fwhm_y']"], {}), "(kidpar['fwhm_y'])\n", (989, 1007), True, 'import numpy as np\n'), ((1035, 1069), 'numpy.sqrt', 'np.sqrt', (['(pos[0] ** 2 + pos[1] ** 2)'], {}), '(pos[0] ** 2 + pos[1] ** 2)\n', (1042, 1069), True, 'import numpy as np\n'), ((1079, 1096), 'numpy.abs', 'np.abs', (['(fwhm - 25)'], {}), '(fwhm - 25)\n', (1085, 1096), True, 'import numpy as np\n')] |
"""
This code explores Different Models of Convolutional Neural Networks
for the San Salvador Gang Project
@author: falba and ftop
"""
import os
import google_streetview.api
import pandas as pd
import numpy as np
import sys
import matplotlib.image as mp_img
from matplotlib import pyplot as plot
from skimage import io
from skimage.color import rgb2gray
from sklearn.preprocessing import StandardScaler
from sklearn import svm
from sklearn.model_selection import train_test_split
#pip install tensorflow keras numpy skimage matplotlib
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras import layers
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Dense, Dropout, Flatten
from keras.utils import to_categorical
from keras.layers import LSTM, Embedding
from keras.preprocessing.image import ImageDataGenerator
from sklearn.linear_model import LogisticRegression
from numpy import where
from keras import regularizers
import random
#### Load the data
## Set Directory
os.chdir('C:/Users/falba/Dropbox/ImageAnalysis/San Salvador/GangBoundaries')
df = pd.read_csv("C:/Users/falba/Dropbox/ImageAnalysis/San Salvador/GangBoundaries/sample.csv", header=0)
df
astr = "C:/Users/falba/Dropbox/ImageAnalysis/San Salvador/GangBoundaries/"
# Let's create a sample for testing and training:
train, test = train_test_split(df, test_size=0.25, random_state=38)
# Obtaining the image data of testing
test_cases=[]
test_class=[]
file=test.file
for x in file:
image = io.imread(astr+x)
image =rgb2gray(image)
test_cases.append(image)
test_cases=np.reshape(np.ravel(test_cases),(579,480,480,-1))
for index, Series in test.iterrows():
test_class.append(Series["gang_territory10"])
test_class=np.reshape(test_class,(579,-1))
# The image data of training: same pipeline as the test set.
# -1 infers the batch dimension (previously hard-coded as 1735).
train_cases = [rgb2gray(io.imread(astr + x)) for x in train.file]
train_cases = np.reshape(np.ravel(train_cases), (-1, 480, 480, 1))
train_class = np.reshape(train["gang_territory10"].to_numpy(), (-1, 1))
## To Categorical
#y_train = to_categorical(train_class)
#y_test= to_categorical(test_class)
# Kept for downstream use: image height and a default sequence length.
input_dim = train_cases.shape[1]
maxlen = 100
### Now let's try a Convolutional Neural Networks
# Model 1: two conv/pool stages plus dropout, sigmoid head for the
# binary gang-territory label.
model = Sequential([
    Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(480, 480, 1)),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    Flatten(),
    Dense(10, activation='relu'),
    Dropout(0.5),
    Dense(1, activation='sigmoid'),
])
model.compile(loss='binary_crossentropy', optimizer='Adam', metrics=['accuracy'])
model.summary()
hist_1 = model.fit(train_cases, train_class, verbose=False, epochs=50,
                   validation_data=(test_cases, test_class), batch_size=10)
hist1acc = model.evaluate(test_cases, test_class)
#accuracy: 0.6165
# Loss curves during training
plot.subplot(211)
plot.plot(hist_1.history['loss'], label='Train')
plot.plot(hist_1.history['val_loss'], label='Test')
plot.legend()
plot.show()
# Accuracy curves during training
plot.subplot(212)
plot.plot(hist_1.history['accuracy'], label='Train')
plot.plot(hist_1.history['val_accuracy'], label='Test')
plot.legend()
plot.show()
# Binary CrossEntropy - Model 2: fewer, larger filters than model 1.
model = Sequential([
    Conv2D(8, (5, 5), activation='relu', input_shape=(480, 480, 1)),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(2, (5, 5), activation='relu'),
    MaxPooling2D(pool_size=(3, 3)),
    Flatten(),
    Dense(10, activation='relu'),
    Dense(1, activation='sigmoid'),
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
hist_2 = model.fit(train_cases, train_class, verbose=False, epochs=30,
                   validation_data=(test_cases, test_class), batch_size=10)
# evaluate the model
hist2acc = model.evaluate(test_cases, test_class)
#Accuracy 0.5354
# Accuracy curves during training
plot.subplot(212)
plot.title('Accuracy / Binary Crossentropy')
plot.plot(hist_2.history['accuracy'], label='Train')
plot.plot(hist_2.history['val_accuracy'], label='Test')
plot.legend()
plot.show()
# Loss curves during training
plot.subplot(211)
plot.title('Loss / Binary Crossentropy')
plot.plot(hist_2.history['loss'], label='Train')
plot.plot(hist_2.history['val_loss'], label='Test')
plot.legend()
plot.show()
## Seems like EPOCH migh be too high. Optimal can be less than 10
## Maybe because overfitting
# Binary CrossEntropy - Model 3: a deeper stack with three conv/pool stages.
model = Sequential([
    Conv2D(10, (11, 11), activation='relu', input_shape=(480, 480, 1)),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(20, (5, 5), activation='relu'),
    MaxPooling2D(pool_size=(3, 3)),
    Conv2D(100, (4, 4), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Flatten(),
    Dense(10, activation='relu'),
    Dense(1, activation='sigmoid'),
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
hist_3 = model.fit(train_cases, train_class, verbose=False, epochs=30,
                   validation_data=(test_cases, test_class), batch_size=10)
# evaluate the model
hist3acc = model.evaluate(test_cases, test_class)
#Accuracy 53%
## Graphs
# Accuracy curves during training
plot.subplot(212)
plot.plot(hist_3.history['accuracy'], label='Train')
plot.plot(hist_3.history['val_accuracy'], label='Test')
plot.legend()
plot.show()
# Loss curves during training
plot.subplot(211)
plot.plot(hist_3.history['loss'], label='Train')
plot.plot(hist_3.history['val_loss'], label='Test')
plot.legend()
plot.show()
## LET'S TRY REGULARIZATION
# Model 4: model 2's architecture with L2 weight and L1 activity
# penalties on the (linear) dense layer.
model = Sequential([
    Conv2D(8, (5, 5), activation='relu', input_shape=(480, 480, 1)),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(2, (5, 5), activation='relu'),
    MaxPooling2D(pool_size=(3, 3)),
    Flatten(),
    Dense(10,
          kernel_regularizer=regularizers.l2(0.01),
          activity_regularizer=regularizers.l1(0.01)),
    Dense(1, activation='sigmoid'),
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
hist = model.fit(train_cases, train_class, verbose=False, epochs=50,
                 validation_data=(test_cases, test_class), batch_size=10)
# evaluate the model
hist_acc = model.evaluate(test_cases, test_class)
#Accuracy 53%
# Accuracy curves during training
plot.subplot(212)
plot.title('Accuracy / Binary Crossentropy')
plot.plot(hist.history['accuracy'], label='Train')
plot.plot(hist.history['val_accuracy'], label='Test')
plot.legend()
plot.show()
# Loss curves during training
plot.subplot(211)
plot.title('Loss / Binary Crossentropy')
plot.plot(hist.history['loss'], label='Train')
plot.plot(hist.history['val_loss'], label='Test')
plot.legend()
plot.show()
## It didnt help much with accuracy but it did with the loss
| [
"keras.layers.Conv2D",
"pandas.read_csv",
"keras.layers.Dense",
"numpy.reshape",
"matplotlib.pyplot.plot",
"keras.regularizers.l1",
"skimage.color.rgb2gray",
"keras.layers.Flatten",
"keras.layers.MaxPooling2D",
"sklearn.model_selection.train_test_split",
"keras.models.Sequential",
"skimage.io.... | [((1094, 1170), 'os.chdir', 'os.chdir', (['"""C:/Users/falba/Dropbox/ImageAnalysis/San Salvador/GangBoundaries"""'], {}), "('C:/Users/falba/Dropbox/ImageAnalysis/San Salvador/GangBoundaries')\n", (1102, 1170), False, 'import os\n'), ((1177, 1287), 'pandas.read_csv', 'pd.read_csv', (['"""C:/Users/falba/Dropbox/ImageAnalysis/San Salvador/GangBoundaries/sample.csv"""'], {'header': '(0)'}), "(\n 'C:/Users/falba/Dropbox/ImageAnalysis/San Salvador/GangBoundaries/sample.csv'\n , header=0)\n", (1188, 1287), True, 'import pandas as pd\n'), ((1428, 1481), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df'], {'test_size': '(0.25)', 'random_state': '(38)'}), '(df, test_size=0.25, random_state=38)\n', (1444, 1481), False, 'from sklearn.model_selection import train_test_split\n'), ((1853, 1886), 'numpy.reshape', 'np.reshape', (['test_class', '(579, -1)'], {}), '(test_class, (579, -1))\n', (1863, 1886), True, 'import numpy as np\n'), ((2097, 2142), 'numpy.reshape', 'np.reshape', (['train_cases', '(1735, 480, 480, -1)'], {}), '(train_cases, (1735, 480, 480, -1))\n', (2107, 2142), True, 'import numpy as np\n'), ((2248, 2283), 'numpy.reshape', 'np.reshape', (['train_class', '(1735, -1)'], {}), '(train_class, (1735, -1))\n', (2258, 2283), True, 'import numpy as np\n'), ((2540, 2552), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2550, 2552), False, 'from keras.models import Sequential\n'), ((3249, 3266), 'matplotlib.pyplot.subplot', 'plot.subplot', (['(211)'], {}), '(211)\n', (3261, 3266), True, 'from matplotlib import pyplot as plot\n'), ((3311, 3359), 'matplotlib.pyplot.plot', 'plot.plot', (["hist_1.history['loss']"], {'label': '"""Train"""'}), "(hist_1.history['loss'], label='Train')\n", (3320, 3359), True, 'from matplotlib import pyplot as plot\n'), ((3361, 3412), 'matplotlib.pyplot.plot', 'plot.plot', (["hist_1.history['val_loss']"], {'label': '"""Test"""'}), "(hist_1.history['val_loss'], label='Test')\n", (3370, 3412), True, 
'from matplotlib import pyplot as plot\n'), ((3414, 3427), 'matplotlib.pyplot.legend', 'plot.legend', ([], {}), '()\n', (3425, 3427), True, 'from matplotlib import pyplot as plot\n'), ((3429, 3440), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (3438, 3440), True, 'from matplotlib import pyplot as plot\n'), ((3444, 3461), 'matplotlib.pyplot.subplot', 'plot.subplot', (['(212)'], {}), '(212)\n', (3456, 3461), True, 'from matplotlib import pyplot as plot\n'), ((3510, 3562), 'matplotlib.pyplot.plot', 'plot.plot', (["hist_1.history['accuracy']"], {'label': '"""Train"""'}), "(hist_1.history['accuracy'], label='Train')\n", (3519, 3562), True, 'from matplotlib import pyplot as plot\n'), ((3564, 3619), 'matplotlib.pyplot.plot', 'plot.plot', (["hist_1.history['val_accuracy']"], {'label': '"""Test"""'}), "(hist_1.history['val_accuracy'], label='Test')\n", (3573, 3619), True, 'from matplotlib import pyplot as plot\n'), ((3621, 3634), 'matplotlib.pyplot.legend', 'plot.legend', ([], {}), '()\n', (3632, 3634), True, 'from matplotlib import pyplot as plot\n'), ((3636, 3647), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (3645, 3647), True, 'from matplotlib import pyplot as plot\n'), ((3729, 3741), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3739, 3741), False, 'from keras.models import Sequential\n'), ((4435, 4452), 'matplotlib.pyplot.subplot', 'plot.subplot', (['(212)'], {}), '(212)\n', (4447, 4452), True, 'from matplotlib import pyplot as plot\n'), ((4454, 4498), 'matplotlib.pyplot.title', 'plot.title', (['"""Accuracy / Binary Crossentropy"""'], {}), "('Accuracy / Binary Crossentropy')\n", (4464, 4498), True, 'from matplotlib import pyplot as plot\n'), ((4500, 4552), 'matplotlib.pyplot.plot', 'plot.plot', (["hist_2.history['accuracy']"], {'label': '"""Train"""'}), "(hist_2.history['accuracy'], label='Train')\n", (4509, 4552), True, 'from matplotlib import pyplot as plot\n'), ((4554, 4609), 'matplotlib.pyplot.plot', 'plot.plot', 
(["hist_2.history['val_accuracy']"], {'label': '"""Test"""'}), "(hist_2.history['val_accuracy'], label='Test')\n", (4563, 4609), True, 'from matplotlib import pyplot as plot\n'), ((4611, 4624), 'matplotlib.pyplot.legend', 'plot.legend', ([], {}), '()\n', (4622, 4624), True, 'from matplotlib import pyplot as plot\n'), ((4626, 4637), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (4635, 4637), True, 'from matplotlib import pyplot as plot\n'), ((4641, 4658), 'matplotlib.pyplot.subplot', 'plot.subplot', (['(211)'], {}), '(211)\n', (4653, 4658), True, 'from matplotlib import pyplot as plot\n'), ((4660, 4700), 'matplotlib.pyplot.title', 'plot.title', (['"""Loss / Binary Crossentropy"""'], {}), "('Loss / Binary Crossentropy')\n", (4670, 4700), True, 'from matplotlib import pyplot as plot\n'), ((4702, 4750), 'matplotlib.pyplot.plot', 'plot.plot', (["hist_2.history['loss']"], {'label': '"""Train"""'}), "(hist_2.history['loss'], label='Train')\n", (4711, 4750), True, 'from matplotlib import pyplot as plot\n'), ((4752, 4803), 'matplotlib.pyplot.plot', 'plot.plot', (["hist_2.history['val_loss']"], {'label': '"""Test"""'}), "(hist_2.history['val_loss'], label='Test')\n", (4761, 4803), True, 'from matplotlib import pyplot as plot\n'), ((4805, 4818), 'matplotlib.pyplot.legend', 'plot.legend', ([], {}), '()\n', (4816, 4818), True, 'from matplotlib import pyplot as plot\n'), ((4820, 4831), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (4829, 4831), True, 'from matplotlib import pyplot as plot\n'), ((4977, 4989), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4987, 4989), False, 'from keras.models import Sequential\n'), ((5759, 5776), 'matplotlib.pyplot.subplot', 'plot.subplot', (['(212)'], {}), '(212)\n', (5771, 5776), True, 'from matplotlib import pyplot as plot\n'), ((5825, 5877), 'matplotlib.pyplot.plot', 'plot.plot', (["hist_3.history['accuracy']"], {'label': '"""Train"""'}), "(hist_3.history['accuracy'], label='Train')\n", (5834, 5877), True, 
'from matplotlib import pyplot as plot\n'), ((5879, 5934), 'matplotlib.pyplot.plot', 'plot.plot', (["hist_3.history['val_accuracy']"], {'label': '"""Test"""'}), "(hist_3.history['val_accuracy'], label='Test')\n", (5888, 5934), True, 'from matplotlib import pyplot as plot\n'), ((5936, 5949), 'matplotlib.pyplot.legend', 'plot.legend', ([], {}), '()\n', (5947, 5949), True, 'from matplotlib import pyplot as plot\n'), ((5951, 5962), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (5960, 5962), True, 'from matplotlib import pyplot as plot\n'), ((5966, 5983), 'matplotlib.pyplot.subplot', 'plot.subplot', (['(211)'], {}), '(211)\n', (5978, 5983), True, 'from matplotlib import pyplot as plot\n'), ((6028, 6076), 'matplotlib.pyplot.plot', 'plot.plot', (["hist_3.history['loss']"], {'label': '"""Train"""'}), "(hist_3.history['loss'], label='Train')\n", (6037, 6076), True, 'from matplotlib import pyplot as plot\n'), ((6078, 6129), 'matplotlib.pyplot.plot', 'plot.plot', (["hist_3.history['val_loss']"], {'label': '"""Test"""'}), "(hist_3.history['val_loss'], label='Test')\n", (6087, 6129), True, 'from matplotlib import pyplot as plot\n'), ((6131, 6144), 'matplotlib.pyplot.legend', 'plot.legend', ([], {}), '()\n', (6142, 6144), True, 'from matplotlib import pyplot as plot\n'), ((6146, 6157), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (6155, 6157), True, 'from matplotlib import pyplot as plot\n'), ((6201, 6213), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (6211, 6213), False, 'from keras.models import Sequential\n'), ((7045, 7062), 'matplotlib.pyplot.subplot', 'plot.subplot', (['(212)'], {}), '(212)\n', (7057, 7062), True, 'from matplotlib import pyplot as plot\n'), ((7064, 7108), 'matplotlib.pyplot.title', 'plot.title', (['"""Accuracy / Binary Crossentropy"""'], {}), "('Accuracy / Binary Crossentropy')\n", (7074, 7108), True, 'from matplotlib import pyplot as plot\n'), ((7110, 7160), 'matplotlib.pyplot.plot', 'plot.plot', 
(["hist.history['accuracy']"], {'label': '"""Train"""'}), "(hist.history['accuracy'], label='Train')\n", (7119, 7160), True, 'from matplotlib import pyplot as plot\n'), ((7162, 7215), 'matplotlib.pyplot.plot', 'plot.plot', (["hist.history['val_accuracy']"], {'label': '"""Test"""'}), "(hist.history['val_accuracy'], label='Test')\n", (7171, 7215), True, 'from matplotlib import pyplot as plot\n'), ((7217, 7230), 'matplotlib.pyplot.legend', 'plot.legend', ([], {}), '()\n', (7228, 7230), True, 'from matplotlib import pyplot as plot\n'), ((7232, 7243), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (7241, 7243), True, 'from matplotlib import pyplot as plot\n'), ((7247, 7264), 'matplotlib.pyplot.subplot', 'plot.subplot', (['(211)'], {}), '(211)\n', (7259, 7264), True, 'from matplotlib import pyplot as plot\n'), ((7266, 7306), 'matplotlib.pyplot.title', 'plot.title', (['"""Loss / Binary Crossentropy"""'], {}), "('Loss / Binary Crossentropy')\n", (7276, 7306), True, 'from matplotlib import pyplot as plot\n'), ((7308, 7354), 'matplotlib.pyplot.plot', 'plot.plot', (["hist.history['loss']"], {'label': '"""Train"""'}), "(hist.history['loss'], label='Train')\n", (7317, 7354), True, 'from matplotlib import pyplot as plot\n'), ((7356, 7405), 'matplotlib.pyplot.plot', 'plot.plot', (["hist.history['val_loss']"], {'label': '"""Test"""'}), "(hist.history['val_loss'], label='Test')\n", (7365, 7405), True, 'from matplotlib import pyplot as plot\n'), ((7407, 7420), 'matplotlib.pyplot.legend', 'plot.legend', ([], {}), '()\n', (7418, 7420), True, 'from matplotlib import pyplot as plot\n'), ((7422, 7433), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (7431, 7433), True, 'from matplotlib import pyplot as plot\n'), ((1603, 1622), 'skimage.io.imread', 'io.imread', (['(astr + x)'], {}), '(astr + x)\n', (1612, 1622), False, 'from skimage import io\n'), ((1633, 1648), 'skimage.color.rgb2gray', 'rgb2gray', (['image'], {}), '(image)\n', (1641, 1648), False, 'from skimage.color 
import rgb2gray\n'), ((1704, 1724), 'numpy.ravel', 'np.ravel', (['test_cases'], {}), '(test_cases)\n', (1712, 1724), True, 'import numpy as np\n'), ((2002, 2021), 'skimage.io.imread', 'io.imread', (['(astr + x)'], {}), '(astr + x)\n', (2011, 2021), False, 'from skimage import io\n'), ((2031, 2046), 'skimage.color.rgb2gray', 'rgb2gray', (['image'], {}), '(image)\n', (2039, 2046), False, 'from skimage.color import rgb2gray\n'), ((2564, 2640), 'keras.layers.Conv2D', 'Conv2D', (['(32)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""', 'input_shape': '(480, 480, 1)'}), "(32, kernel_size=(3, 3), activation='relu', input_shape=(480, 480, 1))\n", (2570, 2640), False, 'from keras.layers import Conv2D\n'), ((2649, 2679), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2661, 2679), False, 'from keras.layers import MaxPooling2D\n'), ((2692, 2729), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (2698, 2729), False, 'from keras.layers import Conv2D\n'), ((2742, 2772), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2754, 2772), False, 'from keras.layers import MaxPooling2D\n'), ((2785, 2798), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (2792, 2798), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((2811, 2820), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2818, 2820), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((2833, 2861), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (2838, 2861), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((2874, 2886), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (2881, 2886), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((2899, 2929), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': 
'"""sigmoid"""'}), "(1, activation='sigmoid')\n", (2904, 2929), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((3753, 3816), 'keras.layers.Conv2D', 'Conv2D', (['(8)', '(5, 5)'], {'activation': '"""relu"""', 'input_shape': '(480, 480, 1)'}), "(8, (5, 5), activation='relu', input_shape=(480, 480, 1))\n", (3759, 3816), False, 'from keras.layers import Conv2D\n'), ((3827, 3857), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3839, 3857), False, 'from keras.layers import MaxPooling2D\n'), ((3870, 3906), 'keras.layers.Conv2D', 'Conv2D', (['(2)', '(5, 5)'], {'activation': '"""relu"""'}), "(2, (5, 5), activation='relu')\n", (3876, 3906), False, 'from keras.layers import Conv2D\n'), ((3919, 3949), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(3, 3)'}), '(pool_size=(3, 3))\n', (3931, 3949), False, 'from keras.layers import MaxPooling2D\n'), ((3962, 3971), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3969, 3971), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((3984, 4012), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (3989, 4012), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((4025, 4055), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (4030, 4055), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((5001, 5067), 'keras.layers.Conv2D', 'Conv2D', (['(10)', '(11, 11)'], {'activation': '"""relu"""', 'input_shape': '(480, 480, 1)'}), "(10, (11, 11), activation='relu', input_shape=(480, 480, 1))\n", (5007, 5067), False, 'from keras.layers import Conv2D\n'), ((5078, 5108), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5090, 5108), False, 'from keras.layers import MaxPooling2D\n'), ((5121, 5158), 'keras.layers.Conv2D', 'Conv2D', (['(20)', '(5, 5)'], {'activation': 
'"""relu"""'}), "(20, (5, 5), activation='relu')\n", (5127, 5158), False, 'from keras.layers import Conv2D\n'), ((5171, 5201), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(3, 3)'}), '(pool_size=(3, 3))\n', (5183, 5201), False, 'from keras.layers import MaxPooling2D\n'), ((5214, 5252), 'keras.layers.Conv2D', 'Conv2D', (['(100)', '(4, 4)'], {'activation': '"""relu"""'}), "(100, (4, 4), activation='relu')\n", (5220, 5252), False, 'from keras.layers import Conv2D\n'), ((5265, 5295), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5277, 5295), False, 'from keras.layers import MaxPooling2D\n'), ((5308, 5317), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (5315, 5317), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((5330, 5358), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (5335, 5358), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((5371, 5401), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (5376, 5401), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((6225, 6288), 'keras.layers.Conv2D', 'Conv2D', (['(8)', '(5, 5)'], {'activation': '"""relu"""', 'input_shape': '(480, 480, 1)'}), "(8, (5, 5), activation='relu', input_shape=(480, 480, 1))\n", (6231, 6288), False, 'from keras.layers import Conv2D\n'), ((6299, 6329), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (6311, 6329), False, 'from keras.layers import MaxPooling2D\n'), ((6342, 6378), 'keras.layers.Conv2D', 'Conv2D', (['(2)', '(5, 5)'], {'activation': '"""relu"""'}), "(2, (5, 5), activation='relu')\n", (6348, 6378), False, 'from keras.layers import Conv2D\n'), ((6391, 6421), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(3, 3)'}), '(pool_size=(3, 3))\n', (6403, 6421), False, 'from keras.layers import 
MaxPooling2D\n'), ((6434, 6443), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6441, 6443), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((6640, 6670), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (6645, 6670), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((6502, 6523), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (6517, 6523), False, 'from keras import regularizers\n'), ((6563, 6584), 'keras.regularizers.l1', 'regularizers.l1', (['(0.01)'], {}), '(0.01)\n', (6578, 6584), False, 'from keras import regularizers\n')] |
# Code based on https://github.com/yaringal/ConcreteDropout
# License:
# MIT License
#
# Copyright (c) 2017
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
import numpy as np
from torch import nn
from models.base_nn import BaseNN
class ConcreteDropout(nn.Module):
    """Concrete Dropout wrapper: learns the dropout rate of a wrapped layer.

    The dropout probability is kept as a logit parameter so the optimizer
    can tune it, and ``forward`` also returns the weight/dropout
    regularization terms (Gal et al., 2017, "Concrete Dropout").
    """

    def __init__(self, weight_regularizer=1e-6,
                 dropout_regularizer=1e-5, init_min=0.1, init_max=0.1):
        super(ConcreteDropout, self).__init__()
        self.weight_regularizer = weight_regularizer
        self.dropout_regularizer = dropout_regularizer
        # Work in logit space so p = sigmoid(p_logit) always stays in (0, 1).
        logit_min = np.log(init_min) - np.log(1. - init_min)
        logit_max = np.log(init_max) - np.log(1. - init_max)
        self.p_logit = nn.Parameter(torch.empty(1).uniform_(logit_min, logit_max))

    def forward(self, x, layer):
        """Apply concrete dropout to ``x``, run ``layer`` on the result, and
        return ``(output, regularization_term)``."""
        p = torch.sigmoid(self.p_logit)
        out = layer(self._concrete_dropout(x, p))
        # Squared L2 norm of the wrapped layer's parameters, scaled by the
        # retain probability (1 - p).
        sq_norm = 0
        for w in layer.parameters():
            sq_norm = sq_norm + torch.sum(torch.pow(w, 2))
        weights_reg = self.weight_regularizer * sq_norm / (1 - p)
        # Bernoulli-entropy term that keeps p away from degenerate values,
        # scaled by the per-sample input dimensionality.
        dropout_reg = p * torch.log(p) + (1. - p) * torch.log(1. - p)
        input_dimensionality = x[0].numel()  # elements of one batch item
        dropout_reg = dropout_reg * (self.dropout_regularizer * input_dimensionality)
        return out, weights_reg + dropout_reg

    def _concrete_dropout(self, x, p):
        """Apply a relaxed (differentiable) Bernoulli(p) dropout mask to x."""
        eps = 1e-7
        temp = 0.1
        noise = torch.rand_like(x)
        # Concrete relaxation: a sigmoid of the dropout logit plus
        # Gumbel-style noise, sharpened by the temperature.
        drop_prob = torch.sigmoid(
            (torch.log(p + eps)
             - torch.log(1 - p + eps)
             + torch.log(noise + eps)
             - torch.log(1 - noise + eps)) / temp
        )
        x = torch.mul(x, 1 - drop_prob)
        x /= 1 - p  # rescale so the expectation is preserved
        return x
class ConcreteDropoutNN(BaseNN):
    """Fully-connected network in which every layer is wrapped in its own
    ConcreteDropout, so each layer learns an individual dropout rate.

    ``forward`` returns ``(output, total_regularization)``; the latter is
    the sum of the per-layer concrete-dropout regularization terms.
    """

    def __init__(self, weight_regularizer, dropout_regularizer, input_size, output_size, **kwargs):
        super(ConcreteDropoutNN, self).__init__(**kwargs)
        # hidden_size comes from BaseNN; the output width is appended so the
        # final projection is treated like any other layer below.
        self.hidden_size.append(output_size)
        widths = self.hidden_size
        self.linear1 = nn.Linear(input_size, widths[0])
        self.linears = nn.ModuleList(
            [nn.Linear(widths[i], widths[i + 1]) for i in range(len(widths) - 1)]
        )
        # One ConcreteDropout per layer (including the input layer).
        self.conc_drops = nn.ModuleList(
            [ConcreteDropout(weight_regularizer=weight_regularizer,
                             dropout_regularizer=dropout_regularizer)
             for _ in widths]
        )
        self.act = nn.ReLU()

    def forward(self, x):
        n_layers = len(self.hidden_size)
        regularization = torch.empty(n_layers, device=x.device)
        out_arr = []
        # Input layer: concrete dropout around linear1 + ReLU.
        out, regularization[0] = self.conc_drops[0](x, nn.Sequential(self.linear1, self.act))
        out_arr.append(out)
        for i in range(n_layers - 1):
            # The last layer is linear (Identity); all others use ReLU.
            act = nn.Identity() if i == n_layers - 2 else self.act
            out, regularization[i + 1] = self.conc_drops[i + 1](out, nn.Sequential(self.linears[i], act))
            out_arr.append(out)
        return out, regularization.sum()
| [
"torch.mul",
"torch.nn.ReLU",
"torch.log",
"torch.rand_like",
"torch.nn.Sequential",
"torch.sigmoid",
"numpy.log",
"torch.pow",
"torch.nn.Linear",
"torch.nn.Identity",
"torch.empty"
] | [((1816, 1843), 'torch.sigmoid', 'torch.sigmoid', (['self.p_logit'], {}), '(self.p_logit)\n', (1829, 1843), False, 'import torch\n'), ((2585, 2603), 'torch.rand_like', 'torch.rand_like', (['x'], {}), '(x)\n', (2600, 2603), False, 'import torch\n'), ((2819, 2850), 'torch.sigmoid', 'torch.sigmoid', (['(drop_prob / temp)'], {}), '(drop_prob / temp)\n', (2832, 2850), False, 'import torch\n'), ((2930, 2957), 'torch.mul', 'torch.mul', (['x', 'random_tensor'], {}), '(x, random_tensor)\n', (2939, 2957), False, 'import torch\n'), ((3264, 3306), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'self.hidden_size[0]'], {}), '(input_size, self.hidden_size[0])\n', (3273, 3306), False, 'from torch import nn\n'), ((3741, 3750), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3748, 3750), False, 'from torch import nn\n'), ((1587, 1603), 'numpy.log', 'np.log', (['init_min'], {}), '(init_min)\n', (1593, 1603), True, 'import numpy as np\n'), ((1606, 1628), 'numpy.log', 'np.log', (['(1.0 - init_min)'], {}), '(1.0 - init_min)\n', (1612, 1628), True, 'import numpy as np\n'), ((1647, 1663), 'numpy.log', 'np.log', (['init_max'], {}), '(init_max)\n', (1653, 1663), True, 'import numpy as np\n'), ((1666, 1688), 'numpy.log', 'np.log', (['(1.0 - init_max)'], {}), '(1.0 - init_max)\n', (1672, 1688), True, 'import numpy as np\n'), ((2139, 2151), 'torch.log', 'torch.log', (['p'], {}), '(p)\n', (2148, 2151), False, 'import torch\n'), ((2194, 2212), 'torch.log', 'torch.log', (['(1.0 - p)'], {}), '(1.0 - p)\n', (2203, 2212), False, 'import torch\n'), ((2765, 2796), 'torch.log', 'torch.log', (['(1 - unif_noise + eps)'], {}), '(1 - unif_noise + eps)\n', (2774, 2796), False, 'import torch\n'), ((3933, 3970), 'torch.nn.Sequential', 'nn.Sequential', (['self.linear1', 'self.act'], {}), '(self.linear1, self.act)\n', (3946, 3970), False, 'from torch import nn\n'), ((2002, 2021), 'torch.pow', 'torch.pow', (['param', '(2)'], {}), '(param, 2)\n', (2011, 2021), False, 'import torch\n'), ((2714, 2741), 
'torch.log', 'torch.log', (['(unif_noise + eps)'], {}), '(unif_noise + eps)\n', (2723, 2741), False, 'import torch\n'), ((3345, 3400), 'torch.nn.Linear', 'nn.Linear', (['self.hidden_size[i]', 'self.hidden_size[i + 1]'], {}), '(self.hidden_size[i], self.hidden_size[i + 1])\n', (3354, 3400), False, 'from torch import nn\n'), ((4120, 4133), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (4131, 4133), False, 'from torch import nn\n'), ((4252, 4287), 'torch.nn.Sequential', 'nn.Sequential', (['self.linears[i]', 'act'], {}), '(self.linears[i], act)\n', (4265, 4287), False, 'from torch import nn\n'), ((1725, 1739), 'torch.empty', 'torch.empty', (['(1)'], {}), '(1)\n', (1736, 1739), False, 'import torch\n'), ((2626, 2644), 'torch.log', 'torch.log', (['(p + eps)'], {}), '(p + eps)\n', (2635, 2644), False, 'import torch\n'), ((2668, 2690), 'torch.log', 'torch.log', (['(1 - p + eps)'], {}), '(1 - p + eps)\n', (2677, 2690), False, 'import torch\n')] |
'''
A compatibility layer for DSS C-API that mimics the official OpenDSS COM interface.
Copyright (c) 2016-2020 <NAME>
'''
from __future__ import absolute_import
from .._cffi_api_util import Base
import numpy as np
class IYMatrix(Base):
    '''
    Access to the system Y (admittance) matrix and to internal solution
    vectors and flags of the DSS engine.
    '''

    __slots__ = []

    def GetCompressedYMatrix(self, factor=True):
        '''Return the sparse Y matrix as a (data, indices, indptr) tuple —
        the compressed-column (CSC) arrays accepted by scipy.sparse.csc_matrix.

        When `factor` is true, the engine factorizes the matrix first.
        Returns None when the circuit has no buses or no non-zero entries.
        '''
        ffi = self._api_util.ffi
        nBus = ffi.new('uint32_t*')
        nBus[0] = 0
        nNz = ffi.new('uint32_t*')
        nNz[0] = 0
        ColPtr = ffi.new('int32_t**')
        RowIdxPtr = ffi.new('int32_t**')
        cValsPtr = ffi.new('double**')
        self._lib.YMatrix_GetCompressedYMatrix(factor, nBus, nNz, ColPtr, RowIdxPtr, cValsPtr)
        if not nBus[0] or not nNz[0]:
            res = None
        else:
            # np.frombuffer replaces the deprecated np.fromstring for binary
            # input. The explicit .copy() is essential: the engine-owned
            # buffers are freed by the DSS_Dispose_* calls below, so the
            # arrays must own their data.
            res = (
                np.frombuffer(ffi.buffer(cValsPtr[0], nNz[0] * 16), dtype=complex).copy(),
                np.frombuffer(ffi.buffer(RowIdxPtr[0], nNz[0] * 4), dtype=np.int32).copy(),
                np.frombuffer(ffi.buffer(ColPtr[0], (nBus[0] + 1) * 4), dtype=np.int32).copy()
            )
        self._lib.DSS_Dispose_PInteger(ColPtr)
        self._lib.DSS_Dispose_PInteger(RowIdxPtr)
        self._lib.DSS_Dispose_PDouble(cValsPtr)
        self.CheckForError()
        return res

    def ZeroInjCurr(self):
        '''Zero the internal injection-current vector.'''
        self.CheckForError(self._lib.YMatrix_ZeroInjCurr())

    def GetSourceInjCurrents(self):
        '''Accumulate the source injection currents into the internal vector.'''
        self.CheckForError(self._lib.YMatrix_GetSourceInjCurrents())

    def GetPCInjCurr(self):
        '''Accumulate the power-conversion element injection currents.'''
        self.CheckForError(self._lib.YMatrix_GetPCInjCurr())

    def BuildYMatrixD(self, BuildOps, AllocateVI):
        '''(Re)build the Y matrix. `BuildOps` selects the build mode and
        `AllocateVI` whether the voltage/current vectors are reallocated.'''
        self.CheckForError(self._lib.YMatrix_BuildYMatrixD(BuildOps, AllocateVI))

    def AddInAuxCurrents(self, SType):
        '''Add auxiliary currents of type `SType` into the internal vector.'''
        self.CheckForError(self._lib.YMatrix_AddInAuxCurrents(SType))

    def GetIPointer(self):
        '''Get access to the internal Current pointer'''
        IvectorPtr = self._api_util.ffi.new('double**')
        self.CheckForError(self._lib.YMatrix_getIpointer(IvectorPtr))
        return IvectorPtr[0]

    def GetVPointer(self):
        '''Get access to the internal Voltage pointer'''
        VvectorPtr = self._api_util.ffi.new('double**')
        self.CheckForError(self._lib.YMatrix_getVpointer(VvectorPtr))
        return VvectorPtr[0]

    def SolveSystem(self, NodeV):
        '''Solve the system for the node voltages, writing into NodeV in place.

        NodeV is converted to a NumPy array if needed; the engine writes
        directly into its buffer.
        '''
        if type(NodeV) is not np.ndarray:
            NodeV = np.array(NodeV)
        NodeV = self._api_util.ffi.cast("double *", NodeV.ctypes.data)
        NodeVPtr = self._api_util.ffi.new('double**')
        NodeVPtr[0] = NodeV
        result = self.CheckForError(self._lib.YMatrix_SolveSystem(NodeVPtr))
        return result

    @property
    def SystemYChanged(self):
        # Flag: the system Y matrix has changed and must be rebuilt.
        return self.CheckForError(self._lib.YMatrix_Get_SystemYChanged())

    @SystemYChanged.setter
    def SystemYChanged(self, value):
        self.CheckForError(self._lib.YMatrix_Set_SystemYChanged(value))

    @property
    def UseAuxCurrents(self):
        # Flag: include auxiliary currents in the solution.
        return self.CheckForError(self._lib.YMatrix_Get_UseAuxCurrents())

    @UseAuxCurrents.setter
    def UseAuxCurrents(self, value):
        self.CheckForError(self._lib.YMatrix_Set_UseAuxCurrents(value))

    # for better compatibility with OpenDSSDirect.py
    getYSparse = GetCompressedYMatrix

    def getI(self):
        '''Get the data from the internal Current pointer'''
        IvectorPtr = self.GetIPointer()
        return self._api_util.ffi.unpack(IvectorPtr, 2 * (self.CheckForError(self._lib.Circuit_Get_NumNodes() + 1)))

    def getV(self):
        '''Get the data from the internal Voltage pointer'''
        VvectorPtr = self.GetVPointer()
        return self._api_util.ffi.unpack(VvectorPtr, 2 * (self.CheckForError(self._lib.Circuit_Get_NumNodes() + 1)))
| [
"numpy.array"
] | [((2540, 2555), 'numpy.array', 'np.array', (['NodeV'], {}), '(NodeV)\n', (2548, 2555), True, 'import numpy as np\n')] |
import os
import random
import numpy as np
from PIL import Image
def get_loss_train_data():
    """Pick a random DIV2K image large enough for a 1920x1080 crop and return it
    as a float32 array of shape [1, 3, 1080, 1920] with values in [0, 1].

    Raises:
        OSError: if the DIV2K training set is not present in .data/DIV2K.

    NOTE(review): loops forever if no image in the directory is large enough.
    """
    if not os.path.exists('.data/DIV2K'):
        # DIV2K Home Page: https://data.vision.ee.ethz.ch/cvl/DIV2K/
        # DIV2K Training Set: http://data.vision.ee.ethz.ch/cvl/DIV2K/DIV2K_train_HR.zip
        raise os.error('No DIV2K Training set found in .data/DIV2K directory. Download http://data.vision.ee.ethz.ch/cvl/DIV2K/DIV2K_train_HR.zip')

    def format_loss_train_input_image(img):
        """ Crops any image larger than 1920x1080 and formats the pixels to numpy array of shape [batches, channels, height, width] """
        data = np.asarray(img, dtype=np.uint8)
        img.close()
        data = data.astype(dtype=np.float32) / 255.0
        height, width, _channels = data.shape
        # Normalize to landscape orientation so the crop below only needs to
        # handle width >= height.
        if height > width:
            data = np.transpose(data, (1, 0, 2))
            height, width = width, height
        if height > 1080:
            starty = height // 2 - 540
            data = data[starty:starty + 1080, :, :]
        if width > 1920:
            startx = width // 2 - 960
            data = data[:, startx:startx + 1920, :]
        return np.transpose(np.reshape(data, [1, 1080, 1920, 3]), (0, 3, 1, 2))

    img_names = os.listdir('.data/DIV2K')

    def get_rand():
        """Open a random image; return None when it is too small for the crop."""
        filename = f'.data/DIV2K/{random.choice(img_names)}'
        img = Image.open(filename)
        img.load()
        if img.height < 1080 or img.width < 1080 or (img.height < 1920 and img.width < 1920):
            img.close()
            return None
        return img

    x = get_rand()
    # `is None`, not `== None`: identity test for the sentinel.
    while x is None:
        x = get_rand()
return format_loss_train_input_image(x) | [
"os.path.exists",
"os.listdir",
"PIL.Image.open",
"numpy.reshape",
"numpy.asarray",
"os.error",
"numpy.transpose"
] | [((1319, 1344), 'os.listdir', 'os.listdir', (['""".data/DIV2K"""'], {}), "('.data/DIV2K')\n", (1329, 1344), False, 'import os\n'), ((105, 134), 'os.path.exists', 'os.path.exists', (['""".data/DIV2K"""'], {}), "('.data/DIV2K')\n", (119, 134), False, 'import os\n'), ((308, 451), 'os.error', 'os.error', (['"""No DIV2K Training set found in .data/DIV2K directory. Download http://data.vision.ee.ethz.ch/cvl/DIV2K/DIV2K_train_HR.zip"""'], {}), "(\n 'No DIV2K Training set found in .data/DIV2K directory. Download http://data.vision.ee.ethz.ch/cvl/DIV2K/DIV2K_train_HR.zip'\n )\n", (316, 451), False, 'import os\n'), ((638, 669), 'numpy.asarray', 'np.asarray', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (648, 669), True, 'import numpy as np\n'), ((1479, 1499), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (1489, 1499), False, 'from PIL import Image\n'), ((837, 866), 'numpy.transpose', 'np.transpose', (['data', '(1, 0, 2)'], {}), '(data, (1, 0, 2))\n', (849, 866), True, 'import numpy as np\n'), ((1250, 1286), 'numpy.reshape', 'np.reshape', (['data', '[1, 1080, 1920, 3]'], {}), '(data, [1, 1080, 1920, 3])\n', (1260, 1286), True, 'import numpy as np\n')] |
import os.path
from absl import app
from absl import flags
from absl import logging
from typing import Any, Dict
import tensorflow as tf
import tensorflow.keras as keras
import uncertainty_baselines as ub
import uncertainty_metrics as um
import numpy as np
# import sklearn.isotonic
# import sklearn.neural_network
from metrics import BrierScore
from metrics import MMC
from metrics import nll
def one_vs_all_loss_fn(dm_alpha: float = 1., from_logits: bool = True,reduction = tf.keras.losses.Reduction.SUM,one_hot=False):
    """Build the one-vs-all loss closure. Requires from_logits=True to calculate correctly.

    Args:
      dm_alpha: scale applied to the logits (distance-based formulations).
      from_logits: must be True; the loss sigmoids the logits itself.
      reduction: NONE returns the per-example loss, SUM returns the batch mean.
      one_hot: whether labels arrive one-hot encoded (they are argmax-decoded).
    """
    if not from_logits:
        raise ValueError('One-vs-all loss requires inputs to the '
                         'loss function to be logits, not probabilities.')
    def one_vs_all_loss(labels: tf.Tensor, logits: tf.Tensor,reduction=reduction):
        r"""Implements the one-vs-all loss function.
        As implemented in https://arxiv.org/abs/1709.08716, multiplies the output
        logits by dm_alpha (if using a distance-based formulation) before taking K
        independent sigmoid operations of each class logit, and then calculating the
        sum of the log-loss across classes. The loss function is calculated from the
        K sigmoided logits as follows -
        \mathcal{L} = \sum_{i=1}^{K} -\mathbb{I}(y = i) \log p(\hat{y}^{(i)} | x)
        -\mathbb{I} (y \neq i) \log (1 - p(\hat{y}^{(i)} | x))
        Args:
          labels: Integer Tensor of dense labels, shape [batch_size].
          logits: Tensor of shape [batch_size, num_classes].
        Returns:
          A scalar containing the mean over the batch for one-vs-all loss.
        """
        # Small constant keeping the logs finite at probs of exactly 0 or 1.
        eps = 1e-6
        logits = logits * dm_alpha
        if one_hot:
            labels = tf.argmax(labels, axis=-1) #decode one_hot
        one_vs_all_probs = tf.math.sigmoid(logits)
        labels = tf.cast(tf.squeeze(labels), tf.int32)
        row_ids = tf.range(tf.shape(one_vs_all_probs)[0], dtype=tf.int32)
        idx = tf.stack([row_ids, labels], axis=1)
        # Shape of class_probs is [batch_size,].
        class_probs = tf.gather_nd(one_vs_all_probs, idx)
        s1 = tf.math.log(class_probs + eps)
        s2 = tf.reduce_sum(tf.math.log(1. - one_vs_all_probs + eps),axis=-1)
        # s3 cancels the true class's (1 - p) term double-counted in s2.
        s3 = - tf.math.log(1. - class_probs + eps)
        loss = -s1 - s2 - s3
        if reduction == tf.keras.losses.Reduction.NONE:
            return loss
        if reduction == tf.keras.losses.Reduction.SUM:
            return tf.reduce_mean(loss)
        # Previously an unsupported reduction fell through and returned None
        # silently; fail loudly instead. (Also removed the unused n_classes.)
        raise ValueError(f'unsupported reduction={reduction}')
    return one_vs_all_loss
# def one_vs_all_loss_fn(dm_alpha: float = 1., from_logits: bool = True):
# """Requires from_logits=True to calculate correctly."""
# if not from_logits:
# raise ValueError('One-vs-all loss requires inputs to the '
# 'loss function to be logits, not probabilities.')
# def one_vs_all_loss(labels: tf.Tensor, logits: tf.Tensor):
# r"""Implements the one-vs-all loss function.
# As implemented in https://arxiv.org/abs/1709.08716, multiplies the output
# logits by dm_alpha (if using a distance-based formulation) before taking K
# independent sigmoid operations of each class logit, and then calculating the
# sum of the log-loss across classes. The loss function is calculated from the
# K sigmoided logits as follows -
# \mathcal{L} = \sum_{i=1}^{K} -\mathbb{I}(y = i) \log p(\hat{y}^{(i)} | x)
# -\mathbb{I} (y \neq i) \log (1 - p(\hat{y}^{(i)} | x))
# Args:
# labels: Integer Tensor of dense labels, shape [batch_size].
# logits: Tensor of shape [batch_size, num_classes].
# Returns:
# A scalar containing the mean over the batch for one-vs-all loss.
# """
# #eps = tf.keras.backend.epsilon()
# eps = 1e-6
# #eps = 1e-10
# logits = logits * dm_alpha
# n_classes = tf.cast(logits.shape[1], tf.float32)
# one_vs_all_probs = tf.math.sigmoid(logits)
# labels = tf.cast(tf.squeeze(labels), tf.int32)
# row_ids = tf.range(tf.shape(one_vs_all_probs)[0], dtype=tf.int32)
# idx = tf.stack([row_ids, labels], axis=1)
# # Shape of class_probs is [batch_size,].
# class_probs = tf.gather_nd(one_vs_all_probs, idx)
# loss = (
# tf.reduce_mean(tf.math.log(class_probs + eps)) +
# n_classes * tf.reduce_mean(tf.math.log(1. - one_vs_all_probs + eps)) -
# tf.reduce_mean(tf.math.log(1. - class_probs + eps)))
# return -loss
# return one_vs_all_loss
def _calc_certs(probs: tf.Tensor,
                certainty_variant: str = 'partial') -> tf.Tensor:
    """Form the per-class certainties C_i from one-vs-all probabilities.

    C_i is the product over classes j of (probs_j if j == i else 1 - probs_j).

    Args:
      probs: [batch, K] tensor of per-class probabilities.
      certainty_variant: 'partial' (raw products), 'total' (argmax one-hot
        scaled by the row sum) or 'normalized' (row-normalized products).
    Returns:
      [batch, K] tensor of certainties.
    Raises:
      ValueError: on an unknown certainty_variant.
    """
    probs_comp = 1-probs
    K = probs.shape[1]
    cert_list = []
    eye = np.identity(K)
    for i in range(K):
        proj_vec = np.zeros(K)
        proj_vec[i]=1
        proj_mat = np.outer(proj_vec,proj_vec)    # projector onto class i
        # Reuse proj_mat instead of recomputing the same outer product.
        proj_mat_comp = eye-proj_mat
        tproj_mat = tf.constant(proj_mat,dtype=tf.float32)
        tproj_mat_comp = tf.constant(proj_mat_comp,dtype=tf.float32)
        # Row r holds probs[r, i] at column i and 1 - probs[r, j] elsewhere.
        out = tf.tensordot(probs,tproj_mat,axes=1) + tf.tensordot(probs_comp,tproj_mat_comp,axes=1)
        cert_list+=[tf.reduce_prod(out,axis=1)]
    if certainty_variant == 'partial':
        certs = tf.stack(cert_list,axis=1,name='certs')
    elif certainty_variant == 'total':
        certs = tf.stack(cert_list,axis=1)
        certs_argmax = tf.one_hot(tf.argmax(certs,axis=1),depth=K)
        certs_reduce = tf.tile(tf.reduce_sum(certs,axis=1,keepdims=True),[1,K])
        certs = tf.math.multiply(certs_argmax,certs_reduce)
    elif certainty_variant == 'normalized':
        certs = tf.stack(cert_list,axis=1)
        certs_norm = tf.tile(tf.reduce_sum(certs,axis=1,keepdims=True),[1,K])
        certs = tf.math.divide(certs,certs_norm)
    else:
        raise ValueError(f'unknown certainty_variant={certainty_variant}')
    return certs
def _calc_logits_from_certs(certs: tf.Tensor,
                            eps: float = 1e-6) -> tf.Tensor:
    """Recover logits from certainties, pinning the first class logit to zero
    (the gauge choice is arbitrary; eps keeps the log finite)."""
    num_classes = certs.shape[1]
    logcerts = tf.math.log(certs+eps)
    reference = tf.tile(logcerts[:,:1],[1,num_classes])
    # Subtracting the first column makes class 0 the zero-logit reference.
    return logcerts - reference
def _activ(activation_type: str = 'relu'):
    """Return the activation for activation_type ('relu' or 'sin');
    anything unrecognized falls back to a fresh ReLU layer."""
    if activation_type == 'sin':
        return tf.keras.backend.sin
    return tf.keras.layers.ReLU()
class resnetLayer(tf.keras.layers.Layer):
    """Conv2D -> (optional) BatchNorm -> (optional) activation building block."""
    def __init__(self,
                 num_filters: int = 16,
                 kernel_size: int = 3,
                 strides: int = 1,
                 use_activation: bool = True,
                 activation_type: str = 'relu', #relu or sin!
                 use_norm: bool = True,
                 l2_weight: float = 1e-4):
        super(resnetLayer, self).__init__()
        self.use_activation = use_activation
        self.use_norm = use_norm
        # Only attach an L2 regularizer when a non-zero weight is requested.
        self.kernel_regularizer = tf.keras.regularizers.l2(l2_weight) if l2_weight else None
        self.conv_layer = tf.keras.layers.Conv2D(
            num_filters,
            kernel_size=kernel_size,
            strides=strides,
            padding='same',
            kernel_initializer='he_normal',
            kernel_regularizer=self.kernel_regularizer)
        self.batch_norm = tf.keras.layers.BatchNormalization()
        self.activation = _activ(activation_type)

    def call(self,
             inputs: tf.Tensor) -> tf.Tensor:
        """Apply conv, then batch norm / activation when enabled."""
        out = self.conv_layer(inputs)
        if self.use_norm:
            out = self.batch_norm(out)
        if self.use_activation:
            out = self.activation(out)
        return out
class resnet20Block(tf.keras.layers.Layer):
    """Basic two-conv residual block; the first block of each stack after the
    first downsamples by 2 and projects the shortcut with a 1x1 conv."""
    def __init__(self,
                 stack: int,
                 res_block: int,
                 num_filters: int = 16,
                 activation_type: str = 'relu', #relu or sin!
                 l2_weight: float = 1e-4):
        super(resnet20Block, self).__init__()
        self.stack = stack
        self.res_block = res_block
        self.num_filters = num_filters
        self.activation_type = activation_type
        self.l2_weight = l2_weight
        # Downsampling happens only on the first block of stacks 1 and 2.
        downsamples = self.stack > 0 and self.res_block == 0
        strides = 2 if downsamples else 1
        self.l_1 = resnetLayer(num_filters=self.num_filters,
                               strides=strides,
                               l2_weight=self.l2_weight,
                               activation_type=self.activation_type)
        self.l_2 = resnetLayer(num_filters=self.num_filters,
                               l2_weight=self.l2_weight,
                               use_activation=False)
        # 1x1 projection that matches the shortcut's shape when downsampling.
        self.l_3 = resnetLayer(num_filters=self.num_filters,
                               kernel_size=1,
                               strides=strides,
                               l2_weight=self.l2_weight,
                               use_activation=False,
                               use_norm=False)
        self.l_add = tf.keras.layers.Add()
        self.l_activation = _activ(self.activation_type)

    def call(self, inputs: tf.Tensor) -> tf.Tensor:
        residual = self.l_2(self.l_1(inputs))
        if self.stack > 0 and self.res_block == 0:
            shortcut = self.l_3(inputs)
        else:
            shortcut = inputs
        merged = self.l_add([shortcut, residual])
        return self.l_activation(merged)
class DMLayer(tf.keras.layers.Layer):
    """Distance-based 'logit' layer: outputs minus the (root of the) Euclidean
    distance from the input to each of `units` learned prototype vectors, so
    larger outputs mean closer prototypes."""
    def __init__(self, units: int = 10, **kwargs):
        super(DMLayer, self).__init__(**kwargs)
        self.units = units

    def build(self, input_shape):
        self.w = self.add_weight(name='DMLayer_weight',
                                 shape=(input_shape[-1], self.units),
                                 initializer="he_normal",
                                 trainable=True)

    def get_config(self):
        return {"units": self.units}

    def call(self, inputs):
        # Broadcast prototypes [1, D, units] against inputs [B, D, 1] and
        # reduce the Euclidean norm over the feature axis.
        prototypes = tf.expand_dims(self.w, 0)
        features = tf.expand_dims(inputs, -1)
        distances = tf.math.reduce_euclidean_norm(prototypes - features, axis=1)
        return -tf.math.sqrt(distances)
class resnet20(tf.keras.Model):
    """ResNet-20 (v1) for 32x32 inputs that returns a dict with logits,
    probabilities, certainties and certainty-derived logits.

    BUG FIX: the `res_blocks` matrix was built with the comprehension
    dimensions transposed ([num_res_blocks][3] while indexed as
    [stack][res_block]); it only worked because num_res_blocks happens to
    equal 3 for depth 20. Built as [3][num_res_blocks] now, matching the
    resnet50 class.
    """
    def __init__(self,
                 batch_size: int = 128,
                 l2_weight: float = 0.0,
                 activation_type: str = 'relu', #relu or sin
                 certainty_variant: str = 'partial', #partial, total or normalized
                 model_variant: str = '1vsall', #1vsall or vanilla
                 logit_variant: str = 'affine', #affine or dm
                 **params):
        super(resnet20,self).__init__()
        self.batch_size = batch_size
        self.l2_weight = l2_weight
        # Validate configuration flags up front so typos fail fast.
        if activation_type in ['sin','relu']:
            self.activation_type = activation_type
        else:
            raise ValueError(f'unknown activation_type={activation_type}')
        if certainty_variant in ['partial','total','normalized']:
            self.certainty_variant = certainty_variant
        else:
            raise ValueError(f'unknown certainty_variant={certainty_variant}')
        if model_variant in ['1vsall','vanilla']:
            self.model_variant = model_variant
        else:
            raise ValueError(f'unknown model_variant={model_variant}')
        if logit_variant in ['affine','dm']:
            self.logit_variant = logit_variant
        else:
            raise ValueError(f'unknown logit_variant={logit_variant}')
        self.depth = 20
        self.num_res_blocks = int((self.depth - 2) / 6)
        num_filters = 16
        self.layer_init_1 = tf.keras.layers.InputLayer(input_shape=(32, 32, 3),
                                                      batch_size=self.batch_size)
        self.layer_init_2 = resnetLayer(num_filters=num_filters,
                                        l2_weight=self.l2_weight,
                                        activation_type=self.activation_type)
        # [3][num_res_blocks], indexed as res_blocks[stack][res_block].
        self.res_blocks = [[0 for res_block in range(self.num_res_blocks)] for stack in range(3)]
        for stack in range(3):
            for res_block in range(self.num_res_blocks):
                self.res_blocks[stack][res_block] = resnet20Block(stack = stack,
                                                                  res_block = res_block,
                                                                  num_filters = num_filters,
                                                                  activation_type = self.activation_type,
                                                                  l2_weight = self.l2_weight)
            num_filters *= 2
        self.layer_final_1 = tf.keras.layers.AveragePooling2D(pool_size=8)
        self.layer_final_2 = tf.keras.layers.Flatten()
        if self.logit_variant == 'dm':
            self.layer_final_3 = DMLayer(units=10)
        elif self.logit_variant == 'affine':
            self.layer_final_3 = tf.keras.layers.Dense(10, kernel_initializer='he_normal')
        else:
            raise ValueError(f'unknown logit_variant={self.logit_variant}')
    def call(self,
             inputs: tf.Tensor,
             trainable: bool = False) -> dict:
        """Forward pass; returns logits, probs, certs and logits_from_certs.

        NOTE(review): the second parameter is named `trainable` rather than
        Keras' conventional `training`; renaming would change the interface,
        so it is kept as-is.
        """
        x = self.layer_init_1(inputs)
        x = self.layer_init_2(x)
        for stack in range(3):
            for res_block in range(self.num_res_blocks):
                x = self.res_blocks[stack][res_block](x)
        x = self.layer_final_1(x)
        x = self.layer_final_2(x)
        logits = self.layer_final_3(x)
        if self.model_variant == '1vsall':
            probs = tf.math.sigmoid(logits)
            if self.logit_variant == 'dm':
                # DMLayer logits are <= 0, so sigmoid lands in (0, 0.5];
                # doubling rescales the range to (0, 1].
                probs = 2*probs
        elif self.model_variant == 'vanilla':
            probs = tf.math.softmax(logits,axis=-1)
        else:
            raise ValueError(f'unknown model_variant={self.model_variant}')
        certs = _calc_certs(probs, certainty_variant = self.certainty_variant)
        logits_from_certs = _calc_logits_from_certs(certs = certs)
        return {'logits':logits,'probs':probs,'certs':certs,'logits_from_certs':logits_from_certs}
    def train_step(self, data):
        """Standard custom training step over the compiled loss/metrics."""
        # Unpack the data. Its structure depends on your model and
        # on what you pass to `fit()`.
        x, y = data
        with tf.GradientTape() as tape:
            y_pred = self(x, training=True)  # Forward pass
            # Compute the loss value
            # (the loss function is configured in `compile()`)
            loss = self.compiled_loss(y, y_pred, regularization_losses=self.losses)
        # Compute gradients
        trainable_vars = self.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)
        # Update weights
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        # Update metrics (includes the metric that tracks the loss)
        self.compiled_metrics.update_state(y, y_pred)
        # Return a dict mapping metric names to current value
        return {m.name: m.result() for m in self.metrics}
class resnet50Block(tf.keras.layers.Layer):
    """Bottleneck residual block (1x1 -> 3x3 -> 1x1 with 4x channel expansion);
    the first block of each stack projects the shortcut with a 1x1 conv."""
    def __init__(self,
                 stack: int,
                 res_block: int,
                 num_filters: int = 16,
                 activation_type: str = 'relu', #relu or sin!
                 l2_weight: float = 1e-4):
        super(resnet50Block, self).__init__()
        self.stack = stack
        self.res_block = res_block
        self.num_filters = num_filters
        self.activation_type = activation_type
        self.l2_weight = l2_weight
        # Stacks after the first downsample on their first block.
        downsamples = self.stack > 0 and self.res_block == 0
        strides = 2 if downsamples else 1
        self.l_1 = resnetLayer(num_filters=self.num_filters,
                               kernel_size=1,
                               strides=strides,
                               l2_weight=self.l2_weight,
                               activation_type=self.activation_type)
        self.l_2 = resnetLayer(num_filters=self.num_filters,
                               kernel_size=3,
                               l2_weight=self.l2_weight,
                               activation_type=self.activation_type)
        self.l_3 = resnetLayer(num_filters=4*self.num_filters,
                               kernel_size=1,
                               l2_weight=self.l2_weight,
                               use_activation=False,
                               use_norm=True)
        # Projection shortcut used on the first block of every stack.
        self.l_4 = resnetLayer(num_filters=4*self.num_filters,
                               kernel_size=1,
                               strides=strides,
                               l2_weight=self.l2_weight,
                               use_activation=False)
        self.l_add = tf.keras.layers.Add()
        self.l_activation = _activ(self.activation_type)

    def call(self, inputs: tf.Tensor) -> tf.Tensor:
        residual = self.l_3(self.l_2(self.l_1(inputs)))
        shortcut = self.l_4(inputs) if self.res_block == 0 else inputs
        merged = self.l_add([shortcut, residual])
        return self.l_activation(merged)
#agreed with tf.keras.applications.ResNet50
class resnet50(tf.keras.Model):
    """ResNet-50 for 224x224 inputs; returns a dict with logits, probabilities,
    certainties and certainty-derived logits (same output schema as resnet20)."""
    def __init__(self,
                 batch_size: int = 128,
                 l2_weight: float = 0.0,
                 activation_type: str = 'relu', #relu or sin
                 certainty_variant: str = 'partial', #partial, total or normalized
                 model_variant: str = '1vsall', #1vsall or vanilla
                 logit_variant: str = 'affine', #affine or dm
                 **params):
        super(resnet50,self).__init__()
        self.batch_size = batch_size
        self.l2_weight = l2_weight
        # Validate configuration flags up front so typos fail fast.
        if activation_type in ['sin','relu']:
            self.activation_type = activation_type
        else:
            raise ValueError(f'unknown activation_type={activation_type}')
        if certainty_variant in ['partial','total','normalized']:
            self.certainty_variant = certainty_variant
        else:
            raise ValueError(f'unknown certainty_variant={certainty_variant}')
        if model_variant in ['1vsall','vanilla']:
            self.model_variant = model_variant
        else:
            raise ValueError(f'unknown model_variant={model_variant}')
        if logit_variant in ['affine','dm']:
            self.logit_variant = logit_variant
        else:
            raise ValueError(f'unknown logit_variant={logit_variant}')
        # self.num_res_blocks = int((self.depth - 2) / 6)
        # Blocks per stage, as in the standard ResNet-50 layout.
        self.num_res_blocks = [3,4,6,3]
        num_filters = 64
        self.layer_init_1 = tf.keras.layers.InputLayer(input_shape=(224, 224, 3),
                                                      batch_size=self.batch_size)
        # Stem: 7x7/2 conv followed by 3x3/2 max-pool.
        self.layer_init_2 = resnetLayer(num_filters=num_filters,
                                        kernel_size=7,
                                        strides=2,
                                        l2_weight=self.l2_weight,
                                        activation_type=self.activation_type)
        self.layer_init_3 = tf.keras.layers.MaxPooling2D(pool_size=3,strides=2)
        #self.res_blocks = [[0 for stack in range(4)] for res_block in range(max(self.num_res_blocks))]
        # [4][max blocks], indexed as res_blocks[stack][res_block].
        self.res_blocks = [[0 for res_block in range(max(self.num_res_blocks))] for stack in range(4)]
        for stack in range(4):
            for res_block in range(self.num_res_blocks[stack]):
                self.res_blocks[stack][res_block] = resnet50Block(stack = stack,
                                                                  res_block = res_block,
                                                                  num_filters = num_filters,
                                                                  activation_type = self.activation_type,
                                                                  l2_weight = self.l2_weight)
            num_filters *= 2
        self.layer_final_1 = tf.keras.layers.AveragePooling2D(7)
        self.layer_final_2 = tf.keras.layers.Flatten()
        # Head: either a distance-based (DM) or an affine 1000-way classifier.
        if self.logit_variant == 'dm':
            self.layer_final_3 = DMLayer(units=1000)
        elif self.logit_variant == 'affine':
            self.layer_final_3 = tf.keras.layers.Dense(1000, kernel_initializer='he_normal')
        else:
            raise ValueError(f'unknown logit_variant={self.logit_variant}')
    def call(self,
             inputs: tf.Tensor,
             trainable: bool = False) -> dict:
        """Forward pass; returns logits, probs, certs and logits_from_certs.

        NOTE(review): the second parameter is named `trainable` rather than
        Keras' conventional `training`; kept as-is to preserve the interface.
        """
        x = self.layer_init_1(inputs)
        x = self.layer_init_2(x)
        x = self.layer_init_3(x)
        for stack in range(4):
            for res_block in range(self.num_res_blocks[stack]):
                x = self.res_blocks[stack][res_block](x)
        x = self.layer_final_1(x)
        x = self.layer_final_2(x)
        logits = self.layer_final_3(x)
        if self.model_variant == '1vsall':
            probs = tf.math.sigmoid(logits)
            if self.logit_variant == 'dm':
                # DMLayer logits are <= 0, so sigmoid lands in (0, 0.5];
                # doubling rescales the range to (0, 1].
                probs = 2*probs
        elif self.model_variant == 'vanilla':
            probs = tf.math.softmax(logits,axis=-1)
        else:
            raise ValueError(f'unknown model_variant={self.model_variant}')
        certs = _calc_certs(probs, certainty_variant = self.certainty_variant)
        logits_from_certs = _calc_logits_from_certs(certs = certs)
        return {'logits':logits,'probs':probs,'certs':certs,'logits_from_certs':logits_from_certs}
    def train_step(self, data):
        """Standard custom training step over the compiled loss/metrics."""
        # Unpack the data. Its structure depends on your model and
        # on what you pass to `fit()`.
        x, y = data
        with tf.GradientTape() as tape:
            y_pred = self(x, training=True)  # Forward pass
            # Compute the loss value
            # (the loss function is configured in `compile()`)
            loss = self.compiled_loss(y, y_pred, regularization_losses=self.losses)
        # Compute gradients
        trainable_vars = self.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)
        # Update weights
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        # Update metrics (includes the metric that tracks the loss)
        self.compiled_metrics.update_state(y, y_pred)
        # Return a dict mapping metric names to current value
        #garbage collection
        return {m.name: m.result() for m in self.metrics}
class dummymodel(tf.keras.Model):
    """Minimal flatten->dense stand-in model emitting the same output-dict
    schema as the real networks (certs/logits_from_certs are aliases)."""
    def __init__(self,
                 batch_size:int = 128,
                 **params):
        super(dummymodel, self).__init__()
        self.batch_size = batch_size
        self.layer_1 = tf.keras.layers.InputLayer(input_shape=(224, 224, 3),
                                                  batch_size=self.batch_size)
        self.layer_2 = tf.keras.layers.Flatten()
        self.layer_3 = tf.keras.layers.Dense(1000, kernel_initializer='he_normal')

    def call(self,
             inputs: tf.Tensor,
             trainable: bool = False) -> dict:
        flattened = self.layer_2(self.layer_1(inputs))
        logits = self.layer_3(flattened)
        probs = tf.math.sigmoid(logits)
        # Alias certs/logits_from_certs to keep the dict schema uniform.
        return {'logits':logits,'probs':probs,'certs':probs,'logits_from_certs':logits}
# def create_model(batch_size: int,
# l2_weight: float = 0.0,
# activation_type: str = 'relu', #relu or sine
# certainty_variant: str = 'partial', # total, partial or normalized
# model_variant: str = '1vsall', #1vsall or vanilla
# logit_variant: str = 'affine', #affine or dm
# **unused_kwargs: Dict[str, Any]) -> tf.keras.models.Model:
# return resnet20(batch_size=batch_size,
# l2_weight=l2_weight,
# activation_type=activation_type,
# certainty_variant=certainty_variant,
# model_variant=model_variant,
# logit_variant=logit_variant)
# based on um.numpy.plot_diagram, um.numpy.reliability_diagram
def _extract_conf_acc(probs,labels,bins=0,one_hot=False):
    """Sliding-window (confidence, accuracy) extraction, based on
    um.numpy.plot_diagram / reliability_diagram.

    Args:
      probs: [N, K] predicted probabilities.
      labels: dense int labels [N], or one-hot [N, K] when one_hot=True.
      bins: if > 0, subsample the curves down to roughly `bins` points.
      one_hot: whether `labels` is already one-hot encoded.
    Returns:
      (confidences, accuracies): per-window mean lists.

    Changes vs. the original: removed the dead `distances` and
    `calibration_errors` accumulators (computed but never used), and guarded
    the subsampling step against a zero stride when there are fewer windows
    than bins.
    """
    probs = np.array(probs)
    labels = np.array(labels)
    if not one_hot:
        labels_matrix = um.numpy.visualization.one_hot_encode(labels, probs.shape[1])
    else:
        labels_matrix = labels
    probs = probs.flatten()
    labels = labels_matrix.flatten()
    probs_labels = [(prob, labels[i]) for i, prob in enumerate(probs)]
    probs_labels = np.array(sorted(probs_labels, key=lambda x: x[0]))
    window_len = int(len(labels)/100.)
    confidences = []
    accuracies = []
    for i in range(len(probs_labels)-window_len):
        # It's pretty sketchy to look for the 100 datapoints around this one.
        # They could be anywhere in the probability simplex. This introduces bias.
        mean_confidences = um.numpy.visualization.mean(probs_labels[i:i + window_len, 0])
        confidences.append(mean_confidences)
        class_accuracies = um.numpy.visualization.mean(probs_labels[i:i + window_len, 1])
        accuracies.append(class_accuracies)
    if bins>0:
        # max(1, ...) avoids a zero slice step with fewer windows than bins.
        delta = max(1, int((len(probs_labels)-window_len)/bins))
        return confidences[::delta],accuracies[::delta]
    return confidences, accuracies
# nonlinear calibration
class calLayer(tf.keras.layers.Layer):
    """Nonlinear calibration layer: maps each scalar input x to
    sum_j softmax(W1)_j * (x + eps)**alpha_j over a bank of power basis
    functions with exponents alpha_j = exp(basis_exponents_j).

    NOTE(review): the list defaults for basis_params/basis_list are mutable
    default arguments — harmless as long as callers never mutate them.
    """
    def __init__(self,
                 basis_type: str = 'uniform',
                 basis_params: list = [-20,20,20],
                 basis_list: list = [-2,-1,0,1,2],
                 train_basis=True):
        super(calLayer,self).__init__()
        self.basis_type = basis_type
        self.basis_params = basis_params
        self.basis_list = basis_list
        self.train_basis = train_basis
    def build(self, input_shape):
        # Materialize the basis exponents and the (optionally trainable)
        # alphas, plus the mixing weights W1.
        # if input_shape[-1]!=1:
        #     raise ValueError('input_shape != 1')
        if self.basis_type=='uniform':
            self.basis_exponents = np.linspace(*self.basis_params)
        else:
            self.basis_exponents = self.basis_list
        self.basis_exponents = tf.convert_to_tensor(self.basis_exponents,dtype=tf.float32)
        # exp keeps the power-law exponents strictly positive.
        self.alphas = tf.exp(self.basis_exponents)
        #self.alphas = tf.cast(self.alphas,dtype=tf.float32)
        self.alphas = tf.Variable(name='calLayer_alphas',
                                   initial_value=self.alphas,
                                   trainable=self.train_basis)
        self.W1 = self.add_weight(name='calLayer_weights',
                                  shape=(len(self.basis_exponents),),
                                  initializer="he_normal",
                                  trainable=True)
    def get_config(self):
        return {"basis_type": self.basis_type,
                "basis_params": self.basis_params,
                "basis_list": self.basis_list,
                "train_basis": self.train_basis}
    def call(self,inputs):
        # Flatten to a column, mix the power features, restore the shape.
        inputs_shape = tf.shape(inputs)
        inputs_r = tf.reshape(inputs,shape=(-1,1))
        # softmax keeps the mixture weights positive and summing to one.
        self.beta = tf.nn.softmax(self.W1)
        #print(self.alphas)
        eps = 1e-10
        x_alpha = tf.pow(inputs_r+eps,self.alphas)
        out = tf.reduce_sum(self.beta*x_alpha,axis=-1)
        return tf.reshape(out,shape=inputs_shape)
def _form_cal_dataset(uncal_model:tf.keras.Model,
                      output_name:str,
                      train_dataset,
                      dataset_bins:int,
                      steps:int,
                      append_random:bool = False,
                      random_frac:float = 0.1):
    """Build a calibration dataset of (confidence, accuracy) tensors from the
    outputs of `uncal_model` over `train_dataset`.

    Args:
      uncal_model: model whose dict output contains `output_name`.
      output_name: which model output to calibrate (e.g. 'certs').
      train_dataset: iterable of (x, y) batches.
      dataset_bins: bin count forwarded to _extract_conf_acc.
      steps: number of batches to consume.
      append_random: also append uniform-noise inputs (labeled all-ones) —
        presumably to anchor the calibration map on out-of-distribution
        confidences; confirm intent with the caller.
      random_frac: fraction of extra random examples relative to steps*batch.
    Returns:
      dict with 'x' (confidences) and 'y' (accuracies) float32 tensors.
    """
    cal_dataset = dict()
    #FLAGS = exp1.FLAGS
    #self.cal_dataset = train_dataset
    #dataset = exp1.datasets['cifar10']['val']
    labels = np.empty(0)
    probs = None
    # Accumulate model outputs and labels over the first `steps` batches.
    for i,(x,y) in enumerate(train_dataset):
        if i>steps: break
        out = uncal_model(x)[output_name].numpy()
        labels = np.append(labels,y.numpy().astype('int32'))
        probs = out if type(probs)==type(None) else np.concatenate((probs,out))
    if append_random:
        print(labels.shape,probs.shape)
        random_frac = random_frac
        random_mean = 0.5
        random_std = 0.33
        # Infer the batch size from the first batch of the dataset.
        batch_size = next(iter(train_dataset))[0].shape[0]
        val_examples = steps*batch_size
        random_size = int(val_examples*random_frac)
        #random_x = np.sqrt(random_std)*np.random.randn(random_size,32,32,3) + random_mean
        random_x = np.random.rand(random_size,32,32,3)
        random_probs = uncal_model(random_x)[output_name].numpy()
        #random_labels_onehot = np.zeros(shape=(random_size,random_probs.shape[1]))
        # Random inputs are labeled all-ones across classes.
        random_labels_onehot = np.ones(shape=(random_size,random_probs.shape[1]))
        #random_labels_onehot = np.random.binomial(1,0.5,size=(random_size,random_probs.shape[1]))
        #random_labels_onehot = np.ones(shape=(random_size,random_probs.shape[1]))
        #random_labels_onehot = np.random.randint(low=0,high=2,size=(random_size,random_probs.shape[1])).astype('float32')
        labels_onehot = um.numpy.visualization.one_hot_encode(labels.astype('int32'), probs.shape[1])
        # print(labels_onehot.shape)
        # print(random_labels_onehot.shape)
        # print(labels_onehot.dtype)
        # print(random_labels_onehot.dtype)
        # print(random_labels_onehot2.dtype)
        # print(random_labels_onehot[0])
        # print(random_labels_onehot2[0])
        labels_onehot = np.concatenate((labels_onehot,random_labels_onehot))
        probs = np.concatenate((probs,random_probs))
        print(labels_onehot.shape,probs.shape)
        confidences, accuracies = _extract_conf_acc(probs=probs,
                                                    labels=labels_onehot,
                                                    bins=dataset_bins,
                                                    one_hot=True)
    else:
        confidences, accuracies = _extract_conf_acc(probs=probs,
                                                    labels=labels.astype('int32'),
                                                    bins=dataset_bins,
                                                    one_hot=False)
    cal_dataset['x'] = tf.convert_to_tensor(confidences,dtype=tf.float32)
    cal_dataset['y'] = tf.convert_to_tensor(accuracies,dtype=tf.float32)
    return cal_dataset
class nonlin_calibrator(tf.keras.Model):
    """Wraps a single calLayer so it can be trained and applied as a
    standalone calibration model."""
    def __init__(self,
                 basis_type: str = 'uniform',
                 basis_params: list = [-20,20,10],
                 basis_list: list = [-2,-1,0,1,2],
                 train_basis: bool = True):
        super(nonlin_calibrator, self).__init__()
        # All configuration is forwarded verbatim to the calibration layer.
        self.layer = calLayer(basis_type = basis_type,
                              basis_params = basis_params,
                              basis_list = basis_list,
                              train_basis = train_basis)

    def call(self,
             inputs: tf.Tensor,
             training: bool = False) -> tf.Tensor:
        return self.layer(inputs)
class cal_model(tf.keras.Model):
    """Chains an uncalibrated model with a calibrator applied to one named
    entry of the model's output dict."""
    def __init__(self,
                 uncal_model: tf.keras.Model,
                 calibrator: tf.keras.Model,
                 output_name: str):
        super(cal_model, self).__init__()
        #self.inp = tf.keras.layers.Input(shape=(32,32,3))
        self.uncal_model = uncal_model
        self.calibrator = calibrator
        self.output_name = output_name

    def call(self,
             inputs:tf.Tensor):
        raw = self.uncal_model(inputs)[self.output_name]
        calibrated = self.calibrator(raw)
        return {self.output_name: calibrated}
def calibrate_model_nonlin(model,
                           dataset,
                           FLAGS,
                           output='certs',
                           epochs=10000,
                           verbose=False,
                           bins=4000,
                           basis_type='uniform', # or list
                           basis_params=(-20, 20, 60), # (start, stop, num) for np.linspace
                           basis_list = [-2,-1,0,1,2]
                           ):
    """Fit a softmax-weighted power-basis calibration map
    y ~= sum_j softmax(W1)_j * x**exp(a_j) on (confidence, accuracy) pairs
    extracted from `model`, and return (calibrated_model, W1).

    BUG FIX: basis_params previously defaulted to the SET literal {-20,20,60};
    set iteration order is not the written order, so np.linspace(*basis_params)
    could receive a scrambled (start, stop, num) triple — `num` could even come
    out negative and crash. A tuple preserves the intended order.
    """
    def feature_create(x,basis_exponents):
        # Power features x**exp(a_j), one column per basis exponent.
        x_feat = tf.tile(tf.reshape(x,shape=(-1,1)),[1,len(basis_exponents)])
        be_tf = tf.convert_to_tensor(basis_exponents,dtype=tf.float32)
        return tf.pow(x_feat,tf.exp(be_tf))
    def cal_out(W1,x,basis_exponents):
        # Softmax-normalized mixture of the power features; preserves x's shape.
        size = len(basis_exponents)
        x_shape = tf.shape(x)
        xr = tf.reshape(x,shape=(-1,))
        W1_tile = tf.tile(tf.reshape(tf.nn.softmax(W1),[1,size]),[tf.shape(xr)[0],1])
        x_feat = feature_create(xr,basis_exponents)
        out = tf.reduce_sum(W1_tile*x_feat,axis=-1)
        return tf.reshape(out,shape=x_shape)
    def cost(W1, x, y):
        # MSE between calibrated confidences and observed accuracies.
        yhats = cal_out(W1,x,basis_exponents)
        cost_value = tf.keras.losses.MSE(y_true=y,
                                       y_pred=yhats)
        return cost_value
    def grad(W1, x, y):
        with tf.GradientTape() as tape:
            cost_value = cost(W1, x, y)
        return cost_value, tape.gradient(cost_value, W1)
    if basis_type=='uniform':
        basis_exponents = np.linspace(*basis_params)
    else:
        basis_exponents = basis_list
    W1 = tf.Variable(tf.random.normal(shape=(len(basis_exponents),)))
    optimizer = ub.optimizers.get(optimizer_name='adam',
                                  learning_rate_schedule='constant',
                                  learning_rate=0.1,
                                  weight_decay=None)
    #number of classes
    K = FLAGS['no_classes']
    labels = np.empty(0)
    probs = np.empty((0,K))
    # Collect model outputs over the configured number of validation steps.
    for i,(x,y) in enumerate(dataset):
        if i>FLAGS['validation_steps']: break
        out = model(x)[output].numpy()
        labels = np.append(labels,y.numpy().astype('int32'))
        probs = np.concatenate((probs,out))
    confidences, accuracies = _extract_conf_acc(probs=probs,labels=labels.astype('int32'),bins=bins)
    X_train = tf.convert_to_tensor(confidences,dtype=tf.float32)
    y_train = tf.convert_to_tensor(accuracies,dtype=tf.float32)
    # Full-batch gradient descent on the calibration objective.
    for i in range(epochs):
        train_cost, grads = grad(W1,X_train,y_train)
        optimizer.apply_gradients(zip([grads], [W1]))
    def model_return():
        # Wrap `model` so its dict output also carries the calibrated head.
        inp = tf.keras.layers.Input(shape=(32,32,3))
        out_model = model(inp)
        out_calibr = cal_out(W1,out_model[output],basis_exponents=basis_exponents)
        out_model[output+'_cal'] = out_calibr
        return tf.keras.Model(inputs=inp,outputs=out_model)
return model_return(),W1 | [
"tensorflow.keras.losses.MSE",
"tensorflow.tile",
"tensorflow.shape",
"numpy.random.rand",
"tensorflow.math.log",
"tensorflow.reduce_sum",
"uncertainty_baselines.optimizers.get",
"tensorflow.math.divide",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.GradientTape",
"numpy.array",
"... | [((6352, 6376), 'tensorflow.math.log', 'tf.math.log', (['(certs + eps)'], {}), '(certs + eps)\n', (6363, 6376), True, 'import tensorflow as tf\n'), ((25718, 25733), 'numpy.array', 'np.array', (['probs'], {}), '(probs)\n', (25726, 25733), True, 'import numpy as np\n'), ((25747, 25763), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (25755, 25763), True, 'import numpy as np\n'), ((29858, 29869), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (29866, 29869), True, 'import numpy as np\n'), ((32353, 32404), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['confidences'], {'dtype': 'tf.float32'}), '(confidences, dtype=tf.float32)\n', (32373, 32404), True, 'import tensorflow as tf\n'), ((32427, 32477), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['accuracies'], {'dtype': 'tf.float32'}), '(accuracies, dtype=tf.float32)\n', (32447, 32477), True, 'import tensorflow as tf\n'), ((35635, 35752), 'uncertainty_baselines.optimizers.get', 'ub.optimizers.get', ([], {'optimizer_name': '"""adam"""', 'learning_rate_schedule': '"""constant"""', 'learning_rate': '(0.1)', 'weight_decay': 'None'}), "(optimizer_name='adam', learning_rate_schedule='constant',\n learning_rate=0.1, weight_decay=None)\n", (35652, 35752), True, 'import uncertainty_baselines as ub\n'), ((35929, 35940), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (35937, 35940), True, 'import numpy as np\n'), ((35953, 35969), 'numpy.empty', 'np.empty', (['(0, K)'], {}), '((0, K))\n', (35961, 35969), True, 'import numpy as np\n'), ((36325, 36376), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['confidences'], {'dtype': 'tf.float32'}), '(confidences, dtype=tf.float32)\n', (36345, 36376), True, 'import tensorflow as tf\n'), ((36390, 36440), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['accuracies'], {'dtype': 'tf.float32'}), '(accuracies, dtype=tf.float32)\n', (36410, 36440), True, 'import tensorflow as tf\n'), ((1802, 1838), 'tensorflow.cast', 'tf.cast', 
(['logits.shape[1]', 'tf.float32'], {}), '(logits.shape[1], tf.float32)\n', (1809, 1838), True, 'import tensorflow as tf\n'), ((1960, 1983), 'tensorflow.math.sigmoid', 'tf.math.sigmoid', (['logits'], {}), '(logits)\n', (1975, 1983), True, 'import tensorflow as tf\n'), ((2127, 2162), 'tensorflow.stack', 'tf.stack', (['[row_ids, labels]'], {'axis': '(1)'}), '([row_ids, labels], axis=1)\n', (2135, 2162), True, 'import tensorflow as tf\n'), ((2235, 2270), 'tensorflow.gather_nd', 'tf.gather_nd', (['one_vs_all_probs', 'idx'], {}), '(one_vs_all_probs, idx)\n', (2247, 2270), True, 'import tensorflow as tf\n'), ((2293, 2323), 'tensorflow.math.log', 'tf.math.log', (['(class_probs + eps)'], {}), '(class_probs + eps)\n', (2304, 2323), True, 'import tensorflow as tf\n'), ((5023, 5034), 'numpy.zeros', 'np.zeros', (['K'], {}), '(K)\n', (5031, 5034), True, 'import numpy as np\n'), ((5076, 5104), 'numpy.outer', 'np.outer', (['proj_vec', 'proj_vec'], {}), '(proj_vec, proj_vec)\n', (5084, 5104), True, 'import numpy as np\n'), ((5191, 5230), 'tensorflow.constant', 'tf.constant', (['proj_mat'], {'dtype': 'tf.float32'}), '(proj_mat, dtype=tf.float32)\n', (5202, 5230), True, 'import tensorflow as tf\n'), ((5255, 5299), 'tensorflow.constant', 'tf.constant', (['proj_mat_comp'], {'dtype': 'tf.float32'}), '(proj_mat_comp, dtype=tf.float32)\n', (5266, 5299), True, 'import tensorflow as tf\n'), ((5503, 5544), 'tensorflow.stack', 'tf.stack', (['cert_list'], {'axis': '(1)', 'name': '"""certs"""'}), "(cert_list, axis=1, name='certs')\n", (5511, 5544), True, 'import tensorflow as tf\n'), ((6384, 6416), 'tensorflow.tile', 'tf.tile', (['logcerts[:, :1]', '[1, K]'], {}), '(logcerts[:, :1], [1, K])\n', (6391, 6416), True, 'import tensorflow as tf\n'), ((6605, 6627), 'tensorflow.keras.layers.ReLU', 'tf.keras.layers.ReLU', ([], {}), '()\n', (6625, 6627), True, 'import tensorflow as tf\n'), ((7519, 7697), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['num_filters'], {'kernel_size': 
'kernel_size', 'strides': 'strides', 'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""', 'kernel_regularizer': 'self.kernel_regularizer'}), "(num_filters, kernel_size=kernel_size, strides=\n strides, padding='same', kernel_initializer='he_normal',\n kernel_regularizer=self.kernel_regularizer)\n", (7541, 7697), True, 'import tensorflow as tf\n'), ((7969, 8005), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (8003, 8005), True, 'import tensorflow as tf\n'), ((9899, 9920), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (9918, 9920), True, 'import tensorflow as tf\n'), ((11076, 11101), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.w', '(0)'], {}), '(self.w, 0)\n', (11090, 11101), True, 'import tensorflow as tf\n'), ((11108, 11134), 'tensorflow.expand_dims', 'tf.expand_dims', (['inputs', '(-1)'], {}), '(inputs, -1)\n', (11122, 11134), True, 'import tensorflow as tf\n'), ((12755, 12834), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': '(32, 32, 3)', 'batch_size': 'self.batch_size'}), '(input_shape=(32, 32, 3), batch_size=self.batch_size)\n', (12781, 12834), True, 'import tensorflow as tf\n'), ((13834, 13879), 'tensorflow.keras.layers.AveragePooling2D', 'tf.keras.layers.AveragePooling2D', ([], {'pool_size': '(8)'}), '(pool_size=8)\n', (13866, 13879), True, 'import tensorflow as tf\n'), ((13909, 13934), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (13932, 13934), True, 'import tensorflow as tf\n'), ((18072, 18093), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (18091, 18093), True, 'import tensorflow as tf\n'), ((20068, 20154), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': '(224, 224, 3)', 'batch_size': 'self.batch_size'}), '(input_shape=(224, 224, 3), batch_size=self.\n batch_size)\n', (20094, 20154), True, 'import tensorflow as 
tf\n'), ((20558, 20610), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(3)', 'strides': '(2)'}), '(pool_size=3, strides=2)\n', (20586, 20610), True, 'import tensorflow as tf\n'), ((21466, 21501), 'tensorflow.keras.layers.AveragePooling2D', 'tf.keras.layers.AveragePooling2D', (['(7)'], {}), '(7)\n', (21498, 21501), True, 'import tensorflow as tf\n'), ((21531, 21556), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (21554, 21556), True, 'import tensorflow as tf\n'), ((24204, 24290), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': '(224, 224, 3)', 'batch_size': 'self.batch_size'}), '(input_shape=(224, 224, 3), batch_size=self.\n batch_size)\n', (24230, 24290), True, 'import tensorflow as tf\n'), ((24364, 24389), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (24387, 24389), True, 'import tensorflow as tf\n'), ((24413, 24472), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1000)'], {'kernel_initializer': '"""he_normal"""'}), "(1000, kernel_initializer='he_normal')\n", (24434, 24472), True, 'import tensorflow as tf\n'), ((24694, 24717), 'tensorflow.math.sigmoid', 'tf.math.sigmoid', (['logits'], {}), '(logits)\n', (24709, 24717), True, 'import tensorflow as tf\n'), ((25808, 25869), 'uncertainty_metrics.numpy.visualization.one_hot_encode', 'um.numpy.visualization.one_hot_encode', (['labels', 'probs.shape[1]'], {}), '(labels, probs.shape[1])\n', (25845, 25869), True, 'import uncertainty_metrics as um\n'), ((26851, 26913), 'uncertainty_metrics.numpy.visualization.mean', 'um.numpy.visualization.mean', (['probs_labels[i:i + window_len, 0]'], {}), '(probs_labels[i:i + window_len, 0])\n', (26878, 26913), True, 'import uncertainty_metrics as um\n'), ((26986, 27048), 'uncertainty_metrics.numpy.visualization.mean', 'um.numpy.visualization.mean', (['probs_labels[i:i + window_len, 1]'], {}), '(probs_labels[i:i + 
window_len, 1])\n', (27013, 27048), True, 'import uncertainty_metrics as um\n'), ((28209, 28269), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['self.basis_exponents'], {'dtype': 'tf.float32'}), '(self.basis_exponents, dtype=tf.float32)\n', (28229, 28269), True, 'import tensorflow as tf\n'), ((28291, 28319), 'tensorflow.exp', 'tf.exp', (['self.basis_exponents'], {}), '(self.basis_exponents)\n', (28297, 28319), True, 'import tensorflow as tf\n'), ((28415, 28510), 'tensorflow.Variable', 'tf.Variable', ([], {'name': '"""calLayer_alphas"""', 'initial_value': 'self.alphas', 'trainable': 'self.train_basis'}), "(name='calLayer_alphas', initial_value=self.alphas, trainable=\n self.train_basis)\n", (28426, 28510), True, 'import tensorflow as tf\n'), ((29088, 29104), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (29096, 29104), True, 'import tensorflow as tf\n'), ((29124, 29157), 'tensorflow.reshape', 'tf.reshape', (['inputs'], {'shape': '(-1, 1)'}), '(inputs, shape=(-1, 1))\n', (29134, 29157), True, 'import tensorflow as tf\n'), ((29176, 29198), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.W1'], {}), '(self.W1)\n', (29189, 29198), True, 'import tensorflow as tf\n'), ((29265, 29300), 'tensorflow.pow', 'tf.pow', (['(inputs_r + eps)', 'self.alphas'], {}), '(inputs_r + eps, self.alphas)\n', (29271, 29300), True, 'import tensorflow as tf\n'), ((29312, 29355), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(self.beta * x_alpha)'], {'axis': '(-1)'}), '(self.beta * x_alpha, axis=-1)\n', (29325, 29355), True, 'import tensorflow as tf\n'), ((29377, 29412), 'tensorflow.reshape', 'tf.reshape', (['out'], {'shape': 'inputs_shape'}), '(out, shape=inputs_shape)\n', (29387, 29412), True, 'import tensorflow as tf\n'), ((30579, 30617), 'numpy.random.rand', 'np.random.rand', (['random_size', '(32)', '(32)', '(3)'], {}), '(random_size, 32, 32, 3)\n', (30593, 30617), True, 'import numpy as np\n'), ((30796, 30847), 'numpy.ones', 'np.ones', ([], {'shape': 
'(random_size, random_probs.shape[1])'}), '(shape=(random_size, random_probs.shape[1]))\n', (30803, 30847), True, 'import numpy as np\n'), ((31586, 31639), 'numpy.concatenate', 'np.concatenate', (['(labels_onehot, random_labels_onehot)'], {}), '((labels_onehot, random_labels_onehot))\n', (31600, 31639), True, 'import numpy as np\n'), ((31655, 31692), 'numpy.concatenate', 'np.concatenate', (['(probs, random_probs)'], {}), '((probs, random_probs))\n', (31669, 31692), True, 'import numpy as np\n'), ((34363, 34418), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['basis_exponents'], {'dtype': 'tf.float32'}), '(basis_exponents, dtype=tf.float32)\n', (34383, 34418), True, 'import tensorflow as tf\n'), ((34569, 34580), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (34577, 34580), True, 'import tensorflow as tf\n'), ((34622, 34648), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1,)'}), '(x, shape=(-1,))\n', (34632, 34648), True, 'import tensorflow as tf\n'), ((34895, 34935), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(W1_tile * x_feat)'], {'axis': '(-1)'}), '(W1_tile * x_feat, axis=-1)\n', (34908, 34935), True, 'import tensorflow as tf\n'), ((34948, 34978), 'tensorflow.reshape', 'tf.reshape', (['out'], {'shape': 'x_shape'}), '(out, shape=x_shape)\n', (34958, 34978), True, 'import tensorflow as tf\n'), ((35126, 35169), 'tensorflow.keras.losses.MSE', 'tf.keras.losses.MSE', ([], {'y_true': 'y', 'y_pred': 'yhats'}), '(y_true=y, y_pred=yhats)\n', (35145, 35169), True, 'import tensorflow as tf\n'), ((35460, 35486), 'numpy.linspace', 'np.linspace', (['*basis_params'], {}), '(*basis_params)\n', (35471, 35486), True, 'import numpy as np\n'), ((36176, 36204), 'numpy.concatenate', 'np.concatenate', (['(probs, out)'], {}), '((probs, out))\n', (36190, 36204), True, 'import numpy as np\n'), ((36686, 36726), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(32, 32, 3)'}), '(shape=(32, 32, 3))\n', (36707, 36726), True, 'import 
tensorflow as tf\n'), ((36909, 36954), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inp', 'outputs': 'out_model'}), '(inputs=inp, outputs=out_model)\n', (36923, 36954), True, 'import tensorflow as tf\n'), ((1881, 1907), 'tensorflow.argmax', 'tf.argmax', (['labels'], {'axis': '(-1)'}), '(labels, axis=-1)\n', (1890, 1907), True, 'import tensorflow as tf\n'), ((2009, 2027), 'tensorflow.squeeze', 'tf.squeeze', (['labels'], {}), '(labels)\n', (2019, 2027), True, 'import tensorflow as tf\n'), ((2351, 2392), 'tensorflow.math.log', 'tf.math.log', (['(1.0 - one_vs_all_probs + eps)'], {}), '(1.0 - one_vs_all_probs + eps)\n', (2362, 2392), True, 'import tensorflow as tf\n'), ((2416, 2452), 'tensorflow.math.log', 'tf.math.log', (['(1.0 - class_probs + eps)'], {}), '(1.0 - class_probs + eps)\n', (2427, 2452), True, 'import tensorflow as tf\n'), ((5128, 5142), 'numpy.identity', 'np.identity', (['K'], {}), '(K)\n', (5139, 5142), True, 'import numpy as np\n'), ((5143, 5171), 'numpy.outer', 'np.outer', (['proj_vec', 'proj_vec'], {}), '(proj_vec, proj_vec)\n', (5151, 5171), True, 'import numpy as np\n'), ((5313, 5351), 'tensorflow.tensordot', 'tf.tensordot', (['probs', 'tproj_mat'], {'axes': '(1)'}), '(probs, tproj_mat, axes=1)\n', (5325, 5351), True, 'import tensorflow as tf\n'), ((5352, 5400), 'tensorflow.tensordot', 'tf.tensordot', (['probs_comp', 'tproj_mat_comp'], {'axes': '(1)'}), '(probs_comp, tproj_mat_comp, axes=1)\n', (5364, 5400), True, 'import tensorflow as tf\n'), ((5419, 5446), 'tensorflow.reduce_prod', 'tf.reduce_prod', (['out'], {'axis': '(1)'}), '(out, axis=1)\n', (5433, 5446), True, 'import tensorflow as tf\n'), ((5599, 5626), 'tensorflow.stack', 'tf.stack', (['cert_list'], {'axis': '(1)'}), '(cert_list, axis=1)\n', (5607, 5626), True, 'import tensorflow as tf\n'), ((5789, 5833), 'tensorflow.math.multiply', 'tf.math.multiply', (['certs_argmax', 'certs_reduce'], {}), '(certs_argmax, certs_reduce)\n', (5805, 5833), True, 'import tensorflow as tf\n'), 
((7345, 7380), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['l2_weight'], {}), '(l2_weight)\n', (7369, 7380), True, 'import tensorflow as tf\n'), ((14783, 14806), 'tensorflow.math.sigmoid', 'tf.math.sigmoid', (['logits'], {}), '(logits)\n', (14798, 14806), True, 'import tensorflow as tf\n'), ((15514, 15531), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (15529, 15531), True, 'import tensorflow as tf\n'), ((22457, 22480), 'tensorflow.math.sigmoid', 'tf.math.sigmoid', (['logits'], {}), '(logits)\n', (22472, 22480), True, 'import tensorflow as tf\n'), ((23179, 23196), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (23194, 23196), True, 'import tensorflow as tf\n'), ((28072, 28103), 'numpy.linspace', 'np.linspace', (['*self.basis_params'], {}), '(*self.basis_params)\n', (28083, 28103), True, 'import numpy as np\n'), ((30123, 30151), 'numpy.concatenate', 'np.concatenate', (['(probs, out)'], {}), '((probs, out))\n', (30137, 30151), True, 'import numpy as np\n'), ((34294, 34322), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 1)'}), '(x, shape=(-1, 1))\n', (34304, 34322), True, 'import tensorflow as tf\n'), ((34447, 34460), 'tensorflow.exp', 'tf.exp', (['be_tf'], {}), '(be_tf)\n', (34453, 34460), True, 'import tensorflow as tf\n'), ((35277, 35294), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (35292, 35294), True, 'import tensorflow as tf\n'), ((2066, 2092), 'tensorflow.shape', 'tf.shape', (['one_vs_all_probs'], {}), '(one_vs_all_probs)\n', (2074, 2092), True, 'import tensorflow as tf\n'), ((2657, 2677), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (2671, 2677), True, 'import tensorflow as tf\n'), ((5660, 5684), 'tensorflow.argmax', 'tf.argmax', (['certs'], {'axis': '(1)'}), '(certs, axis=1)\n', (5669, 5684), True, 'import tensorflow as tf\n'), ((5724, 5767), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['certs'], {'axis': '(1)', 'keepdims': '(True)'}), 
'(certs, axis=1, keepdims=True)\n', (5737, 5767), True, 'import tensorflow as tf\n'), ((5894, 5921), 'tensorflow.stack', 'tf.stack', (['cert_list'], {'axis': '(1)'}), '(cert_list, axis=1)\n', (5902, 5921), True, 'import tensorflow as tf\n'), ((6015, 6048), 'tensorflow.math.divide', 'tf.math.divide', (['certs', 'certs_norm'], {}), '(certs, certs_norm)\n', (6029, 6048), True, 'import tensorflow as tf\n'), ((11158, 11204), 'tensorflow.math.reduce_euclidean_norm', 'tf.math.reduce_euclidean_norm', (['(be - ae)'], {'axis': '(1)'}), '(be - ae, axis=1)\n', (11187, 11204), True, 'import tensorflow as tf\n'), ((14112, 14169), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {'kernel_initializer': '"""he_normal"""'}), "(10, kernel_initializer='he_normal')\n", (14133, 14169), True, 'import tensorflow as tf\n'), ((14948, 14980), 'tensorflow.math.softmax', 'tf.math.softmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (14963, 14980), True, 'import tensorflow as tf\n'), ((21736, 21795), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1000)'], {'kernel_initializer': '"""he_normal"""'}), "(1000, kernel_initializer='he_normal')\n", (21757, 21795), True, 'import tensorflow as tf\n'), ((22622, 22654), 'tensorflow.math.softmax', 'tf.math.softmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (22637, 22654), True, 'import tensorflow as tf\n'), ((34711, 34728), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['W1'], {}), '(W1)\n', (34724, 34728), True, 'import tensorflow as tf\n'), ((5950, 5993), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['certs'], {'axis': '(1)', 'keepdims': '(True)'}), '(certs, axis=1, keepdims=True)\n', (5963, 5993), True, 'import tensorflow as tf\n'), ((34740, 34752), 'tensorflow.shape', 'tf.shape', (['xr'], {}), '(xr)\n', (34748, 34752), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 17 19:39:45 2019
@author: aimldl
"""
import numpy as np
print( np.random.choice(5, 3, replace=False ) )
a = ['pooh', 'rabbit', 'piglet', 'Christopher']
print( np.random.choice(a, 3, replace=False ) )
print( np.random.choice(8, 32, replace=False ) )
| [
"numpy.random.choice"
] | [((122, 159), 'numpy.random.choice', 'np.random.choice', (['(5)', '(3)'], {'replace': '(False)'}), '(5, 3, replace=False)\n', (138, 159), True, 'import numpy as np\n'), ((222, 259), 'numpy.random.choice', 'np.random.choice', (['a', '(3)'], {'replace': '(False)'}), '(a, 3, replace=False)\n', (238, 259), True, 'import numpy as np\n'), ((273, 311), 'numpy.random.choice', 'np.random.choice', (['(8)', '(32)'], {'replace': '(False)'}), '(8, 32, replace=False)\n', (289, 311), True, 'import numpy as np\n')] |
"""implementation of argmin step"""
from scipy import optimize
import numpy as np
from BanditPricing import randUnitVector
def argmin(eta, s_radius, barrier, g_bar_aggr_t, g_tilde, d, max_iter = 1e4):
#implement argmin_ball(eta * (g_bar_1:t + g_tilde_t+1)^T x + barrier(x)
#argmin is over ball with radius r
TOL = 1e-10 # numerical error allowed
#g_bar_aggr_t = complex_to_real(g_bar_aggr_t)
#g_tilde = complex_to_real(g_tilde)
#init_pt = complex_to_real(randUnitVector(d)*s_radius/2)
cons = {'type': 'ineq', 'fun': lambda x: s_radius - np.linalg.norm(x),
'jac': lambda x: x / np.linalg.norm(x) if np.linalg.norm(x) > TOL else np.zeros(x.shape)}
res = optimize.minimize(fun=obj,
x0=randUnitVector(d)*s_radius/2,
args=(eta, barrier, g_bar_aggr_t, g_tilde),
constraints=cons,
options={'disp': False, 'maxiter': max_iter})
return res['x']
def obj(x, eta, barrier, g_bar_aggr_t, g_tilde):
return eta * np.dot(g_bar_aggr_t + g_tilde, x) + barrier(x)
def real_to_complex(z): # real vector of length 2n -> complex of length n
return z[:len(z)//2] + 1j * z[len(z)//2:]
def complex_to_real(z): # complex vector of length n -> real of length 2n
return np.concatenate((np.real(z), np.imag(z))) | [
"BanditPricing.randUnitVector",
"numpy.real",
"numpy.dot",
"numpy.zeros",
"numpy.linalg.norm",
"numpy.imag"
] | [((1067, 1100), 'numpy.dot', 'np.dot', (['(g_bar_aggr_t + g_tilde)', 'x'], {}), '(g_bar_aggr_t + g_tilde, x)\n', (1073, 1100), True, 'import numpy as np\n'), ((1347, 1357), 'numpy.real', 'np.real', (['z'], {}), '(z)\n', (1354, 1357), True, 'import numpy as np\n'), ((1359, 1369), 'numpy.imag', 'np.imag', (['z'], {}), '(z)\n', (1366, 1369), True, 'import numpy as np\n'), ((568, 585), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (582, 585), True, 'import numpy as np\n'), ((670, 687), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (678, 687), True, 'import numpy as np\n'), ((641, 658), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (655, 658), True, 'import numpy as np\n'), ((620, 637), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (634, 637), True, 'import numpy as np\n'), ((757, 774), 'BanditPricing.randUnitVector', 'randUnitVector', (['d'], {}), '(d)\n', (771, 774), False, 'from BanditPricing import randUnitVector\n')] |
"""
Author: michealowen
Last edited: 2019.11.1,Friday
LASSO回归算法,使用波士顿房价数据集
在损失函数中加入L1正则项,后验概率的符合拉普拉斯分布
"""
#encoding=UTF-8
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
class ridgeRegression:
'''
LASSO回归模型类
'''
def __init__(self,X,x_test,Y,y_test,k=1.0):
'''
Params:
X:样本,shape=(m,n)
Y:为标注,shape=(1,m)
x_test:测试集的数据
y_test:测试集的标签
k:正则项系数
'''
self.X=X
self.Y=Y
n = self.X.shape[1]
self.w = np.array([0 for i in range(n+1)],dtype='float64')
self.x_test = x_test
self.y_test = y_test
self.k=k
def preProcess(self,x):
'''
加上x0=1,并对样本进行归一化
'''
x = np.c_[x,np.array([1 for i in range(x.shape[0])])]
for i in range(x.shape[1]-1):
x[:,i] = (x[:,i] - np.mean(x[:,i]))/np.std(x[:,i])
return x
def fit(self,method='GBD',alpha=None,iterNums=None,batchSize=None):
'''
使用传入的method参数,选择适当的拟合方法
GBD:梯度下降,SGD:随机梯度下降,SBGD:小批量梯度下降,MT:矩阵法求解
'''
self.X = self.preProcess(self.X) #预处理数据
if method == 'BGD':
self.BGD(alpha,iterNums)
elif method == 'SGD':
self.SGD(alpha)
elif method == 'SBGD':
self.SBGD(alpha)
return None
def MT(self):
"""
基于矩阵求解的方法
"""
self.w = np.dot(np.linalg.pinv(np.dot(self.X.T,self.X)+self.k*np.eye(len(self.w))),np.dot(self.X.T,self.Y.T))
m,n = self.X.shape #m为样本数,n为维度
J = 1/m * np.dot( (np.dot(self.X,self.w.T) - self.Y),(np.dot(self.X,self.w.T) - self.Y).T) #MSE
print(J)
return None
def BGD(self,alpha,iterNums):
'''
使用所有样本进行梯度下降
'''
if alpha == None:
print('缺少参数:迭代步长')
if iterNums == None:
print('缺少参数:迭代次数')
m,n = self.X.shape #m为样本数,n为维度
i = 0
MinCost = float("inf")
while i<iterNums:
J = 1/m * (np.dot( (np.dot(self.X,self.w.T) - self.Y),(np.dot(self.X,self.w.T) - self.Y).T)+self.k*np.sum(np.abs(self.w)))
if J < MinCost:
MinCost = J
print(J," ",i)
self.w -= 2/m * alpha * ((np.dot(self.X.T ,(np.dot( self.X ,self.w.T) - self.Y.T ))).T+self.k*np.array([1 for i in range(len(self.w))]))
i += 1
else:
break
return None
def SGD(self,alpha):
'''
随机梯度下降
'''
if alpha == None:
print('缺少参数:迭代步长')
m,n = self.X.shape #m为样本数,n为维度
i = 0
MinCost = float("inf")
while True:
partIndex = np.random.randint(len(self.X))
X_part = self.X[partIndex]
Y_part = self.Y[partIndex]
J = 1/m * (np.dot( (np.dot(X_part,self.w) - Y_part),(np.dot(X_part,self.w) - Y_part).T)+self.k*np.sum(np.abs(self.w)))
if abs(J - MinCost) < 0.0001:
break
else:
print("J:",J," ",i)
self.w -= 2/m * alpha * ((np.dot(X_part.T ,(np.dot( X_part ,self.w.T) - Y_part.T ))).T+self.k*np.array([1 for i in range(len(self.w))]))
i = i+1
MinCost = J
return None
def SBGD(self,alpha):
'''
小批量梯度下降
'''
if alpha == None:
print('缺少参数:迭代步长')
m,n = self.X.shape #m为样本数,n为维度
i = 0
MinCost = float("inf")
while True:
partIndex = np.random.choice(range(m),int(m/10))
X_part = self.X[partIndex]
Y_part = self.Y[partIndex]
J = 1/m * (np.dot( (np.dot(X_part,self.w) - Y_part),(np.dot(X_part,self.w) - Y_part).T)+self.k*np.sum(np.abs(self.w)))
if abs(J - MinCost) < 0.0001:
break
else:
print("J:",J," ",i)
self.w -= 2/m * alpha * ((np.dot(X_part.T ,(np.dot( X_part ,self.w.T) - Y_part.T ))).T +self.k*np.array([1 for i in range(len(self.w))]))
i = i+1
MinCost = J
return None
def predict(self,data):
'''
预测输入数据对应的输出
'''
data = self.preProcess(data)
y = np.dot(data,self.w)
print(y)
return None
def evaluate(self):
'''
通过测试集评估模型的好坏,计算RSS(sum of squares for errors)
'''
print('评估')
print(np.sum(np.square((np.dot(self.preProcess(self.x_test),self.w.T)-y_test))))
return None
if __name__ == '__main__':
boston = load_boston()
#print(type(boston))
x_train,x_test,y_train,y_test= train_test_split(boston.data,boston.target,test_size=0.1,random_state=0)
model = ridgeRegression(x_train,x_test,y_train,y_test,k=1.0)
model.fit('SGD',alpha=0.1,iterNums=10000)
#model.fit('MT',alpha=0.1,iterNums=10000)
model.evaluate() | [
"numpy.mean",
"numpy.abs",
"sklearn.model_selection.train_test_split",
"sklearn.datasets.load_boston",
"numpy.dot",
"numpy.std"
] | [((4743, 4756), 'sklearn.datasets.load_boston', 'load_boston', ([], {}), '()\n', (4754, 4756), False, 'from sklearn.datasets import load_boston\n'), ((4817, 4892), 'sklearn.model_selection.train_test_split', 'train_test_split', (['boston.data', 'boston.target'], {'test_size': '(0.1)', 'random_state': '(0)'}), '(boston.data, boston.target, test_size=0.1, random_state=0)\n', (4833, 4892), False, 'from sklearn.model_selection import train_test_split\n'), ((4410, 4430), 'numpy.dot', 'np.dot', (['data', 'self.w'], {}), '(data, self.w)\n', (4416, 4430), True, 'import numpy as np\n'), ((1612, 1638), 'numpy.dot', 'np.dot', (['self.X.T', 'self.Y.T'], {}), '(self.X.T, self.Y.T)\n', (1618, 1638), True, 'import numpy as np\n'), ((996, 1011), 'numpy.std', 'np.std', (['x[:, i]'], {}), '(x[:, i])\n', (1002, 1011), True, 'import numpy as np\n'), ((979, 995), 'numpy.mean', 'np.mean', (['x[:, i]'], {}), '(x[:, i])\n', (986, 995), True, 'import numpy as np\n'), ((1560, 1584), 'numpy.dot', 'np.dot', (['self.X.T', 'self.X'], {}), '(self.X.T, self.X)\n', (1566, 1584), True, 'import numpy as np\n'), ((1707, 1731), 'numpy.dot', 'np.dot', (['self.X', 'self.w.T'], {}), '(self.X, self.w.T)\n', (1713, 1731), True, 'import numpy as np\n'), ((1742, 1766), 'numpy.dot', 'np.dot', (['self.X', 'self.w.T'], {}), '(self.X, self.w.T)\n', (1748, 1766), True, 'import numpy as np\n'), ((2166, 2190), 'numpy.dot', 'np.dot', (['self.X', 'self.w.T'], {}), '(self.X, self.w.T)\n', (2172, 2190), True, 'import numpy as np\n'), ((2252, 2266), 'numpy.abs', 'np.abs', (['self.w'], {}), '(self.w)\n', (2258, 2266), True, 'import numpy as np\n'), ((3000, 3022), 'numpy.dot', 'np.dot', (['X_part', 'self.w'], {}), '(X_part, self.w)\n', (3006, 3022), True, 'import numpy as np\n'), ((3082, 3096), 'numpy.abs', 'np.abs', (['self.w'], {}), '(self.w)\n', (3088, 3096), True, 'import numpy as np\n'), ((3845, 3867), 'numpy.dot', 'np.dot', (['X_part', 'self.w'], {}), '(X_part, self.w)\n', (3851, 3867), True, 'import numpy as 
np\n'), ((3927, 3941), 'numpy.abs', 'np.abs', (['self.w'], {}), '(self.w)\n', (3933, 3941), True, 'import numpy as np\n'), ((2201, 2225), 'numpy.dot', 'np.dot', (['self.X', 'self.w.T'], {}), '(self.X, self.w.T)\n', (2207, 2225), True, 'import numpy as np\n'), ((3033, 3055), 'numpy.dot', 'np.dot', (['X_part', 'self.w'], {}), '(X_part, self.w)\n', (3039, 3055), True, 'import numpy as np\n'), ((3878, 3900), 'numpy.dot', 'np.dot', (['X_part', 'self.w'], {}), '(X_part, self.w)\n', (3884, 3900), True, 'import numpy as np\n'), ((2416, 2440), 'numpy.dot', 'np.dot', (['self.X', 'self.w.T'], {}), '(self.X, self.w.T)\n', (2422, 2440), True, 'import numpy as np\n'), ((3277, 3301), 'numpy.dot', 'np.dot', (['X_part', 'self.w.T'], {}), '(X_part, self.w.T)\n', (3283, 3301), True, 'import numpy as np\n'), ((4122, 4146), 'numpy.dot', 'np.dot', (['X_part', 'self.w.T'], {}), '(X_part, self.w.T)\n', (4128, 4146), True, 'import numpy as np\n')] |
import copy
import os
import torch
import torchvision
import warnings
import math
import utils.misc
import numpy as np
import os.path as osp
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import models.modified_resnet_cifar as modified_resnet_cifar
import models.modified_resnetmtl_cifar as modified_resnetmtl_cifar
import models.modified_linear as modified_linear
from PIL import Image
from torch.optim import lr_scheduler
from torchvision import datasets, transforms
from tensorboardX import SummaryWriter
from utils.compute_features import compute_features
from utils.process_mnemonics import process_mnemonics
from utils.compute_accuracy import compute_accuracy
from trainer.incremental import incremental_train_and_eval
from utils.misc import *
from utils.process_fp import process_inputs_fp
warnings.filterwarnings('ignore')
class Trainer(object):
def __init__(self, the_args):
self.args = the_args
self.log_dir = './logs/'
if not osp.exists(self.log_dir):
os.mkdir(self.log_dir)
self.save_path = self.log_dir + self.args.dataset + '_nfg' + str(self.args.nb_cl_fg) + '_ncls' + str(self.args.nb_cl) + '_nproto' + str(self.args.nb_protos)
self.save_path += '_' + self.args.method
if not osp.exists(self.save_path):
os.mkdir(self.save_path)
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023))])
self.transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023))])
self.trainset = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=self.transform_train)
self.testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=self.transform_test)
self.evalset = torchvision.datasets.CIFAR100(root='./data', train=False, download=False, transform=self.transform_test)
self.network = modified_resnet_cifar.resnet32
self.network_mtl = modified_resnetmtl_cifar.resnetmtl32
self.lr_strat_first_phase = [int(160*0.5), int(160*0.75)]
self.lr_strat = [int(self.args.epochs*0.5), int(self.args.epochs*0.75)]
self.dictionary_size = self.args.dictionary_size
def map_labels(self, order_list, Y_set):
map_Y = []
for idx in Y_set:
map_Y.append(order_list.index(idx))
map_Y = np.array(map_Y)
return map_Y
def train(self):
self.train_writer = SummaryWriter(logdir=self.save_path)
dictionary_size = self.dictionary_size
top1_acc_list_cumul = np.zeros((int(self.args.num_classes/self.args.nb_cl), 4, self.args.nb_runs))
top1_acc_list_ori = np.zeros((int(self.args.num_classes/self.args.nb_cl), 4, self.args.nb_runs))
X_train_total = np.array(self.trainset.train_data)
Y_train_total = np.array(self.trainset.train_labels)
X_valid_total = np.array(self.testset.test_data)
Y_valid_total = np.array(self.testset.test_labels)
np.random.seed(1993)
for iteration_total in range(self.args.nb_runs):
order_name = osp.join(self.save_path, "seed_{}_{}_order_run_{}.pkl".format(1993, self.args.dataset, iteration_total))
print("Order name:{}".format(order_name))
if osp.exists(order_name):
print("Loading orders")
order = utils.misc.unpickle(order_name)
else:
print("Generating orders")
order = np.arange(self.args.num_classes)
np.random.shuffle(order)
utils.misc.savepickle(order, order_name)
order_list = list(order)
print(order_list)
np.random.seed(self.args.random_seed)
X_valid_cumuls = []
X_protoset_cumuls = []
X_train_cumuls = []
Y_valid_cumuls = []
Y_protoset_cumuls = []
Y_train_cumuls = []
alpha_dr_herding = np.zeros((int(self.args.num_classes/self.args.nb_cl),dictionary_size,self.args.nb_cl),np.float32)
prototypes = np.zeros((self.args.num_classes,dictionary_size,X_train_total.shape[1],X_train_total.shape[2],X_train_total.shape[3]))
for orde in range(self.args.num_classes):
prototypes[orde,:,:,:,:] = X_train_total[np.where(Y_train_total==order[orde])]
start_iter = int(self.args.nb_cl_fg/self.args.nb_cl)-1
for iteration in range(start_iter, int(self.args.num_classes/self.args.nb_cl)):
if iteration == start_iter:
last_iter = 0
tg_model = self.network(num_classes=self.args.nb_cl_fg)
in_features = tg_model.fc.in_features
out_features = tg_model.fc.out_features
print("Out_features:", out_features)
ref_model = None
free_model = None
ref_free_model = None
elif iteration == start_iter+1:
last_iter = iteration
ref_model = copy.deepcopy(tg_model)
print("Fusion Mode: "+self.args.fusion_mode)
tg_model = self.network(num_classes=self.args.nb_cl_fg)
ref_dict = ref_model.state_dict()
tg_dict = tg_model.state_dict()
tg_dict.update(ref_dict)
tg_model.load_state_dict(tg_dict)
tg_model.to(self.device)
in_features = tg_model.fc.in_features
out_features = tg_model.fc.out_features
print("Out_features:", out_features)
new_fc = modified_linear.SplitCosineLinear(in_features, out_features, self.args.nb_cl)
new_fc.fc1.weight.data = tg_model.fc.weight.data
new_fc.sigma.data = tg_model.fc.sigma.data
tg_model.fc = new_fc
lamda_mult = out_features*1.0 / self.args.nb_cl
else:
last_iter = iteration
ref_model = copy.deepcopy(tg_model)
in_features = tg_model.fc.in_features
out_features1 = tg_model.fc.fc1.out_features
out_features2 = tg_model.fc.fc2.out_features
print("Out_features:", out_features1+out_features2)
new_fc = modified_linear.SplitCosineLinear(in_features, out_features1+out_features2, self.args.nb_cl)
new_fc.fc1.weight.data[:out_features1] = tg_model.fc.fc1.weight.data
new_fc.fc1.weight.data[out_features1:] = tg_model.fc.fc2.weight.data
new_fc.sigma.data = tg_model.fc.sigma.data
tg_model.fc = new_fc
lamda_mult = (out_features1+out_features2)*1.0 / (self.args.nb_cl)
if iteration > start_iter:
cur_lamda = self.args.lamda * math.sqrt(lamda_mult)
else:
cur_lamda = self.args.lamda
actual_cl = order[range(last_iter*self.args.nb_cl,(iteration+1)*self.args.nb_cl)]
indices_train_10 = np.array([i in order[range(last_iter*self.args.nb_cl,(iteration+1)*self.args.nb_cl)] for i in Y_train_total])
indices_test_10 = np.array([i in order[range(last_iter*self.args.nb_cl,(iteration+1)*self.args.nb_cl)] for i in Y_valid_total])
X_train = X_train_total[indices_train_10]
X_valid = X_valid_total[indices_test_10]
X_valid_cumuls.append(X_valid)
X_train_cumuls.append(X_train)
X_valid_cumul = np.concatenate(X_valid_cumuls)
X_train_cumul = np.concatenate(X_train_cumuls)
Y_train = Y_train_total[indices_train_10]
Y_valid = Y_valid_total[indices_test_10]
Y_valid_cumuls.append(Y_valid)
Y_train_cumuls.append(Y_train)
Y_valid_cumul = np.concatenate(Y_valid_cumuls)
Y_train_cumul = np.concatenate(Y_train_cumuls)
if iteration == start_iter:
X_valid_ori = X_valid
Y_valid_ori = Y_valid
else:
X_protoset = np.concatenate(X_protoset_cumuls)
Y_protoset = np.concatenate(Y_protoset_cumuls)
if self.args.rs_ratio > 0:
scale_factor = (len(X_train) * self.args.rs_ratio) / (len(X_protoset) * (1 - self.args.rs_ratio))
rs_sample_weights = np.concatenate((np.ones(len(X_train)), np.ones(len(X_protoset))*scale_factor))
rs_num_samples = int(len(X_train) / (1 - self.args.rs_ratio))
print("X_train:{}, X_protoset:{}, rs_num_samples:{}".format(len(X_train), len(X_protoset), rs_num_samples))
X_train = np.concatenate((X_train,X_protoset),axis=0)
Y_train = np.concatenate((Y_train,Y_protoset))
print('Batch of classes number {0} arrives'.format(iteration+1))
map_Y_train = np.array([order_list.index(i) for i in Y_train])
map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul])
is_start_iteration = (iteration == start_iter)
if iteration > start_iter:
old_embedding_norm = tg_model.fc.fc1.weight.data.norm(dim=1, keepdim=True)
average_old_embedding_norm = torch.mean(old_embedding_norm, dim=0).to('cpu').type(torch.DoubleTensor)
tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
num_features = tg_model.fc.in_features
novel_embedding = torch.zeros((self.args.nb_cl, num_features))
for cls_idx in range(iteration*self.args.nb_cl, (iteration+1)*self.args.nb_cl):
cls_indices = np.array([i == cls_idx for i in map_Y_train])
assert(len(np.where(cls_indices==1)[0])==dictionary_size)
self.evalset.test_data = X_train[cls_indices].astype('uint8')
self.evalset.test_labels = np.zeros(self.evalset.test_data.shape[0])
evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size, shuffle=False, num_workers=self.args.num_workers)
num_samples = self.evalset.test_data.shape[0]
cls_features = compute_features(tg_model, free_model, tg_feature_model, is_start_iteration, evalloader, num_samples, num_features)
norm_features = F.normalize(torch.from_numpy(cls_features), p=2, dim=1)
cls_embedding = torch.mean(norm_features, dim=0)
novel_embedding[cls_idx-iteration*self.args.nb_cl] = F.normalize(cls_embedding, p=2, dim=0) * average_old_embedding_norm
tg_model.to(self.device)
tg_model.fc.fc2.weight.data = novel_embedding.to(self.device)
self.trainset.train_data = X_train.astype('uint8')
self.trainset.train_labels = map_Y_train
if iteration > start_iter and self.args.rs_ratio > 0 and scale_factor > 1:
print("Weights from sampling:", rs_sample_weights)
index1 = np.where(rs_sample_weights>1)[0]
index2 = np.where(map_Y_train<iteration*self.args.nb_cl)[0]
assert((index1==index2).all())
train_sampler = torch.utils.data.sampler.WeightedRandomSampler(rs_sample_weights, rs_num_samples)
trainloader = torch.utils.data.DataLoader(self.trainset, batch_size=self.args.train_batch_size, shuffle=False, sampler=train_sampler, num_workers=self.args.num_workers)
else:
trainloader = torch.utils.data.DataLoader(self.trainset, batch_size=self.args.train_batch_size,
shuffle=True, num_workers=self.args.num_workers)
self.testset.test_data = X_valid_cumul.astype('uint8')
self.testset.test_labels = map_Y_valid_cumul
testloader = torch.utils.data.DataLoader(self.testset, batch_size=self.args.test_batch_size,
shuffle=False, num_workers=self.args.num_workers)
print('Max and min of train labels: {}, {}'.format(min(map_Y_train), max(map_Y_train)))
print('Max and min of valid labels: {}, {}'.format(min(map_Y_valid_cumul), max(map_Y_valid_cumul)))
ckp_name = osp.join(self.save_path, 'run_{}_iteration_{}_model.pth'.format(iteration_total, iteration))
ckp_name_free = osp.join(self.save_path, 'run_{}_iteration_{}_free_model.pth'.format(iteration_total, iteration))
print('Checkpoint name:', ckp_name)
if iteration==start_iter and self.args.resume_fg:
print("Loading first group models from checkpoint")
tg_model = torch.load(self.args.ckpt_dir_fg)
elif self.args.resume and os.path.exists(ckp_name):
print("Loading models from checkpoint")
tg_model = torch.load(ckp_name)
else:
if iteration > start_iter:
ref_model = ref_model.to(self.device)
ignored_params = list(map(id, tg_model.fc.fc1.parameters()))
base_params = filter(lambda p: id(p) not in ignored_params, tg_model.parameters())
base_params = filter(lambda p: p.requires_grad,base_params)
base_params = filter(lambda p: p.requires_grad,base_params)
tg_params_new =[{'params': base_params, 'lr': self.args.base_lr2, 'weight_decay': self.args.custom_weight_decay}, {'params': tg_model.fc.fc1.parameters(), 'lr': 0, 'weight_decay': 0}]
tg_model = tg_model.to(self.device)
tg_optimizer = optim.SGD(tg_params_new, lr=self.args.base_lr2, momentum=self.args.custom_momentum, weight_decay=self.args.custom_weight_decay)
else:
tg_params = tg_model.parameters()
tg_model = tg_model.to(self.device)
tg_optimizer = optim.SGD(tg_params, lr=self.args.base_lr1, momentum=self.args.custom_momentum, weight_decay=self.args.custom_weight_decay)
if iteration > start_iter:
tg_lr_scheduler = lr_scheduler.MultiStepLR(tg_optimizer, milestones=self.lr_strat, gamma=self.args.lr_factor)
else:
tg_lr_scheduler = lr_scheduler.MultiStepLR(tg_optimizer, milestones=self.lr_strat_first_phase, gamma=self.args.lr_factor)
print("Incremental train")
if iteration > start_iter:
tg_model = incremental_train_and_eval(self.args.epochs, tg_model, ref_model, free_model, ref_free_model, tg_optimizer, tg_lr_scheduler, trainloader, testloader, iteration, start_iter, cur_lamda, self.args.dist, self.args.K, self.args.lw_mr)
else:
tg_model = incremental_train_and_eval(self.args.epochs, tg_model, ref_model, free_model, ref_free_model, tg_optimizer, tg_lr_scheduler, trainloader, testloader, iteration, start_iter, cur_lamda, self.args.dist, self.args.K, self.args.lw_mr)
torch.save(tg_model, ckp_name)
if self.args.dynamic_budget:
nb_protos_cl = self.args.nb_protos
else:
nb_protos_cl = int(np.ceil(self.args.nb_protos*100./self.args.nb_cl/(iteration+1)))
tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
num_features = tg_model.fc.in_features
for iter_dico in range(last_iter*self.args.nb_cl, (iteration+1)*self.args.nb_cl):
self.evalset.test_data = prototypes[iter_dico].astype('uint8')
self.evalset.test_labels = np.zeros(self.evalset.test_data.shape[0])
evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size,
shuffle=False, num_workers=self.args.num_workers)
num_samples = self.evalset.test_data.shape[0]
mapped_prototypes = compute_features(tg_model, free_model, tg_feature_model, is_start_iteration, evalloader, num_samples, num_features)
D = mapped_prototypes.T
D = D/np.linalg.norm(D,axis=0)
mu = np.mean(D,axis=1)
index1 = int(iter_dico/self.args.nb_cl)
index2 = iter_dico % self.args.nb_cl
alpha_dr_herding[index1,:,index2] = alpha_dr_herding[index1,:,index2]*0
w_t = mu
iter_herding = 0
iter_herding_eff = 0
while not(np.sum(alpha_dr_herding[index1,:,index2]!=0)==min(nb_protos_cl,500)) and iter_herding_eff<1000:
tmp_t = np.dot(w_t,D)
ind_max = np.argmax(tmp_t)
iter_herding_eff += 1
if alpha_dr_herding[index1,ind_max,index2] == 0:
alpha_dr_herding[index1,ind_max,index2] = 1+iter_herding
iter_herding += 1
w_t = w_t+mu-D[:,ind_max]
X_protoset_cumuls = []
Y_protoset_cumuls = []
class_means = np.zeros((64,100,2))
for iteration2 in range(iteration+1):
for iter_dico in range(self.args.nb_cl):
current_cl = order[range(iteration2*self.args.nb_cl,(iteration2+1)*self.args.nb_cl)]
self.evalset.test_data = prototypes[iteration2*self.args.nb_cl+iter_dico].astype('uint8')
self.evalset.test_labels = np.zeros(self.evalset.test_data.shape[0]) #zero labels
evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size,
shuffle=False, num_workers=self.args.num_workers)
num_samples = self.evalset.test_data.shape[0]
mapped_prototypes = compute_features(tg_model, free_model, tg_feature_model, is_start_iteration, evalloader, num_samples, num_features)
D = mapped_prototypes.T
D = D/np.linalg.norm(D,axis=0)
self.evalset.test_data = prototypes[iteration2*self.args.nb_cl+iter_dico][:,:,:,::-1].astype('uint8')
evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size,
shuffle=False, num_workers=self.args.num_workers)
mapped_prototypes2 = compute_features(tg_model, free_model, tg_feature_model, is_start_iteration, evalloader, num_samples, num_features)
D2 = mapped_prototypes2.T
D2 = D2/np.linalg.norm(D2,axis=0)
alph = alpha_dr_herding[iteration2,:,iter_dico]
alph = (alph>0)*(alph<nb_protos_cl+1)*1.
X_protoset_cumuls.append(prototypes[iteration2*self.args.nb_cl+iter_dico,np.where(alph==1)[0]])
Y_protoset_cumuls.append(order[iteration2*self.args.nb_cl+iter_dico]*np.ones(len(np.where(alph==1)[0])))
alph = alph/np.sum(alph)
class_means[:,current_cl[iter_dico],0] = (np.dot(D,alph)+np.dot(D2,alph))/2
class_means[:,current_cl[iter_dico],0] /= np.linalg.norm(class_means[:,current_cl[iter_dico],0])
alph = np.ones(dictionary_size)/dictionary_size
class_means[:,current_cl[iter_dico],1] = (np.dot(D,alph)+np.dot(D2,alph))/2
class_means[:,current_cl[iter_dico],1] /= np.linalg.norm(class_means[:,current_cl[iter_dico],1])
current_means = class_means[:, order[range(0,(iteration+1)*self.args.nb_cl)]]
class_means = np.zeros((64,100,2))
for iteration2 in range(iteration+1):
for iter_dico in range(self.args.nb_cl):
current_cl = order[range(iteration2*self.args.nb_cl,(iteration2+1)*self.args.nb_cl)]
self.evalset.test_data = prototypes[iteration2*self.args.nb_cl+iter_dico].astype('uint8')
self.evalset.test_labels = np.zeros(self.evalset.test_data.shape[0]) #zero labels
evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size,
shuffle=False, num_workers=self.args.num_workers)
num_samples = self.evalset.test_data.shape[0]
mapped_prototypes = compute_features(tg_model, free_model, tg_feature_model, is_start_iteration, evalloader, num_samples, num_features)
D = mapped_prototypes.T
D = D/np.linalg.norm(D,axis=0)
self.evalset.test_data = prototypes[iteration2*self.args.nb_cl+iter_dico][:,:,:,::-1].astype('uint8')
evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size,
shuffle=False, num_workers=self.args.num_workers)
mapped_prototypes2 = compute_features(tg_model, free_model, tg_feature_model, is_start_iteration, evalloader, num_samples, num_features)
D2 = mapped_prototypes2.T
D2 = D2/np.linalg.norm(D2,axis=0)
alph = alpha_dr_herding[iteration2,:,iter_dico]
alph = (alph>0)*(alph<nb_protos_cl+1)*1.
alph = alph/np.sum(alph)
class_means[:,current_cl[iter_dico],0] = (np.dot(D,alph)+np.dot(D2,alph))/2
class_means[:,current_cl[iter_dico],0] /= np.linalg.norm(class_means[:,current_cl[iter_dico],0])
alph = np.ones(dictionary_size)/dictionary_size
class_means[:,current_cl[iter_dico],1] = (np.dot(D,alph)+np.dot(D2,alph))/2
class_means[:,current_cl[iter_dico],1] /= np.linalg.norm(class_means[:,current_cl[iter_dico],1])
torch.save(class_means, osp.join(self.save_path, 'run_{}_iteration_{}_class_means.pth'.format(iteration_total, iteration)))
current_means = class_means[:, order[range(0,(iteration+1)*self.args.nb_cl)]]
is_start_iteration = (iteration == start_iter)
map_Y_valid_ori = np.array([order_list.index(i) for i in Y_valid_ori])
print('Computing accuracy for first-phase classes')
self.evalset.test_data = X_valid_ori.astype('uint8')
self.evalset.test_labels = map_Y_valid_ori
evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size, shuffle=False, num_workers=self.args.num_workers)
ori_acc, fast_fc = compute_accuracy(tg_model, free_model, tg_feature_model, current_means, X_protoset_cumuls, Y_protoset_cumuls, evalloader, order_list, is_start_iteration=is_start_iteration, maml_lr=self.args.maml_lr, maml_epoch=self.args.maml_epoch)
top1_acc_list_ori[iteration, :, iteration_total] = np.array(ori_acc).T
self.train_writer.add_scalar('ori_acc/LwF', float(ori_acc[0]), iteration)
self.train_writer.add_scalar('ori_acc/iCaRL', float(ori_acc[1]), iteration)
map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul])
print('Computing accuracy for all seen classes')
self.evalset.test_data = X_valid_cumul.astype('uint8')
self.evalset.test_labels = map_Y_valid_cumul
evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size, shuffle=False, num_workers=self.args.num_workers)
cumul_acc, _ = compute_accuracy(tg_model, free_model, tg_feature_model, current_means, X_protoset_cumuls, Y_protoset_cumuls, evalloader, order_list, is_start_iteration=is_start_iteration, fast_fc=fast_fc, maml_lr=self.args.maml_lr, maml_epoch=self.args.maml_epoch)
top1_acc_list_cumul[iteration, :, iteration_total] = np.array(cumul_acc).T
self.train_writer.add_scalar('cumul_acc/LwF', float(cumul_acc[0]), iteration)
self.train_writer.add_scalar('cumul_acc/iCaRL', float(cumul_acc[1]), iteration)
torch.save(top1_acc_list_ori, osp.join(self.save_path, 'run_{}_top1_acc_list_ori.pth'.format(iteration_total)))
torch.save(top1_acc_list_cumul, osp.join(self.save_path, 'run_{}_top1_acc_list_cumul.pth'.format(iteration_total)))
self.train_writer.close
| [
"torchvision.datasets.CIFAR100",
"torch.optim.lr_scheduler.MultiStepLR",
"math.sqrt",
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"copy.deepcopy",
"numpy.linalg.norm",
"trainer.incremental.incremental_train_and_eval",
"numpy.arange",
"os.path.exists",
"utils.compute_accuracy.... | [((832, 865), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (855, 865), False, 'import warnings\n'), ((1852, 1959), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)', 'transform': 'self.transform_train'}), "(root='./data', train=True, download=True,\n transform=self.transform_train)\n", (1881, 1959), False, 'import torchvision\n'), ((1979, 2086), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""./data"""', 'train': '(False)', 'download': '(True)', 'transform': 'self.transform_test'}), "(root='./data', train=False, download=True,\n transform=self.transform_test)\n", (2008, 2086), False, 'import torchvision\n'), ((2106, 2214), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""./data"""', 'train': '(False)', 'download': '(False)', 'transform': 'self.transform_test'}), "(root='./data', train=False, download=False,\n transform=self.transform_test)\n", (2135, 2214), False, 'import torchvision\n'), ((2687, 2702), 'numpy.array', 'np.array', (['map_Y'], {}), '(map_Y)\n', (2695, 2702), True, 'import numpy as np\n'), ((2774, 2810), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'logdir': 'self.save_path'}), '(logdir=self.save_path)\n', (2787, 2810), False, 'from tensorboardX import SummaryWriter\n'), ((3094, 3128), 'numpy.array', 'np.array', (['self.trainset.train_data'], {}), '(self.trainset.train_data)\n', (3102, 3128), True, 'import numpy as np\n'), ((3153, 3189), 'numpy.array', 'np.array', (['self.trainset.train_labels'], {}), '(self.trainset.train_labels)\n', (3161, 3189), True, 'import numpy as np\n'), ((3214, 3246), 'numpy.array', 'np.array', (['self.testset.test_data'], {}), '(self.testset.test_data)\n', (3222, 3246), True, 'import numpy as np\n'), ((3271, 3305), 'numpy.array', 'np.array', (['self.testset.test_labels'], {}), 
'(self.testset.test_labels)\n', (3279, 3305), True, 'import numpy as np\n'), ((3314, 3334), 'numpy.random.seed', 'np.random.seed', (['(1993)'], {}), '(1993)\n', (3328, 3334), True, 'import numpy as np\n'), ((4002, 4039), 'numpy.random.seed', 'np.random.seed', (['self.args.random_seed'], {}), '(self.args.random_seed)\n', (4016, 4039), True, 'import numpy as np\n'), ((4373, 4499), 'numpy.zeros', 'np.zeros', (['(self.args.num_classes, dictionary_size, X_train_total.shape[1],\n X_train_total.shape[2], X_train_total.shape[3])'], {}), '((self.args.num_classes, dictionary_size, X_train_total.shape[1],\n X_train_total.shape[2], X_train_total.shape[3]))\n', (4381, 4499), True, 'import numpy as np\n'), ((1001, 1025), 'os.path.exists', 'osp.exists', (['self.log_dir'], {}), '(self.log_dir)\n', (1011, 1025), True, 'import os.path as osp\n'), ((1039, 1061), 'os.mkdir', 'os.mkdir', (['self.log_dir'], {}), '(self.log_dir)\n', (1047, 1061), False, 'import os\n'), ((1300, 1326), 'os.path.exists', 'osp.exists', (['self.save_path'], {}), '(self.save_path)\n', (1310, 1326), True, 'import os.path as osp\n'), ((1340, 1364), 'os.mkdir', 'os.mkdir', (['self.save_path'], {}), '(self.save_path)\n', (1348, 1364), False, 'import os\n'), ((3591, 3613), 'os.path.exists', 'osp.exists', (['order_name'], {}), '(order_name)\n', (3601, 3613), True, 'import os.path as osp\n'), ((7766, 7796), 'numpy.concatenate', 'np.concatenate', (['X_valid_cumuls'], {}), '(X_valid_cumuls)\n', (7780, 7796), True, 'import numpy as np\n'), ((7825, 7855), 'numpy.concatenate', 'np.concatenate', (['X_train_cumuls'], {}), '(X_train_cumuls)\n', (7839, 7855), True, 'import numpy as np\n'), ((8077, 8107), 'numpy.concatenate', 'np.concatenate', (['Y_valid_cumuls'], {}), '(Y_valid_cumuls)\n', (8091, 8107), True, 'import numpy as np\n'), ((8136, 8166), 'numpy.concatenate', 'np.concatenate', (['Y_train_cumuls'], {}), '(Y_train_cumuls)\n', (8150, 8166), True, 'import numpy as np\n'), ((12149, 12283), 'torch.utils.data.DataLoader', 
'torch.utils.data.DataLoader', (['self.testset'], {'batch_size': 'self.args.test_batch_size', 'shuffle': '(False)', 'num_workers': 'self.args.num_workers'}), '(self.testset, batch_size=self.args.\n test_batch_size, shuffle=False, num_workers=self.args.num_workers)\n', (12176, 12283), False, 'import torch\n'), ((17408, 17430), 'numpy.zeros', 'np.zeros', (['(64, 100, 2)'], {}), '((64, 100, 2))\n', (17416, 17430), True, 'import numpy as np\n'), ((19948, 19970), 'numpy.zeros', 'np.zeros', (['(64, 100, 2)'], {}), '((64, 100, 2))\n', (19956, 19970), True, 'import numpy as np\n'), ((22708, 22842), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.evalset'], {'batch_size': 'self.args.eval_batch_size', 'shuffle': '(False)', 'num_workers': 'self.args.num_workers'}), '(self.evalset, batch_size=self.args.\n eval_batch_size, shuffle=False, num_workers=self.args.num_workers)\n', (22735, 22842), False, 'import torch\n'), ((22869, 23113), 'utils.compute_accuracy.compute_accuracy', 'compute_accuracy', (['tg_model', 'free_model', 'tg_feature_model', 'current_means', 'X_protoset_cumuls', 'Y_protoset_cumuls', 'evalloader', 'order_list'], {'is_start_iteration': 'is_start_iteration', 'maml_lr': 'self.args.maml_lr', 'maml_epoch': 'self.args.maml_epoch'}), '(tg_model, free_model, tg_feature_model, current_means,\n X_protoset_cumuls, Y_protoset_cumuls, evalloader, order_list,\n is_start_iteration=is_start_iteration, maml_lr=self.args.maml_lr,\n maml_epoch=self.args.maml_epoch)\n', (22885, 23113), False, 'from utils.compute_accuracy import compute_accuracy\n'), ((23657, 23791), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.evalset'], {'batch_size': 'self.args.eval_batch_size', 'shuffle': '(False)', 'num_workers': 'self.args.num_workers'}), '(self.evalset, batch_size=self.args.\n eval_batch_size, shuffle=False, num_workers=self.args.num_workers)\n', (23684, 23791), False, 'import torch\n'), ((23822, 24084), 
'utils.compute_accuracy.compute_accuracy', 'compute_accuracy', (['tg_model', 'free_model', 'tg_feature_model', 'current_means', 'X_protoset_cumuls', 'Y_protoset_cumuls', 'evalloader', 'order_list'], {'is_start_iteration': 'is_start_iteration', 'fast_fc': 'fast_fc', 'maml_lr': 'self.args.maml_lr', 'maml_epoch': 'self.args.maml_epoch'}), '(tg_model, free_model, tg_feature_model, current_means,\n X_protoset_cumuls, Y_protoset_cumuls, evalloader, order_list,\n is_start_iteration=is_start_iteration, fast_fc=fast_fc, maml_lr=self.\n args.maml_lr, maml_epoch=self.args.maml_epoch)\n', (23838, 24084), False, 'from utils.compute_accuracy import compute_accuracy\n'), ((1412, 1437), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1435, 1437), False, 'import torch\n'), ((1501, 1537), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (1522, 1537), False, 'from torchvision import datasets, transforms\n'), ((1539, 1572), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (1570, 1572), False, 'from torchvision import datasets, transforms\n'), ((1574, 1595), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1593, 1595), False, 'from torchvision import datasets, transforms\n'), ((1597, 1669), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5071, 0.4866, 0.4409)', '(0.2009, 0.1984, 0.2023)'], {}), '((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023))\n', (1617, 1669), False, 'from torchvision import datasets, transforms\n'), ((1726, 1747), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1745, 1747), False, 'from torchvision import datasets, transforms\n'), ((1749, 1821), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5071, 0.4866, 0.4409)', '(0.2009, 0.1984, 0.2023)'], {}), '((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023))\n', (1769, 1821), False, 'from 
torchvision import datasets, transforms\n'), ((3796, 3828), 'numpy.arange', 'np.arange', (['self.args.num_classes'], {}), '(self.args.num_classes)\n', (3805, 3828), True, 'import numpy as np\n'), ((3845, 3869), 'numpy.random.shuffle', 'np.random.shuffle', (['order'], {}), '(order)\n', (3862, 3869), True, 'import numpy as np\n'), ((4595, 4633), 'numpy.where', 'np.where', (['(Y_train_total == order[orde])'], {}), '(Y_train_total == order[orde])\n', (4603, 4633), True, 'import numpy as np\n'), ((8330, 8363), 'numpy.concatenate', 'np.concatenate', (['X_protoset_cumuls'], {}), '(X_protoset_cumuls)\n', (8344, 8363), True, 'import numpy as np\n'), ((8393, 8426), 'numpy.concatenate', 'np.concatenate', (['Y_protoset_cumuls'], {}), '(Y_protoset_cumuls)\n', (8407, 8426), True, 'import numpy as np\n'), ((8943, 8988), 'numpy.concatenate', 'np.concatenate', (['(X_train, X_protoset)'], {'axis': '(0)'}), '((X_train, X_protoset), axis=0)\n', (8957, 8988), True, 'import numpy as np\n'), ((9013, 9050), 'numpy.concatenate', 'np.concatenate', (['(Y_train, Y_protoset)'], {}), '((Y_train, Y_protoset))\n', (9027, 9050), True, 'import numpy as np\n'), ((9767, 9811), 'torch.zeros', 'torch.zeros', (['(self.args.nb_cl, num_features)'], {}), '((self.args.nb_cl, num_features))\n', (9778, 9811), False, 'import torch\n'), ((11522, 11607), 'torch.utils.data.sampler.WeightedRandomSampler', 'torch.utils.data.sampler.WeightedRandomSampler', (['rs_sample_weights', 'rs_num_samples'], {}), '(rs_sample_weights,\n rs_num_samples)\n', (11568, 11607), False, 'import torch\n'), ((11634, 11798), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.trainset'], {'batch_size': 'self.args.train_batch_size', 'shuffle': '(False)', 'sampler': 'train_sampler', 'num_workers': 'self.args.num_workers'}), '(self.trainset, batch_size=self.args.\n train_batch_size, shuffle=False, sampler=train_sampler, num_workers=\n self.args.num_workers)\n', (11661, 11798), False, 'import torch\n'), ((11849, 11984), 
'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.trainset'], {'batch_size': 'self.args.train_batch_size', 'shuffle': '(True)', 'num_workers': 'self.args.num_workers'}), '(self.trainset, batch_size=self.args.\n train_batch_size, shuffle=True, num_workers=self.args.num_workers)\n', (11876, 11984), False, 'import torch\n'), ((12954, 12987), 'torch.load', 'torch.load', (['self.args.ckpt_dir_fg'], {}), '(self.args.ckpt_dir_fg)\n', (12964, 12987), False, 'import torch\n'), ((15948, 15989), 'numpy.zeros', 'np.zeros', (['self.evalset.test_data.shape[0]'], {}), '(self.evalset.test_data.shape[0])\n', (15956, 15989), True, 'import numpy as np\n'), ((16019, 16153), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.evalset'], {'batch_size': 'self.args.eval_batch_size', 'shuffle': '(False)', 'num_workers': 'self.args.num_workers'}), '(self.evalset, batch_size=self.args.\n eval_batch_size, shuffle=False, num_workers=self.args.num_workers)\n', (16046, 16153), False, 'import torch\n'), ((16279, 16398), 'utils.compute_features.compute_features', 'compute_features', (['tg_model', 'free_model', 'tg_feature_model', 'is_start_iteration', 'evalloader', 'num_samples', 'num_features'], {}), '(tg_model, free_model, tg_feature_model, is_start_iteration,\n evalloader, num_samples, num_features)\n', (16295, 16398), False, 'from utils.compute_features import compute_features\n'), ((16504, 16522), 'numpy.mean', 'np.mean', (['D'], {'axis': '(1)'}), '(D, axis=1)\n', (16511, 16522), True, 'import numpy as np\n'), ((23166, 23183), 'numpy.array', 'np.array', (['ori_acc'], {}), '(ori_acc)\n', (23174, 23183), True, 'import numpy as np\n'), ((24137, 24156), 'numpy.array', 'np.array', (['cumul_acc'], {}), '(cumul_acc)\n', (24145, 24156), True, 'import numpy as np\n'), ((5304, 5327), 'copy.deepcopy', 'copy.deepcopy', (['tg_model'], {}), '(tg_model)\n', (5317, 5327), False, 'import copy\n'), ((5879, 5956), 'models.modified_linear.SplitCosineLinear', 
'modified_linear.SplitCosineLinear', (['in_features', 'out_features', 'self.args.nb_cl'], {}), '(in_features, out_features, self.args.nb_cl)\n', (5912, 5956), True, 'import models.modified_linear as modified_linear\n'), ((6266, 6289), 'copy.deepcopy', 'copy.deepcopy', (['tg_model'], {}), '(tg_model)\n', (6279, 6289), False, 'import copy\n'), ((6559, 6657), 'models.modified_linear.SplitCosineLinear', 'modified_linear.SplitCosineLinear', (['in_features', '(out_features1 + out_features2)', 'self.args.nb_cl'], {}), '(in_features, out_features1 +\n out_features2, self.args.nb_cl)\n', (6592, 6657), True, 'import models.modified_linear as modified_linear\n'), ((7086, 7107), 'math.sqrt', 'math.sqrt', (['lamda_mult'], {}), '(lamda_mult)\n', (7095, 7107), False, 'import math\n'), ((9942, 9989), 'numpy.array', 'np.array', (['[(i == cls_idx) for i in map_Y_train]'], {}), '([(i == cls_idx) for i in map_Y_train])\n', (9950, 9989), True, 'import numpy as np\n'), ((10196, 10237), 'numpy.zeros', 'np.zeros', (['self.evalset.test_data.shape[0]'], {}), '(self.evalset.test_data.shape[0])\n', (10204, 10237), True, 'import numpy as np\n'), ((10271, 10405), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.evalset'], {'batch_size': 'self.args.eval_batch_size', 'shuffle': '(False)', 'num_workers': 'self.args.num_workers'}), '(self.evalset, batch_size=self.args.\n eval_batch_size, shuffle=False, num_workers=self.args.num_workers)\n', (10298, 10405), False, 'import torch\n'), ((10502, 10621), 'utils.compute_features.compute_features', 'compute_features', (['tg_model', 'free_model', 'tg_feature_model', 'is_start_iteration', 'evalloader', 'num_samples', 'num_features'], {}), '(tg_model, free_model, tg_feature_model, is_start_iteration,\n evalloader, num_samples, num_features)\n', (10518, 10621), False, 'from utils.compute_features import compute_features\n'), ((10746, 10778), 'torch.mean', 'torch.mean', (['norm_features'], {'dim': '(0)'}), '(norm_features, dim=0)\n', 
(10756, 10778), False, 'import torch\n'), ((11334, 11365), 'numpy.where', 'np.where', (['(rs_sample_weights > 1)'], {}), '(rs_sample_weights > 1)\n', (11342, 11365), True, 'import numpy as np\n'), ((11392, 11443), 'numpy.where', 'np.where', (['(map_Y_train < iteration * self.args.nb_cl)'], {}), '(map_Y_train < iteration * self.args.nb_cl)\n', (11400, 11443), True, 'import numpy as np\n'), ((13026, 13050), 'os.path.exists', 'os.path.exists', (['ckp_name'], {}), '(ckp_name)\n', (13040, 13050), False, 'import os\n'), ((13135, 13155), 'torch.load', 'torch.load', (['ckp_name'], {}), '(ckp_name)\n', (13145, 13155), False, 'import torch\n'), ((15362, 15392), 'torch.save', 'torch.save', (['tg_model', 'ckp_name'], {}), '(tg_model, ckp_name)\n', (15372, 15392), False, 'import torch\n'), ((15538, 15610), 'numpy.ceil', 'np.ceil', (['(self.args.nb_protos * 100.0 / self.args.nb_cl / (iteration + 1))'], {}), '(self.args.nb_protos * 100.0 / self.args.nb_cl / (iteration + 1))\n', (15545, 15610), True, 'import numpy as np\n'), ((16457, 16482), 'numpy.linalg.norm', 'np.linalg.norm', (['D'], {'axis': '(0)'}), '(D, axis=0)\n', (16471, 16482), True, 'import numpy as np\n'), ((16970, 16984), 'numpy.dot', 'np.dot', (['w_t', 'D'], {}), '(w_t, D)\n', (16976, 16984), True, 'import numpy as np\n'), ((17014, 17030), 'numpy.argmax', 'np.argmax', (['tmp_t'], {}), '(tmp_t)\n', (17023, 17030), True, 'import numpy as np\n'), ((17798, 17839), 'numpy.zeros', 'np.zeros', (['self.evalset.test_data.shape[0]'], {}), '(self.evalset.test_data.shape[0])\n', (17806, 17839), True, 'import numpy as np\n'), ((17886, 18020), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.evalset'], {'batch_size': 'self.args.eval_batch_size', 'shuffle': '(False)', 'num_workers': 'self.args.num_workers'}), '(self.evalset, batch_size=self.args.\n eval_batch_size, shuffle=False, num_workers=self.args.num_workers)\n', (17913, 18020), False, 'import torch\n'), ((18146, 18265), 
'utils.compute_features.compute_features', 'compute_features', (['tg_model', 'free_model', 'tg_feature_model', 'is_start_iteration', 'evalloader', 'num_samples', 'num_features'], {}), '(tg_model, free_model, tg_feature_model, is_start_iteration,\n evalloader, num_samples, num_features)\n', (18162, 18265), False, 'from utils.compute_features import compute_features\n'), ((18512, 18646), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.evalset'], {'batch_size': 'self.args.eval_batch_size', 'shuffle': '(False)', 'num_workers': 'self.args.num_workers'}), '(self.evalset, batch_size=self.args.\n eval_batch_size, shuffle=False, num_workers=self.args.num_workers)\n', (18539, 18646), False, 'import torch\n'), ((18707, 18826), 'utils.compute_features.compute_features', 'compute_features', (['tg_model', 'free_model', 'tg_feature_model', 'is_start_iteration', 'evalloader', 'num_samples', 'num_features'], {}), '(tg_model, free_model, tg_feature_model, is_start_iteration,\n evalloader, num_samples, num_features)\n', (18723, 18826), False, 'from utils.compute_features import compute_features\n'), ((19496, 19552), 'numpy.linalg.norm', 'np.linalg.norm', (['class_means[:, current_cl[iter_dico], 0]'], {}), '(class_means[:, current_cl[iter_dico], 0])\n', (19510, 19552), True, 'import numpy as np\n'), ((19777, 19833), 'numpy.linalg.norm', 'np.linalg.norm', (['class_means[:, current_cl[iter_dico], 1]'], {}), '(class_means[:, current_cl[iter_dico], 1])\n', (19791, 19833), True, 'import numpy as np\n'), ((20338, 20379), 'numpy.zeros', 'np.zeros', (['self.evalset.test_data.shape[0]'], {}), '(self.evalset.test_data.shape[0])\n', (20346, 20379), True, 'import numpy as np\n'), ((20426, 20560), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.evalset'], {'batch_size': 'self.args.eval_batch_size', 'shuffle': '(False)', 'num_workers': 'self.args.num_workers'}), '(self.evalset, batch_size=self.args.\n eval_batch_size, shuffle=False, 
num_workers=self.args.num_workers)\n', (20453, 20560), False, 'import torch\n'), ((20686, 20805), 'utils.compute_features.compute_features', 'compute_features', (['tg_model', 'free_model', 'tg_feature_model', 'is_start_iteration', 'evalloader', 'num_samples', 'num_features'], {}), '(tg_model, free_model, tg_feature_model, is_start_iteration,\n evalloader, num_samples, num_features)\n', (20702, 20805), False, 'from utils.compute_features import compute_features\n'), ((21052, 21186), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.evalset'], {'batch_size': 'self.args.eval_batch_size', 'shuffle': '(False)', 'num_workers': 'self.args.num_workers'}), '(self.evalset, batch_size=self.args.\n eval_batch_size, shuffle=False, num_workers=self.args.num_workers)\n', (21079, 21186), False, 'import torch\n'), ((21247, 21366), 'utils.compute_features.compute_features', 'compute_features', (['tg_model', 'free_model', 'tg_feature_model', 'is_start_iteration', 'evalloader', 'num_samples', 'num_features'], {}), '(tg_model, free_model, tg_feature_model, is_start_iteration,\n evalloader, num_samples, num_features)\n', (21263, 21366), False, 'from utils.compute_features import compute_features\n'), ((21795, 21851), 'numpy.linalg.norm', 'np.linalg.norm', (['class_means[:, current_cl[iter_dico], 0]'], {}), '(class_means[:, current_cl[iter_dico], 0])\n', (21809, 21851), True, 'import numpy as np\n'), ((22076, 22132), 'numpy.linalg.norm', 'np.linalg.norm', (['class_means[:, current_cl[iter_dico], 1]'], {}), '(class_means[:, current_cl[iter_dico], 1])\n', (22090, 22132), True, 'import numpy as np\n'), ((10666, 10696), 'torch.from_numpy', 'torch.from_numpy', (['cls_features'], {}), '(cls_features)\n', (10682, 10696), False, 'import torch\n'), ((10852, 10890), 'torch.nn.functional.normalize', 'F.normalize', (['cls_embedding'], {'p': '(2)', 'dim': '(0)'}), '(cls_embedding, p=2, dim=0)\n', (10863, 10890), True, 'import torch.nn.functional as F\n'), ((13914, 14046), 
'torch.optim.SGD', 'optim.SGD', (['tg_params_new'], {'lr': 'self.args.base_lr2', 'momentum': 'self.args.custom_momentum', 'weight_decay': 'self.args.custom_weight_decay'}), '(tg_params_new, lr=self.args.base_lr2, momentum=self.args.\n custom_momentum, weight_decay=self.args.custom_weight_decay)\n', (13923, 14046), True, 'import torch.optim as optim\n'), ((14209, 14337), 'torch.optim.SGD', 'optim.SGD', (['tg_params'], {'lr': 'self.args.base_lr1', 'momentum': 'self.args.custom_momentum', 'weight_decay': 'self.args.custom_weight_decay'}), '(tg_params, lr=self.args.base_lr1, momentum=self.args.\n custom_momentum, weight_decay=self.args.custom_weight_decay)\n', (14218, 14337), True, 'import torch.optim as optim\n'), ((14414, 14510), 'torch.optim.lr_scheduler.MultiStepLR', 'lr_scheduler.MultiStepLR', (['tg_optimizer'], {'milestones': 'self.lr_strat', 'gamma': 'self.args.lr_factor'}), '(tg_optimizer, milestones=self.lr_strat, gamma=self\n .args.lr_factor)\n', (14438, 14510), False, 'from torch.optim import lr_scheduler\n'), ((14566, 14673), 'torch.optim.lr_scheduler.MultiStepLR', 'lr_scheduler.MultiStepLR', (['tg_optimizer'], {'milestones': 'self.lr_strat_first_phase', 'gamma': 'self.args.lr_factor'}), '(tg_optimizer, milestones=self.lr_strat_first_phase,\n gamma=self.args.lr_factor)\n', (14590, 14673), False, 'from torch.optim import lr_scheduler\n'), ((14798, 15040), 'trainer.incremental.incremental_train_and_eval', 'incremental_train_and_eval', (['self.args.epochs', 'tg_model', 'ref_model', 'free_model', 'ref_free_model', 'tg_optimizer', 'tg_lr_scheduler', 'trainloader', 'testloader', 'iteration', 'start_iter', 'cur_lamda', 'self.args.dist', 'self.args.K', 'self.args.lw_mr'], {}), '(self.args.epochs, tg_model, ref_model,\n free_model, ref_free_model, tg_optimizer, tg_lr_scheduler, trainloader,\n testloader, iteration, start_iter, cur_lamda, self.args.dist, self.args\n .K, self.args.lw_mr)\n', (14824, 15040), False, 'from trainer.incremental import 
incremental_train_and_eval\n'), ((15104, 15346), 'trainer.incremental.incremental_train_and_eval', 'incremental_train_and_eval', (['self.args.epochs', 'tg_model', 'ref_model', 'free_model', 'ref_free_model', 'tg_optimizer', 'tg_lr_scheduler', 'trainloader', 'testloader', 'iteration', 'start_iter', 'cur_lamda', 'self.args.dist', 'self.args.K', 'self.args.lw_mr'], {}), '(self.args.epochs, tg_model, ref_model,\n free_model, ref_free_model, tg_optimizer, tg_lr_scheduler, trainloader,\n testloader, iteration, start_iter, cur_lamda, self.args.dist, self.args\n .K, self.args.lw_mr)\n', (15130, 15346), False, 'from trainer.incremental import incremental_train_and_eval\n'), ((18332, 18357), 'numpy.linalg.norm', 'np.linalg.norm', (['D'], {'axis': '(0)'}), '(D, axis=0)\n', (18346, 18357), True, 'import numpy as np\n'), ((18897, 18923), 'numpy.linalg.norm', 'np.linalg.norm', (['D2'], {'axis': '(0)'}), '(D2, axis=0)\n', (18911, 18923), True, 'import numpy as np\n'), ((19325, 19337), 'numpy.sum', 'np.sum', (['alph'], {}), '(alph)\n', (19331, 19337), True, 'import numpy as np\n'), ((19578, 19602), 'numpy.ones', 'np.ones', (['dictionary_size'], {}), '(dictionary_size)\n', (19585, 19602), True, 'import numpy as np\n'), ((20872, 20897), 'numpy.linalg.norm', 'np.linalg.norm', (['D'], {'axis': '(0)'}), '(D, axis=0)\n', (20886, 20897), True, 'import numpy as np\n'), ((21437, 21463), 'numpy.linalg.norm', 'np.linalg.norm', (['D2'], {'axis': '(0)'}), '(D2, axis=0)\n', (21451, 21463), True, 'import numpy as np\n'), ((21624, 21636), 'numpy.sum', 'np.sum', (['alph'], {}), '(alph)\n', (21630, 21636), True, 'import numpy as np\n'), ((21877, 21901), 'numpy.ones', 'np.ones', (['dictionary_size'], {}), '(dictionary_size)\n', (21884, 21901), True, 'import numpy as np\n'), ((16844, 16892), 'numpy.sum', 'np.sum', (['(alpha_dr_herding[index1, :, index2] != 0)'], {}), '(alpha_dr_herding[index1, :, index2] != 0)\n', (16850, 16892), True, 'import numpy as np\n'), ((19400, 19415), 'numpy.dot', 'np.dot', 
(['D', 'alph'], {}), '(D, alph)\n', (19406, 19415), True, 'import numpy as np\n'), ((19415, 19431), 'numpy.dot', 'np.dot', (['D2', 'alph'], {}), '(D2, alph)\n', (19421, 19431), True, 'import numpy as np\n'), ((19681, 19696), 'numpy.dot', 'np.dot', (['D', 'alph'], {}), '(D, alph)\n', (19687, 19696), True, 'import numpy as np\n'), ((19696, 19712), 'numpy.dot', 'np.dot', (['D2', 'alph'], {}), '(D2, alph)\n', (19702, 19712), True, 'import numpy as np\n'), ((21699, 21714), 'numpy.dot', 'np.dot', (['D', 'alph'], {}), '(D, alph)\n', (21705, 21714), True, 'import numpy as np\n'), ((21714, 21730), 'numpy.dot', 'np.dot', (['D2', 'alph'], {}), '(D2, alph)\n', (21720, 21730), True, 'import numpy as np\n'), ((21980, 21995), 'numpy.dot', 'np.dot', (['D', 'alph'], {}), '(D, alph)\n', (21986, 21995), True, 'import numpy as np\n'), ((21995, 22011), 'numpy.dot', 'np.dot', (['D2', 'alph'], {}), '(D2, alph)\n', (22001, 22011), True, 'import numpy as np\n'), ((9523, 9560), 'torch.mean', 'torch.mean', (['old_embedding_norm'], {'dim': '(0)'}), '(old_embedding_norm, dim=0)\n', (9533, 9560), False, 'import torch\n'), ((10020, 10046), 'numpy.where', 'np.where', (['(cls_indices == 1)'], {}), '(cls_indices == 1)\n', (10028, 10046), True, 'import numpy as np\n'), ((19145, 19164), 'numpy.where', 'np.where', (['(alph == 1)'], {}), '(alph == 1)\n', (19153, 19164), True, 'import numpy as np\n'), ((19269, 19288), 'numpy.where', 'np.where', (['(alph == 1)'], {}), '(alph == 1)\n', (19277, 19288), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import numpy
import rawcam
import random
from hashlib import md5
while True:
rc = rawcam.init() # initializes camera interface, returns config object
#rc.pack = rawcam.Pack.NONE
#rc.unpack = rawcam.Unpack.NONE
#rawcam.set_timing(0, 0, 0, 0, 0, 0, 0)
rawcam.set_data_lanes(2)
rawcam.set_image_id(0x2a)
rawcam.set_buffer_size(2048*128)
rawcam.set_buffer_num(8)
rawcam.set_buffer_dimensions(2048, 128)
rawcam.set_pack_mode(0)
rawcam.set_unpack_mode(0)
rawcam.set_unpack_mode(0)
rawcam.set_encoding_fourcc(ord('G'), ord('R'), ord('B'), ord('G'))
#rawcam.set_encode_block_length(32)
#rawcam.set_embedded_data_lines(32)
rawcam.set_zero_copy(1)
rawcam.set_camera_num(1)
print("debug after init params")
rawcam.debug()
#rawcam.format_commit()
#print("debug after format_commit")
#rawcam.debug()
print("start rawcam")
rawcam.start()
print("debug after start")
rawcam.debug()
j=0
while j<50:
#print("iter")
#print(rawcam.buffer_count())
for i in range(rawcam.buffer_count()):
j+=1
buf = rawcam.buffer_get()
#print(dir(buf))
print ("[%4d] got buf %s, len=%d, hash=%s" % (j,buf,len(buf),md5(buf).hexdigest()))
arr=numpy.frombuffer(buf,dtype='uint8') # yes this is zerocopy
#print ("average sample value %d" % (arr.sum()/len(arr)))
#print(j)
if (1):
open(("rxtest/%02d.bin" % j),"wb").write(buf)
# do other stuff with buffer contents
rawcam.buffer_free(buf)
rawcam.stop()
| [
"rawcam.set_buffer_size",
"rawcam.set_pack_mode",
"rawcam.set_unpack_mode",
"rawcam.set_buffer_dimensions",
"rawcam.set_camera_num",
"rawcam.buffer_get",
"numpy.frombuffer",
"rawcam.set_data_lanes",
"hashlib.md5",
"rawcam.init",
"rawcam.set_buffer_num",
"rawcam.set_zero_copy",
"rawcam.buffer... | [((110, 123), 'rawcam.init', 'rawcam.init', ([], {}), '()\n', (121, 123), False, 'import rawcam\n'), ((295, 319), 'rawcam.set_data_lanes', 'rawcam.set_data_lanes', (['(2)'], {}), '(2)\n', (316, 319), False, 'import rawcam\n'), ((324, 347), 'rawcam.set_image_id', 'rawcam.set_image_id', (['(42)'], {}), '(42)\n', (343, 347), False, 'import rawcam\n'), ((354, 388), 'rawcam.set_buffer_size', 'rawcam.set_buffer_size', (['(2048 * 128)'], {}), '(2048 * 128)\n', (376, 388), False, 'import rawcam\n'), ((391, 415), 'rawcam.set_buffer_num', 'rawcam.set_buffer_num', (['(8)'], {}), '(8)\n', (412, 415), False, 'import rawcam\n'), ((420, 459), 'rawcam.set_buffer_dimensions', 'rawcam.set_buffer_dimensions', (['(2048)', '(128)'], {}), '(2048, 128)\n', (448, 459), False, 'import rawcam\n'), ((464, 487), 'rawcam.set_pack_mode', 'rawcam.set_pack_mode', (['(0)'], {}), '(0)\n', (484, 487), False, 'import rawcam\n'), ((492, 517), 'rawcam.set_unpack_mode', 'rawcam.set_unpack_mode', (['(0)'], {}), '(0)\n', (514, 517), False, 'import rawcam\n'), ((522, 547), 'rawcam.set_unpack_mode', 'rawcam.set_unpack_mode', (['(0)'], {}), '(0)\n', (544, 547), False, 'import rawcam\n'), ((703, 726), 'rawcam.set_zero_copy', 'rawcam.set_zero_copy', (['(1)'], {}), '(1)\n', (723, 726), False, 'import rawcam\n'), ((732, 756), 'rawcam.set_camera_num', 'rawcam.set_camera_num', (['(1)'], {}), '(1)\n', (753, 756), False, 'import rawcam\n'), ((799, 813), 'rawcam.debug', 'rawcam.debug', ([], {}), '()\n', (811, 813), False, 'import rawcam\n'), ((934, 948), 'rawcam.start', 'rawcam.start', ([], {}), '()\n', (946, 948), False, 'import rawcam\n'), ((985, 999), 'rawcam.debug', 'rawcam.debug', ([], {}), '()\n', (997, 999), False, 'import rawcam\n'), ((1717, 1730), 'rawcam.stop', 'rawcam.stop', ([], {}), '()\n', (1728, 1730), False, 'import rawcam\n'), ((1118, 1139), 'rawcam.buffer_count', 'rawcam.buffer_count', ([], {}), '()\n', (1137, 1139), False, 'import rawcam\n'), ((1177, 1196), 'rawcam.buffer_get', 
'rawcam.buffer_get', ([], {}), '()\n', (1194, 1196), False, 'import rawcam\n'), ((1351, 1387), 'numpy.frombuffer', 'numpy.frombuffer', (['buf'], {'dtype': '"""uint8"""'}), "(buf, dtype='uint8')\n", (1367, 1387), False, 'import numpy\n'), ((1688, 1711), 'rawcam.buffer_free', 'rawcam.buffer_free', (['buf'], {}), '(buf)\n', (1706, 1711), False, 'import rawcam\n'), ((1299, 1307), 'hashlib.md5', 'md5', (['buf'], {}), '(buf)\n', (1302, 1307), False, 'from hashlib import md5\n')] |
import argparse
import admin as ad
from config import Config
import numpy as np
"""Bring in the configuration filename from the command line"""
parser = argparse.ArgumentParser(
description="Get input YAML file as inputFile")
parser.add_argument('inputFile',
help='The input YAML file to drive the simulation')
args = parser.parse_args()
yaml_data = ad.yaml_loader(args.inputFile)
config = Config(yaml_data)
""" Preallocate memory for the step outputs """
lineSteps = config.steps[1:]
initStep = config.steps[0]
stepOutput = np.zeros(initStep['dim'])
outputs = [stepOutput]
nOut = initStep['dim']
for step in lineSteps:
nIn = nOut
nOut = step['dim']
stepOutput = np.zeros(nOut)
outputs.append(stepOutput)
fd = open(config.outputFile, "w")
""" Time to run the line """
for sample in range(config.nSamples):
iStep = 0
stepType = initStep['type']
ad.log(10, "Step {} is type '{}'.".format(iStep, stepType))
for i in range(initStep['dim']):
outputs[0][i] = np.random.normal(
initStep['mean'][i],
initStep['sigma'][i],
1)
nOut = initStep['dim']
for step in lineSteps:
iStep = iStep + 1
stepType = step['type']
ad.log(10, "Step {} is type '{}'.".format(iStep, stepType))
nIn = nOut
nOut = step['dim']
outputs[iStep][:] = np.random.normal(step['mean'], step['sigma'], nOut)
for outputKey in step['polynomials']:
outputFunction = step['polynomials'][outputKey]
toOutput = outputFunction['output']
for monomialKey in outputFunction['terms']:
monomial = outputFunction['terms'][monomialKey]
contrib = 1.0
(j, k) = monomial[0]
for factor in monomial[1:]:
contrib *= factor[0]
contrib *= outputs[iStep + j][k] ** factor[1]
outputs[iStep][toOutput] += contrib
tmp1 = np.array([])
for tmp2 in outputs:
tmp1 = np.concatenate([tmp1, np.array(tmp2)])
fd.write(ad.array2csv(tmp1))
| [
"numpy.random.normal",
"argparse.ArgumentParser",
"config.Config",
"admin.array2csv",
"numpy.array",
"admin.yaml_loader",
"numpy.zeros"
] | [((154, 225), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Get input YAML file as inputFile"""'}), "(description='Get input YAML file as inputFile')\n", (177, 225), False, 'import argparse\n'), ((374, 404), 'admin.yaml_loader', 'ad.yaml_loader', (['args.inputFile'], {}), '(args.inputFile)\n', (388, 404), True, 'import admin as ad\n'), ((414, 431), 'config.Config', 'Config', (['yaml_data'], {}), '(yaml_data)\n', (420, 431), False, 'from config import Config\n'), ((550, 575), 'numpy.zeros', 'np.zeros', (["initStep['dim']"], {}), "(initStep['dim'])\n", (558, 575), True, 'import numpy as np\n'), ((702, 716), 'numpy.zeros', 'np.zeros', (['nOut'], {}), '(nOut)\n', (710, 716), True, 'import numpy as np\n'), ((1982, 1994), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1990, 1994), True, 'import numpy as np\n'), ((1023, 1085), 'numpy.random.normal', 'np.random.normal', (["initStep['mean'][i]", "initStep['sigma'][i]", '(1)'], {}), "(initStep['mean'][i], initStep['sigma'][i], 1)\n", (1039, 1085), True, 'import numpy as np\n'), ((1377, 1428), 'numpy.random.normal', 'np.random.normal', (["step['mean']", "step['sigma']", 'nOut'], {}), "(step['mean'], step['sigma'], nOut)\n", (1393, 1428), True, 'import numpy as np\n'), ((2088, 2106), 'admin.array2csv', 'ad.array2csv', (['tmp1'], {}), '(tmp1)\n', (2100, 2106), True, 'import admin as ad\n'), ((2057, 2071), 'numpy.array', 'np.array', (['tmp2'], {}), '(tmp2)\n', (2065, 2071), True, 'import numpy as np\n')] |
import sys
import time
import argparse
import os
import warnings
import numpy as np
import torch
import torch.nn as nn
from collections import defaultdict
import pickle as pk
from torch.nn import Parameter
from layers import DNANodeRepModule, ConvNodeRepModule
from metrics import compute_mae, compute_mape, compute_ssi, compute_geh, \
compute_cpl, compute_cpc, compute_binned_metric, compute_macro_metric, \
mae_metric, cpc_metric, cpl_metric, geh_metric, ssi_metric, mape_metric
from dataset import UrbanPlanningDataset
from training_environment import TrainingSettings as ts, PerformanceLogger, NodeConvType, \
JKType
from training_environment import checkpoint_filepath, OutputLogger
from torch_geometric.nn import JumpingKnowledge
parser = argparse.ArgumentParser(description='UP')
parser.add_argument('--enable-cuda', action='store_true',
help='Enable CUDA')
args = parser.parse_args()
args.device = None
if args.enable_cuda and torch.cuda.is_available():
args.device = torch.device('cuda')
else:
args.device = torch.device('cpu')
class EdgeRegressor(nn.Module):
def __init__(self, num_node_features, num_edge_features, node_rep_size,
hidden_dim):
super(EdgeRegressor, self).__init__()
# Linear layer to transform target edge features
self.fc_edges = nn.Sequential(
nn.Linear(num_edge_features + 2 * num_node_features, hidden_dim),
nn.ReLU(),
nn.BatchNorm1d(hidden_dim),
nn.Dropout(p=ts.drop_prob),
nn.Linear(hidden_dim, hidden_dim),
)
concat_hidden_dim = hidden_dim
if ts.include_node_reps:
if ts.node_conv_type == NodeConvType.GraphConvolution:
self.node_rep_module = ConvNodeRepModule(num_node_features,
node_rep_size,
ts.num_node_rep_layers,
ts.improved_gcn,
ts.drop_prob)
elif ts.node_conv_type == NodeConvType.DNAConvolution:
self.node_rep_module = DNANodeRepModule(num_node_features,
node_rep_size,
ts.num_node_rep_layers,
ts.dna_heads,
ts.dna_groups,
ts.drop_prob)
concat_hidden_dim += 2 * node_rep_size
if ts.jk_type is not JKType.NoJK:
self.jk = JumpingKnowledge(ts.jk_type.value, channels=8,
num_layers=ts.num_node_rep_layers)
lin_size = node_rep_size
if ts.jk_type is JKType.Concat:
lin_size = ts.num_node_rep_layers*node_rep_size
self.jk_lin = nn.Linear(lin_size, node_rep_size)
self.node_weight = Parameter(torch.from_numpy(np.array(1.0, dtype=np.float32)))
self.edge_weight = Parameter(torch.from_numpy(np.array(1.0, dtype=np.float32)))
self.regression_head = nn.Sequential(
nn.ReLU(),
nn.BatchNorm1d(hidden_dim),
nn.Dropout(p=ts.drop_prob),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.BatchNorm1d(hidden_dim),
nn.Dropout(p=ts.drop_prob),
nn.Linear(hidden_dim, 1)
)
def forward(self, x_nodes, x_edges_batch, edge_indices_batch, edge_indices,
edge_weight=None):
"""
:param x_nodes: Node features of shape [N, D]
:param x_edges_batch: Edge features of shape [B, K]
:param edge_indices_batch: Matrix of shape [B, 2] indicating the
indices of the nodes connected by each edge.
:param edge_indices: Matrix of shape [2, E] indicating for each edge
in the graph the two node IDs it connects.
:param edge_weight: Vector of shape [E] containing the edge weight for
each edge in the graph.
:return: Predictions for edges with shape [B, 1]
"""
# Compute hidden representation of target edge
x_nodes_left = x_nodes[edge_indices_batch[:, 0]]
x_nodes_right = x_nodes[edge_indices_batch[:, 1]]
x_concat = torch.cat([x_nodes_left, x_edges_batch, x_nodes_right], dim=-1)
h_edges = self.fc_edges(x_concat)
h_total = self.node_weight * h_edges
# Compute hidden representations of nodes
if ts.include_node_reps:
intermediate_node_reps = self.node_rep_module(x_nodes,
edge_indices.t(),
edge_weight)
if ts.jk_type is JKType.NoJK:
h_nodes = intermediate_node_reps[-1]
else:
h_nodes = self.jk(intermediate_node_reps)
h_nodes = self.jk_lin(h_nodes)
# Get hidden representations of nodes incident to target edges
h_nodes_left = h_nodes[edge_indices_batch[:, 0]]
h_nodes_right = h_nodes[edge_indices_batch[:, 1]]
h_total += self.edge_weight * h_nodes_left
h_total += self.edge_weight * h_nodes_right
regression_output = self.regression_head(h_total)
return regression_output.squeeze(-1)
def train_epoch(epoch, predictor, data, optimizer, loss_criterion, logger,
lr_schedule):
predictor.train()
for (edge_idcs_batch, x_edges_batch, edge_labels_batch,
_) in data.train_loader:
edge_idcs_batch = edge_idcs_batch.to(device=args.device)
x_edges_batch = x_edges_batch.to(device=args.device)
edge_labels_batch = edge_labels_batch.to(device=args.device)
optimizer.zero_grad()
reg_out = predictor(data.node_feats, x_edges_batch, edge_idcs_batch,
data.flow_topology.edge_indices,
edge_weight=data.flow_topology.edge_weights)
loss = loss_criterion(reg_out, edge_labels_batch)
loss.backward()
optimizer.step()
logger.add_values({"train_loss": loss.item()})
lr_schedule.step()
def validate_epoch(epoch, predictor, data, loss_criterion, data_loader, logger,
test):
predictor.eval()
prefix = "test" if test else "val"
for (edge_idcs_batch, x_edges_batch, edge_labels_batch, edge_buckets_batch) in data_loader:
edge_idcs_batch = edge_idcs_batch.to(device=args.device)
x_edges_batch = x_edges_batch.to(device=args.device)
edge_labels_batch = edge_labels_batch.to(device=args.device)
reg_out = predictor(data.node_feats, x_edges_batch, edge_idcs_batch,
data.flow_topology.edge_indices,
edge_weight=data.flow_topology.edge_weights)
loss = loss_criterion(reg_out, edge_labels_batch)
logger.add_values({
prefix + "_loss": loss.item(),
prefix + "_predictions": reg_out.detach().cpu().numpy(),
prefix + "_labels": edge_labels_batch.detach().cpu().numpy(),
prefix + "_bins": edge_buckets_batch.detach().cpu().numpy()
})
if test:
with open("preds_labels.pk", "wb") as fd:
preds = data.label_scaler.inverse_transform(np.concatenate(logger._current_epoch_metrics["test_predictions"], axis=-1).reshape(-1, 1))
labels = data.label_scaler.inverse_transform(np.concatenate(logger._current_epoch_metrics["test_labels"], axis=-1).reshape(-1, 1))
pk.dump((preds, labels, logger._current_epoch_metrics["test_node_idcs"]), fd)
def run_training():
# Set up training environment
if not os.path.exists(ts.cp_folder):
os.makedirs(ts.cp_folder)
log_filepath = checkpoint_filepath(ts.cp_folder, "log", __file__, {},
".pk")
summary_filepath = checkpoint_filepath(ts.cp_folder, "summary", __file__,
{}, ".txt")
output_logger = OutputLogger(checkpoint_filepath(ts.cp_folder, "output",
__file__, {}, ".txt"))
sys.stdout = output_logger
ts.write_summary_file(checkpoint_filepath(ts.cp_folder, "hyperparams",
__file__, {}, "txt"))
print(ts.settings_description())
# Load data
ds = UrbanPlanningDataset(ts.data_base_path, ts.num_bins, ts.batch_size,
ts.n_quantiles, ts.resampling,
ts.excluded_node_feature_columns,
ts.excluded_edge_feature_columns, False,
ts.include_edge_flow_feat, ts.adj_flow_threshold,
ts.seed)
# Preprocess data
ds.to(args.device)
def _get_metric_funcs(prefix):
preds_key = prefix+"_predictions"
labels_key = prefix+"_labels"
bins_key = prefix+"_bins"
return {
prefix+"_loss": (lambda m: np.nanmean(m[prefix+"_loss"])),
prefix + "_mae": (lambda m: compute_mae(m[preds_key], m[labels_key], ds)),
prefix + "_binned_mae": (lambda m: compute_binned_metric(mae_metric, m[preds_key], m[labels_key], m[bins_key], ds, ts.num_bins)),
prefix + "_macro_mae": (lambda m: compute_macro_metric(mae_metric, m[preds_key], m[labels_key], m[bins_key], ds, ts.num_bins)),
prefix + "_mape": (lambda m: compute_mape(m[preds_key], m[labels_key], ds)),
prefix + "_binned_mape": (lambda m: compute_binned_metric(mape_metric, m[preds_key], m[labels_key], m[bins_key], ds, ts.num_bins)),
prefix + "_macro_mape": (lambda m: compute_macro_metric(mape_metric, m[preds_key], m[labels_key], m[bins_key], ds, ts.num_bins)),
prefix + "_ssi": (lambda m: compute_ssi(m[preds_key], m[labels_key], ds)),
prefix + "_binned_ssi": (lambda m: compute_binned_metric(ssi_metric, m[preds_key], m[labels_key], m[bins_key], ds, ts.num_bins)),
prefix + "_macro_ssi": (lambda m: compute_macro_metric(ssi_metric, m[preds_key], m[labels_key], m[bins_key], ds, ts.num_bins)),
prefix + "_geh": (lambda m: compute_geh(m[preds_key], m[labels_key], ds)),
prefix + "_binned_geh": (lambda m: compute_binned_metric(geh_metric, m[preds_key], m[labels_key], m[bins_key], ds, ts.num_bins)),
prefix + "_macro_geh": (lambda m: compute_macro_metric(geh_metric, m[preds_key], m[labels_key], m[bins_key], ds, ts.num_bins)),
prefix + "_cpl": (lambda m: compute_cpl(m[preds_key], m[labels_key], ds)),
prefix + "_binned_cpl": (lambda m: compute_binned_metric(cpl_metric, m[preds_key], m[labels_key], m[bins_key], ds, ts.num_bins)),
prefix + "_macro_cpl": (lambda m: compute_macro_metric(cpl_metric, m[preds_key], m[labels_key], m[bins_key], ds, ts.num_bins)),
prefix + "_cpc": (lambda m: compute_cpc(m[preds_key], m[labels_key], ds)),
prefix + "_binned_cpc": (lambda m: compute_binned_metric(cpc_metric, m[preds_key], m[labels_key], m[bins_key], ds, ts.num_bins)),
prefix + "_macro_cpc": (lambda m: compute_macro_metric(cpc_metric, m[preds_key], m[labels_key], m[bins_key], ds, ts.num_bins)),
}
metric_funcs = {
"train_loss": (lambda m: np.nanmean(m["train_loss"])),
**_get_metric_funcs("val"),
**_get_metric_funcs("test"),
}
logger = PerformanceLogger(metric_funcs, "val_macro_mae", log_filepath,
write_every=ts.write_log_every)
predictor = EdgeRegressor(ds.num_node_feats, ds.num_edge_feats,
hidden_dim=ts.hidden_dim,
node_rep_size=ts.node_rep_size)
predictor = predictor.to(device=args.device)
optimizer = torch.optim.Adam(predictor.parameters(), lr=ts.lr)
lr_schedule = torch.optim.lr_scheduler.MultiStepLR(optimizer,
list(ts.lr_schedule))
loss_criterion = (nn.L1Loss() if ts.regression_loss == "L1"
else nn.MSELoss())
print("Start training")
for epoch in range(-1, ts.num_epochs):
if epoch >= 0:
train_epoch(epoch, predictor, ds, optimizer, loss_criterion,
logger, lr_schedule)
validate_epoch(epoch, predictor, ds, loss_criterion, ds.val_loader,
logger, test=False)
validate_epoch(epoch, predictor, ds, loss_criterion, ds.test_loader,
logger, test=True)
logger.complete_epoch()
print(logger.epoch_summary())
if epoch % ts.write_log_every == 0:
logger.write(log_filepath)
logger.write(log_filepath)
logger.write_summary(summary_filepath, ts.settings_description())
return logger
if __name__ == '__main__':
run_training()
| [
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.L1Loss",
"metrics.compute_mape",
"torch.nn.MSELoss",
"torch.nn.BatchNorm1d",
"training_environment.checkpoint_filepath",
"torch.cuda.is_available",
"dataset.UrbanPlanningDataset",
"numpy.nanmean",
"numpy.array",
"layers.DNANodeRepModule",
"layer... | [((762, 803), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""UP"""'}), "(description='UP')\n", (785, 803), False, 'import argparse\n'), ((972, 997), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (995, 997), False, 'import torch\n'), ((1017, 1037), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1029, 1037), False, 'import torch\n'), ((1062, 1081), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1074, 1081), False, 'import torch\n'), ((8030, 8091), 'training_environment.checkpoint_filepath', 'checkpoint_filepath', (['ts.cp_folder', '"""log"""', '__file__', '{}', '""".pk"""'], {}), "(ts.cp_folder, 'log', __file__, {}, '.pk')\n", (8049, 8091), False, 'from training_environment import checkpoint_filepath, OutputLogger\n'), ((8154, 8220), 'training_environment.checkpoint_filepath', 'checkpoint_filepath', (['ts.cp_folder', '"""summary"""', '__file__', '{}', '""".txt"""'], {}), "(ts.cp_folder, 'summary', __file__, {}, '.txt')\n", (8173, 8220), False, 'from training_environment import checkpoint_filepath, OutputLogger\n'), ((8654, 8901), 'dataset.UrbanPlanningDataset', 'UrbanPlanningDataset', (['ts.data_base_path', 'ts.num_bins', 'ts.batch_size', 'ts.n_quantiles', 'ts.resampling', 'ts.excluded_node_feature_columns', 'ts.excluded_edge_feature_columns', '(False)', 'ts.include_edge_flow_feat', 'ts.adj_flow_threshold', 'ts.seed'], {}), '(ts.data_base_path, ts.num_bins, ts.batch_size, ts.\n n_quantiles, ts.resampling, ts.excluded_node_feature_columns, ts.\n excluded_edge_feature_columns, False, ts.include_edge_flow_feat, ts.\n adj_flow_threshold, ts.seed)\n', (8674, 8901), False, 'from dataset import UrbanPlanningDataset\n'), ((11726, 11825), 'training_environment.PerformanceLogger', 'PerformanceLogger', (['metric_funcs', '"""val_macro_mae"""', 'log_filepath'], {'write_every': 'ts.write_log_every'}), "(metric_funcs, 'val_macro_mae', log_filepath, write_every=\n 
ts.write_log_every)\n", (11743, 11825), False, 'from training_environment import TrainingSettings as ts, PerformanceLogger, NodeConvType, JKType\n'), ((4483, 4546), 'torch.cat', 'torch.cat', (['[x_nodes_left, x_edges_batch, x_nodes_right]'], {'dim': '(-1)'}), '([x_nodes_left, x_edges_batch, x_nodes_right], dim=-1)\n', (4492, 4546), False, 'import torch\n'), ((7947, 7975), 'os.path.exists', 'os.path.exists', (['ts.cp_folder'], {}), '(ts.cp_folder)\n', (7961, 7975), False, 'import os\n'), ((7985, 8010), 'os.makedirs', 'os.makedirs', (['ts.cp_folder'], {}), '(ts.cp_folder)\n', (7996, 8010), False, 'import os\n'), ((8297, 8362), 'training_environment.checkpoint_filepath', 'checkpoint_filepath', (['ts.cp_folder', '"""output"""', '__file__', '{}', '""".txt"""'], {}), "(ts.cp_folder, 'output', __file__, {}, '.txt')\n", (8316, 8362), False, 'from training_environment import checkpoint_filepath, OutputLogger\n'), ((8474, 8543), 'training_environment.checkpoint_filepath', 'checkpoint_filepath', (['ts.cp_folder', '"""hyperparams"""', '__file__', '{}', '"""txt"""'], {}), "(ts.cp_folder, 'hyperparams', __file__, {}, 'txt')\n", (8493, 8543), False, 'from training_environment import checkpoint_filepath, OutputLogger\n'), ((8601, 8626), 'training_environment.TrainingSettings.settings_description', 'ts.settings_description', ([], {}), '()\n', (8624, 8626), True, 'from training_environment import TrainingSettings as ts, PerformanceLogger, NodeConvType, JKType\n'), ((12321, 12332), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (12330, 12332), True, 'import torch.nn as nn\n'), ((12390, 12402), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (12400, 12402), True, 'import torch.nn as nn\n'), ((13083, 13108), 'training_environment.TrainingSettings.settings_description', 'ts.settings_description', ([], {}), '()\n', (13106, 13108), True, 'from training_environment import TrainingSettings as ts, PerformanceLogger, NodeConvType, JKType\n'), ((1378, 1442), 'torch.nn.Linear', 
'nn.Linear', (['(num_edge_features + 2 * num_node_features)', 'hidden_dim'], {}), '(num_edge_features + 2 * num_node_features, hidden_dim)\n', (1387, 1442), True, 'import torch.nn as nn\n'), ((1456, 1465), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1463, 1465), True, 'import torch.nn as nn\n'), ((1479, 1505), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['hidden_dim'], {}), '(hidden_dim)\n', (1493, 1505), True, 'import torch.nn as nn\n'), ((1519, 1545), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'ts.drop_prob'}), '(p=ts.drop_prob)\n', (1529, 1545), True, 'import torch.nn as nn\n'), ((1559, 1592), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'hidden_dim'], {}), '(hidden_dim, hidden_dim)\n', (1568, 1592), True, 'import torch.nn as nn\n'), ((3329, 3338), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3336, 3338), True, 'import torch.nn as nn\n'), ((3352, 3378), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['hidden_dim'], {}), '(hidden_dim)\n', (3366, 3378), True, 'import torch.nn as nn\n'), ((3392, 3418), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'ts.drop_prob'}), '(p=ts.drop_prob)\n', (3402, 3418), True, 'import torch.nn as nn\n'), ((3432, 3465), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'hidden_dim'], {}), '(hidden_dim, hidden_dim)\n', (3441, 3465), True, 'import torch.nn as nn\n'), ((3479, 3488), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3486, 3488), True, 'import torch.nn as nn\n'), ((3502, 3528), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['hidden_dim'], {}), '(hidden_dim)\n', (3516, 3528), True, 'import torch.nn as nn\n'), ((3542, 3568), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'ts.drop_prob'}), '(p=ts.drop_prob)\n', (3552, 3568), True, 'import torch.nn as nn\n'), ((3582, 3606), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', '(1)'], {}), '(hidden_dim, 1)\n', (3591, 3606), True, 'import torch.nn as nn\n'), ((7802, 7879), 'pickle.dump', 'pk.dump', (["(preds, labels, logger._current_epoch_metrics['test_node_idcs'])", 'fd'], {}), "((preds, 
labels, logger._current_epoch_metrics['test_node_idcs']), fd)\n", (7809, 7879), True, 'import pickle as pk\n'), ((11604, 11631), 'numpy.nanmean', 'np.nanmean', (["m['train_loss']"], {}), "(m['train_loss'])\n", (11614, 11631), True, 'import numpy as np\n'), ((1783, 1893), 'layers.ConvNodeRepModule', 'ConvNodeRepModule', (['num_node_features', 'node_rep_size', 'ts.num_node_rep_layers', 'ts.improved_gcn', 'ts.drop_prob'], {}), '(num_node_features, node_rep_size, ts.num_node_rep_layers,\n ts.improved_gcn, ts.drop_prob)\n', (1800, 1893), False, 'from layers import DNANodeRepModule, ConvNodeRepModule\n'), ((2746, 2832), 'torch_geometric.nn.JumpingKnowledge', 'JumpingKnowledge', (['ts.jk_type.value'], {'channels': '(8)', 'num_layers': 'ts.num_node_rep_layers'}), '(ts.jk_type.value, channels=8, num_layers=ts.\n num_node_rep_layers)\n', (2762, 2832), False, 'from torch_geometric.nn import JumpingKnowledge\n'), ((3058, 3092), 'torch.nn.Linear', 'nn.Linear', (['lin_size', 'node_rep_size'], {}), '(lin_size, node_rep_size)\n', (3067, 3092), True, 'import torch.nn as nn\n'), ((3148, 3179), 'numpy.array', 'np.array', (['(1.0)'], {'dtype': 'np.float32'}), '(1.0, dtype=np.float32)\n', (3156, 3179), True, 'import numpy as np\n'), ((3236, 3267), 'numpy.array', 'np.array', (['(1.0)'], {'dtype': 'np.float32'}), '(1.0, dtype=np.float32)\n', (3244, 3267), True, 'import numpy as np\n'), ((9288, 9319), 'numpy.nanmean', 'np.nanmean', (["m[prefix + '_loss']"], {}), "(m[prefix + '_loss'])\n", (9298, 9319), True, 'import numpy as np\n'), ((9360, 9404), 'metrics.compute_mae', 'compute_mae', (['m[preds_key]', 'm[labels_key]', 'ds'], {}), '(m[preds_key], m[labels_key], ds)\n', (9371, 9404), False, 'from metrics import compute_mae, compute_mape, compute_ssi, compute_geh, compute_cpl, compute_cpc, compute_binned_metric, compute_macro_metric, mae_metric, cpc_metric, cpl_metric, geh_metric, ssi_metric, mape_metric\n'), ((9454, 9550), 'metrics.compute_binned_metric', 'compute_binned_metric', 
(['mae_metric', 'm[preds_key]', 'm[labels_key]', 'm[bins_key]', 'ds', 'ts.num_bins'], {}), '(mae_metric, m[preds_key], m[labels_key], m[bins_key],\n ds, ts.num_bins)\n', (9475, 9550), False, 'from metrics import compute_mae, compute_mape, compute_ssi, compute_geh, compute_cpl, compute_cpc, compute_binned_metric, compute_macro_metric, mae_metric, cpc_metric, cpl_metric, geh_metric, ssi_metric, mape_metric\n'), ((9595, 9690), 'metrics.compute_macro_metric', 'compute_macro_metric', (['mae_metric', 'm[preds_key]', 'm[labels_key]', 'm[bins_key]', 'ds', 'ts.num_bins'], {}), '(mae_metric, m[preds_key], m[labels_key], m[bins_key],\n ds, ts.num_bins)\n', (9615, 9690), False, 'from metrics import compute_mae, compute_mape, compute_ssi, compute_geh, compute_cpl, compute_cpc, compute_binned_metric, compute_macro_metric, mae_metric, cpc_metric, cpl_metric, geh_metric, ssi_metric, mape_metric\n'), ((9730, 9775), 'metrics.compute_mape', 'compute_mape', (['m[preds_key]', 'm[labels_key]', 'ds'], {}), '(m[preds_key], m[labels_key], ds)\n', (9742, 9775), False, 'from metrics import compute_mae, compute_mape, compute_ssi, compute_geh, compute_cpl, compute_cpc, compute_binned_metric, compute_macro_metric, mae_metric, cpc_metric, cpl_metric, geh_metric, ssi_metric, mape_metric\n'), ((9826, 9923), 'metrics.compute_binned_metric', 'compute_binned_metric', (['mape_metric', 'm[preds_key]', 'm[labels_key]', 'm[bins_key]', 'ds', 'ts.num_bins'], {}), '(mape_metric, m[preds_key], m[labels_key], m[bins_key],\n ds, ts.num_bins)\n', (9847, 9923), False, 'from metrics import compute_mae, compute_mape, compute_ssi, compute_geh, compute_cpl, compute_cpc, compute_binned_metric, compute_macro_metric, mae_metric, cpc_metric, cpl_metric, geh_metric, ssi_metric, mape_metric\n'), ((9969, 10065), 'metrics.compute_macro_metric', 'compute_macro_metric', (['mape_metric', 'm[preds_key]', 'm[labels_key]', 'm[bins_key]', 'ds', 'ts.num_bins'], {}), '(mape_metric, m[preds_key], m[labels_key], m[bins_key],\n ds, 
ts.num_bins)\n', (9989, 10065), False, 'from metrics import compute_mae, compute_mape, compute_ssi, compute_geh, compute_cpl, compute_cpc, compute_binned_metric, compute_macro_metric, mae_metric, cpc_metric, cpl_metric, geh_metric, ssi_metric, mape_metric\n'), ((10104, 10148), 'metrics.compute_ssi', 'compute_ssi', (['m[preds_key]', 'm[labels_key]', 'ds'], {}), '(m[preds_key], m[labels_key], ds)\n', (10115, 10148), False, 'from metrics import compute_mae, compute_mape, compute_ssi, compute_geh, compute_cpl, compute_cpc, compute_binned_metric, compute_macro_metric, mae_metric, cpc_metric, cpl_metric, geh_metric, ssi_metric, mape_metric\n'), ((10198, 10294), 'metrics.compute_binned_metric', 'compute_binned_metric', (['ssi_metric', 'm[preds_key]', 'm[labels_key]', 'm[bins_key]', 'ds', 'ts.num_bins'], {}), '(ssi_metric, m[preds_key], m[labels_key], m[bins_key],\n ds, ts.num_bins)\n', (10219, 10294), False, 'from metrics import compute_mae, compute_mape, compute_ssi, compute_geh, compute_cpl, compute_cpc, compute_binned_metric, compute_macro_metric, mae_metric, cpc_metric, cpl_metric, geh_metric, ssi_metric, mape_metric\n'), ((10339, 10434), 'metrics.compute_macro_metric', 'compute_macro_metric', (['ssi_metric', 'm[preds_key]', 'm[labels_key]', 'm[bins_key]', 'ds', 'ts.num_bins'], {}), '(ssi_metric, m[preds_key], m[labels_key], m[bins_key],\n ds, ts.num_bins)\n', (10359, 10434), False, 'from metrics import compute_mae, compute_mape, compute_ssi, compute_geh, compute_cpl, compute_cpc, compute_binned_metric, compute_macro_metric, mae_metric, cpc_metric, cpl_metric, geh_metric, ssi_metric, mape_metric\n'), ((10473, 10517), 'metrics.compute_geh', 'compute_geh', (['m[preds_key]', 'm[labels_key]', 'ds'], {}), '(m[preds_key], m[labels_key], ds)\n', (10484, 10517), False, 'from metrics import compute_mae, compute_mape, compute_ssi, compute_geh, compute_cpl, compute_cpc, compute_binned_metric, compute_macro_metric, mae_metric, cpc_metric, cpl_metric, geh_metric, ssi_metric, 
mape_metric\n'), ((10567, 10663), 'metrics.compute_binned_metric', 'compute_binned_metric', (['geh_metric', 'm[preds_key]', 'm[labels_key]', 'm[bins_key]', 'ds', 'ts.num_bins'], {}), '(geh_metric, m[preds_key], m[labels_key], m[bins_key],\n ds, ts.num_bins)\n', (10588, 10663), False, 'from metrics import compute_mae, compute_mape, compute_ssi, compute_geh, compute_cpl, compute_cpc, compute_binned_metric, compute_macro_metric, mae_metric, cpc_metric, cpl_metric, geh_metric, ssi_metric, mape_metric\n'), ((10708, 10803), 'metrics.compute_macro_metric', 'compute_macro_metric', (['geh_metric', 'm[preds_key]', 'm[labels_key]', 'm[bins_key]', 'ds', 'ts.num_bins'], {}), '(geh_metric, m[preds_key], m[labels_key], m[bins_key],\n ds, ts.num_bins)\n', (10728, 10803), False, 'from metrics import compute_mae, compute_mape, compute_ssi, compute_geh, compute_cpl, compute_cpc, compute_binned_metric, compute_macro_metric, mae_metric, cpc_metric, cpl_metric, geh_metric, ssi_metric, mape_metric\n'), ((10842, 10886), 'metrics.compute_cpl', 'compute_cpl', (['m[preds_key]', 'm[labels_key]', 'ds'], {}), '(m[preds_key], m[labels_key], ds)\n', (10853, 10886), False, 'from metrics import compute_mae, compute_mape, compute_ssi, compute_geh, compute_cpl, compute_cpc, compute_binned_metric, compute_macro_metric, mae_metric, cpc_metric, cpl_metric, geh_metric, ssi_metric, mape_metric\n'), ((10936, 11032), 'metrics.compute_binned_metric', 'compute_binned_metric', (['cpl_metric', 'm[preds_key]', 'm[labels_key]', 'm[bins_key]', 'ds', 'ts.num_bins'], {}), '(cpl_metric, m[preds_key], m[labels_key], m[bins_key],\n ds, ts.num_bins)\n', (10957, 11032), False, 'from metrics import compute_mae, compute_mape, compute_ssi, compute_geh, compute_cpl, compute_cpc, compute_binned_metric, compute_macro_metric, mae_metric, cpc_metric, cpl_metric, geh_metric, ssi_metric, mape_metric\n'), ((11077, 11172), 'metrics.compute_macro_metric', 'compute_macro_metric', (['cpl_metric', 'm[preds_key]', 'm[labels_key]', 
'm[bins_key]', 'ds', 'ts.num_bins'], {}), '(cpl_metric, m[preds_key], m[labels_key], m[bins_key],\n ds, ts.num_bins)\n', (11097, 11172), False, 'from metrics import compute_mae, compute_mape, compute_ssi, compute_geh, compute_cpl, compute_cpc, compute_binned_metric, compute_macro_metric, mae_metric, cpc_metric, cpl_metric, geh_metric, ssi_metric, mape_metric\n'), ((11211, 11255), 'metrics.compute_cpc', 'compute_cpc', (['m[preds_key]', 'm[labels_key]', 'ds'], {}), '(m[preds_key], m[labels_key], ds)\n', (11222, 11255), False, 'from metrics import compute_mae, compute_mape, compute_ssi, compute_geh, compute_cpl, compute_cpc, compute_binned_metric, compute_macro_metric, mae_metric, cpc_metric, cpl_metric, geh_metric, ssi_metric, mape_metric\n'), ((11305, 11401), 'metrics.compute_binned_metric', 'compute_binned_metric', (['cpc_metric', 'm[preds_key]', 'm[labels_key]', 'm[bins_key]', 'ds', 'ts.num_bins'], {}), '(cpc_metric, m[preds_key], m[labels_key], m[bins_key],\n ds, ts.num_bins)\n', (11326, 11401), False, 'from metrics import compute_mae, compute_mape, compute_ssi, compute_geh, compute_cpl, compute_cpc, compute_binned_metric, compute_macro_metric, mae_metric, cpc_metric, cpl_metric, geh_metric, ssi_metric, mape_metric\n'), ((11446, 11541), 'metrics.compute_macro_metric', 'compute_macro_metric', (['cpc_metric', 'm[preds_key]', 'm[labels_key]', 'm[bins_key]', 'ds', 'ts.num_bins'], {}), '(cpc_metric, m[preds_key], m[labels_key], m[bins_key],\n ds, ts.num_bins)\n', (11466, 11541), False, 'from metrics import compute_mae, compute_mape, compute_ssi, compute_geh, compute_cpl, compute_cpc, compute_binned_metric, compute_macro_metric, mae_metric, cpc_metric, cpl_metric, geh_metric, ssi_metric, mape_metric\n'), ((2224, 2345), 'layers.DNANodeRepModule', 'DNANodeRepModule', (['num_node_features', 'node_rep_size', 'ts.num_node_rep_layers', 'ts.dna_heads', 'ts.dna_groups', 'ts.drop_prob'], {}), '(num_node_features, node_rep_size, ts.num_node_rep_layers,\n ts.dna_heads, 
ts.dna_groups, ts.drop_prob)\n', (2240, 2345), False, 'from layers import DNANodeRepModule, ConvNodeRepModule\n'), ((7556, 7630), 'numpy.concatenate', 'np.concatenate', (["logger._current_epoch_metrics['test_predictions']"], {'axis': '(-1)'}), "(logger._current_epoch_metrics['test_predictions'], axis=-1)\n", (7570, 7630), True, 'import numpy as np\n'), ((7704, 7773), 'numpy.concatenate', 'np.concatenate', (["logger._current_epoch_metrics['test_labels']"], {'axis': '(-1)'}), "(logger._current_epoch_metrics['test_labels'], axis=-1)\n", (7718, 7773), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import scipy
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def discrete_to_continuous(values, value_times, sampling_rate=1000):
    """
    Interpolate discrete events into a continuous signal with a cubic spline.

    Parameters
    ----------
    values : dataframe
        Values.
    value_times : list
        Time indices of values.
    sampling_rate : int
        Sampling rate (samples/second). NOTE(review): this parameter is not
        used by the interpolation itself -- the output is always sampled at
        unit index steps; confirm intended behaviour before relying on
        other rates.

    Returns
    ----------
    signal : pd.Series
        An array containing the values indexed by time.

    Example
    ----------
    >>> import neurokit as nk
    >>> signal = discrete_to_continuous([800, 900, 700, 500], [1000, 2000, 3000, 4000], sampling_rate=1000)
    >>> pd.Series(signal).plot()

    Notes
    ----------
    *Authors*
    - `<NAME> <https://dominiquemakowski.github.io/>`_
    *Dependencies*
    - scipy
    - pandas
    """
    # Shift the time axis so the spline is fitted starting at zero.
    t0 = value_times[0]
    rel_times = np.array(value_times) - t0
    # Exact cubic spline: s=0 forces interpolation through every given point.
    tck = scipy.interpolate.splrep(x=rel_times, y=values, k=3, s=0)
    # Evaluate at every unit step up to (not including) the last event time.
    grid = np.arange(0, rel_times[-1], 1)
    interpolated = scipy.interpolate.splev(x=grid, tck=tck, der=0)
    # Re-attach the original (absolute) time axis as the index.
    series = pd.Series(interpolated,
                       index=np.arange(t0, t0 + len(interpolated), 1))
    return series
return(signal) | [
"pandas.Series",
"numpy.array",
"scipy.interpolate.splev",
"scipy.interpolate.splrep",
"numpy.arange"
] | [((1718, 1777), 'scipy.interpolate.splrep', 'scipy.interpolate.splrep', ([], {'x': 'value_times', 'y': 'values', 'k': '(3)', 's': '(0)'}), '(x=value_times, y=values, k=3, s=0)\n', (1742, 1777), False, 'import scipy\n'), ((1851, 1883), 'numpy.arange', 'np.arange', (['(0)', 'value_times[-1]', '(1)'], {}), '(0, value_times[-1], 1)\n', (1860, 1883), True, 'import numpy as np\n'), ((1935, 1982), 'scipy.interpolate.splev', 'scipy.interpolate.splev', ([], {'x': 'x', 'tck': 'spline', 'der': '(0)'}), '(x=x, tck=spline, der=0)\n', (1958, 1982), False, 'import scipy\n'), ((2022, 2039), 'pandas.Series', 'pd.Series', (['signal'], {}), '(signal)\n', (2031, 2039), True, 'import pandas as pd\n'), ((1623, 1644), 'numpy.array', 'np.array', (['value_times'], {}), '(value_times)\n', (1631, 1644), True, 'import numpy as np\n')] |
# Copyright 2018 The CapsLayer Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==========================================================================
"""
This module provides a set of high-level capsule networks layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import capslayer as cl
import tensorflow as tf
from capslayer.core import routing
from capslayer.core import transforming
def dense(inputs, activation,
          num_outputs,
          out_caps_dims,
          routing_method='EMRouting',
          num_iter=3,
          coordinate_addition=False,
          reuse=None,
          name=None):
    """A fully connected capsule layer.

    Args:
        inputs: A 4-D tensor with shape [batch_size, num_inputs] + in_caps_dims or [batch_size, in_height, in_width, in_channels] + in_caps_dims
        activation: [batch_size, num_inputs] or [batch_size, in_height, in_width, in_channels]
        num_outputs: Integer, the number of output capsules in the layer.
        out_caps_dims: A list with two elements, pose shape of output capsules.
        routing_method: String, routing algorithm name forwarded to
            ``capslayer.core.routing`` ('EMRouting' by default).
        num_iter: Integer, number of routing iterations.
        coordinate_addition: Boolean. When True and inputs are the 6-D
            convolutional form, constant scaled spatial coordinates are added
            to the votes before routing.
        reuse: When truthy, reuse the variables of this scope.
        name: Optional variable-scope name (defaults to "dense").

    Returns:
        pose: A 4-D tensor with shape [batch_size, num_outputs] + out_caps_dims
        activation: [batch_size, num_outputs]

        NOTE(review): when routing_method == 'SDARouting' the function returns
        the result of ``routing`` directly (early return below), which differs
        in arity from the (pose, activation, None) triple of the default path
        -- confirm callers handle both shapes.
    """
    name = "dense" if name is None else name
    with tf.compat.v1.variable_scope(name) as scope:
        if reuse:
            scope.reuse()
        if coordinate_addition and len(inputs.shape) == 6 and len(activation.shape) == 4:
            # 6-D convolutional input: compute votes first, then inject
            # spatial coordinates ("coordinate addition").
            vote = transforming(inputs, num_outputs=num_outputs, out_caps_dims=out_caps_dims)
            # "coodinate_addition" (sic) is a runtime graph name; the typo is
            # kept because changing it would rename existing graph nodes.
            with tf.name_scope("coodinate_addition"):
                batch_size, in_height, in_width, in_channels, _, out_caps_height, out_caps_width = cl.shape(vote)
                num_inputs = in_height * in_width * in_channels
                # Constant offset carrying the scaled row coordinate
                # (i + 0.5) / in_height for each height position.
                zeros = np.zeros((in_height, out_caps_width - 1))
                coord_offset_h = ((np.arange(in_height) + 0.5) / in_height).reshape([in_height, 1])
                coord_offset_h = np.concatenate([zeros, coord_offset_h], axis=-1)
                zeros = np.zeros((out_caps_height - 1, out_caps_width))
                coord_offset_h = np.stack([np.concatenate([coord_offset_h[i:(i + 1), :], zeros], axis=0) for i in range(in_height)], axis=0)
                coord_offset_h = coord_offset_h.reshape((1, in_height, 1, 1, 1, out_caps_height, out_caps_width))
                # Constant offset carrying the scaled column coordinate
                # (i + 0.5) / in_width for each width position.
                zeros = np.zeros((1, in_width))
                coord_offset_w = ((np.arange(in_width) + 0.5) / in_width).reshape([1, in_width])
                coord_offset_w = np.concatenate([zeros, coord_offset_w, zeros, zeros], axis=0)
                zeros = np.zeros((out_caps_height, out_caps_width - 1))
                coord_offset_w = np.stack([np.concatenate([zeros, coord_offset_w[:, i:(i + 1)]], axis=1) for i in range(in_width)], axis=0)
                coord_offset_w = coord_offset_w.reshape((1, 1, in_width, 1, 1, out_caps_height, out_caps_width))
                # The offsets are compile-time constants; they broadcast over
                # the batch/channel/capsule axes when added to the votes.
                vote = vote + tf.constant(coord_offset_h + coord_offset_w, dtype=tf.float32)
            # Flatten the spatial grid into a single "num_inputs" axis.
            vote = tf.reshape(vote, shape=[batch_size, num_inputs, num_outputs] + out_caps_dims)
            activation = tf.reshape(activation, shape=[batch_size, num_inputs])
        elif len(inputs.shape) == 4 and len(activation.shape) == 2:
            # Already-flat [batch, num_inputs] + caps_dims input.
            vote = transforming(inputs, num_outputs=num_outputs, out_caps_dims=out_caps_dims)
        else:
            raise TypeError("Wrong rank for inputs or activation")
        if routing_method == 'SDARouting':
            # SDA routing derives activations from the input pose norms and
            # returns routing()'s result directly (different return arity).
            activation = tf.norm(inputs, axis=(-2, -1))
            return routing(vote, activation, routing_method, num_iter)
        pose, activation = routing(vote, activation, routing_method, num_iter=num_iter)
        # pose, activation = cl.core.gluing(vote, activation)
    assert len(pose.shape) == 4
    assert len(activation.shape) == 2
    return pose, activation, None
def primaryCaps(inputs, filters,
                kernel_size,
                strides,
                out_caps_dims,
                method=None,
                name=None):
    '''Primary capsule layer.

    Args:
        inputs: [batch_size, in_height, in_width, in_channels].
        filters: Integer, the dimensionality of the output space.
        kernel_size: kernel_size
        strides: strides
        out_caps_dims: A list of 2 integers.
        method: the method of calculating probability of entity existence(logistic, norm, None)

    Returns:
        pose: A 6-D tensor, [batch_size, out_height, out_width, filters] + out_caps_dims
        activation: A 4-D tensor, [batch_size, out_height, out_width, filters]
    '''
    name = "primary_capsule" if name is None else name
    with tf.compat.v1.variable_scope(name):
        # One conv channel per pose scalar; "logistic" needs one extra
        # channel per capsule for the activation logit.
        channels = filters * np.prod(out_caps_dims)
        channels = channels + filters if method == "logistic" else channels
        pose = tf.layers.conv2d(inputs, channels,
                                kernel_size=kernel_size,
                                strides=strides, activation=None)
        shape = cl.shape(pose, name="get_pose_shape")
        batch_size = shape[0]
        height = shape[1]
        width = shape[2]
        # Fold the flat conv channels into per-capsule pose matrices.
        shape = [batch_size, height, width, filters] + out_caps_dims
        if method == 'logistic':
            # logistic activation unit: split off one logit per capsule and
            # squash it through a sigmoid.
            pose, activation_logit = tf.split(pose, [channels - filters, filters], axis=-1)
            pose = tf.reshape(pose, shape=shape)
            activation = tf.sigmoid(activation_logit)
        elif method == 'norm' or method is None:
            pose = tf.reshape(pose, shape=shape)
            # Squash the vector axis (or the whole matrix for matrix poses)
            # so that pose length encodes existence probability.
            squash_on = -2 if out_caps_dims[-1] == 1 else [-2, -1]
            pose = cl.ops.squash(pose, axis=squash_on)
            activation = cl.norm(pose, axis=(-2, -1))
        # Clip away exact 0/1 so downstream log-based losses stay finite.
        activation = tf.clip_by_value(activation, 1e-20, 1. - 1e-20)
    return(pose, activation)
| [
"capslayer.shape",
"numpy.prod",
"tensorflow.compat.v1.variable_scope",
"tensorflow.split",
"capslayer.norm",
"capslayer.core.transforming",
"capslayer.ops.squash",
"tensorflow.layers.conv2d",
"numpy.zeros",
"tensorflow.sigmoid",
"tensorflow.name_scope",
"tensorflow.clip_by_value",
"numpy.co... | [((1880, 1913), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['name'], {}), '(name)\n', (1907, 1913), True, 'import tensorflow as tf\n'), ((4250, 4310), 'capslayer.core.routing', 'routing', (['vote', 'activation', 'routing_method'], {'num_iter': 'num_iter'}), '(vote, activation, routing_method, num_iter=num_iter)\n', (4257, 4310), False, 'from capslayer.core import routing\n'), ((5288, 5321), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['name'], {}), '(name)\n', (5315, 5321), True, 'import tensorflow as tf\n'), ((5467, 5564), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['inputs', 'channels'], {'kernel_size': 'kernel_size', 'strides': 'strides', 'activation': 'None'}), '(inputs, channels, kernel_size=kernel_size, strides=strides,\n activation=None)\n', (5483, 5564), True, 'import tensorflow as tf\n'), ((5641, 5678), 'capslayer.shape', 'cl.shape', (['pose'], {'name': '"""get_pose_shape"""'}), "(pose, name='get_pose_shape')\n", (5649, 5678), True, 'import capslayer as cl\n'), ((6393, 6441), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['activation', '(1e-20)', '(1.0 - 1e-20)'], {}), '(activation, 1e-20, 1.0 - 1e-20)\n', (6409, 6441), True, 'import tensorflow as tf\n'), ((2077, 2151), 'capslayer.core.transforming', 'transforming', (['inputs'], {'num_outputs': 'num_outputs', 'out_caps_dims': 'out_caps_dims'}), '(inputs, num_outputs=num_outputs, out_caps_dims=out_caps_dims)\n', (2089, 2151), False, 'from capslayer.core import transforming\n'), ((4120, 4150), 'tensorflow.norm', 'tf.norm', (['inputs'], {'axis': '(-2, -1)'}), '(inputs, axis=(-2, -1))\n', (4127, 4150), True, 'import tensorflow as tf\n'), ((4170, 4221), 'capslayer.core.routing', 'routing', (['vote', 'activation', 'routing_method', 'num_iter'], {}), '(vote, activation, routing_method, num_iter)\n', (4177, 4221), False, 'from capslayer.core import routing\n'), ((5352, 5374), 'numpy.prod', 'np.prod', (['out_caps_dims'], {}), 
'(out_caps_dims)\n', (5359, 5374), True, 'import numpy as np\n'), ((5939, 5993), 'tensorflow.split', 'tf.split', (['pose', '[channels - filters, filters]'], {'axis': '(-1)'}), '(pose, [channels - filters, filters], axis=-1)\n', (5947, 5993), True, 'import tensorflow as tf\n'), ((6013, 6042), 'tensorflow.reshape', 'tf.reshape', (['pose'], {'shape': 'shape'}), '(pose, shape=shape)\n', (6023, 6042), True, 'import tensorflow as tf\n'), ((6068, 6096), 'tensorflow.sigmoid', 'tf.sigmoid', (['activation_logit'], {}), '(activation_logit)\n', (6078, 6096), True, 'import tensorflow as tf\n'), ((2169, 2204), 'tensorflow.name_scope', 'tf.name_scope', (['"""coodinate_addition"""'], {}), "('coodinate_addition')\n", (2182, 2204), True, 'import tensorflow as tf\n'), ((2305, 2319), 'capslayer.shape', 'cl.shape', (['vote'], {}), '(vote)\n', (2313, 2319), True, 'import capslayer as cl\n'), ((2409, 2450), 'numpy.zeros', 'np.zeros', (['(in_height, out_caps_width - 1)'], {}), '((in_height, out_caps_width - 1))\n', (2417, 2450), True, 'import numpy as np\n'), ((2584, 2632), 'numpy.concatenate', 'np.concatenate', (['[zeros, coord_offset_h]'], {'axis': '(-1)'}), '([zeros, coord_offset_h], axis=-1)\n', (2598, 2632), True, 'import numpy as np\n'), ((2657, 2704), 'numpy.zeros', 'np.zeros', (['(out_caps_height - 1, out_caps_width)'], {}), '((out_caps_height - 1, out_caps_width))\n', (2665, 2704), True, 'import numpy as np\n'), ((2985, 3008), 'numpy.zeros', 'np.zeros', (['(1, in_width)'], {}), '((1, in_width))\n', (2993, 3008), True, 'import numpy as np\n'), ((3139, 3200), 'numpy.concatenate', 'np.concatenate', (['[zeros, coord_offset_w, zeros, zeros]'], {'axis': '(0)'}), '([zeros, coord_offset_w, zeros, zeros], axis=0)\n', (3153, 3200), True, 'import numpy as np\n'), ((3225, 3272), 'numpy.zeros', 'np.zeros', (['(out_caps_height, out_caps_width - 1)'], {}), '((out_caps_height, out_caps_width - 1))\n', (3233, 3272), True, 'import numpy as np\n'), ((3644, 3721), 'tensorflow.reshape', 'tf.reshape', 
(['vote'], {'shape': '([batch_size, num_inputs, num_outputs] + out_caps_dims)'}), '(vote, shape=[batch_size, num_inputs, num_outputs] + out_caps_dims)\n', (3654, 3721), True, 'import tensorflow as tf\n'), ((3751, 3805), 'tensorflow.reshape', 'tf.reshape', (['activation'], {'shape': '[batch_size, num_inputs]'}), '(activation, shape=[batch_size, num_inputs])\n', (3761, 3805), True, 'import tensorflow as tf\n'), ((3894, 3968), 'capslayer.core.transforming', 'transforming', (['inputs'], {'num_outputs': 'num_outputs', 'out_caps_dims': 'out_caps_dims'}), '(inputs, num_outputs=num_outputs, out_caps_dims=out_caps_dims)\n', (3906, 3968), False, 'from capslayer.core import transforming\n'), ((6165, 6194), 'tensorflow.reshape', 'tf.reshape', (['pose'], {'shape': 'shape'}), '(pose, shape=shape)\n', (6175, 6194), True, 'import tensorflow as tf\n'), ((6281, 6316), 'capslayer.ops.squash', 'cl.ops.squash', (['pose'], {'axis': 'squash_on'}), '(pose, axis=squash_on)\n', (6294, 6316), True, 'import capslayer as cl\n'), ((6342, 6370), 'capslayer.norm', 'cl.norm', (['pose'], {'axis': '(-2, -1)'}), '(pose, axis=(-2, -1))\n', (6349, 6370), True, 'import capslayer as cl\n'), ((3557, 3619), 'tensorflow.constant', 'tf.constant', (['(coord_offset_h + coord_offset_w)'], {'dtype': 'tf.float32'}), '(coord_offset_h + coord_offset_w, dtype=tf.float32)\n', (3568, 3619), True, 'import tensorflow as tf\n'), ((2748, 2807), 'numpy.concatenate', 'np.concatenate', (['[coord_offset_h[i:i + 1, :], zeros]'], {'axis': '(0)'}), '([coord_offset_h[i:i + 1, :], zeros], axis=0)\n', (2762, 2807), True, 'import numpy as np\n'), ((3316, 3375), 'numpy.concatenate', 'np.concatenate', (['[zeros, coord_offset_w[:, i:i + 1]]'], {'axis': '(1)'}), '([zeros, coord_offset_w[:, i:i + 1]], axis=1)\n', (3330, 3375), True, 'import numpy as np\n'), ((2486, 2506), 'numpy.arange', 'np.arange', (['in_height'], {}), '(in_height)\n', (2495, 2506), True, 'import numpy as np\n'), ((3044, 3063), 'numpy.arange', 'np.arange', 
(['in_width'], {}), '(in_width)\n', (3053, 3063), True, 'import numpy as np\n')] |
import numpy as np
def sweepcut(p, g):
    """
    Round an embedding vector into a cluster via a conductance sweep cut.

    Vertices are visited in order of decreasing value of ``p``; at each step
    the conductance of the current prefix set is computed incrementally and
    the best (lowest-conductance) prefix is remembered.

    Parameters
    ----------
    p: numpy array
        A vector that is used to perform rounding.
    g: graph object

    Returns
    -------
    A list of length 3:

    output 0: list
        Indices of the best cluster found by the sweep.
    output 1: float
        Conductance of that best cluster.
    output 2: list of objects
        Two parallel sequences: output[2][0] is a numpy array with the
        conductance of every prefix considered, and output[2][1] is a list
        whose i-th entry holds the vertex indices of the i-th prefix (the
        set whose conductance is output[2][0][i]).
    """
    num_nodes = g.adjacency_matrix.shape[0]
    # Visit vertices by decreasing embedding value.
    order = np.argsort(-1 * p, axis=0)
    # Sweep only over the support of p; a full-support sweep stops one short
    # so the complement is never empty.
    n_steps = np.count_nonzero(p)
    if n_steps == num_nodes:
        n_steps = num_nodes - 1
    # Column vector accumulating the adjacency rows of vertices added so far.
    acc_rows = np.zeros((num_nodes, 1))
    volume = 0
    internal = 0  # twice the edge weight inside the prefix (incl. self-loops)
    result = [[], 2, [np.zeros(n_steps), [[] for _ in range(n_steps)]]]
    for step in range(n_steps):
        v = order[step]
        volume = volume + g.d[v]
        self_loop = g.adjacency_matrix[v, v]
        to_prefix = acc_rows[v, 0]
        # Boundary weight = volume minus the internal weight after adding v.
        cut = volume - internal - self_loop - 2 * to_prefix
        internal = internal + self_loop + 2 * to_prefix
        acc_rows = acc_rows + g.adjacency_matrix[v, :].T
        conductance = cut / min(volume, g.vol_G - volume)
        result[2][0][step] = conductance
        prefix = (order[0:step + 1]).tolist()
        result[2][1][step] = prefix
        if conductance < result[1]:
            result[1] = conductance
            result[0] = prefix
    return result
| [
"numpy.argsort",
"numpy.count_nonzero",
"numpy.zeros"
] | [((1345, 1371), 'numpy.argsort', 'np.argsort', (['(-1 * p)'], {'axis': '(0)'}), '(-1 * p, axis=0)\n', (1355, 1371), True, 'import numpy as np\n'), ((1394, 1413), 'numpy.count_nonzero', 'np.count_nonzero', (['p'], {}), '(p)\n', (1410, 1413), True, 'import numpy as np\n'), ((1480, 1496), 'numpy.zeros', 'np.zeros', (['(n, 1)'], {}), '((n, 1))\n', (1488, 1496), True, 'import numpy as np\n'), ((1577, 1596), 'numpy.zeros', 'np.zeros', (['size_loop'], {}), '(size_loop)\n', (1585, 1596), True, 'import numpy as np\n')] |
# All credits to the fmriprep peeps
from nipype.interfaces.utility import Function
def erode_mask(in_file, epi_mask, epi_mask_erosion_mm=0,
               erosion_mm=0):
    """Binarise a tissue-probability map and intersect it with an
    (optionally eroded) EPI mask, then optionally erode the result.

    Parameters: ``in_file`` is a probability-map NIfTI path; ``epi_mask`` a
    binary EPI-mask NIfTI path; the two ``*_mm`` arguments give erosion
    extents in millimetres (0 disables the corresponding erosion).

    Returns a tuple ``(roi_path, eroded_mask_path)`` of absolute paths
    written into the current working directory.
    """
    import os
    import nibabel as nib
    import scipy.ndimage as nd
    # thresholding: keep only voxels with probability >= 0.95, as a 0/1 mask
    probability_map_nii = nib.load(in_file)
    probability_map_data = probability_map_nii.get_data()
    probability_map_data[probability_map_data < 0.95] = 0
    probability_map_data[probability_map_data != 0] = 1
    epi_mask_nii = nib.load(epi_mask)
    epi_mask_data = epi_mask_nii.get_data()
    if epi_mask_erosion_mm:
        # Convert millimetres to voxel iterations using the coarsest axis zoom.
        iters = int(epi_mask_erosion_mm/max(probability_map_nii.header.get_zooms()))
        epi_mask_data = nd.binary_erosion(epi_mask_data,
                                          iterations=iters).astype(int)
        # NOTE(review): "erodd_mask" filename typo is kept -- it is runtime
        # behaviour and downstream nodes may match on it.
        eroded_mask_file = os.path.abspath("erodd_mask.nii.gz")
        niimg = nib.Nifti1Image(epi_mask_data, epi_mask_nii.affine, epi_mask_nii.header)
        niimg.to_filename(eroded_mask_file)
    else:
        eroded_mask_file = epi_mask
    # Restrict the probability mask to voxels inside the (eroded) EPI mask.
    probability_map_data[epi_mask_data != 1] = 0
    # shrinking: erode the tissue mask itself by erosion_mm
    if erosion_mm:
        iter_n = int(erosion_mm/max(probability_map_nii.header.get_zooms()))
        probability_map_data = nd.binary_erosion(probability_map_data,
                                            iterations=iter_n).astype(int)
    new_nii = nib.Nifti1Image(probability_map_data, probability_map_nii.affine,
                            probability_map_nii.header)
    new_nii.to_filename("roi.nii.gz")
    return os.path.abspath("roi.nii.gz"), eroded_mask_file
# Nipype Function interface wrapping erode_mask for use as a workflow node.
Erode_mask = Function(function=erode_mask, input_names=['in_file',
                                                        'epi_mask',
                                                        'epi_mask_erosion_mm',
                                                        'erosion_mm'],
                        output_names=['roi_eroded', 'epi_mask_eroded'])
def combine_rois(in_CSF, in_WM, epi_ref):
    """Merge CSF and WM masks into a single binary ROI image.

    Voxels non-zero in either input are set to 1.  The result is written to
    ``logical_or.nii.gz`` in the current working directory, reusing the
    affine/header of the EPI reference, and its absolute path is returned.
    """
    import os
    import numpy as np
    import nibabel as nib
    csf_img = nib.load(in_CSF)
    csf_voxels = csf_img.get_data()
    wm_img = nib.load(in_WM)
    wm_voxels = wm_img.get_data()
    union = np.zeros_like(wm_voxels)
    union[wm_voxels != 0] = 1
    union[csf_voxels != 0] = 1
    # Take affine/header explicitly from the EPI reference because of
    # potential qform_code differences between the two tissue files that
    # would otherwise prevent aCompCor from working.
    reference = nib.load(epi_ref)
    out_img = nib.Nifti1Image(union, reference.affine, reference.header)
    out_img.to_filename("logical_or.nii.gz")
    return os.path.abspath("logical_or.nii.gz")
# Nipype Function interface wrapping combine_rois for use as a workflow node.
Combine_rois = Function(function=combine_rois, input_names=['in_CSF', 'in_WM',
                                                           'epi_ref'],
                        output_names=['combined_roi'])
def combine_component_files(acomp, tcomp):
    """Concatenate aCompCor and tCompCor component TSV files column-wise.

    Parameters
    ----------
    acomp : str
        Path to the aCompCor components TSV file.
    tcomp : str
        Path to the tCompCor components TSV file.

    Returns
    -------
    str
        Absolute path of the merged ``all_compcor.tsv`` written in the
        current working directory.
    """
    import os.path as op
    import pandas as pd
    # The redundant str('\t') wrappers (a Python 2 compatibility relic)
    # have been dropped; '\t' is already a str.
    acomp_df = pd.read_csv(acomp, sep='\t')
    tcomp_df = pd.read_csv(tcomp, sep='\t')
    # Column-wise concatenation: both files have one row per volume.
    df = pd.concat((acomp_df, tcomp_df), axis=1)
    fn = op.abspath('all_compcor.tsv')
    df.to_csv(fn, index=None, sep='\t')
    return fn
# Nipype Function interface wrapping combine_component_files as a workflow node.
Combine_component_files = Function(function=combine_component_files,
                                   input_names=['acomp', 'tcomp'],
                                   output_names=['out_file'])
"nipype.interfaces.utility.Function",
"nibabel.load",
"scipy.ndimage.binary_erosion",
"pandas.concat",
"nibabel.Nifti1Image",
"os.path.abspath",
"numpy.zeros_like"
] | [((1623, 1782), 'nipype.interfaces.utility.Function', 'Function', ([], {'function': 'erode_mask', 'input_names': "['in_file', 'epi_mask', 'epi_mask_erosion_mm', 'erosion_mm']", 'output_names': "['roi_eroded', 'epi_mask_eroded']"}), "(function=erode_mask, input_names=['in_file', 'epi_mask',\n 'epi_mask_erosion_mm', 'erosion_mm'], output_names=['roi_eroded',\n 'epi_mask_eroded'])\n", (1631, 1782), False, 'from nipype.interfaces.utility import Function\n'), ((2707, 2817), 'nipype.interfaces.utility.Function', 'Function', ([], {'function': 'combine_rois', 'input_names': "['in_CSF', 'in_WM', 'epi_ref']", 'output_names': "['combined_roi']"}), "(function=combine_rois, input_names=['in_CSF', 'in_WM', 'epi_ref'],\n output_names=['combined_roi'])\n", (2715, 2817), False, 'from nipype.interfaces.utility import Function\n'), ((3265, 3370), 'nipype.interfaces.utility.Function', 'Function', ([], {'function': 'combine_component_files', 'input_names': "['acomp', 'tcomp']", 'output_names': "['out_file']"}), "(function=combine_component_files, input_names=['acomp', 'tcomp'],\n output_names=['out_file'])\n", (3273, 3370), False, 'from nipype.interfaces.utility import Function\n'), ((305, 322), 'nibabel.load', 'nib.load', (['in_file'], {}), '(in_file)\n', (313, 322), True, 'import nibabel as nib\n'), ((515, 533), 'nibabel.load', 'nib.load', (['epi_mask'], {}), '(epi_mask)\n', (523, 533), True, 'import nibabel as nib\n'), ((1388, 1485), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['probability_map_data', 'probability_map_nii.affine', 'probability_map_nii.header'], {}), '(probability_map_data, probability_map_nii.affine,\n probability_map_nii.header)\n', (1403, 1485), True, 'import nibabel as nib\n'), ((2087, 2103), 'nibabel.load', 'nib.load', (['in_CSF'], {}), '(in_CSF)\n', (2095, 2103), True, 'import nibabel as nib\n'), ((2152, 2167), 'nibabel.load', 'nib.load', (['in_WM'], {}), '(in_WM)\n', (2160, 2167), True, 'import nibabel as nib\n'), ((2216, 2238), 'numpy.zeros_like', 
'np.zeros_like', (['WM_data'], {}), '(WM_data)\n', (2229, 2238), True, 'import numpy as np\n'), ((2322, 2339), 'nibabel.load', 'nib.load', (['epi_ref'], {}), '(epi_ref)\n', (2330, 2339), True, 'import nibabel as nib\n'), ((2555, 2596), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['combined', 'affine', 'header'], {}), '(combined, affine, header)\n', (2570, 2596), True, 'import nibabel as nib\n'), ((2653, 2689), 'os.path.abspath', 'os.path.abspath', (['"""logical_or.nii.gz"""'], {}), "('logical_or.nii.gz')\n", (2668, 2689), False, 'import os\n'), ((3099, 3138), 'pandas.concat', 'pd.concat', (['(acomp_df, tcomp_df)'], {'axis': '(1)'}), '((acomp_df, tcomp_df), axis=1)\n', (3108, 3138), True, 'import pandas as pd\n'), ((3148, 3177), 'os.path.abspath', 'op.abspath', (['"""all_compcor.tsv"""'], {}), "('all_compcor.tsv')\n", (3158, 3177), True, 'import os.path as op\n'), ((843, 879), 'os.path.abspath', 'os.path.abspath', (['"""erodd_mask.nii.gz"""'], {}), "('erodd_mask.nii.gz')\n", (858, 879), False, 'import os\n'), ((896, 968), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['epi_mask_data', 'epi_mask_nii.affine', 'epi_mask_nii.header'], {}), '(epi_mask_data, epi_mask_nii.affine, epi_mask_nii.header)\n', (911, 968), True, 'import nibabel as nib\n'), ((1560, 1589), 'os.path.abspath', 'os.path.abspath', (['"""roi.nii.gz"""'], {}), "('roi.nii.gz')\n", (1575, 1589), False, 'import os\n'), ((715, 765), 'scipy.ndimage.binary_erosion', 'nd.binary_erosion', (['epi_mask_data'], {'iterations': 'iters'}), '(epi_mask_data, iterations=iters)\n', (732, 765), True, 'import scipy.ndimage as nd\n'), ((1253, 1311), 'scipy.ndimage.binary_erosion', 'nd.binary_erosion', (['probability_map_data'], {'iterations': 'iter_n'}), '(probability_map_data, iterations=iter_n)\n', (1270, 1311), True, 'import scipy.ndimage as nd\n')] |
import unittest
import numpy as np
from RyStats.inferential import pearsons_correlation, polyserial_correlation
class TestCorrelation(unittest.TestCase):
    """Test Fixture for correlation."""
    def test_pearsons_correlation(self):
        """Testing pearsons correlation.

        Draws 100 uncorrelated standard-normal items (fixed seed for
        reproducibility) and checks that roughly 5% of the off-diagonal
        correlations exceed the .05 critical value, as expected under the null.
        """
        rng = np.random.default_rng(34982750394857201981982375)
        n_items = 100
        dataset = rng.standard_normal((n_items, 1000))
        results = pearsons_correlation(dataset)
        # Get the number of valid correlations
        correlation = np.abs(results['Correlation'])
        r_critical = results['R critical']['.05']
        # Subtract n_items to discount the diagonal entries (self-correlations,
        # presumably |r| = 1, always above the critical value — hence removed),
        # then normalise by the number of off-diagonal cells.
        significant_data = (np.count_nonzero(correlation > r_critical)
                            - n_items) / (n_items * (n_items - 1))
        # False-positive rate should sit near the nominal alpha of .05.
        self.assertAlmostEqual(significant_data, .05, delta=0.01)
class TestPolyserialCorrelation(unittest.TestCase):
    """Polyserial Correlation Test Fixture."""
    def test_polyserial_correlation(self):
        """Testing polyserial correlation function.

        Simulates a latent bivariate normal with known rho, discretises one
        margin at fixed thresholds, and checks that the polyserial estimate
        recovers rho and beats the naive point-polyserial (Pearson) estimate.
        """
        # Fixed seed: the delta tolerances below depend on this exact draw.
        rng = np.random.default_rng(425365645347626485721532938464553254)
        rho = -0.6
        thresholds = [-.2, 0., .8]
        continuous = rng.multivariate_normal([0, 0], [[1, rho],
                                                      [rho, 1]], size=10000)
        # Discretise the second margin into 4 ordinal categories.
        ordinal = np.digitize(continuous[:, 1], thresholds)
        result = polyserial_correlation(continuous[:, 0], ordinal)['Correlation']
        # Naive benchmark: plain Pearson correlation with the ordinal codes.
        point_polyserial = np.corrcoef(continuous[:, 0], ordinal)[0, 1]
        self.assertAlmostEqual(result, rho, delta=.01)
        # The polyserial estimate must be closer to the true rho.
        self.assertLess(np.abs(result - rho),
                        np.abs(point_polyserial - rho))
    def test_biserial_correlation(self):
        """Testing biserial correlation."""
        # The polyserial function should include binary
        # inputs
        rng = np.random.default_rng(7921354169283445716651382455716656333145)
        rho = 0.45
        thresholds = [.3]
        continuous = rng.multivariate_normal([0, 0], [[1, rho],
                                                      [rho, 1]], size=10000)
        # One threshold -> binary (biserial) case.
        ordinal = np.digitize(continuous[:, 1], thresholds)
        result = polyserial_correlation(continuous[:, 0], ordinal)['Correlation']
        point_polyserial = np.corrcoef(continuous[:, 0], ordinal)[0, 1]
        self.assertAlmostEqual(result, rho, delta=.015)
        self.assertLess(np.abs(result - rho),
                        np.abs(point_polyserial - rho))
# Allow running this test module directly (`python <file>`).
if __name__ == "__main__":
    unittest.main()
"numpy.abs",
"numpy.random.default_rng",
"numpy.corrcoef",
"numpy.digitize",
"numpy.count_nonzero",
"RyStats.inferential.polyserial_correlation",
"unittest.main",
"RyStats.inferential.pearsons_correlation"
] | [((2540, 2555), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2553, 2555), False, 'import unittest\n'), ((298, 347), 'numpy.random.default_rng', 'np.random.default_rng', (['(34982750394857201981982375)'], {}), '(34982750394857201981982375)\n', (319, 347), True, 'import numpy as np\n'), ((444, 473), 'RyStats.inferential.pearsons_correlation', 'pearsons_correlation', (['dataset'], {}), '(dataset)\n', (464, 473), False, 'from RyStats.inferential import pearsons_correlation, polyserial_correlation\n'), ((544, 574), 'numpy.abs', 'np.abs', (["results['Correlation']"], {}), "(results['Correlation'])\n", (550, 574), True, 'import numpy as np\n'), ((1046, 1105), 'numpy.random.default_rng', 'np.random.default_rng', (['(425365645347626485721532938464553254)'], {}), '(425365645347626485721532938464553254)\n', (1067, 1105), True, 'import numpy as np\n'), ((1330, 1371), 'numpy.digitize', 'np.digitize', (['continuous[:, 1]', 'thresholds'], {}), '(continuous[:, 1], thresholds)\n', (1341, 1371), True, 'import numpy as np\n'), ((1858, 1921), 'numpy.random.default_rng', 'np.random.default_rng', (['(7921354169283445716651382455716656333145)'], {}), '(7921354169283445716651382455716656333145)\n', (1879, 1921), True, 'import numpy as np\n'), ((2137, 2178), 'numpy.digitize', 'np.digitize', (['continuous[:, 1]', 'thresholds'], {}), '(continuous[:, 1], thresholds)\n', (2148, 2178), True, 'import numpy as np\n'), ((1390, 1439), 'RyStats.inferential.polyserial_correlation', 'polyserial_correlation', (['continuous[:, 0]', 'ordinal'], {}), '(continuous[:, 0], ordinal)\n', (1412, 1439), False, 'from RyStats.inferential import pearsons_correlation, polyserial_correlation\n'), ((1482, 1520), 'numpy.corrcoef', 'np.corrcoef', (['continuous[:, 0]', 'ordinal'], {}), '(continuous[:, 0], ordinal)\n', (1493, 1520), True, 'import numpy as np\n'), ((1607, 1627), 'numpy.abs', 'np.abs', (['(result - rho)'], {}), '(result - rho)\n', (1613, 1627), True, 'import numpy as np\n'), ((1653, 1683), 
'numpy.abs', 'np.abs', (['(point_polyserial - rho)'], {}), '(point_polyserial - rho)\n', (1659, 1683), True, 'import numpy as np\n'), ((2197, 2246), 'RyStats.inferential.polyserial_correlation', 'polyserial_correlation', (['continuous[:, 0]', 'ordinal'], {}), '(continuous[:, 0], ordinal)\n', (2219, 2246), False, 'from RyStats.inferential import pearsons_correlation, polyserial_correlation\n'), ((2289, 2327), 'numpy.corrcoef', 'np.corrcoef', (['continuous[:, 0]', 'ordinal'], {}), '(continuous[:, 0], ordinal)\n', (2300, 2327), True, 'import numpy as np\n'), ((2415, 2435), 'numpy.abs', 'np.abs', (['(result - rho)'], {}), '(result - rho)\n', (2421, 2435), True, 'import numpy as np\n'), ((2461, 2491), 'numpy.abs', 'np.abs', (['(point_polyserial - rho)'], {}), '(point_polyserial - rho)\n', (2467, 2491), True, 'import numpy as np\n'), ((655, 697), 'numpy.count_nonzero', 'np.count_nonzero', (['(correlation > r_critical)'], {}), '(correlation > r_critical)\n', (671, 697), True, 'import numpy as np\n')] |
import numpy as np
from stable_baselines3 import SAC
# from stable_baselines3.sac import CnnPolicy
from stable_baselines3.sac import MlpPolicy
import gym
import d4rl
import json
import os
env = gym.make("carla-lane-v0")

exp_name = "baseline_carla"
total_timesteps = 1000000
save_every = 5000
n_eval_episodes = 3

tensorboard_log = os.path.join("./logs", exp_name)
model = SAC(MlpPolicy, env, verbose=1, buffer_size=10000, tensorboard_log=tensorboard_log)

# Train in chunks of `save_every` steps; after each chunk, evaluate the
# deterministic policy for a few episodes and checkpoint the model.
reward_log = {}
for chunk in range(total_timesteps // save_every):
    model.learn(total_timesteps=save_every, log_interval=4, tb_log_name="first_run")
    # Evaluation. Bug fixes vs. the original:
    #  - the inner loop reused `i`, shadowing the outer counter, so
    #    reward_log was always keyed by the inner loop's last value;
    #  - `done` and `obs` were never reset between episodes, so episodes
    #    2 and 3 skipped the rollout entirely and recorded a reward of 0.
    total_reward = []
    for _ in range(n_eval_episodes):
        obs = env.reset()
        done = False
        episode_reward = 0
        while not done:
            action, _states = model.predict(obs, deterministic=True)
            # Perform action
            obs, reward, done, _ = env.step(action)
            episode_reward += reward
        total_reward.append(episode_reward)
    reward_log[chunk] = (np.mean(total_reward), np.std(total_reward))
    with open(os.path.join(tensorboard_log, "reward_log.json"), "w") as f:
        json.dump(reward_log, f)
    obs = env.reset()
    model.save(exp_name)

model.save(exp_name)

# Demo loop: render the trained deterministic policy forever.
obs = env.reset()
while True:
    done = False
    while not done:
        action, _states = model.predict(obs, deterministic=True)
        # Perform action
        obs, reward, done, _ = env.step(action)
        print(action, reward)
        env.render()
    obs = env.reset()
| [
"numpy.mean",
"stable_baselines3.SAC",
"os.path.join",
"numpy.std",
"gym.make",
"json.dump"
] | [((196, 221), 'gym.make', 'gym.make', (['"""carla-lane-v0"""'], {}), "('carla-lane-v0')\n", (204, 221), False, 'import gym\n'), ((313, 345), 'os.path.join', 'os.path.join', (['"""./logs"""', 'exp_name'], {}), "('./logs', exp_name)\n", (325, 345), False, 'import os\n'), ((355, 442), 'stable_baselines3.SAC', 'SAC', (['MlpPolicy', 'env'], {'verbose': '(1)', 'buffer_size': '(10000)', 'tensorboard_log': 'tensorboard_log'}), '(MlpPolicy, env, verbose=1, buffer_size=10000, tensorboard_log=\n tensorboard_log)\n', (358, 442), False, 'from stable_baselines3 import SAC\n'), ((1074, 1095), 'numpy.mean', 'np.mean', (['total_reward'], {}), '(total_reward)\n', (1081, 1095), True, 'import numpy as np\n'), ((1097, 1117), 'numpy.std', 'np.std', (['total_reward'], {}), '(total_reward)\n', (1103, 1117), True, 'import numpy as np\n'), ((1202, 1226), 'json.dump', 'json.dump', (['reward_log', 'f'], {}), '(reward_log, f)\n', (1211, 1226), False, 'import json\n'), ((1133, 1181), 'os.path.join', 'os.path.join', (['tensorboard_log', '"""reward_log.json"""'], {}), "(tensorboard_log, 'reward_log.json')\n", (1145, 1181), False, 'import os\n')] |
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from collections import OrderedDict
import warnings
import numpy as np
import pytest
from openvino.tools.pot.algorithms.sparsity.default.utils import check_model_sparsity_level
from openvino.tools.pot.data_loaders.creator import create_data_loader
from openvino.tools.pot.engines.creator import create_engine
from openvino.tools.pot.graph import load_model, save_model
from openvino.tools.pot.pipeline.initializer import create_pipeline
from tests.utils.check_graph import check_model
from tools.evaluate import evaluate
from .utils.config import get_engine_config, merge_configs, make_algo_config
# pylint: disable=W0611,C0412
try:
import torch
TORCH_AVAILABLE = True
except ImportError:
TORCH_AVAILABLE = False
# Regression fixtures: each tuple is
# (model name, framework, algorithm, preset, target sparsity level,
#  expected accuracy metrics after sparsification + finetuning).
SPARSITY_MODELS = [
    ('mobilenet-v2', 'caffe', 'WeightSparsity', 'performance', 0.3, {'accuracy@top1': 0.3150, 'accuracy@top5': 0.5630})
]
def run_algo(model, model_name, algorithm_config, tmp_path, reference_name):
    """Run a compression pipeline on a model, save it, and evaluate it.

    Returns a pair ``(metrics, model)`` where ``metrics`` maps metric names
    to their mean evaluated value and ``model`` is the compressed model.
    """
    engine_config = get_engine_config(model_name)
    # Build the full config before `model` is rebound below.
    config = merge_configs(model.model_params, engine_config, algorithm_config)

    loaded = load_model(model.model_params)
    loader = create_data_loader(engine_config, loaded)
    engine = create_engine(engine_config, data_loader=loader, metric=None)
    pipeline = create_pipeline(algorithm_config.algorithms, engine)

    # mkldnn is disabled while the pipeline (with layerwise tuning) runs.
    with torch.backends.mkldnn.flags(enabled=False):
        compressed = pipeline.run(loaded)
    paths = save_model(compressed, tmp_path.as_posix(), reference_name)

    engine.set_model(compressed)
    raw_metrics = evaluate(config=config, subset=range(1000), paths=paths)
    averaged = OrderedDict(
        (metric.name, np.mean(metric.evaluated_value)) for metric in raw_metrics
    )
    return averaged, compressed
@pytest.mark.parametrize('model_params', SPARSITY_MODELS,
                         ids=['{}_{}_sparse_tuning'.format(m[0], m[1]) for m in SPARSITY_MODELS])
def test_sparsity_with_finetuning_algo(models, tmp_path, model_params):
    """Sparsify a model with layer-wise finetuning; check sparsity, accuracy and graph."""
    model_name, model_framework, algo_name, preset, sparsity_level, expected_accuracy = model_params
    if not TORCH_AVAILABLE:
        # Layerwise finetuning needs torch; skip gracefully when it is absent.
        warnings.warn(UserWarning('Skipping layerwise finetuning test since torch is not importable'))
        return
    # Finetuning hyper-parameters; the expected accuracies above are tied to
    # these exact values — change them together.
    additional_params = {
        'sparsity_level': sparsity_level,
        'stat_subset_size': 300,
        'use_layerwise_tuning': True,
        'weights_lr': 1e-5,
        'bias_lr': 1e-3,
        'batch_size': 20,
        'num_samples_for_tuning': 40,
        'tuning_iterations': 1,
        'use_ranking_subset': False,
    }
    algorithm_config = make_algo_config(algo_name, preset, additional_params=additional_params)
    model = models.get(model_name, model_framework, tmp_path)
    reference_name = model_name + '_sparse_tuned'
    metrics, sparse_model = run_algo(model, model_name, algorithm_config, tmp_path, reference_name)
    # The resulting model must reach the requested sparsity level exactly.
    check_model_sparsity_level(sparse_model, None, sparsity_level, strict=True)
    for metric_name in metrics:
        print('{}: {:.4f}'.format(metric_name, metrics[metric_name]))
    assert metrics == pytest.approx(expected_accuracy, abs=0.006)
    # Compare the produced graph against the stored reference model.
    check_model(tmp_path, sparse_model, reference_name,
                model_framework, check_weights=False)
# Regression fixtures: each tuple is
# (model name, framework, algorithm, preset,
#  expected accuracy metrics after quantization + finetuning).
QUANTIZATION_MODELS = [
    ('mobilenet-v2', 'caffe', 'DefaultQuantization', 'performance', {'accuracy@top1': 0.7140, 'accuracy@top5': 0.8970})
]
@pytest.mark.parametrize('model_params', QUANTIZATION_MODELS,
                         ids=['{}_{}_quantize_tuned'.format(m[0], m[1]) for m in QUANTIZATION_MODELS])
def test_quantization_with_finetuning_algo(models, tmp_path, model_params):
    """Quantize a model with layer-wise finetuning; check accuracy and graph."""
    model_name, model_framework, algo_name, preset, expected_accuracy = model_params
    if not TORCH_AVAILABLE:
        # Layerwise finetuning requires torch; skip gracefully without it.
        warnings.warn(UserWarning('Skipping layerwise finetuning test since torch is not importable'))
        return

    tuning_options = {
        'use_layerwise_tuning': True,
        'batch_size': 20,
        'num_samples_for_tuning': 40,
    }
    algo_config = make_algo_config(algo_name, preset, additional_params=tuning_options)

    model = models.get(model_name, model_framework, tmp_path)
    reference_name = model_name + '_quantized_tuned'
    metrics, quantized_model = run_algo(model, model_name, algo_config, tmp_path, reference_name)

    for name in metrics:
        print('{}: {:.4f}'.format(name, metrics[name]))

    assert metrics == pytest.approx(expected_accuracy, abs=0.006)
    check_model(tmp_path, quantized_model, reference_name,
                model_framework, check_weights=False)
| [
"pytest.approx",
"numpy.mean",
"openvino.tools.pot.data_loaders.creator.create_data_loader",
"tests.utils.check_graph.check_model",
"openvino.tools.pot.graph.load_model",
"openvino.tools.pot.pipeline.initializer.create_pipeline",
"openvino.tools.pot.engines.creator.create_engine",
"openvino.tools.pot.... | [((1177, 1207), 'openvino.tools.pot.graph.load_model', 'load_model', (['model.model_params'], {}), '(model.model_params)\n', (1187, 1207), False, 'from openvino.tools.pot.graph import load_model, save_model\n'), ((1226, 1266), 'openvino.tools.pot.data_loaders.creator.create_data_loader', 'create_data_loader', (['engine_config', 'model'], {}), '(engine_config, model)\n', (1244, 1266), False, 'from openvino.tools.pot.data_loaders.creator import create_data_loader\n'), ((1280, 1346), 'openvino.tools.pot.engines.creator.create_engine', 'create_engine', (['engine_config'], {'data_loader': 'data_loader', 'metric': 'None'}), '(engine_config, data_loader=data_loader, metric=None)\n', (1293, 1346), False, 'from openvino.tools.pot.engines.creator import create_engine\n'), ((1362, 1414), 'openvino.tools.pot.pipeline.initializer.create_pipeline', 'create_pipeline', (['algorithm_config.algorithms', 'engine'], {}), '(algorithm_config.algorithms, engine)\n', (1377, 1414), False, 'from openvino.tools.pot.pipeline.initializer import create_pipeline\n'), ((2945, 3020), 'openvino.tools.pot.algorithms.sparsity.default.utils.check_model_sparsity_level', 'check_model_sparsity_level', (['sparse_model', 'None', 'sparsity_level'], {'strict': '(True)'}), '(sparse_model, None, sparsity_level, strict=True)\n', (2971, 3020), False, 'from openvino.tools.pot.algorithms.sparsity.default.utils import check_model_sparsity_level\n'), ((3194, 3287), 'tests.utils.check_graph.check_model', 'check_model', (['tmp_path', 'sparse_model', 'reference_name', 'model_framework'], {'check_weights': '(False)'}), '(tmp_path, sparse_model, reference_name, model_framework,\n check_weights=False)\n', (3205, 3287), False, 'from tests.utils.check_graph import check_model\n'), ((4545, 4641), 'tests.utils.check_graph.check_model', 'check_model', (['tmp_path', 'quantized_model', 'reference_name', 'model_framework'], {'check_weights': '(False)'}), '(tmp_path, quantized_model, reference_name, 
model_framework,\n check_weights=False)\n', (4556, 4641), False, 'from tests.utils.check_graph import check_model\n'), ((1425, 1467), 'torch.backends.mkldnn.flags', 'torch.backends.mkldnn.flags', ([], {'enabled': '(False)'}), '(enabled=False)\n', (1452, 1467), False, 'import torch\n'), ((3146, 3189), 'pytest.approx', 'pytest.approx', (['expected_accuracy'], {'abs': '(0.006)'}), '(expected_accuracy, abs=0.006)\n', (3159, 3189), False, 'import pytest\n'), ((4497, 4540), 'pytest.approx', 'pytest.approx', (['expected_accuracy'], {'abs': '(0.006)'}), '(expected_accuracy, abs=0.006)\n', (4510, 4540), False, 'import pytest\n'), ((1712, 1743), 'numpy.mean', 'np.mean', (['metric.evaluated_value'], {}), '(metric.evaluated_value)\n', (1719, 1743), True, 'import numpy as np\n')] |
from scipy.io import wavfile
import numpy as np
import scipy.signal
import matplotlib.pyplot as plt
import pylab
import math
from utils import escribir_pixel, filtrar
from PIL import Image
''' constantes '''
# Timing constants, in seconds. The decoder builds a 640x496 image with four
# components per scan line, so these presumably match a PD-family SSTV mode —
# TODO confirm against the target mode's specification.
PORCH_TIME = 0.00208            # porch between the sync pulse and the data
SYNC_TIME = 0.02                # nominal duration of the line-sync pulse
DETECT_SYNC_TIME = SYNC_TIME * 0.7   # fraction of the pulse required to accept a sync
LINE_COMP_TIME = 0.1216         # duration of one per-line colour component
#fs, data = wavfile.read('./audios_imagenes_prueba/pass_1_norm_7000.wav')
#t = np.arange(len(data))/fs
def crear_hilbert(atten, delta):
    '''
    Build a Kaiser-windowed FIR Hilbert transformer.

    :param atten: desired stopband attenuation, in dB
    :param delta: transition bandwidth, in radians (normalised frequency)
    :return: numpy array with the windowed impulse response (odd length)
    '''
    # Kaiser beta from the attenuation (empirical design formulas).
    # Bug fix: the original sent atten == 21 to the >= 50 dB branch, which
    # yields a wrong non-zero beta; the standard formula gives beta = 0
    # for attenuations up to 21 dB.
    if atten <= 21:
        beta = 0
    elif atten < 50:
        beta = 0.5842 * (atten-21)**(2/5) + 0.07886 * (atten-21)
    else:
        beta = 0.1102 * (atten-8.7)
    # Empirical Kaiser length estimate, forced up to the next odd integer so
    # the response is antisymmetric around a well-defined centre tap.
    m = 2 * ((atten-8)/(4.57*delta))
    if int(m) % 2 == 0:
        m = int(m+1)
    else:
        m = int(m+2)
    window = np.kaiser(m, beta)
    # Ideal Hilbert impulse response: 2/(pi*n) for odd n, 0 for even n.
    filter = [2/(np.pi*n) if n % 2 != 0 else 0
              for n in range((-m+1)//2, (m-1)//2 + 1)]
    hilbert = filter * window
    return hilbert
def crear_analitica(datos, filtro):
    '''Return the analytic signal of the form x + y*j.

    The real part is the input zero-padded by half the (odd) filter length on
    each side; the imaginary part is the full convolution with the Hilbert
    filter, so both parts have identical length.
    '''
    pad = np.zeros((len(filtro) - 1) // 2)
    parte_real = np.concatenate([pad, datos, pad])
    parte_imag = 1j * np.convolve(datos, filtro)
    return parte_real + parte_imag
def boundary(value):
    '''Clamp an instantaneous-frequency value to the valid [1500, 2300] range.'''
    return max(1500, min(value, 2300))
def inicializar_demod(datos, image_filename, fs):
    '''Demodulate an SSTV audio signal and save the decoded image as a PNG.

    The audio is turned into an analytic signal (Hilbert transform), its
    instantaneous frequency is extracted and low-pass filtered, then scan
    lines are located via their ~1200 Hz sync pulses and resampled into a
    640x496 YCbCr image (lum, cr, cb and next-line lum per detected sync).

    :param datos: audio samples (1-D sequence)
    :param image_filename: output PNG path
    :param fs: sampling rate, in Hz
    '''
    img = Image.new('YCbCr', (640,496), "white")
    signal = crear_analitica(datos, crear_hilbert(40, (2000 / fs) * 2*np.pi)) # normalised frequency
    import raw_file
    # Debug dump: analytic signal before filtering.
    with open('antes_filtro.raw', 'wb') as output_file:
        for s in signal:
            raw_file.write_complex_sample(output_file, s)
    inst_ph = np.unwrap(np.angle(signal)) # unwrap makes the phase linear instead of a sawtooth
    inst_fr = np.diff((inst_ph) / (2.0*np.pi) * fs) # diff takes x(n+1) minus x(n): instantaneous frequency in Hz
    inst_fr = list(filtrar(inst_fr, 1000 / (fs/2), 500 / (fs/2), 30)) # args: signal, cutoff freq, transition band, attenuation in dB
    muestras = 0
    cont_linea = -1
    i = 0
    import raw_file  # NOTE(review): redundant, already imported above
    # Debug dump: filtered instantaneous frequency, scaled to roughly [-1, 1].
    with open('despues_filtro.raw', 'wb') as output_file:
        for s in inst_fr:
            raw_file.write_sample(output_file, s/3000)
    while i < len(inst_fr):
        if 900 <= inst_fr[i] <= 1300:
            muestras += 1 # count samples near 1200 Hz: a long enough run is a line sync
            if muestras > int((DETECT_SYNC_TIME)*fs):
                cont_linea += 2 # almost surely indicates the next line
                muestras = 0 # reset the counter for the next iteration
                i = i - int((DETECT_SYNC_TIME)*fs) + int((SYNC_TIME+PORCH_TIME)*fs) # realign i to start right at the luminance segment
                # Frequency offset estimated from the 1200 Hz sync pulse itself.
                desfase = 1200 - np.mean(inst_fr[i-int((SYNC_TIME+PORCH_TIME)*fs) : i-int(PORCH_TIME*fs)])
                valor = inst_fr[i]
                if i + int(LINE_COMP_TIME*4*fs) >= len(inst_fr):
                    # The signal ends before the line is complete
                    break
                # try:
                # Resample each of the four per-line components to 640 pixels.
                y_resampleados = scipy.signal.resample(inst_fr[i:i+int(LINE_COMP_TIME*fs)],640)
                for columna, valor in enumerate(y_resampleados):
                    escribir_pixel(img, columna, cont_linea, "lum", boundary(valor+desfase))
                cr_resampleados = scipy.signal.resample(inst_fr[i+int(LINE_COMP_TIME*fs):i+int(LINE_COMP_TIME*2*fs)],640)
                for columna, valor in enumerate(cr_resampleados):
                    escribir_pixel(img, columna, cont_linea, "cr", boundary(valor+desfase))
                cb_resampleados = scipy.signal.resample(inst_fr[i+int(LINE_COMP_TIME*2*fs):i+int(LINE_COMP_TIME*3*fs)],640)
                for columna, valor in enumerate(cb_resampleados):
                    escribir_pixel(img, columna, cont_linea, "cb", boundary(valor+desfase))
                ny_resampleados = scipy.signal.resample(inst_fr[i+int(LINE_COMP_TIME*3*fs):i+int(LINE_COMP_TIME*4*fs)],640)
                for columna, valor in enumerate(ny_resampleados):
                    escribir_pixel(img, columna, cont_linea, "nxt_lum", boundary(valor+desfase))
                # except:
                #     break
                # NOTE(review): only 2 of the 4 consumed component durations are
                # skipped here; the rest is covered by re-detecting the next
                # sync — TODO confirm this is intentional.
                i+=int(LINE_COMP_TIME*2*fs)
        i += 1
    imgrgb = img.convert("RGB")
    imgrgb.save(image_filename, "PNG")
| [
"numpy.convolve",
"raw_file.write_complex_sample",
"PIL.Image.new",
"numpy.kaiser",
"utils.filtrar",
"raw_file.write_sample",
"numpy.diff",
"numpy.angle",
"numpy.concatenate"
] | [((845, 863), 'numpy.kaiser', 'np.kaiser', (['m', 'beta'], {}), '(m, beta)\n', (854, 863), True, 'import numpy as np\n'), ((1234, 1271), 'numpy.concatenate', 'np.concatenate', (['[zeros, datos, zeros]'], {}), '([zeros, datos, zeros])\n', (1248, 1271), True, 'import numpy as np\n'), ((1612, 1651), 'PIL.Image.new', 'Image.new', (['"""YCbCr"""', '(640, 496)', '"""white"""'], {}), "('YCbCr', (640, 496), 'white')\n", (1621, 1651), False, 'from PIL import Image\n'), ((2026, 2063), 'numpy.diff', 'np.diff', (['(inst_ph / (2.0 * np.pi) * fs)'], {}), '(inst_ph / (2.0 * np.pi) * fs)\n', (2033, 2063), True, 'import numpy as np\n'), ((1291, 1317), 'numpy.convolve', 'np.convolve', (['datos', 'filtro'], {}), '(datos, filtro)\n', (1302, 1317), True, 'import numpy as np\n'), ((1939, 1955), 'numpy.angle', 'np.angle', (['signal'], {}), '(signal)\n', (1947, 1955), True, 'import numpy as np\n'), ((2135, 2188), 'utils.filtrar', 'filtrar', (['inst_fr', '(1000 / (fs / 2))', '(500 / (fs / 2))', '(30)'], {}), '(inst_fr, 1000 / (fs / 2), 500 / (fs / 2), 30)\n', (2142, 2188), False, 'from utils import escribir_pixel, filtrar\n'), ((1868, 1913), 'raw_file.write_complex_sample', 'raw_file.write_complex_sample', (['output_file', 's'], {}), '(output_file, s)\n', (1897, 1913), False, 'import raw_file\n'), ((2406, 2450), 'raw_file.write_sample', 'raw_file.write_sample', (['output_file', '(s / 3000)'], {}), '(output_file, s / 3000)\n', (2427, 2450), False, 'import raw_file\n')] |
# Copyright (c) 2019 <NAME>
from ipywidgets import Box
from aixplot.widget import Filter, NoneFilter
from aixplot.widget import Widget as Aixplot
import numpy as np
from .cacher import IterationCacher
from .label import Label
from IPython.core.magic import line_magic, magics_class, Magics
from IPython.core.magic_arguments import argument, magic_arguments, \
parse_argstring
class ConvergenceFilter(Filter):
    """Filter that keeps only the converged iterations of a cached quantity."""

    def __repr__(self):
        return "Convergence"

    def __call__(self, label, cache):
        # Index the requested series with the convergence flags
        # (expected to act as a boolean mask over the same iterations).
        mask = np.array(cache[Label.CONVERGENCE])
        return np.array(cache[label])[mask]
class Widget(Aixplot):
    """aixplot Widget preconfigured for DAMASK data.

    Defaults to plotting von Mises stress versus step, restricted to
    converged iterations via ConvergenceFilter.
    """
    def __init__(self, cacher_class=IterationCacher, logger=None, **traits):
        # Available filters; index 1 (ConvergenceFilter) is active by default.
        self.filters = [NoneFilter(), ConvergenceFilter()]
        self.filter = self.filters[1]
        # Default axes: step on x, von Mises stress on y.
        self.x, self.y = Label.STEP, Label.VONMISES_STRESS
        super(Widget, self).__init__(cacher_class, logger=logger, **traits)
@magics_class
class DamaskPlotMagics(Magics):
    """IPython line magic ``%damask_plot`` that displays a plotting Widget."""
    @line_magic
    @magic_arguments()
    @argument('--filename', '-f', help='DAMASK stdout filename to be plotted')
    def damask_plot(self, line=''):
        """Parse the magic's arguments and display a Widget for the given file."""
        args = parse_argstring(self.damask_plot, line)
        # NOTE(review): `display` is not imported here; it relies on IPython
        # injecting it into builtins (IPython >= 7) — confirm supported versions.
        if args.filename:
            display(Widget(filename=args.filename))
        else:
            display(Widget())
| [
"IPython.core.magic_arguments.parse_argstring",
"numpy.array",
"IPython.core.magic_arguments.argument",
"IPython.core.magic_arguments.magic_arguments",
"aixplot.widget.NoneFilter"
] | [((1047, 1064), 'IPython.core.magic_arguments.magic_arguments', 'magic_arguments', ([], {}), '()\n', (1062, 1064), False, 'from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring\n'), ((1070, 1143), 'IPython.core.magic_arguments.argument', 'argument', (['"""--filename"""', '"""-f"""'], {'help': '"""DAMASK stdout filename to be plotted"""'}), "('--filename', '-f', help='DAMASK stdout filename to be plotted')\n", (1078, 1143), False, 'from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring\n'), ((556, 578), 'numpy.array', 'np.array', (['cache[label]'], {}), '(cache[label])\n', (564, 578), True, 'import numpy as np\n'), ((591, 625), 'numpy.array', 'np.array', (['cache[Label.CONVERGENCE]'], {}), '(cache[Label.CONVERGENCE])\n', (599, 625), True, 'import numpy as np\n'), ((1195, 1234), 'IPython.core.magic_arguments.parse_argstring', 'parse_argstring', (['self.damask_plot', 'line'], {}), '(self.damask_plot, line)\n', (1210, 1234), False, 'from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring\n'), ((771, 783), 'aixplot.widget.NoneFilter', 'NoneFilter', ([], {}), '()\n', (781, 783), False, 'from aixplot.widget import Filter, NoneFilter\n')] |
from numpy import pi, isclose
from pyroll.core import CircularOvalGroove
def test_circular_oval():
    """Check the geometry of a circular-oval groove against reference values."""
    groove = CircularOvalGroove(depth=5.05, r1=7, r2=33)

    # Usable width, flank angles and z1 must match the reference geometry.
    assert isclose(groove.usable_width, 17.63799973 * 2)
    assert isclose(groove.alpha1, 29.102618 / 180 * pi)
    assert isclose(groove.alpha2, 29.102618 / 180 * pi)
    assert isclose(groove.z1, 19.45501221)
| [
"pyroll.core.CircularOvalGroove",
"numpy.isclose"
] | [((110, 153), 'pyroll.core.CircularOvalGroove', 'CircularOvalGroove', ([], {'depth': '(5.05)', 'r1': '(7)', 'r2': '(33)'}), '(depth=5.05, r1=7, r2=33)\n', (128, 153), False, 'from pyroll.core import CircularOvalGroove\n'), ((166, 206), 'numpy.isclose', 'isclose', (['g.usable_width', '(17.63799973 * 2)'], {}), '(g.usable_width, 17.63799973 * 2)\n', (173, 206), False, 'from numpy import pi, isclose\n'), ((218, 257), 'numpy.isclose', 'isclose', (['g.alpha1', '(29.102618 / 180 * pi)'], {}), '(g.alpha1, 29.102618 / 180 * pi)\n', (225, 257), False, 'from numpy import pi, isclose\n'), ((269, 308), 'numpy.isclose', 'isclose', (['g.alpha2', '(29.102618 / 180 * pi)'], {}), '(g.alpha2, 29.102618 / 180 * pi)\n', (276, 308), False, 'from numpy import pi, isclose\n'), ((320, 346), 'numpy.isclose', 'isclose', (['g.z1', '(19.45501221)'], {}), '(g.z1, 19.45501221)\n', (327, 346), False, 'from numpy import pi, isclose\n')] |
#0 -*- coding: utf-8 -*-
"""
Population genomics statistics.
Functions in this module are used to estimate population genomics statistics along a sequence.
"""
import pandas as pd
from Bio.Seq import Seq
import PiSlice.input as input
from itertools import compress
import numpy as np
import mapply
import multiprocessing
import re
#from pandarallel import pandarallel
import intervaltree
def piSlice(windows, statistics=[""], min_bp=6, splicing_strategy="merge", n_cpus=6, *args, **kwargs):
    """
    The main function to return a data frame of population genomics statistics for a list of genomic windows.

    Note: `windows` is modified in place (one new column is added per
    computed statistic) and also returned.

    :param windows: DataFrame, a pandas data frame (can be gff) with at least three columns: seqname, start, end
    :param statistics: str, a list of statistics to compute
    :param min_bp: int, minimum number of base pairs for GC-content statistics
    :param splicing_strategy: str, how overlapping transcripts are spliced for intron statistics
    :param n_cpus: int, number of workers for mapply (0 means all available CPUs)
    :param **fasta: fasta, a fasta object with multiple fasta sequences
    :param **gff: DataFrame, a gff object
    :param **vcf: vcf, a vcf object
    :return: DataFrame, a data frame with population statistics for each windows
    """
    # NOTE(review): mutable default `statistics=[""]` — harmless here since it
    # is never mutated, but a tuple default would be safer.
    fasta = kwargs.get("fasta", "")
    gff = kwargs.get("gff", "")
    vcf = kwargs.get("vcf", "")
    #pandarallel.initialize(nb_workers=n_cpus, progress_bar=True)
    # Function to subset sequences in the fasta file
    def make_dataset(windows, fasta):
        # Sample sequences
        # Sample all sequences from chromosomes and start-end positions
        list_seq = list(windows.apply(lambda x: fasta.sample_sequence(x["seqname"], x["start"], x["end"]), axis=1))
        return(list_seq)
    # TODO A progress bar
    # Header
    print("Number of windows:", len(windows.index))
    print("Chromosomes are", " ".join(windows.seqname.unique()))
    # Configure the mapply worker pool (capped at the sensible CPU count).
    if (n_cpus == 0):
        n_cpus = multiprocessing.cpu_count()
    sensible_cpus = mapply.parallel.sensible_cpu_count()
    mapply.init(n_workers=min(sensible_cpus, n_cpus))
    # Each statistic below is computed independently and stored as a new
    # column; gff-based statistics use the parallel `mapply`, while the
    # fasta-based ones use plain `apply` (fasta access is not picklable).
    if "gene_count" in statistics:
        print("Process number of genes")
        estimates = windows.mapply(lambda x: gene_count(gff,
                                                       x["seqname"],
                                                       x["start"],
                                                       x["end"]),
                                   axis=1)
        windows["gene_count"] = estimates
    if "gene_length" in statistics:
        print("Process mean gene length (bp)")
        estimates = windows.mapply(lambda x: feature_length(gff,
                                                           x["seqname"],
                                                           x["start"],
                                                           x["end"],
                                                           feature="gene"),
                                   axis=1)
        windows["gene_length"] = estimates
    if "exon_length" in statistics:
        print("Process mean exon length (bp)")
        estimates = windows.mapply(lambda x: feature_length(gff,
                                                            x["seqname"],
                                                            x["start"],
                                                            x["end"],
                                                            feature="exon"),
                                   axis=1)
        windows["exon_length"] = estimates
    if "intron_length" in statistics:
        print("Process mean intron length (bp)")
        estimates = windows.mapply(lambda x: feature_length(gff,
                                                            x["seqname"],
                                                            x["start"],
                                                            x["end"],
                                                            feature="intron"),
                                   axis=1)
        windows["intron_length"] = estimates
    if "gene_nbexons" in statistics:
        print("Process the mean number of exons")
        estimates = windows.mapply(lambda x: gene_nbexons(gff,
                                                          x["seqname"],
                                                          x["start"],
                                                          x["end"]),
                                   axis=1)
        windows["gene_nbexons"] = estimates
    if "gene_density" in statistics:
        print("Process gene density")
        estimates = windows.mapply(lambda x: gene_density(gff,
                                                          x["seqname"],
                                                          x["start"],
                                                          x["end"]),
                                   axis=1)
        windows["gene_density"] = estimates
    if "snp_count" in statistics:
        print("Process number of SNPs")
        estimates = windows.mapply(lambda x: snp_count(vcf,
                                                       x["seqname"],
                                                       x["start"],
                                                       x["end"]),
                                   axis=1)
        windows["snp_count"] = estimates
    if "gc" in statistics:
        print("Process GC content")
        list_seq = make_dataset(windows, fasta)
        # Compute GC content
        estimates = list(map(lambda x: gc(x), list_seq))
        # Add column for statistics
        windows["gc"] = estimates
    if "gc_noncoding" in statistics:
        print("Process non-coding GC content")
        # Returns (gc, proportion) pairs, unpacked into two columns below.
        estimates = windows.apply(lambda x: gc_noncoding(fasta,
                                                         gff,
                                                         x["seqname"],
                                                         x["start"],
                                                         x["end"],
                                                         min_bp=min_bp),
                                  axis=1)
        list_gc = [item[0] for item in estimates]
        list_density = [item[1] for item in estimates]
        # Add column for statistics
        windows["gc_noncoding"] = list_gc
        windows["noncoding_proportion"] = list_density
    if "gc_intergenic" in statistics:
        print("Process intergenic GC content")
        estimates = windows.apply(lambda x: gc_intergenic(fasta,
                                                          gff,
                                                          x["seqname"],
                                                          x["start"],
                                                          x["end"],
                                                          min_bp=min_bp),
                                  axis=1)
        list_gc = [item[0] for item in estimates]
        list_density = [item[1] for item in estimates]
        # Add column for statistics
        windows["gc_intergenic"] = list_gc
        windows["intergenic_proportion"] = list_density
    if "gc_intron" in statistics:
        print("Process intron GC content")
        estimates = windows.apply(lambda x: gc_intron(fasta,
                                                      gff,
                                                      x["seqname"],
                                                      x["start"],
                                                      x["end"],
                                                      min_bp=min_bp,
                                                      splicing_strategy=splicing_strategy),
                                  axis=1)
        list_gc = [item[0] for item in estimates]
        list_density = [item[1] for item in estimates]
        # Add column for statistics
        windows["gc_intron"] = list_gc
        windows["intron_proportion"] = list_density
    if "gc_codon" in statistics:
        print("Process GC content with codon positions")
        # Compute GC content
        # TODO Optim apply(), but fasta.sample_sequence() can not be parralelized
        # impossible to reduce using cython
        # Returns 5-tuples: (gc, gc1, gc2, gc3, cds proportion).
        estimates = windows.apply(lambda x: gc_codon(fasta,
                                                     gff,
                                                     x["seqname"],
                                                     x["start"],
                                                     x["end"],
                                                     min_bp=min_bp),
                                  axis=1)
        list_gc = [item[0] for item in estimates]
        list_gc1 = [item[1] for item in estimates]
        list_gc2 = [item[2] for item in estimates]
        list_gc3 = [item[3] for item in estimates]
        list_cds_proportion = [item[4] for item in estimates]
        # Add column for statistics
        windows["gc_codon"] = list_gc
        windows["gc1"] = list_gc1
        windows["gc2"] = list_gc2
        windows["gc3"] = list_gc3
        windows["cds_proportion"] = list_cds_proportion
    if "gc3exon1" in statistics:
        print("Process GC3 first exon")
        estimates = windows.apply(lambda x: gc3exon1(fasta,
                                                     gff,
                                                     x["seqname"],
                                                     x["start"],
                                                     x["end"],
                                                     min_bp=min_bp),
                                  axis=1)
        windows["gc3_exon1"] = estimates
    if "cpg" in statistics:
        print("Process CpG densities")
        list_seq = make_dataset(windows, fasta)
        # Compute CpG density
        estimates = list(map(lambda x: cpg(x), list_seq))
        # Add column for statistics
        windows["cpg"] = estimates
    if "seq" in statistics:
        print("Retrieving sequences")
        sequences = list(map(lambda x: fasta.sample_sequence(windows.loc[x, "seqname"],
                                                             windows.loc[x, "start"],
                                                             windows.loc[x, "end"]),
                             windows.index))
        windows["seq"] = sequences
    return windows
def gene_count(gff, chromosome, start, end):
    """
    Count the number of genes beginning in the window (number of start positions).

    :param gff: DataFrame, a gff file with gene annotations
    :param chromosome: str, Chromosome name
    :param start: int, Start position of the sequence
    :param end: int, End position of the sequence
    :return: int, number of genes in the gff window
    """
    # A gene is counted when its start lies in [start, end) on the chromosome.
    in_window = ((gff['seqname'] == str(chromosome))
                 & (gff['start'] >= int(start))
                 & (gff['start'] < int(end))
                 & (gff['feature'] == "gene"))
    return int(in_window.sum())
# DONE Factorize gene_length, exon_length, intron_length to a generic feature_length function
def feature_length(gff, chromosome, start, end, feature="gene"):
    """
    Estimate the mean length (bp) of a feature in the window.

    Only features whose start lies in [start, end) are considered; the mean
    is NaN when the window contains no such feature.

    :param gff: DataFrame, a gff file with gene annotations
    :param chromosome: str, Chromosome name
    :param start: int, Start position of the sequence
    :param end: int, End position of the sequence
    :param feature: str, the type of feature to sample
    :return: float, mean feature length
    """
    selected = gff[(gff['seqname'] == str(chromosome))
                   & (gff['start'] >= int(start))
                   & (gff['start'] < int(end))
                   & (gff['feature'] == feature)]
    # Lengths are inclusive of both endpoints, hence the +1.
    lengths = selected['end'] - selected['start'] + 1
    return np.mean(lengths)
def max_rank(gff, gene_id):
    """
    Return the max rank for a given gene id.

    The gff must have been parsed so that a 'rank' column exists on the
    children (mRNA and exon) records.
    """
    # Second order children (mRNA and exons) via the project's `.gff` accessor.
    descendants = gff.gff.children(gene_id, all=True)
    return np.max(descendants["rank"])
def gene_nbexons(gff, chromosome, start, end):
    """
    Estimate the mean number of exons in genes
    :param gff: DataFrame, a gff file with gene annotations, must be parsed before
    :param chromosome: str, Chromosome name
    :param start: int, Start position of the sequence
    :param end: int, End position of the sequence
    :return: int, mean number of exons per gene
    """
    window = gff[(gff['seqname'] == str(chromosome)) &
                 (gff['start'] >= int(start)) &
                 (gff['start'] < int(end))].copy()
    # Max rank per gene approximates its exon count; ranks must have been
    # inferred beforehand (otherwise the mean degenerates to 0).
    gene_ids = window['id'][window['feature'] == "gene"]
    exon_counts = [max_rank(window, gid) for gid in gene_ids]
    return np.mean(exon_counts)
def gene_density(gff, chromosome, start, end):
    """
    Estimate gene density in the window (between 0 and 1)
    :param gff: DataFrame, a gff file with gene annotations
    :param chromosome: str, Chromosome name
    :param start: int, Start position of the sequence
    :param end: int, End position of the sequence
    :return: int, gene density
    """
    genes = gff.loc[
        (gff['seqname'] == str(chromosome))
        & (gff['start'] >= int(start))
        & (gff['start'] < int(end))
        & (gff['feature'] == "gene")
    ]
    # Total gene bp (inclusive coordinates) over the window length.
    total_gene_bp = np.sum(genes['end'] - genes['start'] + 1)
    return total_gene_bp / (end - start + 1)
def snp_count(vcf, chromosome, start, end):
    """
    Count the number of snps in the window.
    :param vcf: vcf, a vcf file with SNPs and their genomic positions
    :param chromosome: str, Chromosome name
    :param start: int, Start position of the sequence
    :param end: int, End position of the sequence
    :return: int, number of snps in the vcf window
    """
    variants = vcf.sample_variant(str(chromosome), int(start), int(end))
    # sample_variant yields records lazily; count them without materializing.
    return sum(1 for _ in variants)
def gc(sequence, min_bp=6):
    """
    Estimate the fraction of G+C bases in a DNA sequence.
    It reads a DNA sequence and count the number of G+C bases divided by the
    total number of bases: GC = (G+C)/(G+C+A+T). Ambiguous bases ('N', ...)
    are excluded from the denominator.
    :param sequence: str, A string containing a DNA sequence
    :param min_bp: int, minimal sequence length; shorter sequences return NaN
    :return: float, Numeric value of the GC proportion in the sequence, or
        NaN when the sequence is too short or has no unambiguous A/C/G/T base
    """
    # Guard clause: too-short sequences are not informative.
    if len(sequence) <= min_bp:
        return np.nan
    # Make sequence uppercase for simple computation
    sequence = sequence.upper()
    base_a = sequence.count("A")
    base_c = sequence.count("C")
    base_g = sequence.count("G")
    base_t = sequence.count("T")
    try:
        # Do not use the GC calculation from Biopython
        # because it does not deal with 'N' nucleotides.
        return (base_g + base_c) / (base_a + base_c + base_g + base_t)
    except ZeroDivisionError:
        # Sequence made entirely of ambiguous bases.
        # np.nan: the np.NaN alias was removed in NumPy 2.0.
        return np.nan
# TODO GC exact computation to account for ambiguous nucleotides S(G or C)
# TODO Test gc_cds for GC1, GC2, GC3 contents
def gc_codon(fasta, gff, chromosome, start, end, min_bp=6):
    """
    Estimate the fraction of G+C bases within CDS at codon positions 1, 2 and 3.
    Use a list of CDS features (start, end, frame, phase) to subset a list of DNA sequences
    and estimate GC content at each position.
    :param fasta: str, A fasta object with the same coordinates as the gff
    :param gff: DataFrame, A gff data frame
    :param chromosome: str, Chromosome name
    :param start: int, Start position of the sequence
    :param end: int, End position of the sequence
    :param min_bp: int, the minimal number of nucleotides to consider a sequence
    :return: tuple (gc123, gc1, gc2, gc3, cds_proportion) of the global GC
        proportion, GC at each codon position, and the CDS fraction of the
        window; all NaN when no usable CDS is found
    """
    # Subset features.
    # Exons contain UTR that can alter the frame shift, so GC is estimated
    # on CDS features only.
    feat = gff[(gff['seqname'] == str(chromosome)) &
               (gff['start'] >= int(start)) &
               (gff['end'] <= int(end)) &
               (gff['feature'] == "CDS")]
    if (feat.shape[0] > 0):
        # Sample all sequences from chromosomes and start-end positions
        list_seq = list(feat.apply(lambda x: fasta.sample_sequence(x["seqname"], x["start"], x["end"]), axis=1))
        # Remove sequences shorter than the required number of unambiguous
        # nucleotides: they introduce errors below.
        length_seq = list(map(lambda x: len(re.findall("[ATCGatcg]", x)), list_seq))
        # Reduce the dataset
        feat = feat.loc[list(map(lambda x: int(x) > min_bp, length_seq))]
        list_seq = list(feat.apply(lambda x: fasta.sample_sequence(x["seqname"], x["start"], x["end"]), axis=1))
        if (feat.shape[0] > 0):
            # CDS proportion: mask CDS regions, then p = 1 - len(masked)/l.
            # Merging overlapping coordinates this way handles splicing
            # variants and overlapping sequences.
            mask = [(x, y) for x, y in zip(list(feat.start), list(feat.end))]
            seq = fasta.sample_sequence_masked(chromosome, start, end, mask)
            try:
                cds_proportion = 1 - abs(len(seq) / (end - start + 1))
            except ZeroDivisionError:
                cds_proportion = np.nan
            # Reverse the DNA sequence if strand == "-".
            # NOTE(review): this reverses without complementing; GC counts at
            # codon positions are unchanged by complementation (G<->C), so the
            # estimate holds -- confirm this is the intent.
            strand = list(feat.apply(lambda x: x['strand'], axis=1))
            for i, seq in enumerate(list_seq):
                if strand[i] == "-":
                    list_seq[i] = seq[::-1]
            # Phase of CDS features: remove 0, 1 or 2 bp at the beginning.
            frame = list(feat.apply(lambda x: x['frame'], axis=1))
            for i, seq in enumerate(list_seq):
                list_seq[i] = seq[int(frame[i])::]
            # Split in three vectors of codon position
            codons = "".join(map(lambda x: x[::], list_seq))
            codon1 = "".join(map(lambda x: x[0::3], list_seq))
            codon2 = "".join(map(lambda x: x[1::3], list_seq))
            codon3 = "".join(map(lambda x: x[2::3], list_seq))
            # Estimate GC content at each codon position
            gc123 = gc(codons, min_bp=min_bp)
            gc1 = gc(codon1, min_bp=min_bp)
            gc2 = gc(codon2, min_bp=min_bp)
            gc3 = gc(codon3, min_bp=min_bp)
        else:
            # All CDS were too short. np.nan: np.NaN removed in NumPy 2.0.
            gc123 = np.nan
            gc1 = np.nan
            gc2 = np.nan
            gc3 = np.nan
            cds_proportion = np.nan
    else:
        # No CDS in the window.
        gc123 = np.nan
        gc1 = np.nan
        gc2 = np.nan
        gc3 = np.nan
        cds_proportion = np.nan
    gc_content = (gc123, gc1, gc2, gc3, cds_proportion)
    return gc_content
def gc_noncoding(fasta, gff, chromosome, start, end, min_bp=6):
    """
    Estimate the fraction of G+C bases within non-coding sequences.
    Use a list of CDS features (start, end, frame, phase) to subset a list of non-coding DNA sequences
    :param fasta: str, A fasta object with the same coordinates as the gff
    :param gff: DataFrame, A gff data frame
    :param chromosome: str, Chromosome name
    :param start: int, Start position of the sequence
    :param end: int, End position of the sequence
    :param min_bp: int, the minimal number of nucleotides to consider a sequence
    :return: tuple, the GC content in non-coding sequences and the proportion of non-coding sequence in the window
    """
    feat = gff[(gff['seqname'] == str(chromosome)) &
               (gff['start'] >= int(start)) &
               (gff['end'] <= int(end)) &
               (gff['feature'] == "CDS")]
    if (feat.shape[0] == 0):
        # No CDS annotated: the whole window is non-coding.
        noncoding_seq = fasta.sample_sequence(chromosome, start, end)
        noncoding_prop = 1
        gc_noncoding = gc(noncoding_seq, min_bp=min_bp)
    else:
        # Mask CDS regions and keep the remaining (non-coding) sequence.
        mask = [(x, y) for x, y in zip(list(feat.start), list(feat.end))]
        seq = fasta.sample_sequence_masked(chromosome, start, end, mask)
        # Bug fix: honour the caller-supplied min_bp (was `gc(seq)`, which
        # silently reverted to the default threshold in this branch).
        gc_noncoding = gc(seq, min_bp=min_bp)
        try:
            # NOTE(review): gc_codon normalizes by (end - start + 1); kept as
            # (end - start) here to preserve existing behaviour -- confirm.
            noncoding_prop = len(seq) / (end - start)
        except ZeroDivisionError:
            noncoding_prop = np.nan
    return (gc_noncoding, noncoding_prop)
def gc_intergenic(fasta, gff, chromosome, start, end, min_bp=6):
    """
    Estimate the fraction of G+C bases within intergenic sequences.
    Use a list of gene features (start, end) to subset a list of intergenic DNA sequences
    :param fasta: str, A fasta object with the same coordinates as the gff
    :param gff: DataFrame, A gff data frame
    :param chromosome: str, Chromosome name
    :param start: int, Start position of the sequence
    :param end: int, End position of the sequence
    :param min_bp: int, the minimal number of nucleotides to consider a sequence
    :return: tuple, the GC content in intergenic sequences and the proportion of intergenic sequence in the window
    """
    feat = gff[(gff['seqname'] == str(chromosome)) &
               (gff['start'] >= int(start)) &
               (gff['end'] <= int(end)) &
               (gff['feature'] == "gene")]
    if (feat.shape[0] == 0):
        # No gene annotated: the whole window is intergenic.
        noncoding_seq = fasta.sample_sequence(chromosome, start, end)
        intergenic_prop = 1
        gc_intergenic = gc(noncoding_seq, min_bp=min_bp)
    else:
        # Mask gene regions and keep the remaining (intergenic) sequence.
        mask = [(x, y) for x, y in zip(list(feat.start), list(feat.end))]
        seq = fasta.sample_sequence_masked(chromosome, start, end, mask)
        # Bug fix: honour the caller-supplied min_bp (was `gc(seq)`, which
        # silently reverted to the default threshold in this branch).
        gc_intergenic = gc(seq, min_bp=min_bp)
        try:
            # NOTE(review): gc_codon normalizes by (end - start + 1); kept as
            # (end - start) here to preserve existing behaviour -- confirm.
            intergenic_prop = len(seq) / (end - start)
        except ZeroDivisionError:
            intergenic_prop = np.nan
    return (gc_intergenic, intergenic_prop)
def gc_intron(fasta, gff, chromosome, start, end, min_bp=6, splicing_strategy="merge"):
    """
    Estimate the fraction of G+C bases within intron sequences.
    Use a list of intron features (start, end) to subset a list of intron DNA sequences
    :param fasta: str, A fasta object with the same coordinates as the gff
    :param gff: DataFrame, A gff data frame
    :param chromosome: str, Chromosome name
    :param start: int, Start position of the sequence
    :param end: int, End position of the sequence
    :param min_bp: int, the minimal number of nucleotides to consider a sequence
    :param splicing_strategy: str, how overlapping splice variants are handled;
        "merge" collapses overlapping intron intervals with an interval tree.
        NOTE(review): any other value leaves list_start/list_end undefined and
        raises NameError -- only "merge" is implemented.
    :return: tuple, the GC content in intron sequences and the proportion of intron sequence in the window
    """
    feat = gff[(gff['seqname'] == str(chromosome)) &
               (gff['start'] >= int(start)) &
               (gff['end'] <= int(end)) &
               (gff['feature'] == "intron")]
    if (feat.shape[0] == 0):
        # No intron annotated in the window.
        gc_intron = np.NaN
        intron_prop = np.NaN
    # If only one feature, Dataframe is transformed in Series
    # NOTE(review): boolean-mask selection on a DataFrame always yields a
    # DataFrame, so this branch looks unreachable unless callers pass a
    # Series directly -- confirm.
    elif (isinstance(feat, pd.Series)):
        list_start = [min(feat["start"], feat["end"])]
        list_end = [max(feat["start"], feat["end"])]
        list_seq = [fasta.sample_sequence(chromosome, x, y) for x,y in zip(list_start, list_end)]
        # Sample sequences
        seq = "".join(list_seq)
        gc_intron = gc(seq, min_bp)
        try:
            intron_prop = len(seq)/(end-start)
        except ZeroDivisionError:
            intron_prop = np.NaN
    elif (feat.shape[0] > 0):
        if (splicing_strategy == "merge"):
            # Normalize each interval to (low, high) and drop zero-length
            # ones, then merge overlaps with an interval tree so shared
            # intronic bp are not counted twice across splice variants.
            list_start = [x[1] for x in feat["start"].items()]
            list_end = [x[1] for x in feat["end"].items()]
            intervals = [(min(x, y), max(x, y)) for x, y in zip(list_start, list_end) if x != y]
            merge_splicing = intervaltree.IntervalTree.from_tuples(intervals)
            list_start = [x.begin for x in merge_splicing]
            list_end = [x.end for x in merge_splicing]
            # if ((len(list_start) > 1) & (len(list_end) > 1)):
            #     intervals = [(x,y) for x,y in zip([list_start], [list_end])]
            #     merge_splicing = intervaltree.IntervalTree.from_tuples(intervals)
            #     list_start = [x.begin for x in merge_splicing]
            #     list_end = [x.end for x in merge_splicing]
            # else:
            #     # Inverse coordinates if sequence is on "-" strand (i.e. start > end)
            #     list_start = min(list_start, list_end)
            #     list_end = min(list_start, list_end)
        list_seq = [fasta.sample_sequence(chromosome, x, y) for x,y in zip(list_start, list_end)]
        # Concatenate all intron pieces into one sequence.
        seq = "".join(list_seq)
        gc_intron = gc(seq, min_bp)
        try:
            intron_prop = len(seq)/(end-start)
        except ZeroDivisionError:
            intron_prop = np.NaN
    else:
        gc_intron = np.NaN
        intron_prop = np.NaN
    return (gc_intron, intron_prop)
def gc1(fasta, gff, chromosome, start, end):
    """GC content at first codon positions of CDS in the window."""
    return gc_codon(fasta, gff, chromosome, start, end)[1]
def gc2(fasta, gff, chromosome, start, end):
    """GC content at second codon positions of CDS in the window."""
    return gc_codon(fasta, gff, chromosome, start, end)[2]
def gc3(fasta, gff, chromosome, start, end):
    """GC content at third codon positions of CDS in the window."""
    return gc_codon(fasta, gff, chromosome, start, end)[3]
def gc3exon1(fasta, gff, chromosome, start, end, min_bp=6):
    """GC3 restricted to first exons (rank 0 or 1) in the window."""
    first_exons = gff.loc[(gff["rank"] == 0) | (gff["rank"] == 1)]
    return gc_codon(fasta, first_exons, chromosome, start, end, min_bp=min_bp)[3]
def cpg(sequence, min_bp=6):
    """
    Estimate the CpG density as the number of CG sites divided by the total number of sites
    (i.e. total number of nucleotides divided by two)
    :param sequence: str, a fasta sequence
    :param min_bp: int, minimal sequence length; shorter sequences return NaN
        (generalizes the previously hard-coded threshold of 6)
    :return: float, a CpG density
    """
    # Guard clause: too-short sequences are not informative.
    if len(sequence) <= min_bp:
        # np.nan: the np.NaN alias was removed in NumPy 2.0.
        return np.nan
    sequence = sequence.upper()
    if "CG" not in sequence:
        return 0
    # Count only unambiguous bases in the denominator (as in gc()).
    seq_len = len(re.findall("[ATCGatcg]", sequence))
    return sequence.count('CG') / (seq_len / 2)
def pi(polymorphism):
    """
    Compute the Nucleotide diversity Pi at a given site (window) in a population, as described by Nei and Li in 1979.

    NOTE(review): this is an unimplemented stub. The ``polymorphism``
    argument is evaluated but never used, and ``return pi`` returns the
    function object itself rather than a diversity value -- the body
    still needs to be written.

    :param polymorphism: population polymorphism data (format TBD)
    :return: currently the ``pi`` function object itself (placeholder)
    Reference:
    <NAME>.; <NAME>; <NAME> (October 1, 1979).
    "Mathematical Model for Studying Genetic Variation in Terms of Restriction Endonucleases". PNAS. 76 (10): 5269–73.
    """
    polymorphism
    return pi
| [
"numpy.mean",
"multiprocessing.cpu_count",
"numpy.max",
"numpy.sum",
"intervaltree.IntervalTree.from_tuples",
"mapply.parallel.sensible_cpu_count",
"re.findall"
] | [((1766, 1802), 'mapply.parallel.sensible_cpu_count', 'mapply.parallel.sensible_cpu_count', ([], {}), '()\n', (1800, 1802), False, 'import mapply\n'), ((11114, 11131), 'numpy.mean', 'np.mean', (['feat_len'], {}), '(feat_len)\n', (11121, 11131), True, 'import numpy as np\n'), ((11507, 11531), 'numpy.max', 'np.max', (["children['rank']"], {}), "(children['rank'])\n", (11513, 11531), True, 'import numpy as np\n'), ((12451, 12472), 'numpy.mean', 'np.mean', (['gene_nbexons'], {}), '(gene_nbexons)\n', (12458, 12472), True, 'import numpy as np\n'), ((13124, 13140), 'numpy.sum', 'np.sum', (['gene_len'], {}), '(gene_len)\n', (13130, 13140), True, 'import numpy as np\n'), ((1718, 1745), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1743, 1745), False, 'import multiprocessing\n'), ((26228, 26262), 're.findall', 're.findall', (['"""[ATCGatcg]"""', 'sequence'], {}), "('[ATCGatcg]', sequence)\n", (26238, 26262), False, 'import re\n'), ((24101, 24149), 'intervaltree.IntervalTree.from_tuples', 'intervaltree.IntervalTree.from_tuples', (['intervals'], {}), '(intervals)\n', (24138, 24149), False, 'import intervaltree\n'), ((16497, 16524), 're.findall', 're.findall', (['"""[ATCGatcg]"""', 'x'], {}), "('[ATCGatcg]', x)\n", (16507, 16524), False, 'import re\n')] |
"""
Module containing the Company Class.
Abreviations used in code:
dfi = input dataframe
dfo = output dataframe
"""
from typing import Literal
import numpy as np
import pandas as pd
from . import config as c
class Company:
"""
Finance Data Class for listed Brazilian Companies.
Attributes
----------
identifier: int or str
A unique identifier to filter a company in as fi. Both CVM
ID or Fiscal ID can be used. CVM ID (regulator ID) must be an integer.
Fiscal ID must be a string in 'XX.XXX.XXX/XXXX-XX' format.
"""
    def __init__(
        self,
        identifier: int | str,
        acc_method: Literal["consolidated", "separate"] = "consolidated",
        acc_unit: float | str = 1.0,
        tax_rate: float = 0.34,
    ):
        """Initialize main variables.
        Parameters
        ----------
        identifier: int or str
            A unique identifier to filter a company in as fi.
            Both CVM ID or Fiscal ID can be used.
            CVM ID (regulator ID) must be an integer.
            Fiscal ID must be a string in 'XX.XXX.XXX/XXXX-XX' format.
        acc_method : {'consolidated', 'separate'}, default 'consolidated'
            Accounting method used for registering investments in subsidiaries.
        acc_unit : float or str, default 1.0
            acc_unit is a constant that will divide company account values.
            The constant can be a number greater than zero or the strings
            {'thousand', 'million', 'billion'}.
        tax_rate : float, default 0.34
            The 'tax_rate' attribute will be used to calculate some of the
            company indicators.
        """
        # set_id validates the identifier and loads the company data frame.
        self.set_id(identifier)
        # The remaining assignments go through the property setters below,
        # which validate each value before storing it.
        self.acc_method = acc_method
        self.acc_unit = acc_unit
        self.tax_rate = tax_rate
    def set_id(self, identifier: int | str):
        """
        Set a unique identifier to filter the company in as fi.
        Parameters
        ----------
        identifier: int or str
            A unique identifier to filter a company in as fi.
            Both CVM ID or Fiscal ID can be used.
            CVM ID (regulator ID) must be an integer.
            Fiscal ID must be a string in 'XX.XXX.XXX/XXXX-XX' format.
        Returns
        -------
        int or str
        Raises
        ------
        KeyError
            * If passed ``identifier`` not found in as fi.
        """
        # Create custom data frame for ID selection: one row per company with
        # both identifier columns normalized to (int, str).
        df = (
            c.main_df[["cvm_id", "fiscal_id"]]
            .drop_duplicates()
            .astype({"cvm_id": int, "fiscal_id": str})
        )
        # Accept either identifier kind and resolve the other one from it.
        if identifier in df["cvm_id"].values:
            self._cvm_id = identifier
            self._fiscal_id = df.loc[df["cvm_id"] == identifier, "fiscal_id"].item()
        elif identifier in df["fiscal_id"].values:
            self._fiscal_id = identifier
            self._cvm_id = df.loc[df["fiscal_id"] == identifier, "cvm_id"].item()
        else:
            raise KeyError("Company 'identifier' not found in database")
        # Only set company data after object identifier validation
        self._set_main_data()
@property
def acc_method(self):
"""
Get or set accounting method used for registering investments in
subsidiaries.
Parameters
----------
value : {'consolidated', 'separate'}, default 'consolidated'
Accounting method used for registering investments in subsidiaries.
Returns
-------
str
Raises
------
ValueError
* If passed ``value`` is invalid.
"""
return self._acc_unit
@acc_method.setter
def acc_method(self, value: Literal["consolidated", "separate"]):
if value in {"consolidated", "separate"}:
self._acc_method = value
else:
raise ValueError("acc_method expects 'consolidated' or 'separate'")
@property
def acc_unit(self):
"""
Get or set a constant to divide company account values.
Parameters
----------
value : float or str, default 1.0
acc_unit is a constant that will divide company account values.
The constant can be a number greater than zero or the strings
{'thousand', 'million', 'billion'}.
Returns
-------
float
Raises
------
ValueError
* If passed ``value`` is invalid.
"""
return self._acc_unit
@acc_unit.setter
def acc_unit(self, value: float | str):
if value == "thousand":
self._acc_unit = 1_000
elif value == "million":
self._acc_unit = 1_000_000
elif value == "billion":
self._acc_unit = 1_000_000_000
elif value >= 0:
self._acc_unit = value
else:
raise ValueError("Accounting Unit is invalid")
@property
def tax_rate(self):
"""
Get or set company 'tax_rate' attribute.
Parameters
----------
value : float, default 0.34
'value' will be passed to 'tax_rate' object attribute if
0 <= value <= 1.
Returns
-------
float
Raises
------
ValueError
* If passed ``value`` is invalid.
"""
return self._tax_rate
@tax_rate.setter
def tax_rate(self, value: float):
if 0 <= value <= 1:
self._tax_rate = value
else:
raise ValueError("Company 'tax_rate' value is invalid")
    def _set_main_data(self) -> None:
        """Cache this company's accounting rows and report-date bounds.

        Populates _COMP_DF (typed, acc_code-sorted rows), _NAME, and the
        _FIRST_ANNUAL / _LAST_ANNUAL / _LAST_QUARTERLY period_end dates.
        (Return annotation corrected: the method mutates the instance and
        returns None.)
        """
        # All rows for this company, with normalized dtypes.
        # NOTE(review): unit-less "datetime64" casts are deprecated/removed
        # in recent pandas -- confirm the pinned pandas version.
        self._COMP_DF = (
            c.main_df.query("cvm_id == @self._cvm_id")
            .astype(
                {
                    "co_name": str,
                    "cvm_id": np.uint32,
                    "fiscal_id": str,
                    "report_type": str,
                    "report_version": str,
                    "period_reference": "datetime64",
                    "period_begin": "datetime64",
                    "period_end": "datetime64",
                    "period_order": np.int8,
                    "acc_code": str,
                    "acc_name": str,
                    "acc_method": str,
                    "acc_fixed": bool,
                    "acc_value": float,
                    "equity_statement_column": str,
                }
            )
            .sort_values(by="acc_code", ignore_index=True)
        )
        self._NAME = self._COMP_DF["co_name"].iloc[0]
        # Oldest and newest period_end per report type, used by report().
        self._FIRST_ANNUAL = self._COMP_DF.query('report_type == "annual"')[
            "period_end"
        ].min()
        self._LAST_ANNUAL = self._COMP_DF.query('report_type == "annual"')[
            "period_end"
        ].max()
        self._LAST_QUARTERLY = self._COMP_DF.query('report_type == "quarterly"')[
            "period_end"
        ].max()
def info(self) -> pd.DataFrame:
"""Return dataframe with company info."""
company_info = {
"Name": self._NAME,
"CVM ID": self._cvm_id,
"Fiscal ID (CNPJ)": self._fiscal_id,
"Total Accounting Rows": len(self._COMP_DF.index),
"Selected Tax Rate": self._tax_rate,
"Selected Accounting Method": self._acc_method,
"Selected Accounting Unit": self._acc_unit,
"First Annual Report": self._FIRST_ANNUAL.strftime("%Y-%m-%d"),
"Last Annual Report": self._LAST_ANNUAL.strftime("%Y-%m-%d"),
"Last Quarterly Report": self._LAST_QUARTERLY.strftime("%Y-%m-%d"),
}
df = pd.DataFrame.from_dict(company_info, orient="index", columns=["Values"])
df.index.name = "Company Info"
return df
    def report(
        self,
        report_type: str,
        acc_level: int | None = None,
        num_years: int = 0,
    ) -> pd.DataFrame:
        """
        Return a DataFrame with company selected report type.
        This function generates a report representing one of the financial
        statements for the company adjusted by the attributes passed and
        returns a pandas.DataFrame with this report.
        Parameters
        ----------
        report_type : str
            Report type to be generated; one of the keys of the internal
            ``report_types`` mapping (e.g. 'assets', 'liabilities_and_equity',
            'liabilities', 'equity', 'income', 'cash_flow').
        acc_level : {None, 2, 3, 4}, default None
            Detail level to show for account codes.
            acc_level = None -> X... (default: show all accounts)
            acc_level = 2 -> X.YY (show 2 levels)
            acc_level = 3 -> X.YY.ZZ (show 3 levels)
            acc_level = 4 -> X.YY.ZZ.WW (show 4 levels)
        num_years : int, default 0
            Select how many last years to show where 0 -> show all years
        Returns
        -------
        pandas.DataFrame
        Raises
        ------
        KeyError
            * If ``report_type`` is not a known report name
        ValueError
            * If ``acc_level`` attribute is invalid
        """
        # Check input arguments.
        if acc_level not in {None, 2, 3, 4}:
            raise ValueError("acc_level expects None, 2, 3 or 4")
        df = self._COMP_DF.query("acc_method == @self._acc_method").copy()
        # Change acc_unit only for accounts different from 3.99
        # (3.99 holds earnings per share, which must not be rescaled).
        df["acc_value"] = np.where(
            df["acc_code"].str.startswith("3.99"),
            df["acc_value"],
            df["acc_value"] / self._acc_unit,
        )
        # Filter dataframe for selected acc_level: each level adds ".YY"
        # (3 characters), so level N codes are at most 3N - 2 chars long.
        if acc_level:
            acc_code_limit = acc_level * 3 - 2  # noqa
            df.query("acc_code.str.len() <= @acc_code_limit", inplace=True)
        """
        Filter dataframe for selected report_type (report type)
        df['acc_code'].str[0].unique() -> [1, 2, 3, 4, 5, 6, 7]
        The first part of 'acc_code' is the report type
        Table of reports correspondence:
        1 -> Balance Sheet - Assets
        2 -> Balance Sheet - Liabilities and Shareholders’ Equity
        3 -> Income
        4 -> Comprehensive Income
        5 -> Changes in Equity
        6 -> Cash Flow (Indirect Method)
        7 -> Added Value
        """
        report_types = {
            "assets": ["1"],
            "cash": ["1.01.01", "1.01.02"],
            "current_assets": ["1.01"],
            "non_current_assets": ["1.02"],
            "liabilities": ["2.01", "2.02"],
            "debt": ["2.01.04", "2.02.01"],
            "current_liabilities": ["2.01"],
            "non_current_liabilities": ["2.02"],
            "liabilities_and_equity": ["2"],
            "equity": ["2.03"],
            "income": ["3"],
            # "earnings_per_share": ["3.99.01.01", "3.99.02.01"],
            "earnings_per_share": ["3.99"],
            "comprehensive_income": ["4"],
            "changes_in_equity": ["5"],
            "cash_flow": ["6"],
            "added_value": ["7"],
        }
        acc_codes = report_types[report_type]
        # Build an "or"-joined query keeping rows whose acc_code starts
        # with any of the selected prefixes.
        expression = ""
        for count, acc_code in enumerate(acc_codes):
            if count > 0:
                expression += " or "
            expression += f'acc_code.str.startswith("{acc_code}")'
        df.query(expression, inplace=True)
        # remove earnings per share from income statement
        if report_type == 'income':
            df = df[~df['acc_code'].str.startswith("3.99")]
        # Flow statements get a trailing-twelve-months column.
        if report_type in {"income", "cash_flow"}:
            df = self._calculate_ttm(df)
        df.reset_index(drop=True, inplace=True)
        report_df = self._make_report(df)
        report_df.set_index(keys="acc_code", drop=True, inplace=True)
        # Show only selected years (first two columns are metadata).
        if num_years > 0:
            cols = report_df.columns.to_list()
            cols = cols[0:2] + cols[-num_years:]
            report_df = report_df[cols]
        return report_df
    def _calculate_ttm(self, dfi: pd.DataFrame) -> pd.DataFrame:
        """Append trailing-twelve-months rows to a flow-statement frame.

        Appears to compute TTM as: current year-to-date, minus the
        prior-year comparative carried in the latest quarterly report,
        plus the last annual figures -- TODO confirm against the source
        data layout.
        """
        # If the last annual report is newer than the last quarterly one,
        # a TTM column would be redundant: keep annual rows only.
        if self._LAST_ANNUAL > self._LAST_QUARTERLY:
            return dfi.query('report_type == "annual"').copy()
        # Current year-to-date: rows ending at the last quarterly date.
        df1 = dfi.query("period_end == @self._LAST_QUARTERLY").copy()
        df1.query("period_begin == period_begin.min()", inplace=True)
        # Prior-year comparative from the same report, subtracted.
        df2 = dfi.query("period_reference == @self._LAST_QUARTERLY").copy()
        df2.query("period_begin == period_begin.min()", inplace=True)
        df2["acc_value"] = -df2["acc_value"]
        # Last full annual figures, added back.
        df3 = dfi.query("period_end == @self._LAST_ANNUAL").copy()
        df_ttm = (
            pd.concat([df1, df2, df3], ignore_index=True)[["acc_code", "acc_value"]]
            .groupby(by="acc_code")
            .sum()
            .reset_index()
        )
        # Reattach metadata columns from df1 (values come from df_ttm).
        df1.drop(columns="acc_value", inplace=True)
        df_ttm = pd.merge(df1, df_ttm)
        df_ttm["report_type"] = "quarterly"
        df_ttm["period_begin"] = self._LAST_QUARTERLY - pd.DateOffset(years=1)
        df_annual = dfi.query('report_type == "annual"').copy()
        return pd.concat([df_annual, df_ttm], ignore_index=True)
def custom_report(
self,
acc_list: list[str],
num_years: int = 0,
) -> pd.DataFrame:
"""
Return a financial report from custom list of accounting codes
Creates DataFrame object with a custom list of accounting codes
adjusted by function attributes
Parameters
----------
acc_list : list[str]
A list of strings containg accounting codes to be used in report
num_years : int, default 0
Select how many last years to show where 0 -> show all years
Returns
-------
pandas.DataFrame
"""
df_as = self.report("assets")
df_le = self.report("liabilities_and_equity")
df_is = self.report("income")
df_cf = self.report("cash_flow")
dfo = pd.concat([df_as, df_le, df_is, df_cf]).query("acc_code == @acc_list")
# Show only selected years
if num_years > 0:
cols = dfo.columns.to_list()
cols = cols[0:2] + cols[-num_years:]
dfo = dfo[cols]
return dfo
@staticmethod
def _prior_values(s: pd.Series, is_prior: bool) -> pd.Series:
"""Shift row to the right in order to obtain series previous values"""
if is_prior:
arr = s.iloc[:-1].values
return np.append(np.nan, arr)
else:
return s
    def indicators(self, num_years: int = 0, is_prior: bool = True) -> pd.DataFrame:
        """
        Return company main operating indicators.
        Creates DataFrame object with company operating indicators as
        described in reference [1]
        Parameters
        ----------
        num_years : int, default 0
            Select how many last years to show where 0 -> show all years
        is_prior : bool, default True
            Divide return measurements by book values from the end of the prior
            year (see Damodaran reference).
        Returns
        -------
        pandas.Dataframe
        References
        ----------
        .. [1] <NAME>, "Return on Capital (ROC), Return on Invested
        Capital (ROIC) and Return on Equity (ROE): Measurement and
        Implications.", 2007,
        https://people.stern.nyu.edu/adamodar/pdfoles/papers/returnmeasures.pdf
        https://people.stern.nyu.edu/adamodar/New_Home_Page/datafile/variable.htm
        """
        # Gather the four statements; their index is acc_code (set by
        # report()), so df.loc["3.01"] below selects one account's row.
        df_as = self.report("assets")
        df_le = self.report("liabilities_and_equity")
        df_in = self.report("income")
        df_cf = self.report("cash_flow")
        df = pd.concat([df_as, df_le, df_in, df_cf]).drop(
            columns=["acc_fixed", "acc_name"]
        )
        # Calculate indicators series (account codes per the CVM chart of
        # accounts used throughout this module).
        revenues = df.loc["3.01"]
        gross_profit = df.loc["3.03"]
        ebit = df.loc["3.05"]
        ebt = df.loc["3.07"]
        effective_tax = df.loc["3.08"]
        depreciation_amortization = df.loc["6.01.01.04"]
        ebitda = ebit + depreciation_amortization
        operating_cash_flow = df.loc["6.01"]
        # capex = df.loc["6.02"]
        net_income = df.loc["3.11"]
        total_assets = df.loc["1"]
        total_assets_p = self._prior_values(total_assets, is_prior)
        equity = df.loc["2.03"]
        equity_p = self._prior_values(equity, is_prior)
        total_cash = df.loc["1.01.01"] + df.loc["1.01.02"]
        current_assets = df.loc["1.01"]
        current_liabilities = df.loc["2.01"]
        working_capital = current_assets - current_liabilities
        total_debt = df.loc["2.01.04"] + df.loc["2.02.01"]
        net_debt = total_debt - total_cash
        invested_capital = total_debt + equity - total_cash
        invested_capital_p = self._prior_values(invested_capital, is_prior)
        # Output Dataframe (dfo): one row per indicator, one column per period.
        dfo = pd.DataFrame(columns=df.columns)
        dfo.loc["revenues"] = revenues
        dfo.loc["operating_cash_flow"] = operating_cash_flow
        # dfo.loc["capex"] = capex
        dfo.loc["ebitda"] = ebitda
        dfo.loc["ebit"] = ebit
        dfo.loc["ebt"] = ebt
        dfo.loc["effective_tax_rate"] = -1 * effective_tax / ebt
        dfo.loc["net_income"] = net_income
        dfo.loc["total_cash"] = total_cash
        dfo.loc["total_debt"] = total_debt
        dfo.loc["net_debt"] = net_debt
        dfo.loc["working_capital"] = working_capital
        dfo.loc["invested_capital"] = invested_capital
        # Return measures use after-tax EBIT and (optionally) prior-period
        # book values, following the Damodaran reference above.
        dfo.loc["return_on_assets"] = ebit * (1 - self._tax_rate) / total_assets_p
        dfo.loc["return_on_capital"] = ebit * (1 - self._tax_rate) / invested_capital_p
        dfo.loc["return_on_equity"] = net_income / equity_p
        dfo.loc["gross_margin"] = gross_profit / revenues
        dfo.loc["ebitda_margin"] = ebitda / revenues
        dfo.loc["pre_tax_operating_margin"] = ebit / revenues
        dfo.loc["after_tax_operating_margin"] = ebit * (1 - self._tax_rate) / revenues
        dfo.loc["net_margin"] = net_income / revenues
        dfo.index.name = "Company Financial Indicators"
        # Show only the selected number of years
        if num_years > 0:
            dfo = dfo[dfo.columns[-num_years:]]
        # Since all columns are strings representing corporate year, convert them to datetime64
        dfo.columns = pd.to_datetime(dfo.columns)
        return dfo
    def _make_report(self, dfi: pd.DataFrame) -> pd.DataFrame:
        """Pivot accounting rows into one value column per period_end date.

        Returns a frame with acc_name/acc_code/acc_fixed metadata columns
        followed by one column per period (the last quarterly period is
        labelled with a " (ttm)" suffix), sorted by acc_code.
        """
        # keep only last quarterly fs
        if self._LAST_ANNUAL > self._LAST_QUARTERLY:
            df = dfi.query('report_type == "annual"').copy()
            df.query(
                "period_order == -1 or \
                period_end == @self._LAST_ANNUAL",
                inplace=True,
            )
        else:
            df = dfi.query(
                'report_type == "annual" or \
                period_end == @self._LAST_QUARTERLY'
            ).copy()
            df.query(
                "period_order == -1 or \
                period_end == @self._LAST_QUARTERLY or \
                period_end == @self._LAST_ANNUAL",
                inplace=True,
            )
        # Create output dataframe with only the index (metadata columns);
        # keep="last" retains the most recent name for each account code.
        dfo = df.sort_values(by="period_end", ascending=True)[
            ["acc_name", "acc_code", "acc_fixed"]
        ].drop_duplicates(subset="acc_code", ignore_index=True, keep="last")
        periods = list(df["period_end"].sort_values().unique())
        # Left-merge one value column per period onto the metadata frame.
        for period in periods:
            df_year = df.query("period_end == @period")[
                ["acc_value", "acc_code"]
            ].copy()
            period_str = str(np.datetime_as_string(period, unit="D"))
            if period == self._LAST_QUARTERLY:
                period_str += " (ttm)"
            df_year.rename(columns={"acc_value": period_str}, inplace=True)
            dfo = pd.merge(dfo, df_year, how="left", on=["acc_code"])
        return dfo.sort_values("acc_code", ignore_index=True)
| [
"pandas.merge",
"pandas.DataFrame.from_dict",
"numpy.append",
"pandas.DateOffset",
"pandas.DataFrame",
"numpy.datetime_as_string",
"pandas.concat",
"pandas.to_datetime"
] | [((7638, 7710), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['company_info'], {'orient': '"""index"""', 'columns': "['Values']"}), "(company_info, orient='index', columns=['Values'])\n", (7660, 7710), True, 'import pandas as pd\n'), ((12778, 12799), 'pandas.merge', 'pd.merge', (['df1', 'df_ttm'], {}), '(df1, df_ttm)\n', (12786, 12799), True, 'import pandas as pd\n'), ((13004, 13053), 'pandas.concat', 'pd.concat', (['[df_annual, df_ttm]'], {'ignore_index': '(True)'}), '([df_annual, df_ttm], ignore_index=True)\n', (13013, 13053), True, 'import pandas as pd\n'), ((16875, 16907), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'df.columns'}), '(columns=df.columns)\n', (16887, 16907), True, 'import pandas as pd\n'), ((18322, 18349), 'pandas.to_datetime', 'pd.to_datetime', (['dfo.columns'], {}), '(dfo.columns)\n', (18336, 18349), True, 'import pandas as pd\n'), ((12900, 12922), 'pandas.DateOffset', 'pd.DateOffset', ([], {'years': '(1)'}), '(years=1)\n', (12913, 12922), True, 'import pandas as pd\n'), ((14386, 14408), 'numpy.append', 'np.append', (['np.nan', 'arr'], {}), '(np.nan, arr)\n', (14395, 14408), True, 'import numpy as np\n'), ((19836, 19887), 'pandas.merge', 'pd.merge', (['dfo', 'df_year'], {'how': '"""left"""', 'on': "['acc_code']"}), "(dfo, df_year, how='left', on=['acc_code'])\n", (19844, 19887), True, 'import pandas as pd\n'), ((13876, 13915), 'pandas.concat', 'pd.concat', (['[df_as, df_le, df_is, df_cf]'], {}), '([df_as, df_le, df_is, df_cf])\n', (13885, 13915), True, 'import pandas as pd\n'), ((15659, 15698), 'pandas.concat', 'pd.concat', (['[df_as, df_le, df_in, df_cf]'], {}), '([df_as, df_le, df_in, df_cf])\n', (15668, 15698), True, 'import pandas as pd\n'), ((19615, 19654), 'numpy.datetime_as_string', 'np.datetime_as_string', (['period'], {'unit': '"""D"""'}), "(period, unit='D')\n", (19636, 19654), True, 'import numpy as np\n'), ((12544, 12589), 'pandas.concat', 'pd.concat', (['[df1, df2, df3]'], {'ignore_index': '(True)'}), 
'([df1, df2, df3], ignore_index=True)\n', (12553, 12589), True, 'import pandas as pd\n')] |
"""
Impulse response functions for the LQ permanent income model permanent and
transitory shocks.
"""
import numpy as np
import matplotlib.pyplot as plt
r = 0.05
beta = 1 / (1 + r)          # discount factor
T = 20                      # Time horizon
S = 5                       # Impulse date
sigma1 = sigma2 = 0.15      # shock standard deviations (permanent / transitory)


def time_path(permanent=False):
    """Time path of consumption and debt given shock sequence."""
    w1, w2 = np.zeros(T + 1), np.zeros(T + 1)
    b, c = np.zeros(T + 1), np.zeros(T + 1)
    # A unit shock arrives at date S+1: permanent (w1) or transitory (w2).
    (w1 if permanent else w2)[S + 1] = 1.0
    for t in range(1, T):
        b[t + 1] = b[t] - sigma2 * w2[t]
        c[t + 1] = c[t] + sigma1 * w1[t + 1] + (1 - beta) * sigma2 * w2[t + 1]
    return b, c
fig, axes = plt.subplots(2, 1)
plt.subplots_adjust(hspace=0.5)
p_args = {'lw': 2, 'alpha': 0.7}
L = 0.175

# Common decoration for both panels, plus a vertical marker at the impulse date.
for ax in axes:
    ax.grid(alpha=0.5)
    ax.set_xlabel(r'Time')
    ax.set_ylim(-L, L)
    ax.plot((S, S), (-L, L), 'k-', lw=0.5)

# (axis, permanent flag, panel title, legend position)
panels = [
    (axes[0], 0, 'impulse-response, transitory income shock', 'upper right'),
    (axes[1], 1, 'impulse-response, permanent income shock', 'lower right'),
]
steps = list(range(T + 1))
for ax, perm, panel_title, legend_loc in panels:
    b, c = time_path(permanent=perm)
    ax.set_title(panel_title)
    ax.plot(steps, c, 'g-', label="consumption", **p_args)
    ax.plot(steps, b, 'b-', label="debt", **p_args)
    ax.legend(loc=legend_loc)

plt.show()
| [
"numpy.zeros",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show"
] | [((700, 718), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (712, 718), True, 'import matplotlib.pyplot as plt\n'), ((719, 750), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.5)'}), '(hspace=0.5)\n', (738, 750), True, 'import matplotlib.pyplot as plt\n'), ((1439, 1449), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1447, 1449), True, 'import matplotlib.pyplot as plt\n'), ((379, 394), 'numpy.zeros', 'np.zeros', (['(T + 1)'], {}), '(T + 1)\n', (387, 394), True, 'import numpy as np\n'), ((402, 417), 'numpy.zeros', 'np.zeros', (['(T + 1)'], {}), '(T + 1)\n', (410, 417), True, 'import numpy as np\n'), ((424, 439), 'numpy.zeros', 'np.zeros', (['(T + 1)'], {}), '(T + 1)\n', (432, 439), True, 'import numpy as np\n'), ((446, 461), 'numpy.zeros', 'np.zeros', (['(T + 1)'], {}), '(T + 1)\n', (454, 461), True, 'import numpy as np\n')] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 <NAME> <<EMAIL>>
#
# Distributed under terms of the GNU-License license.
"""
"""
import numpy as np
def jonswap(w, Hs, Tp):
    """JONSWAP wave spectrum, IEC 61400-3.

    Arguments:
        w:  ndarray of shape (n,), frequencies to be sampled at, rad/s.
            Must start at a value >= 0.
        Hs: significant wave height, m
        Tp: wave peak period, sec

    Returns:
        (w, JS): the input frequencies and the spectral density at them.
    """
    w = np.squeeze(w)
    with np.errstate(divide='ignore'):
        wp = 2*np.pi/Tp          # peak angular frequency
        gamma = 3.3               # peak-enhancement factor (IEC 61400-3)
        # Spectral width: 0.07 below the peak, 0.09 above it.
        sigma = 0.07 * np.ones(w.shape)
        sigma[w > wp] = 0.09
        assert w[0] >= 0, 'Single side power spectrum starts with frequency greater or equal to 0, w[0]={:4.2f}'.format(w[0])
        JS1 = 5/16 * Hs**2 * wp**4 * w**-5
        JS2 = np.exp(-1.25*(w/wp)**-4) * (1-0.287*np.log(gamma))
        JS3 = gamma**(np.exp(-0.5*((w-wp)/sigma/wp)**2))
        # w == 0 makes the power-law terms blow up to inf; zero those entries.
        JS1[np.isinf(JS1)] = 0
        JS2[np.isinf(JS2)] = 0
        JS3[np.isinf(JS3)] = 0
        JS = JS1 * JS2 * JS3
    return w, JS
def spec_test1(w, c=2):
    """Analytic test spectrum for validating FFT/iFFT of spectrum and acf.

    F(w) = Fourier(f(t))
    where
        F(w) = 2c / (c**2 + w**2)
        f(t) = e^(-c|t|)

    Arguments:
        w: frequencies to be evaluated at (Hz)
        c: arbitrary real constant larger than 0
    Returns:
        w:  the input frequencies, unchanged
        Sw: psd value at the specified w
    """
    # NOTE: an earlier version also computed the approximate area under the
    # curve (sum(Sw * dw)) but never returned it; the dead code crashed for
    # single-frequency input and has been removed.
    Sw = 2*c/(c**2 + w**2)
    return w, Sw
def white_noise(w, F0=1, a=0, b=5):
    """Constant (white noise) spectrum.

    Arguments:
        w:  frequencies to be evaluated at (returned unchanged)
        F0: constant spectral density level
        a, b: band limits; kept for API compatibility (previously used to
            compute a total area that was never returned)
    Returns:
        w:  the input frequencies, unchanged
        Sw: the constant density F0
    """
    return w, F0
| [
"numpy.ones",
"numpy.log",
"numpy.squeeze",
"numpy.exp",
"numpy.sum",
"numpy.errstate",
"numpy.isinf"
] | [((403, 416), 'numpy.squeeze', 'np.squeeze', (['w'], {}), '(w)\n', (413, 416), True, 'import numpy as np\n'), ((1542, 1557), 'numpy.sum', 'np.sum', (['(Sw * dw)'], {}), '(Sw * dw)\n', (1548, 1557), True, 'import numpy as np\n'), ((426, 454), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (437, 454), True, 'import numpy as np\n'), ((527, 543), 'numpy.ones', 'np.ones', (['w.shape'], {}), '(w.shape)\n', (534, 543), True, 'import numpy as np\n'), ((765, 795), 'numpy.exp', 'np.exp', (['(-1.25 * (w / wp) ** -4)'], {}), '(-1.25 * (w / wp) ** -4)\n', (771, 795), True, 'import numpy as np\n'), ((838, 881), 'numpy.exp', 'np.exp', (['(-0.5 * ((w - wp) / sigma / wp) ** 2)'], {}), '(-0.5 * ((w - wp) / sigma / wp) ** 2)\n', (844, 881), True, 'import numpy as np\n'), ((886, 899), 'numpy.isinf', 'np.isinf', (['JS1'], {}), '(JS1)\n', (894, 899), True, 'import numpy as np\n'), ((917, 930), 'numpy.isinf', 'np.isinf', (['JS2'], {}), '(JS2)\n', (925, 930), True, 'import numpy as np\n'), ((948, 961), 'numpy.isinf', 'np.isinf', (['JS3'], {}), '(JS3)\n', (956, 961), True, 'import numpy as np\n'), ((801, 814), 'numpy.log', 'np.log', (['gamma'], {}), '(gamma)\n', (807, 814), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from bmi_tester.api import check_unit_is_valid
def test_get_var_itemsize(initialized_bmi, var_name):
"""Test getting a variable's itemsize"""
itemsize = initialized_bmi.get_var_itemsize(var_name)
assert itemsize > 0
# @pytest.mark.dependency()
def test_get_var_nbytes(initialized_bmi, var_name):
"""Test getting a variable's nbytes"""
nbytes = initialized_bmi.get_var_nbytes(var_name)
assert nbytes > 0
# @pytest.mark.dependency()
def test_get_var_location(initialized_bmi, var_name):
"""Test getting a variable's grid location"""
location = initialized_bmi.get_var_location(var_name)
assert isinstance(location, str)
assert location in ("node", "edge", "face", "none")
# @pytest.mark.dependency(depends=["test_get_var_location"])
def test_var_on_grid(initialized_bmi, var_name):
loc = initialized_bmi.get_var_location(var_name)
if initialized_bmi.get_var_location(var_name) == "none":
pytest.skip(f"var, {var_name}, is not located on a grid")
gid = initialized_bmi.get_var_grid(var_name)
if initialized_bmi.get_grid_type(gid) == "unstructured":
if loc == "node":
assert initialized_bmi.get_grid_node_count(gid) > 0
elif loc == "edge":
assert initialized_bmi.get_grid_edge_count(gid) > 0
elif loc == "face":
assert initialized_bmi.get_grid_face_count(gid) > 0
def test_get_var_type(initialized_bmi, var_name):
"""Test getting a variable's data type"""
dtype = initialized_bmi.get_var_type(var_name)
assert isinstance(dtype, str)
try:
np.empty(1, dtype=dtype)
except TypeError:
raise AssertionError(
"get_var_type: bad data type name ({dtype})".format(dtype=dtype)
)
def test_get_var_units(initialized_bmi, var_name):
    """The units reported for a variable must be a recognized unit string."""
    unit_str = initialized_bmi.get_var_units(var_name)
    assert isinstance(unit_str, str)
    assert check_unit_is_valid(unit_str)
| [
"pytest.skip",
"numpy.empty",
"bmi_tester.api.check_unit_is_valid"
] | [((1985, 2011), 'bmi_tester.api.check_unit_is_valid', 'check_unit_is_valid', (['units'], {}), '(units)\n', (2004, 2011), False, 'from bmi_tester.api import check_unit_is_valid\n'), ((984, 1041), 'pytest.skip', 'pytest.skip', (['f"""var, {var_name}, is not located on a grid"""'], {}), "(f'var, {var_name}, is not located on a grid')\n", (995, 1041), False, 'import pytest\n'), ((1628, 1652), 'numpy.empty', 'np.empty', (['(1)'], {'dtype': 'dtype'}), '(1, dtype=dtype)\n', (1636, 1652), True, 'import numpy as np\n')] |
import numpy as np
from bokeh.plotting import figure
from dq_poc.util import plot_grid
def plot(f):
    """Build a large line chart of f(x) sampled over one full period."""
    xs = np.linspace(0, 2 * 3.14159)
    fig = figure(plot_height=1500, plot_width=2000)
    fig.line(xs, f(xs))
    return fig


title = 'Coffee Machine Uptime'
content = plot_grid(2, plot(np.sin), plot(np.cos), plot(np.tan), plot(np.sin))
description = 'Availability of the coffee machine. The availability of the machine itself as well as the supply of coffee beans are measured.'
| [
"numpy.linspace",
"bokeh.plotting.figure"
] | [((109, 136), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * 3.14159)'], {}), '(0, 2 * 3.14159)\n', (120, 136), True, 'import numpy as np\n'), ((146, 187), 'bokeh.plotting.figure', 'figure', ([], {'plot_height': '(1500)', 'plot_width': '(2000)'}), '(plot_height=1500, plot_width=2000)\n', (152, 187), False, 'from bokeh.plotting import figure\n')] |
import numpy as np
import pdb
class ModelBranch:
    """A linear chain of [weights, gradient] pairs, advanced by averaging the
    gradients submitted since the previous update."""

    def __init__(self, initialW, initialGrad):
        print("initializing model")
        # Each chain entry is a [weights, gradient] pair.
        self.chain = [[initialW, initialGrad]]
        self.pendingGradients = []
        self.gradientHistory = []

    def updateModel(self):
        """Average the pending gradients, apply them to the latest weights and
        append the new [weights, gradient] pair to the chain."""
        ### TODO:: Refactor out ###
        total = np.zeros(self.chain[0][0].size)
        pending_count = len(self.pendingGradients)
        for pending in self.pendingGradients:
            total = total + pending
        averaged = total / pending_count
        ###
        updated = self.chain[-1][0] + averaged
        self.chain.append([updated, averaged])
        ### Testing to see if gradients can be linked ###
        self.gradientHistory.append(list(self.pendingGradients))
        ###
        self.pendingGradients = []

    def getWeights(self):
        """Return the most recent weights in the chain."""
        return self.chain[-1][0]

    def getPreviousGrad(self):
        """Return the gradient that produced the most recent update."""
        return self.chain[-1][1]

    def submitGradient(self, grad):
        """Queue a gradient to be averaged by the next updateModel() call."""
        self.pendingGradients.append(grad)
"numpy.zeros"
] | [((331, 362), 'numpy.zeros', 'np.zeros', (['self.chain[0][0].size'], {}), '(self.chain[0][0].size)\n', (339, 362), True, 'import numpy as np\n')] |
"""
module for crystal structure
"""
import numpy as np
import sys
import os
import shutil
import copy
import pymatflow.base as base
from pymatflow.base.atom import Atom
"""
Usage:
"""
class Crystal:
    """ an abstraction of crystal structure: a lattice cell plus a list of atoms
    usage:
      >>> a = Crystal()
    """
    def __init__(self):
        """Create an empty structure; populate via the from_* / get_* methods."""
        self.cell = None   # [[a1, a2, a3], [b1, b2, b3], [c1, c2, c3]] in Angstrom
        self.atoms = None  # list of Atom objects (cartesian coordinates, Angstrom)
        self.kpath = None  # optional k-point path, unused here

    def from_base_xyz(self, xyz):
        """Adopt cell and atoms from an already-parsed xyz object.

        :param xyz: instance of pymatflow.base.xyz.BaseXyz
        """
        self.cell = xyz.cell
        self.atoms = xyz.atoms

    def from_xyz_file(self, filepath):
        """Read cell and atoms from an .xyz file on disk.

        :param filepath: path to the xyz file
        """
        xyz = base.BaseXyz()
        xyz.get_xyz(filepath)
        self.cell = xyz.cell
        self.atoms = xyz.atoms

    def from_cif_file(self, cif):
        """Read cell and atoms from a CIF file.

        :param cif: filepath for cif file
        """
        # BUG FIX: the module has to be bound to the local name `aseio`;
        # a plain `import pymatflow.third.aseio` left `aseio` undefined.
        import pymatflow.third.aseio as aseio
        self.cell, self.atoms = aseio.read_cif(cif)

    def get_cell(self, cell):
        """
        :params cell: [[a1, a2, a3], [b1, b2, b3], [c1, c2, c3]] in unit of Anstrom
        """
        self.cell = cell

    def get_atoms(self, atoms):
        """
        :params atoms (in cartesian coordinates and unit of Anstrom)
            [
                ["C", 0.00000, 0.0000000, 0.0000],
                ["O", 1.12300, 3.3250000, 2.4893],
                ....
            ]
        """
        self.atoms = [Atom(atoms[i][0], atoms[i][1], atoms[i][2], atoms[i][3]) for i in range(len(atoms))]

    def get_cell_atoms(self, cell, atoms):
        """
        :params cell: [[a1, a2, a3], [b1, b2, b3], [c1, c2, c3]] in unit of Anstrom
        :params atoms (in cartesian coordinates and unit of Anstrom)
            [
                ["C", 0.00000, 0.0000000, 0.0000],
                ["O", 1.12300, 3.3250000, 2.4893],
                ....
            ]
        """
        self.cell = cell
        self.atoms = [Atom(atoms[i][0], atoms[i][1], atoms[i][2], atoms[i][3]) for i in range(len(atoms))]

    def cell(self):
        """
        NOTE(review): this method is shadowed by the instance attribute
        ``self.cell`` assigned in __init__, so it is unreachable on instances.
        It is kept only for backward compatibility of the class namespace.
        :return cell: cell parameters of the structure
            [[a1, a2, a3], [b1, b2, b3], [c1, c2, c3]]
        """
        return self.cell

    def cartesian(self):
        """
        :return cartesian coordinates
            [
                ["C", 0.00000, 0.0000000, 0.0000],
                ["O", 1.12300, 3.3250000, 2.4893],
                ....
            ]
        """
        # BUG FIX: use len(self.atoms) -- self.natom is only assigned by
        # remove_atom(s), so fresh objects used to raise AttributeError here.
        return [[atom.name, atom.x, atom.y, atom.z] for atom in self.atoms]

    def get_fractional(self):
        """
        :return out: fractional coordinates
            [
                ["C", 0.00000, 0.000000, 0.0000],
                ["O", 0.00000, 0.500000, 0.0000],
                ....
            ]
        """
        out = []
        latcell = np.array(self.cell)
        # Cartesian -> fractional conversion matrix is the inverse of the
        # transposed lattice matrix.
        convmat = np.linalg.inv(latcell.T)
        for i in range(len(self.atoms)):
            atom = []
            atom.append(self.atoms[i].name)
            atom = atom + list(convmat.dot(np.array([self.atoms[i].x, self.atoms[i].y, self.atoms[i].z])))
            out.append(atom)
        return out

    def volume(self):
        """
        :return volume in unit of Angstrom^3 (determinant of the cell matrix)
        """
        return np.linalg.det(self.cell)

    def build_supercell(self, n):
        """
        :param n: [n1, n2, n3] replication factors along the three lattice vectors
        :return out:
            {
                "cell": [[], [], []],
                "atoms": [
                    ["C", 0.00000, 0.000000, 0.0000],
                    ["O", 0.00000, 0.500000, 0.0000],
                    ...
                ]
            }
        Note: will not affect status of self
        """
        cell = copy.deepcopy(self.cell)
        for i in range(3):
            for j in range(3):
                cell[i][j] = n[i] * self.cell[i][j]
        atoms = copy.deepcopy(self.atoms)
        # build supercell: replicate along the three lattice vectors one by one
        for i in range(3):
            natom_now = len(atoms)
            for j in range(n[i] - 1):
                for atom in atoms[:natom_now]:
                    x = atom.x + float(j + 1) * self.cell[i][0]
                    y = atom.y + float(j + 1) * self.cell[i][1]
                    z = atom.z + float(j + 1) * self.cell[i][2]
                    atoms.append(Atom(atom.name, x, y, z))
        return {"cell": cell, "atoms": [[atom.name, atom.x, atom.y, atom.z] for atom in atoms]}

    def write_xyz(self, filepath):
        """Dump the structure to an xyz file with the cell on the comment line.

        :param filepath: output xyz file path
        """
        with open(filepath, 'w') as fout:
            fout.write("%d\n" % len(self.atoms))
            fout.write("cell: %f %f %f | %f %f %f | %f %f %f\n" % (self.cell[0][0], self.cell[0][1], self.cell[0][2], self.cell[1][0], self.cell[1][1], self.cell[1][2], self.cell[2][0], self.cell[2][1], self.cell[2][2]))
            for atom in self.atoms:
                fout.write("%s\t%f\t%f\t%f\n" % (atom.name, atom.x, atom.y, atom.z))

    def to_base_xyz(self):
        """
        :return xyz: instance of pymatflow.base.xyz.BaseXyz()
        """
        xyz = base.BaseXyz()
        xyz.file = None
        xyz.cell = self.cell
        xyz.atoms = self.atoms
        xyz.natom = len(self.atoms)
        xyz.set_species_number()
        return xyz

    def remove_atom(self, number):
        """ remove one atom from self.atoms
        :param number: an integer specifying the atom to remove (index from 0)
        """
        del self.atoms[number]
        self.natom = len(self.atoms)

    def remove_atoms(self, number):
        """ remove several atoms from self.atoms
        :param number: a list of integers specifying atoms to remove
            index start with 0
        """
        # Mark first, then sweep, so the given indices stay valid.
        for i in number:
            self.atoms[i] = None
        while None in self.atoms:
            self.atoms.remove(None)
        self.natom = len(self.atoms)
"pymatflow.base.atom.Atom",
"pymatflow.base.BaseXyz",
"numpy.linalg.det",
"numpy.array",
"numpy.linalg.inv",
"copy.deepcopy"
] | [((717, 731), 'pymatflow.base.BaseXyz', 'base.BaseXyz', ([], {}), '()\n', (729, 731), True, 'import pymatflow.base as base\n'), ((3081, 3100), 'numpy.array', 'np.array', (['self.cell'], {}), '(self.cell)\n', (3089, 3100), True, 'import numpy as np\n'), ((3120, 3144), 'numpy.linalg.inv', 'np.linalg.inv', (['latcell.T'], {}), '(latcell.T)\n', (3133, 3144), True, 'import numpy as np\n'), ((3537, 3561), 'numpy.linalg.det', 'np.linalg.det', (['self.cell'], {}), '(self.cell)\n', (3550, 3561), True, 'import numpy as np\n'), ((4019, 4043), 'copy.deepcopy', 'copy.deepcopy', (['self.cell'], {}), '(self.cell)\n', (4032, 4043), False, 'import copy\n'), ((4174, 4199), 'copy.deepcopy', 'copy.deepcopy', (['self.atoms'], {}), '(self.atoms)\n', (4187, 4199), False, 'import copy\n'), ((5455, 5469), 'pymatflow.base.BaseXyz', 'base.BaseXyz', ([], {}), '()\n', (5467, 5469), True, 'import pymatflow.base as base\n'), ((1607, 1663), 'pymatflow.base.atom.Atom', 'Atom', (['atoms[i][0]', 'atoms[i][1]', 'atoms[i][2]', 'atoms[i][3]'], {}), '(atoms[i][0], atoms[i][1], atoms[i][2], atoms[i][3])\n', (1611, 1663), False, 'from pymatflow.base.atom import Atom\n'), ((2144, 2200), 'pymatflow.base.atom.Atom', 'Atom', (['atoms[i][0]', 'atoms[i][1]', 'atoms[i][2]', 'atoms[i][3]'], {}), '(atoms[i][0], atoms[i][1], atoms[i][2], atoms[i][3])\n', (2148, 2200), False, 'from pymatflow.base.atom import Atom\n'), ((3299, 3360), 'numpy.array', 'np.array', (['[self.atoms[i].x, self.atoms[i].y, self.atoms[i].z]'], {}), '([self.atoms[i].x, self.atoms[i].y, self.atoms[i].z])\n', (3307, 3360), True, 'import numpy as np\n'), ((4643, 4667), 'pymatflow.base.atom.Atom', 'Atom', (['atom.name', 'x', 'y', 'z'], {}), '(atom.name, x, y, z)\n', (4647, 4667), False, 'from pymatflow.base.atom import Atom\n')] |
import cv2
import joblib
from skimage.feature import hog
import numpy
import pygame
# Load a pre-trained digit classifier (presumably scikit-learn, trained on
# HOG features of 28x28 MNIST-style digits -- TODO confirm against training script).
clf = joblib.load("digits.pkl")
pygame.init()
screen = pygame.display.set_mode((600, 400))
screen.fill((255, 255, 255))  # white drawing canvas
pygame.display.set_caption("Draw the Number")
loop = True
# Event loop: draw black circles while the left mouse button is held;
# closing the window saves the canvas to num_rec.jpg and exits the loop.
while loop:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.image.save(screen, 'num_rec.jpg')
            loop = False
    x, y = pygame.mouse.get_pos()
    if pygame.mouse.get_pressed() == (1, 0, 0):  # left button only
        pygame.draw.circle(screen, (0, 0, 0), (x, y), 10)
    pygame.display.update()
pygame.quit()
# Reload the drawing and preprocess: grayscale, blur, inverse binary
# threshold so the (dark) strokes become white foreground.
im = cv2.imread("num_rec.jpg")
im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
im_gray = cv2.GaussianBlur(im_gray, (5, 5), 0)
ret, im_th = cv2.threshold(im_gray, 90, 255, cv2.THRESH_BINARY_INV)
# One external contour per drawn digit; take its bounding box.
ctrs, hier = cv2.findContours(im_th.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
rects = [cv2.boundingRect(ctr) for ctr in ctrs]
for rect in rects:
    cv2.rectangle(im, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), (0, 255, 0), 3)
    # Crop a square region 1.6x the box height, centered on the box,
    # so the digit has some margin before resizing.
    leng = int(rect[3] * 1.6)
    pt1 = int(rect[1] + rect[3] // 2 - leng // 2)
    pt2 = int(rect[0] + rect[2] // 2 - leng // 2)
    roi = im_th[pt1:pt1+leng, pt2:pt2+leng]
    height, width = roi.shape
    # Skip degenerate crops (box near the image border can produce empty slices).
    if height != 0 and width != 0:
        roi = cv2.resize(roi, (28, 28), interpolation=cv2.INTER_AREA)
        roi = cv2.dilate(roi, (3, 3))
        # HOG feature vector must match the features the classifier was trained on.
        roi_hog_fd = hog(roi, orientations=9, pixels_per_cell=(14, 14), cells_per_block=(1, 1), visualize=False)
        nbr = clf.predict(numpy.array([roi_hog_fd], 'float64'))
        # Draw the predicted digit at the top-left corner of its bounding box.
        cv2.putText(im, str(int(nbr[0])), (rect[0], rect[1]), cv2.FONT_HERSHEY_DUPLEX, 2, (0, 0, 0), 3)
cv2.imshow("Predicted Number", im)
cv2.waitKey() | [
"cv2.rectangle",
"pygame.mouse.get_pressed",
"pygame.init",
"pygame.quit",
"cv2.imshow",
"numpy.array",
"cv2.threshold",
"pygame.display.set_mode",
"pygame.mouse.get_pos",
"pygame.image.save",
"joblib.load",
"pygame.display.update",
"cv2.waitKey",
"cv2.cvtColor",
"cv2.resize",
"cv2.Gau... | [((90, 115), 'joblib.load', 'joblib.load', (['"""digits.pkl"""'], {}), "('digits.pkl')\n", (101, 115), False, 'import joblib\n'), ((117, 130), 'pygame.init', 'pygame.init', ([], {}), '()\n', (128, 130), False, 'import pygame\n'), ((141, 176), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(600, 400)'], {}), '((600, 400))\n', (164, 176), False, 'import pygame\n'), ((206, 251), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Draw the Number"""'], {}), "('Draw the Number')\n", (232, 251), False, 'import pygame\n'), ((598, 611), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (609, 611), False, 'import pygame\n'), ((618, 643), 'cv2.imread', 'cv2.imread', (['"""num_rec.jpg"""'], {}), "('num_rec.jpg')\n", (628, 643), False, 'import cv2\n'), ((655, 691), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2GRAY'], {}), '(im, cv2.COLOR_BGR2GRAY)\n', (667, 691), False, 'import cv2\n'), ((702, 738), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['im_gray', '(5, 5)', '(0)'], {}), '(im_gray, (5, 5), 0)\n', (718, 738), False, 'import cv2\n'), ((753, 807), 'cv2.threshold', 'cv2.threshold', (['im_gray', '(90)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(im_gray, 90, 255, cv2.THRESH_BINARY_INV)\n', (766, 807), False, 'import cv2\n'), ((1692, 1726), 'cv2.imshow', 'cv2.imshow', (['"""Predicted Number"""', 'im'], {}), "('Predicted Number', im)\n", (1702, 1726), False, 'import cv2\n'), ((1727, 1740), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (1738, 1740), False, 'import cv2\n'), ((294, 312), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (310, 312), False, 'import pygame\n'), ((441, 463), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (461, 463), False, 'import pygame\n'), ((574, 597), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (595, 597), False, 'import pygame\n'), ((907, 928), 'cv2.boundingRect', 'cv2.boundingRect', (['ctr'], {}), '(ctr)\n', (923, 928), False, 'import cv2\n'), ((970, 
1068), 'cv2.rectangle', 'cv2.rectangle', (['im', '(rect[0], rect[1])', '(rect[0] + rect[2], rect[1] + rect[3])', '(0, 255, 0)', '(3)'], {}), '(im, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]\n ), (0, 255, 0), 3)\n', (983, 1068), False, 'import cv2\n'), ((471, 497), 'pygame.mouse.get_pressed', 'pygame.mouse.get_pressed', ([], {}), '()\n', (495, 497), False, 'import pygame\n'), ((520, 569), 'pygame.draw.circle', 'pygame.draw.circle', (['screen', '(0, 0, 0)', '(x, y)', '(10)'], {}), '(screen, (0, 0, 0), (x, y), 10)\n', (538, 569), False, 'import pygame\n'), ((1317, 1372), 'cv2.resize', 'cv2.resize', (['roi', '(28, 28)'], {'interpolation': 'cv2.INTER_AREA'}), '(roi, (28, 28), interpolation=cv2.INTER_AREA)\n', (1327, 1372), False, 'import cv2\n'), ((1387, 1410), 'cv2.dilate', 'cv2.dilate', (['roi', '(3, 3)'], {}), '(roi, (3, 3))\n', (1397, 1410), False, 'import cv2\n'), ((1432, 1527), 'skimage.feature.hog', 'hog', (['roi'], {'orientations': '(9)', 'pixels_per_cell': '(14, 14)', 'cells_per_block': '(1, 1)', 'visualize': '(False)'}), '(roi, orientations=9, pixels_per_cell=(14, 14), cells_per_block=(1, 1),\n visualize=False)\n', (1435, 1527), False, 'from skimage.feature import hog\n'), ((364, 404), 'pygame.image.save', 'pygame.image.save', (['screen', '"""num_rec.jpg"""'], {}), "(screen, 'num_rec.jpg')\n", (381, 404), False, 'import pygame\n'), ((1550, 1586), 'numpy.array', 'numpy.array', (['[roi_hog_fd]', '"""float64"""'], {}), "([roi_hog_fd], 'float64')\n", (1561, 1586), False, 'import numpy\n')] |
#%%
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
# Cell 1: a single sine curve.
x = np.linspace(0, 20, 100)
plt.plot(x, np.sin(x))
plt.show()
# %%
# Cell 2: sine (blue) and cosine (red) on the same axes.
x = np.arange(0,9,0.1)
y = np.sin(x)
y1 = np.cos(x)
plt.title("y=sin(x)")  # fixed typo: was "y=xin(x)"
plt.xlabel("x")
plt.ylabel("y")
plt.plot(x,y,"-b",x,y1,"-r")
plt.show()
# %%
# Cell 3: histogram with explicit bin edges.
a = np.array([22,87,5,43,56,73,55,54,11,20,51,5,79,31,27])
plt.hist(a, bins = [0,20,40,60,80,100])
plt.title("histogram")
plt.show()
# %%
# Cell 4: use a CJK-capable font so the Chinese title renders correctly.
plt.rcParams['font.family']=['STFangsong']
x = np.arange(0,3* np.pi,0.1)
y = np.sin(x)
plt.title("测试")
plt.xlabel("x")
plt.ylabel("y")
plt.plot(x,y,"-r")
plt.show()
# %%
| [
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.linspace",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((85, 108), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', '(100)'], {}), '(0, 20, 100)\n', (96, 108), True, 'import numpy as np\n'), ((132, 142), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (140, 142), True, 'import matplotlib.pyplot as plt\n'), ((153, 173), 'numpy.arange', 'np.arange', (['(0)', '(9)', '(0.1)'], {}), '(0, 9, 0.1)\n', (162, 173), True, 'import numpy as np\n'), ((176, 185), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (182, 185), True, 'import numpy as np\n'), ((191, 200), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (197, 200), True, 'import numpy as np\n'), ((201, 222), 'matplotlib.pyplot.title', 'plt.title', (['"""y=xin(x)"""'], {}), "('y=xin(x)')\n", (210, 222), True, 'import matplotlib.pyplot as plt\n'), ((223, 238), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (233, 238), True, 'import matplotlib.pyplot as plt\n'), ((239, 254), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (249, 254), True, 'import matplotlib.pyplot as plt\n'), ((255, 288), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""-b"""', 'x', 'y1', '"""-r"""'], {}), "(x, y, '-b', x, y1, '-r')\n", (263, 288), True, 'import matplotlib.pyplot as plt\n'), ((284, 294), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (292, 294), True, 'import matplotlib.pyplot as plt\n'), ((305, 373), 'numpy.array', 'np.array', (['[22, 87, 5, 43, 56, 73, 55, 54, 11, 20, 51, 5, 79, 31, 27]'], {}), '([22, 87, 5, 43, 56, 73, 55, 54, 11, 20, 51, 5, 79, 31, 27])\n', (313, 373), True, 'import numpy as np\n'), ((361, 403), 'matplotlib.pyplot.hist', 'plt.hist', (['a'], {'bins': '[0, 20, 40, 60, 80, 100]'}), '(a, bins=[0, 20, 40, 60, 80, 100])\n', (369, 403), True, 'import matplotlib.pyplot as plt\n'), ((403, 425), 'matplotlib.pyplot.title', 'plt.title', (['"""histogram"""'], {}), "('histogram')\n", (412, 425), True, 'import matplotlib.pyplot as plt\n'), ((427, 437), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (435, 
437), True, 'import matplotlib.pyplot as plt\n'), ((491, 519), 'numpy.arange', 'np.arange', (['(0)', '(3 * np.pi)', '(0.1)'], {}), '(0, 3 * np.pi, 0.1)\n', (500, 519), True, 'import numpy as np\n'), ((522, 531), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (528, 531), True, 'import numpy as np\n'), ((546, 561), 'matplotlib.pyplot.title', 'plt.title', (['"""测试"""'], {}), "('测试')\n", (555, 561), True, 'import matplotlib.pyplot as plt\n'), ((564, 579), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (574, 579), True, 'import matplotlib.pyplot as plt\n'), ((581, 596), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (591, 596), True, 'import matplotlib.pyplot as plt\n'), ((598, 618), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""-r"""'], {}), "(x, y, '-r')\n", (606, 618), True, 'import matplotlib.pyplot as plt\n'), ((618, 628), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (626, 628), True, 'import matplotlib.pyplot as plt\n'), ((121, 130), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (127, 130), True, 'import numpy as np\n')] |
from ClusterDataGen.NetworkToTree import *
from ClusterDataGen.LGT_network import *
from ClusterDataGen.tree_to_newick import *
from datetime import datetime
import pandas as pd
import numpy as np
import pickle
import time
import sys
def make_data_fun(net_num, unique, partial, num_trees, train_data=True):
    """Generate one LGT network plus its tree set and pickle the results.

    :param net_num: integer job id, used in log lines and output file names.
    :param unique: truthy -> "UniqueResults" mode with a tight reticulation
        bound; falsy -> "NonUniqueResults" mode.
    :param partial: whether partial trees are produced (affects file names and
        is forwarded to net_to_tree / tree_to_newick_fun).
    :param num_trees: number of trees to extract, or None (unique train mode).
    :param train_data: True -> also build X/Y features and save under Train/;
        False -> save network + metadata under Test/.
    :return: dict with the network (and data/metadata), or None when no
        suitable network was found within the time budget.

    NOTE(review): simulation, reticulations, leaves, net_to_tree,
    tree_to_newick_fun, data_gen and network_cherries come from the
    star-imports at the top of the file -- presumably LGT_network /
    NetworkToTree helpers; their exact semantics are not visible here.
    """
    # PARAMS OF LGT GENERATOR
    beta = 1
    distances = True
    comb_measure = True
    # Unique training data draws all displayed trees (num_trees=None) and
    # bounds the reticulation number more tightly.
    if train_data and unique:
        num_trees = None
        ret_num_max = 9
    else:
        ret_num_max = 100
    if unique:
        map_name = "UniqueResults"
    else:
        map_name = "NonUniqueResults"
    if unique and train_data:
        tree_info = ""
    else:
        tree_info = f"_T{num_trees}"
    now = datetime.now().time()
    st = time.time()
    # choose n
    n = np.random.randint(10, 120)
    # LGT rate alpha scales inversely with network size in unique mode.
    if unique:
        if n < 50:
            alpha = 0.3
        elif n > 80:
            alpha = 0.1
        else:
            alpha = 0.2
    else:
        alpha = 0.2
    # make network
    network_gen_st = time.time()
    # Keep sampling networks until the reticulation number falls in the
    # accepted window; give up after roughly one minute.
    while True:
        print(f"JOB {net_num} ({now}): Start creating NETWORK (n = {n})")
        net = simulation(n, alpha, 1, beta)
        ret_num = len(reticulations(net))
        if unique and train_data:
            if 1 < ret_num < ret_num_max+1:
                break
        elif np.ceil(np.log2(num_trees)) < ret_num < ret_num_max+1:
            break
        if time.time() - network_gen_st > 60*1:
            print(f"JOB {net_num} ({now}): FAILED (n = {n}, alpha = {alpha})")
            return None
    net_nodes = int(len(net.nodes))
    now = datetime.now().time()
    print(f"JOB {net_num} ({now}): Start creating TREE SET (N = {net_nodes}, R = {ret_num})")
    num_rets = len(reticulations(net))
    num_leaves = len(leaves(net))
    tree_set, tree_lvs = net_to_tree(net, num_trees, distances=distances, partial=partial, net_lvs=num_leaves)
    tree_to_newick_fun(tree_set, net_num, train_data=train_data, partial=partial,
                       map_name=map_name, tree_info=tree_info)
    now = datetime.now().time()
    # With num_trees=None every displayed tree was taken: 2**num_rets of them.
    if num_trees is None:
        num_trees = 2 ** num_rets
    metadata_index = ["rets", "nodes", "net_leaves", "chers", "ret_chers", "trees", "n", "alpha", "beta",
                      "tree_lvs_10", "tree_lvs_50", "tree_lvs_90", "runtime"]
    # tree_set information
    tree_lvs_10 = np.quantile(tree_lvs, 0.1)
    tree_lvs_50 = np.quantile(tree_lvs, 0.5)
    tree_lvs_90 = np.quantile(tree_lvs, 0.9)
    if train_data:
        # Training mode: also compute the feature/label matrices and the
        # cherry counts, then pickle everything under Data/Train/.
        print(f"JOB {net_num} ({now}): Start creating DATA SET (N = {net_nodes}, R = {ret_num}, T = {num_trees})")
        X, Y, num_cher, num_ret_cher = data_gen(net, tree_set, num_net=net_num, distances=distances, comb_measure=comb_measure)
        print(f"JOB {net_num} ({now}): DATA GENERATION NETWORK FINISHED (N = {net_nodes}, R = {ret_num}, T = {num_trees})")
        metadata = pd.Series([num_rets, net_nodes, num_leaves, num_cher, num_ret_cher, len(tree_set), n, alpha, beta,
                              tree_lvs_10, tree_lvs_50, tree_lvs_90, time.time() - st],
                             index=metadata_index,
                             dtype=float)
        output = {"net": net, "X": X, "Y": Y, "metadata": metadata}
        if partial:
            with open(
                    f"ClusterDataGen/Data/Train/{map_name}/inst_results/LGT_part_tree_data{tree_info}_{net_num}.pickle", "wb") as handle:
                pickle.dump(output, handle)
        else:
            with open(
                    f"ClusterDataGen/Data/Train/{map_name}/inst_results/LGT_tree_data{tree_info}_{net_num}.pickle", "wb") as handle:
                pickle.dump(output, handle)
    else:
        # Test mode: only the network and its metadata are stored, under Data/Test/.
        net_cher, net_ret_cher = network_cherries(net)
        metadata = pd.Series([num_rets, net_nodes, num_leaves, len(net_cher), len(net_ret_cher), len(tree_set), n,
                              alpha, beta, tree_lvs_10, tree_lvs_50, tree_lvs_90, time.time() - st],
                             index=metadata_index,
                             dtype=float)
        output = {"net": net, "metadata": metadata}
        if partial:
            with open(
                    f"ClusterDataGen/Data/Test/inst_results/LGT_part_tree_data{tree_info}_{net_num}.pickle", "wb") as handle:
                pickle.dump(output, handle)
        else:
            with open(
                    f"ClusterDataGen/Data/Test/inst_results/LGT_tree_data{tree_info}_{net_num}.pickle", "wb") as handle:
                pickle.dump(output, handle)
    now = datetime.now().time()
    print(f"JOB {net_num} ({now}): FINISHED in {np.round(time.time() - st, 3)}s (N = {net_nodes}, R = {ret_num}, T = {num_trees})"
          f"[{np.round(tree_lvs_10, 2)}, {np.round(tree_lvs_50, 2)}, {np.round(tree_lvs_90, 2)}]")
    return output
if __name__ == "__main__":
    # CLI: <net_num> <unique> [num_trees]
    net_num = int(sys.argv[1])
    unique = int(sys.argv[2])
    num_trees = int(sys.argv[3]) if len(sys.argv) == 4 else None
    make_data_fun(net_num, unique, partial=True, num_trees=num_trees, train_data=True)
| [
"pickle.dump",
"datetime.datetime.now",
"numpy.random.randint",
"numpy.quantile",
"numpy.log2",
"time.time",
"numpy.round"
] | [((755, 766), 'time.time', 'time.time', ([], {}), '()\n', (764, 766), False, 'import time\n'), ((790, 816), 'numpy.random.randint', 'np.random.randint', (['(10)', '(120)'], {}), '(10, 120)\n', (807, 816), True, 'import numpy as np\n'), ((1030, 1041), 'time.time', 'time.time', ([], {}), '()\n', (1039, 1041), False, 'import time\n'), ((2372, 2398), 'numpy.quantile', 'np.quantile', (['tree_lvs', '(0.1)'], {}), '(tree_lvs, 0.1)\n', (2383, 2398), True, 'import numpy as np\n'), ((2417, 2443), 'numpy.quantile', 'np.quantile', (['tree_lvs', '(0.5)'], {}), '(tree_lvs, 0.5)\n', (2428, 2443), True, 'import numpy as np\n'), ((2462, 2488), 'numpy.quantile', 'np.quantile', (['tree_lvs', '(0.9)'], {}), '(tree_lvs, 0.9)\n', (2473, 2488), True, 'import numpy as np\n'), ((724, 738), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (736, 738), False, 'from datetime import datetime\n'), ((1602, 1616), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1614, 1616), False, 'from datetime import datetime\n'), ((2059, 2073), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2071, 2073), False, 'from datetime import datetime\n'), ((4535, 4549), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4547, 4549), False, 'from datetime import datetime\n'), ((1415, 1426), 'time.time', 'time.time', ([], {}), '()\n', (1424, 1426), False, 'import time\n'), ((3442, 3469), 'pickle.dump', 'pickle.dump', (['output', 'handle'], {}), '(output, handle)\n', (3453, 3469), False, 'import pickle\n'), ((3656, 3683), 'pickle.dump', 'pickle.dump', (['output', 'handle'], {}), '(output, handle)\n', (3667, 3683), False, 'import pickle\n'), ((4295, 4322), 'pickle.dump', 'pickle.dump', (['output', 'handle'], {}), '(output, handle)\n', (4306, 4322), False, 'import pickle\n'), ((4497, 4524), 'pickle.dump', 'pickle.dump', (['output', 'handle'], {}), '(output, handle)\n', (4508, 4524), False, 'import pickle\n'), ((4702, 4726), 'numpy.round', 'np.round', (['tree_lvs_10', '(2)'], 
{}), '(tree_lvs_10, 2)\n', (4710, 4726), True, 'import numpy as np\n'), ((4730, 4754), 'numpy.round', 'np.round', (['tree_lvs_50', '(2)'], {}), '(tree_lvs_50, 2)\n', (4738, 4754), True, 'import numpy as np\n'), ((4758, 4782), 'numpy.round', 'np.round', (['tree_lvs_90', '(2)'], {}), '(tree_lvs_90, 2)\n', (4766, 4782), True, 'import numpy as np\n'), ((1339, 1357), 'numpy.log2', 'np.log2', (['num_trees'], {}), '(num_trees)\n', (1346, 1357), True, 'import numpy as np\n'), ((3064, 3075), 'time.time', 'time.time', ([], {}), '()\n', (3073, 3075), False, 'import time\n'), ((3946, 3957), 'time.time', 'time.time', ([], {}), '()\n', (3955, 3957), False, 'import time\n'), ((4614, 4625), 'time.time', 'time.time', ([], {}), '()\n', (4623, 4625), False, 'import time\n')] |
import numpy as np
from sklearn.utils import indexable
from sklearn.utils.validation import _num_samples
from sklearn.model_selection._split import _BaseKFold
from hypernets.utils import logging
logger = logging.get_logger(__name__)
class PrequentialSplit(_BaseKFold):
    # Strategy identifiers accepted by ``split``.
    STRATEGY_PREQ_BLS = 'preq-bls'
    STRATEGY_PREQ_SLID_BLS = 'preq-slid-bls'
    STRATEGY_PREQ_BLS_GAP = 'preq-bls-gap'
    # NOTE(review): because the constants above precede it, this string is a
    # plain expression statement and is NOT picked up as the class __doc__.
    """
    Parameters
    ----------
    strategy : Strategies of requential approach applied in blocks for performance estimation
        `preq-bls`: The data is split into n blocks. In the initial iteration, only the first two blocks
        are used, the first for training and the second for test. In the next iteration, the second block
        is merged with the first and the third block is used for test. This procedure continues until all
        blocks are tested.
        `preq-slid-bls`: Instead of merging the blocks after each iteration (growing window), one can forget
        the older blocks in a sliding window fashion. This idea is typically adopted when past data becomes
        deprecated, which is common in non-stationary environments.
        `preq-bls-gap`: This illustrates a prequential approach applied in blocks, where a gap block is
        introduced. The rationale behind this idea is to increase the independence between training and
        test sets.
    n_splits : int, default=5.
        Number of splits. Must be at least 2.
    max_train_size : int, default=None.
        Maximum size for a single training set.
    test_size : int, default=None.
        Number of samples in each test set. Defaults to
        ``(n_samples - base_size) / (n_splits + 1)``.
    gap_size : int, default=0. For strategy `preq-bls`.
        Number of samples to exclude from the end of each train set before the test set.
    References
    ----------
    <NAME>, <NAME>, <NAME>. Evaluating time series forecasting models: An empirical study on performance
    estimation methods[J]. Machine Learning, 2020, 109(11): 1997-2028.
    """
    def __init__(self, strategy='preq-bls', base_size=None, n_splits=5, stride=1, *, max_train_size=None,
                 test_size=None, gap_size=0):
        # sklearn's _BaseKFold requires n_splits >= 2, hence max(n_splits, 2);
        # the raw user value is preserved in self.n_folds below.
        super(PrequentialSplit, self).__init__(n_splits=max(n_splits, 2), shuffle=False, random_state=None)
        self.max_train_size = max_train_size
        self.test_size = test_size
        self.gap_size = gap_size
        self.base_size = base_size
        self.stride = stride  # emit only every `stride`-th fold
        self.n_folds = n_splits
        self.strategy = strategy
        self.fold_size = None
    def split(self, X, y=None, groups=None):
        """Generate indices to split data into training and test set.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like of shape (n_samples,)
            Always ignored, exists for compatibility.
        groups : array-like of shape (n_samples,)
            Always ignored, exists for compatibility.
        Yields
        ------
        train : ndarray
            The training set indices for that split.
        test : ndarray
            The testing set indices for that split.
        """
        X, y, groups = indexable(X, y, groups)
        n_samples = _num_samples(X)
        n_splits = self.n_splits
        n_folds = n_splits + 1
        gap_size = self.gap_size
        # `base` is the initial training prefix never used for testing: the
        # optional user-supplied base_size plus the remainder samples that do
        # not fit into equally sized folds.
        base = 0
        if self.base_size is not None and self.base_size > 0:
            base = self.base_size
        base += n_samples % n_folds
        if self.test_size is not None and self.test_size > 0:
            test_size = self.test_size
        else:
            test_size = (n_samples - base) // n_folds
            # NOTE(review): side effect — the derived fold size is written back
            # to the instance, so subsequent split() calls will reuse it.
            self.test_size = test_size
        if self.n_folds > n_samples:
            raise ValueError(
                ("Cannot have number of folds ={0} greater"
                 " than the number of samples: {1}.").format(n_folds, n_samples))
        first_test = n_samples - test_size*n_splits
        if first_test < 0:
            raise ValueError(
                ("Too many splits={0} for number of samples"
                 "={1} with test_size={2}").format(n_splits, n_samples, test_size))
        indices = np.arange(n_samples)
        logger.info(f'n_folds:{self.n_folds}')
        logger.info(f'test_size:{test_size}')
        if self.strategy == PrequentialSplit.STRATEGY_PREQ_BLS_GAP:
            # Gap strategy: start testing one extra block later so a full
            # block can sit between training and test data.
            test_starts = range(first_test * 2 + base, n_samples, test_size)
        else:
            test_starts = range(first_test + base, n_samples, test_size)
        last_step = -1
        for fold, test_start in enumerate(test_starts):
            # Only the first fold of each `stride`-sized group is yielded.
            if last_step == fold // self.stride:
                # skip this fold
                continue
            else:
                last_step = fold // self.stride
            if self.strategy == PrequentialSplit.STRATEGY_PREQ_BLS:
                # Growing window: train on everything before the (gapped) test block.
                train_end = test_start - gap_size
                if self.max_train_size and self.max_train_size < train_end:
                    yield (indices[train_end - self.max_train_size:train_end],
                           indices[test_start:test_start + test_size])
                else:
                    yield (indices[:max(train_end, 0)],
                           indices[test_start:test_start + test_size])
            elif self.strategy == PrequentialSplit.STRATEGY_PREQ_SLID_BLS:
                # Sliding window: forget blocks older than the window.
                if self.max_train_size and self.max_train_size < test_start:
                    yield (indices[test_start - self.max_train_size:test_start],
                           indices[test_start:test_start + test_size])
                else:
                    yield (indices[test_start - (test_size + base):test_start],
                           indices[test_start:test_start + test_size])
            elif self.strategy == PrequentialSplit.STRATEGY_PREQ_BLS_GAP:
                # Leave one whole test-sized block between train and test.
                yield (indices[:test_start - test_size], indices[test_start:test_start + test_size])
            else:
                raise ValueError(f'{self.strategy} is not supported') | [
"sklearn.utils.indexable",
"sklearn.utils.validation._num_samples",
"hypernets.utils.logging.get_logger",
"numpy.arange"
] | [((205, 233), 'hypernets.utils.logging.get_logger', 'logging.get_logger', (['__name__'], {}), '(__name__)\n', (223, 233), False, 'from hypernets.utils import logging\n'), ((3521, 3544), 'sklearn.utils.indexable', 'indexable', (['X', 'y', 'groups'], {}), '(X, y, groups)\n', (3530, 3544), False, 'from sklearn.utils import indexable\n'), ((3565, 3580), 'sklearn.utils.validation._num_samples', '_num_samples', (['X'], {}), '(X)\n', (3577, 3580), False, 'from sklearn.utils.validation import _num_samples\n'), ((4517, 4537), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (4526, 4537), True, 'import numpy as np\n')] |
"""Command line tools for optimisation."""
import datetime
import json
import logging
from pathlib import Path
from typing import List
import click
import matplotlib.pyplot as plt
import numpy as np
from hoqunm.data_tools.base import (EXAMPLE_FILEPATH_OPTIMISATION_COMPUTATION,
EXAMPLE_FILEPATH_OPTIMISATION_SIMULATION,
EXAMPLE_MODEL_NO_CART,
OUTPUT_DIR_OPTIMISATION)
from hoqunm.data_tools.modelling import HospitalModel
from hoqunm.optimisation.optimators import Optimator
from hoqunm.simulation.evaluators import (EvaluationResults,
SimulationEvaluator,
SimulationEvaluatorSpecs)
from hoqunm.utils.utils import get_logger
# pylint: disable=too-many-locals
# pylint: disable=broad-except
def _create_plots(results: List[EvaluationResults], rejection: bool,
                  profit: bool, logger: logging.Logger,
                  utilisation_constraints: np.ndarray,
                  rejection_costs: np.ndarray, bed_costs: np.ndarray,
                  eps_rejection: float, eps_profit_rejection: float,
                  output_dir: Path) -> None:
    """Pick optima from *results* and save 3-D result plots to *output_dir*.

    Runs the rejection-minimisation and/or profit-maximisation analysis
    (selected via the *rejection*/*profit* flags), logs the chosen optimum,
    and renders the result cloud from several viewing angles. Failures in
    either analysis are logged, not raised.
    """
    optimator = Optimator(results)
    if rejection:
        logger.info("Get optimum w.r.t. minimal rejection.")
        try:
            # Index and objective values of the utilisation-constrained optimum.
            idx, values = optimator.utilisation_restricted(
                utilisation_constraints=utilisation_constraints)
            result = optimator.results[idx]
            # NOTE(review): "opitimum" typo in the log messages below is kept
            # unchanged (runtime strings).
            logger.info(f"Chosen opitimum for rejection: \n"
                        f"capacities: {result.hospital_specs.capacities}\n"
                        f"rejection: {result.rejection}\n"
                        f"utilisation: {result.utilisation()}\n"
                        f"profit: {result.profit()}\n"
                        f"profit_rejection: {result.profit_rejection()}")
            logger.info(
                f"Chosen opitimum for rejection range: "
                f"{values[idx] - eps_rejection}, {values[idx]}, {values[idx] + eps_rejection}"
            )
            # Render the same result cloud from several camera positions.
            for angle in [3, 15]:
                for rotation in [300, 60]:
                    # Indices of results within eps of the optimum (highlighted).
                    idx_ = [
                        i for i, v in enumerate(values)
                        if abs(values[idx] - v) < eps_rejection and i != idx
                    ]
                    optimator.plot_results(
                        op_idx=idx,
                        values=values,
                        op_idx_rel=idx_,
                        color_map="viridis_r",
                        angle=angle,
                        rotation=rotation,
                        savepath=output_dir,
                        filename=f"rejection - "
                        f"utilisation_constraints[{utilisation_constraints}] - "
                        f"angle[{angle}]_rotation[{rotation}]")
                    plt.close()  # free the figure between renders
        except ValueError:
            logger.warning(f"Failed for rejection.")
    if profit:
        try:
            idx, values = optimator.profit_rejection(
                bed_costs=bed_costs, rejection_costs=rejection_costs)
            result = optimator.results[idx]
            logger.info(f"Chosen opitimum for profit_rejection: \n"
                        f"capacities: {result.hospital_specs.capacities}\n"
                        f"rejection: {result.rejection}\n"
                        f"utilisation: {result.utilisation()}\n"
                        f"profit: {result.profit()}\n"
                        f"profit_rejection: {result.profit_rejection()}")
            logger.info(
                f"Chosen opitimum for profit_rejection range: "
                f"{values[idx] - eps_profit_rejection}, {values[idx]}, "
                f"{values[idx] + eps_profit_rejection}")
            for angle in [3, 15]:
                for rotation in [300, 60]:
                    idx_ = [
                        i for i, v in enumerate(values)
                        if abs(values[idx] -
                               v) < eps_profit_rejection and i != idx
                    ]
                    optimator.plot_results(
                        op_idx=idx,
                        values=values,
                        op_idx_rel=idx_,
                        color_map="viridis_r",
                        angle=angle,
                        rotation=rotation,
                        savepath=output_dir,
                        filename=
                        f"profit_rejection - angle[{angle}]_rotation[{rotation}]"
                    )
                    plt.close()
        except ValueError as e:
            logger.warning(f"Failed for profit. {e}")
    logger.info("Finished optimisation computation.")
def _get_evaluation_results(results_path: Path,
                            logger: logging.Logger) -> List[EvaluationResults]:
    """Load every ``EvaluationResults`` JSON file found in *results_path*.

    Files that cannot be parsed are logged as warnings and skipped, so a
    single corrupt file does not abort the whole collection.
    """
    loaded: List[EvaluationResults] = []
    for result_file in results_path.glob("*.json"):
        try:
            loaded.append(EvaluationResults.load(result_file))
        except BaseException as e:
            logger.warning(f"Not able to load {result_file}. {e}.")
    return loaded
@click.command()
@click.option(
    "--specsfile",
    "-s",
    type=click.Path(exists=True),
    default=str(EXAMPLE_FILEPATH_OPTIMISATION_SIMULATION),
    required=True,
    help="Filepath to specifications for model building. "
    f"Default can be found in {EXAMPLE_FILEPATH_OPTIMISATION_SIMULATION}.")
@click.option("--model",
              "-m",
              type=click.Choice(["1", "2", "3"]),
              default="1",
              required=True,
              help="Model to evaluate. You can choose between 1,2 and 3.\n"
              "Default: 1")
@click.option(
    "--waiting",
    "-w",
    is_flag=True,
    help="If waiting shall be assessed according to given waiting map.")
@click.option("--rejection",
              "-r",
              is_flag=True,
              help="Optimise according to minimal rejection.")
@click.option("--profit",
              "-p",
              is_flag=True,
              help="Optimise according to profit.")
def simulate_optimum(specsfile: str, model: str, waiting: bool,
                     rejection: bool, profit: bool):
    """Analyse different capacity combinations according to the specified
    optimisation problem.

    Reads a JSON specs file, simulates every capacity combination in the
    configured grid, saves each simulation result to disk, and finally
    produces optimisation plots via ``_create_plots``.
    """
    with open(specsfile, "r") as f:
        specs = json.load(f)
    # Output directory: configurable, with a package-level default.
    output_dir = Path(
        specs["output_dir"]
    ) if specs["output_dir"] is not None else OUTPUT_DIR_OPTIMISATION
    if not output_dir.is_dir():
        output_dir.mkdir()
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M")
    logger = get_logger(
        "simulate_optimum",
        output_dir.joinpath(f"simulate_optimum - {timestamp}.log"))
    if specs["modelfile"] is None:
        logger.info(
            f"No model file given. Recursing to default models from {EXAMPLE_MODEL_NO_CART}."
        )
        # NOTE(review): assumed to be a Path (is_file() is called below) — confirm.
        modelfile = EXAMPLE_MODEL_NO_CART
    else:
        modelfile = Path(specs["modelfile"])
    if not modelfile.is_file():
        raise FileNotFoundError(modelfile)
    wards = specs["wards"]
    capacities = specs["capacities"]
    adjust_int_rates = specs["adjust_int_rates"]
    service_name = specs["service_name"]
    if service_name not in ["expon", "hypererlang"]:
        raise ValueError(
            f"service_name has to be one of [expon, hypererlang]. Current value: {service_name}."
        )
    waitings = specs["waitings"] if waiting else dict()
    simulation_evaluator_specs = SimulationEvaluatorSpecs(**specs["DES_specs"])
    # Optimisation grid and objective parameters.
    optimisation_specs = specs["optimisation_specs"]
    lower_capacities = np.array(optimisation_specs["lower_capacities"])
    upper_capacities = np.array(optimisation_specs["upper_capacities"])
    utilisation_constraints = np.array(
        optimisation_specs["utilisation_constraints"])
    bed_costs = np.array(optimisation_specs["bed_costs"])
    rejection_costs = np.array(optimisation_specs["rejection_costs"])
    eps_rejection = optimisation_specs["eps_rejection"]
    eps_profit_rejection = optimisation_specs["eps_profit_rejection"]
    results: List[EvaluationResults] = []
    # Size of the capacity grid (inclusive bounds).
    combinations = np.prod(upper_capacities - lower_capacities + 1)
    logger.info(f"Start simulation of possible capacity combinations. "
                f"# combinations={combinations}.")
    ward_capacity = dict(zip(wards, capacities))
    hospital_model = HospitalModel.load(filepath=modelfile, logger=logger)
    hospital_specs = hospital_model.get_model(
        model=int(model),
        capacities=ward_capacity,
        service_name=service_name,
        adjust_int_rates=adjust_int_rates,
        waitings=waitings)
    if not (profit or rejection):
        logger.info("No optimisation routine specified.")
        raise ValueError("No optimisation routine specified.")
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    results_dir = output_dir.joinpath("Simulation results - " + timestamp)
    results_dir.mkdir()
    # Sweep the full capacity grid; hospital_specs is mutated in place for
    # each combination via set_capacities.
    for i, index in enumerate(
            np.ndindex(*(upper_capacities - lower_capacities + 1))):
        capacities_ = lower_capacities + np.array(index)
        ward_capacity = dict(zip(wards, capacities_))
        hospital_specs.set_capacities(**ward_capacity)
        logger.info(
            f"Simulate model on capacities {capacities_}. {i + 1} of {combinations}"
        )
        simulation_evaluator = SimulationEvaluator(
            hospital_specs=hospital_specs,
            simulation_evaluator_specs=simulation_evaluator_specs,
            logger=logger)
        simulation_evaluator.evaluate()
        key = f"{modelfile.name},model:{model},service:{service_name},waiting:{waiting}"
        simulation_evaluator.name = key
        # NOTE(review): results is typed List[EvaluationResults] but receives
        # SimulationEvaluator instances — presumably a subtype; confirm.
        results.append(simulation_evaluator)
        simulation_evaluator.save(
            results_dir.joinpath(
                f"simulation_result-capacities"
                f"{list(simulation_evaluator.hospital_specs.capacities)}.json")
        )
    _create_plots(results=results,
                  rejection=rejection,
                  profit=profit,
                  logger=logger,
                  utilisation_constraints=utilisation_constraints,
                  rejection_costs=rejection_costs,
                  bed_costs=bed_costs,
                  eps_rejection=eps_rejection,
                  eps_profit_rejection=eps_profit_rejection,
                  output_dir=output_dir)
@click.command()
@click.option(
    "--specsfile",
    "-s",
    type=click.Path(exists=True),
    default=str(EXAMPLE_FILEPATH_OPTIMISATION_COMPUTATION),
    required=True,
    help="Filepath to specifications for model building. "
    f"Default can be found in {EXAMPLE_FILEPATH_OPTIMISATION_COMPUTATION}.")
@click.option("--rejection",
              "-r",
              is_flag=True,
              help="Optimise according to minimal rejection.")
@click.option("--profit",
              "-p",
              is_flag=True,
              help="Optimise according to profit.")
def compute_optimum(specsfile: str, rejection: bool, profit: bool):
    """Analyse different capacity combinations according to the specified
    optimisation problem.

    Unlike ``simulate_optimum``, this command does not run simulations:
    it loads previously saved evaluation results from the configured input
    directory and only performs the optimisation/plotting step.
    """
    with open(specsfile, "r") as f:
        specs = json.load(f)
    output_dir = Path(
        specs["output_dir"]
    ) if specs["output_dir"] is not None else OUTPUT_DIR_OPTIMISATION
    if not output_dir.is_dir():
        output_dir.mkdir()
    # Directory with the previously saved simulation result JSON files.
    input_dir = Path(specs["input_dir"])
    if not input_dir.is_dir():
        raise NotADirectoryError(input_dir)
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M")
    logger = get_logger(
        "compute_optimum",
        output_dir.joinpath(f"compute_optimum - {timestamp}.log"))
    optimisation_specs = specs["optimisation_specs"]
    utilisation_constraints = np.array(
        optimisation_specs["utilisation_constraints"])
    bed_costs = np.array(optimisation_specs["bed_costs"])
    rejection_costs = np.array(optimisation_specs["rejection_costs"])
    eps_rejection = optimisation_specs["eps_rejection"]
    eps_profit_rejection = optimisation_specs["eps_profit_rejection"]
    if not (profit or rejection):
        logger.info("No optimisation routine specified.")
        raise ValueError("No optimisation routine specified.")
    results = _get_evaluation_results(Path(input_dir), logger=logger)
    _create_plots(results=results,
                  rejection=rejection,
                  profit=profit,
                  logger=logger,
                  utilisation_constraints=utilisation_constraints,
                  rejection_costs=rejection_costs,
                  bed_costs=bed_costs,
                  eps_rejection=eps_rejection,
                  eps_profit_rejection=eps_profit_rejection,
                  output_dir=output_dir)
| [
"numpy.prod",
"click.Choice",
"pathlib.Path",
"click.option",
"hoqunm.data_tools.modelling.HospitalModel.load",
"hoqunm.simulation.evaluators.EvaluationResults.load",
"numpy.ndindex",
"hoqunm.simulation.evaluators.SimulationEvaluator",
"matplotlib.pyplot.close",
"hoqunm.optimisation.optimators.Opt... | [((5230, 5245), 'click.command', 'click.command', ([], {}), '()\n', (5243, 5245), False, 'import click\n'), ((5793, 5912), 'click.option', 'click.option', (['"""--waiting"""', '"""-w"""'], {'is_flag': '(True)', 'help': '"""If waiting shall be assessed according to given waiting map."""'}), "('--waiting', '-w', is_flag=True, help=\n 'If waiting shall be assessed according to given waiting map.')\n", (5805, 5912), False, 'import click\n'), ((5926, 6027), 'click.option', 'click.option', (['"""--rejection"""', '"""-r"""'], {'is_flag': '(True)', 'help': '"""Optimise according to minimal rejection."""'}), "('--rejection', '-r', is_flag=True, help=\n 'Optimise according to minimal rejection.')\n", (5938, 6027), False, 'import click\n'), ((6066, 6153), 'click.option', 'click.option', (['"""--profit"""', '"""-p"""'], {'is_flag': '(True)', 'help': '"""Optimise according to profit."""'}), "('--profit', '-p', is_flag=True, help=\n 'Optimise according to profit.')\n", (6078, 6153), False, 'import click\n'), ((10548, 10563), 'click.command', 'click.command', ([], {}), '()\n', (10561, 10563), False, 'import click\n'), ((10858, 10959), 'click.option', 'click.option', (['"""--rejection"""', '"""-r"""'], {'is_flag': '(True)', 'help': '"""Optimise according to minimal rejection."""'}), "('--rejection', '-r', is_flag=True, help=\n 'Optimise according to minimal rejection.')\n", (10870, 10959), False, 'import click\n'), ((10998, 11085), 'click.option', 'click.option', (['"""--profit"""', '"""-p"""'], {'is_flag': '(True)', 'help': '"""Optimise according to profit."""'}), "('--profit', '-p', is_flag=True, help=\n 'Optimise according to profit.')\n", (11010, 11085), False, 'import click\n'), ((1268, 1286), 'hoqunm.optimisation.optimators.Optimator', 'Optimator', (['results'], {}), '(results)\n', (1277, 1286), False, 'from hoqunm.optimisation.optimators import Optimator\n'), ((7610, 7656), 'hoqunm.simulation.evaluators.SimulationEvaluatorSpecs', 
'SimulationEvaluatorSpecs', ([], {}), "(**specs['DES_specs'])\n", (7634, 7656), False, 'from hoqunm.simulation.evaluators import EvaluationResults, SimulationEvaluator, SimulationEvaluatorSpecs\n'), ((7733, 7781), 'numpy.array', 'np.array', (["optimisation_specs['lower_capacities']"], {}), "(optimisation_specs['lower_capacities'])\n", (7741, 7781), True, 'import numpy as np\n'), ((7805, 7853), 'numpy.array', 'np.array', (["optimisation_specs['upper_capacities']"], {}), "(optimisation_specs['upper_capacities'])\n", (7813, 7853), True, 'import numpy as np\n'), ((7884, 7939), 'numpy.array', 'np.array', (["optimisation_specs['utilisation_constraints']"], {}), "(optimisation_specs['utilisation_constraints'])\n", (7892, 7939), True, 'import numpy as np\n'), ((7965, 8006), 'numpy.array', 'np.array', (["optimisation_specs['bed_costs']"], {}), "(optimisation_specs['bed_costs'])\n", (7973, 8006), True, 'import numpy as np\n'), ((8029, 8076), 'numpy.array', 'np.array', (["optimisation_specs['rejection_costs']"], {}), "(optimisation_specs['rejection_costs'])\n", (8037, 8076), True, 'import numpy as np\n'), ((8266, 8314), 'numpy.prod', 'np.prod', (['(upper_capacities - lower_capacities + 1)'], {}), '(upper_capacities - lower_capacities + 1)\n', (8273, 8314), True, 'import numpy as np\n'), ((8510, 8563), 'hoqunm.data_tools.modelling.HospitalModel.load', 'HospitalModel.load', ([], {'filepath': 'modelfile', 'logger': 'logger'}), '(filepath=modelfile, logger=logger)\n', (8528, 8563), False, 'from hoqunm.data_tools.modelling import HospitalModel\n'), ((11607, 11631), 'pathlib.Path', 'Path', (["specs['input_dir']"], {}), "(specs['input_dir'])\n", (11611, 11631), False, 'from pathlib import Path\n'), ((11976, 12031), 'numpy.array', 'np.array', (["optimisation_specs['utilisation_constraints']"], {}), "(optimisation_specs['utilisation_constraints'])\n", (11984, 12031), True, 'import numpy as np\n'), ((12057, 12098), 'numpy.array', 'np.array', (["optimisation_specs['bed_costs']"], {}), 
"(optimisation_specs['bed_costs'])\n", (12065, 12098), True, 'import numpy as np\n'), ((12121, 12168), 'numpy.array', 'np.array', (["optimisation_specs['rejection_costs']"], {}), "(optimisation_specs['rejection_costs'])\n", (12129, 12168), True, 'import numpy as np\n'), ((6464, 6476), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6473, 6476), False, 'import json\n'), ((6495, 6520), 'pathlib.Path', 'Path', (["specs['output_dir']"], {}), "(specs['output_dir'])\n", (6499, 6520), False, 'from pathlib import Path\n'), ((7078, 7102), 'pathlib.Path', 'Path', (["specs['modelfile']"], {}), "(specs['modelfile'])\n", (7082, 7102), False, 'from pathlib import Path\n'), ((9142, 9196), 'numpy.ndindex', 'np.ndindex', (['*(upper_capacities - lower_capacities + 1)'], {}), '(*(upper_capacities - lower_capacities + 1))\n', (9152, 9196), True, 'import numpy as np\n'), ((9516, 9640), 'hoqunm.simulation.evaluators.SimulationEvaluator', 'SimulationEvaluator', ([], {'hospital_specs': 'hospital_specs', 'simulation_evaluator_specs': 'simulation_evaluator_specs', 'logger': 'logger'}), '(hospital_specs=hospital_specs,\n simulation_evaluator_specs=simulation_evaluator_specs, logger=logger)\n', (9535, 9640), False, 'from hoqunm.simulation.evaluators import EvaluationResults, SimulationEvaluator, SimulationEvaluatorSpecs\n'), ((5299, 5322), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (5309, 5322), False, 'import click\n'), ((5601, 5630), 'click.Choice', 'click.Choice', (["['1', '2', '3']"], {}), "(['1', '2', '3'])\n", (5613, 5630), False, 'import click\n'), ((11396, 11408), 'json.load', 'json.load', (['f'], {}), '(f)\n', (11405, 11408), False, 'import json\n'), ((11427, 11452), 'pathlib.Path', 'Path', (["specs['output_dir']"], {}), "(specs['output_dir'])\n", (11431, 11452), False, 'from pathlib import Path\n'), ((12490, 12505), 'pathlib.Path', 'Path', (['input_dir'], {}), '(input_dir)\n', (12494, 12505), False, 'from pathlib import Path\n'), ((10617, 10640), 
'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (10627, 10640), False, 'import click\n'), ((6675, 6698), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6696, 6698), False, 'import datetime\n'), ((8949, 8972), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8970, 8972), False, 'import datetime\n'), ((9240, 9255), 'numpy.array', 'np.array', (['index'], {}), '(index)\n', (9248, 9255), True, 'import numpy as np\n'), ((11724, 11747), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11745, 11747), False, 'import datetime\n'), ((5071, 5099), 'hoqunm.simulation.evaluators.EvaluationResults.load', 'EvaluationResults.load', (['file'], {}), '(file)\n', (5093, 5099), False, 'from hoqunm.simulation.evaluators import EvaluationResults, SimulationEvaluator, SimulationEvaluatorSpecs\n'), ((2940, 2951), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2949, 2951), True, 'import matplotlib.pyplot as plt\n'), ((4639, 4650), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4648, 4650), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import networkx as nx
from scipy.spatial.distance import cosine
from scipy import sparse
from tqdm import tqdm
class RandomWalk:
    """Uniform random-walk sampler over a graph.

    Every step moves from the current node to one of its neighbours chosen
    uniformly at random with ``np.random.choice``.
    """

    def __init__(self, graph: nx.Graph, num_walks: int = 10, walk_length: int = 80) -> None:
        r"""
        Generate randomly uniform random walks
        """
        self.graph = graph
        self.num_walks = num_walks
        self.walk_length = walk_length

    def simulate_walks(self):
        """Run ``num_walks`` passes over all nodes; each node seeds one walk per pass."""
        collected = []
        for _ in tqdm(range(self.num_walks), desc='Generating Walks'):
            collected.extend(self._walk(node) for node in self.graph.nodes())
        return collected

    def _walk(self, start):
        """Grow a single walk of ``walk_length`` nodes beginning at ``start``."""
        path = [start]
        while len(path) < self.walk_length:
            candidates = list(self.graph.neighbors(path[-1]))
            path.append(np.random.choice(candidates))
        return path
class LazyRandomWalk:
    def __init__(self, graph: nx.Graph, num_walks: int = 5, walk_length: int = 80, sigma: float = 0.2, alpha: float = 0.2, similarity = cosine) -> None:
        r"""
        Lazy random walk with attribute-aware transition probabilities.
        Source: https://arxiv.org/abs/2008.03639
        section: III-A-3
        """
        self.graph = graph
        self.num_walks = num_walks
        self.walk_length = walk_length
        self.sigma = sigma          # bandwidth of the similarity kernel
        self.alpha = alpha          # laziness: (1 - alpha) self-loop weight
        self.similarity = similarity  # node-attribute distance (default: cosine)
        self.transition = None      # filled by process_graph()
        self.nodes = np.arange(self.graph.number_of_nodes())
        self.process_graph()
    def weighting(self, u, v):
        # Edge weight from a Gaussian-style kernel over the attribute distance.
        # NOTE(review): as written this evaluates exp(-d/2 * sigma^2); the usual
        # kernel would be exp(-d / (2*sigma^2)) — verify against the paper.
        sigma = self.sigma
        xu = self.graph.nodes[u]['node_attr']
        xv = self.graph.nodes[v]['node_attr']
        return np.exp(- self.similarity(xu, xv) / 2 * (sigma ** 2))
    def process_graph(self):
        r"""
        Calculate transition probabilities, using node attributes and pre-defined
        node-wise similarity function
        """
        alpha = self.alpha
        adj = nx.adjacency_matrix(self.graph)
        # Edge list as [u, v] pairs taken from the sparse adjacency pattern.
        edges = np.stack(adj.nonzero()).T.tolist()
        # W: attribute-similarity edge weights (lil_matrix for cheap item writes).
        W = sparse.lil_matrix((self.graph.number_of_nodes(), self.graph.number_of_nodes()))
        for u, v in tqdm(edges, desc='Computing Transition probabilities'):
            score = self.weighting(u, v)
            W[u, v] = score
        rows = self.nodes
        cols = self.nodes
        # A: diagonal self-loop weights (1 - alpha) implementing the laziness.
        alphas = np.ones(self.graph.number_of_nodes()) * (1 - alpha)
        # D: diagonal of inverse node degrees.
        degress = 1./ np.array(list(dict(self.graph.degree()).values()))
        A = sparse.coo_matrix((alphas, (rows, cols)))
        D = sparse.coo_matrix((degress, (rows, cols)))
        # Unnormalised transition matrix; rows are normalised at walk time.
        P = (W + A) @ D
        self.transition = P
    def simulate_walks(self):
        # One walk per node per epoch, num_walks epochs in total.
        walks = []
        for _ in range(self.num_walks):
            for node in tqdm(self.graph.nodes(), desc='Generating Walks'):
                walks.append(self._walk(node))
        return walks
    def _walk(self, start):
        # Sample a single walk; each step draws the next node from the
        # row of the transition matrix for the current node.
        length = self.walk_length
        walk = [start]
        while len(walk) < length:
            current = walk[-1]
            probs = self.transition[current, :]
            probs = probs.todense()
            probs /= probs.sum() # normalize transition # TODO: try normalize by node degree
            probs = np.array(probs)[0]
            next = np.random.choice(self.nodes, p=probs)
            walk.append(next)
        return walk
| [
"networkx.adjacency_matrix",
"numpy.random.choice",
"tqdm.tqdm",
"numpy.array",
"scipy.sparse.coo_matrix"
] | [((2066, 2097), 'networkx.adjacency_matrix', 'nx.adjacency_matrix', (['self.graph'], {}), '(self.graph)\n', (2085, 2097), True, 'import networkx as nx\n'), ((2262, 2316), 'tqdm.tqdm', 'tqdm', (['edges'], {'desc': '"""Computing Transition probabilities"""'}), "(edges, desc='Computing Transition probabilities')\n", (2266, 2316), False, 'from tqdm import tqdm\n'), ((2602, 2643), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(alphas, (rows, cols))'], {}), '((alphas, (rows, cols)))\n', (2619, 2643), False, 'from scipy import sparse\n'), ((2656, 2698), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(degress, (rows, cols))'], {}), '((degress, (rows, cols)))\n', (2673, 2698), False, 'from scipy import sparse\n'), ((910, 937), 'numpy.random.choice', 'np.random.choice', (['neighbors'], {}), '(neighbors)\n', (926, 937), True, 'import numpy as np\n'), ((3411, 3448), 'numpy.random.choice', 'np.random.choice', (['self.nodes'], {'p': 'probs'}), '(self.nodes, p=probs)\n', (3427, 3448), True, 'import numpy as np\n'), ((3372, 3387), 'numpy.array', 'np.array', (['probs'], {}), '(probs)\n', (3380, 3387), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 13 14:52:52 2019
@author: yifan
"""
import csv, time, random, math
from mpi4py import MPI
import numpy
def eucl_distance(point_one, point_two):
    """Return the Euclidean distance between two points.

    Args:
        point_one, point_two: equal-length sequences of numbers (numeric
            strings are accepted too — coordinates are coerced with float()).

    Returns:
        float: the Euclidean distance.

    Raises:
        Exception: if the points have different dimensionality (generic
            Exception kept for backward compatibility with existing callers).
    """
    if len(point_one) != len(point_two):
        raise Exception("Error: non comparable points")
    # zip pairs the coordinates; sum of squared differences, then sqrt.
    return math.sqrt(sum((float(a) - float(b)) ** 2
                         for a, b in zip(point_one, point_two)))
def compare_center(initial_center, derived_center, dimensions, num_clusters, cutoff):
    """Count how many cluster centers have converged.

    Args:
        initial_center: centers from the previous iteration.
        derived_center: freshly computed centers.
        dimensions: unused; kept for backward compatibility with callers.
        num_clusters: number of leading center pairs to compare.
        cutoff: convergence threshold on the Euclidean distance.

    Returns:
        int: number of center pairs whose distance is below ``cutoff``.

    Raises:
        Exception: if the two center lists differ in length.
    """
    if len(initial_center) != len(derived_center):
        raise Exception("Error: non comparable points")
    # A center counts as converged when it moved less than `cutoff`.
    return sum(
        1
        for i in range(num_clusters)
        if eucl_distance(initial_center[i], derived_center[i]) < cutoff
    )
# Synthetic 2-D input for the distributed k-means: `nums` points on a line.
nums=10*6
data=[[float((i+1000)/nums),float((2000+i)/nums)] for i in range(nums)]
# Alternative (disabled): load whitespace-separated points from a text file.
#data=[]
#with open('kmeans_1.txt','r') as f:
#    for line in f:
#        tmps=line.strip('\n').split()
#        if tmps!=[]:
#            data.append([float(tmp) for tmp in tmps])
def main():
    """Distributed k-means: each MPI rank owns one cluster center.

    The number of clusters equals the communicator size; iteration stops
    once every center has moved less than `cutoff` between rounds.
    """
    global data
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    dimensions=2
    num_clusters=size
    cutoff = 0.002
    compare_val = 0
    num_points = len(data)
    dimensions = len(data[0])
    initial = []
    for i in range(size):  # seed centers: the first `size` data points, one per rank
        initial.append(data[i])
    start_time = time.time()
    while True:
        dist = []
        min_dist = numpy.zeros(num_points)
        for point in data:  # dist: Euclidean distance from this rank's center to every point
            dist.append(eucl_distance(initial[rank], point))
        temp_dist = numpy.array(dist)
        comm.Reduce(temp_dist, min_dist, op = MPI.MIN)  # min_dist: per-point minimum distance to any center (on root)
        comm.Barrier()
        if rank == 0:
            min_dist = min_dist.tolist()  # convert the numpy array to a plain list before broadcasting
        recv_min_dist = comm.bcast(min_dist, root = 0)
        comm.Barrier()
        cluster = []
        for i in range(len(recv_min_dist)):
            if recv_min_dist[i] == dist[i]:
                cluster.append(data[i])  # this rank's center is the nearest one for this point
        center = []
        center_val = [0] * dimensions
        # Average the assigned points coordinate-wise -> new center for this rank.
        for i in cluster:
            for j in range(dimensions):
                center_val[j] += float(i[j])
        for j in range(dimensions):
            if(len(cluster) != 0):
                center_val[j] = center_val[j] / len(cluster)  # mean coordinate of this rank's cluster
        center = comm.gather(center_val, root = 0)
        comm.Barrier()
        if rank == 0:
            # Converged when every center moved less than `cutoff`.
            compare_val = compare_center(initial, center, dimensions, size, cutoff)
            if compare_val == size:
                print('my rank is %d'% rank,center)
                print("Execution time %s seconds" % (time.time() - start_time))
        break_val = comm.bcast(compare_val, root = 0)
        initial = comm.bcast(center, root = 0)
        comm.Barrier()
        if break_val == size:
            break
    MPI.Finalize()
# Entry point: run the distributed k-means when executed directly (e.g. via mpirun).
if __name__ == "__main__":
    main()
"math.sqrt",
"numpy.array",
"numpy.zeros",
"mpi4py.MPI.Finalize",
"time.time"
] | [((486, 505), 'math.sqrt', 'math.sqrt', (['sum_diff'], {}), '(sum_diff)\n', (495, 505), False, 'import csv, time, random, math\n'), ((1562, 1573), 'time.time', 'time.time', ([], {}), '()\n', (1571, 1573), False, 'import csv, time, random, math\n'), ((3111, 3125), 'mpi4py.MPI.Finalize', 'MPI.Finalize', ([], {}), '()\n', (3123, 3125), False, 'from mpi4py import MPI\n'), ((1631, 1654), 'numpy.zeros', 'numpy.zeros', (['num_points'], {}), '(num_points)\n', (1642, 1654), False, 'import numpy\n'), ((1788, 1805), 'numpy.array', 'numpy.array', (['dist'], {}), '(dist)\n', (1799, 1805), False, 'import numpy\n'), ((2902, 2913), 'time.time', 'time.time', ([], {}), '()\n', (2911, 2913), False, 'import csv, time, random, math\n')] |
import numpy as np
import pykin.utils.transform_utils as t_utils
import pykin.utils.kin_utils as k_utils
import pykin.kinematics.jacobian as jac
from pykin.planners.planner import Planner
from pykin.utils.error_utils import OriValueError, CollisionError
from pykin.utils.kin_utils import ShellColors as sc, logging_time
from pykin.utils.log_utils import create_logger
from pykin.utils.transform_utils import get_linear_interpoation, get_quaternion_slerp
logger = create_logger('Cartesian Planner', "debug",)
class CartesianPlanner(Planner):
"""
path planner in Cartesian space
Args:
robot(SingleArm or Bimanual): The manipulator robot type is SingleArm or Bimanual
self_collision_manager: CollisionManager for robot's self collision check
object_collision_manager: CollisionManager for collision check between robot and object
n_step(int): Number of waypoints
dimension(int): robot arm's dof
waypoint_type(str): Type of waypoint ex) "Linear", "Cubic", "Circular"
"""
def __init__(
self,
robot,
self_collision_manager=None,
object_collision_manager=None,
n_step=500,
dimension=7,
waypoint_type="Linear"
):
super(CartesianPlanner, self).__init__(
robot,
self_collision_manager,
object_collision_manager,
dimension)
self.n_step = n_step
self.waypoint_type = waypoint_type
self.eef_name = self.robot.eef_name
self.arm = None
self._dimension = dimension
super()._setup_q_limits()
super()._setup_eef_name()
def __repr__(self):
return 'pykin.planners.cartesian_planner.{}()'.format(type(self).__name__)
    @logging_time
    def get_path_in_joinst_space(
        self,
        current_q=None,
        goal_pose=None,
        waypoints=None,
        resolution=1,
        damping=0.5,
        epsilon=1e-12,
        pos_sensitivity=0.03,
        is_slerp=False
    ):
        """Plan a joint-space path that tracks a Cartesian waypoint trajectory.

        Args:
            current_q: current joint configuration (normalised via _change_types).
            goal_pose: target end-effector pose (normalised via _change_types).
            waypoints: optional pre-computed Cartesian waypoints; generated
                with generate_waypoints(is_slerp) when None.
            resolution: sub-sampling factor for the returned path.
            epsilon: tolerance passed to the pose-error computation.
            damping: damped-least-squares factor for the IK Jacobian.
            pos_sensitivity: acceptable final end-effector position error.
            is_slerp: if True, interpolate orientations with slerp.

        Returns:
            tuple: (paths, target_positions) as produced by
            _compute_path_and_target_pose.

        NOTE(review): "joinst" in the method name is a typo for "joint"; it is
        kept unchanged because external callers depend on it.
        """
        self._cur_qpos = super()._change_types(current_q)
        self._goal_pose = super()._change_types(goal_pose)
        # Current end-effector pose from forward kinematics of the start config.
        init_fk = self.robot.kin.forward_kinematics(self.robot.desired_frames, self._cur_qpos)
        self._cur_pose = self.robot.get_eef_pose(init_fk)
        # Stash the solver parameters on the instance for the IK loop.
        self._resolution = resolution
        self._damping = damping
        self._pos_sensitivity = pos_sensitivity
        self._is_slerp = is_slerp
        if waypoints is None:
            waypoints = self.generate_waypoints(is_slerp)
        paths, target_positions = self._compute_path_and_target_pose(waypoints, epsilon)
        return paths, target_positions
    def _compute_path_and_target_pose(self, waypoints, epsilon):
        """Track the Cartesian waypoints with damped-least-squares IK.

        Repeats the whole waypoint sweep (up to ``total_cnt`` retries) until
        the end-effector lands within ``self._pos_sensitivity`` of the goal.

        Args:
            waypoints: iterable of (position, orientation) targets.
            epsilon: threshold forwarded to ``calc_pose_error``.

        Returns:
            tuple: (paths, target_positions) — joint configurations plus the
            matching Cartesian positions; (None, None) when retries run out.

        Raises:
            CollisionError: if any waypoint produced a collision.
        """
        cnt = 0
        total_cnt = 10  # maximum number of full-sweep retries
        while True:
            cnt += 1
            collision_pose = {}  # step index -> (colliding names, target position)
            cur_fk = self.robot.kin.forward_kinematics(self.robot.desired_frames, self._cur_qpos)
            current_transform = cur_fk[self.eef_name].h_mat
            eef_position = cur_fk[self.eef_name].pos
            paths = [self._cur_qpos]
            target_positions = [eef_position]
            for step, (pos, ori) in enumerate(waypoints):
                target_transform = t_utils.get_h_mat(pos, ori)
                err_pose = k_utils.calc_pose_error(target_transform, current_transform, epsilon)
                J = jac.calc_jacobian(self.robot.desired_frames, cur_fk, self._dimension)
                # Damped least-squares pseudo-inverse J^T (J J^T + lambda^2 I)^-1,
                # which stays numerically stable near singularities.
                J_dls = np.dot(J.T, np.linalg.inv(np.dot(J, J.T) + self._damping**2 * np.identity(6)))
                dq = np.dot(J_dls, err_pose)
                self._cur_qpos = np.array([(self._cur_qpos[i] + dq[i]) for i in range(self._dimension)]).reshape(self._dimension,)
                is_collision_free = self._collision_free(self._cur_qpos)
                if not is_collision_free:
                    # Record the offending waypoint; reported (and raised) after the sweep.
                    _, name = self.self_c_manager.in_collision_other(other_manager=self.object_c_manager, return_names=True)
                    collision_pose[step] = (name, np.round(target_transform[:3,3], 6))
                    continue
                if not self._check_q_in_limits(self._cur_qpos):
                    continue
                cur_fk = self.robot.kin.forward_kinematics(self.robot.desired_frames, self._cur_qpos)
                current_transform = cur_fk[self.robot.eef_name].h_mat
                # Thin the stored path per the resolution setting; always keep the final step.
                if step % (1/self._resolution) == 0 or step == len(waypoints)-1:
                    paths.append(self._cur_qpos)
                    target_positions.append(pos)
            # NOTE(review): if `waypoints` is empty, `err` is unbound here and the
            # retry log below raises NameError — confirm callers never pass [].
            err = t_utils.compute_pose_error(self._goal_pose[:3], cur_fk[self.eef_name].pos)
            if collision_pose.keys():
                logger.error(f"Failed Generate Path.. Collision may occur.")
                for name, pose in collision_pose.values():
                    logger.warning(f"\n\tCollision Names : {name} \n\tCollision Position : {pose}")
                    # logger.warning(f"Collision Position : {pose}")
                raise CollisionError("Conflict confirmed. Check the object position!")
            if err < self._pos_sensitivity:
                logger.info(f"Generate Path Successfully!! Error is {err:6f}")
                break
            if cnt > total_cnt:
                logger.error(f"Failed Generate Path.. The number of retries of {cnt} exceeded")
                paths, target_positions = None, None
                break
            logger.error(f"Failed Generate Path.. Position Error is {err:6f}")
            print(f"{sc.BOLD}Retry Generate Path, the number of retries is {cnt}/{total_cnt} {sc.ENDC}\n")
        return paths, target_positions
# TODO
# generate cubic, circular waypoints
def generate_waypoints(self, is_slerp):
if self.waypoint_type == "Linear":
waypoints = [path for path in self._get_linear_path(self._cur_pose, self._goal_pose, is_slerp)]
if self.waypoint_type == "Cubic":
pass
if self.waypoint_type == "Circular":
pass
return waypoints
    def get_waypoints(self):
        """Return the stored waypoints.

        NOTE(review): nothing in this class ever assigns ``self.waypoints``
        (``generate_waypoints`` returns a local), so this raises
        AttributeError unless a caller sets it — confirm intent.
        """
        return self.waypoints
def _change_pose_type(self, pose):
ret = np.zeros(7)
ret[:3] = pose[:3]
if isinstance(pose, (list, tuple)):
pose = np.asarray(pose)
ori = pose[3:]
if ori.shape == (3,):
ori = t_utils.get_quaternion_from_rpy(ori)
ret[3:] = ori
elif ori.shape == (4,):
ret[3:] = ori
else:
raise OriValueError(ori.shape)
return ret
def _get_linear_path(self, init_pose, goal_pose, is_slerp):
for step in range(1, self.n_step + 1):
delta_t = step / self.n_step
pos = get_linear_interpoation(init_pose[:3], goal_pose[:3], delta_t)
ori = init_pose[3:]
if is_slerp:
ori = get_quaternion_slerp(init_pose[3:], goal_pose[3:], delta_t)
yield (pos, ori)
    def _get_cubic_path(self):
        # TODO: cubic-spline waypoint generation (see generate_waypoints).
        pass
    def _get_cicular_path(self):
        # TODO: circular waypoint generation. NOTE(review): "cicular" is a
        # typo for "circular"; name kept in case external code references it.
        pass
    # Tunable planner parameters exposed as plain read/write properties.
    @property
    def resolution(self):
        """Fraction controlling how densely waypoints are kept in the path."""
        return self._resolution
    @resolution.setter
    def resolution(self, resolution):
        self._resolution = resolution
    @property
    def damping(self):
        """Damping factor of the damped-least-squares IK step."""
        return self._damping
    @damping.setter
    def damping(self, damping):
        self._damping = damping
    @property
    def pos_sensitivity(self):
        """Acceptable final end-effector position error."""
        return self._pos_sensitivity
    @pos_sensitivity.setter
    def pos_sensitivity(self, pos_sensitivity):
        self._pos_sensitivity = pos_sensitivity
    @property
    def is_slerp(self):
        """Whether orientations are slerp-interpolated along the path."""
        return self._is_slerp
@is_slerp.setter
def is_slerp(self, is_slerp):
self._is_slerp = is_slerp | [
"numpy.identity",
"pykin.utils.log_utils.create_logger",
"pykin.utils.transform_utils.get_quaternion_from_rpy",
"pykin.utils.error_utils.OriValueError",
"pykin.utils.kin_utils.calc_pose_error",
"pykin.utils.transform_utils.get_linear_interpoation",
"pykin.utils.error_utils.CollisionError",
"numpy.asar... | [((467, 510), 'pykin.utils.log_utils.create_logger', 'create_logger', (['"""Cartesian Planner"""', '"""debug"""'], {}), "('Cartesian Planner', 'debug')\n", (480, 510), False, 'from pykin.utils.log_utils import create_logger\n'), ((6242, 6253), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (6250, 6253), True, 'import numpy as np\n'), ((4594, 4668), 'pykin.utils.transform_utils.compute_pose_error', 't_utils.compute_pose_error', (['self._goal_pose[:3]', 'cur_fk[self.eef_name].pos'], {}), '(self._goal_pose[:3], cur_fk[self.eef_name].pos)\n', (4620, 4668), True, 'import pykin.utils.transform_utils as t_utils\n'), ((6353, 6369), 'numpy.asarray', 'np.asarray', (['pose'], {}), '(pose)\n', (6363, 6369), True, 'import numpy as np\n'), ((6442, 6478), 'pykin.utils.transform_utils.get_quaternion_from_rpy', 't_utils.get_quaternion_from_rpy', (['ori'], {}), '(ori)\n', (6473, 6478), True, 'import pykin.utils.transform_utils as t_utils\n'), ((6811, 6873), 'pykin.utils.transform_utils.get_linear_interpoation', 'get_linear_interpoation', (['init_pose[:3]', 'goal_pose[:3]', 'delta_t'], {}), '(init_pose[:3], goal_pose[:3], delta_t)\n', (6834, 6873), False, 'from pykin.utils.transform_utils import get_linear_interpoation, get_quaternion_slerp\n'), ((3251, 3278), 'pykin.utils.transform_utils.get_h_mat', 't_utils.get_h_mat', (['pos', 'ori'], {}), '(pos, ori)\n', (3268, 3278), True, 'import pykin.utils.transform_utils as t_utils\n'), ((3306, 3375), 'pykin.utils.kin_utils.calc_pose_error', 'k_utils.calc_pose_error', (['target_transform', 'current_transform', 'epsilon'], {}), '(target_transform, current_transform, epsilon)\n', (3329, 3375), True, 'import pykin.utils.kin_utils as k_utils\n'), ((3397, 3466), 'pykin.kinematics.jacobian.calc_jacobian', 'jac.calc_jacobian', (['self.robot.desired_frames', 'cur_fk', 'self._dimension'], {}), '(self.robot.desired_frames, cur_fk, self._dimension)\n', (3414, 3466), True, 'import pykin.kinematics.jacobian as jac\n'), ((3592, 3615), 
'numpy.dot', 'np.dot', (['J_dls', 'err_pose'], {}), '(J_dls, err_pose)\n', (3598, 3615), True, 'import numpy as np\n'), ((5064, 5128), 'pykin.utils.error_utils.CollisionError', 'CollisionError', (['"""Conflict confirmed. Check the object position!"""'], {}), "('Conflict confirmed. Check the object position!')\n", (5078, 5128), False, 'from pykin.utils.error_utils import OriValueError, CollisionError\n'), ((6595, 6619), 'pykin.utils.error_utils.OriValueError', 'OriValueError', (['ori.shape'], {}), '(ori.shape)\n', (6608, 6619), False, 'from pykin.utils.error_utils import OriValueError, CollisionError\n'), ((6953, 7012), 'pykin.utils.transform_utils.get_quaternion_slerp', 'get_quaternion_slerp', (['init_pose[3:]', 'goal_pose[3:]', 'delta_t'], {}), '(init_pose[3:], goal_pose[3:], delta_t)\n', (6973, 7012), False, 'from pykin.utils.transform_utils import get_linear_interpoation, get_quaternion_slerp\n'), ((4061, 4097), 'numpy.round', 'np.round', (['target_transform[:3, 3]', '(6)'], {}), '(target_transform[:3, 3], 6)\n', (4069, 4097), True, 'import numpy as np\n'), ((3517, 3531), 'numpy.dot', 'np.dot', (['J', 'J.T'], {}), '(J, J.T)\n', (3523, 3531), True, 'import numpy as np\n'), ((3553, 3567), 'numpy.identity', 'np.identity', (['(6)'], {}), '(6)\n', (3564, 3567), True, 'import numpy as np\n')] |
# Copyright 2020, Battelle Energy Alliance, LLC
# ALL RIGHTS RESERVED
import random
import numpy as np
def initialize(self, runInfo, inputs):
  """Seed Python's global RNG so repeated runs draw identical samples."""
  random.seed(9491)
def run(self, Input):
  """Sample a shutdown cost and store it as a constant time series.

  Draws the number of shutdown days (uniform int in [10, 30]) and a daily
  cost (uniform in [0.8, 1.2]); writes ``self.cost_V1`` with one entry per
  element of ``Input['time']``.
  """
  # RNG draw order matters for reproducibility: randint first, then random.
  days_shutdown = float(random.randint(10, 30))
  daily_cost = 0.8 + 0.4 * random.random()
  self.cost_V1 = days_shutdown * daily_cost * np.ones(Input['time'].size)
| [
"numpy.ones",
"random.random",
"random.randint",
"random.seed"
] | [((159, 176), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (170, 176), False, 'import random\n'), ((247, 269), 'random.randint', 'random.randint', (['(10)', '(30)'], {}), '(10, 30)\n', (261, 269), False, 'import random\n'), ((381, 408), 'numpy.ones', 'np.ones', (["Input['time'].size"], {}), "(Input['time'].size)\n", (388, 408), True, 'import numpy as np\n'), ((299, 314), 'random.random', 'random.random', ([], {}), '()\n', (312, 314), False, 'import random\n')] |
# -*- coding: utf-8 -*-
from __future__ import division
import random
from operator import itemgetter
import numpy as np
from common.gamestate import BoardState
def other_player(player_id):
    """Return the opponent's id in a two-player game (1 <-> 2), else None."""
    return {1: 2, 2: 1}.get(player_id)
def state_transition(player_id, state, action):
    """Apply `action` for `player_id` to `state` and return the new state."""
    next_state = state.make_move(action, player_id)
    return next_state
class ReinforcementPlayer:
    """Tabular Q-learning player for a Connect-Four-style board game.

    Action values are read from / written to ``data_storage``; behaviour
    (board size, rewards, discount factor, exploration temperature) comes
    from ``config``. Exploration uses a Boltzmann (softmax) policy over the
    stored action values.
    """
    def __init__(self, data_storage, config):
        # The storage must match the configured board dimensions.
        assert data_storage.rows == config['board']['rows']
        assert data_storage.cols == config['board']['cols']
        assert config['estimated_optimal_future_value'] in ('best_known', 'mean')
        self.data_storage = data_storage
        self.config = config
        # Bind the future-value estimation strategy once, up front.
        if config['estimated_optimal_future_value'] == 'best_known':
            self.estimated_optimal_future_value = self.estimated_optimal_future_value_best_known
        elif config['estimated_optimal_future_value'] == 'mean':
            self.estimated_optimal_future_value = self.estimated_optimal_future_value_mean
        self.boltzmann_temperature = self.config['boltzmann_temperature']
    def is_terminal(self, state):
        """Return True when the game in `state` has ended (win or tie)."""
        return state.game_result(self.config['needed_to_win']) is not None
    def get_possible_actions(self, state):
        """Return the columns that still have at least one free cell."""
        return [col for col in range(self.config['board']['cols'])
                if state.get_available_row_in_col(col) is not None]
    def reward_in_state(self, player_id, state):
        """Return the reward `player_id` receives for reaching `state`."""
        game_result = state.game_result(self.config['needed_to_win'])
        if game_result is None:
            # Non-terminal: per-move shaping reward.
            return self.config['rewards']['each_move']
        else:
            if game_result['result'] == 'won':
                if game_result['player_id'] == player_id:
                    return self.config['rewards']['win']
                else:
                    return self.config['rewards']['loss']
            elif game_result['result'] == 'tied':
                return self.config['rewards']['tie']
    def best_possible_action_with_value(self, player_id, state):
        """Return (action, value) of the highest-valued legal action."""
        possible_actions = self.get_possible_actions(state)
        actions_with_values = [(action, self.data_storage.get_value_of_action(player_id, state, action))
                               for action in possible_actions]
        # NOTE(review): sorts the whole list just to take the head; max() would do.
        best_action_with_value = sorted(actions_with_values, key=itemgetter(1), reverse=True)[0]
        return best_action_with_value
    def best_possible_action(self, player_id, state):
        """Return the greedy action for `player_id` in `state`."""
        return self.best_possible_action_with_value(player_id, state)[0]
    def random_possible_action(self, state):
        """Return a uniformly random legal action."""
        possible_actions = self.get_possible_actions(state)
        return random.choice(possible_actions)
    def select_action_boltzmann(self, player_id, state):
        """Sample an action from a softmax over stored action values."""
        possible_actions = self.get_possible_actions(state)
        actions_with_values = [(action, self.data_storage.get_value_of_action(player_id, state, action))
                               for action in possible_actions]
        actions, values = zip(*actions_with_values)
        # Lower temperature -> greedier; higher -> closer to uniform.
        numerators = np.exp(np.array(values) / self.boltzmann_temperature)
        denumerator = sum(numerators)
        probabilities = numerators / denumerator
        return np.random.choice(actions, p=probabilities)
    def estimated_optimal_future_value_mean(self, player_id, state):
        """Average the best follow-up value over all opponent replies."""
        possible_actions = self.get_possible_actions(state)
        if len(possible_actions) == 0:
            return 0
        rewards = 0
        for action in possible_actions:
            possible_state = state.make_move(action, other_player(player_id))
            if not self.is_terminal(possible_state):
                best_next_action = self.best_possible_action(player_id, possible_state)
                rewards += self.data_storage.get_value_of_action(player_id, possible_state, best_next_action)
        return rewards / len(possible_actions)
    def estimated_optimal_future_value_best_known(self, player_id, state):
        """Value of our best reply assuming the opponent plays their greedy move."""
        if not self.is_terminal(state):
            best_opponents_action = self.best_possible_action(other_player(player_id), state)
            worst_possible_state = state.make_move(best_opponents_action, other_player(player_id))
            if not self.is_terminal(worst_possible_state):
                _, best_next_action_value = self.best_possible_action_with_value(player_id, worst_possible_state)
                return best_next_action_value
        return 0
    def best_next_state(self, player_id, state):
        """Return the state reached by playing the greedy action."""
        action = self.best_possible_action(player_id, state)
        return state_transition(player_id, state, action)
    def update_action_value(self, player_id, old_state, action, new_state):
        """Q-learning update: reward + discounted estimated future value."""
        reward = self.reward_in_state(player_id, new_state)
        learned_value = reward + self.config['discount_factor'] * self.estimated_optimal_future_value(player_id, new_state)
        self.data_storage.update_value_of_action(player_id, old_state, action, learned_value)
    def self_play_game(self):
        """Play one self-play game, updating values after every move.

        Returns:
            The terminal board state.
        """
        board_state = BoardState(self.config['board']['rows'], self.config['board']['cols'])
        while True:
            for player_id in (1, 2):
                selected_action = self.select_action_boltzmann(player_id, board_state)
                new_state = board_state.make_move(selected_action, player_id)
                self.update_action_value(player_id, board_state, selected_action, new_state)
                board_state = new_state
                if self.is_terminal(board_state):
                    return board_state
    def save_data(self):
        """Persist the learned action values via the storage backend."""
        self.data_storage.save()
| [
"random.choice",
"numpy.random.choice",
"common.gamestate.BoardState",
"numpy.array",
"operator.itemgetter"
] | [((2650, 2681), 'random.choice', 'random.choice', (['possible_actions'], {}), '(possible_actions)\n', (2663, 2681), False, 'import random\n'), ((3197, 3239), 'numpy.random.choice', 'np.random.choice', (['actions'], {'p': 'probabilities'}), '(actions, p=probabilities)\n', (3213, 3239), True, 'import numpy as np\n'), ((4988, 5058), 'common.gamestate.BoardState', 'BoardState', (["self.config['board']['rows']", "self.config['board']['cols']"], {}), "(self.config['board']['rows'], self.config['board']['cols'])\n", (4998, 5058), False, 'from common.gamestate import BoardState\n'), ((3048, 3064), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (3056, 3064), True, 'import numpy as np\n'), ((2331, 2344), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (2341, 2344), False, 'from operator import itemgetter\n')] |
import numpy as np
import seaborn as sns
import matplotlib.pylab as plt
import math
import os
import pandas as pd
import re
def search_year(year, years):
    """Return the index i with years[i] <= year < years[i+1], or -1.

    `years` is an ascending list of interval boundaries; a year equal to the
    last boundary (or outside all intervals) yields -1.
    """
    for idx, (start, end) in enumerate(zip(years, years[1:])):
        if start <= year < end:
            return idx
    return -1
def save_plots_topics(folder, articles_df, column_name, topic_modeler, with_sorted=False,
                      vmax=800, relative_number=False, years=None, cnt_per_plot=25):
    """Save heatmaps of topic counts (or percentages) per year interval.

    Args:
        folder: output directory (created if missing).
        articles_df: DataFrame with a "year" column and a `column_name` column
            holding an iterable of topic labels per article.
        column_name: name of the topic-label column.
        topic_modeler: model exposing `n_components` (total number of topics).
        with_sorted: sort topics by total count (descending) before plotting.
        vmax: upper bound of the heatmap color scale.
        relative_number: plot percentages of articles per year instead of counts.
        years: ascending year boundaries; interval i is [years[i], years[i+1]).
        cnt_per_plot: number of topic rows per saved figure.
    """
    if years is None:
        # Avoid a mutable default argument; default boundaries 2008..2020.
        years = list(range(2008, 2021))
    if not os.path.exists(folder):
        os.makedirs(folder)
    topic_year = np.zeros((topic_modeler.n_components, len(years)-1), dtype = int if not relative_number else float)
    topic_map = {}
    topic_map_by_id = {}
    topic_id = 0
    all_articles_by_year = np.zeros(len(years)-1, dtype=int)
    for i in range(len(articles_df)):
        year_ind = search_year(articles_df["year"].values[i], years)
        if year_ind >= 0:
            for topic in articles_df[column_name].values[i]:
                if topic not in topic_map:
                    topic_map[topic] = topic_id
                    topic_map_by_id[topic_id] = topic
                    topic_id += 1
                topic_year[topic_map[topic]][year_ind] += 1
            all_articles_by_year[year_ind] += 1
    if with_sorted:
        result = sorted([(idx, topic_val) for idx,topic_val in enumerate(np.sum(topic_year, axis = 1))],key=lambda x: x[1], reverse = True)
    else:
        result = [(idx, topic_val) for idx,topic_val in enumerate(np.sum(topic_year, axis = 1))]
    if relative_number:
        # NOTE(review): year intervals with zero articles yield inf/nan here —
        # confirm upstream data covers every interval.
        topic_year /= all_articles_by_year
        topic_year *= 100
    for ind in range(math.ceil(topic_modeler.n_components/cnt_per_plot)):
        plt.figure(figsize=(15, 6), dpi=150)
        topic_year_df = pd.DataFrame(topic_year[[i for i, cnt in result[ind*cnt_per_plot:(ind+1)*cnt_per_plot]],:])
        topic_year_df.index = [ topic_map_by_id[i] for i, cnt in result[ind*cnt_per_plot:(ind+1)*cnt_per_plot]]
        topic_year_df.columns = [ "%d-%d"%(years[idx], years[idx+1]) for idx, year in enumerate(years) if idx != len(years) -1]
        if relative_number:
            ax = sns.heatmap(topic_year_df, linewidth=0.5, cmap="YlGnBu", vmin = 0, vmax=vmax, annot=True, fmt=".1f")
        else:
            ax = sns.heatmap(topic_year_df, linewidth=0.5, cmap="YlGnBu", vmin = 0, vmax=vmax, annot=True, fmt="d")
        plt.tight_layout()
        plt.savefig(os.path.join(folder,'%d-%dtopics.png'%(ind*cnt_per_plot+1, (ind+1)*cnt_per_plot)))
        plt.close()  # release the figure; matplotlib otherwise keeps every figure alive
def save_plots_districts(folder, big_dataset, countries=None, with_sorted=False, image_format="eps"):
    """Per country, save heatmaps of intervention counts by district.

    Args:
        folder: root output directory; one subdirectory per country.
        big_dataset: DataFrame with "districts" (lists of "<Country>/<district>"
            strings) and the three per-article intervention-list columns.
        countries: country prefixes to plot (defaults to the standard nine).
        with_sorted: sort districts by intervention total instead of mentions.
        image_format: savefig format (e.g. "eps", "png").
    """
    if countries is None:
        # Avoid a mutable default argument.
        countries = ["Nigeria/", "Malawi/", "Kenya/", "Tanzania/", "Mali/", "Zambia/", "Burkina Faso/", "Philippines/", "Bangladesh/"]
    for country in countries:
        country_folder = os.path.join(folder, country)
        if not os.path.exists(country_folder):
            os.makedirs(country_folder)
        districts_dict = {}
        districts_dict_interv = {}
        for i in range(len(big_dataset)):
            for district in big_dataset["districts"].values[i]:
                if country in district:
                    if district not in districts_dict:
                        districts_dict[district] = 0
                    districts_dict[district] += 1
                    if district not in districts_dict_interv:
                        districts_dict_interv[district] = {"technology intervention": 0, "socioeconomic intervention": 0, "ecosystem intervention": 0}
                    # Count an article once per category it has any label in.
                    for column in ["technology intervention", "socioeconomic intervention", "ecosystem intervention"]:
                        if len(big_dataset[column].values[i]) > 0:
                            districts_dict_interv[district][column] += 1
        if with_sorted:
            result = sorted([(name, (interv_val["technology intervention"], interv_val["socioeconomic intervention"], interv_val["ecosystem intervention"]),\
                sum(interv_val.values())) for name,interv_val in districts_dict_interv.items()],key=lambda x: x[2], reverse = True)
        else:
            result = sorted([(name, (districts_dict_interv[name]["technology intervention"], districts_dict_interv[name]["socioeconomic intervention"], districts_dict_interv[name]["ecosystem intervention"]),\
                cnt) for name, cnt in districts_dict.items()], key = lambda x: x[2], reverse= True)
        for ind in range(math.ceil(len(districts_dict)/30)):
            plt.figure(figsize=(15, 6), dpi=150)
            topic_year_df = pd.DataFrame([val[1] for val in result[ind*30:(ind+1)*30]])
            topic_year_df.index = [val[0] for val in result[ind*30:(ind+1)*30]]
            topic_year_df.columns = ["Technology intervention", "Socioeconomic intervention", "Ecosystem intervention"]
            ax = sns.heatmap(topic_year_df, linewidth=0.5, cmap="YlGnBu", vmin = 0, vmax = 50, annot=True, fmt = "d")
            plt.tight_layout()
            plt.savefig(os.path.join(country_folder,'%d-%dinterventions.%s'%(ind*30+1, (ind+1)*30, image_format)), format=image_format)
            plt.close()  # release the figure to avoid accumulating open figures
def save_plots_districts_unique(folder, big_dataset, countries=None, with_sorted=False):
    """Per country, save heatmaps of UNIQUE intervention labels by district.

    Like save_plots_districts but counts distinct labels per category (via
    sets) instead of article occurrences. Output is always PNG.
    """
    if countries is None:
        # Avoid a mutable default argument.
        countries = ["Nigeria/", "Malawi/", "Kenya/", "Tanzania/", "Mali/", "Zambia/", "Burkina Faso/", "Philippines/", "Bangladesh/"]
    for country in countries:
        country_folder = os.path.join(folder, country)
        if not os.path.exists(country_folder):
            os.makedirs(country_folder)
        districts_dict = {}
        districts_dict_interv = {}
        for i in range(len(big_dataset)):
            for district in big_dataset["districts"].values[i]:
                if country in district:
                    if district not in districts_dict:
                        districts_dict[district] = 0
                    districts_dict[district] += 1
                    if district not in districts_dict_interv:
                        districts_dict_interv[district] = {"technology intervention": set(), "socioeconomic intervention":set(), "ecosystem intervention": set()}
                    # Sets deduplicate labels across articles for this district.
                    for column in ["technology intervention", "socioeconomic intervention", "ecosystem intervention"]:
                        for val in big_dataset[column].values[i]:
                            districts_dict_interv[district][column].add(val)
        if with_sorted:
            result = sorted([(name, (len(interv_val["technology intervention"]), len(interv_val["socioeconomic intervention"]), len(interv_val["ecosystem intervention"])),\
                sum([len(interv_val[v]) for v in interv_val])) for name,interv_val in districts_dict_interv.items()],key=lambda x: x[2], reverse = True)
        else:
            result = sorted([(name, (len(districts_dict_interv[name]["technology intervention"]), len(districts_dict_interv[name]["socioeconomic intervention"]), len(districts_dict_interv[name]["ecosystem intervention"])),\
                cnt) for name, cnt in districts_dict.items()], key = lambda x: x[2], reverse= True)
        for ind in range(math.ceil(len(districts_dict)/30)):
            plt.figure(figsize=(15, 6), dpi=150)
            topic_year_df = pd.DataFrame([val[1] for val in result[ind*30:(ind+1)*30]])
            topic_year_df.index = [val[0] for val in result[ind*30:(ind+1)*30]]
            topic_year_df.columns = ["Technology intervention", "Socioeconomic intervention", "Ecosystem intervention"]
            ax = sns.heatmap(topic_year_df, linewidth=0.5, cmap="YlGnBu", vmin = 0, vmax = 50, annot=True, fmt = "d")
            plt.tight_layout()
            plt.savefig(os.path.join(country_folder,'%d-%dinterventions.png'%(ind*30+1, (ind+1)*30)))
            plt.close()  # release the figure to avoid accumulating open figures
def code_sequence(values):
    """Encode which intervention categories appear in `values` as a 3-bit mask.

    Bit 2 (value 4): "Technology intervention"
    Bit 1 (value 2): "Socioeconomic intervention"
    Bit 0 (value 1): "Ecosystem intervention"
    """
    mask = 0
    for bit, label in ((4, "Technology intervention"),
                       (2, "Socioeconomic intervention"),
                       (1, "Ecosystem intervention")):
        if label in values:
            mask += bit
    return mask
def decode_sequence(num):
    """Invert code_sequence: return the sorted short names encoded in `num`."""
    names = [short for bit, short in ((1, "Eco"), (2, "Socio"), (4, "Tech"))
             if num & bit]
    return sorted(names)
def save_plots_districts_with_overlapping(folder, big_dataset, countries=None, with_sorted=False, image_format="eps"):
    """Per country, save heatmaps of intervention-category COMBINATIONS by district.

    Columns are the 7 non-empty bitmask combinations produced by
    code_sequence (Tech=4, Socio=2, Eco=1), ordered [4,2,1,6,3,5,7].
    """
    if countries is None:
        # Avoid a mutable default argument.
        countries = ["Nigeria/", "Malawi/", "Kenya/", "Tanzania/", "Mali/", "Zambia/", "Burkina Faso/", "Philippines/", "Bangladesh/"]
    for country in countries:
        country_folder = os.path.join(folder, country)
        if not os.path.exists(country_folder):
            os.makedirs(country_folder)
        districts_dict = {}
        districts_dict_interv = {}
        for i in range(len(big_dataset)):
            for district in big_dataset["districts"].values[i]:
                if country in district:
                    if district not in districts_dict:
                        districts_dict[district] = 0
                    districts_dict[district] += 1
                    if district not in districts_dict_interv:
                        districts_dict_interv[district] = {}
                        # Bug fix: this init loop used `i`, clobbering the row
                        # index and making the lookups below read row 7.
                        for mask in range(1, 8):
                            districts_dict_interv[district][mask] = 0
                    combo = code_sequence(big_dataset["intervention_labels"].values[i])
                    if combo > 0:
                        districts_dict_interv[district][combo] += 1
        if with_sorted:
            result = sorted([(name, tuple([interv_val[w] for w in [4,2,1,6,3,5,7] ]),\
                sum(interv_val.values())) for name,interv_val in districts_dict_interv.items()],key=lambda x: x[2], reverse = True)
        else:
            # Bug fix: the original referenced the undefined name `interv_val`
            # in this branch; look the counts up per district instead.
            result = sorted([(name, tuple([districts_dict_interv[name][w] for w in [4,2,1,6,3,5,7] ]),\
                cnt) for name, cnt in districts_dict.items()], key = lambda x: x[2], reverse= True)
        for ind in range(math.ceil(len(districts_dict)/30)):
            plt.figure(figsize=(15, 6), dpi=150)
            topic_year_df = pd.DataFrame([val[1] for val in result[ind*30:(ind+1)*30]])
            topic_year_df.index = [val[0] for val in result[ind*30:(ind+1)*30]]
            topic_year_df.columns = ["; ".join(decode_sequence(w))for w in [4,2,1,6,3,5,7]]
            ax = sns.heatmap(topic_year_df, linewidth=0.5, cmap="YlGnBu", vmin = 0, vmax = 50, annot=True, fmt = "d")
            plt.tight_layout()
            plt.savefig(os.path.join(country_folder,'%d-%dinterventions.%s'%(ind*30+1, (ind+1)*30, image_format)), format=image_format)
            plt.close()  # release the figure to avoid accumulating open figures
def save_plots_topics_interv(folder, articles_df, column_name, with_sorted = True, topic_numbers=125, image_format="eps"):
    """Save heatmaps of total article counts per topic, one fifth of topics per page.

    Topic ids are parsed from the trailing "#<id>" (1-based) in each label.
    """
    if not os.path.exists(folder):
        os.makedirs(folder)
    topic_year_names = {}
    topic_year = np.zeros(topic_numbers, dtype = int)
    topics_per_page = int(topic_numbers/5)
    for i in range(len(articles_df)):
        for topic in articles_df[column_name].values[i]:
            # Raw string fixes the invalid "\d" escape warning.
            # NOTE(review): ids above `topic_numbers` would index out of
            # bounds — confirm the upstream labeling stays within range.
            topic_num = int(re.search(r"#(\d+)", topic).group(1)) -1
            topic_year_names[topic_num] = topic
            topic_year[topic_num] += 1
    if with_sorted:
        result = sorted([(idx, topic_val) for idx,topic_val in enumerate(topic_year)],key=lambda x: x[1], reverse = True)
    else:
        result = [(idx, topic_val) for idx,topic_val in enumerate(topic_year)]
    for ind in range(5):
        plt.figure(figsize=(6, 6), dpi=150)
        topic_year_df = pd.DataFrame(topic_year[[i for i,cnt in result[ind*topics_per_page:(ind+1)*topics_per_page]]])
        # Bug fix: zero-count topics have no stored name and used to raise
        # KeyError here; fall back to a synthetic label.
        topic_year_df.index = [ topic_year_names.get(i, "Topic #%d" % (i+1)) for i,cnt in result[ind*topics_per_page:(ind+1)*topics_per_page]]
        topic_year_df.columns = ["All"]
        ax = sns.heatmap(topic_year_df, linewidth=0.5, cmap="YlGnBu", annot=True, fmt = "d", vmax = 50)
        plt.tight_layout()
        plt.savefig(os.path.join(folder,'%d-%dinterventions.%s'%(ind*topics_per_page+1, (ind+1)*topics_per_page, image_format)), format=image_format)
        plt.close()  # release the figure to avoid accumulating open figures
def save_plots_topics_cooccur(folder, articles_df, topic_num, column_name = "topics", with_sorted = True):
    """Save heatmaps counting how often each topic co-occurs with topic `topic_num`.

    Only articles mentioning topic `topic_num` (1-based, parsed from the
    "#<id>" suffix of each label) contribute to the counts.
    """
    if not os.path.exists(folder):
        os.makedirs(folder)
    topic_year = np.zeros(150, dtype = int)  # assumes at most 150 topic ids — TODO confirm
    # Bug fix: `topic_year_names` was referenced below but never defined in
    # this function (NameError); build the id -> label map while scanning.
    topic_year_names = {}
    for i in range(len(articles_df)):
        should_be_used = False
        for topic in articles_df[column_name].values[i]:
            num = int(re.search(r"#(\d+)", topic).group(1))
            topic_year_names[num - 1] = topic
            if num == topic_num:
                should_be_used = True
        if should_be_used:
            for topic in articles_df[column_name].values[i]:
                _topic_num = int(re.search(r"#(\d+)", topic).group(1)) -1
                topic_year[_topic_num] += 1
    if with_sorted:
        result = sorted([(idx, topic_val) for idx,topic_val in enumerate(topic_year)],key=lambda x: x[1], reverse = True)
    else:
        result = [(idx, topic_val) for idx,topic_val in enumerate(topic_year)]
    for ind in range(5):
        plt.figure(figsize=(6, 6), dpi=150)
        topic_year_df = pd.DataFrame(topic_year[[i for i,cnt in result[ind*30:(ind+1)*30]]])
        topic_year_df.index = [ topic_year_names.get(i, "Topic #%d" % (i+1)) for i,cnt in result[ind*30:(ind+1)*30]]
        topic_year_df.columns = ["All"]
        ax = sns.heatmap(topic_year_df, linewidth=0.5, cmap="YlGnBu", annot=True, fmt = "d", vmax = 50)
        plt.title("Coocurance of topics with the topic " + topic_year_names.get(topic_num-1, "Topic #%d" % topic_num))
        plt.tight_layout()
        plt.savefig(os.path.join(folder,'%d-%dtopics.png'%(ind*30+1, (ind+1)*30)))
        plt.close()  # release the figure to avoid accumulating open figures
def save_plots_cooccur_interv(folder, big_dataset, topic_num, with_sorted = False, save_as_eps=False):
    """Save heatmaps of topics (with intervention counts) co-occurring with `topic_num`.

    NOTE(review): `save_as_eps` is accepted but unused (kept for API
    compatibility); output is always PNG.
    """
    if not os.path.exists(folder):
        os.makedirs(folder)
    districts_dict = {}
    districts_dict_interv = {}
    # Bug fix: the title below referenced `topic_year_names`, undefined in
    # this function; capture the full label of `topic_num` while scanning.
    topic_name = "Topic #%d" % topic_num
    for i in range(len(big_dataset)):
        should_be_used = False
        for topic in big_dataset["topics"].values[i]:
            _topic_num = int(re.search(r"#(\d+)", topic).group(1))
            if _topic_num == topic_num:
                should_be_used = True
                topic_name = topic
        if should_be_used:
            for topic in big_dataset["topics"].values[i]:
                if topic not in districts_dict:
                    districts_dict[topic] = 0
                districts_dict[topic] += 1
                if topic not in districts_dict_interv:
                    districts_dict_interv[topic] = {"all":0, "technology intervention": 0, "socioeconomic intervention": 0, "ecosystem intervention": 0}
                # NOTE(review): labels outside the three known categories raise
                # KeyError here — confirm the upstream vocabulary.
                for interv in big_dataset["Intervention labels"].values[i].split(";"):
                    districts_dict_interv[topic][interv] += 1
                    districts_dict_interv[topic]["all"] += 1
    if with_sorted:
        result = sorted([(name, (interv_val["all"], interv_val["technology intervention"], interv_val["socioeconomic intervention"], interv_val["ecosystem intervention"]),\
            interv_val["all"]) for name,interv_val in districts_dict_interv.items()],key=lambda x: x[2], reverse = True)
    else:
        result = sorted([(name, (districts_dict_interv[name]["all"],districts_dict_interv[name]["technology intervention"], districts_dict_interv[name]["socioeconomic intervention"], districts_dict_interv[name]["ecosystem intervention"]),\
            cnt) for name, cnt in districts_dict.items()], key = lambda x: x[2], reverse= True)
    for ind in range(math.ceil(len(districts_dict)/30)):
        plt.figure(figsize=(15, 6), dpi=150)
        topic_year_df = pd.DataFrame([val[1] for val in result[ind*30:(ind+1)*30]])
        topic_year_df.index = [val[0] for val in result[ind*30:(ind+1)*30]]
        topic_year_df.columns = ["All","Technology interv.", "Socioeconomic interv.", "Ecosystem interv."]
        ax = sns.heatmap(topic_year_df, linewidth=0.5, cmap="YlGnBu", vmin = 0, vmax = 50, annot=True, fmt = "d")
        plt.title("Coocurance of topics with the topic " + topic_name)
        plt.tight_layout()
        plt.savefig(os.path.join(folder,'%d-%dinterventions.png'%(ind*30+1, (ind+1)*30)))
        plt.close()  # release the figure to avoid accumulating open figures
def save_plots_cooccur_interv_relative(folder, big_dataset, topic_num, with_sorted = False):
    """Save heatmaps of topics co-occurring with `topic_num`, as intervention percentages."""
    if not os.path.exists(folder):
        os.makedirs(folder)
    districts_dict = {}
    districts_dict_interv = {}
    # Bug fix: the title below referenced `topic_year_names`, undefined in
    # this function; capture the full label of `topic_num` while scanning.
    topic_name = "Topic #%d" % topic_num
    for i in range(len(big_dataset)):
        should_be_used = False
        for topic in big_dataset["topics"].values[i]:
            _topic_num = int(re.search(r"#(\d+)", topic).group(1))
            if _topic_num == topic_num:
                should_be_used = True
                topic_name = topic
        if should_be_used:
            for topic in big_dataset["topics"].values[i]:
                if topic not in districts_dict:
                    districts_dict[topic] = 0
                districts_dict[topic] += 1
                if topic not in districts_dict_interv:
                    districts_dict_interv[topic] = {"all":0, "technology intervention": 0, "socioeconomic intervention": 0, "ecosystem intervention": 0}
                # NOTE(review): labels outside the three known categories raise
                # KeyError here — confirm the upstream vocabulary.
                for interv in big_dataset["Intervention labels"].values[i].split(";"):
                    districts_dict_interv[topic][interv] += 1
                    districts_dict_interv[topic]["all"] += 1
    if with_sorted:
        result = sorted([(name, (interv_val["technology intervention"]*100/interv_val["all"], interv_val["socioeconomic intervention"]*100/interv_val["all"], interv_val["ecosystem intervention"]*100/interv_val["all"]),\
            interv_val["all"]) for name,interv_val in districts_dict_interv.items()],key=lambda x: x[2], reverse = True)
    else:
        result = sorted([(name, (districts_dict_interv[name]["technology intervention"]*100/districts_dict_interv[name]["all"], districts_dict_interv[name]["socioeconomic intervention"]*100/districts_dict_interv[name]["all"], districts_dict_interv[name]["ecosystem intervention"]*100/districts_dict_interv[name]["all"]),\
            cnt) for name, cnt in districts_dict.items()], key = lambda x: x[2], reverse= True)
    for ind in range(math.ceil(len(districts_dict)/30)):
        plt.figure(figsize=(15, 6), dpi=150)
        topic_year_df = pd.DataFrame([val[1] for val in result[ind*30:(ind+1)*30]])
        topic_year_df.index = [val[0] for val in result[ind*30:(ind+1)*30]]
        topic_year_df.columns = ["Technology interv.", "Socioeconomic interv.", "Ecosystem interv."]
        ax = sns.heatmap(topic_year_df, linewidth=0.5, cmap="YlGnBu", vmin = 0, vmax = 80, annot=True, fmt = "0.1f")
        plt.title("Coocurance of topics with the topic " + topic_name)
        plt.tight_layout()
        plt.savefig(os.path.join(folder,'%d-%dinterventions.png'%(ind*30+1, (ind+1)*30)))
        plt.close()  # release the figure to avoid accumulating open figures
def save_plots_interventions_districts(folder, big_dataset, column_name= "Intervention labels", with_sorted = False, image_format="eps"):
    """Save heatmaps of per-topic intervention-category counts across the whole dataset.

    Args:
        folder: output directory (created if missing).
        big_dataset: DataFrame with "topics" lists and a `column_name` column
            holding an iterable of intervention labels per article.
        column_name: name of the intervention-label column.
        with_sorted: sort topics by total interventions instead of mentions.
        image_format: savefig format (e.g. "eps", "png").
    """
    if not os.path.exists(folder):
        os.makedirs(folder)
    districts_dict = {}
    districts_dict_interv = {}
    for i in range(len(big_dataset)):
        for topic in big_dataset["topics"].values[i]:
            if topic not in districts_dict:
                districts_dict[topic] = 0
            districts_dict[topic] += 1
            if topic not in districts_dict_interv:
                districts_dict_interv[topic] = {"technology intervention": 0, "socioeconomic intervention": 0, "ecosystem intervention": 0}
            for interv in big_dataset[column_name].values[i]:
                # Labels are matched case-insensitively; unknown ones ignored.
                interv = interv.lower()
                if interv in districts_dict_interv[topic]:
                    districts_dict_interv[topic][interv] += 1
    if with_sorted:
        result = sorted([(name, (interv_val["technology intervention"], interv_val["socioeconomic intervention"], interv_val["ecosystem intervention"]),\
            sum(interv_val.values())) for name,interv_val in districts_dict_interv.items()],key=lambda x: x[2], reverse = True)
    else:
        result = sorted([(name, (districts_dict_interv[name]["technology intervention"], districts_dict_interv[name]["socioeconomic intervention"], districts_dict_interv[name]["ecosystem intervention"]),\
            cnt) for name, cnt in districts_dict.items()], key = lambda x: x[2], reverse= True)
    for ind in range(math.ceil(len(districts_dict)/25)):
        plt.figure(figsize=(15, 6), dpi=150)
        topic_year_df = pd.DataFrame([val[1] for val in result[ind*25:(ind+1)*25]])
        topic_year_df.index = [val[0] for val in result[ind*25:(ind+1)*25]]
        topic_year_df.columns = ["Technology intervention", "Socioeconomic intervention", "Ecosystem intervention"]
        ax = sns.heatmap(topic_year_df, linewidth=0.5, cmap="YlGnBu", vmin = 0, vmax = 50000, annot=True, fmt = "d")
        plt.tight_layout()
        plt.savefig(os.path.join(folder,'%d-%dinterventions.%s'%(ind*25+1, (ind+1)*25, image_format)), format=image_format)
        plt.close()  # release the figure to avoid accumulating open figures
def save_population_vs_geo_regions(folder, articles_df, vmax = 800, relative_number = False):
    """
    Save a heatmap of article counts per geo region, split into three
    population groups: "Small scale farmers", "Farmers" and "Undefined".

    :param folder: output directory (created if missing)
    :param articles_df: dataframe with list-valued "geo_regions" and
        "population tags" columns
    :param vmax: upper bound of the heatmap colour scale
    :param relative_number: if True, show each region's row as percentages
        of its own article total instead of absolute counts
    """
    if not os.path.exists(folder):
        os.makedirs(folder)
    # all distinct geo regions appearing anywhere in the dataframe
    geo_regions = list(set([geo_reg for geo_region in articles_df["geo_regions"] for geo_reg in geo_region]))
    # rows: geo regions; columns: (small scale farmers, farmers, undefined);
    # float dtype is required when percentages will be computed in place
    geo_regions_vs_population = np.zeros((len(geo_regions), 3), dtype = int if not relative_number else float)
    all_articles_by_type = np.zeros(len(geo_regions), dtype=int)
    for i in range(len(articles_df)):
        for geo_region in articles_df["geo_regions"].values[i]:
            if geo_region in geo_regions:
                geo_ind = geo_regions.index(geo_region)
                # "Small scale farmers" takes priority over the broader "Farmers" tag
                if "Small scale farmers" in articles_df["population tags"].values[i]:
                    geo_regions_vs_population[geo_ind][0] += 1
                elif "Farmers" in articles_df["population tags"].values[i]:
                    geo_regions_vs_population[geo_ind][1] += 1
                else:
                    geo_regions_vs_population[geo_ind][2] += 1
                all_articles_by_type[geo_ind] += 1
    # (index, total) pairs in original region order; used only for the row
    # selection below, so the row order is effectively unchanged
    result = [(idx, cnt_val) for idx,cnt_val in enumerate(np.sum(geo_regions_vs_population, axis = 1))]
    if relative_number:
        # convert counts to percentages of each region's article total
        # (transpose so the per-region division broadcasts column-wise)
        geo_regions_vs_population = geo_regions_vs_population.T
        geo_regions_vs_population /= all_articles_by_type
        geo_regions_vs_population *= 100
        geo_regions_vs_population = geo_regions_vs_population.T
    plt.figure(figsize=(15, 6), dpi=150)
    topic_year_df = pd.DataFrame(geo_regions_vs_population[[i for i,cnt in result],:])
    topic_year_df.index = geo_regions
    topic_year_df.columns = ["Small scale farmers", "Farmers", "Undefined"]
    # percentages need one decimal place; raw counts are plain integers
    if relative_number:
        ax = sns.heatmap(topic_year_df, linewidth=0.5, cmap="YlGnBu", vmin = 0, vmax=vmax, annot=True, fmt=".1f")
    else:
        ax = sns.heatmap(topic_year_df, linewidth=0.5, cmap="YlGnBu", vmin = 0, vmax=vmax, annot=True, fmt="d")
    plt.tight_layout()
    plt.savefig(os.path.join(folder,'plot.png'))
def run_plots():
    """
    Batch driver: regenerate every heatmap folder in one call.

    NOTE(review): relies on module/notebook globals (subset_df, big_dataset,
    temp_df, all_df, topic_modeler) being defined before this is invoked —
    confirm they are set up by the surrounding script.
    """
    save_plots_topics("topics_climate_relative_rearranged", subset_df, "topics", topic_modeler, with_sorted = True, vmax = 20, relative_number=True)
    save_plots_topics("topics_climate_relative", subset_df, "topics", topic_modeler, with_sorted = False, vmax = 20, relative_number=True)
    save_plots_topics("topics_up_to_date_125", big_dataset, "topics", topic_modeler, with_sorted = False, vmax = 3000)
    save_plots_topics("topics_up_to_date_rearranged_125", big_dataset, "topics", topic_modeler, with_sorted = True, vmax = 3000)
    save_plots_topics("topics_up_to_date_125_relative_number", big_dataset, "topics", topic_modeler, with_sorted = False, vmax = 15,relative_number=True)
    save_plots_topics("topics_up_to_date_rearranged_125_relative_number", big_dataset, "topics", topic_modeler, with_sorted = True, vmax = 15,relative_number=True)
    save_plots_topics("topics_climate_subset", subset_df, "topics_new", topic_modeler, with_sorted = False)
    save_plots_topics("topics_climate_relative_subset_rearranged", subset_df, "topics_new", topic_modeler, with_sorted = True,vmax = 20, relative_number = True)
    save_plots_topics("topics_climate_relative_subset", subset_df, "topics_new", topic_modeler, with_sorted = False,vmax = 20, relative_number = True)
    save_plots_topics("topics_climate_rearranged_subset", subset_df, "topics_new", topic_modeler, with_sorted = True)
    save_plots_districts_with_overlapping("countries_plots_with_overlapping", big_dataset, with_sorted=True)
    save_plots_districts_unique("countries_plots_unique", big_dataset, with_sorted=True)
    save_plots_districts("countries_plots", big_dataset, with_sorted=True)
    save_plots_topics_interv("topic_interventions", temp_df, "topics", with_sorted = True)
    # co-occurrence plots for a hand-picked set of topics of interest
    for topic in [30, 81, 121, 140, 10, 25, 91, 112, 124, 97]:
        save_plots_cooccur_interv_relative("topic_coocur_interv_relative_topic_%d"%topic, all_df, topic, with_sorted=True)
        save_plots_cooccur_interv("topic_coocur_interv_topic_%d"%topic, all_df, topic, with_sorted=True)
        save_plots_topics_cooccur("topic_coocur_topic_%d"%topic, all_df, topic, with_sorted=True)
    save_plots_cooccur_interv("topic_coocur_weather_interv_topic_97", all_df, 97, with_sorted=True)
    save_plots_topics_cooccur("topic_coocur_ICT_topic_111", all_df, 111, with_sorted = True)
    save_plots_interventions_districts("intervention_labels_vs_topics", all_df, with_sorted = False) | [
"os.path.exists",
"math.ceil",
"os.makedirs",
"matplotlib.pylab.tight_layout",
"matplotlib.pylab.figure",
"matplotlib.pylab.title",
"os.path.join",
"seaborn.heatmap",
"numpy.sum",
"numpy.zeros",
"pandas.DataFrame",
"re.search"
] | [((10689, 10723), 'numpy.zeros', 'np.zeros', (['topic_numbers'], {'dtype': 'int'}), '(topic_numbers, dtype=int)\n', (10697, 10723), True, 'import numpy as np\n'), ((12076, 12100), 'numpy.zeros', 'np.zeros', (['(150)'], {'dtype': 'int'}), '(150, dtype=int)\n', (12084, 12100), True, 'import numpy as np\n'), ((22191, 22227), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(15, 6)', 'dpi': '(150)'}), '(figsize=(15, 6), dpi=150)\n', (22201, 22227), True, 'import matplotlib.pylab as plt\n'), ((22248, 22316), 'pandas.DataFrame', 'pd.DataFrame', (['geo_regions_vs_population[[i for i, cnt in result], :]'], {}), '(geo_regions_vs_population[[i for i, cnt in result], :])\n', (22260, 22316), True, 'import pandas as pd\n'), ((22693, 22711), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (22709, 22711), True, 'import matplotlib.pylab as plt\n'), ((539, 561), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (553, 561), False, 'import os\n'), ((571, 590), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (582, 590), False, 'import os\n'), ((1698, 1750), 'math.ceil', 'math.ceil', (['(topic_modeler.n_components / cnt_per_plot)'], {}), '(topic_modeler.n_components / cnt_per_plot)\n', (1707, 1750), False, 'import math\n'), ((1759, 1795), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(15, 6)', 'dpi': '(150)'}), '(figsize=(15, 6), dpi=150)\n', (1769, 1795), True, 'import matplotlib.pylab as plt\n'), ((1820, 1923), 'pandas.DataFrame', 'pd.DataFrame', (['topic_year[[i for i, cnt in result[ind * cnt_per_plot:(ind + 1) *\n cnt_per_plot]], :]'], {}), '(topic_year[[i for i, cnt in result[ind * cnt_per_plot:(ind + 1\n ) * cnt_per_plot]], :])\n', (1832, 1923), True, 'import pandas as pd\n'), ((2436, 2454), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2452, 2454), True, 'import matplotlib.pylab as plt\n'), ((2838, 2867), 'os.path.join', 'os.path.join', (['folder', 'country'], {}), 
'(folder, country)\n', (2850, 2867), False, 'import os\n'), ((5404, 5433), 'os.path.join', 'os.path.join', (['folder', 'country'], {}), '(folder, country)\n', (5416, 5433), False, 'import os\n'), ((8399, 8428), 'os.path.join', 'os.path.join', (['folder', 'country'], {}), '(folder, country)\n', (8411, 8428), False, 'import os\n'), ((10594, 10616), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (10608, 10616), False, 'import os\n'), ((10626, 10645), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (10637, 10645), False, 'import os\n'), ((11292, 11327), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(6, 6)', 'dpi': '(150)'}), '(figsize=(6, 6), dpi=150)\n', (11302, 11327), True, 'import matplotlib.pylab as plt\n'), ((11352, 11457), 'pandas.DataFrame', 'pd.DataFrame', (['topic_year[[i for i, cnt in result[ind * topics_per_page:(ind + 1) *\n topics_per_page]]]'], {}), '(topic_year[[i for i, cnt in result[ind * topics_per_page:(ind +\n 1) * topics_per_page]]])\n', (11364, 11457), True, 'import pandas as pd\n'), ((11619, 11710), 'seaborn.heatmap', 'sns.heatmap', (['topic_year_df'], {'linewidth': '(0.5)', 'cmap': '"""YlGnBu"""', 'annot': '(True)', 'fmt': '"""d"""', 'vmax': '(50)'}), "(topic_year_df, linewidth=0.5, cmap='YlGnBu', annot=True, fmt=\n 'd', vmax=50)\n", (11630, 11710), True, 'import seaborn as sns\n'), ((11718, 11736), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11734, 11736), True, 'import matplotlib.pylab as plt\n'), ((12006, 12028), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (12020, 12028), False, 'import os\n'), ((12038, 12057), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (12049, 12057), False, 'import os\n'), ((12860, 12895), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(6, 6)', 'dpi': '(150)'}), '(figsize=(6, 6), dpi=150)\n', (12870, 12895), True, 'import matplotlib.pylab as plt\n'), ((12920, 12995), 'pandas.DataFrame', 
'pd.DataFrame', (['topic_year[[i for i, cnt in result[ind * 30:(ind + 1) * 30]]]'], {}), '(topic_year[[i for i, cnt in result[ind * 30:(ind + 1) * 30]]])\n', (12932, 12995), True, 'import pandas as pd\n'), ((13135, 13226), 'seaborn.heatmap', 'sns.heatmap', (['topic_year_df'], {'linewidth': '(0.5)', 'cmap': '"""YlGnBu"""', 'annot': '(True)', 'fmt': '"""d"""', 'vmax': '(50)'}), "(topic_year_df, linewidth=0.5, cmap='YlGnBu', annot=True, fmt=\n 'd', vmax=50)\n", (13146, 13226), True, 'import seaborn as sns\n'), ((13234, 13322), 'matplotlib.pylab.title', 'plt.title', (["('Coocurance of topics with the topic ' + topic_year_names[topic_num - 1])"], {}), "('Coocurance of topics with the topic ' + topic_year_names[\n topic_num - 1])\n", (13243, 13322), True, 'import matplotlib.pylab as plt\n'), ((13324, 13342), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13340, 13342), True, 'import matplotlib.pylab as plt\n'), ((13541, 13563), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (13555, 13563), False, 'import os\n'), ((13573, 13592), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (13584, 13592), False, 'import os\n'), ((15315, 15351), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(15, 6)', 'dpi': '(150)'}), '(figsize=(15, 6), dpi=150)\n', (15325, 15351), True, 'import matplotlib.pylab as plt\n'), ((15376, 15441), 'pandas.DataFrame', 'pd.DataFrame', (['[val[1] for val in result[ind * 30:(ind + 1) * 30]]'], {}), '([val[1] for val in result[ind * 30:(ind + 1) * 30]])\n', (15388, 15441), True, 'import pandas as pd\n'), ((15633, 15731), 'seaborn.heatmap', 'sns.heatmap', (['topic_year_df'], {'linewidth': '(0.5)', 'cmap': '"""YlGnBu"""', 'vmin': '(0)', 'vmax': '(50)', 'annot': '(True)', 'fmt': '"""d"""'}), "(topic_year_df, linewidth=0.5, cmap='YlGnBu', vmin=0, vmax=50,\n annot=True, fmt='d')\n", (15644, 15731), True, 'import seaborn as sns\n'), ((15742, 15830), 'matplotlib.pylab.title', 'plt.title', 
(["('Coocurance of topics with the topic ' + topic_year_names[topic_num - 1])"], {}), "('Coocurance of topics with the topic ' + topic_year_names[\n topic_num - 1])\n", (15751, 15830), True, 'import matplotlib.pylab as plt\n'), ((15832, 15850), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (15848, 15850), True, 'import matplotlib.pylab as plt\n'), ((16046, 16068), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (16060, 16068), False, 'import os\n'), ((16078, 16097), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (16089, 16097), False, 'import os\n'), ((17953, 17989), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(15, 6)', 'dpi': '(150)'}), '(figsize=(15, 6), dpi=150)\n', (17963, 17989), True, 'import matplotlib.pylab as plt\n'), ((18014, 18079), 'pandas.DataFrame', 'pd.DataFrame', (['[val[1] for val in result[ind * 30:(ind + 1) * 30]]'], {}), '([val[1] for val in result[ind * 30:(ind + 1) * 30]])\n', (18026, 18079), True, 'import pandas as pd\n'), ((18265, 18366), 'seaborn.heatmap', 'sns.heatmap', (['topic_year_df'], {'linewidth': '(0.5)', 'cmap': '"""YlGnBu"""', 'vmin': '(0)', 'vmax': '(80)', 'annot': '(True)', 'fmt': '"""0.1f"""'}), "(topic_year_df, linewidth=0.5, cmap='YlGnBu', vmin=0, vmax=80,\n annot=True, fmt='0.1f')\n", (18276, 18366), True, 'import seaborn as sns\n'), ((18377, 18465), 'matplotlib.pylab.title', 'plt.title', (["('Coocurance of topics with the topic ' + topic_year_names[topic_num - 1])"], {}), "('Coocurance of topics with the topic ' + topic_year_names[\n topic_num - 1])\n", (18386, 18465), True, 'import matplotlib.pylab as plt\n'), ((18467, 18485), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (18483, 18485), True, 'import matplotlib.pylab as plt\n'), ((18726, 18748), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (18740, 18748), False, 'import os\n'), ((18758, 18777), 'os.makedirs', 'os.makedirs', (['folder'], {}), 
'(folder)\n', (18769, 18777), False, 'import os\n'), ((20172, 20208), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(15, 6)', 'dpi': '(150)'}), '(figsize=(15, 6), dpi=150)\n', (20182, 20208), True, 'import matplotlib.pylab as plt\n'), ((20233, 20298), 'pandas.DataFrame', 'pd.DataFrame', (['[val[1] for val in result[ind * 25:(ind + 1) * 25]]'], {}), '([val[1] for val in result[ind * 25:(ind + 1) * 25]])\n', (20245, 20298), True, 'import pandas as pd\n'), ((20499, 20600), 'seaborn.heatmap', 'sns.heatmap', (['topic_year_df'], {'linewidth': '(0.5)', 'cmap': '"""YlGnBu"""', 'vmin': '(0)', 'vmax': '(50000)', 'annot': '(True)', 'fmt': '"""d"""'}), "(topic_year_df, linewidth=0.5, cmap='YlGnBu', vmin=0, vmax=50000,\n annot=True, fmt='d')\n", (20510, 20600), True, 'import seaborn as sns\n'), ((20611, 20629), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (20627, 20629), True, 'import matplotlib.pylab as plt\n'), ((20860, 20882), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (20874, 20882), False, 'import os\n'), ((20892, 20911), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (20903, 20911), False, 'import os\n'), ((22466, 22568), 'seaborn.heatmap', 'sns.heatmap', (['topic_year_df'], {'linewidth': '(0.5)', 'cmap': '"""YlGnBu"""', 'vmin': '(0)', 'vmax': 'vmax', 'annot': '(True)', 'fmt': '""".1f"""'}), "(topic_year_df, linewidth=0.5, cmap='YlGnBu', vmin=0, vmax=vmax,\n annot=True, fmt='.1f')\n", (22477, 22568), True, 'import seaborn as sns\n'), ((22590, 22690), 'seaborn.heatmap', 'sns.heatmap', (['topic_year_df'], {'linewidth': '(0.5)', 'cmap': '"""YlGnBu"""', 'vmin': '(0)', 'vmax': 'vmax', 'annot': '(True)', 'fmt': '"""d"""'}), "(topic_year_df, linewidth=0.5, cmap='YlGnBu', vmin=0, vmax=vmax,\n annot=True, fmt='d')\n", (22601, 22690), True, 'import seaborn as sns\n'), ((22728, 22760), 'os.path.join', 'os.path.join', (['folder', '"""plot.png"""'], {}), "(folder, 'plot.png')\n", (22740, 22760), False, 
'import os\n'), ((2197, 2299), 'seaborn.heatmap', 'sns.heatmap', (['topic_year_df'], {'linewidth': '(0.5)', 'cmap': '"""YlGnBu"""', 'vmin': '(0)', 'vmax': 'vmax', 'annot': '(True)', 'fmt': '""".1f"""'}), "(topic_year_df, linewidth=0.5, cmap='YlGnBu', vmin=0, vmax=vmax,\n annot=True, fmt='.1f')\n", (2208, 2299), True, 'import seaborn as sns\n'), ((2329, 2429), 'seaborn.heatmap', 'sns.heatmap', (['topic_year_df'], {'linewidth': '(0.5)', 'cmap': '"""YlGnBu"""', 'vmin': '(0)', 'vmax': 'vmax', 'annot': '(True)', 'fmt': '"""d"""'}), "(topic_year_df, linewidth=0.5, cmap='YlGnBu', vmin=0, vmax=vmax,\n annot=True, fmt='d')\n", (2340, 2429), True, 'import seaborn as sns\n'), ((2475, 2571), 'os.path.join', 'os.path.join', (['folder', "('%d-%dtopics.png' % (ind * cnt_per_plot + 1, (ind + 1) * cnt_per_plot))"], {}), "(folder, '%d-%dtopics.png' % (ind * cnt_per_plot + 1, (ind + 1) *\n cnt_per_plot))\n", (2487, 2571), False, 'import os\n'), ((2883, 2913), 'os.path.exists', 'os.path.exists', (['country_folder'], {}), '(country_folder)\n', (2897, 2913), False, 'import os\n'), ((2927, 2954), 'os.makedirs', 'os.makedirs', (['country_folder'], {}), '(country_folder)\n', (2938, 2954), False, 'import os\n'), ((4534, 4570), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(15, 6)', 'dpi': '(150)'}), '(figsize=(15, 6), dpi=150)\n', (4544, 4570), True, 'import matplotlib.pylab as plt\n'), ((4599, 4664), 'pandas.DataFrame', 'pd.DataFrame', (['[val[1] for val in result[ind * 30:(ind + 1) * 30]]'], {}), '([val[1] for val in result[ind * 30:(ind + 1) * 30]])\n', (4611, 4664), True, 'import pandas as pd\n'), ((4877, 4975), 'seaborn.heatmap', 'sns.heatmap', (['topic_year_df'], {'linewidth': '(0.5)', 'cmap': '"""YlGnBu"""', 'vmin': '(0)', 'vmax': '(50)', 'annot': '(True)', 'fmt': '"""d"""'}), "(topic_year_df, linewidth=0.5, cmap='YlGnBu', vmin=0, vmax=50,\n annot=True, fmt='d')\n", (4888, 4975), True, 'import seaborn as sns\n'), ((4990, 5008), 'matplotlib.pylab.tight_layout', 
'plt.tight_layout', ([], {}), '()\n', (5006, 5008), True, 'import matplotlib.pylab as plt\n'), ((5449, 5479), 'os.path.exists', 'os.path.exists', (['country_folder'], {}), '(country_folder)\n', (5463, 5479), False, 'import os\n'), ((5493, 5520), 'os.makedirs', 'os.makedirs', (['country_folder'], {}), '(country_folder)\n', (5504, 5520), False, 'import os\n'), ((7165, 7201), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(15, 6)', 'dpi': '(150)'}), '(figsize=(15, 6), dpi=150)\n', (7175, 7201), True, 'import matplotlib.pylab as plt\n'), ((7230, 7295), 'pandas.DataFrame', 'pd.DataFrame', (['[val[1] for val in result[ind * 30:(ind + 1) * 30]]'], {}), '([val[1] for val in result[ind * 30:(ind + 1) * 30]])\n', (7242, 7295), True, 'import pandas as pd\n'), ((7508, 7606), 'seaborn.heatmap', 'sns.heatmap', (['topic_year_df'], {'linewidth': '(0.5)', 'cmap': '"""YlGnBu"""', 'vmin': '(0)', 'vmax': '(50)', 'annot': '(True)', 'fmt': '"""d"""'}), "(topic_year_df, linewidth=0.5, cmap='YlGnBu', vmin=0, vmax=50,\n annot=True, fmt='d')\n", (7519, 7606), True, 'import seaborn as sns\n'), ((7621, 7639), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7637, 7639), True, 'import matplotlib.pylab as plt\n'), ((8444, 8474), 'os.path.exists', 'os.path.exists', (['country_folder'], {}), '(country_folder)\n', (8458, 8474), False, 'import os\n'), ((8488, 8515), 'os.makedirs', 'os.makedirs', (['country_folder'], {}), '(country_folder)\n', (8499, 8515), False, 'import os\n'), ((9876, 9912), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(15, 6)', 'dpi': '(150)'}), '(figsize=(15, 6), dpi=150)\n', (9886, 9912), True, 'import matplotlib.pylab as plt\n'), ((9941, 10006), 'pandas.DataFrame', 'pd.DataFrame', (['[val[1] for val in result[ind * 30:(ind + 1) * 30]]'], {}), '([val[1] for val in result[ind * 30:(ind + 1) * 30]])\n', (9953, 10006), True, 'import pandas as pd\n'), ((10191, 10289), 'seaborn.heatmap', 'sns.heatmap', (['topic_year_df'], 
{'linewidth': '(0.5)', 'cmap': '"""YlGnBu"""', 'vmin': '(0)', 'vmax': '(50)', 'annot': '(True)', 'fmt': '"""d"""'}), "(topic_year_df, linewidth=0.5, cmap='YlGnBu', vmin=0, vmax=50,\n annot=True, fmt='d')\n", (10202, 10289), True, 'import seaborn as sns\n'), ((10304, 10322), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10320, 10322), True, 'import matplotlib.pylab as plt\n'), ((11757, 11880), 'os.path.join', 'os.path.join', (['folder', "('%d-%dinterventions.%s' % (ind * topics_per_page + 1, (ind + 1) *\n topics_per_page, image_format))"], {}), "(folder, '%d-%dinterventions.%s' % (ind * topics_per_page + 1, \n (ind + 1) * topics_per_page, image_format))\n", (11769, 11880), False, 'import os\n'), ((13363, 13435), 'os.path.join', 'os.path.join', (['folder', "('%d-%dtopics.png' % (ind * 30 + 1, (ind + 1) * 30))"], {}), "(folder, '%d-%dtopics.png' % (ind * 30 + 1, (ind + 1) * 30))\n", (13375, 13435), False, 'import os\n'), ((15871, 15950), 'os.path.join', 'os.path.join', (['folder', "('%d-%dinterventions.png' % (ind * 30 + 1, (ind + 1) * 30))"], {}), "(folder, '%d-%dinterventions.png' % (ind * 30 + 1, (ind + 1) * 30))\n", (15883, 15950), False, 'import os\n'), ((18506, 18585), 'os.path.join', 'os.path.join', (['folder', "('%d-%dinterventions.png' % (ind * 30 + 1, (ind + 1) * 30))"], {}), "(folder, '%d-%dinterventions.png' % (ind * 30 + 1, (ind + 1) * 30))\n", (18518, 18585), False, 'import os\n'), ((20650, 20747), 'os.path.join', 'os.path.join', (['folder', "('%d-%dinterventions.%s' % (ind * 25 + 1, (ind + 1) * 25, image_format))"], {}), "(folder, '%d-%dinterventions.%s' % (ind * 25 + 1, (ind + 1) * \n 25, image_format))\n", (20662, 20747), False, 'import os\n'), ((5033, 5137), 'os.path.join', 'os.path.join', (['country_folder', "('%d-%dinterventions.%s' % (ind * 30 + 1, (ind + 1) * 30, image_format))"], {}), "(country_folder, '%d-%dinterventions.%s' % (ind * 30 + 1, (ind +\n 1) * 30, image_format))\n", (5045, 5137), False, 'import os\n'), 
((7664, 7755), 'os.path.join', 'os.path.join', (['country_folder', "('%d-%dinterventions.png' % (ind * 30 + 1, (ind + 1) * 30))"], {}), "(country_folder, '%d-%dinterventions.png' % (ind * 30 + 1, (ind +\n 1) * 30))\n", (7676, 7755), False, 'import os\n'), ((10347, 10451), 'os.path.join', 'os.path.join', (['country_folder', "('%d-%dinterventions.%s' % (ind * 30 + 1, (ind + 1) * 30, image_format))"], {}), "(country_folder, '%d-%dinterventions.%s' % (ind * 30 + 1, (ind +\n 1) * 30, image_format))\n", (10359, 10451), False, 'import os\n'), ((21889, 21930), 'numpy.sum', 'np.sum', (['geo_regions_vs_population'], {'axis': '(1)'}), '(geo_regions_vs_population, axis=1)\n', (21895, 21930), True, 'import numpy as np\n'), ((1548, 1574), 'numpy.sum', 'np.sum', (['topic_year'], {'axis': '(1)'}), '(topic_year, axis=1)\n', (1554, 1574), True, 'import numpy as np\n'), ((1405, 1431), 'numpy.sum', 'np.sum', (['topic_year'], {'axis': '(1)'}), '(topic_year, axis=1)\n', (1411, 1431), True, 'import numpy as np\n'), ((12267, 12294), 're.search', 're.search', (['"""#(\\\\d+)"""', 'topic'], {}), "('#(\\\\d+)', topic)\n", (12276, 12294), False, 'import re\n'), ((13810, 13837), 're.search', 're.search', (['"""#(\\\\d+)"""', 'topic'], {}), "('#(\\\\d+)', topic)\n", (13819, 13837), False, 'import re\n'), ((16315, 16342), 're.search', 're.search', (['"""#(\\\\d+)"""', 'topic'], {}), "('#(\\\\d+)', topic)\n", (16324, 16342), False, 'import re\n'), ((10892, 10919), 're.search', 're.search', (['"""#(\\\\d+)"""', 'topic'], {}), "('#(\\\\d+)', topic)\n", (10901, 10919), False, 'import re\n'), ((12503, 12530), 're.search', 're.search', (['"""#(\\\\d+)"""', 'topic'], {}), "('#(\\\\d+)', topic)\n", (12512, 12530), False, 'import re\n')] |
import pytest
from PySide2 import QtWidgets
import sys
from numpy import ones
from SciDataTool import DataTime, DataLinspace
class TestGUI(object):
    @classmethod
    def setup_class(cls):
        """Create the Qt application and build the plot UI exercised by the checks."""
        # Only one QApplication may exist per process, so reuse a running one.
        existing_app = QtWidgets.QApplication.instance()
        cls.app = existing_app if existing_app else QtWidgets.QApplication(sys.argv)

        # Single 1D field sampled on 11 time points: B_r(t) = t for t = 0..10 s
        time_axis = DataLinspace(name="time", unit="s", initial=0, final=10, number=11)
        values = ones(11)
        for idx in range(11):
            values[idx] *= idx
        field = DataTime(
            name="Airgap flux density",
            symbol="B_r",
            unit="T",
            axes=[time_axis],
            values=values,
        )
        cls.UI = field.plot(is_show_fig=False, is_create_appli=False)

    @pytest.mark.gui
    def check_combobox(self):
        """Check that the axis combobox is disabled when it holds a single item."""
        # With a single axis in the data, the first axis selector cannot change.
        assert (
            self.UI.w_plot_manager.w_axis_manager.w_axis_1.c_axis.isEnabled() == False
        )

    def check_axis_2(self):
        """Check that the second WAxisSelector is hidden for one-axis data."""
        assert self.UI.w_plot_manager.w_axis_manager.w_axis_2.isHidden() == True
if __name__ == "__main__":
    tester = TestGUI()
    tester.setup_class()
    # Verify the combobox is disabled when it holds only one item
    tester.check_combobox()
    # Verify the second axis selector is hidden
    tester.check_axis_2()
    print("Done")
| [
"SciDataTool.DataTime",
"numpy.ones",
"PySide2.QtWidgets.QApplication.instance",
"PySide2.QtWidgets.QApplication",
"SciDataTool.DataLinspace"
] | [((462, 529), 'SciDataTool.DataLinspace', 'DataLinspace', ([], {'name': '"""time"""', 'unit': '"""s"""', 'initial': '(0)', 'final': '(10)', 'number': '(11)'}), "(name='time', unit='s', initial=0, final=10, number=11)\n", (474, 529), False, 'from SciDataTool import DataTime, DataLinspace\n'), ((550, 558), 'numpy.ones', 'ones', (['(11)'], {}), '(11)\n', (554, 558), False, 'from numpy import ones\n'), ((639, 730), 'SciDataTool.DataTime', 'DataTime', ([], {'name': '"""Airgap flux density"""', 'symbol': '"""B_r"""', 'unit': '"""T"""', 'axes': '[X]', 'values': 'field_1d'}), "(name='Airgap flux density', symbol='B_r', unit='T', axes=[X],\n values=field_1d)\n", (647, 730), False, 'from SciDataTool import DataTime, DataLinspace\n'), ((284, 317), 'PySide2.QtWidgets.QApplication.instance', 'QtWidgets.QApplication.instance', ([], {}), '()\n', (315, 317), False, 'from PySide2 import QtWidgets\n'), ((342, 374), 'PySide2.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (364, 374), False, 'from PySide2 import QtWidgets\n'), ((413, 446), 'PySide2.QtWidgets.QApplication.instance', 'QtWidgets.QApplication.instance', ([], {}), '()\n', (444, 446), False, 'from PySide2 import QtWidgets\n')] |
###################################################################################################
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
###################################################################################################
import numpy
import tensorflow as tf
import numpy as np
from base.custom_lbfgs import lbfgs, Struct
def function_factory(model, loss_fcn, x, y, callback_fcn, epochs, x_test=None, y_test=None,
                     val_freq=1000, log_freq=1000, verbose=1):
    """
    A factory to create a function required by the L-BFGS implementation.

    :param tf.keras.Model model: an instance of `tf.keras.Model` or its subclasses
    :param object loss_fcn: a function with signature loss_value = loss(y_pred, y_true)
    :param tf.tensor x: input tensor of the training dataset
    :param tf.tensor y: output tensor of the training dataset
    :param object callback_fcn: callback function, which is called after each epoch
    :param int epochs: number of epochs
    :param tf.tensor x_test: input tensor of the test dataset, used to evaluate accuracy
    :param tf.tensor y_test: output tensor of the test dataset, used to evaluate accuracy
    :param int val_freq: iteration frequency forwarded to the callback for validation
    :param int log_freq: iteration frequency forwarded to the callback for logging
    :param int verbose: verbosity level forwarded to the callback
    :return: object: a function that has the signature of loss_value, gradients = f(model_parameters)
    """
    # obtain the shapes of all trainable parameters in the model
    shapes = tf.shape_n(model.trainable_variables)
    n_tensors = len(shapes)

    # we'll use tf.dynamic_stitch and tf.dynamic_partition later, so we need to
    # prepare required information first
    count = 0
    idx = []  # stitch indices
    part = []  # partition indices

    for i, shape in enumerate(shapes):
        # np.prod replaces numpy.product, which was deprecated and removed in
        # NumPy 2.0; int() makes the element count a plain Python integer.
        n = int(np.prod(shape))
        idx.append(tf.reshape(tf.range(count, count + n, dtype=tf.int32), shape))
        part.extend([i] * n)
        count += n

    part = tf.constant(part)

    @tf.function
    def assign_new_model_parameters(weights):
        """
        Updates the model's weights

        :param tf.Tensor weights: representing the model's weights
        """
        weights = tf.cast(weights, tf.float64)
        params = tf.dynamic_partition(weights, part, n_tensors)
        for i, (shape, param) in enumerate(zip(shapes, params)):
            model.trainable_variables[i].assign(tf.reshape(param, shape))

    @tf.function
    def train_step(weights):
        # use GradientTape so that we can calculate the gradient of loss w.r.t. parameters
        with tf.GradientTape() as tape:
            # update the parameters in the model
            assign_new_model_parameters(weights)
            # calculate the loss
            loss_value = loss_fcn(y, model(x, training=True))

        # calculate gradients and convert to 1D tf.Tensor
        grads = tape.gradient(loss_value, model.trainable_variables)
        grads = tf.dynamic_stitch(idx, grads)
        return loss_value, grads

    def f(weights):
        """
        Function that can be used in the L-BFGS implementation.
        This function is created by function_factory.

        :param tf.Tensor weights: representing the model's weights
        :return: tf.Tensor loss_value: current loss value, tf.Tensor grads: gradients w.r.t. the weights
        """
        loss_value, grads = train_step(weights)

        # print out iteration & loss
        f.iter += 1
        callback_fcn(f.iter, loss_value, epochs, x_test, y_test, val_freq=val_freq, log_freq=log_freq, verbose=verbose)

        # store loss value so we can retrieve later
        tf.py_function(f.history.append, inp=[loss_value], Tout=[])

        return loss_value, grads

    # store these information as members so we can use them outside the scope
    f.iter = 0
    f.idx = idx
    f.part = part
    f.shapes = shapes
    f.assign_new_model_parameters = assign_new_model_parameters
    f.history = []

    return f
class LBFGS:
    """
    Class used to represent the L-BFGS optimizer.
    """

    def minimize(self, model, loss_fcn, x, y, callback_fcn, epochs=2000, learning_rate=1.,
                 x_test=None, y_test=None, val_freq=1000, log_freq=1000, verbose=1):
        """
        Performs the Neural Network training with the L-BFGS implementation.

        :param tf.keras.Model model: an instance of `tf.keras.Model` or its subclasses
        :param object loss_fcn: a function with signature loss_value = loss(y_pred, y_true)
        :param tf.tensor x: input tensor of the training dataset
        :param tf.tensor y: output tensor of the training dataset
        :param object callback_fcn: callback function, which is called after each epoch
        :param int epochs: number of epochs
        :param float learning_rate: learning rate handed to the solver config
        :param tf.tensor x_test: input tensor of the test dataset, used to evaluate accuracy
        :param tf.tensor y_test: output tensor of the test dataset, used to evaluate accuracy
        """
        # Build the objective returning (loss, flat gradients) for the solver.
        objective = function_factory(model, loss_fcn, x, y, callback_fcn, epochs,
                                     x_test=x_test, y_test=y_test,
                                     val_freq=val_freq, log_freq=log_freq, verbose=verbose)

        # Flatten the current model weights into a single 1D tensor.
        initial_weights = tf.dynamic_stitch(objective.idx, model.trainable_variables)

        # Solver configuration (attribute names are dictated by custom_lbfgs).
        config = Struct()
        config.learningRate = learning_rate
        config.maxIter = epochs
        config.nCorrection = 50
        config.tolFun = 1.0 * np.finfo(float).eps

        lbfgs(objective, initial_weights, config, Struct(), True, lambda x, y, z: None)
| [
"numpy.product",
"tensorflow.shape_n",
"tensorflow.py_function",
"tensorflow.dynamic_stitch",
"tensorflow.GradientTape",
"tensorflow.range",
"tensorflow.constant",
"base.custom_lbfgs.Struct",
"tensorflow.dynamic_partition",
"tensorflow.reshape",
"numpy.finfo",
"tensorflow.cast"
] | [((2416, 2453), 'tensorflow.shape_n', 'tf.shape_n', (['model.trainable_variables'], {}), '(model.trainable_variables)\n', (2426, 2453), True, 'import tensorflow as tf\n'), ((2899, 2916), 'tensorflow.constant', 'tf.constant', (['part'], {}), '(part)\n', (2910, 2916), True, 'import tensorflow as tf\n'), ((2736, 2756), 'numpy.product', 'numpy.product', (['shape'], {}), '(shape)\n', (2749, 2756), False, 'import numpy\n'), ((3128, 3156), 'tensorflow.cast', 'tf.cast', (['weights', 'tf.float64'], {}), '(weights, tf.float64)\n', (3135, 3156), True, 'import tensorflow as tf\n'), ((3175, 3221), 'tensorflow.dynamic_partition', 'tf.dynamic_partition', (['weights', 'part', 'n_tensors'], {}), '(weights, part, n_tensors)\n', (3195, 3221), True, 'import tensorflow as tf\n'), ((3876, 3905), 'tensorflow.dynamic_stitch', 'tf.dynamic_stitch', (['idx', 'grads'], {}), '(idx, grads)\n', (3893, 3905), True, 'import tensorflow as tf\n'), ((4563, 4622), 'tensorflow.py_function', 'tf.py_function', (['f.history.append'], {'inp': '[loss_value]', 'Tout': '[]'}), '(f.history.append, inp=[loss_value], Tout=[])\n', (4577, 4622), True, 'import tensorflow as tf\n'), ((6171, 6225), 'tensorflow.dynamic_stitch', 'tf.dynamic_stitch', (['func.idx', 'model.trainable_variables'], {}), '(func.idx, model.trainable_variables)\n', (6188, 6225), True, 'import tensorflow as tf\n'), ((6274, 6282), 'base.custom_lbfgs.Struct', 'Struct', ([], {}), '()\n', (6280, 6282), False, 'from base.custom_lbfgs import lbfgs, Struct\n'), ((3512, 3529), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3527, 3529), True, 'import tensorflow as tf\n'), ((6501, 6509), 'base.custom_lbfgs.Struct', 'Struct', ([], {}), '()\n', (6507, 6509), False, 'from base.custom_lbfgs import lbfgs, Struct\n'), ((2787, 2829), 'tensorflow.range', 'tf.range', (['count', '(count + n)'], {'dtype': 'tf.int32'}), '(count, count + n, dtype=tf.int32)\n', (2795, 2829), True, 'import tensorflow as tf\n'), ((3335, 3359), 'tensorflow.reshape', 
'tf.reshape', (['param', 'shape'], {}), '(param, shape)\n', (3345, 3359), True, 'import tensorflow as tf\n'), ((6436, 6451), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (6444, 6451), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from loguru import logger
def count_column_values_within_ranges(df_inp, column_name, bins=None):
"""
Count the number of values of a specific column according to the define ranges.
:param pd.DataFrame df_inp: pandas dataframe
:param column_name: column name to be counted
:param bins: list of values to be used as the ranges
"""
if bins is None:
bins = np.arange(0, 7000, 100)
all_values = df_inp[column_name].values
# TODO: check type then convert
all_values = all_values.astype(np.float)
try:
df_inp.loc[:, "count"] = pd.cut(df_inp[column_name].astype(float), bins)
except Exception as e:
print(e)
print("pd.cut produces", pd.cut(df_inp[column_name].astype(float), bins))
raise Exception("Can not set cut to column")
df_counting = df_inp["count"].value_counts().sort_index()
df_counting = df_counting.to_frame().reset_index()
df_counting.columns = ["prices", "count"]
df_counting.loc[:, "percent"] = df_counting["count"] / df_counting["count"].sum()
couting_data = {
"price": bins[:-1],
"count": df_counting["count"].values,
"percent": df_counting["percent"].values,
"all_prices": all_values,
}
return couting_data
def count_column_values_within_ranges_two_levels_deep(
    df_inp,
    first_groupby_column_name,
    second_groupby_column_name,
    count_column_name,
    bins=None,
):
    """
    Like :func:`count_column_values_within_ranges`, but the dataframe is first
    grouped by two columns and the counting is performed per sub-group.

    :return: nested dict ``{first_key: {second_key: counting_data}}``
    """
    if bins is None:
        logger.warning("No bins specified, will use a default range 0-10000")
        bins = np.arange(0, 10000, 100)
    return_data = {}
    for outer_key, outer_df in df_inp.groupby(first_groupby_column_name):
        inner_counts = {}
        inner_groups = outer_df.groupby(second_groupby_column_name)
        for inner_key, inner_df in inner_groups:
            inner_counts[inner_key] = count_column_values_within_ranges(
                inner_df, count_column_name, bins
            )
        return_data[outer_key] = inner_counts
    return return_data
| [
"loguru.logger.warning",
"numpy.arange"
] | [((432, 455), 'numpy.arange', 'np.arange', (['(0)', '(7000)', '(100)'], {}), '(0, 7000, 100)\n', (441, 455), True, 'import numpy as np\n'), ((1602, 1671), 'loguru.logger.warning', 'logger.warning', (['"""No bins specified, will use a default range 0-10000"""'], {}), "('No bins specified, will use a default range 0-10000')\n", (1616, 1671), False, 'from loguru import logger\n'), ((1687, 1711), 'numpy.arange', 'np.arange', (['(0)', '(10000)', '(100)'], {}), '(0, 10000, 100)\n', (1696, 1711), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Columbia Engineering
# MECS 4603 - Fall 2017
import math
import numpy
import time
import rospy
import random
from std_msgs.msg import Header
from geometry_msgs.msg import Pose2D
from state_estimator.msg import RobotPose
from state_estimator.msg import SensorData
from state_estimator.msg import Landmark
from state_estimator.msg import LandmarkReading
from state_estimator.msg import LandmarkSet
def create_landmark(x, y):
    """Build a Landmark message located at world coordinates (x, y)."""
    landmark = Landmark()
    landmark.x = x
    landmark.y = y
    return landmark
class Robot(object):
    """Simulated planar mobile robot used to exercise the state estimator.

    Integrates a noisy unicycle motion model at a fixed rate, publishes the
    ground-truth pose, noisy range/bearing readings of nearby landmarks and
    the (static) landmark map, and re-randomizes the velocity command every
    few seconds.
    """
    def __init__(self):
        # Ground-truth pose in the world frame.
        self.x = 0.0
        self.y = 0.0
        self.theta = 0.0
        # Current velocity command: translational and rotational.
        self.vt = 0.0
        self.vrot = 0.0
        # Integration period (s) of the motion model.
        self.step_size = 0.01
        # Std-devs of the Gaussian process noise added to the motion model.
        self.model_noise_trans = 0.0025
        self.model_noise_rot = 0.005
        # Std-devs of the Gaussian noise added to the sensor readings.
        self.sensor_noise_range = 0.1
        self.sensor_noise_bearing = 0.05
        # Noise is only injected once startup (timers + settle sleep) is done.
        self.start_flag = False
        # Fixed landmark map.
        self.landmarks = []
        self.landmarks.append(create_landmark(5,5))
        self.landmarks.append(create_landmark(5,6))
        self.landmarks.append(create_landmark(6,5))
        self.landmarks.append(create_landmark(-5,5))
        self.landmarks.append(create_landmark(-5,6))
        self.landmarks.append(create_landmark(-6,5))
        self.landmarks.append(create_landmark(5,-5))
        self.landmarks.append(create_landmark(5,-6))
        self.landmarks.append(create_landmark(6,-5))
        self.landmarks.append(create_landmark(-5,-5))
        self.landmarks.append(create_landmark(-5,-6))
        self.landmarks.append(create_landmark(-6,-5))
        self.landmarks.append(create_landmark(5,0))
        self.landmarks.append(create_landmark(-5,0))
        self.landmarks.append(create_landmark(0,5))
        self.landmarks.append(create_landmark(0,-5))
        self.landmarks.append(create_landmark(1,0))
        # Landmarks farther than this are not reported by the sensor.
        self.sensing_range = 2.5
        self.pub_pose = rospy.Publisher("/robot_pose", RobotPose, queue_size=1)
        self.pub_sens = rospy.Publisher("/sensor_data", SensorData, queue_size=1)
        self.pub_landmarks = rospy.Publisher("/landmarks", LandmarkSet, queue_size=1)
        # Periodic callbacks: landmark map at 1 Hz, motion integration at the
        # step rate; after a settling delay, start randomizing velocities.
        rospy.Timer(rospy.Duration(1.0), self.publish_landmarks)
        rospy.Timer(rospy.Duration(self.step_size), self.step)
        rospy.sleep(5)
        rospy.Timer(rospy.Duration(3.0), self.rand_vel)
        self.start_flag = True
    def get_sensor_data(self):
        """Build a SensorData message: the commanded velocities plus a noisy
        range/bearing reading for every landmark inside sensing range."""
        sens = SensorData()
        sens.vel_trans = self.vt
        sens.vel_ang = self.vrot
        for i in range(0,len(self.landmarks)):
            # Euclidean distance from the robot to landmark i.
            r = math.sqrt( (self.landmarks[i].x-self.x)*(self.landmarks[i].x-self.x) +
                        (self.landmarks[i].y-self.y)*(self.landmarks[i].y-self.y) )
            if r < self.sensing_range:
                reading = LandmarkReading()
                reading.landmark = self.landmarks[i]
                reading.range = r
                # Bearing is expressed relative to the robot's heading.
                reading.bearing = math.atan2( (self.landmarks[i].y - self.y),
                                            (self.landmarks[i].x - self.x)) - self.theta
                if self.start_flag:
                    reading.range += numpy.random.normal(0.0, self.sensor_noise_range)
                    reading.bearing += numpy.random.normal(0.0, self.sensor_noise_bearing)
                sens.readings.append(reading)
        return sens
    def step(self, event):
        """Timer callback: integrate the motion model one step, add process
        noise, and publish the new pose and the sensor readings."""
        # Forward-Euler integration of the unicycle model.
        self.x = self.x + self.step_size * self.vt * math.cos(self.theta)
        self.y = self.y + self.step_size * self.vt * math.sin(self.theta)
        self.theta = self.theta + self.step_size * self.vrot
        if self.start_flag:
            self.x += numpy.random.normal(0.0, self.model_noise_trans)
            self.y += numpy.random.normal(0.0, self.model_noise_trans)
            self.theta += numpy.random.normal(0.0, self.model_noise_rot)
        # NOTE: this local shadows the time() function brought in via
        # "from time import *" inside this method only; both outgoing
        # messages get the same stamp.
        time = rospy.Time.now()
        pose_msg = RobotPose()
        pose_msg.header.stamp = time
        pose_msg.pose.x = self.x
        pose_msg.pose.y = self.y
        pose_msg.pose.theta = self.theta
        self.pub_pose.publish(pose_msg)
        sensor_msg = self.get_sensor_data()
        sensor_msg.header.stamp = time
        self.pub_sens.publish(sensor_msg)
    def publish_landmarks(self,event=None):
        """Timer callback: publish the static landmark map."""
        msg = LandmarkSet()
        msg.landmarks = self.landmarks
        self.pub_landmarks.publish(msg)
    def rand_vel(self, event):
        """Timer callback: pick a new random velocity command, keeping the
        robot roughly inside the +/-6 box around the origin."""
        r = math.sqrt(self.x*self.x + self.y*self.y)
        if math.fabs(self.x) < 6 and math.fabs(self.y) < 6:
            # Inside the box: random forward speed with a gentle random turn.
            self.vt = 0.5 + random.random() * 1.0
            self.vrot = (-math.pi + random.random() * 2 * math.pi) / 5
        else:
            # Outside the box: (x*cos(theta)+y*sin(theta))/r is the cosine of
            # the angle between the heading and the outward radial direction;
            # < -0.2 means we are already pointing back toward the origin.
            if ((self.x * math.cos(self.theta) + self.y * math.sin(self.theta)) / r < -0.2):
                self.vt = 0.5 + random.random() * 1.0
                self.vrot = 0.0
            else:
                # Rotate in place until the heading points inward.
                self.vt = 0.0
                self.vrot = (-math.pi + random.random() * 2 * math.pi) / 2
def _run():
    """Start the simulator node and block until ROS shutdown."""
    rospy.init_node('mobile_robot_sim', anonymous=True)
    robot = Robot()
    rospy.spin()


if __name__ == '__main__':
    _run()
| [
"numpy.random.normal",
"state_estimator.msg.Landmark",
"state_estimator.msg.RobotPose",
"rospy.init_node",
"math.sqrt",
"state_estimator.msg.SensorData",
"math.sin",
"rospy.Time.now",
"math.cos",
"state_estimator.msg.LandmarkSet",
"math.fabs",
"rospy.spin",
"math.atan2",
"state_estimator.m... | [((458, 468), 'state_estimator.msg.Landmark', 'Landmark', ([], {}), '()\n', (466, 468), False, 'from state_estimator.msg import Landmark\n'), ((4985, 5036), 'rospy.init_node', 'rospy.init_node', (['"""mobile_robot_sim"""'], {'anonymous': '(True)'}), "('mobile_robot_sim', anonymous=True)\n", (5000, 5036), False, 'import rospy\n'), ((5061, 5073), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (5071, 5073), False, 'import rospy\n'), ((1913, 1968), 'rospy.Publisher', 'rospy.Publisher', (['"""/robot_pose"""', 'RobotPose'], {'queue_size': '(1)'}), "('/robot_pose', RobotPose, queue_size=1)\n", (1928, 1968), False, 'import rospy\n'), ((1993, 2050), 'rospy.Publisher', 'rospy.Publisher', (['"""/sensor_data"""', 'SensorData'], {'queue_size': '(1)'}), "('/sensor_data', SensorData, queue_size=1)\n", (2008, 2050), False, 'import rospy\n'), ((2080, 2136), 'rospy.Publisher', 'rospy.Publisher', (['"""/landmarks"""', 'LandmarkSet'], {'queue_size': '(1)'}), "('/landmarks', LandmarkSet, queue_size=1)\n", (2095, 2136), False, 'import rospy\n'), ((2275, 2289), 'rospy.sleep', 'rospy.sleep', (['(5)'], {}), '(5)\n', (2286, 2289), False, 'import rospy\n'), ((2424, 2436), 'state_estimator.msg.SensorData', 'SensorData', ([], {}), '()\n', (2434, 2436), False, 'from state_estimator.msg import SensorData\n'), ((3853, 3869), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (3867, 3869), False, 'import rospy\n'), ((3889, 3900), 'state_estimator.msg.RobotPose', 'RobotPose', ([], {}), '()\n', (3898, 3900), False, 'from state_estimator.msg import RobotPose\n'), ((4270, 4283), 'state_estimator.msg.LandmarkSet', 'LandmarkSet', ([], {}), '()\n', (4281, 4283), False, 'from state_estimator.msg import LandmarkSet\n'), ((4407, 4451), 'math.sqrt', 'math.sqrt', (['(self.x * self.x + self.y * self.y)'], {}), '(self.x * self.x + self.y * self.y)\n', (4416, 4451), False, 'import math\n'), ((2158, 2177), 'rospy.Duration', 'rospy.Duration', (['(1.0)'], {}), '(1.0)\n', (2172, 2177), 
False, 'import rospy\n'), ((2223, 2253), 'rospy.Duration', 'rospy.Duration', (['self.step_size'], {}), '(self.step_size)\n', (2237, 2253), False, 'import rospy\n'), ((2310, 2329), 'rospy.Duration', 'rospy.Duration', (['(3.0)'], {}), '(3.0)\n', (2324, 2329), False, 'import rospy\n'), ((2575, 2719), 'math.sqrt', 'math.sqrt', (['((self.landmarks[i].x - self.x) * (self.landmarks[i].x - self.x) + (self.\n landmarks[i].y - self.y) * (self.landmarks[i].y - self.y))'], {}), '((self.landmarks[i].x - self.x) * (self.landmarks[i].x - self.x) +\n (self.landmarks[i].y - self.y) * (self.landmarks[i].y - self.y))\n', (2584, 2719), False, 'import math\n'), ((3644, 3692), 'numpy.random.normal', 'numpy.random.normal', (['(0.0)', 'self.model_noise_trans'], {}), '(0.0, self.model_noise_trans)\n', (3663, 3692), False, 'import numpy\n'), ((3715, 3763), 'numpy.random.normal', 'numpy.random.normal', (['(0.0)', 'self.model_noise_trans'], {}), '(0.0, self.model_noise_trans)\n', (3734, 3763), False, 'import numpy\n'), ((3790, 3836), 'numpy.random.normal', 'numpy.random.normal', (['(0.0)', 'self.model_noise_rot'], {}), '(0.0, self.model_noise_rot)\n', (3809, 3836), False, 'import numpy\n'), ((2799, 2816), 'state_estimator.msg.LandmarkReading', 'LandmarkReading', ([], {}), '()\n', (2814, 2816), False, 'from state_estimator.msg import LandmarkReading\n'), ((3437, 3457), 'math.cos', 'math.cos', (['self.theta'], {}), '(self.theta)\n', (3445, 3457), False, 'import math\n'), ((3511, 3531), 'math.sin', 'math.sin', (['self.theta'], {}), '(self.theta)\n', (3519, 3531), False, 'import math\n'), ((4459, 4476), 'math.fabs', 'math.fabs', (['self.x'], {}), '(self.x)\n', (4468, 4476), False, 'import math\n'), ((4485, 4502), 'math.fabs', 'math.fabs', (['self.y'], {}), '(self.y)\n', (4494, 4502), False, 'import math\n'), ((2938, 3008), 'math.atan2', 'math.atan2', (['(self.landmarks[i].y - self.y)', '(self.landmarks[i].x - self.x)'], {}), '(self.landmarks[i].y - self.y, self.landmarks[i].x - self.x)\n', (2948, 
3008), False, 'import math\n'), ((3147, 3196), 'numpy.random.normal', 'numpy.random.normal', (['(0.0)', 'self.sensor_noise_range'], {}), '(0.0, self.sensor_noise_range)\n', (3166, 3196), False, 'import numpy\n'), ((3236, 3287), 'numpy.random.normal', 'numpy.random.normal', (['(0.0)', 'self.sensor_noise_bearing'], {}), '(0.0, self.sensor_noise_bearing)\n', (3255, 3287), False, 'import numpy\n'), ((4536, 4551), 'random.random', 'random.random', ([], {}), '()\n', (4549, 4551), False, 'import random\n'), ((4768, 4783), 'random.random', 'random.random', ([], {}), '()\n', (4781, 4783), False, 'import random\n'), ((4594, 4609), 'random.random', 'random.random', ([], {}), '()\n', (4607, 4609), False, 'import random\n'), ((4669, 4689), 'math.cos', 'math.cos', (['self.theta'], {}), '(self.theta)\n', (4677, 4689), False, 'import math\n'), ((4701, 4721), 'math.sin', 'math.sin', (['self.theta'], {}), '(self.theta)\n', (4709, 4721), False, 'import math\n'), ((4910, 4925), 'random.random', 'random.random', ([], {}), '()\n', (4923, 4925), False, 'import random\n')] |
import cv2
import os
import numpy as np
import traceback
from time import *
import winsound
import pyttsx3
# s1, s2: the names of the people to be recognized
subjects = ["stranger", "hfp", "lc"]
def menu():
    """Print the console menu of the face recognition system."""
    banner = [
        "*" * 10 + "人脸识别系统" + "*" * 10,
        "*" * 10 + "菜单" + "*" * 10,
        "*" * 5 + "1、进行检测" + "*" * 8,
        "*" * 5 + "2、输入图片检测" + "*" * 5,
        "*" * 5 + "3、录入人脸信息" + "*" * 5,
        "*" * 5 + "4、重新训练预测" + "*" * 5,
        "*" * 5 + "5、退出" + "*" * 12,
        "*" * 30,
    ]
    for line in banner:
        print(line)
def catchPICFromVideo(addr):
    """Capture up to 200 face snapshots from the webcam and save them under
    the directory ``addr`` for later training."""
    cap = cv2.VideoCapture(0)
    # Tell OpenCV to use the LBP face-detection cascade.
    classfier = cv2.CascadeClassifier(r'G:\Opencv 4.5.3\opencv\build\etc\lbpcascades\lbpcascade_frontalface_improved'
                                      r'.xml')
    rect_color = (0, 255, 0)
    num = 1
    while cap.isOpened():
        _, frame = cap.read()
        grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert the current frame to grayscale
        faceRects = classfier.detectMultiScale(grey, scaleFactor=1.2, minNeighbors=20, flags=4)
        print("11", faceRects)
        if len(faceRects) > 0:  # > 0 means at least one face was detected
            for faceRect in faceRects:  # frame each detected face separately
                print("22",faceRect)
                x, y, w, h = faceRect
                # print(x, y, w, h)
                # Save the current frame as an image file.
                # NOTE(review): the name ends with a space and mixes '\' with
                # '/' separators -- confirm this is intended.
                img_name = r'%s\%d.jpg ' % (addr + "/", num)
                print("33",img_name)
                # Crop with a margin around the detected face rectangle.
                image = frame[y - 100: y + h + 40, x - 60: x + w + 80]
                cv2.imwrite(img_name, image)
                sleep(0.05)
                cv2.rectangle(frame, (x - 20, y - 20), (x + w + 20, y + h + 20), rect_color, 2)
                # Show how many face images have been captured so far.
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(frame, 'num:%d' % num, (x + 30, y + 30), font, 1, (255, 0, 255), 4)
                num += 1
                # Stop the inner loop once the maximum number of images is reached.
                if num > 200:
                    break
            # Stop capturing once the maximum number of images is reached.
            if num > 200:  # to_noNeed
                break
        # Display the annotated frame.
        cv2.imshow("photo", frame)
        c = cv2.waitKey(10)
        if c == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
def detect_face(img):
    """Locate one face in a BGR image with OpenCV's LBP cascade.

    Returns a tuple ``(gray_roi, rect)`` for the first detection, or
    ``None`` when no face is found.
    """
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # LBP cascade: faster but slightly less accurate than the Haar variant.
    cascade = cv2.CascadeClassifier(r'G:\Opencv 4.5.3\opencv\build\etc\lbpcascades\lbpcascade_frontalface.xml')  # todo
    detections = cascade.detectMultiScale(gray_img, scaleFactor=1.2, minNeighbors=5)
    if len(detections) == 0:
        return None
    # Only the first detection is used; assume a single face per image.
    (x, y, w, h) = detections[0]
    # NOTE(review): the slice uses w for the row extent -- works because
    # detections are square; confirm.
    return gray_img[y:y + w, x:x + h], detections[0]
def prepare_training_data(data_folder_path):
    """Read all training images, detect a face in each, and return two
    equal-length lists: grayscale face regions and integer person labels.

    The folder is expected to contain sub-directories named
    ``tupian<label>`` (one per person), each holding image files.

    :param str data_folder_path: root directory of the training data
    :return: tuple ``(faces, labels)``
    """
    dirs = os.listdir(data_folder_path)
    faces = []
    labels = []
    for dir_name in dirs:
        if not dir_name.startswith("tupian"):
            continue
        # The directory name encodes the numeric label, e.g. "tupian2" -> 2.
        label = int(dir_name.replace("tupian", ""))
        subject_dir_path = data_folder_path + "/" + dir_name
        for image_name in os.listdir(subject_dir_path):
            if image_name.startswith("."):
                continue
            image_path = subject_dir_path + "/" + image_name
            image = cv2.imread(image_path)
            cv2.waitKey(100)
            try:
                result = detect_face(image)
            except Exception:
                # Skip images OpenCV cannot process instead of aborting the
                # whole run.  The original bare "except:" also swallowed
                # KeyboardInterrupt; "except Exception" does not.
                continue
            # detect_face returns None when no face is found.
            if result is not None:
                face, rect = result
                faces.append(face)
                labels.append(label)
    cv2.waitKey(1)
    cv2.destroyAllWindows()
    return faces, labels
def draw_rectangle(img, rect):
    """Draw a green bounding box given ``rect = (x, y, w, h)``."""
    x, y, w, h = rect
    top_left = (x, y)
    bottom_right = (x + w, y + h)
    cv2.rectangle(img, top_left, bottom_right, (0, 255, 0), 2)
def draw_text(img, text, x, y):
    """Render ``text`` in green at position (x, y) on ``img``."""
    origin = (x, y)
    color = (0, 255, 0)
    cv2.putText(img, text, origin, cv2.FONT_HERSHEY_PLAIN, 1.5, color, 3)
def predict(face_recognizer, subjects):
    """Recognize the person in a user-supplied image and display the result
    with a labelled bounding box drawn around the detected face."""
    # Load the previously trained model.
    face_recognizer.read(r'./models/train.yml')
    addr = input("请输入图片地址:")
    # Read the image from disk.
    test_img = cv2.imread(addr)
    # Work on a copy so the original image is left untouched.
    test_img = test_img.copy()
    try:
        # Environmental factors (e.g. no detectable face) can raise here.
        face, rect = detect_face(test_img)
    except Exception as e:
        print("错误信息为:", e)
        traceback.print_exc()
        print('traceback.format_exc():\n%s' % traceback.format_exc())
    else:
        # Predict -> (label, confidence).
        label = face_recognizer.predict(face)
        # Map the numeric label back to a person name.
        label_text = subjects[label[0]]
        # Draw the bounding box.
        draw_rectangle(test_img, rect)
        # Write the predicted name just above the box.
        draw_text(test_img, label_text, rect[0], rect[1] - 5)
        # Show the annotated image.
        cv2.imshow("predict", test_img)
        # Press 'q' to close the window.
        key = cv2.waitKey(0)
        if key == ord("q"):
            cv2.destroyAllWindows()
def realTimeIdentification(face_recognizer, subjects):
    """Real-time recognition from the webcam: label known faces, and beep +
    speak a warning when an unknown person (high prediction distance) stays
    in view; the annotated stream is also written to ./output.avi."""
    print("进行实时预测")
    face_recognizer.read(r'./models/train.yml')
    cap = cv2.VideoCapture(0)
    # Video recording -- path to save, fourcc: codec, fps: frame rate,
    # frameSize: frame dimensions, isColor: color vs grayscale.
    fourcc = cv2.VideoWriter_fourcc('I', '4', '2', '0')
    out = cv2.VideoWriter(r'./output.avi', fourcc, 20.0, (640, 480), True)
    # Detection/recognition loop.
    start_time = time()
    while True:
        _, frame = cap.read()
        sleep(0.01)
        try:
            face, rect = detect_face(frame)
            label = face_recognizer.predict(face)
        except Exception as e:
            # No face found (detect_face returned None) or OpenCV error.
            print("错误信息为:", e)
            traceback.print_exc()
            print('traceback.format_exc():\n%s'%traceback.format_exc())
            cv2.imshow('camera', frame)
        else:
            print(label)
            # label = (predicted index, confidence distance); a large
            # distance (> 80) is treated as an unknown person.
            if label[1] > 80:
                engine = pyttsx3.init()
                end_time = time()
                draw_rectangle(frame, rect)
                draw_text(frame, subjects[0], rect[0], rect[1] - 5)
                out.write(frame)
                run_time = end_time - start_time
                # Only alarm if the stranger persisted for > 10 seconds.
                if frame is not None and run_time > 10:
                    winsound.Beep(1440, 1500)  # motherboard beeper
                    engine.say("警告,警告,有陌生人靠近")
                    engine.runAndWait()
                    start_time = end_time
            else:
                # Known person: draw box and name.
                label_text = subjects[label[0]]
                draw_rectangle(frame, rect)
                draw_text(frame, label_text, rect[0], rect[1] - 5)
            cv2.imshow('camera', frame)
        # Wait 10 ms for a key press.
        k = cv2.waitKey(10)
        # 'q' exits the loop.
        if k & 0xFF == ord('q'):
            break
    # Release the camera/recorder and close all windows.
    out.release()
    cap.release()
    cv2.destroyAllWindows()
def train(face_recognizer):
    """Prepare the training data, train the recognizer, and persist the
    model to ./models/train.yml."""
    print("数据准备")
    face_samples, face_labels = prepare_training_data(r"./train_data")
    print("准备完成")
    print("Total faces: ", len(face_samples))
    print("Total labels: ", len(face_labels))
    print("开始训练")
    # Fit the recognizer on all collected face samples.
    face_recognizer.train(face_samples, np.array(face_labels))
    # Save the trained model so later runs can simply load it.
    face_recognizer.save(r"./models/train.yml")
    print("训练完成")
def InputInformation(face_recognizer, subjects):
    """Capture face images for a new person and retrain the model.

    Appends the entered name to ``subjects`` and stores the webcam snapshots
    in a fresh ./train_data/tupian<N+1> directory before retraining.
    """
    name = input("请输入录入的名字:")
    subjects.append(name)
    num = len(os.listdir(r"./train_data/"))
    new_dir = r"./train_data/tupian" + str(num + 1)
    if not os.path.exists(new_dir):
        os.makedirs(new_dir)
    print("请耐心等待一会")  # takes roughly 80 seconds
    catchPICFromVideo(new_dir)
    train(face_recognizer)
def main():
    """Interactive dispatch loop: read a menu choice and run the action."""
    # LBPH (local binary pattern histograms) face recognizer.
    face_recognizer = cv2.face.LBPHFaceRecognizer_create()
    while True:
        # SECURITY: the original used eval(input(...)), which executes
        # arbitrary user input; int() parses only digits and lets us
        # re-prompt on anything else instead of crashing.
        try:
            num = int(input("请输入对应的数字:"))
        except ValueError:
            continue
        if num == 1:
            realTimeIdentification(face_recognizer, subjects)
        elif num == 2:
            predict(face_recognizer, subjects)
        elif num == 3:
            InputInformation(face_recognizer, subjects)
            realTimeIdentification(face_recognizer, subjects)
        elif num == 4:
            train(face_recognizer)
            predict(face_recognizer, subjects)
        elif num == 5:
            print("欢迎下次再来!")
            break
if __name__ == "__main__":
menu()
main()
# EigenFaces face recognizer (alternative):
# face_recognizer = cv2.face.EigenFaceRecognizer_create()
# FisherFaces face recognizer (alternative):
# face_recognizer = cv2.face.FisherFaceRecognizer_create()
| [
"cv2.rectangle",
"cv2.face.LBPHFaceRecognizer_create",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.CascadeClassifier",
"os.listdir",
"cv2.VideoWriter",
"cv2.VideoWriter_fourcc",
"traceback.print_exc",
"cv2.waitKey",
"cv2.putText",
"cv2.cvtColor",
"cv2.imread",
"cv2.imwrite... | [((550, 569), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (566, 569), False, 'import cv2\n'), ((612, 733), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""G:\\\\Opencv 4.5.3\\\\opencv\\\\build\\\\etc\\\\lbpcascades\\\\lbpcascade_frontalface_improved.xml"""'], {}), "(\n 'G:\\\\Opencv 4.5.3\\\\opencv\\\\build\\\\etc\\\\lbpcascades\\\\lbpcascade_frontalface_improved.xml'\n )\n", (633, 733), False, 'import cv2\n'), ((2190, 2213), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2211, 2213), False, 'import cv2\n'), ((2294, 2331), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (2306, 2331), False, 'import cv2\n'), ((2389, 2501), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""G:\\\\Opencv 4.5.3\\\\opencv\\\\build\\\\etc\\\\lbpcascades\\\\lbpcascade_frontalface.xml"""'], {}), "(\n 'G:\\\\Opencv 4.5.3\\\\opencv\\\\build\\\\etc\\\\lbpcascades\\\\lbpcascade_frontalface.xml'\n )\n", (2410, 2501), False, 'import cv2\n'), ((2846, 2874), 'os.listdir', 'os.listdir', (['data_folder_path'], {}), '(data_folder_path)\n', (2856, 2874), False, 'import os\n'), ((3846, 3860), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3857, 3860), False, 'import cv2\n'), ((3866, 3889), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3887, 3889), False, 'import cv2\n'), ((3998, 4056), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (4011, 4056), False, 'import cv2\n'), ((4115, 4190), 'cv2.putText', 'cv2.putText', (['img', 'text', '(x, y)', 'cv2.FONT_HERSHEY_PLAIN', '(1.5)', '(0, 255, 0)', '(3)'], {}), '(img, text, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 3)\n', (4126, 4190), False, 'import cv2\n'), ((4406, 4422), 'cv2.imread', 'cv2.imread', (['addr'], {}), '(addr)\n', (4416, 4422), False, 'import cv2\n'), ((5341, 5360), 'cv2.VideoCapture', 
'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (5357, 5360), False, 'import cv2\n'), ((5468, 5510), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['"""I"""', '"""4"""', '"""2"""', '"""0"""'], {}), "('I', '4', '2', '0')\n", (5490, 5510), False, 'import cv2\n'), ((5522, 5585), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""./output.avi"""', 'fourcc', '(20.0)', '(640, 480)', '(True)'], {}), "('./output.avi', fourcc, 20.0, (640, 480), True)\n", (5537, 5585), False, 'import cv2\n'), ((7026, 7049), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7047, 7049), False, 'import cv2\n'), ((7897, 7933), 'cv2.face.LBPHFaceRecognizer_create', 'cv2.face.LBPHFaceRecognizer_create', ([], {}), '()\n', (7931, 7933), False, 'import cv2\n'), ((879, 918), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (891, 918), False, 'import cv2\n'), ((2062, 2088), 'cv2.imshow', 'cv2.imshow', (['"""photo"""', 'frame'], {}), "('photo', frame)\n", (2072, 2088), False, 'import cv2\n'), ((2102, 2117), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (2113, 2117), False, 'import cv2\n'), ((3177, 3205), 'os.listdir', 'os.listdir', (['subject_dir_path'], {}), '(subject_dir_path)\n', (3187, 3205), False, 'import os\n'), ((5034, 5065), 'cv2.imshow', 'cv2.imshow', (['"""predict"""', 'test_img'], {}), "('predict', test_img)\n", (5044, 5065), False, 'import cv2\n'), ((5103, 5117), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (5114, 5117), False, 'import cv2\n'), ((6803, 6830), 'cv2.imshow', 'cv2.imshow', (['"""camera"""', 'frame'], {}), "('camera', frame)\n", (6813, 6830), False, 'import cv2\n'), ((6870, 6885), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (6881, 6885), False, 'import cv2\n'), ((7347, 7363), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (7355, 7363), True, 'import numpy as np\n'), ((7576, 7603), 'os.listdir', 'os.listdir', (['"""./train_data/"""'], {}), "('./train_data/')\n", 
(7586, 7603), False, 'import os\n'), ((3435, 3457), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (3445, 3457), False, 'import cv2\n'), ((3513, 3529), 'cv2.waitKey', 'cv2.waitKey', (['(100)'], {}), '(100)\n', (3524, 3529), False, 'import cv2\n'), ((4632, 4653), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (4651, 4653), False, 'import traceback\n'), ((5160, 5183), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5181, 5183), False, 'import cv2\n'), ((1495, 1523), 'cv2.imwrite', 'cv2.imwrite', (['img_name', 'image'], {}), '(img_name, image)\n', (1506, 1523), False, 'import cv2\n'), ((1570, 1649), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x - 20, y - 20)', '(x + w + 20, y + h + 20)', 'rect_color', '(2)'], {}), '(frame, (x - 20, y - 20), (x + w + 20, y + h + 20), rect_color, 2)\n', (1583, 1649), False, 'import cv2\n'), ((1751, 1830), 'cv2.putText', 'cv2.putText', (['frame', "('num:%d' % num)", '(x + 30, y + 30)', 'font', '(1)', '(255, 0, 255)', '(4)'], {}), "(frame, 'num:%d' % num, (x + 30, y + 30), font, 1, (255, 0, 255), 4)\n", (1762, 1830), False, 'import cv2\n'), ((5884, 5905), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (5903, 5905), False, 'import traceback\n'), ((5992, 6019), 'cv2.imshow', 'cv2.imshow', (['"""camera"""', 'frame'], {}), "('camera', frame)\n", (6002, 6019), False, 'import cv2\n'), ((6118, 6132), 'pyttsx3.init', 'pyttsx3.init', ([], {}), '()\n', (6130, 6132), False, 'import pyttsx3\n'), ((4701, 4723), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (4721, 4723), False, 'import traceback\n'), ((6444, 6469), 'winsound.Beep', 'winsound.Beep', (['(1440)', '(1500)'], {}), '(1440, 1500)\n', (6457, 6469), False, 'import winsound\n'), ((5955, 5977), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (5975, 5977), False, 'import traceback\n')] |
"""
Requires matplotlib
pipenv install matplotlib
python plot.py
"""
import yaml
import numpy
import matplotlib.pyplot as plt
from connected_conics import conic, helpers
fullspec = """
- r: [8]
e: [0.0]
d: 6.0
- r: [9]
e: [0.5]
d: 10.0
- r: [11]
e: [1.1]
d: 12.0
"""
fullspec_dict = yaml.safe_load(fullspec)
c = helpers.get_conic_from_fullspec(fullspec_dict, 0)
X = numpy.linspace(0, 6, 1000)
Y = conic.find_val_vectorized(c["rs"], c["es"], c["hds"], c["offsets"], X)
plt.figure()
plt.plot(X, Y)
plt.show()
| [
"connected_conics.conic.find_val_vectorized",
"connected_conics.helpers.get_conic_from_fullspec",
"matplotlib.pyplot.plot",
"yaml.safe_load",
"numpy.linspace",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show"
] | [((296, 320), 'yaml.safe_load', 'yaml.safe_load', (['fullspec'], {}), '(fullspec)\n', (310, 320), False, 'import yaml\n'), ((325, 374), 'connected_conics.helpers.get_conic_from_fullspec', 'helpers.get_conic_from_fullspec', (['fullspec_dict', '(0)'], {}), '(fullspec_dict, 0)\n', (356, 374), False, 'from connected_conics import conic, helpers\n'), ((379, 405), 'numpy.linspace', 'numpy.linspace', (['(0)', '(6)', '(1000)'], {}), '(0, 6, 1000)\n', (393, 405), False, 'import numpy\n'), ((410, 480), 'connected_conics.conic.find_val_vectorized', 'conic.find_val_vectorized', (["c['rs']", "c['es']", "c['hds']", "c['offsets']", 'X'], {}), "(c['rs'], c['es'], c['hds'], c['offsets'], X)\n", (435, 480), False, 'from connected_conics import conic, helpers\n'), ((481, 493), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (491, 493), True, 'import matplotlib.pyplot as plt\n'), ((494, 508), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y'], {}), '(X, Y)\n', (502, 508), True, 'import matplotlib.pyplot as plt\n'), ((509, 519), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (517, 519), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# #
# RMG Website - A Django-powered website for Reaction Mechanism Generator #
# #
# Copyright (c) 2011-2018 Prof. <NAME> (<EMAIL>), #
# Prof. <NAME> (<EMAIL>) and the RMG Team (<EMAIL>) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
import os
import time
import numpy as np
import rmgpy.constants as constants
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from rmgpy.statmech import *
from rmgweb.main.tools import *
from rmgweb.pdep.models import *
from rmgweb.pdep.forms import *
################################################################################
def index(request):
    """
    The Pressure Dependent Networks homepage: lists the current user's
    networks, or nothing for anonymous visitors.
    """
    if request.user.is_authenticated:
        user_networks = Network.objects.filter(user=request.user)
    else:
        user_networks = []
    context = {'networks': user_networks}
    return render(request, 'pdep.html', context)
@login_required
def start(request):
    """
    Create a fresh, untitled Network owned by the logged-in user and
    redirect to that network's main page.
    """
    new_network = Network(title='Untitled Network', user=request.user)
    new_network.save()
    target = reverse('pdep:network-index', args=(new_network.pk,))
    return HttpResponseRedirect(target)
def networkIndex(request, networkKey):
    """
    Render the main page for the Network identified by `networkKey`.

    Collects for the template: size and modification time of every network
    file that exists on disk, plus tables describing the network's species,
    path reactions, and net reactions.
    """
    network_model = get_object_or_404(Network, pk=networkKey)

    # Size (bytes) and mtime for each existing file; the repeated
    # exists/getsize/getmtime pattern is driven by one table instead of six
    # copy-pasted branches.
    file_size = {}
    modification_time = {}
    file_accessors = [
        ('inputFile', network_model.inputFileExists, network_model.getInputFilename),
        ('outputFile', network_model.outputFileExists, network_model.getOutputFilename),
        ('logFile', network_model.logFileExists, network_model.getLogFilename),
        ('surfaceFilePNG', network_model.surfaceFilePNGExists, network_model.getSurfaceFilenamePNG),
        ('surfaceFilePDF', network_model.surfaceFilePDFExists, network_model.getSurfaceFilenamePDF),
        ('surfaceFileSVG', network_model.surfaceFileSVGExists, network_model.getSurfaceFilenameSVG),
    ]
    for key, exists, get_path in file_accessors:
        if exists():
            path = get_path()
            file_size[key] = '{0:.1f}'.format(os.path.getsize(path))
            modification_time[key] = time.ctime(os.path.getmtime(path))

    network = network_model.load()

    # Species table: markup, roles within the network, and which data the
    # species carries (collision/conformer/thermo).
    spc_list = []
    if network is not None:
        for spec in network.get_all_species():
            speciesType = []
            if spec in network.isomers:
                speciesType.append('isomer')
            if any(spec in reactants.species for reactants in network.reactants):
                speciesType.append('reactant')
            if any(spec in products.species for products in network.products):
                speciesType.append('product')
            if spec in network.bath_gas:
                speciesType.append('bath gas')
            collision = 'yes' if spec.transport_data is not None else ''
            conformer = 'yes' if spec.conformer is not None else ''
            thermo = 'yes' if spec.conformer is not None or spec.thermo is not None else ''
            spc_list.append((spec.label, getStructureMarkup(spec), ', '.join(speciesType), collision, conformer, thermo))

    # Path (elementary) reaction table.
    path_rxn_list = []
    if network is not None:
        for rxn in network.path_reactions:
            reactants = ' + '.join(getStructureMarkup(reactant) for reactant in rxn.reactants)
            products = ' + '.join(getStructureMarkup(reactant) for reactant in rxn.products)
            arrow = '⇔' if rxn.reversible else '→'
            conformer = 'yes' if rxn.transition_state.conformer is not None else ''
            kinetics = 'yes' if rxn.kinetics is not None else ''
            path_rxn_list.append((reactants, arrow, products, conformer, kinetics))

    # Net (phenomenological) reaction table.
    net_rxn_list = []
    if network is not None:
        for rxn in network.net_reactions:
            reactants = ' + '.join(getStructureMarkup(reactant) for reactant in rxn.reactants)
            products = ' + '.join(getStructureMarkup(reactant) for reactant in rxn.products)
            arrow = '⇔' if rxn.reversible else '→'
            kinetics = 'yes' if rxn.kinetics is not None else ''
            net_rxn_list.append((reactants, arrow, products, kinetics))

    return render(request, 'networkIndex.html',
                  {'network': network_model,
                   'networkKey': networkKey,
                   'speciesList': spc_list,
                   'pathReactionList': path_rxn_list,
                   'netReactionList': net_rxn_list,
                   'filesize': file_size,
                   'modificationTime': modification_time,
                   })
def networkEditor(request, networkKey):
    """
    Add or edit a Network's input parameters by editing the input file text
    directly in the browser.
    """
    network = get_object_or_404(Network, pk=networkKey)
    if request.method != 'POST':
        # GET: pre-populate the inputText field from the input file on disk
        network.loadInputText()
        form = EditNetworkForm(instance=network)
        return render(request, 'networkEditor.html',
                      {'network': network, 'networkKey': networkKey, 'form': form})
    form = EditNetworkForm(request.POST, instance=network)
    if form.is_valid():
        # Write the edited text back to the input file, persist the model,
        # and return to the network's main page
        network.saveInputText()
        network = form.save()
        return HttpResponseRedirect(reverse('pdep:network-index', args=(network.pk,)))
    # Invalid submission: redisplay the bound form with its errors
    return render(request, 'networkEditor.html',
                  {'network': network, 'networkKey': networkKey, 'form': form})
def networkDelete(request, networkKey):
    """
    Delete the network with the specified networkKey, then redirect the user
    to the pdep index page.
    """
    target = get_object_or_404(Network, pk=networkKey)
    target.delete()
    return HttpResponseRedirect(reverse('pdep:index'))
def networkUpload(request, networkKey):
    """
    Add or edit a Network's input parameters by uploading a replacement
    input file.
    """
    network = get_object_or_404(Network, pk=networkKey)
    if request.method != 'POST':
        # GET: present an empty upload form
        form = UploadNetworkForm(instance=network)
        return render(request, 'networkUpload.html',
                      {'network': network, 'networkKey': networkKey, 'form': form})
    form = UploadNetworkForm(request.POST, request.FILES, instance=network)
    if form.is_valid():
        # Replace the old input file, save the model, and re-read the text
        # into the inputText field before returning to the main page
        network.deleteInputFile()
        network = form.save()
        network.loadInputText()
        return HttpResponseRedirect(reverse('pdep:network-index', args=(network.pk,)))
    # Invalid upload: redisplay the bound form with its errors
    return render(request, 'networkUpload.html',
                  {'network': network, 'networkKey': networkKey, 'form': form})
def networkDrawPNG(request, networkKey):
    """
    Render the potential energy surface of the given Network to a PNG file,
    then send the user back to the network's main page.
    """
    model = get_object_or_404(Network, pk=networkKey)
    model.load()
    # Arkane performs the actual drawing; this can take a while for
    # large networks
    model.pdep.execute(output_file=model.getOutputFilename(),
                       plot=False,
                       file_format='png')
    return HttpResponseRedirect(reverse('pdep:network-index', args=(model.pk,)))
def networkDrawPDF(request, networkKey):
    """
    Render the potential energy surface of the given Network to a PDF file,
    then send the user back to the network's main page.
    """
    model = get_object_or_404(Network, pk=networkKey)
    model.load()
    # Arkane performs the actual drawing; this can take a while for
    # large networks
    model.pdep.execute(output_file=model.getOutputFilename(),
                       plot=False,
                       file_format='pdf')
    return HttpResponseRedirect(reverse('pdep:network-index', args=(model.pk,)))
def networkDrawSVG(request, networkKey):
    """
    Render the potential energy surface of the given Network to an SVG file,
    then send the user back to the network's main page.
    """
    model = get_object_or_404(Network, pk=networkKey)
    model.load()
    # Arkane performs the actual drawing; this can take a while for
    # large networks
    model.pdep.execute(output_file=model.getOutputFilename(),
                       plot=False,
                       file_format='svg')
    return HttpResponseRedirect(reverse('pdep:network-index', args=(model.pk,)))
def networkRun(request, networkKey):
    """
    Run Arkane on the pdep input file for the given Network, then redirect
    back to the network's main page.

    NOTE(review): this is currently identical to networkDrawPNG (both
    execute with plot=False and file_format='png') — kept as-is to preserve
    the existing URL routing.
    """
    model = get_object_or_404(Network, pk=networkKey)
    model.load()
    # Arkane execution may take some time for large networks
    model.pdep.execute(output_file=model.getOutputFilename(),
                       plot=False,
                       file_format='png')
    return HttpResponseRedirect(reverse('pdep:network-index', args=(model.pk,)))
def networkSpecies(request, networkKey, species):
    """
    A view called when a user wants to view details for a single species in
    a given reaction network.

    `species` arrives as the species label; Http404 is raised when no species
    in the network carries that label.
    """
    network_model = get_object_or_404(Network, pk=networkKey)
    network = network_model.load()
    label = species
    for spec in network.get_all_species():
        if spec.label == label:
            species = spec
            break
    else:
        raise Http404
    structure = getStructureMarkup(species)
    E0 = None
    # BUGFIX: has_torsions must be defined even when the species has no
    # conformer; previously the render call below raised NameError in
    # that case.
    has_torsions = False
    if species.conformer:
        conformer = species.conformer
        has_torsions = any(isinstance(mode, HinderedRotor) for mode in conformer.modes)
        if conformer.E0:
            E0 = '{0:g}'.format(conformer.E0.value_si / 4184.)  # convert J/mol to kcal/mol
    return render(request, 'networkSpecies.html',
                  {'network': network_model,
                   'networkKey': networkKey,
                   'species': species,
                   'label': label,
                   'structure': structure,
                   'E0': E0,
                   'hasTorsions': has_torsions,
                   })
def computeMicrocanonicalRateCoefficients(network, T=1000):
    """
    Compute all of the microcanonical rate coefficients k(E) for the given
    network at temperature `T` (in K).

    Returns the tuple ``(Kij, Gnj, Fim, e_list, dens_states, n_isom, n_reac,
    n_prod)`` where Kij/Gnj/Fim are the microcanonical rate arrays for
    isomerization, dissociation, and association, ``e_list`` is the energy
    grid, and ``dens_states`` holds the Boltzmann-rescaled densities of
    states.
    """
    network.T = T
    if network.e_list is None:
        # No energy grains selected yet; choose a default grid
        e_list = network.select_energy_grains(T=2000, grain_size=0.5*4184, grain_count=250)
        network.e_list = e_list
    else:
        e_list = network.e_list
    # Determine the values of some counters
    n_isom = len(network.isomers)
    n_reac = len(network.reactants)
    n_prod = len(network.products)
    # Choose the angular momenta to use to compute k(T,P) values at this temperature
    # (This only applies if the J-rotor is adiabatic)
    if not network.active_j_rotor:
        # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented replacement dtype alias.
        j_list = network.j_list = np.arange(0, 20, 1, dtype=int)
        n_j = network.n_j = len(j_list)
    else:
        j_list = network.j_list = np.array([0], dtype=int)
        n_j = network.n_j = 1
    if not hasattr(network, 'densStates'):
        # Calculate density of states for each isomer and each reactant channel
        # that has the necessary parameters
        network.calculate_densities_of_states()
        # Map the densities of states onto this set of energies
        # Also shift each density of states to a common zero of energy
        network.map_densities_of_states()
        # Use free energy to determine equilibrium ratios of each isomer and product channel
        network.calculate_equilibrium_ratios()
        network.calculate_microcanonical_rates()
        # Rescale densities of states such that, when they are integrated
        # using the Boltzmann factor as a weighting factor, the result is unity
        for i in range(n_isom + n_reac):
            Q = 0.0
            for s in range(n_j):
                Q += np.sum(network.dens_states[i, :, s] * (2 * j_list[s]+1) * np.exp(-e_list / constants.R / T))
            network.dens_states[i, :, :] /= Q
    Kij = network.Kij
    Gnj = network.Gnj
    Fim = network.Fim
    dens_states_0 = network.dens_states
    return Kij, Gnj, Fim, e_list, dens_states_0, n_isom, n_reac, n_prod
def networkPathReaction(request, networkKey, reaction):
    """
    A view called when a user wants to view details for a single path reaction
    in a given reaction network.

    `reaction` is the 1-based index into network.path_reactions; Http404 is
    raised for non-integer or out-of-range values.
    """
    network_model = get_object_or_404(Network, pk=networkKey)
    network = network_model.load()
    try:
        index = int(reaction)
    except ValueError:
        raise Http404
    try:
        reaction = network.path_reactions[index-1]
    except IndexError:
        raise Http404
    e0 = '{0:g}'.format(reaction.transition_state.conformer.E0.value_si / 4184.)  # convert J/mol to kcal/mol
    conformer = reaction.transition_state.conformer
    has_torsions = conformer and any([isinstance(mode, HinderedRotor) for mode in conformer.modes])
    kinetics = reaction.kinetics
    Kij, Gnj, Fim, e_list, dens_states, n_isom, n_reac, n_prod = computeMicrocanonicalRateCoefficients(network)
    reactants = [reactant.species for reactant in network.reactants]
    products = [product.species for product in network.products]
    isomers = [isomer.species[0] for isomer in network.isomers]
    # BUGFIX: initialize both rate lists up front so a reaction that is none
    # of the three recognized types cannot raise UnboundLocalError below.
    kf_list = []
    kr_list = []
    if reaction.is_isomerization():
        reac = isomers.index(reaction.reactants[0])
        prod = isomers.index(reaction.products[0])
        kf_list = Kij[prod, reac, :]
        kr_list = Kij[reac, prod, :]
    elif reaction.is_association():
        # Association channel: only the reverse (bimolecular -> isomer)
        # microcanonical rates are available
        if reaction.reactants in products:
            reac = products.index(reaction.reactants) + n_reac
        else:
            reac = reactants.index(reaction.reactants)
        prod = isomers.index(reaction.products[0])
        kr_list = Gnj[reac, prod, :]
    elif reaction.is_dissociation():
        # Dissociation channel: only the forward (isomer -> bimolecular)
        # microcanonical rates are available
        reac = isomers.index(reaction.reactants[0])
        if reaction.products in products:
            prod = products.index(reaction.products) + n_reac
        else:
            prod = reactants.index(reaction.products)
        kf_list = Gnj[prod, reac, :]
    microcanonical_rates = {
        'Edata': list(e_list),
        'kfdata': list(kf_list),
        'krdata': list(kr_list),
    }
    reactants_render = ' + '.join([getStructureMarkup(reactant) for reactant in reaction.reactants])
    products_render = ' + '.join([getStructureMarkup(product) for product in reaction.products])
    arrow = '⇔' if reaction.reversible else '→'
    return render(request, 'networkPathReaction.html',
                  {'network': network_model,
                   'networkKey': networkKey,
                   'reaction': reaction,
                   'index': index,
                   'reactants': reactants_render,
                   'products': products_render,
                   'arrow': arrow,
                   'E0': e0,
                   'conformer': conformer,
                   'hasTorsions': has_torsions,
                   'kinetics': kinetics,
                   'microcanonicalRates': microcanonical_rates,
                   })
def networkNetReaction(request, networkKey, reaction):
    """
    Show details for one net (phenomenological) reaction of a network.

    `reaction` is the 1-based index into network.net_reactions; a
    non-integer or out-of-range value raises Http404.
    """
    network_model = get_object_or_404(Network, pk=networkKey)
    network = network_model.load()
    try:
        index = int(reaction)
    except ValueError:
        raise Http404
    try:
        reaction = network.net_reactions[index-1]
    except IndexError:
        raise Http404
    # Build the HTML markup for each side of the reaction
    reactant_markup = [getStructureMarkup(spc) for spc in reaction.reactants]
    product_markup = [getStructureMarkup(spc) for spc in reaction.products]
    context = {
        'network': network_model,
        'networkKey': networkKey,
        'reaction': reaction,
        'index': index,
        'reactants': ' + '.join(reactant_markup),
        'products': ' + '.join(product_markup),
        'arrow': '⇔' if reaction.reversible else '→',
        'kinetics': reaction.kinetics,
    }
    return render(request, 'networkNetReaction.html', context)
def networkPlotKinetics(request, networkKey):
    """
    Generate k(T,P) vs. T and k(T,P) vs. P plots for all of the net reactions
    involving a given configuration as the reactant.
    """
    network_model = get_object_or_404(Network, pk=networkKey)
    network = network_model.load()
    configurations = []
    for isomer in network.isomers:
        configurations.append([isomer])
    configurations.extend(network.reactants)
    # configurations.extend(network.products)
    config_labels = []
    # BUGFIX: iterate over `configurations` — the original looped over the
    # still-empty `config_labels`, so no labels were ever built and the
    # reactant drop-down stayed empty.
    for configuration in configurations:
        labels = [spec.label for spec in configuration]
        labels.sort()
        config_labels.append(u' + '.join(labels))
    # Defaults used until the user submits the form
    source = configurations[0]
    T = 1000
    P = 1e5
    if request.method == 'POST':
        form = PlotKineticsForm(config_labels, request.POST)
        if form.is_valid():
            source = configurations[config_labels.index(form.cleaned_data['reactant'])]
            T = form.cleaned_data['T']
            P = form.cleaned_data['P'] * 1e5  # bar -> Pa
    else:
        form = PlotKineticsForm(config_labels)
    # Collect the kinetics of every net reaction starting from `source`
    kineticsSet = {}
    for rxn in network.net_reactions:
        if rxn.reactants == source:
            products = u' + '.join([spec.label for spec in rxn.products])
            kineticsSet[products] = rxn.kinetics
    return render(request, 'networkPlotKinetics.html',
                  {'form': form,
                   'network': network_model,
                   'networkKey': networkKey,
                   'configurations': configurations,
                   'source': source,
                   'kineticsSet': kineticsSet,
                   'T': T,
                   'P': P,
                   })
def networkPlotMicro(request, networkKey):
    """
    A view for showing plots of items that are functions of energy, i.e.
    densities of states rho(E) and microcanonical rate coefficients k(E).
    """
    network_model = get_object_or_404(Network, pk=networkKey)
    network = network_model.load()
    Kij, Gnj, Fim, e_list, dens_states, n_isom, n_reac, n_prod = computeMicrocanonicalRateCoefficients(network)
    dens_states_data = []
    reactants = [reactant.species for reactant in network.reactants]
    products = [product.species for product in network.products]
    isomers = [isomer.species[0] for isomer in network.isomers]
    # Density-of-states curves: one per isomer, then one per reactant channel
    # (reactant channels are stored after the isomers in dens_states)
    for i, species in enumerate(isomers):
        dens_states_data.append({
            'label': species.label,
            'Edata': list(e_list),
            'rhodata': list(dens_states[i, :]),
        })
    for n, spc_list in enumerate(reactants):
        dens_states_data.append({
            'label': ' + '.join([species.label for species in spc_list]),
            'Edata': list(e_list),
            'rhodata': list(dens_states[n + n_isom, :]),
        })
    micro_kinetics_data = []
    for reaction in network.path_reactions:
        reactants_render = ' + '.join([reactant.label for reactant in reaction.reactants])
        arrow = '='
        products_render = ' + '.join([product.label for product in reaction.products])
        # BUGFIX: reset both rate lists for every reaction; previously a
        # reaction matching none of the branches below silently reused the
        # previous iteration's kf_list/kr_list (or raised UnboundLocalError
        # on the first iteration).
        kf_list = []
        kr_list = []
        if reaction.is_isomerization():
            if reaction.reactants[0] in isomers and reaction.products[0] in isomers:
                reac = isomers.index(reaction.reactants[0])
                prod = isomers.index(reaction.products[0])
                kf_list = Kij[prod, reac, :]
                kr_list = Kij[reac, prod, :]
            elif reaction.reactants[0] in isomers and reaction.products in products:
                reac = isomers.index(reaction.reactants[0])
                prod = products.index(reaction.products) + n_reac
                kf_list = Gnj[prod, reac, :]
            elif reaction.reactants in products and reaction.products[0] in isomers:
                reac = products.index(reaction.reactants) + n_reac
                prod = isomers.index(reaction.products[0])
                kr_list = Gnj[reac, prod, :]
        elif reaction.is_association():
            # Only the reverse (bimolecular -> isomer) rates are available
            if reaction.reactants in products:
                reac = products.index(reaction.reactants) + n_reac
            else:
                reac = reactants.index(reaction.reactants)
            prod = isomers.index(reaction.products[0])
            kr_list = Gnj[reac, prod, :]
        elif reaction.is_dissociation():
            # Only the forward (isomer -> bimolecular) rates are available
            reac = isomers.index(reaction.reactants[0])
            if reaction.products in products:
                prod = products.index(reaction.products) + n_reac
            else:
                prod = reactants.index(reaction.products)
            kf_list = Gnj[prod, reac, :]
        if len(kf_list) > 0:
            micro_kinetics_data.append({
                'label': '{0} {1} {2}'.format(reactants_render, arrow, products_render),
                'Edata': list(e_list),
                'kdata': list(kf_list),
            })
        if len(kr_list) > 0:
            micro_kinetics_data.append({
                'label': '{0} {1} {2}'.format(products_render, arrow, reactants_render),
                'Edata': list(e_list),
                'kdata': list(kr_list),
            })
    return render(request, 'networkPlotMicro.html',
                  {'network': network_model,
                   'networkKey': networkKey,
                   'densityOfStatesData': dens_states_data,
                   'microKineticsData': micro_kinetics_data,
                   })
| [
"django.shortcuts.render",
"django.shortcuts.get_object_or_404",
"numpy.exp",
"numpy.array",
"django.urls.reverse",
"numpy.arange"
] | [((2799, 2851), 'django.shortcuts.render', 'render', (['request', '"""pdep.html"""', "{'networks': networks}"], {}), "(request, 'pdep.html', {'networks': networks})\n", (2805, 2851), False, 'from django.shortcuts import render, get_object_or_404\n'), ((3472, 3513), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Network'], {'pk': 'networkKey'}), '(Network, pk=networkKey)\n', (3489, 3513), False, 'from django.shortcuts import render, get_object_or_404\n'), ((7256, 7513), 'django.shortcuts.render', 'render', (['request', '"""networkIndex.html"""', "{'network': network_model, 'networkKey': networkKey, 'speciesList':\n spc_list, 'pathReactionList': path_rxn_list, 'netReactionList':\n net_rxn_list, 'filesize': file_size, 'modificationTime': modification_time}"], {}), "(request, 'networkIndex.html', {'network': network_model,\n 'networkKey': networkKey, 'speciesList': spc_list, 'pathReactionList':\n path_rxn_list, 'netReactionList': net_rxn_list, 'filesize': file_size,\n 'modificationTime': modification_time})\n", (7262, 7513), False, 'from django.shortcuts import render, get_object_or_404\n'), ((7845, 7886), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Network'], {'pk': 'networkKey'}), '(Network, pk=networkKey)\n', (7862, 7886), False, 'from django.shortcuts import render, get_object_or_404\n'), ((8512, 8615), 'django.shortcuts.render', 'render', (['request', '"""networkEditor.html"""', "{'network': network, 'networkKey': networkKey, 'form': form}"], {}), "(request, 'networkEditor.html', {'network': network, 'networkKey':\n networkKey, 'form': form})\n", (8518, 8615), False, 'from django.shortcuts import render, get_object_or_404\n'), ((8771, 8812), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Network'], {'pk': 'networkKey'}), '(Network, pk=networkKey)\n', (8788, 8812), False, 'from django.shortcuts import render, get_object_or_404\n'), ((9066, 9107), 'django.shortcuts.get_object_or_404', 'get_object_or_404', 
(['Network'], {'pk': 'networkKey'}), '(Network, pk=networkKey)\n', (9083, 9107), False, 'from django.shortcuts import render, get_object_or_404\n'), ((9740, 9843), 'django.shortcuts.render', 'render', (['request', '"""networkUpload.html"""', "{'network': network, 'networkKey': networkKey, 'form': form}"], {}), "(request, 'networkUpload.html', {'network': network, 'networkKey':\n networkKey, 'form': form})\n", (9746, 9843), False, 'from django.shortcuts import render, get_object_or_404\n'), ((10032, 10073), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Network'], {'pk': 'networkKey'}), '(Network, pk=networkKey)\n', (10049, 10073), False, 'from django.shortcuts import render, get_object_or_404\n'), ((10607, 10648), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Network'], {'pk': 'networkKey'}), '(Network, pk=networkKey)\n', (10624, 10648), False, 'from django.shortcuts import render, get_object_or_404\n'), ((11181, 11222), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Network'], {'pk': 'networkKey'}), '(Network, pk=networkKey)\n', (11198, 11222), False, 'from django.shortcuts import render, get_object_or_404\n'), ((11737, 11778), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Network'], {'pk': 'networkKey'}), '(Network, pk=networkKey)\n', (11754, 11778), False, 'from django.shortcuts import render, get_object_or_404\n'), ((12314, 12355), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Network'], {'pk': 'networkKey'}), '(Network, pk=networkKey)\n', (12331, 12355), False, 'from django.shortcuts import render, get_object_or_404\n'), ((12914, 13113), 'django.shortcuts.render', 'render', (['request', '"""networkSpecies.html"""', "{'network': network_model, 'networkKey': networkKey, 'species': species,\n 'label': label, 'structure': structure, 'E0': E0, 'hasTorsions':\n has_torsions}"], {}), "(request, 'networkSpecies.html', {'network': network_model,\n 'networkKey': networkKey, 'species': species, 
'label': label,\n 'structure': structure, 'E0': E0, 'hasTorsions': has_torsions})\n", (12920, 13113), False, 'from django.shortcuts import render, get_object_or_404\n'), ((16336, 16377), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Network'], {'pk': 'networkKey'}), '(Network, pk=networkKey)\n', (16353, 16377), False, 'from django.shortcuts import render, get_object_or_404\n'), ((18728, 19085), 'django.shortcuts.render', 'render', (['request', '"""networkPathReaction.html"""', "{'network': network_model, 'networkKey': networkKey, 'reaction': reaction,\n 'index': index, 'reactants': reactants_render, 'products':\n products_render, 'arrow': arrow, 'E0': e0, 'conformer': conformer,\n 'hasTorsions': has_torsions, 'kinetics': kinetics,\n 'microcanonicalRates': microcanonical_rates}"], {}), "(request, 'networkPathReaction.html', {'network': network_model,\n 'networkKey': networkKey, 'reaction': reaction, 'index': index,\n 'reactants': reactants_render, 'products': products_render, 'arrow':\n arrow, 'E0': e0, 'conformer': conformer, 'hasTorsions': has_torsions,\n 'kinetics': kinetics, 'microcanonicalRates': microcanonical_rates})\n", (18734, 19085), False, 'from django.shortcuts import render, get_object_or_404\n'), ((19522, 19563), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Network'], {'pk': 'networkKey'}), '(Network, pk=networkKey)\n', (19539, 19563), False, 'from django.shortcuts import render, get_object_or_404\n'), ((20077, 20307), 'django.shortcuts.render', 'render', (['request', '"""networkNetReaction.html"""', "{'network': network_model, 'networkKey': networkKey, 'reaction': reaction,\n 'index': index, 'reactants': reactants, 'products': products, 'arrow':\n arrow, 'kinetics': kinetics}"], {}), "(request, 'networkNetReaction.html', {'network': network_model,\n 'networkKey': networkKey, 'reaction': reaction, 'index': index,\n 'reactants': reactants, 'products': products, 'arrow': arrow,\n 'kinetics': kinetics})\n", (20083, 20307), 
False, 'from django.shortcuts import render, get_object_or_404\n'), ((20683, 20724), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Network'], {'pk': 'networkKey'}), '(Network, pk=networkKey)\n', (20700, 20724), False, 'from django.shortcuts import render, get_object_or_404\n'), ((21782, 22001), 'django.shortcuts.render', 'render', (['request', '"""networkPlotKinetics.html"""', "{'form': form, 'network': network_model, 'networkKey': networkKey,\n 'configurations': configurations, 'source': source, 'kineticsSet':\n kineticsSet, 'T': T, 'P': P}"], {}), "(request, 'networkPlotKinetics.html', {'form': form, 'network':\n network_model, 'networkKey': networkKey, 'configurations':\n configurations, 'source': source, 'kineticsSet': kineticsSet, 'T': T,\n 'P': P})\n", (21788, 22001), False, 'from django.shortcuts import render, get_object_or_404\n'), ((22390, 22431), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Network'], {'pk': 'networkKey'}), '(Network, pk=networkKey)\n', (22407, 22431), False, 'from django.shortcuts import render, get_object_or_404\n'), ((25960, 26145), 'django.shortcuts.render', 'render', (['request', '"""networkPlotMicro.html"""', "{'network': network_model, 'networkKey': networkKey, 'densityOfStatesData':\n dens_states_data, 'microKineticsData': micro_kinetics_data}"], {}), "(request, 'networkPlotMicro.html', {'network': network_model,\n 'networkKey': networkKey, 'densityOfStatesData': dens_states_data,\n 'microKineticsData': micro_kinetics_data})\n", (25966, 26145), False, 'from django.shortcuts import render, get_object_or_404\n'), ((3235, 3284), 'django.urls.reverse', 'reverse', (['"""pdep:network-index"""'], {'args': '(network.pk,)'}), "('pdep:network-index', args=(network.pk,))\n", (3242, 3284), False, 'from django.urls import reverse\n'), ((8866, 8887), 'django.urls.reverse', 'reverse', (['"""pdep:index"""'], {}), "('pdep:index')\n", (8873, 8887), False, 'from django.urls import reverse\n'), ((10358, 10413), 
'django.urls.reverse', 'reverse', (['"""pdep:network-index"""'], {'args': '(network_model.pk,)'}), "('pdep:network-index', args=(network_model.pk,))\n", (10365, 10413), False, 'from django.urls import reverse\n'), ((10933, 10988), 'django.urls.reverse', 'reverse', (['"""pdep:network-index"""'], {'args': '(network_model.pk,)'}), "('pdep:network-index', args=(network_model.pk,))\n", (10940, 10988), False, 'from django.urls import reverse\n'), ((11507, 11562), 'django.urls.reverse', 'reverse', (['"""pdep:network-index"""'], {'args': '(network_model.pk,)'}), "('pdep:network-index', args=(network_model.pk,))\n", (11514, 11562), False, 'from django.urls import reverse\n'), ((12063, 12118), 'django.urls.reverse', 'reverse', (['"""pdep:network-index"""'], {'args': '(network_model.pk,)'}), "('pdep:network-index', args=(network_model.pk,))\n", (12070, 12118), False, 'from django.urls import reverse\n'), ((14802, 14829), 'numpy.arange', 'np.arange', (['(0)', '(20)', '(1)', 'np.int'], {}), '(0, 20, 1, np.int)\n', (14811, 14829), True, 'import numpy as np\n'), ((14914, 14935), 'numpy.array', 'np.array', (['[0]', 'np.int'], {}), '([0], np.int)\n', (14922, 14935), True, 'import numpy as np\n'), ((8264, 8313), 'django.urls.reverse', 'reverse', (['"""pdep:network-index"""'], {'args': '(network.pk,)'}), "('pdep:network-index', args=(network.pk,))\n", (8271, 8313), False, 'from django.urls import reverse\n'), ((9591, 9640), 'django.urls.reverse', 'reverse', (['"""pdep:network-index"""'], {'args': '(network.pk,)'}), "('pdep:network-index', args=(network.pk,))\n", (9598, 9640), False, 'from django.urls import reverse\n'), ((15853, 15886), 'numpy.exp', 'np.exp', (['(-e_list / constants.R / T)'], {}), '(-e_list / constants.R / T)\n', (15859, 15886), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.